From dbd750555f66e3c2fea102275ffa809188ee0d5d Mon Sep 17 00:00:00 2001 From: Yuna Verheyden Date: Fri, 20 Dec 2024 17:02:16 +0100 Subject: [PATCH] Update all open-telemetry packages to 0.116.0 (#4466) --- .goreleaser.yml | 2 +- CHANGELOG.md | 5 + cmd/tempo-query/tempo/plugin.go | 16 +- cmd/tempo-serverless/cloud-run/go.mod | 71 +- cmd/tempo-serverless/cloud-run/go.sum | 185 +- cmd/tempo-serverless/lambda/go.mod | 73 +- cmd/tempo-serverless/lambda/go.sum | 185 +- .../docker-compose/alloy/docker-compose.yaml | 7 +- .../cross-cluster/tempo-distributed-a.yaml | 12 +- .../cross-cluster/tempo-distributed-b.yaml | 12 +- .../docker-compose/debug/docker-compose.yaml | 9 +- .../distributed/tempo-distributed.yaml | 14 +- example/docker-compose/local/tempo.yaml | 12 +- .../docker-compose/multi-tenant/tempo.yaml | 12 +- .../docker-compose.yaml | 7 +- .../otel-collector/docker-compose.yaml | 7 +- example/docker-compose/shared/tempo.yaml | 12 +- go.mod | 304 +- go.sum | 648 +- integration/bench/config.yaml | 3 +- integration/e2e/config-cross-cluster-a.yaml | 3 +- integration/e2e/config-cross-cluster-b.yaml | 3 +- integration/e2e/config-encodings.tmpl.yaml | 3 + integration/e2e/config-https.yaml | 3 +- integration/e2e/config-limits-429.yaml | 1 + .../e2e/config-limits-partial-success.yaml | 1 + integration/e2e/config-limits-query.yaml | 3 +- integration/e2e/config-limits.yaml | 3 + .../config-metrics-generator-targetinfo.yaml | 3 +- integration/e2e/config-metrics-generator.yaml | 3 +- .../e2e/config-multi-tenant-local.yaml | 3 + integration/e2e/config-plugin-test.yaml | 3 + integration/e2e/config-query-range.yaml | 3 + .../config-all-in-one-azurite.yaml | 2 +- .../deployments/config-all-in-one-gcs.yaml | 1 + .../deployments/config-all-in-one-local.yaml | 3 + .../e2e/deployments/config-all-in-one-s3.yaml | 1 + .../config-microservices.tmpl.yaml | 3 +- .../config-scalable-single-binary.yaml | 3 +- integration/e2e/receivers_test.go | 6 +- .../e2e/serverless/config-serverless-gcr.yaml | 1 + .../serverless/config-serverless-lambda.yaml | 1 + integration/microservices/tempo.yaml | 7 +- integration/util/util.go | 8 +- modules/distributor/forwarder/forwarder.go | 9 +- .../distributor/receiver/metrics_provider.go | 12 +- modules/distributor/receiver/shim.go | 10 +- modules/distributor/receiver/shim_test.go | 4 +- .../generator/registry/native_histogram.go | 3 +- modules/generator/storage/instance_test.go | 29 +- tools/packaging/tempo.yaml | 48 + vendor/cloud.google.com/go/auth/CHANGES.md | 117 + vendor/cloud.google.com/go/auth/README.md | 42 +- vendor/cloud.google.com/go/auth/auth.go | 36 +- .../go/auth/credentials/detect.go | 6 +- .../go/auth/credentials/filetypes.go | 18 +- .../go/auth/credentials/idtoken/compute.go | 4 +- .../go/auth/credentials/idtoken/idtoken.go | 8 +- .../go/auth/credentials/idtoken/validate.go | 4 +- .../auth/credentials/impersonate/idtoken.go | 2 +- .../credentials/impersonate/impersonate.go | 2 +- .../internal/externalaccount/aws_provider.go | 38 +- .../externalaccount/externalaccount.go | 44 +- .../internal/externalaccount/url_provider.go | 1 + .../internal/externalaccount/x509_provider.go | 63 + .../go/auth/credentials/selfsignedjwt.go | 4 + .../go/auth/grpctransport/directpath.go | 13 +- .../go/auth/grpctransport/grpctransport.go | 82 +- .../go/auth/httptransport/httptransport.go | 2 + .../go/auth/httptransport/transport.go | 61 +- .../go/auth/internal/compute/compute.go | 66 + .../go/auth/internal/compute/manufacturer.go | 22 + .../internal/compute/manufacturer_linux.go | 23 + 
.../internal/compute/manufacturer_windows.go | 46 + .../go/auth/internal/credsfile/filetype.go | 30 +- .../go/auth/internal/internal.go | 32 +- .../go/auth/internal/transport/cba.go | 90 +- .../transport/cert/enterprise_cert.go | 8 +- .../transport/cert/secureconnect_cert.go | 10 +- .../internal/transport/cert/workload_cert.go | 5 +- .../go/auth/internal/transport/s2a.go | 100 +- .../go/auth/internal/transport/transport.go | 6 +- .../go/auth/oauth2adapt/CHANGES.md | 28 + .../go/auth/oauth2adapt/oauth2adapt.go | 46 +- .../cloud.google.com/go/auth/threelegged.go | 2 +- .../go/compute/metadata/CHANGES.md | 14 + .../go/compute/metadata/metadata.go | 3 + .../go/compute/metadata/retry_linux.go | 9 +- vendor/cloud.google.com/go/iam/CHANGES.md | 7 + .../sdk/storage/azblob/CHANGELOG.md | 19 + .../sdk/storage/azblob/appendblob/client.go | 7 + .../sdk/storage/azblob/assets.json | 2 +- .../sdk/storage/azblob/blob/client.go | 20 +- .../sdk/storage/azblob/blob/models.go | 2 +- .../storage/azblob/bloberror/error_codes.go | 2 + .../storage/azblob/blockblob/chunkwriting.go | 4 +- .../sdk/storage/azblob/blockblob/client.go | 9 +- .../sdk/storage/azblob/ci.yml | 1 + .../sdk/storage/azblob/common.go | 2 +- .../storage/azblob/internal/base/clients.go | 5 +- .../azblob/internal/exported/blob_batch.go | 2 +- .../azblob/internal/exported/exported.go | 2 +- .../azblob/internal/exported/version.go | 2 +- .../azblob/internal/generated/autorest.md | 10 +- .../generated/zz_appendblob_client.go | 247 +- .../internal/generated/zz_blob_client.go | 1337 +-- .../internal/generated/zz_blockblob_client.go | 331 +- .../azblob/internal/generated/zz_constants.go | 3 +- .../internal/generated/zz_container_client.go | 560 +- .../azblob/internal/generated/zz_models.go | 1233 +-- .../internal/generated/zz_models_serde.go | 97 +- .../azblob/internal/generated/zz_options.go | 1469 +++ .../internal/generated/zz_pageblob_client.go | 478 +- .../internal/generated/zz_response_types.go | 156 +- .../internal/generated/zz_service_client.go | 140 +- .../internal/generated/zz_time_rfc1123.go | 23 +- .../internal/generated/zz_time_rfc3339.go | 35 +- .../internal/generated/zz_xml_helper.go | 26 +- .../storage/azblob/internal/shared/shared.go | 27 +- .../sdk/storage/azblob/pageblob/client.go | 7 + .../apps/confidential/confidential.go | 9 +- .../apps/internal/base/base.go | 30 +- .../apps/internal/json/json.go | 54 +- .../apps/internal/local/server.go | 3 +- .../apps/internal/oauth/oauth.go | 2 +- .../internal/oauth/ops/authority/authority.go | 23 +- vendor/github.com/IBM/sarama/Dockerfile.kafka | 2 +- .../github.com/IBM/sarama/async_producer.go | 4 +- vendor/github.com/IBM/sarama/config.go | 2 +- .../github.com/IBM/sarama/docker-compose.yml | 1 - .../github.com/IBM/sarama/offset_manager.go | 28 +- .../IBM/sarama/transaction_manager.go | 2 +- .../VividCortex/gohistogram/.gitignore | 2 - .../VividCortex/gohistogram/README.md | 80 - .../VividCortex/gohistogram/histogram.go | 23 - .../gohistogram/numerichistogram.go | 160 - .../gohistogram/weightedhistogram.go | 190 - .../alecthomas/units/renovate.json5 | 4 + vendor/github.com/alicebob/gopher-json/doc.go | 40 +- vendor/github.com/antchfx/xmlquery/.gitignore | 32 + .../gohistogram => antchfx/xmlquery}/LICENSE | 4 +- vendor/github.com/antchfx/xmlquery/README.md | 302 + vendor/github.com/antchfx/xmlquery/cache.go | 43 + .../antchfx/xmlquery/cached_reader.go | 69 + vendor/github.com/antchfx/xmlquery/node.go | 341 + vendor/github.com/antchfx/xmlquery/options.go | 33 + 
vendor/github.com/antchfx/xmlquery/parse.go | 414 + vendor/github.com/antchfx/xmlquery/query.go | 304 + vendor/github.com/antchfx/xpath/.gitignore | 32 + .../xpath}/LICENSE | 6 +- vendor/github.com/antchfx/xpath/README.md | 167 + vendor/github.com/antchfx/xpath/build.go | 718 ++ vendor/github.com/antchfx/xpath/cache.go | 80 + vendor/github.com/antchfx/xpath/func.go | 679 ++ vendor/github.com/antchfx/xpath/func_go110.go | 16 + .../antchfx/xpath/func_pre_go110.go | 22 + vendor/github.com/antchfx/xpath/operator.go | 288 + vendor/github.com/antchfx/xpath/parse.go | 1256 +++ vendor/github.com/antchfx/xpath/query.go | 1437 +++ vendor/github.com/antchfx/xpath/xpath.go | 176 + .../thrift/lib/go/thrift/configuration.go | 76 +- .../apache/thrift/lib/go/thrift/exception.go | 24 +- .../thrift/lib/go/thrift/header_protocol.go | 5 + .../thrift/lib/go/thrift/header_transport.go | 80 +- .../apache/thrift/lib/go/thrift/protocol.go | 6 +- .../lib/go/thrift/simple_json_protocol.go | 47 +- .../thrift/lib/go/thrift/ssl_server_socket.go | 3 + .../aws/aws-msk-iam-sasl-signer-go/LICENSE | 175 + .../aws/aws-msk-iam-sasl-signer-go/NOTICE | 1 + .../signer/msk_auth_token_provider.go | 305 + .../signer/version.go | 3 + .../aws/aws-sdk-go-v2/LICENSE.txt} | 0 .../github.com/aws/aws-sdk-go-v2/NOTICE.txt | 3 + .../aws/aws-sdk-go-v2/aws/config.go | 197 + .../aws/aws-sdk-go-v2/aws/context.go | 22 + .../aws/aws-sdk-go-v2/aws/credential_cache.go | 224 + .../aws/aws-sdk-go-v2/aws/credentials.go | 170 + .../aws/aws-sdk-go-v2/aws/defaults/auto.go | 38 + .../aws/defaults/configuration.go | 43 + .../aws-sdk-go-v2/aws/defaults/defaults.go | 50 + .../aws/aws-sdk-go-v2/aws/defaults/doc.go | 2 + .../aws/aws-sdk-go-v2/aws/defaultsmode.go | 95 + .../github.com/aws/aws-sdk-go-v2/aws/doc.go | 62 + .../aws/aws-sdk-go-v2/aws/endpoints.go | 229 + .../aws/aws-sdk-go-v2/aws/errors.go | 9 + .../aws/aws-sdk-go-v2/aws/from_ptr.go | 365 + .../aws-sdk-go-v2/aws/go_module_metadata.go | 6 + .../aws/aws-sdk-go-v2/aws/logging.go | 119 + .../aws/aws-sdk-go-v2/aws/logging_generate.go | 95 + .../aws-sdk-go-v2/aws/middleware/metadata.go | 201 + .../aws/middleware/middleware.go | 168 + .../aws-sdk-go-v2/aws/middleware/osname.go | 24 + .../aws/middleware/osname_go115.go | 24 + .../aws/middleware/recursion_detection.go | 94 + .../aws/middleware/request_id.go | 27 + .../aws/middleware/request_id_retriever.go | 49 + .../aws/middleware/user_agent.go | 261 + .../aws-sdk-go-v2/aws/protocol/query/array.go | 72 + .../aws/protocol/query/encoder.go | 80 + .../aws-sdk-go-v2/aws/protocol/query/map.go | 78 + .../aws/protocol/query/middleware.go | 62 + .../aws/protocol/query/object.go | 69 + .../aws-sdk-go-v2/aws/protocol/query/value.go | 115 + .../aws/protocol/restjson/decoder_util.go | 85 + .../aws/protocol/xml/error_utils.go | 48 + .../aws/ratelimit/token_bucket.go | 96 + .../aws/ratelimit/token_rate_limit.go | 83 + .../aws/aws-sdk-go-v2/aws/request.go | 25 + .../aws/aws-sdk-go-v2/aws/retry/adaptive.go | 156 + .../aws/retry/adaptive_ratelimit.go | 158 + .../aws/retry/adaptive_token_bucket.go | 83 + .../aws/aws-sdk-go-v2/aws/retry/doc.go | 80 + .../aws/aws-sdk-go-v2/aws/retry/errors.go | 20 + .../aws-sdk-go-v2/aws/retry/jitter_backoff.go | 49 + .../aws/aws-sdk-go-v2/aws/retry/metadata.go | 52 + .../aws/aws-sdk-go-v2/aws/retry/middleware.go | 330 + .../aws/aws-sdk-go-v2/aws/retry/retry.go | 90 + .../aws/retry/retryable_error.go | 201 + .../aws/aws-sdk-go-v2/aws/retry/standard.go | 258 + .../aws-sdk-go-v2/aws/retry/throttle_error.go | 60 + 
.../aws-sdk-go-v2/aws/retry/timeout_error.go | 52 + .../aws/aws-sdk-go-v2/aws/retryer.go | 127 + .../aws/aws-sdk-go-v2/aws/runtime.go | 14 + .../aws/signer/internal/v4/cache.go | 115 + .../aws/signer/internal/v4/const.go | 40 + .../aws/signer/internal/v4/header_rules.go | 82 + .../aws/signer/internal/v4/headers.go | 71 + .../aws/signer/internal/v4/hmac.go | 13 + .../aws/signer/internal/v4/host.go | 75 + .../aws/signer/internal/v4/scope.go | 13 + .../aws/signer/internal/v4/time.go | 36 + .../aws/signer/internal/v4/util.go | 80 + .../aws-sdk-go-v2/aws/signer/v4/middleware.go | 408 + .../aws/signer/v4/presign_middleware.go | 127 + .../aws/aws-sdk-go-v2/aws/signer/v4/stream.go | 86 + .../aws/aws-sdk-go-v2/aws/signer/v4/v4.go | 548 + .../aws/aws-sdk-go-v2/aws/to_ptr.go | 297 + .../aws/transport/http/client.go | 310 + .../aws/transport/http/content_type.go | 42 + .../aws/transport/http/response_error.go | 33 + .../http/response_error_middleware.go | 54 + .../aws/transport/http/timeout_read_closer.go | 104 + .../github.com/aws/aws-sdk-go-v2/aws/types.go | 42 + .../aws/aws-sdk-go-v2/aws/version.go | 8 + .../aws/aws-sdk-go-v2/config/CHANGELOG.md | 479 + .../aws/aws-sdk-go-v2/config/LICENSE.txt} | 0 .../aws/aws-sdk-go-v2/config/config.go | 213 + .../aws/aws-sdk-go-v2/config/defaultsmode.go | 47 + .../aws/aws-sdk-go-v2/config/doc.go | 20 + .../aws/aws-sdk-go-v2/config/env_config.go | 738 ++ .../aws/aws-sdk-go-v2/config/generate.go | 4 + .../config/go_module_metadata.go | 6 + .../aws/aws-sdk-go-v2/config/load_options.go | 1046 ++ .../aws/aws-sdk-go-v2/config/local.go | 51 + .../aws/aws-sdk-go-v2/config/provider.go | 670 ++ .../aws/aws-sdk-go-v2/config/resolve.go | 341 + .../config/resolve_bearer_token.go | 122 + .../config/resolve_credentials.go | 499 + .../aws/aws-sdk-go-v2/config/shared_config.go | 1492 +++ .../aws-sdk-go-v2/credentials/CHANGELOG.md | 409 + .../aws-sdk-go-v2/credentials/LICENSE.txt} | 0 .../aws/aws-sdk-go-v2/credentials/doc.go | 4 + .../credentials/ec2rolecreds/doc.go | 58 + .../credentials/ec2rolecreds/provider.go | 229 + .../endpointcreds/internal/client/client.go | 148 + .../internal/client/middleware.go | 120 + .../credentials/endpointcreds/provider.go | 136 + .../credentials/go_module_metadata.go | 6 + .../credentials/processcreds/doc.go | 92 + .../credentials/processcreds/provider.go | 281 + .../aws-sdk-go-v2/credentials/ssocreds/doc.go | 81 + .../credentials/ssocreds/sso_cached_token.go | 233 + .../ssocreds/sso_credentials_provider.go | 152 + .../ssocreds/sso_token_provider.go | 147 + .../credentials/static_provider.go | 53 + .../stscreds/assume_role_provider.go | 320 + .../stscreds/web_identity_provider.go | 150 + .../feature/ec2/imds/CHANGELOG.md | 253 + .../feature/ec2/imds/LICENSE.txt} | 0 .../feature/ec2/imds/api_client.go | 348 + .../feature/ec2/imds/api_op_GetDynamicData.go | 76 + .../feature/ec2/imds/api_op_GetIAMInfo.go | 102 + .../api_op_GetInstanceIdentityDocument.go | 109 + .../feature/ec2/imds/api_op_GetMetadata.go | 76 + .../feature/ec2/imds/api_op_GetRegion.go | 72 + .../feature/ec2/imds/api_op_GetToken.go | 118 + .../feature/ec2/imds/api_op_GetUserData.go | 60 + .../aws/aws-sdk-go-v2/feature/ec2/imds/doc.go | 11 + .../feature/ec2/imds/go_module_metadata.go | 6 + .../ec2/imds/internal/config/resolvers.go | 114 + .../feature/ec2/imds/request_middleware.go | 285 + .../feature/ec2/imds/token_provider.go | 261 + .../aws/aws-sdk-go-v2/internal/auth/scheme.go | 186 + .../internal/configsources/CHANGELOG.md | 223 + .../internal/configsources/LICENSE.txt} | 0 
.../internal/configsources/config.go | 65 + .../internal/configsources/endpoints.go | 57 + .../configsources/go_module_metadata.go | 6 + .../internal/endpoints/awsrulesfn/arn.go | 94 + .../internal/endpoints/awsrulesfn/doc.go | 3 + .../internal/endpoints/awsrulesfn/generate.go | 7 + .../internal/endpoints/awsrulesfn/host.go | 51 + .../endpoints/awsrulesfn/partition.go | 75 + .../endpoints/awsrulesfn/partitions.go | 343 + .../endpoints/awsrulesfn/partitions.json | 213 + .../internal/endpoints/v2/CHANGELOG.md | 196 + .../internal/endpoints/v2/LICENSE.txt} | 0 .../internal/endpoints/v2/endpoints.go | 302 + .../endpoints/v2/go_module_metadata.go | 6 + .../aws-sdk-go-v2/internal/ini/CHANGELOG.md | 255 + .../aws-sdk-go-v2/internal/ini/LICENSE.txt | 202 + .../aws/aws-sdk-go-v2/internal/ini/errors.go | 22 + .../internal/ini/go_module_metadata.go | 6 + .../aws/aws-sdk-go-v2/internal/ini/ini.go | 56 + .../aws/aws-sdk-go-v2/internal/ini/parse.go | 109 + .../aws-sdk-go-v2/internal/ini/sections.go | 157 + .../aws/aws-sdk-go-v2/internal/ini/strings.go | 83 + .../aws/aws-sdk-go-v2/internal/ini/token.go | 32 + .../aws-sdk-go-v2/internal/ini/tokenize.go | 91 + .../aws/aws-sdk-go-v2/internal/ini/value.go | 104 + .../aws/aws-sdk-go-v2/internal/rand/rand.go | 33 + .../aws-sdk-go-v2/internal/sdk/interfaces.go | 9 + .../aws/aws-sdk-go-v2/internal/sdk/time.go | 74 + .../aws/aws-sdk-go-v2/internal/sdkio/byte.go | 12 + .../internal/shareddefaults/shared_config.go | 47 + .../aws-sdk-go-v2/internal/strings/strings.go | 11 + .../internal/sync/singleflight/LICENSE | 28 + .../internal/sync/singleflight/docs.go | 7 + .../sync/singleflight/singleflight.go | 210 + .../internal/timeconv/duration.go | 13 + .../internal/presigned-url/CHANGELOG.md | 240 + .../internal/presigned-url/LICENSE.txt | 202 + .../service/internal/presigned-url/context.go | 48 + .../service/internal/presigned-url/doc.go | 3 + .../presigned-url/go_module_metadata.go | 6 + .../internal/presigned-url/middleware.go | 110 + .../aws-sdk-go-v2/service/sso/CHANGELOG.md | 327 + .../aws/aws-sdk-go-v2/service/sso/LICENSE.txt | 202 + .../aws-sdk-go-v2/service/sso/api_client.go | 526 + .../service/sso/api_op_GetRoleCredentials.go | 266 + .../service/sso/api_op_ListAccountRoles.go | 361 + .../service/sso/api_op_ListAccounts.go | 358 + .../service/sso/api_op_Logout.go | 261 + .../service/sso/deserializers.go | 1151 ++ .../aws/aws-sdk-go-v2/service/sso/doc.go | 21 + .../aws-sdk-go-v2/service/sso/endpoints.go | 519 + .../aws-sdk-go-v2/service/sso/generated.json | 33 + .../service/sso/go_module_metadata.go | 6 + .../sso/internal/endpoints/endpoints.go | 526 + .../aws-sdk-go-v2/service/sso/serializers.go | 284 + .../aws-sdk-go-v2/service/sso/types/errors.go | 115 + .../aws-sdk-go-v2/service/sso/types/types.go | 61 + .../aws-sdk-go-v2/service/sso/validators.go | 175 + .../service/ssooidc/CHANGELOG.md | 317 + .../aws-sdk-go-v2/service/ssooidc/LICENSE.txt | 202 + .../service/ssooidc/api_client.go | 526 + .../service/ssooidc/api_op_CreateToken.go | 316 + .../service/ssooidc/api_op_RegisterClient.go | 281 + .../api_op_StartDeviceAuthorization.go | 289 + .../service/ssooidc/deserializers.go | 1689 +++ .../aws/aws-sdk-go-v2/service/ssooidc/doc.go | 36 + .../service/ssooidc/endpoints.go | 519 + .../service/ssooidc/generated.json | 32 + .../service/ssooidc/go_module_metadata.go | 6 + .../ssooidc/internal/endpoints/endpoints.go | 526 + .../service/ssooidc/serializers.go | 309 + .../service/ssooidc/types/errors.go | 366 + .../service/ssooidc/types/types.go | 9 + 
.../service/ssooidc/validators.go | 142 + .../aws-sdk-go-v2/service/sts/CHANGELOG.md | 338 + .../aws/aws-sdk-go-v2/service/sts/LICENSE.txt | 202 + .../aws-sdk-go-v2/service/sts/api_client.go | 630 ++ .../service/sts/api_op_AssumeRole.go | 558 + .../service/sts/api_op_AssumeRoleWithSAML.go | 482 + .../sts/api_op_AssumeRoleWithWebIdentity.go | 501 + .../sts/api_op_DecodeAuthorizationMessage.go | 285 + .../service/sts/api_op_GetAccessKeyInfo.go | 278 + .../service/sts/api_op_GetCallerIdentity.go | 294 + .../service/sts/api_op_GetFederationToken.go | 445 + .../service/sts/api_op_GetSessionToken.go | 328 + .../service/sts/deserializers.go | 2507 +++++ .../aws/aws-sdk-go-v2/service/sts/doc.go | 11 + .../aws-sdk-go-v2/service/sts/endpoints.go | 996 ++ .../aws-sdk-go-v2/service/sts/generated.json | 38 + .../service/sts/go_module_metadata.go | 6 + .../sts/internal/endpoints/endpoints.go | 509 + .../aws-sdk-go-v2/service/sts/serializers.go | 862 ++ .../aws-sdk-go-v2/service/sts/types/errors.go | 244 + .../aws-sdk-go-v2/service/sts/types/types.go | 130 + .../aws-sdk-go-v2/service/sts/validators.go | 305 + vendor/github.com/aws/smithy-go/.gitignore | 26 + vendor/github.com/aws/smithy-go/.travis.yml | 28 + vendor/github.com/aws/smithy-go/CHANGELOG.md | 194 + .../aws/smithy-go/CODE_OF_CONDUCT.md | 4 + .../github.com/aws/smithy-go/CONTRIBUTING.md | 59 + vendor/github.com/aws/smithy-go/LICENSE | 175 + vendor/github.com/aws/smithy-go/Makefile | 97 + vendor/github.com/aws/smithy-go/NOTICE | 1 + vendor/github.com/aws/smithy-go/README.md | 27 + .../aws/smithy-go/auth/bearer/docs.go | 3 + .../aws/smithy-go/auth/bearer/middleware.go | 104 + .../aws/smithy-go/auth/bearer/token.go | 50 + .../aws/smithy-go/auth/bearer/token_cache.go | 208 + .../aws/smithy-go/context/suppress_expired.go | 81 + vendor/github.com/aws/smithy-go/doc.go | 2 + vendor/github.com/aws/smithy-go/document.go | 10 + .../github.com/aws/smithy-go/document/doc.go | 12 + .../aws/smithy-go/document/document.go | 153 + .../aws/smithy-go/document/errors.go | 75 + .../github.com/aws/smithy-go/encoding/doc.go | 4 + .../aws/smithy-go/encoding/encoding.go | 40 + .../smithy-go/encoding/httpbinding/encode.go | 123 + .../smithy-go/encoding/httpbinding/header.go | 122 + .../encoding/httpbinding/path_replace.go | 108 + .../smithy-go/encoding/httpbinding/query.go | 107 + .../aws/smithy-go/encoding/httpbinding/uri.go | 111 + .../aws/smithy-go/encoding/json/array.go | 35 + .../aws/smithy-go/encoding/json/constants.go | 15 + .../smithy-go/encoding/json/decoder_util.go | 139 + .../aws/smithy-go/encoding/json/encoder.go | 30 + .../aws/smithy-go/encoding/json/escape.go | 198 + .../aws/smithy-go/encoding/json/object.go | 40 + .../aws/smithy-go/encoding/json/value.go | 149 + .../aws/smithy-go/encoding/xml/array.go | 49 + .../aws/smithy-go/encoding/xml/constants.go | 10 + .../aws/smithy-go/encoding/xml/doc.go | 49 + .../aws/smithy-go/encoding/xml/element.go | 91 + .../aws/smithy-go/encoding/xml/encoder.go | 51 + .../aws/smithy-go/encoding/xml/error_utils.go | 51 + .../aws/smithy-go/encoding/xml/escape.go | 137 + .../aws/smithy-go/encoding/xml/map.go | 53 + .../aws/smithy-go/encoding/xml/value.go | 302 + .../aws/smithy-go/encoding/xml/xml_decoder.go | 154 + .../aws/smithy-go/endpoints/endpoint.go | 23 + vendor/github.com/aws/smithy-go/errors.go | 137 + .../aws/smithy-go/go_module_metadata.go | 6 + .../internal/sync/singleflight/LICENSE | 28 + .../internal/sync/singleflight/docs.go | 8 + .../sync/singleflight/singleflight.go | 210 + 
vendor/github.com/aws/smithy-go/io/byte.go | 12 + vendor/github.com/aws/smithy-go/io/doc.go | 2 + vendor/github.com/aws/smithy-go/io/reader.go | 16 + .../github.com/aws/smithy-go/io/ringbuffer.go | 94 + .../aws/smithy-go/local-mod-replace.sh | 39 + .../aws/smithy-go/logging/logger.go | 82 + .../aws/smithy-go/middleware/doc.go | 67 + .../aws/smithy-go/middleware/logging.go | 46 + .../aws/smithy-go/middleware/metadata.go | 65 + .../aws/smithy-go/middleware/middleware.go | 71 + .../aws/smithy-go/middleware/ordered_group.go | 268 + .../aws/smithy-go/middleware/stack.go | 209 + .../aws/smithy-go/middleware/stack_values.go | 100 + .../aws/smithy-go/middleware/step_build.go | 211 + .../smithy-go/middleware/step_deserialize.go | 217 + .../aws/smithy-go/middleware/step_finalize.go | 211 + .../smithy-go/middleware/step_initialize.go | 211 + .../smithy-go/middleware/step_serialize.go | 219 + vendor/github.com/aws/smithy-go/modman.toml | 11 + vendor/github.com/aws/smithy-go/properties.go | 52 + vendor/github.com/aws/smithy-go/ptr/doc.go | 5 + .../github.com/aws/smithy-go/ptr/from_ptr.go | 601 ++ .../aws/smithy-go/ptr/gen_scalars.go | 83 + vendor/github.com/aws/smithy-go/ptr/to_ptr.go | 499 + vendor/github.com/aws/smithy-go/rand/doc.go | 3 + vendor/github.com/aws/smithy-go/rand/rand.go | 31 + vendor/github.com/aws/smithy-go/rand/uuid.go | 87 + vendor/github.com/aws/smithy-go/time/time.go | 134 + .../transport/http/checksum_middleware.go | 70 + .../aws/smithy-go/transport/http/client.go | 120 + .../aws/smithy-go/transport/http/doc.go | 5 + .../smithy-go/transport/http/headerlist.go | 163 + .../aws/smithy-go/transport/http/host.go | 89 + .../transport/http/internal/io/safe.go | 75 + .../smithy-go/transport/http/md5_checksum.go | 25 + .../http/middleware_close_response_body.go | 79 + .../http/middleware_content_length.go | 84 + .../http/middleware_header_comment.go | 81 + .../transport/http/middleware_headers.go | 167 + .../transport/http/middleware_http_logging.go | 75 + .../transport/http/middleware_metadata.go | 51 + .../transport/http/middleware_min_proto.go | 79 + .../aws/smithy-go/transport/http/request.go | 189 + .../aws/smithy-go/transport/http/response.go | 34 + .../aws/smithy-go/transport/http/time.go | 13 + .../aws/smithy-go/transport/http/url.go | 44 + .../smithy-go/transport/http/user_agent.go | 37 + vendor/github.com/aws/smithy-go/validation.go | 140 + .../github.com/ebitengine/purego/.gitignore | 1 + vendor/github.com/ebitengine/purego/LICENSE | 201 + vendor/github.com/ebitengine/purego/README.md | 97 + .../github.com/ebitengine/purego/abi_amd64.h | 99 + .../github.com/ebitengine/purego/abi_arm64.h | 39 + vendor/github.com/ebitengine/purego/cgo.go | 19 + .../github.com/ebitengine/purego/dlerror.go | 17 + vendor/github.com/ebitengine/purego/dlfcn.go | 99 + .../ebitengine/purego/dlfcn_android.go | 34 + .../ebitengine/purego/dlfcn_darwin.go | 24 + .../ebitengine/purego/dlfcn_freebsd.go | 14 + .../ebitengine/purego/dlfcn_linux.go | 16 + .../ebitengine/purego/dlfcn_nocgo_freebsd.go | 11 + .../ebitengine/purego/dlfcn_nocgo_linux.go | 19 + .../ebitengine/purego/dlfcn_playground.go | 24 + .../ebitengine/purego/dlfcn_stubs.s | 26 + vendor/github.com/ebitengine/purego/func.go | 436 + .../ebitengine/purego/go_runtime.go | 13 + .../purego/internal/cgo/dlfcn_cgo_unix.go | 56 + .../ebitengine/purego/internal/cgo/empty.go | 6 + .../purego/internal/cgo/syscall_cgo_unix.go | 55 + .../purego/internal/fakecgo/abi_amd64.h | 99 + .../purego/internal/fakecgo/abi_arm64.h | 39 + 
.../purego/internal/fakecgo/asm_amd64.s | 39 + .../purego/internal/fakecgo/asm_arm64.s | 36 + .../purego/internal/fakecgo/callbacks.go | 93 + .../ebitengine/purego/internal/fakecgo/doc.go | 32 + .../purego/internal/fakecgo/freebsd.go | 27 + .../internal/fakecgo/go_darwin_amd64.go | 73 + .../internal/fakecgo/go_darwin_arm64.go | 88 + .../internal/fakecgo/go_freebsd_amd64.go | 95 + .../internal/fakecgo/go_freebsd_arm64.go | 98 + .../purego/internal/fakecgo/go_libinit.go | 66 + .../purego/internal/fakecgo/go_linux_amd64.go | 95 + .../purego/internal/fakecgo/go_linux_arm64.go | 98 + .../purego/internal/fakecgo/go_setenv.go | 18 + .../purego/internal/fakecgo/go_util.go | 37 + .../purego/internal/fakecgo/iscgo.go | 19 + .../purego/internal/fakecgo/libcgo.go | 35 + .../purego/internal/fakecgo/libcgo_darwin.go | 22 + .../purego/internal/fakecgo/libcgo_freebsd.go | 16 + .../purego/internal/fakecgo/libcgo_linux.go | 16 + .../purego/internal/fakecgo/setenv.go | 19 + .../purego/internal/fakecgo/symbols.go | 181 + .../purego/internal/fakecgo/symbols_darwin.go | 29 + .../internal/fakecgo/symbols_freebsd.go | 29 + .../purego/internal/fakecgo/symbols_linux.go | 29 + .../internal/fakecgo/trampolines_amd64.s | 104 + .../internal/fakecgo/trampolines_arm64.s | 72 + .../internal/fakecgo/trampolines_stubs.s | 90 + .../purego/internal/strings/strings.go | 40 + vendor/github.com/ebitengine/purego/is_ios.go | 13 + vendor/github.com/ebitengine/purego/nocgo.go | 25 + .../ebitengine/purego/struct_amd64.go | 260 + .../ebitengine/purego/struct_arm64.go | 274 + .../ebitengine/purego/struct_other.go | 16 + .../github.com/ebitengine/purego/sys_amd64.s | 164 + .../github.com/ebitengine/purego/sys_arm64.s | 92 + .../ebitengine/purego/sys_unix_arm64.s | 70 + .../github.com/ebitengine/purego/syscall.go | 53 + .../ebitengine/purego/syscall_cgo_linux.go | 21 + .../ebitengine/purego/syscall_sysv.go | 223 + .../ebitengine/purego/syscall_windows.go | 46 + .../ebitengine/purego/zcallback_amd64.s | 2014 ++++ .../ebitengine/purego/zcallback_arm64.s | 4014 +++++++ vendor/github.com/elastic/go-grok/.gitignore | 21 + vendor/github.com/elastic/go-grok/.go-version | 1 + .../{oklog/run => elastic/go-grok}/LICENSE | 0 vendor/github.com/elastic/go-grok/NOTICE.txt | 2024 ++++ vendor/github.com/elastic/go-grok/README.md | 223 + .../elastic/go-grok/catalog-info.yaml | 58 + .../go-grok/dev-tools/mage/benchmark.go | 163 + .../elastic/go-grok/dev-tools/mage/check.go | 48 + .../elastic/go-grok/dev-tools/mage/deps.go | 43 + .../elastic/go-grok/dev-tools/mage/fmt.go | 107 + .../go-grok/dev-tools/mage/gotool/get.go | 43 + .../go-grok/dev-tools/mage/gotool/go.go | 343 + .../go-grok/dev-tools/mage/gotool/licenser.go | 35 + .../go-grok/dev-tools/mage/gotool/modules.go | 64 + .../go-grok/dev-tools/mage/gotool/noticer.go | 38 + .../elastic/go-grok/dev-tools/mage/install.go | 44 + .../elastic/go-grok/dev-tools/mage/linter.go | 175 + .../elastic/go-grok/dev-tools/mage/mage.go | 40 + .../elastic/go-grok/dev-tools/mage/notice.go | 57 + vendor/github.com/elastic/go-grok/grok.go | 377 + vendor/github.com/elastic/go-grok/magefile.go | 90 + .../elastic/go-grok/patterns/aws.go | 33 + .../elastic/go-grok/patterns/bind9.go | 27 + .../elastic/go-grok/patterns/bro.go | 27 + .../elastic/go-grok/patterns/default.go | 121 + .../elastic/go-grok/patterns/exim.go | 38 + .../elastic/go-grok/patterns/firewalls.go | 95 + .../elastic/go-grok/patterns/haproxy.go | 30 + .../elastic/go-grok/patterns/httpd.go | 33 + .../elastic/go-grok/patterns/java.go | 46 + 
.../elastic/go-grok/patterns/junos.go | 27 + .../elastic/go-grok/patterns/maven.go | 22 + .../elastic/go-grok/patterns/mcollective.go | 23 + .../elastic/go-grok/patterns/mongodb.go | 29 + .../elastic/go-grok/patterns/postgresql.go | 22 + .../elastic/go-grok/patterns/rails.go | 30 + .../elastic/go-grok/patterns/redis.go | 24 + .../elastic/go-grok/patterns/ruby.go | 23 + .../elastic/go-grok/patterns/squid.go | 23 + .../elastic/go-grok/patterns/syslog.go | 36 + vendor/github.com/elastic/lunes/.editorconfig | 8 + vendor/github.com/elastic/lunes/.gitignore | 25 + vendor/github.com/elastic/lunes/.go-version | 1 + vendor/github.com/elastic/lunes/.golangci.yml | 114 + vendor/github.com/elastic/lunes/CHANGELOG.md | 0 vendor/github.com/elastic/lunes/LICENSE | 201 + vendor/github.com/elastic/lunes/NOTICE.txt | 1991 ++++ vendor/github.com/elastic/lunes/README.md | 144 + .../elastic/lunes/catalog-info.yaml | 57 + .../elastic/lunes/dev-tools/mage/check.go | 49 + .../elastic/lunes/dev-tools/mage/deps.go | 43 + .../elastic/lunes/dev-tools/mage/fmt.go | 107 + .../lunes/dev-tools/mage/gotool/get.go | 43 + .../elastic/lunes/dev-tools/mage/gotool/go.go | 343 + .../lunes/dev-tools/mage/gotool/licenser.go | 35 + .../lunes/dev-tools/mage/gotool/modules.go | 64 + .../lunes/dev-tools/mage/gotool/noticer.go | 38 + .../elastic/lunes/dev-tools/mage/install.go | 44 + .../elastic/lunes/dev-tools/mage/linter.go | 175 + .../elastic/lunes/dev-tools/mage/mage.go | 40 + .../elastic/lunes/dev-tools/mage/notice.go | 57 + vendor/github.com/elastic/lunes/locale.go | 113 + vendor/github.com/elastic/lunes/lunes.go | 482 + vendor/github.com/elastic/lunes/magefile.go | 107 + vendor/github.com/elastic/lunes/tables.go | 9616 +++++++++++++++++ vendor/github.com/expr-lang/expr/README.md | 3 + vendor/github.com/expr-lang/expr/ast/print.go | 35 +- .../github.com/expr-lang/expr/ast/visitor.go | 3 + .../expr-lang/expr/builtin/builtin.go | 83 +- .../github.com/expr-lang/expr/builtin/lib.go | 39 - .../expr-lang/expr/builtin/utils.go | 13 +- .../expr-lang/expr/checker/checker.go | 70 +- .../expr-lang/expr/compiler/compiler.go | 165 +- vendor/github.com/expr-lang/expr/expr.go | 41 +- .../github.com/expr-lang/expr/file/error.go | 28 +- .../expr-lang/expr/file/location.go | 8 +- .../github.com/expr-lang/expr/file/source.go | 73 +- .../expr-lang/expr/optimizer/optimizer.go | 2 + .../expr/optimizer/predicate_combination.go | 16 +- .../expr-lang/expr/optimizer/sum_array.go | 37 + .../expr-lang/expr/optimizer/sum_map.go | 25 + .../expr-lang/expr/parser/lexer/lexer.go | 91 +- .../expr-lang/expr/parser/lexer/state.go | 14 +- .../expr-lang/expr/parser/parser.go | 17 +- .../expr-lang/expr/patcher/with_context.go | 15 +- .../expr-lang/expr/patcher/with_timezone.go | 25 + .../github.com/expr-lang/expr/vm/program.go | 6 +- .../expr/vm/runtime/helpers[generated].go | 338 + .../expr-lang/expr/vm/runtime/runtime.go | 6 +- vendor/github.com/expr-lang/expr/vm/vm.go | 21 +- .../github.com/fsnotify/fsnotify/.cirrus.yml | 7 +- .../fsnotify/fsnotify/.editorconfig | 12 - .../fsnotify/fsnotify/.gitattributes | 1 - .../github.com/fsnotify/fsnotify/.gitignore | 3 + .../github.com/fsnotify/fsnotify/CHANGELOG.md | 34 +- .../fsnotify/fsnotify/CONTRIBUTING.md | 120 +- .../fsnotify/fsnotify/backend_fen.go | 324 +- .../fsnotify/fsnotify/backend_inotify.go | 594 +- .../fsnotify/fsnotify/backend_kqueue.go | 747 +- .../fsnotify/fsnotify/backend_other.go | 204 +- .../fsnotify/fsnotify/backend_windows.go | 305 +- .../github.com/fsnotify/fsnotify/fsnotify.go | 368 +- 
.../fsnotify/fsnotify/internal/darwin.go | 39 + .../fsnotify/internal/debug_darwin.go | 57 + .../fsnotify/internal/debug_dragonfly.go | 33 + .../fsnotify/internal/debug_freebsd.go | 42 + .../fsnotify/internal/debug_kqueue.go | 32 + .../fsnotify/fsnotify/internal/debug_linux.go | 56 + .../fsnotify/internal/debug_netbsd.go | 25 + .../fsnotify/internal/debug_openbsd.go | 28 + .../fsnotify/internal/debug_solaris.go | 45 + .../fsnotify/internal/debug_windows.go | 40 + .../fsnotify/fsnotify/internal/freebsd.go | 31 + .../fsnotify/fsnotify/internal/internal.go | 2 + .../fsnotify/fsnotify/internal/unix.go | 31 + .../fsnotify/fsnotify/internal/unix2.go | 7 + .../fsnotify/fsnotify/internal/windows.go | 41 + vendor/github.com/fsnotify/fsnotify/mkdoc.zsh | 259 - .../fsnotify/fsnotify/system_bsd.go | 1 - .../fsnotify/fsnotify/system_darwin.go | 1 - vendor/github.com/go-kit/kit/LICENSE | 22 - .../github.com/go-kit/kit/metrics/README.md | 98 - vendor/github.com/go-kit/kit/metrics/doc.go | 97 - .../go-kit/kit/metrics/expvar/expvar.go | 94 - .../go-kit/kit/metrics/generic/generic.go | 247 - .../kit/metrics/internal/lv/labelvalues.go | 14 - .../go-kit/kit/metrics/internal/lv/space.go | 145 - .../github.com/go-kit/kit/metrics/metrics.go | 25 - vendor/github.com/go-kit/kit/metrics/timer.go | 36 - .../go-viper/mapstructure/v2/CHANGELOG.md | 3 + .../go-viper/mapstructure/v2/README.md | 21 + .../go-viper/mapstructure/v2/decode_hooks.go | 304 +- .../go-viper/mapstructure/v2/error.go | 50 - .../go-viper/mapstructure/v2/flake.lock | 303 +- .../mapstructure/v2/internal/errors/errors.go | 11 + .../mapstructure/v2/internal/errors/join.go | 9 + .../v2/internal/errors/join_go1_19.go | 61 + .../go-viper/mapstructure/v2/mapstructure.go | 186 +- .../goccy/go-json/internal/decoder/compile.go | 18 +- .../internal/decoder/compile_norace.go | 1 + .../go-json/internal/decoder/compile_race.go | 1 + .../go-json/internal/encoder/compiler.go | 16 +- .../internal/encoder/compiler_norace.go | 1 + .../go-json/internal/encoder/compiler_race.go | 1 + .../goccy/go-json/internal/encoder/encoder.go | 5 + .../proto/common_go_proto/common.pb.go | 99 +- .../s2a_context_go_proto/s2a_context.pb.go | 6 +- .../internal/proto/s2a_go_proto/s2a.pb.go | 28 +- .../proto/s2a_go_proto/s2a_grpc.pb.go | 13 +- .../proto/v2/common_go_proto/common.pb.go | 296 +- .../v2/s2a_context_go_proto/s2a_context.pb.go | 73 +- .../internal/proto/v2/s2a_go_proto/s2a.pb.go | 772 +- .../proto/v2/s2a_go_proto/s2a_grpc.pb.go | 13 +- .../google/s2a-go/internal/record/record.go | 34 +- .../internal/tokenmanager/tokenmanager.go | 15 +- .../google/s2a-go/internal/v2/s2av2.go | 40 +- .../v2/tlsconfigstore/tlsconfigstore.go | 15 +- vendor/github.com/google/s2a-go/s2a.go | 103 +- .../github.com/google/s2a-go/s2a_options.go | 28 +- .../github.com/gorilla/handlers/.editorconfig | 20 + vendor/github.com/gorilla/handlers/.gitignore | 2 + vendor/github.com/gorilla/handlers/LICENSE | 39 +- vendor/github.com/gorilla/handlers/Makefile | 34 + vendor/github.com/gorilla/handlers/README.md | 8 +- .../github.com/gorilla/handlers/canonical.go | 9 +- .../github.com/gorilla/handlers/compress.go | 8 +- vendor/github.com/gorilla/handlers/cors.go | 41 +- .../github.com/gorilla/handlers/handlers.go | 15 +- vendor/github.com/gorilla/handlers/logging.go | 32 +- .../gorilla/handlers/proxy_headers.go | 16 +- .../github.com/gorilla/handlers/recovery.go | 22 +- vendor/github.com/grafana/dskit/test/diff.go | 28 + vendor/github.com/grafana/dskit/test/poll.go | 26 + .../go-grpc-middleware/logging/common.go | 
43 - .../go-grpc-middleware/logging/doc.go | 32 - .../logging/settable/doc.go | 16 - .../logging/settable/logsettable.go | 99 - .../logging/zap/client_interceptors.go | 64 - .../go-grpc-middleware/logging/zap/context.go | 21 - .../logging/zap/ctxzap/context.go | 88 - .../logging/zap/ctxzap/doc.go | 14 - .../go-grpc-middleware/logging/zap/doc.go | 75 - .../logging/zap/grpclogger.go | 141 - .../go-grpc-middleware/logging/zap/options.go | 217 - .../logging/zap/payload_interceptors.go | 150 - .../logging/zap/server_interceptors.go | 85 - .../go-grpc-middleware/retry/backoff.go | 44 - .../go-grpc-middleware/tags/context.go | 78 - .../go-grpc-middleware/tags/doc.go | 22 - .../go-grpc-middleware/tags/fieldextractor.go | 85 - .../go-grpc-middleware/tags/interceptors.go | 85 - .../go-grpc-middleware/tags/options.go | 44 - .../util/backoffutils/backoff.go | 28 - .../go-grpc-middleware/util/metautils/doc.go | 19 - .../util/metautils/nicemd.go | 126 - .../go-grpc-middleware/v2/COPYRIGHT | 2 + .../go-grpc-middleware/v2/LICENSE | 201 + .../v2/interceptors/retry/backoff.go | 55 + .../{ => v2/interceptors}/retry/doc.go | 14 +- .../{ => v2/interceptors}/retry/options.go | 79 +- .../{ => v2/interceptors}/retry/retry.go | 81 +- .../go-grpc-middleware/v2/metadata/doc.go | 19 + .../v2/metadata/metadata.go | 126 + .../v2/metadata/single_key.go | 21 + .../grpc-gateway/v2/runtime/BUILD.bazel | 2 +- .../grpc-gateway/v2/runtime/context.go | 11 + .../grpc-gateway/v2/runtime/convert.go | 6 +- .../grpc-gateway/v2/runtime/errors.go | 31 +- .../grpc-gateway/v2/runtime/fieldmask.go | 2 +- .../grpc-gateway/v2/runtime/handler.go | 40 +- .../grpc-gateway/v2/runtime/marshaler.go | 8 + .../v2/runtime/marshaler_registry.go | 4 +- .../grpc-gateway/v2/runtime/mux.go | 69 +- .../grpc-gateway/v2/runtime/proto2_convert.go | 4 +- .../grpc-gateway/v2/runtime/query.go | 6 +- .../grpc-gateway/v2/utilities/pattern.go | 2 +- .../v2/utilities/string_array_flag.go | 2 +- .../github.com/hashicorp/go-plugin/.gitignore | 2 - .../hashicorp/go-plugin/CHANGELOG.md | 102 - vendor/github.com/hashicorp/go-plugin/LICENSE | 355 - .../github.com/hashicorp/go-plugin/README.md | 165 - .../hashicorp/go-plugin/buf.gen.yaml | 14 - .../github.com/hashicorp/go-plugin/buf.yaml | 7 - .../github.com/hashicorp/go-plugin/client.go | 1239 --- .../hashicorp/go-plugin/constants.go | 16 - .../hashicorp/go-plugin/discover.go | 31 - .../github.com/hashicorp/go-plugin/error.go | 27 - .../hashicorp/go-plugin/grpc_broker.go | 654 -- .../hashicorp/go-plugin/grpc_client.go | 134 - .../hashicorp/go-plugin/grpc_controller.go | 26 - .../hashicorp/go-plugin/grpc_server.go | 167 - .../hashicorp/go-plugin/grpc_stdio.go | 210 - .../internal/cmdrunner/addr_translator.go | 16 - .../internal/cmdrunner/cmd_reattach.go | 63 - .../internal/cmdrunner/cmd_runner.go | 129 - .../internal/cmdrunner/notes_unix.go | 70 - .../internal/cmdrunner/notes_windows.go | 46 - .../go-plugin/internal/cmdrunner/process.go | 25 - .../internal/cmdrunner/process_posix.go | 23 - .../internal/cmdrunner/process_windows.go | 33 - .../grpcmux/blocked_client_listener.go | 51 - .../grpcmux/blocked_server_listener.go | 49 - .../internal/grpcmux/grpc_client_muxer.go | 105 - .../go-plugin/internal/grpcmux/grpc_muxer.go | 41 - .../internal/grpcmux/grpc_server_muxer.go | 190 - .../internal/plugin/grpc_broker.pb.go | 264 - .../internal/plugin/grpc_broker.proto | 22 - .../internal/plugin/grpc_broker_grpc.pb.go | 142 - .../internal/plugin/grpc_controller.pb.go | 141 - .../internal/plugin/grpc_controller.proto | 14 - 
.../plugin/grpc_controller_grpc.pb.go | 110 - .../internal/plugin/grpc_stdio.pb.go | 225 - .../internal/plugin/grpc_stdio.proto | 33 - .../internal/plugin/grpc_stdio_grpc.pb.go | 148 - .../hashicorp/go-plugin/log_entry.go | 76 - vendor/github.com/hashicorp/go-plugin/mtls.go | 76 - .../hashicorp/go-plugin/mux_broker.go | 207 - .../github.com/hashicorp/go-plugin/plugin.go | 61 - .../github.com/hashicorp/go-plugin/process.go | 4 - .../hashicorp/go-plugin/protocol.go | 48 - .../hashicorp/go-plugin/rpc_client.go | 173 - .../hashicorp/go-plugin/rpc_server.go | 209 - .../hashicorp/go-plugin/runner/runner.go | 72 - .../github.com/hashicorp/go-plugin/server.go | 665 -- .../hashicorp/go-plugin/server_mux.go | 34 - .../github.com/hashicorp/go-plugin/stream.go | 21 - .../github.com/hashicorp/go-plugin/testing.go | 185 - .../{yamux => golang-lru}/.gitignore | 0 .../hashicorp/golang-lru/.golangci.yml | 30 + vendor/github.com/hashicorp/golang-lru/2q.go | 222 + .../github.com/hashicorp/golang-lru/README.md | 7 + vendor/github.com/hashicorp/golang-lru/arc.go | 256 + vendor/github.com/hashicorp/golang-lru/doc.go | 21 + vendor/github.com/hashicorp/golang-lru/lru.go | 231 + .../hashicorp/golang-lru/testing.go | 16 + vendor/github.com/hashicorp/yamux/LICENSE | 362 - vendor/github.com/hashicorp/yamux/README.md | 86 - vendor/github.com/hashicorp/yamux/addr.go | 60 - vendor/github.com/hashicorp/yamux/const.go | 182 - vendor/github.com/hashicorp/yamux/mux.go | 114 - vendor/github.com/hashicorp/yamux/session.go | 732 -- vendor/github.com/hashicorp/yamux/spec.md | 140 - vendor/github.com/hashicorp/yamux/stream.go | 544 - vendor/github.com/hashicorp/yamux/util.go | 43 - .../agent/app/configmanager/grpc/manager.go | 15 +- .../cmd/agent/app/configmanager/manager.go | 13 +- .../cmd/agent/app/configmanager/metrics.go | 13 +- .../buffered_read_transport.go | 22 +- .../jaeger/cmd/agent/app/httpserver/srv.go | 13 +- .../cmd/agent/app/processors/processor.go | 13 +- .../agent/app/processors/thrift_processor.go | 15 +- .../cmd/agent/app/reporter/client_metrics.go | 27 +- .../cmd/agent/app/reporter/connect_metrics.go | 13 +- .../jaeger/cmd/agent/app/reporter/flags.go | 22 +- .../cmd/agent/app/reporter/grpc/builder.go | 17 +- .../app/reporter/grpc/collector_proxy.go | 13 +- .../cmd/agent/app/reporter/grpc/flags.go | 25 +- .../cmd/agent/app/reporter/grpc/reporter.go | 13 +- .../jaeger/cmd/agent/app/reporter/metrics.go | 13 +- .../jaeger/cmd/agent/app/reporter/reporter.go | 13 +- .../jaeger/cmd/agent/app/servers/server.go | 13 +- .../cmd/agent/app/servers/tbuffered_server.go | 15 +- .../app/servers/thriftudp/socket_buffer.go | 13 +- .../thriftudp/socket_buffer_windows.go | 13 +- .../agent/app/servers/thriftudp/transport.go | 17 +- .../all-in-one/setupcontext/setupcontext.go | 13 +- .../collector/app/sampling/model/sampling.go | 13 +- .../app/sampling/samplingstrategy/factory.go | 28 + .../interface.go | 27 +- .../app/sampling/strategystore/factory.go | 36 - .../app/sanitizer/zipkin/span_sanitizer.go | 21 +- .../jaeger/cmd/internal/flags/.nocover | 2 - .../jaeger/cmd/internal/flags/admin.go | 19 +- .../jaeger/cmd/internal/flags/doc.go | 13 +- .../jaeger/cmd/internal/flags/flags.go | 27 +- .../jaeger/cmd/internal/flags/service.go | 33 +- .../jaeger/internal/metrics/expvar/cache.go | 83 - .../jaeger/internal/metrics/expvar/factory.go | 134 - .../jaeger/internal/metrics/expvar/metrics.go | 82 - .../metrics/metricsbuilder/builder.go | 26 +- .../internal/metrics/prometheus/cache.go | 15 +- .../internal/metrics/prometheus/factory.go | 
17 +- .../jaeger/model/converter/json/doc.go | 13 +- .../model/converter/json/from_domain.go | 21 +- .../model/converter/json/process_hashtable.go | 13 +- .../jaeger/model/converter/json/sampling.go | 13 +- .../model/converter/thrift/jaeger/doc.go | 13 +- .../converter/thrift/jaeger/from_domain.go | 20 +- .../thrift/jaeger/sampling_from_domain.go | 18 +- .../thrift/jaeger/sampling_to_domain.go | 13 +- .../converter/thrift/jaeger/to_domain.go | 43 +- .../converter/thrift/zipkin/deserialize.go | 13 +- .../model/converter/thrift/zipkin/doc.go | 13 +- .../thrift/zipkin/process_hashtable.go | 13 +- .../converter/thrift/zipkin/to_domain.go | 70 +- .../jaeger/model/dependencies.go | 20 +- .../jaegertracing/jaeger/model/doc.go | 13 +- .../jaegertracing/jaeger/model/hash.go | 13 +- .../jaegertracing/jaeger/model/ids.go | 40 +- .../jaegertracing/jaeger/model/json/doc.go | 13 +- .../jaegertracing/jaeger/model/json/model.go | 19 +- .../jaegertracing/jaeger/model/keyvalue.go | 18 +- .../jaegertracing/jaeger/model/process.go | 13 +- .../jaegertracing/jaeger/model/sort.go | 13 +- .../jaegertracing/jaeger/model/span.go | 13 +- .../jaegertracing/jaeger/model/spanref.go | 13 +- .../jaegertracing/jaeger/model/time.go | 17 +- .../jaegertracing/jaeger/model/trace.go | 13 +- .../jaeger/pkg/bearertoken/context.go | 38 - .../jaeger/pkg/bearertoken/http.go | 55 - .../jaeger/pkg/bearertoken/transport.go | 52 - .../pkg/clientcfg/clientcfghttp/cfgmgr.go | 21 +- .../pkg/clientcfg/clientcfghttp/handler.go | 32 +- .../jaeger/pkg/config/tlscfg/cert_watcher.go | 13 +- .../jaeger/pkg/config/tlscfg/certpool_unix.go | 13 +- .../pkg/config/tlscfg/certpool_windows.go | 13 +- .../jaeger/pkg/config/tlscfg/ciphersuites.go | 13 +- .../jaeger/pkg/config/tlscfg/flags.go | 13 +- .../jaeger/pkg/config/tlscfg/options.go | 112 +- .../jaeger/pkg/discovery/discoverer.go | 13 +- .../discovery/grpcresolver/grpc_resolver.go | 17 +- .../jaeger/pkg/discovery/notifier.go | 13 +- .../jaeger/pkg/distributedlock/interface.go | 13 +- .../jaeger/pkg/fswatcher/fswatcher.go | 29 +- .../jaeger/pkg/gogocodec/codec.go | 33 +- .../jaeger/pkg/healthcheck/handler.go | 15 +- .../jaeger/pkg/metrics/counter.go | 13 +- .../jaeger/pkg/metrics/factory.go | 23 +- .../jaegertracing/jaeger/pkg/metrics/gauge.go | 13 +- .../jaeger/pkg/metrics/histogram.go | 13 +- .../jaeger/pkg/metrics/metrics.go | 19 +- .../jaeger/pkg/metrics/package.go | 13 +- .../jaeger/pkg/metrics/stopwatch.go | 13 +- .../jaegertracing/jaeger/pkg/metrics/timer.go | 13 +- .../jaegertracing/jaeger/pkg/netutils/port.go | 13 +- .../jaeger/pkg/recoveryhandler/zap.go | 15 +- .../jaegertracing/jaeger/pkg/version/build.go | 13 +- .../jaeger/pkg/version/command.go | 15 +- .../jaeger/pkg/version/handler.go | 17 +- .../plugin/storage/grpc/shared/archive.go | 23 +- .../plugin/storage/grpc/shared/grpc_client.go | 125 +- .../storage/grpc/shared/grpc_handler.go | 43 +- .../plugin/storage/grpc/shared/interface.go | 29 +- .../plugin/storage/grpc/shared/plugin.go | 53 - .../storage/grpc/shared/streaming_writer.go | 15 +- .../jaegertracing/jaeger/ports/ports.go | 40 +- .../storage/dependencystore/interface.go | 13 +- .../jaegertracing/jaeger/storage/doc.go | 13 +- .../jaegertracing/jaeger/storage/factory.go | 16 +- .../jaeger/storage/metricsstore/interface.go | 13 +- .../jaeger/storage/samplingstore/interface.go | 13 +- .../jaeger/storage/spanstore/composite.go | 13 +- .../storage/spanstore/downsampling_writer.go | 15 +- .../jaeger/storage/spanstore/interface.go | 13 +- .../github.com/klauspost/cpuid/v2/README.md | 
1 + vendor/github.com/klauspost/cpuid/v2/cpuid.go | 84 +- .../klauspost/cpuid/v2/cpuid_arm64.s | 10 + .../klauspost/cpuid/v2/detect_arm64.go | 3 +- .../klauspost/cpuid/v2/detect_ref.go | 2 + .../klauspost/cpuid/v2/detect_x86.go | 3 + .../klauspost/cpuid/v2/featureid_string.go | 440 +- vendor/github.com/knadh/koanf/v2/README.md | 17 +- vendor/github.com/knadh/koanf/v2/go.work.sum | 9 +- vendor/github.com/lufia/plan9stats/cpu.go | 5 +- vendor/github.com/magefile/mage/LICENSE | 201 + vendor/github.com/magefile/mage/mg/color.go | 80 + .../magefile/mage/mg/color_string.go | 38 + vendor/github.com/magefile/mage/mg/deps.go | 211 + vendor/github.com/magefile/mage/mg/errors.go | 51 + vendor/github.com/magefile/mage/mg/fn.go | 192 + vendor/github.com/magefile/mage/mg/runtime.go | 136 + vendor/github.com/magefile/mage/sh/cmd.go | 184 + vendor/github.com/magefile/mage/sh/helpers.go | 40 + .../minio/minio-go/v7/api-prompt-object.go | 78 - .../minio/minio-go/v7/api-prompt-options.go | 84 - .../minio-go/v7/api-put-object-fan-out.go | 5 +- vendor/github.com/minio/minio-go/v7/api.go | 2 +- .../minio/minio-go/v7/functional_tests.go | 1933 +++- .../v7/pkg/credentials/sts_web_identity.go | 7 +- .../minio/minio-go/v7/post-policy.go | 71 +- .../minio/minio-go/v7/retry-continous.go | 10 +- vendor/github.com/minio/minio-go/v7/retry.go | 10 +- .../go-testing-interface/.travis.yml | 13 - .../mitchellh/go-testing-interface/README.md | 52 - .../mitchellh/go-testing-interface/testing.go | 84 - .../go-testing-interface/testing_go19.go | 108 - .../go-grpc-compression/internal/zstd/zstd.go | 146 + .../nonclobbering/zstd/zstd.go | 52 + vendor/github.com/oklog/run/.gitignore | 14 - vendor/github.com/oklog/run/README.md | 75 - vendor/github.com/oklog/run/actors.go | 38 - vendor/github.com/oklog/run/group.go | 62 - .../exporter/kafkaexporter/README.md | 18 +- .../exporter/kafkaexporter/config.go | 12 +- .../exporter/kafkaexporter/factory.go | 79 +- .../internal/metadata/generated_status.go | 3 +- .../internal/metadata/generated_telemetry.go | 17 - .../kafkaexporter/jaeger_marshaler.go | 8 +- .../exporter/kafkaexporter/kafka_exporter.go | 179 +- .../exporter/kafkaexporter/marshaler.go | 145 +- .../exporter/kafkaexporter/metadata.yaml | 1 - .../exporter/kafkaexporter/pdata_marshaler.go | 107 +- .../exporter/kafkaexporter/raw_marshaler.go | 3 +- .../exporter/zipkinexporter/README.md | 2 +- .../exporter/zipkinexporter/config.go | 4 +- .../exporter/zipkinexporter/factory.go | 8 +- .../internal/metadata/generated_status.go | 3 +- .../internal/metadata/generated_telemetry.go | 17 - .../exporter/zipkinexporter/metadata.yaml | 1 - .../exporter/zipkinexporter/zipkin.go | 2 +- .../common/localhostgate/featuregate.go | 68 - .../internal/common/testutil/testutil.go | 153 + .../coreinternal/parseutils/parser.go | 21 +- .../internal/coreinternal/parseutils/uri.go | 164 + .../coreinternal/textutils/encoding.go | 11 +- .../timeutils/internal/ctimefmt/ctimefmt.go | 34 +- .../internal/coreinternal/timeutils/parser.go | 90 +- .../internal/filter/expr/matcher.go | 10 + .../internal/filter/filterlog/filterlog.go | 1 - .../filter/filtermatcher/attributematcher.go | 2 - .../internal/filter/filterottl/filter.go | 30 +- .../internal/filter/filterottl/functions.go | 14 +- .../internal/filter/filterset/config.go | 4 +- .../internal/filter/filterspan/filterspan.go | 2 +- .../internal/kafka/authentication.go | 53 +- .../internal/kafka/scram_client.go | 1 - .../pkg/kafka/topic/LICENSE | 201 + .../pkg/kafka/topic}/Makefile | 0 .../pkg/kafka/topic/README.md 
| 4 + .../pkg/kafka/topic/kafka_ctx.go | 19 + .../pkg/kafka/topic/metadata.yaml | 6 + .../pkg/ottl/CONTRIBUTING.md | 53 +- .../pkg/ottl/LANGUAGE.md | 38 +- .../pkg/ottl/README.md | 141 +- .../pkg/ottl/boolean_value.go | 1 - .../pkg/ottl/compare.go | 23 +- .../pkg/ottl/context_inferrer.go | 77 + .../ottl/contexts/internal/logging/logging.go | 390 + .../pkg/ottl/contexts/internal/metric.go | 32 +- .../pkg/ottl/contexts/internal/path.go | 5 + .../pkg/ottl/contexts/internal/resource.go | 29 +- .../pkg/ottl/contexts/internal/schema.go | 11 + .../pkg/ottl/contexts/internal/scope.go | 37 +- .../pkg/ottl/contexts/internal/span.go | 104 +- .../ottl/contexts/ottldatapoint/datapoint.go | 136 +- .../pkg/ottl/contexts/ottllog/log.go | 119 +- .../pkg/ottl/contexts/ottlmetric/metrics.go | 26 +- .../ottl/contexts/ottlresource/resource.go | 34 +- .../pkg/ottl/contexts/ottlscope/README.md | 27 + .../pkg/ottl/contexts/ottlscope/scope.go | 168 + .../pkg/ottl/contexts/ottlspan/span.go | 36 +- .../contexts/ottlspanevent/span_events.go | 60 +- .../pkg/ottl/expression.go | 165 +- .../pkg/ottl/functions.go | 116 +- .../pkg/ottl/grammar.go | 280 +- .../pkg/ottl/metadata.yaml | 3 +- .../pkg/ottl/ottlfuncs/README.md | 1000 +- .../pkg/ottl/ottlfuncs/func_append.go | 135 + .../pkg/ottl/ottlfuncs/func_base64decode.go | 1 - ...func_convert_attributes_to_elements_xml.go | 69 + .../func_convert_text_to_elements_xml.go | 107 + .../pkg/ottl/ottlfuncs/func_day.go | 39 + .../pkg/ottl/ottlfuncs/func_decode.go | 103 + .../pkg/ottl/ottlfuncs/func_duration.go | 1 + .../ottlfuncs/func_extract_grok_patterns.go | 103 + .../pkg/ottl/ottlfuncs/func_flatten.go | 8 +- .../pkg/ottl/ottlfuncs/func_fnv.go | 1 - .../pkg/ottl/ottlfuncs/func_format.go | 45 + .../pkg/ottl/ottlfuncs/func_get_xml.go | 70 + .../pkg/ottl/ottlfuncs/func_hex.go | 40 + .../pkg/ottl/ottlfuncs/func_hour.go | 1 + .../pkg/ottl/ottlfuncs/func_hours.go | 1 + .../pkg/ottl/ottlfuncs/func_insert_xml.go | 75 + .../pkg/ottl/ottlfuncs/func_is_list.go | 53 + .../pkg/ottl/ottlfuncs/func_is_root_span.go | 25 + .../pkg/ottl/ottlfuncs/func_keep_keys.go | 2 +- .../ottl/ottlfuncs/func_keep_matching_keys.go | 51 + .../pkg/ottl/ottlfuncs/func_limit.go | 2 +- .../pkg/ottl/ottlfuncs/func_md5.go | 46 + .../pkg/ottl/ottlfuncs/func_microseconds.go | 1 + .../pkg/ottl/ottlfuncs/func_milliseconds.go | 1 + .../pkg/ottl/ottlfuncs/func_minute.go | 39 + .../pkg/ottl/ottlfuncs/func_minutes.go | 1 + .../pkg/ottl/ottlfuncs/func_month.go | 39 + .../pkg/ottl/ottlfuncs/func_nanoseconds.go | 1 + .../pkg/ottl/ottlfuncs/func_now.go | 2 +- .../pkg/ottl/ottlfuncs/func_parse_json.go | 23 +- .../ottlfuncs/func_parse_simplified_xml.go | 134 + .../pkg/ottl/ottlfuncs/func_remove_xml.go | 95 + .../ottlfuncs/func_replace_all_matches.go | 2 +- .../ottlfuncs/func_replace_all_patterns.go | 2 +- .../pkg/ottl/ottlfuncs/func_seconds.go | 1 + .../pkg/ottl/ottlfuncs/func_sha1.go | 1 - .../pkg/ottl/ottlfuncs/func_sha256.go | 1 - .../pkg/ottl/ottlfuncs/func_sha512.go | 47 + .../pkg/ottl/ottlfuncs/func_slice_to_map.go | 105 + .../pkg/ottl/ottlfuncs/func_sort.go | 253 + .../pkg/ottl/ottlfuncs/func_string.go | 42 + .../pkg/ottl/ottlfuncs/func_time.go | 41 +- .../ottlfuncs/func_to_key_value_string.go | 121 + .../pkg/ottl/ottlfuncs/func_truncate_all.go | 2 +- .../pkg/ottl/ottlfuncs/func_truncate_time.go | 1 + .../pkg/ottl/ottlfuncs/func_unix.go | 51 + .../pkg/ottl/ottlfuncs/func_unix_micro.go | 1 + .../pkg/ottl/ottlfuncs/func_unix_milli.go | 1 + .../pkg/ottl/ottlfuncs/func_unix_nano.go | 1 + 
.../pkg/ottl/ottlfuncs/func_unix_seconds.go | 1 + .../pkg/ottl/ottlfuncs/func_url.go | 44 + .../pkg/ottl/ottlfuncs/func_useragent.go | 47 + .../pkg/ottl/ottlfuncs/func_uuid.go | 2 +- .../pkg/ottl/ottlfuncs/func_year.go | 39 + .../pkg/ottl/ottlfuncs/functions.go | 26 + .../pkg/ottl/parser.go | 99 +- .../pkg/ottl/parser_collection.go | 334 + .../pkg/ottl/paths.go | 32 + .../pkg/pdatautil/hash.go | 56 +- .../translator/azure/resourcelogs_to_logs.go | 42 +- .../translator/azure/resources_to_traces.go | 183 + .../pkg/translator/jaeger/constants.go | 6 +- .../jaeger/jaegerproto_to_traces.go | 6 +- .../jaeger/traces_to_jaegerproto.go | 14 +- .../pkg/translator/opencensus/oc_to_traces.go | 2 +- .../pkg/translator/opencensus/traces_to_oc.go | 3 +- .../pkg/translator/zipkin/zipkinv1/json.go | 1 - .../zipkin/zipkinv2/from_translator.go | 6 +- .../processor/filterprocessor/README.md | 79 +- .../filterprocessor/documentation.md | 31 + .../processor/filterprocessor/factory.go | 12 +- .../internal/metadata/generated_status.go | 13 +- .../internal/metadata/generated_telemetry.go | 79 + .../processor/filterprocessor/logs.go | 11 +- .../processor/filterprocessor/metadata.yaml | 27 +- .../processor/filterprocessor/metrics.go | 21 +- .../processor/filterprocessor/telemetry.go | 85 +- .../processor/filterprocessor/traces.go | 13 +- .../receiver/jaegerreceiver/README.md | 16 +- .../receiver/jaegerreceiver/config.go | 15 +- .../receiver/jaegerreceiver/factory.go | 25 +- .../internal/metadata/generated_status.go | 3 +- .../internal/metadata/generated_telemetry.go | 17 - .../receiver/jaegerreceiver/metadata.yaml | 5 +- .../receiver/jaegerreceiver/trace_receiver.go | 35 +- .../receiver/kafkareceiver/README.md | 18 +- .../receiver/kafkareceiver/config.go | 11 + .../receiver/kafkareceiver/documentation.md | 71 + .../receiver/kafkareceiver/factory.go | 122 +- .../kafkareceiver/header_extraction.go | 4 +- .../internal/metadata/generated_status.go | 3 +- .../internal/metadata/generated_telemetry.go | 103 +- .../kafkareceiver/jaeger_unmarshaler.go | 6 +- .../kafkareceiver/json_unmarshaler.go | 3 +- .../receiver/kafkareceiver/kafka_receiver.go | 324 +- .../receiver/kafkareceiver/metadata.yaml | 63 +- .../receiver/kafkareceiver/metrics.go | 107 - .../receiver/kafkareceiver/unmarshaler.go | 54 +- .../receiver/opencensusreceiver/README.md | 10 +- .../receiver/opencensusreceiver/factory.go | 10 +- .../internal/metadata/generated_status.go | 3 +- .../internal/metadata/generated_telemetry.go | 17 - .../internal/ocmetrics/opencensus.go | 2 +- .../internal/octrace/opencensus.go | 3 +- .../receiver/opencensusreceiver/metadata.yaml | 5 +- .../receiver/opencensusreceiver/opencensus.go | 20 +- .../receiver/opencensusreceiver/options.go | 1 + .../receiver/zipkinreceiver/README.md | 7 +- .../receiver/zipkinreceiver/factory.go | 8 +- .../internal/metadata/generated_status.go | 3 +- .../internal/metadata/generated_telemetry.go | 17 - .../receiver/zipkinreceiver/metadata.yaml | 5 +- .../receiver/zipkinreceiver/trace_receiver.go | 14 +- .../pelletier/go-toml/v2/.goreleaser.yaml | 1 + .../github.com/pelletier/go-toml/v2/README.md | 2 +- .../pelletier/go-toml/v2/marshaler.go | 24 +- .../pelletier/go-toml/v2/unmarshaler.go | 45 +- .../power-devops/perfstat/config.go | 1 + .../power-devops/perfstat/cpustat.go | 2 +- .../power-devops/perfstat/diskstat.go | 1 + .../github.com/power-devops/perfstat/doc.go | 13 +- .../power-devops/perfstat/fsstat.go | 1 + .../power-devops/perfstat/helpers.go | 55 + .../power-devops/perfstat/lparstat.go | 14 + 
 .../power-devops/perfstat/lvmstat.go | 1 +
 .../power-devops/perfstat/memstat.go | 1 +
 .../power-devops/perfstat/netstat.go | 1 +
 .../power-devops/perfstat/procstat.go | 1 +
 .../power-devops/perfstat/sysconf.go | 1 +
 .../power-devops/perfstat/systemcfg.go | 59 +-
 .../power-devops/perfstat/types_disk.go | 4 +-
 .../power-devops/perfstat/types_lpar.go | 61 +
 .../power-devops/perfstat/uptime.go | 1 +
 .../prometheus/client_golang/NOTICE | 5 -
 .../internal/github.com/golang/gddo/LICENSE | 27 +
 .../golang/gddo/httputil/header/header.go | 145 +
 .../golang/gddo/httputil/negotiate.go | 36 +
 .../collectors/go_collector_latest.go | 4 +-
 .../client_golang/prometheus/go_collector.go | 55 +-
 .../prometheus/go_collector_latest.go | 19 +-
 .../client_golang/prometheus/histogram.go | 268 +-
 .../internal/go_collector_options.go | 2 +
 .../client_golang/prometheus/metric.go | 2 +-
 .../prometheus/process_collector.go | 29 +-
 .../prometheus/process_collector_other.go | 14 +
 .../prometheus/promhttp/delegator.go | 6 +
 .../client_golang/prometheus/promhttp/http.go | 113 +-
 .../client_golang/prometheus/registry.go | 17 +-
 .../client_golang/prometheus/summary.go | 42 +
 .../testutil/promlint/validation.go | 1 +
 .../validations/duplicate_validations.go | 37 +
 .../validations/generic_name_validations.go | 20 +-
 .../prometheus/testutil/testutil.go | 108 +-
 .../client_golang/prometheus/vec.go | 2 +-
 .../prometheus/common/config/headers.go | 43 +-
 .../prometheus/common/config/http_config.go | 62 +-
 .../prometheus/common/expfmt/decode.go | 14 +-
 .../prometheus/common/expfmt/encode.go | 28 +-
 .../prometheus/common/expfmt/expfmt.go | 78 +-
 .../common/expfmt/openmetrics_create.go | 6 +-
 .../prometheus/common/expfmt/text_create.go | 4 +-
 .../prometheus/common/expfmt/text_parse.go | 164 +-
 .../prometheus/common/model/alert.go | 7 +-
 .../prometheus/common/model/labels.go | 27 +-
 .../common/model/labelset_string.go | 2 -
 .../common/model/labelset_string_go120.go | 39 -
 .../prometheus/common/model/metric.go | 62 +-
 .../prometheus/common/model/silence.go | 17 +-
 .../prometheus/common/model/value_float.go | 3 +-
 .../common/model/value_histogram.go | 7 +-
 .../prometheus/common/version/info.go | 8 +
 .../prometheus/prometheus/promql/engine.go | 5 +
 .../promql/histogram_stats_iterator.go | 22 +-
 .../prometheus/prometheus/scrape/scrape.go | 2 +-
 .../prometheus/tsdb/tsdbutil/histogram.go | 10 +-
 .../github.com/relvacode/iso8601/.gitignore | 2 +
 vendor/github.com/relvacode/iso8601/README.md | 22 +-
 .../github.com/relvacode/iso8601/iso8601.go | 42 +-
 vendor/github.com/rs/cors/cors.go | 16 +-
 .../github.com/rs/cors/internal/sortedset.go | 152 +-
 vendor/github.com/rs/cors/utils.go | 9 -
 .../shirou/gopsutil/v3/cpu/cpu_aix_nocgo.go | 92 -
 .../shirou/gopsutil/v3/cpu/cpu_darwin.go | 117 -
 .../shirou/gopsutil/v3/cpu/cpu_darwin_cgo.go | 111 -
 .../gopsutil/v3/cpu/cpu_darwin_nocgo.go | 14 -
 .../v3/internal/common/common_darwin.go | 66 -
 .../shirou/gopsutil/v3/mem/mem_darwin.go | 72 -
 .../shirou/gopsutil/v3/mem/mem_darwin_cgo.go | 58 -
 .../gopsutil/v3/mem/mem_darwin_nocgo.go | 89 -
 .../shirou/gopsutil/v3/net/net_linux_111.go | 12 -
 .../shirou/gopsutil/v3/net/net_linux_116.go | 12 -
 .../gopsutil/v3/process/process_darwin.go | 325 -
 .../gopsutil/v3/process/process_darwin_cgo.go | 222 -
 .../v3/process/process_darwin_nocgo.go | 127 -
 .../shirou/gopsutil/{v3 => v4}/LICENSE | 0
 .../shirou/gopsutil/{v3 => v4}/common/env.go | 16 +-
 .../shirou/gopsutil/{v3 => v4}/cpu/cpu.go | 3 +-
 .../shirou/gopsutil/{v3 => v4}/cpu/cpu_aix.go | 2 +-
 .../gopsutil/{v3 => v4}/cpu/cpu_aix_cgo.go | 2 +-
 .../shirou/gopsutil/v4/cpu/cpu_aix_nocgo.go | 157 +
 .../shirou/gopsutil/v4/cpu/cpu_darwin.go | 198 +
 .../gopsutil/v4/cpu/cpu_darwin_arm64.go | 80 +
 .../gopsutil/v4/cpu/cpu_darwin_fallback.go | 13 +
 .../gopsutil/{v3 => v4}/cpu/cpu_dragonfly.go | 3 +-
 .../{v3 => v4}/cpu/cpu_dragonfly_amd64.go | 1 +
 .../gopsutil/{v3 => v4}/cpu/cpu_fallback.go | 4 +-
 .../gopsutil/{v3 => v4}/cpu/cpu_freebsd.go | 10 +-
 .../{v3 => v4}/cpu/cpu_freebsd_386.go | 1 +
 .../{v3 => v4}/cpu/cpu_freebsd_amd64.go | 1 +
 .../{v3 => v4}/cpu/cpu_freebsd_arm.go | 1 +
 .../{v3 => v4}/cpu/cpu_freebsd_arm64.go | 1 +
 .../gopsutil/{v3 => v4}/cpu/cpu_linux.go | 10 +-
 .../gopsutil/{v3 => v4}/cpu/cpu_netbsd.go | 5 +-
 .../{v3 => v4}/cpu/cpu_netbsd_amd64.go | 1 +
 .../shirou/gopsutil/v4/cpu/cpu_netbsd_arm.go | 10 +
 .../{v3 => v4}/cpu/cpu_netbsd_arm64.go | 1 +
 .../gopsutil/{v3 => v4}/cpu/cpu_openbsd.go | 4 +-
 .../{v3 => v4}/cpu/cpu_openbsd_386.go | 1 +
 .../{v3 => v4}/cpu/cpu_openbsd_amd64.go | 1 +
 .../{v3 => v4}/cpu/cpu_openbsd_arm.go | 1 +
 .../{v3 => v4}/cpu/cpu_openbsd_arm64.go | 1 +
 .../{v3 => v4}/cpu/cpu_openbsd_riscv64.go | 1 +
 .../gopsutil/{v3 => v4}/cpu/cpu_plan9.go | 4 +-
 .../gopsutil/{v3 => v4}/cpu/cpu_solaris.go | 1 +
 .../gopsutil/{v3 => v4}/cpu/cpu_windows.go | 8 +-
 .../{v3 => v4}/internal/common/binary.go | 1 +
 .../{v3 => v4}/internal/common/common.go | 16 +-
 .../v4/internal/common/common_darwin.go | 364 +
 .../internal/common/common_freebsd.go | 2 +-
 .../internal/common/common_linux.go | 7 +-
 .../internal/common/common_netbsd.go | 2 +-
 .../internal/common/common_openbsd.go | 2 +-
 .../{v3 => v4}/internal/common/common_unix.go | 22 +-
 .../internal/common/common_windows.go | 2 +-
 .../{v3 => v4}/internal/common/endian.go | 1 +
 .../{v3 => v4}/internal/common/sleep.go | 1 +
 .../{v3 => v4}/internal/common/warnings.go | 1 +
 .../shirou/gopsutil/v4/mem/ex_linux.go | 40 +
 .../shirou/gopsutil/v4/mem/ex_windows.go | 51 +
 .../shirou/gopsutil/{v3 => v4}/mem/mem.go | 3 +-
 .../shirou/gopsutil/{v3 => v4}/mem/mem_aix.go | 8 +-
 .../gopsutil/{v3 => v4}/mem/mem_aix_cgo.go | 2 +-
 .../gopsutil/{v3 => v4}/mem/mem_aix_nocgo.go | 12 +-
 .../shirou/gopsutil/{v3 => v4}/mem/mem_bsd.go | 2 +-
 .../shirou/gopsutil/v4/mem/mem_darwin.go | 130 +
 .../gopsutil/{v3 => v4}/mem/mem_fallback.go | 4 +-
 .../gopsutil/{v3 => v4}/mem/mem_freebsd.go | 6 +-
 .../gopsutil/{v3 => v4}/mem/mem_linux.go | 36 +-
 .../gopsutil/{v3 => v4}/mem/mem_netbsd.go | 2 +-
 .../gopsutil/{v3 => v4}/mem/mem_openbsd.go | 4 +-
 .../{v3 => v4}/mem/mem_openbsd_386.go | 2 +-
 .../{v3 => v4}/mem/mem_openbsd_amd64.go | 1 +
 .../{v3 => v4}/mem/mem_openbsd_arm.go | 2 +-
 .../{v3 => v4}/mem/mem_openbsd_arm64.go | 2 +-
 .../{v3 => v4}/mem/mem_openbsd_riscv64.go | 2 +-
 .../gopsutil/{v3 => v4}/mem/mem_plan9.go | 4 +-
 .../gopsutil/{v3 => v4}/mem/mem_solaris.go | 4 +-
 .../gopsutil/{v3 => v4}/mem/mem_windows.go | 40 +-
 .../shirou/gopsutil/{v3 => v4}/net/net.go | 3 +-
 .../shirou/gopsutil/{v3 => v4}/net/net_aix.go | 26 +-
 .../gopsutil/{v3 => v4}/net/net_aix_cgo.go | 2 +-
 .../gopsutil/{v3 => v4}/net/net_aix_nocgo.go | 4 +-
 .../gopsutil/{v3 => v4}/net/net_darwin.go | 14 +-
 .../gopsutil/{v3 => v4}/net/net_fallback.go | 24 +-
 .../gopsutil/{v3 => v4}/net/net_freebsd.go | 8 +-
 .../gopsutil/{v3 => v4}/net/net_linux.go | 58 +-
 .../gopsutil/{v3 => v4}/net/net_openbsd.go | 11 +-
 .../gopsutil/{v3 => v4}/net/net_solaris.go | 4 +-
 .../gopsutil/{v3 => v4}/net/net_unix.go | 36 +-
 .../gopsutil/{v3 => v4}/net/net_windows.go | 24 +-
 .../gopsutil/{v3 => v4}/process/process.go | 45 +-
 .../{v3 => v4}/process/process_bsd.go | 6 +-
 .../gopsutil/v4/process/process_darwin.go | 480 +
 .../process/process_darwin_amd64.go | 22 +
 .../process/process_darwin_arm64.go | 23 +-
 .../{v3 => v4}/process/process_fallback.go | 16 +-
 .../{v3 => v4}/process/process_freebsd.go | 57 +-
 .../{v3 => v4}/process/process_freebsd_386.go | 1 +
 .../process/process_freebsd_amd64.go | 1 +
 .../{v3 => v4}/process/process_freebsd_arm.go | 1 +
 .../process/process_freebsd_arm64.go | 1 +
 .../{v3 => v4}/process/process_linux.go | 83 +-
 .../{v3 => v4}/process/process_openbsd.go | 58 +-
 .../{v3 => v4}/process/process_openbsd_386.go | 3 +-
 .../process/process_openbsd_amd64.go | 2 +
 .../{v3 => v4}/process/process_openbsd_arm.go | 3 +-
 .../process/process_openbsd_arm64.go | 3 +-
 .../process/process_openbsd_riscv64.go | 3 +-
 .../{v3 => v4}/process/process_plan9.go | 16 +-
 .../{v3 => v4}/process/process_posix.go | 4 +-
 .../{v3 => v4}/process/process_solaris.go | 15 +-
 .../{v3 => v4}/process/process_windows.go | 32 +-
 .../process/process_windows_32bit.go | 4 +-
 .../process/process_windows_64bit.go | 4 +-
 .../shoenig/go-m1cpu/.golangci.yaml | 12 -
 vendor/github.com/shoenig/go-m1cpu/LICENSE | 363 -
 vendor/github.com/shoenig/go-m1cpu/Makefile | 12 -
 vendor/github.com/shoenig/go-m1cpu/README.md | 66 -
 vendor/github.com/shoenig/go-m1cpu/cpu.go | 213 -
 .../shoenig/go-m1cpu/incompatible.go | 53 -
 vendor/github.com/spf13/cobra/.golangci.yml | 21 +-
 vendor/github.com/spf13/cobra/active_help.go | 13 +-
 vendor/github.com/spf13/cobra/args.go | 4 +-
 .../spf13/cobra/bash_completions.go | 25 +-
 vendor/github.com/spf13/cobra/cobra.go | 2 -
 vendor/github.com/spf13/cobra/command.go | 51 +-
 vendor/github.com/spf13/cobra/completions.go | 62 +-
 vendor/github.com/spf13/cobra/flag_groups.go | 34 +-
 .../spf13/cobra/powershell_completions.go | 4 +-
 .../testify/assert/assertion_compare.go | 35 +-
 .../testify/assert/assertion_format.go | 34 +-
 .../testify/assert/assertion_forward.go | 68 +-
 .../testify/assert/assertion_order.go | 10 +-
 .../stretchr/testify/assert/assertions.go | 157 +-
 .../testify/assert/yaml/yaml_custom.go | 25 +
 .../testify/assert/yaml/yaml_default.go | 37 +
 .../stretchr/testify/assert/yaml/yaml_fail.go | 18 +
 .../github.com/stretchr/testify/mock/mock.go | 155 +-
 .../stretchr/testify/require/require.go | 432 +-
 .../stretchr/testify/require/require.go.tmpl | 2 +-
 .../testify/require/require_forward.go | 68 +-
 .../stretchr/testify/require/requirements.go | 2 +-
 .../tklauser/go-sysconf/.cirrus.yml | 10 +-
 .../tklauser/go-sysconf/sysconf_bsd.go | 1 -
 .../tklauser/go-sysconf/sysconf_darwin.go | 37 +-
 .../tklauser/go-sysconf/sysconf_generic.go | 1 -
 .../tklauser/go-sysconf/sysconf_linux.go | 20 +-
 .../tklauser/go-sysconf/sysconf_netbsd.go | 18 +-
 .../tklauser/go-sysconf/sysconf_posix.go | 1 -
 .../go-sysconf/sysconf_unsupported.go | 1 -
 .../go-sysconf/zsysconf_defs_darwin.go | 2 -
 .../go-sysconf/zsysconf_defs_dragonfly.go | 1 -
 .../go-sysconf/zsysconf_defs_freebsd.go | 1 -
 .../go-sysconf/zsysconf_defs_linux.go | 1 -
 .../go-sysconf/zsysconf_defs_netbsd.go | 1 -
 .../go-sysconf/zsysconf_defs_openbsd.go | 1 -
 .../go-sysconf/zsysconf_defs_solaris.go | 1 -
 .../go-sysconf/zsysconf_values_freebsd_386.go | 1 -
 .../zsysconf_values_freebsd_amd64.go | 1 -
 .../go-sysconf/zsysconf_values_freebsd_arm.go | 1 -
 .../zsysconf_values_freebsd_arm64.go | 1 -
 .../zsysconf_values_freebsd_riscv64.go | 1 -
 .../go-sysconf/zsysconf_values_linux_386.go | 1 -
 .../go-sysconf/zsysconf_values_linux_amd64.go | 1 -
 .../go-sysconf/zsysconf_values_linux_arm.go | 1 -
.../go-sysconf/zsysconf_values_linux_arm64.go | 1 - .../zsysconf_values_linux_loong64.go | 1 - .../go-sysconf/zsysconf_values_linux_mips.go | 1 - .../zsysconf_values_linux_mips64.go | 1 - .../zsysconf_values_linux_mips64le.go | 1 - .../zsysconf_values_linux_mipsle.go | 1 - .../go-sysconf/zsysconf_values_linux_ppc64.go | 1 - .../zsysconf_values_linux_ppc64le.go | 1 - .../zsysconf_values_linux_riscv64.go | 1 - .../go-sysconf/zsysconf_values_linux_s390x.go | 1 - .../go-sysconf/zsysconf_values_netbsd_386.go | 1 - .../zsysconf_values_netbsd_amd64.go | 1 - .../go-sysconf/zsysconf_values_netbsd_arm.go | 1 - .../zsysconf_values_netbsd_arm64.go | 1 - .../github.com/tklauser/numcpus/.cirrus.yml | 20 +- .../tklauser/numcpus/numcpus_bsd.go | 1 - .../tklauser/numcpus/numcpus_linux.go | 13 +- .../tklauser/numcpus/numcpus_solaris.go | 1 - .../tklauser/numcpus/numcpus_unsupported.go | 1 - vendor/github.com/ua-parser/uap-go/LICENSE | 16 + .../ua-parser/uap-go/uaparser/.gitignore | 2 + .../ua-parser/uap-go/uaparser/LICENSE.md | 8 + .../ua-parser/uap-go/uaparser/cache.go | 36 + .../ua-parser/uap-go/uaparser/device.go | 30 + .../ua-parser/uap-go/uaparser/os.go | 49 + .../ua-parser/uap-go/uaparser/parser.go | 394 + .../ua-parser/uap-go/uaparser/user_agent.go | 44 + .../ua-parser/uap-go/uaparser/yaml.go | 3736 +++++++ .../auto/sdk/CONTRIBUTING.md | 27 + vendor/go.opentelemetry.io/auto/sdk/LICENSE | 201 + .../auto/sdk/VERSIONING.md | 15 + vendor/go.opentelemetry.io/auto/sdk/doc.go | 14 + .../auto/sdk/internal/telemetry/attr.go | 58 + .../auto/sdk/internal/telemetry/doc.go | 8 + .../auto/sdk/internal/telemetry/id.go | 103 + .../auto/sdk/internal/telemetry/number.go | 67 + .../auto/sdk/internal/telemetry/resource.go | 66 + .../auto/sdk/internal/telemetry/scope.go | 67 + .../auto/sdk/internal/telemetry/span.go | 456 + .../auto/sdk/internal/telemetry/status.go | 40 + .../auto/sdk/internal/telemetry/traces.go | 189 + .../auto/sdk/internal/telemetry/value.go | 452 + vendor/go.opentelemetry.io/auto/sdk/limit.go | 94 + vendor/go.opentelemetry.io/auto/sdk/span.go | 432 + vendor/go.opentelemetry.io/auto/sdk/tracer.go | 124 + .../auto/sdk/tracer_provider.go | 33 + .../collector/client/LICENSE | 202 + .../collector/client/Makefile | 1 + .../collector/component/component.go | 55 +- .../component/componentstatus/LICENSE | 202 + .../component/componentstatus/Makefile | 1 + .../component/componentstatus/instance.go | 91 + .../component/componentstatus/status.go | 160 + .../collector/component/componenttest/LICENSE | 202 + .../component/componenttest/Makefile | 1 + .../component/componenttest/configtest.go | 2 - .../component/componenttest/nop_host.go | 4 - .../component/componenttest/nop_telemetry.go | 2 - .../component/componenttest/obsreporttest.go | 116 +- .../component/componenttest/otelchecker.go | 173 + .../componenttest/otelprometheuschecker.go | 202 - .../collector/component/config.go | 83 +- .../collector/component/host.go | 19 +- .../collector/component/identifiable.go | 70 + .../collector/component/status.go | 190 - .../collector/component/telemetry.go | 14 +- .../collector/config/configauth/configauth.go | 5 +- .../configcompression/compressiontype.go | 3 +- .../collector/config/configgrpc/README.md | 4 +- .../collector/config/configgrpc/configgrpc.go | 167 +- .../config/configgrpc/internal/zstd.go | 83 - .../collector/config/confighttp/README.md | 19 +- .../config/confighttp/compress_readcloser.go | 23 + .../config/confighttp/compression.go | 111 +- .../collector/config/confighttp/compressor.go | 10 + 
.../collector/config/confighttp/confighttp.go | 224 +- .../config/confighttp/internal/options.go | 38 + .../collector/config/confignet/README.md | 3 +- .../collector/config/configretry/backoff.go | 1 - .../collector/config/configtelemetry/doc.go | 40 + .../config/configtls/clientcasfilereloader.go | 5 +- .../collector/config/configtls/configtls.go | 12 +- .../collector/config/internal/warning.go | 7 +- .../collector/confmap/README.md | 57 + .../collector/confmap/confmap.go | 144 +- .../converter/expandconverter/expand.go | 114 - .../collector/confmap/expand.go | 114 +- .../confmap/internal/envvar/pattern.go | 10 - .../confmap/internal/mapstructure/encoder.go | 37 +- .../collector/confmap/provider.go | 75 +- .../confmap/provider/envprovider/provider.go | 66 - .../confmap/provider/fileprovider/provider.go | 64 - .../confmap/provider/httpprovider/Makefile | 1 - .../confmap/provider/httpprovider/README.md | 13 - .../confmap/provider/httpprovider/provider.go | 22 - .../confmap/provider/httpsprovider/Makefile | 1 - .../confmap/provider/httpsprovider/README.md | 18 - .../provider/httpsprovider/provider.go | 23 - .../configurablehttpprovider/provider.go | 121 - .../confmap/provider/internal/provider.go | 21 - .../confmap/provider/yamlprovider/Makefile | 1 - .../confmap/provider/yamlprovider/provider.go | 50 - .../collector/confmap/resolver.go | 41 +- .../collector/connector/README.md | 2 +- .../collector/connector/connector.go | 447 +- .../collector/connector/connectortest/LICENSE | 202 + .../connector/connectortest/Makefile | 1 + .../connector/connectortest/connector.go | 128 + .../collector/connector/internal/factory.go | 15 + .../connector/{ => internal}/router.go | 27 +- .../collector/connector/logs_router.go | 26 +- .../collector/connector/metrics_router.go | 13 +- .../collector/connector/traces_router.go | 13 +- .../collector/connector/xconnector/LICENSE | 202 + .../collector/connector/xconnector/Makefile | 1 + .../connector/xconnector/connector.go | 331 + .../connector/xconnector/profiles_router.go | 36 + .../collector/consumer/consumer.go | 44 +- .../collector/consumer/consumererror/LICENSE | 202 + .../collector/consumer/consumererror/Makefile | 1 + .../consumererror/internal/retryable.go | 31 + .../consumer/consumererror/signalerrors.go | 40 +- .../consumererror/xconsumererror/LICENSE | 202 + .../consumererror/xconsumererror}/Makefile | 0 .../xconsumererror/signalerrors.go | 25 + .../collector/consumer/consumertest/LICENSE | 202 + .../collector/consumer/consumertest/Makefile | 1 + .../consumer/consumertest/consumer.go | 15 +- .../collector/consumer/consumertest/err.go | 8 +- .../collector/consumer/consumertest/nop.go | 8 +- .../collector/consumer/consumertest/sink.go | 50 + .../collector/consumer/internal/consumer.go | 50 + .../collector/consumer/logs.go | 7 +- .../collector/consumer/metrics.go | 7 +- .../collector/consumer/traces.go | 7 +- .../collector/consumer/xconsumer/LICENSE | 202 + .../collector/consumer/xconsumer/Makefile | 1 + .../collector/consumer/xconsumer/profiles.go | 47 + .../collector/exporter/exporter.go | 163 +- .../exporter/exporterbatcher/batch_func.go | 24 - .../exporter/exporterhelper/README.md | 1 - .../exporter/exporterhelper/common.go | 300 +- .../exporter/exporterhelper/documentation.md | 70 +- .../exporter/exporterhelper/exporterhelper.go | 18 + .../exporterhelper/internal/base_exporter.go | 311 + .../{ => internal}/batch_sender.go | 120 +- .../internal/metadata/generated_telemetry.go | 150 +- .../exporterhelper/internal/obsexporter.go | 170 + 
.../exporterhelper/internal/obsmetrics.go} | 30 +- .../exporterhelper/internal/queue_sender.go | 200 + .../exporterhelper/internal/request.go | 152 + .../exporterhelper/internal/request_sender.go | 33 + .../exporterhelper/internal/retry_sender.go | 142 + .../exporterhelper/internal/test_util.go | 20 + .../exporterhelper/internal/timeout_sender.go | 52 + .../collector/exporter/exporterhelper/logs.go | 50 +- .../exporter/exporterhelper/logs_batch.go | 18 +- .../exporter/exporterhelper/metadata.yaml | 60 +- .../exporter/exporterhelper/metrics.go | 50 +- .../exporter/exporterhelper/metrics_batch.go | 18 +- .../exporter/exporterhelper/obsexporter.go | 169 - .../exporter/exporterhelper/queue_sender.go | 179 +- .../exporter/exporterhelper/retry_sender.go | 131 +- .../exporter/exporterhelper/timeout_sender.go | 45 +- .../exporter/exporterhelper/traces.go | 58 +- .../exporter/exporterhelper/traces_batch.go | 18 +- .../exporterhelper/xexporterhelper/LICENSE | 202 + .../exporterhelper/xexporterhelper}/Makefile | 0 .../xexporterhelper/constants.go | 19 + .../xexporterhelper/profiles.go | 164 + .../xexporterhelper/profiles_batch.go | 142 + .../exporter/exporterqueue/config.go | 2 +- .../collector/exporter/exporterqueue/queue.go | 33 +- .../collector/exporter/exportertest/LICENSE | 202 + .../collector/exporter/exportertest/Makefile | 1 + .../exporter/exportertest/contract_checker.go | 33 +- .../exporter/exportertest/mock_consumer.go | 8 +- .../exporter/exportertest/nop_exporter.go | 40 +- .../exporter/internal/queue/batcher.go | 97 + .../internal/queue/bounded_memory_queue.go | 17 +- .../exporter/internal/queue/consumers.go | 16 +- .../internal/queue/default_batcher.go | 180 + .../internal/queue/disabled_batcher.go | 52 + .../exporter/internal/queue/mock_storage.go | 181 +- .../internal/queue/persistent_queue.go | 139 +- .../exporter/internal/queue/queue.go | 28 +- .../exporter/internal/queue/sized_channel.go | 12 +- .../{exporterhelper => internal}/request.go | 28 +- .../collector/exporter/otlpexporter/README.md | 16 +- .../exporter/otlpexporter/cfg-schema.yaml | 6 +- .../collector/exporter/otlpexporter/config.go | 17 +- .../collector/exporter/otlpexporter/doc.go | 2 +- .../exporter/otlpexporter/factory.go | 71 +- .../internal/metadata/generated_status.go | 10 +- .../exporter/otlpexporter/metadata.yaml | 6 +- .../collector/exporter/otlpexporter/otlp.go | 42 +- .../exporter/otlphttpexporter/README.md | 14 +- .../exporter/otlphttpexporter/config.go | 6 +- .../exporter/otlphttpexporter/factory.go | 103 +- .../internal/metadata/generated_status.go | 10 +- .../exporter/otlphttpexporter/metadata.yaml | 6 +- .../exporter/otlphttpexporter/otlp.go | 70 +- .../collector/exporter/xexporter/LICENSE | 202 + .../collector/exporter/xexporter/Makefile | 1 + .../collector/exporter/xexporter/exporter.go | 109 + .../collector/extension/README.md | 3 +- .../collector/extension/auth/client.go | 28 +- .../collector/extension/auth/server.go | 36 +- .../extension/experimental/storage/LICENSE | 202 + .../extension/experimental/storage/Makefile | 2 +- .../extension/experimental/storage/storage.go | 1 - .../collector/extension/extension.go | 114 +- .../extension/extensioncapabilities/LICENSE | 202 + .../extension/extensioncapabilities/Makefile | 1 + .../extensioncapabilities/interfaces.go | 49 + .../collector/extension/extensiontest/LICENSE | 202 + .../extension/extensiontest/Makefile | 1 + .../extension/extensiontest/nop_extension.go | 48 + .../collector/featuregate/flag.go | 6 + .../collector/featuregate/registry.go | 10 +- 
.../collector/internal/fanoutconsumer/LICENSE | 202 + .../internal/fanoutconsumer/Makefile | 1 + .../collector/internal/fanoutconsumer/logs.go | 2 +- .../internal/fanoutconsumer/metrics.go | 2 +- .../internal/fanoutconsumer/profiles.go | 82 + .../internal/fanoutconsumer/traces.go | 2 +- .../internal/localhostgate/featuregate.go | 68 - .../obsmetrics/obs_processor.go | 22 - .../obsreportconfig/obsmetrics/obs_scraper.go | 22 - .../obsreportconfig/obsmetrics/obsmetrics.go | 13 - .../obsreportconfig/obsreportconfig.go | 24 - .../internal/sharedcomponent/LICENSE | 202 + .../internal/sharedcomponent/Makefile | 1 + .../sharedcomponent/sharedcomponent.go | 114 +- .../collector/otelcol/buffered_core.go | 4 +- .../collector/otelcol/collector.go | 57 +- .../collector/otelcol/collector_windows.go | 20 +- .../collector/otelcol/command.go | 13 +- .../collector/otelcol/command_components.go | 37 +- .../collector/otelcol/config.go | 6 +- .../collector/otelcol/configprovider.go | 45 - .../collector/otelcol/factories.go | 15 + .../internal/configunmarshaler/configs.go | 14 +- .../otelcol/internal/grpclog/logger.go | 2 +- .../collector/otelcol/unmarshaler.go | 1 - .../pdata/internal/data/profileid.go | 79 + .../profiles_service.pb.go | 98 +- .../data/protogen/metrics/v1/metrics.pb.go | 25 +- .../profiles.pb.go} | 3532 +++--- .../profiles/v1experimental/profiles.pb.go | 1482 --- .../internal/generated_wrapper_int32slice.go | 34 + .../internal/generated_wrapper_intslice.go | 34 + .../collector/pdata/internal/json/enum.go | 2 +- .../collector/pdata/internal/otlp/profiles.go | 12 + .../pdata/internal/wrapper_profiles.go | 4 +- .../pdata/pcommon/generated_int32slice.go | 108 + .../pdata/pcommon/generated_intslice.go | 108 + .../collector/pdata/pcommon/map.go | 9 + .../collector/pdata/pcommon/timestamp.go | 2 + .../collector/pdata/pcommon/value.go | 4 +- .../collector/pdata/pprofile/LICENSE | 202 + .../collector/pdata/pprofile/Makefile | 1 + .../collector/pdata/pprofile/encoding.go | 31 + .../pdata/pprofile/generated_attribute.go | 70 + .../pprofile/generated_attributetableslice.go | 136 + .../pdata/pprofile/generated_attributeunit.go | 75 + .../pprofile/generated_attributeunitslice.go | 152 + .../pdata/pprofile/generated_function.go | 99 + .../pdata/pprofile/generated_functionslice.go | 152 + .../pdata/pprofile/generated_line.go | 87 + .../pdata/pprofile/generated_lineslice.go | 152 + .../pdata/pprofile/generated_link.go | 77 + .../pdata/pprofile/generated_linkslice.go | 152 + .../pdata/pprofile/generated_location.go | 115 + .../pdata/pprofile/generated_locationslice.go | 152 + .../pdata/pprofile/generated_mapping.go | 154 + .../pdata/pprofile/generated_mappingslice.go | 152 + .../pdata/pprofile/generated_profile.go | 233 + .../pdata/pprofile/generated_profilesslice.go | 152 + .../pprofile/generated_resourceprofiles.go | 76 + .../generated_resourceprofilesslice.go | 152 + .../pdata/pprofile/generated_sample.go | 94 + .../pdata/pprofile/generated_sampleslice.go | 152 + .../pdata/pprofile/generated_scopeprofiles.go | 76 + .../pprofile/generated_scopeprofilesslice.go | 152 + .../pdata/pprofile/generated_valuetype.go | 87 + .../pprofile/generated_valuetypeslice.go | 152 + .../collector/pdata/pprofile/json.go | 356 + .../collector/pdata/pprofile/pb.go | 31 + .../generated_exportpartialsuccess.go | 75 + .../pdata/pprofile/pprofileotlp/grpc.go | 91 + .../pdata/pprofile/pprofileotlp/request.go | 79 + .../pdata/pprofile/pprofileotlp/response.go | 87 + .../collector/pdata/pprofile/profileid.go | 37 + 
.../collector/pdata/pprofile/profiles.go | 68 + .../collector/pdata/ptrace/json.go | 4 + .../pdata/ptrace/ptraceotlp/response.go | 1 - .../collector/pdata/testdata/log.go | 4 +- .../collector/pdata/testdata/profile.go | 66 + .../collector/pdata/testdata/trace.go | 1 + .../collector/pipeline/LICENSE | 202 + .../collector/pipeline/Makefile | 1 + .../pipeline/internal/globalsignal/signal.go | 51 + .../collector/pipeline/pipeline.go | 131 + .../collector/pipeline/signal.go | 22 + .../collector/pipeline/xpipeline/LICENSE | 202 + .../collector/pipeline/xpipeline/Makefile | 1 + .../collector/pipeline/xpipeline/config.go | 8 + .../processor/internal/obsmetrics.go | 13 + .../collector/processor/processor.go | 230 +- .../processorhelper/documentation.md | 68 +- .../internal/metadata/generated_telemetry.go | 110 +- .../processor/processorhelper/logs.go | 32 +- .../processor/processorhelper/metadata.yaml | 73 +- .../processor/processorhelper/metrics.go | 32 +- .../processor/processorhelper/obsreport.go | 120 +- .../processor/processorhelper/processor.go | 28 +- .../processor/processorhelper/traces.go | 33 +- .../collector/processor/processortest/LICENSE | 202 + .../processor/processortest/Makefile | 1 + .../processor/processortest/nop_processor.go | 70 + .../processortest/shutdown_verifier.go | 99 + .../processortest/unhealthy_processor.go | 62 + .../collector/processor/xprocessor/LICENSE | 202 + .../collector/processor/xprocessor/Makefile | 1 + .../processor/xprocessor/processor.go | 114 + .../internal/obsmetrics.go} | 9 +- .../collector/receiver/otlpreceiver/README.md | 48 +- .../collector/receiver/otlpreceiver/config.go | 6 +- .../collector/receiver/otlpreceiver/config.md | 6 +- .../receiver/otlpreceiver/encoder.go | 23 + .../receiver/otlpreceiver/factory.go | 61 +- .../otlpreceiver/internal/errors/errors.go | 12 + .../internal/metadata/generated_status.go | 10 +- .../otlpreceiver/internal/profiles/otlp.go | 48 + .../receiver/otlpreceiver/metadata.yaml | 4 +- .../collector/receiver/otlpreceiver/otlp.go | 47 +- .../receiver/otlpreceiver/otlphttp.go | 32 + .../collector/receiver/receiver.go | 222 +- .../receiver/receiverhelper/documentation.md | 36 +- .../internal/metadata/generated_telemetry.go | 89 +- .../receiver/receiverhelper/metadata.yaml | 28 +- .../receiver/receiverhelper/obsreport.go | 79 +- .../collector/receiver/receivertest/LICENSE | 202 + .../collector/receiver/receivertest/Makefile | 1 + .../receiver/receivertest/contract_checker.go | 30 +- .../receiver/receivertest/nop_receiver.go | 51 +- .../collector/receiver/xreceiver/LICENSE | 202 + .../collector/receiver/xreceiver/Makefile | 1 + .../collector/receiver/xreceiver/profiles.go | 116 + .../semconv/v1.12.0/generated_resource.go | 1086 ++ .../semconv/v1.12.0/generated_trace.go | 1805 ++++ .../collector/semconv/v1.12.0/nonstandard.go | 11 + .../collector/semconv/v1.12.0/schema.go | 9 + .../semconv/v1.13.0/generated_resource.go | 4 +- .../semconv/v1.16.0/generated_resource.go | 1168 ++ .../semconv/v1.16.0/generated_trace.go | 1913 ++++ .../collector/semconv/v1.16.0/nonstandard.go | 11 + .../collector/semconv/v1.16.0/schema.go | 9 + .../semconv/v1.18.0/generated_resource.go | 4 +- .../collector/semconv/v1.25.0/doc.go | 9 + .../v1.25.0/generated_attribute_group.go | 4796 ++++++++ .../semconv/v1.25.0/generated_event.go | 105 + .../semconv/v1.25.0/generated_resource.go | 242 + .../semconv/v1.25.0/generated_trace.go | 245 + .../collector/semconv/v1.25.0/schema.go | 9 + .../collector/semconv/v1.26.0/doc.go | 9 + 
.../v1.26.0/generated_attribute_group.go | 5331 +++++++++ .../semconv/v1.26.0/generated_event.go | 10 + .../semconv/v1.26.0/generated_resource.go | 10 + .../semconv/v1.26.0/generated_trace.go | 10 + .../collector/semconv/v1.26.0/schema.go | 9 + .../collector/semconv/v1.27.0/doc.go | 9 + .../v1.27.0/generated_attribute_group.go | 5843 ++++++++++ .../semconv/v1.27.0/generated_event.go | 10 + .../semconv/v1.27.0/generated_resource.go | 10 + .../semconv/v1.27.0/generated_trace.go | 10 + .../collector/semconv/v1.27.0/schema.go | 9 + .../semconv/v1.6.1/generated_resource.go | 2 +- .../semconv/v1.9.0/generated_resource.go | 2 +- .../collector/service/attributes.go | 27 + .../collector/service/documentation.md | 55 + .../service/extensions/extensions.go | 96 +- .../collector/service/extensions/graph.go | 6 +- .../collector/service/host.go | 74 - .../service/internal/builders/builders.go | 28 + .../service/internal/builders/connector.go | 399 + .../service/internal/builders/exporter.go | 112 + .../service/internal/builders/extension.go | 69 + .../service/internal/builders/processor.go | 126 + .../service/internal/builders/receiver.go | 128 + .../capabilityconsumer/capabilities.go | 35 +- .../service/internal/components/loggers.go | 9 +- .../service/internal/graph/capabilities.go | 40 + .../service/internal/graph/connector.go | 220 + .../service/internal/graph/consumer.go | 39 + .../service/internal/graph/exporter.go | 68 + .../service/internal/graph/fanout.go | 31 + .../collector/service/internal/graph/graph.go | 284 +- .../collector/service/internal/graph/host.go | 169 + .../collector/service/internal/graph/node.go | 22 + .../collector/service/internal/graph/nodes.go | 396 - .../service/internal/graph/processor.go | 70 + .../service/internal/graph/receiver.go | 81 + .../service/internal/graph/zpages.go | 2 +- .../internal/metadata/generated_telemetry.go | 179 + .../proctelemetry/process_telemetry.go | 110 +- .../nop_telemetry_settings.go | 27 - .../servicetelemetry/telemetry_settings.go | 55 - .../service/internal/status/status.go | 141 +- .../collector/service/metadata.yaml | 73 + .../collector/service/pipelines/config.go | 34 +- .../collector/service/service.go | 224 +- .../collector/service/telemetry/config.go | 73 +- .../collector/service/telemetry/factory.go | 109 +- .../service/telemetry/factory_impl.go | 111 + .../service/telemetry/internal/factory.go | 116 - .../internal/otelinit}/config.go | 172 +- .../collector/service/telemetry/logger.go | 55 +- .../{telemetry.go => telemetry/metrics.go} | 55 +- .../collector/service/telemetry/telemetry.go | 11 - .../collector/service/telemetry/tracer.go | 77 +- .../collector/service/zpages.go | 99 - .../contrib/bridges/otelzap/LICENSE | 201 + .../contrib/bridges/otelzap/README.md | 3 + .../contrib/bridges/otelzap/convert.go | 123 + .../contrib/bridges/otelzap/core.go | 277 + .../contrib/bridges/otelzap/encoder.go | 274 + .../contrib/bridges/otelzap/gen.go | 8 + .../contrib/bridges/prometheus/producer.go | 4 +- .../contrib/config/config.go | 19 +- .../contrib/config/generated_config.go | 46 +- .../go.opentelemetry.io/contrib/config/log.go | 155 + .../contrib/config/metric.go | 203 +- .../contrib/config/resource.go | 11 +- .../contrib/config/trace.go | 2 +- .../contrib/exporters/autoexport/logs.go | 8 +- .../google.golang.org/grpc/otelgrpc/config.go | 82 +- .../grpc/otelgrpc/interceptor.go | 5 +- .../grpc/otelgrpc/stats_handler.go | 36 +- .../grpc/otelgrpc/version.go | 2 +- .../net/http/otelhttp/client.go | 6 +- .../net/http/otelhttp/common.go | 7 - 
.../net/http/otelhttp/handler.go | 31 +- .../net/http/otelhttp/internal/semconv/env.go | 120 +- .../otelhttp/internal/semconv/httpconv.go | 34 +- .../http/otelhttp/internal/semconv/util.go | 4 +- .../http/otelhttp/internal/semconv/v1.20.0.go | 130 +- .../net/http/otelhttp/start_time_context.go | 29 + .../net/http/otelhttp/transport.go | 58 +- .../net/http/otelhttp/version.go | 2 +- .../contrib/propagators/b3/b3_propagator.go | 7 +- .../contrib/propagators/b3/version.go | 2 +- vendor/go.opentelemetry.io/otel/.gitignore | 8 - vendor/go.opentelemetry.io/otel/.golangci.yml | 20 +- vendor/go.opentelemetry.io/otel/CHANGELOG.md | 79 +- .../go.opentelemetry.io/otel/CONTRIBUTING.md | 18 +- vendor/go.opentelemetry.io/otel/Makefile | 16 +- vendor/go.opentelemetry.io/otel/VERSIONING.md | 2 +- .../otel/baggage/baggage.go | 4 +- .../internal/oc2otel/span_context.go | 12 + .../opencensus/internal/ocmetric/metric.go | 12 +- .../internal/otel2oc/span_context.go | 10 + .../otel/bridge/opencensus/internal/span.go | 4 +- .../otel/bridge/opencensus/internal/tracer.go | 2 +- .../otel/bridge/opencensus/version.go | 2 +- .../otel/bridge/opentracing/bridge.go | 5 +- .../otel/bridge/opentracing/provider.go | 5 + .../go.opentelemetry.io/otel/codes/codes.go | 3 +- .../otlp/otlplog/otlploggrpc/LICENSE | 201 + .../otlp/otlplog/otlploggrpc/README.md | 3 + .../otlp/otlplog/otlploggrpc/client.go | 258 + .../otlp/otlplog/otlploggrpc/config.go | 653 ++ .../exporters/otlp/otlplog/otlploggrpc/doc.go | 63 + .../otlp/otlplog/otlploggrpc/exporter.go | 93 + .../otlploggrpc/internal/retry/retry.go | 145 + .../otlploggrpc/internal/transform/log.go | 390 + .../otlp/otlplog/otlploggrpc/version.go | 9 + .../otlp/otlplog/otlploghttp/client.go | 86 +- .../otlp/otlplog/otlploghttp/config.go | 21 +- .../exporters/otlp/otlplog/otlploghttp/doc.go | 2 +- .../otlp/otlplog/otlploghttp/exporter.go | 3 + .../otlploghttp/internal/transform/log.go | 107 +- .../otlp/otlplog/otlploghttp/version.go | 2 +- .../otlp/otlpmetric/otlpmetricgrpc/client.go | 7 +- .../otlp/otlpmetric/otlpmetricgrpc/config.go | 10 +- .../otlp/otlpmetric/otlpmetricgrpc/doc.go | 5 +- .../otlpmetric/otlpmetricgrpc/exporter.go | 3 +- .../internal/envconfig/envconfig.go | 32 +- .../otlpmetricgrpc/internal/oconf/options.go | 8 +- .../otlpmetricgrpc/internal/oconf/tls.go | 2 +- .../internal/transform/metricdata.go | 18 +- .../otlp/otlpmetric/otlpmetricgrpc/version.go | 2 +- .../otlp/otlpmetric/otlpmetrichttp/client.go | 86 +- .../otlp/otlpmetric/otlpmetrichttp/config.go | 10 +- .../otlp/otlpmetric/otlpmetrichttp/doc.go | 2 +- .../otlpmetric/otlpmetrichttp/exporter.go | 3 +- .../internal/envconfig/envconfig.go | 32 +- .../otlpmetrichttp/internal/oconf/options.go | 8 +- .../otlpmetrichttp/internal/oconf/tls.go | 2 +- .../internal/transform/metricdata.go | 18 +- .../otlp/otlpmetric/otlpmetrichttp/version.go | 2 +- .../tracetransform/instrumentation.go | 5 +- .../otlptrace/internal/tracetransform/span.go | 30 +- .../otlp/otlptrace/otlptracegrpc/client.go | 7 +- .../otlp/otlptrace/otlptracegrpc/doc.go | 5 +- .../internal/envconfig/envconfig.go | 32 +- .../internal/otlpconfig/options.go | 8 +- .../otlp/otlptrace/otlptracehttp/client.go | 73 +- .../otlp/otlptrace/otlptracehttp/doc.go | 2 +- .../internal/envconfig/envconfig.go | 32 +- .../internal/otlpconfig/options.go | 8 +- .../otlp/otlptrace/otlptracehttp/options.go | 11 +- .../otel/exporters/otlp/otlptrace/version.go | 2 +- .../otel/exporters/prometheus/config.go | 6 +- .../otel/exporters/prometheus/exporter.go | 204 +- 
.../otel/internal/attribute/attribute.go | 44 +- .../otel/internal/global/instruments.go | 14 +- .../otel/internal/global/meter.go | 69 +- .../otel/internal/global/trace.go | 33 +- vendor/go.opentelemetry.io/otel/log/DESIGN.md | 33 +- vendor/go.opentelemetry.io/otel/log/doc.go | 10 +- .../otel/log/global/README.md | 3 + .../otel/log/global/log.go | 49 + .../otel/log/internal/global/log.go | 107 + .../otel/log/internal/global/state.go | 53 + .../go.opentelemetry.io/otel/log/keyvalue.go | 14 +- vendor/go.opentelemetry.io/otel/log/logger.go | 23 +- .../go.opentelemetry.io/otel/log/noop/noop.go | 2 +- .../go.opentelemetry.io/otel/log/provider.go | 11 + vendor/go.opentelemetry.io/otel/log/record.go | 3 + .../otel/sdk/instrumentation/scope.go | 4 + .../otel/sdk/log/DESIGN.md | 60 +- .../go.opentelemetry.io/otel/sdk/log/batch.go | 34 +- .../go.opentelemetry.io/otel/sdk/log/doc.go | 34 +- .../otel/sdk/log/exporter.go | 13 +- .../otel/sdk/log/internal/x/README.md | 35 + .../otel/sdk/log/internal/x/x.go | 47 + .../otel/sdk/log/logger.go | 27 +- .../otel/sdk/log/processor.go | 32 +- .../otel/sdk/log/provider.go | 35 +- .../otel/sdk/log/record.go | 102 +- .../otel/sdk/log/simple.go | 46 +- .../otel/sdk/metric/config.go | 79 +- .../otel/sdk/metric/doc.go | 8 + .../otel/sdk/metric/exemplar.go | 68 +- .../otel/sdk/metric/exemplar/README.md | 3 + .../sdk/metric/{internal => }/exemplar/doc.go | 2 +- .../{internal => }/exemplar/exemplar.go | 2 +- .../metric/{internal => }/exemplar/filter.go | 11 +- .../fixed_size_reservoir.go} | 118 +- .../metric/exemplar/histogram_reservoir.go | 70 + .../{internal => }/exemplar/reservoir.go | 10 +- .../metric/{internal => }/exemplar/storage.go | 8 +- .../metric/{internal => }/exemplar/value.go | 8 +- .../otel/sdk/metric/exporter.go | 4 +- .../otel/sdk/metric/instrument.go | 25 +- .../metric/internal/aggregate/aggregate.go | 9 +- .../sdk/metric/internal/aggregate/drop.go | 27 + .../sdk/metric/internal/aggregate/exemplar.go | 3 +- .../aggregate/exponential_histogram.go | 99 +- .../internal/aggregate/filtered_reservoir.go | 50 + .../metric/internal/aggregate/histogram.go | 11 +- .../metric/internal/aggregate/lastvalue.go | 11 +- .../otel/sdk/metric/internal/aggregate/sum.go | 17 +- .../otel/sdk/metric/internal/exemplar/drop.go | 23 - .../internal/exemplar/filtered_reservoir.go | 49 - .../otel/sdk/metric/internal/exemplar/hist.go | 46 - .../otel/sdk/metric/internal/x/README.md | 19 + .../otel/sdk/metric/internal/x/x.go | 58 +- .../otel/sdk/metric/manual_reader.go | 9 +- .../otel/sdk/metric/meter.go | 143 +- .../otel/sdk/metric/periodic_reader.go | 9 +- .../otel/sdk/metric/pipeline.go | 132 +- .../otel/sdk/metric/provider.go | 10 +- .../otel/sdk/metric/reader.go | 16 +- .../otel/sdk/metric/version.go | 2 +- .../otel/sdk/metric/view.go | 11 +- .../otel/sdk/resource/auto.go | 62 +- .../otel/sdk/resource/builtin.go | 6 +- .../otel/sdk/trace/batch_span_processor.go | 1 + .../otel/sdk/trace/provider.go | 9 +- .../otel/sdk/trace/sampler_env.go | 5 +- .../otel/sdk/trace/span.go | 106 +- .../otel/sdk/trace/tracetest/recorder.go | 13 + .../go.opentelemetry.io/otel/sdk/version.go | 2 +- .../go.opentelemetry.io/otel/trace/config.go | 2 +- vendor/go.opentelemetry.io/otel/version.go | 2 +- vendor/go.opentelemetry.io/otel/versions.yaml | 15 +- .../proto/otlp/metrics/v1/metrics.pb.go | 27 +- vendor/golang.org/x/exp/LICENSE | 4 +- vendor/golang.org/x/exp/slices/sort.go | 4 +- vendor/golang.org/x/mod/LICENSE | 4 +- vendor/golang.org/x/net/context/context.go | 56 + 
vendor/golang.org/x/net/context/go17.go | 72 + vendor/golang.org/x/net/context/go19.go | 20 + vendor/golang.org/x/net/context/pre_go17.go | 300 + vendor/golang.org/x/net/context/pre_go19.go | 109 + vendor/golang.org/x/net/html/atom/atom.go | 78 + vendor/golang.org/x/net/html/atom/table.go | 783 ++ .../golang.org/x/net/html/charset/charset.go | 257 + vendor/golang.org/x/net/html/const.go | 111 + vendor/golang.org/x/net/html/doc.go | 122 + vendor/golang.org/x/net/html/doctype.go | 156 + vendor/golang.org/x/net/html/entity.go | 2253 ++++ vendor/golang.org/x/net/html/escape.go | 339 + vendor/golang.org/x/net/html/foreign.go | 222 + vendor/golang.org/x/net/html/iter.go | 56 + vendor/golang.org/x/net/html/node.go | 229 + vendor/golang.org/x/net/html/parse.go | 2460 +++++ vendor/golang.org/x/net/html/render.go | 293 + vendor/golang.org/x/net/html/token.go | 1272 +++ vendor/golang.org/x/net/http2/frame.go | 4 +- vendor/golang.org/x/net/http2/http2.go | 42 +- vendor/golang.org/x/net/http2/server.go | 35 +- vendor/golang.org/x/net/http2/transport.go | 137 +- vendor/golang.org/x/oauth2/LICENSE | 4 +- vendor/golang.org/x/oauth2/README.md | 15 +- vendor/golang.org/x/oauth2/token.go | 7 + .../x/text/encoding/htmlindex/htmlindex.go | 86 + .../x/text/encoding/htmlindex/map.go | 105 + .../x/text/encoding/htmlindex/tables.go | 362 + .../x/text/internal/language/common.go | 16 + .../x/text/internal/language/compact.go | 29 + .../text/internal/language/compact/compact.go | 61 + .../internal/language/compact/language.go | 260 + .../text/internal/language/compact/parents.go | 120 + .../text/internal/language/compact/tables.go | 1015 ++ .../x/text/internal/language/compact/tags.go | 91 + .../x/text/internal/language/compose.go | 167 + .../x/text/internal/language/coverage.go | 28 + .../x/text/internal/language/language.go | 627 ++ .../x/text/internal/language/lookup.go | 412 + .../x/text/internal/language/match.go | 226 + .../x/text/internal/language/parse.go | 608 ++ .../x/text/internal/language/tables.go | 3494 ++++++ .../x/text/internal/language/tags.go | 48 + vendor/golang.org/x/text/internal/tag/tag.go | 100 + vendor/golang.org/x/text/language/coverage.go | 187 + vendor/golang.org/x/text/language/doc.go | 98 + vendor/golang.org/x/text/language/language.go | 605 ++ vendor/golang.org/x/text/language/match.go | 735 ++ vendor/golang.org/x/text/language/parse.go | 256 + vendor/golang.org/x/text/language/tables.go | 298 + vendor/golang.org/x/text/language/tags.go | 145 + vendor/golang.org/x/tools/LICENSE | 4 +- .../x/tools/go/gcexportdata/gcexportdata.go | 97 +- vendor/golang.org/x/tools/go/packages/doc.go | 15 +- .../x/tools/go/packages/external.go | 8 +- .../golang.org/x/tools/go/packages/golist.go | 38 +- .../x/tools/go/packages/loadmode_string.go | 69 +- .../x/tools/go/packages/packages.go | 362 +- .../golang.org/x/tools/go/packages/visit.go | 9 + .../x/tools/go/types/objectpath/objectpath.go | 139 +- .../x/tools/go/types/typeutil/callee.go | 68 + .../x/tools/go/types/typeutil/imports.go | 30 + .../x/tools/go/types/typeutil/map.go | 517 + .../tools/go/types/typeutil/methodsetcache.go | 71 + .../x/tools/go/types/typeutil/ui.go | 53 + .../x/tools/internal/aliases/aliases.go | 10 +- .../x/tools/internal/aliases/aliases_go121.go | 31 - .../x/tools/internal/aliases/aliases_go122.go | 57 +- .../x/tools/internal/gcimporter/bimport.go | 61 - .../x/tools/internal/gcimporter/gcimporter.go | 11 +- .../x/tools/internal/gcimporter/iexport.go | 284 +- .../x/tools/internal/gcimporter/iimport.go | 43 +- 
.../internal/gcimporter/iimport_go122.go | 53 + .../internal/gcimporter/newInterface10.go | 22 - .../internal/gcimporter/newInterface11.go | 14 - .../tools/internal/gcimporter/predeclared.go | 91 + .../internal/gcimporter/support_go118.go | 34 - .../x/tools/internal/gcimporter/unified_no.go | 10 - .../tools/internal/gcimporter/unified_yes.go | 10 - .../tools/internal/gcimporter/ureader_yes.go | 44 +- .../x/tools/internal/gocommand/invoke.go | 18 +- .../x/tools/internal/pkgbits/decoder.go | 38 +- .../x/tools/internal/pkgbits/encoder.go | 43 +- .../x/tools/internal/pkgbits/frames_go1.go | 21 - .../x/tools/internal/pkgbits/frames_go17.go | 28 - .../x/tools/internal/pkgbits/support.go | 2 +- .../x/tools/internal/pkgbits/sync.go | 23 + .../internal/pkgbits/syncmarker_string.go | 7 +- .../x/tools/internal/pkgbits/version.go | 85 + .../x/tools/internal/stdlib/manifest.go | 2 +- .../internal/tokeninternal/tokeninternal.go | 137 - .../x/tools/internal/typeparams/common.go | 140 + .../x/tools/internal/typeparams/coretype.go | 150 + .../x/tools/internal/typeparams/free.go | 131 + .../x/tools/internal/typeparams/normalize.go | 218 + .../x/tools/internal/typeparams/termlist.go | 163 + .../x/tools/internal/typeparams/typeterm.go | 169 + .../x/tools/internal/typesinternal/element.go | 133 + .../tools/internal/typesinternal/errorcode.go | 8 +- .../x/tools/internal/typesinternal/recv.go | 8 +- .../x/tools/internal/typesinternal/types.go | 56 + .../x/tools/internal/versions/constraint.go | 13 + ...toolchain_go121.go => constraint_go121.go} | 6 +- .../x/tools/internal/versions/toolchain.go | 14 - .../internal/versions/toolchain_go119.go | 14 - .../internal/versions/toolchain_go120.go | 14 - .../x/tools/internal/versions/types.go | 28 +- .../x/tools/internal/versions/types_go121.go | 30 - .../x/tools/internal/versions/types_go122.go | 41 - vendor/gonum.org/v1/gonum/AUTHORS | 5 + vendor/gonum.org/v1/gonum/CONTRIBUTORS | 5 + vendor/gonum.org/v1/gonum/blas/blas64/conv.go | 14 - .../gonum.org/v1/gonum/blas/cblas128/conv.go | 14 - vendor/gonum.org/v1/gonum/blas/gonum/gonum.go | 14 - vendor/gonum.org/v1/gonum/floats/floats.go | 7 +- .../v1/gonum/graph/internal/ordered/doc.go | 6 - .../v1/gonum/graph/internal/ordered/sort.go | 85 - .../v1/gonum/graph/internal/set/same.go | 11 +- .../graph/internal/set/same_appengine.go | 11 +- .../v1/gonum/graph/internal/set/set.go | 63 +- .../v1/gonum/graph/iterator/lines_map_safe.go | 54 +- .../v1/gonum/graph/iterator/nodes_map_safe.go | 62 +- .../gonum.org/v1/gonum/graph/set/uid/uid.go | 4 +- .../graph/simple/dense_directed_matrix.go | 4 +- .../graph/simple/dense_undirected_matrix.go | 4 +- .../v1/gonum/graph/topo/bron_kerbosch.go | 7 +- .../v1/gonum/graph/topo/clique_graph.go | 8 +- .../v1/gonum/graph/topo/johnson_cycles.go | 28 +- .../v1/gonum/graph/topo/paton_cycles.go | 8 +- .../gonum.org/v1/gonum/graph/topo/tarjan.go | 26 +- vendor/gonum.org/v1/gonum/graph/topo/topo.go | 10 +- .../v1/gonum/graph/traverse/traverse.go | 8 +- .../gonum.org/v1/gonum/internal/order/doc.go | 6 + .../v1/gonum/internal/order/order.go | 60 + .../gonum.org/v1/gonum/lapack/gonum/lapack.go | 14 - vendor/gonum.org/v1/gonum/lapack/lapack.go | 2 +- .../v1/gonum/lapack/lapack64/lapack64.go | 7 - vendor/gonum.org/v1/gonum/mat/lu.go | 2 + vendor/gonum.org/v1/gonum/mat/matrix.go | 14 - vendor/gonum.org/v1/gonum/mat/qr.go | 50 +- .../googleapis/api/annotations/client.pb.go | 819 +- .../api/annotations/field_info.pb.go | 159 +- .../googleapis/api/annotations/http.pb.go | 48 +- 
.../googleapis/api/annotations/resource.pb.go | 9 +- .../googleapis/api/httpbody/httpbody.pb.go | 6 +- vendor/google.golang.org/grpc/CONTRIBUTING.md | 16 +- vendor/google.golang.org/grpc/MAINTAINERS.md | 33 +- vendor/google.golang.org/grpc/SECURITY.md | 2 +- .../google.golang.org/grpc/backoff/backoff.go | 2 +- .../grpc/balancer/balancer.go | 36 +- .../grpc/balancer/base/balancer.go | 6 +- .../grpclb/grpc_lb_v1/load_balancer.pb.go | 28 +- .../grpc_lb_v1/load_balancer_grpc.pb.go | 85 +- .../grpc/balancer/grpclb/grpclb.go | 4 +- .../balancer/pickfirst/internal/internal.go | 24 + .../grpc/balancer/pickfirst/pickfirst.go | 18 +- .../pickfirst/pickfirstleaf/pickfirstleaf.go | 625 ++ .../grpc/balancer_wrapper.go | 78 +- .../grpc_binarylog_v1/binarylog.pb.go | 24 +- vendor/google.golang.org/grpc/clientconn.go | 134 +- vendor/google.golang.org/grpc/codec.go | 69 +- .../alts/internal/conn/aeadrekey.go | 2 +- .../alts/internal/conn/aes128gcmrekey.go | 2 +- .../credentials/alts/internal/conn/record.go | 7 - .../alts/internal/handshaker/handshaker.go | 4 +- .../internal/handshaker/service/service.go | 6 +- .../internal/proto/grpc_gcp/altscontext.pb.go | 8 +- .../internal/proto/grpc_gcp/handshaker.pb.go | 299 +- .../proto/grpc_gcp/handshaker_grpc.pb.go | 23 +- .../grpc_gcp/transport_security_common.pb.go | 10 +- .../grpc/credentials/insecure/insecure.go | 2 +- .../grpc/credentials/oauth/oauth.go | 6 +- .../google.golang.org/grpc/credentials/tls.go | 29 +- vendor/google.golang.org/grpc/dialoptions.go | 31 +- vendor/google.golang.org/grpc/doc.go | 2 +- .../grpc/encoding/encoding.go | 5 +- .../grpc/encoding/encoding_v2.go | 81 + .../grpc/encoding/proto/proto.go | 44 +- .../grpc/experimental/stats/metricregistry.go | 269 + .../grpc/experimental/stats/metrics.go | 114 + .../grpc/grpclog/component.go | 10 +- .../google.golang.org/grpc/grpclog/grpclog.go | 104 +- .../grpc/grpclog/internal/grpclog.go | 26 + .../grpc/grpclog/internal/logger.go | 87 + .../internal/loggerv2.go} | 178 +- .../google.golang.org/grpc/grpclog/logger.go | 59 +- .../grpc/grpclog/loggerv2.go | 181 +- .../grpc/health/grpc_health_v1/health.pb.go | 10 +- .../health/grpc_health_v1/health_grpc.pb.go | 71 +- .../google.golang.org/grpc/health/server.go | 2 +- .../balancer/gracefulswitch/config.go | 2 + .../grpc/internal/binarylog/method_logger.go | 2 +- .../grpc/internal/channelz/channel.go | 15 + .../grpc/internal/channelz/channelmap.go | 9 +- .../grpc/internal/channelz/funcs.go | 2 +- .../grpc/internal/channelz/server.go | 2 + .../grpc/internal/channelz/socket.go | 7 + .../grpc/internal/channelz/subchannel.go | 2 + .../internal/channelz/syscall_nonlinux.go | 4 +- .../grpc/internal/channelz/trace.go | 19 +- .../grpc/internal/envconfig/envconfig.go | 11 +- .../grpc/internal/experimental.go | 8 +- .../grpc/internal/googlecloud/googlecloud.go | 6 +- .../{prefixLogger.go => prefix_logger.go} | 40 +- .../internal/grpcsync/callback_serializer.go | 24 +- .../grpc/internal/grpcsync/pubsub.go | 4 +- .../grpc/internal/grpcutil/method.go | 2 +- .../grpc/internal/idle/idle.go | 4 +- .../grpc/internal/internal.go | 30 +- .../internal/resolver/dns/dns_resolver.go | 6 +- .../resolver/passthrough/passthrough.go | 2 +- .../grpc/internal/stats/labels.go | 42 + .../internal/stats/metrics_recorder_list.go | 105 + .../grpc/internal/status/status.go | 39 +- .../grpc/internal/syscall/syscall_nonlinux.go | 6 +- .../grpc/internal/tcp_keepalive_unix.go | 2 +- .../grpc/internal/tcp_keepalive_windows.go | 2 +- .../grpc/internal/transport/controlbuf.go | 256 +- 
 .../grpc/internal/transport/handler_server.go | 47 +-
 .../grpc/internal/transport/http2_client.go | 135 +-
 .../grpc/internal/transport/http2_server.go | 49 +-
 .../grpc/internal/transport/http_util.go | 24 +-
 .../grpc/internal/transport/proxy.go | 10 +-
 .../grpc/internal/transport/transport.go | 249 +-
 .../grpc/keepalive/keepalive.go | 20 +-
 .../google.golang.org/grpc/mem/buffer_pool.go | 194 +
 .../grpc/mem/buffer_slice.go | 226 +
 vendor/google.golang.org/grpc/mem/buffers.go | 268 +
 .../grpc/metadata/metadata.go | 7 +-
 vendor/google.golang.org/grpc/preloader.go | 28 +-
 .../grpc/reflection/README.md | 18 -
 .../grpc/reflection/adapt.go | 57 -
 .../grpc_reflection_v1/reflection.pb.go | 953 --
 .../grpc_reflection_v1/reflection_grpc.pb.go | 165 -
 .../grpc_reflection_v1alpha/reflection.pb.go | 1028 --
 .../reflection_grpc.pb.go | 162 -
 .../grpc/reflection/internal/internal.go | 436 -
 .../grpc/reflection/serverreflection.go | 160 -
 vendor/google.golang.org/grpc/regenerate.sh | 123 -
 .../grpc/resolver/manual/manual.go | 4 +-
 .../grpc/resolver_wrapper.go | 9 +-
 vendor/google.golang.org/grpc/rpc_util.go | 330 +-
 vendor/google.golang.org/grpc/server.go | 99 +-
 .../grpc/shared_buffer_pool.go | 154 -
 vendor/google.golang.org/grpc/stats/stats.go | 6 -
 vendor/google.golang.org/grpc/stream.go | 213 +-
 .../grpc/stream_interfaces.go | 86 +
 .../grpc/test/bufconn/bufconn.go | 2 +-
 vendor/google.golang.org/grpc/version.go | 2 +-
 vendor/modules.txt | 789 +-
 2238 files changed, 199639 insertions(+), 44717 deletions(-)
 create mode 100644 tools/packaging/tempo.yaml
 create mode 100644 vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/x509_provider.go
 create mode 100644 vendor/cloud.google.com/go/auth/internal/compute/compute.go
 create mode 100644 vendor/cloud.google.com/go/auth/internal/compute/manufacturer.go
 create mode 100644 vendor/cloud.google.com/go/auth/internal/compute/manufacturer_linux.go
 create mode 100644 vendor/cloud.google.com/go/auth/internal/compute/manufacturer_windows.go
 create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_options.go
 delete mode 100644 vendor/github.com/VividCortex/gohistogram/.gitignore
 delete mode 100644 vendor/github.com/VividCortex/gohistogram/README.md
 delete mode 100644 vendor/github.com/VividCortex/gohistogram/histogram.go
 delete mode 100644 vendor/github.com/VividCortex/gohistogram/numerichistogram.go
 delete mode 100644 vendor/github.com/VividCortex/gohistogram/weightedhistogram.go
 create mode 100644 vendor/github.com/antchfx/xmlquery/.gitignore
 rename vendor/github.com/{VividCortex/gohistogram => antchfx/xmlquery}/LICENSE (95%)
 create mode 100644 vendor/github.com/antchfx/xmlquery/README.md
 create mode 100644 vendor/github.com/antchfx/xmlquery/cache.go
 create mode 100644 vendor/github.com/antchfx/xmlquery/cached_reader.go
 create mode 100644 vendor/github.com/antchfx/xmlquery/node.go
 create mode 100644 vendor/github.com/antchfx/xmlquery/options.go
 create mode 100644 vendor/github.com/antchfx/xmlquery/parse.go
 create mode 100644 vendor/github.com/antchfx/xmlquery/query.go
 create mode 100644 vendor/github.com/antchfx/xpath/.gitignore
 rename vendor/github.com/{mitchellh/go-testing-interface => antchfx/xpath}/LICENSE (92%)
 create mode 100644 vendor/github.com/antchfx/xpath/README.md
 create mode 100644 vendor/github.com/antchfx/xpath/build.go
 create mode 100644 vendor/github.com/antchfx/xpath/cache.go
 create mode 100644 vendor/github.com/antchfx/xpath/func.go
 create mode 100644 vendor/github.com/antchfx/xpath/func_go110.go
 create mode 100644 vendor/github.com/antchfx/xpath/func_pre_go110.go
 create mode 100644 vendor/github.com/antchfx/xpath/operator.go
 create mode 100644 vendor/github.com/antchfx/xpath/parse.go
 create mode 100644 vendor/github.com/antchfx/xpath/query.go
 create mode 100644 vendor/github.com/antchfx/xpath/xpath.go
 create mode 100644 vendor/github.com/aws/aws-msk-iam-sasl-signer-go/LICENSE
 create mode 100644 vendor/github.com/aws/aws-msk-iam-sasl-signer-go/NOTICE
 create mode 100644 vendor/github.com/aws/aws-msk-iam-sasl-signer-go/signer/msk_auth_token_provider.go
 create mode 100644 vendor/github.com/aws/aws-msk-iam-sasl-signer-go/signer/version.go
 rename vendor/{go.opentelemetry.io/collector/confmap/converter/expandconverter/LICENSE => github.com/aws/aws-sdk-go-v2/LICENSE.txt} (100%)
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/NOTICE.txt
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/config.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/context.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/credential_cache.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/auto.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/configuration.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/defaults.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/doc.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/defaultsmode.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/doc.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/errors.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/from_ptr.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/logging.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/logging_generate.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/metadata.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/middleware.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/osname.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/osname_go115.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/recursion_detection.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id_retriever.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/array.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/encoder.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/map.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/middleware.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/object.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/value.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/restjson/decoder_util.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/xml/error_utils.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_bucket.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_rate_limit.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/request.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive_ratelimit.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive_token_bucket.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/retry/doc.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/retry/errors.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/retry/jitter_backoff.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/retry/metadata.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retry.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/retry/standard.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/retry/throttle_error.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/retry/timeout_error.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/retryer.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/runtime.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/cache.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/const.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/header_rules.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/headers.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/hmac.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/host.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/scope.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/time.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/util.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/presign_middleware.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/stream.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/to_ptr.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/client.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/content_type.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error_middleware.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/timeout_read_closer.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/types.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/version.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md
 rename vendor/{go.opentelemetry.io/collector/confmap/provider/envprovider/LICENSE => github.com/aws/aws-sdk-go-v2/config/LICENSE.txt} (100%)
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/config/config.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/config/defaultsmode.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/config/doc.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/config/generate.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/config/local.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/config/provider.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/config/resolve_bearer_token.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/config/resolve_credentials.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md
 rename vendor/{go.opentelemetry.io/collector/confmap/provider/fileprovider/LICENSE => github.com/aws/aws-sdk-go-v2/credentials/LICENSE.txt} (100%)
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/credentials/doc.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/doc.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/provider.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/client.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/middleware.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/provider.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/doc.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/provider.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/doc.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_cached_token.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_credentials_provider.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_token_provider.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/credentials/static_provider.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/assume_role_provider.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/web_identity_provider.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md
 rename vendor/{go.opentelemetry.io/collector/confmap/provider/httpprovider/LICENSE => github.com/aws/aws-sdk-go-v2/feature/ec2/imds/LICENSE.txt} (100%)
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_client.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetDynamicData.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetIAMInfo.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetInstanceIdentityDocument.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetMetadata.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetRegion.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetToken.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetUserData.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/doc.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config/resolvers.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/request_middleware.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/token_provider.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/internal/auth/scheme.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md
 rename vendor/{go.opentelemetry.io/collector/confmap/provider/httpsprovider/LICENSE => github.com/aws/aws-sdk-go-v2/internal/configsources/LICENSE.txt} (100%)
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/config.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/endpoints.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/arn.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/doc.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/generate.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/host.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partition.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md
 rename vendor/{go.opentelemetry.io/collector/confmap/provider/yamlprovider/LICENSE => github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/LICENSE.txt} (100%)
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/endpoints.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/internal/ini/LICENSE.txt
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/internal/ini/errors.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/internal/ini/sections.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/internal/ini/strings.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/internal/ini/token.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/internal/ini/tokenize.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/internal/ini/value.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/internal/rand/rand.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/interfaces.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/time.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/internal/sdkio/byte.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/internal/shareddefaults/shared_config.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/internal/strings/strings.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/LICENSE
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/docs.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/singleflight.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/internal/timeconv/duration.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/LICENSE.txt
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/context.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/doc.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/middleware.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/sso/LICENSE.txt
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/sso/deserializers.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/sso/doc.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/sso/serializers.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/errors.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/types.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/sso/validators.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/LICENSE.txt
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/endpoints.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/serializers.go
create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/errors.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/types.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/validators.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/sts/LICENSE.txt create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/sts/serializers.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/sts/validators.go create mode 100644 vendor/github.com/aws/smithy-go/.gitignore create mode 100644 vendor/github.com/aws/smithy-go/.travis.yml create mode 100644 vendor/github.com/aws/smithy-go/CHANGELOG.md create mode 100644 vendor/github.com/aws/smithy-go/CODE_OF_CONDUCT.md create mode 100644 vendor/github.com/aws/smithy-go/CONTRIBUTING.md create mode 100644 vendor/github.com/aws/smithy-go/LICENSE create mode 100644 vendor/github.com/aws/smithy-go/Makefile create mode 100644 vendor/github.com/aws/smithy-go/NOTICE create mode 100644 vendor/github.com/aws/smithy-go/README.md create mode 100644 vendor/github.com/aws/smithy-go/auth/bearer/docs.go create mode 100644 vendor/github.com/aws/smithy-go/auth/bearer/middleware.go create mode 100644 vendor/github.com/aws/smithy-go/auth/bearer/token.go create mode 100644 vendor/github.com/aws/smithy-go/auth/bearer/token_cache.go create mode 100644 vendor/github.com/aws/smithy-go/context/suppress_expired.go create mode 100644 vendor/github.com/aws/smithy-go/doc.go create mode 100644 vendor/github.com/aws/smithy-go/document.go create mode 100644 vendor/github.com/aws/smithy-go/document/doc.go create mode 100644 vendor/github.com/aws/smithy-go/document/document.go create mode 100644 vendor/github.com/aws/smithy-go/document/errors.go create mode 100644 vendor/github.com/aws/smithy-go/encoding/doc.go create mode 100644 vendor/github.com/aws/smithy-go/encoding/encoding.go create mode 100644 
vendor/github.com/aws/smithy-go/encoding/httpbinding/encode.go create mode 100644 vendor/github.com/aws/smithy-go/encoding/httpbinding/header.go create mode 100644 vendor/github.com/aws/smithy-go/encoding/httpbinding/path_replace.go create mode 100644 vendor/github.com/aws/smithy-go/encoding/httpbinding/query.go create mode 100644 vendor/github.com/aws/smithy-go/encoding/httpbinding/uri.go create mode 100644 vendor/github.com/aws/smithy-go/encoding/json/array.go create mode 100644 vendor/github.com/aws/smithy-go/encoding/json/constants.go create mode 100644 vendor/github.com/aws/smithy-go/encoding/json/decoder_util.go create mode 100644 vendor/github.com/aws/smithy-go/encoding/json/encoder.go create mode 100644 vendor/github.com/aws/smithy-go/encoding/json/escape.go create mode 100644 vendor/github.com/aws/smithy-go/encoding/json/object.go create mode 100644 vendor/github.com/aws/smithy-go/encoding/json/value.go create mode 100644 vendor/github.com/aws/smithy-go/encoding/xml/array.go create mode 100644 vendor/github.com/aws/smithy-go/encoding/xml/constants.go create mode 100644 vendor/github.com/aws/smithy-go/encoding/xml/doc.go create mode 100644 vendor/github.com/aws/smithy-go/encoding/xml/element.go create mode 100644 vendor/github.com/aws/smithy-go/encoding/xml/encoder.go create mode 100644 vendor/github.com/aws/smithy-go/encoding/xml/error_utils.go create mode 100644 vendor/github.com/aws/smithy-go/encoding/xml/escape.go create mode 100644 vendor/github.com/aws/smithy-go/encoding/xml/map.go create mode 100644 vendor/github.com/aws/smithy-go/encoding/xml/value.go create mode 100644 vendor/github.com/aws/smithy-go/encoding/xml/xml_decoder.go create mode 100644 vendor/github.com/aws/smithy-go/endpoints/endpoint.go create mode 100644 vendor/github.com/aws/smithy-go/errors.go create mode 100644 vendor/github.com/aws/smithy-go/go_module_metadata.go create mode 100644 vendor/github.com/aws/smithy-go/internal/sync/singleflight/LICENSE create mode 100644 vendor/github.com/aws/smithy-go/internal/sync/singleflight/docs.go create mode 100644 vendor/github.com/aws/smithy-go/internal/sync/singleflight/singleflight.go create mode 100644 vendor/github.com/aws/smithy-go/io/byte.go create mode 100644 vendor/github.com/aws/smithy-go/io/doc.go create mode 100644 vendor/github.com/aws/smithy-go/io/reader.go create mode 100644 vendor/github.com/aws/smithy-go/io/ringbuffer.go create mode 100644 vendor/github.com/aws/smithy-go/local-mod-replace.sh create mode 100644 vendor/github.com/aws/smithy-go/logging/logger.go create mode 100644 vendor/github.com/aws/smithy-go/middleware/doc.go create mode 100644 vendor/github.com/aws/smithy-go/middleware/logging.go create mode 100644 vendor/github.com/aws/smithy-go/middleware/metadata.go create mode 100644 vendor/github.com/aws/smithy-go/middleware/middleware.go create mode 100644 vendor/github.com/aws/smithy-go/middleware/ordered_group.go create mode 100644 vendor/github.com/aws/smithy-go/middleware/stack.go create mode 100644 vendor/github.com/aws/smithy-go/middleware/stack_values.go create mode 100644 vendor/github.com/aws/smithy-go/middleware/step_build.go create mode 100644 vendor/github.com/aws/smithy-go/middleware/step_deserialize.go create mode 100644 vendor/github.com/aws/smithy-go/middleware/step_finalize.go create mode 100644 vendor/github.com/aws/smithy-go/middleware/step_initialize.go create mode 100644 vendor/github.com/aws/smithy-go/middleware/step_serialize.go create mode 100644 vendor/github.com/aws/smithy-go/modman.toml create mode 100644 
vendor/github.com/aws/smithy-go/properties.go create mode 100644 vendor/github.com/aws/smithy-go/ptr/doc.go create mode 100644 vendor/github.com/aws/smithy-go/ptr/from_ptr.go create mode 100644 vendor/github.com/aws/smithy-go/ptr/gen_scalars.go create mode 100644 vendor/github.com/aws/smithy-go/ptr/to_ptr.go create mode 100644 vendor/github.com/aws/smithy-go/rand/doc.go create mode 100644 vendor/github.com/aws/smithy-go/rand/rand.go create mode 100644 vendor/github.com/aws/smithy-go/rand/uuid.go create mode 100644 vendor/github.com/aws/smithy-go/time/time.go create mode 100644 vendor/github.com/aws/smithy-go/transport/http/checksum_middleware.go create mode 100644 vendor/github.com/aws/smithy-go/transport/http/client.go create mode 100644 vendor/github.com/aws/smithy-go/transport/http/doc.go create mode 100644 vendor/github.com/aws/smithy-go/transport/http/headerlist.go create mode 100644 vendor/github.com/aws/smithy-go/transport/http/host.go create mode 100644 vendor/github.com/aws/smithy-go/transport/http/internal/io/safe.go create mode 100644 vendor/github.com/aws/smithy-go/transport/http/md5_checksum.go create mode 100644 vendor/github.com/aws/smithy-go/transport/http/middleware_close_response_body.go create mode 100644 vendor/github.com/aws/smithy-go/transport/http/middleware_content_length.go create mode 100644 vendor/github.com/aws/smithy-go/transport/http/middleware_header_comment.go create mode 100644 vendor/github.com/aws/smithy-go/transport/http/middleware_headers.go create mode 100644 vendor/github.com/aws/smithy-go/transport/http/middleware_http_logging.go create mode 100644 vendor/github.com/aws/smithy-go/transport/http/middleware_metadata.go create mode 100644 vendor/github.com/aws/smithy-go/transport/http/middleware_min_proto.go create mode 100644 vendor/github.com/aws/smithy-go/transport/http/request.go create mode 100644 vendor/github.com/aws/smithy-go/transport/http/response.go create mode 100644 vendor/github.com/aws/smithy-go/transport/http/time.go create mode 100644 vendor/github.com/aws/smithy-go/transport/http/url.go create mode 100644 vendor/github.com/aws/smithy-go/transport/http/user_agent.go create mode 100644 vendor/github.com/aws/smithy-go/validation.go create mode 100644 vendor/github.com/ebitengine/purego/.gitignore create mode 100644 vendor/github.com/ebitengine/purego/LICENSE create mode 100644 vendor/github.com/ebitengine/purego/README.md create mode 100644 vendor/github.com/ebitengine/purego/abi_amd64.h create mode 100644 vendor/github.com/ebitengine/purego/abi_arm64.h create mode 100644 vendor/github.com/ebitengine/purego/cgo.go create mode 100644 vendor/github.com/ebitengine/purego/dlerror.go create mode 100644 vendor/github.com/ebitengine/purego/dlfcn.go create mode 100644 vendor/github.com/ebitengine/purego/dlfcn_android.go create mode 100644 vendor/github.com/ebitengine/purego/dlfcn_darwin.go create mode 100644 vendor/github.com/ebitengine/purego/dlfcn_freebsd.go create mode 100644 vendor/github.com/ebitengine/purego/dlfcn_linux.go create mode 100644 vendor/github.com/ebitengine/purego/dlfcn_nocgo_freebsd.go create mode 100644 vendor/github.com/ebitengine/purego/dlfcn_nocgo_linux.go create mode 100644 vendor/github.com/ebitengine/purego/dlfcn_playground.go create mode 100644 vendor/github.com/ebitengine/purego/dlfcn_stubs.s create mode 100644 vendor/github.com/ebitengine/purego/func.go create mode 100644 vendor/github.com/ebitengine/purego/go_runtime.go create mode 100644 vendor/github.com/ebitengine/purego/internal/cgo/dlfcn_cgo_unix.go create mode 
100644 vendor/github.com/ebitengine/purego/internal/cgo/empty.go create mode 100644 vendor/github.com/ebitengine/purego/internal/cgo/syscall_cgo_unix.go create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/abi_amd64.h create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/abi_arm64.h create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/asm_amd64.s create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/asm_arm64.s create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/callbacks.go create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/doc.go create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/freebsd.go create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/go_darwin_amd64.go create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/go_darwin_arm64.go create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/go_freebsd_amd64.go create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/go_freebsd_arm64.go create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/go_libinit.go create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/go_linux_amd64.go create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/go_linux_arm64.go create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/go_setenv.go create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/go_util.go create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/iscgo.go create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo.go create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_darwin.go create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_freebsd.go create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_linux.go create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/setenv.go create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/symbols.go create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_darwin.go create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_freebsd.go create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_linux.go create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_amd64.s create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_arm64.s create mode 100644 vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_stubs.s create mode 100644 vendor/github.com/ebitengine/purego/internal/strings/strings.go create mode 100644 vendor/github.com/ebitengine/purego/is_ios.go create mode 100644 vendor/github.com/ebitengine/purego/nocgo.go create mode 100644 vendor/github.com/ebitengine/purego/struct_amd64.go create mode 100644 vendor/github.com/ebitengine/purego/struct_arm64.go create mode 100644 vendor/github.com/ebitengine/purego/struct_other.go create mode 100644 vendor/github.com/ebitengine/purego/sys_amd64.s create mode 100644 vendor/github.com/ebitengine/purego/sys_arm64.s create mode 100644 vendor/github.com/ebitengine/purego/sys_unix_arm64.s create mode 100644 vendor/github.com/ebitengine/purego/syscall.go create mode 100644 vendor/github.com/ebitengine/purego/syscall_cgo_linux.go create mode 100644 vendor/github.com/ebitengine/purego/syscall_sysv.go create mode 100644 
vendor/github.com/ebitengine/purego/syscall_windows.go create mode 100644 vendor/github.com/ebitengine/purego/zcallback_amd64.s create mode 100644 vendor/github.com/ebitengine/purego/zcallback_arm64.s create mode 100644 vendor/github.com/elastic/go-grok/.gitignore create mode 100644 vendor/github.com/elastic/go-grok/.go-version rename vendor/github.com/{oklog/run => elastic/go-grok}/LICENSE (100%) create mode 100644 vendor/github.com/elastic/go-grok/NOTICE.txt create mode 100644 vendor/github.com/elastic/go-grok/README.md create mode 100644 vendor/github.com/elastic/go-grok/catalog-info.yaml create mode 100644 vendor/github.com/elastic/go-grok/dev-tools/mage/benchmark.go create mode 100644 vendor/github.com/elastic/go-grok/dev-tools/mage/check.go create mode 100644 vendor/github.com/elastic/go-grok/dev-tools/mage/deps.go create mode 100644 vendor/github.com/elastic/go-grok/dev-tools/mage/fmt.go create mode 100644 vendor/github.com/elastic/go-grok/dev-tools/mage/gotool/get.go create mode 100644 vendor/github.com/elastic/go-grok/dev-tools/mage/gotool/go.go create mode 100644 vendor/github.com/elastic/go-grok/dev-tools/mage/gotool/licenser.go create mode 100644 vendor/github.com/elastic/go-grok/dev-tools/mage/gotool/modules.go create mode 100644 vendor/github.com/elastic/go-grok/dev-tools/mage/gotool/noticer.go create mode 100644 vendor/github.com/elastic/go-grok/dev-tools/mage/install.go create mode 100644 vendor/github.com/elastic/go-grok/dev-tools/mage/linter.go create mode 100644 vendor/github.com/elastic/go-grok/dev-tools/mage/mage.go create mode 100644 vendor/github.com/elastic/go-grok/dev-tools/mage/notice.go create mode 100644 vendor/github.com/elastic/go-grok/grok.go create mode 100644 vendor/github.com/elastic/go-grok/magefile.go create mode 100644 vendor/github.com/elastic/go-grok/patterns/aws.go create mode 100644 vendor/github.com/elastic/go-grok/patterns/bind9.go create mode 100644 vendor/github.com/elastic/go-grok/patterns/bro.go create mode 100644 vendor/github.com/elastic/go-grok/patterns/default.go create mode 100644 vendor/github.com/elastic/go-grok/patterns/exim.go create mode 100644 vendor/github.com/elastic/go-grok/patterns/firewalls.go create mode 100644 vendor/github.com/elastic/go-grok/patterns/haproxy.go create mode 100644 vendor/github.com/elastic/go-grok/patterns/httpd.go create mode 100644 vendor/github.com/elastic/go-grok/patterns/java.go create mode 100644 vendor/github.com/elastic/go-grok/patterns/junos.go create mode 100644 vendor/github.com/elastic/go-grok/patterns/maven.go create mode 100644 vendor/github.com/elastic/go-grok/patterns/mcollective.go create mode 100644 vendor/github.com/elastic/go-grok/patterns/mongodb.go create mode 100644 vendor/github.com/elastic/go-grok/patterns/postgresql.go create mode 100644 vendor/github.com/elastic/go-grok/patterns/rails.go create mode 100644 vendor/github.com/elastic/go-grok/patterns/redis.go create mode 100644 vendor/github.com/elastic/go-grok/patterns/ruby.go create mode 100644 vendor/github.com/elastic/go-grok/patterns/squid.go create mode 100644 vendor/github.com/elastic/go-grok/patterns/syslog.go create mode 100644 vendor/github.com/elastic/lunes/.editorconfig create mode 100644 vendor/github.com/elastic/lunes/.gitignore create mode 100644 vendor/github.com/elastic/lunes/.go-version create mode 100644 vendor/github.com/elastic/lunes/.golangci.yml create mode 100644 vendor/github.com/elastic/lunes/CHANGELOG.md create mode 100644 vendor/github.com/elastic/lunes/LICENSE create mode 100644 
vendor/github.com/elastic/lunes/NOTICE.txt create mode 100644 vendor/github.com/elastic/lunes/README.md create mode 100644 vendor/github.com/elastic/lunes/catalog-info.yaml create mode 100644 vendor/github.com/elastic/lunes/dev-tools/mage/check.go create mode 100644 vendor/github.com/elastic/lunes/dev-tools/mage/deps.go create mode 100644 vendor/github.com/elastic/lunes/dev-tools/mage/fmt.go create mode 100644 vendor/github.com/elastic/lunes/dev-tools/mage/gotool/get.go create mode 100644 vendor/github.com/elastic/lunes/dev-tools/mage/gotool/go.go create mode 100644 vendor/github.com/elastic/lunes/dev-tools/mage/gotool/licenser.go create mode 100644 vendor/github.com/elastic/lunes/dev-tools/mage/gotool/modules.go create mode 100644 vendor/github.com/elastic/lunes/dev-tools/mage/gotool/noticer.go create mode 100644 vendor/github.com/elastic/lunes/dev-tools/mage/install.go create mode 100644 vendor/github.com/elastic/lunes/dev-tools/mage/linter.go create mode 100644 vendor/github.com/elastic/lunes/dev-tools/mage/mage.go create mode 100644 vendor/github.com/elastic/lunes/dev-tools/mage/notice.go create mode 100644 vendor/github.com/elastic/lunes/locale.go create mode 100644 vendor/github.com/elastic/lunes/lunes.go create mode 100644 vendor/github.com/elastic/lunes/magefile.go create mode 100644 vendor/github.com/elastic/lunes/tables.go create mode 100644 vendor/github.com/expr-lang/expr/optimizer/sum_array.go create mode 100644 vendor/github.com/expr-lang/expr/optimizer/sum_map.go create mode 100644 vendor/github.com/expr-lang/expr/patcher/with_timezone.go delete mode 100644 vendor/github.com/fsnotify/fsnotify/.editorconfig delete mode 100644 vendor/github.com/fsnotify/fsnotify/.gitattributes create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/darwin.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/freebsd.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/internal.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/unix.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/unix2.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/windows.go delete mode 100644 vendor/github.com/fsnotify/fsnotify/mkdoc.zsh delete mode 100644 vendor/github.com/go-kit/kit/LICENSE delete mode 100644 vendor/github.com/go-kit/kit/metrics/README.md delete mode 100644 vendor/github.com/go-kit/kit/metrics/doc.go delete mode 100644 vendor/github.com/go-kit/kit/metrics/expvar/expvar.go delete mode 100644 vendor/github.com/go-kit/kit/metrics/generic/generic.go delete mode 100644 vendor/github.com/go-kit/kit/metrics/internal/lv/labelvalues.go delete mode 100644 vendor/github.com/go-kit/kit/metrics/internal/lv/space.go delete mode 100644 vendor/github.com/go-kit/kit/metrics/metrics.go delete mode 100644 
vendor/github.com/go-kit/kit/metrics/timer.go delete mode 100644 vendor/github.com/go-viper/mapstructure/v2/error.go create mode 100644 vendor/github.com/go-viper/mapstructure/v2/internal/errors/errors.go create mode 100644 vendor/github.com/go-viper/mapstructure/v2/internal/errors/join.go create mode 100644 vendor/github.com/go-viper/mapstructure/v2/internal/errors/join_go1_19.go create mode 100644 vendor/github.com/gorilla/handlers/.editorconfig create mode 100644 vendor/github.com/gorilla/handlers/.gitignore create mode 100644 vendor/github.com/gorilla/handlers/Makefile create mode 100644 vendor/github.com/grafana/dskit/test/diff.go create mode 100644 vendor/github.com/grafana/dskit/test/poll.go delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/common.go delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/doc.go delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/settable/doc.go delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/settable/logsettable.go delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/client_interceptors.go delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/context.go delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap/context.go delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap/doc.go delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/doc.go delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/grpclogger.go delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/options.go delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/payload_interceptors.go delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/server_interceptors.go delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/backoff.go delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/context.go delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/doc.go delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/fieldextractor.go delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/interceptors.go delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/options.go delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils/backoff.go delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/doc.go delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/nicemd.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/COPYRIGHT create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/LICENSE create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/retry/backoff.go rename vendor/github.com/grpc-ecosystem/go-grpc-middleware/{ => v2/interceptors}/retry/doc.go (68%) rename vendor/github.com/grpc-ecosystem/go-grpc-middleware/{ => v2/interceptors}/retry/options.go (66%) rename vendor/github.com/grpc-ecosystem/go-grpc-middleware/{ => v2/interceptors}/retry/retry.go (80%) create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/metadata/doc.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/metadata/metadata.go 
create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/metadata/single_key.go delete mode 100644 vendor/github.com/hashicorp/go-plugin/.gitignore delete mode 100644 vendor/github.com/hashicorp/go-plugin/CHANGELOG.md delete mode 100644 vendor/github.com/hashicorp/go-plugin/LICENSE delete mode 100644 vendor/github.com/hashicorp/go-plugin/README.md delete mode 100644 vendor/github.com/hashicorp/go-plugin/buf.gen.yaml delete mode 100644 vendor/github.com/hashicorp/go-plugin/buf.yaml delete mode 100644 vendor/github.com/hashicorp/go-plugin/client.go delete mode 100644 vendor/github.com/hashicorp/go-plugin/constants.go delete mode 100644 vendor/github.com/hashicorp/go-plugin/discover.go delete mode 100644 vendor/github.com/hashicorp/go-plugin/error.go delete mode 100644 vendor/github.com/hashicorp/go-plugin/grpc_broker.go delete mode 100644 vendor/github.com/hashicorp/go-plugin/grpc_client.go delete mode 100644 vendor/github.com/hashicorp/go-plugin/grpc_controller.go delete mode 100644 vendor/github.com/hashicorp/go-plugin/grpc_server.go delete mode 100644 vendor/github.com/hashicorp/go-plugin/grpc_stdio.go delete mode 100644 vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/addr_translator.go delete mode 100644 vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/cmd_reattach.go delete mode 100644 vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/cmd_runner.go delete mode 100644 vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/notes_unix.go delete mode 100644 vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/notes_windows.go delete mode 100644 vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/process.go delete mode 100644 vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/process_posix.go delete mode 100644 vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/process_windows.go delete mode 100644 vendor/github.com/hashicorp/go-plugin/internal/grpcmux/blocked_client_listener.go delete mode 100644 vendor/github.com/hashicorp/go-plugin/internal/grpcmux/blocked_server_listener.go delete mode 100644 vendor/github.com/hashicorp/go-plugin/internal/grpcmux/grpc_client_muxer.go delete mode 100644 vendor/github.com/hashicorp/go-plugin/internal/grpcmux/grpc_muxer.go delete mode 100644 vendor/github.com/hashicorp/go-plugin/internal/grpcmux/grpc_server_muxer.go delete mode 100644 vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go delete mode 100644 vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.proto delete mode 100644 vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker_grpc.pb.go delete mode 100644 vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go delete mode 100644 vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.proto delete mode 100644 vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller_grpc.pb.go delete mode 100644 vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.pb.go delete mode 100644 vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.proto delete mode 100644 vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio_grpc.pb.go delete mode 100644 vendor/github.com/hashicorp/go-plugin/log_entry.go delete mode 100644 vendor/github.com/hashicorp/go-plugin/mtls.go delete mode 100644 vendor/github.com/hashicorp/go-plugin/mux_broker.go delete mode 100644 vendor/github.com/hashicorp/go-plugin/plugin.go delete mode 100644 vendor/github.com/hashicorp/go-plugin/process.go delete mode 
100644 vendor/github.com/hashicorp/go-plugin/protocol.go delete mode 100644 vendor/github.com/hashicorp/go-plugin/rpc_client.go delete mode 100644 vendor/github.com/hashicorp/go-plugin/rpc_server.go delete mode 100644 vendor/github.com/hashicorp/go-plugin/runner/runner.go delete mode 100644 vendor/github.com/hashicorp/go-plugin/server.go delete mode 100644 vendor/github.com/hashicorp/go-plugin/server_mux.go delete mode 100644 vendor/github.com/hashicorp/go-plugin/stream.go delete mode 100644 vendor/github.com/hashicorp/go-plugin/testing.go rename vendor/github.com/hashicorp/{yamux => golang-lru}/.gitignore (100%) create mode 100644 vendor/github.com/hashicorp/golang-lru/.golangci.yml create mode 100644 vendor/github.com/hashicorp/golang-lru/2q.go create mode 100644 vendor/github.com/hashicorp/golang-lru/README.md create mode 100644 vendor/github.com/hashicorp/golang-lru/arc.go create mode 100644 vendor/github.com/hashicorp/golang-lru/doc.go create mode 100644 vendor/github.com/hashicorp/golang-lru/lru.go create mode 100644 vendor/github.com/hashicorp/golang-lru/testing.go delete mode 100644 vendor/github.com/hashicorp/yamux/LICENSE delete mode 100644 vendor/github.com/hashicorp/yamux/README.md delete mode 100644 vendor/github.com/hashicorp/yamux/addr.go delete mode 100644 vendor/github.com/hashicorp/yamux/const.go delete mode 100644 vendor/github.com/hashicorp/yamux/mux.go delete mode 100644 vendor/github.com/hashicorp/yamux/session.go delete mode 100644 vendor/github.com/hashicorp/yamux/spec.md delete mode 100644 vendor/github.com/hashicorp/yamux/stream.go delete mode 100644 vendor/github.com/hashicorp/yamux/util.go create mode 100644 vendor/github.com/jaegertracing/jaeger/cmd/collector/app/sampling/samplingstrategy/factory.go rename vendor/github.com/jaegertracing/jaeger/cmd/collector/app/sampling/{strategystore => samplingstrategy}/interface.go (56%) delete mode 100644 vendor/github.com/jaegertracing/jaeger/cmd/collector/app/sampling/strategystore/factory.go delete mode 100644 vendor/github.com/jaegertracing/jaeger/cmd/internal/flags/.nocover delete mode 100644 vendor/github.com/jaegertracing/jaeger/internal/metrics/expvar/cache.go delete mode 100644 vendor/github.com/jaegertracing/jaeger/internal/metrics/expvar/factory.go delete mode 100644 vendor/github.com/jaegertracing/jaeger/internal/metrics/expvar/metrics.go delete mode 100644 vendor/github.com/jaegertracing/jaeger/pkg/bearertoken/context.go delete mode 100644 vendor/github.com/jaegertracing/jaeger/pkg/bearertoken/http.go delete mode 100644 vendor/github.com/jaegertracing/jaeger/pkg/bearertoken/transport.go delete mode 100644 vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/shared/plugin.go create mode 100644 vendor/github.com/magefile/mage/LICENSE create mode 100644 vendor/github.com/magefile/mage/mg/color.go create mode 100644 vendor/github.com/magefile/mage/mg/color_string.go create mode 100644 vendor/github.com/magefile/mage/mg/deps.go create mode 100644 vendor/github.com/magefile/mage/mg/errors.go create mode 100644 vendor/github.com/magefile/mage/mg/fn.go create mode 100644 vendor/github.com/magefile/mage/mg/runtime.go create mode 100644 vendor/github.com/magefile/mage/sh/cmd.go create mode 100644 vendor/github.com/magefile/mage/sh/helpers.go delete mode 100644 vendor/github.com/minio/minio-go/v7/api-prompt-object.go delete mode 100644 vendor/github.com/minio/minio-go/v7/api-prompt-options.go delete mode 100644 vendor/github.com/mitchellh/go-testing-interface/.travis.yml delete mode 100644 
vendor/github.com/mitchellh/go-testing-interface/README.md delete mode 100644 vendor/github.com/mitchellh/go-testing-interface/testing.go delete mode 100644 vendor/github.com/mitchellh/go-testing-interface/testing_go19.go create mode 100644 vendor/github.com/mostynb/go-grpc-compression/internal/zstd/zstd.go create mode 100644 vendor/github.com/mostynb/go-grpc-compression/nonclobbering/zstd/zstd.go delete mode 100644 vendor/github.com/oklog/run/.gitignore delete mode 100644 vendor/github.com/oklog/run/README.md delete mode 100644 vendor/github.com/oklog/run/actors.go delete mode 100644 vendor/github.com/oklog/run/group.go delete mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/internal/metadata/generated_telemetry.go delete mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter/internal/metadata/generated_telemetry.go delete mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/localhostgate/featuregate.go create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/testutil/testutil.go create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/parseutils/uri.go create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/kafka/topic/LICENSE rename vendor/{go.opentelemetry.io/collector/confmap/converter/expandconverter => github.com/open-telemetry/opentelemetry-collector-contrib/pkg/kafka/topic}/Makefile (100%) create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/kafka/topic/README.md create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/kafka/topic/kafka_ctx.go create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/kafka/topic/metadata.yaml create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/context_inferrer.go create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/logging/logging.go create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/schema.go create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlscope/README.md create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlscope/scope.go create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_append.go create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_convert_attributes_to_elements_xml.go create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_convert_text_to_elements_xml.go create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_day.go create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_decode.go create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_extract_grok_patterns.go create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_format.go create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_get_xml.go create mode 100644 
vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_hex.go create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_insert_xml.go create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_is_list.go create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_is_root_span.go create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_keep_matching_keys.go create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_md5.go create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_minute.go create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_month.go create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_parse_simplified_xml.go create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_remove_xml.go create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_sha512.go create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_slice_to_map.go create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_sort.go create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_string.go create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_to_key_value_string.go create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_unix.go create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_url.go create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_useragent.go create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_year.go create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/parser_collection.go create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/paths.go create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/azure/resources_to_traces.go create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/documentation.md create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/internal/metadata/generated_telemetry.go delete mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/internal/metadata/generated_telemetry.go create mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/documentation.md delete mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/metrics.go delete mode 100644 vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/internal/metadata/generated_telemetry.go delete mode 100644 
vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver/internal/metadata/generated_telemetry.go create mode 100644 vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/LICENSE create mode 100644 vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header/header.go create mode 100644 vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/negotiate.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/duplicate_validations.go delete mode 100644 vendor/github.com/prometheus/common/model/labelset_string_go120.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix_nocgo.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/cpu/cpu_darwin.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/cpu/cpu_darwin_cgo.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/cpu/cpu_darwin_nocgo.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/internal/common/common_darwin.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin_cgo.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin_nocgo.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/net/net_linux_111.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/net/net_linux_116.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/process/process_darwin.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/process/process_darwin_cgo.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/process/process_darwin_nocgo.go rename vendor/github.com/shirou/gopsutil/{v3 => v4}/LICENSE (100%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/common/env.go (51%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu.go (98%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_aix.go (85%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_aix_cgo.go (96%) create mode 100644 vendor/github.com/shirou/gopsutil/v4/cpu/cpu_aix_nocgo.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin_arm64.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin_fallback.go rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_dragonfly.go (97%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_dragonfly_amd64.go (71%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_fallback.go (83%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_freebsd.go (96%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_freebsd_386.go (71%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_freebsd_amd64.go (71%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_freebsd_arm.go (71%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_freebsd_arm64.go (71%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_linux.go (97%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_netbsd.go (96%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_netbsd_amd64.go (71%) create mode 100644 vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd_arm.go rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_netbsd_arm64.go (71%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_openbsd.go (96%) rename 
vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_openbsd_386.go (73%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_openbsd_amd64.go (73%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_openbsd_arm.go (73%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_openbsd_arm64.go (73%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_openbsd_riscv64.go (73%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_plan9.go (91%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_solaris.go (99%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/cpu/cpu_windows.go (97%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/internal/common/binary.go (99%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/internal/common/common.go (95%) create mode 100644 vendor/github.com/shirou/gopsutil/v4/internal/common/common_darwin.go rename vendor/github.com/shirou/gopsutil/{v3 => v4}/internal/common/common_freebsd.go (97%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/internal/common/common_linux.go (99%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/internal/common/common_netbsd.go (96%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/internal/common/common_openbsd.go (96%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/internal/common/common_unix.go (62%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/internal/common/common_windows.go (99%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/internal/common/endian.go (88%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/internal/common/sleep.go (89%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/internal/common/warnings.go (92%) create mode 100644 vendor/github.com/shirou/gopsutil/v4/mem/ex_linux.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/mem/ex_windows.go rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem.go (97%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem_aix.go (58%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem_aix_cgo.go (97%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem_aix_nocgo.go (83%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem_bsd.go (98%) create mode 100644 vendor/github.com/shirou/gopsutil/v4/mem/mem_darwin.go rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem_fallback.go (86%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem_freebsd.go (97%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem_linux.go (93%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem_netbsd.go (98%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem_openbsd.go (96%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem_openbsd_386.go (93%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem_openbsd_amd64.go (92%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem_openbsd_arm.go (93%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem_openbsd_arm64.go (93%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem_openbsd_riscv64.go (94%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem_plan9.go (95%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem_solaris.go (98%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/mem/mem_windows.go (84%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/net/net.go (98%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/net/net_aix.go (93%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/net/net_aix_cgo.go (95%) rename 
vendor/github.com/shirou/gopsutil/{v3 => v4}/net/net_aix_nocgo.go (95%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/net/net_darwin.go (96%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/net/net_fallback.go (84%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/net/net_freebsd.go (95%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/net/net_linux.go (94%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/net/net_openbsd.go (97%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/net/net_solaris.go (98%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/net/net_unix.go (86%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/net/net_windows.go (97%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process.go (91%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_bsd.go (94%) create mode 100644 vendor/github.com/shirou/gopsutil/v4/process/process_darwin.go rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_darwin_amd64.go (87%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_darwin_arm64.go (85%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_fallback.go (92%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_freebsd.go (84%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_freebsd_386.go (98%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_freebsd_amd64.go (98%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_freebsd_arm.go (98%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_freebsd_arm64.go (98%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_linux.go (94%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_openbsd.go (85%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_openbsd_386.go (98%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_openbsd_amd64.go (98%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_openbsd_arm.go (98%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_openbsd_arm64.go (98%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_openbsd_riscv64.go (98%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_plan9.go (92%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_posix.go (97%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_solaris.go (94%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_windows.go (97%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_windows_32bit.go (97%) rename vendor/github.com/shirou/gopsutil/{v3 => v4}/process/process_windows_64bit.go (95%) delete mode 100644 vendor/github.com/shoenig/go-m1cpu/.golangci.yaml delete mode 100644 vendor/github.com/shoenig/go-m1cpu/LICENSE delete mode 100644 vendor/github.com/shoenig/go-m1cpu/Makefile delete mode 100644 vendor/github.com/shoenig/go-m1cpu/README.md delete mode 100644 vendor/github.com/shoenig/go-m1cpu/cpu.go delete mode 100644 vendor/github.com/shoenig/go-m1cpu/incompatible.go create mode 100644 vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go create mode 100644 vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go create mode 100644 vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go create mode 100644 vendor/github.com/ua-parser/uap-go/LICENSE create mode 100644 vendor/github.com/ua-parser/uap-go/uaparser/.gitignore create mode 
100644 vendor/github.com/ua-parser/uap-go/uaparser/LICENSE.md create mode 100644 vendor/github.com/ua-parser/uap-go/uaparser/cache.go create mode 100644 vendor/github.com/ua-parser/uap-go/uaparser/device.go create mode 100644 vendor/github.com/ua-parser/uap-go/uaparser/os.go create mode 100644 vendor/github.com/ua-parser/uap-go/uaparser/parser.go create mode 100644 vendor/github.com/ua-parser/uap-go/uaparser/user_agent.go create mode 100644 vendor/github.com/ua-parser/uap-go/uaparser/yaml.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/CONTRIBUTING.md create mode 100644 vendor/go.opentelemetry.io/auto/sdk/LICENSE create mode 100644 vendor/go.opentelemetry.io/auto/sdk/VERSIONING.md create mode 100644 vendor/go.opentelemetry.io/auto/sdk/doc.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/attr.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/doc.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/resource.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/scope.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/limit.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/span.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/tracer.go create mode 100644 vendor/go.opentelemetry.io/auto/sdk/tracer_provider.go create mode 100644 vendor/go.opentelemetry.io/collector/client/LICENSE create mode 100644 vendor/go.opentelemetry.io/collector/client/Makefile create mode 100644 vendor/go.opentelemetry.io/collector/component/componentstatus/LICENSE create mode 100644 vendor/go.opentelemetry.io/collector/component/componentstatus/Makefile create mode 100644 vendor/go.opentelemetry.io/collector/component/componentstatus/instance.go create mode 100644 vendor/go.opentelemetry.io/collector/component/componentstatus/status.go create mode 100644 vendor/go.opentelemetry.io/collector/component/componenttest/LICENSE create mode 100644 vendor/go.opentelemetry.io/collector/component/componenttest/Makefile create mode 100644 vendor/go.opentelemetry.io/collector/component/componenttest/otelchecker.go delete mode 100644 vendor/go.opentelemetry.io/collector/component/componenttest/otelprometheuschecker.go delete mode 100644 vendor/go.opentelemetry.io/collector/component/status.go delete mode 100644 vendor/go.opentelemetry.io/collector/config/configgrpc/internal/zstd.go create mode 100644 vendor/go.opentelemetry.io/collector/config/confighttp/compress_readcloser.go create mode 100644 vendor/go.opentelemetry.io/collector/config/confighttp/internal/options.go delete mode 100644 vendor/go.opentelemetry.io/collector/confmap/converter/expandconverter/expand.go delete mode 100644 vendor/go.opentelemetry.io/collector/confmap/internal/envvar/pattern.go delete mode 100644 vendor/go.opentelemetry.io/collector/confmap/provider/envprovider/provider.go delete mode 100644 vendor/go.opentelemetry.io/collector/confmap/provider/fileprovider/provider.go delete mode 100644 
vendor/go.opentelemetry.io/collector/confmap/provider/httpprovider/Makefile delete mode 100644 vendor/go.opentelemetry.io/collector/confmap/provider/httpprovider/README.md delete mode 100644 vendor/go.opentelemetry.io/collector/confmap/provider/httpprovider/provider.go delete mode 100644 vendor/go.opentelemetry.io/collector/confmap/provider/httpsprovider/Makefile delete mode 100644 vendor/go.opentelemetry.io/collector/confmap/provider/httpsprovider/README.md delete mode 100644 vendor/go.opentelemetry.io/collector/confmap/provider/httpsprovider/provider.go delete mode 100644 vendor/go.opentelemetry.io/collector/confmap/provider/internal/configurablehttpprovider/provider.go delete mode 100644 vendor/go.opentelemetry.io/collector/confmap/provider/internal/provider.go delete mode 100644 vendor/go.opentelemetry.io/collector/confmap/provider/yamlprovider/Makefile delete mode 100644 vendor/go.opentelemetry.io/collector/confmap/provider/yamlprovider/provider.go create mode 100644 vendor/go.opentelemetry.io/collector/connector/connectortest/LICENSE create mode 100644 vendor/go.opentelemetry.io/collector/connector/connectortest/Makefile create mode 100644 vendor/go.opentelemetry.io/collector/connector/connectortest/connector.go create mode 100644 vendor/go.opentelemetry.io/collector/connector/internal/factory.go rename vendor/go.opentelemetry.io/collector/connector/{ => internal}/router.go (50%) create mode 100644 vendor/go.opentelemetry.io/collector/connector/xconnector/LICENSE create mode 100644 vendor/go.opentelemetry.io/collector/connector/xconnector/Makefile create mode 100644 vendor/go.opentelemetry.io/collector/connector/xconnector/connector.go create mode 100644 vendor/go.opentelemetry.io/collector/connector/xconnector/profiles_router.go create mode 100644 vendor/go.opentelemetry.io/collector/consumer/consumererror/LICENSE create mode 100644 vendor/go.opentelemetry.io/collector/consumer/consumererror/Makefile create mode 100644 vendor/go.opentelemetry.io/collector/consumer/consumererror/internal/retryable.go create mode 100644 vendor/go.opentelemetry.io/collector/consumer/consumererror/xconsumererror/LICENSE rename vendor/go.opentelemetry.io/collector/{confmap/provider/envprovider => consumer/consumererror/xconsumererror}/Makefile (100%) create mode 100644 vendor/go.opentelemetry.io/collector/consumer/consumererror/xconsumererror/signalerrors.go create mode 100644 vendor/go.opentelemetry.io/collector/consumer/consumertest/LICENSE create mode 100644 vendor/go.opentelemetry.io/collector/consumer/consumertest/Makefile create mode 100644 vendor/go.opentelemetry.io/collector/consumer/internal/consumer.go create mode 100644 vendor/go.opentelemetry.io/collector/consumer/xconsumer/LICENSE create mode 100644 vendor/go.opentelemetry.io/collector/consumer/xconsumer/Makefile create mode 100644 vendor/go.opentelemetry.io/collector/consumer/xconsumer/profiles.go delete mode 100644 vendor/go.opentelemetry.io/collector/exporter/exporterbatcher/batch_func.go create mode 100644 vendor/go.opentelemetry.io/collector/exporter/exporterhelper/exporterhelper.go create mode 100644 vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/base_exporter.go rename vendor/go.opentelemetry.io/collector/exporter/exporterhelper/{ => internal}/batch_sender.go (64%) create mode 100644 vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/obsexporter.go rename vendor/go.opentelemetry.io/collector/{internal/obsreportconfig/obsmetrics/obs_exporter.go => exporter/exporterhelper/internal/obsmetrics.go} 
(51%) create mode 100644 vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/queue_sender.go create mode 100644 vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/request.go create mode 100644 vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/request_sender.go create mode 100644 vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/retry_sender.go create mode 100644 vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/test_util.go create mode 100644 vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/timeout_sender.go delete mode 100644 vendor/go.opentelemetry.io/collector/exporter/exporterhelper/obsexporter.go create mode 100644 vendor/go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper/LICENSE rename vendor/go.opentelemetry.io/collector/{confmap/provider/fileprovider => exporter/exporterhelper/xexporterhelper}/Makefile (100%) create mode 100644 vendor/go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper/constants.go create mode 100644 vendor/go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper/profiles.go create mode 100644 vendor/go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper/profiles_batch.go create mode 100644 vendor/go.opentelemetry.io/collector/exporter/exportertest/LICENSE create mode 100644 vendor/go.opentelemetry.io/collector/exporter/exportertest/Makefile create mode 100644 vendor/go.opentelemetry.io/collector/exporter/internal/queue/batcher.go create mode 100644 vendor/go.opentelemetry.io/collector/exporter/internal/queue/default_batcher.go create mode 100644 vendor/go.opentelemetry.io/collector/exporter/internal/queue/disabled_batcher.go rename vendor/go.opentelemetry.io/collector/exporter/{exporterhelper => internal}/request.go (53%) create mode 100644 vendor/go.opentelemetry.io/collector/exporter/xexporter/LICENSE create mode 100644 vendor/go.opentelemetry.io/collector/exporter/xexporter/Makefile create mode 100644 vendor/go.opentelemetry.io/collector/exporter/xexporter/exporter.go create mode 100644 vendor/go.opentelemetry.io/collector/extension/experimental/storage/LICENSE create mode 100644 vendor/go.opentelemetry.io/collector/extension/extensioncapabilities/LICENSE create mode 100644 vendor/go.opentelemetry.io/collector/extension/extensioncapabilities/Makefile create mode 100644 vendor/go.opentelemetry.io/collector/extension/extensioncapabilities/interfaces.go create mode 100644 vendor/go.opentelemetry.io/collector/extension/extensiontest/LICENSE create mode 100644 vendor/go.opentelemetry.io/collector/extension/extensiontest/Makefile create mode 100644 vendor/go.opentelemetry.io/collector/extension/extensiontest/nop_extension.go create mode 100644 vendor/go.opentelemetry.io/collector/internal/fanoutconsumer/LICENSE create mode 100644 vendor/go.opentelemetry.io/collector/internal/fanoutconsumer/Makefile create mode 100644 vendor/go.opentelemetry.io/collector/internal/fanoutconsumer/profiles.go delete mode 100644 vendor/go.opentelemetry.io/collector/internal/localhostgate/featuregate.go delete mode 100644 vendor/go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics/obs_processor.go delete mode 100644 vendor/go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics/obs_scraper.go delete mode 100644 vendor/go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics/obsmetrics.go delete mode 100644 
vendor/go.opentelemetry.io/collector/internal/obsreportconfig/obsreportconfig.go create mode 100644 vendor/go.opentelemetry.io/collector/internal/sharedcomponent/LICENSE create mode 100644 vendor/go.opentelemetry.io/collector/internal/sharedcomponent/Makefile create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/data/profileid.go rename vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/{v1experimental => v1development}/profiles_service.pb.go (84%) rename vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/{v1experimental/pprofextended.pb.go => v1development/profiles.pb.go} (55%) delete mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1experimental/profiles.pb.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_int32slice.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_intslice.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/otlp/profiles.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_int32slice.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_intslice.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pprofile/LICENSE create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pprofile/Makefile create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pprofile/encoding.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_attribute.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_attributetableslice.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_attributeunit.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_attributeunitslice.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_function.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_functionslice.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_line.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_lineslice.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_link.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_linkslice.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_location.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_locationslice.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_mapping.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_mappingslice.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_profile.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_profilesslice.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_resourceprofiles.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_resourceprofilesslice.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_sample.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_sampleslice.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_scopeprofiles.go create mode 100644 
vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_scopeprofilesslice.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_valuetype.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_valuetypeslice.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pprofile/json.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pprofile/pb.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp/generated_exportpartialsuccess.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp/grpc.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp/request.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp/response.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pprofile/profileid.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pprofile/profiles.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/testdata/profile.go create mode 100644 vendor/go.opentelemetry.io/collector/pipeline/LICENSE create mode 100644 vendor/go.opentelemetry.io/collector/pipeline/Makefile create mode 100644 vendor/go.opentelemetry.io/collector/pipeline/internal/globalsignal/signal.go create mode 100644 vendor/go.opentelemetry.io/collector/pipeline/pipeline.go create mode 100644 vendor/go.opentelemetry.io/collector/pipeline/signal.go create mode 100644 vendor/go.opentelemetry.io/collector/pipeline/xpipeline/LICENSE create mode 100644 vendor/go.opentelemetry.io/collector/pipeline/xpipeline/Makefile create mode 100644 vendor/go.opentelemetry.io/collector/pipeline/xpipeline/config.go create mode 100644 vendor/go.opentelemetry.io/collector/processor/internal/obsmetrics.go create mode 100644 vendor/go.opentelemetry.io/collector/processor/processortest/LICENSE create mode 100644 vendor/go.opentelemetry.io/collector/processor/processortest/Makefile create mode 100644 vendor/go.opentelemetry.io/collector/processor/processortest/nop_processor.go create mode 100644 vendor/go.opentelemetry.io/collector/processor/processortest/shutdown_verifier.go create mode 100644 vendor/go.opentelemetry.io/collector/processor/processortest/unhealthy_processor.go create mode 100644 vendor/go.opentelemetry.io/collector/processor/xprocessor/LICENSE create mode 100644 vendor/go.opentelemetry.io/collector/processor/xprocessor/Makefile create mode 100644 vendor/go.opentelemetry.io/collector/processor/xprocessor/processor.go rename vendor/go.opentelemetry.io/collector/{internal/obsreportconfig/obsmetrics/obs_receiver.go => receiver/internal/obsmetrics.go} (85%) create mode 100644 vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/internal/profiles/otlp.go create mode 100644 vendor/go.opentelemetry.io/collector/receiver/receivertest/LICENSE create mode 100644 vendor/go.opentelemetry.io/collector/receiver/receivertest/Makefile create mode 100644 vendor/go.opentelemetry.io/collector/receiver/xreceiver/LICENSE create mode 100644 vendor/go.opentelemetry.io/collector/receiver/xreceiver/Makefile create mode 100644 vendor/go.opentelemetry.io/collector/receiver/xreceiver/profiles.go create mode 100644 vendor/go.opentelemetry.io/collector/semconv/v1.12.0/generated_resource.go create mode 100644 vendor/go.opentelemetry.io/collector/semconv/v1.12.0/generated_trace.go create mode 100644 vendor/go.opentelemetry.io/collector/semconv/v1.12.0/nonstandard.go create mode 100644 
vendor/go.opentelemetry.io/collector/semconv/v1.12.0/schema.go create mode 100644 vendor/go.opentelemetry.io/collector/semconv/v1.16.0/generated_resource.go create mode 100644 vendor/go.opentelemetry.io/collector/semconv/v1.16.0/generated_trace.go create mode 100644 vendor/go.opentelemetry.io/collector/semconv/v1.16.0/nonstandard.go create mode 100644 vendor/go.opentelemetry.io/collector/semconv/v1.16.0/schema.go create mode 100644 vendor/go.opentelemetry.io/collector/semconv/v1.25.0/doc.go create mode 100644 vendor/go.opentelemetry.io/collector/semconv/v1.25.0/generated_attribute_group.go create mode 100644 vendor/go.opentelemetry.io/collector/semconv/v1.25.0/generated_event.go create mode 100644 vendor/go.opentelemetry.io/collector/semconv/v1.25.0/generated_resource.go create mode 100644 vendor/go.opentelemetry.io/collector/semconv/v1.25.0/generated_trace.go create mode 100644 vendor/go.opentelemetry.io/collector/semconv/v1.25.0/schema.go create mode 100644 vendor/go.opentelemetry.io/collector/semconv/v1.26.0/doc.go create mode 100644 vendor/go.opentelemetry.io/collector/semconv/v1.26.0/generated_attribute_group.go create mode 100644 vendor/go.opentelemetry.io/collector/semconv/v1.26.0/generated_event.go create mode 100644 vendor/go.opentelemetry.io/collector/semconv/v1.26.0/generated_resource.go create mode 100644 vendor/go.opentelemetry.io/collector/semconv/v1.26.0/generated_trace.go create mode 100644 vendor/go.opentelemetry.io/collector/semconv/v1.26.0/schema.go create mode 100644 vendor/go.opentelemetry.io/collector/semconv/v1.27.0/doc.go create mode 100644 vendor/go.opentelemetry.io/collector/semconv/v1.27.0/generated_attribute_group.go create mode 100644 vendor/go.opentelemetry.io/collector/semconv/v1.27.0/generated_event.go create mode 100644 vendor/go.opentelemetry.io/collector/semconv/v1.27.0/generated_resource.go create mode 100644 vendor/go.opentelemetry.io/collector/semconv/v1.27.0/generated_trace.go create mode 100644 vendor/go.opentelemetry.io/collector/semconv/v1.27.0/schema.go create mode 100644 vendor/go.opentelemetry.io/collector/service/attributes.go create mode 100644 vendor/go.opentelemetry.io/collector/service/documentation.md delete mode 100644 vendor/go.opentelemetry.io/collector/service/host.go create mode 100644 vendor/go.opentelemetry.io/collector/service/internal/builders/builders.go create mode 100644 vendor/go.opentelemetry.io/collector/service/internal/builders/connector.go create mode 100644 vendor/go.opentelemetry.io/collector/service/internal/builders/exporter.go create mode 100644 vendor/go.opentelemetry.io/collector/service/internal/builders/extension.go create mode 100644 vendor/go.opentelemetry.io/collector/service/internal/builders/processor.go create mode 100644 vendor/go.opentelemetry.io/collector/service/internal/builders/receiver.go create mode 100644 vendor/go.opentelemetry.io/collector/service/internal/graph/capabilities.go create mode 100644 vendor/go.opentelemetry.io/collector/service/internal/graph/connector.go create mode 100644 vendor/go.opentelemetry.io/collector/service/internal/graph/consumer.go create mode 100644 vendor/go.opentelemetry.io/collector/service/internal/graph/exporter.go create mode 100644 vendor/go.opentelemetry.io/collector/service/internal/graph/fanout.go create mode 100644 vendor/go.opentelemetry.io/collector/service/internal/graph/host.go create mode 100644 vendor/go.opentelemetry.io/collector/service/internal/graph/node.go delete mode 100644 vendor/go.opentelemetry.io/collector/service/internal/graph/nodes.go 
create mode 100644 vendor/go.opentelemetry.io/collector/service/internal/graph/processor.go create mode 100644 vendor/go.opentelemetry.io/collector/service/internal/graph/receiver.go create mode 100644 vendor/go.opentelemetry.io/collector/service/internal/metadata/generated_telemetry.go delete mode 100644 vendor/go.opentelemetry.io/collector/service/internal/servicetelemetry/nop_telemetry_settings.go delete mode 100644 vendor/go.opentelemetry.io/collector/service/internal/servicetelemetry/telemetry_settings.go create mode 100644 vendor/go.opentelemetry.io/collector/service/metadata.yaml create mode 100644 vendor/go.opentelemetry.io/collector/service/telemetry/factory_impl.go delete mode 100644 vendor/go.opentelemetry.io/collector/service/telemetry/internal/factory.go rename vendor/go.opentelemetry.io/collector/service/{internal/proctelemetry => telemetry/internal/otelinit}/config.go (61%) rename vendor/go.opentelemetry.io/collector/service/{telemetry.go => telemetry/metrics.go} (58%) delete mode 100644 vendor/go.opentelemetry.io/collector/service/telemetry/telemetry.go delete mode 100644 vendor/go.opentelemetry.io/collector/service/zpages.go create mode 100644 vendor/go.opentelemetry.io/contrib/bridges/otelzap/LICENSE create mode 100644 vendor/go.opentelemetry.io/contrib/bridges/otelzap/README.md create mode 100644 vendor/go.opentelemetry.io/contrib/bridges/otelzap/convert.go create mode 100644 vendor/go.opentelemetry.io/contrib/bridges/otelzap/core.go create mode 100644 vendor/go.opentelemetry.io/contrib/bridges/otelzap/encoder.go create mode 100644 vendor/go.opentelemetry.io/contrib/bridges/otelzap/gen.go create mode 100644 vendor/go.opentelemetry.io/contrib/config/log.go create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/start_time_context.go create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/LICENSE create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/README.md create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/client.go create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/config.go create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/doc.go create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/exporter.go create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/retry/retry.go create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/transform/log.go create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/version.go create mode 100644 vendor/go.opentelemetry.io/otel/log/global/README.md create mode 100644 vendor/go.opentelemetry.io/otel/log/global/log.go create mode 100644 vendor/go.opentelemetry.io/otel/log/internal/global/log.go create mode 100644 vendor/go.opentelemetry.io/otel/log/internal/global/state.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/log/internal/x/README.md create mode 100644 vendor/go.opentelemetry.io/otel/sdk/log/internal/x/x.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/README.md rename vendor/go.opentelemetry.io/otel/sdk/metric/{internal => }/exemplar/doc.go (93%) rename vendor/go.opentelemetry.io/otel/sdk/metric/{internal => }/exemplar/exemplar.go (98%) rename vendor/go.opentelemetry.io/otel/sdk/metric/{internal => }/exemplar/filter.go (75%) rename 
vendor/go.opentelemetry.io/otel/sdk/metric/{internal/exemplar/rand.go => exemplar/fixed_size_reservoir.go} (73%) create mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/histogram_reservoir.go rename vendor/go.opentelemetry.io/otel/sdk/metric/{internal => }/exemplar/reservoir.go (73%) rename vendor/go.opentelemetry.io/otel/sdk/metric/{internal => }/exemplar/storage.go (94%) rename vendor/go.opentelemetry.io/otel/sdk/metric/{internal => }/exemplar/value.go (84%) create mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/drop.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/filtered_reservoir.go delete mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/drop.go delete mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filtered_reservoir.go delete mode 100644 vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/hist.go create mode 100644 vendor/golang.org/x/net/context/context.go create mode 100644 vendor/golang.org/x/net/context/go17.go create mode 100644 vendor/golang.org/x/net/context/go19.go create mode 100644 vendor/golang.org/x/net/context/pre_go17.go create mode 100644 vendor/golang.org/x/net/context/pre_go19.go create mode 100644 vendor/golang.org/x/net/html/atom/atom.go create mode 100644 vendor/golang.org/x/net/html/atom/table.go create mode 100644 vendor/golang.org/x/net/html/charset/charset.go create mode 100644 vendor/golang.org/x/net/html/const.go create mode 100644 vendor/golang.org/x/net/html/doc.go create mode 100644 vendor/golang.org/x/net/html/doctype.go create mode 100644 vendor/golang.org/x/net/html/entity.go create mode 100644 vendor/golang.org/x/net/html/escape.go create mode 100644 vendor/golang.org/x/net/html/foreign.go create mode 100644 vendor/golang.org/x/net/html/iter.go create mode 100644 vendor/golang.org/x/net/html/node.go create mode 100644 vendor/golang.org/x/net/html/parse.go create mode 100644 vendor/golang.org/x/net/html/render.go create mode 100644 vendor/golang.org/x/net/html/token.go create mode 100644 vendor/golang.org/x/text/encoding/htmlindex/htmlindex.go create mode 100644 vendor/golang.org/x/text/encoding/htmlindex/map.go create mode 100644 vendor/golang.org/x/text/encoding/htmlindex/tables.go create mode 100644 vendor/golang.org/x/text/internal/language/common.go create mode 100644 vendor/golang.org/x/text/internal/language/compact.go create mode 100644 vendor/golang.org/x/text/internal/language/compact/compact.go create mode 100644 vendor/golang.org/x/text/internal/language/compact/language.go create mode 100644 vendor/golang.org/x/text/internal/language/compact/parents.go create mode 100644 vendor/golang.org/x/text/internal/language/compact/tables.go create mode 100644 vendor/golang.org/x/text/internal/language/compact/tags.go create mode 100644 vendor/golang.org/x/text/internal/language/compose.go create mode 100644 vendor/golang.org/x/text/internal/language/coverage.go create mode 100644 vendor/golang.org/x/text/internal/language/language.go create mode 100644 vendor/golang.org/x/text/internal/language/lookup.go create mode 100644 vendor/golang.org/x/text/internal/language/match.go create mode 100644 vendor/golang.org/x/text/internal/language/parse.go create mode 100644 vendor/golang.org/x/text/internal/language/tables.go create mode 100644 vendor/golang.org/x/text/internal/language/tags.go create mode 100644 vendor/golang.org/x/text/internal/tag/tag.go create mode 100644 vendor/golang.org/x/text/language/coverage.go create 
mode 100644 vendor/golang.org/x/text/language/doc.go create mode 100644 vendor/golang.org/x/text/language/language.go create mode 100644 vendor/golang.org/x/text/language/match.go create mode 100644 vendor/golang.org/x/text/language/parse.go create mode 100644 vendor/golang.org/x/text/language/tables.go create mode 100644 vendor/golang.org/x/text/language/tags.go create mode 100644 vendor/golang.org/x/tools/go/types/typeutil/callee.go create mode 100644 vendor/golang.org/x/tools/go/types/typeutil/imports.go create mode 100644 vendor/golang.org/x/tools/go/types/typeutil/map.go create mode 100644 vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go create mode 100644 vendor/golang.org/x/tools/go/types/typeutil/ui.go delete mode 100644 vendor/golang.org/x/tools/internal/aliases/aliases_go121.go create mode 100644 vendor/golang.org/x/tools/internal/gcimporter/iimport_go122.go delete mode 100644 vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go delete mode 100644 vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go create mode 100644 vendor/golang.org/x/tools/internal/gcimporter/predeclared.go delete mode 100644 vendor/golang.org/x/tools/internal/gcimporter/support_go118.go delete mode 100644 vendor/golang.org/x/tools/internal/gcimporter/unified_no.go delete mode 100644 vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go delete mode 100644 vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go delete mode 100644 vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go create mode 100644 vendor/golang.org/x/tools/internal/pkgbits/version.go delete mode 100644 vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go create mode 100644 vendor/golang.org/x/tools/internal/typeparams/common.go create mode 100644 vendor/golang.org/x/tools/internal/typeparams/coretype.go create mode 100644 vendor/golang.org/x/tools/internal/typeparams/free.go create mode 100644 vendor/golang.org/x/tools/internal/typeparams/normalize.go create mode 100644 vendor/golang.org/x/tools/internal/typeparams/termlist.go create mode 100644 vendor/golang.org/x/tools/internal/typeparams/typeterm.go create mode 100644 vendor/golang.org/x/tools/internal/typesinternal/element.go create mode 100644 vendor/golang.org/x/tools/internal/versions/constraint.go rename vendor/golang.org/x/tools/internal/versions/{toolchain_go121.go => constraint_go121.go} (75%) delete mode 100644 vendor/golang.org/x/tools/internal/versions/toolchain.go delete mode 100644 vendor/golang.org/x/tools/internal/versions/toolchain_go119.go delete mode 100644 vendor/golang.org/x/tools/internal/versions/toolchain_go120.go delete mode 100644 vendor/golang.org/x/tools/internal/versions/types_go121.go delete mode 100644 vendor/golang.org/x/tools/internal/versions/types_go122.go delete mode 100644 vendor/gonum.org/v1/gonum/graph/internal/ordered/doc.go delete mode 100644 vendor/gonum.org/v1/gonum/graph/internal/ordered/sort.go create mode 100644 vendor/gonum.org/v1/gonum/internal/order/doc.go create mode 100644 vendor/gonum.org/v1/gonum/internal/order/order.go create mode 100644 vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go create mode 100644 vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go create mode 100644 vendor/google.golang.org/grpc/encoding/encoding_v2.go create mode 100644 vendor/google.golang.org/grpc/experimental/stats/metricregistry.go create mode 100644 vendor/google.golang.org/grpc/experimental/stats/metrics.go create mode 100644 
vendor/google.golang.org/grpc/grpclog/internal/grpclog.go create mode 100644 vendor/google.golang.org/grpc/grpclog/internal/logger.go rename vendor/google.golang.org/grpc/{internal/grpclog/grpclog.go => grpclog/internal/loggerv2.go} (52%) rename vendor/google.golang.org/grpc/internal/grpclog/{prefixLogger.go => prefix_logger.go} (63%) create mode 100644 vendor/google.golang.org/grpc/internal/stats/labels.go create mode 100644 vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go create mode 100644 vendor/google.golang.org/grpc/mem/buffer_pool.go create mode 100644 vendor/google.golang.org/grpc/mem/buffer_slice.go create mode 100644 vendor/google.golang.org/grpc/mem/buffers.go delete mode 100644 vendor/google.golang.org/grpc/reflection/README.md delete mode 100644 vendor/google.golang.org/grpc/reflection/adapt.go delete mode 100644 vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go delete mode 100644 vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection_grpc.pb.go delete mode 100644 vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go delete mode 100644 vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go delete mode 100644 vendor/google.golang.org/grpc/reflection/internal/internal.go delete mode 100644 vendor/google.golang.org/grpc/reflection/serverreflection.go delete mode 100644 vendor/google.golang.org/grpc/regenerate.sh delete mode 100644 vendor/google.golang.org/grpc/shared_buffer_pool.go diff --git a/.goreleaser.yml b/.goreleaser.yml index 9304e5708d2..9ddce1cdaea 100644 --- a/.goreleaser.yml +++ b/.goreleaser.yml @@ -128,7 +128,7 @@ nfpms: - deb - rpm contents: - - src: ./example/docker-compose/shared/tempo.yaml + - src: ./tools/packaging/tempo.yaml dst: /etc/tempo/config.yml type: 'config|noreplace' - src: ./tools/packaging/tempo.service diff --git a/CHANGELOG.md b/CHANGELOG.md index d85ef145bde..d4adc6c84e2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -37,6 +37,22 @@ querier: * [CHANGE] Upgrade OTEL sdk to reduce allocs [#4243](https://github.com/grafana/tempo/pull/4243) (@joe-elliott) * [CHANGE] Tighten file permissions [#4251](https://github.com/grafana/tempo/pull/4251) (@zalegrala) * [CHANGE] Drop max live traces log message and rate limit trace too large. [#4418](https://github.com/grafana/tempo/pull/4418) (@joe-elliott)
+* [CHANGE] Update the OpenTelemetry dependencies to v0.116.0 [#4466](https://github.com/grafana/tempo/pull/4466) (@yvrhdn)
+  **BREAKING CHANGE** After this update the OpenTelemetry Collector receivers will listen on `localhost` instead of on all interfaces (`0.0.0.0`).
+  Because of this, Tempo installations running inside Docker have to update the address their receivers listen on.
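+  To keep accepting traces from other containers or hosts, the receivers can be bound to all interfaces again explicitly. A minimal sketch of the relevant `tempo.yaml` fragment (illustrative only, assuming the default OTLP ports 4317/gRPC and 4318/HTTP):
+  ```yaml
+  distributor:
+    receivers:
+      otlp:
+        protocols:
+          grpc:
+            endpoint: "0.0.0.0:4317"  # listen on all interfaces, as before this change
+          http:
+            endpoint: "0.0.0.0:4318"
+  ```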
+ For more details on this change, see [#4465](https://github.com/grafana/tempo/issues/4465) + For more information about the security risk this change addresses, see https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/security-best-practices.md#safeguards-against-denial-of-service-attacks * [FEATURE] tempo-cli: support dropping multiple traces in a single operation [#4266](https://github.com/grafana/tempo/pull/4266) (@ndk) * [FEATURE] Discarded span logging `log_discarded_spans` [#3957](https://github.com/grafana/tempo/issues/3957) (@dastrobu) * [FEATURE] TraceQL support for instrumentation scope [#3967](https://github.com/grafana/tempo/pull/3967) (@ie-pham) diff --git a/cmd/tempo-query/tempo/plugin.go b/cmd/tempo-query/tempo/plugin.go index 25708d78275..ce84f3cea8f 100644 --- a/cmd/tempo-query/tempo/plugin.go +++ b/cmd/tempo-query/tempo/plugin.go @@ -18,18 +18,17 @@ import ( "github.com/gogo/protobuf/jsonpb" tlsCfg "github.com/grafana/dskit/crypto/tls" "github.com/grafana/dskit/user" - "github.com/grafana/tempo/pkg/tempopb" + jaeger "github.com/jaegertracing/jaeger/model" "github.com/jaegertracing/jaeger/proto-gen/storage_v1" + jaeger_spanstore "github.com/jaegertracing/jaeger/storage/spanstore" + ot_jaeger "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger" "go.opentelemetry.io/collector/pdata/ptrace" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/propagation" "go.uber.org/zap" "google.golang.org/grpc/metadata" - jaeger "github.com/jaegertracing/jaeger/model" - jaeger_spanstore "github.com/jaegertracing/jaeger/storage/spanstore" - - ot_jaeger "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger" + "github.com/grafana/tempo/pkg/tempopb" ) const ( @@ -245,10 +244,7 @@ func (b *Backend) getTrace(ctx context.Context, traceID jaeger.TraceID) (*jaeger return nil, fmt.Errorf("error unmarshalling body to otlp trace %v: %w", traceID, err) } - jaegerBatches, err := ot_jaeger.ProtoFromTraces(otTrace) - if err != nil { - return nil, fmt.Errorf("error translating to jaegerBatches %v: %w", traceID, err) - } + jaegerBatches := ot_jaeger.ProtoFromTraces(otTrace) jaegerTrace := &jaeger.Trace{ Spans: []*jaeger.Span{}, @@ -372,7 +368,7 @@ func (b *Backend) FindTraces(req *storage_v1.FindTracesRequest, stream storage_v for i := 0; i < len(resp.TraceIDs); i++ { result := <-results if result.err != nil { - //// TODO this seems to be an internal inconsistency error, ignore so we can still show the rest + // TODO this seems to be an internal inconsistency error, ignore so we can still show the rest b.logger.Info("failed to get a trace", zap.Error(err), zap.String("traceid", result.traceID.String())) span.AddEvent(fmt.Sprintf("could not get trace for traceID %v", result.traceID)) span.RecordError(err) diff --git a/cmd/tempo-serverless/cloud-run/go.mod b/cmd/tempo-serverless/cloud-run/go.mod index 36ed85879cb..1caffa661f5 100644 --- a/cmd/tempo-serverless/cloud-run/go.mod +++ b/cmd/tempo-serverless/cloud-run/go.mod @@ -9,19 +9,19 @@ require ( require ( cloud.google.com/go v0.115.0 // indirect - cloud.google.com/go/auth v0.7.3 // indirect - cloud.google.com/go/auth/oauth2adapt v0.2.3 // indirect - cloud.google.com/go/compute/metadata v0.5.0 // indirect + cloud.google.com/go/auth v0.9.9 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.6 // indirect + cloud.google.com/go/compute/metadata v0.5.2 // indirect cloud.google.com/go/iam v1.1.12 // indirect cloud.google.com/go/storage v1.41.0 // indirect 
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0 // indirect - github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect - github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 // indirect + github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.1 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.2.3 // indirect + github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b // indirect github.com/andybalholm/brotli v1.1.1 // indirect - github.com/apache/thrift v0.20.0 // indirect + github.com/apache/thrift v0.21.0 // indirect github.com/aws/aws-sdk-go v1.55.5 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash v1.1.0 // indirect @@ -32,36 +32,36 @@ require ( github.com/dustin/go-humanize v1.0.1 // indirect github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb // indirect github.com/felixge/httpsnoop v1.0.4 // indirect - github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/fsnotify/fsnotify v1.8.0 // indirect github.com/go-ini/ini v1.67.0 // indirect github.com/go-kit/log v0.2.1 // indirect github.com/go-logfmt/logfmt v0.6.0 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-redis/redis/v8 v8.11.5 // indirect - github.com/goccy/go-json v0.10.3 // indirect + github.com/goccy/go-json v0.10.4 // indirect github.com/gogo/googleapis v1.4.1 // indirect github.com/gogo/status v1.1.1 // indirect github.com/golang-jwt/jwt/v5 v5.2.1 // indirect - github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/golang/snappy v0.0.4 // indirect github.com/google/s2a-go v0.1.8 // indirect github.com/google/uuid v1.6.0 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect github.com/googleapis/gax-go/v2 v2.13.0 // indirect github.com/gorilla/mux v1.8.1 // indirect github.com/grafana/dskit v0.0.0-20241115082728-f2a7eb3aa0e9 // indirect - github.com/grafana/gomemcache v0.0.0-20240229205252-cd6a66d6fb56 // indirect + github.com/grafana/gomemcache v0.0.0-20241016125027-0a5bcc5aef40 // indirect github.com/grafana/pyroscope-go/godeltaprof v0.1.8 // indirect github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc // indirect github.com/hashicorp/hcl v1.0.0 // indirect - github.com/jaegertracing/jaeger v1.57.0 // indirect + github.com/jaegertracing/jaeger v1.63.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/klauspost/compress v1.17.11 // indirect - github.com/klauspost/cpuid/v2 v2.2.8 // indirect + github.com/klauspost/cpuid/v2 v2.2.9 // indirect github.com/kylelemons/godebug v1.1.0 // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mattn/go-runewidth v0.0.16 // indirect @@ -74,23 +74,23 @@ require ( github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect github.com/olekukonko/tablewriter v0.0.5 // indirect - 
github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.102.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.102.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.116.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.116.0 // indirect github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e // indirect github.com/opentracing-contrib/go-stdlib v1.0.0 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/parquet-go/parquet-go v0.23.1-0.20241011155651-6446d1d0d2fe // indirect - github.com/pelletier/go-toml/v2 v2.2.2 // indirect + github.com/pelletier/go-toml/v2 v2.2.3 // indirect github.com/pierrec/lz4/v4 v4.1.21 // indirect github.com/pires/go-proxyproto v0.7.0 // indirect github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/pkg/errors v0.9.1 // indirect - github.com/prometheus/client_golang v1.19.1 // indirect + github.com/prometheus/client_golang v1.20.5 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.55.0 // indirect + github.com/prometheus/common v0.61.0 // indirect github.com/prometheus/exporter-toolkit v0.11.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect - github.com/prometheus/prometheus v0.54.0 // indirect + github.com/prometheus/prometheus v0.54.1 // indirect github.com/rivo/uniseg v0.4.7 // indirect github.com/rs/xid v1.6.0 // indirect github.com/sagikazarmark/locafero v0.4.0 // indirect @@ -110,31 +110,32 @@ require ( github.com/willf/bitset v1.1.11 // indirect github.com/willf/bloom v2.0.3+incompatible // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/collector/pdata v1.12.0 // indirect - go.opentelemetry.io/collector/semconv v0.105.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 // indirect - go.opentelemetry.io/otel v1.31.0 // indirect - go.opentelemetry.io/otel/metric v1.31.0 // indirect - go.opentelemetry.io/otel/trace v1.31.0 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/collector/pdata v1.22.0 // indirect + go.opentelemetry.io/collector/semconv v0.116.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect + go.opentelemetry.io/otel v1.33.0 // indirect + go.opentelemetry.io/otel/metric v1.33.0 // indirect + go.opentelemetry.io/otel/trace v1.33.0 // indirect go.uber.org/atomic v1.11.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect golang.org/x/crypto v0.31.0 // indirect - golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 // indirect - golang.org/x/mod v0.19.0 // indirect - golang.org/x/net v0.31.0 // indirect - golang.org/x/oauth2 v0.21.0 // indirect + golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f // indirect + golang.org/x/mod v0.22.0 // indirect + golang.org/x/net v0.32.0 // indirect + golang.org/x/oauth2 v0.24.0 // indirect golang.org/x/sync v0.10.0 // indirect golang.org/x/sys v0.28.0 // indirect golang.org/x/text v0.21.0 // indirect golang.org/x/time v0.5.0 // indirect - golang.org/x/tools v0.23.0 // indirect + golang.org/x/tools v0.27.0 // indirect google.golang.org/api v0.190.0 // 
indirect google.golang.org/genproto v0.0.0-20240730163845-b1a4ccb954bf // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf // indirect - google.golang.org/grpc v1.65.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 // indirect + google.golang.org/grpc v1.68.1 // indirect google.golang.org/protobuf v1.35.2 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/cmd/tempo-serverless/cloud-run/go.sum b/cmd/tempo-serverless/cloud-run/go.sum index 0f4422f5216..167ab6a7671 100644 --- a/cmd/tempo-serverless/cloud-run/go.sum +++ b/cmd/tempo-serverless/cloud-run/go.sum @@ -1,12 +1,12 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.115.0 h1:CnFSK6Xo3lDYRoBKEcAtia6VSC837/ZkJuRduSFnr14= cloud.google.com/go v0.115.0/go.mod h1:8jIM5vVgoAEoiVxQ/O4BFTfHqulPZgs/ufEzMcFMdWU= -cloud.google.com/go/auth v0.7.3 h1:98Vr+5jMaCZ5NZk6e/uBgf60phTk/XN84r8QEWB9yjY= -cloud.google.com/go/auth v0.7.3/go.mod h1:HJtWUx1P5eqjy/f6Iq5KeytNpbAcGolPhOgyop2LlzA= -cloud.google.com/go/auth/oauth2adapt v0.2.3 h1:MlxF+Pd3OmSudg/b1yZ5lJwoXCEaeedAguodky1PcKI= -cloud.google.com/go/auth/oauth2adapt v0.2.3/go.mod h1:tMQXOfZzFuNuUxOypHlQEXgdfX5cuhwU+ffUuXRJE8I= -cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY= -cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= +cloud.google.com/go/auth v0.9.9 h1:BmtbpNQozo8ZwW2t7QJjnrQtdganSdmqeIBxHxNkEZQ= +cloud.google.com/go/auth v0.9.9/go.mod h1:xxA5AqpDrvS+Gkmo9RqrGGRh6WSNKKOXhY3zNOr38tI= +cloud.google.com/go/auth/oauth2adapt v0.2.6 h1:V6a6XDu2lTwPZWOawrAa9HUK+DB2zfJyTuciBG5hFkU= +cloud.google.com/go/auth/oauth2adapt v0.2.6/go.mod h1:AlmsELtlEBnaNTL7jCj8VQFLy6mbZv0s4Q7NGBeQ5E8= +cloud.google.com/go/compute/metadata v0.5.2 h1:UxK4uu/Tn+I3p2dYWTfiX4wva7aYlKixAHn3fyqngqo= +cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k= cloud.google.com/go/iam v1.1.12 h1:JixGLimRrNGcxvJEQ8+clfLxPlbeZA6MuRJ+qJNQ5Xw= cloud.google.com/go/iam v1.1.12/go.mod h1:9LDX8J7dN5YRyzVHxwQzrQs9opFFqn0Mxs9nAeB+Hhg= cloud.google.com/go/storage v1.41.0 h1:RusiwatSu6lHeEXe3kglxakAmAbfV+rhtPqA6i8RBx0= @@ -19,30 +19,30 @@ github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.0 h1:+m0M/LFxN43KvUL github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.0/go.mod h1:PwOyop78lveYMRs6oCxjiVyBdyCgIYH6XHIVZO9/SFQ= github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY= github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.2.0 h1:Ma67P/GGprNwsslzEH6+Kb8nybI8jpDTm4Wmzu2ReK8= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.2.0/go.mod h1:c+Lifp3EDEamAkPVzMooRNOK6CZjNSdEnf1A7jsI9u4= -github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0 h1:gggzg0SUMs6SQbEw+3LoSsYf9YMjkupeAnHMX8O9mmY= -github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0/go.mod h1:+6KLcKIVgxoBDMqMO/Nvy7bZ9a0nbU3I1DtFQK3YvB4= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0 h1:AifHbc4mg0x9zW52WOpKbsHaDKuRhlI7TVl47thgQ70= 
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0/go.mod h1:T5RfihdXtBDxt1Ch2wobif3TvzTdumDy29kahv6AV9A= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.1 h1:AMf7YbZOZIW5b66cXNHMWWT/zkjhz5+a+k/3x40EO7E= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.1/go.mod h1:uwfk06ZBcvL/g4VHNjurPfVln9NMbsk2XIZxJ+hu81k= github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM= github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE= -github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU= -github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.3 h1:6LyjnnaLpcOKK0fbYisI+mb8CE7iNe7i89nMNQxFxs8= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.3/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM= github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 h1:t3eaIm0rUkzbrIewtiFmMK5RXHej2XnoXNhxVsAYUfg= -github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs= -github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a h1:HbKu58rmZpUGpz5+4FfNmIU+FmZg2P3Xaj2v2bfNWmk= -github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc= +github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b h1:mimo19zliBX/vSQ6PWWSL9lK8qwHozUj03+zLoEB8O0= +github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs= +github.com/alicebob/gopher-json v0.0.0-20230218143504-906a9b012302 h1:uvdUDbHQHO85qeSydJtItA4T55Pw6BtAejd0APRJOCE= +github.com/alicebob/gopher-json v0.0.0-20230218143504-906a9b012302/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc= github.com/alicebob/miniredis v2.5.0+incompatible h1:yBHoLpsyjupjz3NL3MhKMVkR41j82Yjf3KFv7ApYzUI= github.com/alicebob/miniredis/v2 v2.21.0 h1:CdmwIlKUWFBDS+4464GtQiQ0R1vpzOgu4Vnd74rBL7M= github.com/alicebob/miniredis/v2 v2.21.0/go.mod h1:XNqvJdQJv5mSuVMc0ynneafpnL/zv52acZ6kqeS0t88= github.com/andybalholm/brotli v1.1.1 h1:PR2pgnyFznKEugtsUo0xLdDop5SKXd5Qf5ysW+7XdTA= github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA= -github.com/apache/thrift v0.20.0 h1:631+KvYbsBZxmuJjYwhezVsrfc/TbqtZV4QcxOX1fOI= -github.com/apache/thrift v0.20.0/go.mod h1:hOk1BQqcp2OLzGsyVXdfMk7YFlMxK3aoEVhjD06QhB8= +github.com/apache/thrift v0.21.0 h1:tdPmh/ptjE1IJnhbhrcl2++TauVjy242rkV/UzJChnE= +github.com/apache/thrift v0.21.0/go.mod h1:W1H8aR/QRtYNvrPeFXBtobyRkd0/YVhTc6i07XIAgDw= github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU= github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -72,14 +72,12 
@@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb h1:IT4JYU7k4ikYg1SCxNI1/Tieq/NFvh6dzLdgi7eu0tM= github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb/go.mod h1:bH6Xx7IW64qjjJq8M2u4dxNaBiDfKK+z/3eGDpXEQhc= -github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= -github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M= +github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A= github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= @@ -93,8 +91,8 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI= github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo= -github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA= -github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= +github.com/goccy/go-json v0.10.4 h1:JSwxQzIqKfmFX1swYPpUThQZp/Ka4wzJdK0LWVytLPM= +github.com/goccy/go-json v0.10.4/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/googleapis v1.4.1 h1:1Yx4Myt7BxzvUr5ldGSbwYiZG6t9wGBZ+8/fX3Wvtq0= @@ -107,8 +105,8 @@ github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17w github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ= +github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod 
h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -139,33 +137,27 @@ github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= -github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= +github.com/googleapis/enterprise-certificate-proxy v0.3.4 h1:XYIDZApgAnrN1c855gTgghdIA6Stxb52D5RnLI1SLyw= +github.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA= github.com/googleapis/gax-go/v2 v2.13.0 h1:yitjD5f7jQHhyDsnhKEBU52NdvvdSeGzlAnDPT0hH1s= github.com/googleapis/gax-go/v2 v2.13.0/go.mod h1:Z/fvTZXF8/uw7Xu5GuslPw+bplx6SS338j1Is2S+B7A= github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= github.com/grafana/dskit v0.0.0-20241115082728-f2a7eb3aa0e9 h1:Dx7+6aU/fhwD2vkMr0PUcyxGat1sjUssHAKQKaS7sDM= github.com/grafana/dskit v0.0.0-20241115082728-f2a7eb3aa0e9/go.mod h1:SPLNCARd4xdjCkue0O6hvuoveuS1dGJjDnfxYe405YQ= -github.com/grafana/gomemcache v0.0.0-20240229205252-cd6a66d6fb56 h1:X8IKQ0wu40wpvYcKfBcc5T4QnhdQjUhtUtB/1CY89lE= -github.com/grafana/gomemcache v0.0.0-20240229205252-cd6a66d6fb56/go.mod h1:PGk3RjYHpxMM8HFPhKKo+vve3DdlPUELZLSDEFehPuU= +github.com/grafana/gomemcache v0.0.0-20241016125027-0a5bcc5aef40 h1:1TeKhyS+pvzOeyLV1XPZsiqebnKky/AKS3pJNNbHVPo= +github.com/grafana/gomemcache v0.0.0-20241016125027-0a5bcc5aef40/go.mod h1:IGRj8oOoxwJbHBYl1+OhS9UjQR0dv6SQOep7HqmtyFU= github.com/grafana/pyroscope-go/godeltaprof v0.1.8 h1:iwOtYXeeVSAeYefJNaxDytgjKtUuKQbJqgAIjlnicKg= github.com/grafana/pyroscope-go/godeltaprof v0.1.8/go.mod h1:2+l7K7twW49Ct4wFluZD3tZ6e0SjanjcUUBPVD/UuGU= github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248= github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= -github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= -github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= -github.com/hashicorp/go-plugin v1.6.0 h1:wgd4KxHJTVGGqWBq4QPB1i5BZNEx9BR8+OFmHDmTk8A= -github.com/hashicorp/go-plugin v1.6.0/go.mod h1:lBS5MtSSBZk0SHc66KACcjjlU6WzEVP/8pwz68aMkCI= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE= -github.com/hashicorp/yamux v0.1.1/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= -github.com/jaegertracing/jaeger v1.57.0 h1:3wDtUUPs6NRYH7+d+y8MilDkLHdpPrVlQ2wbcsA62bs= -github.com/jaegertracing/jaeger v1.57.0/go.mod 
h1:p/1fxIU9hKHl7qEhKC72p2ZYVhvvZvNB73y6V7YyuTs= +github.com/jaegertracing/jaeger v1.63.0 h1:4p1lg4DyZ1rZl5gCt6nnQ0nllcXVLJ96sTSbd+rnOEo= +github.com/jaegertracing/jaeger v1.63.0/go.mod h1:ZCqGiUdcAPC8HwqQGOjl89HDNEqseArdWcH+yoZxRtk= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= @@ -181,8 +173,8 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM= -github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/klauspost/cpuid/v2 v2.2.9 h1:66ze0taIn2H33fBvCkXuv9BmCwDfafmiIVpKV9kKGuY= +github.com/klauspost/cpuid/v2 v2.2.9/go.mod h1:rqkxqrZ1EhYM9G+hXH7YdowN5R5RGN6NK4QwQ3WMXF8= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -191,10 +183,6 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= -github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= -github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= -github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= @@ -204,8 +192,6 @@ github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= github.com/minio/minio-go/v7 v7.0.81 h1:SzhMN0TQ6T/xSBu6Nvw3M5M8voM+Ht8RH3hE8S7zxaA= github.com/minio/minio-go/v7 v7.0.81/go.mod h1:84gmIilaX4zcvAWWzJ5Z1WI5axN+hAbM5w25xf8xvC0= -github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374mizHuIWj+OSJCajGr/phAmuMug9qIX3l9CflE= github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -219,18 +205,16 @@ github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+ 
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= -github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= -github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/gomega v1.24.0 h1:+0glovB9Jd6z3VR+ScSwQqXVTIfJcGA9UBM8yzQxhqg= github.com/onsi/gomega v1.24.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.102.0 h1:qsM5HhWpAfIMg8LdO4u+CHofu4UuCuJwg/M+ySO9uZA= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.102.0/go.mod h1:wBJlGy9Wx6s7AxIMcSne2sGw73e5ZUy1AQ/duYwpFf8= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.102.0 h1:4VQidhCgkJiBvBDMOukr5ixrf5uP66iW5Hb+CFsb+4E= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.102.0/go.mod h1:nMto9zkv0vD8YI3oGZFZS2Uu7k2oHt1d+xUHN/ofUYo= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.116.0 h1:xDbf946Zm0rTzWcYEyUfU0Ft2KthhaH4xrNm303vpbI= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.116.0/go.mod h1:yuIyOGmQJOn37u6NVfG8yOCzVvwboqnt+pjOSTvDeLo= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.116.0 h1:wNQ51LBQjPIW7ptgZOMaJT60CnMoP1t3kxgldR7uv9Q= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.116.0/go.mod h1:lx+O1oAnKYPg5KaFW5Ss1Xh+zvCDKZPm73MDAFjPgak= github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e h1:4cPxUYdgaGzZIT5/j0IfqOrrXmq6bG8AwvwisMXpdrg= github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e/go.mod h1:DYR5Eij8rJl8h7gblRrOZ8g0kW1umSpKqYIBTgeDtLo= github.com/opentracing-contrib/go-stdlib v1.0.0 h1:TBS7YuVotp8myLon4Pv7BtCBzOTo1DeZCld0Z63mW2w= @@ -240,8 +224,8 @@ github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+ github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/parquet-go/parquet-go v0.23.1-0.20241011155651-6446d1d0d2fe h1:oUJ5TPnrEK/z+/PeoLL+jCgfngAZIDMyhZASetRcYYg= github.com/parquet-go/parquet-go v0.23.1-0.20241011155651-6446d1d0d2fe/go.mod h1:OqBBRGBl7+llplCvDMql8dEKaDqjaFA/VAPw+OJiNiw= -github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= -github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= +github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M= +github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc= github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pires/go-proxyproto v0.7.0 h1:IukmRewDQFWC7kfnb66CSomk2q/seBuilHBYFwyq0Hs= @@ -253,26 +237,26 @@ github.com/pkg/errors 
v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= +github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= -github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/common v0.61.0 h1:3gv/GThfX0cV2lpO7gkTUwZru38mxevy90Bj8YFSRQQ= +github.com/prometheus/common v0.61.0/go.mod h1:zr29OCN/2BsJRaFwG8QOBr41D6kkchKbpeNH7pAjb/s= github.com/prometheus/exporter-toolkit v0.11.0 h1:yNTsuZ0aNCNFQ3aFTD2uhPOvr4iD7fdBvKPAEGkNf+g= github.com/prometheus/exporter-toolkit v0.11.0/go.mod h1:BVnENhnNecpwoTLiABx7mrPB/OLRIgN74qlQbV+FK1Q= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= -github.com/prometheus/prometheus v0.54.0 h1:6+VmEkohHcofl3W5LyRlhw1Lfm575w/aX6ZFyVAmzM0= -github.com/prometheus/prometheus v0.54.0/go.mod h1:xlLByHhk2g3ycakQGrMaU8K7OySZx98BzeCR99991NY= +github.com/prometheus/prometheus v0.54.1 h1:vKuwQNjnYN2/mDoWfHXDhAsz/68q/dQDb+YbcEqU7MQ= +github.com/prometheus/prometheus v0.54.1/go.mod h1:xlLByHhk2g3ycakQGrMaU8K7OySZx98BzeCR99991NY= github.com/redis/go-redis/v9 v9.6.1 h1:HHDteefn6ZkTtY5fGUE8tj8uy85AHk6zP7CpzIAM0y4= github.com/redis/go-redis/v9 v9.6.1/go.mod h1:0C0c6ycQsdpVNQpxb1njEQIqkx5UcsM8FJCQLgE9+RA= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/rs/xid v1.6.0 h1:fV591PaemRlL6JfRxGDEPl69wICngIQ3shQtzfy2gxU= github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0= github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= @@ -307,8 +291,9 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4/go.mod 
h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/uber-go/atomic v1.4.0 h1:yOuPqEq4ovnhEjpHmfFwsqBXDYbQeT6Nb0bwD6XnD5o= @@ -329,24 +314,29 @@ github.com/yuin/gopher-lua v0.0.0-20220504180219-658193537a64 h1:5mLPGnFdSsevFRF github.com/yuin/gopher-lua v0.0.0-20220504180219-658193537a64/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/collector/pdata v1.12.0 h1:Xx5VK1p4VO0md8MWm2icwC1MnJ7f8EimKItMWw46BmA= -go.opentelemetry.io/collector/pdata v1.12.0/go.mod h1:MYeB0MmMAxeM0hstCFrCqWLzdyeYySim2dG6pDT6nYI= -go.opentelemetry.io/collector/semconv v0.105.0 h1:8p6dZ3JfxFTjbY38d8xlQGB1TQ3nPUvs+D0RERniZ1g= -go.opentelemetry.io/collector/semconv v0.105.0/go.mod h1:yMVUCNoQPZVq/IPfrHrnntZTWsLf5YGZ7qwKulIl5hw= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0 h1:vS1Ao/R55RNV4O7TA2Qopok8yN+X0LIP6RVWLFkprck= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0/go.mod h1:BMsdeOxN04K0L5FNUBfjFdvwWGNe/rkmSwH4Aelu/X0= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 h1:ZIg3ZT/aQ7AfKqdwp7ECpOK6vHqquXXuyTjIO8ZdmPs= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0/go.mod h1:DQAwmETtZV00skUwgD6+0U89g80NKsJE3DCKeLLPQMI= -go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY= -go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE= -go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE= -go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY= -go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk= -go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0= -go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys= -go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A= -go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= -go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/collector v0.116.0 h1:Dscd6Nsnc7hjFQosO0SofcPQsXRfcj5N5PjQAslnmj4= +go.opentelemetry.io/collector/pdata v1.22.0 h1:3yhjL46NLdTMoP8rkkcE9B0pzjf2973crn0KKhX5UrI= +go.opentelemetry.io/collector/pdata v1.22.0/go.mod h1:nLLf6uDg8Kn5g3WNZwGyu8+kf77SwOqQvMTb5AXEbEY= +go.opentelemetry.io/collector/pdata/pprofile v0.116.0 h1:iE6lqkO7Hi6lTIIml1RI7yQ55CKqW12R2qHinwF5Zuk= +go.opentelemetry.io/collector/pdata/pprofile v0.116.0/go.mod h1:xQiPpjzIiXRFb+1fPxUy/3ygEZgo0Bu/xmLKOWu8vMQ= +go.opentelemetry.io/collector/semconv v0.116.0 
h1:63xCZomsKJAWmKGWD3lnORiE3WKW6AO4LjnzcHzGx3Y= +go.opentelemetry.io/collector/semconv v0.116.0/go.mod h1:N6XE8Q0JKgBN2fAhkUQtqK9LT7rEGR6+Wu/Rtbal1iI= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 h1:PS8wXpbyaDJQ2VDHHncMe9Vct0Zn1fEjpsjrLxGJoSc= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0/go.mod h1:HDBUsEjOuRC0EzKZ1bSaRGZWUBAzo+MhAcUUORSr4D0= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q= +go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw= +go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I= +go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ= +go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M= +go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM= +go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM= +go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s= +go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck= +go.opentelemetry.io/proto/otlp v1.4.0 h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg= +go.opentelemetry.io/proto/otlp v1.4.0/go.mod h1:PPBWZIP98o2ElSqI35IHfu7hIhSwvc5N38Jw8pXuGFY= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= @@ -361,15 +351,15 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM= -golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc= +golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f h1:XdNn9LlyWAhLVp6P/i8QYBW+hlyhrhei9uErw2B5GJo= +golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f/go.mod h1:D5SMRVC3C2/4+F/DB1wZsLRnSNimn2Sp/NPsCrsv8ak= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8= -golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= +golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -380,11 +370,11 @@ golang.org/x/net v0.0.0-20190921015927-1a5e07d1ff72/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.31.0 h1:68CPQngjLL0r2AlUKiSxtQFKvzRVbnzLwMUn5SzcLHo= -golang.org/x/net v0.31.0/go.mod h1:P4fl1q7dY2hnZFxEk4pPSkDHF+QqjitcnDjUQyMM+pM= +golang.org/x/net v0.32.0 h1:ZqPmj8Kzc+Y6e0+skZsuACbx+wzMgo5MQsJh9Qd6aYI= +golang.org/x/net v0.32.0/go.mod h1:CwU0IoeOlnQQWJ6ioyFrfRuomB8GKF6KbYXZVyeXNfs= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= -golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.24.0 h1:KTBBxWqUa0ykRPLtV69rRto9TLXcqYkeswu48x/gvNE= +golang.org/x/oauth2 v0.24.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -397,7 +387,6 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -414,8 +403,8 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg= -golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI= +golang.org/x/tools v0.27.0 h1:qEKojBykQkQ4EynWy4S8Weg69NumxKdn40Fce3uc/8o= +golang.org/x/tools v0.27.0/go.mod h1:sUi0ZgbwW9ZPAq26Ekut+weQPR5eIM6GQLQ1Yjm1H0Q= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -432,10 +421,10 @@ google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98 
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20240730163845-b1a4ccb954bf h1:OqdXDEakZCVtDiZTjcxfwbHPCT11ycCEsTKesBVKvyY= google.golang.org/genproto v0.0.0-20240730163845-b1a4ccb954bf/go.mod h1:mCr1K1c8kX+1iSBREvU3Juo11CB+QOEWxbRS01wWl5M= -google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f h1:b1Ln/PG8orm0SsBbHZWke8dDp2lrCD4jSmfglFpTZbk= -google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f/go.mod h1:AHT0dDg3SoMOgZGnZk29b5xTbPHMoEC8qthmBLJCpys= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf h1:liao9UHurZLtiEwBgT9LMOnKYsHze6eA6w1KQCMVN2Q= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= +google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 h1:CkkIfIt50+lT6NHAVoRYEyAvQGFM7xEwXUUywFvEb3Q= +google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576/go.mod h1:1R3kvZ1dtP3+4p4d3G8uJ8rFk/fWlScl38vanWACI08= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 h1:8ZmaLZE4XWrtU3MyClkYqqtl6Oegr3235h7jxsDyqCY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU= google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= @@ -443,8 +432,8 @@ google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyac google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/grpc v1.68.1 h1:oI5oTa11+ng8r8XMMN7jAOmWfPZWbYpCFaMUTACxkM0= +google.golang.org/grpc v1.68.1/go.mod h1:+q1XYFJjShcqn0QZHvCyeR4CXPA+llXIeUIfIe00waw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= diff --git a/cmd/tempo-serverless/lambda/go.mod b/cmd/tempo-serverless/lambda/go.mod index 36c2dcb017b..62e4a1079a7 100644 --- a/cmd/tempo-serverless/lambda/go.mod +++ b/cmd/tempo-serverless/lambda/go.mod @@ -6,24 +6,24 @@ require ( github.com/aws/aws-lambda-go v1.28.0 github.com/gogo/protobuf v1.3.2 github.com/grafana/tempo v0.0.0-00010101000000-000000000000 - github.com/stretchr/testify v1.9.0 + github.com/stretchr/testify v1.10.0 ) require ( cloud.google.com/go v0.115.0 // indirect - cloud.google.com/go/auth v0.7.3 // indirect - cloud.google.com/go/auth/oauth2adapt v0.2.3 // indirect - cloud.google.com/go/compute/metadata v0.5.0 // indirect + cloud.google.com/go/auth v0.9.9 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.6 // indirect + cloud.google.com/go/compute/metadata v0.5.2 // indirect cloud.google.com/go/iam v1.1.12 // indirect 
cloud.google.com/go/storage v1.41.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0 // indirect - github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect - github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 // indirect + github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.1 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.2.3 // indirect + github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b // indirect github.com/andybalholm/brotli v1.1.1 // indirect - github.com/apache/thrift v0.20.0 // indirect + github.com/apache/thrift v0.21.0 // indirect github.com/aws/aws-sdk-go v1.55.5 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash v1.1.0 // indirect @@ -35,36 +35,36 @@ require ( github.com/dustin/go-humanize v1.0.1 // indirect github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb // indirect github.com/felixge/httpsnoop v1.0.4 // indirect - github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/fsnotify/fsnotify v1.8.0 // indirect github.com/go-ini/ini v1.67.0 // indirect github.com/go-kit/log v0.2.1 // indirect github.com/go-logfmt/logfmt v0.6.0 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-redis/redis/v8 v8.11.5 // indirect - github.com/goccy/go-json v0.10.3 // indirect + github.com/goccy/go-json v0.10.4 // indirect github.com/gogo/googleapis v1.4.1 // indirect github.com/gogo/status v1.1.1 // indirect github.com/golang-jwt/jwt/v5 v5.2.1 // indirect - github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/golang/snappy v0.0.4 // indirect github.com/google/s2a-go v0.1.8 // indirect github.com/google/uuid v1.6.0 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect github.com/googleapis/gax-go/v2 v2.13.0 // indirect github.com/gorilla/mux v1.8.1 // indirect github.com/grafana/dskit v0.0.0-20241115082728-f2a7eb3aa0e9 // indirect - github.com/grafana/gomemcache v0.0.0-20240229205252-cd6a66d6fb56 // indirect + github.com/grafana/gomemcache v0.0.0-20241016125027-0a5bcc5aef40 // indirect github.com/grafana/pyroscope-go/godeltaprof v0.1.8 // indirect github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc // indirect github.com/hashicorp/hcl v1.0.0 // indirect - github.com/jaegertracing/jaeger v1.57.0 // indirect + github.com/jaegertracing/jaeger v1.63.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/klauspost/compress v1.17.11 // indirect - github.com/klauspost/cpuid/v2 v2.2.8 // indirect + github.com/klauspost/cpuid/v2 v2.2.9 // indirect github.com/kylelemons/godebug v1.1.0 // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mattn/go-runewidth v0.0.16 // indirect @@ -77,24 +77,24 @@ require ( github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect github.com/olekukonko/tablewriter v0.0.5 // 
indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.102.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.102.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.116.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.116.0 // indirect github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e // indirect github.com/opentracing-contrib/go-stdlib v1.0.0 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/parquet-go/parquet-go v0.23.1-0.20241011155651-6446d1d0d2fe // indirect - github.com/pelletier/go-toml/v2 v2.2.2 // indirect + github.com/pelletier/go-toml/v2 v2.2.3 // indirect github.com/pierrec/lz4/v4 v4.1.21 // indirect github.com/pires/go-proxyproto v0.7.0 // indirect github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/prometheus/client_golang v1.19.1 // indirect + github.com/prometheus/client_golang v1.20.5 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.55.0 // indirect + github.com/prometheus/common v0.61.0 // indirect github.com/prometheus/exporter-toolkit v0.11.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect - github.com/prometheus/prometheus v0.54.0 // indirect + github.com/prometheus/prometheus v0.54.1 // indirect github.com/rivo/uniseg v0.4.7 // indirect github.com/rs/xid v1.6.0 // indirect github.com/sagikazarmark/locafero v0.4.0 // indirect @@ -114,31 +114,32 @@ require ( github.com/willf/bitset v1.1.11 // indirect github.com/willf/bloom v2.0.3+incompatible // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/collector/pdata v1.12.0 // indirect - go.opentelemetry.io/collector/semconv v0.105.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 // indirect - go.opentelemetry.io/otel v1.31.0 // indirect - go.opentelemetry.io/otel/metric v1.31.0 // indirect - go.opentelemetry.io/otel/trace v1.31.0 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/collector/pdata v1.22.0 // indirect + go.opentelemetry.io/collector/semconv v0.116.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect + go.opentelemetry.io/otel v1.33.0 // indirect + go.opentelemetry.io/otel/metric v1.33.0 // indirect + go.opentelemetry.io/otel/trace v1.33.0 // indirect go.uber.org/atomic v1.11.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect golang.org/x/crypto v0.31.0 // indirect - golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 // indirect - golang.org/x/mod v0.19.0 // indirect - golang.org/x/net v0.31.0 // indirect - golang.org/x/oauth2 v0.21.0 // indirect + golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f // indirect + golang.org/x/mod v0.22.0 // indirect + golang.org/x/net v0.32.0 // indirect + golang.org/x/oauth2 v0.24.0 // indirect golang.org/x/sync v0.10.0 // indirect golang.org/x/sys v0.28.0 // indirect golang.org/x/text v0.21.0 // indirect golang.org/x/time v0.5.0 // indirect - golang.org/x/tools 
v0.23.0 // indirect + golang.org/x/tools v0.27.0 // indirect google.golang.org/api v0.190.0 // indirect google.golang.org/genproto v0.0.0-20240730163845-b1a4ccb954bf // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf // indirect - google.golang.org/grpc v1.65.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 // indirect + google.golang.org/grpc v1.68.1 // indirect google.golang.org/protobuf v1.35.2 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/cmd/tempo-serverless/lambda/go.sum b/cmd/tempo-serverless/lambda/go.sum index 505a30bdf53..95da0822a05 100644 --- a/cmd/tempo-serverless/lambda/go.sum +++ b/cmd/tempo-serverless/lambda/go.sum @@ -1,12 +1,12 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.115.0 h1:CnFSK6Xo3lDYRoBKEcAtia6VSC837/ZkJuRduSFnr14= cloud.google.com/go v0.115.0/go.mod h1:8jIM5vVgoAEoiVxQ/O4BFTfHqulPZgs/ufEzMcFMdWU= -cloud.google.com/go/auth v0.7.3 h1:98Vr+5jMaCZ5NZk6e/uBgf60phTk/XN84r8QEWB9yjY= -cloud.google.com/go/auth v0.7.3/go.mod h1:HJtWUx1P5eqjy/f6Iq5KeytNpbAcGolPhOgyop2LlzA= -cloud.google.com/go/auth/oauth2adapt v0.2.3 h1:MlxF+Pd3OmSudg/b1yZ5lJwoXCEaeedAguodky1PcKI= -cloud.google.com/go/auth/oauth2adapt v0.2.3/go.mod h1:tMQXOfZzFuNuUxOypHlQEXgdfX5cuhwU+ffUuXRJE8I= -cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY= -cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= +cloud.google.com/go/auth v0.9.9 h1:BmtbpNQozo8ZwW2t7QJjnrQtdganSdmqeIBxHxNkEZQ= +cloud.google.com/go/auth v0.9.9/go.mod h1:xxA5AqpDrvS+Gkmo9RqrGGRh6WSNKKOXhY3zNOr38tI= +cloud.google.com/go/auth/oauth2adapt v0.2.6 h1:V6a6XDu2lTwPZWOawrAa9HUK+DB2zfJyTuciBG5hFkU= +cloud.google.com/go/auth/oauth2adapt v0.2.6/go.mod h1:AlmsELtlEBnaNTL7jCj8VQFLy6mbZv0s4Q7NGBeQ5E8= +cloud.google.com/go/compute/metadata v0.5.2 h1:UxK4uu/Tn+I3p2dYWTfiX4wva7aYlKixAHn3fyqngqo= +cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k= cloud.google.com/go/iam v1.1.12 h1:JixGLimRrNGcxvJEQ8+clfLxPlbeZA6MuRJ+qJNQ5Xw= cloud.google.com/go/iam v1.1.12/go.mod h1:9LDX8J7dN5YRyzVHxwQzrQs9opFFqn0Mxs9nAeB+Hhg= cloud.google.com/go/storage v1.41.0 h1:RusiwatSu6lHeEXe3kglxakAmAbfV+rhtPqA6i8RBx0= @@ -19,30 +19,30 @@ github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.0 h1:+m0M/LFxN43KvUL github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.0/go.mod h1:PwOyop78lveYMRs6oCxjiVyBdyCgIYH6XHIVZO9/SFQ= github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY= github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.2.0 h1:Ma67P/GGprNwsslzEH6+Kb8nybI8jpDTm4Wmzu2ReK8= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.2.0/go.mod h1:c+Lifp3EDEamAkPVzMooRNOK6CZjNSdEnf1A7jsI9u4= -github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0 h1:gggzg0SUMs6SQbEw+3LoSsYf9YMjkupeAnHMX8O9mmY= -github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0/go.mod h1:+6KLcKIVgxoBDMqMO/Nvy7bZ9a0nbU3I1DtFQK3YvB4= 
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0 h1:AifHbc4mg0x9zW52WOpKbsHaDKuRhlI7TVl47thgQ70= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0/go.mod h1:T5RfihdXtBDxt1Ch2wobif3TvzTdumDy29kahv6AV9A= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.1 h1:AMf7YbZOZIW5b66cXNHMWWT/zkjhz5+a+k/3x40EO7E= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.1/go.mod h1:uwfk06ZBcvL/g4VHNjurPfVln9NMbsk2XIZxJ+hu81k= github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM= github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE= -github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU= -github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.3 h1:6LyjnnaLpcOKK0fbYisI+mb8CE7iNe7i89nMNQxFxs8= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.3/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM= github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 h1:t3eaIm0rUkzbrIewtiFmMK5RXHej2XnoXNhxVsAYUfg= -github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs= -github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a h1:HbKu58rmZpUGpz5+4FfNmIU+FmZg2P3Xaj2v2bfNWmk= -github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc= +github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b h1:mimo19zliBX/vSQ6PWWSL9lK8qwHozUj03+zLoEB8O0= +github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs= +github.com/alicebob/gopher-json v0.0.0-20230218143504-906a9b012302 h1:uvdUDbHQHO85qeSydJtItA4T55Pw6BtAejd0APRJOCE= +github.com/alicebob/gopher-json v0.0.0-20230218143504-906a9b012302/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc= github.com/alicebob/miniredis v2.5.0+incompatible h1:yBHoLpsyjupjz3NL3MhKMVkR41j82Yjf3KFv7ApYzUI= github.com/alicebob/miniredis/v2 v2.21.0 h1:CdmwIlKUWFBDS+4464GtQiQ0R1vpzOgu4Vnd74rBL7M= github.com/alicebob/miniredis/v2 v2.21.0/go.mod h1:XNqvJdQJv5mSuVMc0ynneafpnL/zv52acZ6kqeS0t88= github.com/andybalholm/brotli v1.1.1 h1:PR2pgnyFznKEugtsUo0xLdDop5SKXd5Qf5ysW+7XdTA= github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA= -github.com/apache/thrift v0.20.0 h1:631+KvYbsBZxmuJjYwhezVsrfc/TbqtZV4QcxOX1fOI= -github.com/apache/thrift v0.20.0/go.mod h1:hOk1BQqcp2OLzGsyVXdfMk7YFlMxK3aoEVhjD06QhB8= +github.com/apache/thrift v0.21.0 h1:tdPmh/ptjE1IJnhbhrcl2++TauVjy242rkV/UzJChnE= +github.com/apache/thrift v0.21.0/go.mod h1:W1H8aR/QRtYNvrPeFXBtobyRkd0/YVhTc6i07XIAgDw= github.com/aws/aws-lambda-go v1.28.0 h1:fZiik1PZqW2IyAN4rj+Y0UBaO1IDFlsNo9Zz/XnArK4= github.com/aws/aws-lambda-go v1.28.0/go.mod 
h1:jJmlefzPfGnckuHdXX7/80O3BvUUi12XOkbv4w9SGLU= github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU= @@ -76,14 +76,12 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb h1:IT4JYU7k4ikYg1SCxNI1/Tieq/NFvh6dzLdgi7eu0tM= github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb/go.mod h1:bH6Xx7IW64qjjJq8M2u4dxNaBiDfKK+z/3eGDpXEQhc= -github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= -github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M= +github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A= github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= @@ -97,8 +95,8 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI= github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo= -github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA= -github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= +github.com/goccy/go-json v0.10.4 h1:JSwxQzIqKfmFX1swYPpUThQZp/Ka4wzJdK0LWVytLPM= +github.com/goccy/go-json v0.10.4/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/googleapis v1.4.1 h1:1Yx4Myt7BxzvUr5ldGSbwYiZG6t9wGBZ+8/fX3Wvtq0= @@ -111,8 +109,8 @@ github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17w github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ= +github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod 
h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -143,33 +141,27 @@ github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= -github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= +github.com/googleapis/enterprise-certificate-proxy v0.3.4 h1:XYIDZApgAnrN1c855gTgghdIA6Stxb52D5RnLI1SLyw= +github.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA= github.com/googleapis/gax-go/v2 v2.13.0 h1:yitjD5f7jQHhyDsnhKEBU52NdvvdSeGzlAnDPT0hH1s= github.com/googleapis/gax-go/v2 v2.13.0/go.mod h1:Z/fvTZXF8/uw7Xu5GuslPw+bplx6SS338j1Is2S+B7A= github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= github.com/grafana/dskit v0.0.0-20241115082728-f2a7eb3aa0e9 h1:Dx7+6aU/fhwD2vkMr0PUcyxGat1sjUssHAKQKaS7sDM= github.com/grafana/dskit v0.0.0-20241115082728-f2a7eb3aa0e9/go.mod h1:SPLNCARd4xdjCkue0O6hvuoveuS1dGJjDnfxYe405YQ= -github.com/grafana/gomemcache v0.0.0-20240229205252-cd6a66d6fb56 h1:X8IKQ0wu40wpvYcKfBcc5T4QnhdQjUhtUtB/1CY89lE= -github.com/grafana/gomemcache v0.0.0-20240229205252-cd6a66d6fb56/go.mod h1:PGk3RjYHpxMM8HFPhKKo+vve3DdlPUELZLSDEFehPuU= +github.com/grafana/gomemcache v0.0.0-20241016125027-0a5bcc5aef40 h1:1TeKhyS+pvzOeyLV1XPZsiqebnKky/AKS3pJNNbHVPo= +github.com/grafana/gomemcache v0.0.0-20241016125027-0a5bcc5aef40/go.mod h1:IGRj8oOoxwJbHBYl1+OhS9UjQR0dv6SQOep7HqmtyFU= github.com/grafana/pyroscope-go/godeltaprof v0.1.8 h1:iwOtYXeeVSAeYefJNaxDytgjKtUuKQbJqgAIjlnicKg= github.com/grafana/pyroscope-go/godeltaprof v0.1.8/go.mod h1:2+l7K7twW49Ct4wFluZD3tZ6e0SjanjcUUBPVD/UuGU= github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248= github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= -github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= -github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= -github.com/hashicorp/go-plugin v1.6.0 h1:wgd4KxHJTVGGqWBq4QPB1i5BZNEx9BR8+OFmHDmTk8A= -github.com/hashicorp/go-plugin v1.6.0/go.mod h1:lBS5MtSSBZk0SHc66KACcjjlU6WzEVP/8pwz68aMkCI= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE= -github.com/hashicorp/yamux v0.1.1/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= github.com/hexops/gotextdiff v1.0.3/go.mod 
h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= -github.com/jaegertracing/jaeger v1.57.0 h1:3wDtUUPs6NRYH7+d+y8MilDkLHdpPrVlQ2wbcsA62bs= -github.com/jaegertracing/jaeger v1.57.0/go.mod h1:p/1fxIU9hKHl7qEhKC72p2ZYVhvvZvNB73y6V7YyuTs= +github.com/jaegertracing/jaeger v1.63.0 h1:4p1lg4DyZ1rZl5gCt6nnQ0nllcXVLJ96sTSbd+rnOEo= +github.com/jaegertracing/jaeger v1.63.0/go.mod h1:ZCqGiUdcAPC8HwqQGOjl89HDNEqseArdWcH+yoZxRtk= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= @@ -185,8 +177,8 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM= -github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/klauspost/cpuid/v2 v2.2.9 h1:66ze0taIn2H33fBvCkXuv9BmCwDfafmiIVpKV9kKGuY= +github.com/klauspost/cpuid/v2 v2.2.9/go.mod h1:rqkxqrZ1EhYM9G+hXH7YdowN5R5RGN6NK4QwQ3WMXF8= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -195,10 +187,6 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= -github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= -github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= -github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= @@ -208,8 +196,6 @@ github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= github.com/minio/minio-go/v7 v7.0.81 h1:SzhMN0TQ6T/xSBu6Nvw3M5M8voM+Ht8RH3hE8S7zxaA= github.com/minio/minio-go/v7 v7.0.81/go.mod h1:84gmIilaX4zcvAWWzJ5Z1WI5axN+hAbM5w25xf8xvC0= -github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374mizHuIWj+OSJCajGr/phAmuMug9qIX3l9CflE= github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/modern-go/concurrent 
v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -223,18 +209,16 @@ github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+ github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= -github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= -github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/gomega v1.24.0 h1:+0glovB9Jd6z3VR+ScSwQqXVTIfJcGA9UBM8yzQxhqg= github.com/onsi/gomega v1.24.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.102.0 h1:qsM5HhWpAfIMg8LdO4u+CHofu4UuCuJwg/M+ySO9uZA= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.102.0/go.mod h1:wBJlGy9Wx6s7AxIMcSne2sGw73e5ZUy1AQ/duYwpFf8= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.102.0 h1:4VQidhCgkJiBvBDMOukr5ixrf5uP66iW5Hb+CFsb+4E= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.102.0/go.mod h1:nMto9zkv0vD8YI3oGZFZS2Uu7k2oHt1d+xUHN/ofUYo= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.116.0 h1:xDbf946Zm0rTzWcYEyUfU0Ft2KthhaH4xrNm303vpbI= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.116.0/go.mod h1:yuIyOGmQJOn37u6NVfG8yOCzVvwboqnt+pjOSTvDeLo= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.116.0 h1:wNQ51LBQjPIW7ptgZOMaJT60CnMoP1t3kxgldR7uv9Q= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.116.0/go.mod h1:lx+O1oAnKYPg5KaFW5Ss1Xh+zvCDKZPm73MDAFjPgak= github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e h1:4cPxUYdgaGzZIT5/j0IfqOrrXmq6bG8AwvwisMXpdrg= github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e/go.mod h1:DYR5Eij8rJl8h7gblRrOZ8g0kW1umSpKqYIBTgeDtLo= github.com/opentracing-contrib/go-stdlib v1.0.0 h1:TBS7YuVotp8myLon4Pv7BtCBzOTo1DeZCld0Z63mW2w= @@ -244,8 +228,8 @@ github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+ github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/parquet-go/parquet-go v0.23.1-0.20241011155651-6446d1d0d2fe h1:oUJ5TPnrEK/z+/PeoLL+jCgfngAZIDMyhZASetRcYYg= github.com/parquet-go/parquet-go v0.23.1-0.20241011155651-6446d1d0d2fe/go.mod h1:OqBBRGBl7+llplCvDMql8dEKaDqjaFA/VAPw+OJiNiw= -github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= -github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= +github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M= +github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc= github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= github.com/pierrec/lz4/v4 
v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pires/go-proxyproto v0.7.0 h1:IukmRewDQFWC7kfnb66CSomk2q/seBuilHBYFwyq0Hs= @@ -257,26 +241,26 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= +github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= -github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/common v0.61.0 h1:3gv/GThfX0cV2lpO7gkTUwZru38mxevy90Bj8YFSRQQ= +github.com/prometheus/common v0.61.0/go.mod h1:zr29OCN/2BsJRaFwG8QOBr41D6kkchKbpeNH7pAjb/s= github.com/prometheus/exporter-toolkit v0.11.0 h1:yNTsuZ0aNCNFQ3aFTD2uhPOvr4iD7fdBvKPAEGkNf+g= github.com/prometheus/exporter-toolkit v0.11.0/go.mod h1:BVnENhnNecpwoTLiABx7mrPB/OLRIgN74qlQbV+FK1Q= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= -github.com/prometheus/prometheus v0.54.0 h1:6+VmEkohHcofl3W5LyRlhw1Lfm575w/aX6ZFyVAmzM0= -github.com/prometheus/prometheus v0.54.0/go.mod h1:xlLByHhk2g3ycakQGrMaU8K7OySZx98BzeCR99991NY= +github.com/prometheus/prometheus v0.54.1 h1:vKuwQNjnYN2/mDoWfHXDhAsz/68q/dQDb+YbcEqU7MQ= +github.com/prometheus/prometheus v0.54.1/go.mod h1:xlLByHhk2g3ycakQGrMaU8K7OySZx98BzeCR99991NY= github.com/redis/go-redis/v9 v9.6.1 h1:HHDteefn6ZkTtY5fGUE8tj8uy85AHk6zP7CpzIAM0y4= github.com/redis/go-redis/v9 v9.6.1/go.mod h1:0C0c6ycQsdpVNQpxb1njEQIqkx5UcsM8FJCQLgE9+RA= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/rs/xid v1.6.0 h1:fV591PaemRlL6JfRxGDEPl69wICngIQ3shQtzfy2gxU= github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= @@ -314,8 +298,9 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ 
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/uber-go/atomic v1.4.0 h1:yOuPqEq4ovnhEjpHmfFwsqBXDYbQeT6Nb0bwD6XnD5o= @@ -337,24 +322,29 @@ github.com/yuin/gopher-lua v0.0.0-20220504180219-658193537a64 h1:5mLPGnFdSsevFRF github.com/yuin/gopher-lua v0.0.0-20220504180219-658193537a64/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/collector/pdata v1.12.0 h1:Xx5VK1p4VO0md8MWm2icwC1MnJ7f8EimKItMWw46BmA= -go.opentelemetry.io/collector/pdata v1.12.0/go.mod h1:MYeB0MmMAxeM0hstCFrCqWLzdyeYySim2dG6pDT6nYI= -go.opentelemetry.io/collector/semconv v0.105.0 h1:8p6dZ3JfxFTjbY38d8xlQGB1TQ3nPUvs+D0RERniZ1g= -go.opentelemetry.io/collector/semconv v0.105.0/go.mod h1:yMVUCNoQPZVq/IPfrHrnntZTWsLf5YGZ7qwKulIl5hw= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0 h1:vS1Ao/R55RNV4O7TA2Qopok8yN+X0LIP6RVWLFkprck= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0/go.mod h1:BMsdeOxN04K0L5FNUBfjFdvwWGNe/rkmSwH4Aelu/X0= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 h1:ZIg3ZT/aQ7AfKqdwp7ECpOK6vHqquXXuyTjIO8ZdmPs= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0/go.mod h1:DQAwmETtZV00skUwgD6+0U89g80NKsJE3DCKeLLPQMI= -go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY= -go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE= -go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE= -go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY= -go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk= -go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0= -go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys= -go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A= -go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= -go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/collector v0.116.0 h1:Dscd6Nsnc7hjFQosO0SofcPQsXRfcj5N5PjQAslnmj4= +go.opentelemetry.io/collector/pdata v1.22.0 h1:3yhjL46NLdTMoP8rkkcE9B0pzjf2973crn0KKhX5UrI= +go.opentelemetry.io/collector/pdata v1.22.0/go.mod h1:nLLf6uDg8Kn5g3WNZwGyu8+kf77SwOqQvMTb5AXEbEY= 
+go.opentelemetry.io/collector/pdata/pprofile v0.116.0 h1:iE6lqkO7Hi6lTIIml1RI7yQ55CKqW12R2qHinwF5Zuk= +go.opentelemetry.io/collector/pdata/pprofile v0.116.0/go.mod h1:xQiPpjzIiXRFb+1fPxUy/3ygEZgo0Bu/xmLKOWu8vMQ= +go.opentelemetry.io/collector/semconv v0.116.0 h1:63xCZomsKJAWmKGWD3lnORiE3WKW6AO4LjnzcHzGx3Y= +go.opentelemetry.io/collector/semconv v0.116.0/go.mod h1:N6XE8Q0JKgBN2fAhkUQtqK9LT7rEGR6+Wu/Rtbal1iI= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 h1:PS8wXpbyaDJQ2VDHHncMe9Vct0Zn1fEjpsjrLxGJoSc= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0/go.mod h1:HDBUsEjOuRC0EzKZ1bSaRGZWUBAzo+MhAcUUORSr4D0= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q= +go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw= +go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I= +go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ= +go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M= +go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM= +go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM= +go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s= +go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck= +go.opentelemetry.io/proto/otlp v1.4.0 h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg= +go.opentelemetry.io/proto/otlp v1.4.0/go.mod h1:PPBWZIP98o2ElSqI35IHfu7hIhSwvc5N38Jw8pXuGFY= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= @@ -369,15 +359,15 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM= -golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc= +golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f h1:XdNn9LlyWAhLVp6P/i8QYBW+hlyhrhei9uErw2B5GJo= +golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f/go.mod h1:D5SMRVC3C2/4+F/DB1wZsLRnSNimn2Sp/NPsCrsv8ak= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8= -golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.22.0 
h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= +golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -388,11 +378,11 @@ golang.org/x/net v0.0.0-20190921015927-1a5e07d1ff72/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.31.0 h1:68CPQngjLL0r2AlUKiSxtQFKvzRVbnzLwMUn5SzcLHo= -golang.org/x/net v0.31.0/go.mod h1:P4fl1q7dY2hnZFxEk4pPSkDHF+QqjitcnDjUQyMM+pM= +golang.org/x/net v0.32.0 h1:ZqPmj8Kzc+Y6e0+skZsuACbx+wzMgo5MQsJh9Qd6aYI= +golang.org/x/net v0.32.0/go.mod h1:CwU0IoeOlnQQWJ6ioyFrfRuomB8GKF6KbYXZVyeXNfs= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= -golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.24.0 h1:KTBBxWqUa0ykRPLtV69rRto9TLXcqYkeswu48x/gvNE= +golang.org/x/oauth2 v0.24.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -405,7 +395,6 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -422,8 +411,8 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg= -golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI= +golang.org/x/tools v0.27.0 h1:qEKojBykQkQ4EynWy4S8Weg69NumxKdn40Fce3uc/8o= +golang.org/x/tools v0.27.0/go.mod h1:sUi0ZgbwW9ZPAq26Ekut+weQPR5eIM6GQLQ1Yjm1H0Q= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors 
v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -440,10 +429,10 @@ google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98 google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20240730163845-b1a4ccb954bf h1:OqdXDEakZCVtDiZTjcxfwbHPCT11ycCEsTKesBVKvyY= google.golang.org/genproto v0.0.0-20240730163845-b1a4ccb954bf/go.mod h1:mCr1K1c8kX+1iSBREvU3Juo11CB+QOEWxbRS01wWl5M= -google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f h1:b1Ln/PG8orm0SsBbHZWke8dDp2lrCD4jSmfglFpTZbk= -google.golang.org/genproto/googleapis/api v0.0.0-20240725223205-93522f1f2a9f/go.mod h1:AHT0dDg3SoMOgZGnZk29b5xTbPHMoEC8qthmBLJCpys= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf h1:liao9UHurZLtiEwBgT9LMOnKYsHze6eA6w1KQCMVN2Q= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= +google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 h1:CkkIfIt50+lT6NHAVoRYEyAvQGFM7xEwXUUywFvEb3Q= +google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576/go.mod h1:1R3kvZ1dtP3+4p4d3G8uJ8rFk/fWlScl38vanWACI08= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 h1:8ZmaLZE4XWrtU3MyClkYqqtl6Oegr3235h7jxsDyqCY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU= google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= @@ -451,8 +440,8 @@ google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyac google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/grpc v1.68.1 h1:oI5oTa11+ng8r8XMMN7jAOmWfPZWbYpCFaMUTACxkM0= +google.golang.org/grpc v1.68.1/go.mod h1:+q1XYFJjShcqn0QZHvCyeR4CXPA+llXIeUIfIe00waw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= diff --git a/example/docker-compose/alloy/docker-compose.yaml b/example/docker-compose/alloy/docker-compose.yaml index e732cb3287e..3a8f1b7777a 100644 --- a/example/docker-compose/alloy/docker-compose.yaml +++ b/example/docker-compose/alloy/docker-compose.yaml @@ -19,11 +19,8 @@ services: - ../shared/tempo.yaml:/etc/tempo.yaml - ./tempo-data:/var/tempo ports: - - "14268" # jaeger ingest - - "3200" # tempo - - "4317" # otlp grpc - - "4318" # otlp http - - "9411" # zipkin + - "3200" # tempo - "4317" # otlp grpc depends_on: - init diff 
--git a/example/docker-compose/cross-cluster/tempo-distributed-a.yaml b/example/docker-compose/cross-cluster/tempo-distributed-a.yaml index 9175545b8dc..83d11f4feeb 100644 --- a/example/docker-compose/cross-cluster/tempo-distributed-a.yaml +++ b/example/docker-compose/cross-cluster/tempo-distributed-a.yaml @@ -2,19 +2,11 @@ server: http_listen_port: 3200 distributor: - receivers: # this configuration will listen on all ports and protocols that tempo is capable of. - jaeger: # the receives all come from the OpenTelemetry collector. more configuration information can - protocols: # be found there: https://github.com/open-telemetry/opentelemetry-collector/tree/main/receiver - thrift_http: # - grpc: # for a production deployment you should only enable the receivers you need! - thrift_binary: - thrift_compact: - zipkin: + receivers: otlp: protocols: - http: grpc: - opencensus: + endpoint: "0.0.0.0:4317" ingester: max_block_duration: 5m # cut the headblock when this much time passes. this is being set for demo purposes and should probably be left alone normally diff --git a/example/docker-compose/cross-cluster/tempo-distributed-b.yaml b/example/docker-compose/cross-cluster/tempo-distributed-b.yaml index c90ed977346..35a2cfed8c0 100644 --- a/example/docker-compose/cross-cluster/tempo-distributed-b.yaml +++ b/example/docker-compose/cross-cluster/tempo-distributed-b.yaml @@ -2,19 +2,11 @@ server: http_listen_port: 3200 distributor: - receivers: # this configuration will listen on all ports and protocols that tempo is capable of. - jaeger: # the receives all come from the OpenTelemetry collector. more configuration information can - protocols: # be found there: https://github.com/open-telemetry/opentelemetry-collector/tree/main/receiver - thrift_http: # - grpc: # for a production deployment you should only enable the receivers you need! - thrift_binary: - thrift_compact: - zipkin: + receivers: otlp: protocols: - http: grpc: - opencensus: + endpoint: "0.0.0.0:4317" ingester: max_block_duration: 5m # cut the headblock when this much time passes. this is being set for demo purposes and should probably be left alone normally diff --git a/example/docker-compose/debug/docker-compose.yaml b/example/docker-compose/debug/docker-compose.yaml index b6d7a60397a..39fd958be38 100644 --- a/example/docker-compose/debug/docker-compose.yaml +++ b/example/docker-compose/debug/docker-compose.yaml @@ -21,12 +21,9 @@ services: environment: - DEBUG_BLOCK=0 ports: - - "14268:14268" # jaeger ingest - - "3200:3200" # tempo - - "4317:4317" # otlp grpc - - "4318:4318" # otlp http - - "9411:9411" # zipkin - - "2345:2345" # delve debug server + - "3200:3200" # tempo + - "4317:4317" # otlp grpc + - "2345:2345" # delve debug server depends_on: - init diff --git a/example/docker-compose/distributed/tempo-distributed.yaml b/example/docker-compose/distributed/tempo-distributed.yaml index b653c473560..767d4f38870 100644 --- a/example/docker-compose/distributed/tempo-distributed.yaml +++ b/example/docker-compose/distributed/tempo-distributed.yaml @@ -2,19 +2,11 @@ server: http_listen_port: 3200 distributor: - receivers: # this configuration will listen on all ports and protocols that tempo is capable of. - jaeger: # the receives all come from the OpenTelemetry collector. more configuration information can - protocols: # be found there: https://github.com/open-telemetry/opentelemetry-collector/tree/main/receiver - thrift_http: # - grpc: # for a production deployment you should only enable the receivers you need! 
- thrift_binary: - thrift_compact: - zipkin: + receivers: otlp: protocols: - http: grpc: - opencensus: + endpoint: "0.0.0.0:4317" ingester: max_block_duration: 5m # cut the headblock when this much time passes. this is being set for demo purposes and should probably be left alone normally @@ -66,4 +58,4 @@ overrides: defaults: metrics_generator: processors: ['service-graphs', 'span-metrics', 'local-blocks'] - generate_native_histograms: both \ No newline at end of file + generate_native_histograms: both diff --git a/example/docker-compose/local/tempo.yaml b/example/docker-compose/local/tempo.yaml index 822a60c1259..9cacb4c45d1 100644 --- a/example/docker-compose/local/tempo.yaml +++ b/example/docker-compose/local/tempo.yaml @@ -33,15 +33,23 @@ distributor: jaeger: # the receives all come from the OpenTelemetry collector. more configuration information can protocols: # be found there: https://github.com/open-telemetry/opentelemetry-collector/tree/main/receiver thrift_http: # - grpc: # for a production deployment you should only enable the receivers you need! + endpoint: "tempo:14268" # for a production deployment you should only enable the receivers you need! + grpc: + endpoint: "tempo:14250" thrift_binary: + endpoint: "tempo:6832" thrift_compact: + endpoint: "tempo:6831" zipkin: + endpoint: "tempo:9411" otlp: protocols: - http: grpc: + endpoint: "tempo:4317" + http: + endpoint: "tempo:4318" opencensus: + endpoint: "tempo:55678" ingester: max_block_duration: 5m # cut the headblock when this much time passes. this is being set for demo purposes and should probably be left alone normally diff --git a/example/docker-compose/multi-tenant/tempo.yaml b/example/docker-compose/multi-tenant/tempo.yaml index 655f56b04b8..bbece3ad60a 100644 --- a/example/docker-compose/multi-tenant/tempo.yaml +++ b/example/docker-compose/multi-tenant/tempo.yaml @@ -17,19 +17,11 @@ query_frontend: duration_slo: 5s distributor: - receivers: # this configuration will listen on all ports and protocols that tempo is capable of. - jaeger: # the receives all come from the OpenTelemetry collector. more configuration information can - protocols: # be found there: https://github.com/open-telemetry/opentelemetry-collector/tree/main/receiver - thrift_http: # - grpc: # for a production deployment you should only enable the receivers you need! - thrift_binary: - thrift_compact: - zipkin: + receivers: otlp: protocols: - http: grpc: - opencensus: + endpoint: "tempo:4317" ingester: max_block_duration: 5m # cut the headblock when this much time passes. 
this is being set for demo purposes and should probably be left alone normally diff --git a/example/docker-compose/otel-collector-multitenant/docker-compose.yaml b/example/docker-compose/otel-collector-multitenant/docker-compose.yaml index b84c6bc2dd1..14b835f7c95 100644 --- a/example/docker-compose/otel-collector-multitenant/docker-compose.yaml +++ b/example/docker-compose/otel-collector-multitenant/docker-compose.yaml @@ -19,11 +19,8 @@ services: - ../shared/tempo.yaml:/etc/tempo.yaml - ./tempo-data:/var/tempo ports: - - "14268" # jaeger ingest - - "3200" # tempo - - "4317" # otlp grpc - - "4318" # otlp http - - "9411" # zipkin + - "3200" # tempo + - "4317" # otlp grpc depends_on: - init diff --git a/example/docker-compose/otel-collector/docker-compose.yaml b/example/docker-compose/otel-collector/docker-compose.yaml index c9c3fbedac5..687b63bbcd6 100644 --- a/example/docker-compose/otel-collector/docker-compose.yaml +++ b/example/docker-compose/otel-collector/docker-compose.yaml @@ -19,11 +19,8 @@ services: - ../shared/tempo.yaml:/etc/tempo.yaml - ./tempo-data:/var/tempo ports: - - "14268" # jaeger ingest - - "3200" # tempo - - "4317" # otlp grpc - - "4318" # otlp http - - "9411" # zipkin + - "3200" # tempo + - "4317" # otlp grpc depends_on: - init diff --git a/example/docker-compose/shared/tempo.yaml b/example/docker-compose/shared/tempo.yaml index da019fdec19..d8a53271d99 100644 --- a/example/docker-compose/shared/tempo.yaml +++ b/example/docker-compose/shared/tempo.yaml @@ -14,19 +14,11 @@ query_frontend: duration_slo: 5s distributor: - receivers: # this configuration will listen on all ports and protocols that tempo is capable of. - jaeger: # the receives all come from the OpenTelemetry collector. more configuration information can - protocols: # be found there: https://github.com/open-telemetry/opentelemetry-collector/tree/main/receiver - thrift_http: # - grpc: # for a production deployment you should only enable the receivers you need! - thrift_binary: - thrift_compact: - zipkin: + receivers: otlp: protocols: - http: grpc: - opencensus: + endpoint: "tempo:4317" ingester: max_block_duration: 5m # cut the headblock when this much time passes. 
this is being set for demo purposes and should probably be left alone normally diff --git a/go.mod b/go.mod index ba1b31ebe8d..14cd07d0d7f 100644 --- a/go.mod +++ b/go.mod @@ -28,46 +28,45 @@ require ( github.com/grafana/dskit v0.0.0-20241115082728-f2a7eb3aa0e9 github.com/grafana/e2e v0.1.1 github.com/hashicorp/go-hclog v1.6.3 // indirect - github.com/hashicorp/go-plugin v1.6.0 // indirect - github.com/jaegertracing/jaeger v1.57.0 + github.com/jaegertracing/jaeger v1.63.0 github.com/jedib0t/go-pretty/v6 v6.2.4 github.com/json-iterator/go v1.1.12 github.com/jsternberg/zap-logfmt v1.2.0 github.com/klauspost/compress v1.17.11 - github.com/minio/minio-go/v7 v7.0.81 + github.com/minio/minio-go/v7 v7.0.80 github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c github.com/olekukonko/tablewriter v0.0.5 - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.102.0 + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.116.0 github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e github.com/opentracing/opentracing-go v1.2.0 github.com/pierrec/lz4/v4 v4.1.21 github.com/pkg/errors v0.9.1 - github.com/prometheus/client_golang v1.19.1 + github.com/prometheus/client_golang v1.20.5 github.com/prometheus/client_model v0.6.1 - github.com/prometheus/common v0.55.0 - github.com/prometheus/prometheus v0.54.0 - github.com/prometheus/statsd_exporter v0.26.0 + github.com/prometheus/common v0.61.0 + github.com/prometheus/prometheus v0.54.1 + github.com/prometheus/statsd_exporter v0.26.1 github.com/segmentio/fasthash v1.0.3 github.com/sony/gobreaker v0.4.1 github.com/spf13/viper v1.19.0 - github.com/stretchr/testify v1.9.0 + github.com/stretchr/testify v1.10.0 github.com/uber-go/atomic v1.4.0 github.com/uber/jaeger-client-go v2.30.0+incompatible // indirect github.com/willf/bloom v2.0.3+incompatible go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/collector v0.102.1 - go.opentelemetry.io/collector/component v0.102.1 - go.opentelemetry.io/collector/confmap v0.102.1 - go.opentelemetry.io/collector/consumer v0.102.1 - go.opentelemetry.io/collector/pdata v1.12.0 - go.opentelemetry.io/collector/semconv v0.105.0 // indirect - go.opentelemetry.io/otel v1.31.0 - go.opentelemetry.io/otel/bridge/opencensus v1.27.0 - go.opentelemetry.io/otel/bridge/opentracing v1.26.0 + go.opentelemetry.io/collector v0.116.0 // indirect + go.opentelemetry.io/collector/component v0.116.0 + go.opentelemetry.io/collector/confmap v1.22.0 + go.opentelemetry.io/collector/consumer v1.22.0 + go.opentelemetry.io/collector/pdata v1.22.0 + go.opentelemetry.io/collector/semconv v0.116.0 // indirect + go.opentelemetry.io/otel v1.33.0 + go.opentelemetry.io/otel/bridge/opencensus v1.33.0 + go.opentelemetry.io/otel/bridge/opentracing v1.33.0 go.opentelemetry.io/otel/exporters/jaeger v1.17.0 - go.opentelemetry.io/otel/metric v1.31.0 - go.opentelemetry.io/otel/sdk v1.31.0 - go.opentelemetry.io/otel/trace v1.31.0 + go.opentelemetry.io/otel/metric v1.33.0 + go.opentelemetry.io/otel/sdk v1.33.0 + go.opentelemetry.io/otel/trace v1.33.0 go.uber.org/atomic v1.11.0 go.uber.org/goleak v1.3.0 go.uber.org/multierr v1.11.0 @@ -75,7 +74,7 @@ require ( golang.org/x/sync v0.10.0 golang.org/x/time v0.5.0 google.golang.org/api v0.188.0 - google.golang.org/grpc v1.65.0 + google.golang.org/grpc v1.68.1 google.golang.org/protobuf v1.35.2 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 @@ -84,64 +83,79 @@ require ( require ( github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 - github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0 + github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.1 github.com/evanphx/json-patch v5.9.0+incompatible - github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da + github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 github.com/googleapis/gax-go/v2 v2.13.0 - github.com/grafana/gomemcache v0.0.0-20240229205252-cd6a66d6fb56 + github.com/grafana/gomemcache v0.0.0-20241016125027-0a5bcc5aef40 github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter v0.102.0 - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.97.0 - github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.97.0 - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.102.0 - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver v0.102.0 - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver v0.102.0 - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.102.0 + github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter v0.116.0 + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.116.0 + github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.116.0 + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.116.0 + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver v0.116.0 + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver v0.116.0 + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.116.0 github.com/parquet-go/parquet-go v0.23.1-0.20241011155651-6446d1d0d2fe github.com/stoewer/parquet-cli v0.0.9 - go.opentelemetry.io/collector/config/configgrpc v0.102.1 - go.opentelemetry.io/collector/config/confighttp v0.102.1 - go.opentelemetry.io/collector/config/configtls v1.18.0 - go.opentelemetry.io/collector/exporter v0.102.1 - go.opentelemetry.io/collector/exporter/otlpexporter v0.102.1 - go.opentelemetry.io/collector/exporter/otlphttpexporter v0.102.1 - go.opentelemetry.io/collector/extension v0.102.1 - go.opentelemetry.io/collector/otelcol v0.102.1 - go.opentelemetry.io/collector/pdata/testdata v0.102.1 - go.opentelemetry.io/collector/processor v0.102.1 - go.opentelemetry.io/collector/receiver v0.102.1 - go.opentelemetry.io/collector/receiver/otlpreceiver v0.102.1 - go.opentelemetry.io/contrib/exporters/autoexport v0.53.0 - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0 - go.opentelemetry.io/otel/exporters/prometheus v0.50.0 - go.opentelemetry.io/otel/sdk/metric v1.28.0 - go.opentelemetry.io/proto/otlp v1.3.1 - golang.org/x/net v0.31.0 - golang.org/x/oauth2 v0.21.0 - google.golang.org/genproto/googleapis/rpc v0.0.0-20240711142825-46eb208f015d + go.opentelemetry.io/collector/client v1.22.0 + go.opentelemetry.io/collector/component/componenttest v0.116.0 + go.opentelemetry.io/collector/config/configgrpc v0.116.0 + go.opentelemetry.io/collector/config/confighttp v0.116.0 + go.opentelemetry.io/collector/config/configtls v1.22.0 + go.opentelemetry.io/collector/exporter 
v0.116.0 + go.opentelemetry.io/collector/exporter/exportertest v0.116.0 + go.opentelemetry.io/collector/exporter/otlpexporter v0.116.0 + go.opentelemetry.io/collector/exporter/otlphttpexporter v0.116.0 + go.opentelemetry.io/collector/extension v0.116.0 + go.opentelemetry.io/collector/otelcol v0.116.0 + go.opentelemetry.io/collector/pdata/testdata v0.116.0 + go.opentelemetry.io/collector/processor v0.116.0 + go.opentelemetry.io/collector/receiver v0.116.0 + go.opentelemetry.io/collector/receiver/otlpreceiver v0.116.0 + go.opentelemetry.io/contrib/exporters/autoexport v0.58.0 + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0 + go.opentelemetry.io/otel/exporters/prometheus v0.55.0 + go.opentelemetry.io/otel/sdk/metric v1.33.0 + go.opentelemetry.io/proto/otlp v1.4.0 + golang.org/x/net v0.32.0 + golang.org/x/oauth2 v0.24.0 + google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 ) require ( cloud.google.com/go v0.115.0 // indirect - cloud.google.com/go/auth v0.7.0 // indirect - cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect - cloud.google.com/go/compute/metadata v0.5.0 // indirect - cloud.google.com/go/iam v1.1.10 // indirect + cloud.google.com/go/auth v0.9.9 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.6 // indirect + cloud.google.com/go/compute/metadata v0.5.2 // indirect + cloud.google.com/go/iam v1.1.11 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect - github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect - github.com/IBM/sarama v1.43.2 // indirect - github.com/VividCortex/gohistogram v1.0.0 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.2.3 // indirect + github.com/IBM/sarama v1.43.3 // indirect github.com/alecthomas/participle/v2 v2.1.1 // indirect - github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 // indirect - github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a // indirect + github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b // indirect + github.com/alicebob/gopher-json v0.0.0-20230218143504-906a9b012302 // indirect github.com/andybalholm/brotli v1.1.1 // indirect - github.com/apache/thrift v0.20.0 // indirect + github.com/antchfx/xmlquery v1.4.2 // indirect + github.com/antchfx/xpath v1.3.2 // indirect + github.com/apache/thrift v0.21.0 // indirect github.com/armon/go-metrics v0.4.1 // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect + github.com/aws/aws-msk-iam-sasl-signer-go v1.0.0 // indirect github.com/aws/aws-sdk-go-v2 v1.22.2 // indirect github.com/aws/aws-sdk-go-v2/config v1.24.0 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.15.2 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.3 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.2 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.2 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.7.0 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.2 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.17.1 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.19.1 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.25.1 // indirect + github.com/aws/smithy-go v1.16.0 // indirect github.com/bboreham/go-loser 
v0.0.0-20230920113527-fcc2c21820a3 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect @@ -150,16 +164,18 @@ require ( github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/dennwc/varint v1.0.0 // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect - github.com/eapache/go-resiliency v1.6.0 // indirect + github.com/eapache/go-resiliency v1.7.0 // indirect github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 // indirect github.com/eapache/queue v1.1.0 // indirect + github.com/ebitengine/purego v0.8.1 // indirect github.com/edsrzf/mmap-go v1.1.0 // indirect - github.com/expr-lang/expr v1.16.2 // indirect + github.com/elastic/go-grok v0.3.1 // indirect + github.com/elastic/lunes v0.1.0 // indirect + github.com/expr-lang/expr v1.16.9 // indirect github.com/fatih/color v1.16.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect - github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/fsnotify/fsnotify v1.8.0 // indirect github.com/go-ini/ini v1.67.0 // indirect - github.com/go-kit/kit v0.13.0 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.2.6 // indirect @@ -172,18 +188,19 @@ require ( github.com/go-openapi/strfmt v0.23.0 // indirect github.com/go-openapi/swag v0.22.9 // indirect github.com/go-openapi/validate v0.23.0 // indirect - github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 // indirect + github.com/go-viper/mapstructure/v2 v2.2.1 // indirect github.com/gobwas/glob v0.2.3 // indirect - github.com/goccy/go-json v0.10.3 // indirect + github.com/goccy/go-json v0.10.4 // indirect github.com/gogo/googleapis v1.4.1 // indirect github.com/golang-jwt/jwt/v5 v5.2.1 // indirect github.com/google/btree v1.1.2 // indirect - github.com/google/s2a-go v0.1.7 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect - github.com/gorilla/handlers v1.5.1 // indirect + github.com/google/s2a-go v0.1.8 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect + github.com/gorilla/handlers v1.5.2 // indirect github.com/grafana/pyroscope-go/godeltaprof v0.1.8 // indirect github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect + github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 // indirect github.com/hashicorp/consul/api v1.29.2 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect @@ -199,7 +216,6 @@ require ( github.com/hashicorp/hcl v1.0.0 // indirect github.com/hashicorp/memberlist v0.5.0 // indirect github.com/hashicorp/serf v0.10.1 // indirect - github.com/hashicorp/yamux v0.1.1 // indirect github.com/iancoleman/strcase v0.3.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jcmturner/aescts/v2 v2.0.0 // indirect @@ -211,11 +227,12 @@ require ( github.com/josharian/intern v1.0.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect github.com/julienschmidt/httprouter v1.3.0 // indirect - github.com/klauspost/cpuid/v2 v2.2.8 // indirect + github.com/klauspost/cpuid/v2 v2.2.9 // indirect github.com/knadh/koanf v1.5.0 // indirect - github.com/knadh/koanf/v2 v2.1.1 // indirect + github.com/knadh/koanf/v2 v2.1.2 // indirect github.com/kylelemons/godebug v1.1.0 // indirect - github.com/lufia/plan9stats 
v0.0.0-20220913051719-115f729f3c8c // indirect + github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683 // indirect + github.com/magefile/mage v1.15.0 // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect @@ -225,59 +242,58 @@ require ( github.com/minio/md5-simd v1.1.2 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect - github.com/mitchellh/go-testing-interface v1.0.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/mostynb/go-grpc-compression v1.2.3 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect - github.com/oklog/run v1.1.0 // indirect github.com/oklog/ulid v1.3.1 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.102.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.102.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.102.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.97.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka v0.102.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.102.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal v0.102.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.102.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/azure v0.102.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus v0.102.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.102.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.116.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.116.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.116.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.116.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka v0.116.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.116.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal v0.116.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/kafka/topic v0.116.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.116.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/azure v0.116.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus v0.116.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.116.0 // indirect github.com/opentracing-contrib/go-stdlib v1.0.0 // indirect github.com/openzipkin/zipkin-go v0.4.3 // indirect - github.com/pelletier/go-toml/v2 v2.2.2 // indirect + github.com/pelletier/go-toml/v2 
v2.2.3 // indirect github.com/pires/go-proxyproto v0.7.0 // indirect github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect + github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect github.com/prometheus/alertmanager v0.27.0 // indirect github.com/prometheus/common/sigv4 v0.1.0 // indirect github.com/prometheus/exporter-toolkit v0.11.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect - github.com/relvacode/iso8601 v1.4.0 // indirect + github.com/relvacode/iso8601 v1.6.0 // indirect github.com/rivo/uniseg v0.4.7 // indirect - github.com/rs/cors v1.11.0 // indirect + github.com/rs/cors v1.11.1 // indirect github.com/rs/xid v1.6.0 // indirect github.com/sagikazarmark/locafero v0.4.0 // indirect github.com/sagikazarmark/slog-shim v0.1.0 // indirect github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect github.com/sercand/kuberesolver/v5 v5.1.1 // indirect - github.com/shirou/gopsutil/v3 v3.24.4 // indirect - github.com/shoenig/go-m1cpu v0.1.6 // indirect + github.com/shirou/gopsutil/v4 v4.24.11 // indirect github.com/soheilhy/cmux v0.1.5 // indirect github.com/sourcegraph/conc v0.3.0 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect github.com/spf13/afero v1.11.0 // indirect github.com/spf13/cast v1.6.0 // indirect - github.com/spf13/cobra v1.8.0 // indirect + github.com/spf13/cobra v1.8.1 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/stretchr/objx v0.5.2 // indirect github.com/subosito/gotenv v1.6.0 // indirect - github.com/tklauser/go-sysconf v0.3.12 // indirect - github.com/tklauser/numcpus v0.6.1 // indirect + github.com/tklauser/go-sysconf v0.3.14 // indirect + github.com/tklauser/numcpus v0.8.0 // indirect + github.com/ua-parser/uap-go v0.0.0-20241012191800-bbb40edc15aa // indirect github.com/uber/jaeger-lib v2.4.1+incompatible // indirect github.com/willf/bitset v1.1.11 // indirect github.com/xdg-go/pbkdf2 v1.0.0 // indirect @@ -289,45 +305,63 @@ require ( go.etcd.io/etcd/client/pkg/v3 v3.5.12 // indirect go.etcd.io/etcd/client/v3 v3.5.12 // indirect go.mongodb.org/mongo-driver v1.15.0 // indirect - go.opentelemetry.io/collector/config/configauth v0.102.1 // indirect - go.opentelemetry.io/collector/config/configcompression v1.9.0 // indirect - go.opentelemetry.io/collector/config/confignet v0.102.1 // indirect - go.opentelemetry.io/collector/config/configopaque v1.18.0 // indirect - go.opentelemetry.io/collector/config/configretry v0.102.1 // indirect - go.opentelemetry.io/collector/config/configtelemetry v0.102.1 // indirect - go.opentelemetry.io/collector/config/internal v0.102.1 // indirect - go.opentelemetry.io/collector/confmap/converter/expandconverter v0.102.1 // indirect - go.opentelemetry.io/collector/confmap/provider/envprovider v0.102.1 // indirect - go.opentelemetry.io/collector/confmap/provider/fileprovider v0.102.1 // indirect - go.opentelemetry.io/collector/confmap/provider/httpprovider v0.102.1 // indirect - go.opentelemetry.io/collector/confmap/provider/httpsprovider v0.102.1 // indirect - go.opentelemetry.io/collector/confmap/provider/yamlprovider v0.102.1 // indirect - go.opentelemetry.io/collector/connector v0.102.1 // indirect - go.opentelemetry.io/collector/extension/auth v0.102.1 // indirect - go.opentelemetry.io/collector/featuregate 
v1.9.0 // indirect - go.opentelemetry.io/collector/service v0.102.1 // indirect - go.opentelemetry.io/contrib/bridges/prometheus v0.53.0 // indirect - go.opentelemetry.io/contrib/config v0.7.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 // indirect - go.opentelemetry.io/contrib/propagators/b3 v1.27.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.4.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.28.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.28.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.4.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.28.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0 // indirect - go.opentelemetry.io/otel/log v0.4.0 // indirect - go.opentelemetry.io/otel/sdk/log v0.4.0 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/collector/component/componentstatus v0.116.0 // indirect + go.opentelemetry.io/collector/config/configauth v0.116.0 // indirect + go.opentelemetry.io/collector/config/configcompression v1.22.0 // indirect + go.opentelemetry.io/collector/config/confignet v1.22.0 // indirect + go.opentelemetry.io/collector/config/configopaque v1.22.0 // indirect + go.opentelemetry.io/collector/config/configretry v1.22.0 // indirect + go.opentelemetry.io/collector/config/configtelemetry v0.116.0 // indirect + go.opentelemetry.io/collector/config/internal v0.116.0 // indirect + go.opentelemetry.io/collector/connector v0.116.0 // indirect + go.opentelemetry.io/collector/connector/connectortest v0.116.0 // indirect + go.opentelemetry.io/collector/connector/xconnector v0.116.0 // indirect + go.opentelemetry.io/collector/consumer/consumererror v0.116.0 // indirect + go.opentelemetry.io/collector/consumer/consumererror/xconsumererror v0.116.0 // indirect + go.opentelemetry.io/collector/consumer/consumertest v0.116.0 // indirect + go.opentelemetry.io/collector/consumer/xconsumer v0.116.0 // indirect + go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper v0.116.0 // indirect + go.opentelemetry.io/collector/exporter/xexporter v0.116.0 // indirect + go.opentelemetry.io/collector/extension/auth v0.116.0 // indirect + go.opentelemetry.io/collector/extension/experimental/storage v0.116.0 // indirect + go.opentelemetry.io/collector/extension/extensioncapabilities v0.116.0 // indirect + go.opentelemetry.io/collector/extension/extensiontest v0.116.0 // indirect + go.opentelemetry.io/collector/featuregate v1.22.0 // indirect + go.opentelemetry.io/collector/internal/fanoutconsumer v0.116.0 // indirect + go.opentelemetry.io/collector/internal/sharedcomponent v0.116.0 // indirect + go.opentelemetry.io/collector/pdata/pprofile v0.116.0 // indirect + go.opentelemetry.io/collector/pipeline v0.116.0 // indirect + go.opentelemetry.io/collector/pipeline/xpipeline v0.116.0 // indirect + go.opentelemetry.io/collector/processor/processortest v0.116.0 // indirect + go.opentelemetry.io/collector/processor/xprocessor v0.116.0 // indirect + go.opentelemetry.io/collector/receiver/receivertest v0.116.0 // indirect + go.opentelemetry.io/collector/receiver/xreceiver v0.116.0 // indirect + go.opentelemetry.io/collector/service v0.116.0 // indirect + go.opentelemetry.io/contrib/bridges/otelzap v0.8.0 // indirect + go.opentelemetry.io/contrib/bridges/prometheus 
v0.58.0 // indirect + go.opentelemetry.io/contrib/config v0.10.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect + go.opentelemetry.io/contrib/propagators/b3 v1.33.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.9.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.9.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.33.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.33.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.9.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.33.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.33.0 // indirect + go.opentelemetry.io/otel/log v0.9.0 // indirect + go.opentelemetry.io/otel/sdk/log v0.9.0 // indirect golang.org/x/crypto v0.31.0 // indirect - golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 // indirect - golang.org/x/mod v0.19.0 // indirect + golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f // indirect + golang.org/x/mod v0.22.0 // indirect golang.org/x/sys v0.28.0 // indirect golang.org/x/text v0.21.0 // indirect - golang.org/x/tools v0.23.0 // indirect - gonum.org/v1/gonum v0.15.0 // indirect + golang.org/x/tools v0.27.0 // indirect + gonum.org/v1/gonum v0.15.1 // indirect google.golang.org/genproto v0.0.0-20240708141625-4ad9e859172b // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240711142825-46eb208f015d // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 // indirect gopkg.in/ini.v1 v1.67.0 // indirect k8s.io/apimachinery v0.29.3 // indirect k8s.io/client-go v0.29.3 // indirect diff --git a/go.sum b/go.sum index e9b4977c292..14e76a2b8b5 100644 --- a/go.sum +++ b/go.sum @@ -15,22 +15,22 @@ cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOY cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= cloud.google.com/go v0.115.0 h1:CnFSK6Xo3lDYRoBKEcAtia6VSC837/ZkJuRduSFnr14= cloud.google.com/go v0.115.0/go.mod h1:8jIM5vVgoAEoiVxQ/O4BFTfHqulPZgs/ufEzMcFMdWU= -cloud.google.com/go/auth v0.7.0 h1:kf/x9B3WTbBUHkC+1VS8wwwli9TzhSt0vSTVBmMR8Ts= -cloud.google.com/go/auth v0.7.0/go.mod h1:D+WqdrpcjmiCgWrXmLLxOVq1GACoE36chW6KXoEvuIw= -cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4= -cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q= +cloud.google.com/go/auth v0.9.9 h1:BmtbpNQozo8ZwW2t7QJjnrQtdganSdmqeIBxHxNkEZQ= +cloud.google.com/go/auth v0.9.9/go.mod h1:xxA5AqpDrvS+Gkmo9RqrGGRh6WSNKKOXhY3zNOr38tI= +cloud.google.com/go/auth/oauth2adapt v0.2.6 h1:V6a6XDu2lTwPZWOawrAa9HUK+DB2zfJyTuciBG5hFkU= +cloud.google.com/go/auth/oauth2adapt v0.2.6/go.mod h1:AlmsELtlEBnaNTL7jCj8VQFLy6mbZv0s4Q7NGBeQ5E8= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= 
-cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY= -cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= +cloud.google.com/go/compute/metadata v0.5.2 h1:UxK4uu/Tn+I3p2dYWTfiX4wva7aYlKixAHn3fyqngqo= +cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/iam v1.1.10 h1:ZSAr64oEhQSClwBL670MsJAW5/RLiC6kfw3Bqmd5ZDI= -cloud.google.com/go/iam v1.1.10/go.mod h1:iEgMq62sg8zx446GCaijmA2Miwg5o3UbO+nI47WHJps= +cloud.google.com/go/iam v1.1.11 h1:0mQ8UKSfdHLut6pH9FM3bI55KWR46ketn0PuXleDyxw= +cloud.google.com/go/iam v1.1.11/go.mod h1:biXoiLWYIKntto2joP+62sd9uW5EpkZmKIvfNcTWlnQ= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= @@ -55,14 +55,14 @@ github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0/go.mod h1:QyiQdW4f4/BIfB8ZutZ2s+28RAgfa/pT+zS++ZHyM1I= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 h1:bXwSugBiSbgtz7rOtbfGf+woewp4f06orW9OP5BjHLA= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0/go.mod h1:Y/HgrePTmGy9HjdSGTqZNa+apUpTVIEVKXJyARP2lrk= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.2.0 h1:Ma67P/GGprNwsslzEH6+Kb8nybI8jpDTm4Wmzu2ReK8= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.2.0/go.mod h1:c+Lifp3EDEamAkPVzMooRNOK6CZjNSdEnf1A7jsI9u4= -github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0 h1:gggzg0SUMs6SQbEw+3LoSsYf9YMjkupeAnHMX8O9mmY= -github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0/go.mod h1:+6KLcKIVgxoBDMqMO/Nvy7bZ9a0nbU3I1DtFQK3YvB4= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0 h1:AifHbc4mg0x9zW52WOpKbsHaDKuRhlI7TVl47thgQ70= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0/go.mod h1:T5RfihdXtBDxt1Ch2wobif3TvzTdumDy29kahv6AV9A= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.1 h1:AMf7YbZOZIW5b66cXNHMWWT/zkjhz5+a+k/3x40EO7E= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.1/go.mod h1:uwfk06ZBcvL/g4VHNjurPfVln9NMbsk2XIZxJ+hu81k= github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM= github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE= -github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU= -github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.3 h1:6LyjnnaLpcOKK0fbYisI+mb8CE7iNe7i89nMNQxFxs8= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.3/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod 
h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/Code-Hex/go-generics-cache v1.5.1 h1:6vhZGc5M7Y/YD8cIUcY8kcuQLB4cHR7U+0KMqAA0KcU= @@ -70,14 +70,12 @@ github.com/Code-Hex/go-generics-cache v1.5.1/go.mod h1:qxcC9kRVrct9rHeiYpFWSoW1v github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM= github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= -github.com/IBM/sarama v1.43.2 h1:HABeEqRUh32z8yzY2hGB/j8mHSzC/HA9zlEjqFNCzSw= -github.com/IBM/sarama v1.43.2/go.mod h1:Kyo4WkF24Z+1nz7xeVUFWIuKVV8RS3wM8mkvPKMdXFQ= -github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= -github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/IBM/sarama v1.43.3 h1:Yj6L2IaNvb2mRBop39N7mmJAHBVY3dTPncr3qGVkxPA= +github.com/IBM/sarama v1.43.3/go.mod h1:FVIRaLrhK3Cla/9FfRF5X9Zua2KpS3SYIXxhac1H+FQ= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE= -github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= github.com/alecthomas/assert/v2 v2.3.0 h1:mAsH2wmvjsuvyBvAmCtm7zFsBlb8mIHx5ySLVdDZXL0= github.com/alecthomas/assert/v2 v2.3.0/go.mod h1:pXcQ2Asjp247dahGEmsZ6ru0UVwnkhktn7S0bBDLxvQ= github.com/alecthomas/kong v0.8.0 h1:ryDCzutfIqJPnNn0omnrgHLbAggDQM2VWHikE1xqK7s= @@ -91,17 +89,22 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 h1:t3eaIm0rUkzbrIewtiFmMK5RXHej2XnoXNhxVsAYUfg= -github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs= -github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a h1:HbKu58rmZpUGpz5+4FfNmIU+FmZg2P3Xaj2v2bfNWmk= +github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b h1:mimo19zliBX/vSQ6PWWSL9lK8qwHozUj03+zLoEB8O0= +github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs= github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc= +github.com/alicebob/gopher-json v0.0.0-20230218143504-906a9b012302 h1:uvdUDbHQHO85qeSydJtItA4T55Pw6BtAejd0APRJOCE= +github.com/alicebob/gopher-json v0.0.0-20230218143504-906a9b012302/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc= github.com/alicebob/miniredis/v2 v2.21.0 h1:CdmwIlKUWFBDS+4464GtQiQ0R1vpzOgu4Vnd74rBL7M= github.com/alicebob/miniredis/v2 v2.21.0/go.mod h1:XNqvJdQJv5mSuVMc0ynneafpnL/zv52acZ6kqeS0t88= github.com/andybalholm/brotli v1.1.1 h1:PR2pgnyFznKEugtsUo0xLdDop5SKXd5Qf5ysW+7XdTA= github.com/andybalholm/brotli 
v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA=
+github.com/antchfx/xmlquery v1.4.2 h1:MZKd9+wblwxfQ1zd1AdrTsqVaMjMCwow3IqkCSe00KA=
+github.com/antchfx/xmlquery v1.4.2/go.mod h1:QXhvf5ldTuGqhd1SHNvvtlhhdQLks4dD0awIVhXIDTA=
+github.com/antchfx/xpath v1.3.2 h1:LNjzlsSjinu3bQpw9hWMY9ocB80oLOWuQqFvO6xt51U=
+github.com/antchfx/xpath v1.3.2/go.mod h1:i54GszH55fYfBmoZXapTHN8T8tkcHfRgLyVwwqzXNcs=
 github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
-github.com/apache/thrift v0.20.0 h1:631+KvYbsBZxmuJjYwhezVsrfc/TbqtZV4QcxOX1fOI=
-github.com/apache/thrift v0.20.0/go.mod h1:hOk1BQqcp2OLzGsyVXdfMk7YFlMxK3aoEVhjD06QhB8=
+github.com/apache/thrift v0.21.0 h1:tdPmh/ptjE1IJnhbhrcl2++TauVjy242rkV/UzJChnE=
+github.com/apache/thrift v0.21.0/go.mod h1:W1H8aR/QRtYNvrPeFXBtobyRkd0/YVhTc6i07XIAgDw=
 github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
 github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
 github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
@@ -110,6 +113,8 @@ github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj
 github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
 github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
 github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
+github.com/aws/aws-msk-iam-sasl-signer-go v1.0.0 h1:UyjtGmO0Uwl/K+zpzPwLoXzMhcN9xmnR2nrqJoBrg3c=
+github.com/aws/aws-msk-iam-sasl-signer-go v1.0.0/go.mod h1:TJAXuFs2HcMib3sN5L0gUC+Q01Qvy3DemvA55WuC+iA=
 github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
 github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU=
 github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
@@ -155,8 +160,6 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce
 github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
 github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
-github.com/bufbuild/protocompile v0.4.0 h1:LbFKd2XowZvQ/kajzguUp2DC9UEIQhIq77fZZlaQsNA=
-github.com/bufbuild/protocompile v0.4.0/go.mod h1:3v93+mbWn/v3xzN+31nwkJfrEpAUwp+BagBSZWx+TP8=
 github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
 github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
@@ -175,14 +178,14 @@ github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
 github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
 github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
-github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b h1:ga8SEFjZ60pxLcmhnThWgvH2wg8376yUJmPhEH4H3kw=
-github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
+github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 h1:QVw89YDxXxEe+l8gU8ETbOasdwEV+avkR75ZzsVV9WI=
+github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
 github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
 github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
 github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
 github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
 github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
-github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
 github.com/cristalhq/hedgedhttp v0.9.1 h1:g68L9cf8uUyQKQJwciD0A1Vgbsz+QgCjuB1I8FAsCDs=
 github.com/cristalhq/hedgedhttp v0.9.1/go.mod h1:XkqWU6qVMutbhW68NnzjWrGtH8NUx1UfYqGYtHVKIsI=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -195,10 +198,10 @@ github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/r
 github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
 github.com/digitalocean/godo v1.118.0 h1:lkzGFQmACrVCp7UqH1sAi4JK/PWwlc5aaxubgorKmC4=
 github.com/digitalocean/godo v1.118.0/go.mod h1:Vk0vpCot2HOAJwc5WE8wljZGtJ3ZtWIc8MQ8rF38sdo=
-github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
-github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
-github.com/docker/docker v27.0.3+incompatible h1:aBGI9TeQ4MPlhquTQKq9XbK79rKFVwXNUAYz9aXyEBE=
-github.com/docker/docker v27.0.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
+github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
+github.com/docker/docker v27.3.1+incompatible h1:KttF0XoteNTicmUtBO0L2tP+J7FGRFTjaEF4k6WdhfI=
+github.com/docker/docker v27.3.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
 github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
 github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
@@ -208,31 +211,37 @@ github.com/drone/envsubst v1.0.3/go.mod h1:N2jZmlMufstn1KEqvbHjw40h1KyTmnVzHcSc9
 github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
 github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
 github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
-github.com/eapache/go-resiliency v1.6.0 h1:CqGDTLtpwuWKn6Nj3uNUdflaq+/kIPsg0gfNzHton30=
-github.com/eapache/go-resiliency v1.6.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho=
+github.com/eapache/go-resiliency v1.7.0 h1:n3NRTnBn5N0Cbi/IeOHuQn9s2UwVUH7Ga0ZWcP+9JTA=
+github.com/eapache/go-resiliency v1.7.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho=
 github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 h1:Oy0F4ALJ04o5Qqpdz8XLIpNA3WM/iSIXqxtqo7UGVws=
 github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3/go.mod h1:YvSRo5mw33fLEx1+DlK6L2VV43tJt5Eyel9n9XBcR+0=
 github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc=
 github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
+github.com/ebitengine/purego v0.8.1 h1:sdRKd6plj7KYW33EH5As6YKfe8m9zbN9JMrOjNVF/BE=
+github.com/ebitengine/purego v0.8.1/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
 github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ=
 github.com/edsrzf/mmap-go v1.1.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q=
 github.com/efficientgo/tools/core v0.0.0-20220225185207-fe763185946b h1:ZHiD4/yE4idlbqvAO6iYCOYRzOMRpxkW+FKasRA3tsQ=
 github.com/efficientgo/tools/core v0.0.0-20220225185207-fe763185946b/go.mod h1:OmVcnJopJL8d3X3sSXTiypGoUSgFq1aDGmlrdi9dn/M=
+github.com/elastic/go-grok v0.3.1 h1:WEhUxe2KrwycMnlvMimJXvzRa7DoByJB4PVUIE1ZD/U=
+github.com/elastic/go-grok v0.3.1/go.mod h1:n38ls8ZgOboZRgKcjMY8eFeZFMmcL9n2lP0iHhIDk64=
+github.com/elastic/lunes v0.1.0 h1:amRtLPjwkWtzDF/RKzcEPMvSsSseLDLW+bnhfNSLRe4=
+github.com/elastic/lunes v0.1.0/go.mod h1:xGphYIt3XdZRtyWosHQTErsQTd4OP1p9wsbVoHelrd4=
 github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
 github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
 github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
 github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
-github.com/envoyproxy/go-control-plane v0.12.0 h1:4X+VP1GHd1Mhj6IB5mMeGbLCleqxjletLK6K0rbxyZI=
-github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/IragXCQKCnYbmlmtHvwRG0=
+github.com/envoyproxy/go-control-plane v0.13.0 h1:HzkeUz1Knt+3bK+8LG1bxOO/jzWZmdxpwC51i202les=
+github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8=
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A=
-github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew=
+github.com/envoyproxy/protoc-gen-validate v1.1.0 h1:tntQDh69XqOCOZsDz0lVJQez/2L6Uu2PdjCQwWCJ3bM=
+github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4=
 github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls=
 github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
-github.com/expr-lang/expr v1.16.2 h1:JvMnzUs3LeVHBvGFcXYmXo+Q6DPDmzrlcSBO6Wy3w4s=
-github.com/expr-lang/expr v1.16.2/go.mod h1:uCkhfG+x7fcZ5A5sXHKuQ07jGZRl6J0FCAaf2k4PtVQ=
+github.com/expr-lang/expr v1.16.9 h1:WUAzmR0JNI9JCiF0/ewwHB1gmcGw5wW7nWt8gc6PpCI=
+github.com/expr-lang/expr v1.16.9/go.mod h1:8/vRC7+7HBzESEqt5kKpYXxrxkr31SaO8r40VO/1IT4=
 github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb h1:IT4JYU7k4ikYg1SCxNI1/Tieq/NFvh6dzLdgi7eu0tM=
 github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb/go.mod h1:bH6Xx7IW64qjjJq8M2u4dxNaBiDfKK+z/3eGDpXEQhc=
 github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
@@ -241,7 +250,6 @@ github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYF
 github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
 github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
 github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
-github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
 github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
 github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
 github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw=
@@ -249,8 +257,8 @@ github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHqu
 github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
 github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
 github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
-github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
-github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
+github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M=
+github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
 github.com/fzipp/gocyclo v0.3.1/go.mod h1:DJHO6AUmbdqj2ET4Z9iArSuwWgYDRryYt2wASxc7x3E=
 github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
 github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
@@ -260,8 +268,6 @@ github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A=
 github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
 github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
 github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-kit/kit v0.13.0 h1:OoneCcHKHQ03LfBpoQCUfCluwd2Vt3ohz+kvbJneZAU=
-github.com/go-kit/kit v0.13.0/go.mod h1:phqEHMMUbyrCFCTgH48JueqrM3md2HcAZ8N3XE4FKDg=
 github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
 github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU=
 github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
@@ -304,14 +310,14 @@ github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/me
 github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
 github.com/go-test/deep v1.1.1 h1:0r/53hagsehfO4bzD2Pgr/+RgHqhmf+k1Bpse2cTu1U=
 github.com/go-test/deep v1.1.1/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE=
-github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 h1:TQcrn6Wq+sKGkpyPvppOz99zsMBaUOKXq6HSv655U1c=
-github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
+github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss=
+github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
 github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg=
 github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw=
 github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
 github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
-github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA=
-github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
+github.com/goccy/go-json v0.10.4 h1:JSwxQzIqKfmFX1swYPpUThQZp/Ka4wzJdK0LWVytLPM=
+github.com/goccy/go-json v0.10.4/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
 github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
 github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
 github.com/gogo/googleapis v1.4.1 h1:1Yx4Myt7BxzvUr5ldGSbwYiZG6t9wGBZ+8/fX3Wvtq0=
@@ -327,8 +333,9 @@ github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfU
 github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
 github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ=
+github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw=
 github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
 github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
 github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
@@ -377,7 +384,6 @@ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
 github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
 github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
@@ -398,21 +404,21 @@ github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hf
 github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
 github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
 github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
-github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o=
-github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw=
+github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM=
+github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA=
 github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
 github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs=
-github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0=
+github.com/googleapis/enterprise-certificate-proxy v0.3.4 h1:XYIDZApgAnrN1c855gTgghdIA6Stxb52D5RnLI1SLyw=
+github.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA=
 github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
 github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
 github.com/googleapis/gax-go/v2 v2.13.0 h1:yitjD5f7jQHhyDsnhKEBU52NdvvdSeGzlAnDPT0hH1s=
 github.com/googleapis/gax-go/v2 v2.13.0/go.mod h1:Z/fvTZXF8/uw7Xu5GuslPw+bplx6SS338j1Is2S+B7A=
 github.com/gophercloud/gophercloud v1.13.0 h1:8iY9d1DAbzMW6Vok1AxbbK5ZaUjzMp0tdyt4fX9IeJ0=
 github.com/gophercloud/gophercloud v1.13.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
-github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4=
-github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q=
+github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyEE=
+github.com/gorilla/handlers v1.5.2/go.mod h1:dX+xVpaxdSw+q0Qek8SSsl3dfMk3jNddUkMzo0GtH0w=
 github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
 github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
 github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4=
@@ -423,8 +429,8 @@ github.com/grafana/dskit v0.0.0-20241115082728-f2a7eb3aa0e9 h1:Dx7+6aU/fhwD2vkMr
 github.com/grafana/dskit v0.0.0-20241115082728-f2a7eb3aa0e9/go.mod h1:SPLNCARd4xdjCkue0O6hvuoveuS1dGJjDnfxYe405YQ=
 github.com/grafana/e2e v0.1.1 h1:/b6xcv5BtoBnx8cZnCiey9DbjEc8z7gXHO5edoeRYxc=
 github.com/grafana/e2e v0.1.1/go.mod h1:RpNLgae5VT+BUHvPE+/zSypmOXKwEu4t+tnEMS1ATaE=
-github.com/grafana/gomemcache v0.0.0-20240229205252-cd6a66d6fb56 h1:X8IKQ0wu40wpvYcKfBcc5T4QnhdQjUhtUtB/1CY89lE=
-github.com/grafana/gomemcache v0.0.0-20240229205252-cd6a66d6fb56/go.mod h1:PGk3RjYHpxMM8HFPhKKo+vve3DdlPUELZLSDEFehPuU=
+github.com/grafana/gomemcache v0.0.0-20241016125027-0a5bcc5aef40 h1:1TeKhyS+pvzOeyLV1XPZsiqebnKky/AKS3pJNNbHVPo=
+github.com/grafana/gomemcache v0.0.0-20241016125027-0a5bcc5aef40/go.mod h1:IGRj8oOoxwJbHBYl1+OhS9UjQR0dv6SQOep7HqmtyFU=
 github.com/grafana/memberlist v0.3.1-0.20220708130638-bd88e10a3d91 h1:/NipyHnOmvRsVzj81j2qE0VxsvsqhOB0f4vJIhk2qCQ=
 github.com/grafana/memberlist v0.3.1-0.20220708130638-bd88e10a3d91/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
 github.com/grafana/pyroscope-go/godeltaprof v0.1.8 h1:iwOtYXeeVSAeYefJNaxDytgjKtUuKQbJqgAIjlnicKg=
@@ -433,10 +439,12 @@ github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrR
 github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk=
 github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI=
 github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8=
+github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0 h1:pRhl55Yx1eC7BZ1N+BBWwnKaMyD8uC+34TLdndZMAKk=
+github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0/go.mod h1:XKMd7iuf/RGPSMJ/U4HP0zS2Z9Fh8Ps9a+6X26m/tmI=
 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
 github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 h1:TmHmbvxPmaegwhDubVz0lICL0J5Ka2vwTzhoePEXsGE=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0/go.mod h1:qztMSjm835F2bXf+5HKAPIS5qsmQDqZna/PgVt4rWtI=
 github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw=
 github.com/hashicorp/consul/api v1.13.0/go.mod h1:ZlVrynguJKcYr54zGaDbaL3fOvKC9m72FhPvA8T35KQ=
 github.com/hashicorp/consul/api v1.29.2 h1:aYyRn8EdE2mSfG14S1+L9Qkjtz8RzmaWh6AcNGRNwPw=
@@ -471,8 +479,6 @@ github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+
 github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
 github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
 github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY=
-github.com/hashicorp/go-plugin v1.6.0 h1:wgd4KxHJTVGGqWBq4QPB1i5BZNEx9BR8+OFmHDmTk8A=
-github.com/hashicorp/go-plugin v1.6.0/go.mod h1:lBS5MtSSBZk0SHc66KACcjjlU6WzEVP/8pwz68aMkCI=
 github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
 github.com/hashicorp/go-retryablehttp v0.5.4/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
 github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU=
@@ -495,6 +501,7 @@ github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKe
 github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
 github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
 github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
 github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c=
 github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
 github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
@@ -512,8 +519,6 @@ github.com/hashicorp/vault/api v1.0.4/go.mod h1:gDcqh3WGcR1cpF5AJz/B1UFheUEneMoI
 github.com/hashicorp/vault/sdk v0.1.13/go.mod h1:B+hVj7TpuQY1Y/GPbCpffmgd+tSEwvhkWnjtSYCaS2M=
 github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
 github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
-github.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE=
-github.com/hashicorp/yamux v0.1.1/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ=
 github.com/hetznercloud/hcloud-go/v2 v2.10.2 h1:9gyTUPhfNbfbS40Spgij5mV5k37bOZgt8iHKCbfGs5I=
 github.com/hetznercloud/hcloud-go/v2 v2.10.2/go.mod h1:xQ+8KhIS62W0D78Dpi57jsufWh844gUw1az5OUvaeq8=
 github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM=
@@ -528,8 +533,8 @@ github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2
 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
 github.com/ionos-cloud/sdk-go/v6 v6.1.11 h1:J/uRN4UWO3wCyGOeDdMKv8LWRzKu6UIkLEaes38Kzh8=
 github.com/ionos-cloud/sdk-go/v6 v6.1.11/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k=
-github.com/jaegertracing/jaeger v1.57.0 h1:3wDtUUPs6NRYH7+d+y8MilDkLHdpPrVlQ2wbcsA62bs=
-github.com/jaegertracing/jaeger v1.57.0/go.mod h1:p/1fxIU9hKHl7qEhKC72p2ZYVhvvZvNB73y6V7YyuTs=
+github.com/jaegertracing/jaeger v1.63.0 h1:4p1lg4DyZ1rZl5gCt6nnQ0nllcXVLJ96sTSbd+rnOEo=
+github.com/jaegertracing/jaeger v1.63.0/go.mod h1:ZCqGiUdcAPC8HwqQGOjl89HDNEqseArdWcH+yoZxRtk=
 github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8=
 github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs=
 github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo=
@@ -544,8 +549,6 @@ github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZ
 github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc=
 github.com/jedib0t/go-pretty/v6 v6.2.4 h1:wdaj2KHD2W+mz8JgJ/Q6L/T5dB7kyqEFI16eLq7GEmk=
 github.com/jedib0t/go-pretty/v6 v6.2.4/go.mod h1:+nE9fyyHGil+PuISTCrp7avEdo6bqoMwqZnuiK2r2a0=
-github.com/jhump/protoreflect v1.15.1 h1:HUMERORf3I3ZdX05WaQ6MIpd/NJ434hTp5YiKgfCL6c=
-github.com/jhump/protoreflect v1.15.1/go.mod h1:jD/2GMKKE6OqX8qTjhADU1e6DShO+gavG9e0Q693nKo=
 github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
 github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
 github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
@@ -575,12 +578,12 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o
 github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
 github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
 github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
-github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM=
-github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
+github.com/klauspost/cpuid/v2 v2.2.9 h1:66ze0taIn2H33fBvCkXuv9BmCwDfafmiIVpKV9kKGuY=
+github.com/klauspost/cpuid/v2 v2.2.9/go.mod h1:rqkxqrZ1EhYM9G+hXH7YdowN5R5RGN6NK4QwQ3WMXF8=
 github.com/knadh/koanf v1.5.0 h1:q2TSd/3Pyc/5yP9ldIrSdIz26MCcyNQzW0pEAugLPNs=
 github.com/knadh/koanf v1.5.0/go.mod h1:Hgyjp4y8v44hpZtPzs7JZfRAW5AhN7KfZcwv1RYggDs=
-github.com/knadh/koanf/v2 v2.1.1 h1:/R8eXqasSTsmDCsAyYj+81Wteg8AqrV9CP6gvsTsOmM=
-github.com/knadh/koanf/v2 v2.1.1/go.mod h1:4mnTRbZCK+ALuBXHZMjDfG9y714L7TykVnZkXbMU3Es=
+github.com/knadh/koanf/v2 v2.1.2 h1:I2rtLRqXRy1p01m/utEtpZSSA6dcJbgGVuE27kW2PzQ=
+github.com/knadh/koanf/v2 v2.1.2/go.mod h1:Gphfaen0q1Fc1HTgJgSTC4oRX9R2R5ErYMZJy8fLJBo=
 github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00=
 github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -598,9 +601,10 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0
 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
 github.com/linode/linodego v1.37.0 h1:B/2Spzv9jYXzKA+p+GD8fVCNJ7Wuw6P91ZDD9eCkkso=
 github.com/linode/linodego v1.37.0/go.mod h1:L7GXKFD3PoN2xSEtFc04wIXP5WK65O10jYQx0PQISWQ=
-github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I=
-github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c h1:VtwQ41oftZwlMnOEbMWQtSEUgU64U4s+GHk7hZK+jtY=
-github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE=
+github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683 h1:7UMa6KCCMjZEMDtTVdcGu0B1GmmC7QJKiCCjyTAWQy0=
+github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k=
+github.com/magefile/mage v1.15.0 h1:BvGheCMAsG3bWUDbZ8AyXXpCNwU9u5CB6sM+HNb9HYg=
+github.com/magefile/mage v1.15.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A=
 github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
 github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
 github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
@@ -631,8 +635,8 @@ github.com/miekg/dns v1.1.61 h1:nLxbwF3XxhwVSm8g9Dghm9MHPaUZuqhPiGL+675ZmEs=
 github.com/miekg/dns v1.1.61/go.mod h1:mnAarhS3nWaW+NVP2wTkYVIZyHNJ098SJZUki3eykwQ=
 github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
 github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
-github.com/minio/minio-go/v7 v7.0.81 h1:SzhMN0TQ6T/xSBu6Nvw3M5M8voM+Ht8RH3hE8S7zxaA=
-github.com/minio/minio-go/v7 v7.0.81/go.mod h1:84gmIilaX4zcvAWWzJ5Z1WI5axN+hAbM5w25xf8xvC0=
+github.com/minio/minio-go/v7 v7.0.80 h1:2mdUHXEykRdY/BigLt3Iuu1otL0JTogT0Nmltg0wujk=
+github.com/minio/minio-go/v7 v7.0.80/go.mod h1:84gmIilaX4zcvAWWzJ5Z1WI5axN+hAbM5w25xf8xvC0=
 github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
 github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
 github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
@@ -641,7 +645,6 @@ github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HK
 github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
 github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
 github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
-github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0=
 github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
 github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
 github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
@@ -672,8 +675,6 @@ github.com/npillmayer/nestext v0.1.3/go.mod h1:h2lrijH8jpicr25dFY+oAJLyzlya6jhnu
 github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
 github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
 github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
-github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA=
-github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU=
 github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
 github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
 github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
@@ -682,46 +683,48 @@ github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
 github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
 github.com/onsi/gomega v1.29.0 h1:KIA/t2t5UBzoirT4H9tsML45GEbo3ouUnBHsCfD2tVg=
 github.com/onsi/gomega v1.29.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.102.0 h1:R70PpK14trQfL/Vj5oAiGRqX09s2gOWuf6t1Ae5fevQ=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.102.0/go.mod h1:xmy/yFFmB1Epy+czrYMbA+4xeOKvhFqNqYWU6qINeis=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter v0.102.0 h1:N3vWsp3xealy4AX8TovfHG5EKi/k7z+F/8LFP4SVAgo=
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter v0.102.0/go.mod h1:/Ijok2yF1qYoHuRHvyLS04ZuW91Pue2VkqZ/nZxpkvk=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.102.0 h1:PNLVcz8kJLE9V5kGnbBh277Bvl4WwiVZ+NbFbOB80WY=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.102.0/go.mod h1:cBbjwd8m4rBVgCQksUbAVQX1EoM5IuCyNQw2mzvibEM=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.102.0 h1:qsM5HhWpAfIMg8LdO4u+CHofu4UuCuJwg/M+ySO9uZA=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.102.0/go.mod h1:wBJlGy9Wx6s7AxIMcSne2sGw73e5ZUy1AQ/duYwpFf8=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.97.0 h1:f3HVDcjUVUbOpKWiebD9v8+9YdDdNvzPyKh3IVb0ORY=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.97.0/go.mod h1:110wLws4lB2Jpv58rK7YoaMIhIEmLlzw5/viC0XJhbM=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka v0.102.0 h1:xBd9EXG9qvWwa2d7qDRVv/D/2gAQqn1zGbPqdjkd+O8=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka v0.102.0/go.mod h1:e4pc6nkNyzBi5g2RgIRjJ1slRsOY5qHIbPu0E4oM3cE=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.102.0 h1:/J1Q2tylp8ID+AIpCmfaArUyCPoSjY3nyZXdkpTw9J8=
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.102.0/go.mod h1:lbNQBpvs40lInohZrqAbRZ+8r29GzfMfkbLV4fBPrzE=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal v0.102.0 h1:pVJ792+Nzcv8nLlg18XOLOWEZ/dCK+Wo3Iak5TU8rz8=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal v0.102.0/go.mod h1:DmkGhNL9nuSTg8fMhYNopMuF1Y3LFqu/FQHrvhBzME0=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.97.0 h1:bVeo7BahYY4rWdaEuzJX2Tn20MbvYcEHXbFjV2IwnPQ=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.97.0/go.mod h1:lj29zRdEZdvbQvZ6g7Pb+tSPw4dQE/jahe39fRgO+08=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.102.0 h1:vJL6lDaeI3pVA7ADnWKD3HMpI80BSrZ2UnGc+qkwqoY=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.102.0/go.mod h1:xtE7tds5j8PtI/wMuGb+Em5K9rJH8hm6t28Qe4QrpoU=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.102.0 h1:TvJYcU/DLRFCgHr7nT98k5D+qkZ4syKVxc8OJjv+K4c=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.102.0/go.mod h1:WzD3Ox7tywAQHknxAFpAC1oZJGItMp5mbvgUGjvzNY8=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/azure v0.102.0 h1:IgLMHSuraJzxLqVeM7xU7aZPcXS5/eoVnX+HBuFGQ6E=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/azure v0.102.0/go.mod h1:hG8EmxUvgXIiKTG6+UVcMhFeIN6UD/bswP7WYpQ2lCc=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.102.0 h1:4VQidhCgkJiBvBDMOukr5ixrf5uP66iW5Hb+CFsb+4E=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.102.0/go.mod h1:nMto9zkv0vD8YI3oGZFZS2Uu7k2oHt1d+xUHN/ofUYo=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus v0.102.0 h1:Mh5MHf0PrUQMTM2S8HwEuPt3Fyz0Xnt0IG7GUc6Fmbs=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus v0.102.0/go.mod h1:6fc8qnIayeGwAF41LyLR+/FRbyJf4+FikbmaO0GGq/Y=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.102.0 h1:5M7I78lyGsH+Xyy4NoXKM/UUCa52aZQiPcSX6so6x94=
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.102.0/go.mod h1:BEQy0zEel5uIOTEFBBmvQJ4A32R6nKLtSMtC6ylLI8k=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.97.0 h1:IfJ9EkykXHBYdwuvZd0qfFcCHAAOuTEaiIlTRw6R9g4=
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.97.0/go.mod h1:uTs2ukYAG9tCkoUhW39J9bNQAqwpqHhE85eeRVm6zCM=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.102.0 h1:HTGSfx2HzfudY1Uczw9yTBJnGBmTVFYzpGH1z+oD0nU=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.102.0/go.mod h1:Hlz24+Ah6Ojk0FUKNb1watRmTbLEru35+feroKA7dvQ=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver v0.102.0 h1:2D3niNAKkr+NRVmAJW0bquSjzHUL6Pf1qQRLRPwA13M=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver v0.102.0/go.mod h1:h0uqwH7b+NGDfFFWTjoGErMdYRdCqP1Az1/G+tfG024=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver v0.102.0 h1:dBhFe/29ODIbxg4+JRaHwYAHMFFeh6/+izVtjceXwew=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver v0.102.0/go.mod h1:WNFjuquVqyi+WEoa6L0J3DzPLRsP24ZlbZYwKv49VwY=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.102.0 h1:Pemo9pZa3VMYdrM/bss3f0qqVyBzPSulOBQL8VQcgN8=
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.102.0/go.mod h1:fvjAM+jOQdiXCmAENKH/eWxBBqTaImbq3lpoBI4X5Ek=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.116.0 h1:4Pgj/AuFEW6r654llnTuZZwk09xiovGYcbMsYrumu+U=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.116.0/go.mod h1:KZe8BzQcC+KNs4P//Dq2pg1gpuLC4NTsSPWdowIMo9E=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter v0.116.0 h1:Kd+DShxtBBdJb6ZVBA/H6gEMSqTIm0enTkQvqGdzJ5M=
+github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter v0.116.0/go.mod h1:n8cRa05pYSeN440OJr+WC70UzK1/NIKZQKoMmJ12Yes=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.116.0 h1:va5jloH1Uwrnx8rymcG7ZqLJ49/zWGMz5Dy/iMm1JzI=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.116.0/go.mod h1:WXJuadNLluxAiVZts1bAJbhAVurBpogToBbjtFKzie8=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.116.0 h1:xDbf946Zm0rTzWcYEyUfU0Ft2KthhaH4xrNm303vpbI=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.116.0/go.mod h1:yuIyOGmQJOn37u6NVfG8yOCzVvwboqnt+pjOSTvDeLo=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.116.0 h1:5T7nt2K73pHYT131FTj9BjS6xdjIJZQ3fzp9nCB1dYo=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.116.0/go.mod h1:SY4FYy6K/cr63UY6ImhNSRy/1/WmK6fvSHsSpW+lT1Q=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka v0.116.0 h1:sp2pcY/jJItuB14+omWqIm6ImSeZiKCtAm0MD+9Ex5w=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka v0.116.0/go.mod h1:51jLoNTWbZ8wHqYU41IkGbNtlLaqAnBXEqpHleDhFY0=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.116.0 h1:B4vA2+XhP1fncFxeaH9P1VNpEXKJIUgXGpvGGwbOPGg=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.116.0/go.mod h1:+0FjpCBgkcLsumJTuLInasEA67JaizGakt3PRAPbFpQ=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal v0.116.0 h1:Gdmg6Pr9X/h+HqTnRWCnH0tcLbCRymXJ9Lj1RGchs3M=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal v0.116.0/go.mod h1:jJseeb/N41qHL1YOd+dzhgyEMR04s7cbkD6dsXKjoUk=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/kafka/topic v0.116.0 h1:Oi6UZR5FNij7eifaFtjbBXFxfNXuPCH+37PODtZSu/o=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/kafka/topic v0.116.0/go.mod h1:1q/L2R/28emNCz0EHfxEw853I6lPxTcHTqS+UrMea0k=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.116.0 h1:LCyHhStq7UbCHxCiTHIpGhhMWFv/mA1ecV6wduzicYw=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.116.0/go.mod h1:wpgb30Nj/PwrTBCRm4b1EQNHhk4P5uILvqogiKD2+2w=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.116.0 h1:RlEK9MbxWyBHbLel8EJ1L7DbYVLai9dZL6Ljl2cBgyA=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.116.0/go.mod h1:AVUEyIjPb+0ARr7mhIkZkdNg3fd0ZcRhzAi53oZhl1Q=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.116.0 h1:jwnZYRBuPJnsKXE5H6ZvTEm91bXW5VP8+tLewzl54eg=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.116.0/go.mod h1:NT3Ag+DdnIAZQfD7l7OHwlYqnaAJ19SoPZ0nhD9yx4s=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/azure v0.116.0 h1:QqlVh8hjFr+U1HDI8VeCXg0nJGKeKce4lemGhSqdteo=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/azure v0.116.0/go.mod h1:GjaQr/5Atnc20NDg2YFa4A+Ly9pojsy3+Vi+BQ5DW/4=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.116.0 h1:wNQ51LBQjPIW7ptgZOMaJT60CnMoP1t3kxgldR7uv9Q=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.116.0/go.mod h1:lx+O1oAnKYPg5KaFW5Ss1Xh+zvCDKZPm73MDAFjPgak=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus v0.116.0 h1:u0s9tEyw7Q7hQh9yOrXNeAh/lt79gz3Un+TOfFSYiuI=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus v0.116.0/go.mod h1:SmrjCemPe9bxE4Al5bhNhxT/eQUtsCFT5CwoYbUlIB0=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.116.0 h1:mEKOrQ6dqWkfPTVEND41Jsvgktdlnk774GZIeU0IZRE=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.116.0/go.mod h1:9TzKV3YvfXnz9PUoKluRG3iQkOVQMNZcUJy+CUnthkY=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.116.0 h1:XHe77t+TheItR0xPgtmk3+ijTdXlkupCNXidzUEyjbc=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.116.0/go.mod h1:x/u2jTCynduMEoBbjSw5aq29E++NzjnXoMADtDEfpO0=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.116.0 h1:Rgqfcmb6+Bsb99UOPCpfnVrH8ytI/F0KU3lmx2tnNk4=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.116.0/go.mod h1:LQBflOCH8C1rrcW4iYWDe2InZx+U5jY4HLnShmi+jR8=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver v0.116.0 h1:+YA1AlJv4CWSLun5O6RRIK/Eac+Wekw/6k+45xR5yXM=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver v0.116.0/go.mod h1:WCbw0k2xv2RvMUzhrAT1sIAQPCvdnR7wr/6bW6aQJIE=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver v0.116.0 h1:yRYhsRqpXGtZ6nPuUp2XuUDzmi8TdtEheJCWegGcY7I=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver v0.116.0/go.mod h1:RUpCHfyFFQviTjpbX6cFJbOp91iYgO0Ca9cTjGnDfj8=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.116.0 h1:M9nXu5HHi1Prf2h3o2W+nI2RXvXuVF0qh+hNP8p7pAI=
+github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.116.0/go.mod h1:nbNJQEln1t8+0xyYK4E4ynUAvEv3QXaeMWIN2J+FWPI=
 github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
 github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
@@ -743,8 +746,8 @@ github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FI
 github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
 github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
 github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE=
-github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM=
-github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs=
+github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M=
+github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc=
 github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
 github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ=
 github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
@@ -757,14 +760,15 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
 github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
+github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo=
+github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
 github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
-github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
-github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c h1:NRoLoZvkBTKvR5gQLgA3e0hqjkY9u1wm+iOL45VN/qI=
-github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
+github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU=
+github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
 github.com/prometheus/alertmanager v0.27.0 h1:V6nTa2J5V4s8TG4C4HtrBP/WNSebCCTYGGv4qecA/+I=
 github.com/prometheus/alertmanager v0.27.0/go.mod h1:8Ia/R3urPmbzJ8OsdvmZvIprDwvwmYCmUbwBL+jlPOE=
 github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
@@ -773,8 +777,8 @@ github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3O
 github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
 github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
 github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
-github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
-github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
+github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
+github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@@ -786,8 +790,8 @@ github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8b
 github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
 github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
 github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
-github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
-github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
+github.com/prometheus/common v0.61.0 h1:3gv/GThfX0cV2lpO7gkTUwZru38mxevy90Bj8YFSRQQ=
+github.com/prometheus/common v0.61.0/go.mod h1:zr29OCN/2BsJRaFwG8QOBr41D6kkchKbpeNH7pAjb/s=
 github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
 github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI=
 github.com/prometheus/exporter-toolkit v0.11.0 h1:yNTsuZ0aNCNFQ3aFTD2uhPOvr4iD7fdBvKPAEGkNf+g=
@@ -799,26 +803,26 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O
 github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
 github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
 github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
-github.com/prometheus/prometheus v0.54.0 h1:6+VmEkohHcofl3W5LyRlhw1Lfm575w/aX6ZFyVAmzM0=
-github.com/prometheus/prometheus v0.54.0/go.mod h1:xlLByHhk2g3ycakQGrMaU8K7OySZx98BzeCR99991NY=
-github.com/prometheus/statsd_exporter v0.26.0 h1:SQl3M6suC6NWQYEzOvIv+EF6dAMYEqIuZy+o4H9F5Ig=
-github.com/prometheus/statsd_exporter v0.26.0/go.mod h1:GXFLADOmBTVDrHc7b04nX8ooq3azG61pnECNqT7O5DM=
+github.com/prometheus/prometheus v0.54.1 h1:vKuwQNjnYN2/mDoWfHXDhAsz/68q/dQDb+YbcEqU7MQ=
+github.com/prometheus/prometheus v0.54.1/go.mod h1:xlLByHhk2g3ycakQGrMaU8K7OySZx98BzeCR99991NY=
+github.com/prometheus/statsd_exporter v0.26.1 h1:ucbIAdPmwAUcA+dU+Opok8Qt81Aw8HanlO+2N/Wjv7w=
+github.com/prometheus/statsd_exporter v0.26.1/go.mod h1:XlDdjAmRmx3JVvPPYuFNUg+Ynyb5kR69iPPkQjxXFMk=
 github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM=
 github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
 github.com/redis/go-redis/v9 v9.6.1 h1:HHDteefn6ZkTtY5fGUE8tj8uy85AHk6zP7CpzIAM0y4=
 github.com/redis/go-redis/v9 v9.6.1/go.mod h1:0C0c6ycQsdpVNQpxb1njEQIqkx5UcsM8FJCQLgE9+RA=
-github.com/relvacode/iso8601 v1.4.0 h1:GsInVSEJfkYuirYFxa80nMLbH2aydgZpIf52gYZXUJs=
-github.com/relvacode/iso8601 v1.4.0/go.mod h1:FlNp+jz+TXpyRqgmM7tnzHHzBnz776kmAH2h3sZCn0I=
+github.com/relvacode/iso8601 v1.6.0 h1:eFXUhMJN3Gz8Rcq82f9DTMW0svjtAVuIEULglM7QHTU=
+github.com/relvacode/iso8601 v1.6.0/go.mod h1:FlNp+jz+TXpyRqgmM7tnzHHzBnz776kmAH2h3sZCn0I=
 github.com/rhnvrm/simples3 v0.6.1/go.mod h1:Y+3vYm2V7Y4VijFoJHHTrja6OgPrJ2cBti8dPGkC3sA=
 github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
 github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
 github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
 github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
 github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
-github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
-github.com/rs/cors v1.11.0 h1:0B9GE/r9Bc2UxRMMtymBkHTenPkHDv0CW4Y98GBY+po=
-github.com/rs/cors v1.11.0/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU=
+github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
+github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
+github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA=
+github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU=
 github.com/rs/xid v1.6.0 h1:fV591PaemRlL6JfRxGDEPl69wICngIQ3shQtzfy2gxU=
 github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
@@ -837,12 +841,8 @@ github.com/segmentio/fasthash v1.0.3 h1:EI9+KE1EwvMLBWwjpRDc+fEM+prwxDYbslddQGtr
 github.com/segmentio/fasthash v1.0.3/go.mod h1:waKX8l2N8yckOgmSsXJi7x1ZfdKZ4x7KRMzBtS3oedY=
 github.com/sercand/kuberesolver/v5 v5.1.1 h1:CYH+d67G0sGBj7q5wLK61yzqJJ8gLLC8aeprPTHb6yY=
 github.com/sercand/kuberesolver/v5 v5.1.1/go.mod h1:Fs1KbKhVRnB2aDWN12NjKCB+RgYMWZJ294T3BtmVCpQ=
-github.com/shirou/gopsutil/v3 v3.24.4 h1:dEHgzZXt4LMNm+oYELpzl9YCqV65Yr/6SfrvgRBtXeU=
-github.com/shirou/gopsutil/v3 v3.24.4/go.mod h1:lTd2mdiOspcqLgAnr9/nGi71NkeMpWKdmhuxm9GusH8=
-github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM=
-github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ=
-github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU=
-github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k=
+github.com/shirou/gopsutil/v4 v4.24.11 h1:WaU9xqGFKvFfsUv94SXcUPD7rCkU0vr/asVdQOBZNj8=
+github.com/shirou/gopsutil/v4 v4.24.11/go.mod h1:s4D/wg+ag4rG0WO7AiTj2BeYCRhym0vM7DHbZRxnIT8=
 github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
 github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
 github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
@@ -859,8 +859,8 @@ github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8=
 github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY=
 github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0=
 github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
-github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
-github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
+github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
+github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
 github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
 github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
 github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI=
@@ -883,17 +883,20 @@ github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1F
 github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
 github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
 github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
-github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
 github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
+github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
 github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
 github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
 github.com/thanos-io/objstore v0.0.0-20220809103346-8ef1f215e2bf h1:onQsPyHlq2yIWU+Nfl6yStuqnZuVQQN8FZ8sBb2wqtw=
 github.com/thanos-io/objstore v0.0.0-20220809103346-8ef1f215e2bf/go.mod h1:v0NhuxxxUFUPatQcVNSCUkBEVezXzl7LSdaBOZygq98=
-github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU=
-github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI=
-github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk=
-github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY=
+github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU=
+github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY=
+github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY=
+github.com/tklauser/numcpus v0.8.0/go.mod h1:ZJZlAY+dmR4eut8epnzf0u/VwodKmryxR8txiloSqBE=
 github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
+github.com/ua-parser/uap-go v0.0.0-20241012191800-bbb40edc15aa h1:VzPR4xFM7HARqNocjdHg75ZL9SAgFtaF3P57ZdDcG6I=
+github.com/ua-parser/uap-go v0.0.0-20241012191800-bbb40edc15aa/go.mod h1:BUbeWZiieNxAuuADTBNb3/aeje6on3DhU3rpWsQSB1E=
 github.com/uber-go/atomic v1.4.0 h1:yOuPqEq4ovnhEjpHmfFwsqBXDYbQeT6Nb0bwD6XnD5o=
 github.com/uber-go/atomic v1.4.0/go.mod h1:/Ct5t2lcmbJ4OSe/waGBoaVvVqtO0bmtfVNex1PFV8g=
 github.com/uber/jaeger-client-go v2.30.0+incompatible h1:D6wyKGCecFaSRUpo8lCVbaOOb6ThwMmTEbhRwtKR97o=
@@ -943,134 +946,178 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
 go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
 go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
 go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
-go.opentelemetry.io/collector v0.102.1 h1:M/ciCcReQsSDYG9bJ2Qwqk7pQILDJ2bM/l0MdeCAvJE=
-go.opentelemetry.io/collector v0.102.1/go.mod h1:yF1lDRgL/Eksb4/LUnkMjvLvHHpi6wqBVlzp+dACnPM=
-go.opentelemetry.io/collector/component v0.102.1 h1:66z+LN5dVCXhvuVKD1b56/3cYLK+mtYSLIwlskYA9IQ=
-go.opentelemetry.io/collector/component v0.102.1/go.mod h1:XfkiSeImKYaewT2DavA80l0VZ3JjvGndZ8ayPXfp8d0=
-go.opentelemetry.io/collector/config/configauth v0.102.1 h1:LuzijaZulMu4xmAUG8WA00ZKDlampH+ERjxclb40Q9g=
-go.opentelemetry.io/collector/config/configauth v0.102.1/go.mod h1:kTzfI5fnbMJpm2wycVtQeWxFAtb7ns4HksSb66NIhX8=
-go.opentelemetry.io/collector/config/configcompression v1.9.0 h1:B2q6XMO6xiF2s+14XjqAQHGY5UefR+PtkZ0WAlmSqpU=
-go.opentelemetry.io/collector/config/configcompression v1.9.0/go.mod h1:6+m0GKCv7JKzaumn7u80A2dLNCuYf5wdR87HWreoBO0=
-go.opentelemetry.io/collector/config/configgrpc v0.102.1 h1:6Plnfx+xw/JH8k11MkljGoysPfn1u7hHbO2evteOTeE=
-go.opentelemetry.io/collector/config/configgrpc v0.102.1/go.mod h1:Kk3XOSar3QTzGDS8N8M38DVlOzUD7STS2obczO9q43I=
-go.opentelemetry.io/collector/config/confighttp v0.102.1 h1:tPw1Xf2PfDdrXoBKLY5Sd4Dh8FNm5i+6DKuky9XraIM=
-go.opentelemetry.io/collector/config/confighttp v0.102.1/go.mod h1:k4qscfjxuaDQmcAzioxmPujui9VSgW6oal3WLxp9CzI=
-go.opentelemetry.io/collector/config/confignet v0.102.1 h1:nSiAFQMzNCO4sDBztUxY73qFw4Vh0hVePq8+3wXUHtU=
-go.opentelemetry.io/collector/config/confignet v0.102.1/go.mod h1:pfOrCTfSZEB6H2rKtx41/3RN4dKs+X2EKQbw3MGRh0E=
-go.opentelemetry.io/collector/config/configopaque v1.18.0 h1:aoEecgd5m8iZCX+S+iH6SK/lG6ULqCqtrtz7PeHw7vE=
-go.opentelemetry.io/collector/config/configopaque v1.18.0/go.mod h1:6zlLIyOoRpJJ+0bEKrlZOZon3rOp5Jrz9fMdR4twOS4=
-go.opentelemetry.io/collector/config/configretry v0.102.1 h1:J5/tXBL8P7d7HT5dxsp2H+//SkwDXR66Z9UTgRgtAzk=
-go.opentelemetry.io/collector/config/configretry v0.102.1/go.mod h1:P+RA0IA+QoxnDn4072uyeAk1RIoYiCbxYsjpKX5eFC4=
-go.opentelemetry.io/collector/config/configtelemetry v0.102.1 h1:f/CYcrOkaHd+COIJ2lWnEgBCHfhEycpbow4ZhrGwAlA=
-go.opentelemetry.io/collector/config/configtelemetry v0.102.1/go.mod h1:WxWKNVAQJg/Io1nA3xLgn/DWLE/W1QOB2+/Js3ACi40=
-go.opentelemetry.io/collector/config/configtls v1.18.0 h1:IQemIIuryeHgrpBJMbLl+LgTxvFBbv7Hhi+0WwlxpCU=
-go.opentelemetry.io/collector/config/configtls v1.18.0/go.mod h1:lD2dlDqeTKq7OecFwIZMufDaa8erSlEoHMJrFPHrZNw=
-go.opentelemetry.io/collector/config/internal v0.102.1 h1:HFsFD3xpHUuNHb8/UTz5crJw1cMHzsJQf/86sgD44hw=
-go.opentelemetry.io/collector/config/internal v0.102.1/go.mod h1:Vig3dfeJJnuRe1kBNpszBzPoj5eYnR51wXbeq36Zfpg=
-go.opentelemetry.io/collector/confmap v0.102.1 h1:wZuH+d/P11Suz8wbp+xQCJ0BPE9m5pybtUe74c+rU7E=
-go.opentelemetry.io/collector/confmap v0.102.1/go.mod h1:KgpS7UxH5rkd69CzAzlY2I1heH8Z7eNCZlHmwQBMxNg=
-go.opentelemetry.io/collector/confmap/converter/expandconverter v0.102.1 h1:s0RxnaABoRxtfvUeimZ0OOsF83wD/EK1tR2N5GZyst0=
-go.opentelemetry.io/collector/confmap/converter/expandconverter v0.102.1/go.mod h1:ZwSMlOSIzmrrSSVNoMPDr21SQx7E52bZFMQJSOZ+EhY=
-go.opentelemetry.io/collector/confmap/provider/envprovider v0.102.1 h1:4KLw0pTChIqDfw0ckZ411aQDw98pu2dDOqgBHXfJm8M=
-go.opentelemetry.io/collector/confmap/provider/envprovider v0.102.1/go.mod h1:f+IJBW0Sc96T79qj3GQtE1wQ0uWEwpslD785efKBl+c=
-go.opentelemetry.io/collector/confmap/provider/fileprovider v0.102.1 h1:nPhOtUbJHfTDqZqtvU76HmEz9iV4O/4/DSCZdnm0mpY=
-go.opentelemetry.io/collector/confmap/provider/fileprovider v0.102.1/go.mod h1:eJnr6YDQiocmoRBvsKj33bIc4wysq5hy/jmOApv1dSM=
-go.opentelemetry.io/collector/confmap/provider/httpprovider v0.102.1 h1:VsaGXqEUFost0mf2svhds6loYzPavkyY37nMQcqoTkc=
-go.opentelemetry.io/collector/confmap/provider/httpprovider v0.102.1/go.mod h1:lQocxKI32Zj1F3PR9UZfzykq50/mOI1mbyZ0729dphI=
-go.opentelemetry.io/collector/confmap/provider/httpsprovider v0.102.1 h1:rEhPTqkGAezaFxJ8y/BL5m4vKTK3ZSpn+VcVLKnZo7Q=
-go.opentelemetry.io/collector/confmap/provider/httpsprovider v0.102.1/go.mod h1:GxUZM23m3u4vURw/At2zEKW+5GwcuCNsHJNT/Wq/cFI=
-go.opentelemetry.io/collector/confmap/provider/yamlprovider v0.102.1 h1:qmdaBIz0UnUKVitZzq+4HtO9zvRTwgNc/Q3b7kyf1NQ=
-go.opentelemetry.io/collector/confmap/provider/yamlprovider v0.102.1/go.mod h1:nAckG/FkzAaPuwtEN2Na2+ij+2hdTjtXUtFBnlUqpFk=
-go.opentelemetry.io/collector/connector v0.102.1 h1:7lEwXmhzqtyZwz2bBUHzwV/CZqA8bhPPVJOi0cm9+Fk=
-go.opentelemetry.io/collector/connector v0.102.1/go.mod h1:DRlDYJXsFx1FKKxkdM2Ja52/xe+0bgmy0hA+wgKRUVI=
-go.opentelemetry.io/collector/consumer v0.102.1 h1:0CkgHhxwx4lI/m+hWjh607xyjooW5CObZ8hFQy5vvo0=
-go.opentelemetry.io/collector/consumer v0.102.1/go.mod h1:HoXqmrRV13jLnP3/Gg3fYNdRkDPoO7UW58hKiLyFF60=
-go.opentelemetry.io/collector/exporter v0.102.1 h1:4VURYgBNJscxfMhZWitzcwA1cig5a6pH0xZSpdECDnM=
-go.opentelemetry.io/collector/exporter v0.102.1/go.mod h1:1pmNxvrvvbWDW6PiGObICdj0eOSGV4Fzwpm5QA1GU54=
-go.opentelemetry.io/collector/exporter/otlpexporter v0.102.1 h1:bOXE7u1iy0SKwH2mnVyIMKkvFIR9bn9iIm1Cf/CJlZU=
-go.opentelemetry.io/collector/exporter/otlpexporter v0.102.1/go.mod h1:4ya6xaUYvcXq9MQW0TbsR4QWkOJI02d/2Vt8plwdozA=
-go.opentelemetry.io/collector/exporter/otlphttpexporter v0.102.1 h1:9TaxHrkVtEdssDAHqV5yU9PARkFph7CvfLqC1wS6m+c=
-go.opentelemetry.io/collector/exporter/otlphttpexporter v0.102.1/go.mod h1:auKlkLfuUriyZ2CmV2dudJaVGB7ycZ+tTpypy2JNFEc=
-go.opentelemetry.io/collector/extension v0.102.1 h1:gAvE3w15q+Vv0Tj100jzcDpeMTyc8dAiemHRtJbspLg=
-go.opentelemetry.io/collector/extension v0.102.1/go.mod h1:XBxUOXjZpwYLZYOK5u3GWlbBTOKmzStY5eU1R/aXkIo=
-go.opentelemetry.io/collector/extension/auth v0.102.1 h1:GP6oBmpFJjxuVruPb9X40bdf6PNu9779i8anxa+wW6U=
-go.opentelemetry.io/collector/extension/auth v0.102.1/go.mod h1:U2JWz8AW1QXX2Ap3ofzo5Dn2fZU/Lglld97Vbh8BZS0=
-go.opentelemetry.io/collector/extension/zpagesextension v0.102.1 h1:YV+ejCgOBJjACOi/l3ULeivOhh85FPE8T4UcFdWviyg=
-go.opentelemetry.io/collector/extension/zpagesextension v0.102.1/go.mod h1:/CZXg9/C64k85/k4bc7NFbCNP/MiPUZucbxPUN04ny4=
-go.opentelemetry.io/collector/featuregate v1.9.0 h1:mC4/HnR5cx/kkG1RKOQAvHxxg5Ktmd9gpFdttPEXQtA=
-go.opentelemetry.io/collector/featuregate v1.9.0/go.mod h1:PsOINaGgTiFc+Tzu2K/X2jP+Ngmlp7YKGV1XrnBkH7U=
-go.opentelemetry.io/collector/otelcol v0.102.1 h1:JdRG3ven+c5k703QpZG5bxJi4JJOnWaNP/EJvN+oYnI=
-go.opentelemetry.io/collector/otelcol v0.102.1/go.mod h1:kHf9KBXOLZXajR1On8XJbBBGcgh2I2+/mVVroPzOLJU=
-go.opentelemetry.io/collector/pdata v1.12.0 h1:Xx5VK1p4VO0md8MWm2icwC1MnJ7f8EimKItMWw46BmA=
-go.opentelemetry.io/collector/pdata v1.12.0/go.mod h1:MYeB0MmMAxeM0hstCFrCqWLzdyeYySim2dG6pDT6nYI=
-go.opentelemetry.io/collector/pdata/testdata v0.102.1 h1:S3idZaJxy8M7mCC4PG4EegmtiSaOuh6wXWatKIui8xU=
-go.opentelemetry.io/collector/pdata/testdata v0.102.1/go.mod h1:JEoSJTMgeTKyGxoMRy48RMYyhkA5vCCq/abJq9B6vXs=
-go.opentelemetry.io/collector/processor v0.102.1 h1:79NWs7kTgmgxOIQacuZyDf+mYWuoJZS07SHwZT7sZ4Y=
-go.opentelemetry.io/collector/processor v0.102.1/go.mod h1:sNM41tEHgv3YA/Dz9/6F8oCeObrqnKCGOMs7wS6Ldus=
-go.opentelemetry.io/collector/receiver v0.102.1 h1:353t4U3o0RdU007JcQ4sRRzl72GHCJZwXDr8cCOcEbI=
-go.opentelemetry.io/collector/receiver v0.102.1/go.mod h1:pYjMzUkvUlxJ8xt+VbI1to8HMtVlv8AW/K/2GQQOTB0=
-go.opentelemetry.io/collector/receiver/otlpreceiver v0.102.1 h1:65/8lkVmOu6gwBw99W+QUQBeDC2qVTwlaiqy7/SpauY=
-go.opentelemetry.io/collector/receiver/otlpreceiver v0.102.1/go.mod h1:0hmxfFSSqKJjRGvgYjp/XvptbAgLhLguwNgJqMp7zd0=
-go.opentelemetry.io/collector/semconv v0.105.0 h1:8p6dZ3JfxFTjbY38d8xlQGB1TQ3nPUvs+D0RERniZ1g=
-go.opentelemetry.io/collector/semconv v0.105.0/go.mod h1:yMVUCNoQPZVq/IPfrHrnntZTWsLf5YGZ7qwKulIl5hw=
-go.opentelemetry.io/collector/service v0.102.1 h1:Lg7qrC4Zctd/OAlkpdsaZaUY+jLEGLLnOigfBLP2GW8=
-go.opentelemetry.io/collector/service v0.102.1/go.mod h1:L5Sh3461B1Zij7vpMMbi6M/SZicgrLB3UgbG0oUK0pA=
-go.opentelemetry.io/contrib/bridges/prometheus v0.53.0 h1:BdkKDtcrHThgjcEia1737OUuFdP6xzBKAMx2sNZCkvE=
-go.opentelemetry.io/contrib/bridges/prometheus v0.53.0/go.mod h1:ZkhVxcJgeXlL/lVyT/vxNHVFiSG5qOaDwYaSgD8IfZo=
-go.opentelemetry.io/contrib/config v0.7.0 h1:b1rK5tGTuhhPirJiMxOcyQfZs76j2VapY6ODn3b2Dbs=
-go.opentelemetry.io/contrib/config v0.7.0/go.mod h1:8tdiFd8N5etOi3XzBmAoMxplEzI3TcL8dU5rM5/xcOQ=
-go.opentelemetry.io/contrib/exporters/autoexport v0.53.0 h1:13K+tY7E8GJInkrvRiPAhC0gi/7vKjzDNhtmCf+QXG8=
-go.opentelemetry.io/contrib/exporters/autoexport v0.53.0/go.mod h1:lyQF6xQ4iDnMg4sccNdFs1zf62xd79YI8vZqKjOTwMs=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0 h1:vS1Ao/R55RNV4O7TA2Qopok8yN+X0LIP6RVWLFkprck=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0/go.mod h1:BMsdeOxN04K0L5FNUBfjFdvwWGNe/rkmSwH4Aelu/X0=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 h1:ZIg3ZT/aQ7AfKqdwp7ECpOK6vHqquXXuyTjIO8ZdmPs=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0/go.mod h1:DQAwmETtZV00skUwgD6+0U89g80NKsJE3DCKeLLPQMI=
-go.opentelemetry.io/contrib/propagators/b3 v1.27.0 h1:IjgxbomVrV9za6bRi8fWCNXENs0co37SZedQilP2hm0=
-go.opentelemetry.io/contrib/propagators/b3 v1.27.0/go.mod h1:Dv9obQz25lCisDvvs4dy28UPh974CxkahRDUPsY7y9E=
-go.opentelemetry.io/contrib/zpages v0.52.0 h1:MPgkMy0Cp3O5EdfVXP0ss3ujhEibysTM4eszx7E7d+E=
-go.opentelemetry.io/contrib/zpages v0.52.0/go.mod h1:fqG5AFdoYru3A3DnhibVuaaEfQV2WKxE7fYE1jgDRwk=
 go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY=
 go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE=
-go.opentelemetry.io/otel/bridge/opencensus v1.27.0 h1:ao9aGGHd+G4YfjBpGs6vbkvt5hoC67STlJA9fCnOAcs=
-go.opentelemetry.io/otel/bridge/opencensus v1.27.0/go.mod h1:uRvWtAAXzyVOST0WMPX5JHGBaAvBws+2F8PcC5gMnTk=
-go.opentelemetry.io/otel/bridge/opentracing v1.26.0 h1:Q/dHj0DOhfLMAs5u5ucAbC7gy66x9xxsZRLpHCJ4XhI=
-go.opentelemetry.io/otel/bridge/opentracing v1.26.0/go.mod h1:HfypvOw/8rqu4lXDhwaxVK1ibBAi1lTMXBHV9rywOCw=
+go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
+go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
+go.opentelemetry.io/collector v0.116.0 h1:Dscd6Nsnc7hjFQosO0SofcPQsXRfcj5N5PjQAslnmj4=
+go.opentelemetry.io/collector v0.116.0/go.mod h1:Ug2hpW0SINPmJAGVEALRlux78NTZc3YXSuh5/Q/hFrA=
+go.opentelemetry.io/collector/client v1.22.0 h1:AAUzHuqYQqxoNqacw1WXgGF/MxtBTwNZuhBvJIorgA0=
+go.opentelemetry.io/collector/client v1.22.0/go.mod h1:wcCSdTwbDVNTycoqs7BiDNVj3e1Ta7EnWH2sAofKnEk=
+go.opentelemetry.io/collector/component v0.116.0 h1:SQE1YeVfYCN7bw1n4hknUwJE5U/1qJL552sDhAdSlaA=
+go.opentelemetry.io/collector/component v0.116.0/go.mod h1:MYgXFZWDTq0uPgF1mkLSFibtpNqksRVAOrmihckOQEs=
+go.opentelemetry.io/collector/component/componentstatus v0.116.0 h1:wpgY0H2K9IPBzaNAvavKziK86VZ7TuNFQbS9OC4Z6Cs=
+go.opentelemetry.io/collector/component/componentstatus v0.116.0/go.mod h1:ZRlVwHFMGNfcsAywEJqivOn5JzDZkpe3KZVSwMWu4tw=
+go.opentelemetry.io/collector/component/componenttest v0.116.0 h1:UIcnx4Rrs/oDRYSAZNHRMUiYs2FBlwgV5Nc0oMYfR6A=
+go.opentelemetry.io/collector/component/componenttest v0.116.0/go.mod h1:W40HaKPHdBFMVI7zzHE7dhdWC+CgAnAC9SmWetFBATY=
+go.opentelemetry.io/collector/config/configauth v0.116.0 h1:8Y5y18fxilLobjDqNHO5FvGhC5HKYupWhT1DB3KrFaw=
+go.opentelemetry.io/collector/config/configauth v0.116.0/go.mod h1:2JZ5PRvNZcrBvPz1SjPWCwFQx7qXGVginb7AkFQu0iU=
+go.opentelemetry.io/collector/config/configcompression v1.22.0 h1:B4UCZjW2IVootcEL3iZFnCp8BOhLKEnWkHXHSo23ewk=
+go.opentelemetry.io/collector/config/configcompression v1.22.0/go.mod h1:LvYG00tbPTv0NOLoZN0wXq1F5thcxvukO8INq7xyfWU=
+go.opentelemetry.io/collector/config/configgrpc v0.116.0 h1:O8Y1X9wDH5dDdqDJ9kqoaMrxEauaDSJBg9aNC6QgB7g=
+go.opentelemetry.io/collector/config/configgrpc v0.116.0/go.mod h1:RPrSQrr6xhIaAK2DdcECi142NjSo0npQfVQB3nommSo=
+go.opentelemetry.io/collector/config/confighttp v0.116.0 h1:MLI88LmGzlN5D4pH6nFMg5hU3UCeTZb72iVx1lWb0c8=
+go.opentelemetry.io/collector/config/confighttp v0.116.0/go.mod h1:iJzNYVOiE1V3lpOIZIkR3JJk3aX/RGp9+SEssJMJ/bY=
+go.opentelemetry.io/collector/config/confignet v1.22.0 h1:SBEMHJZWD8J4qgFw1O+BEkXW9AWldDi6Fz5YeDeoU58=
+go.opentelemetry.io/collector/config/confignet v1.22.0/go.mod h1:ZppUH1hgUJOubawEsxsQ9MzEYFytqo2GnVSS7d4CVxc=
+go.opentelemetry.io/collector/config/configopaque v1.22.0 h1:CJgsm/Ynr2JE5Y66hYJBdybjHs20ywHtBHiV1jlI4yE=
+go.opentelemetry.io/collector/config/configopaque v1.22.0/go.mod h1:sW0t0iI/VfRL9VYX7Ik6XzVgPcR+Y5kejTLsYcMyDWs=
+go.opentelemetry.io/collector/config/configretry v1.22.0 h1:gKZeYPvCho1+pO6ePRXkloA2nKUUFnA+yBUSHfOzJPU=
+go.opentelemetry.io/collector/config/configretry v1.22.0/go.mod h1:cleBc9I0DIWpTiiHfu9v83FUaCTqcPXmebpLxjEIqro=
+go.opentelemetry.io/collector/config/configtelemetry v0.116.0 h1:Vl49VCHQwBOeMswDpFwcl2HD8e9y94xlrfII3SR2VeQ=
+go.opentelemetry.io/collector/config/configtelemetry v0.116.0/go.mod h1:SlBEwQg0qly75rXZ6W1Ig8jN25KBVBkFIIAUI1GiAAE=
+go.opentelemetry.io/collector/config/configtls v1.22.0 h1:Bu+GZMBYCufYDF72Wup+3jIGNsHuQUvZpMEPjllmYus=
+go.opentelemetry.io/collector/config/configtls v1.22.0/go.mod h1:CYFyMvbf10EoWhoFG8EYyxzFy4jcIPGIRMc8/HWLNQM=
+go.opentelemetry.io/collector/config/internal v0.116.0 h1:RG4tvK6Ozzk/SBUPyTnGD+miya0U7Z7GSpCY92qIEUY=
+go.opentelemetry.io/collector/config/internal v0.116.0/go.mod h1:OVkadRWlKAoWjHslqjWtBLAne8ceQm8WYT71ZcBWLFc=
+go.opentelemetry.io/collector/confmap v1.22.0 h1:ZKQzRuj5lKu+seKArAAZ1yPRroDPricaIVIREm/jr3w=
+go.opentelemetry.io/collector/confmap v1.22.0/go.mod h1:Rrhs+MWoaP6AswZp+ReQ2VO9dfOfcUjdjiSHBsG+nec=
+go.opentelemetry.io/collector/connector v0.116.0 h1:0Pz8RAXH1swLeA1fiyGGQWdv0Kemz4mbq+jnmd1TACE=
+go.opentelemetry.io/collector/connector v0.116.0/go.mod h1:VQiZ2yW9KhZuF0yjDbvlgrAxm7+o8KQoxHxurQWLjKU=
+go.opentelemetry.io/collector/connector/connectortest v0.116.0 h1:MdttvjqdZmp0ewW5IPCEtzzYmCqxrQxsjJ5m6gMVjzA=
+go.opentelemetry.io/collector/connector/connectortest v0.116.0/go.mod h1:ko7aPyvAVf6SvRhWGZdPXSyk+7zPmsSCcMq0gYPMMIQ=
+go.opentelemetry.io/collector/connector/xconnector v0.116.0 h1:fFJzrRco0g7HMkhnQ+ehlvicFf58vzsCLXnfuHyQM8w=
+go.opentelemetry.io/collector/connector/xconnector v0.116.0/go.mod h1:m9oCk32zX6wDciYBpy+qMQkWKtOxb0gAkEUm8+trflY=
+go.opentelemetry.io/collector/consumer v1.22.0 h1:QmfnNizyNZFt0uK3GG/EoT5h6PvZJ0dgVTc5hFEc1l0=
+go.opentelemetry.io/collector/consumer v1.22.0/go.mod h1:tiz2khNceFAPokxxfzAuFfIpShBasMT2AL2Sbc7+m0I=
+go.opentelemetry.io/collector/consumer/consumererror v0.116.0 h1:GRPnuvwxUeHKVTRzy35di8OFlxypY4YWrK+1nWMsExM=
+go.opentelemetry.io/collector/consumer/consumererror v0.116.0/go.mod h1:OvQvQ2V7sHT4Vz+1/4mwdEajWZNoFUsY1NhOM8rGvXo=
+go.opentelemetry.io/collector/consumer/consumererror/xconsumererror v0.116.0 h1:sSPDz4Qu7H86SrxtE6sQHDuWjFkTWHdKyGdsxhjZzcw=
+go.opentelemetry.io/collector/consumer/consumererror/xconsumererror v0.116.0/go.mod h1:0K7jbkCWtydn1IA3JwAUJeLVL79tJTxWoXR2gYTnCk0=
+go.opentelemetry.io/collector/consumer/consumertest v0.116.0 h1:pIVR7FtQMNAzfxBUSMEIC2dX5Lfo3O9ZBfx+sAwrrrM=
+go.opentelemetry.io/collector/consumer/consumertest v0.116.0/go.mod h1:cV3cNDiPnls5JdhnOJJFVlclrClg9kPs04cXgYP9Gmk=
+go.opentelemetry.io/collector/consumer/xconsumer v0.116.0 h1:ZrWvq7HumB0jRYmS2ztZ3hhXRNpUVBWPKMbPhsVGmZM=
+go.opentelemetry.io/collector/consumer/xconsumer v0.116.0/go.mod h1:C+VFMk8vLzPun6XK8aMts6h4RaDjmzXHCPaiOxzRQzQ=
+go.opentelemetry.io/collector/exporter v0.116.0 h1:Ps8sLPiGqJ4XGfmAVR6sxrtNBQOqAaWJvgqEOGxTvUU=
+go.opentelemetry.io/collector/exporter v0.116.0/go.mod h1:9alTWZILqY8Y3L/YLdJHFA0sx/LJDgZZjng0PHsIJkU=
+go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper v0.116.0 h1:BzMvZf9RKRcQPFjGnQSEWvfI5Er9kXfzyZ/4GHkUTa4=
+go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper v0.116.0/go.mod h1:RCbA3MNnZPTy2mViwQxMlnrq5moJ1XgetNQx2XHIpaI=
+go.opentelemetry.io/collector/exporter/exportertest v0.116.0 h1:2XEiNkBtvOq2+KzjM3OA92vlDATAi1Nn+xT9GT74QQc=
+go.opentelemetry.io/collector/exporter/exportertest v0.116.0/go.mod h1:t3CYc//OqP5pxpIN/5tYJhVP/mmtyoc5vHkBCau2IkM=
+go.opentelemetry.io/collector/exporter/otlpexporter v0.116.0 h1:2yA8fUW0R0hml2OFxfCmSdtdqpfCLqBGazJ8eALajgU=
+go.opentelemetry.io/collector/exporter/otlpexporter v0.116.0/go.mod h1:hbK5ggV3hjsjdifV7oUQKhbjg0ijgIeK9BHixTAzuYo=
+go.opentelemetry.io/collector/exporter/otlphttpexporter v0.116.0 h1:NKd1T+TfCss0Qo0RlkN2C30AO2/6ceHS0q+97hpQhI8=
+go.opentelemetry.io/collector/exporter/otlphttpexporter v0.116.0/go.mod
h1:ev8EFap0FyqEUe02sct8Aon4ctlCANcsHrIxROH1jTU= +go.opentelemetry.io/collector/exporter/xexporter v0.116.0 h1:z97GOTSJu4qMkp21yeUWAo6gskMEJi2j8vdnakLcKgI= +go.opentelemetry.io/collector/exporter/xexporter v0.116.0/go.mod h1:9wWrMBpX6/s3dSx4mLf+QeEA8ZpYts4GdQkv4BOXEEg= +go.opentelemetry.io/collector/extension v0.116.0 h1:/PYrsAqb87XlC1Cra7I3mU6CDs+TAjqj7LO/9tXX9qk= +go.opentelemetry.io/collector/extension v0.116.0/go.mod h1:OF8pL6ioyT+f2V0CsEaM1EAmqaEMNCIgw7DS4agcOcc= +go.opentelemetry.io/collector/extension/auth v0.116.0 h1:oolKkFBIS4vhJ4ZWTD4Bp+35ATBf7dBsln63RKWAJm0= +go.opentelemetry.io/collector/extension/auth v0.116.0/go.mod h1:3WeZgIiiP7wcB+tID4G3ml6J/R2oJ359PxQh/pUFnSk= +go.opentelemetry.io/collector/extension/auth/authtest v0.116.0 h1:KcMvjb4R0wpkmmi7EOk7zT5sgl7uwXY/VQfMEUVYcLM= +go.opentelemetry.io/collector/extension/auth/authtest v0.116.0/go.mod h1:zyWTdh+CUKh7BbszTWUWp806NA6EDyix77O4Q6XaOA8= +go.opentelemetry.io/collector/extension/experimental/storage v0.116.0 h1:Pb0ljtJMtsdiJoLOWbtVIYAViLkcZUF3V9MUNHyzn1c= +go.opentelemetry.io/collector/extension/experimental/storage v0.116.0/go.mod h1:AQgDz5IJB4d9PExwV6RTlYkiVGp05/+/TAR9gCJpPJA= +go.opentelemetry.io/collector/extension/extensioncapabilities v0.116.0 h1:bXDgoiDiwFfrO5S6q1Wpe0su303CW4Qxthoy4CxG6eg= +go.opentelemetry.io/collector/extension/extensioncapabilities v0.116.0/go.mod h1:uFZlAPxk2+s6zk1W+u22DgxxSDugWy0QaY5kQzjNy7Q= +go.opentelemetry.io/collector/extension/extensiontest v0.116.0 h1:NEPis256V4pFVocdZH6gOdsGDueyOe9vvx/BE9QxMf0= +go.opentelemetry.io/collector/extension/extensiontest v0.116.0/go.mod h1:rpb4W2OimGXY5Oalk/NCemoPhgM03JZxBjXvlIveH5I= +go.opentelemetry.io/collector/extension/zpagesextension v0.116.0 h1:D0VY/ESPwkREfMaqJleGVTiCYoCs3MDipU93ywaIn68= +go.opentelemetry.io/collector/extension/zpagesextension v0.116.0/go.mod h1:MqkQSYKarMrjVSyGrAes1FBcsW6HDDPj/ouvcDvSjfk= +go.opentelemetry.io/collector/featuregate v1.22.0 h1:1TUcdqA5VpEsX1Lrr6GG15CptZxDXxiu5AXgwpeNSR4= +go.opentelemetry.io/collector/featuregate v1.22.0/go.mod h1:3GaXqflNDVwWndNGBJ1+XJFy3Fv/XrFgjMN60N3z7yg= +go.opentelemetry.io/collector/internal/fanoutconsumer v0.116.0 h1:KKFb3u8iLk4V5GYz7eVpw8QgEdob5koX6GNEbaoDQJY= +go.opentelemetry.io/collector/internal/fanoutconsumer v0.116.0/go.mod h1:YyWHUWeSnRmy2CF7kPHpwJ6qkVjufbLZ8PYuY0Ph0o8= +go.opentelemetry.io/collector/internal/sharedcomponent v0.116.0 h1:5QhpnwgogQhBrcsOjk1Yhkugopj043XlSDB1lpHKCGo= +go.opentelemetry.io/collector/internal/sharedcomponent v0.116.0/go.mod h1:wMQM1Sqbj6J6jNVBb5hawkCfmqrLJ0hAeDKkalUfrEY= +go.opentelemetry.io/collector/otelcol v0.116.0 h1:8NoPEY2uAemwNzUcMxiZuuI8w/Ff+0p1DXXk6NVEGlk= +go.opentelemetry.io/collector/otelcol v0.116.0/go.mod h1:c/EhsBdHmC44SxhWMi7pTlYB+I6GBSJNBsj9NCwXMxM= +go.opentelemetry.io/collector/pdata v1.22.0 h1:3yhjL46NLdTMoP8rkkcE9B0pzjf2973crn0KKhX5UrI= +go.opentelemetry.io/collector/pdata v1.22.0/go.mod h1:nLLf6uDg8Kn5g3WNZwGyu8+kf77SwOqQvMTb5AXEbEY= +go.opentelemetry.io/collector/pdata/pprofile v0.116.0 h1:iE6lqkO7Hi6lTIIml1RI7yQ55CKqW12R2qHinwF5Zuk= +go.opentelemetry.io/collector/pdata/pprofile v0.116.0/go.mod h1:xQiPpjzIiXRFb+1fPxUy/3ygEZgo0Bu/xmLKOWu8vMQ= +go.opentelemetry.io/collector/pdata/testdata v0.116.0 h1:zmn1zpeX2BvzL6vt2dBF4OuAyFF2ml/OXcqflNgFiP0= +go.opentelemetry.io/collector/pdata/testdata v0.116.0/go.mod h1:ytWzICFN4XTDP6o65B4+Ed52JGdqgk9B8CpLHCeCpMo= +go.opentelemetry.io/collector/pipeline v0.116.0 h1:o8eKEuWEszmRpfShy7ElBoQ3Jo6kCi9ucm3yRgdNb9s= +go.opentelemetry.io/collector/pipeline v0.116.0/go.mod 
h1:qE3DmoB05AW0C3lmPvdxZqd/H4po84NPzd5MrqgtL74= +go.opentelemetry.io/collector/pipeline/xpipeline v0.116.0 h1:PFNy+Il/kqPdPL9rhG+1kG63Aauya+FhBKGXoB6ivxU= +go.opentelemetry.io/collector/pipeline/xpipeline v0.116.0/go.mod h1:bVn9V4TGyeXi58/JDkeXCuKtc+V+qcOoTl8hNpV0qa8= +go.opentelemetry.io/collector/processor v0.116.0 h1:Kyu4tPzTdWNHtZjcxvI/bGNAgyv8L8Kem2r/Mk4IDAw= +go.opentelemetry.io/collector/processor v0.116.0/go.mod h1:+/Ugy48RAxlZEXmN2cw51W8t5wdHS9No+GAoP+moskk= +go.opentelemetry.io/collector/processor/processortest v0.116.0 h1:+IqNEVEE0E2MsO2g7+Y/9dz35sDuvAXRXrLts9NdXrA= +go.opentelemetry.io/collector/processor/processortest v0.116.0/go.mod h1:DLaQDBxzgeeaUO0ULMn/efos9PmHZkmYCHuxwCsiVHI= +go.opentelemetry.io/collector/processor/xprocessor v0.116.0 h1:iin/UwuWvSLB7ZNfINFUYbZ5lxIi1NjZ2brkyyFdiRA= +go.opentelemetry.io/collector/processor/xprocessor v0.116.0/go.mod h1:cnA43/XpKDbaOmd8buqKp/LGJ2l/OoCqbR//u5DMfn8= +go.opentelemetry.io/collector/receiver v0.116.0 h1:voiBluWLwe4lbyLVwxloK6CudqqszWF+bgYKHuxnETU= +go.opentelemetry.io/collector/receiver v0.116.0/go.mod h1:zb6m8l+knUuN62ASCDqQPIm9punK8PEX1mFrF/yzMI8= +go.opentelemetry.io/collector/receiver/otlpreceiver v0.116.0 h1:81u+wJsQp9VfrAdVpVvcV31YAs8qsZp/UnMmG9YCJ0A= +go.opentelemetry.io/collector/receiver/otlpreceiver v0.116.0/go.mod h1:JXIwjUyh3Q+gNvr7YE937se4/E4UJ/oz5z9xbJeVl3M= +go.opentelemetry.io/collector/receiver/receivertest v0.116.0 h1:ZF4QVcots0OUiutblkyPR02pc+g7v1QaJSFW8tOzHoQ= +go.opentelemetry.io/collector/receiver/receivertest v0.116.0/go.mod h1:7GGvtHhW3o6457/wGtSWXJtCtlW6VGFUZSlf6wboNTw= +go.opentelemetry.io/collector/receiver/xreceiver v0.116.0 h1:Kc+ixqgMjU2sHhzNrFn5TttVNiJlJwTLL3sQrM9uH6s= +go.opentelemetry.io/collector/receiver/xreceiver v0.116.0/go.mod h1:H2YGSNFoMbWMIDvB8tzkReHSVqvogihjtet+ppHfYv8= +go.opentelemetry.io/collector/semconv v0.116.0 h1:63xCZomsKJAWmKGWD3lnORiE3WKW6AO4LjnzcHzGx3Y= +go.opentelemetry.io/collector/semconv v0.116.0/go.mod h1:N6XE8Q0JKgBN2fAhkUQtqK9LT7rEGR6+Wu/Rtbal1iI= +go.opentelemetry.io/collector/service v0.116.0 h1:1rFmax3ILOlQq4bW//0VKRcG/HmN2rxrgfTybn6bisU= +go.opentelemetry.io/collector/service v0.116.0/go.mod h1:CzQzj72mWr5arag+q8uUWGRuUkenNo5Gw9oA97SFZ78= +go.opentelemetry.io/contrib/bridges/otelzap v0.8.0 h1:4jqXEd0FGULFBy1bF1ledBePc0Ssu8YVddTgr8BXDTc= +go.opentelemetry.io/contrib/bridges/otelzap v0.8.0/go.mod h1:nrDogEQCtEOQ4jAiN4uHIE0BqicDF9bMyepgK1pIbP4= +go.opentelemetry.io/contrib/bridges/prometheus v0.58.0 h1:gQFwWiqm4JUvOjpdmyU0di+2pVQ8QNpk1Ak/54Y6NcY= +go.opentelemetry.io/contrib/bridges/prometheus v0.58.0/go.mod h1:CNyFi9PuvHtEJNmMFHaXZMuA4XmgRXIqpFcHdqzLvVU= +go.opentelemetry.io/contrib/config v0.10.0 h1:2JknAzMaYjxrHkTnZh3eOme/Y2P5eHE2SWfhfV6Xd6c= +go.opentelemetry.io/contrib/config v0.10.0/go.mod h1:aND2M6/KfNkntI5cyvHriR/zvZgPf8j9yETdSmvpfmc= +go.opentelemetry.io/contrib/exporters/autoexport v0.58.0 h1:qVsDVgZd/bC6ZKDOHSjILpm0T/BWvASC9cQU3GYga78= +go.opentelemetry.io/contrib/exporters/autoexport v0.58.0/go.mod h1:bAv7mY+5qTsFPFaRpr75vDOocX09I36QH4Rg0slEG/U= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 h1:PS8wXpbyaDJQ2VDHHncMe9Vct0Zn1fEjpsjrLxGJoSc= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0/go.mod h1:HDBUsEjOuRC0EzKZ1bSaRGZWUBAzo+MhAcUUORSr4D0= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q= 
+go.opentelemetry.io/contrib/propagators/b3 v1.33.0 h1:ig/IsHyyoQ1F1d6FUDIIW5oYpsuTVtN16AyGOgdjAHQ= +go.opentelemetry.io/contrib/propagators/b3 v1.33.0/go.mod h1:EsVYoNy+Eol5znb6wwN3XQTILyjl040gUpEnUSNZfsk= +go.opentelemetry.io/contrib/zpages v0.56.0 h1:W7vP6s3juzL5KiHpr41zLNmsJ0QAZudYu8ay0zGAoko= +go.opentelemetry.io/contrib/zpages v0.56.0/go.mod h1:IxPRP4TYHw9jLeaEOSDIiA9zmyJNZNO6sbW55iMvSXs= +go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw= +go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I= +go.opentelemetry.io/otel/bridge/opencensus v1.33.0 h1:sGcK0Wif7sPG3GZG9z8b+tpRZiUHwv27WgmsaZ1wgzM= +go.opentelemetry.io/otel/bridge/opencensus v1.33.0/go.mod h1:LXJy68HiJRu+2yJmVnbDn/F9JS9Kxfsj5WpA5t5NfRY= +go.opentelemetry.io/otel/bridge/opentracing v1.33.0 h1:eH88qvKdY7ns7Xu6WlJBQNOzZ3MVvBR6tEl2euaYS9w= +go.opentelemetry.io/otel/bridge/opentracing v1.33.0/go.mod h1:FNai/nhRSn/kHyv+V1zaf/30BU8hO/DXo0MvV0PaUS8= go.opentelemetry.io/otel/exporters/jaeger v1.17.0 h1:D7UpUy2Xc2wsi1Ras6V40q806WM07rqoCWzXu7Sqy+4= go.opentelemetry.io/otel/exporters/jaeger v1.17.0/go.mod h1:nPCqOnEH9rNLKqH/+rrUjiMzHJdV1BlpKcTwRTyKkKI= -go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.4.0 h1:zBPZAISA9NOc5cE8zydqDiS0itvg/P/0Hn9m72a5gvM= -go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.4.0/go.mod h1:gcj2fFjEsqpV3fXuzAA+0Ze1p2/4MJ4T7d77AmkvueQ= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.28.0 h1:U2guen0GhqH8o/G2un8f/aG/y++OuW6MyCo6hT9prXk= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.28.0/go.mod h1:yeGZANgEcpdx/WK0IvvRFC+2oLiMS2u4L/0Rj2M2Qr0= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.28.0 h1:aLmmtjRke7LPDQ3lvpFz+kNEH43faFhzW7v8BFIEydg= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.28.0/go.mod h1:TC1pyCt6G9Sjb4bQpShH+P5R53pO6ZuGnHuuln9xMeE= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 h1:R3X6ZXmNPRR8ul6i3WgFURCHzaXjHdm0karRG/+dj3s= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0/go.mod h1:QWFXnDavXWwMx2EEcZsf3yxgEKAqsxQ+Syjp+seyInw= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0 h1:j9+03ymgYhPKmeXGk5Zu+cIZOlVzd9Zv7QIiyItjFBU= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0/go.mod h1:Y5+XiUG4Emn1hTfciPzGPJaSI+RpDts6BnCIir0SLqk= -go.opentelemetry.io/otel/exporters/prometheus v0.50.0 h1:2Ewsda6hejmbhGFyUvWZjUThC98Cf8Zy6g0zkIimOng= -go.opentelemetry.io/otel/exporters/prometheus v0.50.0/go.mod h1:pMm5PkUo5YwbLiuEf7t2xg4wbP0/eSJrMxIMxKosynY= -go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.4.0 h1:0MH3f8lZrflbUWXVxyBg/zviDFdGE062uKh5+fu8Vv0= -go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.4.0/go.mod h1:Vh68vYiHY5mPdekTr0ox0sALsqjoVy0w3Os278yX5SQ= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.28.0 h1:BJee2iLkfRfl9lc7aFmBwkWxY/RI1RDdXepSF6y8TPE= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.28.0/go.mod h1:DIzlHs3DRscCIBU3Y9YSzPfScwnYnzfnCd4g8zA7bZc= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0 h1:EVSnY9JbEEW92bEkIYOVMw4q1WJxIAGoFTrtYOzWuRQ= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0/go.mod h1:Ea1N1QQryNXpCD0I1fdLibBAIpQuBkznMmkdKrapk1Y= 
-go.opentelemetry.io/otel/log v0.4.0 h1:/vZ+3Utqh18e8TPjuc3ecg284078KWrR8BRz+PQAj3o= -go.opentelemetry.io/otel/log v0.4.0/go.mod h1:DhGnQvky7pHy82MIRV43iXh3FlKN8UUKftn0KbLOq6I= -go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE= -go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY= -go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk= -go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0= -go.opentelemetry.io/otel/sdk/log v0.4.0 h1:1mMI22L82zLqf6KtkjrRy5BbagOTWdJsqMY/HSqILAA= -go.opentelemetry.io/otel/sdk/log v0.4.0/go.mod h1:AYJ9FVF0hNOgAVzUG/ybg/QttnXhUePWAupmCqtdESo= -go.opentelemetry.io/otel/sdk/metric v1.28.0 h1:OkuaKgKrgAbYrrY0t92c+cC+2F6hsFNnCQArXCKlg08= -go.opentelemetry.io/otel/sdk/metric v1.28.0/go.mod h1:cWPjykihLAPvXKi4iZc1dpER3Jdq2Z0YLse3moQUCpg= -go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys= -go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A= -go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= -go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.9.0 h1:gA2gh+3B3NDvRFP30Ufh7CC3TtJRbUSf2TTD0LbCagw= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.9.0/go.mod h1:smRTR+02OtrVGjvWE1sQxhuazozKc/BXvvqqnmOxy+s= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.9.0 h1:Za0Z/j9Gf3Z9DKQ1choU9xI2noCxlkcyFFP2Ob3miEQ= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.9.0/go.mod h1:jMRB8N75meTNjDFQyJBA/2Z9en21CsxwMctn08NHY6c= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.33.0 h1:7F29RDmnlqk6B5d+sUqemt8TBfDqxryYW5gX6L74RFA= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.33.0/go.mod h1:ZiGDq7xwDMKmWDrN1XsXAj0iC7hns+2DhxBFSncNHSE= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.33.0 h1:bSjzTvsXZbLSWU8hnZXcKmEVaJjjnandxD0PxThhVU8= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.33.0/go.mod h1:aj2rilHL8WjXY1I5V+ra+z8FELtk681deydgYT8ikxU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 h1:Vh5HayB/0HHfOQA7Ctx69E/Y/DcQSMPpKANYVMQ7fBA= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0/go.mod h1:cpgtDBaqD/6ok/UG0jT15/uKjAY8mRA53diogHBg3UI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 h1:5pojmb1U1AogINhN3SurB+zm/nIcusopeBNp42f45QM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0/go.mod h1:57gTHJSE5S1tqg+EKsLPlTWhpHMsWlVmer+LA926XiA= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0 h1:wpMfgF8E1rkrT1Z6meFh1NDtownE9Ii3n3X2GJYjsaU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0/go.mod h1:wAy0T/dUbs468uOlkT31xjvqQgEVXv58BRFWEgn5v/0= +go.opentelemetry.io/otel/exporters/prometheus v0.55.0 h1:sSPw658Lk2NWAv74lkD3B/RSDb+xRFx46GjkrL3VUZo= +go.opentelemetry.io/otel/exporters/prometheus v0.55.0/go.mod h1:nC00vyCmQixoeaxF6KNyP42II/RHa9UdruK02qBmHvI= +go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.9.0 h1:iI15wfQb5ZtAVTdS5WROxpYmw6Kjez3hT9SuzXhrgGQ= +go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.9.0/go.mod h1:yepwlNzVVxHWR5ugHIrll+euPQPq4pvysHTDr/daV9o= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.33.0 h1:FiOTYABOX4tdzi8A0+mtzcsTmi6WBOxk66u0f1Mj9Gs= 
+go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.33.0/go.mod h1:xyo5rS8DgzV0Jtsht+LCEMwyiDbjpsxBpWETwFRF0/4= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.33.0 h1:W5AWUn/IVe8RFb5pZx1Uh9Laf/4+Qmm4kJL5zPuvR+0= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.33.0/go.mod h1:mzKxJywMNBdEX8TSJais3NnsVZUaJ+bAy6UxPTng2vk= +go.opentelemetry.io/otel/log v0.9.0 h1:0OiWRefqJ2QszpCiqwGO0u9ajMPe17q6IscQvvp3czY= +go.opentelemetry.io/otel/log v0.9.0/go.mod h1:WPP4OJ+RBkQ416jrFCQFuFKtXKD6mOoYCQm6ykK8VaU= +go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ= +go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M= +go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM= +go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM= +go.opentelemetry.io/otel/sdk/log v0.9.0 h1:YPCi6W1Eg0vwT/XJWsv2/PaQ2nyAJYuF7UUjQSBe3bc= +go.opentelemetry.io/otel/sdk/log v0.9.0/go.mod h1:y0HdrOz7OkXQBuc2yjiqnEHc+CRKeVhRE3hx4RwTmV4= +go.opentelemetry.io/otel/sdk/metric v1.33.0 h1:Gs5VK9/WUJhNXZgn8MR6ITatvAmKeIuCtNbsP3JkNqU= +go.opentelemetry.io/otel/sdk/metric v1.33.0/go.mod h1:dL5ykHZmm1B1nVRk9dDjChwDmt81MjVp3gLkQRwKf/Q= +go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s= +go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck= +go.opentelemetry.io/proto/otlp v1.4.0 h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg= +go.opentelemetry.io/proto/otlp v1.4.0/go.mod h1:PPBWZIP98o2ElSqI35IHfu7hIhSwvc5N38Jw8pXuGFY= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= @@ -1108,8 +1155,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM= -golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc= +golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f h1:XdNn9LlyWAhLVp6P/i8QYBW+hlyhrhei9uErw2B5GJo= +golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f/go.mod h1:D5SMRVC3C2/4+F/DB1wZsLRnSNimn2Sp/NPsCrsv8ak= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -1133,8 +1180,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8= -golang.org/x/mod v0.19.0/go.mod 
h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= +golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1175,16 +1222,16 @@ golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.31.0 h1:68CPQngjLL0r2AlUKiSxtQFKvzRVbnzLwMUn5SzcLHo= -golang.org/x/net v0.31.0/go.mod h1:P4fl1q7dY2hnZFxEk4pPSkDHF+QqjitcnDjUQyMM+pM= +golang.org/x/net v0.32.0 h1:ZqPmj8Kzc+Y6e0+skZsuACbx+wzMgo5MQsJh9Qd6aYI= +golang.org/x/net v0.32.0/go.mod h1:CwU0IoeOlnQQWJ6ioyFrfRuomB8GKF6KbYXZVyeXNfs= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= -golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.24.0 h1:KTBBxWqUa0ykRPLtV69rRto9TLXcqYkeswu48x/gvNE= +golang.org/x/oauth2 v0.24.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1265,9 +1312,6 @@ golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -1339,16 +1383,16 @@ golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod 
h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg= -golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI= +golang.org/x/tools v0.27.0 h1:qEKojBykQkQ4EynWy4S8Weg69NumxKdn40Fce3uc/8o= +golang.org/x/tools v0.27.0/go.mod h1:sUi0ZgbwW9ZPAq26Ekut+weQPR5eIM6GQLQ1Yjm1H0Q= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= -gonum.org/v1/gonum v0.15.0 h1:2lYxjRbTYyxkJxlhC+LvJIx3SsANPdRybu1tGj9/OrQ= -gonum.org/v1/gonum v0.15.0/go.mod h1:xzZVBJBtS+Mz4q0Yl2LJTk+OxOg4jiXZ7qBoM0uISGo= +gonum.org/v1/gonum v0.15.1 h1:FNy7N6OUZVUaWG9pTiD+jlhdQ3lMP+/LcTpJ6+a8sQ0= +gonum.org/v1/gonum v0.15.1/go.mod h1:eZTZuRFrzu5pcyjN5wJhcIhnUdNijYxX1T2IcrOGY0o= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -1409,10 +1453,10 @@ google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= google.golang.org/genproto v0.0.0-20240708141625-4ad9e859172b h1:dSTjko30weBaMj3eERKc0ZVXW4GudCswM3m+P++ukU0= google.golang.org/genproto v0.0.0-20240708141625-4ad9e859172b/go.mod h1:FfBgJBJg9GcpPvKIuHSZ/aE1g2ecGL74upMzGZjiGEY= -google.golang.org/genproto/googleapis/api v0.0.0-20240711142825-46eb208f015d h1:kHjw/5UfflP/L5EbledDrcG4C2597RtymmGRZvHiCuY= -google.golang.org/genproto/googleapis/api v0.0.0-20240711142825-46eb208f015d/go.mod h1:mw8MG/Qz5wfgYr6VqVCiZcHe/GJEfI+oGGDCohaVgB0= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240711142825-46eb208f015d h1:JU0iKnSg02Gmb5ZdV8nYsKEKsP6o/FGVWTrw4i1DA9A= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240711142825-46eb208f015d/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= +google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 h1:CkkIfIt50+lT6NHAVoRYEyAvQGFM7xEwXUUywFvEb3Q= +google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576/go.mod h1:1R3kvZ1dtP3+4p4d3G8uJ8rFk/fWlScl38vanWACI08= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 h1:8ZmaLZE4XWrtU3MyClkYqqtl6Oegr3235h7jxsDyqCY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU= google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= @@ -1432,8 +1476,8 @@ google.golang.org/grpc 
@@ -1432,8 +1476,8 @@ google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM
 google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
 google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
 google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
-google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc=
-google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ=
+google.golang.org/grpc v1.68.1 h1:oI5oTa11+ng8r8XMMN7jAOmWfPZWbYpCFaMUTACxkM0=
+google.golang.org/grpc v1.68.1/go.mod h1:+q1XYFJjShcqn0QZHvCyeR4CXPA+llXIeUIfIe00waw=
 google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
 google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
 google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
diff --git a/integration/bench/config.yaml b/integration/bench/config.yaml
index 56f7d085c21..6a0244744a0 100644
--- a/integration/bench/config.yaml
+++ b/integration/bench/config.yaml
@@ -7,6 +7,7 @@ server:
 distributor:
   receivers:
     zipkin:
+      endpoint: "tempo:9411"
 
 ingester:
   lifecycler:
@@ -34,4 +35,4 @@ storage:
       queue_depth: 100
 
 overrides:
-  max_traces_per_user: 50000
\ No newline at end of file
+  max_traces_per_user: 50000
diff --git a/integration/e2e/config-cross-cluster-a.yaml b/integration/e2e/config-cross-cluster-a.yaml
index bfc8b023dd1..141a40af0df 100644
--- a/integration/e2e/config-cross-cluster-a.yaml
+++ b/integration/e2e/config-cross-cluster-a.yaml
@@ -6,6 +6,7 @@ distributor:
     jaeger:
       protocols:
         grpc:
+          endpoint: "distributor-a:14250"
 
 ingester:
   lifecycler:
@@ -42,4 +43,4 @@ memberlist:
 querier:
   secondary_ingester_ring: "ring-b"
   frontend_worker:
-    frontend_address: tempo_active_active-query-frontend-a:9095
\ No newline at end of file
+    frontend_address: tempo_active_active-query-frontend-a:9095
diff --git a/integration/e2e/config-cross-cluster-b.yaml b/integration/e2e/config-cross-cluster-b.yaml
index 3e363b6e5ce..0401722873f 100644
--- a/integration/e2e/config-cross-cluster-b.yaml
+++ b/integration/e2e/config-cross-cluster-b.yaml
@@ -6,6 +6,7 @@ distributor:
     jaeger:
       protocols:
         grpc:
+          endpoint: "distributor-b:14250"
 
 ingester:
   lifecycler:
@@ -42,4 +43,4 @@ memberlist:
 querier:
   secondary_ingester_ring: "ring-a"
   frontend_worker:
-    frontend_address: tempo_active_active-query-frontend-b:9095
\ No newline at end of file
+    frontend_address: tempo_active_active-query-frontend-b:9095
diff --git a/integration/e2e/config-encodings.tmpl.yaml b/integration/e2e/config-encodings.tmpl.yaml
index eb5f54e53a5..6b54f16abd3 100644
--- a/integration/e2e/config-encodings.tmpl.yaml
+++ b/integration/e2e/config-encodings.tmpl.yaml
@@ -14,10 +14,13 @@ distributor:
     jaeger:
       protocols:
         grpc:
+          endpoint: "tempo:14250"
     otlp:
       protocols:
         grpc:
+          endpoint: "tempo:4317"
     zipkin:
+      endpoint: "tempo:9411"
   log_received_spans:
     enabled: true
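The `endpoint:` additions above, and in the configs that follow, all apply one pattern: newer collector receivers no longer default to binding on `0.0.0.0` (the upstream `component.UseLocalHostAsDefaultHost` change), so each test config now pins an explicit, container-reachable address. An illustrative sketch of the shape, where `tempo` stands in for whatever hostname the container is reachable at:

```yaml
# Illustrative only; hostnames are assumptions, ports are the receivers' defaults.
distributor:
  receivers:
    jaeger:
      protocols:
        grpc:
          endpoint: "tempo:14250"
    otlp:
      protocols:
        grpc:
          endpoint: "tempo:4317"
    zipkin:
      endpoint: "tempo:9411"
```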
diff --git a/integration/e2e/config-https.yaml b/integration/e2e/config-https.yaml
index 70504884488..4c050ab4fad 100644
--- a/integration/e2e/config-https.yaml
+++ b/integration/e2e/config-https.yaml
@@ -25,6 +25,7 @@ distributor:
     jaeger:
       protocols:
         grpc:
+          endpoint: "tempo:14250"
 
 ingester:
   lifecycler:
@@ -76,4 +77,4 @@ querier:
     tls_enabled: true
     tls_insecure_skip_verify: true
    tls_cipher_suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384
-    tls_min_version: VersionTLS12
\ No newline at end of file
+    tls_min_version: VersionTLS12
diff --git a/integration/e2e/config-limits-429.yaml b/integration/e2e/config-limits-429.yaml
index 4b3f2765483..32825ac4d24 100644
--- a/integration/e2e/config-limits-429.yaml
+++ b/integration/e2e/config-limits-429.yaml
@@ -9,6 +9,7 @@ distributor:
     jaeger:
      protocols:
         grpc:
+          endpoint: "tempo:14250"
 
 overrides:
   defaults:
diff --git a/integration/e2e/config-limits-partial-success.yaml b/integration/e2e/config-limits-partial-success.yaml
index fa4081a9364..cadd1faf844 100644
--- a/integration/e2e/config-limits-partial-success.yaml
+++ b/integration/e2e/config-limits-partial-success.yaml
@@ -8,6 +8,7 @@ distributor:
     otlp:
       protocols:
         grpc:
+          endpoint: "tempo:4317"
 
 overrides:
   max_bytes_per_trace: 600
diff --git a/integration/e2e/config-limits-query.yaml b/integration/e2e/config-limits-query.yaml
index 8f313b35f18..1574d11dd7d 100644
--- a/integration/e2e/config-limits-query.yaml
+++ b/integration/e2e/config-limits-query.yaml
@@ -8,6 +8,7 @@ distributor:
     jaeger:
       protocols:
         grpc:
+          endpoint: "tempo:14250"
 
 compactor:
   compaction:
@@ -46,4 +47,4 @@ storage:
       path: /var/tempo/wal
     pool:
       max_workers: 10
-      queue_depth: 100
\ No newline at end of file
+      queue_depth: 100
diff --git a/integration/e2e/config-limits.yaml b/integration/e2e/config-limits.yaml
index e46e57866a7..d5d97904d0e 100644
--- a/integration/e2e/config-limits.yaml
+++ b/integration/e2e/config-limits.yaml
@@ -9,10 +9,13 @@ distributor:
     jaeger:
       protocols:
         grpc:
+          endpoint: "tempo:14250"
     otlp:
       protocols:
         grpc:
+          endpoint: "tempo:4317"
         http:
+          endpoint: "tempo:4318"
 
 overrides:
   defaults:
diff --git a/integration/e2e/config-metrics-generator-targetinfo.yaml b/integration/e2e/config-metrics-generator-targetinfo.yaml
index cf54bd1dbcf..c496251a800 100644
--- a/integration/e2e/config-metrics-generator-targetinfo.yaml
+++ b/integration/e2e/config-metrics-generator-targetinfo.yaml
@@ -6,6 +6,7 @@ distributor:
     jaeger:
       protocols:
         grpc:
+          endpoint: "distributor:14250"
   log_received_spans:
     enabled: true
@@ -48,4 +49,4 @@ overrides:
       max_active_series: 1000
       processor:
         span_metrics:
-          enable_target_info: true # seconds
\ No newline at end of file
+          enable_target_info: true # seconds
diff --git a/integration/e2e/config-metrics-generator.yaml b/integration/e2e/config-metrics-generator.yaml
index 06dfc5ff39d..ede1402c61f 100644
--- a/integration/e2e/config-metrics-generator.yaml
+++ b/integration/e2e/config-metrics-generator.yaml
@@ -6,6 +6,7 @@ distributor:
     jaeger:
       protocols:
         grpc:
+          endpoint: "distributor:14250"
   log_received_spans:
     enabled: true
@@ -45,4 +46,4 @@ overrides:
   defaults:
     metrics_generator:
       processors: [service-graphs, span-metrics]
-      max_active_series: 1000
\ No newline at end of file
+      max_active_series: 1000
diff --git a/integration/e2e/config-multi-tenant-local.yaml b/integration/e2e/config-multi-tenant-local.yaml
index 4d9736b301b..4ded5c61939 100644
--- a/integration/e2e/config-multi-tenant-local.yaml
+++ b/integration/e2e/config-multi-tenant-local.yaml
@@ -16,10 +16,13 @@ distributor:
     jaeger:
       protocols:
         grpc:
+          endpoint: "tempo:14250"
     otlp:
       protocols:
         grpc:
+          endpoint: "tempo:4317"
     zipkin:
+      endpoint: "tempo:9411"
   log_received_spans:
     enabled: true
diff --git a/integration/e2e/config-plugin-test.yaml b/integration/e2e/config-plugin-test.yaml
index 4d69e3cf42f..6054dc05a21 100644
--- a/integration/e2e/config-plugin-test.yaml
+++ b/integration/e2e/config-plugin-test.yaml
@@ -13,10 +13,13 @@ distributor:
     jaeger:
       protocols:
         grpc:
+          endpoint: "tempo:14250"
     otlp:
       protocols:
         grpc:
+          endpoint: "tempo:4317"
     zipkin:
+      endpoint: "tempo:9411"
   log_received_spans:
     enabled: true
diff --git a/integration/e2e/config-query-range.yaml b/integration/e2e/config-query-range.yaml
index faa5e5be6e0..286cf3a3627 100644
--- a/integration/e2e/config-query-range.yaml
+++ b/integration/e2e/config-query-range.yaml
@@ -15,10 +15,13 @@ distributor:
     jaeger:
       protocols:
         grpc:
+          endpoint: "tempo:14250"
     otlp:
       protocols:
         grpc:
+          endpoint: "tempo:4317"
     zipkin:
+      endpoint: "tempo:9411"
   log_received_spans:
     enabled: true
diff --git a/integration/e2e/deployments/config-all-in-one-azurite.yaml b/integration/e2e/deployments/config-all-in-one-azurite.yaml
index 3def0ff6f1b..c284699bd6c 100644
--- a/integration/e2e/deployments/config-all-in-one-azurite.yaml
+++ b/integration/e2e/deployments/config-all-in-one-azurite.yaml
@@ -14,7 +14,7 @@ distributor:
     jaeger:
       protocols:
         grpc:
-
+          endpoint: "tempo:14250"
 ingester:
   lifecycler:
     address: 127.0.0.1
diff --git a/integration/e2e/deployments/config-all-in-one-gcs.yaml b/integration/e2e/deployments/config-all-in-one-gcs.yaml
index efffae085c9..70028bc20a7 100644
--- a/integration/e2e/deployments/config-all-in-one-gcs.yaml
+++ b/integration/e2e/deployments/config-all-in-one-gcs.yaml
@@ -14,6 +14,7 @@ distributor:
     jaeger:
       protocols:
         grpc:
+          endpoint: "tempo:14250"
 
 ingester:
   lifecycler:
diff --git a/integration/e2e/deployments/config-all-in-one-local.yaml b/integration/e2e/deployments/config-all-in-one-local.yaml
index c0133165d7b..2f7e5910216 100644
--- a/integration/e2e/deployments/config-all-in-one-local.yaml
+++ b/integration/e2e/deployments/config-all-in-one-local.yaml
@@ -15,10 +15,13 @@ distributor:
     jaeger:
       protocols:
         grpc:
+          endpoint: "tempo:14250"
     otlp:
       protocols:
         grpc:
+          endpoint: "tempo:4317"
     zipkin:
+      endpoint: "tempo:9411"
   log_received_spans:
     enabled: true
   log_discarded_spans:
diff --git a/integration/e2e/deployments/config-all-in-one-s3.yaml b/integration/e2e/deployments/config-all-in-one-s3.yaml
index a429b183628..2f7d19035b9 100644
--- a/integration/e2e/deployments/config-all-in-one-s3.yaml
+++ b/integration/e2e/deployments/config-all-in-one-s3.yaml
@@ -14,6 +14,7 @@ distributor:
     jaeger:
       protocols:
         grpc:
+          endpoint: "tempo:14250"
 
 ingester:
   lifecycler:
diff --git a/integration/e2e/deployments/config-microservices.tmpl.yaml b/integration/e2e/deployments/config-microservices.tmpl.yaml
index b6860dbb29e..1b9e2eef330 100644
--- a/integration/e2e/deployments/config-microservices.tmpl.yaml
+++ b/integration/e2e/deployments/config-microservices.tmpl.yaml
@@ -6,6 +6,7 @@ distributor:
     jaeger:
       protocols:
         grpc:
+          endpoint: "distributor:14250"
 
 ingester:
   lifecycler:
@@ -45,4 +46,4 @@ memberlist:
 
 querier:
   frontend_worker:
-    frontend_address: tempo_e2e-query-frontend:9095
\ No newline at end of file
+    frontend_address: tempo_e2e-query-frontend:9095
diff --git a/integration/e2e/deployments/config-scalable-single-binary.yaml b/integration/e2e/deployments/config-scalable-single-binary.yaml
index f50568fda7e..5ccdd15ca80 100644
--- a/integration/e2e/deployments/config-scalable-single-binary.yaml
+++ b/integration/e2e/deployments/config-scalable-single-binary.yaml
@@ -6,6 +6,7 @@ distributor:
     jaeger:
       protocols:
         grpc:
+          endpoint: "0.0.0.0:14250"
 
 ingester:
   lifecycler:
@@ -45,4 +46,4 @@ memberlist:
 
 querier:
   frontend_worker:
-    frontend_address: tempo-1:9095
\ No newline at end of file
+    frontend_address: tempo-1:9095
diff --git a/integration/e2e/receivers_test.go b/integration/e2e/receivers_test.go
index 5822dc39190..552509eb966 100644
--- a/integration/e2e/receivers_test.go
+++ b/integration/e2e/receivers_test.go
@@ -7,7 +7,6 @@ import (
 
 	"github.com/grafana/dskit/user"
 	"github.com/grafana/e2e"
-	"github.com/grafana/tempo/integration/util"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
@@ -23,6 +22,7 @@ import (
 	tracenoop "go.opentelemetry.io/otel/trace/noop"
 	"go.uber.org/zap"
 
+	"github.com/grafana/tempo/integration/util"
 	"github.com/grafana/tempo/pkg/httpclient"
 	tempoUtil "github.com/grafana/tempo/pkg/util"
 	"github.com/grafana/tempo/pkg/util/test"
@@ -86,9 +86,9 @@ func TestReceivers(t *testing.T) {
 		t.Run(tc.name, func(t *testing.T) {
 			// create exporter
 			logger, _ := zap.NewDevelopment()
-			exporter, err := tc.factory.CreateTracesExporter(
+			exporter, err := tc.factory.CreateTraces(
 				context.Background(),
-				exporter.CreateSettings{
+				exporter.Settings{
 					TelemetrySettings: component.TelemetrySettings{
 						Logger:         logger,
 						TracerProvider: tracenoop.NewTracerProvider(),
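The test change above tracks the collector's factory API rename in the v0.116.0 line: `CreateTracesExporter`/`exporter.CreateSettings` became `CreateTraces`/`exporter.Settings`. A self-contained sketch of the new call shape, assuming the stock OTLP exporter factory and noop telemetry providers (not code from this patch):

```go
package main

import (
	"context"

	"go.opentelemetry.io/collector/component"
	"go.opentelemetry.io/collector/exporter"
	"go.opentelemetry.io/collector/exporter/otlpexporter"
	metricnoop "go.opentelemetry.io/otel/metric/noop"
	tracenoop "go.opentelemetry.io/otel/trace/noop"
	"go.uber.org/zap"
)

func newTracesExporter(ctx context.Context) (exporter.Traces, error) {
	factory := otlpexporter.NewFactory()
	// Settings replaces the old CreateSettings; the ID here is arbitrary.
	set := exporter.Settings{
		ID: component.MustNewID("otlp"),
		TelemetrySettings: component.TelemetrySettings{
			Logger:         zap.NewNop(),
			TracerProvider: tracenoop.NewTracerProvider(),
			MeterProvider:  metricnoop.NewMeterProvider(),
		},
	}
	// CreateTraces replaces the old CreateTracesExporter.
	return factory.CreateTraces(ctx, set, factory.CreateDefaultConfig())
}
```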
diff --git a/integration/e2e/serverless/config-serverless-gcr.yaml b/integration/e2e/serverless/config-serverless-gcr.yaml
index 76e4fa7d103..9bb91e5e208 100644
--- a/integration/e2e/serverless/config-serverless-gcr.yaml
+++ b/integration/e2e/serverless/config-serverless-gcr.yaml
@@ -11,6 +11,7 @@ distributor:
     jaeger:
       protocols:
         grpc:
+          endpoint: "distributor:14250"
 
 ingester:
   lifecycler:
diff --git a/integration/e2e/serverless/config-serverless-lambda.yaml b/integration/e2e/serverless/config-serverless-lambda.yaml
index cbd4435909d..f227891e3cc 100644
--- a/integration/e2e/serverless/config-serverless-lambda.yaml
+++ b/integration/e2e/serverless/config-serverless-lambda.yaml
@@ -11,6 +11,7 @@ distributor:
     jaeger:
       protocols:
         grpc:
+          endpoint: "distributor:14250"
 
 ingester:
   lifecycler:
diff --git a/integration/microservices/tempo.yaml b/integration/microservices/tempo.yaml
index c3ce244ba27..d4bcf6e607c 100644
--- a/integration/microservices/tempo.yaml
+++ b/integration/microservices/tempo.yaml
@@ -2,10 +2,13 @@ compactor: null
 distributor:
   receivers:
     zipkin:
+      endpoint: "distributor:9411"
     jaeger:
       protocols:
-        thrift_http: null
-        opencensus: null
+        thrift_http:
+          endpoint: "distributor:14268"
+        opencensus:
+          endpoint: "distributor:55678"
 ingester:
   trace_idle_period: 1s
   lifecycler:
diff --git a/integration/util/util.go b/integration/util/util.go
index cd3248b21ec..8e81e2b8887 100644
--- a/integration/util/util.go
+++ b/integration/util/util.go
@@ -21,7 +21,6 @@ import (
 
 	"github.com/grafana/dskit/backoff"
 	"github.com/grafana/e2e"
-	"github.com/grafana/tempo/pkg/model/trace"
 	jaeger_grpc "github.com/jaegertracing/jaeger/cmd/agent/app/reporter/grpc"
 	thrift "github.com/jaegertracing/jaeger/thrift-gen/jaeger"
 	"github.com/stretchr/testify/assert"
@@ -39,6 +38,7 @@ import (
 	"google.golang.org/grpc/credentials/insecure"
 
 	"github.com/grafana/tempo/pkg/httpclient"
+	"github.com/grafana/tempo/pkg/model/trace"
 	"github.com/grafana/tempo/pkg/tempopb"
 	tempoUtil "github.com/grafana/tempo/pkg/util"
 )
@@ -47,7 +47,7 @@ const (
 	image       = "tempo:latest"
 	debugImage  = "tempo-debug:latest"
 	queryImage  = "tempo-query:latest"
-	jaegerImage = "jaegertracing/jaeger-query:1.60"
+	jaegerImage = "jaegertracing/jaeger-query:1.64.0"
 )
 
 // GetExtraArgs returns the extra args to pass to the Docker command used to run Tempo.
@@ -348,9 +348,9 @@ func NewOtelGRPCExporter(endpoint string) (exporter.Traces, error) {
 		},
 	}
 	logger, _ := zap.NewDevelopment()
-	return factory.CreateTracesExporter(
+	return factory.CreateTraces(
 		context.Background(),
-		exporter.CreateSettings{
+		exporter.Settings{
 			TelemetrySettings: component.TelemetrySettings{
 				Logger:         logger,
 				TracerProvider: tnoop.NewTracerProvider(),
diff --git a/modules/distributor/forwarder/forwarder.go b/modules/distributor/forwarder/forwarder.go
index bf2dd21d739..87c3a5b1a42 100644
--- a/modules/distributor/forwarder/forwarder.go
+++ b/modules/distributor/forwarder/forwarder.go
@@ -86,7 +86,7 @@ type FilterForwarder struct {
 
 func NewFilterForwarder(cfg FilterConfig, next Forwarder, logLevel dslog.Level) (*FilterForwarder, error) {
 	factory := filterprocessor.NewFactory()
-	set := processor.CreateSettings{
+	set := processor.Settings{
 		ID: component.ID{},
 		TelemetrySettings: component.TelemetrySettings{
 			Logger: newLogger(logLevel),
@@ -102,7 +102,7 @@ func NewFilterForwarder(cfg FilterConfig, next Forwarder, logLevel dslog.Level)
 			SpanEventConditions: cfg.Traces.SpanEventConditions,
 		},
 	}
-	fp, err := factory.CreateTracesProcessor(context.Background(), set, fpCfg, consumerToForwarderAdapter{forwarder: next})
+	fp, err := factory.CreateTraces(context.Background(), set, fpCfg, consumerToForwarderAdapter{forwarder: next})
 	if err != nil {
 		return nil, fmt.Errorf("failed to create filter processor: %w", err)
 	}
@@ -173,11 +173,6 @@ func (f *FilterForwarder) GetExtensions() map[component.ID]extension.Extension {
 	return nil
 }
 
-// GetExporters implements component.Host
-func (f *FilterForwarder) GetExporters() map[component.DataType]map[component.ID]component.Component {
-	return nil
-}
-
 type consumerToForwarderAdapter struct {
 	forwarder Forwarder
 }
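`GetExporters` is deleted here (and in the receiver shim below) because the collector dropped it from the `component.Host` interface; a host now only has to report its extensions. A minimal no-op host under that assumption — for tests, `componenttest.NewNopHost()` should serve the same purpose:

```go
package main

import "go.opentelemetry.io/collector/component"

// nopHost satisfies the trimmed component.Host by reporting no extensions.
type nopHost struct{}

func (nopHost) GetExtensions() map[component.ID]component.Component { return nil }

var _ component.Host = nopHost{}
```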
diff --git a/modules/distributor/receiver/metrics_provider.go b/modules/distributor/receiver/metrics_provider.go
index 4c3196c3ddd..f15af449664 100644
--- a/modules/distributor/receiver/metrics_provider.go
+++ b/modules/distributor/receiver/metrics_provider.go
@@ -25,6 +25,12 @@ import (
 	"go.opentelemetry.io/otel/metric/noop"
 )
 
+const (
+	// These metrics are defined here: https://github.com/open-telemetry/opentelemetry-collector/blob/release/v0.116.x/receiver/receiverhelper/internal/metadata/generated_telemetry.go
+	otelcolAcceptedSpansMetricName = "otelcol_receiver_accepted_spans"
+	otelcolRefusedSpansMetricName  = "otelcol_receiver_refused_spans"
+)
+
 var (
 	// Compile-time check this implements the OpenTelemetry API.
@@ -83,7 +89,7 @@ type Meter struct {
 // Int64Counter returns a Counter used to record int64 measurements
 func (m Meter) Int64Counter(name string, _ ...metric.Int64CounterOption) (metric.Int64Counter, error) {
 	switch name {
-	case "receiver_accepted_spans", "receiver_refused_spans":
+	case otelcolAcceptedSpansMetricName, otelcolRefusedSpansMetricName:
 		return Int64Counter{Name: name, metrics: m.metrics}, nil
 	default:
 		return noop.Int64Counter{}, nil
@@ -118,9 +124,9 @@ func (r Int64Counter) Add(_ context.Context, value int64, options ...metric.AddO
 	}
 
 	switch r.Name {
-	case "receiver_accepted_spans":
+	case otelcolAcceptedSpansMetricName:
 		r.metrics.receiverAcceptedSpans.WithLabelValues(receiver, transport).Add(float64(value))
-	case "receiver_refused_spans":
+	case otelcolRefusedSpansMetricName:
 		r.metrics.receiverRefusedSpans.WithLabelValues(receiver, transport).Add(float64(value))
 	}
 }
diff --git a/modules/distributor/receiver/shim.go b/modules/distributor/receiver/shim.go
index d43544f0c3a..b11f3f0455f 100644
--- a/modules/distributor/receiver/shim.go
+++ b/modules/distributor/receiver/shim.go
@@ -272,17 +272,15 @@ func New(receiverCfg map[string]interface{}, pusher TracesPusher, middleware Mid
 			cfg = jaegerRecvCfg
 		}
 
-		params := receiver.CreateSettings{
+		params := receiver.Settings{
 			ID: component.NewIDWithName(nopType, fmt.Sprintf("%s_receiver", componentID.Type().String())),
 			TelemetrySettings: component.TelemetrySettings{
 				Logger:         zapLogger,
 				TracerProvider: traceProvider,
 				MeterProvider:  meterProvider,
-				ReportStatus: func(*component.StatusEvent) {
-				},
 			},
 		}
-		receiver, err := factoryBase.CreateTracesReceiver(ctx, params, cfg, middleware.Wrap(shim))
+		receiver, err := factoryBase.CreateTraces(ctx, params, cfg, middleware.Wrap(shim))
 		if err != nil {
 			return nil, err
 		}
@@ -368,10 +366,6 @@ func (r *receiversShim) GetFactory(component.Kind, component.Type) component.Fac
 // GetExtensions implements component.Host
 func (r *receiversShim) GetExtensions() map[component.ID]extension.Extension { return nil }
 
-func (r *receiversShim) GetExporters() map[component.DataType]map[component.ID]component.Component {
-	return nil
-}
-
 // observability shims
 func newLogger(level dslog.Level) *zap.Logger {
 	zapLevel := zapcore.InfoLevel
diff --git a/modules/distributor/receiver/shim_test.go b/modules/distributor/receiver/shim_test.go
index 72cfed27596..970a09dcb46 100644
--- a/modules/distributor/receiver/shim_test.go
+++ b/modules/distributor/receiver/shim_test.go
@@ -161,9 +161,9 @@ func runReceiverShim(t *testing.T, receiverCfg map[string]interface{}, pusher Tr
 }
 
 func runOTelExporter(t *testing.T, factory exporter.Factory, cfg component.Config) (exporter.Traces, func()) {
-	exporter, err := factory.CreateTracesExporter(
+	exporter, err := factory.CreateTraces(
 		context.Background(),
-		exporter.CreateSettings{
+		exporter.Settings{
 			ID: component.MustNewID("test"),
 			TelemetrySettings: component.TelemetrySettings{
 				Logger: zap.NewNop(),
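The renamed constants exist because the collector's receiverhelper now registers its counters under `otelcol_`-prefixed names, and the shim intercepts them by name to feed Tempo's own Prometheus metrics. A sketch of what the instrumentation side looks like, assuming the standard OTel metric API and illustrative `receiver`/`transport` attribute values:

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
)

func recordAcceptedSpans(ctx context.Context, meter metric.Meter, n int64) error {
	counter, err := meter.Int64Counter("otelcol_receiver_accepted_spans")
	if err != nil {
		return err
	}
	// The shim's Int64Counter.Add reads these attributes back out and
	// increments the matching Prometheus counter.
	counter.Add(ctx, n, metric.WithAttributes(
		attribute.String("receiver", "otlp"),
		attribute.String("transport", "grpc"),
	))
	return nil
}
```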
diff --git a/modules/generator/registry/native_histogram.go b/modules/generator/registry/native_histogram.go
index e14cf7e3bdd..b0800d6f6b7 100644
--- a/modules/generator/registry/native_histogram.go
+++ b/modules/generator/registry/native_histogram.go
@@ -141,6 +141,8 @@ func (h *nativeHistogram) newSeries(labelValueCombo *LabelValueCombo, value floa
 			NativeHistogramBucketFactor:     1.1,
 			NativeHistogramMaxBucketNumber:  100,
 			NativeHistogramMinResetDuration: 15 * time.Minute,
+			// TODO enable exemplars on native histograms
+			NativeHistogramMaxExemplars: -1,
 		}),
 		lastUpdated: 0,
 		firstSeries: atomic.NewBool(true),
@@ -248,7 +250,6 @@ func (h *nativeHistogram) collectMetrics(appender storage.Appender, timeMs int64
 			}
 		}
 	}
-
 	}
 
 	return
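`NativeHistogramMaxExemplars: -1` opts out of the exemplar ring buffer that client_golang enables by default for native histograms (hence the TODO). A standalone sketch of the same options, with the field values taken from the patch and the metric name assumed:

```go
package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

func newNativeHistogram() prometheus.Histogram {
	return prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:                            "tempo_example_latency_seconds",
		NativeHistogramBucketFactor:     1.1,
		NativeHistogramMaxBucketNumber:  100,
		NativeHistogramMinResetDuration: 15 * time.Minute,
		NativeHistogramMaxExemplars:     -1, // a negative value disables exemplar storage
	})
}
```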
diff --git a/modules/generator/storage/instance_test.go b/modules/generator/storage/instance_test.go
index 5ad4d9de81e..f3aa115c99f 100644
--- a/modules/generator/storage/instance_test.go
+++ b/modules/generator/storage/instance_test.go
@@ -12,8 +12,8 @@ import (
 	"time"
 
 	"github.com/go-kit/log"
+	"github.com/grafana/dskit/test"
 	"github.com/grafana/dskit/user"
-	"github.com/grafana/tempo/modules/overrides"
 	"github.com/prometheus/client_golang/prometheus"
 	prometheus_common_config "github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
@@ -25,6 +25,8 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	"go.uber.org/atomic"
+
+	"github.com/grafana/tempo/modules/overrides"
 )
 
 // Verify basic functionality like sending metrics and exemplars, buffering and retrying failed
@@ -73,13 +75,12 @@ func TestInstance(t *testing.T) {
 	})
 
 	// Wait until remote.Storage has tried at least once to send data
-	err = waitUntil(20*time.Second, func() bool {
+	test.Poll(t, 30*time.Second, true, func() interface{} {
 		mockServer.mtx.Lock()
 		defer mockServer.mtx.Unlock()
 
 		return mockServer.refusedRequests > 0
 	})
-	require.NoError(t, err, "timed out while waiting for refused requests")
 
 	// Allow requests
 	mockServer.refuseRequests.Store(false)
@@ -144,7 +145,7 @@ func TestInstance_multiTenancy(t *testing.T) {
 	})
 
 	// Wait until every tenant received at least one request
-	err = waitUntil(20*time.Second, func() bool {
+	test.Poll(t, 45*time.Second, true, func() interface{} {
 		mockServer.mtx.Lock()
 		defer mockServer.mtx.Unlock()
 
@@ -155,7 +156,6 @@ func TestInstance_multiTenancy(t *testing.T) {
 		}
 		return true
 	})
-	require.NoError(t, err, "timed out while waiting for accepted requests")
 
 	cancel()
 	for _, instance := range instances {
@@ -238,13 +238,12 @@ func TestInstance_remoteWriteHeaders(t *testing.T) {
 	})
 
 	// Wait until remote.Storage has tried at least once to send data
-	err = waitUntil(20*time.Second, func() bool {
+	test.Poll(t, 30*time.Second, true, func() interface{} {
 		mockServer.mtx.Lock()
 		defer mockServer.mtx.Unlock()
 
 		return mockServer.refusedRequests > 0
 	})
-	require.NoError(t, err, "timed out while waiting for refused requests")
 
 	// Allow requests
 	mockServer.refuseRequests.Store(false)
@@ -348,22 +347,6 @@ func poll(ctx context.Context, interval time.Duration, f func()) {
 		}
 	}
 }
 
-// waitUntil executes f until it returns true or timeout is reached.
-func waitUntil(timeout time.Duration, f func() bool) error {
-	start := time.Now()
-
-	for {
-		if f() {
-			return nil
-		}
-		if time.Since(start) > timeout {
-			return fmt.Errorf("timed out while waiting for condition")
-		}
-
-		time.Sleep(50 * time.Millisecond)
-	}
-}
-
 var _ Overrides = (*mockOverrides)(nil)
 
 type mockOverrides struct {
diff --git a/tools/packaging/tempo.yaml b/tools/packaging/tempo.yaml
new file mode 100644
index 00000000000..97a0d671b1b
--- /dev/null
+++ b/tools/packaging/tempo.yaml
@@ -0,0 +1,48 @@
+stream_over_http_enabled: true
+server:
+  http_listen_port: 3200
+  log_level: info
+
+query_frontend:
+  search:
+    duration_slo: 5s
+    throughput_bytes_slo: 1.073741824e+09
+    metadata_slo:
+      duration_slo: 5s
+      throughput_bytes_slo: 1.073741824e+09
+  trace_by_id:
+    duration_slo: 5s
+
+distributor:
+  receivers:
+    otlp:
+      protocols:
+        grpc:
+          endpoint: "0.0.0.0:4317"
+
+metrics_generator:
+  registry:
+    external_labels:
+      source: tempo
+      cluster: docker-compose
+  storage:
+    path: /var/tempo/generator/wal
+    remote_write:
+      - url: http://prometheus:9090/api/v1/write
+        send_exemplars: true
+  traces_storage:
+    path: /var/tempo/generator/traces
+
+storage:
+  trace:
+    backend: local # backend configuration to use
+    wal:
+      path: /var/tempo/wal # where to store the wal locally
+    local:
+      path: /var/tempo/blocks
+
+overrides:
+  defaults:
+    metrics_generator:
+      processors: [service-graphs, span-metrics, local-blocks] # enables metrics generator
+      generate_native_histograms: both
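The packaged config enables the OTLP gRPC receiver, the metrics-generator with native histograms, and local block storage. Assuming the standard Tempo invocation and install path, it would be loaded with:

``` bash
tempo -config.file=/etc/tempo/tempo.yaml
```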
[#10907](https://github.com/googleapis/google-cloud-go/issues/10907) + +## [0.9.6](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.5...auth/v0.9.6) (2024-09-30) + + +### Bug Fixes + +* **auth:** Make aws credentials provider retrieve fresh credentials ([#10920](https://github.com/googleapis/google-cloud-go/issues/10920)) ([250fbf8](https://github.com/googleapis/google-cloud-go/commit/250fbf87d858d865e399a241b7e537c4ff0c3dd8)) + +## [0.9.5](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.4...auth/v0.9.5) (2024-09-25) + + +### Bug Fixes + +* **auth:** Restore support for GOOGLE_CLOUD_UNIVERSE_DOMAIN env ([#10915](https://github.com/googleapis/google-cloud-go/issues/10915)) ([94caaaa](https://github.com/googleapis/google-cloud-go/commit/94caaaa061362d0e00ef6214afcc8a0a3e7ebfb2)) +* **auth:** Skip directpath credentials overwrite when it's not on GCE ([#10833](https://github.com/googleapis/google-cloud-go/issues/10833)) ([7e5e8d1](https://github.com/googleapis/google-cloud-go/commit/7e5e8d10b761b0a6e43e19a028528db361bc07b1)) +* **auth:** Use new context for non-blocking token refresh ([#10919](https://github.com/googleapis/google-cloud-go/issues/10919)) ([cf7102d](https://github.com/googleapis/google-cloud-go/commit/cf7102d33a21be1e5a9d47a49456b3a57c43b350)) + +## [0.9.4](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.3...auth/v0.9.4) (2024-09-11) + + +### Bug Fixes + +* **auth:** Enable self-signed JWT for non-GDU universe domain ([#10831](https://github.com/googleapis/google-cloud-go/issues/10831)) ([f9869f7](https://github.com/googleapis/google-cloud-go/commit/f9869f7903cfd34d1b97c25d0dc5669d2c5138e6)) + +## [0.9.3](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.2...auth/v0.9.3) (2024-09-03) + + +### Bug Fixes + +* **auth:** Choose quota project envvar over file when both present ([#10807](https://github.com/googleapis/google-cloud-go/issues/10807)) ([2d8dd77](https://github.com/googleapis/google-cloud-go/commit/2d8dd7700eff92d4b95027be55e26e1e7aa79181)), refs [#10804](https://github.com/googleapis/google-cloud-go/issues/10804) + +## [0.9.2](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.1...auth/v0.9.2) (2024-08-30) + + +### Bug Fixes + +* **auth:** Handle non-Transport DefaultTransport ([#10733](https://github.com/googleapis/google-cloud-go/issues/10733)) ([98d91dc](https://github.com/googleapis/google-cloud-go/commit/98d91dc8316b247498fab41ab35e57a0446fe556)), refs [#10742](https://github.com/googleapis/google-cloud-go/issues/10742) +* **auth:** Make sure quota option takes precedence over env/file ([#10797](https://github.com/googleapis/google-cloud-go/issues/10797)) ([f1b050d](https://github.com/googleapis/google-cloud-go/commit/f1b050d56d804b245cab048c2980d32b0eaceb4e)), refs [#10795](https://github.com/googleapis/google-cloud-go/issues/10795) + + +### Documentation + +* **auth:** Fix Go doc comment link ([#10751](https://github.com/googleapis/google-cloud-go/issues/10751)) ([015acfa](https://github.com/googleapis/google-cloud-go/commit/015acfab4d172650928bb1119bc2cd6307b9a437)) + +## [0.9.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.0...auth/v0.9.1) (2024-08-22) + + +### Bug Fixes + +* **auth:** Setting expireEarly to default when the value is 0 ([#10732](https://github.com/googleapis/google-cloud-go/issues/10732)) ([5e67869](https://github.com/googleapis/google-cloud-go/commit/5e67869a31e9e8ecb4eeebd2cfa11a761c3b1948)) + +## 
[0.9.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.8.1...auth/v0.9.0) (2024-08-16) + + +### Features + +* **auth:** Auth library can talk to S2A over mTLS ([#10634](https://github.com/googleapis/google-cloud-go/issues/10634)) ([5250a13](https://github.com/googleapis/google-cloud-go/commit/5250a13ec95b8d4eefbe0158f82857ff2189cb45)) + +## [0.8.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.8.0...auth/v0.8.1) (2024-08-13) + + +### Bug Fixes + +* **auth:** Make default client creation more lenient ([#10669](https://github.com/googleapis/google-cloud-go/issues/10669)) ([1afb9ee](https://github.com/googleapis/google-cloud-go/commit/1afb9ee1ee9de9810722800018133304a0ca34d1)), refs [#10638](https://github.com/googleapis/google-cloud-go/issues/10638) + +## [0.8.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.7.3...auth/v0.8.0) (2024-08-07) + + +### Features + +* **auth:** Adds support for X509 workload identity federation ([#10373](https://github.com/googleapis/google-cloud-go/issues/10373)) ([5d07505](https://github.com/googleapis/google-cloud-go/commit/5d075056cbe27bb1da4072a26070c41f8999eb9b)) + +## [0.7.3](https://github.com/googleapis/google-cloud-go/compare/auth/v0.7.2...auth/v0.7.3) (2024-08-01) + + +### Bug Fixes + +* **auth/oauth2adapt:** Update dependencies ([257c40b](https://github.com/googleapis/google-cloud-go/commit/257c40bd6d7e59730017cf32bda8823d7a232758)) +* **auth:** Disable automatic universe domain check for MDS ([#10620](https://github.com/googleapis/google-cloud-go/issues/10620)) ([7cea5ed](https://github.com/googleapis/google-cloud-go/commit/7cea5edd5a0c1e6bca558696f5607879141910e8)) +* **auth:** Update dependencies ([257c40b](https://github.com/googleapis/google-cloud-go/commit/257c40bd6d7e59730017cf32bda8823d7a232758)) + +## [0.7.2](https://github.com/googleapis/google-cloud-go/compare/auth/v0.7.1...auth/v0.7.2) (2024-07-22) + + +### Bug Fixes + +* **auth:** Use default client for universe metadata lookup ([#10551](https://github.com/googleapis/google-cloud-go/issues/10551)) ([d9046fd](https://github.com/googleapis/google-cloud-go/commit/d9046fdd1435d1ce48f374806c1def4cb5ac6cd3)), refs [#10544](https://github.com/googleapis/google-cloud-go/issues/10544) + +## [0.7.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.7.0...auth/v0.7.1) (2024-07-10) + + +### Bug Fixes + +* **auth:** Bump google.golang.org/grpc@v1.64.1 ([8ecc4e9](https://github.com/googleapis/google-cloud-go/commit/8ecc4e9622e5bbe9b90384d5848ab816027226c5)) + ## [0.7.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.6.1...auth/v0.7.0) (2024-07-09) diff --git a/vendor/cloud.google.com/go/auth/README.md b/vendor/cloud.google.com/go/auth/README.md index 36de276a074..6fe4f0763e3 100644 --- a/vendor/cloud.google.com/go/auth/README.md +++ b/vendor/cloud.google.com/go/auth/README.md @@ -1,4 +1,40 @@ -# auth +# Google Auth Library for Go -This module is currently EXPERIMENTAL and under active development. It is not -yet intended to be used. +[![Go Reference](https://pkg.go.dev/badge/cloud.google.com/go/auth.svg)](https://pkg.go.dev/cloud.google.com/go/auth) + +## Install + +``` bash +go get cloud.google.com/go/auth@latest +``` + +## Usage + +The most common way this library is used is transitively, by default, from any +of our Go client libraries. + +### Notable use-cases + +- To create a credential directly please see examples in the + [credentials](https://pkg.go.dev/cloud.google.com/go/auth/credentials) + package. 
+- To create an authenticated HTTP client please see examples in the
+  [httptransport](https://pkg.go.dev/cloud.google.com/go/auth/httptransport)
+  package.
+- To create an authenticated gRPC connection please see examples in the
+  [grpctransport](https://pkg.go.dev/cloud.google.com/go/auth/grpctransport)
+  package.
+- To create an ID token please see examples in the
+  [idtoken](https://pkg.go.dev/cloud.google.com/go/auth/credentials/idtoken)
+  package.
+
+## Contributing
+
+Contributions are welcome. Please see the
+[CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md)
+document for details.
+
+Please note that this project is released with a Contributor Code of Conduct.
+By participating in this project you agree to abide by its terms.
+See [Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md#contributor-code-of-conduct)
+for more information.
diff --git a/vendor/cloud.google.com/go/auth/auth.go b/vendor/cloud.google.com/go/auth/auth.go
index 58af9318877..314bd292e3f 100644
--- a/vendor/cloud.google.com/go/auth/auth.go
+++ b/vendor/cloud.google.com/go/auth/auth.go
@@ -12,6 +12,11 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+// Package auth provides utilities for managing Google Cloud credentials,
+// including functionality for creating, caching, and refreshing OAuth2 tokens.
+// It offers customizable options for different OAuth2 flows, such as 2-legged
+// (2LO) and 3-legged (3LO) OAuth, along with support for PKCE and automatic
+// token management.
 package auth
 
 import (
@@ -101,6 +106,20 @@ func (t *Token) IsValid() bool {
 	return t.isValidWithEarlyExpiry(defaultExpiryDelta)
 }
 
+// MetadataString is a convenience method for accessing string values in the
+// token's metadata. Returns an empty string if the metadata is nil or the value
+// for the given key cannot be cast to a string.
+func (t *Token) MetadataString(k string) string {
+	if t.Metadata == nil {
+		return ""
+	}
+	s, ok := t.Metadata[k].(string)
+	if !ok {
+		return ""
+	}
+	return s
+}
+
 func (t *Token) isValidWithEarlyExpiry(earlyExpiry time.Duration) bool {
 	if t.isEmpty() {
 		return false
@@ -116,7 +135,9 @@ func (t *Token) isEmpty() bool {
 }
 
 // Credentials holds Google credentials, including
-// [Application Default Credentials](https://developers.google.com/accounts/docs/application-default-credentials).
+// [Application Default Credentials].
+//
+// [Application Default Credentials]: https://developers.google.com/accounts/docs/application-default-credentials
 type Credentials struct {
 	json []byte
 	projectID CredentialsPropertyProvider
@@ -244,7 +265,7 @@ func (ctpo *CachedTokenProviderOptions) autoRefresh() bool {
 }
 
 func (ctpo *CachedTokenProviderOptions) expireEarly() time.Duration {
-	if ctpo == nil {
+	if ctpo == nil || ctpo.ExpireEarly == 0 {
 		return defaultExpiryDelta
 	}
 	return ctpo.ExpireEarly
@@ -307,7 +328,9 @@ func (c *cachedTokenProvider) tokenNonBlocking(ctx context.Context) (*Token, err
 		defer c.mu.Unlock()
 		return c.cachedToken, nil
 	case stale:
-		c.tokenAsync(ctx)
+		// Call tokenAsync with a new Context because the user-provided context
+		// may have a short timeout incompatible with async token refresh.
+		c.tokenAsync(context.Background())
 		// Return the stale token immediately to not block customer requests to Cloud services.
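The README's first use-case above, an authenticated HTTP client, looks roughly like the following sketch. It assumes Application Default Credentials are configured in the environment; the scope and request URL are illustrative, and error handling is abbreviated. `credentials.DetectDefault` appears in this patch; `httptransport.NewClient` is the companion constructor in this module.

```go
package main

import (
	"fmt"
	"log"

	"cloud.google.com/go/auth/credentials"
	"cloud.google.com/go/auth/httptransport"
)

func main() {
	// Detect Application Default Credentials from the environment.
	creds, err := credentials.DetectDefault(&credentials.DetectOptions{
		Scopes: []string{"https://www.googleapis.com/auth/cloud-platform"},
	})
	if err != nil {
		log.Fatal(err)
	}
	// Wrap an HTTP client so every request carries an access token.
	client, err := httptransport.NewClient(&httptransport.Options{
		Credentials: creds,
	})
	if err != nil {
		log.Fatal(err)
	}
	resp, err := client.Get("https://cloudresourcemanager.googleapis.com/v1/projects") // illustrative endpoint
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```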
c.mu.Lock() defer c.mu.Unlock() @@ -322,13 +345,14 @@ func (c *cachedTokenProvider) tokenState() tokenState { c.mu.Lock() defer c.mu.Unlock() t := c.cachedToken + now := timeNow() if t == nil || t.Value == "" { return invalid } else if t.Expiry.IsZero() { return fresh - } else if timeNow().After(t.Expiry.Round(0)) { + } else if now.After(t.Expiry.Round(0)) { return invalid - } else if timeNow().After(t.Expiry.Round(0).Add(-c.expireEarly)) { + } else if now.After(t.Expiry.Round(0).Add(-c.expireEarly)) { return stale } return fresh @@ -479,7 +503,7 @@ func (o *Options2LO) client() *http.Client { if o.Client != nil { return o.Client } - return internal.CloneDefaultClient() + return internal.DefaultClient() } func (o *Options2LO) validate() error { diff --git a/vendor/cloud.google.com/go/auth/credentials/detect.go b/vendor/cloud.google.com/go/auth/credentials/detect.go index 2d9a73edf36..010afc37c8f 100644 --- a/vendor/cloud.google.com/go/auth/credentials/detect.go +++ b/vendor/cloud.google.com/go/auth/credentials/detect.go @@ -98,8 +98,8 @@ func DetectDefault(opts *DetectOptions) (*auth.Credentials, error) { if OnGCE() { return auth.NewCredentials(&auth.CredentialsOptions{ TokenProvider: computeTokenProvider(opts), - ProjectIDProvider: auth.CredentialsPropertyFunc(func(context.Context) (string, error) { - return metadata.ProjectID() + ProjectIDProvider: auth.CredentialsPropertyFunc(func(ctx context.Context) (string, error) { + return metadata.ProjectIDWithContext(ctx) }), UniverseDomainProvider: &internal.ComputeUniverseDomainProvider{}, }), nil @@ -190,7 +190,7 @@ func (o *DetectOptions) client() *http.Client { if o.Client != nil { return o.Client } - return internal.CloneDefaultClient() + return internal.DefaultClient() } func readCredentialsFile(filename string, opts *DetectOptions) (*auth.Credentials, error) { diff --git a/vendor/cloud.google.com/go/auth/credentials/filetypes.go b/vendor/cloud.google.com/go/auth/credentials/filetypes.go index fe93557389d..6591b181132 100644 --- a/vendor/cloud.google.com/go/auth/credentials/filetypes.go +++ b/vendor/cloud.google.com/go/auth/credentials/filetypes.go @@ -33,7 +33,7 @@ func fileCredentials(b []byte, opts *DetectOptions) (*auth.Credentials, error) { return nil, err } - var projectID, quotaProjectID, universeDomain string + var projectID, universeDomain string var tp auth.TokenProvider switch fileType { case credsfile.ServiceAccountKey: @@ -56,7 +56,6 @@ func fileCredentials(b []byte, opts *DetectOptions) (*auth.Credentials, error) { if err != nil { return nil, err } - quotaProjectID = f.QuotaProjectID universeDomain = f.UniverseDomain case credsfile.ExternalAccountKey: f, err := credsfile.ParseExternalAccount(b) @@ -67,7 +66,6 @@ func fileCredentials(b []byte, opts *DetectOptions) (*auth.Credentials, error) { if err != nil { return nil, err } - quotaProjectID = f.QuotaProjectID universeDomain = resolveUniverseDomain(opts.UniverseDomain, f.UniverseDomain) case credsfile.ExternalAccountAuthorizedUserKey: f, err := credsfile.ParseExternalAccountAuthorizedUser(b) @@ -78,7 +76,6 @@ func fileCredentials(b []byte, opts *DetectOptions) (*auth.Credentials, error) { if err != nil { return nil, err } - quotaProjectID = f.QuotaProjectID universeDomain = f.UniverseDomain case credsfile.ImpersonatedServiceAccountKey: f, err := credsfile.ParseImpersonatedServiceAccount(b) @@ -108,9 +105,9 @@ func fileCredentials(b []byte, opts *DetectOptions) (*auth.Credentials, error) { TokenProvider: auth.NewCachedTokenProvider(tp, &auth.CachedTokenProviderOptions{ 
ExpireEarly: opts.EarlyTokenRefresh, }), - JSON: b, - ProjectIDProvider: internalauth.StaticCredentialsProperty(projectID), - QuotaProjectIDProvider: internalauth.StaticCredentialsProperty(quotaProjectID), + JSON: b, + ProjectIDProvider: internalauth.StaticCredentialsProperty(projectID), + // TODO(codyoss): only set quota project here if there was a user override UniverseDomainProvider: internalauth.StaticCredentialsProperty(universeDomain), }), nil } @@ -127,8 +124,14 @@ func resolveUniverseDomain(optsUniverseDomain, fileUniverseDomain string) string } func handleServiceAccount(f *credsfile.ServiceAccountFile, opts *DetectOptions) (auth.TokenProvider, error) { + ud := resolveUniverseDomain(opts.UniverseDomain, f.UniverseDomain) if opts.UseSelfSignedJWT { return configureSelfSignedJWT(f, opts) + } else if ud != "" && ud != internalauth.DefaultUniverseDomain { + // For non-GDU universe domains, token exchange is impossible and services + // must support self-signed JWTs. + opts.UseSelfSignedJWT = true + return configureSelfSignedJWT(f, opts) } opts2LO := &auth.Options2LO{ Email: f.ClientEmail, @@ -174,6 +177,7 @@ func handleExternalAccount(f *credsfile.ExternalAccountFile, opts *DetectOptions Scopes: opts.scopes(), WorkforcePoolUserProject: f.WorkforcePoolUserProject, Client: opts.client(), + IsDefaultClient: opts.Client == nil, } if f.ServiceAccountImpersonation != nil { externalOpts.ServiceAccountImpersonationLifetimeSeconds = f.ServiceAccountImpersonation.TokenLifetimeSeconds diff --git a/vendor/cloud.google.com/go/auth/credentials/idtoken/compute.go b/vendor/cloud.google.com/go/auth/credentials/idtoken/compute.go index fb9c62c610d..dced1ec4044 100644 --- a/vendor/cloud.google.com/go/auth/credentials/idtoken/compute.go +++ b/vendor/cloud.google.com/go/auth/credentials/idtoken/compute.go @@ -43,8 +43,8 @@ func computeCredentials(opts *Options) (*auth.Credentials, error) { TokenProvider: auth.NewCachedTokenProvider(tp, &auth.CachedTokenProviderOptions{ ExpireEarly: 5 * time.Minute, }), - ProjectIDProvider: auth.CredentialsPropertyFunc(func(context.Context) (string, error) { - return metadata.ProjectID() + ProjectIDProvider: auth.CredentialsPropertyFunc(func(ctx context.Context) (string, error) { + return metadata.ProjectIDWithContext(ctx) }), UniverseDomainProvider: &internal.ComputeUniverseDomainProvider{}, }), nil diff --git a/vendor/cloud.google.com/go/auth/credentials/idtoken/idtoken.go b/vendor/cloud.google.com/go/auth/credentials/idtoken/idtoken.go index 2c1ad6004ae..b66c6551e6e 100644 --- a/vendor/cloud.google.com/go/auth/credentials/idtoken/idtoken.go +++ b/vendor/cloud.google.com/go/auth/credentials/idtoken/idtoken.go @@ -12,6 +12,12 @@ // See the License for the specific language governing permissions and // limitations under the License. +// Package idtoken provides functionality for generating and validating ID +// tokens, with configurable options for audience, custom claims, and token +// formats. +// +// For more information on ID tokens, see +// https://cloud.google.com/docs/authentication/token-types#id. 
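A hedged sketch of the flow the new idtoken package doc describes: mint an ID token for a single target audience and present it as a Bearer token. The audience URL is hypothetical; `idtoken.NewCredentials` is this package's constructor.

```go
package main

import (
	"context"
	"log"

	"cloud.google.com/go/auth/credentials/idtoken"
)

func main() {
	// NewCredentials mints ID tokens (not access tokens) for one audience.
	creds, err := idtoken.NewCredentials(&idtoken.Options{
		Audience: "https://my-service.example.com", // hypothetical target service
	})
	if err != nil {
		log.Fatal(err)
	}
	tok, err := creds.Token(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	_ = tok.Value // present as a Bearer token to the audience service
}
```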
package idtoken import ( @@ -72,7 +78,7 @@ type Options struct { func (o *Options) client() *http.Client { if o == nil || o.Client == nil { - return internal.CloneDefaultClient() + return internal.DefaultClient() } return o.Client } diff --git a/vendor/cloud.google.com/go/auth/credentials/idtoken/validate.go b/vendor/cloud.google.com/go/auth/credentials/idtoken/validate.go index d653bf2c189..4b17af20211 100644 --- a/vendor/cloud.google.com/go/auth/credentials/idtoken/validate.go +++ b/vendor/cloud.google.com/go/auth/credentials/idtoken/validate.go @@ -40,7 +40,7 @@ const ( ) var ( - defaultValidator = &Validator{client: newCachingClient(internal.CloneDefaultClient())} + defaultValidator = &Validator{client: newCachingClient(internal.DefaultClient())} // now aliases time.Now for testing. now = time.Now ) @@ -83,7 +83,7 @@ func NewValidator(opts *ValidatorOptions) (*Validator, error) { if opts != nil && opts.Client != nil { client = opts.Client } else { - client = internal.CloneDefaultClient() + client = internal.DefaultClient() } return &Validator{client: newCachingClient(client)}, nil } diff --git a/vendor/cloud.google.com/go/auth/credentials/impersonate/idtoken.go b/vendor/cloud.google.com/go/auth/credentials/impersonate/idtoken.go index d4affc17336..e51bee7d876 100644 --- a/vendor/cloud.google.com/go/auth/credentials/impersonate/idtoken.go +++ b/vendor/cloud.google.com/go/auth/credentials/impersonate/idtoken.go @@ -103,7 +103,7 @@ func NewIDTokenCredentials(opts *IDTokenOptions) (*auth.Credentials, error) { } } else if opts.Client == nil { creds = opts.Credentials - client = internal.CloneDefaultClient() + client = internal.DefaultClient() if err := httptransport.AddAuthorizationMiddleware(client, opts.Credentials); err != nil { return nil, err } diff --git a/vendor/cloud.google.com/go/auth/credentials/impersonate/impersonate.go b/vendor/cloud.google.com/go/auth/credentials/impersonate/impersonate.go index 0be955acde8..91b42bc3f7f 100644 --- a/vendor/cloud.google.com/go/auth/credentials/impersonate/impersonate.go +++ b/vendor/cloud.google.com/go/auth/credentials/impersonate/impersonate.go @@ -79,7 +79,7 @@ func NewCredentials(opts *CredentialsOptions) (*auth.Credentials, error) { } } else if opts.Credentials != nil { creds = opts.Credentials - client = internal.CloneDefaultClient() + client = internal.DefaultClient() if err := httptransport.AddAuthorizationMiddleware(client, opts.Credentials); err != nil { return nil, err } diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go index a34f6b06f84..d8b5d4fdeb9 100644 --- a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go @@ -94,32 +94,30 @@ func (sp *awsSubjectProvider) subjectToken(ctx context.Context) (string, error) if sp.RegionalCredVerificationURL == "" { sp.RegionalCredVerificationURL = defaultRegionalCredentialVerificationURL } - if sp.requestSigner == nil { - headers := make(map[string]string) - if sp.shouldUseMetadataServer() { - awsSessionToken, err := sp.getAWSSessionToken(ctx) - if err != nil { - return "", err - } - - if awsSessionToken != "" { - headers[awsIMDSv2SessionTokenHeader] = awsSessionToken - } - } - - awsSecurityCredentials, err := sp.getSecurityCredentials(ctx, headers) + headers := make(map[string]string) + if sp.shouldUseMetadataServer() { + awsSessionToken, 
err := sp.getAWSSessionToken(ctx)
 		if err != nil {
 			return "", err
 		}
-		if sp.region, err = sp.getRegion(ctx, headers); err != nil {
-			return "", err
-		}
-		sp.requestSigner = &awsRequestSigner{
-			RegionName:             sp.region,
-			AwsSecurityCredentials: awsSecurityCredentials,
+
+		if awsSessionToken != "" {
+			headers[awsIMDSv2SessionTokenHeader] = awsSessionToken
 		}
 	}
+	awsSecurityCredentials, err := sp.getSecurityCredentials(ctx, headers)
+	if err != nil {
+		return "", err
+	}
+	if sp.region, err = sp.getRegion(ctx, headers); err != nil {
+		return "", err
+	}
+	sp.requestSigner = &awsRequestSigner{
+		RegionName:             sp.region,
+		AwsSecurityCredentials: awsSecurityCredentials,
+	}
+
 	// Generate the signed request to AWS STS GetCallerIdentity API.
 	// Use the required regional endpoint. Otherwise, the request will fail.
 	req, err := http.NewRequestWithContext(ctx, "POST", strings.Replace(sp.RegionalCredVerificationURL, "{region}", sp.region, 1), nil)
diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go
index b19c6edeae5..112186a9e6e 100644
--- a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go
+++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go
@@ -100,6 +100,10 @@ type Options struct {
 	AwsSecurityCredentialsProvider AwsSecurityCredentialsProvider
 	// Client for token request.
 	Client *http.Client
+	// IsDefaultClient marks whether the client passed in is a default client that can be overridden.
+	// This is important for X509 credentials which should create a new client if the default was used
+	// but should respect a client explicitly passed in by the user.
+	IsDefaultClient bool
 }
 
 // SubjectTokenProvider can be used to supply a subject token to exchange for a
@@ -181,6 +185,26 @@ func (o *Options) validate() error {
 	return nil
 }
 
+// client returns the http client that should be used for the token exchange. If a non-default client
+// is provided, then the client configured in the options will always be returned. If a default client
+// is provided and the options are configured for X509 credentials, a new client will be created.
+func (o *Options) client() (*http.Client, error) {
+	// If a client was provided and no override certificate config location was provided, use the provided client.
+	if o.CredentialSource == nil || o.CredentialSource.Certificate == nil || (!o.IsDefaultClient && o.CredentialSource.Certificate.CertificateConfigLocation == "") {
+		return o.Client, nil
+	}
+
+	// If a new client should be created, validate and use the certificate source to create a new mTLS client.
+	cert := o.CredentialSource.Certificate
+	if !cert.UseDefaultCertificateConfig && cert.CertificateConfigLocation == "" {
+		return nil, errors.New("credentials: \"certificate\" object must either specify a certificate_config_location or use_default_certificate_config should be true")
+	}
+	if cert.UseDefaultCertificateConfig && cert.CertificateConfigLocation != "" {
+		return nil, errors.New("credentials: \"certificate\" object cannot specify both a certificate_config_location and use_default_certificate_config=true")
+	}
+	return createX509Client(cert.CertificateConfigLocation)
+}
+
 // resolveTokenURL sets the default STS token endpoint with the configured
 // universe domain.
func (o *Options) resolveTokenURL() { @@ -204,11 +228,18 @@ func NewTokenProvider(opts *Options) (auth.TokenProvider, error) { if err != nil { return nil, err } + + client, err := opts.client() + if err != nil { + return nil, err + } + tp := &tokenProvider{ - client: opts.Client, + client: client, opts: opts, stp: stp, } + if opts.ServiceAccountImpersonationURL == "" { return auth.NewCachedTokenProvider(tp, nil), nil } @@ -218,7 +249,7 @@ func NewTokenProvider(opts *Options) (auth.TokenProvider, error) { // needed for impersonation tp.opts.Scopes = []string{"https://www.googleapis.com/auth/cloud-platform"} imp, err := impersonate.NewTokenProvider(&impersonate.Options{ - Client: opts.Client, + Client: client, URL: opts.ServiceAccountImpersonationURL, Scopes: scopes, Tp: auth.NewCachedTokenProvider(tp, nil), @@ -353,6 +384,15 @@ func newSubjectTokenProvider(o *Options) (subjectTokenProvider, error) { execProvider.opts = o execProvider.env = runtimeEnvironment{} return execProvider, nil + } else if o.CredentialSource.Certificate != nil { + cert := o.CredentialSource.Certificate + if !cert.UseDefaultCertificateConfig && cert.CertificateConfigLocation == "" { + return nil, errors.New("credentials: \"certificate\" object must either specify a certificate_config_location or use_default_certificate_config should be true") + } + if cert.UseDefaultCertificateConfig && cert.CertificateConfigLocation != "" { + return nil, errors.New("credentials: \"certificate\" object cannot specify both a certificate_config_location and use_default_certificate_config=true") + } + return &x509Provider{}, nil } return nil, errors.New("credentials: unable to parse credential source") } diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go index e33d35a2687..0a020599e07 100644 --- a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go @@ -30,6 +30,7 @@ const ( fileTypeJSON = "json" urlProviderType = "url" programmaticProviderType = "programmatic" + x509ProviderType = "x509" ) type urlSubjectProvider struct { diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/x509_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/x509_provider.go new file mode 100644 index 00000000000..115df5881f1 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/x509_provider.go @@ -0,0 +1,63 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package externalaccount + +import ( + "context" + "crypto/tls" + "net/http" + "time" + + "cloud.google.com/go/auth/internal/transport/cert" +) + +// x509Provider implements the subjectTokenProvider type for +// x509 workload identity credentials. 
Because x509 credentials +// rely on an mTLS connection to represent the 3rd party identity +// rather than a subject token, this provider will always return +// an empty string when a subject token is requested by the external account +// token provider. +type x509Provider struct { +} + +func (xp *x509Provider) providerType() string { + return x509ProviderType +} + +func (xp *x509Provider) subjectToken(ctx context.Context) (string, error) { + return "", nil +} + +// createX509Client creates a new client that is configured with mTLS, using the +// certificate configuration specified in the credential source. +func createX509Client(certificateConfigLocation string) (*http.Client, error) { + certProvider, err := cert.NewWorkloadX509CertProvider(certificateConfigLocation) + if err != nil { + return nil, err + } + trans := http.DefaultTransport.(*http.Transport).Clone() + + trans.TLSClientConfig = &tls.Config{ + GetClientCertificate: certProvider, + } + + // Create a client with default settings plus the X509 workload cert and key. + client := &http.Client{ + Transport: trans, + Timeout: 30 * time.Second, + } + + return client, nil +} diff --git a/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go b/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go index b62a8ae4d5d..6ae29de6c27 100644 --- a/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go +++ b/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go @@ -17,6 +17,7 @@ package credentials import ( "context" "crypto/rsa" + "errors" "fmt" "strings" "time" @@ -35,6 +36,9 @@ var ( // configureSelfSignedJWT uses the private key in the service account to create // a JWT without making a network call. func configureSelfSignedJWT(f *credsfile.ServiceAccountFile, opts *DetectOptions) (auth.TokenProvider, error) { + if len(opts.scopes()) == 0 && opts.Audience == "" { + return nil, errors.New("credentials: both scopes and audience are empty") + } pk, err := internal.ParseKey([]byte(f.PrivateKey)) if err != nil { return nil, fmt.Errorf("credentials: could not parse key: %w", err) diff --git a/vendor/cloud.google.com/go/auth/grpctransport/directpath.go b/vendor/cloud.google.com/go/auth/grpctransport/directpath.go index 8dbfa7ef7e9..8696df1487f 100644 --- a/vendor/cloud.google.com/go/auth/grpctransport/directpath.go +++ b/vendor/cloud.google.com/go/auth/grpctransport/directpath.go @@ -22,7 +22,7 @@ import ( "strings" "cloud.google.com/go/auth" - "cloud.google.com/go/compute/metadata" + "cloud.google.com/go/auth/internal/compute" "google.golang.org/grpc" grpcgoogle "google.golang.org/grpc/credentials/google" ) @@ -55,7 +55,7 @@ func checkDirectPathEndPoint(endpoint string) bool { return true } -func isTokenProviderDirectPathCompatible(tp auth.TokenProvider, _ *Options) bool { +func isTokenProviderDirectPathCompatible(tp auth.TokenProvider, o *Options) bool { if tp == nil { return false } @@ -66,10 +66,13 @@ func isTokenProviderDirectPathCompatible(tp auth.TokenProvider, _ *Options) bool if tok == nil { return false } - if source, _ := tok.Metadata["auth.google.tokenSource"].(string); source != "compute-metadata" { + if o.InternalOptions != nil && o.InternalOptions.EnableNonDefaultSAForDirectPath { + return true + } + if tok.MetadataString("auth.google.tokenSource") != "compute-metadata" { return false } - if acct, _ := tok.Metadata["auth.google.serviceAccount"].(string); acct != "default" { + if tok.MetadataString("auth.google.serviceAccount") != "default" { return false } return true @@ -91,7 +94,7 @@ func 
isDirectPathXdsUsed(o *Options) bool { // configuration allows the use of direct path. If it does not the provided // grpcOpts and endpoint are returned. func configureDirectPath(grpcOpts []grpc.DialOption, opts *Options, endpoint string, creds *auth.Credentials) ([]grpc.DialOption, string) { - if isDirectPathEnabled(endpoint, opts) && metadata.OnGCE() && isTokenProviderDirectPathCompatible(creds, opts) { + if isDirectPathEnabled(endpoint, opts) && compute.OnComputeEngine() && isTokenProviderDirectPathCompatible(creds, opts) { // Overwrite all of the previously specific DialOptions, DirectPath uses its own set of credentials and certificates. grpcOpts = []grpc.DialOption{ grpc.WithCredentialsBundle(grpcgoogle.NewDefaultCredentialsWithOptions(grpcgoogle.DefaultCredentialsOptions{PerRPCCreds: &grpcCredentialsProvider{creds: creds}}))} diff --git a/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go b/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go index 5c3bc66f998..42d4cbe3062 100644 --- a/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go +++ b/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go @@ -12,6 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. +// Package grpctransport provides functionality for managing gRPC client +// connections to Google Cloud services. package grpctransport import ( @@ -20,15 +22,19 @@ import ( "errors" "fmt" "net/http" + "os" + "sync" "cloud.google.com/go/auth" "cloud.google.com/go/auth/credentials" "cloud.google.com/go/auth/internal" "cloud.google.com/go/auth/internal/transport" "go.opencensus.io/plugin/ocgrpc" + "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" "google.golang.org/grpc" grpccreds "google.golang.org/grpc/credentials" grpcinsecure "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/stats" ) const ( @@ -38,7 +44,7 @@ const ( // Check env to decide if using google-c2p resolver for DirectPath traffic. enableDirectPathXdsEnvVar = "GOOGLE_CLOUD_ENABLE_DIRECT_PATH_XDS" - quotaProjectHeaderKey = "X-Goog-User-Project" + quotaProjectHeaderKey = "X-goog-user-project" ) var ( @@ -46,6 +52,27 @@ var ( timeoutDialerOption grpc.DialOption ) +// otelStatsHandler is a singleton otelgrpc.clientHandler to be used across +// all dial connections to avoid the memory leak documented in +// https://github.com/open-telemetry/opentelemetry-go-contrib/issues/4226 +// +// TODO: When this module depends on a version of otelgrpc containing the fix, +// replace this singleton with inline usage for simplicity. +// The fix should be in https://github.com/open-telemetry/opentelemetry-go/pull/5797. +var ( + initOtelStatsHandlerOnce sync.Once + otelStatsHandler stats.Handler +) + +// otelGRPCStatsHandler returns singleton otelStatsHandler for reuse across all +// dial connections. +func otelGRPCStatsHandler() stats.Handler { + initOtelStatsHandlerOnce.Do(func() { + otelStatsHandler = otelgrpc.NewClientHandler() + }) + return otelStatsHandler +} + // ClientCertProvider is a function that returns a TLS client certificate to be // used when opening TLS connections. It follows the same semantics as // [crypto/tls.Config.GetClientCertificate]. 
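Because ClientCertProvider follows the semantics of [crypto/tls.Config.GetClientCertificate], any function of that shape can serve as one. A minimal sketch, assuming a PEM key pair on disk (the file paths are illustrative):

```go
package main

import (
	"crypto/tls"
	"fmt"
)

// clientCert has the ClientCertProvider shape: it is invoked during the
// TLS handshake and returns the certificate to present to the server.
func clientCert(*tls.CertificateRequestInfo) (*tls.Certificate, error) {
	cert, err := tls.LoadX509KeyPair("client.crt", "client.key") // illustrative paths
	if err != nil {
		return nil, err
	}
	return &cert, nil
}

func main() {
	// Plug the provider into a tls.Config, as the transport options ultimately do.
	cfg := &tls.Config{GetClientCertificate: clientCert}
	fmt.Println(cfg.GetClientCertificate != nil)
}
```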
@@ -271,7 +298,10 @@ func dial(ctx context.Context, secure bool, opts *Options) (*grpc.ClientConn, er if metadata == nil { metadata = make(map[string]string, 1) } - metadata[quotaProjectHeaderKey] = qp + // Don't overwrite user specified quota + if _, ok := metadata[quotaProjectHeaderKey]; !ok { + metadata[quotaProjectHeaderKey] = qp + } } grpcOpts = append(grpcOpts, grpc.WithPerRPCCredentials(&grpcCredentialsProvider{ @@ -289,9 +319,10 @@ func dial(ctx context.Context, secure bool, opts *Options) (*grpc.ClientConn, er // gRPC stats handler. // This assumes that gRPC options are processed in order, left to right. grpcOpts = addOCStatsHandler(grpcOpts, opts) + grpcOpts = addOpenTelemetryStatsHandler(grpcOpts, opts) grpcOpts = append(grpcOpts, opts.GRPCDialOpts...) - return grpc.DialContext(ctx, endpoint, grpcOpts...) + return grpc.NewClient(endpoint, grpcOpts...) } // grpcKeyProvider satisfies https://pkg.go.dev/google.golang.org/grpc/credentials#PerRPCCredentials. @@ -325,29 +356,39 @@ type grpcCredentialsProvider struct { clientUniverseDomain string } -// getClientUniverseDomain returns the default service domain for a given Cloud universe. -// The default value is "googleapis.com". This is the universe domain -// configured for the client, which will be compared to the universe domain -// that is separately configured for the credentials. +// getClientUniverseDomain returns the default service domain for a given Cloud +// universe, with the following precedence: +// +// 1. A non-empty option.WithUniverseDomain or similar client option. +// 2. A non-empty environment variable GOOGLE_CLOUD_UNIVERSE_DOMAIN. +// 3. The default value "googleapis.com". +// +// This is the universe domain configured for the client, which will be compared +// to the universe domain that is separately configured for the credentials. 
func (c *grpcCredentialsProvider) getClientUniverseDomain() string { - if c.clientUniverseDomain == "" { - return internal.DefaultUniverseDomain + if c.clientUniverseDomain != "" { + return c.clientUniverseDomain + } + if envUD := os.Getenv(internal.UniverseDomainEnvVar); envUD != "" { + return envUD } - return c.clientUniverseDomain + return internal.DefaultUniverseDomain } func (c *grpcCredentialsProvider) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { - credentialsUniverseDomain, err := c.creds.UniverseDomain(ctx) - if err != nil { - return nil, err - } - if err := transport.ValidateUniverseDomain(c.getClientUniverseDomain(), credentialsUniverseDomain); err != nil { - return nil, err - } token, err := c.creds.Token(ctx) if err != nil { return nil, err } + if token.MetadataString("auth.google.tokenSource") != "compute-metadata" { + credentialsUniverseDomain, err := c.creds.UniverseDomain(ctx) + if err != nil { + return nil, err + } + if err := transport.ValidateUniverseDomain(c.getClientUniverseDomain(), credentialsUniverseDomain); err != nil { + return nil, err + } + } if c.secure { ri, _ := grpccreds.RequestInfoFromContext(ctx) if err = grpccreds.CheckSecurityLevel(ri.AuthInfo, grpccreds.PrivacyAndIntegrity); err != nil { @@ -382,3 +423,10 @@ func addOCStatsHandler(dialOpts []grpc.DialOption, opts *Options) []grpc.DialOpt } return append(dialOpts, grpc.WithStatsHandler(&ocgrpc.ClientHandler{})) } + +func addOpenTelemetryStatsHandler(dialOpts []grpc.DialOption, opts *Options) []grpc.DialOption { + if opts.DisableTelemetry { + return dialOpts + } + return append(dialOpts, grpc.WithStatsHandler(otelGRPCStatsHandler())) +} diff --git a/vendor/cloud.google.com/go/auth/httptransport/httptransport.go b/vendor/cloud.google.com/go/auth/httptransport/httptransport.go index 969c8d4d200..30fedf9562f 100644 --- a/vendor/cloud.google.com/go/auth/httptransport/httptransport.go +++ b/vendor/cloud.google.com/go/auth/httptransport/httptransport.go @@ -12,6 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. +// Package httptransport provides functionality for managing HTTP client +// connections to Google Cloud services. package httptransport import ( diff --git a/vendor/cloud.google.com/go/auth/httptransport/transport.go b/vendor/cloud.google.com/go/auth/httptransport/transport.go index 94caeb00f0a..63498ee792b 100644 --- a/vendor/cloud.google.com/go/auth/httptransport/transport.go +++ b/vendor/cloud.google.com/go/auth/httptransport/transport.go @@ -19,6 +19,7 @@ import ( "crypto/tls" "net" "net/http" + "os" "time" "cloud.google.com/go/auth" @@ -27,11 +28,12 @@ import ( "cloud.google.com/go/auth/internal/transport" "cloud.google.com/go/auth/internal/transport/cert" "go.opencensus.io/plugin/ochttp" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" "golang.org/x/net/http2" ) const ( - quotaProjectHeaderKey = "X-Goog-User-Project" + quotaProjectHeaderKey = "X-goog-user-project" ) func newTransport(base http.RoundTripper, opts *Options) (http.RoundTripper, error) { @@ -41,6 +43,9 @@ func newTransport(base http.RoundTripper, opts *Options) (http.RoundTripper, err headers: headers, } var trans http.RoundTripper = ht + // Give OpenTelemetry precedence over OpenCensus in case user configuration + // causes both to write the same header (`X-Cloud-Trace-Context`). 
+ trans = addOpenTelemetryTransport(trans, opts) trans = addOCTransport(trans, opts) switch { case opts.DisableAuthentication: @@ -76,7 +81,10 @@ func newTransport(base http.RoundTripper, opts *Options) (http.RoundTripper, err if headers == nil { headers = make(map[string][]string, 1) } - headers.Set(quotaProjectHeaderKey, qp) + // Don't overwrite user specified quota + if v := headers.Get(quotaProjectHeaderKey); v == "" { + headers.Set(quotaProjectHeaderKey, qp) + } } creds.TokenProvider = auth.NewCachedTokenProvider(creds.TokenProvider, nil) trans = &authTransport{ @@ -94,7 +102,11 @@ func newTransport(base http.RoundTripper, opts *Options) (http.RoundTripper, err // http.DefaultTransport. // If TLSCertificate is available, set TLSClientConfig as well. func defaultBaseTransport(clientCertSource cert.Provider, dialTLSContext func(context.Context, string, string) (net.Conn, error)) http.RoundTripper { - trans := http.DefaultTransport.(*http.Transport).Clone() + defaultTransport, ok := http.DefaultTransport.(*http.Transport) + if !ok { + defaultTransport = transport.BaseTransport() + } + trans := defaultTransport.Clone() trans.MaxIdleConnsPerHost = 100 if clientCertSource != nil { @@ -155,6 +167,13 @@ func (t *headerTransport) RoundTrip(req *http.Request) (*http.Response, error) { return rt.RoundTrip(&newReq) } +func addOpenTelemetryTransport(trans http.RoundTripper, opts *Options) http.RoundTripper { + if opts.DisableTelemetry { + return trans + } + return otelhttp.NewTransport(trans) +} + func addOCTransport(trans http.RoundTripper, opts *Options) http.RoundTripper { if opts.DisableTelemetry { return trans @@ -171,13 +190,23 @@ type authTransport struct { clientUniverseDomain string } -// getClientUniverseDomain returns the universe domain configured for the client. -// The default value is "googleapis.com". +// getClientUniverseDomain returns the default service domain for a given Cloud +// universe, with the following precedence: +// +// 1. A non-empty option.WithUniverseDomain or similar client option. +// 2. A non-empty environment variable GOOGLE_CLOUD_UNIVERSE_DOMAIN. +// 3. The default value "googleapis.com". +// +// This is the universe domain configured for the client, which will be compared +// to the universe domain that is separately configured for the credentials. 
func (t *authTransport) getClientUniverseDomain() string { - if t.clientUniverseDomain == "" { - return internal.DefaultUniverseDomain + if t.clientUniverseDomain != "" { + return t.clientUniverseDomain + } + if envUD := os.Getenv(internal.UniverseDomainEnvVar); envUD != "" { + return envUD } - return t.clientUniverseDomain + return internal.DefaultUniverseDomain } // RoundTrip authorizes and authenticates the request with an @@ -193,17 +222,19 @@ func (t *authTransport) RoundTrip(req *http.Request) (*http.Response, error) { } }() } - credentialsUniverseDomain, err := t.creds.UniverseDomain(req.Context()) - if err != nil { - return nil, err - } - if err := transport.ValidateUniverseDomain(t.getClientUniverseDomain(), credentialsUniverseDomain); err != nil { - return nil, err - } token, err := t.creds.Token(req.Context()) if err != nil { return nil, err } + if token.MetadataString("auth.google.tokenSource") != "compute-metadata" { + credentialsUniverseDomain, err := t.creds.UniverseDomain(req.Context()) + if err != nil { + return nil, err + } + if err := transport.ValidateUniverseDomain(t.getClientUniverseDomain(), credentialsUniverseDomain); err != nil { + return nil, err + } + } req2 := req.Clone(req.Context()) SetAuthHeader(token, req2) reqBodyClosed = true diff --git a/vendor/cloud.google.com/go/auth/internal/compute/compute.go b/vendor/cloud.google.com/go/auth/internal/compute/compute.go new file mode 100644 index 00000000000..651bd61fbbc --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/compute/compute.go @@ -0,0 +1,66 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compute + +import ( + "log" + "runtime" + "strings" + "sync" +) + +var ( + vmOnGCEOnce sync.Once + vmOnGCE bool +) + +// OnComputeEngine returns whether the client is running on GCE. +// +// This is a copy of the gRPC internal googlecloud.OnGCE() func at: +// https://github.com/grpc/grpc-go/blob/master/internal/googlecloud/googlecloud.go +// The functionality is similar to the metadata.OnGCE() func at: +// https://github.com/xmenxk/google-cloud-go/blob/main/compute/metadata/metadata.go +// +// The difference is that OnComputeEngine() does not perform HTTP or DNS check on the metadata server. +// In particular, OnComputeEngine() will return false on Serverless. +func OnComputeEngine() bool { + vmOnGCEOnce.Do(func() { + mf, err := manufacturer() + if err != nil { + log.Printf("Failed to read manufacturer, vmOnGCE=false: %v", err) + return + } + vmOnGCE = isRunningOnGCE(mf, runtime.GOOS) + }) + return vmOnGCE +} + +// isRunningOnGCE checks whether the local system, without doing a network request, is +// running on GCP. 
+func isRunningOnGCE(manufacturer []byte, goos string) bool { + name := string(manufacturer) + switch goos { + case "linux": + name = strings.TrimSpace(name) + return name == "Google" || name == "Google Compute Engine" + case "windows": + name = strings.Replace(name, " ", "", -1) + name = strings.Replace(name, "\n", "", -1) + name = strings.Replace(name, "\r", "", -1) + return name == "Google" + default: + return false + } +} diff --git a/vendor/cloud.google.com/go/auth/internal/compute/manufacturer.go b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer.go new file mode 100644 index 00000000000..af490bf4f49 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer.go @@ -0,0 +1,22 @@ +//go:build !(linux || windows) +// +build !linux,!windows + +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compute + +func manufacturer() ([]byte, error) { + return nil, nil +} diff --git a/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_linux.go b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_linux.go new file mode 100644 index 00000000000..d92178df86c --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_linux.go @@ -0,0 +1,23 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compute + +import "os" + +const linuxProductNameFile = "/sys/class/dmi/id/product_name" + +func manufacturer() ([]byte, error) { + return os.ReadFile(linuxProductNameFile) +} diff --git a/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_windows.go b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_windows.go new file mode 100644 index 00000000000..16be9df3064 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_windows.go @@ -0,0 +1,46 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package compute + +import ( + "errors" + "os/exec" + "regexp" + "strings" +) + +const ( + windowsCheckCommand = "powershell.exe" + windowsCheckCommandArgs = "Get-WmiObject -Class Win32_BIOS" + powershellOutputFilter = "Manufacturer" + windowsManufacturerRegex = ":(.*)" +) + +func manufacturer() ([]byte, error) { + cmd := exec.Command(windowsCheckCommand, windowsCheckCommandArgs) + out, err := cmd.Output() + if err != nil { + return nil, err + } + for _, line := range strings.Split(strings.TrimSuffix(string(out), "\n"), "\n") { + if strings.HasPrefix(line, powershellOutputFilter) { + re := regexp.MustCompile(windowsManufacturerRegex) + name := re.FindString(line) + name = strings.TrimLeft(name, ":") + return []byte(name), nil + } + } + return nil, errors.New("cannot determine the machine's manufacturer") +} diff --git a/vendor/cloud.google.com/go/auth/internal/credsfile/filetype.go b/vendor/cloud.google.com/go/auth/internal/credsfile/filetype.go index 69e30779f98..3be6e5bbb41 100644 --- a/vendor/cloud.google.com/go/auth/internal/credsfile/filetype.go +++ b/vendor/cloud.google.com/go/auth/internal/credsfile/filetype.go @@ -90,19 +90,20 @@ type ExternalAccountAuthorizedUserFile struct { // CredentialSource stores the information necessary to retrieve the credentials for the STS exchange. // -// One field amongst File, URL, and Executable should be filled, depending on the kind of credential in question. +// One field amongst File, URL, Certificate, and Executable should be filled, depending on the kind of credential in question. // The EnvironmentID should start with AWS if being used for an AWS credential. type CredentialSource struct { - File string `json:"file"` - URL string `json:"url"` - Headers map[string]string `json:"headers"` - Executable *ExecutableConfig `json:"executable,omitempty"` - EnvironmentID string `json:"environment_id"` - RegionURL string `json:"region_url"` - RegionalCredVerificationURL string `json:"regional_cred_verification_url"` - CredVerificationURL string `json:"cred_verification_url"` - IMDSv2SessionTokenURL string `json:"imdsv2_session_token_url"` - Format *Format `json:"format,omitempty"` + File string `json:"file"` + URL string `json:"url"` + Headers map[string]string `json:"headers"` + Executable *ExecutableConfig `json:"executable,omitempty"` + Certificate *CertificateConfig `json:"certificate"` + EnvironmentID string `json:"environment_id"` // TODO: Make type for this + RegionURL string `json:"region_url"` + RegionalCredVerificationURL string `json:"regional_cred_verification_url"` + CredVerificationURL string `json:"cred_verification_url"` + IMDSv2SessionTokenURL string `json:"imdsv2_session_token_url"` + Format *Format `json:"format,omitempty"` } // Format describes the format of a [CredentialSource]. @@ -121,6 +122,13 @@ type ExecutableConfig struct { OutputFile string `json:"output_file"` } +// CertificateConfig represents the options used to set up X509 based workload +// [CredentialSource] +type CertificateConfig struct { + UseDefaultCertificateConfig bool `json:"use_default_certificate_config"` + CertificateConfigLocation string `json:"certificate_config_location"` +} + // ServiceAccountImpersonationInfo has impersonation configuration. 
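A small, self-contained sketch of the credential-file fragment the new CertificateConfig accepts. The struct is mirrored locally because credsfile is an internal package; its field tags come straight from the diff above, and per the validation added in externalaccount, exactly one of the two fields should be set.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Local mirror of credsfile.CertificateConfig, tags copied from the diff.
type certificateConfig struct {
	UseDefaultCertificateConfig bool   `json:"use_default_certificate_config"`
	CertificateConfigLocation   string `json:"certificate_config_location"`
}

func main() {
	// The "certificate" object inside a credential source; set exactly one
	// of the two fields, per the validation in externalaccount.
	raw := []byte(`{"use_default_certificate_config": true}`)
	var cfg certificateConfig
	if err := json.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.UseDefaultCertificateConfig) // true
}
```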
type ServiceAccountImpersonationInfo struct { TokenLifetimeSeconds int `json:"token_lifetime_seconds"` diff --git a/vendor/cloud.google.com/go/auth/internal/internal.go b/vendor/cloud.google.com/go/auth/internal/internal.go index 8c328e2fbd9..d8c16119180 100644 --- a/vendor/cloud.google.com/go/auth/internal/internal.go +++ b/vendor/cloud.google.com/go/auth/internal/internal.go @@ -38,18 +38,35 @@ const ( // QuotaProjectEnvVar is the environment variable for setting the quota // project. QuotaProjectEnvVar = "GOOGLE_CLOUD_QUOTA_PROJECT" - projectEnvVar = "GOOGLE_CLOUD_PROJECT" - maxBodySize = 1 << 20 + // UniverseDomainEnvVar is the environment variable for setting the default + // service domain for a given Cloud universe. + UniverseDomainEnvVar = "GOOGLE_CLOUD_UNIVERSE_DOMAIN" + projectEnvVar = "GOOGLE_CLOUD_PROJECT" + maxBodySize = 1 << 20 // DefaultUniverseDomain is the default value for universe domain. // Universe domain is the default service domain for a given Cloud universe. DefaultUniverseDomain = "googleapis.com" ) -// CloneDefaultClient returns a [http.Client] with some good defaults. -func CloneDefaultClient() *http.Client { +type clonableTransport interface { + Clone() *http.Transport +} + +// DefaultClient returns an [http.Client] with some defaults set. If +// the current [http.DefaultTransport] is a [clonableTransport], as +// is the case for an [*http.Transport], the clone will be used. +// Otherwise the [http.DefaultTransport] is used directly. +func DefaultClient() *http.Client { + if transport, ok := http.DefaultTransport.(clonableTransport); ok { + return &http.Client{ + Transport: transport.Clone(), + Timeout: 30 * time.Second, + } + } + return &http.Client{ - Transport: http.DefaultTransport.(*http.Transport).Clone(), + Transport: http.DefaultTransport, Timeout: 30 * time.Second, } } @@ -181,8 +198,9 @@ func (c *ComputeUniverseDomainProvider) GetProperty(ctx context.Context) (string // httpGetMetadataUniverseDomain is a package var for unit test substitution. 
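The clonableTransport check above guards programs that have replaced http.DefaultTransport with a wrapper. A hedged sketch of that situation (fakeRT is a stand-in, not a real library type): the old CloneDefaultClient asserted `*http.Transport` unconditionally and would panic, while DefaultClient now falls back to using the transport as-is.

```go
package main

import (
	"fmt"
	"net/http"
)

// fakeRT stands in for any RoundTripper wrapper (e.g. an instrumented
// transport) that is not an *http.Transport and cannot be cloned.
type fakeRT struct{ base http.RoundTripper }

func (f fakeRT) RoundTrip(r *http.Request) (*http.Response, error) {
	return f.base.RoundTrip(r)
}

func main() {
	http.DefaultTransport = fakeRT{base: http.DefaultTransport}
	// Mirrors the clonableTransport interface check in DefaultClient.
	if _, ok := http.DefaultTransport.(interface{ Clone() *http.Transport }); !ok {
		fmt.Println("transport is not clonable; using it directly")
	}
}
```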
var httpGetMetadataUniverseDomain = func(ctx context.Context) (string, error) { - client := metadata.NewClient(&http.Client{Timeout: time.Second}) - return client.GetWithContext(ctx, "universe/universe_domain") + ctx, cancel := context.WithTimeout(ctx, 1*time.Second) + defer cancel() + return metadata.GetWithContext(ctx, "universe/universe-domain") } func getMetadataUniverseDomain(ctx context.Context) (string, error) { diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cba.go b/vendor/cloud.google.com/go/auth/internal/transport/cba.go index d94e0af08a3..f606888f120 100644 --- a/vendor/cloud.google.com/go/auth/internal/transport/cba.go +++ b/vendor/cloud.google.com/go/auth/internal/transport/cba.go @@ -17,7 +17,9 @@ package transport import ( "context" "crypto/tls" + "crypto/x509" "errors" + "log" "net" "net/http" "net/url" @@ -44,10 +46,12 @@ const ( googleAPIUseMTLSOld = "GOOGLE_API_USE_MTLS" universeDomainPlaceholder = "UNIVERSE_DOMAIN" + + mtlsMDSRoot = "/run/google-mds-mtls/root.crt" + mtlsMDSKey = "/run/google-mds-mtls/client.key" ) var ( - mdsMTLSAutoConfigSource mtlsConfigSource errUniverseNotSupportedMTLS = errors.New("mTLS is not supported in any universe other than googleapis.com") ) @@ -120,7 +124,24 @@ func GetGRPCTransportCredsAndEndpoint(opts *Options) (credentials.TransportCrede defaultTransportCreds := credentials.NewTLS(&tls.Config{ GetClientCertificate: config.clientCertSource, }) - if config.s2aAddress == "" { + + var s2aAddr string + var transportCredsForS2A credentials.TransportCredentials + + if config.mtlsS2AAddress != "" { + s2aAddr = config.mtlsS2AAddress + transportCredsForS2A, err = loadMTLSMDSTransportCreds(mtlsMDSRoot, mtlsMDSKey) + if err != nil { + log.Printf("Loading MTLS MDS credentials failed: %v", err) + if config.s2aAddress != "" { + s2aAddr = config.s2aAddress + } else { + return defaultTransportCreds, config.endpoint, nil + } + } + } else if config.s2aAddress != "" { + s2aAddr = config.s2aAddress + } else { return defaultTransportCreds, config.endpoint, nil } @@ -133,8 +154,9 @@ func GetGRPCTransportCredsAndEndpoint(opts *Options) (credentials.TransportCrede } s2aTransportCreds, err := s2a.NewClientCreds(&s2a.ClientOptions{ - S2AAddress: config.s2aAddress, - FallbackOpts: fallbackOpts, + S2AAddress: s2aAddr, + TransportCreds: transportCredsForS2A, + FallbackOpts: fallbackOpts, }) if err != nil { // Use default if we cannot initialize S2A client transport credentials. 
@@ -151,7 +173,23 @@ func GetHTTPTransportConfig(opts *Options) (cert.Provider, func(context.Context, return nil, nil, err } - if config.s2aAddress == "" { + var s2aAddr string + var transportCredsForS2A credentials.TransportCredentials + + if config.mtlsS2AAddress != "" { + s2aAddr = config.mtlsS2AAddress + transportCredsForS2A, err = loadMTLSMDSTransportCreds(mtlsMDSRoot, mtlsMDSKey) + if err != nil { + log.Printf("Loading MTLS MDS credentials failed: %v", err) + if config.s2aAddress != "" { + s2aAddr = config.s2aAddress + } else { + return config.clientCertSource, nil, nil + } + } + } else if config.s2aAddress != "" { + s2aAddr = config.s2aAddress + } else { return config.clientCertSource, nil, nil } @@ -169,12 +207,38 @@ func GetHTTPTransportConfig(opts *Options) (cert.Provider, func(context.Context, } dialTLSContextFunc := s2a.NewS2ADialTLSContextFunc(&s2a.ClientOptions{ - S2AAddress: config.s2aAddress, - FallbackOpts: fallbackOpts, + S2AAddress: s2aAddr, + TransportCreds: transportCredsForS2A, + FallbackOpts: fallbackOpts, }) return nil, dialTLSContextFunc, nil } +func loadMTLSMDSTransportCreds(mtlsMDSRootFile, mtlsMDSKeyFile string) (credentials.TransportCredentials, error) { + rootPEM, err := os.ReadFile(mtlsMDSRootFile) + if err != nil { + return nil, err + } + caCertPool := x509.NewCertPool() + ok := caCertPool.AppendCertsFromPEM(rootPEM) + if !ok { + return nil, errors.New("failed to load MTLS MDS root certificate") + } + // The mTLS MDS credentials are formatted as the concatenation of a PEM-encoded certificate chain + // followed by a PEM-encoded private key. For this reason, the concatenation is passed in to the + // tls.X509KeyPair function as both the certificate chain and private key arguments. + cert, err := tls.LoadX509KeyPair(mtlsMDSKeyFile, mtlsMDSKeyFile) + if err != nil { + return nil, err + } + tlsConfig := tls.Config{ + RootCAs: caCertPool, + Certificates: []tls.Certificate{cert}, + MinVersion: tls.VersionTLS13, + } + return credentials.NewTLS(&tlsConfig), nil +} + func getTransportConfig(opts *Options) (*transportConfig, error) { clientCertSource, err := GetClientCertificateProvider(opts) if err != nil { @@ -196,17 +260,17 @@ func getTransportConfig(opts *Options) (*transportConfig, error) { return nil, errUniverseNotSupportedMTLS } - s2aMTLSEndpoint := opts.DefaultMTLSEndpoint - s2aAddress := GetS2AAddress() - if s2aAddress == "" { + mtlsS2AAddress := GetMTLSS2AAddress() + if s2aAddress == "" && mtlsS2AAddress == "" { return &defaultTransportConfig, nil } return &transportConfig{ clientCertSource: clientCertSource, endpoint: endpoint, s2aAddress: s2aAddress, - s2aMTLSEndpoint: s2aMTLSEndpoint, + mtlsS2AAddress: mtlsS2AAddress, + s2aMTLSEndpoint: opts.DefaultMTLSEndpoint, }, nil } @@ -241,8 +305,10 @@ type transportConfig struct { clientCertSource cert.Provider // The corresponding endpoint to use based on client certificate source. endpoint string - // The S2A address if it can be used, otherwise an empty string. + // The plaintext S2A address if it can be used, otherwise an empty string. s2aAddress string + // The MTLS S2A address if it can be used, otherwise an empty string. + mtlsS2AAddress string // The MTLS endpoint to use with S2A. 
s2aMTLSEndpoint string } diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cert/enterprise_cert.go b/vendor/cloud.google.com/go/auth/internal/transport/cert/enterprise_cert.go index 36651591612..6c954ae193c 100644 --- a/vendor/cloud.google.com/go/auth/internal/transport/cert/enterprise_cert.go +++ b/vendor/cloud.google.com/go/auth/internal/transport/cert/enterprise_cert.go @@ -16,7 +16,6 @@ package cert import ( "crypto/tls" - "errors" "github.com/googleapis/enterprise-certificate-proxy/client" ) @@ -37,10 +36,9 @@ type ecpSource struct { func NewEnterpriseCertificateProxyProvider(configFilePath string) (Provider, error) { key, err := client.Cred(configFilePath) if err != nil { - if errors.Is(err, client.ErrCredUnavailable) { - return nil, errSourceUnavailable - } - return nil, err + // TODO(codyoss): once this is fixed upstream can handle this error a + // little better here. But be safe for now and assume unavailable. + return nil, errSourceUnavailable } return (&ecpSource{ diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cert/secureconnect_cert.go b/vendor/cloud.google.com/go/auth/internal/transport/cert/secureconnect_cert.go index 3227aba280c..738cb21618e 100644 --- a/vendor/cloud.google.com/go/auth/internal/transport/cert/secureconnect_cert.go +++ b/vendor/cloud.google.com/go/auth/internal/transport/cert/secureconnect_cert.go @@ -62,11 +62,11 @@ func NewSecureConnectProvider(configFilePath string) (Provider, error) { file, err := os.ReadFile(configFilePath) if err != nil { - if errors.Is(err, os.ErrNotExist) { - // Config file missing means Secure Connect is not supported. - return nil, errSourceUnavailable - } - return nil, err + // Config file missing means Secure Connect is not supported. + // There are non-os.ErrNotExist errors that may be returned. + // (e.g. if the home directory is /dev/null, *nix systems will + // return ENOTDIR instead of ENOENT) + return nil, errSourceUnavailable } var metadata secureConnectMetadata diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go b/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go index e8675bf824b..347aaced721 100644 --- a/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go +++ b/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go @@ -82,10 +82,7 @@ func (s *workloadSource) getClientCertificate(info *tls.CertificateRequestInfo) func getCertAndKeyFiles(configFilePath string) (string, string, error) { jsonFile, err := os.Open(configFilePath) if err != nil { - if errors.Is(err, os.ErrNotExist) { - return "", "", errSourceUnavailable - } - return "", "", err + return "", "", errSourceUnavailable } byteValue, err := io.ReadAll(jsonFile) diff --git a/vendor/cloud.google.com/go/auth/internal/transport/s2a.go b/vendor/cloud.google.com/go/auth/internal/transport/s2a.go index 2ed532deb7a..37894bfcd01 100644 --- a/vendor/cloud.google.com/go/auth/internal/transport/s2a.go +++ b/vendor/cloud.google.com/go/auth/internal/transport/s2a.go @@ -15,12 +15,13 @@ package transport import ( + "context" "encoding/json" + "fmt" "log" "os" "strconv" "sync" - "time" "cloud.google.com/go/auth/internal/transport/cert" "cloud.google.com/go/compute/metadata" @@ -31,41 +32,38 @@ const ( ) var ( - // The period an MTLS config can be reused before needing refresh. - configExpiry = time.Hour + mtlsConfiguration *mtlsConfig - // mdsMTLSAutoConfigSource is an instance of reuseMTLSConfigSource, with metadataMTLSAutoConfig as its config source. 
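The three certificate-provider hunks above (enterprise, SecureConnect, workload) converge on one policy: any failure to read an optional config file maps to the shared sentinel, not just `os.ErrNotExist`. The SecureConnect comment gives the motivating case: with HOME=/dev/null, *nix returns ENOTDIR rather than ENOENT, which the old `errors.Is(err, os.ErrNotExist)` check let escape as a hard error. A sketch of the pattern; `readOptionalConfig` is illustrative:

```go
package sketch

import (
	"errors"
	"os"
)

// errSourceUnavailable mirrors the sentinel the vendored providers share:
// callers treat it as "this source is not configured here", not a failure.
var errSourceUnavailable = errors.New("certificate source is unavailable")

// readOptionalConfig maps every read error to the sentinel so a probe of an
// optional source can never surface as a hard error.
func readOptionalConfig(path string) ([]byte, error) {
	b, err := os.ReadFile(path)
	if err != nil {
		return nil, errSourceUnavailable
	}
	return b, nil
}
```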
mtlsOnce sync.Once ) // GetS2AAddress returns the S2A address to be reached via plaintext connection. // Returns empty string if not set or invalid. func GetS2AAddress() string { - c, err := getMetadataMTLSAutoConfig().Config() - if err != nil { - return "" - } - if !c.Valid() { + getMetadataMTLSAutoConfig() + if !mtlsConfiguration.valid() { return "" } - return c.S2A.PlaintextAddress + return mtlsConfiguration.S2A.PlaintextAddress } -type mtlsConfigSource interface { - Config() (*mtlsConfig, error) +// GetMTLSS2AAddress returns the S2A address to be reached via MTLS connection. +// Returns empty string if not set or invalid. +func GetMTLSS2AAddress() string { + getMetadataMTLSAutoConfig() + if !mtlsConfiguration.valid() { + return "" + } + return mtlsConfiguration.S2A.MTLSAddress } // mtlsConfig contains the configuration for establishing MTLS connections with Google APIs. type mtlsConfig struct { - S2A *s2aAddresses `json:"s2a"` - Expiry time.Time + S2A *s2aAddresses `json:"s2a"` } -func (c *mtlsConfig) Valid() bool { - return c != nil && c.S2A != nil && !c.expired() -} -func (c *mtlsConfig) expired() bool { - return c.Expiry.Before(time.Now()) +func (c *mtlsConfig) valid() bool { + return c != nil && c.S2A != nil } // s2aAddresses contains the plaintext and/or MTLS S2A addresses. @@ -76,80 +74,36 @@ type s2aAddresses struct { MTLSAddress string `json:"mtls_address"` } -// getMetadataMTLSAutoConfig returns mdsMTLSAutoConfigSource, which is backed by config from MDS with auto-refresh. -func getMetadataMTLSAutoConfig() mtlsConfigSource { +func getMetadataMTLSAutoConfig() { + var err error mtlsOnce.Do(func() { - mdsMTLSAutoConfigSource = &reuseMTLSConfigSource{ - src: &metadataMTLSAutoConfig{}, + mtlsConfiguration, err = queryConfig() + if err != nil { + log.Printf("Getting MTLS config failed: %v", err) } }) - return mdsMTLSAutoConfigSource -} - -// reuseMTLSConfigSource caches a valid version of mtlsConfig, and uses `src` to refresh upon config expiry. -// It implements the mtlsConfigSource interface, so calling Config() on it returns an mtlsConfig. 
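The new accessors above rely on a one-shot `sync.Once` fetch instead of the removed hourly-refresh cache. A sketch of that pattern with illustrative names:

```go
package sketch

import (
	"log"
	"sync"
)

type s2aAddrs struct{ Plaintext, MTLS string }

var (
	once   sync.Once
	cached *s2aAddrs // stays nil if the single fetch fails
)

// lookup runs fetch exactly once; every later caller, as with both
// GetS2AAddress and GetMTLSS2AAddress above, reuses the result.
func lookup(fetch func() (*s2aAddrs, error)) *s2aAddrs {
	once.Do(func() {
		c, err := fetch()
		if err != nil {
			log.Printf("getting MTLS config failed: %v", err)
			return
		}
		cached = c
	})
	return cached
}
```

One consequence visible in the sketch: a failed first fetch is itself cached, so a transient MDS error at startup leaves the addresses empty for the life of the process.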
-type reuseMTLSConfigSource struct { - src mtlsConfigSource // src.Config() is called when config is expired - mu sync.Mutex // mutex guards config - config *mtlsConfig // cached config } -func (cs *reuseMTLSConfigSource) Config() (*mtlsConfig, error) { - cs.mu.Lock() - defer cs.mu.Unlock() - - if cs.config.Valid() { - return cs.config, nil - } - c, err := cs.src.Config() - if err != nil { - return nil, err - } - cs.config = c - return c, nil -} - -// metadataMTLSAutoConfig is an implementation of the interface mtlsConfigSource -// It has the logic to query MDS and return an mtlsConfig -type metadataMTLSAutoConfig struct{} - var httpGetMetadataMTLSConfig = func() (string, error) { - return metadata.Get(configEndpointSuffix) + return metadata.GetWithContext(context.Background(), configEndpointSuffix) } -func (cs *metadataMTLSAutoConfig) Config() (*mtlsConfig, error) { +func queryConfig() (*mtlsConfig, error) { resp, err := httpGetMetadataMTLSConfig() if err != nil { - log.Printf("querying MTLS config from MDS endpoint failed: %v", err) - return defaultMTLSConfig(), nil + return nil, fmt.Errorf("querying MTLS config from MDS endpoint failed: %w", err) } var config mtlsConfig err = json.Unmarshal([]byte(resp), &config) if err != nil { - log.Printf("unmarshalling MTLS config from MDS endpoint failed: %v", err) - return defaultMTLSConfig(), nil + return nil, fmt.Errorf("unmarshalling MTLS config from MDS endpoint failed: %w", err) } - if config.S2A == nil { - log.Printf("returned MTLS config from MDS endpoint is invalid: %v", config) - return defaultMTLSConfig(), nil + return nil, fmt.Errorf("returned MTLS config from MDS endpoint is invalid: %v", config) } - - // set new expiry - config.Expiry = time.Now().Add(configExpiry) return &config, nil } -func defaultMTLSConfig() *mtlsConfig { - return &mtlsConfig{ - S2A: &s2aAddresses{ - PlaintextAddress: "", - MTLSAddress: "", - }, - Expiry: time.Now().Add(configExpiry), - } -} - func shouldUseS2A(clientCertSource cert.Provider, opts *Options) bool { // If client cert is found, use that over S2A. if clientCertSource != nil { diff --git a/vendor/cloud.google.com/go/auth/internal/transport/transport.go b/vendor/cloud.google.com/go/auth/internal/transport/transport.go index 718a6b17145..cc586ec5b1a 100644 --- a/vendor/cloud.google.com/go/auth/internal/transport/transport.go +++ b/vendor/cloud.google.com/go/auth/internal/transport/transport.go @@ -81,12 +81,14 @@ func ValidateUniverseDomain(clientUniverseDomain, credentialsUniverseDomain stri // DefaultHTTPClientWithTLS constructs an HTTPClient using the provided tlsConfig, to support mTLS. func DefaultHTTPClientWithTLS(tlsConfig *tls.Config) *http.Client { - trans := baseTransport() + trans := BaseTransport() trans.TLSClientConfig = tlsConfig return &http.Client{Transport: trans} } -func baseTransport() *http.Transport { +// BaseTransport returns a default [http.Transport] which can be used if +// [http.DefaultTransport] has been overwritten. 
+func BaseTransport() *http.Transport { return &http.Transport{ Proxy: http.ProxyFromEnvironment, DialContext: (&net.Dialer{ diff --git a/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md b/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md index ff9747beda0..a1ef2923799 100644 --- a/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md +++ b/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md @@ -1,5 +1,33 @@ # Changelog +## [0.2.6](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.5...auth/oauth2adapt/v0.2.6) (2024-11-21) + + +### Bug Fixes + +* **auth/oauth2adapt:** Copy map in tokenSourceAdapter.Token ([#11164](https://github.com/googleapis/google-cloud-go/issues/11164)) ([8cb0cbc](https://github.com/googleapis/google-cloud-go/commit/8cb0cbccdc32886dfb3af49fee04012937d114d2)), refs [#11161](https://github.com/googleapis/google-cloud-go/issues/11161) + +## [0.2.5](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.4...auth/oauth2adapt/v0.2.5) (2024-10-30) + + +### Bug Fixes + +* **auth/oauth2adapt:** Convert token metadata where possible ([#11062](https://github.com/googleapis/google-cloud-go/issues/11062)) ([34bf1c1](https://github.com/googleapis/google-cloud-go/commit/34bf1c164465d66745c0cfdf7cd10a8e2da92e52)) + +## [0.2.4](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.3...auth/oauth2adapt/v0.2.4) (2024-08-08) + + +### Bug Fixes + +* **auth/oauth2adapt:** Update dependencies ([257c40b](https://github.com/googleapis/google-cloud-go/commit/257c40bd6d7e59730017cf32bda8823d7a232758)) + +## [0.2.3](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.2...auth/oauth2adapt/v0.2.3) (2024-07-10) + + +### Bug Fixes + +* **auth/oauth2adapt:** Bump google.golang.org/api@v0.187.0 ([8fa9e39](https://github.com/googleapis/google-cloud-go/commit/8fa9e398e512fd8533fd49060371e61b5725a85b)) + ## [0.2.2](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.1...auth/oauth2adapt/v0.2.2) (2024-04-23) diff --git a/vendor/cloud.google.com/go/auth/oauth2adapt/oauth2adapt.go b/vendor/cloud.google.com/go/auth/oauth2adapt/oauth2adapt.go index 9835ac571cf..9cc33e5ee64 100644 --- a/vendor/cloud.google.com/go/auth/oauth2adapt/oauth2adapt.go +++ b/vendor/cloud.google.com/go/auth/oauth2adapt/oauth2adapt.go @@ -26,6 +26,13 @@ import ( "golang.org/x/oauth2/google" ) +const ( + oauth2TokenSourceKey = "oauth2.google.tokenSource" + oauth2ServiceAccountKey = "oauth2.google.serviceAccount" + authTokenSourceKey = "auth.google.tokenSource" + authServiceAccountKey = "auth.google.serviceAccount" +) + // TokenProviderFromTokenSource converts any [golang.org/x/oauth2.TokenSource] // into a [cloud.google.com/go/auth.TokenProvider]. func TokenProviderFromTokenSource(ts oauth2.TokenSource) auth.TokenProvider { @@ -47,10 +54,21 @@ func (tp *tokenProviderAdapter) Token(context.Context) (*auth.Token, error) { } return nil, err } + // Preserve compute token metadata, for both types of tokens. 
+ metadata := map[string]interface{}{} + if val, ok := tok.Extra(oauth2TokenSourceKey).(string); ok { + metadata[authTokenSourceKey] = val + metadata[oauth2TokenSourceKey] = val + } + if val, ok := tok.Extra(oauth2ServiceAccountKey).(string); ok { + metadata[authServiceAccountKey] = val + metadata[oauth2ServiceAccountKey] = val + } return &auth.Token{ - Value: tok.AccessToken, - Type: tok.Type(), - Expiry: tok.Expiry, + Value: tok.AccessToken, + Type: tok.Type(), + Expiry: tok.Expiry, + Metadata: metadata, }, nil } @@ -76,11 +94,29 @@ func (ts *tokenSourceAdapter) Token() (*oauth2.Token, error) { } return nil, err } - return &oauth2.Token{ + tok2 := &oauth2.Token{ AccessToken: tok.Value, TokenType: tok.Type, Expiry: tok.Expiry, - }, nil + } + // Preserve token metadata. + m := tok.Metadata + if m != nil { + // Copy map to avoid concurrent map writes error (#11161). + metadata := make(map[string]interface{}, len(m)+2) + for k, v := range m { + metadata[k] = v + } + // Append compute token metadata in converted form. + if val, ok := metadata[authTokenSourceKey].(string); ok && val != "" { + metadata[oauth2TokenSourceKey] = val + } + if val, ok := metadata[authServiceAccountKey].(string); ok && val != "" { + metadata[oauth2ServiceAccountKey] = val + } + tok2 = tok2.WithExtra(metadata) + } + return tok2, nil } // AuthCredentialsFromOauth2Credentials converts a [golang.org/x/oauth2/google.Credentials] diff --git a/vendor/cloud.google.com/go/auth/threelegged.go b/vendor/cloud.google.com/go/auth/threelegged.go index a8ce6cd8a8d..97a57f4694b 100644 --- a/vendor/cloud.google.com/go/auth/threelegged.go +++ b/vendor/cloud.google.com/go/auth/threelegged.go @@ -128,7 +128,7 @@ func (o *Options3LO) client() *http.Client { if o.Client != nil { return o.Client } - return internal.CloneDefaultClient() + return internal.DefaultClient() } // authCodeURL returns a URL that points to a OAuth2 consent page. 
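The copy in `tokenSourceAdapter.Token` later in this hunk exists because `tok.Metadata` may be shared between goroutines converting the same token, the concurrent-map-writes bug the 0.2.6 entry cites as #11161. The rule in isolation, as a reduced sketch:

```go
package sketch

// cloneWithExtras copies the shared metadata map before adding derived keys,
// so two goroutines converting the same token never write to one map.
func cloneWithExtras(m, extras map[string]interface{}) map[string]interface{} {
	out := make(map[string]interface{}, len(m)+len(extras))
	for k, v := range m {
		out[k] = v
	}
	for k, v := range extras {
		out[k] = v
	}
	return out
}
```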
diff --git a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md index 9594e1e2793..da7db19b1c6 100644 --- a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md +++ b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md @@ -1,5 +1,19 @@ # Changes +## [0.5.2](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.5.1...compute/metadata/v0.5.2) (2024-09-20) + + +### Bug Fixes + +* **compute/metadata:** Close Response Body for failed request ([#10891](https://github.com/googleapis/google-cloud-go/issues/10891)) ([e91d45e](https://github.com/googleapis/google-cloud-go/commit/e91d45e4757a9e354114509ba9800085d9e0ff1f)) + +## [0.5.1](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.5.0...compute/metadata/v0.5.1) (2024-09-12) + + +### Bug Fixes + +* **compute/metadata:** Check error chain for retryable error ([#10840](https://github.com/googleapis/google-cloud-go/issues/10840)) ([2bdedef](https://github.com/googleapis/google-cloud-go/commit/2bdedeff621b223d63cebc4355fcf83bc68412cd)) + ## [0.5.0](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.4.0...compute/metadata/v0.5.0) (2024-07-10) diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go index 345080b7297..c160b4786bb 100644 --- a/vendor/cloud.google.com/go/compute/metadata/metadata.go +++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go @@ -456,6 +456,9 @@ func (c *Client) getETag(ctx context.Context, suffix string) (value, etag string code = res.StatusCode } if delay, shouldRetry := retryer.Retry(code, reqErr); shouldRetry { + if res != nil && res.Body != nil { + res.Body.Close() + } if err := sleep(ctx, delay); err != nil { return "", "", err } diff --git a/vendor/cloud.google.com/go/compute/metadata/retry_linux.go b/vendor/cloud.google.com/go/compute/metadata/retry_linux.go index bb412f8917e..2e53f012300 100644 --- a/vendor/cloud.google.com/go/compute/metadata/retry_linux.go +++ b/vendor/cloud.google.com/go/compute/metadata/retry_linux.go @@ -17,10 +17,15 @@ package metadata -import "syscall" +import ( + "errors" + "syscall" +) func init() { // Initialize syscallRetryable to return true on transient socket-level // errors. These errors are specific to Linux. 
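The `metadata.go` change above (0.5.2, #10891) closes the response body before sleeping for a retry; a response that is discarded without closing its body leaks the underlying connection. A sketch of the general shape, assuming a re-issuable GET and a stand-in retry predicate:

```go
package sketch

import (
	"context"
	"net/http"
	"time"
)

// getWithRetry closes the body of every attempt it throws away; retryable
// and the fixed delay stand in for the real retry policy.
func getWithRetry(ctx context.Context, c *http.Client, url string, retryable func(*http.Response, error) bool) (*http.Response, error) {
	for {
		req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
		if err != nil {
			return nil, err
		}
		res, err := c.Do(req)
		if !retryable(res, err) {
			return res, err
		}
		if res != nil && res.Body != nil {
			res.Body.Close() // the discarded attempt must not leak its connection
		}
		select {
		case <-ctx.Done():
			return nil, ctx.Err()
		case <-time.After(500 * time.Millisecond):
		}
	}
}
```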
- syscallRetryable = func(err error) bool { return err == syscall.ECONNRESET || err == syscall.ECONNREFUSED } + syscallRetryable = func(err error) bool { + return errors.Is(err, syscall.ECONNRESET) || errors.Is(err, syscall.ECONNREFUSED) + } } diff --git a/vendor/cloud.google.com/go/iam/CHANGES.md b/vendor/cloud.google.com/go/iam/CHANGES.md index 5aab66312bd..2331bfc1b1d 100644 --- a/vendor/cloud.google.com/go/iam/CHANGES.md +++ b/vendor/cloud.google.com/go/iam/CHANGES.md @@ -1,6 +1,13 @@ # Changes +## [1.1.11](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.10...iam/v1.1.11) (2024-07-10) + + +### Bug Fixes + +* **iam:** Bump google.golang.org/grpc@v1.64.1 ([8ecc4e9](https://github.com/googleapis/google-cloud-go/commit/8ecc4e9622e5bbe9b90384d5848ab816027226c5)) + ## [1.1.10](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.9...iam/v1.1.10) (2024-07-01) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md index 6e5e80087d6..284ea54e3c2 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md @@ -1,5 +1,24 @@ # Release History +## 1.2.1 (2023-12-13) + +### Features Added + +* Exposed GetSASURL from specialized clients + +### Bugs Fixed + +* Fixed case in Blob Batch API when blob path has / in it. Fixes [#21649](https://github.com/Azure/azure-sdk-for-go/issues/21649). +* Fixed SharedKeyMissingError when using client.BlobClient().GetSASURL() method +* Fixed an issue that would cause metadata keys with empty values to be omitted when enumerating blobs. +* Fixed an issue where passing empty map to set blob tags API was causing panic. Fixes [#21869](https://github.com/Azure/azure-sdk-for-go/issues/21869). +* Fixed an issue where downloaded file has incorrect size when not a multiple of block size. Fixes [#21995](https://github.com/Azure/azure-sdk-for-go/issues/21995). +* Fixed case where `io.ErrUnexpectedEOF` was treated as expected error in `UploadStream`. Fixes [#21837](https://github.com/Azure/azure-sdk-for-go/issues/21837). + +### Other Changes + +* Updated the version of `azcore` to `1.9.1` and `azidentity` to `1.4.0`. + ## 1.2.0 (2023-10-11) ### Bugs Fixed diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/client.go index 69913e334d4..2229b7d85e7 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/client.go @@ -10,6 +10,7 @@ import ( "context" "errors" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas" "io" "os" "time" @@ -338,6 +339,12 @@ func (ab *Client) CopyFromURL(ctx context.Context, copySource string, o *blob.Co return blob.CopyFromURLResponse{}, errors.New("operation will not work on this blob type. CopyFromURL works only with block blob") } +// GetSASURL is a convenience method for generating a SAS token for the currently pointed at append blob. +// It can only be used if the credential supplied during creation was a SharedKeyCredential. 
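The `retry_linux.go` change above swaps `==` for `errors.Is`. The old comparison misses wrapped errors: a failed dial surfaces as a `*net.OpError` wrapping a `*os.SyscallError` wrapping the errno, and `errors.Is` walks that chain. A runnable illustration:

```go
package main

import (
	"errors"
	"fmt"
	"syscall"
)

func isTransient(err error) bool {
	return errors.Is(err, syscall.ECONNRESET) || errors.Is(err, syscall.ECONNREFUSED)
}

func main() {
	wrapped := fmt.Errorf("dial tcp 169.254.169.254:80: %w", syscall.ECONNREFUSED)
	fmt.Println(wrapped == error(syscall.ECONNREFUSED)) // false: the errno is wrapped
	fmt.Println(isTransient(wrapped))                   // true: the chain is walked
}
```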
+func (ab *Client) GetSASURL(permissions sas.BlobPermissions, expiry time.Time, o *blob.GetSASURLOptions) (string, error) { + return ab.BlobClient().GetSASURL(permissions, expiry, o) +} + // Concurrent Download Functions ----------------------------------------------------------------------------------------- // DownloadStream reads a range of bytes from a blob. The response also includes the blob's properties and metadata. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/assets.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/assets.json index ee07ad45b1e..80d6183c5b5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/assets.json +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "go", "TagPrefix": "go/storage/azblob", - "Tag": "go/storage/azblob_818d8addd0" + "Tag": "go/storage/azblob_0040e8284c" } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/client.go index 55de9b34958..d2421ddd916 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/client.go @@ -464,7 +464,7 @@ func (b *Client) downloadFile(ctx context.Context, writer io.Writer, o downloadO buffers := shared.NewMMBPool(int(o.Concurrency), o.BlockSize) defer buffers.Free() - aquireBuffer := func() ([]byte, error) { + acquireBuffer := func() ([]byte, error) { select { case b := <-buffers.Acquire(): // got a buffer @@ -489,21 +489,23 @@ func (b *Client) downloadFile(ctx context.Context, writer io.Writer, o downloadO /* * We have created as many channels as the number of chunks we have. * Each downloaded block will be sent to the channel matching its - * sequece number, i.e. 0th block is sent to 0th channel, 1st block + * sequence number, i.e. 0th block is sent to 0th channel, 1st block * to 1st channel and likewise. The blocks are then read and written * to the file serially by below goroutine. Do note that the blocks - * blocks are still downloaded parallelly from n/w, only serailized + * are still downloaded parallelly from n/w, only serialized * and written to file here. 
*/ writerError := make(chan error) + writeSize := int64(0) go func(ch chan error) { for _, block := range blocks { select { case <-ctx.Done(): return case block := <-block: - _, err := writer.Write(block) - buffers.Release(block) + n, err := writer.Write(block) + writeSize += int64(n) + buffers.Release(block[:cap(block)]) if err != nil { ch <- err return @@ -521,7 +523,7 @@ func (b *Client) downloadFile(ctx context.Context, writer io.Writer, o downloadO NumChunks: numChunks, Concurrency: o.Concurrency, Operation: func(ctx context.Context, chunkStart int64, count int64) error { - buff, err := aquireBuffer() + buff, err := acquireBuffer() if err != nil { return err } @@ -538,8 +540,8 @@ func (b *Client) downloadFile(ctx context.Context, writer io.Writer, o downloadO return err } - blockIndex := (chunkStart / o.BlockSize) - blocks[blockIndex] <- buff + blockIndex := chunkStart / o.BlockSize + blocks[blockIndex] <- buff[:count] return nil }, }) @@ -551,7 +553,7 @@ func (b *Client) downloadFile(ctx context.Context, writer io.Writer, o downloadO if err = <-writerError; err != nil { return 0, err } - return count, nil + return writeSize, nil } // DownloadStream reads a range of bytes from a blob. The response also includes the blob's properties and metadata. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/models.go index 5a79c12d435..d7334688946 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/models.go @@ -51,7 +51,7 @@ type Tags = generated.BlobTag // HTTPRange defines a range of bytes within an HTTP resource, starting at offset and // ending at offset+count. A zero-value HTTPRange indicates the entire resource. An HTTPRange -// which has an offset but no zero value count indicates from the offset to the resource's end. +// which has an offset and zero value count indicates from the offset to the resource's end. 
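The writer goroutine above drains one channel per chunk in index order, which is how parallel downloads become strictly in-order writes, with the new `writeSize` tally replacing the assumed `count`. A runnable miniature of that serialization scheme; chunk payloads and counts are made up:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	const numChunks = 4
	// One channel per chunk: chunk i is always delivered on blocks[i].
	blocks := make([]chan []byte, numChunks)
	for i := range blocks {
		blocks[i] = make(chan []byte, 1)
	}

	var wg sync.WaitGroup
	for i := 0; i < numChunks; i++ {
		wg.Add(1)
		go func(i int) { // downloads may finish in any order
			defer wg.Done()
			blocks[i] <- []byte(fmt.Sprintf("chunk-%d ", i))
		}(i)
	}

	var written int64
	for _, ch := range blocks { // but writes happen strictly in index order
		b := <-ch
		written += int64(len(b))
		fmt.Print(string(b))
	}
	wg.Wait()
	fmt.Println("\nbytes written:", written) // mirrors the new writeSize tally
}
```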
type HTTPRange = exported.HTTPRange // Request Model Declaration ------------------------------------------------------------------------------------------- diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror/error_codes.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror/error_codes.go index 8a1573c0ce2..07fad60611b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror/error_codes.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror/error_codes.go @@ -69,6 +69,7 @@ const ( CopyIDMismatch Code = "CopyIdMismatch" EmptyMetadataKey Code = "EmptyMetadataKey" FeatureVersionMismatch Code = "FeatureVersionMismatch" + ImmutabilityPolicyDeleteOnLockedPolicy Code = "ImmutabilityPolicyDeleteOnLockedPolicy" IncrementalCopyBlobMismatch Code = "IncrementalCopyBlobMismatch" IncrementalCopyOfEralierVersionSnapshotNotAllowed Code = "IncrementalCopyOfEralierVersionSnapshotNotAllowed" IncrementalCopySourceMustBeSnapshot Code = "IncrementalCopySourceMustBeSnapshot" @@ -122,6 +123,7 @@ const ( NoAuthenticationInformation Code = "NoAuthenticationInformation" NoPendingCopyOperation Code = "NoPendingCopyOperation" OperationNotAllowedOnIncrementalCopyBlob Code = "OperationNotAllowedOnIncrementalCopyBlob" + OperationNotAllowedOnRootBlob Code = "OperationNotAllowedOnRootBlob" OperationTimedOut Code = "OperationTimedOut" OutOfRangeInput Code = "OutOfRangeInput" OutOfRangeQueryParameterValue Code = "OutOfRangeQueryParameterValue" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/chunkwriting.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/chunkwriting.go index 212255d4c66..24df42c75ef 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/chunkwriting.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/chunkwriting.go @@ -75,7 +75,7 @@ func copyFromReader[T ~[]byte](ctx context.Context, src io.Reader, dst blockWrit } var n int - n, err = io.ReadFull(src, buffer) + n, err = shared.ReadAtLeast(src, buffer, len(buffer)) if n > 0 { // some data was read, upload it @@ -108,7 +108,7 @@ func copyFromReader[T ~[]byte](ctx context.Context, src io.Reader, dst blockWrit } if err != nil { // The reader is done, no more outgoing buffers - if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) { + if errors.Is(err, io.EOF) { // these are expected errors, we don't surface those err = nil } else { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/client.go index 8b542f85a8b..e3167b7747d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/client.go @@ -13,6 +13,7 @@ import ( "errors" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas" "io" "math" "os" @@ -129,7 +130,7 @@ func (bb *Client) URL() string { return bb.generated().Endpoint() } -// BlobClient returns the embedded blob client for this AppendBlob client. +// BlobClient returns the embedded blob client for this BlockBlob client. 
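On the `chunkwriting.go` change above: the old code accepted `io.ErrUnexpectedEOF` as a normal end of stream, which the 1.2.1 changelog flags as the `UploadStream` bug (#21837). A stdlib-only demonstration of the two errors the new code distinguishes; `shared.ReadAtLeast` itself is not reproduced here:

```go
package main

import (
	"errors"
	"fmt"
	"io"
	"strings"
)

func main() {
	buf := make([]byte, 8)

	// A short read mid-chunk: ReadFull reports io.ErrUnexpectedEOF.
	n, err := io.ReadFull(strings.NewReader("abc"), buf)
	fmt.Println(n, errors.Is(err, io.ErrUnexpectedEOF)) // 3 true

	// A clean end of stream with nothing read: plain io.EOF.
	n, err = io.ReadFull(strings.NewReader(""), buf)
	fmt.Println(n, errors.Is(err, io.EOF)) // 0 true
}
```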
func (bb *Client) BlobClient() *blob.Client { blobClient, _ := base.InnerClients((*base.CompositeClient[generated.BlobClient, generated.BlockBlobClient])(bb)) return (*blob.Client)(blobClient) @@ -410,6 +411,12 @@ func (bb *Client) CopyFromURL(ctx context.Context, copySource string, o *blob.Co return bb.BlobClient().CopyFromURL(ctx, copySource, o) } +// GetSASURL is a convenience method for generating a SAS token for the currently pointed at block blob. +// It can only be used if the credential supplied during creation was a SharedKeyCredential. +func (bb *Client) GetSASURL(permissions sas.BlobPermissions, expiry time.Time, o *blob.GetSASURLOptions) (string, error) { + return bb.BlobClient().GetSASURL(permissions, expiry, o) +} + // Concurrent Upload Functions ----------------------------------------------------------------------------------------- // uploadFromReader uploads a buffer in blocks to a block blob. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/ci.yml index f5100e13113..03035033891 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/ci.yml +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/ci.yml @@ -26,6 +26,7 @@ stages: parameters: ServiceDirectory: 'storage/azblob' RunLiveTests: true + UsePipelineProxy: false EnvVars: AZURE_CLIENT_ID: $(AZBLOB_CLIENT_ID) AZURE_TENANT_ID: $(AZBLOB_TENANT_ID) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/common.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/common.go index 560e151d553..48771e8c9c2 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/common.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/common.go @@ -32,5 +32,5 @@ func ParseURL(u string) (URLParts, error) { // HTTPRange defines a range of bytes within an HTTP resource, starting at offset and // ending at offset+count. A zero-value HTTPRange indicates the entire resource. An HTTPRange -// which has an offset but no zero value count indicates from the offset to the resource's end. +// which has an offset and zero value count indicates from the offset to the resource's end. 
type HTTPRange = exported.HTTPRange diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base/clients.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base/clients.go index 0bdbaefaf22..c95f19254a7 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base/clients.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base/clients.go @@ -71,7 +71,10 @@ type CompositeClient[T, U any] struct { } func InnerClients[T, U any](client *CompositeClient[T, U]) (*Client[T], *U) { - return &Client[T]{inner: client.innerT}, client.innerU + return &Client[T]{ + inner: client.innerT, + credential: client.sharedKey, + }, client.innerU } func NewAppendBlobClient(blobURL string, azClient *azcore.Client, sharedKey *exported.SharedKeyCredential) *CompositeClient[generated.BlobClient, generated.AppendBlobClient] { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/blob_batch.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/blob_batch.go index 64a88688a49..02966ee3e9a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/blob_batch.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/blob_batch.go @@ -49,7 +49,7 @@ func createBatchID() (string, error) { // Content-Length: 0 func buildSubRequest(req *policy.Request) []byte { var batchSubRequest strings.Builder - blobPath := req.Raw().URL.Path + blobPath := req.Raw().URL.EscapedPath() if len(req.Raw().URL.RawQuery) > 0 { blobPath += "?" + req.Raw().URL.RawQuery } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/exported.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/exported.go index 9bc1ca47df8..d0355727c90 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/exported.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/exported.go @@ -13,7 +13,7 @@ import ( // HTTPRange defines a range of bytes within an HTTP resource, starting at offset and // ending at offset+count. A zero-value HTTPRange indicates the entire resource. An HTTPRange -// which has an offset but no zero value count indicates from the offset to the resource's end. +// which has an offset and zero value count indicates from the offset to the resource's end. 
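The `blob_batch.go` change above (for #21649) swaps `URL.Path` for `URL.EscapedPath`: `Path` is the decoded form, so percent-encoded bytes in a blob name would be re-emitted raw in the batch sub-request line, while `EscapedPath` preserves the original encoding. A runnable illustration; the account and container names are made up:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	u, err := url.Parse("https://acct.blob.core.windows.net/container/dir%2Fname%20a")
	if err != nil {
		panic(err)
	}
	fmt.Println(u.Path)          // /container/dir/name a  (decoded: wrong for a sub-request)
	fmt.Println(u.EscapedPath()) // /container/dir%2Fname%20a  (encoding preserved)
}
```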
type HTTPRange struct { Offset int64 Count int64 diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/version.go index 935debca3d5..c8be74c2971 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/version.go @@ -8,5 +8,5 @@ package exported const ( ModuleName = "azblob" - ModuleVersion = "v1.2.0" + ModuleVersion = "v1.2.1" ) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/autorest.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/autorest.md index 367f020f4d5..25deeec3587 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/autorest.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/autorest.md @@ -19,7 +19,7 @@ modelerfour: seal-single-value-enum-by-default: true lenient-model-deduplication: true export-clients: true -use: "@autorest/go@4.0.0-preview.49" +use: "@autorest/go@4.0.0-preview.61" ``` ### Updating service version to 2023-08-03 @@ -280,7 +280,9 @@ directive: ``` yaml directive: -- from: zz_models.go +- from: + - zz_models.go + - zz_options.go where: $ transform: >- return $. @@ -443,8 +445,8 @@ directive: where: $ transform: >- return $. - replace(/if\s+!runtime\.HasStatusCode\(resp,\s+http\.StatusOK\)\s+\{\s*\n\t\treturn\s+ServiceClientSubmitBatchResponse\{\}\,\s+runtime\.NewResponseError\(resp\)\s*\n\t\}/g, - `if !runtime.HasStatusCode(resp, http.StatusAccepted) {\n\t\treturn ServiceClientSubmitBatchResponse{}, runtime.NewResponseError(resp)\n\t}`); + replace(/if\s+!runtime\.HasStatusCode\(httpResp,\s+http\.StatusOK\)\s+\{\s+err\s+=\s+runtime\.NewResponseError\(httpResp\)\s+return ServiceClientSubmitBatchResponse\{\}\,\s+err\s+}/g, + `if !runtime.HasStatusCode(httpResp, http.StatusAccepted) {\n\t\terr = runtime.NewResponseError(httpResp)\n\t\treturn ServiceClientSubmitBatchResponse{}, err\n\t}`); ``` ### Convert time to GMT for If-Modified-Since and If-Unmodified-Since request headers diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_appendblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_appendblob_client.go index 32be22221d7..dbfe069e6f1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_appendblob_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_appendblob_client.go @@ -3,9 +3,8 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. // Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. package generated @@ -44,18 +43,21 @@ type AppendBlobClient struct { // - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
func (client *AppendBlobClient) AppendBlock(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *AppendBlobClientAppendBlockOptions, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (AppendBlobClientAppendBlockResponse, error) { + var err error req, err := client.appendBlockCreateRequest(ctx, contentLength, body, options, leaseAccessConditions, appendPositionAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) if err != nil { return AppendBlobClientAppendBlockResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return AppendBlobClientAppendBlockResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return AppendBlobClientAppendBlockResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return AppendBlobClientAppendBlockResponse{}, err } - return client.appendBlockHandleResponse(resp) + resp, err := client.appendBlockHandleResponse(httpResp) + return resp, err } // appendBlockCreateRequest creates the AppendBlock request. @@ -127,22 +129,19 @@ func (client *AppendBlobClient) appendBlockCreateRequest(ctx context.Context, co // appendBlockHandleResponse handles the AppendBlock response. func (client *AppendBlobClient) appendBlockHandleResponse(resp *http.Response) (AppendBlobClientAppendBlockResponse, error) { result := AppendBlobClientAppendBlockResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) + if val := resp.Header.Get("x-ms-blob-append-offset"); val != "" { + result.BlobAppendOffset = &val } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { + blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) + blobCommittedBlockCount := int32(blobCommittedBlockCount32) if err != nil { return AppendBlobClientAppendBlockResponse{}, err } - result.LastModified = &lastModified + result.BlobCommittedBlockCount = &blobCommittedBlockCount } - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return AppendBlobClientAppendBlockResponse{}, err - } - result.ContentMD5 = contentMD5 + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val } if val := resp.Header.Get("x-ms-content-crc64"); val != "" { contentCRC64, err := base64.StdEncoding.DecodeString(val) @@ -151,14 +150,12 @@ func (client *AppendBlobClient) appendBlockHandleResponse(resp *http.Response) ( } result.ContentCRC64 = contentCRC64 } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return AppendBlobClientAppendBlockResponse{}, err + } + result.ContentMD5 = contentMD5 } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) @@ -167,16 +164,14 @@ func (client 
*AppendBlobClient) appendBlockHandleResponse(resp *http.Response) ( } result.Date = &date } - if val := resp.Header.Get("x-ms-blob-append-offset"); val != "" { - result.BlobAppendOffset = &val + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) } - if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { - blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) - blobCommittedBlockCount := int32(blobCommittedBlockCount32) - if err != nil { - return AppendBlobClientAppendBlockResponse{}, err - } - result.BlobCommittedBlockCount = &blobCommittedBlockCount + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val } if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { isServerEncrypted, err := strconv.ParseBool(val) @@ -185,11 +180,18 @@ func (client *AppendBlobClient) appendBlockHandleResponse(resp *http.Response) ( } result.IsServerEncrypted = &isServerEncrypted } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return AppendBlobClientAppendBlockResponse{}, err + } + result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val } return result, nil } @@ -213,18 +215,21 @@ func (client *AppendBlobClient) appendBlockHandleResponse(resp *http.Response) ( // - SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL // method. func (client *AppendBlobClient) AppendBlockFromURL(ctx context.Context, sourceURL string, contentLength int64, options *AppendBlobClientAppendBlockFromURLOptions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (AppendBlobClientAppendBlockFromURLResponse, error) { + var err error req, err := client.appendBlockFromURLCreateRequest(ctx, sourceURL, contentLength, options, cpkInfo, cpkScopeInfo, leaseAccessConditions, appendPositionAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions) if err != nil { return AppendBlobClientAppendBlockFromURLResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return AppendBlobClientAppendBlockFromURLResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return AppendBlobClientAppendBlockFromURLResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return AppendBlobClientAppendBlockFromURLResponse{}, err } - return client.appendBlockFromURLHandleResponse(resp) + resp, err := client.appendBlockFromURLHandleResponse(httpResp) + return resp, err } // appendBlockFromURLCreateRequest creates the AppendBlockFromURL request. 
@@ -315,22 +320,16 @@ func (client *AppendBlobClient) appendBlockFromURLCreateRequest(ctx context.Cont // appendBlockFromURLHandleResponse handles the AppendBlockFromURL response. func (client *AppendBlobClient) appendBlockFromURLHandleResponse(resp *http.Response) (AppendBlobClientAppendBlockFromURLResponse, error) { result := AppendBlobClientAppendBlockFromURLResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return AppendBlobClientAppendBlockFromURLResponse{}, err - } - result.LastModified = &lastModified + if val := resp.Header.Get("x-ms-blob-append-offset"); val != "" { + result.BlobAppendOffset = &val } - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) + if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { + blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) + blobCommittedBlockCount := int32(blobCommittedBlockCount32) if err != nil { return AppendBlobClientAppendBlockFromURLResponse{}, err } - result.ContentMD5 = contentMD5 + result.BlobCommittedBlockCount = &blobCommittedBlockCount } if val := resp.Header.Get("x-ms-content-crc64"); val != "" { contentCRC64, err := base64.StdEncoding.DecodeString(val) @@ -339,11 +338,12 @@ func (client *AppendBlobClient) appendBlockFromURLHandleResponse(resp *http.Resp } result.ContentCRC64 = contentCRC64 } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return AppendBlobClientAppendBlockFromURLResponse{}, err + } + result.ContentMD5 = contentMD5 } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) @@ -352,16 +352,8 @@ func (client *AppendBlobClient) appendBlockFromURLHandleResponse(resp *http.Resp } result.Date = &date } - if val := resp.Header.Get("x-ms-blob-append-offset"); val != "" { - result.BlobAppendOffset = &val - } - if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { - blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) - blobCommittedBlockCount := int32(blobCommittedBlockCount32) - if err != nil { - return AppendBlobClientAppendBlockFromURLResponse{}, err - } - result.BlobCommittedBlockCount = &blobCommittedBlockCount + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) } if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { result.EncryptionKeySHA256 = &val @@ -376,6 +368,19 @@ func (client *AppendBlobClient) appendBlockFromURLHandleResponse(resp *http.Resp } result.IsServerEncrypted = &isServerEncrypted } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return AppendBlobClientAppendBlockFromURLResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } @@ -391,18 +396,21 @@ func (client *AppendBlobClient) appendBlockFromURLHandleResponse(resp *http.Resp // - CPKScopeInfo - CPKScopeInfo contains a group of parameters 
for the BlobClient.SetMetadata method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *AppendBlobClient) Create(ctx context.Context, contentLength int64, options *AppendBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (AppendBlobClientCreateResponse, error) { + var err error req, err := client.createCreateRequest(ctx, contentLength, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) if err != nil { return AppendBlobClientCreateResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return AppendBlobClientCreateResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return AppendBlobClientCreateResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return AppendBlobClientCreateResponse{}, err } - return client.createHandleResponse(resp) + resp, err := client.createHandleResponse(httpResp) + return resp, err } // createCreateRequest creates the Create request. @@ -496,15 +504,8 @@ func (client *AppendBlobClient) createCreateRequest(ctx context.Context, content // createHandleResponse handles the Create response. func (client *AppendBlobClient) createHandleResponse(resp *http.Response) (AppendBlobClientCreateResponse, error) { result := AppendBlobClientCreateResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return AppendBlobClientCreateResponse{}, err - } - result.LastModified = &lastModified + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val } if val := resp.Header.Get("Content-MD5"); val != "" { contentMD5, err := base64.StdEncoding.DecodeString(val) @@ -513,18 +514,6 @@ func (client *AppendBlobClient) createHandleResponse(resp *http.Response) (Appen } result.ContentMD5 = contentMD5 } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("x-ms-version-id"); val != "" { - result.VersionID = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -532,6 +521,15 @@ func (client *AppendBlobClient) createHandleResponse(resp *http.Response) (Appen } result.Date = &date } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { isServerEncrypted, err := strconv.ParseBool(val) if err != nil { @@ -539,11 +537,21 @@ func (client *AppendBlobClient) createHandleResponse(resp *http.Response) (Appen } result.IsServerEncrypted = &isServerEncrypted } - if val := 
resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return AppendBlobClientCreateResponse{}, err + } + result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val } return result, nil } @@ -559,18 +567,21 @@ func (client *AppendBlobClient) createHandleResponse(resp *http.Response) (Appen // - AppendPositionAccessConditions - AppendPositionAccessConditions contains a group of parameters for the AppendBlobClient.AppendBlock // method. func (client *AppendBlobClient) Seal(ctx context.Context, options *AppendBlobClientSealOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions) (AppendBlobClientSealResponse, error) { + var err error req, err := client.sealCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions, appendPositionAccessConditions) if err != nil { return AppendBlobClientSealResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return AppendBlobClientSealResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return AppendBlobClientSealResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return AppendBlobClientSealResponse{}, err } - return client.sealHandleResponse(resp) + resp, err := client.sealHandleResponse(httpResp) + return resp, err } // sealCreateRequest creates the Seal request. @@ -614,25 +625,9 @@ func (client *AppendBlobClient) sealCreateRequest(ctx context.Context, options * // sealHandleResponse handles the Seal response. 
func (client *AppendBlobClient) sealHandleResponse(resp *http.Response) (AppendBlobClientSealResponse, error) { result := AppendBlobClientSealResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return AppendBlobClientSealResponse{}, err - } - result.LastModified = &lastModified - } if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -640,6 +635,9 @@ func (client *AppendBlobClient) sealHandleResponse(resp *http.Response) (AppendB } result.Date = &date } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } if val := resp.Header.Get("x-ms-blob-sealed"); val != "" { isSealed, err := strconv.ParseBool(val) if err != nil { @@ -647,5 +645,18 @@ func (client *AppendBlobClient) sealHandleResponse(resp *http.Response) (AppendB } result.IsSealed = &isSealed } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return AppendBlobClientSealResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blob_client.go index 257a3656dd1..caaa3dfed7c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blob_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blob_client.go @@ -3,9 +3,8 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. // Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. package generated @@ -38,18 +37,21 @@ type BlobClient struct { // - options - BlobClientAbortCopyFromURLOptions contains the optional parameters for the BlobClient.AbortCopyFromURL method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. 
func (client *BlobClient) AbortCopyFromURL(ctx context.Context, copyID string, options *BlobClientAbortCopyFromURLOptions, leaseAccessConditions *LeaseAccessConditions) (BlobClientAbortCopyFromURLResponse, error) { + var err error req, err := client.abortCopyFromURLCreateRequest(ctx, copyID, options, leaseAccessConditions) if err != nil { return BlobClientAbortCopyFromURLResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientAbortCopyFromURLResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusNoContent) { - return BlobClientAbortCopyFromURLResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusNoContent) { + err = runtime.NewResponseError(httpResp) + return BlobClientAbortCopyFromURLResponse{}, err } - return client.abortCopyFromURLHandleResponse(resp) + resp, err := client.abortCopyFromURLHandleResponse(httpResp) + return resp, err } // abortCopyFromURLCreateRequest creates the AbortCopyFromURL request. @@ -83,12 +85,6 @@ func (client *BlobClient) abortCopyFromURLHandleResponse(resp *http.Response) (B if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -96,6 +92,12 @@ func (client *BlobClient) abortCopyFromURLHandleResponse(resp *http.Response) (B } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } @@ -109,18 +111,21 @@ func (client *BlobClient) abortCopyFromURLHandleResponse(resp *http.Response) (B // - options - BlobClientAcquireLeaseOptions contains the optional parameters for the BlobClient.AcquireLease method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *BlobClient) AcquireLease(ctx context.Context, duration int32, options *BlobClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientAcquireLeaseResponse, error) { + var err error req, err := client.acquireLeaseCreateRequest(ctx, duration, options, modifiedAccessConditions) if err != nil { return BlobClientAcquireLeaseResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientAcquireLeaseResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return BlobClientAcquireLeaseResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return BlobClientAcquireLeaseResponse{}, err } - return client.acquireLeaseHandleResponse(resp) + resp, err := client.acquireLeaseHandleResponse(httpResp) + return resp, err } // acquireLeaseCreateRequest creates the AcquireLease request. @@ -166,6 +171,16 @@ func (client *BlobClient) acquireLeaseCreateRequest(ctx context.Context, duratio // acquireLeaseHandleResponse handles the AcquireLease response. 
func (client *BlobClient) acquireLeaseHandleResponse(resp *http.Response) (BlobClientAcquireLeaseResponse, error) { result := BlobClientAcquireLeaseResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientAcquireLeaseResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -179,22 +194,12 @@ func (client *BlobClient) acquireLeaseHandleResponse(resp *http.Response) (BlobC if val := resp.Header.Get("x-ms-lease-id"); val != "" { result.LeaseID = &val } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientAcquireLeaseResponse{}, err - } - result.Date = &date - } return result, nil } @@ -205,18 +210,21 @@ func (client *BlobClient) acquireLeaseHandleResponse(resp *http.Response) (BlobC // - options - BlobClientBreakLeaseOptions contains the optional parameters for the BlobClient.BreakLease method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *BlobClient) BreakLease(ctx context.Context, options *BlobClientBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientBreakLeaseResponse, error) { + var err error req, err := client.breakLeaseCreateRequest(ctx, options, modifiedAccessConditions) if err != nil { return BlobClientBreakLeaseResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientBreakLeaseResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusAccepted) { - return BlobClientBreakLeaseResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return BlobClientBreakLeaseResponse{}, err } - return client.breakLeaseHandleResponse(resp) + resp, err := client.breakLeaseHandleResponse(httpResp) + return resp, err } // breakLeaseCreateRequest creates the BreakLease request. @@ -261,6 +269,16 @@ func (client *BlobClient) breakLeaseCreateRequest(ctx context.Context, options * // breakLeaseHandleResponse handles the BreakLease response. 
func (client *BlobClient) breakLeaseHandleResponse(resp *http.Response) (BlobClientBreakLeaseResponse, error) { result := BlobClientBreakLeaseResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientBreakLeaseResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -279,22 +297,12 @@ func (client *BlobClient) breakLeaseHandleResponse(resp *http.Response) (BlobCli } result.LeaseTime = &leaseTime } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientBreakLeaseResponse{}, err - } - result.Date = &date - } return result, nil } @@ -309,18 +317,21 @@ func (client *BlobClient) breakLeaseHandleResponse(resp *http.Response) (BlobCli // - options - BlobClientChangeLeaseOptions contains the optional parameters for the BlobClient.ChangeLease method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *BlobClient) ChangeLease(ctx context.Context, leaseID string, proposedLeaseID string, options *BlobClientChangeLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientChangeLeaseResponse, error) { + var err error req, err := client.changeLeaseCreateRequest(ctx, leaseID, proposedLeaseID, options, modifiedAccessConditions) if err != nil { return BlobClientChangeLeaseResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientChangeLeaseResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return BlobClientChangeLeaseResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientChangeLeaseResponse{}, err } - return client.changeLeaseHandleResponse(resp) + resp, err := client.changeLeaseHandleResponse(httpResp) + return resp, err } // changeLeaseCreateRequest creates the ChangeLease request. @@ -364,6 +375,16 @@ func (client *BlobClient) changeLeaseCreateRequest(ctx context.Context, leaseID // changeLeaseHandleResponse handles the ChangeLease response. 
func (client *BlobClient) changeLeaseHandleResponse(resp *http.Response) (BlobClientChangeLeaseResponse, error) { result := BlobClientChangeLeaseResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientChangeLeaseResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -374,25 +395,15 @@ func (client *BlobClient) changeLeaseHandleResponse(resp *http.Response) (BlobCl } result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val + if val := resp.Header.Get("x-ms-lease-id"); val != "" { + result.LeaseID = &val } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } - if val := resp.Header.Get("x-ms-lease-id"); val != "" { - result.LeaseID = &val - } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientChangeLeaseResponse{}, err - } - result.Date = &date - } return result, nil } @@ -411,18 +422,21 @@ func (client *BlobClient) changeLeaseHandleResponse(resp *http.Response) (BlobCl // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. // - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. func (client *BlobClient) CopyFromURL(ctx context.Context, copySource string, options *BlobClientCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions, cpkScopeInfo *CPKScopeInfo) (BlobClientCopyFromURLResponse, error) { + var err error req, err := client.copyFromURLCreateRequest(ctx, copySource, options, sourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions, cpkScopeInfo) if err != nil { return BlobClientCopyFromURLResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientCopyFromURLResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusAccepted) { - return BlobClientCopyFromURLResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return BlobClientCopyFromURLResponse{}, err } - return client.copyFromURLHandleResponse(resp) + resp, err := client.copyFromURLHandleResponse(httpResp) + return resp, err } // copyFromURLCreateRequest creates the CopyFromURL request. @@ -513,34 +527,22 @@ func (client *BlobClient) copyFromURLCreateRequest(ctx context.Context, copySour // copyFromURLHandleResponse handles the CopyFromURL response. 
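// The CopyFromURL handler below decodes the Content-MD5 and x-ms-content-crc64
// headers from base64 into raw bytes. A standalone sketch of that step, using a
// made-up header value (standard library only):
package main

import (
	"encoding/base64"
	"fmt"
	"net/http"
)

func main() {
	h := http.Header{}
	h.Set("Content-MD5", base64.StdEncoding.EncodeToString([]byte("0123456789abcdef")))
	if val := h.Get("Content-MD5"); val != "" {
		contentMD5, err := base64.StdEncoding.DecodeString(val)
		if err != nil {
			fmt.Println("malformed Content-MD5 header:", err)
			return
		}
		fmt.Printf("decoded %d bytes: %x\n", len(contentMD5), contentMD5)
	}
}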
func (client *BlobClient) copyFromURLHandleResponse(resp *http.Response) (BlobClientCopyFromURLResponse, error) { result := BlobClientCopyFromURLResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + contentCRC64, err := base64.StdEncoding.DecodeString(val) if err != nil { return BlobClientCopyFromURLResponse{}, err } - result.LastModified = &lastModified - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("x-ms-version-id"); val != "" { - result.VersionID = &val + result.ContentCRC64 = contentCRC64 } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) if err != nil { return BlobClientCopyFromURLResponse{}, err } - result.Date = &date + result.ContentMD5 = contentMD5 } if val := resp.Header.Get("x-ms-copy-id"); val != "" { result.CopyID = &val @@ -548,22 +550,34 @@ func (client *BlobClient) copyFromURLHandleResponse(resp *http.Response) (BlobCl if val := resp.Header.Get("x-ms-copy-status"); val != "" { result.CopyStatus = &val } - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) if err != nil { return BlobClientCopyFromURLResponse{}, err } - result.ContentMD5 = contentMD5 + result.Date = &date } - if val := resp.Header.Get("x-ms-content-crc64"); val != "" { - contentCRC64, err := base64.StdEncoding.DecodeString(val) + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) if err != nil { return BlobClientCopyFromURLResponse{}, err } - result.ContentCRC64 = contentCRC64 + result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val } return result, nil } @@ -578,18 +592,21 @@ func (client *BlobClient) copyFromURLHandleResponse(resp *http.Response) (BlobCl // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. 
func (client *BlobClient) CreateSnapshot(ctx context.Context, options *BlobClientCreateSnapshotOptions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (BlobClientCreateSnapshotResponse, error) { + var err error req, err := client.createSnapshotCreateRequest(ctx, options, cpkInfo, cpkScopeInfo, modifiedAccessConditions, leaseAccessConditions) if err != nil { return BlobClientCreateSnapshotResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientCreateSnapshotResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return BlobClientCreateSnapshotResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return BlobClientCreateSnapshotResponse{}, err } - return client.createSnapshotHandleResponse(resp) + resp, err := client.createSnapshotHandleResponse(httpResp) + return resp, err } // createSnapshotCreateRequest creates the CreateSnapshot request. @@ -652,12 +669,26 @@ func (client *BlobClient) createSnapshotCreateRequest(ctx context.Context, optio // createSnapshotHandleResponse handles the CreateSnapshot response. func (client *BlobClient) createSnapshotHandleResponse(resp *http.Response) (BlobClientCreateSnapshotResponse, error) { result := BlobClientCreateSnapshotResponse{} - if val := resp.Header.Get("x-ms-snapshot"); val != "" { - result.Snapshot = &val + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientCreateSnapshotResponse{}, err + } + result.Date = &date } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return BlobClientCreateSnapshotResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } if val := resp.Header.Get("Last-Modified"); val != "" { lastModified, err := time.Parse(time.RFC1123, val) if err != nil { @@ -665,32 +696,18 @@ func (client *BlobClient) createSnapshotHandleResponse(resp *http.Response) (Blo } result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } + if val := resp.Header.Get("x-ms-snapshot"); val != "" { + result.Snapshot = &val + } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } if val := resp.Header.Get("x-ms-version-id"); val != "" { result.VersionID = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientCreateSnapshotResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { - isServerEncrypted, err := strconv.ParseBool(val) - if err != nil { - return BlobClientCreateSnapshotResponse{}, err - } - result.IsServerEncrypted = &isServerEncrypted - } return result, nil } @@ -712,18 +729,21 @@ func (client *BlobClient) createSnapshotHandleResponse(resp *http.Response) (Blo // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters 
for the ContainerClient.GetProperties method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *BlobClient) Delete(ctx context.Context, options *BlobClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientDeleteResponse, error) { + var err error req, err := client.deleteCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) if err != nil { return BlobClientDeleteResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientDeleteResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusAccepted) { - return BlobClientDeleteResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return BlobClientDeleteResponse{}, err } - return client.deleteHandleResponse(resp) + resp, err := client.deleteHandleResponse(httpResp) + return resp, err } // deleteCreateRequest creates the Delete request. @@ -781,12 +801,6 @@ func (client *BlobClient) deleteHandleResponse(resp *http.Response) (BlobClientD if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -794,6 +808,12 @@ func (client *BlobClient) deleteHandleResponse(resp *http.Response) (BlobClientD } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } @@ -804,18 +824,21 @@ func (client *BlobClient) deleteHandleResponse(resp *http.Response) (BlobClientD // - options - BlobClientDeleteImmutabilityPolicyOptions contains the optional parameters for the BlobClient.DeleteImmutabilityPolicy // method. func (client *BlobClient) DeleteImmutabilityPolicy(ctx context.Context, options *BlobClientDeleteImmutabilityPolicyOptions) (BlobClientDeleteImmutabilityPolicyResponse, error) { + var err error req, err := client.deleteImmutabilityPolicyCreateRequest(ctx, options) if err != nil { return BlobClientDeleteImmutabilityPolicyResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientDeleteImmutabilityPolicyResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return BlobClientDeleteImmutabilityPolicyResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientDeleteImmutabilityPolicyResponse{}, err } - return client.deleteImmutabilityPolicyHandleResponse(resp) + resp, err := client.deleteImmutabilityPolicyHandleResponse(httpResp) + return resp, err } // deleteImmutabilityPolicyCreateRequest creates the DeleteImmutabilityPolicy request. 
@@ -844,12 +867,6 @@ func (client *BlobClient) deleteImmutabilityPolicyHandleResponse(resp *http.Resp if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -857,6 +874,12 @@ func (client *BlobClient) deleteImmutabilityPolicyHandleResponse(resp *http.Resp } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } @@ -870,18 +893,21 @@ func (client *BlobClient) deleteImmutabilityPolicyHandleResponse(resp *http.Resp // - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *BlobClient) Download(ctx context.Context, options *BlobClientDownloadOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientDownloadResponse, error) { + var err error req, err := client.downloadCreateRequest(ctx, options, leaseAccessConditions, cpkInfo, modifiedAccessConditions) if err != nil { return BlobClientDownloadResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientDownloadResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusPartialContent, http.StatusNotModified) { - return BlobClientDownloadResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK, http.StatusPartialContent, http.StatusNotModified) { + err = runtime.NewResponseError(httpResp) + return BlobClientDownloadResponse{}, err } - return client.downloadHandleResponse(resp) + resp, err := client.downloadHandleResponse(httpResp) + return resp, err } // downloadCreateRequest creates the Download request. @@ -949,83 +975,75 @@ func (client *BlobClient) downloadCreateRequest(ctx context.Context, options *Bl // downloadHandleResponse handles the Download response. 
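// The Download handler below parses several timestamp headers (Date,
// Last-Modified, x-ms-creation-time, x-ms-last-access-time, ...) with
// time.RFC1123. A standalone sketch of that conversion on a sample value:
package main

import (
	"fmt"
	"net/http"
	"time"
)

func main() {
	h := http.Header{}
	h.Set("Last-Modified", "Fri, 20 Dec 2024 17:02:16 GMT")
	if val := h.Get("Last-Modified"); val != "" {
		lastModified, err := time.Parse(time.RFC1123, val)
		if err != nil {
			fmt.Println("unparseable Last-Modified:", err)
			return
		}
		fmt.Println("parsed:", lastModified.UTC())
	}
}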
func (client *BlobClient) downloadHandleResponse(resp *http.Response) (BlobClientDownloadResponse, error) { result := BlobClientDownloadResponse{Body: resp.Body} - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("Accept-Ranges"); val != "" { + result.AcceptRanges = &val + } + if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { + blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) + blobCommittedBlockCount := int32(blobCommittedBlockCount32) if err != nil { return BlobClientDownloadResponse{}, err } - result.LastModified = &lastModified + result.BlobCommittedBlockCount = &blobCommittedBlockCount } - if val := resp.Header.Get("x-ms-creation-time"); val != "" { - creationTime, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("x-ms-blob-content-md5"); val != "" { + blobContentMD5, err := base64.StdEncoding.DecodeString(val) if err != nil { return BlobClientDownloadResponse{}, err } - result.CreationTime = &creationTime + result.BlobContentMD5 = blobContentMD5 } - for hh := range resp.Header { - if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { - if result.Metadata == nil { - result.Metadata = map[string]*string{} - } - result.Metadata[hh[len("x-ms-meta-"):]] = to.Ptr(resp.Header.Get(hh)) + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return BlobClientDownloadResponse{}, err } + result.BlobSequenceNumber = &blobSequenceNumber } - if val := resp.Header.Get("x-ms-or-policy-id"); val != "" { - result.ObjectReplicationPolicyID = &val + if val := resp.Header.Get("x-ms-blob-type"); val != "" { + result.BlobType = (*BlobType)(&val) } - for hh := range resp.Header { - if len(hh) > len("x-ms-or-") && strings.EqualFold(hh[:len("x-ms-or-")], "x-ms-or-") { - if result.Metadata == nil { - result.Metadata = map[string]*string{} - } - result.Metadata[hh[len("x-ms-or-"):]] = to.Ptr(resp.Header.Get(hh)) - } + if val := resp.Header.Get("Cache-Control"); val != "" { + result.CacheControl = &val } - if val := resp.Header.Get("Content-Length"); val != "" { - contentLength, err := strconv.ParseInt(val, 10, 64) + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + contentCRC64, err := base64.StdEncoding.DecodeString(val) if err != nil { return BlobClientDownloadResponse{}, err } - result.ContentLength = &contentLength + result.ContentCRC64 = contentCRC64 } - if val := resp.Header.Get("Content-Type"); val != "" { - result.ContentType = &val - } - if val := resp.Header.Get("Content-Range"); val != "" { - result.ContentRange = &val - } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return BlobClientDownloadResponse{}, err - } - result.ContentMD5 = contentMD5 + if val := resp.Header.Get("Content-Disposition"); val != "" { + result.ContentDisposition = &val } if val := resp.Header.Get("Content-Encoding"); val != "" { result.ContentEncoding = &val } - if val := resp.Header.Get("Cache-Control"); val != "" { - result.CacheControl = &val - } - if val := resp.Header.Get("Content-Disposition"); val != "" { - result.ContentDisposition = &val - } if val := 
resp.Header.Get("Content-Language"); val != "" { result.ContentLanguage = &val } - if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { - blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if val := resp.Header.Get("Content-Length"); val != "" { + contentLength, err := strconv.ParseInt(val, 10, 64) if err != nil { return BlobClientDownloadResponse{}, err } - result.BlobSequenceNumber = &blobSequenceNumber + result.ContentLength = &contentLength } - if val := resp.Header.Get("x-ms-blob-type"); val != "" { - result.BlobType = (*BlobType)(&val) + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlobClientDownloadResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("Content-Range"); val != "" { + result.ContentRange = &val + } + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val } if val := resp.Header.Get("x-ms-copy-completion-time"); val != "" { copyCompletionTime, err := time.Parse(time.RFC1123, val) @@ -1034,9 +1052,6 @@ func (client *BlobClient) downloadHandleResponse(resp *http.Response) (BlobClien } result.CopyCompletionTime = ©CompletionTime } - if val := resp.Header.Get("x-ms-copy-status-description"); val != "" { - result.CopyStatusDescription = &val - } if val := resp.Header.Get("x-ms-copy-id"); val != "" { result.CopyID = &val } @@ -1049,36 +1064,15 @@ func (client *BlobClient) downloadHandleResponse(resp *http.Response) (BlobClien if val := resp.Header.Get("x-ms-copy-status"); val != "" { result.CopyStatus = (*CopyStatusType)(&val) } - if val := resp.Header.Get("x-ms-lease-duration"); val != "" { - result.LeaseDuration = (*LeaseDurationType)(&val) - } - if val := resp.Header.Get("x-ms-lease-state"); val != "" { - result.LeaseState = (*LeaseStateType)(&val) - } - if val := resp.Header.Get("x-ms-lease-status"); val != "" { - result.LeaseStatus = (*LeaseStatusType)(&val) - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("x-ms-version-id"); val != "" { - result.VersionID = &val + if val := resp.Header.Get("x-ms-copy-status-description"); val != "" { + result.CopyStatusDescription = &val } - if val := resp.Header.Get("x-ms-is-current-version"); val != "" { - isCurrentVersion, err := strconv.ParseBool(val) + if val := resp.Header.Get("x-ms-creation-time"); val != "" { + creationTime, err := time.Parse(time.RFC1123, val) if err != nil { return BlobClientDownloadResponse{}, err } - result.IsCurrentVersion = &isCurrentVersion - } - if val := resp.Header.Get("Accept-Ranges"); val != "" { - result.AcceptRanges = &val + result.CreationTime = &creationTime } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) @@ -1087,20 +1081,8 @@ func (client *BlobClient) downloadHandleResponse(resp *http.Response) (BlobClien } result.Date = &date } - if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { - blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) - blobCommittedBlockCount := int32(blobCommittedBlockCount32) - if err != nil { - return BlobClientDownloadResponse{}, err - } - result.BlobCommittedBlockCount = &blobCommittedBlockCount - } - if val := resp.Header.Get("x-ms-server-encrypted"); val != "" { - 
isServerEncrypted, err := strconv.ParseBool(val) - if err != nil { - return BlobClientDownloadResponse{}, err - } - result.IsServerEncrypted = &isServerEncrypted + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) } if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { result.EncryptionKeySHA256 = &val @@ -1108,19 +1090,25 @@ func (client *BlobClient) downloadHandleResponse(resp *http.Response) (BlobClien if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { result.EncryptionScope = &val } - if val := resp.Header.Get("x-ms-blob-content-md5"); val != "" { - blobContentMD5, err := base64.StdEncoding.DecodeString(val) + if val := resp.Header.Get("x-ms-error-code"); val != "" { + result.ErrorCode = &val + } + if val := resp.Header.Get("x-ms-immutability-policy-until-date"); val != "" { + immutabilityPolicyExpiresOn, err := time.Parse(time.RFC1123, val) if err != nil { return BlobClientDownloadResponse{}, err } - result.BlobContentMD5 = blobContentMD5 + result.ImmutabilityPolicyExpiresOn = &immutabilityPolicyExpiresOn } - if val := resp.Header.Get("x-ms-tag-count"); val != "" { - tagCount, err := strconv.ParseInt(val, 10, 64) + if val := resp.Header.Get("x-ms-immutability-policy-mode"); val != "" { + result.ImmutabilityPolicyMode = (*ImmutabilityPolicyMode)(&val) + } + if val := resp.Header.Get("x-ms-is-current-version"); val != "" { + isCurrentVersion, err := strconv.ParseBool(val) if err != nil { return BlobClientDownloadResponse{}, err } - result.TagCount = &tagCount + result.IsCurrentVersion = &isCurrentVersion } if val := resp.Header.Get("x-ms-blob-sealed"); val != "" { isSealed, err := strconv.ParseBool(val) @@ -1129,6 +1117,13 @@ func (client *BlobClient) downloadHandleResponse(resp *http.Response) (BlobClien } result.IsSealed = &isSealed } + if val := resp.Header.Get("x-ms-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return BlobClientDownloadResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } if val := resp.Header.Get("x-ms-last-access-time"); val != "" { lastAccessed, err := time.Parse(time.RFC1123, val) if err != nil { @@ -1136,15 +1131,21 @@ func (client *BlobClient) downloadHandleResponse(resp *http.Response) (BlobClien } result.LastAccessed = &lastAccessed } - if val := resp.Header.Get("x-ms-immutability-policy-until-date"); val != "" { - immutabilityPolicyExpiresOn, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) if err != nil { return BlobClientDownloadResponse{}, err } - result.ImmutabilityPolicyExpiresOn = &immutabilityPolicyExpiresOn + result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-immutability-policy-mode"); val != "" { - result.ImmutabilityPolicyMode = (*ImmutabilityPolicyMode)(&val) + if val := resp.Header.Get("x-ms-lease-duration"); val != "" { + result.LeaseDuration = (*LeaseDurationType)(&val) + } + if val := resp.Header.Get("x-ms-lease-state"); val != "" { + result.LeaseState = (*LeaseStateType)(&val) + } + if val := resp.Header.Get("x-ms-lease-status"); val != "" { + result.LeaseStatus = (*LeaseStatusType)(&val) } if val := resp.Header.Get("x-ms-legal-hold"); val != "" { legalHold, err := strconv.ParseBool(val) @@ -1153,15 +1154,40 @@ func (client *BlobClient) downloadHandleResponse(resp *http.Response) (BlobClien } result.LegalHold = &legalHold } - if val := resp.Header.Get("x-ms-content-crc64"); val != "" { - 
contentCRC64, err := base64.StdEncoding.DecodeString(val) + for hh := range resp.Header { + if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { + if result.Metadata == nil { + result.Metadata = map[string]*string{} + } + result.Metadata[hh[len("x-ms-meta-"):]] = to.Ptr(resp.Header.Get(hh)) + } + } + if val := resp.Header.Get("x-ms-or-policy-id"); val != "" { + result.ObjectReplicationPolicyID = &val + } + for hh := range resp.Header { + if len(hh) > len("x-ms-or-") && strings.EqualFold(hh[:len("x-ms-or-")], "x-ms-or-") { + if result.Metadata == nil { + result.Metadata = map[string]*string{} + } + result.Metadata[hh[len("x-ms-or-"):]] = to.Ptr(resp.Header.Get(hh)) + } + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-tag-count"); val != "" { + tagCount, err := strconv.ParseInt(val, 10, 64) if err != nil { return BlobClientDownloadResponse{}, err } - result.ContentCRC64 = contentCRC64 + result.TagCount = &tagCount } - if val := resp.Header.Get("x-ms-error-code"); val != "" { - result.ErrorCode = &val + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val } return result, nil } @@ -1172,18 +1198,21 @@ func (client *BlobClient) downloadHandleResponse(resp *http.Response) (BlobClien // Generated from API version 2023-08-03 // - options - BlobClientGetAccountInfoOptions contains the optional parameters for the BlobClient.GetAccountInfo method. func (client *BlobClient) GetAccountInfo(ctx context.Context, options *BlobClientGetAccountInfoOptions) (BlobClientGetAccountInfoResponse, error) { + var err error req, err := client.getAccountInfoCreateRequest(ctx, options) if err != nil { return BlobClientGetAccountInfoResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientGetAccountInfoResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return BlobClientGetAccountInfoResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientGetAccountInfoResponse{}, err } - return client.getAccountInfoHandleResponse(resp) + resp, err := client.getAccountInfoHandleResponse(httpResp) + return resp, err } // getAccountInfoCreateRequest creates the GetAccountInfo request. @@ -1204,15 +1233,12 @@ func (client *BlobClient) getAccountInfoCreateRequest(ctx context.Context, optio // getAccountInfoHandleResponse handles the GetAccountInfo response. 
func (client *BlobClient) getAccountInfoHandleResponse(resp *http.Response) (BlobClientGetAccountInfoResponse, error) { result := BlobClientGetAccountInfoResponse{} + if val := resp.Header.Get("x-ms-account-kind"); val != "" { + result.AccountKind = (*AccountKind)(&val) + } if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -1220,11 +1246,14 @@ func (client *BlobClient) getAccountInfoHandleResponse(resp *http.Response) (Blo } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } if val := resp.Header.Get("x-ms-sku-name"); val != "" { result.SKUName = (*SKUName)(&val) } - if val := resp.Header.Get("x-ms-account-kind"); val != "" { - result.AccountKind = (*AccountKind)(&val) + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val } return result, nil } @@ -1239,18 +1268,21 @@ func (client *BlobClient) getAccountInfoHandleResponse(resp *http.Response) (Blo // - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *BlobClient) GetProperties(ctx context.Context, options *BlobClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientGetPropertiesResponse, error) { + var err error req, err := client.getPropertiesCreateRequest(ctx, options, leaseAccessConditions, cpkInfo, modifiedAccessConditions) if err != nil { return BlobClientGetPropertiesResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientGetPropertiesResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return BlobClientGetPropertiesResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientGetPropertiesResponse{}, err } - return client.getPropertiesHandleResponse(resp) + resp, err := client.getPropertiesHandleResponse(httpResp) + return resp, err } // getPropertiesCreateRequest creates the GetProperties request. @@ -1308,82 +1340,61 @@ func (client *BlobClient) getPropertiesCreateRequest(ctx context.Context, option // getPropertiesHandleResponse handles the GetProperties response. 
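// GetProperties (below) rebuilds user-defined metadata by scanning every response
// header for the case-insensitive x-ms-meta- prefix. A standalone sketch of that
// scan; ptr is a local stand-in for to.Ptr from azcore/to, and net/http
// canonicalizes header keys, which is why the generated code compares with
// strings.EqualFold rather than a plain prefix match.
package main

import (
	"fmt"
	"net/http"
	"strings"
)

func ptr(s string) *string { return &s }

func main() {
	h := http.Header{}
	h.Set("x-ms-meta-owner", "tempo")
	h.Set("Content-Type", "application/octet-stream")
	var metadata map[string]*string
	for hh := range h {
		if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") {
			if metadata == nil {
				metadata = map[string]*string{}
			}
			metadata[hh[len("x-ms-meta-"):]] = ptr(h.Get(hh))
		}
	}
	for k, v := range metadata {
		fmt.Println(k, "=", *v) // prints: Owner = tempo (key canonicalized by net/http)
	}
}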
func (client *BlobClient) getPropertiesHandleResponse(resp *http.Response) (BlobClientGetPropertiesResponse, error) { result := BlobClientGetPropertiesResponse{} - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("Accept-Ranges"); val != "" { + result.AcceptRanges = &val + } + if val := resp.Header.Get("x-ms-access-tier"); val != "" { + result.AccessTier = &val + } + if val := resp.Header.Get("x-ms-access-tier-change-time"); val != "" { + accessTierChangeTime, err := time.Parse(time.RFC1123, val) if err != nil { return BlobClientGetPropertiesResponse{}, err } - result.LastModified = &lastModified + result.AccessTierChangeTime = &accessTierChangeTime } - if val := resp.Header.Get("x-ms-creation-time"); val != "" { - creationTime, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("x-ms-access-tier-inferred"); val != "" { + accessTierInferred, err := strconv.ParseBool(val) if err != nil { return BlobClientGetPropertiesResponse{}, err } - result.CreationTime = &creationTime - } - for hh := range resp.Header { - if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { - if result.Metadata == nil { - result.Metadata = map[string]*string{} - } - result.Metadata[hh[len("x-ms-meta-"):]] = to.Ptr(resp.Header.Get(hh)) - } - } - if val := resp.Header.Get("x-ms-or-policy-id"); val != "" { - result.ObjectReplicationPolicyID = &val - } - for hh := range resp.Header { - if len(hh) > len("x-ms-or-") && strings.EqualFold(hh[:len("x-ms-or-")], "x-ms-or-") { - if result.Metadata == nil { - result.Metadata = map[string]*string{} - } - result.Metadata[hh[len("x-ms-or-"):]] = to.Ptr(resp.Header.Get(hh)) - } + result.AccessTierInferred = &accessTierInferred } - if val := resp.Header.Get("x-ms-blob-type"); val != "" { - result.BlobType = (*BlobType)(&val) + if val := resp.Header.Get("x-ms-archive-status"); val != "" { + result.ArchiveStatus = &val } - if val := resp.Header.Get("x-ms-copy-completion-time"); val != "" { - copyCompletionTime, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { + blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) + blobCommittedBlockCount := int32(blobCommittedBlockCount32) if err != nil { return BlobClientGetPropertiesResponse{}, err } - result.CopyCompletionTime = &copyCompletionTime - } - if val := resp.Header.Get("x-ms-copy-status-description"); val != "" { - result.CopyStatusDescription = &val - } - if val := resp.Header.Get("x-ms-copy-id"); val != "" { - result.CopyID = &val - } - if val := resp.Header.Get("x-ms-copy-progress"); val != "" { - result.CopyProgress = &val - } - if val := resp.Header.Get("x-ms-copy-source"); val != "" { - result.CopySource = &val - } - if val := resp.Header.Get("x-ms-copy-status"); val != "" { - result.CopyStatus = (*CopyStatusType)(&val) + result.BlobCommittedBlockCount = &blobCommittedBlockCount } - if val := resp.Header.Get("x-ms-incremental-copy"); val != "" { - isIncrementalCopy, err := strconv.ParseBool(val) + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) if err != nil { return BlobClientGetPropertiesResponse{}, err } - result.IsIncrementalCopy = &isIncrementalCopy + result.BlobSequenceNumber = &blobSequenceNumber } - if val := resp.Header.Get("x-ms-copy-destination-snapshot"); val != "" { - result.DestinationSnapshot = &val + if val :=
resp.Header.Get("x-ms-blob-type"); val != "" { + result.BlobType = (*BlobType)(&val) } - if val := resp.Header.Get("x-ms-lease-duration"); val != "" { - result.LeaseDuration = (*LeaseDurationType)(&val) + if val := resp.Header.Get("Cache-Control"); val != "" { + result.CacheControl = &val } - if val := resp.Header.Get("x-ms-lease-state"); val != "" { - result.LeaseState = (*LeaseStateType)(&val) + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-lease-status"); val != "" { - result.LeaseStatus = (*LeaseStatusType)(&val) + if val := resp.Header.Get("Content-Disposition"); val != "" { + result.ContentDisposition = &val + } + if val := resp.Header.Get("Content-Encoding"); val != "" { + result.ContentEncoding = &val + } + if val := resp.Header.Get("Content-Language"); val != "" { + result.ContentLanguage = &val } if val := resp.Header.Get("Content-Length"); val != "" { contentLength, err := strconv.ParseInt(val, 10, 64) @@ -1392,12 +1403,6 @@ func (client *BlobClient) getPropertiesHandleResponse(resp *http.Response) (Blob } result.ContentLength = &contentLength } - if val := resp.Header.Get("Content-Type"); val != "" { - result.ContentType = &val - } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } if val := resp.Header.Get("Content-MD5"); val != "" { contentMD5, err := base64.StdEncoding.DecodeString(val) if err != nil { @@ -1405,58 +1410,50 @@ func (client *BlobClient) getPropertiesHandleResponse(resp *http.Response) (Blob } result.ContentMD5 = contentMD5 } - if val := resp.Header.Get("Content-Encoding"); val != "" { - result.ContentEncoding = &val - } - if val := resp.Header.Get("Content-Disposition"); val != "" { - result.ContentDisposition = &val - } - if val := resp.Header.Get("Content-Language"); val != "" { - result.ContentLanguage = &val - } - if val := resp.Header.Get("Cache-Control"); val != "" { - result.CacheControl = &val + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val } - if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { - blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if val := resp.Header.Get("x-ms-copy-completion-time"); val != "" { + copyCompletionTime, err := time.Parse(time.RFC1123, val) if err != nil { return BlobClientGetPropertiesResponse{}, err } - result.BlobSequenceNumber = &blobSequenceNumber - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val + result.CopyCompletionTime = ©CompletionTime } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val + if val := resp.Header.Get("x-ms-copy-id"); val != "" { + result.CopyID = &val } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val + if val := resp.Header.Get("x-ms-copy-progress"); val != "" { + result.CopyProgress = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientGetPropertiesResponse{}, err - } - result.Date = &date + if val := resp.Header.Get("x-ms-copy-source"); val != "" { + result.CopySource = &val } - if val := resp.Header.Get("Accept-Ranges"); val != "" { - result.AcceptRanges = &val + if val := resp.Header.Get("x-ms-copy-status"); val != "" { + result.CopyStatus = (*CopyStatusType)(&val) } - if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { - blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 
32) - blobCommittedBlockCount := int32(blobCommittedBlockCount32) + if val := resp.Header.Get("x-ms-copy-status-description"); val != "" { + result.CopyStatusDescription = &val + } + if val := resp.Header.Get("x-ms-creation-time"); val != "" { + creationTime, err := time.Parse(time.RFC1123, val) if err != nil { return BlobClientGetPropertiesResponse{}, err } - result.BlobCommittedBlockCount = &blobCommittedBlockCount + result.CreationTime = &creationTime } - if val := resp.Header.Get("x-ms-server-encrypted"); val != "" { - isServerEncrypted, err := strconv.ParseBool(val) + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) if err != nil { return BlobClientGetPropertiesResponse{}, err } - result.IsServerEncrypted = &isServerEncrypted + result.Date = &date + } + if val := resp.Header.Get("x-ms-copy-destination-snapshot"); val != "" { + result.DestinationSnapshot = &val + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) } if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { result.EncryptionKeySHA256 = &val @@ -1464,28 +1461,22 @@ func (client *BlobClient) getPropertiesHandleResponse(resp *http.Response) (Blob if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { result.EncryptionScope = &val } - if val := resp.Header.Get("x-ms-access-tier"); val != "" { - result.AccessTier = &val - } - if val := resp.Header.Get("x-ms-access-tier-inferred"); val != "" { - accessTierInferred, err := strconv.ParseBool(val) + if val := resp.Header.Get("x-ms-expiry-time"); val != "" { + expiresOn, err := time.Parse(time.RFC1123, val) if err != nil { return BlobClientGetPropertiesResponse{}, err } - result.AccessTierInferred = &accessTierInferred - } - if val := resp.Header.Get("x-ms-archive-status"); val != "" { - result.ArchiveStatus = &val + result.ExpiresOn = &expiresOn } - if val := resp.Header.Get("x-ms-access-tier-change-time"); val != "" { - accessTierChangeTime, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("x-ms-immutability-policy-until-date"); val != "" { + immutabilityPolicyExpiresOn, err := time.Parse(time.RFC1123, val) if err != nil { return BlobClientGetPropertiesResponse{}, err } - result.AccessTierChangeTime = &accessTierChangeTime + result.ImmutabilityPolicyExpiresOn = &immutabilityPolicyExpiresOn } - if val := resp.Header.Get("x-ms-version-id"); val != "" { - result.VersionID = &val + if val := resp.Header.Get("x-ms-immutability-policy-mode"); val != "" { + result.ImmutabilityPolicyMode = (*ImmutabilityPolicyMode)(&val) } if val := resp.Header.Get("x-ms-is-current-version"); val != "" { isCurrentVersion, err := strconv.ParseBool(val) @@ -1494,19 +1485,12 @@ func (client *BlobClient) getPropertiesHandleResponse(resp *http.Response) (Blob } result.IsCurrentVersion = &isCurrentVersion } - if val := resp.Header.Get("x-ms-tag-count"); val != "" { - tagCount, err := strconv.ParseInt(val, 10, 64) - if err != nil { - return BlobClientGetPropertiesResponse{}, err - } - result.TagCount = &tagCount - } - if val := resp.Header.Get("x-ms-expiry-time"); val != "" { - expiresOn, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("x-ms-incremental-copy"); val != "" { + isIncrementalCopy, err := strconv.ParseBool(val) if err != nil { return BlobClientGetPropertiesResponse{}, err } - result.ExpiresOn = &expiresOn + result.IsIncrementalCopy = &isIncrementalCopy } if val := resp.Header.Get("x-ms-blob-sealed"); val != "" { isSealed, err := strconv.ParseBool(val) @@ -1515,8 
+1499,12 @@ func (client *BlobClient) getPropertiesHandleResponse(resp *http.Response) (Blob } result.IsSealed = &isSealed } - if val := resp.Header.Get("x-ms-rehydrate-priority"); val != "" { - result.RehydratePriority = &val + if val := resp.Header.Get("x-ms-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted } if val := resp.Header.Get("x-ms-last-access-time"); val != "" { lastAccessed, err := time.Parse(time.RFC1123, val) @@ -1525,15 +1513,21 @@ func (client *BlobClient) getPropertiesHandleResponse(resp *http.Response) (Blob } result.LastAccessed = &lastAccessed } - if val := resp.Header.Get("x-ms-immutability-policy-until-date"); val != "" { - immutabilityPolicyExpiresOn, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) if err != nil { return BlobClientGetPropertiesResponse{}, err } - result.ImmutabilityPolicyExpiresOn = &immutabilityPolicyExpiresOn + result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-immutability-policy-mode"); val != "" { - result.ImmutabilityPolicyMode = (*ImmutabilityPolicyMode)(&val) + if val := resp.Header.Get("x-ms-lease-duration"); val != "" { + result.LeaseDuration = (*LeaseDurationType)(&val) + } + if val := resp.Header.Get("x-ms-lease-state"); val != "" { + result.LeaseState = (*LeaseStateType)(&val) + } + if val := resp.Header.Get("x-ms-lease-status"); val != "" { + result.LeaseStatus = (*LeaseStatusType)(&val) } if val := resp.Header.Get("x-ms-legal-hold"); val != "" { legalHold, err := strconv.ParseBool(val) @@ -1542,6 +1536,44 @@ func (client *BlobClient) getPropertiesHandleResponse(resp *http.Response) (Blob } result.LegalHold = &legalHold } + for hh := range resp.Header { + if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { + if result.Metadata == nil { + result.Metadata = map[string]*string{} + } + result.Metadata[hh[len("x-ms-meta-"):]] = to.Ptr(resp.Header.Get(hh)) + } + } + if val := resp.Header.Get("x-ms-or-policy-id"); val != "" { + result.ObjectReplicationPolicyID = &val + } + for hh := range resp.Header { + if len(hh) > len("x-ms-or-") && strings.EqualFold(hh[:len("x-ms-or-")], "x-ms-or-") { + if result.Metadata == nil { + result.Metadata = map[string]*string{} + } + result.Metadata[hh[len("x-ms-or-"):]] = to.Ptr(resp.Header.Get(hh)) + } + } + if val := resp.Header.Get("x-ms-rehydrate-priority"); val != "" { + result.RehydratePriority = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-tag-count"); val != "" { + tagCount, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.TagCount = &tagCount + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val + } return result, nil } @@ -1553,18 +1585,21 @@ func (client *BlobClient) getPropertiesHandleResponse(resp *http.Response) (Blob // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. 
func (client *BlobClient) GetTags(ctx context.Context, options *BlobClientGetTagsOptions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (BlobClientGetTagsResponse, error) { + var err error req, err := client.getTagsCreateRequest(ctx, options, modifiedAccessConditions, leaseAccessConditions) if err != nil { return BlobClientGetTagsResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientGetTagsResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return BlobClientGetTagsResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientGetTagsResponse{}, err } - return client.getTagsHandleResponse(resp) + resp, err := client.getTagsHandleResponse(httpResp) + return resp, err } // getTagsCreateRequest creates the GetTags request. @@ -1605,12 +1640,6 @@ func (client *BlobClient) getTagsHandleResponse(resp *http.Response) (BlobClient if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -1618,6 +1647,12 @@ func (client *BlobClient) getTagsHandleResponse(resp *http.Response) (BlobClient } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } if err := runtime.UnmarshalAsXML(resp, &result.BlobTags); err != nil { return BlobClientGetTagsResponse{}, err } @@ -1633,18 +1668,21 @@ func (client *BlobClient) getTagsHandleResponse(resp *http.Response) (BlobClient // - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *BlobClient) Query(ctx context.Context, options *BlobClientQueryOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientQueryResponse, error) { + var err error req, err := client.queryCreateRequest(ctx, options, leaseAccessConditions, cpkInfo, modifiedAccessConditions) if err != nil { return BlobClientQueryResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientQueryResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusPartialContent) { - return BlobClientQueryResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK, http.StatusPartialContent) { + err = runtime.NewResponseError(httpResp) + return BlobClientQueryResponse{}, err } - return client.queryHandleResponse(resp) + resp, err := client.queryHandleResponse(httpResp) + return resp, err } // queryCreateRequest creates the Query request. @@ -1707,65 +1745,75 @@ func (client *BlobClient) queryCreateRequest(ctx context.Context, options *BlobC // queryHandleResponse handles the Query response. 
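// The Query handler below turns enum-like header strings into typed string
// pointers with conversions such as (*LeaseStateType)(&val). A standalone sketch
// with a stand-in named type; the conversion is legal because both pointer types
// share the underlying type string, and val is a fresh per-branch variable, so
// taking its address is safe:
package main

import (
	"fmt"
	"net/http"
)

// LeaseStateType stands in for a generated named string type.
type LeaseStateType string

func main() {
	h := http.Header{}
	h.Set("x-ms-lease-state", "leased")
	var leaseState *LeaseStateType
	if val := h.Get("x-ms-lease-state"); val != "" {
		leaseState = (*LeaseStateType)(&val)
	}
	if leaseState != nil {
		fmt.Println("lease state:", *leaseState)
	}
}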
func (client *BlobClient) queryHandleResponse(resp *http.Response) (BlobClientQueryResponse, error) { result := BlobClientQueryResponse{Body: resp.Body} - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("Accept-Ranges"); val != "" { + result.AcceptRanges = &val + } + if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { + blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) + blobCommittedBlockCount := int32(blobCommittedBlockCount32) if err != nil { return BlobClientQueryResponse{}, err } - result.LastModified = &lastModified + result.BlobCommittedBlockCount = &blobCommittedBlockCount } - for hh := range resp.Header { - if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { - if result.Metadata == nil { - result.Metadata = map[string]*string{} - } - result.Metadata[hh[len("x-ms-meta-"):]] = to.Ptr(resp.Header.Get(hh)) + if val := resp.Header.Get("x-ms-blob-content-md5"); val != "" { + blobContentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlobClientQueryResponse{}, err } + result.BlobContentMD5 = blobContentMD5 } - if val := resp.Header.Get("Content-Length"); val != "" { - contentLength, err := strconv.ParseInt(val, 10, 64) + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) if err != nil { return BlobClientQueryResponse{}, err } - result.ContentLength = &contentLength + result.BlobSequenceNumber = &blobSequenceNumber } - if val := resp.Header.Get("Content-Type"); val != "" { - result.ContentType = &val + if val := resp.Header.Get("x-ms-blob-type"); val != "" { + result.BlobType = (*BlobType)(&val) } - if val := resp.Header.Get("Content-Range"); val != "" { - result.ContentRange = &val + if val := resp.Header.Get("Cache-Control"); val != "" { + result.CacheControl = &val } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val } - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + contentCRC64, err := base64.StdEncoding.DecodeString(val) if err != nil { return BlobClientQueryResponse{}, err } - result.ContentMD5 = contentMD5 - } - if val := resp.Header.Get("Content-Encoding"); val != "" { - result.ContentEncoding = &val - } - if val := resp.Header.Get("Cache-Control"); val != "" { - result.CacheControl = &val + result.ContentCRC64 = contentCRC64 } if val := resp.Header.Get("Content-Disposition"); val != "" { result.ContentDisposition = &val } + if val := resp.Header.Get("Content-Encoding"); val != "" { + result.ContentEncoding = &val + } if val := resp.Header.Get("Content-Language"); val != "" { result.ContentLanguage = &val } - if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { - blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if val := resp.Header.Get("Content-Length"); val != "" { + contentLength, err := strconv.ParseInt(val, 10, 64) if err != nil { return BlobClientQueryResponse{}, err } - result.BlobSequenceNumber = &blobSequenceNumber + result.ContentLength = &contentLength } - if val := resp.Header.Get("x-ms-blob-type"); val != "" { - result.BlobType = (*BlobType)(&val) + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err 
:= base64.StdEncoding.DecodeString(val) + if err != nil { + return BlobClientQueryResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("Content-Range"); val != "" { + result.ContentRange = &val + } + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val } if val := resp.Header.Get("x-ms-copy-completion-time"); val != "" { copyCompletionTime, err := time.Parse(time.RFC1123, val) @@ -1774,9 +1822,6 @@ func (client *BlobClient) queryHandleResponse(resp *http.Response) (BlobClientQu } result.CopyCompletionTime = ©CompletionTime } - if val := resp.Header.Get("x-ms-copy-status-description"); val != "" { - result.CopyStatusDescription = &val - } if val := resp.Header.Get("x-ms-copy-id"); val != "" { result.CopyID = &val } @@ -1789,26 +1834,8 @@ func (client *BlobClient) queryHandleResponse(resp *http.Response) (BlobClientQu if val := resp.Header.Get("x-ms-copy-status"); val != "" { result.CopyStatus = (*CopyStatusType)(&val) } - if val := resp.Header.Get("x-ms-lease-duration"); val != "" { - result.LeaseDuration = (*LeaseDurationType)(&val) - } - if val := resp.Header.Get("x-ms-lease-state"); val != "" { - result.LeaseState = (*LeaseStateType)(&val) - } - if val := resp.Header.Get("x-ms-lease-status"); val != "" { - result.LeaseStatus = (*LeaseStatusType)(&val) - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("Accept-Ranges"); val != "" { - result.AcceptRanges = &val + if val := resp.Header.Get("x-ms-copy-status-description"); val != "" { + result.CopyStatusDescription = &val } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) @@ -1817,13 +1844,14 @@ func (client *BlobClient) queryHandleResponse(resp *http.Response) (BlobClientQu } result.Date = &date } - if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { - blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) - blobCommittedBlockCount := int32(blobCommittedBlockCount32) - if err != nil { - return BlobClientQueryResponse{}, err - } - result.BlobCommittedBlockCount = &blobCommittedBlockCount + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val } if val := resp.Header.Get("x-ms-server-encrypted"); val != "" { isServerEncrypted, err := strconv.ParseBool(val) @@ -1832,25 +1860,35 @@ func (client *BlobClient) queryHandleResponse(resp *http.Response) (BlobClientQu } result.IsServerEncrypted = &isServerEncrypted } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientQueryResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-lease-duration"); val != "" { + result.LeaseDuration = (*LeaseDurationType)(&val) + } + if val := resp.Header.Get("x-ms-lease-state"); val != "" { + result.LeaseState = (*LeaseStateType)(&val) } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - 
result.EncryptionScope = &val + if val := resp.Header.Get("x-ms-lease-status"); val != "" { + result.LeaseStatus = (*LeaseStatusType)(&val) } - if val := resp.Header.Get("x-ms-blob-content-md5"); val != "" { - blobContentMD5, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return BlobClientQueryResponse{}, err + for hh := range resp.Header { + if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { + if result.Metadata == nil { + result.Metadata = map[string]*string{} + } + result.Metadata[hh[len("x-ms-meta-"):]] = to.Ptr(resp.Header.Get(hh)) } - result.BlobContentMD5 = blobContentMD5 } - if val := resp.Header.Get("x-ms-content-crc64"); val != "" { - contentCRC64, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return BlobClientQueryResponse{}, err - } - result.ContentCRC64 = contentCRC64 + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val } return result, nil } @@ -1863,18 +1901,21 @@ func (client *BlobClient) queryHandleResponse(resp *http.Response) (BlobClientQu // - options - BlobClientReleaseLeaseOptions contains the optional parameters for the BlobClient.ReleaseLease method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *BlobClient) ReleaseLease(ctx context.Context, leaseID string, options *BlobClientReleaseLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientReleaseLeaseResponse, error) { + var err error req, err := client.releaseLeaseCreateRequest(ctx, leaseID, options, modifiedAccessConditions) if err != nil { return BlobClientReleaseLeaseResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientReleaseLeaseResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return BlobClientReleaseLeaseResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientReleaseLeaseResponse{}, err } - return client.releaseLeaseHandleResponse(resp) + resp, err := client.releaseLeaseHandleResponse(httpResp) + return resp, err } // releaseLeaseCreateRequest creates the ReleaseLease request. @@ -1917,6 +1958,16 @@ func (client *BlobClient) releaseLeaseCreateRequest(ctx context.Context, leaseID // releaseLeaseHandleResponse handles the ReleaseLease response. 
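[Editorial note] The queryHandleResponse hunk above is almost entirely a re-sort: the regenerated code now emits one `if val := resp.Header.Get(...)` block per header in alphabetical order, while the parsing logic itself is unchanged. Three decode shapes recur across all of these handlers; the standalone sketch below restates them outside the SDK (decodeSketch and its output are illustrative, not generated API). One quirk worth noting from the hunk: the x-ms-blob-committed-block-count block converts to int32 before checking err, which is harmless only because the converted value is discarded when err is non-nil.

package main

import (
	"encoding/base64"
	"fmt"
	"net/http"
	"strconv"
	"time"
)

// decodeSketch shows the three header-parsing shapes used by the generated
// handlers: RFC1123 timestamps, base64-encoded checksums, and integers.
func decodeSketch(resp *http.Response) error {
	// Shape 1: RFC1123 timestamp headers (Last-Modified, Date, ...).
	if val := resp.Header.Get("Last-Modified"); val != "" {
		lastModified, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return err
		}
		fmt.Println("last modified:", lastModified)
	}
	// Shape 2: base64 checksum headers (Content-MD5, x-ms-content-crc64).
	if val := resp.Header.Get("Content-MD5"); val != "" {
		contentMD5, err := base64.StdEncoding.DecodeString(val)
		if err != nil {
			return err
		}
		fmt.Printf("md5: %x\n", contentMD5)
	}
	// Shape 3: integer headers (Content-Length, sequence numbers, counts).
	if val := resp.Header.Get("Content-Length"); val != "" {
		contentLength, err := strconv.ParseInt(val, 10, 64)
		if err != nil {
			return err
		}
		fmt.Println("length:", contentLength)
	}
	return nil
}

func main() {
	resp := &http.Response{Header: http.Header{}}
	resp.Header.Set("Content-Length", "42")
	if err := decodeSketch(resp); err != nil {
		fmt.Println("decode error:", err)
	}
}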
func (client *BlobClient) releaseLeaseHandleResponse(resp *http.Response) (BlobClientReleaseLeaseResponse, error) { result := BlobClientReleaseLeaseResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientReleaseLeaseResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -1927,22 +1978,12 @@ func (client *BlobClient) releaseLeaseHandleResponse(resp *http.Response) (BlobC } result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientReleaseLeaseResponse{}, err - } - result.Date = &date - } return result, nil } @@ -1954,18 +1995,21 @@ func (client *BlobClient) releaseLeaseHandleResponse(resp *http.Response) (BlobC // - options - BlobClientRenewLeaseOptions contains the optional parameters for the BlobClient.RenewLease method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *BlobClient) RenewLease(ctx context.Context, leaseID string, options *BlobClientRenewLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientRenewLeaseResponse, error) { + var err error req, err := client.renewLeaseCreateRequest(ctx, leaseID, options, modifiedAccessConditions) if err != nil { return BlobClientRenewLeaseResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientRenewLeaseResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return BlobClientRenewLeaseResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientRenewLeaseResponse{}, err } - return client.renewLeaseHandleResponse(resp) + resp, err := client.renewLeaseHandleResponse(httpResp) + return resp, err } // renewLeaseCreateRequest creates the RenewLease request. @@ -2008,6 +2052,16 @@ func (client *BlobClient) renewLeaseCreateRequest(ctx context.Context, leaseID s // renewLeaseHandleResponse handles the RenewLease response. 
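[Editorial note] One block in queryHandleResponse that is more than a reorder target is the x-ms-meta- loop, which folds user metadata headers into a lazily allocated map. A self-contained restatement of that loop follows, with a local ptr helper standing in for the SDK's to.Ptr; names here are illustrative.

package main

import (
	"fmt"
	"net/http"
	"strings"
)

func ptr[T any](v T) *T { return &v }

// collectMetadata gathers every header whose name starts with "x-ms-meta-"
// (case-insensitively) into a map keyed by the remaining suffix, mirroring
// the loop emitted by the code generator.
func collectMetadata(resp *http.Response) map[string]*string {
	var metadata map[string]*string
	for hh := range resp.Header {
		if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") {
			if metadata == nil {
				metadata = map[string]*string{}
			}
			metadata[hh[len("x-ms-meta-"):]] = ptr(resp.Header.Get(hh))
		}
	}
	return metadata
}

func main() {
	resp := &http.Response{Header: http.Header{}}
	resp.Header.Set("x-ms-meta-owner", "tempo")
	for k, v := range collectMetadata(resp) {
		fmt.Println(k, "=", *v)
	}
}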
func (client *BlobClient) renewLeaseHandleResponse(resp *http.Response) (BlobClientRenewLeaseResponse, error) { result := BlobClientRenewLeaseResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientRenewLeaseResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -2021,22 +2075,12 @@ func (client *BlobClient) renewLeaseHandleResponse(resp *http.Response) (BlobCli if val := resp.Header.Get("x-ms-lease-id"); val != "" { result.LeaseID = &val } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientRenewLeaseResponse{}, err - } - result.Date = &date - } return result, nil } @@ -2047,18 +2091,21 @@ func (client *BlobClient) renewLeaseHandleResponse(resp *http.Response) (BlobCli // - expiryOptions - Required. Indicates mode of the expiry time // - options - BlobClientSetExpiryOptions contains the optional parameters for the BlobClient.SetExpiry method. func (client *BlobClient) SetExpiry(ctx context.Context, expiryOptions ExpiryOptions, options *BlobClientSetExpiryOptions) (BlobClientSetExpiryResponse, error) { + var err error req, err := client.setExpiryCreateRequest(ctx, expiryOptions, options) if err != nil { return BlobClientSetExpiryResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientSetExpiryResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return BlobClientSetExpiryResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientSetExpiryResponse{}, err } - return client.setExpiryHandleResponse(resp) + resp, err := client.setExpiryHandleResponse(httpResp) + return resp, err } // setExpiryCreateRequest creates the SetExpiry request. @@ -2088,6 +2135,16 @@ func (client *BlobClient) setExpiryCreateRequest(ctx context.Context, expiryOpti // setExpiryHandleResponse handles the SetExpiry response. 
func (client *BlobClient) setExpiryHandleResponse(resp *http.Response) (BlobClientSetExpiryResponse, error) { result := BlobClientSetExpiryResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientSetExpiryResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -2098,22 +2155,12 @@ func (client *BlobClient) setExpiryHandleResponse(resp *http.Response) (BlobClie } result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientSetExpiryResponse{}, err - } - result.Date = &date - } return result, nil } @@ -2126,18 +2173,21 @@ func (client *BlobClient) setExpiryHandleResponse(resp *http.Response) (BlobClie // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *BlobClient) SetHTTPHeaders(ctx context.Context, options *BlobClientSetHTTPHeadersOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientSetHTTPHeadersResponse, error) { + var err error req, err := client.setHTTPHeadersCreateRequest(ctx, options, blobHTTPHeaders, leaseAccessConditions, modifiedAccessConditions) if err != nil { return BlobClientSetHTTPHeadersResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientSetHTTPHeadersResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return BlobClientSetHTTPHeadersResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientSetHTTPHeadersResponse{}, err } - return client.setHTTPHeadersHandleResponse(resp) + resp, err := client.setHTTPHeadersHandleResponse(httpResp) + return resp, err } // setHTTPHeadersCreateRequest creates the SetHTTPHeaders request. @@ -2199,16 +2249,6 @@ func (client *BlobClient) setHTTPHeadersCreateRequest(ctx context.Context, optio // setHTTPHeadersHandleResponse handles the SetHTTPHeaders response. 
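[Editorial note] The same mechanical rewrite repeats in every exported method of this file: the raw response is renamed httpResp, err is declared once at the top, and non-success statuses are assigned to err via runtime.NewResponseError before returning, so a single error path leaves the function. The skeleton below is a minimal standalone sketch of that shape; pipeline, hasStatusCode, and doOperation are hypothetical stand-ins, not SDK API. Note that `req, err := ...` after `var err error` is legal Go and reuses (does not shadow) err, because req is newly declared in the same scope.

package main

import (
	"context"
	"errors"
	"fmt"
	"net/http"
)

// pipeline is a stand-in for azcore's runtime pipeline; the real generated
// clients call client.internal.Pipeline().Do(req).
type pipeline struct{}

func (pipeline) Do(req *http.Request) (*http.Response, error) {
	return &http.Response{StatusCode: http.StatusOK, Header: http.Header{}}, nil
}

func hasStatusCode(resp *http.Response, codes ...int) bool {
	for _, c := range codes {
		if resp.StatusCode == c {
			return true
		}
	}
	return false
}

type doOperationResponse struct{}

func handleResponse(*http.Response) (doOperationResponse, error) { return doOperationResponse{}, nil }

// doOperation mirrors the rewritten wrapper shape: one err declared up
// front, httpResp for the raw response, and every failure funnelled
// through err before returning.
func doOperation(ctx context.Context, pl pipeline) (doOperationResponse, error) {
	var err error
	req, err := http.NewRequestWithContext(ctx, http.MethodPut, "https://example.invalid/blob", nil)
	if err != nil {
		return doOperationResponse{}, err
	}
	httpResp, err := pl.Do(req)
	if err != nil {
		return doOperationResponse{}, err
	}
	if !hasStatusCode(httpResp, http.StatusOK) {
		err = errors.New("unexpected status") // the SDK uses runtime.NewResponseError(httpResp)
		return doOperationResponse{}, err
	}
	resp, err := handleResponse(httpResp)
	return resp, err
}

func main() {
	if _, err := doOperation(context.Background(), pipeline{}); err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println("ok")
}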
func (client *BlobClient) setHTTPHeadersHandleResponse(resp *http.Response) (BlobClientSetHTTPHeadersResponse, error) { result := BlobClientSetHTTPHeadersResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientSetHTTPHeadersResponse{}, err - } - result.LastModified = &lastModified - } if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) if err != nil { @@ -2219,12 +2259,6 @@ func (client *BlobClient) setHTTPHeadersHandleResponse(resp *http.Response) (Blo if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -2232,6 +2266,22 @@ func (client *BlobClient) setHTTPHeadersHandleResponse(resp *http.Response) (Blo } result.Date = &date } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientSetHTTPHeadersResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } @@ -2243,18 +2293,21 @@ func (client *BlobClient) setHTTPHeadersHandleResponse(resp *http.Response) (Blo // method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *BlobClient) SetImmutabilityPolicy(ctx context.Context, options *BlobClientSetImmutabilityPolicyOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientSetImmutabilityPolicyResponse, error) { + var err error req, err := client.setImmutabilityPolicyCreateRequest(ctx, options, modifiedAccessConditions) if err != nil { return BlobClientSetImmutabilityPolicyResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientSetImmutabilityPolicyResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return BlobClientSetImmutabilityPolicyResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientSetImmutabilityPolicyResponse{}, err } - return client.setImmutabilityPolicyHandleResponse(resp) + resp, err := client.setImmutabilityPolicyHandleResponse(httpResp) + return resp, err } // setImmutabilityPolicyCreateRequest creates the SetImmutabilityPolicy request. 
@@ -2292,12 +2345,6 @@ func (client *BlobClient) setImmutabilityPolicyHandleResponse(resp *http.Respons if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -2315,6 +2362,12 @@ func (client *BlobClient) setImmutabilityPolicyHandleResponse(resp *http.Respons if val := resp.Header.Get("x-ms-immutability-policy-mode"); val != "" { result.ImmutabilityPolicyMode = (*ImmutabilityPolicyMode)(&val) } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } @@ -2325,18 +2378,21 @@ func (client *BlobClient) setImmutabilityPolicyHandleResponse(resp *http.Respons // - legalHold - Specified if a legal hold should be set on the blob. // - options - BlobClientSetLegalHoldOptions contains the optional parameters for the BlobClient.SetLegalHold method. func (client *BlobClient) SetLegalHold(ctx context.Context, legalHold bool, options *BlobClientSetLegalHoldOptions) (BlobClientSetLegalHoldResponse, error) { + var err error req, err := client.setLegalHoldCreateRequest(ctx, legalHold, options) if err != nil { return BlobClientSetLegalHoldResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientSetLegalHoldResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return BlobClientSetLegalHoldResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientSetLegalHoldResponse{}, err } - return client.setLegalHoldHandleResponse(resp) + resp, err := client.setLegalHoldHandleResponse(httpResp) + return resp, err } // setLegalHoldCreateRequest creates the SetLegalHold request. @@ -2366,12 +2422,6 @@ func (client *BlobClient) setLegalHoldHandleResponse(resp *http.Response) (BlobC if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -2386,6 +2436,12 @@ func (client *BlobClient) setLegalHoldHandleResponse(resp *http.Response) (BlobC } result.LegalHold = &legalHold } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } @@ -2400,18 +2456,21 @@ func (client *BlobClient) setLegalHoldHandleResponse(resp *http.Response) (BlobC // - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
func (client *BlobClient) SetMetadata(ctx context.Context, options *BlobClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientSetMetadataResponse, error) { + var err error req, err := client.setMetadataCreateRequest(ctx, options, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) if err != nil { return BlobClientSetMetadataResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientSetMetadataResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return BlobClientSetMetadataResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientSetMetadataResponse{}, err } - return client.setMetadataHandleResponse(resp) + resp, err := client.setMetadataHandleResponse(httpResp) + return resp, err } // setMetadataCreateRequest creates the SetMetadata request. @@ -2474,28 +2533,9 @@ func (client *BlobClient) setMetadataCreateRequest(ctx context.Context, options // setMetadataHandleResponse handles the SetMetadata response. func (client *BlobClient) setMetadataHandleResponse(resp *http.Response) (BlobClientSetMetadataResponse, error) { result := BlobClientSetMetadataResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientSetMetadataResponse{}, err - } - result.LastModified = &lastModified - } if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("x-ms-version-id"); val != "" { - result.VersionID = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -2503,6 +2543,15 @@ func (client *BlobClient) setMetadataHandleResponse(resp *http.Response) (BlobCl } result.Date = &date } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { isServerEncrypted, err := strconv.ParseBool(val) if err != nil { @@ -2510,11 +2559,21 @@ func (client *BlobClient) setMetadataHandleResponse(resp *http.Response) (BlobCl } result.IsServerEncrypted = &isServerEncrypted } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientSetMetadataResponse{}, err + } + result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := 
resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val } return result, nil } @@ -2528,18 +2587,21 @@ func (client *BlobClient) setMetadataHandleResponse(resp *http.Response) (BlobCl // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. func (client *BlobClient) SetTags(ctx context.Context, tags BlobTags, options *BlobClientSetTagsOptions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (BlobClientSetTagsResponse, error) { + var err error req, err := client.setTagsCreateRequest(ctx, tags, options, modifiedAccessConditions, leaseAccessConditions) if err != nil { return BlobClientSetTagsResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientSetTagsResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusNoContent) { - return BlobClientSetTagsResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusNoContent) { + err = runtime.NewResponseError(httpResp) + return BlobClientSetTagsResponse{}, err } - return client.setTagsHandleResponse(resp) + resp, err := client.setTagsHandleResponse(httpResp) + return resp, err } // setTagsCreateRequest creates the SetTags request. @@ -2586,12 +2648,6 @@ func (client *BlobClient) setTagsHandleResponse(resp *http.Response) (BlobClient if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -2599,6 +2655,12 @@ func (client *BlobClient) setTagsHandleResponse(resp *http.Response) (BlobClient } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } @@ -2614,18 +2676,21 @@ func (client *BlobClient) setTagsHandleResponse(resp *http.Response) (BlobClient // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
func (client *BlobClient) SetTier(ctx context.Context, tier AccessTier, options *BlobClientSetTierOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientSetTierResponse, error) { + var err error req, err := client.setTierCreateRequest(ctx, tier, options, leaseAccessConditions, modifiedAccessConditions) if err != nil { return BlobClientSetTierResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientSetTierResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) { - return BlobClientSetTierResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return BlobClientSetTierResponse{}, err } - return client.setTierHandleResponse(resp) + resp, err := client.setTierHandleResponse(httpResp) + return resp, err } // setTierCreateRequest creates the SetTier request. @@ -2692,18 +2757,21 @@ func (client *BlobClient) setTierHandleResponse(resp *http.Response) (BlobClient // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. func (client *BlobClient) StartCopyFromURL(ctx context.Context, copySource string, options *BlobClientStartCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (BlobClientStartCopyFromURLResponse, error) { + var err error req, err := client.startCopyFromURLCreateRequest(ctx, copySource, options, sourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions) if err != nil { return BlobClientStartCopyFromURLResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientStartCopyFromURLResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusAccepted) { - return BlobClientStartCopyFromURLResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return BlobClientStartCopyFromURLResponse{}, err } - return client.startCopyFromURLHandleResponse(resp) + resp, err := client.startCopyFromURLHandleResponse(httpResp) + return resp, err } // startCopyFromURLCreateRequest creates the StartCopyFromURL request. @@ -2790,6 +2858,22 @@ func (client *BlobClient) startCopyFromURLCreateRequest(ctx context.Context, cop // startCopyFromURLHandleResponse handles the StartCopyFromURL response. 
func (client *BlobClient) startCopyFromURLHandleResponse(resp *http.Response) (BlobClientStartCopyFromURLResponse, error) { result := BlobClientStartCopyFromURLResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-copy-id"); val != "" { + result.CopyID = &val + } + if val := resp.Header.Get("x-ms-copy-status"); val != "" { + result.CopyStatus = (*CopyStatusType)(&val) + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientStartCopyFromURLResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -2800,9 +2884,6 @@ func (client *BlobClient) startCopyFromURLHandleResponse(resp *http.Response) (B } result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } @@ -2812,19 +2893,6 @@ func (client *BlobClient) startCopyFromURLHandleResponse(resp *http.Response) (B if val := resp.Header.Get("x-ms-version-id"); val != "" { result.VersionID = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientStartCopyFromURLResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("x-ms-copy-id"); val != "" { - result.CopyID = &val - } - if val := resp.Header.Get("x-ms-copy-status"); val != "" { - result.CopyStatus = (*CopyStatusType)(&val) - } return result, nil } @@ -2834,18 +2902,21 @@ func (client *BlobClient) startCopyFromURLHandleResponse(resp *http.Response) (B // Generated from API version 2023-08-03 // - options - BlobClientUndeleteOptions contains the optional parameters for the BlobClient.Undelete method. func (client *BlobClient) Undelete(ctx context.Context, options *BlobClientUndeleteOptions) (BlobClientUndeleteResponse, error) { + var err error req, err := client.undeleteCreateRequest(ctx, options) if err != nil { return BlobClientUndeleteResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientUndeleteResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return BlobClientUndeleteResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientUndeleteResponse{}, err } - return client.undeleteHandleResponse(resp) + resp, err := client.undeleteHandleResponse(httpResp) + return resp, err } // undeleteCreateRequest creates the Undelete request. 
@@ -2874,12 +2945,6 @@ func (client *BlobClient) undeleteHandleResponse(resp *http.Response) (BlobClien if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -2887,5 +2952,11 @@ func (client *BlobClient) undeleteHandleResponse(resp *http.Response) (BlobClien } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blockblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blockblob_client.go index 6b2def53f8f..bfd7f5eac7d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blockblob_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blockblob_client.go @@ -3,9 +3,8 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. // Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. package generated @@ -47,18 +46,21 @@ type BlockBlobClient struct { // - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *BlockBlobClient) CommitBlockList(ctx context.Context, blocks BlockLookupList, options *BlockBlobClientCommitBlockListOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlockBlobClientCommitBlockListResponse, error) { + var err error req, err := client.commitBlockListCreateRequest(ctx, blocks, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) if err != nil { return BlockBlobClientCommitBlockListResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlockBlobClientCommitBlockListResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return BlockBlobClientCommitBlockListResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return BlockBlobClientCommitBlockListResponse{}, err } - return client.commitBlockListHandleResponse(resp) + resp, err := client.commitBlockListHandleResponse(httpResp) + return resp, err } // commitBlockListCreateRequest creates the CommitBlockList request. @@ -163,15 +165,15 @@ func (client *BlockBlobClient) commitBlockListCreateRequest(ctx context.Context, // commitBlockListHandleResponse handles the CommitBlockList response. 
func (client *BlockBlobClient) commitBlockListHandleResponse(resp *http.Response) (BlockBlobClientCommitBlockListResponse, error) { result := BlockBlobClientCommitBlockListResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + contentCRC64, err := base64.StdEncoding.DecodeString(val) if err != nil { return BlockBlobClientCommitBlockListResponse{}, err } - result.LastModified = &lastModified + result.ContentCRC64 = contentCRC64 } if val := resp.Header.Get("Content-MD5"); val != "" { contentMD5, err := base64.StdEncoding.DecodeString(val) @@ -180,44 +182,44 @@ func (client *BlockBlobClient) commitBlockListHandleResponse(resp *http.Response } result.ContentMD5 = contentMD5 } - if val := resp.Header.Get("x-ms-content-crc64"); val != "" { - contentCRC64, err := base64.StdEncoding.DecodeString(val) + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) if err != nil { return BlockBlobClientCommitBlockListResponse{}, err } - result.ContentCRC64 = contentCRC64 - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val + result.Date = &date } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val } - if val := resp.Header.Get("x-ms-version-id"); val != "" { - result.VersionID = &val + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) if err != nil { return BlockBlobClientCommitBlockListResponse{}, err } - result.Date = &date + result.IsServerEncrypted = &isServerEncrypted } - if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { - isServerEncrypted, err := strconv.ParseBool(val) + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) if err != nil { return BlockBlobClientCommitBlockListResponse{}, err } - result.IsServerEncrypted = &isServerEncrypted + result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val } return result, nil } @@ -231,18 +233,21 @@ func (client *BlockBlobClient) commitBlockListHandleResponse(resp *http.Response // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. 
// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *BlockBlobClient) GetBlockList(ctx context.Context, listType BlockListType, options *BlockBlobClientGetBlockListOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (BlockBlobClientGetBlockListResponse, error) { + var err error req, err := client.getBlockListCreateRequest(ctx, listType, options, leaseAccessConditions, modifiedAccessConditions) if err != nil { return BlockBlobClientGetBlockListResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlockBlobClientGetBlockListResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return BlockBlobClientGetBlockListResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlockBlobClientGetBlockListResponse{}, err } - return client.getBlockListHandleResponse(resp) + resp, err := client.getBlockListHandleResponse(httpResp) + return resp, err } // getBlockListCreateRequest creates the GetBlockList request. @@ -278,28 +283,35 @@ func (client *BlockBlobClient) getBlockListCreateRequest(ctx context.Context, li // getBlockListHandleResponse handles the GetBlockList response. func (client *BlockBlobClient) getBlockListHandleResponse(resp *http.Response) (BlockBlobClientGetBlockListResponse, error) { result := BlockBlobClientGetBlockListResponse{} - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("x-ms-blob-content-length"); val != "" { + blobContentLength, err := strconv.ParseInt(val, 10, 64) if err != nil { return BlockBlobClientGetBlockListResponse{}, err } - result.LastModified = &lastModified + result.BlobContentLength = &blobContentLength } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val } if val := resp.Header.Get("Content-Type"); val != "" { result.ContentType = &val } - if val := resp.Header.Get("x-ms-blob-content-length"); val != "" { - blobContentLength, err := strconv.ParseInt(val, 10, 64) + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) if err != nil { return BlockBlobClientGetBlockListResponse{}, err } - result.BlobContentLength = &blobContentLength + result.Date = &date } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlockBlobClientGetBlockListResponse{}, err + } + result.LastModified = &lastModified } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val @@ -307,13 +319,6 @@ func (client *BlockBlobClient) getBlockListHandleResponse(resp *http.Response) ( if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlockBlobClientGetBlockListResponse{}, err - } - result.Date = &date - } if err := runtime.UnmarshalAsXML(resp, &result.BlockList); err 
!= nil { return BlockBlobClientGetBlockListResponse{}, err } @@ -342,18 +347,21 @@ func (client *BlockBlobClient) getBlockListHandleResponse(resp *http.Response) ( // - SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL // method. func (client *BlockBlobClient) PutBlobFromURL(ctx context.Context, contentLength int64, copySource string, options *BlockBlobClientPutBlobFromURLOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (BlockBlobClientPutBlobFromURLResponse, error) { + var err error req, err := client.putBlobFromURLCreateRequest(ctx, contentLength, copySource, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions, sourceModifiedAccessConditions) if err != nil { return BlockBlobClientPutBlobFromURLResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlockBlobClientPutBlobFromURLResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return BlockBlobClientPutBlobFromURLResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return BlockBlobClientPutBlobFromURLResponse{}, err } - return client.putBlobFromURLHandleResponse(resp) + resp, err := client.putBlobFromURLHandleResponse(httpResp) + return resp, err } // putBlobFromURLCreateRequest creates the PutBlobFromURL request. @@ -472,15 +480,8 @@ func (client *BlockBlobClient) putBlobFromURLCreateRequest(ctx context.Context, // putBlobFromURLHandleResponse handles the PutBlobFromURL response. 
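[Editorial note] Unlike the header-only handlers, getBlockListHandleResponse above also decodes the response body; the regenerated code simply moves the runtime.UnmarshalAsXML call after all header parsing. The standalone sketch below shows what that decode amounts to using encoding/xml directly; blockList and block are illustrative stand-ins, since the SDK's actual BlockList model is generated.

package main

import (
	"encoding/xml"
	"fmt"
	"io"
	"net/http"
	"strings"
)

// blockList is an illustrative stand-in for the generated BlockList model.
type blockList struct {
	XMLName         xml.Name `xml:"BlockList"`
	CommittedBlocks []block  `xml:"CommittedBlocks>Block"`
}

type block struct {
	Name string `xml:"Name"`
	Size int64  `xml:"Size"`
}

// unmarshalAsXMLSketch mirrors what runtime.UnmarshalAsXML does for this
// handler: read the body and XML-decode it into the target value.
func unmarshalAsXMLSketch(resp *http.Response, v any) error {
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return err
	}
	return xml.Unmarshal(body, v)
}

func main() {
	payload := `<BlockList><CommittedBlocks><Block><Name>YmxvY2stMQ==</Name><Size>1024</Size></Block></CommittedBlocks></BlockList>`
	resp := &http.Response{Body: io.NopCloser(strings.NewReader(payload))}
	var bl blockList
	if err := unmarshalAsXMLSketch(resp, &bl); err != nil {
		fmt.Println("decode error:", err)
		return
	}
	fmt.Printf("%d committed block(s), first: %s\n", len(bl.CommittedBlocks), bl.CommittedBlocks[0].Name)
}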
func (client *BlockBlobClient) putBlobFromURLHandleResponse(resp *http.Response) (BlockBlobClientPutBlobFromURLResponse, error) { result := BlockBlobClientPutBlobFromURLResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlockBlobClientPutBlobFromURLResponse{}, err - } - result.LastModified = &lastModified + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val } if val := resp.Header.Get("Content-MD5"); val != "" { contentMD5, err := base64.StdEncoding.DecodeString(val) @@ -489,18 +490,6 @@ func (client *BlockBlobClient) putBlobFromURLHandleResponse(resp *http.Response) } result.ContentMD5 = contentMD5 } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("x-ms-version-id"); val != "" { - result.VersionID = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -508,6 +497,15 @@ func (client *BlockBlobClient) putBlobFromURLHandleResponse(resp *http.Response) } result.Date = &date } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { isServerEncrypted, err := strconv.ParseBool(val) if err != nil { @@ -515,11 +513,21 @@ func (client *BlockBlobClient) putBlobFromURLHandleResponse(resp *http.Response) } result.IsServerEncrypted = &isServerEncrypted } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlockBlobClientPutBlobFromURLResponse{}, err + } + result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val } return result, nil } @@ -538,18 +546,21 @@ func (client *BlockBlobClient) putBlobFromURLHandleResponse(resp *http.Response) // - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. // - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. 
func (client *BlockBlobClient) StageBlock(ctx context.Context, blockID string, contentLength int64, body io.ReadSeekCloser, options *BlockBlobClientStageBlockOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo) (BlockBlobClientStageBlockResponse, error) { + var err error req, err := client.stageBlockCreateRequest(ctx, blockID, contentLength, body, options, leaseAccessConditions, cpkInfo, cpkScopeInfo) if err != nil { return BlockBlobClientStageBlockResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlockBlobClientStageBlockResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return BlockBlobClientStageBlockResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return BlockBlobClientStageBlockResponse{}, err } - return client.stageBlockHandleResponse(resp) + resp, err := client.stageBlockHandleResponse(httpResp) + return resp, err } // stageBlockCreateRequest creates the StageBlock request. @@ -601,6 +612,16 @@ func (client *BlockBlobClient) stageBlockCreateRequest(ctx context.Context, bloc // stageBlockHandleResponse handles the StageBlock response. func (client *BlockBlobClient) stageBlockHandleResponse(resp *http.Response) (BlockBlobClientStageBlockResponse, error) { result := BlockBlobClientStageBlockResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + contentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlockBlobClientStageBlockResponse{}, err + } + result.ContentCRC64 = contentCRC64 + } if val := resp.Header.Get("Content-MD5"); val != "" { contentMD5, err := base64.StdEncoding.DecodeString(val) if err != nil { @@ -608,15 +629,6 @@ func (client *BlockBlobClient) stageBlockHandleResponse(resp *http.Response) (Bl } result.ContentMD5 = contentMD5 } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -624,12 +636,11 @@ func (client *BlockBlobClient) stageBlockHandleResponse(resp *http.Response) (Bl } result.Date = &date } - if val := resp.Header.Get("x-ms-content-crc64"); val != "" { - contentCRC64, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return BlockBlobClientStageBlockResponse{}, err - } - result.ContentCRC64 = contentCRC64 + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val } if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { isServerEncrypted, err := strconv.ParseBool(val) @@ -638,11 +649,11 @@ func (client *BlockBlobClient) stageBlockHandleResponse(resp *http.Response) (Bl } result.IsServerEncrypted = &isServerEncrypted } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val } - if val := 
resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val } return result, nil } @@ -665,18 +676,21 @@ func (client *BlockBlobClient) stageBlockHandleResponse(resp *http.Response) (Bl // - SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL // method. func (client *BlockBlobClient) StageBlockFromURL(ctx context.Context, blockID string, contentLength int64, sourceURL string, options *BlockBlobClientStageBlockFromURLOptions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, leaseAccessConditions *LeaseAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (BlockBlobClientStageBlockFromURLResponse, error) { + var err error req, err := client.stageBlockFromURLCreateRequest(ctx, blockID, contentLength, sourceURL, options, cpkInfo, cpkScopeInfo, leaseAccessConditions, sourceModifiedAccessConditions) if err != nil { return BlockBlobClientStageBlockFromURLResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlockBlobClientStageBlockFromURLResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return BlockBlobClientStageBlockFromURLResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return BlockBlobClientStageBlockFromURLResponse{}, err } - return client.stageBlockFromURLHandleResponse(resp) + resp, err := client.stageBlockFromURLHandleResponse(httpResp) + return resp, err } // stageBlockFromURLCreateRequest creates the StageBlockFromURL request. @@ -744,12 +758,8 @@ func (client *BlockBlobClient) stageBlockFromURLCreateRequest(ctx context.Contex // stageBlockFromURLHandleResponse handles the StageBlockFromURL response. 
func (client *BlockBlobClient) stageBlockFromURLHandleResponse(resp *http.Response) (BlockBlobClientStageBlockFromURLResponse, error) { result := BlockBlobClientStageBlockFromURLResponse{} - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return BlockBlobClientStageBlockFromURLResponse{}, err - } - result.ContentMD5 = contentMD5 + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val } if val := resp.Header.Get("x-ms-content-crc64"); val != "" { contentCRC64, err := base64.StdEncoding.DecodeString(val) @@ -758,14 +768,12 @@ func (client *BlockBlobClient) stageBlockFromURLHandleResponse(resp *http.Respon } result.ContentCRC64 = contentCRC64 } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlockBlobClientStageBlockFromURLResponse{}, err + } + result.ContentMD5 = contentMD5 } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) @@ -774,6 +782,12 @@ func (client *BlockBlobClient) stageBlockFromURLHandleResponse(resp *http.Respon } result.Date = &date } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { isServerEncrypted, err := strconv.ParseBool(val) if err != nil { @@ -781,11 +795,11 @@ func (client *BlockBlobClient) stageBlockFromURLHandleResponse(resp *http.Respon } result.IsServerEncrypted = &isServerEncrypted } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val } return result, nil } @@ -806,18 +820,21 @@ func (client *BlockBlobClient) stageBlockFromURLHandleResponse(resp *http.Respon // - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
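[Editorial note] A Go idiom that appears throughout these handlers is result.ETag = (*azcore.ETag)(&val): azcore.ETag and the enum-like header types (BlobType, CopyStatusType, LeaseStateType, and so on) are defined string types, so a *string converts directly to a pointer of the defined type with no copy. A minimal illustration with local stand-in types:

package main

import "fmt"

// etag and copyStatusType are local stand-ins for azcore.ETag and the
// generated CopyStatusType; both are defined string types in the SDK.
type etag string
type copyStatusType string

func main() {
	val := "0x8DCA1B2C3D4E5F6"
	// Converting *string to *etag is legal because etag's underlying type
	// is string; no new allocation or copy is made.
	e := (*etag)(&val)
	status := "success"
	cs := (*copyStatusType)(&status)
	fmt.Println(*e, *cs)
}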
func (client *BlockBlobClient) Upload(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *BlockBlobClientUploadOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlockBlobClientUploadResponse, error) { + var err error req, err := client.uploadCreateRequest(ctx, contentLength, body, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) if err != nil { return BlockBlobClientUploadResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlockBlobClientUploadResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return BlockBlobClientUploadResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return BlockBlobClientUploadResponse{}, err } - return client.uploadHandleResponse(resp) + resp, err := client.uploadHandleResponse(httpResp) + return resp, err } // uploadCreateRequest creates the Upload request. @@ -923,15 +940,8 @@ func (client *BlockBlobClient) uploadCreateRequest(ctx context.Context, contentL // uploadHandleResponse handles the Upload response. func (client *BlockBlobClient) uploadHandleResponse(resp *http.Response) (BlockBlobClientUploadResponse, error) { result := BlockBlobClientUploadResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlockBlobClientUploadResponse{}, err - } - result.LastModified = &lastModified + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val } if val := resp.Header.Get("Content-MD5"); val != "" { contentMD5, err := base64.StdEncoding.DecodeString(val) @@ -940,18 +950,6 @@ func (client *BlockBlobClient) uploadHandleResponse(resp *http.Response) (BlockB } result.ContentMD5 = contentMD5 } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("x-ms-version-id"); val != "" { - result.VersionID = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -959,6 +957,15 @@ func (client *BlockBlobClient) uploadHandleResponse(resp *http.Response) (BlockB } result.Date = &date } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { isServerEncrypted, err := strconv.ParseBool(val) if err != nil { @@ -966,11 +973,21 @@ func (client *BlockBlobClient) uploadHandleResponse(resp *http.Response) (BlockB } result.IsServerEncrypted = &isServerEncrypted } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val + if val := resp.Header.Get("Last-Modified"); val != "" { + 
lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlockBlobClientUploadResponse{}, err + } + result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val } return result, nil } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_constants.go index b9d306cacf4..95af9e15447 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_constants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_constants.go @@ -3,9 +3,8 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. // Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. package generated diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_container_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_container_client.go index 8d325a3a58c..ce1ff6fdd75 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_container_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_container_client.go @@ -3,9 +3,8 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. // Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. package generated @@ -42,18 +41,21 @@ type ContainerClient struct { // - options - ContainerClientAcquireLeaseOptions contains the optional parameters for the ContainerClient.AcquireLease method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
func (client *ContainerClient) AcquireLease(ctx context.Context, duration int32, options *ContainerClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientAcquireLeaseResponse, error) { + var err error req, err := client.acquireLeaseCreateRequest(ctx, duration, options, modifiedAccessConditions) if err != nil { return ContainerClientAcquireLeaseResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientAcquireLeaseResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return ContainerClientAcquireLeaseResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return ContainerClientAcquireLeaseResponse{}, err } - return client.acquireLeaseHandleResponse(resp) + resp, err := client.acquireLeaseHandleResponse(httpResp) + return resp, err } // acquireLeaseCreateRequest creates the AcquireLease request. @@ -91,6 +93,16 @@ func (client *ContainerClient) acquireLeaseCreateRequest(ctx context.Context, du // acquireLeaseHandleResponse handles the AcquireLease response. func (client *ContainerClient) acquireLeaseHandleResponse(resp *http.Response) (ContainerClientAcquireLeaseResponse, error) { result := ContainerClientAcquireLeaseResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientAcquireLeaseResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -104,22 +116,12 @@ func (client *ContainerClient) acquireLeaseHandleResponse(resp *http.Response) ( if val := resp.Header.Get("x-ms-lease-id"); val != "" { result.LeaseID = &val } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientAcquireLeaseResponse{}, err - } - result.Date = &date - } return result, nil } @@ -131,18 +133,21 @@ func (client *ContainerClient) acquireLeaseHandleResponse(resp *http.Response) ( // - options - ContainerClientBreakLeaseOptions contains the optional parameters for the ContainerClient.BreakLease method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
func (client *ContainerClient) BreakLease(ctx context.Context, options *ContainerClientBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientBreakLeaseResponse, error) { + var err error req, err := client.breakLeaseCreateRequest(ctx, options, modifiedAccessConditions) if err != nil { return ContainerClientBreakLeaseResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientBreakLeaseResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusAccepted) { - return ContainerClientBreakLeaseResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return ContainerClientBreakLeaseResponse{}, err } - return client.breakLeaseHandleResponse(resp) + resp, err := client.breakLeaseHandleResponse(httpResp) + return resp, err } // breakLeaseCreateRequest creates the BreakLease request. @@ -179,6 +184,16 @@ func (client *ContainerClient) breakLeaseCreateRequest(ctx context.Context, opti // breakLeaseHandleResponse handles the BreakLease response. func (client *ContainerClient) breakLeaseHandleResponse(resp *http.Response) (ContainerClientBreakLeaseResponse, error) { result := ContainerClientBreakLeaseResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientBreakLeaseResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -197,22 +212,12 @@ func (client *ContainerClient) breakLeaseHandleResponse(resp *http.Response) (Co } result.LeaseTime = &leaseTime } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientBreakLeaseResponse{}, err - } - result.Date = &date - } return result, nil } @@ -228,18 +233,21 @@ func (client *ContainerClient) breakLeaseHandleResponse(resp *http.Response) (Co // - options - ContainerClientChangeLeaseOptions contains the optional parameters for the ContainerClient.ChangeLease method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
func (client *ContainerClient) ChangeLease(ctx context.Context, leaseID string, proposedLeaseID string, options *ContainerClientChangeLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientChangeLeaseResponse, error) { + var err error req, err := client.changeLeaseCreateRequest(ctx, leaseID, proposedLeaseID, options, modifiedAccessConditions) if err != nil { return ContainerClientChangeLeaseResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientChangeLeaseResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ContainerClientChangeLeaseResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ContainerClientChangeLeaseResponse{}, err } - return client.changeLeaseHandleResponse(resp) + resp, err := client.changeLeaseHandleResponse(httpResp) + return resp, err } // changeLeaseCreateRequest creates the ChangeLease request. @@ -275,6 +283,16 @@ func (client *ContainerClient) changeLeaseCreateRequest(ctx context.Context, lea // changeLeaseHandleResponse handles the ChangeLease response. func (client *ContainerClient) changeLeaseHandleResponse(resp *http.Response) (ContainerClientChangeLeaseResponse, error) { result := ContainerClientChangeLeaseResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientChangeLeaseResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -288,22 +306,12 @@ func (client *ContainerClient) changeLeaseHandleResponse(resp *http.Response) (C if val := resp.Header.Get("x-ms-lease-id"); val != "" { result.LeaseID = &val } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientChangeLeaseResponse{}, err - } - result.Date = &date - } return result, nil } @@ -315,18 +323,21 @@ func (client *ContainerClient) changeLeaseHandleResponse(resp *http.Response) (C // - options - ContainerClientCreateOptions contains the optional parameters for the ContainerClient.Create method. // - ContainerCPKScopeInfo - ContainerCPKScopeInfo contains a group of parameters for the ContainerClient.Create method. 
func (client *ContainerClient) Create(ctx context.Context, options *ContainerClientCreateOptions, containerCPKScopeInfo *ContainerCPKScopeInfo) (ContainerClientCreateResponse, error) { + var err error req, err := client.createCreateRequest(ctx, options, containerCPKScopeInfo) if err != nil { return ContainerClientCreateResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientCreateResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return ContainerClientCreateResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return ContainerClientCreateResponse{}, err } - return client.createHandleResponse(resp) + resp, err := client.createHandleResponse(httpResp) + return resp, err } // createCreateRequest creates the Create request. @@ -368,6 +379,16 @@ func (client *ContainerClient) createCreateRequest(ctx context.Context, options // createHandleResponse handles the Create response. func (client *ContainerClient) createHandleResponse(resp *http.Response) (ContainerClientCreateResponse, error) { result := ContainerClientCreateResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientCreateResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -378,22 +399,12 @@ func (client *ContainerClient) createHandleResponse(resp *http.Response) (Contai } result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientCreateResponse{}, err - } - result.Date = &date - } return result, nil } @@ -406,18 +417,21 @@ func (client *ContainerClient) createHandleResponse(resp *http.Response) (Contai // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
func (client *ContainerClient) Delete(ctx context.Context, options *ContainerClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientDeleteResponse, error) { + var err error req, err := client.deleteCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) if err != nil { return ContainerClientDeleteResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientDeleteResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusAccepted) { - return ContainerClientDeleteResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return ContainerClientDeleteResponse{}, err } - return client.deleteHandleResponse(resp) + resp, err := client.deleteHandleResponse(httpResp) + return resp, err } // deleteCreateRequest creates the Delete request. @@ -455,12 +469,6 @@ func (client *ContainerClient) deleteHandleResponse(resp *http.Response) (Contai if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -468,6 +476,12 @@ func (client *ContainerClient) deleteHandleResponse(resp *http.Response) (Contai } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } @@ -479,18 +493,21 @@ func (client *ContainerClient) deleteHandleResponse(resp *http.Response) (Contai // - where - Filters the results to return only to return only blobs whose tags match the specified expression. // - options - ContainerClientFilterBlobsOptions contains the optional parameters for the ContainerClient.FilterBlobs method. func (client *ContainerClient) FilterBlobs(ctx context.Context, where string, options *ContainerClientFilterBlobsOptions) (ContainerClientFilterBlobsResponse, error) { + var err error req, err := client.filterBlobsCreateRequest(ctx, where, options) if err != nil { return ContainerClientFilterBlobsResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientFilterBlobsResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ContainerClientFilterBlobsResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ContainerClientFilterBlobsResponse{}, err } - return client.filterBlobsHandleResponse(resp) + resp, err := client.filterBlobsHandleResponse(httpResp) + return resp, err } // filterBlobsCreateRequest creates the FilterBlobs request. 
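Every method in the regenerated clients picks up the same mechanical rewrite seen in the Delete and FilterBlobs hunks above: err is declared up front, the raw *http.Response is named httpResp instead of resp, and the unexpected-status path assigns runtime.NewResponseError(httpResp) to err before returning, so the whole method funnels through one err variable (plausibly to give later instrumentation a single value to observe). Below is a minimal runnable sketch of that shape, not part of the patch: the get helper, module name, and URL are hypothetical, while runtime.NewRequest, Pipeline.Do, runtime.HasStatusCode, and runtime.NewResponseError are the real azcore APIs used throughout this file.

package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)

// get mirrors the regenerated method shape: a named err, httpResp for the
// raw response, and a NewResponseError assignment on unexpected status codes.
func get(ctx context.Context, pl runtime.Pipeline, url string) (*http.Response, error) {
	var err error
	req, err := runtime.NewRequest(ctx, http.MethodGet, url)
	if err != nil {
		return nil, err
	}
	httpResp, err := pl.Do(req)
	if err != nil {
		return nil, err
	}
	if !runtime.HasStatusCode(httpResp, http.StatusOK) {
		err = runtime.NewResponseError(httpResp)
		return nil, err
	}
	return httpResp, err
}

func main() {
	// NewPipeline with empty options yields a default transport; the module
	// name and version here are placeholders.
	pl := runtime.NewPipeline("example", "v0.0.1", runtime.PipelineOptions{}, &policy.ClientOptions{})
	resp, err := get(context.Background(), pl, "https://example.com/")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	fmt.Println("status:", resp.Status)
}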
@@ -530,12 +547,6 @@ func (client *ContainerClient) filterBlobsHandleResponse(resp *http.Response) (C if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -543,6 +554,12 @@ func (client *ContainerClient) filterBlobsHandleResponse(resp *http.Response) (C } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } if err := runtime.UnmarshalAsXML(resp, &result.FilterBlobSegment); err != nil { return ContainerClientFilterBlobsResponse{}, err } @@ -558,18 +575,21 @@ func (client *ContainerClient) filterBlobsHandleResponse(resp *http.Response) (C // method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. func (client *ContainerClient) GetAccessPolicy(ctx context.Context, options *ContainerClientGetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions) (ContainerClientGetAccessPolicyResponse, error) { + var err error req, err := client.getAccessPolicyCreateRequest(ctx, options, leaseAccessConditions) if err != nil { return ContainerClientGetAccessPolicyResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientGetAccessPolicyResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ContainerClientGetAccessPolicyResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ContainerClientGetAccessPolicyResponse{}, err } - return client.getAccessPolicyHandleResponse(resp) + resp, err := client.getAccessPolicyHandleResponse(httpResp) + return resp, err } // getAccessPolicyCreateRequest creates the GetAccessPolicy request. 
@@ -602,6 +622,16 @@ func (client *ContainerClient) getAccessPolicyHandleResponse(resp *http.Response if val := resp.Header.Get("x-ms-blob-public-access"); val != "" { result.BlobPublicAccess = (*PublicAccessType)(&val) } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientGetAccessPolicyResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -612,22 +642,12 @@ func (client *ContainerClient) getAccessPolicyHandleResponse(resp *http.Response } result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientGetAccessPolicyResponse{}, err - } - result.Date = &date - } if err := runtime.UnmarshalAsXML(resp, &result); err != nil { return ContainerClientGetAccessPolicyResponse{}, err } @@ -641,18 +661,21 @@ func (client *ContainerClient) getAccessPolicyHandleResponse(resp *http.Response // - options - ContainerClientGetAccountInfoOptions contains the optional parameters for the ContainerClient.GetAccountInfo // method. func (client *ContainerClient) GetAccountInfo(ctx context.Context, options *ContainerClientGetAccountInfoOptions) (ContainerClientGetAccountInfoResponse, error) { + var err error req, err := client.getAccountInfoCreateRequest(ctx, options) if err != nil { return ContainerClientGetAccountInfoResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientGetAccountInfoResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ContainerClientGetAccountInfoResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ContainerClientGetAccountInfoResponse{}, err } - return client.getAccountInfoHandleResponse(resp) + resp, err := client.getAccountInfoHandleResponse(httpResp) + return resp, err } // getAccountInfoCreateRequest creates the GetAccountInfo request. @@ -673,15 +696,12 @@ func (client *ContainerClient) getAccountInfoCreateRequest(ctx context.Context, // getAccountInfoHandleResponse handles the GetAccountInfo response. 
func (client *ContainerClient) getAccountInfoHandleResponse(resp *http.Response) (ContainerClientGetAccountInfoResponse, error) { result := ContainerClientGetAccountInfoResponse{} + if val := resp.Header.Get("x-ms-account-kind"); val != "" { + result.AccountKind = (*AccountKind)(&val) + } if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -689,11 +709,14 @@ func (client *ContainerClient) getAccountInfoHandleResponse(resp *http.Response) } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } if val := resp.Header.Get("x-ms-sku-name"); val != "" { result.SKUName = (*SKUName)(&val) } - if val := resp.Header.Get("x-ms-account-kind"); val != "" { - result.AccountKind = (*AccountKind)(&val) + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val } return result, nil } @@ -706,18 +729,21 @@ func (client *ContainerClient) getAccountInfoHandleResponse(resp *http.Response) // - options - ContainerClientGetPropertiesOptions contains the optional parameters for the ContainerClient.GetProperties method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. func (client *ContainerClient) GetProperties(ctx context.Context, options *ContainerClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions) (ContainerClientGetPropertiesResponse, error) { + var err error req, err := client.getPropertiesCreateRequest(ctx, options, leaseAccessConditions) if err != nil { return ContainerClientGetPropertiesResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientGetPropertiesResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ContainerClientGetPropertiesResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ContainerClientGetPropertiesResponse{}, err } - return client.getPropertiesHandleResponse(resp) + resp, err := client.getPropertiesHandleResponse(httpResp) + return resp, err } // getPropertiesCreateRequest creates the GetProperties request. @@ -746,42 +772,12 @@ func (client *ContainerClient) getPropertiesCreateRequest(ctx context.Context, o // getPropertiesHandleResponse handles the GetProperties response. 
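The handler reorderings above, and the larger GetProperties hunk just below, are churn from a single generator change: response headers are now read in alphabetical order. The conversions themselves are unchanged: base64 for the Content-MD5 and CRC64 checksum headers, time.RFC1123 for Date and Last-Modified, strconv.ParseBool for x-ms-* flags, and a case-insensitive prefix scan that collects x-ms-meta-* headers into a map. A self-contained sketch of those conversions follows; the parsed struct is a hypothetical stand-in for the generated response types.

package main

import (
	"encoding/base64"
	"fmt"
	"net/http"
	"strconv"
	"strings"
	"time"
)

// parsed collects a few typical response headers using the same conversions
// the regenerated handlers apply.
type parsed struct {
	ContentMD5        []byte
	Date              *time.Time
	IsServerEncrypted *bool
	Metadata          map[string]*string
}

func parseHeaders(h http.Header) (parsed, error) {
	result := parsed{}
	if val := h.Get("Content-MD5"); val != "" {
		contentMD5, err := base64.StdEncoding.DecodeString(val)
		if err != nil {
			return parsed{}, err
		}
		result.ContentMD5 = contentMD5
	}
	if val := h.Get("Date"); val != "" {
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return parsed{}, err
		}
		result.Date = &date
	}
	if val := h.Get("x-ms-request-server-encrypted"); val != "" {
		b, err := strconv.ParseBool(val)
		if err != nil {
			return parsed{}, err
		}
		result.IsServerEncrypted = &b
	}
	// Same prefix scan as the generated code; EqualFold copes with
	// http.Header's key canonicalization.
	for hh := range h {
		if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") {
			if result.Metadata == nil {
				result.Metadata = map[string]*string{}
			}
			v := h.Get(hh)
			result.Metadata[hh[len("x-ms-meta-"):]] = &v
		}
	}
	return result, nil
}

func main() {
	h := http.Header{}
	h.Set("Date", "Fri, 20 Dec 2024 17:02:16 GMT")
	h.Set("x-ms-meta-owner", "tempo") // stored canonicalized as X-Ms-Meta-Owner
	p, err := parseHeaders(h)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("date:", p.Date)
	for k, v := range p.Metadata {
		// Note the canonicalized key: "Owner", not "owner".
		fmt.Println("meta:", k, "=", *v)
	}
}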
func (client *ContainerClient) getPropertiesHandleResponse(resp *http.Response) (ContainerClientGetPropertiesResponse, error) { result := ContainerClientGetPropertiesResponse{} - for hh := range resp.Header { - if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { - if result.Metadata == nil { - result.Metadata = map[string]*string{} - } - result.Metadata[hh[len("x-ms-meta-"):]] = to.Ptr(resp.Header.Get(hh)) - } - } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientGetPropertiesResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("x-ms-lease-duration"); val != "" { - result.LeaseDuration = (*LeaseDurationType)(&val) - } - if val := resp.Header.Get("x-ms-lease-state"); val != "" { - result.LeaseState = (*LeaseStateType)(&val) - } - if val := resp.Header.Get("x-ms-lease-status"); val != "" { - result.LeaseStatus = (*LeaseStatusType)(&val) + if val := resp.Header.Get("x-ms-blob-public-access"); val != "" { + result.BlobPublicAccess = (*PublicAccessType)(&val) } if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -789,8 +785,18 @@ func (client *ContainerClient) getPropertiesHandleResponse(resp *http.Response) } result.Date = &date } - if val := resp.Header.Get("x-ms-blob-public-access"); val != "" { - result.BlobPublicAccess = (*PublicAccessType)(&val) + if val := resp.Header.Get("x-ms-default-encryption-scope"); val != "" { + result.DefaultEncryptionScope = &val + } + if val := resp.Header.Get("x-ms-deny-encryption-scope-override"); val != "" { + denyEncryptionScopeOverride, err := strconv.ParseBool(val) + if err != nil { + return ContainerClientGetPropertiesResponse{}, err + } + result.DenyEncryptionScopeOverride = &denyEncryptionScopeOverride + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) } if val := resp.Header.Get("x-ms-has-immutability-policy"); val != "" { hasImmutabilityPolicy, err := strconv.ParseBool(val) @@ -806,22 +812,42 @@ func (client *ContainerClient) getPropertiesHandleResponse(resp *http.Response) } result.HasLegalHold = &hasLegalHold } - if val := resp.Header.Get("x-ms-default-encryption-scope"); val != "" { - result.DefaultEncryptionScope = &val - } - if val := resp.Header.Get("x-ms-deny-encryption-scope-override"); val != "" { - denyEncryptionScopeOverride, err := strconv.ParseBool(val) + if val := resp.Header.Get("x-ms-immutable-storage-with-versioning-enabled"); val != "" { + isImmutableStorageWithVersioningEnabled, err := strconv.ParseBool(val) if err != nil { return ContainerClientGetPropertiesResponse{}, err } - result.DenyEncryptionScopeOverride = &denyEncryptionScopeOverride + result.IsImmutableStorageWithVersioningEnabled = &isImmutableStorageWithVersioningEnabled } - if val := resp.Header.Get("x-ms-immutable-storage-with-versioning-enabled"); val != "" { - isImmutableStorageWithVersioningEnabled, err := strconv.ParseBool(val) + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) 
if err != nil { return ContainerClientGetPropertiesResponse{}, err } - result.IsImmutableStorageWithVersioningEnabled = &isImmutableStorageWithVersioningEnabled + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-lease-duration"); val != "" { + result.LeaseDuration = (*LeaseDurationType)(&val) + } + if val := resp.Header.Get("x-ms-lease-state"); val != "" { + result.LeaseState = (*LeaseStateType)(&val) + } + if val := resp.Header.Get("x-ms-lease-status"); val != "" { + result.LeaseStatus = (*LeaseStatusType)(&val) + } + for hh := range resp.Header { + if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { + if result.Metadata == nil { + result.Metadata = map[string]*string{} + } + result.Metadata[hh[len("x-ms-meta-"):]] = to.Ptr(resp.Header.Get(hh)) + } + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val } return result, nil } @@ -868,17 +894,11 @@ func (client *ContainerClient) ListBlobFlatSegmentCreateRequest(ctx context.Cont // listBlobFlatSegmentHandleResponse handles the ListBlobFlatSegment response. func (client *ContainerClient) ListBlobFlatSegmentHandleResponse(resp *http.Response) (ContainerClientListBlobFlatSegmentResponse, error) { result := ContainerClientListBlobFlatSegmentResponse{} - if val := resp.Header.Get("Content-Type"); val != "" { - result.ContentType = &val - } if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) @@ -887,6 +907,12 @@ func (client *ContainerClient) ListBlobFlatSegmentHandleResponse(resp *http.Resp } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } if err := runtime.UnmarshalAsXML(resp, &result.ListBlobsFlatSegmentResponse); err != nil { return ContainerClientListBlobFlatSegmentResponse{}, err } @@ -907,23 +933,16 @@ func (client *ContainerClient) NewListBlobHierarchySegmentPager(delimiter string return page.NextMarker != nil && len(*page.NextMarker) > 0 }, Fetcher: func(ctx context.Context, page *ContainerClientListBlobHierarchySegmentResponse) (ContainerClientListBlobHierarchySegmentResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.ListBlobHierarchySegmentCreateRequest(ctx, delimiter, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextMarker) + nextLink := "" + if page != nil { + nextLink = *page.NextMarker } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.ListBlobHierarchySegmentCreateRequest(ctx, delimiter, options) + }, nil) if err != nil { return ContainerClientListBlobHierarchySegmentResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return ContainerClientListBlobHierarchySegmentResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ContainerClientListBlobHierarchySegmentResponse{}, 
runtime.NewResponseError(resp) - } return client.ListBlobHierarchySegmentHandleResponse(resp) }, }) @@ -966,17 +985,11 @@ func (client *ContainerClient) ListBlobHierarchySegmentCreateRequest(ctx context // ListBlobHierarchySegmentHandleResponse handles the ListBlobHierarchySegment response. func (client *ContainerClient) ListBlobHierarchySegmentHandleResponse(resp *http.Response) (ContainerClientListBlobHierarchySegmentResponse, error) { result := ContainerClientListBlobHierarchySegmentResponse{} - if val := resp.Header.Get("Content-Type"); val != "" { - result.ContentType = &val - } if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) @@ -985,6 +998,12 @@ func (client *ContainerClient) ListBlobHierarchySegmentHandleResponse(resp *http } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } if err := runtime.UnmarshalAsXML(resp, &result.ListBlobsHierarchySegmentResponse); err != nil { return ContainerClientListBlobHierarchySegmentResponse{}, err } @@ -1000,18 +1019,21 @@ func (client *ContainerClient) ListBlobHierarchySegmentHandleResponse(resp *http // - options - ContainerClientReleaseLeaseOptions contains the optional parameters for the ContainerClient.ReleaseLease method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *ContainerClient) ReleaseLease(ctx context.Context, leaseID string, options *ContainerClientReleaseLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientReleaseLeaseResponse, error) { + var err error req, err := client.releaseLeaseCreateRequest(ctx, leaseID, options, modifiedAccessConditions) if err != nil { return ContainerClientReleaseLeaseResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientReleaseLeaseResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ContainerClientReleaseLeaseResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ContainerClientReleaseLeaseResponse{}, err } - return client.releaseLeaseHandleResponse(resp) + resp, err := client.releaseLeaseHandleResponse(httpResp) + return resp, err } // releaseLeaseCreateRequest creates the ReleaseLease request. @@ -1046,6 +1068,16 @@ func (client *ContainerClient) releaseLeaseCreateRequest(ctx context.Context, le // releaseLeaseHandleResponse handles the ReleaseLease response. 
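The one change in this file that is more than a reshuffle is NewListBlobHierarchySegmentPager, above: the hand-rolled first-page/next-page branching, the explicit Pipeline().Do call, and the inline status check are replaced by runtime.FetcherForNextLink, which issues the first request when nextLink is empty, GETs nextLink otherwise, and performs the success-status check itself (200 by default). A condensed sketch of that paging shape follows; the page type and endpoint are hypothetical stand-ins, while runtime.NewPager, runtime.PagingHandler, and runtime.FetcherForNextLink are the real azcore APIs adopted here.

package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)

// page stands in for a generated segment response such as
// ContainerClientListBlobHierarchySegmentResponse.
type page struct {
	NextMarker *string
}

func newPager(pl runtime.Pipeline, endpoint string) *runtime.Pager[page] {
	return runtime.NewPager(runtime.PagingHandler[page]{
		More: func(p page) bool {
			return p.NextMarker != nil && len(*p.NextMarker) > 0
		},
		Fetcher: func(ctx context.Context, current *page) (page, error) {
			nextLink := ""
			if current != nil {
				nextLink = *current.NextMarker
			}
			// FetcherForNextLink folds the first-request/next-link split and
			// the status-code check into a single call.
			resp, err := runtime.FetcherForNextLink(ctx, pl, nextLink, func(ctx context.Context) (*policy.Request, error) {
				return runtime.NewRequest(ctx, http.MethodGet, endpoint)
			}, nil)
			if err != nil {
				return page{}, err
			}
			var result page
			// A real handler would unmarshal resp here, e.g. via runtime.UnmarshalAsXML.
			_ = resp
			return result, nil
		},
	})
}

func main() {
	pl := runtime.NewPipeline("example", "v0.0.1", runtime.PipelineOptions{}, &policy.ClientOptions{})
	pager := newPager(pl, "https://example.com/list")
	for pager.More() {
		p, err := pager.NextPage(context.Background())
		if err != nil {
			fmt.Println("page fetch failed:", err)
			return
		}
		_ = p
	}
}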
func (client *ContainerClient) releaseLeaseHandleResponse(resp *http.Response) (ContainerClientReleaseLeaseResponse, error) { result := ContainerClientReleaseLeaseResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientReleaseLeaseResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -1056,22 +1088,12 @@ func (client *ContainerClient) releaseLeaseHandleResponse(resp *http.Response) ( } result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientReleaseLeaseResponse{}, err - } - result.Date = &date - } return result, nil } @@ -1082,18 +1104,21 @@ func (client *ContainerClient) releaseLeaseHandleResponse(resp *http.Response) ( // - sourceContainerName - Required. Specifies the name of the container to rename. // - options - ContainerClientRenameOptions contains the optional parameters for the ContainerClient.Rename method. func (client *ContainerClient) Rename(ctx context.Context, sourceContainerName string, options *ContainerClientRenameOptions) (ContainerClientRenameResponse, error) { + var err error req, err := client.renameCreateRequest(ctx, sourceContainerName, options) if err != nil { return ContainerClientRenameResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientRenameResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ContainerClientRenameResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ContainerClientRenameResponse{}, err } - return client.renameHandleResponse(resp) + resp, err := client.renameHandleResponse(httpResp) + return resp, err } // renameCreateRequest creates the Rename request. @@ -1127,12 +1152,6 @@ func (client *ContainerClient) renameHandleResponse(resp *http.Response) (Contai if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -1140,6 +1159,12 @@ func (client *ContainerClient) renameHandleResponse(resp *http.Response) (Contai } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } @@ -1152,18 +1177,21 @@ func (client *ContainerClient) renameHandleResponse(resp *http.Response) (Contai // - options - ContainerClientRenewLeaseOptions contains the optional parameters for the ContainerClient.RenewLease method. 
// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *ContainerClient) RenewLease(ctx context.Context, leaseID string, options *ContainerClientRenewLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientRenewLeaseResponse, error) { + var err error req, err := client.renewLeaseCreateRequest(ctx, leaseID, options, modifiedAccessConditions) if err != nil { return ContainerClientRenewLeaseResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientRenewLeaseResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ContainerClientRenewLeaseResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ContainerClientRenewLeaseResponse{}, err } - return client.renewLeaseHandleResponse(resp) + resp, err := client.renewLeaseHandleResponse(httpResp) + return resp, err } // renewLeaseCreateRequest creates the RenewLease request. @@ -1198,6 +1226,16 @@ func (client *ContainerClient) renewLeaseCreateRequest(ctx context.Context, leas // renewLeaseHandleResponse handles the RenewLease response. func (client *ContainerClient) renewLeaseHandleResponse(resp *http.Response) (ContainerClientRenewLeaseResponse, error) { result := ContainerClientRenewLeaseResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientRenewLeaseResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -1211,22 +1249,12 @@ func (client *ContainerClient) renewLeaseHandleResponse(resp *http.Response) (Co if val := resp.Header.Get("x-ms-lease-id"); val != "" { result.LeaseID = &val } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientRenewLeaseResponse{}, err - } - result.Date = &date - } return result, nil } @@ -1236,18 +1264,21 @@ func (client *ContainerClient) renewLeaseHandleResponse(resp *http.Response) (Co // Generated from API version 2023-08-03 // - options - ContainerClientRestoreOptions contains the optional parameters for the ContainerClient.Restore method. 
func (client *ContainerClient) Restore(ctx context.Context, options *ContainerClientRestoreOptions) (ContainerClientRestoreResponse, error) { + var err error req, err := client.restoreCreateRequest(ctx, options) if err != nil { return ContainerClientRestoreResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientRestoreResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return ContainerClientRestoreResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return ContainerClientRestoreResponse{}, err } - return client.restoreHandleResponse(resp) + resp, err := client.restoreHandleResponse(httpResp) + return resp, err } // restoreCreateRequest creates the Restore request. @@ -1283,12 +1314,6 @@ func (client *ContainerClient) restoreHandleResponse(resp *http.Response) (Conta if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -1296,6 +1321,12 @@ func (client *ContainerClient) restoreHandleResponse(resp *http.Response) (Conta } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } @@ -1310,18 +1341,21 @@ func (client *ContainerClient) restoreHandleResponse(resp *http.Response) (Conta // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *ContainerClient) SetAccessPolicy(ctx context.Context, containerACL []*SignedIdentifier, options *ContainerClientSetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientSetAccessPolicyResponse, error) { + var err error req, err := client.setAccessPolicyCreateRequest(ctx, containerACL, options, leaseAccessConditions, modifiedAccessConditions) if err != nil { return ContainerClientSetAccessPolicyResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientSetAccessPolicyResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ContainerClientSetAccessPolicyResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ContainerClientSetAccessPolicyResponse{}, err } - return client.setAccessPolicyHandleResponse(resp) + resp, err := client.setAccessPolicyHandleResponse(httpResp) + return resp, err } // setAccessPolicyCreateRequest creates the SetAccessPolicy request. @@ -1367,6 +1401,16 @@ func (client *ContainerClient) setAccessPolicyCreateRequest(ctx context.Context, // setAccessPolicyHandleResponse handles the SetAccessPolicy response. 
func (client *ContainerClient) setAccessPolicyHandleResponse(resp *http.Response) (ContainerClientSetAccessPolicyResponse, error) { result := ContainerClientSetAccessPolicyResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientSetAccessPolicyResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -1377,22 +1421,12 @@ func (client *ContainerClient) setAccessPolicyHandleResponse(resp *http.Response } result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientSetAccessPolicyResponse{}, err - } - result.Date = &date - } return result, nil } @@ -1404,18 +1438,21 @@ func (client *ContainerClient) setAccessPolicyHandleResponse(resp *http.Response // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *ContainerClient) SetMetadata(ctx context.Context, options *ContainerClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientSetMetadataResponse, error) { + var err error req, err := client.setMetadataCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) if err != nil { return ContainerClientSetMetadataResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientSetMetadataResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ContainerClientSetMetadataResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ContainerClientSetMetadataResponse{}, err } - return client.setMetadataHandleResponse(resp) + resp, err := client.setMetadataHandleResponse(httpResp) + return resp, err } // setMetadataCreateRequest creates the SetMetadata request. @@ -1455,6 +1492,16 @@ func (client *ContainerClient) setMetadataCreateRequest(ctx context.Context, opt // setMetadataHandleResponse handles the SetMetadata response. 
func (client *ContainerClient) setMetadataHandleResponse(resp *http.Response) (ContainerClientSetMetadataResponse, error) { result := ContainerClientSetMetadataResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientSetMetadataResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -1465,22 +1512,12 @@ func (client *ContainerClient) setMetadataHandleResponse(resp *http.Response) (C } result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientSetMetadataResponse{}, err - } - result.Date = &date - } return result, nil } @@ -1494,18 +1531,21 @@ func (client *ContainerClient) setMetadataHandleResponse(resp *http.Response) (C // - body - Initial data // - options - ContainerClientSubmitBatchOptions contains the optional parameters for the ContainerClient.SubmitBatch method. func (client *ContainerClient) SubmitBatch(ctx context.Context, contentLength int64, multipartContentType string, body io.ReadSeekCloser, options *ContainerClientSubmitBatchOptions) (ContainerClientSubmitBatchResponse, error) { + var err error req, err := client.submitBatchCreateRequest(ctx, contentLength, multipartContentType, body, options) if err != nil { return ContainerClientSubmitBatchResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientSubmitBatchResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusAccepted) { - return ContainerClientSubmitBatchResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return ContainerClientSubmitBatchResponse{}, err } - return client.submitBatchHandleResponse(resp) + resp, err := client.submitBatchHandleResponse(httpResp) + return resp, err } // submitBatchCreateRequest creates the SubmitBatch request. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models.go index 1fed5f630bd..7251de83952 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models.go @@ -3,9 +3,8 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. // Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. 
package generated @@ -26,89 +25,6 @@ type AccessPolicy struct { Start *time.Time `xml:"Start"` } -// AppendBlobClientAppendBlockFromURLOptions contains the optional parameters for the AppendBlobClient.AppendBlockFromURL -// method. -type AppendBlobClientAppendBlockFromURLOptions struct { - // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. - CopySourceAuthorization *string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // Specify the md5 calculated for the range of bytes that must be read from the copy source. - SourceContentMD5 []byte - // Specify the crc64 calculated for the range of bytes that must be read from the copy source. - SourceContentcrc64 []byte - // Bytes of source data in the specified range. - SourceRange *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - // Specify the transactional md5 for the body, to be validated by the service. - TransactionalContentMD5 []byte -} - -// AppendBlobClientAppendBlockOptions contains the optional parameters for the AppendBlobClient.AppendBlock method. -type AppendBlobClientAppendBlockOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - // Specify the transactional crc64 for the body, to be validated by the service. - TransactionalContentCRC64 []byte - // Specify the transactional md5 for the body, to be validated by the service. - TransactionalContentMD5 []byte -} - -// AppendBlobClientCreateOptions contains the optional parameters for the AppendBlobClient.Create method. -type AppendBlobClientCreateOptions struct { - // Optional. Used to set blob tags in various blob operations. - BlobTagsString *string - // Specifies the date time when the blobs immutability policy is set to expire. - ImmutabilityPolicyExpiry *time.Time - // Specifies the immutability policy mode to set on the blob. - ImmutabilityPolicyMode *ImmutabilityPolicySetting - // Specified if a legal hold should be set on the blob. - LegalHold *bool - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination - // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata - // is not copied from the source blob or file. Note that beginning with - // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, - // Blobs, and Metadata for more information. - Metadata map[string]*string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. 
- RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// AppendBlobClientSealOptions contains the optional parameters for the AppendBlobClient.Seal method. -type AppendBlobClientSealOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// AppendPositionAccessConditions contains a group of parameters for the AppendBlobClient.AppendBlock method. -type AppendPositionAccessConditions struct { - // Optional conditional header, used only for the Append Block operation. A number indicating the byte offset to compare. - // Append Block will succeed only if the append position is equal to this number. If - // it is not, the request will fail with the AppendPositionConditionNotMet error (HTTP status code 412 - Precondition Failed). - AppendPosition *int64 - // Optional conditional header. The max length in bytes permitted for the append blob. If the Append Block operation would - // cause the blob to exceed that limit or if the blob size is already greater than - // the value specified in this header, the request will fail with MaxBlobSizeConditionNotMet error (HTTP status code 412 - - // Precondition Failed). - MaxSize *int64 -} - // ArrowConfiguration - Groups the settings used for formatting the response if the response should be Arrow formatted. type ArrowConfiguration struct { // REQUIRED @@ -124,405 +40,11 @@ type ArrowField struct { Scale *int32 `xml:"Scale"` } -// BlobClientAbortCopyFromURLOptions contains the optional parameters for the BlobClient.AbortCopyFromURL method. -type BlobClientAbortCopyFromURLOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientAcquireLeaseOptions contains the optional parameters for the BlobClient.AcquireLease method. -type BlobClientAcquireLeaseOptions struct { - // Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is - // not in the correct format. See Guid Constructor (String) for a list of valid GUID - // string formats. - ProposedLeaseID *string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
- // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientBreakLeaseOptions contains the optional parameters for the BlobClient.BreakLease method. -type BlobClientBreakLeaseOptions struct { - // For a break operation, proposed duration the lease should continue before it is broken, in seconds, between 0 and 60. This - // break period is only used if it is shorter than the time remaining on the - // lease. If longer, the time remaining on the lease is used. A new lease will not be available before the break period has - // expired, but the lease may be held for longer than the break period. If this - // header does not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses, - // and an infinite lease breaks immediately. - BreakPeriod *int32 - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientChangeLeaseOptions contains the optional parameters for the BlobClient.ChangeLease method. -type BlobClientChangeLeaseOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientCopyFromURLOptions contains the optional parameters for the BlobClient.CopyFromURL method. -type BlobClientCopyFromURLOptions struct { - // Optional. Used to set blob tags in various blob operations. - BlobTagsString *string - // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. - CopySourceAuthorization *string - // Optional, default 'replace'. Indicates if source tags should be copied or replaced with the tags specified by x-ms-tags. - CopySourceTags *BlobCopySourceTags - // Specifies the date time when the blobs immutability policy is set to expire. - ImmutabilityPolicyExpiry *time.Time - // Specifies the immutability policy mode to set on the blob. - ImmutabilityPolicyMode *ImmutabilityPolicySetting - // Specified if a legal hold should be set on the blob. - LegalHold *bool - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination - // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata - // is not copied from the source blob or file. Note that beginning with - // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, - // Blobs, and Metadata for more information. 
- Metadata map[string]*string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // Specify the md5 calculated for the range of bytes that must be read from the copy source. - SourceContentMD5 []byte - // Optional. Indicates the tier to be set on the blob. - Tier *AccessTier - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientCreateSnapshotOptions contains the optional parameters for the BlobClient.CreateSnapshot method. -type BlobClientCreateSnapshotOptions struct { - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination - // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata - // is not copied from the source blob or file. Note that beginning with - // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, - // Blobs, and Metadata for more information. - Metadata map[string]*string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientDeleteImmutabilityPolicyOptions contains the optional parameters for the BlobClient.DeleteImmutabilityPolicy -// method. -type BlobClientDeleteImmutabilityPolicyOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientDeleteOptions contains the optional parameters for the BlobClient.Delete method. -type BlobClientDeleteOptions struct { - // Required if the blob has associated snapshots. Specify one of the following two options: include: Delete the base blob - // and all of its snapshots. only: Delete only the blob's snapshots and not the blob - // itself - DeleteSnapshots *DeleteSnapshotsOptionType - // Optional. Only possible value is 'permanent', which specifies to permanently delete a blob if blob soft delete is enabled. - DeleteType *DeleteType - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more - // information on working with blob snapshots, see Creating a Snapshot of a Blob. 
- // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] - Snapshot *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. - // It's for service version 2019-10-10 and newer. - VersionID *string -} - -// BlobClientDownloadOptions contains the optional parameters for the BlobClient.Download method. -type BlobClientDownloadOptions struct { - // Return only the bytes of the blob in the specified range. - Range *string - // When set to true and specified together with the Range, the service returns the CRC64 hash for the range, as long as the - // range is less than or equal to 4 MB in size. - RangeGetContentCRC64 *bool - // When set to true and specified together with the Range, the service returns the MD5 hash for the range, as long as the - // range is less than or equal to 4 MB in size. - RangeGetContentMD5 *bool - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more - // information on working with blob snapshots, see Creating a Snapshot of a Blob. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] - Snapshot *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. - // It's for service version 2019-10-10 and newer. - VersionID *string -} - -// BlobClientGetAccountInfoOptions contains the optional parameters for the BlobClient.GetAccountInfo method. -type BlobClientGetAccountInfoOptions struct { - // placeholder for future optional parameters -} - -// BlobClientGetPropertiesOptions contains the optional parameters for the BlobClient.GetProperties method. -type BlobClientGetPropertiesOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more - // information on working with blob snapshots, see Creating a Snapshot of a Blob. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] - Snapshot *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. - // It's for service version 2019-10-10 and newer. 
- VersionID *string -} - -// BlobClientGetTagsOptions contains the optional parameters for the BlobClient.GetTags method. -type BlobClientGetTagsOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more - // information on working with blob snapshots, see Creating a Snapshot of a Blob. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] - Snapshot *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. - // It's for service version 2019-10-10 and newer. - VersionID *string -} - -// BlobClientQueryOptions contains the optional parameters for the BlobClient.Query method. -type BlobClientQueryOptions struct { - // the query request - QueryRequest *QueryRequest - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more - // information on working with blob snapshots, see Creating a Snapshot of a Blob. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] - Snapshot *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientReleaseLeaseOptions contains the optional parameters for the BlobClient.ReleaseLease method. -type BlobClientReleaseLeaseOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientRenewLeaseOptions contains the optional parameters for the BlobClient.RenewLease method. -type BlobClientRenewLeaseOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientSetExpiryOptions contains the optional parameters for the BlobClient.SetExpiry method. 
-type BlobClientSetExpiryOptions struct { - // The time to set the blob to expiry - ExpiresOn *string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientSetHTTPHeadersOptions contains the optional parameters for the BlobClient.SetHTTPHeaders method. -type BlobClientSetHTTPHeadersOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientSetImmutabilityPolicyOptions contains the optional parameters for the BlobClient.SetImmutabilityPolicy method. -type BlobClientSetImmutabilityPolicyOptions struct { - // Specifies the date time when the blobs immutability policy is set to expire. - ImmutabilityPolicyExpiry *time.Time - // Specifies the immutability policy mode to set on the blob. - ImmutabilityPolicyMode *ImmutabilityPolicySetting - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientSetLegalHoldOptions contains the optional parameters for the BlobClient.SetLegalHold method. -type BlobClientSetLegalHoldOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientSetMetadataOptions contains the optional parameters for the BlobClient.SetMetadata method. -type BlobClientSetMetadataOptions struct { - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination - // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata - // is not copied from the source blob or file. Note that beginning with - // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, - // Blobs, and Metadata for more information. 
- Metadata map[string]*string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientSetTagsOptions contains the optional parameters for the BlobClient.SetTags method. -type BlobClientSetTagsOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - // Specify the transactional crc64 for the body, to be validated by the service. - TransactionalContentCRC64 []byte - // Specify the transactional md5 for the body, to be validated by the service. - TransactionalContentMD5 []byte - // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. - // It's for service version 2019-10-10 and newer. - VersionID *string -} - -// BlobClientSetTierOptions contains the optional parameters for the BlobClient.SetTier method. -type BlobClientSetTierOptions struct { - // Optional: Indicates the priority with which to rehydrate an archived blob. - RehydratePriority *RehydratePriority - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more - // information on working with blob snapshots, see Creating a Snapshot of a Blob. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] - Snapshot *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. - // It's for service version 2019-10-10 and newer. - VersionID *string -} - -// BlobClientStartCopyFromURLOptions contains the optional parameters for the BlobClient.StartCopyFromURL method. -type BlobClientStartCopyFromURLOptions struct { - // Optional. Used to set blob tags in various blob operations. - BlobTagsString *string - // Specifies the date time when the blobs immutability policy is set to expire. - ImmutabilityPolicyExpiry *time.Time - // Specifies the immutability policy mode to set on the blob. - ImmutabilityPolicyMode *ImmutabilityPolicySetting - // Specified if a legal hold should be set on the blob. - LegalHold *bool - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination - // blob. 
If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata - // is not copied from the source blob or file. Note that beginning with - // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, - // Blobs, and Metadata for more information. - Metadata map[string]*string - // Optional: Indicates the priority with which to rehydrate an archived blob. - RehydratePriority *RehydratePriority - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // Overrides the sealed state of the destination blob. Service version 2019-12-12 and newer. - SealBlob *bool - // Optional. Indicates the tier to be set on the blob. - Tier *AccessTier - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlobClientUndeleteOptions contains the optional parameters for the BlobClient.Undelete method. -type BlobClientUndeleteOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - type BlobFlatListSegment struct { // REQUIRED BlobItems []*BlobItem `xml:"Blob"` } -// BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. -type BlobHTTPHeaders struct { - // Optional. Sets the blob's cache control. If specified, this property is stored with the blob and returned with a read request. - BlobCacheControl *string - // Optional. Sets the blob's Content-Disposition header. - BlobContentDisposition *string - // Optional. Sets the blob's content encoding. If specified, this property is stored with the blob and returned with a read - // request. - BlobContentEncoding *string - // Optional. Set the blob's content language. If specified, this property is stored with the blob and returned with a read - // request. - BlobContentLanguage *string - // Optional. An MD5 hash of the blob content. Note that this hash is not validated, as the hashes for the individual blocks - // were validated when each was uploaded. - BlobContentMD5 []byte - // Optional. Sets the blob's content type. If specified, this property is stored with the blob and returned with a read request. - BlobContentType *string -} - type BlobHierarchyListSegment struct { // REQUIRED BlobItems []*BlobItem `xml:"Blob"` @@ -646,145 +168,6 @@ type Block struct { Size *int64 `xml:"Size"` } -// BlockBlobClientCommitBlockListOptions contains the optional parameters for the BlockBlobClient.CommitBlockList method. -type BlockBlobClientCommitBlockListOptions struct { - // Optional. Used to set blob tags in various blob operations. - BlobTagsString *string - // Specifies the date time when the blobs immutability policy is set to expire. - ImmutabilityPolicyExpiry *time.Time - // Specifies the immutability policy mode to set on the blob. 
- ImmutabilityPolicyMode *ImmutabilityPolicySetting - // Specified if a legal hold should be set on the blob. - LegalHold *bool - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination - // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata - // is not copied from the source blob or file. Note that beginning with - // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, - // Blobs, and Metadata for more information. - Metadata map[string]*string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // Optional. Indicates the tier to be set on the blob. - Tier *AccessTier - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - // Specify the transactional crc64 for the body, to be validated by the service. - TransactionalContentCRC64 []byte - // Specify the transactional md5 for the body, to be validated by the service. - TransactionalContentMD5 []byte -} - -// BlockBlobClientGetBlockListOptions contains the optional parameters for the BlockBlobClient.GetBlockList method. -type BlockBlobClientGetBlockListOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more - // information on working with blob snapshots, see Creating a Snapshot of a Blob. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] - Snapshot *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlockBlobClientPutBlobFromURLOptions contains the optional parameters for the BlockBlobClient.PutBlobFromURL method. -type BlockBlobClientPutBlobFromURLOptions struct { - // Optional. Used to set blob tags in various blob operations. - BlobTagsString *string - // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. - CopySourceAuthorization *string - // Optional, default is true. Indicates if properties from the source blob should be copied. - CopySourceBlobProperties *bool - // Optional, default 'replace'. Indicates if source tags should be copied or replaced with the tags specified by x-ms-tags. - CopySourceTags *BlobCopySourceTags - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination - // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata - // is not copied from the source blob or file. 
Note that beginning with - // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, - // Blobs, and Metadata for more information. - Metadata map[string]*string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // Specify the md5 calculated for the range of bytes that must be read from the copy source. - SourceContentMD5 []byte - // Optional. Indicates the tier to be set on the blob. - Tier *AccessTier - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - // Specify the transactional md5 for the body, to be validated by the service. - TransactionalContentMD5 []byte -} - -// BlockBlobClientStageBlockFromURLOptions contains the optional parameters for the BlockBlobClient.StageBlockFromURL method. -type BlockBlobClientStageBlockFromURLOptions struct { - // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. - CopySourceAuthorization *string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // Specify the md5 calculated for the range of bytes that must be read from the copy source. - SourceContentMD5 []byte - // Specify the crc64 calculated for the range of bytes that must be read from the copy source. - SourceContentcrc64 []byte - // Bytes of source data in the specified range. - SourceRange *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// BlockBlobClientStageBlockOptions contains the optional parameters for the BlockBlobClient.StageBlock method. -type BlockBlobClientStageBlockOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - // Specify the transactional crc64 for the body, to be validated by the service. - TransactionalContentCRC64 []byte - // Specify the transactional md5 for the body, to be validated by the service. - TransactionalContentMD5 []byte -} - -// BlockBlobClientUploadOptions contains the optional parameters for the BlockBlobClient.Upload method. -type BlockBlobClientUploadOptions struct { - // Optional. Used to set blob tags in various blob operations. - BlobTagsString *string - // Specifies the date time when the blobs immutability policy is set to expire. - ImmutabilityPolicyExpiry *time.Time - // Specifies the immutability policy mode to set on the blob. - ImmutabilityPolicyMode *ImmutabilityPolicySetting - // Specified if a legal hold should be set on the blob. - LegalHold *bool - // Optional. 
Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination - // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata - // is not copied from the source blob or file. Note that beginning with - // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, - // Blobs, and Metadata for more information. - Metadata map[string]*string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // Optional. Indicates the tier to be set on the blob. - Tier *AccessTier - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - // Specify the transactional crc64 for the body, to be validated by the service. - TransactionalContentCRC64 []byte - // Specify the transactional md5 for the body, to be validated by the service. - TransactionalContentMD5 []byte -} - type BlockList struct { CommittedBlocks []*Block `xml:"CommittedBlocks>Block"` UncommittedBlocks []*Block `xml:"UncommittedBlocks>Block"` @@ -804,274 +187,6 @@ type ClearRange struct { Start *int64 `xml:"Start"` } -// ContainerClientAcquireLeaseOptions contains the optional parameters for the ContainerClient.AcquireLease method. -type ContainerClientAcquireLeaseOptions struct { - // Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is - // not in the correct format. See Guid Constructor (String) for a list of valid GUID - // string formats. - ProposedLeaseID *string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientBreakLeaseOptions contains the optional parameters for the ContainerClient.BreakLease method. -type ContainerClientBreakLeaseOptions struct { - // For a break operation, proposed duration the lease should continue before it is broken, in seconds, between 0 and 60. This - // break period is only used if it is shorter than the time remaining on the - // lease. If longer, the time remaining on the lease is used. A new lease will not be available before the break period has - // expired, but the lease may be held for longer than the break period. If this - // header does not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses, - // and an infinite lease breaks immediately. - BreakPeriod *int32 - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
- // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientChangeLeaseOptions contains the optional parameters for the ContainerClient.ChangeLease method. -type ContainerClientChangeLeaseOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientCreateOptions contains the optional parameters for the ContainerClient.Create method. -type ContainerClientCreateOptions struct { - // Specifies whether data in the container may be accessed publicly and the level of access - Access *PublicAccessType - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination - // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata - // is not copied from the source blob or file. Note that beginning with - // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, - // Blobs, and Metadata for more information. - Metadata map[string]*string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientDeleteOptions contains the optional parameters for the ContainerClient.Delete method. -type ContainerClientDeleteOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientFilterBlobsOptions contains the optional parameters for the ContainerClient.FilterBlobs method. -type ContainerClientFilterBlobsOptions struct { - // Include this parameter to specify one or more datasets to include in the response. - Include []FilterBlobsIncludeItem - // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The - // operation returns the NextMarker value within the response body if the listing - // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used - // as the value for the marker parameter in a subsequent call to request the next - // page of list items. The marker value is opaque to the client. - Marker *string - // Specifies the maximum number of containers to return. 
If the request does not specify maxresults, or specifies a value - // greater than 5000, the server will return up to 5000 items. Note that if the - // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder - // of the results. For this reason, it is possible that the service will - // return fewer results than specified by maxresults, or than the default of 5000. - Maxresults *int32 - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientGetAccessPolicyOptions contains the optional parameters for the ContainerClient.GetAccessPolicy method. -type ContainerClientGetAccessPolicyOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientGetAccountInfoOptions contains the optional parameters for the ContainerClient.GetAccountInfo method. -type ContainerClientGetAccountInfoOptions struct { - // placeholder for future optional parameters -} - -// ContainerClientGetPropertiesOptions contains the optional parameters for the ContainerClient.GetProperties method. -type ContainerClientGetPropertiesOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientListBlobFlatSegmentOptions contains the optional parameters for the ContainerClient.NewListBlobFlatSegmentPager -// method. -type ContainerClientListBlobFlatSegmentOptions struct { - // Include this parameter to specify one or more datasets to include in the response. - Include []ListBlobsIncludeItem - // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The - // operation returns the NextMarker value within the response body if the listing - // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used - // as the value for the marker parameter in a subsequent call to request the next - // page of list items. The marker value is opaque to the client. - Marker *string - // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value - // greater than 5000, the server will return up to 5000 items. 
Note that if the - // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder - // of the results. For this reason, it is possible that the service will - // return fewer results than specified by maxresults, or than the default of 5000. - Maxresults *int32 - // Filters the results to return only containers whose name begins with the specified prefix. - Prefix *string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientListBlobHierarchySegmentOptions contains the optional parameters for the ContainerClient.NewListBlobHierarchySegmentPager -// method. -type ContainerClientListBlobHierarchySegmentOptions struct { - // Include this parameter to specify one or more datasets to include in the response. - Include []ListBlobsIncludeItem - // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The - // operation returns the NextMarker value within the response body if the listing - // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used - // as the value for the marker parameter in a subsequent call to request the next - // page of list items. The marker value is opaque to the client. - Marker *string - // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value - // greater than 5000, the server will return up to 5000 items. Note that if the - // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder - // of the results. For this reason, it is possible that the service will - // return fewer results than specified by maxresults, or than the default of 5000. - Maxresults *int32 - // Filters the results to return only containers whose name begins with the specified prefix. - Prefix *string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientReleaseLeaseOptions contains the optional parameters for the ContainerClient.ReleaseLease method. -type ContainerClientReleaseLeaseOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientRenameOptions contains the optional parameters for the ContainerClient.Rename method. 
-type ContainerClientRenameOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // A lease ID for the source path. If specified, the source path must have an active lease and the lease ID must match. - SourceLeaseID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientRenewLeaseOptions contains the optional parameters for the ContainerClient.RenewLease method. -type ContainerClientRenewLeaseOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientRestoreOptions contains the optional parameters for the ContainerClient.Restore method. -type ContainerClientRestoreOptions struct { - // Optional. Version 2019-12-12 and later. Specifies the name of the deleted container to restore. - DeletedContainerName *string - // Optional. Version 2019-12-12 and later. Specifies the version of the deleted container to restore. - DeletedContainerVersion *string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientSetAccessPolicyOptions contains the optional parameters for the ContainerClient.SetAccessPolicy method. -type ContainerClientSetAccessPolicyOptions struct { - // Specifies whether data in the container may be accessed publicly and the level of access - Access *PublicAccessType - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientSetMetadataOptions contains the optional parameters for the ContainerClient.SetMetadata method. -type ContainerClientSetMetadataOptions struct { - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination - // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata - // is not copied from the source blob or file. Note that beginning with - // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. 
See Naming and Referencing Containers, - // Blobs, and Metadata for more information. - Metadata map[string]*string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerClientSubmitBatchOptions contains the optional parameters for the ContainerClient.SubmitBatch method. -type ContainerClientSubmitBatchOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ContainerCPKScopeInfo contains a group of parameters for the ContainerClient.Create method. -type ContainerCPKScopeInfo struct { - // Optional. Version 2019-07-07 and later. Specifies the default encryption scope to set on the container and use for all - // future writes. - DefaultEncryptionScope *string - // Optional. Version 2019-07-07 and newer. If true, prevents any request from specifying a different encryption scope than - // the scope set on the container. - PreventEncryptionScopeOverride *bool -} - // ContainerItem - An Azure Storage container type ContainerItem struct { // REQUIRED @@ -1133,27 +248,6 @@ type CORSRule struct { MaxAgeInSeconds *int32 `xml:"MaxAgeInSeconds"` } -// CPKInfo contains a group of parameters for the BlobClient.Download method. -type CPKInfo struct { - // The algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided - // if the x-ms-encryption-key header is provided. - EncryptionAlgorithm *EncryptionAlgorithmType - // Optional. Specifies the encryption key to use to encrypt the data provided in the request. If not specified, encryption - // is performed with the root account encryption key. For more information, see - // Encryption at Rest for Azure Storage Services. - EncryptionKey *string - // The SHA-256 hash of the provided encryption key. Must be provided if the x-ms-encryption-key header is provided. - EncryptionKeySHA256 *string -} - -// CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. -type CPKScopeInfo struct { - // Optional. Version 2019-07-07 and later. Specifies the name of the encryption scope to use to encrypt the data provided - // in the request. If not specified, encryption is performed with the default - // account encryption scope. For more information, see Encryption at Rest for Azure Storage Services. - EncryptionScope *string -} - // DelimitedTextConfiguration - Groups the settings used for interpreting the blob data if the blob is delimited text formatted. type DelimitedTextConfiguration struct { // The string used to separate columns. @@ -1225,12 +319,6 @@ type KeyInfo struct { Start *string `xml:"Start"` } -// LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. 
-type LeaseAccessConditions struct {
-	// If specified, the operation only succeeds if the resource's lease is active and matches this ID.
-	LeaseID *string
-}
-
 // ListBlobsFlatSegmentResponse - An enumeration of blobs
 type ListBlobsFlatSegmentResponse struct {
 	// REQUIRED
@@ -1310,195 +398,6 @@ type Metrics struct {
 	Version *string `xml:"Version"`
 }
 
-// ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
-type ModifiedAccessConditions struct {
-	// Specify an ETag value to operate only on blobs with a matching value.
-	IfMatch *azcore.ETag
-	// Specify this header value to operate only on a blob if it has been modified since the specified date/time.
-	IfModifiedSince *time.Time
-	// Specify an ETag value to operate only on blobs without a matching value.
-	IfNoneMatch *azcore.ETag
-	// Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
-	IfTags *string
-	// Specify this header value to operate only on a blob if it has not been modified since the specified date/time.
-	IfUnmodifiedSince *time.Time
-}
-
-// PageBlobClientClearPagesOptions contains the optional parameters for the PageBlobClient.ClearPages method.
-type PageBlobClientClearPagesOptions struct {
-	// Return only the bytes of the blob in the specified range.
-	Range *string
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// PageBlobClientCopyIncrementalOptions contains the optional parameters for the PageBlobClient.CopyIncremental method.
-type PageBlobClientCopyIncrementalOptions struct {
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// PageBlobClientCreateOptions contains the optional parameters for the PageBlobClient.Create method.
-type PageBlobClientCreateOptions struct {
-	// Set for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of
-	// the sequence number must be between 0 and 2^63 - 1.
-	BlobSequenceNumber *int64
-	// Optional. Used to set blob tags in various blob operations.
-	BlobTagsString *string
-	// Specifies the date time when the blobs immutability policy is set to expire.
-	ImmutabilityPolicyExpiry *time.Time
-	// Specifies the immutability policy mode to set on the blob.
-	ImmutabilityPolicyMode *ImmutabilityPolicySetting
-	// Specified if a legal hold should be set on the blob.
-	LegalHold *bool
-	// Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
-	// operation will copy the metadata from the source blob or file to the destination
-	// blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata
-	// is not copied from the source blob or file. Note that beginning with
-	// version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers,
-	// Blobs, and Metadata for more information.
-	Metadata map[string]*string
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// Optional. Indicates the tier to be set on the page blob.
-	Tier *PremiumPageBlobAccessTier
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// PageBlobClientGetPageRangesDiffOptions contains the optional parameters for the PageBlobClient.NewGetPageRangesDiffPager
-// method.
-type PageBlobClientGetPageRangesDiffOptions struct {
-	// A string value that identifies the portion of the list of containers to be returned with the next listing operation. The
-	// operation returns the NextMarker value within the response body if the listing
-	// operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used
-	// as the value for the marker parameter in a subsequent call to request the next
-	// page of list items. The marker value is opaque to the client.
-	Marker *string
-	// Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value
-	// greater than 5000, the server will return up to 5000 items. Note that if the
-	// listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder
-	// of the results. For this reason, it is possible that the service will
-	// return fewer results than specified by maxresults, or than the default of 5000.
-	Maxresults *int32
-	// Optional. This header is only supported in service versions 2019-04-19 and after and specifies the URL of a previous snapshot
-	// of the target blob. The response will only contain pages that were changed
-	// between the target blob and its previous snapshot.
-	PrevSnapshotURL *string
-	// Optional in version 2015-07-08 and newer. The prevsnapshot parameter is a DateTime value that specifies that the response
-	// will contain only pages that were changed between target blob and previous
-	// snapshot. Changed pages include both updated and cleared pages. The target blob may be a snapshot, as long as the snapshot
-	// specified by prevsnapshot is the older of the two. Note that incremental
-	// snapshots are currently supported only for blobs created on or after January 1, 2016.
-	Prevsnapshot *string
-	// Return only the bytes of the blob in the specified range.
-	Range *string
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more
-	// information on working with blob snapshots, see Creating a Snapshot of a Blob.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob]
-	Snapshot *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// PageBlobClientGetPageRangesOptions contains the optional parameters for the PageBlobClient.NewGetPageRangesPager method.
-type PageBlobClientGetPageRangesOptions struct {
-	// A string value that identifies the portion of the list of containers to be returned with the next listing operation. The
-	// operation returns the NextMarker value within the response body if the listing
-	// operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used
-	// as the value for the marker parameter in a subsequent call to request the next
-	// page of list items. The marker value is opaque to the client.
-	Marker *string
-	// Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value
-	// greater than 5000, the server will return up to 5000 items. Note that if the
-	// listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder
-	// of the results. For this reason, it is possible that the service will
-	// return fewer results than specified by maxresults, or than the default of 5000.
-	Maxresults *int32
-	// Return only the bytes of the blob in the specified range.
-	Range *string
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more
-	// information on working with blob snapshots, see Creating a Snapshot of a Blob.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob]
-	Snapshot *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// PageBlobClientResizeOptions contains the optional parameters for the PageBlobClient.Resize method.
-type PageBlobClientResizeOptions struct {
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// PageBlobClientUpdateSequenceNumberOptions contains the optional parameters for the PageBlobClient.UpdateSequenceNumber
-// method.
-type PageBlobClientUpdateSequenceNumberOptions struct {
-	// Set for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of
-	// the sequence number must be between 0 and 2^63 - 1.
-	BlobSequenceNumber *int64
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// PageBlobClientUploadPagesFromURLOptions contains the optional parameters for the PageBlobClient.UploadPagesFromURL method.
-type PageBlobClientUploadPagesFromURLOptions struct {
-	// Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source.
-	CopySourceAuthorization *string
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// Specify the md5 calculated for the range of bytes that must be read from the copy source.
-	SourceContentMD5 []byte
-	// Specify the crc64 calculated for the range of bytes that must be read from the copy source.
-	SourceContentcrc64 []byte
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// PageBlobClientUploadPagesOptions contains the optional parameters for the PageBlobClient.UploadPages method.
-type PageBlobClientUploadPagesOptions struct {
-	// Return only the bytes of the blob in the specified range.
-	Range *string
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-	// Specify the transactional crc64 for the body, to be validated by the service.
-	TransactionalContentCRC64 []byte
-	// Specify the transactional md5 for the body, to be validated by the service.
-	TransactionalContentMD5 []byte
-}
-
 // PageList - the list of pages
 type PageList struct {
 	ClearRange []*ClearRange `xml:"ClearRange"`
@@ -1561,122 +460,6 @@ type RetentionPolicy struct {
 	Days *int32 `xml:"Days"`
 }
 
-// SequenceNumberAccessConditions contains a group of parameters for the PageBlobClient.UploadPages method.
-type SequenceNumberAccessConditions struct {
-	// Specify this header value to operate only on a blob if it has the specified sequence number.
-	IfSequenceNumberEqualTo *int64
-	// Specify this header value to operate only on a blob if it has a sequence number less than the specified.
-	IfSequenceNumberLessThan *int64
-	// Specify this header value to operate only on a blob if it has a sequence number less than or equal to the specified.
-	IfSequenceNumberLessThanOrEqualTo *int64
-}
-
-// ServiceClientFilterBlobsOptions contains the optional parameters for the ServiceClient.FilterBlobs method.
-type ServiceClientFilterBlobsOptions struct {
-	// Include this parameter to specify one or more datasets to include in the response.
-	Include []FilterBlobsIncludeItem
-	// A string value that identifies the portion of the list of containers to be returned with the next listing operation. The
-	// operation returns the NextMarker value within the response body if the listing
-	// operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used
-	// as the value for the marker parameter in a subsequent call to request the next
-	// page of list items. The marker value is opaque to the client.
-	Marker *string
-	// Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value
-	// greater than 5000, the server will return up to 5000 items. Note that if the
-	// listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder
-	// of the results. For this reason, it is possible that the service will
-	// return fewer results than specified by maxresults, or than the default of 5000.
-	Maxresults *int32
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// ServiceClientGetAccountInfoOptions contains the optional parameters for the ServiceClient.GetAccountInfo method.
-type ServiceClientGetAccountInfoOptions struct {
-	// placeholder for future optional parameters
-}
-
-// ServiceClientGetPropertiesOptions contains the optional parameters for the ServiceClient.GetProperties method.
-type ServiceClientGetPropertiesOptions struct {
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// ServiceClientGetStatisticsOptions contains the optional parameters for the ServiceClient.GetStatistics method.
-type ServiceClientGetStatisticsOptions struct {
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// ServiceClientGetUserDelegationKeyOptions contains the optional parameters for the ServiceClient.GetUserDelegationKey method.
-type ServiceClientGetUserDelegationKeyOptions struct {
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// ServiceClientListContainersSegmentOptions contains the optional parameters for the ServiceClient.NewListContainersSegmentPager
-// method.
-type ServiceClientListContainersSegmentOptions struct {
-	// Include this parameter to specify that the container's metadata be returned as part of the response body.
-	Include []ListContainersIncludeType
-	// A string value that identifies the portion of the list of containers to be returned with the next listing operation. The
-	// operation returns the NextMarker value within the response body if the listing
-	// operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used
-	// as the value for the marker parameter in a subsequent call to request the next
-	// page of list items. The marker value is opaque to the client.
-	Marker *string
-	// Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value
-	// greater than 5000, the server will return up to 5000 items. Note that if the
-	// listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder
-	// of the results. For this reason, it is possible that the service will
-	// return fewer results than specified by maxresults, or than the default of 5000.
-	Maxresults *int32
-	// Filters the results to return only containers whose name begins with the specified prefix.
-	Prefix *string
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// ServiceClientSetPropertiesOptions contains the optional parameters for the ServiceClient.SetProperties method.
-type ServiceClientSetPropertiesOptions struct {
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// ServiceClientSubmitBatchOptions contains the optional parameters for the ServiceClient.SubmitBatch method.
-type ServiceClientSubmitBatchOptions struct {
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
 // SignedIdentifier - signed identifier
 type SignedIdentifier struct {
 	// REQUIRED; An Access policy
@@ -1686,20 +469,6 @@ type SignedIdentifier struct {
 	ID *string `xml:"Id"`
 }
 
-// SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL method.
-type SourceModifiedAccessConditions struct {
-	// Specify an ETag value to operate only on blobs with a matching value.
-	SourceIfMatch *azcore.ETag
-	// Specify this header value to operate only on a blob if it has been modified since the specified date/time.
-	SourceIfModifiedSince *time.Time
-	// Specify an ETag value to operate only on blobs without a matching value.
-	SourceIfNoneMatch *azcore.ETag
-	// Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
-	SourceIfTags *string
-	// Specify this header value to operate only on a blob if it has not been modified since the specified date/time.
-	SourceIfUnmodifiedSince *time.Time
-}
-
 // StaticWebsite - The properties that enable an account to host a static website
 type StaticWebsite struct {
 	// REQUIRED; Indicates whether this account is hosting a static website
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models_serde.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models_serde.go
index dc5dba1037a..7e094db8730 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models_serde.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models_serde.go
@@ -3,9 +3,8 @@
 // Copyright (c) Microsoft Corporation. All rights reserved.
 // Licensed under the MIT License. See License.txt in the project root for license information.
-// Code generated by Microsoft (R) AutoRest Code Generator.
+// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT.
 // Changes may cause incorrect behavior and will be lost if the code is regenerated.
-// DO NOT EDIT.
 
 package generated
 
@@ -24,12 +23,12 @@ func (a AccessPolicy) MarshalXML(enc *xml.Encoder, start xml.StartElement) error
 	type alias AccessPolicy
 	aux := &struct {
 		*alias
-		Expiry *timeRFC3339 `xml:"Expiry"`
-		Start *timeRFC3339 `xml:"Start"`
+		Expiry *dateTimeRFC3339 `xml:"Expiry"`
+		Start *dateTimeRFC3339 `xml:"Start"`
 	}{
 		alias: (*alias)(&a),
-		Expiry: (*timeRFC3339)(a.Expiry),
-		Start: (*timeRFC3339)(a.Start),
+		Expiry: (*dateTimeRFC3339)(a.Expiry),
+		Start: (*dateTimeRFC3339)(a.Start),
 	}
 	return enc.EncodeElement(aux, start)
 }
@@ -39,8 +38,8 @@ func (a *AccessPolicy) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) er
 	type alias AccessPolicy
 	aux := &struct {
 		*alias
-		Expiry *timeRFC3339 `xml:"Expiry"`
-		Start *timeRFC3339 `xml:"Start"`
+		Expiry *dateTimeRFC3339 `xml:"Expiry"`
+		Start *dateTimeRFC3339 `xml:"Start"`
 	}{
 		alias: (*alias)(a),
 	}
@@ -106,25 +105,25 @@ func (b BlobProperties) MarshalXML(enc *xml.Encoder, start xml.StartElement) err
 	type alias BlobProperties
 	aux := &struct {
 		*alias
-		AccessTierChangeTime *timeRFC1123 `xml:"AccessTierChangeTime"`
-		ContentMD5 *string `xml:"Content-MD5"`
-		CopyCompletionTime *timeRFC1123 `xml:"CopyCompletionTime"`
-		CreationTime *timeRFC1123 `xml:"Creation-Time"`
-		DeletedTime *timeRFC1123 `xml:"DeletedTime"`
-		ExpiresOn *timeRFC1123 `xml:"Expiry-Time"`
-		ImmutabilityPolicyExpiresOn *timeRFC1123 `xml:"ImmutabilityPolicyUntilDate"`
-		LastAccessedOn *timeRFC1123 `xml:"LastAccessTime"`
-		LastModified *timeRFC1123 `xml:"Last-Modified"`
+		AccessTierChangeTime *dateTimeRFC1123 `xml:"AccessTierChangeTime"`
+		ContentMD5 *string `xml:"Content-MD5"`
+		CopyCompletionTime *dateTimeRFC1123 `xml:"CopyCompletionTime"`
+		CreationTime *dateTimeRFC1123 `xml:"Creation-Time"`
+		DeletedTime *dateTimeRFC1123 `xml:"DeletedTime"`
+		ExpiresOn *dateTimeRFC1123 `xml:"Expiry-Time"`
+		ImmutabilityPolicyExpiresOn *dateTimeRFC1123 `xml:"ImmutabilityPolicyUntilDate"`
+		LastAccessedOn *dateTimeRFC1123 `xml:"LastAccessTime"`
+		LastModified *dateTimeRFC1123 `xml:"Last-Modified"`
 	}{
 		alias: (*alias)(&b),
-		AccessTierChangeTime: (*timeRFC1123)(b.AccessTierChangeTime),
-		CopyCompletionTime: (*timeRFC1123)(b.CopyCompletionTime),
-		CreationTime: (*timeRFC1123)(b.CreationTime),
-		DeletedTime: (*timeRFC1123)(b.DeletedTime),
-		ExpiresOn: (*timeRFC1123)(b.ExpiresOn),
-		ImmutabilityPolicyExpiresOn: (*timeRFC1123)(b.ImmutabilityPolicyExpiresOn),
-		LastAccessedOn: (*timeRFC1123)(b.LastAccessedOn),
-		LastModified: (*timeRFC1123)(b.LastModified),
+		AccessTierChangeTime: (*dateTimeRFC1123)(b.AccessTierChangeTime),
+		CopyCompletionTime: (*dateTimeRFC1123)(b.CopyCompletionTime),
+		CreationTime: (*dateTimeRFC1123)(b.CreationTime),
+		DeletedTime: (*dateTimeRFC1123)(b.DeletedTime),
+		ExpiresOn: (*dateTimeRFC1123)(b.ExpiresOn),
+		ImmutabilityPolicyExpiresOn: (*dateTimeRFC1123)(b.ImmutabilityPolicyExpiresOn),
+		LastAccessedOn: (*dateTimeRFC1123)(b.LastAccessedOn),
+		LastModified: (*dateTimeRFC1123)(b.LastModified),
 	}
 	if b.ContentMD5 != nil {
 		encodedContentMD5 := runtime.EncodeByteArray(b.ContentMD5, runtime.Base64StdFormat)
@@ -138,15 +137,15 @@ func (b *BlobProperties) UnmarshalXML(dec *xml.Decoder, start xml.StartElement)
 	type alias BlobProperties
 	aux := &struct {
 		*alias
-		AccessTierChangeTime *timeRFC1123 `xml:"AccessTierChangeTime"`
-		ContentMD5 *string `xml:"Content-MD5"`
-		CopyCompletionTime *timeRFC1123 `xml:"CopyCompletionTime"`
-		CreationTime *timeRFC1123 `xml:"Creation-Time"`
-		DeletedTime *timeRFC1123 `xml:"DeletedTime"`
-		ExpiresOn *timeRFC1123 `xml:"Expiry-Time"`
-		ImmutabilityPolicyExpiresOn *timeRFC1123 `xml:"ImmutabilityPolicyUntilDate"`
-		LastAccessedOn *timeRFC1123 `xml:"LastAccessTime"`
-		LastModified *timeRFC1123 `xml:"Last-Modified"`
+		AccessTierChangeTime *dateTimeRFC1123 `xml:"AccessTierChangeTime"`
+		ContentMD5 *string `xml:"Content-MD5"`
+		CopyCompletionTime *dateTimeRFC1123 `xml:"CopyCompletionTime"`
+		CreationTime *dateTimeRFC1123 `xml:"Creation-Time"`
+		DeletedTime *dateTimeRFC1123 `xml:"DeletedTime"`
+		ExpiresOn *dateTimeRFC1123 `xml:"Expiry-Time"`
+		ImmutabilityPolicyExpiresOn *dateTimeRFC1123 `xml:"ImmutabilityPolicyUntilDate"`
+		LastAccessedOn *dateTimeRFC1123 `xml:"LastAccessTime"`
+		LastModified *dateTimeRFC1123 `xml:"Last-Modified"`
 	}{
 		alias: (*alias)(b),
 	}
@@ -249,12 +248,12 @@ func (c ContainerProperties) MarshalXML(enc *xml.Encoder, start xml.StartElement
 	type alias ContainerProperties
 	aux := &struct {
 		*alias
-		DeletedTime *timeRFC1123 `xml:"DeletedTime"`
-		LastModified *timeRFC1123 `xml:"Last-Modified"`
+		DeletedTime *dateTimeRFC1123 `xml:"DeletedTime"`
+		LastModified *dateTimeRFC1123 `xml:"Last-Modified"`
 	}{
 		alias: (*alias)(&c),
-		DeletedTime: (*timeRFC1123)(c.DeletedTime),
-		LastModified: (*timeRFC1123)(c.LastModified),
+		DeletedTime: (*dateTimeRFC1123)(c.DeletedTime),
+		LastModified: (*dateTimeRFC1123)(c.LastModified),
 	}
 	return enc.EncodeElement(aux, start)
 }
@@ -264,8 +263,8 @@ func (c *ContainerProperties) UnmarshalXML(dec *xml.Decoder, start xml.StartElem
 	type alias ContainerProperties
 	aux := &struct {
 		*alias
-		DeletedTime *timeRFC1123 `xml:"DeletedTime"`
-		LastModified *timeRFC1123 `xml:"Last-Modified"`
+		DeletedTime *dateTimeRFC1123 `xml:"DeletedTime"`
+		LastModified *dateTimeRFC1123 `xml:"Last-Modified"`
 	}{
 		alias: (*alias)(c),
 	}
@@ -297,10 +296,10 @@ func (g GeoReplication) MarshalXML(enc *xml.Encoder, start xml.StartElement) err
 	type alias GeoReplication
 	aux := &struct {
 		*alias
-		LastSyncTime *timeRFC1123 `xml:"LastSyncTime"`
+		LastSyncTime *dateTimeRFC1123 `xml:"LastSyncTime"`
 	}{
 		alias: (*alias)(&g),
-		LastSyncTime: (*timeRFC1123)(g.LastSyncTime),
+		LastSyncTime: (*dateTimeRFC1123)(g.LastSyncTime),
 	}
 	return enc.EncodeElement(aux, start)
 }
@@ -310,7 +309,7 @@ func (g *GeoReplication) UnmarshalXML(dec *xml.Decoder, start xml.StartElement)
 	type alias GeoReplication
 	aux := &struct {
 		*alias
-		LastSyncTime *timeRFC1123 `xml:"LastSyncTime"`
+		LastSyncTime *dateTimeRFC1123 `xml:"LastSyncTime"`
 	}{
 		alias: (*alias)(g),
 	}
@@ -414,12 +413,12 @@ func (u UserDelegationKey) MarshalXML(enc *xml.Encoder, start xml.StartElement)
 	type alias UserDelegationKey
 	aux := &struct {
 		*alias
-		SignedExpiry *timeRFC3339 `xml:"SignedExpiry"`
-		SignedStart *timeRFC3339 `xml:"SignedStart"`
+		SignedExpiry *dateTimeRFC3339 `xml:"SignedExpiry"`
+		SignedStart *dateTimeRFC3339 `xml:"SignedStart"`
 	}{
 		alias: (*alias)(&u),
-		SignedExpiry: (*timeRFC3339)(u.SignedExpiry),
-		SignedStart: (*timeRFC3339)(u.SignedStart),
+		SignedExpiry: (*dateTimeRFC3339)(u.SignedExpiry),
+		SignedStart: (*dateTimeRFC3339)(u.SignedStart),
 	}
 	return enc.EncodeElement(aux, start)
 }
@@ -429,8 +428,8 @@ func (u *UserDelegationKey) UnmarshalXML(dec *xml.Decoder, start xml.StartElemen
 	type alias UserDelegationKey
 	aux := &struct {
 		*alias
-		SignedExpiry *timeRFC3339 `xml:"SignedExpiry"`
-		SignedStart *timeRFC3339 `xml:"SignedStart"`
+		SignedExpiry *dateTimeRFC3339 `xml:"SignedExpiry"`
+		SignedStart *dateTimeRFC3339 `xml:"SignedStart"`
 	}{
 		alias: (*alias)(u),
 	}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_options.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_options.go
new file mode 100644
index 00000000000..216f8b73ae9
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_options.go
@@ -0,0 +1,1469 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+package generated
+
+import (
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
+	"time"
+)
+
+// AppendBlobClientAppendBlockFromURLOptions contains the optional parameters for the AppendBlobClient.AppendBlockFromURL
+// method.
+type AppendBlobClientAppendBlockFromURLOptions struct {
+	// Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source.
+	CopySourceAuthorization *string
+
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// Specify the md5 calculated for the range of bytes that must be read from the copy source.
+	SourceContentMD5 []byte
+
+	// Specify the crc64 calculated for the range of bytes that must be read from the copy source.
+	SourceContentcrc64 []byte
+
+	// Bytes of source data in the specified range.
+	SourceRange *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+
+	// Specify the transactional md5 for the body, to be validated by the service.
+	TransactionalContentMD5 []byte
+}
+
+// AppendBlobClientAppendBlockOptions contains the optional parameters for the AppendBlobClient.AppendBlock method.
+type AppendBlobClientAppendBlockOptions struct {
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+
+	// Specify the transactional crc64 for the body, to be validated by the service.
+	TransactionalContentCRC64 []byte
+
+	// Specify the transactional md5 for the body, to be validated by the service.
+	TransactionalContentMD5 []byte
+}
+
+// AppendBlobClientCreateOptions contains the optional parameters for the AppendBlobClient.Create method.
+type AppendBlobClientCreateOptions struct {
+	// Optional. Used to set blob tags in various blob operations.
+	BlobTagsString *string
+
+	// Specifies the date time when the blobs immutability policy is set to expire.
+	ImmutabilityPolicyExpiry *time.Time
+
+	// Specifies the immutability policy mode to set on the blob.
+	ImmutabilityPolicyMode *ImmutabilityPolicySetting
+
+	// Specified if a legal hold should be set on the blob.
+	LegalHold *bool
+
+	// Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
+	// operation will copy the metadata from the source blob or file to the destination
+	// blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata
+	// is not copied from the source blob or file. Note that beginning with
+	// version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers,
+	// Blobs, and Metadata for more information.
+	Metadata map[string]*string
+
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+// AppendBlobClientSealOptions contains the optional parameters for the AppendBlobClient.Seal method.
+type AppendBlobClientSealOptions struct {
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+// AppendPositionAccessConditions contains a group of parameters for the AppendBlobClient.AppendBlock method.
+type AppendPositionAccessConditions struct {
+	// Optional conditional header, used only for the Append Block operation. A number indicating the byte offset to compare.
+	// Append Block will succeed only if the append position is equal to this number. If
+	// it is not, the request will fail with the AppendPositionConditionNotMet error (HTTP status code 412 - Precondition Failed).
+	AppendPosition *int64
+
+	// Optional conditional header. The max length in bytes permitted for the append blob. If the Append Block operation would
+	// cause the blob to exceed that limit or if the blob size is already greater than
+	// the value specified in this header, the request will fail with MaxBlobSizeConditionNotMet error (HTTP status code 412 -
+	// Precondition Failed).
+	MaxSize *int64
+}
+
+// BlobClientAbortCopyFromURLOptions contains the optional parameters for the BlobClient.AbortCopyFromURL method.
+type BlobClientAbortCopyFromURLOptions struct {
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+// BlobClientAcquireLeaseOptions contains the optional parameters for the BlobClient.AcquireLease method.
+type BlobClientAcquireLeaseOptions struct {
+	// Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is
+	// not in the correct format. See Guid Constructor (String) for a list of valid GUID
+	// string formats.
+	ProposedLeaseID *string
+
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+// BlobClientBreakLeaseOptions contains the optional parameters for the BlobClient.BreakLease method.
+type BlobClientBreakLeaseOptions struct {
+	// For a break operation, proposed duration the lease should continue before it is broken, in seconds, between 0 and 60. This
+	// break period is only used if it is shorter than the time remaining on the
+	// lease. If longer, the time remaining on the lease is used. A new lease will not be available before the break period has
+	// expired, but the lease may be held for longer than the break period. If this
+	// header does not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses,
+	// and an infinite lease breaks immediately.
+	BreakPeriod *int32
+
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+// BlobClientChangeLeaseOptions contains the optional parameters for the BlobClient.ChangeLease method.
+type BlobClientChangeLeaseOptions struct {
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+// BlobClientCopyFromURLOptions contains the optional parameters for the BlobClient.CopyFromURL method.
+type BlobClientCopyFromURLOptions struct {
+	// Optional. Used to set blob tags in various blob operations.
+	BlobTagsString *string
+
+	// Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source.
+	CopySourceAuthorization *string
+
+	// Optional, default 'replace'. Indicates if source tags should be copied or replaced with the tags specified by x-ms-tags.
+	CopySourceTags *BlobCopySourceTags
+
+	// Specifies the date time when the blobs immutability policy is set to expire.
+	ImmutabilityPolicyExpiry *time.Time
+
+	// Specifies the immutability policy mode to set on the blob.
+	ImmutabilityPolicyMode *ImmutabilityPolicySetting
+
+	// Specified if a legal hold should be set on the blob.
+	LegalHold *bool
+
+	// Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
+	// operation will copy the metadata from the source blob or file to the destination
+	// blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata
+	// is not copied from the source blob or file. Note that beginning with
+	// version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers,
+	// Blobs, and Metadata for more information.
+	Metadata map[string]*string
+
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// Specify the md5 calculated for the range of bytes that must be read from the copy source.
+	SourceContentMD5 []byte
+
+	// Optional. Indicates the tier to be set on the blob.
+	Tier *AccessTier
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+// BlobClientCreateSnapshotOptions contains the optional parameters for the BlobClient.CreateSnapshot method.
+type BlobClientCreateSnapshotOptions struct {
+	// Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
+	// operation will copy the metadata from the source blob or file to the destination
+	// blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata
+	// is not copied from the source blob or file. Note that beginning with
+	// version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers,
+	// Blobs, and Metadata for more information.
+	Metadata map[string]*string
+
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+// BlobClientDeleteImmutabilityPolicyOptions contains the optional parameters for the BlobClient.DeleteImmutabilityPolicy
+// method.
+type BlobClientDeleteImmutabilityPolicyOptions struct {
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+// BlobClientDeleteOptions contains the optional parameters for the BlobClient.Delete method.
+type BlobClientDeleteOptions struct {
+	// Required if the blob has associated snapshots. Specify one of the following two options: include: Delete the base blob
+	// and all of its snapshots. only: Delete only the blob's snapshots and not the blob
+	// itself
+	DeleteSnapshots *DeleteSnapshotsOptionType
+
+	// Optional. Only possible value is 'permanent', which specifies to permanently delete a blob if blob soft delete is enabled.
+	DeleteType *DeleteType
+
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more
+	// information on working with blob snapshots, see Creating a Snapshot of a Blob.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob]
+	Snapshot *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+
+	// The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on.
+	// It's for service version 2019-10-10 and newer.
+	VersionID *string
+}
+
+// BlobClientDownloadOptions contains the optional parameters for the BlobClient.Download method.
+type BlobClientDownloadOptions struct {
+	// Return only the bytes of the blob in the specified range.
+	Range *string
+
+	// When set to true and specified together with the Range, the service returns the CRC64 hash for the range, as long as the
+	// range is less than or equal to 4 MB in size.
+	RangeGetContentCRC64 *bool
+
+	// When set to true and specified together with the Range, the service returns the MD5 hash for the range, as long as the
+	// range is less than or equal to 4 MB in size.
+	RangeGetContentMD5 *bool
+
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more
+	// information on working with blob snapshots, see Creating a Snapshot of a Blob.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob]
+	Snapshot *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+
+	// The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on.
+	// It's for service version 2019-10-10 and newer.
+	VersionID *string
+}
+
+// BlobClientGetAccountInfoOptions contains the optional parameters for the BlobClient.GetAccountInfo method.
+type BlobClientGetAccountInfoOptions struct {
+	// placeholder for future optional parameters
+}
+
+// BlobClientGetPropertiesOptions contains the optional parameters for the BlobClient.GetProperties method.
+type BlobClientGetPropertiesOptions struct {
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more
+	// information on working with blob snapshots, see Creating a Snapshot of a Blob.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob]
+	Snapshot *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+
+	// The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on.
+	// It's for service version 2019-10-10 and newer.
+	VersionID *string
+}
+
+// BlobClientGetTagsOptions contains the optional parameters for the BlobClient.GetTags method.
+type BlobClientGetTagsOptions struct {
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more
+	// information on working with blob snapshots, see Creating a Snapshot of a Blob.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob]
+	Snapshot *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+
+	// The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on.
+	// It's for service version 2019-10-10 and newer.
+	VersionID *string
+}
+
+// BlobClientQueryOptions contains the optional parameters for the BlobClient.Query method.
+type BlobClientQueryOptions struct {
+	// the query request
+	QueryRequest *QueryRequest
+
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more
+	// information on working with blob snapshots, see Creating a Snapshot of a Blob.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob]
+	Snapshot *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+// BlobClientReleaseLeaseOptions contains the optional parameters for the BlobClient.ReleaseLease method.
+type BlobClientReleaseLeaseOptions struct {
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+// BlobClientRenewLeaseOptions contains the optional parameters for the BlobClient.RenewLease method.
+type BlobClientRenewLeaseOptions struct {
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+// BlobClientSetExpiryOptions contains the optional parameters for the BlobClient.SetExpiry method.
+type BlobClientSetExpiryOptions struct {
+	// The time to set the blob to expiry
+	ExpiresOn *string
+
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+// BlobClientSetHTTPHeadersOptions contains the optional parameters for the BlobClient.SetHTTPHeaders method.
+type BlobClientSetHTTPHeadersOptions struct {
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+// BlobClientSetImmutabilityPolicyOptions contains the optional parameters for the BlobClient.SetImmutabilityPolicy method.
+type BlobClientSetImmutabilityPolicyOptions struct {
+	// Specifies the date time when the blobs immutability policy is set to expire.
+	ImmutabilityPolicyExpiry *time.Time
+
+	// Specifies the immutability policy mode to set on the blob.
+	ImmutabilityPolicyMode *ImmutabilityPolicySetting
+
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+// BlobClientSetLegalHoldOptions contains the optional parameters for the BlobClient.SetLegalHold method.
+type BlobClientSetLegalHoldOptions struct {
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+// BlobClientSetMetadataOptions contains the optional parameters for the BlobClient.SetMetadata method.
+type BlobClientSetMetadataOptions struct {
+	// Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
+	// operation will copy the metadata from the source blob or file to the destination
+	// blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata
+	// is not copied from the source blob or file. Note that beginning with
+	// version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers,
+	// Blobs, and Metadata for more information.
+	Metadata map[string]*string
+
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+// BlobClientSetTagsOptions contains the optional parameters for the BlobClient.SetTags method.
+type BlobClientSetTagsOptions struct {
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+
+	// Specify the transactional crc64 for the body, to be validated by the service.
+	TransactionalContentCRC64 []byte
+
+	// Specify the transactional md5 for the body, to be validated by the service.
+	TransactionalContentMD5 []byte
+
+	// The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on.
+	// It's for service version 2019-10-10 and newer.
+	VersionID *string
+}
+
+// BlobClientSetTierOptions contains the optional parameters for the BlobClient.SetTier method.
+type BlobClientSetTierOptions struct {
+	// Optional: Indicates the priority with which to rehydrate an archived blob.
+	RehydratePriority *RehydratePriority
+
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more
+	// information on working with blob snapshots, see Creating a Snapshot of a Blob.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob]
+	Snapshot *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+
+	// The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on.
+	// It's for service version 2019-10-10 and newer.
+	VersionID *string
+}
+
+// BlobClientStartCopyFromURLOptions contains the optional parameters for the BlobClient.StartCopyFromURL method.
+type BlobClientStartCopyFromURLOptions struct {
+	// Optional. Used to set blob tags in various blob operations.
+	BlobTagsString *string
+
+	// Specifies the date time when the blobs immutability policy is set to expire.
+	ImmutabilityPolicyExpiry *time.Time
+
+	// Specifies the immutability policy mode to set on the blob.
+	ImmutabilityPolicyMode *ImmutabilityPolicySetting
+
+	// Specified if a legal hold should be set on the blob.
+	LegalHold *bool
+
+	// Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
+	// operation will copy the metadata from the source blob or file to the destination
+	// blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata
+	// is not copied from the source blob or file. Note that beginning with
+	// version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers,
+	// Blobs, and Metadata for more information.
+	Metadata map[string]*string
+
+	// Optional: Indicates the priority with which to rehydrate an archived blob.
+	RehydratePriority *RehydratePriority
+
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// Overrides the sealed state of the destination blob. Service version 2019-12-12 and newer.
+	SealBlob *bool
+
+	// Optional. Indicates the tier to be set on the blob.
+	Tier *AccessTier
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+// BlobClientUndeleteOptions contains the optional parameters for the BlobClient.Undelete method.
+type BlobClientUndeleteOptions struct {
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+// BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method.
+type BlobHTTPHeaders struct {
+	// Optional. Sets the blob's cache control. If specified, this property is stored with the blob and returned with a read request.
+	BlobCacheControl *string
+
+	// Optional. Sets the blob's Content-Disposition header.
+	BlobContentDisposition *string
+
+	// Optional. Sets the blob's content encoding. If specified, this property is stored with the blob and returned with a read
+	// request.
+	BlobContentEncoding *string
+
+	// Optional. Set the blob's content language. If specified, this property is stored with the blob and returned with a read
+	// request.
+	BlobContentLanguage *string
+
+	// Optional. An MD5 hash of the blob content. Note that this hash is not validated, as the hashes for the individual blocks
+	// were validated when each was uploaded.
+	BlobContentMD5 []byte
+
+	// Optional. Sets the blob's content type. If specified, this property is stored with the blob and returned with a read request.
+	BlobContentType *string
+}
+
+// BlockBlobClientCommitBlockListOptions contains the optional parameters for the BlockBlobClient.CommitBlockList method.
+type BlockBlobClientCommitBlockListOptions struct {
+	// Optional. Used to set blob tags in various blob operations.
+	BlobTagsString *string
+
+	// Specifies the date time when the blobs immutability policy is set to expire.
+	ImmutabilityPolicyExpiry *time.Time
+
+	// Specifies the immutability policy mode to set on the blob.
+	ImmutabilityPolicyMode *ImmutabilityPolicySetting
+
+	// Specified if a legal hold should be set on the blob.
+	LegalHold *bool
+
+	// Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
+	// operation will copy the metadata from the source blob or file to the destination
+	// blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata
+	// is not copied from the source blob or file. Note that beginning with
+	// version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers,
+	// Blobs, and Metadata for more information.
+	Metadata map[string]*string
+
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// Optional. Indicates the tier to be set on the blob.
+	Tier *AccessTier
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+
+	// Specify the transactional crc64 for the body, to be validated by the service.
+	TransactionalContentCRC64 []byte
+
+	// Specify the transactional md5 for the body, to be validated by the service.
+	TransactionalContentMD5 []byte
+}
+
+// BlockBlobClientGetBlockListOptions contains the optional parameters for the BlockBlobClient.GetBlockList method.
+type BlockBlobClientGetBlockListOptions struct {
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more
+	// information on working with blob snapshots, see Creating a Snapshot of a Blob.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob]
+	Snapshot *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+// BlockBlobClientPutBlobFromURLOptions contains the optional parameters for the BlockBlobClient.PutBlobFromURL method.
+type BlockBlobClientPutBlobFromURLOptions struct {
+	// Optional. Used to set blob tags in various blob operations.
+	BlobTagsString *string
+
+	// Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source.
+	CopySourceAuthorization *string
+
+	// Optional, default is true. Indicates if properties from the source blob should be copied.
+	CopySourceBlobProperties *bool
+
+	// Optional, default 'replace'. Indicates if source tags should be copied or replaced with the tags specified by x-ms-tags.
+	CopySourceTags *BlobCopySourceTags
+
+	// Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
+	// operation will copy the metadata from the source blob or file to the destination
+	// blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata
+	// is not copied from the source blob or file. Note that beginning with
+	// version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers,
+	// Blobs, and Metadata for more information.
+	Metadata map[string]*string
+
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// Specify the md5 calculated for the range of bytes that must be read from the copy source.
+	SourceContentMD5 []byte
+
+	// Optional. Indicates the tier to be set on the blob.
+	Tier *AccessTier
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+
+	// Specify the transactional md5 for the body, to be validated by the service.
+	TransactionalContentMD5 []byte
+}
+
+// BlockBlobClientStageBlockFromURLOptions contains the optional parameters for the BlockBlobClient.StageBlockFromURL method.
+type BlockBlobClientStageBlockFromURLOptions struct {
+	// Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source.
+	CopySourceAuthorization *string
+
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// Specify the md5 calculated for the range of bytes that must be read from the copy source.
+	SourceContentMD5 []byte
+
+	// Specify the crc64 calculated for the range of bytes that must be read from the copy source.
+	SourceContentcrc64 []byte
+
+	// Bytes of source data in the specified range.
+	SourceRange *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+// BlockBlobClientStageBlockOptions contains the optional parameters for the BlockBlobClient.StageBlock method.
+type BlockBlobClientStageBlockOptions struct {
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+
+	// Specify the transactional crc64 for the body, to be validated by the service.
+	TransactionalContentCRC64 []byte
+
+	// Specify the transactional md5 for the body, to be validated by the service.
+	TransactionalContentMD5 []byte
+}
+
+// BlockBlobClientUploadOptions contains the optional parameters for the BlockBlobClient.Upload method.
+type BlockBlobClientUploadOptions struct {
+	// Optional. Used to set blob tags in various blob operations.
+	BlobTagsString *string
+
+	// Specifies the date time when the blobs immutability policy is set to expire.
+	ImmutabilityPolicyExpiry *time.Time
+
+	// Specifies the immutability policy mode to set on the blob.
+	ImmutabilityPolicyMode *ImmutabilityPolicySetting
+
+	// Specified if a legal hold should be set on the blob.
+	LegalHold *bool
+
+	// Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
+	// operation will copy the metadata from the source blob or file to the destination
+	// blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata
+	// is not copied from the source blob or file. Note that beginning with
+	// version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers,
+	// Blobs, and Metadata for more information.
+	Metadata map[string]*string
+
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// Optional. Indicates the tier to be set on the blob.
+	Tier *AccessTier
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+
+	// Specify the transactional crc64 for the body, to be validated by the service.
+	TransactionalContentCRC64 []byte
+
+	// Specify the transactional md5 for the body, to be validated by the service.
+	TransactionalContentMD5 []byte
+}
+
+// ContainerClientAcquireLeaseOptions contains the optional parameters for the ContainerClient.AcquireLease method.
+type ContainerClientAcquireLeaseOptions struct {
+	// Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is
+	// not in the correct format. See Guid Constructor (String) for a list of valid GUID
+	// string formats.
+	ProposedLeaseID *string
+
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+// ContainerClientBreakLeaseOptions contains the optional parameters for the ContainerClient.BreakLease method.
+type ContainerClientBreakLeaseOptions struct {
+	// For a break operation, proposed duration the lease should continue before it is broken, in seconds, between 0 and 60. This
+	// break period is only used if it is shorter than the time remaining on the
+	// lease. If longer, the time remaining on the lease is used. A new lease will not be available before the break period has
+	// expired, but the lease may be held for longer than the break period. If this
+	// header does not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses,
+	// and an infinite lease breaks immediately.
+	BreakPeriod *int32
+
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientChangeLeaseOptions contains the optional parameters for the ContainerClient.ChangeLease method. +type ContainerClientChangeLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientCreateOptions contains the optional parameters for the ContainerClient.Create method. +type ContainerClientCreateOptions struct { + // Specifies whether data in the container may be accessed publicly and the level of access + Access *PublicAccessType + + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientDeleteOptions contains the optional parameters for the ContainerClient.Delete method. +type ContainerClientDeleteOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientFilterBlobsOptions contains the optional parameters for the ContainerClient.FilterBlobs method. +type ContainerClientFilterBlobsOptions struct { + // Include this parameter to specify one or more datasets to include in the response. + Include []FilterBlobsIncludeItem + + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. 
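
ContainerClientCreateOptions is what the public container package's CreateOptions maps onto. A small sketch (names and values are placeholders, not part of the patch; the re-exported access-type constant is an assumption) that creates a container with metadata and blob-level public access:

    package main

    import (
    	"context"
    	"log"

    	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
    	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
    )

    func main() {
    	cc, err := container.NewClientFromConnectionString("<connection-string>", "mycontainer", nil)
    	if err != nil {
    		log.Fatal(err)
    	}
    	// Metadata keys must follow the C# identifier rules noted in the comment above.
    	_, err = cc.Create(context.Background(), &container.CreateOptions{
    		Access:   to.Ptr(container.PublicAccessTypeBlob),
    		Metadata: map[string]*string{"env": to.Ptr("dev")},
    	})
    	if err != nil {
    		log.Fatal(err)
    	}
    }
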
+ Marker *string + + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + Maxresults *int32 + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientGetAccessPolicyOptions contains the optional parameters for the ContainerClient.GetAccessPolicy method. +type ContainerClientGetAccessPolicyOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientGetAccountInfoOptions contains the optional parameters for the ContainerClient.GetAccountInfo method. +type ContainerClientGetAccountInfoOptions struct { + // placeholder for future optional parameters +} + +// ContainerClientGetPropertiesOptions contains the optional parameters for the ContainerClient.GetProperties method. +type ContainerClientGetPropertiesOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientListBlobFlatSegmentOptions contains the optional parameters for the ContainerClient.NewListBlobFlatSegmentPager +// method. +type ContainerClientListBlobFlatSegmentOptions struct { + // Include this parameter to specify one or more datasets to include in the response. + Include []ListBlobsIncludeItem + + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. 
Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + Maxresults *int32 + + // Filters the results to return only containers whose name begins with the specified prefix. + Prefix *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientListBlobHierarchySegmentOptions contains the optional parameters for the ContainerClient.NewListBlobHierarchySegmentPager +// method. +type ContainerClientListBlobHierarchySegmentOptions struct { + // Include this parameter to specify one or more datasets to include in the response. + Include []ListBlobsIncludeItem + + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + Maxresults *int32 + + // Filters the results to return only containers whose name begins with the specified prefix. + Prefix *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientReleaseLeaseOptions contains the optional parameters for the ContainerClient.ReleaseLease method. +type ContainerClientReleaseLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientRenameOptions contains the optional parameters for the ContainerClient.Rename method. 
+type ContainerClientRenameOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // A lease ID for the source path. If specified, the source path must have an active lease and the lease ID must match. + SourceLeaseID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientRenewLeaseOptions contains the optional parameters for the ContainerClient.RenewLease method. +type ContainerClientRenewLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientRestoreOptions contains the optional parameters for the ContainerClient.Restore method. +type ContainerClientRestoreOptions struct { + // Optional. Version 2019-12-12 and later. Specifies the name of the deleted container to restore. + DeletedContainerName *string + + // Optional. Version 2019-12-12 and later. Specifies the version of the deleted container to restore. + DeletedContainerVersion *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientSetAccessPolicyOptions contains the optional parameters for the ContainerClient.SetAccessPolicy method. +type ContainerClientSetAccessPolicyOptions struct { + // Specifies whether data in the container may be accessed publicly and the level of access + Access *PublicAccessType + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientSetMetadataOptions contains the optional parameters for the ContainerClient.SetMetadata method. +type ContainerClientSetMetadataOptions struct { + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. 
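
The acquire/renew/break/release option structs in this block are driven through the separate lease package in the public API. A rough lifecycle sketch under stated assumptions (an existing *container.Client; everything here is illustrative rather than part of the patch):

    package main

    import (
    	"context"
    	"log"

    	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
    	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/lease"
    )

    func holdLease(ctx context.Context, cc *container.Client) {
    	// Passing nil options lets the SDK generate the proposed lease ID (a GUID).
    	lc, err := lease.NewContainerClient(cc, nil)
    	if err != nil {
    		log.Fatal(err)
    	}
    	// Lease durations are 15-60 seconds, or -1 for an infinite lease.
    	if _, err = lc.AcquireLease(ctx, 15, nil); err != nil {
    		log.Fatal(err)
    	}
    	if _, err = lc.RenewLease(ctx, nil); err != nil {
    		log.Fatal(err)
    	}
    	if _, err = lc.ReleaseLease(ctx, nil); err != nil {
    		log.Fatal(err)
    	}
    }
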
Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientSubmitBatchOptions contains the optional parameters for the ContainerClient.SubmitBatch method. +type ContainerClientSubmitBatchOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerCPKScopeInfo contains a group of parameters for the ContainerClient.Create method. +type ContainerCPKScopeInfo struct { + // Optional. Version 2019-07-07 and later. Specifies the default encryption scope to set on the container and use for all + // future writes. + DefaultEncryptionScope *string + + // Optional. Version 2019-07-07 and newer. If true, prevents any request from specifying a different encryption scope than + // the scope set on the container. + PreventEncryptionScopeOverride *bool +} + +// CPKInfo contains a group of parameters for the BlobClient.Download method. +type CPKInfo struct { + // The algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided + // if the x-ms-encryption-key header is provided. + EncryptionAlgorithm *EncryptionAlgorithmType + + // Optional. Specifies the encryption key to use to encrypt the data provided in the request. If not specified, encryption + // is performed with the root account encryption key. For more information, see + // Encryption at Rest for Azure Storage Services. + EncryptionKey *string + + // The SHA-256 hash of the provided encryption key. Must be provided if the x-ms-encryption-key header is provided. + EncryptionKeySHA256 *string +} + +// CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +type CPKScopeInfo struct { + // Optional. Version 2019-07-07 and later. Specifies the name of the encryption scope to use to encrypt the data provided + // in the request. If not specified, encryption is performed with the default + // account encryption scope. For more information, see Encryption at Rest for Azure Storage Services. + EncryptionScope *string +} + +// LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +type LeaseAccessConditions struct { + // If specified, the operation only succeeds if the resource's lease is active and matches this ID. + LeaseID *string +} + +// ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +type ModifiedAccessConditions struct { + // Specify an ETag value to operate only on blobs with a matching value. 
+ IfMatch *azcore.ETag + + // Specify this header value to operate only on a blob if it has been modified since the specified date/time. + IfModifiedSince *time.Time + + // Specify an ETag value to operate only on blobs without a matching value. + IfNoneMatch *azcore.ETag + + // Specify a SQL where clause on blob tags to operate only on blobs with a matching value. + IfTags *string + + // Specify this header value to operate only on a blob if it has not been modified since the specified date/time. + IfUnmodifiedSince *time.Time +} + +// PageBlobClientClearPagesOptions contains the optional parameters for the PageBlobClient.ClearPages method. +type PageBlobClientClearPagesOptions struct { + // Return only the bytes of the blob in the specified range. + Range *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// PageBlobClientCopyIncrementalOptions contains the optional parameters for the PageBlobClient.CopyIncremental method. +type PageBlobClientCopyIncrementalOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// PageBlobClientCreateOptions contains the optional parameters for the PageBlobClient.Create method. +type PageBlobClientCreateOptions struct { + // Set for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of + // the sequence number must be between 0 and 2^63 - 1. + BlobSequenceNumber *int64 + + // Optional. Used to set blob tags in various blob operations. + BlobTagsString *string + + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *ImmutabilityPolicySetting + + // Specified if a legal hold should be set on the blob. + LegalHold *bool + + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // Optional. Indicates the tier to be set on the page blob. 
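
ModifiedAccessConditions is how optimistic concurrency is expressed across these operations. A hedged sketch (client construction elided; assumes an existing *blob.Client and a hypothetical metadata key): read the current ETag, then write metadata only if the blob has not changed since:

    package main

    import (
    	"context"
    	"log"

    	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
    	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
    )

    func setIfUnchanged(ctx context.Context, bc *blob.Client) {
    	props, err := bc.GetProperties(ctx, nil)
    	if err != nil {
    		log.Fatal(err)
    	}
    	// If the blob was modified in between, the service rejects this with 412.
    	_, err = bc.SetMetadata(ctx, map[string]*string{"reviewed": to.Ptr("true")}, &blob.SetMetadataOptions{
    		AccessConditions: &blob.AccessConditions{
    			ModifiedAccessConditions: &blob.ModifiedAccessConditions{IfMatch: props.ETag},
    		},
    	})
    	if err != nil {
    		log.Fatal(err)
    	}
    }
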
+ Tier *PremiumPageBlobAccessTier + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// PageBlobClientGetPageRangesDiffOptions contains the optional parameters for the PageBlobClient.NewGetPageRangesDiffPager +// method. +type PageBlobClientGetPageRangesDiffOptions struct { + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + Maxresults *int32 + + // Optional. This header is only supported in service versions 2019-04-19 and after and specifies the URL of a previous snapshot + // of the target blob. The response will only contain pages that were changed + // between the target blob and its previous snapshot. + PrevSnapshotURL *string + + // Optional in version 2015-07-08 and newer. The prevsnapshot parameter is a DateTime value that specifies that the response + // will contain only pages that were changed between target blob and previous + // snapshot. Changed pages include both updated and cleared pages. The target blob may be a snapshot, as long as the snapshot + // specified by prevsnapshot is the older of the two. Note that incremental + // snapshots are currently supported only for blobs created on or after January 1, 2016. + Prevsnapshot *string + + // Return only the bytes of the blob in the specified range. + Range *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// PageBlobClientGetPageRangesOptions contains the optional parameters for the PageBlobClient.NewGetPageRangesPager method. +type PageBlobClientGetPageRangesOptions struct { + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. 
The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + Maxresults *int32 + + // Return only the bytes of the blob in the specified range. + Range *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// PageBlobClientResizeOptions contains the optional parameters for the PageBlobClient.Resize method. +type PageBlobClientResizeOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// PageBlobClientUpdateSequenceNumberOptions contains the optional parameters for the PageBlobClient.UpdateSequenceNumber +// method. +type PageBlobClientUpdateSequenceNumberOptions struct { + // Set for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of + // the sequence number must be between 0 and 2^63 - 1. + BlobSequenceNumber *int64 + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// PageBlobClientUploadPagesFromURLOptions contains the optional parameters for the PageBlobClient.UploadPagesFromURL method. +type PageBlobClientUploadPagesFromURLOptions struct { + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. 
+ CopySourceAuthorization *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // Specify the md5 calculated for the range of bytes that must be read from the copy source. + SourceContentMD5 []byte + + // Specify the crc64 calculated for the range of bytes that must be read from the copy source. + SourceContentcrc64 []byte + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// PageBlobClientUploadPagesOptions contains the optional parameters for the PageBlobClient.UploadPages method. +type PageBlobClientUploadPagesOptions struct { + // Return only the bytes of the blob in the specified range. + Range *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // Specify the transactional crc64 for the body, to be validated by the service. + TransactionalContentCRC64 []byte + + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte +} + +// SequenceNumberAccessConditions contains a group of parameters for the PageBlobClient.UploadPages method. +type SequenceNumberAccessConditions struct { + // Specify this header value to operate only on a blob if it has the specified sequence number. + IfSequenceNumberEqualTo *int64 + + // Specify this header value to operate only on a blob if it has a sequence number less than the specified. + IfSequenceNumberLessThan *int64 + + // Specify this header value to operate only on a blob if it has a sequence number less than or equal to the specified. + IfSequenceNumberLessThanOrEqualTo *int64 +} + +// ServiceClientFilterBlobsOptions contains the optional parameters for the ServiceClient.FilterBlobs method. +type ServiceClientFilterBlobsOptions struct { + // Include this parameter to specify one or more datasets to include in the response. + Include []FilterBlobsIncludeItem + + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. 
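
UploadPagesOptions sits behind pageblob.Client.UploadPages in the public surface. A sketch under stated assumptions (an existing *pageblob.Client, and the current SDK shape in which the page range is passed as an explicit blob.HTTPRange; payloads must be 512-byte aligned):

    package main

    import (
    	"bytes"
    	"context"
    	"log"

    	"github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
    	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
    	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob"
    )

    func writeFirstPage(ctx context.Context, pb *pageblob.Client) {
    	// Page blobs are fixed-size and page-aligned; one page is 512 bytes.
    	if _, err := pb.Create(ctx, 1024, nil); err != nil {
    		log.Fatal(err)
    	}
    	page := make([]byte, 512)
    	copy(page, []byte("hello"))
    	_, err := pb.UploadPages(ctx, streaming.NopCloser(bytes.NewReader(page)),
    		blob.HTTPRange{Offset: 0, Count: int64(len(page))}, nil)
    	if err != nil {
    		log.Fatal(err)
    	}
    }
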
For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + Maxresults *int32 + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ServiceClientGetAccountInfoOptions contains the optional parameters for the ServiceClient.GetAccountInfo method. +type ServiceClientGetAccountInfoOptions struct { + // placeholder for future optional parameters +} + +// ServiceClientGetPropertiesOptions contains the optional parameters for the ServiceClient.GetProperties method. +type ServiceClientGetPropertiesOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ServiceClientGetStatisticsOptions contains the optional parameters for the ServiceClient.GetStatistics method. +type ServiceClientGetStatisticsOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ServiceClientGetUserDelegationKeyOptions contains the optional parameters for the ServiceClient.GetUserDelegationKey method. +type ServiceClientGetUserDelegationKeyOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ServiceClientListContainersSegmentOptions contains the optional parameters for the ServiceClient.NewListContainersSegmentPager +// method. +type ServiceClientListContainersSegmentOptions struct { + // Include this parameter to specify that the container's metadata be returned as part of the response body. + Include []ListContainersIncludeType + + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. 
+ Marker *string + + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + Maxresults *int32 + + // Filters the results to return only containers whose name begins with the specified prefix. + Prefix *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ServiceClientSetPropertiesOptions contains the optional parameters for the ServiceClient.SetProperties method. +type ServiceClientSetPropertiesOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ServiceClientSubmitBatchOptions contains the optional parameters for the ServiceClient.SubmitBatch method. +type ServiceClientSubmitBatchOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL method. +type SourceModifiedAccessConditions struct { + // Specify an ETag value to operate only on blobs with a matching value. + SourceIfMatch *azcore.ETag + + // Specify this header value to operate only on a blob if it has been modified since the specified date/time. + SourceIfModifiedSince *time.Time + + // Specify an ETag value to operate only on blobs without a matching value. + SourceIfNoneMatch *azcore.ETag + + // Specify a SQL where clause on blob tags to operate only on blobs with a matching value. + SourceIfTags *string + + // Specify this header value to operate only on a blob if it has not been modified since the specified date/time. 
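
The Marker/Maxresults pair above implements server-side continuation; the public SDK hides it behind a pager, so callers never touch NextMarker directly. A minimal sketch (connection string and prefix are placeholders, not part of the patch):

    package main

    import (
    	"context"
    	"fmt"
    	"log"

    	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
    	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
    )

    func main() {
    	client, err := azblob.NewClientFromConnectionString("<connection-string>", nil)
    	if err != nil {
    		log.Fatal(err)
    	}
    	// The pager re-issues the request with the returned NextMarker until exhausted.
    	pager := client.NewListContainersPager(&azblob.ListContainersOptions{Prefix: to.Ptr("log")})
    	for pager.More() {
    		page, err := pager.NextPage(context.Background())
    		if err != nil {
    			log.Fatal(err)
    		}
    		for _, item := range page.ContainerItems {
    			fmt.Println(*item.Name)
    		}
    	}
    }
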
+ SourceIfUnmodifiedSince *time.Time +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_pageblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_pageblob_client.go index b41644c99f1..bfa9883f5ce 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_pageblob_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_pageblob_client.go @@ -3,9 +3,8 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. // Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. package generated @@ -41,18 +40,21 @@ type PageBlobClient struct { // method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *PageBlobClient) ClearPages(ctx context.Context, contentLength int64, options *PageBlobClientClearPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientClearPagesResponse, error) { + var err error req, err := client.clearPagesCreateRequest(ctx, contentLength, options, leaseAccessConditions, cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions) if err != nil { return PageBlobClientClearPagesResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return PageBlobClientClearPagesResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return PageBlobClientClearPagesResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return PageBlobClientClearPagesResponse{}, err } - return client.clearPagesHandleResponse(resp) + resp, err := client.clearPagesHandleResponse(httpResp) + return resp, err } // clearPagesCreateRequest creates the ClearPages request. @@ -122,15 +124,22 @@ func (client *PageBlobClient) clearPagesCreateRequest(ctx context.Context, conte // clearPagesHandleResponse handles the ClearPages response. 
func (client *PageBlobClient) clearPagesHandleResponse(resp *http.Response) (PageBlobClientClearPagesResponse, error) { result := PageBlobClientClearPagesResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return PageBlobClientClearPagesResponse{}, err + } + result.BlobSequenceNumber = &blobSequenceNumber } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + contentCRC64, err := base64.StdEncoding.DecodeString(val) if err != nil { return PageBlobClientClearPagesResponse{}, err } - result.LastModified = &lastModified + result.ContentCRC64 = contentCRC64 } if val := resp.Header.Get("Content-MD5"); val != "" { contentMD5, err := base64.StdEncoding.DecodeString(val) @@ -139,22 +148,22 @@ func (client *PageBlobClient) clearPagesHandleResponse(resp *http.Response) (Pag } result.ContentMD5 = contentMD5 } - if val := resp.Header.Get("x-ms-content-crc64"); val != "" { - contentCRC64, err := base64.StdEncoding.DecodeString(val) + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) if err != nil { return PageBlobClientClearPagesResponse{}, err } - result.ContentCRC64 = contentCRC64 + result.Date = &date } - if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { - blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) if err != nil { return PageBlobClientClearPagesResponse{}, err } - result.BlobSequenceNumber = &blobSequenceNumber - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val + result.LastModified = &lastModified } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val @@ -162,13 +171,6 @@ func (client *PageBlobClient) clearPagesHandleResponse(resp *http.Response) (Pag if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return PageBlobClientClearPagesResponse{}, err - } - result.Date = &date - } return result, nil } @@ -187,18 +189,21 @@ func (client *PageBlobClient) clearPagesHandleResponse(resp *http.Response) (Pag // method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
func (client *PageBlobClient) CopyIncremental(ctx context.Context, copySource string, options *PageBlobClientCopyIncrementalOptions, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientCopyIncrementalResponse, error) { + var err error req, err := client.copyIncrementalCreateRequest(ctx, copySource, options, modifiedAccessConditions) if err != nil { return PageBlobClientCopyIncrementalResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return PageBlobClientCopyIncrementalResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusAccepted) { - return PageBlobClientCopyIncrementalResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return PageBlobClientCopyIncrementalResponse{}, err } - return client.copyIncrementalHandleResponse(resp) + resp, err := client.copyIncrementalHandleResponse(httpResp) + return resp, err } // copyIncrementalCreateRequest creates the CopyIncremental request. @@ -240,6 +245,22 @@ func (client *PageBlobClient) copyIncrementalCreateRequest(ctx context.Context, // copyIncrementalHandleResponse handles the CopyIncremental response. func (client *PageBlobClient) copyIncrementalHandleResponse(resp *http.Response) (PageBlobClientCopyIncrementalResponse, error) { result := PageBlobClientCopyIncrementalResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-copy-id"); val != "" { + result.CopyID = &val + } + if val := resp.Header.Get("x-ms-copy-status"); val != "" { + result.CopyStatus = (*CopyStatusType)(&val) + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientCopyIncrementalResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -250,28 +271,12 @@ func (client *PageBlobClient) copyIncrementalHandleResponse(resp *http.Response) } result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return PageBlobClientCopyIncrementalResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("x-ms-copy-id"); val != "" { - result.CopyID = &val - } - if val := resp.Header.Get("x-ms-copy-status"); val != "" { - result.CopyStatus = (*CopyStatusType)(&val) - } return result, nil } @@ -289,18 +294,21 @@ func (client *PageBlobClient) copyIncrementalHandleResponse(resp *http.Response) // - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
func (client *PageBlobClient) Create(ctx context.Context, contentLength int64, blobContentLength int64, options *PageBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientCreateResponse, error) { + var err error req, err := client.createCreateRequest(ctx, contentLength, blobContentLength, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) if err != nil { return PageBlobClientCreateResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return PageBlobClientCreateResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return PageBlobClientCreateResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return PageBlobClientCreateResponse{}, err } - return client.createHandleResponse(resp) + resp, err := client.createHandleResponse(httpResp) + return resp, err } // createCreateRequest creates the Create request. @@ -401,15 +409,8 @@ func (client *PageBlobClient) createCreateRequest(ctx context.Context, contentLe // createHandleResponse handles the Create response. func (client *PageBlobClient) createHandleResponse(resp *http.Response) (PageBlobClientCreateResponse, error) { result := PageBlobClientCreateResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return PageBlobClientCreateResponse{}, err - } - result.LastModified = &lastModified + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val } if val := resp.Header.Get("Content-MD5"); val != "" { contentMD5, err := base64.StdEncoding.DecodeString(val) @@ -418,18 +419,6 @@ func (client *PageBlobClient) createHandleResponse(resp *http.Response) (PageBlo } result.ContentMD5 = contentMD5 } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("x-ms-version-id"); val != "" { - result.VersionID = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -437,6 +426,15 @@ func (client *PageBlobClient) createHandleResponse(resp *http.Response) (PageBlo } result.Date = &date } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { isServerEncrypted, err := strconv.ParseBool(val) if err != nil { @@ -444,11 +442,21 @@ func (client *PageBlobClient) createHandleResponse(resp *http.Response) (PageBlo } result.IsServerEncrypted = &isServerEncrypted } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val + if val := resp.Header.Get("Last-Modified"); val != "" { + 
lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientCreateResponse{}, err + } + result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val } return result, nil } @@ -467,23 +475,16 @@ func (client *PageBlobClient) NewGetPageRangesPager(options *PageBlobClientGetPa return page.NextMarker != nil && len(*page.NextMarker) > 0 }, Fetcher: func(ctx context.Context, page *PageBlobClientGetPageRangesResponse) (PageBlobClientGetPageRangesResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.GetPageRangesCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextMarker) - } - if err != nil { - return PageBlobClientGetPageRangesResponse{}, err + nextLink := "" + if page != nil { + nextLink = *page.NextMarker } - resp, err := client.internal.Pipeline().Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.GetPageRangesCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) + }, nil) if err != nil { return PageBlobClientGetPageRangesResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return PageBlobClientGetPageRangesResponse{}, runtime.NewResponseError(resp) - } return client.GetPageRangesHandleResponse(resp) }, }) @@ -542,16 +543,6 @@ func (client *PageBlobClient) GetPageRangesCreateRequest(ctx context.Context, op // GetPageRangesHandleResponse handles the GetPageRanges response. 
func (client *PageBlobClient) GetPageRangesHandleResponse(resp *http.Response) (PageBlobClientGetPageRangesResponse, error) { result := PageBlobClientGetPageRangesResponse{} - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return PageBlobClientGetPageRangesResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } if val := resp.Header.Get("x-ms-blob-content-length"); val != "" { blobContentLength, err := strconv.ParseInt(val, 10, 64) if err != nil { @@ -562,12 +553,6 @@ func (client *PageBlobClient) GetPageRangesHandleResponse(resp *http.Response) ( if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -575,6 +560,22 @@ func (client *PageBlobClient) GetPageRangesHandleResponse(resp *http.Response) ( } result.Date = &date } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientGetPageRangesResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } if err := runtime.UnmarshalAsXML(resp, &result.PageList); err != nil { return PageBlobClientGetPageRangesResponse{}, err } @@ -595,23 +596,16 @@ func (client *PageBlobClient) NewGetPageRangesDiffPager(options *PageBlobClientG return page.NextMarker != nil && len(*page.NextMarker) > 0 }, Fetcher: func(ctx context.Context, page *PageBlobClientGetPageRangesDiffResponse) (PageBlobClientGetPageRangesDiffResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.GetPageRangesDiffCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextMarker) + nextLink := "" + if page != nil { + nextLink = *page.NextMarker } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.GetPageRangesDiffCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) + }, nil) if err != nil { return PageBlobClientGetPageRangesDiffResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) - if err != nil { - return PageBlobClientGetPageRangesDiffResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return PageBlobClientGetPageRangesDiffResponse{}, runtime.NewResponseError(resp) - } return client.GetPageRangesDiffHandleResponse(resp) }, }) @@ -676,16 +670,6 @@ func (client *PageBlobClient) GetPageRangesDiffCreateRequest(ctx context.Context // GetPageRangesDiffHandleResponse handles the GetPageRangesDiff response. 
func (client *PageBlobClient) GetPageRangesDiffHandleResponse(resp *http.Response) (PageBlobClientGetPageRangesDiffResponse, error) { result := PageBlobClientGetPageRangesDiffResponse{} - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return PageBlobClientGetPageRangesDiffResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } if val := resp.Header.Get("x-ms-blob-content-length"); val != "" { blobContentLength, err := strconv.ParseInt(val, 10, 64) if err != nil { @@ -696,12 +680,6 @@ func (client *PageBlobClient) GetPageRangesDiffHandleResponse(resp *http.Respons if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -709,6 +687,22 @@ func (client *PageBlobClient) GetPageRangesDiffHandleResponse(resp *http.Respons } result.Date = &date } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientGetPageRangesDiffResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } if err := runtime.UnmarshalAsXML(resp, &result.PageList); err != nil { return PageBlobClientGetPageRangesDiffResponse{}, err } @@ -727,18 +721,21 @@ func (client *PageBlobClient) GetPageRangesDiffHandleResponse(resp *http.Respons // - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *PageBlobClient) Resize(ctx context.Context, blobContentLength int64, options *PageBlobClientResizeOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientResizeResponse, error) { + var err error req, err := client.resizeCreateRequest(ctx, blobContentLength, options, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) if err != nil { return PageBlobClientResizeResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return PageBlobClientResizeResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return PageBlobClientResizeResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return PageBlobClientResizeResponse{}, err } - return client.resizeHandleResponse(resp) + resp, err := client.resizeHandleResponse(httpResp) + return resp, err } // resizeCreateRequest creates the Resize request. @@ -795,16 +792,6 @@ func (client *PageBlobClient) resizeCreateRequest(ctx context.Context, blobConte // resizeHandleResponse handles the Resize response. 
func (client *PageBlobClient) resizeHandleResponse(resp *http.Response) (PageBlobClientResizeResponse, error) { result := PageBlobClientResizeResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return PageBlobClientResizeResponse{}, err - } - result.LastModified = &lastModified - } if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) if err != nil { @@ -815,12 +802,6 @@ func (client *PageBlobClient) resizeHandleResponse(resp *http.Response) (PageBlo if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -828,6 +809,22 @@ func (client *PageBlobClient) resizeHandleResponse(resp *http.Response) (PageBlo } result.Date = &date } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientResizeResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } @@ -842,18 +839,21 @@ func (client *PageBlobClient) resizeHandleResponse(resp *http.Response) (PageBlo // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *PageBlobClient) UpdateSequenceNumber(ctx context.Context, sequenceNumberAction SequenceNumberActionType, options *PageBlobClientUpdateSequenceNumberOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientUpdateSequenceNumberResponse, error) { + var err error req, err := client.updateSequenceNumberCreateRequest(ctx, sequenceNumberAction, options, leaseAccessConditions, modifiedAccessConditions) if err != nil { return PageBlobClientUpdateSequenceNumberResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return PageBlobClientUpdateSequenceNumberResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return PageBlobClientUpdateSequenceNumberResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return PageBlobClientUpdateSequenceNumberResponse{}, err } - return client.updateSequenceNumberHandleResponse(resp) + resp, err := client.updateSequenceNumberHandleResponse(httpResp) + return resp, err } // updateSequenceNumberCreateRequest creates the UpdateSequenceNumber request. @@ -901,16 +901,6 @@ func (client *PageBlobClient) updateSequenceNumberCreateRequest(ctx context.Cont // updateSequenceNumberHandleResponse handles the UpdateSequenceNumber response. 
func (client *PageBlobClient) updateSequenceNumberHandleResponse(resp *http.Response) (PageBlobClientUpdateSequenceNumberResponse, error) { result := PageBlobClientUpdateSequenceNumberResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return PageBlobClientUpdateSequenceNumberResponse{}, err - } - result.LastModified = &lastModified - } if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) if err != nil { @@ -921,12 +911,6 @@ func (client *PageBlobClient) updateSequenceNumberHandleResponse(resp *http.Resp if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -934,6 +918,22 @@ func (client *PageBlobClient) updateSequenceNumberHandleResponse(resp *http.Resp } result.Date = &date } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientUpdateSequenceNumberResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } @@ -951,18 +951,21 @@ func (client *PageBlobClient) updateSequenceNumberHandleResponse(resp *http.Resp // method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *PageBlobClient) UploadPages(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *PageBlobClientUploadPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientUploadPagesResponse, error) { + var err error req, err := client.uploadPagesCreateRequest(ctx, contentLength, body, options, leaseAccessConditions, cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions) if err != nil { return PageBlobClientUploadPagesResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return PageBlobClientUploadPagesResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return PageBlobClientUploadPagesResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return PageBlobClientUploadPagesResponse{}, err } - return client.uploadPagesHandleResponse(resp) + resp, err := client.uploadPagesHandleResponse(httpResp) + return resp, err } // uploadPagesCreateRequest creates the UploadPages request. @@ -1041,22 +1044,15 @@ func (client *PageBlobClient) uploadPagesCreateRequest(ctx context.Context, cont // uploadPagesHandleResponse handles the UploadPages response. 
func (client *PageBlobClient) uploadPagesHandleResponse(resp *http.Response) (PageBlobClientUploadPagesResponse, error) { result := PageBlobClientUploadPagesResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) if err != nil { return PageBlobClientUploadPagesResponse{}, err } - result.LastModified = &lastModified + result.BlobSequenceNumber = &blobSequenceNumber } - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return PageBlobClientUploadPagesResponse{}, err - } - result.ContentMD5 = contentMD5 + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val } if val := resp.Header.Get("x-ms-content-crc64"); val != "" { contentCRC64, err := base64.StdEncoding.DecodeString(val) @@ -1065,21 +1061,12 @@ func (client *PageBlobClient) uploadPagesHandleResponse(resp *http.Response) (Pa } result.ContentCRC64 = contentCRC64 } - if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { - blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) if err != nil { return PageBlobClientUploadPagesResponse{}, err } - result.BlobSequenceNumber = &blobSequenceNumber - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val + result.ContentMD5 = contentMD5 } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) @@ -1088,6 +1075,15 @@ func (client *PageBlobClient) uploadPagesHandleResponse(resp *http.Response) (Pa } result.Date = &date } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { isServerEncrypted, err := strconv.ParseBool(val) if err != nil { @@ -1095,11 +1091,18 @@ func (client *PageBlobClient) uploadPagesHandleResponse(resp *http.Response) (Pa } result.IsServerEncrypted = &isServerEncrypted } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientUploadPagesResponse{}, err + } + result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val } return result, nil } @@ -1126,18 +1129,21 @@ func (client *PageBlobClient) uploadPagesHandleResponse(resp *http.Response) (Pa // - SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for 
the BlobClient.StartCopyFromURL // method. func (client *PageBlobClient) UploadPagesFromURL(ctx context.Context, sourceURL string, sourceRange string, contentLength int64, rangeParam string, options *PageBlobClientUploadPagesFromURLOptions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, leaseAccessConditions *LeaseAccessConditions, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (PageBlobClientUploadPagesFromURLResponse, error) { + var err error req, err := client.uploadPagesFromURLCreateRequest(ctx, sourceURL, sourceRange, contentLength, rangeParam, options, cpkInfo, cpkScopeInfo, leaseAccessConditions, sequenceNumberAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions) if err != nil { return PageBlobClientUploadPagesFromURLResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return PageBlobClientUploadPagesFromURLResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return PageBlobClientUploadPagesFromURLResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return PageBlobClientUploadPagesFromURLResponse{}, err } - return client.uploadPagesFromURLHandleResponse(resp) + resp, err := client.uploadPagesFromURLHandleResponse(httpResp) + return resp, err } // uploadPagesFromURLCreateRequest creates the UploadPagesFromURL request. @@ -1228,22 +1234,12 @@ func (client *PageBlobClient) uploadPagesFromURLCreateRequest(ctx context.Contex // uploadPagesFromURLHandleResponse handles the UploadPagesFromURL response. 
func (client *PageBlobClient) uploadPagesFromURLHandleResponse(resp *http.Response) (PageBlobClientUploadPagesFromURLResponse, error) { result := PageBlobClientUploadPagesFromURLResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return PageBlobClientUploadPagesFromURLResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) if err != nil { return PageBlobClientUploadPagesFromURLResponse{}, err } - result.ContentMD5 = contentMD5 + result.BlobSequenceNumber = &blobSequenceNumber } if val := resp.Header.Get("x-ms-content-crc64"); val != "" { contentCRC64, err := base64.StdEncoding.DecodeString(val) @@ -1252,18 +1248,12 @@ func (client *PageBlobClient) uploadPagesFromURLHandleResponse(resp *http.Respon } result.ContentCRC64 = contentCRC64 } - if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { - blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) if err != nil { return PageBlobClientUploadPagesFromURLResponse{}, err } - result.BlobSequenceNumber = &blobSequenceNumber - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val + result.ContentMD5 = contentMD5 } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) @@ -1272,6 +1262,15 @@ func (client *PageBlobClient) uploadPagesFromURLHandleResponse(resp *http.Respon } result.Date = &date } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { isServerEncrypted, err := strconv.ParseBool(val) if err != nil { @@ -1279,11 +1278,18 @@ func (client *PageBlobClient) uploadPagesFromURLHandleResponse(resp *http.Respon } result.IsServerEncrypted = &isServerEncrypted } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientUploadPagesFromURLResponse{}, err + } + result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val } return result, nil } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_response_types.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_response_types.go index b52664c938e..738d23c8f19 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_response_types.go 
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_response_types.go @@ -3,9 +3,8 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. // Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. package generated @@ -656,18 +655,20 @@ type BlobClientGetPropertiesResponse struct { // BlobClientGetTagsResponse contains the response from method BlobClient.GetTags. type BlobClientGetTagsResponse struct { + // Blob tags BlobTags + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string `xml:"ClientRequestID"` + ClientRequestID *string // Date contains the information returned from the Date header response. - Date *time.Time `xml:"Date"` + Date *time.Time // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string `xml:"RequestID"` + RequestID *string // Version contains the information returned from the x-ms-version header response. - Version *string `xml:"Version"` + Version *string } // BlobClientQueryResponse contains the response from method BlobClient.Query. @@ -1051,29 +1052,30 @@ type BlockBlobClientCommitBlockListResponse struct { // BlockBlobClientGetBlockListResponse contains the response from method BlockBlobClient.GetBlockList. type BlockBlobClientGetBlockListResponse struct { BlockList + // BlobContentLength contains the information returned from the x-ms-blob-content-length header response. - BlobContentLength *int64 `xml:"BlobContentLength"` + BlobContentLength *int64 // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string `xml:"ClientRequestID"` + ClientRequestID *string // ContentType contains the information returned from the Content-Type header response. - ContentType *string `xml:"ContentType"` + ContentType *string // Date contains the information returned from the Date header response. - Date *time.Time `xml:"Date"` + Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag `xml:"ETag"` + ETag *azcore.ETag // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time `xml:"LastModified"` + LastModified *time.Time // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string `xml:"RequestID"` + RequestID *string // Version contains the information returned from the x-ms-version header response. - Version *string `xml:"Version"` + Version *string } // BlockBlobClientPutBlobFromURLResponse contains the response from method BlockBlobClient.PutBlobFromURL. @@ -1318,45 +1320,47 @@ type ContainerClientDeleteResponse struct { // ContainerClientFilterBlobsResponse contains the response from method ContainerClient.FilterBlobs. type ContainerClientFilterBlobsResponse struct { + // The result of a Filter Blobs API call FilterBlobSegment + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string `xml:"ClientRequestID"` + ClientRequestID *string // Date contains the information returned from the Date header response. 
- Date *time.Time `xml:"Date"` + Date *time.Time // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string `xml:"RequestID"` + RequestID *string // Version contains the information returned from the x-ms-version header response. - Version *string `xml:"Version"` + Version *string } // ContainerClientGetAccessPolicyResponse contains the response from method ContainerClient.GetAccessPolicy. type ContainerClientGetAccessPolicyResponse struct { // BlobPublicAccess contains the information returned from the x-ms-blob-public-access header response. - BlobPublicAccess *PublicAccessType `xml:"BlobPublicAccess"` + BlobPublicAccess *PublicAccessType // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string `xml:"ClientRequestID"` + ClientRequestID *string // Date contains the information returned from the Date header response. - Date *time.Time `xml:"Date"` + Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag `xml:"ETag"` + ETag *azcore.ETag // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time `xml:"LastModified"` + LastModified *time.Time // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string `xml:"RequestID"` + RequestID *string // a collection of signed identifiers SignedIdentifiers []*SignedIdentifier `xml:"SignedIdentifier"` // Version contains the information returned from the x-ms-version header response. - Version *string `xml:"Version"` + Version *string } // ContainerClientGetAccountInfoResponse contains the response from method ContainerClient.GetAccountInfo. @@ -1434,40 +1438,44 @@ type ContainerClientGetPropertiesResponse struct { // ContainerClientListBlobFlatSegmentResponse contains the response from method ContainerClient.NewListBlobFlatSegmentPager. type ContainerClientListBlobFlatSegmentResponse struct { + // An enumeration of blobs ListBlobsFlatSegmentResponse + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string `xml:"ClientRequestID"` + ClientRequestID *string // ContentType contains the information returned from the Content-Type header response. - ContentType *string `xml:"ContentType"` + ContentType *string // Date contains the information returned from the Date header response. - Date *time.Time `xml:"Date"` + Date *time.Time // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string `xml:"RequestID"` + RequestID *string // Version contains the information returned from the x-ms-version header response. - Version *string `xml:"Version"` + Version *string } // ContainerClientListBlobHierarchySegmentResponse contains the response from method ContainerClient.NewListBlobHierarchySegmentPager. type ContainerClientListBlobHierarchySegmentResponse struct { + // An enumeration of blobs ListBlobsHierarchySegmentResponse + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string `xml:"ClientRequestID"` + ClientRequestID *string // ContentType contains the information returned from the Content-Type header response. - ContentType *string `xml:"ContentType"` + ContentType *string // Date contains the information returned from the Date header response. 
- Date *time.Time `xml:"Date"` + Date *time.Time // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string `xml:"RequestID"` + RequestID *string // Version contains the information returned from the x-ms-version header response. - Version *string `xml:"Version"` + Version *string } // ContainerClientReleaseLeaseResponse contains the response from method ContainerClient.ReleaseLease. @@ -1697,52 +1705,56 @@ type PageBlobClientCreateResponse struct { // PageBlobClientGetPageRangesDiffResponse contains the response from method PageBlobClient.NewGetPageRangesDiffPager. type PageBlobClientGetPageRangesDiffResponse struct { + // the list of pages PageList + // BlobContentLength contains the information returned from the x-ms-blob-content-length header response. - BlobContentLength *int64 `xml:"BlobContentLength"` + BlobContentLength *int64 // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string `xml:"ClientRequestID"` + ClientRequestID *string // Date contains the information returned from the Date header response. - Date *time.Time `xml:"Date"` + Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag `xml:"ETag"` + ETag *azcore.ETag // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time `xml:"LastModified"` + LastModified *time.Time // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string `xml:"RequestID"` + RequestID *string // Version contains the information returned from the x-ms-version header response. - Version *string `xml:"Version"` + Version *string } // PageBlobClientGetPageRangesResponse contains the response from method PageBlobClient.NewGetPageRangesPager. type PageBlobClientGetPageRangesResponse struct { + // the list of pages PageList + // BlobContentLength contains the information returned from the x-ms-blob-content-length header response. - BlobContentLength *int64 `xml:"BlobContentLength"` + BlobContentLength *int64 // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string `xml:"ClientRequestID"` + ClientRequestID *string // Date contains the information returned from the Date header response. - Date *time.Time `xml:"Date"` + Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag `xml:"ETag"` + ETag *azcore.ETag // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time `xml:"LastModified"` + LastModified *time.Time // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string `xml:"RequestID"` + RequestID *string // Version contains the information returned from the x-ms-version header response. - Version *string `xml:"Version"` + Version *string } // PageBlobClientResizeResponse contains the response from method PageBlobClient.Resize. @@ -1870,18 +1882,20 @@ type PageBlobClientUploadPagesResponse struct { // ServiceClientFilterBlobsResponse contains the response from method ServiceClient.FilterBlobs. type ServiceClientFilterBlobsResponse struct { + // The result of a Filter Blobs API call FilterBlobSegment + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. 
- ClientRequestID *string `xml:"ClientRequestID"` + ClientRequestID *string // Date contains the information returned from the Date header response. - Date *time.Time `xml:"Date"` + Date *time.Time // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string `xml:"RequestID"` + RequestID *string // Version contains the information returned from the x-ms-version header response. - Version *string `xml:"Version"` + Version *string } // ServiceClientGetAccountInfoResponse contains the response from method ServiceClient.GetAccountInfo. @@ -1910,60 +1924,68 @@ type ServiceClientGetAccountInfoResponse struct { // ServiceClientGetPropertiesResponse contains the response from method ServiceClient.GetProperties. type ServiceClientGetPropertiesResponse struct { + // Storage Service Properties. StorageServiceProperties + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string `xml:"ClientRequestID"` + ClientRequestID *string // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string `xml:"RequestID"` + RequestID *string // Version contains the information returned from the x-ms-version header response. - Version *string `xml:"Version"` + Version *string } // ServiceClientGetStatisticsResponse contains the response from method ServiceClient.GetStatistics. type ServiceClientGetStatisticsResponse struct { + // Stats for the storage service. StorageServiceStats + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string `xml:"ClientRequestID"` + ClientRequestID *string // Date contains the information returned from the Date header response. - Date *time.Time `xml:"Date"` + Date *time.Time // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string `xml:"RequestID"` + RequestID *string // Version contains the information returned from the x-ms-version header response. - Version *string `xml:"Version"` + Version *string } // ServiceClientGetUserDelegationKeyResponse contains the response from method ServiceClient.GetUserDelegationKey. type ServiceClientGetUserDelegationKeyResponse struct { + // A user delegation key UserDelegationKey + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string `xml:"ClientRequestID"` + ClientRequestID *string // Date contains the information returned from the Date header response. - Date *time.Time `xml:"Date"` + Date *time.Time // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string `xml:"RequestID"` + RequestID *string // Version contains the information returned from the x-ms-version header response. - Version *string `xml:"Version"` + Version *string } // ServiceClientListContainersSegmentResponse contains the response from method ServiceClient.NewListContainersSegmentPager. type ServiceClientListContainersSegmentResponse struct { + // An enumeration of containers ListContainersSegmentResponse + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string `xml:"ClientRequestID"` + ClientRequestID *string // RequestID contains the information returned from the x-ms-request-id header response. 
- RequestID *string `xml:"RequestID"` + RequestID *string // Version contains the information returned from the x-ms-version header response. - Version *string `xml:"Version"` + Version *string } // ServiceClientSetPropertiesResponse contains the response from method ServiceClient.SetProperties. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_service_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_service_client.go index faeefdc5322..9a73b7301b1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_service_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_service_client.go @@ -3,9 +3,8 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. // Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. package generated @@ -38,18 +37,21 @@ type ServiceClient struct { // - where - Filters the results to return only blobs whose tags match the specified expression. // - options - ServiceClientFilterBlobsOptions contains the optional parameters for the ServiceClient.FilterBlobs method. func (client *ServiceClient) FilterBlobs(ctx context.Context, where string, options *ServiceClientFilterBlobsOptions) (ServiceClientFilterBlobsResponse, error) { + var err error req, err := client.filterBlobsCreateRequest(ctx, where, options) if err != nil { return ServiceClientFilterBlobsResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ServiceClientFilterBlobsResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ServiceClientFilterBlobsResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ServiceClientFilterBlobsResponse{}, err } - return client.filterBlobsHandleResponse(resp) + resp, err := client.filterBlobsHandleResponse(httpResp) + return resp, err } // filterBlobsCreateRequest creates the FilterBlobs request.
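For context on the operation regenerated above: FilterBlobs backs the Find Blobs by Tags REST API, and the where parameter takes a tag-filter expression. A minimal usage sketch, assuming the generated models expose FilterBlobSegment.Blobs with a Name field as in previous releases; the findTagged helper name is illustrative and not part of the patch:

// Usage sketch (inside package generated; assumes imports "context" and "fmt").
func findTagged(ctx context.Context, client *ServiceClient) error {
	// Expression syntax per the Find Blobs by Tags REST API; values illustrative.
	where := `"env" = 'prod' AND "team" = 'ingest'`
	resp, err := client.FilterBlobs(ctx, where, nil)
	if err != nil {
		return err
	}
	// Field names assumed from the generated models (FilterBlobItem.Name).
	for _, item := range resp.FilterBlobSegment.Blobs {
		fmt.Println(*item.Name)
	}
	return nil
}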
@@ -88,12 +90,6 @@ func (client *ServiceClient) filterBlobsHandleResponse(resp *http.Response) (Ser if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -101,6 +97,12 @@ func (client *ServiceClient) filterBlobsHandleResponse(resp *http.Response) (Ser } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } if err := runtime.UnmarshalAsXML(resp, &result.FilterBlobSegment); err != nil { return ServiceClientFilterBlobsResponse{}, err } @@ -113,18 +115,21 @@ func (client *ServiceClient) filterBlobsHandleResponse(resp *http.Response) (Ser // Generated from API version 2023-08-03 // - options - ServiceClientGetAccountInfoOptions contains the optional parameters for the ServiceClient.GetAccountInfo method. func (client *ServiceClient) GetAccountInfo(ctx context.Context, options *ServiceClientGetAccountInfoOptions) (ServiceClientGetAccountInfoResponse, error) { + var err error req, err := client.getAccountInfoCreateRequest(ctx, options) if err != nil { return ServiceClientGetAccountInfoResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ServiceClientGetAccountInfoResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ServiceClientGetAccountInfoResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ServiceClientGetAccountInfoResponse{}, err } - return client.getAccountInfoHandleResponse(resp) + resp, err := client.getAccountInfoHandleResponse(httpResp) + return resp, err } // getAccountInfoCreateRequest creates the GetAccountInfo request. @@ -145,15 +150,12 @@ func (client *ServiceClient) getAccountInfoCreateRequest(ctx context.Context, op // getAccountInfoHandleResponse handles the GetAccountInfo response. 
func (client *ServiceClient) getAccountInfoHandleResponse(resp *http.Response) (ServiceClientGetAccountInfoResponse, error) { result := ServiceClientGetAccountInfoResponse{} + if val := resp.Header.Get("x-ms-account-kind"); val != "" { + result.AccountKind = (*AccountKind)(&val) + } if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -161,12 +163,6 @@ func (client *ServiceClient) getAccountInfoHandleResponse(resp *http.Response) ( } result.Date = &date } - if val := resp.Header.Get("x-ms-sku-name"); val != "" { - result.SKUName = (*SKUName)(&val) - } - if val := resp.Header.Get("x-ms-account-kind"); val != "" { - result.AccountKind = (*AccountKind)(&val) - } if val := resp.Header.Get("x-ms-is-hns-enabled"); val != "" { isHierarchicalNamespaceEnabled, err := strconv.ParseBool(val) if err != nil { @@ -174,6 +170,15 @@ func (client *ServiceClient) getAccountInfoHandleResponse(resp *http.Response) ( } result.IsHierarchicalNamespaceEnabled = &isHierarchicalNamespaceEnabled } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-sku-name"); val != "" { + result.SKUName = (*SKUName)(&val) + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } @@ -184,18 +189,21 @@ func (client *ServiceClient) getAccountInfoHandleResponse(resp *http.Response) ( // Generated from API version 2023-08-03 // - options - ServiceClientGetPropertiesOptions contains the optional parameters for the ServiceClient.GetProperties method. func (client *ServiceClient) GetProperties(ctx context.Context, options *ServiceClientGetPropertiesOptions) (ServiceClientGetPropertiesResponse, error) { + var err error req, err := client.getPropertiesCreateRequest(ctx, options) if err != nil { return ServiceClientGetPropertiesResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ServiceClientGetPropertiesResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ServiceClientGetPropertiesResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ServiceClientGetPropertiesResponse{}, err } - return client.getPropertiesHandleResponse(resp) + resp, err := client.getPropertiesHandleResponse(httpResp) + return resp, err } // getPropertiesCreateRequest creates the GetProperties request. @@ -244,18 +252,21 @@ func (client *ServiceClient) getPropertiesHandleResponse(resp *http.Response) (S // Generated from API version 2023-08-03 // - options - ServiceClientGetStatisticsOptions contains the optional parameters for the ServiceClient.GetStatistics method. 
func (client *ServiceClient) GetStatistics(ctx context.Context, options *ServiceClientGetStatisticsOptions) (ServiceClientGetStatisticsResponse, error) { + var err error req, err := client.getStatisticsCreateRequest(ctx, options) if err != nil { return ServiceClientGetStatisticsResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ServiceClientGetStatisticsResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ServiceClientGetStatisticsResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ServiceClientGetStatisticsResponse{}, err } - return client.getStatisticsHandleResponse(resp) + resp, err := client.getStatisticsHandleResponse(httpResp) + return resp, err } // getStatisticsCreateRequest creates the GetStatistics request. @@ -285,12 +296,6 @@ func (client *ServiceClient) getStatisticsHandleResponse(resp *http.Response) (S if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -298,6 +303,12 @@ func (client *ServiceClient) getStatisticsHandleResponse(resp *http.Response) (S } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } if err := runtime.UnmarshalAsXML(resp, &result.StorageServiceStats); err != nil { return ServiceClientGetStatisticsResponse{}, err } @@ -313,18 +324,21 @@ func (client *ServiceClient) getStatisticsHandleResponse(resp *http.Response) (S // - options - ServiceClientGetUserDelegationKeyOptions contains the optional parameters for the ServiceClient.GetUserDelegationKey // method. func (client *ServiceClient) GetUserDelegationKey(ctx context.Context, keyInfo KeyInfo, options *ServiceClientGetUserDelegationKeyOptions) (ServiceClientGetUserDelegationKeyResponse, error) { + var err error req, err := client.getUserDelegationKeyCreateRequest(ctx, keyInfo, options) if err != nil { return ServiceClientGetUserDelegationKeyResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ServiceClientGetUserDelegationKeyResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ServiceClientGetUserDelegationKeyResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ServiceClientGetUserDelegationKeyResponse{}, err } - return client.getUserDelegationKeyHandleResponse(resp) + resp, err := client.getUserDelegationKeyHandleResponse(httpResp) + return resp, err } // getUserDelegationKeyCreateRequest creates the GetUserDelegationKey request. 
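Every operation in this file, like the blob clients earlier in the patch, is regenerated to the same shape: one named err, the raw response held in httpResp, and the response error assigned to err before returning. The reshuffling inside the *HandleResponse functions only re-sorts header handling alphabetically and is behavior-preserving. A condensed, behavior-equivalent sketch of the new shape (the doOp name is hypothetical; everything else mirrors GetStatistics above):

// Condensed sketch of the regenerated operation shape (inside package generated).
func (client *ServiceClient) doOp(ctx context.Context, options *ServiceClientGetStatisticsOptions) (ServiceClientGetStatisticsResponse, error) {
	// One named error, presumably so deferred hooks in later generator output can observe it.
	var err error
	req, err := client.getStatisticsCreateRequest(ctx, options)
	if err != nil {
		return ServiceClientGetStatisticsResponse{}, err
	}
	httpResp, err := client.internal.Pipeline().Do(req)
	if err != nil {
		return ServiceClientGetStatisticsResponse{}, err
	}
	if !runtime.HasStatusCode(httpResp, http.StatusOK) {
		// The error is built from the raw response and assigned before returning.
		err = runtime.NewResponseError(httpResp)
		return ServiceClientGetStatisticsResponse{}, err
	}
	resp, err := client.getStatisticsHandleResponse(httpResp)
	return resp, err
}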
@@ -357,12 +371,6 @@ func (client *ServiceClient) getUserDelegationKeyHandleResponse(resp *http.Respo if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -370,6 +378,12 @@ func (client *ServiceClient) getUserDelegationKeyHandleResponse(resp *http.Respo } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } if err := runtime.UnmarshalAsXML(resp, &result.UserDelegationKey); err != nil { return ServiceClientGetUserDelegationKeyResponse{}, err } @@ -441,18 +455,21 @@ func (client *ServiceClient) ListContainersSegmentHandleResponse(resp *http.Resp // - storageServiceProperties - The StorageService properties. // - options - ServiceClientSetPropertiesOptions contains the optional parameters for the ServiceClient.SetProperties method. func (client *ServiceClient) SetProperties(ctx context.Context, storageServiceProperties StorageServiceProperties, options *ServiceClientSetPropertiesOptions) (ServiceClientSetPropertiesResponse, error) { + var err error req, err := client.setPropertiesCreateRequest(ctx, storageServiceProperties, options) if err != nil { return ServiceClientSetPropertiesResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ServiceClientSetPropertiesResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusAccepted) { - return ServiceClientSetPropertiesResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return ServiceClientSetPropertiesResponse{}, err } - return client.setPropertiesHandleResponse(resp) + resp, err := client.setPropertiesHandleResponse(httpResp) + return resp, err } // setPropertiesCreateRequest creates the SetProperties request. @@ -504,18 +521,21 @@ func (client *ServiceClient) setPropertiesHandleResponse(resp *http.Response) (S // - body - Initial data // - options - ServiceClientSubmitBatchOptions contains the optional parameters for the ServiceClient.SubmitBatch method. func (client *ServiceClient) SubmitBatch(ctx context.Context, contentLength int64, multipartContentType string, body io.ReadSeekCloser, options *ServiceClientSubmitBatchOptions) (ServiceClientSubmitBatchResponse, error) { + var err error req, err := client.submitBatchCreateRequest(ctx, contentLength, multipartContentType, body, options) if err != nil { return ServiceClientSubmitBatchResponse{}, err } - resp, err := client.internal.Pipeline().Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ServiceClientSubmitBatchResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusAccepted) { - return ServiceClientSubmitBatchResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return ServiceClientSubmitBatchResponse{}, err } - return client.submitBatchHandleResponse(resp) + resp, err := client.submitBatchHandleResponse(httpResp) + return resp, err } // submitBatchCreateRequest creates the SubmitBatch request. 
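The two time-format helpers in the next files are renamed by the generator (timeRFC1123 becomes dateTimeRFC1123, timeRFC3339 becomes dateTimeRFC3339) with no change to the wire formats. A round-trip sketch of the RFC 1123 wrapper; the type is unexported, so this only compiles inside the generated package, and rfc1123RoundTrip is an illustrative name:

// Round-trip sketch for the renamed wrapper (assumes import "time").
func rfc1123RoundTrip() (time.Time, error) {
	d := dateTimeRFC1123(time.Date(2024, 12, 20, 17, 2, 16, 0, time.UTC))
	b, err := d.MarshalText() // "Fri, 20 Dec 2024 17:02:16 UTC"
	if err != nil {
		return time.Time{}, err
	}
	var parsed dateTimeRFC1123
	// Parses time.RFC1123 back into the wrapper; the rename leaves the format untouched.
	err = parsed.UnmarshalText(b)
	return time.Time(parsed), err
}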
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc1123.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc1123.go index 4b4d51aa399..58665032972 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc1123.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc1123.go @@ -3,9 +3,8 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. // Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. package generated @@ -15,29 +14,29 @@ import ( ) const ( - rfc1123JSON = `"` + time.RFC1123 + `"` + dateTimeRFC1123JSON = `"` + time.RFC1123 + `"` ) -type timeRFC1123 time.Time +type dateTimeRFC1123 time.Time -func (t timeRFC1123) MarshalJSON() ([]byte, error) { - b := []byte(time.Time(t).Format(rfc1123JSON)) +func (t dateTimeRFC1123) MarshalJSON() ([]byte, error) { + b := []byte(time.Time(t).Format(dateTimeRFC1123JSON)) return b, nil } -func (t timeRFC1123) MarshalText() ([]byte, error) { +func (t dateTimeRFC1123) MarshalText() ([]byte, error) { b := []byte(time.Time(t).Format(time.RFC1123)) return b, nil } -func (t *timeRFC1123) UnmarshalJSON(data []byte) error { - p, err := time.Parse(rfc1123JSON, strings.ToUpper(string(data))) - *t = timeRFC1123(p) +func (t *dateTimeRFC1123) UnmarshalJSON(data []byte) error { + p, err := time.Parse(dateTimeRFC1123JSON, strings.ToUpper(string(data))) + *t = dateTimeRFC1123(p) return err } -func (t *timeRFC1123) UnmarshalText(data []byte) error { +func (t *dateTimeRFC1123) UnmarshalText(data []byte) error { p, err := time.Parse(time.RFC1123, string(data)) - *t = timeRFC1123(p) + *t = dateTimeRFC1123(p) return err } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc3339.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc3339.go index 1ce9d621164..82b370133fa 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc3339.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc3339.go @@ -3,9 +3,8 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. // Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. package generated @@ -15,45 +14,45 @@ import ( "time" ) -const ( - utcLayoutJSON = `"2006-01-02T15:04:05.999999999"` - utcLayout = "2006-01-02T15:04:05.999999999" - rfc3339JSON = `"` + time.RFC3339Nano + `"` -) - // Azure reports time in UTC but it doesn't include the 'Z' time zone suffix in some cases. 
var tzOffsetRegex = regexp.MustCompile(`(Z|z|\+|-)(\d+:\d+)*"*$`) -type timeRFC3339 time.Time +const ( + utcDateTimeJSON = `"2006-01-02T15:04:05.999999999"` + utcDateTime = "2006-01-02T15:04:05.999999999" + dateTimeJSON = `"` + time.RFC3339Nano + `"` +) + +type dateTimeRFC3339 time.Time -func (t timeRFC3339) MarshalJSON() (json []byte, err error) { +func (t dateTimeRFC3339) MarshalJSON() ([]byte, error) { tt := time.Time(t) return tt.MarshalJSON() } -func (t timeRFC3339) MarshalText() (text []byte, err error) { +func (t dateTimeRFC3339) MarshalText() ([]byte, error) { tt := time.Time(t) return tt.MarshalText() } -func (t *timeRFC3339) UnmarshalJSON(data []byte) error { - layout := utcLayoutJSON +func (t *dateTimeRFC3339) UnmarshalJSON(data []byte) error { + layout := utcDateTimeJSON if tzOffsetRegex.Match(data) { - layout = rfc3339JSON + layout = dateTimeJSON } return t.Parse(layout, string(data)) } -func (t *timeRFC3339) UnmarshalText(data []byte) (err error) { - layout := utcLayout +func (t *dateTimeRFC3339) UnmarshalText(data []byte) error { + layout := utcDateTime if tzOffsetRegex.Match(data) { layout = time.RFC3339Nano } return t.Parse(layout, string(data)) } -func (t *timeRFC3339) Parse(layout, value string) error { +func (t *dateTimeRFC3339) Parse(layout, value string) error { p, err := time.Parse(layout, strings.ToUpper(value)) - *t = timeRFC3339(p) + *t = dateTimeRFC3339(p) return err } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_xml_helper.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_xml_helper.go index 144ea18e1ab..1bd0e4de05a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_xml_helper.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_xml_helper.go @@ -3,14 +3,16 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. // Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. package generated import ( "encoding/xml" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "io" "strings" ) @@ -19,22 +21,32 @@ type additionalProperties map[string]*string // UnmarshalXML implements the xml.Unmarshaler interface for additionalProperties. 
func (ap *additionalProperties) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { tokName := "" - for t, err := d.Token(); err == nil; t, err = d.Token() { + tokValue := "" + for { + t, err := d.Token() + if errors.Is(err, io.EOF) { + break + } else if err != nil { + return err + } switch tt := t.(type) { case xml.StartElement: tokName = strings.ToLower(tt.Name.Local) - break + tokValue = "" case xml.CharData: + if tokName == "" { + continue + } + tokValue = string(tt) + case xml.EndElement: if tokName == "" { continue } if *ap == nil { *ap = additionalProperties{} } - s := string(tt) - (*ap)[tokName] = &s + (*ap)[tokName] = to.Ptr(tokValue) tokName = "" - break } } return nil diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/shared.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/shared.go index 1de60999ec5..c131facf7b7 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/shared.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/shared.go @@ -144,9 +144,6 @@ func ParseConnectionString(connectionString string) (ParsedConnectionString, err // SerializeBlobTags converts tags to generated.BlobTags func SerializeBlobTags(tagsMap map[string]string) *generated.BlobTags { - if len(tagsMap) == 0 { - return nil - } blobTagSet := make([]*generated.BlobTag, 0) for key, val := range tagsMap { newKey, newVal := key, val @@ -257,3 +254,27 @@ func IsIPEndpointStyle(host string) bool { } return net.ParseIP(host) != nil } + +// ReadAtLeast reads from r into buf until it has read at least min bytes. +// It returns the number of bytes copied and an error. +// The EOF error is returned if no bytes were read or +// EOF happened after reading fewer than min bytes. +// If min is greater than the length of buf, ReadAtLeast returns ErrShortBuffer. +// On return, n >= min if and only if err == nil. +// If r returns an error having read at least min bytes, the error is dropped. +// This method is same as io.ReadAtLeast except that it does not +// return io.ErrUnexpectedEOF when fewer than min bytes are read. +func ReadAtLeast(r io.Reader, buf []byte, min int) (n int, err error) { + if len(buf) < min { + return 0, io.ErrShortBuffer + } + for n < min && err == nil { + var nn int + nn, err = r.Read(buf[n:]) + n += nn + } + if n >= min { + err = nil + } + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/client.go index 7e534cee185..14e90a1fd66 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/client.go @@ -8,6 +8,7 @@ package pageblob import ( "context" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas" "io" "net/http" "net/url" @@ -426,6 +427,12 @@ func (pb *Client) CopyFromURL(ctx context.Context, copySource string, o *blob.Co return pb.BlobClient().CopyFromURL(ctx, copySource, o) } +// GetSASURL is a convenience method for generating a SAS token for the currently pointed at Page blob. +// It can only be used if the credential supplied during creation was a SharedKeyCredential. 
+func (pb *Client) GetSASURL(permissions sas.BlobPermissions, expiry time.Time, o *blob.GetSASURLOptions) (string, error) {
+	return pb.BlobClient().GetSASURL(permissions, expiry, o)
+}
+
 // Concurrent Download Functions -----------------------------------------------------------------------------------------
 
 // DownloadStream reads a range of bytes from a blob. The response also includes the blob's properties and metadata.
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go
index f86286051de..57d0e2777e1 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go
@@ -18,6 +18,8 @@ import (
 	"encoding/pem"
 	"errors"
 	"fmt"
+	"os"
+	"strings"
 
 	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache"
 	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base"
@@ -315,16 +317,21 @@ func New(authority, clientID string, cred Credential, options ...Option) (Client
 	if err != nil {
 		return Client{}, err
 	}
-
+	autoEnabledRegion := os.Getenv("MSAL_FORCE_REGION")
 	opts := clientOptions{
 		authority: authority,
 		// if the caller specified a token provider, it will handle all details of authentication, using Client only as a token cache
 		disableInstanceDiscovery: cred.tokenProvider != nil,
 		httpClient:               shared.DefaultClient,
+		azureRegion:              autoEnabledRegion,
 	}
 	for _, o := range options {
 		o(&opts)
 	}
+	if strings.EqualFold(opts.azureRegion, "DisableMsalForceRegion") {
+		opts.azureRegion = ""
+	}
+
 	baseOpts := []base.Option{
 		base.WithCacheAccessor(opts.accessor),
 		base.WithClientCapabilities(opts.capabilities),
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go
index 09a0d92f520..e473d1267da 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go
@@ -89,8 +89,23 @@ type AuthResult struct {
 	ExpiresOn      time.Time
 	GrantedScopes  []string
 	DeclinedScopes []string
+	Metadata       AuthResultMetadata
 }
 
+// AuthResultMetadata contains metadata for the AuthResult
+type AuthResultMetadata struct {
+	TokenSource TokenSource
+}
+
+type TokenSource int
+
+// These are all the types of token flows.
+const (
+	SourceUnknown    TokenSource = 0
+	IdentityProvider TokenSource = 1
+	Cache            TokenSource = 2
+)
+
 // AuthResultFromStorage creates an AuthResult from a storage token response (which is generated from the cache).
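// Results built here from cached tokens are tagged with Metadata.TokenSource == Cache,
// while NewAuthResult below tags freshly issued tokens with IdentityProvider, so
// callers can distinguish a silent cache hit from a round trip to the identity provider.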
func AuthResultFromStorage(storageTokenResponse storage.TokenResponse) (AuthResult, error) { if err := storageTokenResponse.AccessToken.Validate(); err != nil { @@ -109,7 +124,17 @@ func AuthResultFromStorage(storageTokenResponse storage.TokenResponse) (AuthResu return AuthResult{}, fmt.Errorf("problem decoding JWT token: %w", err) } } - return AuthResult{account, idToken, accessToken, storageTokenResponse.AccessToken.ExpiresOn.T, grantedScopes, nil}, nil + return AuthResult{ + Account: account, + IDToken: idToken, + AccessToken: accessToken, + ExpiresOn: storageTokenResponse.AccessToken.ExpiresOn.T, + GrantedScopes: grantedScopes, + DeclinedScopes: nil, + Metadata: AuthResultMetadata{ + TokenSource: Cache, + }, + }, nil } // NewAuthResult creates an AuthResult. @@ -123,6 +148,9 @@ func NewAuthResult(tokenResponse accesstokens.TokenResponse, account shared.Acco AccessToken: tokenResponse.AccessToken, ExpiresOn: tokenResponse.ExpiresOn.T, GrantedScopes: tokenResponse.GrantedScopes.Slice, + Metadata: AuthResultMetadata{ + TokenSource: IdentityProvider, + }, }, nil } diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/json.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/json.go index 2238521f5f9..2134e57c9e4 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/json.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/json.go @@ -18,10 +18,6 @@ import ( ) const addField = "AdditionalFields" -const ( - marshalJSON = "MarshalJSON" - unmarshalJSON = "UnmarshalJSON" -) var ( leftBrace = []byte("{")[0] @@ -106,48 +102,38 @@ func delimIs(got json.Token, want rune) bool { // hasMarshalJSON will determine if the value or a pointer to this value has // the MarshalJSON method. func hasMarshalJSON(v reflect.Value) bool { - if method := v.MethodByName(marshalJSON); method.Kind() != reflect.Invalid { - _, ok := v.Interface().(json.Marshaler) - return ok - } - - if v.Kind() == reflect.Ptr { - v = v.Elem() - } else { - if !v.CanAddr() { - return false + ok := false + if _, ok = v.Interface().(json.Marshaler); !ok { + var i any + if v.Kind() == reflect.Ptr { + i = v.Elem().Interface() + } else if v.CanAddr() { + i = v.Addr().Interface() } - v = v.Addr() - } - - if method := v.MethodByName(marshalJSON); method.Kind() != reflect.Invalid { - _, ok := v.Interface().(json.Marshaler) - return ok + _, ok = i.(json.Marshaler) } - return false + return ok } // callMarshalJSON will call MarshalJSON() method on the value or a pointer to this value. // This will panic if the method is not defined. 
func callMarshalJSON(v reflect.Value) ([]byte, error) { - if method := v.MethodByName(marshalJSON); method.Kind() != reflect.Invalid { - marsh := v.Interface().(json.Marshaler) + if marsh, ok := v.Interface().(json.Marshaler); ok { return marsh.MarshalJSON() } if v.Kind() == reflect.Ptr { - v = v.Elem() + if marsh, ok := v.Elem().Interface().(json.Marshaler); ok { + return marsh.MarshalJSON() + } } else { if v.CanAddr() { - v = v.Addr() + if marsh, ok := v.Addr().Interface().(json.Marshaler); ok { + return marsh.MarshalJSON() + } } } - if method := v.MethodByName(unmarshalJSON); method.Kind() != reflect.Invalid { - marsh := v.Interface().(json.Marshaler) - return marsh.MarshalJSON() - } - panic(fmt.Sprintf("callMarshalJSON called on type %T that does not have MarshalJSON defined", v.Interface())) } @@ -162,12 +148,8 @@ func hasUnmarshalJSON(v reflect.Value) bool { v = v.Addr() } - if method := v.MethodByName(unmarshalJSON); method.Kind() != reflect.Invalid { - _, ok := v.Interface().(json.Unmarshaler) - return ok - } - - return false + _, ok := v.Interface().(json.Unmarshaler) + return ok } // hasOmitEmpty indicates if the field has instructed us to not output diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go index 04236ff3127..fda5d7dd333 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go @@ -7,6 +7,7 @@ package local import ( "context" "fmt" + "html" "net" "net/http" "strconv" @@ -141,7 +142,7 @@ func (s *Server) handler(w http.ResponseWriter, r *http.Request) { headerErr := q.Get("error") if headerErr != "" { - desc := q.Get("error_description") + desc := html.EscapeString(q.Get("error_description")) // Note: It is a little weird we handle some errors by not going to the failPage. If they all should, // change this to s.error() and make s.error() write the failPage instead of an error code. 
_, _ = w.Write([]byte(fmt.Sprintf(failPage, headerErr, desc))) diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go index ef8d908a444..5dd9fe08f25 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go @@ -331,7 +331,7 @@ func (t *Client) DeviceCode(ctx context.Context, authParams authority.AuthParams func (t *Client) resolveEndpoint(ctx context.Context, authParams *authority.AuthParams, userPrincipalName string) error { endpoints, err := t.Resolver.ResolveEndpoints(ctx, authParams.AuthorityInfo, userPrincipalName) if err != nil { - return fmt.Errorf("unable to resolve an endpoint: %s", err) + return fmt.Errorf("unable to resolve an endpoint: %w", err) } authParams.Endpoints = endpoints return nil diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go index 9d60734f88e..360a9f07b0a 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go @@ -47,13 +47,12 @@ type jsonCaller interface { } var aadTrustedHostList = map[string]bool{ - "login.windows.net": true, // Microsoft Azure Worldwide - Used in validation scenarios where host is not this list - "login.chinacloudapi.cn": true, // Microsoft Azure China - "login.microsoftonline.de": true, // Microsoft Azure Blackforest - "login-us.microsoftonline.com": true, // Microsoft Azure US Government - Legacy - "login.microsoftonline.us": true, // Microsoft Azure US Government - "login.microsoftonline.com": true, // Microsoft Azure Worldwide - "login.cloudgovapi.us": true, // Microsoft Azure US Government + "login.windows.net": true, // Microsoft Azure Worldwide - Used in validation scenarios where host is not this list + "login.partner.microsoftonline.cn": true, // Microsoft Azure China + "login.microsoftonline.de": true, // Microsoft Azure Blackforest + "login-us.microsoftonline.com": true, // Microsoft Azure US Government - Legacy + "login.microsoftonline.us": true, // Microsoft Azure US Government + "login.microsoftonline.com": true, // Microsoft Azure Worldwide } // TrustedHost checks if an AAD host is trusted/valid. 
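The `%s`-to-`%w` switch in `resolveEndpoint` above is more than cosmetic: `%w` keeps the wrapped error in the chain, so callers can still match it with `errors.Is`/`errors.As`. A minimal standalone sketch of the difference (the `errTimeout` value is hypothetical, not part of the vendored code):

```go
package main

import (
	"errors"
	"fmt"
)

var errTimeout = errors.New("timeout")

func main() {
	flat := fmt.Errorf("unable to resolve an endpoint: %s", errTimeout)    // chain broken
	wrapped := fmt.Errorf("unable to resolve an endpoint: %w", errTimeout) // chain preserved

	fmt.Println(errors.Is(flat, errTimeout))    // false
	fmt.Println(errors.Is(wrapped, errTimeout)) // true
}
```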
@@ -543,17 +542,19 @@ func detectRegion(ctx context.Context) string { client := http.Client{ Timeout: time.Duration(2 * time.Second), } - req, _ := http.NewRequest("GET", imdsEndpoint, nil) + req, _ := http.NewRequestWithContext(ctx, http.MethodGet, imdsEndpoint, nil) req.Header.Set("Metadata", "true") resp, err := client.Do(req) + if err == nil { + defer resp.Body.Close() + } // If the request times out or there is an error, it is retried once - if err != nil || resp.StatusCode != 200 { + if err != nil || resp.StatusCode != http.StatusOK { resp, err = client.Do(req) - if err != nil || resp.StatusCode != 200 { + if err != nil || resp.StatusCode != http.StatusOK { return "" } } - defer resp.Body.Close() response, err := io.ReadAll(resp.Body) if err != nil { return "" diff --git a/vendor/github.com/IBM/sarama/Dockerfile.kafka b/vendor/github.com/IBM/sarama/Dockerfile.kafka index ac2d47a1644..40f5f333b5f 100644 --- a/vendor/github.com/IBM/sarama/Dockerfile.kafka +++ b/vendor/github.com/IBM/sarama/Dockerfile.kafka @@ -1,4 +1,4 @@ -FROM registry.access.redhat.com/ubi8/ubi-minimal:8.9@sha256:f30dbf77b075215f6c827c269c073b5e0973e5cea8dacdf7ecb6a19c868f37f2 +FROM registry.access.redhat.com/ubi8/ubi-minimal:8.10@sha256:de2a0a20c1c3b39c3de829196de9694d09f97cd18fda1004de855ed2b4c841ba USER root diff --git a/vendor/github.com/IBM/sarama/async_producer.go b/vendor/github.com/IBM/sarama/async_producer.go index f629a6a2e7f..a6fa3d4a2ec 100644 --- a/vendor/github.com/IBM/sarama/async_producer.go +++ b/vendor/github.com/IBM/sarama/async_producer.go @@ -1101,7 +1101,7 @@ func (bp *brokerProducer) handleSuccess(sent *produceSet, response *ProduceRespo bp.parent.returnSuccesses(pSet.msgs) // Retriable errors case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition, - ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend: + ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend, ErrKafkaStorageError: if bp.parent.conf.Producer.Retry.Max <= 0 { bp.parent.abandonBrokerConnection(bp.broker) bp.parent.returnErrors(pSet.msgs, block.Err) @@ -1134,7 +1134,7 @@ func (bp *brokerProducer) handleSuccess(sent *produceSet, response *ProduceRespo switch block.Err { case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition, - ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend: + ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend, ErrKafkaStorageError: Logger.Printf("producer/broker/%d state change to [retrying] on %s/%d because %v\n", bp.broker.ID(), topic, partition, block.Err) if bp.currentRetries[topic] == nil { diff --git a/vendor/github.com/IBM/sarama/config.go b/vendor/github.com/IBM/sarama/config.go index facf7664367..f2f197887c9 100644 --- a/vendor/github.com/IBM/sarama/config.go +++ b/vendor/github.com/IBM/sarama/config.go @@ -387,7 +387,7 @@ type Config struct { // default is 250ms, since 0 causes the consumer to spin when no events are // available. 100-500ms is a reasonable range for most cases. Kafka only // supports precision up to milliseconds; nanoseconds will be truncated. - // Equivalent to the JVM's `fetch.wait.max.ms`. + // Equivalent to the JVM's `fetch.max.wait.ms`. 
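	// For example, to let the broker wait up to half a second for data to
	// accumulate (an illustrative value within the 100-500ms range above):
	//
	//	cfg := sarama.NewConfig()
	//	cfg.Consumer.MaxWaitTime = 500 * time.Millisecond
	//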
MaxWaitTime time.Duration // The maximum amount of time the consumer expects a message takes to diff --git a/vendor/github.com/IBM/sarama/docker-compose.yml b/vendor/github.com/IBM/sarama/docker-compose.yml index 55283cfe4f0..204768e3203 100644 --- a/vendor/github.com/IBM/sarama/docker-compose.yml +++ b/vendor/github.com/IBM/sarama/docker-compose.yml @@ -1,4 +1,3 @@ -version: '3.9' services: zookeeper-1: hostname: 'zookeeper-1' diff --git a/vendor/github.com/IBM/sarama/offset_manager.go b/vendor/github.com/IBM/sarama/offset_manager.go index 1bf54590897..2948651272b 100644 --- a/vendor/github.com/IBM/sarama/offset_manager.go +++ b/vendor/github.com/IBM/sarama/offset_manager.go @@ -251,18 +251,31 @@ func (om *offsetManager) Commit() { } func (om *offsetManager) flushToBroker() { + broker, err := om.coordinator() + if err != nil { + om.handleError(err) + return + } + + // Care needs to be taken to unlock this. Don't want to defer the unlock as this would + // cause the lock to be held while waiting for the broker to reply. + broker.lock.Lock() req := om.constructRequest() if req == nil { + broker.lock.Unlock() return } + resp, rp, err := sendOffsetCommit(broker, req) + broker.lock.Unlock() - broker, err := om.coordinator() if err != nil { om.handleError(err) + om.releaseCoordinator(broker) + _ = broker.Close() return } - resp, err := broker.CommitOffset(req) + err = handleResponsePromise(req, resp, rp, nil) if err != nil { om.handleError(err) om.releaseCoordinator(broker) @@ -270,9 +283,20 @@ func (om *offsetManager) flushToBroker() { return } + broker.handleThrottledResponse(resp) om.handleResponse(broker, req, resp) } +func sendOffsetCommit(coordinator *Broker, req *OffsetCommitRequest) (*OffsetCommitResponse, *responsePromise, error) { + resp := new(OffsetCommitResponse) + responseHeaderVersion := resp.headerVersion() + promise, err := coordinator.send(req, true, responseHeaderVersion) + if err != nil { + return nil, nil, err + } + return resp, promise, nil +} + func (om *offsetManager) constructRequest() *OffsetCommitRequest { r := &OffsetCommitRequest{ Version: 1, diff --git a/vendor/github.com/IBM/sarama/transaction_manager.go b/vendor/github.com/IBM/sarama/transaction_manager.go index ca7e13dab0e..bf20b75e905 100644 --- a/vendor/github.com/IBM/sarama/transaction_manager.go +++ b/vendor/github.com/IBM/sarama/transaction_manager.go @@ -466,7 +466,7 @@ func (t *transactionManager) publishOffsetsToTxn(offsets topicPartitionOffsets, resultOffsets = failedTxn if len(resultOffsets) == 0 { - DebugLogger.Printf("txnmgr/txn-offset-commit [%s] successful txn-offset-commit with group %s %+v\n", + DebugLogger.Printf("txnmgr/txn-offset-commit [%s] successful txn-offset-commit with group %s\n", t.transactionalID, groupId) return resultOffsets, false, nil } diff --git a/vendor/github.com/VividCortex/gohistogram/.gitignore b/vendor/github.com/VividCortex/gohistogram/.gitignore deleted file mode 100644 index 4c51178c9a2..00000000000 --- a/vendor/github.com/VividCortex/gohistogram/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -\#* -.\#* \ No newline at end of file diff --git a/vendor/github.com/VividCortex/gohistogram/README.md b/vendor/github.com/VividCortex/gohistogram/README.md deleted file mode 100644 index eeb14d36664..00000000000 --- a/vendor/github.com/VividCortex/gohistogram/README.md +++ /dev/null @@ -1,80 +0,0 @@ -# gohistogram - Histograms in Go - -![build status](https://circleci.com/gh/VividCortex/gohistogram.png?circle-token=d37ec652ea117165cd1b342400a801438f575209) - -This package provides 
[Streaming Approximate Histograms](https://vividcortex.com/blog/2013/07/08/streaming-approximate-histograms/) -for efficient quantile approximations. - -The histograms in this package are based on the algorithms found in -Ben-Haim & Yom-Tov's *A Streaming Parallel Decision Tree Algorithm* -([PDF](http://jmlr.org/papers/volume11/ben-haim10a/ben-haim10a.pdf)). -Histogram bins do not have a preset size. As values stream into -the histogram, bins are dynamically added and merged. - -Another implementation can be found in the Apache Hive project (see -[NumericHistogram](http://hive.apache.org/docs/r0.11.0/api/org/apache/hadoop/hive/ql/udf/generic/NumericHistogram.html)). - -An example: - -![histogram](http://i.imgur.com/5OplaRs.png) - -The accurate method of calculating quantiles (like percentiles) requires -data to be sorted. Streaming histograms make it possible to approximate -quantiles without sorting (or even individually storing) values. - -NumericHistogram is the more basic implementation of a streaming -histogram. WeightedHistogram implements bin values as exponentially-weighted -moving averages. - -A maximum bin size is passed as an argument to the constructor methods. A -larger bin size yields more accurate approximations at the cost of increased -memory utilization and performance. - -A picture of kittens: - -![stack of kittens](http://i.imgur.com/QxRTWAE.jpg) - -## Getting started - -### Using in your own code - - $ go get github.com/VividCortex/gohistogram - -```go -import "github.com/VividCortex/gohistogram" -``` - -### Running tests and making modifications - -Get the code into your workspace: - - $ cd $GOPATH - $ git clone git@github.com:VividCortex/gohistogram.git ./src/github.com/VividCortex/gohistogram - -You can run the tests now: - - $ cd src/github.com/VividCortex/gohistogram - $ go test . - -## API Documentation - -Full source documentation can be found [here][godoc]. - -[godoc]: http://godoc.org/github.com/VividCortex/gohistogram - -## Contributing - -We only accept pull requests for minor fixes or improvements. This includes: - -* Small bug fixes -* Typos -* Documentation or comments - -Please open issues to discuss new features. Pull requests for new features will be rejected, -so we recommend forking the repository and making changes in your fork for your use case. - -## License - -Copyright (c) 2013 VividCortex - -Released under MIT License. Check `LICENSE` file for details. diff --git a/vendor/github.com/VividCortex/gohistogram/histogram.go b/vendor/github.com/VividCortex/gohistogram/histogram.go deleted file mode 100644 index ede21fd311b..00000000000 --- a/vendor/github.com/VividCortex/gohistogram/histogram.go +++ /dev/null @@ -1,23 +0,0 @@ -package gohistogram - -// Copyright (c) 2013 VividCortex, Inc. All rights reserved. -// Please see the LICENSE file for applicable license terms. - -// Histogram is the interface that wraps the Add and Quantile methods. -type Histogram interface { - // Add adds a new value, n, to the histogram. Trimming is done - // automatically. - Add(n float64) - - // Quantile returns an approximation. - Quantile(n float64) (q float64) - - // String returns a string reprentation of the histogram, - // which is useful for printing to a terminal. 
- String() (str string) -} - -type bin struct { - value float64 - count float64 -} diff --git a/vendor/github.com/VividCortex/gohistogram/numerichistogram.go b/vendor/github.com/VividCortex/gohistogram/numerichistogram.go deleted file mode 100644 index 20dea740d19..00000000000 --- a/vendor/github.com/VividCortex/gohistogram/numerichistogram.go +++ /dev/null @@ -1,160 +0,0 @@ -package gohistogram - -// Copyright (c) 2013 VividCortex, Inc. All rights reserved. -// Please see the LICENSE file for applicable license terms. - -import ( - "fmt" -) - -type NumericHistogram struct { - bins []bin - maxbins int - total uint64 -} - -// NewHistogram returns a new NumericHistogram with a maximum of n bins. -// -// There is no "optimal" bin count, but somewhere between 20 and 80 bins -// should be sufficient. -func NewHistogram(n int) *NumericHistogram { - return &NumericHistogram{ - bins: make([]bin, 0), - maxbins: n, - total: 0, - } -} - -func (h *NumericHistogram) Add(n float64) { - defer h.trim() - h.total++ - for i := range h.bins { - if h.bins[i].value == n { - h.bins[i].count++ - return - } - - if h.bins[i].value > n { - - newbin := bin{value: n, count: 1} - head := append(make([]bin, 0), h.bins[0:i]...) - - head = append(head, newbin) - tail := h.bins[i:] - h.bins = append(head, tail...) - return - } - } - - h.bins = append(h.bins, bin{count: 1, value: n}) -} - -func (h *NumericHistogram) Quantile(q float64) float64 { - count := q * float64(h.total) - for i := range h.bins { - count -= float64(h.bins[i].count) - - if count <= 0 { - return h.bins[i].value - } - } - - return -1 -} - -// CDF returns the value of the cumulative distribution function -// at x -func (h *NumericHistogram) CDF(x float64) float64 { - count := 0.0 - for i := range h.bins { - if h.bins[i].value <= x { - count += float64(h.bins[i].count) - } - } - - return count / float64(h.total) -} - -// Mean returns the sample mean of the distribution -func (h *NumericHistogram) Mean() float64 { - if h.total == 0 { - return 0 - } - - sum := 0.0 - - for i := range h.bins { - sum += h.bins[i].value * h.bins[i].count - } - - return sum / float64(h.total) -} - -// Variance returns the variance of the distribution -func (h *NumericHistogram) Variance() float64 { - if h.total == 0 { - return 0 - } - - sum := 0.0 - mean := h.Mean() - - for i := range h.bins { - sum += (h.bins[i].count * (h.bins[i].value - mean) * (h.bins[i].value - mean)) - } - - return sum / float64(h.total) -} - -func (h *NumericHistogram) Count() float64 { - return float64(h.total) -} - -// trim merges adjacent bins to decrease the bin count to the maximum value -func (h *NumericHistogram) trim() { - for len(h.bins) > h.maxbins { - // Find closest bins in terms of value - minDelta := 1e99 - minDeltaIndex := 0 - for i := range h.bins { - if i == 0 { - continue - } - - if delta := h.bins[i].value - h.bins[i-1].value; delta < minDelta { - minDelta = delta - minDeltaIndex = i - } - } - - // We need to merge bins minDeltaIndex-1 and minDeltaIndex - totalCount := h.bins[minDeltaIndex-1].count + h.bins[minDeltaIndex].count - mergedbin := bin{ - value: (h.bins[minDeltaIndex-1].value* - h.bins[minDeltaIndex-1].count + - h.bins[minDeltaIndex].value* - h.bins[minDeltaIndex].count) / - totalCount, // weighted average - count: totalCount, // summed heights - } - head := append(make([]bin, 0), h.bins[0:minDeltaIndex-1]...) - tail := append([]bin{mergedbin}, h.bins[minDeltaIndex+1:]...) - h.bins = append(head, tail...) 
- } -} - -// String returns a string reprentation of the histogram, -// which is useful for printing to a terminal. -func (h *NumericHistogram) String() (str string) { - str += fmt.Sprintln("Total:", h.total) - - for i := range h.bins { - var bar string - for j := 0; j < int(float64(h.bins[i].count)/float64(h.total)*200); j++ { - bar += "." - } - str += fmt.Sprintln(h.bins[i].value, "\t", bar) - } - - return -} diff --git a/vendor/github.com/VividCortex/gohistogram/weightedhistogram.go b/vendor/github.com/VividCortex/gohistogram/weightedhistogram.go deleted file mode 100644 index 16eed37193b..00000000000 --- a/vendor/github.com/VividCortex/gohistogram/weightedhistogram.go +++ /dev/null @@ -1,190 +0,0 @@ -// Package gohistogram contains implementations of weighted and exponential histograms. -package gohistogram - -// Copyright (c) 2013 VividCortex, Inc. All rights reserved. -// Please see the LICENSE file for applicable license terms. - -import "fmt" - -// A WeightedHistogram implements Histogram. A WeightedHistogram has bins that have values -// which are exponentially weighted moving averages. This allows you keep inserting large -// amounts of data into the histogram and approximate quantiles with recency factored in. -type WeightedHistogram struct { - bins []bin - maxbins int - total float64 - alpha float64 -} - -// NewWeightedHistogram returns a new WeightedHistogram with a maximum of n bins with a decay factor -// of alpha. -// -// There is no "optimal" bin count, but somewhere between 20 and 80 bins should be -// sufficient. -// -// Alpha should be set to 2 / (N+1), where N represents the average age of the moving window. -// For example, a 60-second window with an average age of 30 seconds would yield an -// alpha of 0.064516129. -func NewWeightedHistogram(n int, alpha float64) *WeightedHistogram { - return &WeightedHistogram{ - bins: make([]bin, 0), - maxbins: n, - total: 0, - alpha: alpha, - } -} - -func ewma(existingVal float64, newVal float64, alpha float64) (result float64) { - result = newVal*(1-alpha) + existingVal*alpha - return -} - -func (h *WeightedHistogram) scaleDown(except int) { - for i := range h.bins { - if i != except { - h.bins[i].count = ewma(h.bins[i].count, 0, h.alpha) - } - } -} - -func (h *WeightedHistogram) Add(n float64) { - defer h.trim() - for i := range h.bins { - if h.bins[i].value == n { - h.bins[i].count++ - - defer h.scaleDown(i) - return - } - - if h.bins[i].value > n { - - newbin := bin{value: n, count: 1} - head := append(make([]bin, 0), h.bins[0:i]...) - - head = append(head, newbin) - tail := h.bins[i:] - h.bins = append(head, tail...) 
- - defer h.scaleDown(i) - return - } - } - - h.bins = append(h.bins, bin{count: 1, value: n}) -} - -func (h *WeightedHistogram) Quantile(q float64) float64 { - count := q * h.total - for i := range h.bins { - count -= float64(h.bins[i].count) - - if count <= 0 { - return h.bins[i].value - } - } - - return -1 -} - -// CDF returns the value of the cumulative distribution function -// at x -func (h *WeightedHistogram) CDF(x float64) float64 { - count := 0.0 - for i := range h.bins { - if h.bins[i].value <= x { - count += float64(h.bins[i].count) - } - } - - return count / h.total -} - -// Mean returns the sample mean of the distribution -func (h *WeightedHistogram) Mean() float64 { - if h.total == 0 { - return 0 - } - - sum := 0.0 - - for i := range h.bins { - sum += h.bins[i].value * h.bins[i].count - } - - return sum / h.total -} - -// Variance returns the variance of the distribution -func (h *WeightedHistogram) Variance() float64 { - if h.total == 0 { - return 0 - } - - sum := 0.0 - mean := h.Mean() - - for i := range h.bins { - sum += (h.bins[i].count * (h.bins[i].value - mean) * (h.bins[i].value - mean)) - } - - return sum / h.total -} - -func (h *WeightedHistogram) Count() float64 { - return h.total -} - -func (h *WeightedHistogram) trim() { - total := 0.0 - for i := range h.bins { - total += h.bins[i].count - } - h.total = total - for len(h.bins) > h.maxbins { - - // Find closest bins in terms of value - minDelta := 1e99 - minDeltaIndex := 0 - for i := range h.bins { - if i == 0 { - continue - } - - if delta := h.bins[i].value - h.bins[i-1].value; delta < minDelta { - minDelta = delta - minDeltaIndex = i - } - } - - // We need to merge bins minDeltaIndex-1 and minDeltaIndex - totalCount := h.bins[minDeltaIndex-1].count + h.bins[minDeltaIndex].count - mergedbin := bin{ - value: (h.bins[minDeltaIndex-1].value* - h.bins[minDeltaIndex-1].count + - h.bins[minDeltaIndex].value* - h.bins[minDeltaIndex].count) / - totalCount, // weighted average - count: totalCount, // summed heights - } - head := append(make([]bin, 0), h.bins[0:minDeltaIndex-1]...) - tail := append([]bin{mergedbin}, h.bins[minDeltaIndex+1:]...) - h.bins = append(head, tail...) - } -} - -// String returns a string reprentation of the histogram, -// which is useful for printing to a terminal. -func (h *WeightedHistogram) String() (str string) { - str += fmt.Sprintln("Total:", h.total) - - for i := range h.bins { - var bar string - for j := 0; j < int(float64(h.bins[i].count)/float64(h.total)*200); j++ { - bar += "." - } - str += fmt.Sprintln(h.bins[i].value, "\t", bar) - } - - return -} diff --git a/vendor/github.com/alecthomas/units/renovate.json5 b/vendor/github.com/alecthomas/units/renovate.json5 index 897864b852f..6bb4acde94a 100644 --- a/vendor/github.com/alecthomas/units/renovate.json5 +++ b/vendor/github.com/alecthomas/units/renovate.json5 @@ -8,4 +8,8 @@ "group:allNonMajor", "schedule:earlyMondays", // Run once a week. ], + postUpdateOptions: [ + "gomodTidy", + "gomodUpdateImportPaths" + ] } diff --git a/vendor/github.com/alicebob/gopher-json/doc.go b/vendor/github.com/alicebob/gopher-json/doc.go index b73aeafd530..8cf73d45a91 100644 --- a/vendor/github.com/alicebob/gopher-json/doc.go +++ b/vendor/github.com/alicebob/gopher-json/doc.go @@ -1,33 +1,35 @@ // Package json is a simple JSON encoder/decoder for gopher-lua. // -// Documentation +// # Documentation // // The following functions are exposed by the library: -// decode(string): Decodes a JSON string. 
Returns nil and an error string if -// the string could not be decoded. -// encode(value): Encodes a value into a JSON string. Returns nil and an error -// string if the value could not be encoded. +// +// decode(string): Decodes a JSON string. Returns nil and an error string if +// the string could not be decoded. +// encode(value): Encodes a value into a JSON string. Returns nil and an error +// string if the value could not be encoded. // // The following types are supported: // -// Lua | JSON -// ---------+----- -// nil | null -// number | number -// string | string -// table | object: when table is non-empty and has only string keys -// | array: when table is empty, or has only sequential numeric keys -// | starting from 1 +// Lua | JSON +// ---------+----- +// nil | null +// number | number +// string | string +// table | object: when table is non-empty and has only string keys +// | array: when table is empty, or has only sequential numeric keys +// | starting from 1 // // Attempting to encode any other Lua type will result in an error. // -// Example +// # Example // // Below is an example usage of the library: -// import ( -// luajson "layeh.com/gopher-json" -// ) // -// L := lua.NewState() -// luajson.Preload(s) +// import ( +// luajson "layeh.com/gopher-json" +// ) +// +// L := lua.NewState() +// luajson.Preload(L) package json diff --git a/vendor/github.com/antchfx/xmlquery/.gitignore b/vendor/github.com/antchfx/xmlquery/.gitignore new file mode 100644 index 00000000000..4d5d27b1d3a --- /dev/null +++ b/vendor/github.com/antchfx/xmlquery/.gitignore @@ -0,0 +1,32 @@ +# vscode +.vscode +debug +*.test + +./build + +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof \ No newline at end of file diff --git a/vendor/github.com/VividCortex/gohistogram/LICENSE b/vendor/github.com/antchfx/xmlquery/LICENSE similarity index 95% rename from vendor/github.com/VividCortex/gohistogram/LICENSE rename to vendor/github.com/antchfx/xmlquery/LICENSE index d23fea36567..e14c37141c5 100644 --- a/vendor/github.com/VividCortex/gohistogram/LICENSE +++ b/vendor/github.com/antchfx/xmlquery/LICENSE @@ -1,5 +1,3 @@ -Copyright (c) 2013 VividCortex - Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights @@ -16,4 +14,4 @@ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. +THE SOFTWARE. 
\ No newline at end of file
diff --git a/vendor/github.com/antchfx/xmlquery/README.md b/vendor/github.com/antchfx/xmlquery/README.md
new file mode 100644
index 00000000000..4d26bfef35d
--- /dev/null
+++ b/vendor/github.com/antchfx/xmlquery/README.md
@@ -0,0 +1,302 @@
+# xmlquery
+
+[![Build Status](https://github.com/antchfx/xmlquery/actions/workflows/testing.yml/badge.svg)](https://github.com/antchfx/xmlquery/actions/workflows/testing.yml)
+[![GoDoc](https://godoc.org/github.com/antchfx/xmlquery?status.svg)](https://godoc.org/github.com/antchfx/xmlquery)
+[![Go Report Card](https://goreportcard.com/badge/github.com/antchfx/xmlquery)](https://goreportcard.com/report/github.com/antchfx/xmlquery)
+
+# Overview
+
+`xmlquery` is an XPath query package for XML documents, allowing you to extract
+data from XML documents or evaluate XPath expressions against them.
+
+`xmlquery` has a built-in query object caching feature that caches recently used
+XPath query strings; caching avoids recompiling the XPath expression for each
+query.
+
+See https://github.com/antchfx/xpath to learn about the supported XPath (1.0/2.0) syntax.
+
+[htmlquery](https://github.com/antchfx/htmlquery) - Package for the HTML document query.
+
+[xmlquery](https://github.com/antchfx/xmlquery) - Package for the XML document query.
+
+[jsonquery](https://github.com/antchfx/jsonquery) - Package for the JSON document query.
+
+# Installation
+
+```
+ $ go get github.com/antchfx/xmlquery
+```
+
+# Quick Start
+
+```go
+import (
+	"fmt"
+	"strings"
+
+	"github.com/antchfx/xmlquery"
+)
+
+func main() {
+	s := `<?xml version="1.0"?>
+<rss version="2.0">
+  <channel>
+    <title>W3Schools Home Page</title>
+    <link>https://www.w3schools.com</link>
+    <description>Free web building tutorials</description>
+    <item>
+      <title>RSS Tutorial</title>
+      <link>https://www.w3schools.com/xml/xml_rss.asp</link>
+      <description>New RSS tutorial on W3Schools</description>
+    </item>
+    <item>
+      <title>XML Tutorial</title>
+      <link>https://www.w3schools.com/xml</link>
+      <description>New XML tutorial on W3Schools</description>
+    </item>
+  </channel>
+</rss>`
+
+	doc, err := xmlquery.Parse(strings.NewReader(s))
+	if err != nil {
+		panic(err)
+	}
+	channel := xmlquery.FindOne(doc, "//channel")
+	if n := channel.SelectElement("title"); n != nil {
+		fmt.Printf("title: %s\n", n.InnerText())
+	}
+	if n := channel.SelectElement("link"); n != nil {
+		fmt.Printf("link: %s\n", n.InnerText())
+	}
+	for i, n := range xmlquery.Find(doc, "//item/title") {
+		fmt.Printf("#%d %s\n", i, n.InnerText())
+	}
+}
+```
+
+# Getting Started
+
+### Find nodes matching an XPath query.
+
+```go
+list, err := xmlquery.QueryAll(doc, "a")
+if err != nil {
+	panic(err)
+}
+```
+
+#### Parse an XML from URL.
+
+```go
+doc, err := xmlquery.LoadURL("http://www.example.com/sitemap.xml")
+```
+
+#### Parse an XML from string.
+
+```go
+s := `<?xml version="1.0" encoding="utf-8"?>`
+doc, err := xmlquery.Parse(strings.NewReader(s))
+```
+
+#### Parse an XML from io.Reader.
+
+```go
+f, err := os.Open("../books.xml")
+doc, err := xmlquery.Parse(f)
+```
+
+#### Parse an XML in a stream fashion (simple case without element filtering).
+
+```go
+f, _ := os.Open("../books.xml")
+p, err := xmlquery.CreateStreamParser(f, "/bookstore/book")
+for {
+	n, err := p.Read()
+	if err == io.EOF {
+		break
+	}
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(n)
+}
+```
+
+Note: `CreateStreamParser()` saves memory when you have a large XML file to parse.
+
+#### Parse an XML in a stream fashion (simple case with advanced element filtering).
+
+```go
+f, _ := os.Open("../books.xml")
+p, err := xmlquery.CreateStreamParser(f, "/bookstore/book", "/bookstore/book[price>=10]")
+for {
+	n, err := p.Read()
+	if err == io.EOF {
+		break
+	}
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(n)
+}
+```
+
+#### Find authors of all books in the bookstore.
+
+```go
+list := xmlquery.Find(doc, "//book//author")
+// or
+list := xmlquery.Find(doc, "//author")
+```
+
+#### Find the second book.
+
+```go
+book := xmlquery.FindOne(doc, "//book[2]")
+```
+
+#### Find the last book.
+
+```go
+book := xmlquery.FindOne(doc, "//book[last()]")
+```
+
+#### Find all book elements and only get the `id` attribute.
+
+```go
+list := xmlquery.Find(doc, "//book/@id")
+fmt.Println(list[0].InnerText()) // output the @id value
+```
+
+#### Find all books with id `bk104`.
+
+```go
+list := xmlquery.Find(doc, "//book[@id='bk104']")
+```
+
+#### Find all books with price less than 5.
+
+```go
+list := xmlquery.Find(doc, "//book[price<5]")
+```
+
+#### Evaluate the total price of all books.
+
+```go
+expr, err := xpath.Compile("sum(//book/price)")
+price := expr.Evaluate(xmlquery.CreateXPathNavigator(doc)).(float64)
+fmt.Printf("total price: %f\n", price)
+```
+
+#### Count the number of books.
+
+```go
+expr, err := xpath.Compile("count(//book)")
+count := expr.Evaluate(xmlquery.CreateXPathNavigator(doc)).(float64)
+```
+
+# Advanced Features
+
+### Parse a `UTF-16` XML file with `ParseWithOptions()`.
+
+```go
+f, _ := os.Open(`UTF-16.XML`)
+// Convert UTF-16 XML to UTF-8
+utf16ToUtf8Transformer := unicode.UTF16(unicode.LittleEndian, unicode.IgnoreBOM).NewDecoder()
+utf8Reader := transform.NewReader(f, utf16ToUtf8Transformer)
+// Sets `CharsetReader`
+options := xmlquery.ParserOptions{
+	Decoder: &xmlquery.DecoderOptions{
+		CharsetReader: func(charset string, input io.Reader) (io.Reader, error) {
+			return input, nil
+		},
+	},
+}
+doc, err := xmlquery.ParseWithOptions(utf8Reader, options)
+```
+
+### Query with custom namespace prefix.
+
+```go
+s := `
+
+
+RequestReplyActivity
+OpClientReqActivity
+300
+80
+
+`
+doc, _ := xmlquery.Parse(strings.NewReader(s))
+nsMap := map[string]string{
+	"q": "http://xmlns.xyz.com/process/2003",
+	"r": "http://www.w3.org/1999/XSL/Transform",
+	"s": "http://www.w3.org/2001/XMLSchema",
+}
+expr, _ := xpath.CompileWithNS("//q:activity", nsMap)
+node := xmlquery.QuerySelector(doc, expr)
+```
+
+#### Create an XML document without calling `xml.Marshal`.
+
+```go
+doc := &xmlquery.Node{
+	Type: xmlquery.DeclarationNode,
+	Data: "xml",
+	Attr: []xml.Attr{
+		xml.Attr{Name: xml.Name{Local: "version"}, Value: "1.0"},
+	},
+}
+root := &xmlquery.Node{
+	Data: "rss",
+	Type: xmlquery.ElementNode,
+}
+doc.FirstChild = root
+channel := &xmlquery.Node{
+	Data: "channel",
+	Type: xmlquery.ElementNode,
+}
+root.FirstChild = channel
+title := &xmlquery.Node{
+	Data: "title",
+	Type: xmlquery.ElementNode,
+}
+title_text := &xmlquery.Node{
+	Data: "W3Schools Home Page",
+	Type: xmlquery.TextNode,
+}
+title.FirstChild = title_text
+channel.FirstChild = title
+
+fmt.Println(doc.OutputXML(true))
+fmt.Println(doc.OutputXMLWithOptions(xmlquery.WithOutputSelf()))
+```
+
+Output:
+
+```xml
+<?xml version="1.0"?><rss><channel><title>W3Schools Home Page</title></channel></rss>
+```
+
+# FAQ
+
+#### `Find()` vs `QueryAll()`, which is better?
+
+`Find` and `QueryAll` both do the same thing: search all matched XML nodes.
+`Find` panics if provided with an invalid XPath query, while `QueryAll` returns
+an error.
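A short, self-contained sketch of that difference (the XML snippet and the deliberately broken query are made up for illustration):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/antchfx/xmlquery"
)

func main() {
	doc, err := xmlquery.Parse(strings.NewReader(`<books><book id="bk101"/></books>`))
	if err != nil {
		panic(err)
	}

	// QueryAll reports a malformed XPath ("//book[" is unterminated) as an
	// ordinary error value...
	if _, err := xmlquery.QueryAll(doc, "//book["); err != nil {
		fmt.Println("rejected cleanly:", err)
	}

	// ...whereas Find panics on the same expression, so guard it when the
	// query string is not a compile-time constant.
	defer func() {
		if r := recover(); r != nil {
			fmt.Println("Find panicked:", r)
		}
	}()
	xmlquery.Find(doc, "//book[")
}
```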
+
+#### Can I save my query expression object for the next query?
+
+Yes, you can. We provide `QuerySelector` and `QuerySelectorAll` methods; they
+accept your query expression object.
+
+Caching a query expression object avoids recompiling the XPath query
+expression, improving query performance.
+
+# Questions
+
+Please let me know if you have any questions.
diff --git a/vendor/github.com/antchfx/xmlquery/cache.go b/vendor/github.com/antchfx/xmlquery/cache.go
new file mode 100644
index 00000000000..3abffcdc57c
--- /dev/null
+++ b/vendor/github.com/antchfx/xmlquery/cache.go
@@ -0,0 +1,43 @@
+package xmlquery
+
+import (
+	"sync"
+
+	"github.com/golang/groupcache/lru"
+
+	"github.com/antchfx/xpath"
+)
+
+// DisableSelectorCache will disable caching for the query selector if set to true.
+var DisableSelectorCache = false
+
+// SelectorCacheMaxEntries sets how many selector objects can be cached. Default is 50.
+// Will disable caching if SelectorCacheMaxEntries <= 0.
+var SelectorCacheMaxEntries = 50
+
+var (
+	cacheOnce  sync.Once
+	cache      *lru.Cache
+	cacheMutex sync.Mutex
+)
+
+func getQuery(expr string) (*xpath.Expr, error) {
+	if DisableSelectorCache || SelectorCacheMaxEntries <= 0 {
+		return xpath.Compile(expr)
+	}
+	cacheOnce.Do(func() {
+		cache = lru.New(SelectorCacheMaxEntries)
+	})
+	cacheMutex.Lock()
+	defer cacheMutex.Unlock()
+	if v, ok := cache.Get(expr); ok {
+		return v.(*xpath.Expr), nil
+	}
+	v, err := xpath.Compile(expr)
+	if err != nil {
+		return nil, err
+	}
+	cache.Add(expr, v)
+	return v, nil
+}
diff --git a/vendor/github.com/antchfx/xmlquery/cached_reader.go b/vendor/github.com/antchfx/xmlquery/cached_reader.go
new file mode 100644
index 00000000000..fe389c5d5c7
--- /dev/null
+++ b/vendor/github.com/antchfx/xmlquery/cached_reader.go
@@ -0,0 +1,69 @@
+package xmlquery
+
+import (
+	"bufio"
+)
+
+type cachedReader struct {
+	buffer   *bufio.Reader
+	cache    []byte
+	cacheCap int
+	cacheLen int
+	caching  bool
+}
+
+func newCachedReader(r *bufio.Reader) *cachedReader {
+	return &cachedReader{
+		buffer:   r,
+		cache:    make([]byte, 4096),
+		cacheCap: 4096,
+		cacheLen: 0,
+		caching:  false,
+	}
+}
+
+func (c *cachedReader) StartCaching() {
+	c.cacheLen = 0
+	c.caching = true
+}
+
+func (c *cachedReader) ReadByte() (byte, error) {
+	if !c.caching {
+		return c.buffer.ReadByte()
+	}
+	b, err := c.buffer.ReadByte()
+	if err != nil {
+		return b, err
+	}
+	if c.cacheLen < c.cacheCap {
+		c.cache[c.cacheLen] = b
+		c.cacheLen++
+	}
+	return b, err
+}
+
+func (c *cachedReader) Cache() []byte {
+	return c.cache[:c.cacheLen]
+}
+
+func (c *cachedReader) StopCaching() {
+	c.caching = false
+}
+
+func (c *cachedReader) Read(p []byte) (int, error) {
+	n, err := c.buffer.Read(p)
+	if err != nil {
+		return n, err
+	}
+	if c.caching && c.cacheLen < c.cacheCap {
+		for i := 0; i < n; i++ {
+			c.cache[c.cacheLen] = p[i]
+			c.cacheLen++
+			if c.cacheLen >= c.cacheCap {
+				break
+			}
+		}
+	}
+	return n, err
+}
+
diff --git a/vendor/github.com/antchfx/xmlquery/node.go b/vendor/github.com/antchfx/xmlquery/node.go
new file mode 100644
index 00000000000..28f3f542794
--- /dev/null
+++ b/vendor/github.com/antchfx/xmlquery/node.go
@@ -0,0 +1,341 @@
+package xmlquery
+
+import (
+	"encoding/xml"
+	"fmt"
+	"html"
+	"strings"
+)
+
+// A NodeType is the type of a Node.
+type NodeType uint
+
+const (
+	// DocumentNode is a document object that, as the root of the document tree,
+	// provides access to the entire XML document.
+	DocumentNode NodeType = iota
+	// DeclarationNode is the document type declaration, indicated by the
+	// following tag (for example, <!DOCTYPE...>).
+	DeclarationNode
+	// ElementNode is an element (for example, <item>).
+	ElementNode
+	// TextNode is the text content of a node.
+	TextNode
+	// CharDataNode is a CDATA section node (for example, <![CDATA[content]]>).
+	CharDataNode
+	// CommentNode is a comment (for example, <!-- comment -->).
+	CommentNode
+	// AttributeNode is an attribute of an element.
+	AttributeNode
+	// NotationNode is a directive represented in the document (for example, <!text...>).
+	NotationNode
+)
+
+type Attr struct {
+	Name         xml.Name
+	Value        string
+	NamespaceURI string
+}
+
+// A Node consists of a NodeType and some Data (tag name for
+// element nodes, content for text) and is part of a tree of Nodes.
+type Node struct {
+	Parent, FirstChild, LastChild, PrevSibling, NextSibling *Node
+
+	Type         NodeType
+	Data         string
+	Prefix       string
+	NamespaceURI string
+	Attr         []Attr
+
+	level int // node level in the tree
+}
+
+type outputConfiguration struct {
+	printSelf              bool
+	preserveSpaces         bool
+	emptyElementTagSupport bool
+	skipComments           bool
+}
+
+type OutputOption func(*outputConfiguration)
+
+// WithOutputSelf configures the Node to print the root node itself
+func WithOutputSelf() OutputOption {
+	return func(oc *outputConfiguration) {
+		oc.printSelf = true
+	}
+}
+
+// WithEmptyTagSupport makes empty tags be written as <empty/> and
+// not as <empty></empty>
+func WithEmptyTagSupport() OutputOption {
+	return func(oc *outputConfiguration) {
+		oc.emptyElementTagSupport = true
+	}
+}
+
+// WithoutComments will skip comments in output
+func WithoutComments() OutputOption {
+	return func(oc *outputConfiguration) {
+		oc.skipComments = true
+	}
+}
+
+// WithPreserveSpace will preserve spaces in output
+func WithPreserveSpace() OutputOption {
+	return func(oc *outputConfiguration) {
+		oc.preserveSpaces = true
+	}
+}
+
+func newXMLName(name string) xml.Name {
+	if i := strings.IndexByte(name, ':'); i > 0 {
+		return xml.Name{
+			Space: name[:i],
+			Local: name[i+1:],
+		}
+	}
+	return xml.Name{
+		Local: name,
+	}
+}
+
+func (n *Node) Level() int {
+	return n.level
+}
+
+// InnerText returns the text between the start and end tags of the object.
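// For example, for a node representing <title>W3Schools Home Page</title>, InnerText
// returns "W3Schools Home Page": tags are omitted, the text of all descendant text
// and CDATA nodes is concatenated, and comments are skipped (illustrative example).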
+func (n *Node) InnerText() string { + var output func(*strings.Builder, *Node) + output = func(b *strings.Builder, n *Node) { + switch n.Type { + case TextNode, CharDataNode: + b.WriteString(n.Data) + case CommentNode: + default: + for child := n.FirstChild; child != nil; child = child.NextSibling { + output(b, child) + } + } + } + + var b strings.Builder + output(&b, n) + return b.String() +} + +func (n *Node) sanitizedData(preserveSpaces bool) string { + if preserveSpaces { + return n.Data + } + return strings.TrimSpace(n.Data) +} + +func calculatePreserveSpaces(n *Node, pastValue bool) bool { + if attr := n.SelectAttr("xml:space"); attr == "preserve" { + return true + } else if attr == "default" { + return false + } + return pastValue +} + +func outputXML(b *strings.Builder, n *Node, preserveSpaces bool, config *outputConfiguration) { + preserveSpaces = calculatePreserveSpaces(n, preserveSpaces) + switch n.Type { + case TextNode: + b.WriteString(html.EscapeString(n.sanitizedData(preserveSpaces))) + return + case CharDataNode: + b.WriteString("") + return + case CommentNode: + if !config.skipComments { + b.WriteString("") + } + return + case NotationNode: + fmt.Fprintf(b, "", n.Data) + return + case DeclarationNode: + b.WriteString("") + } else { + if n.FirstChild != nil || !config.emptyElementTagSupport { + b.WriteString(">") + } else { + b.WriteString("/>") + return + } + } + for child := n.FirstChild; child != nil; child = child.NextSibling { + outputXML(b, child, preserveSpaces, config) + } + if n.Type != DeclarationNode { + if n.Prefix == "" { + fmt.Fprintf(b, "", n.Data) + } else { + fmt.Fprintf(b, "", n.Prefix, n.Data) + } + } +} + +// OutputXML returns the text that including tags name. +func (n *Node) OutputXML(self bool) string { + + config := &outputConfiguration{ + printSelf: true, + emptyElementTagSupport: false, + } + preserveSpaces := calculatePreserveSpaces(n, false) + var b strings.Builder + if self && n.Type != DocumentNode { + outputXML(&b, n, preserveSpaces, config) + } else { + for n := n.FirstChild; n != nil; n = n.NextSibling { + outputXML(&b, n, preserveSpaces, config) + } + } + + return b.String() +} + +// OutputXMLWithOptions returns the text that including tags name. +func (n *Node) OutputXMLWithOptions(opts ...OutputOption) string { + + config := &outputConfiguration{} + // Set the options + for _, opt := range opts { + opt(config) + } + pastPreserveSpaces := config.preserveSpaces + preserveSpaces := calculatePreserveSpaces(n, pastPreserveSpaces) + var b strings.Builder + if config.printSelf && n.Type != DocumentNode { + outputXML(&b, n, preserveSpaces, config) + } else { + for n := n.FirstChild; n != nil; n = n.NextSibling { + outputXML(&b, n, preserveSpaces, config) + } + } + + return b.String() +} + +// AddAttr adds a new attribute specified by 'key' and 'val' to a node 'n'. +func AddAttr(n *Node, key, val string) { + attr := Attr{ + Name: newXMLName(key), + Value: val, + } + n.Attr = append(n.Attr, attr) +} + +// SetAttr allows an attribute value with the specified name to be changed. +// If the attribute did not previously exist, it will be created. +func (n *Node) SetAttr(key, value string) { + name := newXMLName(key) + for i, attr := range n.Attr { + if attr.Name == name { + n.Attr[i].Value = value + return + } + } + AddAttr(n, key, value) +} + +// RemoveAttr removes the attribute with the specified name. 
+func (n *Node) RemoveAttr(key string) { + name := newXMLName(key) + for i, attr := range n.Attr { + if attr.Name == name { + n.Attr = append(n.Attr[:i], n.Attr[i+1:]...) + return + } + } +} + +// AddChild adds a new node 'n' to a node 'parent' as its last child. +func AddChild(parent, n *Node) { + n.Parent = parent + n.NextSibling = nil + if parent.FirstChild == nil { + parent.FirstChild = n + n.PrevSibling = nil + } else { + parent.LastChild.NextSibling = n + n.PrevSibling = parent.LastChild + } + + parent.LastChild = n +} + +// AddSibling adds a new node 'n' as a sibling of a given node 'sibling'. +// Note it is not necessarily true that the new node 'n' would be added +// immediately after 'sibling'. If 'sibling' isn't the last child of its +// parent, then the new node 'n' will be added at the end of the sibling +// chain of their parent. +func AddSibling(sibling, n *Node) { + for t := sibling.NextSibling; t != nil; t = t.NextSibling { + sibling = t + } + n.Parent = sibling.Parent + sibling.NextSibling = n + n.PrevSibling = sibling + n.NextSibling = nil + if sibling.Parent != nil { + sibling.Parent.LastChild = n + } +} + +// RemoveFromTree removes a node and its subtree from the document +// tree it is in. If the node is the root of the tree, then it's no-op. +func RemoveFromTree(n *Node) { + if n.Parent == nil { + return + } + if n.Parent.FirstChild == n { + if n.Parent.LastChild == n { + n.Parent.FirstChild = nil + n.Parent.LastChild = nil + } else { + n.Parent.FirstChild = n.NextSibling + n.NextSibling.PrevSibling = nil + } + } else { + if n.Parent.LastChild == n { + n.Parent.LastChild = n.PrevSibling + n.PrevSibling.NextSibling = nil + } else { + n.PrevSibling.NextSibling = n.NextSibling + n.NextSibling.PrevSibling = n.PrevSibling + } + } + n.Parent = nil + n.PrevSibling = nil + n.NextSibling = nil +} diff --git a/vendor/github.com/antchfx/xmlquery/options.go b/vendor/github.com/antchfx/xmlquery/options.go new file mode 100644 index 00000000000..6b902d21480 --- /dev/null +++ b/vendor/github.com/antchfx/xmlquery/options.go @@ -0,0 +1,33 @@ +package xmlquery + +import ( + "encoding/xml" + "io" +) + +type ParserOptions struct { + Decoder *DecoderOptions +} + +func (options ParserOptions) apply(parser *parser) { + if options.Decoder != nil { + (*options.Decoder).apply(parser.decoder) + } +} + +// DecoderOptions implement the very same options than the standard +// encoding/xml package. Please refer to this documentation: +// https://golang.org/pkg/encoding/xml/#Decoder +type DecoderOptions struct { + Strict bool + AutoClose []string + Entity map[string]string + CharsetReader func(charset string, input io.Reader) (io.Reader, error) +} + +func (options DecoderOptions) apply(decoder *xml.Decoder) { + decoder.Strict = options.Strict + decoder.AutoClose = options.AutoClose + decoder.Entity = options.Entity + decoder.CharsetReader = options.CharsetReader +} diff --git a/vendor/github.com/antchfx/xmlquery/parse.go b/vendor/github.com/antchfx/xmlquery/parse.go new file mode 100644 index 00000000000..daf7233a048 --- /dev/null +++ b/vendor/github.com/antchfx/xmlquery/parse.go @@ -0,0 +1,414 @@ +package xmlquery + +import ( + "bufio" + "encoding/xml" + "fmt" + "io" + "net/http" + "regexp" + "strings" + "sync" + + "github.com/antchfx/xpath" + "golang.org/x/net/html/charset" +) + +var xmlMIMERegex = regexp.MustCompile(`(?i)((application|image|message|model)/((\w|\.|-)+\+?)?|text/)(wb)?xml`) + +// LoadURL loads the XML document from the specified URL. 
+func LoadURL(url string) (*Node, error) {
+	resp, err := http.Get(url)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+	// Make sure the Content-Type has a valid XML MIME type
+	if xmlMIMERegex.MatchString(resp.Header.Get("Content-Type")) {
+		return Parse(resp.Body)
+	}
+	return nil, fmt.Errorf("invalid XML document(%s)", resp.Header.Get("Content-Type"))
+}
+
+// Parse returns the parse tree for the XML from the given Reader.
+func Parse(r io.Reader) (*Node, error) {
+	return ParseWithOptions(r, ParserOptions{})
+}
+
+// ParseWithOptions is like Parse, but with custom options
+func ParseWithOptions(r io.Reader, options ParserOptions) (*Node, error) {
+	p := createParser(r)
+	options.apply(p)
+	for {
+		_, err := p.parse()
+		if err == io.EOF {
+			return p.doc, nil
+		}
+		if err != nil {
+			return nil, err
+		}
+	}
+}
+
+type parser struct {
+	decoder             *xml.Decoder
+	doc                 *Node
+	level               int
+	prev                *Node
+	streamElementXPath  *xpath.Expr   // Under streaming mode, this specifies the xpath to the target element node(s).
+	streamElementFilter *xpath.Expr   // If specified, it provides further filtering on the target element.
+	streamNode          *Node         // Need to remember the last target node so we can clean it up upon next Read() call.
+	streamNodePrev      *Node         // Need to remember target node's prev so upon target node removal, we can restore correct prev.
+	reader              *cachedReader // Need to maintain a reference to the reader, so we can determine whether a node contains CDATA.
+	once                sync.Once
+	space2prefix        map[string]*xmlnsPrefix
+}
+
+type xmlnsPrefix struct {
+	name  string
+	level int
+}
+
+func createParser(r io.Reader) *parser {
+	reader := newCachedReader(bufio.NewReader(r))
+	p := &parser{
+		decoder: xml.NewDecoder(reader),
+		doc:     &Node{Type: DocumentNode},
+		level:   0,
+		reader:  reader,
+	}
+	if p.decoder.CharsetReader == nil {
+		p.decoder.CharsetReader = charset.NewReaderLabel
+	}
+	p.prev = p.doc
+	return p
+}
+
+func (p *parser) parse() (*Node, error) {
+	p.once.Do(func() {
+		p.space2prefix = map[string]*xmlnsPrefix{"http://www.w3.org/XML/1998/namespace": {name: "xml", level: 0}}
+	})
+
+	var streamElementNodeCounter int
+	for {
+		p.reader.StartCaching()
+		tok, err := p.decoder.Token()
+		p.reader.StopCaching()
+		if err != nil {
+			return nil, err
+		}
+
+		switch tok := tok.(type) {
+		case xml.StartElement:
+			if p.level == 0 {
+				// missing XML declaration
+				attributes := make([]Attr, 1)
+				attributes[0].Name = xml.Name{Local: "version"}
+				attributes[0].Value = "1.0"
+				node := &Node{
+					Type:  DeclarationNode,
+					Data:  "xml",
+					Attr:  attributes,
+					level: 1,
+				}
+				AddChild(p.prev, node)
+				p.level = 1
+				p.prev = node
+			}
+
+			for _, att := range tok.Attr {
+				if att.Name.Local == "xmlns" {
+					// https://github.com/antchfx/xmlquery/issues/67
+					if prefix, ok := p.space2prefix[att.Value]; !ok || (ok && prefix.level >= p.level) {
+						p.space2prefix[att.Value] = &xmlnsPrefix{name: "", level: p.level} // reset to empty if the default namespace already exists
+					}
+				} else if att.Name.Space == "xmlns" {
+					// there may be duplicate namespace URLs
+ p.space2prefix[att.Value] = &xmlnsPrefix{name: att.Name.Local, level: p.level}
+ }
+ }
+
+ if space := tok.Name.Space; space != "" {
+ if _, found := p.space2prefix[space]; !found && p.decoder.Strict {
+ return nil, fmt.Errorf("xmlquery: invalid XML document, namespace %s is missing", space)
+ }
+ }
+
+ attributes := make([]Attr, len(tok.Attr))
+ for i, att := range tok.Attr {
+ name := att.Name
+ if prefix, ok := p.space2prefix[name.Space]; ok {
+ name.Space = prefix.name
+ }
+ attributes[i] = Attr{
+ Name: name,
+ Value: att.Value,
+ NamespaceURI: att.Name.Space,
+ }
+ }
+
+ node := &Node{
+ Type: ElementNode,
+ Data: tok.Name.Local,
+ NamespaceURI: tok.Name.Space,
+ Attr: attributes,
+ level: p.level,
+ }
+
+ if p.level == p.prev.level {
+ AddSibling(p.prev, node)
+ } else if p.level > p.prev.level {
+ AddChild(p.prev, node)
+ } else if p.level < p.prev.level {
+ for i := p.prev.level - p.level; i > 1; i-- {
+ p.prev = p.prev.Parent
+ }
+ AddSibling(p.prev.Parent, node)
+ }
+
+ if node.NamespaceURI != "" {
+ if v, ok := p.space2prefix[node.NamespaceURI]; ok {
+ cached := string(p.reader.Cache())
+ if strings.HasPrefix(cached, fmt.Sprintf("%s:%s", v.name, node.Data)) || strings.HasPrefix(cached, fmt.Sprintf("<%s:%s", v.name, node.Data)) {
+ node.Prefix = v.name
+ }
+ }
+ }
+ // If we're in the streaming mode, we need to remember the node if it is the target node
+ // so that when we finish processing the node's EndElement, we know how/what to return to
+ // the caller. Also we need to remove the target node from the tree upon the next Read() call
+ // so memory doesn't grow unbounded.
+ if p.streamElementXPath != nil {
+ if p.streamNode == nil {
+ if QuerySelector(p.doc, p.streamElementXPath) != nil {
+ p.streamNode = node
+ p.streamNodePrev = p.prev
+ streamElementNodeCounter = 1
+ }
+ } else {
+ streamElementNodeCounter++
+ }
+ }
+ p.prev = node
+ p.level++
+ case xml.EndElement:
+ p.level--
+ // If we're in streaming mode, and we already have a potential streaming
+ // target node identified (p.streamNode != nil) then we need to check if
+ // this is the real one we want to return to the caller.
+ if p.streamNode != nil {
+ streamElementNodeCounter--
+ if streamElementNodeCounter == 0 {
+ // Now we know this element node at least passes the initial
+ // p.streamElementXPath check and is a potential target node candidate.
+ // We need to have 1 more check with p.streamElementFilter (if given) to
+ // ensure it is really the element node we want.
+ // The reason we need a two-step check process is because of the following
+ // situation:
+ // <AAA><BBB>b1</BBB></AAA>
+ // And say the p.streamElementXPath = "/AAA/BBB[. != 'b1']". Now during
+ // xml.StartElement time, the <BBB> node is still empty, so it will pass
+ // the p.streamElementXPath check. However, eventually we know this <BBB>
+ // shouldn't be returned to the caller. Having a second more fine-grained
+ // filter check ensures that. So in this case, the caller should really
+ // setup the stream parser with:
+ // streamElementXPath = "/AAA/BBB"
+ // streamElementFilter = "/AAA/BBB[. != 'b1']"
+ if p.streamElementFilter == nil || QuerySelector(p.doc, p.streamElementFilter) != nil {
+ return p.streamNode, nil
+ }
+ // otherwise, this isn't our target node, clean things up.
+ // note we also remove the underlying *Node from the node tree, to prevent
+ // future stream node candidate selection errors.
+ RemoveFromTree(p.streamNode)
+ p.prev = p.streamNodePrev
+ p.streamNode = nil
+ p.streamNodePrev = nil
+ }
+ }
+ case xml.CharData:
+ // First, normalize the cache...
+ cached := strings.ToUpper(string(p.reader.Cache()))
+ nodeType := TextNode
+ if strings.HasPrefix(cached, "<![CDATA[") || strings.HasPrefix(cached, "![CDATA[") {
+ nodeType = CharDataNode
+ }
+
+ node := &Node{Type: nodeType, Data: string(tok), level: p.level}
+ if p.level == p.prev.level {
+ AddSibling(p.prev, node)
+ } else if p.level > p.prev.level {
+ AddChild(p.prev, node)
+ } else if p.level < p.prev.level {
+ for i := p.prev.level - p.level; i > 1; i-- {
+ p.prev = p.prev.Parent
+ }
+ AddSibling(p.prev.Parent, node)
+ }
+ case xml.Comment:
+ node := &Node{Type: CommentNode, Data: string(tok), level: p.level}
+ if p.level == p.prev.level {
+ AddSibling(p.prev, node)
+ } else if p.level > p.prev.level {
+ AddChild(p.prev, node)
+ } else if p.level < p.prev.level {
+ for i := p.prev.level - p.level; i > 1; i-- {
+ p.prev = p.prev.Parent
+ }
+ AddSibling(p.prev.Parent, node)
+ }
+ case xml.ProcInst: // Processing Instruction
+ if p.prev.Type != DeclarationNode {
+ p.level++
+ }
+ node := &Node{Type: DeclarationNode, Data: tok.Target, level: p.level}
+ pairs := strings.Split(string(tok.Inst), " ")
+ for _, pair := range pairs {
+ pair = strings.TrimSpace(pair)
+ if i := strings.Index(pair, "="); i > 0 {
+ AddAttr(node, pair[:i], strings.Trim(pair[i+1:], `"`))
+ }
+ }
+ if p.level == p.prev.level {
+ AddSibling(p.prev, node)
+ } else if p.level > p.prev.level {
+ AddChild(p.prev, node)
+ } else if p.level < p.prev.level {
+ for i := p.prev.level - p.level; i > 1; i-- {
+ p.prev = p.prev.Parent
+ }
+ AddSibling(p.prev.Parent, node)
+ }
+ p.prev = node
+ case xml.Directive:
+ node := &Node{Type: NotationNode, Data: string(tok), level: p.level}
+ if p.level == p.prev.level {
+ AddSibling(p.prev, node)
+ } else if p.level > p.prev.level {
+ AddChild(p.prev, node)
+ } else if p.level < p.prev.level {
+ for i := p.prev.level - p.level; i > 1; i-- {
+ p.prev = p.prev.Parent
+ }
+ AddSibling(p.prev.Parent, node)
+ }
+ }
+ }
+}
+
+// StreamParser enables loading and parsing an XML document in a streaming
+// fashion.
+type StreamParser struct {
+ p *parser
+}
+
+// CreateStreamParser creates a StreamParser. Argument streamElementXPath is
+// required.
+// Argument streamElementFilter is optional and should only be used in advanced
+// scenarios.
+//
+// Scenario 1: simple case:
+//
+// xml := `<AAA><BBB>b1</BBB><BBB>b2</BBB></AAA>`
+// sp, err := CreateStreamParser(strings.NewReader(xml), "/AAA/BBB")
+// if err != nil {
+// panic(err)
+// }
+// for {
+// n, err := sp.Read()
+// if err != nil {
+// break
+// }
+// fmt.Println(n.OutputXML(true))
+// }
+//
+// Output will be:
+//
+// <BBB>b1</BBB>
+// <BBB>b2</BBB>
+//
+// Scenario 2: advanced case:
+//
+// xml := `<AAA><BBB>b1</BBB><BBB>b2</BBB></AAA>`
+// sp, err := CreateStreamParser(strings.NewReader(xml), "/AAA/BBB", "/AAA/BBB[. != 'b1']")
+// if err != nil {
+// panic(err)
+// }
+// for {
+// n, err := sp.Read()
+// if err != nil {
+// break
+// }
+// fmt.Println(n.OutputXML(true))
+// }
+//
+// Output will be:
+//
+// <BBB>b2</BBB>
+//
+// As the argument names indicate, streamElementXPath should be used for
+// providing an xpath query pointing to the target element node only, with no
+// extra filtering on the element itself or its children; while
+// streamElementFilter, if needed, can provide additional filtering on the
+// target element and its children.
+//
+// CreateStreamParser returns an error if either streamElementXPath or
+// streamElementFilter, if provided, cannot be successfully parsed and compiled
+// into a valid xpath query.
+func CreateStreamParser(r io.Reader, streamElementXPath string, streamElementFilter ...string) (*StreamParser, error) {
+ return CreateStreamParserWithOptions(r, ParserOptions{}, streamElementXPath, streamElementFilter...)
+}
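+
+// An illustrative sketch (not part of the upstream API; the helper name and
+// the "/feed/item" path are made up): it combines the streaming parser with
+// custom decoder options and counts the matched elements one at a time.
+func exampleStreamCount(r io.Reader) (int, error) {
+ opts := ParserOptions{Decoder: &DecoderOptions{Strict: false}}
+ sp, err := CreateStreamParserWithOptions(r, opts, "/feed/item")
+ if err != nil {
+ return 0, err
+ }
+ count := 0
+ for {
+ // Each Read returns the next matched element and prunes the
+ // previously returned one from the in-memory tree.
+ if _, err := sp.Read(); err != nil {
+ if err == io.EOF {
+ return count, nil
+ }
+ return count, err
+ }
+ count++
+ }
+}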
+
+// CreateStreamParserWithOptions is like CreateStreamParser, but with custom options.
+func CreateStreamParserWithOptions(
+ r io.Reader,
+ options ParserOptions,
+ streamElementXPath string,
+ streamElementFilter ...string,
+) (*StreamParser, error) {
+ elemXPath, err := getQuery(streamElementXPath)
+ if err != nil {
+ return nil, fmt.Errorf("invalid streamElementXPath '%s', err: %s", streamElementXPath, err.Error())
+ }
+ elemFilter := (*xpath.Expr)(nil)
+ if len(streamElementFilter) > 0 {
+ elemFilter, err = getQuery(streamElementFilter[0])
+ if err != nil {
+ return nil, fmt.Errorf("invalid streamElementFilter '%s', err: %s", streamElementFilter[0], err.Error())
+ }
+ }
+ parser := createParser(r)
+ options.apply(parser)
+ sp := &StreamParser{
+ p: parser,
+ }
+ sp.p.streamElementXPath = elemXPath
+ sp.p.streamElementFilter = elemFilter
+ return sp, nil
+}
+
+// Read returns a target node that satisfies the XPath specified by the caller at
+// StreamParser creation time. If there are no more satisfying target nodes after
+// reading the rest of the XML document, io.EOF will be returned. At any time,
+// any XML parsing error encountered will be returned, and the stream parsing
+// stopped. Calling Read() after an error is returned (including io.EOF) results
+// in undefined behavior. Also note, due to the streaming nature, calling Read()
+// will automatically remove any previous target node(s) from the document tree.
+func (sp *StreamParser) Read() (*Node, error) {
+ // Because this is a streaming read, we need to release/remove the last
+ // target node from the node tree to free up memory.
+ if sp.p.streamNode != nil {
+ // We need to remove all siblings before the current stream node,
+ // because the document may contain unwanted nodes between the target
+ // ones (for example new line text nodes), which would otherwise
+ // accumulate as first children and slow down the stream over time.
+ for sp.p.streamNode.PrevSibling != nil {
+ RemoveFromTree(sp.p.streamNode.PrevSibling)
+ }
+ sp.p.prev = sp.p.streamNode.Parent
+ RemoveFromTree(sp.p.streamNode)
+ sp.p.streamNode = nil
+ sp.p.streamNodePrev = nil
+ }
+ return sp.p.parse()
+}
diff --git a/vendor/github.com/antchfx/xmlquery/query.go b/vendor/github.com/antchfx/xmlquery/query.go
new file mode 100644
index 00000000000..d1353aac485
--- /dev/null
+++ b/vendor/github.com/antchfx/xmlquery/query.go
@@ -0,0 +1,304 @@
+/*
+Package xmlquery provides extracting data from XML documents using XPath expressions.
+*/
+package xmlquery
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/antchfx/xpath"
+)
+
+// SelectElements finds child elements with the specified name.
+func (n *Node) SelectElements(name string) []*Node {
+ return Find(n, name)
+}
+
+// SelectElement finds the first child element with the specified name.
+func (n *Node) SelectElement(name string) *Node {
+ return FindOne(n, name)
+}
+
+// SelectAttr returns the attribute value with the specified name.
+func (n *Node) SelectAttr(name string) string {
+ if n.Type == AttributeNode {
+ if n.Data == name {
+ return n.InnerText()
+ }
+ return ""
+ }
+ xmlName := newXMLName(name)
+ for _, attr := range n.Attr {
+ if attr.Name == xmlName {
+ return attr.Value
+ }
+ }
+ return ""
+}
+
+var _ xpath.NodeNavigator = &NodeNavigator{}
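+
+// An illustrative sketch (not part of the upstream API; the helper name is
+// made up): Find/FindOne panic on a malformed XPath, while Query/QueryAll
+// report it as an error, which is safer for user-supplied expressions.
+func exampleQueryAll(doc *Node, userExpr string) ([]string, error) {
+ nodes, err := QueryAll(doc, userExpr) // returns an error instead of panicking
+ if err != nil {
+ return nil, err
+ }
+ values := make([]string, 0, len(nodes))
+ for _, n := range nodes {
+ values = append(values, n.InnerText())
+ }
+ return values, nil
+}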
+
+// CreateXPathNavigator creates a new xpath.NodeNavigator for the specified
+// XML Node.
+func CreateXPathNavigator(top *Node) *NodeNavigator {
+ return &NodeNavigator{curr: top, root: top, attr: -1}
+}
+
+func getCurrentNode(it *xpath.NodeIterator) *Node {
+ n := it.Current().(*NodeNavigator)
+ if n.NodeType() == xpath.AttributeNode {
+ childNode := &Node{
+ Type: TextNode,
+ Data: n.Value(),
+ }
+ return &Node{
+ Parent: n.curr,
+ Type: AttributeNode,
+ Data: n.LocalName(),
+ FirstChild: childNode,
+ LastChild: childNode,
+ }
+ }
+ return n.curr
+}
+
+// Find is like QueryAll but panics if `expr` is not a valid XPath expression.
+// See the `QueryAll()` function.
+func Find(top *Node, expr string) []*Node {
+ nodes, err := QueryAll(top, expr)
+ if err != nil {
+ panic(err)
+ }
+ return nodes
+}
+
+// FindOne is like Query but panics if `expr` is not a valid XPath expression.
+// See the `Query()` function.
+func FindOne(top *Node, expr string) *Node {
+ node, err := Query(top, expr)
+ if err != nil {
+ panic(err)
+ }
+ return node
+}
+
+// QueryAll searches the XML Nodes that match the specified XPath expr.
+// Returns an error if the expression `expr` cannot be parsed.
+func QueryAll(top *Node, expr string) ([]*Node, error) {
+ exp, err := getQuery(expr)
+ if err != nil {
+ return nil, err
+ }
+ return QuerySelectorAll(top, exp), nil
+}
+
+// Query searches the XML Nodes that match the specified XPath expr,
+// and returns the first matched element.
+func Query(top *Node, expr string) (*Node, error) {
+ exp, err := getQuery(expr)
+ if err != nil {
+ return nil, err
+ }
+ return QuerySelector(top, exp), nil
+}
+
+// QuerySelectorAll searches all of the XML Nodes that match the specified
+// XPath selector.
+func QuerySelectorAll(top *Node, selector *xpath.Expr) []*Node {
+ t := selector.Select(CreateXPathNavigator(top))
+ var elems []*Node
+ for t.MoveNext() {
+ elems = append(elems, getCurrentNode(t))
+ }
+ return elems
+}
+
+// QuerySelector returns the first matched XML Node by the specified XPath
+// selector.
+func QuerySelector(top *Node, selector *xpath.Expr) *Node {
+ t := selector.Select(CreateXPathNavigator(top))
+ if t.MoveNext() {
+ return getCurrentNode(t)
+ }
+ return nil
+}
+
+// FindEach searches the Node and calls function cb on each matched node.
+// Important: this method is deprecated; instead, use for ... = range Find() {}.
+func FindEach(top *Node, expr string, cb func(int, *Node)) {
+ for i, n := range Find(top, expr) {
+ cb(i, n)
+ }
+}
+
+// FindEachWithBreak functions the same as FindEach but allows breaking the loop
+// by returning false from the callback function `cb`.
+// Important: this method is deprecated; instead, use for ... = range Find() {}.
+func FindEachWithBreak(top *Node, expr string, cb func(int, *Node) bool) { + for i, n := range Find(top, expr) { + if !cb(i, n) { + break + } + } +} + +type NodeNavigator struct { + root, curr *Node + attr int +} + +func (x *NodeNavigator) Current() *Node { + return x.curr +} + +func (x *NodeNavigator) NodeType() xpath.NodeType { + switch x.curr.Type { + case CommentNode: + return xpath.CommentNode + case TextNode, CharDataNode, NotationNode: + return xpath.TextNode + case DeclarationNode, DocumentNode: + return xpath.RootNode + case ElementNode: + if x.attr != -1 { + return xpath.AttributeNode + } + return xpath.ElementNode + } + panic(fmt.Sprintf("unknown XML node type: %v", x.curr.Type)) +} + +func (x *NodeNavigator) LocalName() string { + if x.attr != -1 { + return x.curr.Attr[x.attr].Name.Local + } + return x.curr.Data + +} + +func (x *NodeNavigator) Prefix() string { + if x.NodeType() == xpath.AttributeNode { + if x.attr != -1 { + return x.curr.Attr[x.attr].Name.Space + } + return "" + } + return x.curr.Prefix +} + +func (x *NodeNavigator) NamespaceURL() string { + if x.attr != -1 { + return x.curr.Attr[x.attr].NamespaceURI + } + return x.curr.NamespaceURI +} + +func (x *NodeNavigator) Value() string { + switch x.curr.Type { + case CommentNode: + return x.curr.Data + case ElementNode: + if x.attr != -1 { + return x.curr.Attr[x.attr].Value + } + return x.curr.InnerText() + case TextNode: + return x.curr.Data + } + return "" +} + +func (x *NodeNavigator) Copy() xpath.NodeNavigator { + n := *x + return &n +} + +func (x *NodeNavigator) MoveToRoot() { + x.curr = x.root +} + +func (x *NodeNavigator) MoveToParent() bool { + if x.attr != -1 { + x.attr = -1 + return true + } else if node := x.curr.Parent; node != nil { + x.curr = node + return true + } + return false +} + +func (x *NodeNavigator) MoveToNextAttribute() bool { + if x.attr >= len(x.curr.Attr)-1 { + return false + } + x.attr++ + return true +} + +func (x *NodeNavigator) MoveToChild() bool { + if x.attr != -1 { + return false + } + if node := x.curr.FirstChild; node != nil { + x.curr = node + return true + } + return false +} + +func (x *NodeNavigator) MoveToFirst() bool { + if x.attr != -1 || x.curr.PrevSibling == nil { + return false + } + for { + node := x.curr.PrevSibling + if node == nil { + break + } + x.curr = node + } + return true +} + +func (x *NodeNavigator) String() string { + return x.Value() +} + +func (x *NodeNavigator) MoveToNext() bool { + if x.attr != -1 { + return false + } + for node := x.curr.NextSibling; node != nil; node = x.curr.NextSibling { + x.curr = node + if x.curr.Type != TextNode || strings.TrimSpace(x.curr.Data) != "" { + return true + } + } + return false +} + +func (x *NodeNavigator) MoveToPrevious() bool { + if x.attr != -1 { + return false + } + for node := x.curr.PrevSibling; node != nil; node = x.curr.PrevSibling { + x.curr = node + if x.curr.Type != TextNode || strings.TrimSpace(x.curr.Data) != "" { + return true + } + } + return false +} + +func (x *NodeNavigator) MoveTo(other xpath.NodeNavigator) bool { + node, ok := other.(*NodeNavigator) + if !ok || node.root != x.root { + return false + } + + x.curr = node.curr + x.attr = node.attr + return true +} diff --git a/vendor/github.com/antchfx/xpath/.gitignore b/vendor/github.com/antchfx/xpath/.gitignore new file mode 100644 index 00000000000..4d5d27b1d3a --- /dev/null +++ b/vendor/github.com/antchfx/xpath/.gitignore @@ -0,0 +1,32 @@ +# vscode +.vscode +debug +*.test + +./build + +# Compiled Object files, Static and Dynamic libs (Shared 
Objects)
+*.o
+*.a
+*.so
+
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
\ No newline at end of file
diff --git a/vendor/github.com/mitchellh/go-testing-interface/LICENSE b/vendor/github.com/antchfx/xpath/LICENSE
similarity index 92%
rename from vendor/github.com/mitchellh/go-testing-interface/LICENSE
rename to vendor/github.com/antchfx/xpath/LICENSE
index a3866a291fd..e14c37141c5 100644
--- a/vendor/github.com/mitchellh/go-testing-interface/LICENSE
+++ b/vendor/github.com/antchfx/xpath/LICENSE
@@ -1,7 +1,3 @@
-The MIT License (MIT)
-
-Copyright (c) 2016 Mitchell Hashimoto
-
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
 in the Software without restriction, including without limitation the rights
@@ -18,4 +14,4 @@ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
+THE SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/antchfx/xpath/README.md b/vendor/github.com/antchfx/xpath/README.md
new file mode 100644
index 00000000000..733c4c8727e
--- /dev/null
+++ b/vendor/github.com/antchfx/xpath/README.md
@@ -0,0 +1,167 @@
+# XPath
+
+[![GoDoc](https://godoc.org/github.com/antchfx/xpath?status.svg)](https://godoc.org/github.com/antchfx/xpath)
+[![Coverage Status](https://coveralls.io/repos/github/antchfx/xpath/badge.svg?branch=master)](https://coveralls.io/github/antchfx/xpath?branch=master)
+[![Build Status](https://github.com/antchfx/xpath/actions/workflows/testing.yml/badge.svg)](https://github.com/antchfx/xpath/actions/workflows/testing.yml)
+[![Go Report Card](https://goreportcard.com/badge/github.com/antchfx/xpath)](https://goreportcard.com/report/github.com/antchfx/xpath)
+
+XPath is a Go package that provides selecting nodes from XML, HTML or other documents using XPath expressions.
+
+# Implementation
+
+- [htmlquery](https://github.com/antchfx/htmlquery) - an XPath query package for HTML documents.
+
+- [xmlquery](https://github.com/antchfx/xmlquery) - an XPath query package for XML documents.
+
+- [jsonquery](https://github.com/antchfx/jsonquery) - an XPath query package for JSON documents.
+
+# Supported Features
+
+#### The basic XPath patterns.
+
+> The basic XPath patterns cover 90% of the cases that most stylesheets will need.
+
+- `node` : Selects all child elements with nodeName of node.
+
+- `*` : Selects all child elements.
+
+- `@attr` : Selects the attribute attr.
+
+- `@*` : Selects all attributes.
+
+- `node()` : Matches any node.
+
+- `text()` : Matches a text node.
+
+- `comment()` : Matches a comment.
+
+- `.` : Selects the current node.
+
+- `..` : Selects the parent of the current node.
+
+- `/` : Selects the document node.
+
+- `a[expr]` : Selects only those nodes matching a which also satisfy the expression expr.
+
+- `a[n]` : Selects the nth node matching a. When a filter's expression is a number, XPath selects based on position.
+
+- `a/b` : For each node matching a, add the nodes matching b to the result.
+
+- `a//b` : For each node matching a, add the descendant nodes matching b to the result.
+
+- `//b` : Returns elements in the entire document matching b.
+
+- `a|b` : All nodes matching a or b, union operation (not boolean or).
+
+- `(a, b, c)` : Evaluates each of its operands and concatenates the resulting sequences, in order, into a single result sequence.
+
+- `(a/b)` : Selects all matched nodes as a grouping set.
+
+#### Node Axes
+
+- `child::*` : The child axis selects children of the current node.
+
+  - `child::node()`: Selects all the children of the context node.
+  - `child::text()`: Selects all text node children of the context node.
+
+- `descendant::*` : The descendant axis selects descendants of the current node. It is equivalent to '//'.
+
+- `descendant-or-self::*` : Selects descendants including the current node.
+
+- `attribute::*` : Selects attributes of the current element. It is equivalent to @\*.
+
+- `following-sibling::*` : Selects nodes after the current node.
+
+- `preceding-sibling::*` : Selects nodes before the current node.
+
+- `following::*` : Selects the first matching node following in document order, excluding descendants.
+
+- `preceding::*` : Selects the first matching node preceding in document order, excluding ancestors.
+
+- `parent::*` : Selects the parent if it matches. The '..' pattern from the core is equivalent to 'parent::node()'.
+
+- `ancestor::*` : Selects matching ancestors.
+
+- `ancestor-or-self::*` : Selects ancestors including the current node.
+
+- `self::*` : Selects the current node. '.' is equivalent to 'self::node()'.
+
+#### Expressions
+
+The package supports three data types: number, boolean, and string.
+
+- `path` : Selects nodes based on the path.
+
+- `a = b` : Standard comparisons.
+
+  - `a = b` : True if a equals b.
+  - `a != b` : True if a is not equal to b.
+  - `a < b` : True if a is less than b.
+  - `a <= b` : True if a is less than or equal to b.
+  - `a > b` : True if a is greater than b.
+  - `a >= b` : True if a is greater than or equal to b.
+
+- `a + b` : Arithmetic expressions.
+
+  - `- a` : Unary minus
+  - `a + b` : Addition
+  - `a - b` : Subtraction
+  - `a * b` : Multiplication
+  - `a div b` : Division
+  - `a mod b` : Modulus (division remainder)
+
+- `a or b` : Boolean `or` operation.
+
+- `a and b` : Boolean `and` operation.
+
+- `(expr)` : Parenthesized expressions.
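+
+As a minimal sketch of how such expressions are used from Go (the `nav` value
+below is an assumption, typically obtained from a document package such as
+`xmlquery` via `xmlquery.CreateXPathNavigator(doc)`):
+
+```go
+expr, err := xpath.Compile("sum(//book/price) div count(//book)")
+if err != nil {
+	panic(err)
+}
+// nav is any xpath.NodeNavigator; numeric expressions evaluate to a float64.
+mean := expr.Evaluate(nav).(float64)
+```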
+
+- `fun(arg1, ..., argn)` : Function calls:
+
+| Function | Supported |
+| ----------------------- | --------- |
+| `boolean()` | ✓ |
+| `ceiling()` | ✓ |
+| `choose()` | ✗ |
+| `concat()` | ✓ |
+| `contains()` | ✓ |
+| `count()` | ✓ |
+| `current()` | ✗ |
+| `document()` | ✗ |
+| `element-available()` | ✗ |
+| `ends-with()` | ✓ |
+| `false()` | ✓ |
+| `floor()` | ✓ |
+| `format-number()` | ✗ |
+| `function-available()` | ✗ |
+| `generate-id()` | ✗ |
+| `id()` | ✗ |
+| `key()` | ✗ |
+| `lang()` | ✗ |
+| `last()` | ✓ |
+| `local-name()` | ✓ |
+| `lower-case()`[^1] | ✓ |
+| `matches()` | ✓ |
+| `name()` | ✓ |
+| `namespace-uri()` | ✓ |
+| `normalize-space()` | ✓ |
+| `not()` | ✓ |
+| `number()` | ✓ |
+| `position()` | ✓ |
+| `replace()` | ✓ |
+| `reverse()` | ✓ |
+| `round()` | ✓ |
+| `starts-with()` | ✓ |
+| `string()` | ✓ |
+| `string-join()`[^1] | ✓ |
+| `string-length()` | ✓ |
+| `substring()` | ✓ |
+| `substring-after()` | ✓ |
+| `substring-before()` | ✓ |
+| `sum()` | ✓ |
+| `system-property()` | ✗ |
+| `translate()` | ✓ |
+| `true()` | ✓ |
+| `unparsed-entity-url()` | ✗ |
+
+[^1]: XPath-2.0 expression
diff --git a/vendor/github.com/antchfx/xpath/build.go b/vendor/github.com/antchfx/xpath/build.go
new file mode 100644
index 00000000000..a93c8eb5f78
--- /dev/null
+++ b/vendor/github.com/antchfx/xpath/build.go
@@ -0,0 +1,718 @@
+package xpath
+
+import (
+ "errors"
+ "fmt"
+)
+
+type flag int
+
+var flagsEnum = struct {
+ None flag
+ SmartDesc flag
+ PosFilter flag
+ Filter flag
+ Condition flag
+}{
+ None: 0,
+ SmartDesc: 1,
+ PosFilter: 2,
+ Filter: 4,
+ Condition: 8,
+}
+
+type builderProp int
+
+var builderProps = struct {
+ None builderProp
+ PosFilter builderProp
+ HasPosition builderProp
+ HasLast builderProp
+ NonFlat builderProp
+}{
+ None: 0,
+ PosFilter: 1,
+ HasPosition: 2,
+ HasLast: 4,
+ NonFlat: 8,
+}
+
+// builder builds XPath expressions into query trees.
+type builder struct {
+ parseDepth int
+ firstInput query
+}
+
+// axisPredicate creates a node-matching predicate for this axis node.
+func axisPredicate(root *axisNode) func(NodeNavigator) bool {
+ nametest := root.LocalName != "" || root.Prefix != ""
+ predicate := func(n NodeNavigator) bool {
+ if root.typeTest == n.NodeType() || root.typeTest == allNode {
+ if nametest {
+ type namespaceURL interface {
+ NamespaceURL() string
+ }
+ if ns, ok := n.(namespaceURL); ok && root.hasNamespaceURI {
+ return root.LocalName == n.LocalName() && root.namespaceURI == ns.NamespaceURL()
+ }
+ if root.LocalName == n.LocalName() && root.Prefix == n.Prefix() {
+ return true
+ }
+ } else {
+ return true
+ }
+ }
+ return false
+ }
+
+ return predicate
+}
+
+// processAxis processes a query for the XPath axis node.
+func (b *builder) processAxis(root *axisNode, flags flag, props *builderProp) (query, error) {
+ var (
+ err error
+ qyInput query
+ qyOutput query
+ )
+ b.firstInput = nil
+ predicate := axisPredicate(root)
+
+ if root.Input == nil {
+ qyInput = &contextQuery{}
+ *props = builderProps.None
+ } else {
+ inputFlags := flagsEnum.None
+ if (flags & flagsEnum.Filter) == 0 {
+ if root.AxisType == "child" && (root.Input.Type() == nodeAxis) {
+ if input := root.Input.(*axisNode); input.AxisType == "descendant-or-self" {
+ var qyGrandInput query
+ if input.Input != nil {
+ qyGrandInput, err = b.processNode(input.Input, flagsEnum.SmartDesc, props)
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ qyGrandInput = &contextQuery{}
+ }
+ qyOutput = &descendantQuery{name: root.LocalName, Input: qyGrandInput, Predicate: predicate, Self: false}
+ *props |= builderProps.NonFlat
+ return qyOutput, nil
+ }
+ }
+ if root.AxisType == "descendant" || root.AxisType == "descendant-or-self" {
+ inputFlags |= flagsEnum.SmartDesc
+ }
+ }
+
+ qyInput, err = b.processNode(root.Input, inputFlags, props)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ switch root.AxisType {
+ case "ancestor":
+ qyOutput = &ancestorQuery{name: root.LocalName, Input: qyInput, Predicate: predicate}
+ *props |= builderProps.NonFlat
+ case "ancestor-or-self":
+ qyOutput = &ancestorQuery{name: root.LocalName, Input: qyInput, Predicate: predicate, Self: true}
+ *props |= builderProps.NonFlat
+ case "attribute":
+ qyOutput = &attributeQuery{name: root.LocalName, Input: qyInput, Predicate: predicate}
+ case "child":
+ if (*props & builderProps.NonFlat) == 0 {
+ qyOutput = &childQuery{name: root.LocalName, Input: qyInput, Predicate: predicate}
+ } else {
+ qyOutput = &cachedChildQuery{name: root.LocalName, Input: qyInput, Predicate: predicate}
+ }
+ case "descendant":
+ if (flags & flagsEnum.SmartDesc) != flagsEnum.None {
+ qyOutput = &descendantOverDescendantQuery{name: root.LocalName, Input: qyInput, MatchSelf: false, Predicate: predicate}
+ } else {
+ qyOutput = &descendantQuery{name: root.LocalName, Input: qyInput, Predicate: predicate}
+ }
+ *props |= builderProps.NonFlat
+ case "descendant-or-self":
+ if (flags & flagsEnum.SmartDesc) != flagsEnum.None {
+ qyOutput = &descendantOverDescendantQuery{name: root.LocalName, Input: qyInput, MatchSelf: true, Predicate: predicate}
+ } else {
+ qyOutput = &descendantQuery{name: root.LocalName, Input: qyInput, Predicate: predicate, Self: true}
+ }
+ *props |= builderProps.NonFlat
+ case "following":
+ qyOutput = &followingQuery{Input: qyInput, Predicate: predicate}
+ *props |= builderProps.NonFlat
+ case "following-sibling":
+ qyOutput = &followingQuery{Input: qyInput, Predicate: predicate, Sibling: true}
+ case "parent":
+ qyOutput = &parentQuery{Input: qyInput, Predicate: predicate}
+ case "preceding":
+ qyOutput = &precedingQuery{Input: qyInput, Predicate: predicate}
+ *props |= builderProps.NonFlat
+ case "preceding-sibling":
+ qyOutput = &precedingQuery{Input: qyInput, Predicate: predicate, Sibling: true}
+ case "self":
+ qyOutput = &selfQuery{Input: qyInput, Predicate: predicate}
+ case "namespace":
+ // TODO: the namespace axis is not supported yet.
+ default:
+ err = fmt.Errorf("unknown axis type: %s", root.AxisType)
+ return nil, err
+ }
+ return qyOutput, nil
+}
+
+func canBeNumber(q query) bool {
+ if q.ValueType() != xpathResultType.Any {
+ return q.ValueType() == xpathResultType.Number
+ }
+ return true
+}
+
+// processFilter builds a query for the XPath filter predicate.
+func (b *builder) processFilter(root *filterNode, flags flag, props *builderProp) (query, error) { + first := (flags & flagsEnum.Filter) == 0 + + qyInput, err := b.processNode(root.Input, (flags | flagsEnum.Filter), props) + if err != nil { + return nil, err + } + firstInput := b.firstInput + + var propsCond builderProp + cond, err := b.processNode(root.Condition, flags, &propsCond) + if err != nil { + return nil, err + } + + // Checking whether is number + if canBeNumber(cond) || ((propsCond & (builderProps.HasPosition | builderProps.HasLast)) != 0) { + propsCond |= builderProps.HasPosition + flags |= flagsEnum.PosFilter + } + + if root.Input.Type() != nodeFilter { + *props &= ^builderProps.PosFilter + } + + if (propsCond & builderProps.HasPosition) != 0 { + *props |= builderProps.PosFilter + } + + if (propsCond & builderProps.HasPosition) != builderProps.None { + if (propsCond & builderProps.HasLast) != 0 { + // https://github.com/antchfx/xpath/issues/76 + // https://github.com/antchfx/xpath/issues/78 + if qyFunc, ok := cond.(*functionQuery); ok { + switch qyFunc.Input.(type) { + case *filterQuery: + cond = &lastFuncQuery{Input: qyFunc.Input} + } + } + } + } + + merge := (qyInput.Properties() & queryProps.Merge) != 0 + if first && firstInput != nil { + if merge && ((*props & builderProps.PosFilter) != 0) { + var ( + rootQuery = &contextQuery{} + parent query + ) + switch axisQuery := firstInput.(type) { + case *ancestorQuery: + if _, ok := axisQuery.Input.(*contextQuery); !ok { + parent = axisQuery.Input + axisQuery.Input = rootQuery + } + case *attributeQuery: + if _, ok := axisQuery.Input.(*contextQuery); !ok { + parent = axisQuery.Input + axisQuery.Input = rootQuery + } + case *childQuery: + if _, ok := axisQuery.Input.(*contextQuery); !ok { + parent = axisQuery.Input + axisQuery.Input = rootQuery + } + case *cachedChildQuery: + if _, ok := axisQuery.Input.(*contextQuery); !ok { + parent = axisQuery.Input + axisQuery.Input = rootQuery + } + case *descendantQuery: + if _, ok := axisQuery.Input.(*contextQuery); !ok { + parent = axisQuery.Input + axisQuery.Input = rootQuery + } + case *followingQuery: + if _, ok := axisQuery.Input.(*contextQuery); !ok { + parent = axisQuery.Input + axisQuery.Input = rootQuery + } + case *precedingQuery: + if _, ok := axisQuery.Input.(*contextQuery); !ok { + parent = axisQuery.Input + axisQuery.Input = rootQuery + } + case *parentQuery: + if _, ok := axisQuery.Input.(*contextQuery); !ok { + parent = axisQuery.Input + axisQuery.Input = rootQuery + } + case *selfQuery: + if _, ok := axisQuery.Input.(*contextQuery); !ok { + parent = axisQuery.Input + axisQuery.Input = rootQuery + } + case *groupQuery: + if _, ok := axisQuery.Input.(*contextQuery); !ok { + parent = axisQuery.Input + axisQuery.Input = rootQuery + } + case *descendantOverDescendantQuery: + if _, ok := axisQuery.Input.(*contextQuery); !ok { + parent = axisQuery.Input + axisQuery.Input = rootQuery + } + } + b.firstInput = nil + child := &filterQuery{Input: qyInput, Predicate: cond, NoPosition: false} + if parent != nil { + return &mergeQuery{Input: parent, Child: child}, nil + } + return child, nil + } + b.firstInput = nil + } + + resultQuery := &filterQuery{ + Input: qyInput, + Predicate: cond, + NoPosition: (propsCond & builderProps.HasPosition) == 0, + } + return resultQuery, nil +} + +// processFunctionNode processes query for the XPath function node. 
+func (b *builder) processFunction(root *functionNode, props *builderProp) (query, error) { + // Reset builder props + *props = builderProps.None + + var qyOutput query + switch root.FuncName { + case "lower-case": + arg, err := b.processNode(root.Args[0], flagsEnum.None, props) + if err != nil { + return nil, err + } + qyOutput = &functionQuery{Func: lowerCaseFunc(arg)} + case "starts-with": + arg1, err := b.processNode(root.Args[0], flagsEnum.None, props) + if err != nil { + return nil, err + } + arg2, err := b.processNode(root.Args[1], flagsEnum.None, props) + if err != nil { + return nil, err + } + qyOutput = &functionQuery{Func: startwithFunc(arg1, arg2)} + case "ends-with": + arg1, err := b.processNode(root.Args[0], flagsEnum.None, props) + if err != nil { + return nil, err + } + arg2, err := b.processNode(root.Args[1], flagsEnum.None, props) + if err != nil { + return nil, err + } + qyOutput = &functionQuery{Func: endwithFunc(arg1, arg2)} + case "contains": + arg1, err := b.processNode(root.Args[0], flagsEnum.None, props) + if err != nil { + return nil, err + } + arg2, err := b.processNode(root.Args[1], flagsEnum.None, props) + if err != nil { + return nil, err + } + qyOutput = &functionQuery{Func: containsFunc(arg1, arg2)} + case "matches": + //matches(string , pattern) + if len(root.Args) != 2 { + return nil, errors.New("xpath: matches function must have two parameters") + } + var ( + arg1, arg2 query + err error + ) + if arg1, err = b.processNode(root.Args[0], flagsEnum.None, props); err != nil { + return nil, err + } + if arg2, err = b.processNode(root.Args[1], flagsEnum.None, props); err != nil { + return nil, err + } + // Issue #92, testing the regular expression before. + if q, ok := arg2.(*constantQuery); ok { + if _, err = getRegexp(q.Val.(string)); err != nil { + return nil, fmt.Errorf("matches() got error. 
%v", err) + } + } + qyOutput = &functionQuery{Func: matchesFunc(arg1, arg2)} + case "substring": + //substring( string , start [, length] ) + if len(root.Args) < 2 { + return nil, errors.New("xpath: substring function must have at least two parameter") + } + var ( + arg1, arg2, arg3 query + err error + ) + if arg1, err = b.processNode(root.Args[0], flagsEnum.None, props); err != nil { + return nil, err + } + if arg2, err = b.processNode(root.Args[1], flagsEnum.None, props); err != nil { + return nil, err + } + if len(root.Args) == 3 { + if arg3, err = b.processNode(root.Args[2], flagsEnum.None, props); err != nil { + return nil, err + } + } + qyOutput = &functionQuery{Func: substringFunc(arg1, arg2, arg3)} + case "substring-before", "substring-after": + //substring-xxxx( haystack, needle ) + if len(root.Args) != 2 { + return nil, errors.New("xpath: substring-before function must have two parameters") + } + var ( + arg1, arg2 query + err error + ) + if arg1, err = b.processNode(root.Args[0], flagsEnum.None, props); err != nil { + return nil, err + } + if arg2, err = b.processNode(root.Args[1], flagsEnum.None, props); err != nil { + return nil, err + } + qyOutput = &functionQuery{ + Func: substringIndFunc(arg1, arg2, root.FuncName == "substring-after"), + } + case "string-length": + // string-length( [string] ) + if len(root.Args) < 1 { + return nil, errors.New("xpath: string-length function must have at least one parameter") + } + arg1, err := b.processNode(root.Args[0], flagsEnum.None, props) + if err != nil { + return nil, err + } + qyOutput = &functionQuery{Func: stringLengthFunc(arg1)} + case "normalize-space": + var arg node + if len(root.Args) > 0 { + arg = root.Args[0] + } else { + arg = newAxisNode("self", allNode, "", "", "", nil) + } + arg1, err := b.processNode(arg, flagsEnum.None, props) + if err != nil { + return nil, err + } + qyOutput = &functionQuery{Func: normalizespaceFunc(arg1)} + case "replace": + //replace( string , string, string ) + if len(root.Args) != 3 { + return nil, errors.New("xpath: replace function must have three parameters") + } + var ( + arg1, arg2, arg3 query + err error + ) + if arg1, err = b.processNode(root.Args[0], flagsEnum.None, props); err != nil { + return nil, err + } + if arg2, err = b.processNode(root.Args[1], flagsEnum.None, props); err != nil { + return nil, err + } + if arg3, err = b.processNode(root.Args[2], flagsEnum.None, props); err != nil { + return nil, err + } + qyOutput = &functionQuery{Func: replaceFunc(arg1, arg2, arg3)} + case "translate": + //translate( string , string, string ) + if len(root.Args) != 3 { + return nil, errors.New("xpath: translate function must have three parameters") + } + var ( + arg1, arg2, arg3 query + err error + ) + if arg1, err = b.processNode(root.Args[0], flagsEnum.None, props); err != nil { + return nil, err + } + if arg2, err = b.processNode(root.Args[1], flagsEnum.None, props); err != nil { + return nil, err + } + if arg3, err = b.processNode(root.Args[2], flagsEnum.None, props); err != nil { + return nil, err + } + qyOutput = &functionQuery{Func: translateFunc(arg1, arg2, arg3)} + case "not": + if len(root.Args) == 0 { + return nil, errors.New("xpath: not function must have at least one parameter") + } + argQuery, err := b.processNode(root.Args[0], flagsEnum.None, props) + if err != nil { + return nil, err + } + qyOutput = &functionQuery{Func: notFunc(argQuery)} + case "name", "local-name", "namespace-uri": + if len(root.Args) > 1 { + return nil, fmt.Errorf("xpath: %s function must have at most one 
parameter", root.FuncName) + } + var ( + arg query + err error + ) + if len(root.Args) == 1 { + arg, err = b.processNode(root.Args[0], flagsEnum.None, props) + if err != nil { + return nil, err + } + } + switch root.FuncName { + case "name": + qyOutput = &functionQuery{Func: nameFunc(arg)} + case "local-name": + qyOutput = &functionQuery{Func: localNameFunc(arg)} + case "namespace-uri": + qyOutput = &functionQuery{Func: namespaceFunc(arg)} + } + case "true", "false": + val := root.FuncName == "true" + qyOutput = &functionQuery{ + Func: func(_ query, _ iterator) interface{} { + return val + }, + } + case "last": + qyOutput = &functionQuery{Input: b.firstInput, Func: lastFunc()} + *props |= builderProps.HasLast + case "position": + qyOutput = &functionQuery{Input: b.firstInput, Func: positionFunc()} + *props |= builderProps.HasPosition + case "boolean", "number", "string": + var inp query + if len(root.Args) > 1 { + return nil, fmt.Errorf("xpath: %s function must have at most one parameter", root.FuncName) + } + if len(root.Args) == 1 { + argQuery, err := b.processNode(root.Args[0], flagsEnum.None, props) + if err != nil { + return nil, err + } + inp = argQuery + } + switch root.FuncName { + case "boolean": + qyOutput = &functionQuery{Func: booleanFunc(inp)} + case "string": + qyOutput = &functionQuery{Func: stringFunc(inp)} + case "number": + qyOutput = &functionQuery{Func: numberFunc(inp)} + } + case "count": + if len(root.Args) == 0 { + return nil, fmt.Errorf("xpath: count(node-sets) function must with have parameters node-sets") + } + argQuery, err := b.processNode(root.Args[0], flagsEnum.None, props) + if err != nil { + return nil, err + } + qyOutput = &functionQuery{Func: countFunc(argQuery)} + case "sum": + if len(root.Args) == 0 { + return nil, fmt.Errorf("xpath: sum(node-sets) function must with have parameters node-sets") + } + argQuery, err := b.processNode(root.Args[0], flagsEnum.None, props) + if err != nil { + return nil, err + } + qyOutput = &functionQuery{Func: sumFunc(argQuery)} + case "ceiling", "floor", "round": + if len(root.Args) == 0 { + return nil, fmt.Errorf("xpath: ceiling(node-sets) function must with have parameters node-sets") + } + argQuery, err := b.processNode(root.Args[0], flagsEnum.None, props) + if err != nil { + return nil, err + } + switch root.FuncName { + case "ceiling": + qyOutput = &functionQuery{Func: ceilingFunc(argQuery)} + case "floor": + qyOutput = &functionQuery{Func: floorFunc(argQuery)} + case "round": + qyOutput = &functionQuery{Func: roundFunc(argQuery)} + } + case "concat": + if len(root.Args) < 2 { + return nil, fmt.Errorf("xpath: concat() must have at least two arguments") + } + var args []query + for _, v := range root.Args { + q, err := b.processNode(v, flagsEnum.None, props) + if err != nil { + return nil, err + } + args = append(args, q) + } + qyOutput = &functionQuery{Func: concatFunc(args...)} + case "reverse": + if len(root.Args) == 0 { + return nil, fmt.Errorf("xpath: reverse(node-sets) function must with have parameters node-sets") + } + argQuery, err := b.processNode(root.Args[0], flagsEnum.None, props) + if err != nil { + return nil, err + } + qyOutput = &transformFunctionQuery{Input: argQuery, Func: reverseFunc} + case "string-join": + if len(root.Args) != 2 { + return nil, fmt.Errorf("xpath: string-join(node-sets, separator) function requires node-set and argument") + } + input, err := b.processNode(root.Args[0], flagsEnum.None, props) + if err != nil { + return nil, err + } + arg1, err := b.processNode(root.Args[1], 
flagsEnum.None, props) + if err != nil { + return nil, err + } + qyOutput = &functionQuery{Func: stringJoinFunc(input, arg1)} + default: + return nil, fmt.Errorf("not yet support this function %s()", root.FuncName) + } + return qyOutput, nil +} + +func (b *builder) processOperator(root *operatorNode, props *builderProp) (query, error) { + var ( + leftProp builderProp + rightProp builderProp + ) + + left, err := b.processNode(root.Left, flagsEnum.None, &leftProp) + if err != nil { + return nil, err + } + right, err := b.processNode(root.Right, flagsEnum.None, &rightProp) + if err != nil { + return nil, err + } + *props = leftProp | rightProp + + var qyOutput query + switch root.Op { + case "+", "-", "*", "div", "mod": // Numeric operator + var exprFunc func(iterator, interface{}, interface{}) interface{} + switch root.Op { + case "+": + exprFunc = plusFunc + case "-": + exprFunc = minusFunc + case "*": + exprFunc = mulFunc + case "div": + exprFunc = divFunc + case "mod": + exprFunc = modFunc + } + qyOutput = &numericQuery{Left: left, Right: right, Do: exprFunc} + case "=", ">", ">=", "<", "<=", "!=": + var exprFunc func(iterator, interface{}, interface{}) interface{} + switch root.Op { + case "=": + exprFunc = eqFunc + case ">": + exprFunc = gtFunc + case ">=": + exprFunc = geFunc + case "<": + exprFunc = ltFunc + case "<=": + exprFunc = leFunc + case "!=": + exprFunc = neFunc + } + qyOutput = &logicalQuery{Left: left, Right: right, Do: exprFunc} + case "or", "and": + isOr := false + if root.Op == "or" { + isOr = true + } + qyOutput = &booleanQuery{Left: left, Right: right, IsOr: isOr} + case "|": + *props |= builderProps.NonFlat + qyOutput = &unionQuery{Left: left, Right: right} + } + return qyOutput, nil +} + +func (b *builder) processNode(root node, flags flag, props *builderProp) (q query, err error) { + if b.parseDepth = b.parseDepth + 1; b.parseDepth > 1024 { + err = errors.New("the xpath expressions is too complex") + return + } + *props = builderProps.None + switch root.Type() { + case nodeConstantOperand: + n := root.(*operandNode) + q = &constantQuery{Val: n.Val} + case nodeRoot: + q = &absoluteQuery{} + case nodeAxis: + q, err = b.processAxis(root.(*axisNode), flags, props) + b.firstInput = q + case nodeFilter: + q, err = b.processFilter(root.(*filterNode), flags, props) + b.firstInput = q + case nodeFunction: + q, err = b.processFunction(root.(*functionNode), props) + case nodeOperator: + q, err = b.processOperator(root.(*operatorNode), props) + case nodeGroup: + q, err = b.processNode(root.(*groupNode).Input, flagsEnum.None, props) + if err != nil { + return + } + q = &groupQuery{Input: q} + if b.firstInput == nil { + b.firstInput = q + } + } + b.parseDepth-- + return +} + +// build builds a specified XPath expressions expr. 
+func build(expr string, namespaces map[string]string) (q query, err error) { + defer func() { + if e := recover(); e != nil { + switch x := e.(type) { + case string: + err = errors.New(x) + case error: + err = x + default: + err = errors.New("unknown panic") + } + } + }() + root := parse(expr, namespaces) + b := &builder{} + props := builderProps.None + return b.processNode(root, flagsEnum.None, &props) +} diff --git a/vendor/github.com/antchfx/xpath/cache.go b/vendor/github.com/antchfx/xpath/cache.go new file mode 100644 index 00000000000..31a2b335656 --- /dev/null +++ b/vendor/github.com/antchfx/xpath/cache.go @@ -0,0 +1,80 @@ +package xpath + +import ( + "regexp" + "sync" +) + +type loadFunc func(key interface{}) (interface{}, error) + +const ( + defaultCap = 65536 +) + +// The reason we're building a simple capacity-resetting loading cache (when capacity reached) instead of using +// something like github.com/hashicorp/golang-lru is primarily due to (not wanting to create) external dependency. +// Currently this library has 0 external dep (other than go sdk), and supports go 1.6, 1.9, and 1.10 (and later). +// Creating external lib dependencies (plus their transitive dependencies) would make things hard if not impossible. +// We expect under most circumstances, the defaultCap is big enough for any long running services that use this +// library if their xpath regexp cardinality is low. However, in extreme cases when the capacity is reached, we +// simply reset the cache, taking a small subsequent perf hit (next to nothing considering amortization) in trade +// of more complex and less performant LRU type of construct. +type loadingCache struct { + sync.RWMutex + cap int + load loadFunc + m map[interface{}]interface{} + reset int +} + +// NewLoadingCache creates a new instance of a loading cache with capacity. Capacity must be >= 0, or +// it will panic. Capacity == 0 means the cache growth is unbounded. +func NewLoadingCache(load loadFunc, capacity int) *loadingCache { + if capacity < 0 { + panic("capacity must be >= 0") + } + return &loadingCache{cap: capacity, load: load, m: make(map[interface{}]interface{})} +} + +func (c *loadingCache) get(key interface{}) (interface{}, error) { + c.RLock() + v, found := c.m[key] + c.RUnlock() + if found { + return v, nil + } + v, err := c.load(key) + if err != nil { + return nil, err + } + c.Lock() + if c.cap > 0 && len(c.m) >= c.cap { + c.m = map[interface{}]interface{}{key: v} + c.reset++ + } else { + c.m[key] = v + } + c.Unlock() + return v, nil +} + +var ( + // RegexpCache is a loading cache for string -> *regexp.Regexp mapping. It is exported so that in rare cases + // client can customize load func and/or capacity. 
+ RegexpCache = defaultRegexpCache() +) + +func defaultRegexpCache() *loadingCache { + return NewLoadingCache( + func(key interface{}) (interface{}, error) { + return regexp.Compile(key.(string)) + }, defaultCap) +} + +func getRegexp(pattern string) (*regexp.Regexp, error) { + exp, err := RegexpCache.get(pattern) + if err != nil { + return nil, err + } + return exp.(*regexp.Regexp), nil +} diff --git a/vendor/github.com/antchfx/xpath/func.go b/vendor/github.com/antchfx/xpath/func.go new file mode 100644 index 00000000000..4079a194f58 --- /dev/null +++ b/vendor/github.com/antchfx/xpath/func.go @@ -0,0 +1,679 @@ +package xpath + +import ( + "errors" + "fmt" + "math" + "strconv" + "strings" + "sync" + "unicode" +) + +// Defined an interface of stringBuilder that compatible with +// strings.Builder(go 1.10) and bytes.Buffer(< go 1.10) +type stringBuilder interface { + WriteRune(r rune) (n int, err error) + WriteString(s string) (int, error) + Reset() + Grow(n int) + String() string +} + +var builderPool = sync.Pool{New: func() interface{} { + return newStringBuilder() +}} + +// The XPath function list. + +func predicate(q query) func(NodeNavigator) bool { + type Predicater interface { + Test(NodeNavigator) bool + } + if p, ok := q.(Predicater); ok { + return p.Test + } + return func(NodeNavigator) bool { return true } +} + +// positionFunc is a XPath Node Set functions position(). +func positionFunc() func(query, iterator) interface{} { + return func(q query, t iterator) interface{} { + var ( + count = 1 + node = t.Current().Copy() + ) + test := predicate(q) + for node.MoveToPrevious() { + if test(node) { + count++ + } + } + return float64(count) + } +} + +// lastFunc is a XPath Node Set functions last(). +func lastFunc() func(query, iterator) interface{} { + return func(q query, t iterator) interface{} { + var ( + count = 0 + node = t.Current().Copy() + ) + test := predicate(q) + node.MoveToFirst() + for { + if test(node) { + count++ + } + if !node.MoveToNext() { + break + } + } + return float64(count) + } +} + +// countFunc is a XPath Node Set functions count(node-set). +func countFunc(arg query) func(query, iterator) interface{} { + return func(_ query, t iterator) interface{} { + var count = 0 + q := functionArgs(arg) + test := predicate(q) + switch typ := q.Evaluate(t).(type) { + case query: + for node := typ.Select(t); node != nil; node = typ.Select(t) { + if test(node) { + count++ + } + } + } + return float64(count) + } +} + +// sumFunc is a XPath Node Set functions sum(node-set). +func sumFunc(arg query) func(query, iterator) interface{} { + return func(_ query, t iterator) interface{} { + var sum float64 + switch typ := functionArgs(arg).Evaluate(t).(type) { + case query: + for node := typ.Select(t); node != nil; node = typ.Select(t) { + if v, err := strconv.ParseFloat(node.Value(), 64); err == nil { + sum += v + } + } + case float64: + sum = typ + case string: + v, err := strconv.ParseFloat(typ, 64) + if err != nil { + panic(errors.New("sum() function argument type must be a node-set or number")) + } + sum = v + } + return sum + } +} + +func asNumber(t iterator, o interface{}) float64 { + switch typ := o.(type) { + case query: + node := typ.Select(t) + if node == nil { + return math.NaN() + } + if v, err := strconv.ParseFloat(node.Value(), 64); err == nil { + return v + } + case float64: + return typ + case string: + v, err := strconv.ParseFloat(typ, 64) + if err == nil { + return v + } + } + return math.NaN() +} + +// ceilingFunc is a XPath Node Set functions ceiling(node-set). 
+func ceilingFunc(arg query) func(query, iterator) interface{} { + return func(_ query, t iterator) interface{} { + val := asNumber(t, functionArgs(arg).Evaluate(t)) + // if math.IsNaN(val) { + // panic(errors.New("ceiling() function argument type must be a valid number")) + // } + return math.Ceil(val) + } +} + +// floorFunc is a XPath Node Set functions floor(node-set). +func floorFunc(arg query) func(query, iterator) interface{} { + return func(_ query, t iterator) interface{} { + val := asNumber(t, functionArgs(arg).Evaluate(t)) + return math.Floor(val) + } +} + +// roundFunc is a XPath Node Set functions round(node-set). +func roundFunc(arg query) func(query, iterator) interface{} { + return func(_ query, t iterator) interface{} { + val := asNumber(t, functionArgs(arg).Evaluate(t)) + //return math.Round(val) + return round(val) + } +} + +// nameFunc is a XPath functions name([node-set]). +func nameFunc(arg query) func(query, iterator) interface{} { + return func(_ query, t iterator) interface{} { + var v NodeNavigator + if arg == nil { + v = t.Current() + } else { + v = arg.Clone().Select(t) + if v == nil { + return "" + } + } + ns := v.Prefix() + if ns == "" { + return v.LocalName() + } + return ns + ":" + v.LocalName() + } +} + +// localNameFunc is a XPath functions local-name([node-set]). +func localNameFunc(arg query) func(query, iterator) interface{} { + return func(_ query, t iterator) interface{} { + var v NodeNavigator + if arg == nil { + v = t.Current() + } else { + v = arg.Clone().Select(t) + if v == nil { + return "" + } + } + return v.LocalName() + } +} + +// namespaceFunc is a XPath functions namespace-uri([node-set]). +func namespaceFunc(arg query) func(query, iterator) interface{} { + return func(_ query, t iterator) interface{} { + var v NodeNavigator + if arg == nil { + v = t.Current() + } else { + // Get the first node in the node-set if specified. + v = arg.Clone().Select(t) + if v == nil { + return "" + } + } + // fix about namespace-uri() bug: https://github.com/antchfx/xmlquery/issues/22 + // TODO: In the next version, add NamespaceURL() to the NodeNavigator interface. + type namespaceURL interface { + NamespaceURL() string + } + if f, ok := v.(namespaceURL); ok { + return f.NamespaceURL() + } + return v.Prefix() + } +} + +func asBool(t iterator, v interface{}) bool { + switch v := v.(type) { + case nil: + return false + case *NodeIterator: + return v.MoveNext() + case bool: + return v + case float64: + return v != 0 + case string: + return v != "" + case query: + return v.Select(t) != nil + default: + panic(fmt.Errorf("unexpected type: %T", v)) + } +} + +func asString(t iterator, v interface{}) string { + switch v := v.(type) { + case nil: + return "" + case bool: + if v { + return "true" + } + return "false" + case float64: + return strconv.FormatFloat(v, 'g', -1, 64) + case string: + return v + case query: + node := v.Select(t) + if node == nil { + return "" + } + return node.Value() + default: + panic(fmt.Errorf("unexpected type: %T", v)) + } +} + +// booleanFunc is a XPath functions boolean([node-set]). +func booleanFunc(arg1 query) func(query, iterator) interface{} { + return func(_ query, t iterator) interface{} { + v := functionArgs(arg1).Evaluate(t) + return asBool(t, v) + } +} + +// numberFunc is a XPath functions number([node-set]). 
+func numberFunc(arg1 query) func(query, iterator) interface{} { + return func(_ query, t iterator) interface{} { + v := functionArgs(arg1).Evaluate(t) + return asNumber(t, v) + } +} + +// stringFunc is a XPath functions string([node-set]). +func stringFunc(arg1 query) func(query, iterator) interface{} { + return func(_ query, t iterator) interface{} { + v := functionArgs(arg1).Evaluate(t) + return asString(t, v) + } +} + +// startwithFunc is a XPath functions starts-with(string, string). +func startwithFunc(arg1, arg2 query) func(query, iterator) interface{} { + return func(_ query, t iterator) interface{} { + var ( + m, n string + ok bool + ) + switch typ := functionArgs(arg1).Evaluate(t).(type) { + case string: + m = typ + case query: + node := typ.Select(t) + if node == nil { + return false + } + m = node.Value() + default: + panic(errors.New("starts-with() function argument type must be string")) + } + n, ok = functionArgs(arg2).Evaluate(t).(string) + if !ok { + panic(errors.New("starts-with() function argument type must be string")) + } + return strings.HasPrefix(m, n) + } +} + +// endwithFunc is a XPath functions ends-with(string, string). +func endwithFunc(arg1, arg2 query) func(query, iterator) interface{} { + return func(_ query, t iterator) interface{} { + var ( + m, n string + ok bool + ) + switch typ := functionArgs(arg1).Evaluate(t).(type) { + case string: + m = typ + case query: + node := typ.Select(t) + if node == nil { + return false + } + m = node.Value() + default: + panic(errors.New("ends-with() function argument type must be string")) + } + n, ok = functionArgs(arg2).Evaluate(t).(string) + if !ok { + panic(errors.New("ends-with() function argument type must be string")) + } + return strings.HasSuffix(m, n) + } +} + +// containsFunc is a XPath functions contains(string or @attr, string). +func containsFunc(arg1, arg2 query) func(query, iterator) interface{} { + return func(_ query, t iterator) interface{} { + var ( + m, n string + ok bool + ) + switch typ := functionArgs(arg1).Evaluate(t).(type) { + case string: + m = typ + case query: + node := typ.Select(t) + if node == nil { + return false + } + m = node.Value() + default: + panic(errors.New("contains() function argument type must be string")) + } + + n, ok = functionArgs(arg2).Evaluate(t).(string) + if !ok { + panic(errors.New("contains() function argument type must be string")) + } + + return strings.Contains(m, n) + } +} + +// matchesFunc is an XPath function that tests a given string against a regexp pattern. +// Note: does not support https://www.w3.org/TR/xpath-functions-31/#func-matches 3rd optional `flags` argument; if +// needed, directly put flags in the regexp pattern, such as `(?i)^pattern$` for `i` flag. +func matchesFunc(arg1, arg2 query) func(query, iterator) interface{} { + return func(_ query, t iterator) interface{} { + var s string + switch typ := functionArgs(arg1).Evaluate(t).(type) { + case string: + s = typ + case query: + node := typ.Select(t) + if node == nil { + return "" + } + s = node.Value() + } + var pattern string + var ok bool + if pattern, ok = functionArgs(arg2).Evaluate(t).(string); !ok { + panic(errors.New("matches() function second argument type must be string")) + } + re, err := getRegexp(pattern) + if err != nil { + panic(fmt.Errorf("matches() function second argument is not a valid regexp pattern, err: %s", err.Error())) + } + return re.MatchString(s) + } +} + +// normalizespaceFunc is XPath functions normalize-space(string?) 
+func normalizespaceFunc(arg1 query) func(query, iterator) interface{} { + return func(_ query, t iterator) interface{} { + var m string + switch typ := functionArgs(arg1).Evaluate(t).(type) { + case string: + m = typ + case query: + node := typ.Select(t) + if node == nil { + return "" + } + m = node.Value() + } + var b = builderPool.Get().(stringBuilder) + b.Grow(len(m)) + + runeStr := []rune(strings.TrimSpace(m)) + l := len(runeStr) + for i := range runeStr { + r := runeStr[i] + isSpace := unicode.IsSpace(r) + if !(isSpace && (i+1 < l && unicode.IsSpace(runeStr[i+1]))) { + if isSpace { + r = ' ' + } + b.WriteRune(r) + } + } + result := b.String() + b.Reset() + builderPool.Put(b) + + return result + } +} + +// substringFunc is XPath functions substring function returns a part of a given string. +func substringFunc(arg1, arg2, arg3 query) func(query, iterator) interface{} { + return func(_ query, t iterator) interface{} { + var m string + switch typ := functionArgs(arg1).Evaluate(t).(type) { + case string: + m = typ + case query: + node := typ.Select(t) + if node == nil { + return "" + } + m = node.Value() + } + + var start, length float64 + var ok bool + + if start, ok = functionArgs(arg2).Evaluate(t).(float64); !ok { + panic(errors.New("substring() function first argument type must be int")) + } else if start < 1 { + panic(errors.New("substring() function first argument type must be >= 1")) + } + start-- + if arg3 != nil { + if length, ok = functionArgs(arg3).Evaluate(t).(float64); !ok { + panic(errors.New("substring() function second argument type must be int")) + } + } + if (len(m) - int(start)) < int(length) { + panic(errors.New("substring() function start and length argument out of range")) + } + if length > 0 { + return m[int(start):int(length+start)] + } + return m[int(start):] + } +} + +// substringIndFunc is XPath functions substring-before/substring-after function returns a part of a given string. +func substringIndFunc(arg1, arg2 query, after bool) func(query, iterator) interface{} { + return func(_ query, t iterator) interface{} { + var str string + switch v := functionArgs(arg1).Evaluate(t).(type) { + case string: + str = v + case query: + node := v.Select(t) + if node == nil { + return "" + } + str = node.Value() + } + var word string + switch v := functionArgs(arg2).Evaluate(t).(type) { + case string: + word = v + case query: + node := v.Select(t) + if node == nil { + return "" + } + word = node.Value() + } + if word == "" { + return "" + } + + i := strings.Index(str, word) + if i < 0 { + return "" + } + if after { + return str[i+len(word):] + } + return str[:i] + } +} + +// stringLengthFunc is XPATH string-length( [string] ) function that returns a number +// equal to the number of characters in a given string. +func stringLengthFunc(arg1 query) func(query, iterator) interface{} { + return func(_ query, t iterator) interface{} { + switch v := functionArgs(arg1).Evaluate(t).(type) { + case string: + return float64(len(v)) + case query: + node := v.Select(t) + if node == nil { + break + } + return float64(len(node.Value())) + } + return float64(0) + } +} + +// translateFunc is XPath functions translate() function returns a replaced string. 
+func translateFunc(arg1, arg2, arg3 query) func(query, iterator) interface{} { + return func(_ query, t iterator) interface{} { + str := asString(t, functionArgs(arg1).Evaluate(t)) + src := asString(t, functionArgs(arg2).Evaluate(t)) + dst := asString(t, functionArgs(arg3).Evaluate(t)) + + replace := make([]string, 0, len(src)) + for i, s := range src { + d := "" + if i < len(dst) { + d = string(dst[i]) + } + replace = append(replace, string(s), d) + } + return strings.NewReplacer(replace...).Replace(str) + } +} + +// replaceFunc is XPath functions replace() function returns a replaced string. +func replaceFunc(arg1, arg2, arg3 query) func(query, iterator) interface{} { + return func(_ query, t iterator) interface{} { + str := asString(t, functionArgs(arg1).Evaluate(t)) + src := asString(t, functionArgs(arg2).Evaluate(t)) + dst := asString(t, functionArgs(arg3).Evaluate(t)) + + return strings.Replace(str, src, dst, -1) + } +} + +// notFunc is XPATH functions not(expression) function operation. +func notFunc(arg1 query) func(query, iterator) interface{} { + return func(_ query, t iterator) interface{} { + switch v := functionArgs(arg1).Evaluate(t).(type) { + case bool: + return !v + case query: + node := v.Select(t) + return node == nil + default: + return false + } + } +} + +// concatFunc is the concat function concatenates two or more +// strings and returns the resulting string. +// concat( string1 , string2 [, stringn]* ) +func concatFunc(args ...query) func(query, iterator) interface{} { + return func(_ query, t iterator) interface{} { + b := builderPool.Get().(stringBuilder) + for _, v := range args { + v = functionArgs(v) + + switch v := v.Evaluate(t).(type) { + case string: + b.WriteString(v) + case query: + node := v.Select(t) + if node != nil { + b.WriteString(node.Value()) + } + } + } + result := b.String() + b.Reset() + builderPool.Put(b) + + return result + } +} + +// https://github.com/antchfx/xpath/issues/43 +func functionArgs(q query) query { + if _, ok := q.(*functionQuery); ok { + return q + } + return q.Clone() +} + +func reverseFunc(q query, t iterator) func() NodeNavigator { + var list []NodeNavigator + for { + node := q.Select(t) + if node == nil { + break + } + list = append(list, node.Copy()) + } + i := len(list) + return func() NodeNavigator { + if i <= 0 { + return nil + } + i-- + node := list[i] + return node + } +} + +// string-join is a XPath Node Set functions string-join(node-set, separator). +func stringJoinFunc(q, arg1 query) func(query, iterator) interface{} { + return func(_ query, t iterator) interface{} { + var separator string + switch v := functionArgs(arg1).Evaluate(t).(type) { + case string: + separator = v + case query: + node := v.Select(t) + if node != nil { + separator = node.Value() + } + } + + q = functionArgs(q) + test := predicate(q) + var parts []string + switch v := q.Evaluate(t).(type) { + case string: + return v + case query: + for node := v.Select(t); node != nil; node = v.Select(t) { + if test(node) { + parts = append(parts, node.Value()) + } + } + } + return strings.Join(parts, separator) + } +} + +// lower-case is XPATH function that converts a string to lower case. 
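+//
+// Editor's example: lower-case("Hello World") => "hello world". This is an
+// XPath 2.0-style function exposed alongside the 1.0 set.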
+func lowerCaseFunc(arg1 query) func(query, iterator) interface{} { + return func(_ query, t iterator) interface{} { + v := functionArgs(arg1).Evaluate(t) + return strings.ToLower(asString(t, v)) + } +} diff --git a/vendor/github.com/antchfx/xpath/func_go110.go b/vendor/github.com/antchfx/xpath/func_go110.go new file mode 100644 index 00000000000..d6ca4513931 --- /dev/null +++ b/vendor/github.com/antchfx/xpath/func_go110.go @@ -0,0 +1,16 @@ +// +build go1.10 + +package xpath + +import ( + "math" + "strings" +) + +func round(f float64) int { + return int(math.Round(f)) +} + +func newStringBuilder() stringBuilder { + return &strings.Builder{} +} diff --git a/vendor/github.com/antchfx/xpath/func_pre_go110.go b/vendor/github.com/antchfx/xpath/func_pre_go110.go new file mode 100644 index 00000000000..335141f79f5 --- /dev/null +++ b/vendor/github.com/antchfx/xpath/func_pre_go110.go @@ -0,0 +1,22 @@ +// +build !go1.10 + +package xpath + +import ( + "bytes" + "math" +) + +// math.Round() is supported by Go 1.10+, +// This method just compatible for version <1.10. +// https://github.com/golang/go/issues/20100 +func round(f float64) int { + if math.Abs(f) < 0.5 { + return 0 + } + return int(f + math.Copysign(0.5, f)) +} + +func newStringBuilder() stringBuilder { + return &bytes.Buffer{} +} diff --git a/vendor/github.com/antchfx/xpath/operator.go b/vendor/github.com/antchfx/xpath/operator.go new file mode 100644 index 00000000000..2820152b369 --- /dev/null +++ b/vendor/github.com/antchfx/xpath/operator.go @@ -0,0 +1,288 @@ +package xpath + +import ( + "strconv" +) + +// The XPath number operator function list. + +type logical func(iterator, string, interface{}, interface{}) bool + +var logicalFuncs = [][]logical{ + {cmpBooleanBoolean, nil, nil, nil}, + {nil, cmpNumericNumeric, cmpNumericString, cmpNumericNodeSet}, + {nil, cmpStringNumeric, cmpStringString, cmpStringNodeSet}, + {nil, cmpNodeSetNumeric, cmpNodeSetString, cmpNodeSetNodeSet}, +} + +// number vs number +func cmpNumberNumberF(op string, a, b float64) bool { + switch op { + case "=": + return a == b + case ">": + return a > b + case "<": + return a < b + case ">=": + return a >= b + case "<=": + return a <= b + case "!=": + return a != b + } + return false +} + +// string vs string +func cmpStringStringF(op string, a, b string) bool { + switch op { + case "=": + return a == b + case ">": + return a > b + case "<": + return a < b + case ">=": + return a >= b + case "<=": + return a <= b + case "!=": + return a != b + } + return false +} + +func cmpBooleanBooleanF(op string, a, b bool) bool { + switch op { + case "or": + return a || b + case "and": + return a && b + } + return false +} + +func cmpNumericNumeric(t iterator, op string, m, n interface{}) bool { + a := m.(float64) + b := n.(float64) + return cmpNumberNumberF(op, a, b) +} + +func cmpNumericString(t iterator, op string, m, n interface{}) bool { + a := m.(float64) + b := n.(string) + num, err := strconv.ParseFloat(b, 64) + if err != nil { + panic(err) + } + return cmpNumberNumberF(op, a, num) +} + +func cmpNumericNodeSet(t iterator, op string, m, n interface{}) bool { + a := m.(float64) + b := n.(query) + + for { + node := b.Select(t) + if node == nil { + break + } + num, err := strconv.ParseFloat(node.Value(), 64) + if err != nil { + panic(err) + } + if cmpNumberNumberF(op, a, num) { + return true + } + } + return false +} + +func cmpNodeSetNumeric(t iterator, op string, m, n interface{}) bool { + a := m.(query) + b := n.(float64) + for { + node := a.Select(t) + if node == nil { + 
break + } + num, err := strconv.ParseFloat(node.Value(), 64) + if err != nil { + panic(err) + } + if cmpNumberNumberF(op, num, b) { + return true + } + } + return false +} + +func cmpNodeSetString(t iterator, op string, m, n interface{}) bool { + a := m.(query) + b := n.(string) + for { + node := a.Select(t) + if node == nil { + break + } + if cmpStringStringF(op, b, node.Value()) { + return true + } + } + return false +} + +func cmpNodeSetNodeSet(t iterator, op string, m, n interface{}) bool { + a := m.(query) + b := n.(query) + for { + x := a.Select(t) + if x == nil { + return false + } + + y := b.Select(t) + if y == nil { + return false + } + + for { + if cmpStringStringF(op, x.Value(), y.Value()) { + return true + } + if y = b.Select(t); y == nil { + break + } + } + // reset + b.Evaluate(t) + } +} + +func cmpStringNumeric(t iterator, op string, m, n interface{}) bool { + a := m.(string) + b := n.(float64) + num, err := strconv.ParseFloat(a, 64) + if err != nil { + panic(err) + } + return cmpNumberNumberF(op, b, num) +} + +func cmpStringString(t iterator, op string, m, n interface{}) bool { + a := m.(string) + b := n.(string) + return cmpStringStringF(op, a, b) +} + +func cmpStringNodeSet(t iterator, op string, m, n interface{}) bool { + a := m.(string) + b := n.(query) + for { + node := b.Select(t) + if node == nil { + break + } + if cmpStringStringF(op, a, node.Value()) { + return true + } + } + return false +} + +func cmpBooleanBoolean(t iterator, op string, m, n interface{}) bool { + a := m.(bool) + b := n.(bool) + return cmpBooleanBooleanF(op, a, b) +} + +// eqFunc is an `=` operator. +func eqFunc(t iterator, m, n interface{}) interface{} { + t1 := getXPathType(m) + t2 := getXPathType(n) + return logicalFuncs[t1][t2](t, "=", m, n) +} + +// gtFunc is an `>` operator. +func gtFunc(t iterator, m, n interface{}) interface{} { + t1 := getXPathType(m) + t2 := getXPathType(n) + return logicalFuncs[t1][t2](t, ">", m, n) +} + +// geFunc is an `>=` operator. +func geFunc(t iterator, m, n interface{}) interface{} { + t1 := getXPathType(m) + t2 := getXPathType(n) + return logicalFuncs[t1][t2](t, ">=", m, n) +} + +// ltFunc is an `<` operator. +func ltFunc(t iterator, m, n interface{}) interface{} { + t1 := getXPathType(m) + t2 := getXPathType(n) + return logicalFuncs[t1][t2](t, "<", m, n) +} + +// leFunc is an `<=` operator. +func leFunc(t iterator, m, n interface{}) interface{} { + t1 := getXPathType(m) + t2 := getXPathType(n) + return logicalFuncs[t1][t2](t, "<=", m, n) +} + +// neFunc is an `!=` operator. +func neFunc(t iterator, m, n interface{}) interface{} { + t1 := getXPathType(m) + t2 := getXPathType(n) + return logicalFuncs[t1][t2](t, "!=", m, n) +} + +// orFunc is an `or` operator. +var orFunc = func(t iterator, m, n interface{}) interface{} { + t1 := getXPathType(m) + t2 := getXPathType(n) + return logicalFuncs[t1][t2](t, "or", m, n) +} + +func numericExpr(t iterator, m, n interface{}, cb func(float64, float64) float64) float64 { + a := asNumber(t, m) + b := asNumber(t, n) + return cb(a, b) +} + +// plusFunc is an `+` operator. +var plusFunc = func(t iterator, m, n interface{}) interface{} { + return numericExpr(t, m, n, func(a, b float64) float64 { + return a + b + }) +} + +// minusFunc is an `-` operator. +var minusFunc = func(t iterator, m, n interface{}) interface{} { + return numericExpr(t, m, n, func(a, b float64) float64 { + return a - b + }) +} + +// mulFunc is an `*` operator. 
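+//
+// Editor's note (illustrative): the arithmetic operators below coerce both
+// operands through asNumber, so a node-set operand contributes the parsed
+// string value of its first selected node, e.g. //item[1]/price * 2
+// multiplies the text of the first price node by two.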
+var mulFunc = func(t iterator, m, n interface{}) interface{} { + return numericExpr(t, m, n, func(a, b float64) float64 { + return a * b + }) +} + +// divFunc is an `DIV` operator. +var divFunc = func(t iterator, m, n interface{}) interface{} { + return numericExpr(t, m, n, func(a, b float64) float64 { + return a / b + }) +} + +// modFunc is an 'MOD' operator. +var modFunc = func(t iterator, m, n interface{}) interface{} { + return numericExpr(t, m, n, func(a, b float64) float64 { + return float64(int(a) % int(b)) + }) +} diff --git a/vendor/github.com/antchfx/xpath/parse.go b/vendor/github.com/antchfx/xpath/parse.go new file mode 100644 index 00000000000..5b44cd8268a --- /dev/null +++ b/vendor/github.com/antchfx/xpath/parse.go @@ -0,0 +1,1256 @@ +package xpath + +import ( + "bytes" + "errors" + "fmt" + "strconv" + "unicode" + "unicode/utf8" +) + +// A XPath expression token type. +type itemType int + +const ( + itemComma itemType = iota // ',' + itemSlash // '/' + itemAt // '@' + itemDot // '.' + itemLParens // '(' + itemRParens // ')' + itemLBracket // '[' + itemRBracket // ']' + itemStar // '*' + itemPlus // '+' + itemMinus // '-' + itemEq // '=' + itemLt // '<' + itemGt // '>' + itemBang // '!' + itemDollar // '$' + itemApos // '\'' + itemQuote // '"' + itemUnion // '|' + itemNe // '!=' + itemLe // '<=' + itemGe // '>=' + itemAnd // '&&' + itemOr // '||' + itemDotDot // '..' + itemSlashSlash // '//' + itemName // XML Name + itemString // Quoted string constant + itemNumber // Number constant + itemAxe // Axe (like child::) + itemEOF // END +) + +// A node is an XPath node in the parse tree. +type node interface { + Type() nodeType +} + +// nodeType identifies the type of a parse tree node. +type nodeType int + +func (t nodeType) Type() nodeType { + return t +} + +const ( + nodeRoot nodeType = iota + nodeAxis + nodeFilter + nodeFunction + nodeOperator + nodeVariable + nodeConstantOperand + nodeGroup +) + +type parser struct { + r *scanner + d int + namespaces map[string]string +} + +// newOperatorNode returns new operator node OperatorNode. +func newOperatorNode(op string, left, right node) node { + return &operatorNode{nodeType: nodeOperator, Op: op, Left: left, Right: right} +} + +// newOperand returns new constant operand node OperandNode. +func newOperandNode(v interface{}) node { + return &operandNode{nodeType: nodeConstantOperand, Val: v} +} + +// newAxisNode returns new axis node AxisNode. +func newAxisNode(axisType string, typeTest NodeType, localName, prefix, prop string, n node, opts ...func(p *axisNode)) node { + a := axisNode{ + nodeType: nodeAxis, + typeTest: typeTest, + LocalName: localName, + Prefix: prefix, + AxisType: axisType, + Prop: prop, + Input: n, + } + for _, o := range opts { + o(&a) + } + return &a +} + +// newVariableNode returns new variable node VariableNode. +func newVariableNode(prefix, name string) node { + return &variableNode{nodeType: nodeVariable, Name: name, Prefix: prefix} +} + +// newFilterNode returns a new filter node FilterNode. +func newFilterNode(n, m node) node { + return &filterNode{nodeType: nodeFilter, Input: n, Condition: m} +} + +func newGroupNode(n node) node { + return &groupNode{nodeType: nodeGroup, Input: n} +} + +// newRootNode returns a root node. +func newRootNode(s string) node { + return &rootNode{nodeType: nodeRoot, slash: s} +} + +// newFunctionNode returns function call node. 
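+//
+// Editor's sketch of the tree these constructors build (shapes assumed for
+// illustration, fields elided): parsing /bookstore/book[price > 35] yields
+// roughly
+//
+//	filterNode{
+//	    Input:     axisNode{AxisType: "child", LocalName: "book", ...},
+//	    Condition: operatorNode{Op: ">", Left: <price axis>, Right: operandNode{Val: 35.0}},
+//	}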
+func newFunctionNode(name, prefix string, args []node) node { + return &functionNode{nodeType: nodeFunction, Prefix: prefix, FuncName: name, Args: args} +} + +// testOp reports whether current item name is an operand op. +func testOp(r *scanner, op string) bool { + return r.typ == itemName && r.prefix == "" && r.name == op +} + +func isPrimaryExpr(r *scanner) bool { + switch r.typ { + case itemString, itemNumber, itemDollar, itemLParens: + return true + case itemName: + return r.canBeFunc && !isNodeType(r) + } + return false +} + +func isNodeType(r *scanner) bool { + switch r.name { + case "node", "text", "processing-instruction", "comment": + return r.prefix == "" + } + return false +} + +func isStep(item itemType) bool { + switch item { + case itemDot, itemDotDot, itemAt, itemAxe, itemStar, itemName: + return true + } + return false +} + +func checkItem(r *scanner, typ itemType) { + if r.typ != typ { + panic(fmt.Sprintf("%s has an invalid token", r.text)) + } +} + +// parseExpression parsing the expression with input node n. +func (p *parser) parseExpression(n node) node { + if p.d = p.d + 1; p.d > 200 { + panic("the xpath query is too complex(depth > 200)") + } + n = p.parseOrExpr(n) + p.d-- + return n +} + +// next scanning next item on forward. +func (p *parser) next() bool { + return p.r.nextItem() +} + +func (p *parser) skipItem(typ itemType) { + checkItem(p.r, typ) + p.next() +} + +// OrExpr ::= AndExpr | OrExpr 'or' AndExpr +func (p *parser) parseOrExpr(n node) node { + opnd := p.parseAndExpr(n) + for { + if !testOp(p.r, "or") { + break + } + p.next() + opnd = newOperatorNode("or", opnd, p.parseAndExpr(n)) + } + return opnd +} + +// AndExpr ::= EqualityExpr | AndExpr 'and' EqualityExpr +func (p *parser) parseAndExpr(n node) node { + opnd := p.parseEqualityExpr(n) + for { + if !testOp(p.r, "and") { + break + } + p.next() + opnd = newOperatorNode("and", opnd, p.parseEqualityExpr(n)) + } + return opnd +} + +// EqualityExpr ::= RelationalExpr | EqualityExpr '=' RelationalExpr | EqualityExpr '!=' RelationalExpr +func (p *parser) parseEqualityExpr(n node) node { + opnd := p.parseRelationalExpr(n) +Loop: + for { + var op string + switch p.r.typ { + case itemEq: + op = "=" + case itemNe: + op = "!=" + default: + break Loop + } + p.next() + opnd = newOperatorNode(op, opnd, p.parseRelationalExpr(n)) + } + return opnd +} + +// RelationalExpr ::= AdditiveExpr | RelationalExpr '<' AdditiveExpr | RelationalExpr '>' AdditiveExpr +// +// | RelationalExpr '<=' AdditiveExpr +// | RelationalExpr '>=' AdditiveExpr +func (p *parser) parseRelationalExpr(n node) node { + opnd := p.parseAdditiveExpr(n) +Loop: + for { + var op string + switch p.r.typ { + case itemLt: + op = "<" + case itemGt: + op = ">" + case itemLe: + op = "<=" + case itemGe: + op = ">=" + default: + break Loop + } + p.next() + opnd = newOperatorNode(op, opnd, p.parseAdditiveExpr(n)) + } + return opnd +} + +// AdditiveExpr ::= MultiplicativeExpr | AdditiveExpr '+' MultiplicativeExpr | AdditiveExpr '-' MultiplicativeExpr +func (p *parser) parseAdditiveExpr(n node) node { + opnd := p.parseMultiplicativeExpr(n) +Loop: + for { + var op string + switch p.r.typ { + case itemPlus: + op = "+" + case itemMinus: + op = "-" + default: + break Loop + } + p.next() + opnd = newOperatorNode(op, opnd, p.parseMultiplicativeExpr(n)) + } + return opnd +} + +// MultiplicativeExpr ::= UnaryExpr | MultiplicativeExpr MultiplyOperator(*) UnaryExpr +// +// | MultiplicativeExpr 'div' UnaryExpr | MultiplicativeExpr 'mod' UnaryExpr +func (p *parser) 
parseMultiplicativeExpr(n node) node { + opnd := p.parseUnaryExpr(n) +Loop: + for { + var op string + if p.r.typ == itemStar { + op = "*" + } else if testOp(p.r, "div") || testOp(p.r, "mod") { + op = p.r.name + } else { + break Loop + } + p.next() + opnd = newOperatorNode(op, opnd, p.parseUnaryExpr(n)) + } + return opnd +} + +// UnaryExpr ::= UnionExpr | '-' UnaryExpr +func (p *parser) parseUnaryExpr(n node) node { + minus := false + // ignore '-' sequence + for p.r.typ == itemMinus { + p.next() + minus = !minus + } + opnd := p.parseUnionExpr(n) + if minus { + opnd = newOperatorNode("*", opnd, newOperandNode(float64(-1))) + } + return opnd +} + +// UnionExpr ::= PathExpr | UnionExpr '|' PathExpr +func (p *parser) parseUnionExpr(n node) node { + opnd := p.parsePathExpr(n) +Loop: + for { + if p.r.typ != itemUnion { + break Loop + } + p.next() + opnd2 := p.parsePathExpr(n) + // Checking the node type that must be is node set type? + opnd = newOperatorNode("|", opnd, opnd2) + } + return opnd +} + +// PathExpr ::= LocationPath | FilterExpr | FilterExpr '/' RelativeLocationPath | FilterExpr '//' RelativeLocationPath +func (p *parser) parsePathExpr(n node) node { + var opnd node + if isPrimaryExpr(p.r) { + opnd = p.parseFilterExpr(n) + switch p.r.typ { + case itemSlash: + p.next() + opnd = p.parseRelativeLocationPath(opnd) + case itemSlashSlash: + p.next() + opnd = p.parseRelativeLocationPath(newAxisNode("descendant-or-self", allNode, "", "", "", opnd)) + } + } else { + opnd = p.parseLocationPath(nil) + } + return opnd +} + +// FilterExpr ::= PrimaryExpr | FilterExpr Predicate +func (p *parser) parseFilterExpr(n node) node { + opnd := p.parsePrimaryExpr(n) + if p.r.typ == itemLBracket { + opnd = newFilterNode(opnd, p.parsePredicate(opnd)) + } + return opnd +} + +// Predicate ::= '[' PredicateExpr ']' +func (p *parser) parsePredicate(n node) node { + p.skipItem(itemLBracket) + opnd := p.parseExpression(n) + p.skipItem(itemRBracket) + return opnd +} + +// LocationPath ::= RelativeLocationPath | AbsoluteLocationPath +func (p *parser) parseLocationPath(n node) (opnd node) { + switch p.r.typ { + case itemSlash: + p.next() + opnd = newRootNode("/") + if isStep(p.r.typ) { + opnd = p.parseRelativeLocationPath(opnd) // ?? child:: or self ?? + } + case itemSlashSlash: + p.next() + opnd = newRootNode("//") + opnd = p.parseRelativeLocationPath(newAxisNode("descendant-or-self", allNode, "", "", "", opnd)) + default: + opnd = p.parseRelativeLocationPath(n) + } + return opnd +} + +// RelativeLocationPath ::= Step | RelativeLocationPath '/' Step | AbbreviatedRelativeLocationPath +func (p *parser) parseRelativeLocationPath(n node) node { + opnd := n +Loop: + for { + opnd = p.parseStep(opnd) + switch p.r.typ { + case itemSlashSlash: + p.next() + opnd = newAxisNode("descendant-or-self", allNode, "", "", "", opnd) + case itemSlash: + p.next() + default: + break Loop + } + } + return opnd +} + +// Step ::= AxisSpecifier NodeTest Predicate* | AbbreviatedStep +func (p *parser) parseStep(n node) (opnd node) { + if p.r.typ == itemDot || p.r.typ == itemDotDot { + if p.r.typ == itemDot { + opnd = newAxisNode("self", allNode, "", "", "", n) + } else { + opnd = newAxisNode("parent", allNode, "", "", "", n) + } + p.next() + if p.r.typ != itemLBracket { + return opnd + } + } else { + axisType := "child" // default axes value. 
+ switch p.r.typ { + case itemAt: + axisType = "attribute" + p.next() + case itemAxe: + axisType = p.r.name + p.next() + case itemLParens: + return p.parseSequence(n) + } + matchType := ElementNode + if axisType == "attribute" { + matchType = AttributeNode + } + opnd = p.parseNodeTest(n, axisType, matchType) + } + for p.r.typ == itemLBracket { + opnd = newFilterNode(opnd, p.parsePredicate(opnd)) + } + return opnd +} + +// Expr ::= '(' Step ("," Step)* ')' +func (p *parser) parseSequence(n node) (opnd node) { + p.skipItem(itemLParens) + opnd = p.parseStep(n) + for { + if p.r.typ != itemComma { + break + } + p.next() + opnd2 := p.parseStep(n) + opnd = newOperatorNode("|", opnd, opnd2) + } + p.skipItem(itemRParens) + return opnd +} + +// NodeTest ::= NameTest | nodeType '(' ')' | 'processing-instruction' '(' Literal ')' +func (p *parser) parseNodeTest(n node, axeTyp string, matchType NodeType) (opnd node) { + switch p.r.typ { + case itemName: + if p.r.canBeFunc && isNodeType(p.r) { + var prop string + switch p.r.name { + case "comment", "text", "processing-instruction", "node": + prop = p.r.name + } + var name string + p.next() + p.skipItem(itemLParens) + if prop == "processing-instruction" && p.r.typ != itemRParens { + checkItem(p.r, itemString) + name = p.r.strval + p.next() + } + p.skipItem(itemRParens) + switch prop { + case "comment": + matchType = CommentNode + case "text": + matchType = TextNode + case "processing-instruction": + case "node": + matchType = allNode + default: + matchType = RootNode + } + + opnd = newAxisNode(axeTyp, matchType, name, "", prop, n) + } else { + prefix := p.r.prefix + name := p.r.name + p.next() + if p.r.name == "*" { + name = "" + } + opnd = newAxisNode(axeTyp, matchType, name, prefix, "", n, func(a *axisNode) { + if prefix != "" && p.namespaces != nil { + if ns, ok := p.namespaces[prefix]; ok { + a.hasNamespaceURI = true + a.namespaceURI = ns + } else { + panic(fmt.Sprintf("prefix %s not defined.", prefix)) + } + } + }) + } + case itemStar: + opnd = newAxisNode(axeTyp, matchType, "", "", "", n) + p.next() + default: + panic("expression must evaluate to a node-set") + } + return opnd +} + +// PrimaryExpr ::= VariableReference | '(' Expr ')' | Literal | Number | FunctionCall +func (p *parser) parsePrimaryExpr(n node) (opnd node) { + switch p.r.typ { + case itemString: + opnd = newOperandNode(p.r.strval) + p.next() + case itemNumber: + opnd = newOperandNode(p.r.numval) + p.next() + case itemDollar: + p.next() + checkItem(p.r, itemName) + opnd = newVariableNode(p.r.prefix, p.r.name) + p.next() + case itemLParens: + p.next() + opnd = p.parseExpression(n) + if opnd.Type() != nodeConstantOperand { + opnd = newGroupNode(opnd) + } + p.skipItem(itemRParens) + case itemName: + if p.r.canBeFunc && !isNodeType(p.r) { + opnd = p.parseMethod(nil) + } + } + return opnd +} + +// FunctionCall ::= FunctionName '(' ( Argument ( ',' Argument )* )? ')' +func (p *parser) parseMethod(n node) node { + var args []node + name := p.r.name + prefix := p.r.prefix + + p.skipItem(itemName) + p.skipItem(itemLParens) + if p.r.typ != itemRParens { + for { + args = append(args, p.parseExpression(n)) + if p.r.typ == itemRParens { + break + } + p.skipItem(itemComma) + } + } + p.skipItem(itemRParens) + return newFunctionNode(name, prefix, args) +} + +// Parse parsing the XPath express string expr and returns a tree node. 
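+//
+// Editor's note: parse panics on malformed input (see checkItem and the
+// depth guard in parseExpression); a hypothetical internal use is
+//
+//	root := parse(`//book[1]/title`, nil)
+//
+// and the exported compile path is expected to recover such panics into
+// ordinary errors.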
+func parse(expr string, namespaces map[string]string) node { + r := &scanner{text: expr} + r.nextChar() + r.nextItem() + p := &parser{r: r, namespaces: namespaces} + return p.parseExpression(nil) +} + +// rootNode holds a top-level node of tree. +type rootNode struct { + nodeType + slash string +} + +func (r *rootNode) String() string { + return r.slash +} + +// operatorNode holds two Nodes operator. +type operatorNode struct { + nodeType + Op string + Left, Right node +} + +func (o *operatorNode) String() string { + return fmt.Sprintf("%v%s%v", o.Left, o.Op, o.Right) +} + +// axisNode holds a location step. +type axisNode struct { + nodeType + Input node + Prop string // node-test name.[comment|text|processing-instruction|node] + AxisType string // name of the axis.[attribute|ancestor|child|....] + LocalName string // local part name of node. + Prefix string // prefix name of node. + namespaceURI string // namespace URI of node + hasNamespaceURI bool // if namespace URI is set (can be "") + typeTest NodeType +} + +func (a *axisNode) String() string { + var b bytes.Buffer + if a.AxisType != "" { + b.Write([]byte(a.AxisType + "::")) + } + if a.Prefix != "" { + b.Write([]byte(a.Prefix + ":")) + } + b.Write([]byte(a.LocalName)) + if a.Prop != "" { + b.Write([]byte("/" + a.Prop + "()")) + } + return b.String() +} + +// operandNode holds a constant operand. +type operandNode struct { + nodeType + Val interface{} +} + +func (o *operandNode) String() string { + return fmt.Sprintf("%v", o.Val) +} + +// groupNode holds a set of node expression +type groupNode struct { + nodeType + Input node +} + +func (g *groupNode) String() string { + return fmt.Sprintf("%s", g.Input) +} + +// filterNode holds a condition filter. +type filterNode struct { + nodeType + Input, Condition node +} + +func (f *filterNode) String() string { + return fmt.Sprintf("%s[%s]", f.Input, f.Condition) +} + +// variableNode holds a variable. +type variableNode struct { + nodeType + Name, Prefix string +} + +func (v *variableNode) String() string { + if v.Prefix == "" { + return v.Name + } + return fmt.Sprintf("%s:%s", v.Prefix, v.Name) +} + +// functionNode holds a function call. 
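+//
+// For example (editor's addition), concat("a", "b") becomes a functionNode
+// with FuncName "concat" and two operand arguments; its String method below
+// prints it back as concat(a,b).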
+type functionNode struct { + nodeType + Args []node + Prefix string + FuncName string // function name +} + +func (f *functionNode) String() string { + var b bytes.Buffer + // fun(arg1, ..., argn) + b.Write([]byte(f.FuncName)) + b.Write([]byte("(")) + for i, arg := range f.Args { + if i > 0 { + b.Write([]byte(",")) + } + b.Write([]byte(fmt.Sprintf("%s", arg))) + } + b.Write([]byte(")")) + return b.String() +} + +type scanner struct { + text, name, prefix string + + pos int + curr rune + currSize int + typ itemType + strval string // text value at current pos + numval float64 // number value at current pos + canBeFunc bool +} + +func (s *scanner) nextChar() bool { + if s.pos >= len(s.text) { + s.curr = rune(0) + s.currSize = 1 + return false + } + + r, size := rune(s.text[s.pos]), 1 + if r >= 0x80 { // handle multi-byte runes + r, size = utf8.DecodeRuneInString(s.text[s.pos:]) + } + + s.curr = r + s.currSize = size + s.pos += size + return true +} + +func (s *scanner) nextItem() bool { + s.skipSpace() + switch s.curr { + case 0: + s.typ = itemEOF + return false + case ',', '@', '(', ')', '|', '*', '[', ']', '+', '-', '=', '#', '$': + s.typ = asItemType(s.curr) + s.nextChar() + case '<': + s.typ = itemLt + s.nextChar() + if s.curr == '=' { + s.typ = itemLe + s.nextChar() + } + case '>': + s.typ = itemGt + s.nextChar() + if s.curr == '=' { + s.typ = itemGe + s.nextChar() + } + case '!': + s.typ = itemBang + s.nextChar() + if s.curr == '=' { + s.typ = itemNe + s.nextChar() + } + case '.': + s.typ = itemDot + s.nextChar() + if s.curr == '.' { + s.typ = itemDotDot + s.nextChar() + } else if isDigit(s.curr) { + s.typ = itemNumber + s.numval = s.scanFraction() + } + case '/': + s.typ = itemSlash + s.nextChar() + if s.curr == '/' { + s.typ = itemSlashSlash + s.nextChar() + } + case '"', '\'': + s.typ = itemString + s.strval = s.scanString() + default: + if isDigit(s.curr) { + s.typ = itemNumber + s.numval = s.scanNumber() + } else if isName(s.curr) { + s.typ = itemName + s.name = s.scanName() + s.prefix = "" + // "foo:bar" is one itemem not three because it doesn't allow spaces in between + // We should distinct it from "foo::" and need process "foo ::" as well + if s.curr == ':' { + s.nextChar() + // can be "foo:bar" or "foo::" + if s.curr == ':' { + // "foo::" + s.nextChar() + s.typ = itemAxe + } else { // "foo:*", "foo:bar" or "foo: " + s.prefix = s.name + if s.curr == '*' { + s.nextChar() + s.name = "*" + } else if isName(s.curr) { + s.name = s.scanName() + } else { + panic(fmt.Sprintf("%s has an invalid qualified name.", s.text)) + } + } + } else { + s.skipSpace() + if s.curr == ':' { + s.nextChar() + // it can be "foo ::" or just "foo :" + if s.curr == ':' { + s.nextChar() + s.typ = itemAxe + } else { + panic(fmt.Sprintf("%s has an invalid qualified name.", s.text)) + } + } + } + s.skipSpace() + s.canBeFunc = s.curr == '(' + } else { + panic(fmt.Sprintf("%s has an invalid token.", s.text)) + } + } + return true +} + +func (s *scanner) skipSpace() { +Loop: + for { + if !unicode.IsSpace(s.curr) || !s.nextChar() { + break Loop + } + } +} + +func (s *scanner) scanFraction() float64 { + var ( + i = s.pos - 2 + c = 1 // '.' + ) + for isDigit(s.curr) { + s.nextChar() + c++ + } + v, err := strconv.ParseFloat(s.text[i:i+c], 64) + if err != nil { + panic(fmt.Errorf("xpath: scanFraction parse float got error: %v", err)) + } + return v +} + +func (s *scanner) scanNumber() float64 { + var ( + c int + i = s.pos - 1 + ) + for isDigit(s.curr) { + s.nextChar() + c++ + } + if s.curr == '.' 
{ + s.nextChar() + c++ + for isDigit(s.curr) { + s.nextChar() + c++ + } + } + v, err := strconv.ParseFloat(s.text[i:i+c], 64) + if err != nil { + panic(fmt.Errorf("xpath: scanNumber parse float got error: %v", err)) + } + return v +} + +func (s *scanner) scanString() string { + var ( + c = 0 + end = s.curr + ) + s.nextChar() + i := s.pos - s.currSize + if s.currSize > 1 { + c++ + } + for s.curr != end { + if !s.nextChar() { + panic(errors.New("xpath: scanString got unclosed string")) + } + c += s.currSize + } + s.nextChar() + return s.text[i : i+c] +} + +func (s *scanner) scanName() string { + var ( + c = s.currSize - 1 + i = s.pos - s.currSize + ) + + // Detect current rune size + + for isName(s.curr) { + if !s.nextChar() { + c += s.currSize + break + } + c += s.currSize + } + return s.text[i : i+c] +} + +func isName(r rune) bool { + return string(r) != ":" && string(r) != "/" && + (unicode.Is(first, r) || unicode.Is(second, r) || string(r) == "*") +} + +func isDigit(r rune) bool { + return unicode.IsDigit(r) +} + +func asItemType(r rune) itemType { + switch r { + case ',': + return itemComma + case '@': + return itemAt + case '(': + return itemLParens + case ')': + return itemRParens + case '|': + return itemUnion + case '*': + return itemStar + case '[': + return itemLBracket + case ']': + return itemRBracket + case '+': + return itemPlus + case '-': + return itemMinus + case '=': + return itemEq + case '$': + return itemDollar + } + panic(fmt.Errorf("unknown item: %v", r)) +} + +var first = &unicode.RangeTable{ + R16: []unicode.Range16{ + {0x003A, 0x003A, 1}, + {0x0041, 0x005A, 1}, + {0x005F, 0x005F, 1}, + {0x0061, 0x007A, 1}, + {0x00C0, 0x00D6, 1}, + {0x00D8, 0x00F6, 1}, + {0x00F8, 0x00FF, 1}, + {0x0100, 0x0131, 1}, + {0x0134, 0x013E, 1}, + {0x0141, 0x0148, 1}, + {0x014A, 0x017E, 1}, + {0x0180, 0x01C3, 1}, + {0x01CD, 0x01F0, 1}, + {0x01F4, 0x01F5, 1}, + {0x01FA, 0x0217, 1}, + {0x0250, 0x02A8, 1}, + {0x02BB, 0x02C1, 1}, + {0x0386, 0x0386, 1}, + {0x0388, 0x038A, 1}, + {0x038C, 0x038C, 1}, + {0x038E, 0x03A1, 1}, + {0x03A3, 0x03CE, 1}, + {0x03D0, 0x03D6, 1}, + {0x03DA, 0x03E0, 2}, + {0x03E2, 0x03F3, 1}, + {0x0401, 0x040C, 1}, + {0x040E, 0x044F, 1}, + {0x0451, 0x045C, 1}, + {0x045E, 0x0481, 1}, + {0x0490, 0x04C4, 1}, + {0x04C7, 0x04C8, 1}, + {0x04CB, 0x04CC, 1}, + {0x04D0, 0x04EB, 1}, + {0x04EE, 0x04F5, 1}, + {0x04F8, 0x04F9, 1}, + {0x0531, 0x0556, 1}, + {0x0559, 0x0559, 1}, + {0x0561, 0x0586, 1}, + {0x05D0, 0x05EA, 1}, + {0x05F0, 0x05F2, 1}, + {0x0621, 0x063A, 1}, + {0x0641, 0x064A, 1}, + {0x0671, 0x06B7, 1}, + {0x06BA, 0x06BE, 1}, + {0x06C0, 0x06CE, 1}, + {0x06D0, 0x06D3, 1}, + {0x06D5, 0x06D5, 1}, + {0x06E5, 0x06E6, 1}, + {0x0905, 0x0939, 1}, + {0x093D, 0x093D, 1}, + {0x0958, 0x0961, 1}, + {0x0985, 0x098C, 1}, + {0x098F, 0x0990, 1}, + {0x0993, 0x09A8, 1}, + {0x09AA, 0x09B0, 1}, + {0x09B2, 0x09B2, 1}, + {0x09B6, 0x09B9, 1}, + {0x09DC, 0x09DD, 1}, + {0x09DF, 0x09E1, 1}, + {0x09F0, 0x09F1, 1}, + {0x0A05, 0x0A0A, 1}, + {0x0A0F, 0x0A10, 1}, + {0x0A13, 0x0A28, 1}, + {0x0A2A, 0x0A30, 1}, + {0x0A32, 0x0A33, 1}, + {0x0A35, 0x0A36, 1}, + {0x0A38, 0x0A39, 1}, + {0x0A59, 0x0A5C, 1}, + {0x0A5E, 0x0A5E, 1}, + {0x0A72, 0x0A74, 1}, + {0x0A85, 0x0A8B, 1}, + {0x0A8D, 0x0A8D, 1}, + {0x0A8F, 0x0A91, 1}, + {0x0A93, 0x0AA8, 1}, + {0x0AAA, 0x0AB0, 1}, + {0x0AB2, 0x0AB3, 1}, + {0x0AB5, 0x0AB9, 1}, + {0x0ABD, 0x0AE0, 0x23}, + {0x0B05, 0x0B0C, 1}, + {0x0B0F, 0x0B10, 1}, + {0x0B13, 0x0B28, 1}, + {0x0B2A, 0x0B30, 1}, + {0x0B32, 0x0B33, 1}, + {0x0B36, 0x0B39, 1}, + {0x0B3D, 0x0B3D, 1}, + {0x0B5C, 0x0B5D, 1}, + 
{0x0B5F, 0x0B61, 1}, + {0x0B85, 0x0B8A, 1}, + {0x0B8E, 0x0B90, 1}, + {0x0B92, 0x0B95, 1}, + {0x0B99, 0x0B9A, 1}, + {0x0B9C, 0x0B9C, 1}, + {0x0B9E, 0x0B9F, 1}, + {0x0BA3, 0x0BA4, 1}, + {0x0BA8, 0x0BAA, 1}, + {0x0BAE, 0x0BB5, 1}, + {0x0BB7, 0x0BB9, 1}, + {0x0C05, 0x0C0C, 1}, + {0x0C0E, 0x0C10, 1}, + {0x0C12, 0x0C28, 1}, + {0x0C2A, 0x0C33, 1}, + {0x0C35, 0x0C39, 1}, + {0x0C60, 0x0C61, 1}, + {0x0C85, 0x0C8C, 1}, + {0x0C8E, 0x0C90, 1}, + {0x0C92, 0x0CA8, 1}, + {0x0CAA, 0x0CB3, 1}, + {0x0CB5, 0x0CB9, 1}, + {0x0CDE, 0x0CDE, 1}, + {0x0CE0, 0x0CE1, 1}, + {0x0D05, 0x0D0C, 1}, + {0x0D0E, 0x0D10, 1}, + {0x0D12, 0x0D28, 1}, + {0x0D2A, 0x0D39, 1}, + {0x0D60, 0x0D61, 1}, + {0x0E01, 0x0E2E, 1}, + {0x0E30, 0x0E30, 1}, + {0x0E32, 0x0E33, 1}, + {0x0E40, 0x0E45, 1}, + {0x0E81, 0x0E82, 1}, + {0x0E84, 0x0E84, 1}, + {0x0E87, 0x0E88, 1}, + {0x0E8A, 0x0E8D, 3}, + {0x0E94, 0x0E97, 1}, + {0x0E99, 0x0E9F, 1}, + {0x0EA1, 0x0EA3, 1}, + {0x0EA5, 0x0EA7, 2}, + {0x0EAA, 0x0EAB, 1}, + {0x0EAD, 0x0EAE, 1}, + {0x0EB0, 0x0EB0, 1}, + {0x0EB2, 0x0EB3, 1}, + {0x0EBD, 0x0EBD, 1}, + {0x0EC0, 0x0EC4, 1}, + {0x0F40, 0x0F47, 1}, + {0x0F49, 0x0F69, 1}, + {0x10A0, 0x10C5, 1}, + {0x10D0, 0x10F6, 1}, + {0x1100, 0x1100, 1}, + {0x1102, 0x1103, 1}, + {0x1105, 0x1107, 1}, + {0x1109, 0x1109, 1}, + {0x110B, 0x110C, 1}, + {0x110E, 0x1112, 1}, + {0x113C, 0x1140, 2}, + {0x114C, 0x1150, 2}, + {0x1154, 0x1155, 1}, + {0x1159, 0x1159, 1}, + {0x115F, 0x1161, 1}, + {0x1163, 0x1169, 2}, + {0x116D, 0x116E, 1}, + {0x1172, 0x1173, 1}, + {0x1175, 0x119E, 0x119E - 0x1175}, + {0x11A8, 0x11AB, 0x11AB - 0x11A8}, + {0x11AE, 0x11AF, 1}, + {0x11B7, 0x11B8, 1}, + {0x11BA, 0x11BA, 1}, + {0x11BC, 0x11C2, 1}, + {0x11EB, 0x11F0, 0x11F0 - 0x11EB}, + {0x11F9, 0x11F9, 1}, + {0x1E00, 0x1E9B, 1}, + {0x1EA0, 0x1EF9, 1}, + {0x1F00, 0x1F15, 1}, + {0x1F18, 0x1F1D, 1}, + {0x1F20, 0x1F45, 1}, + {0x1F48, 0x1F4D, 1}, + {0x1F50, 0x1F57, 1}, + {0x1F59, 0x1F5B, 0x1F5B - 0x1F59}, + {0x1F5D, 0x1F5D, 1}, + {0x1F5F, 0x1F7D, 1}, + {0x1F80, 0x1FB4, 1}, + {0x1FB6, 0x1FBC, 1}, + {0x1FBE, 0x1FBE, 1}, + {0x1FC2, 0x1FC4, 1}, + {0x1FC6, 0x1FCC, 1}, + {0x1FD0, 0x1FD3, 1}, + {0x1FD6, 0x1FDB, 1}, + {0x1FE0, 0x1FEC, 1}, + {0x1FF2, 0x1FF4, 1}, + {0x1FF6, 0x1FFC, 1}, + {0x2126, 0x2126, 1}, + {0x212A, 0x212B, 1}, + {0x212E, 0x212E, 1}, + {0x2180, 0x2182, 1}, + {0x3007, 0x3007, 1}, + {0x3021, 0x3029, 1}, + {0x3041, 0x3094, 1}, + {0x30A1, 0x30FA, 1}, + {0x3105, 0x312C, 1}, + {0x4E00, 0x9FA5, 1}, + {0xAC00, 0xD7A3, 1}, + }, +} + +var second = &unicode.RangeTable{ + R16: []unicode.Range16{ + {0x002D, 0x002E, 1}, + {0x0030, 0x0039, 1}, + {0x00B7, 0x00B7, 1}, + {0x02D0, 0x02D1, 1}, + {0x0300, 0x0345, 1}, + {0x0360, 0x0361, 1}, + {0x0387, 0x0387, 1}, + {0x0483, 0x0486, 1}, + {0x0591, 0x05A1, 1}, + {0x05A3, 0x05B9, 1}, + {0x05BB, 0x05BD, 1}, + {0x05BF, 0x05BF, 1}, + {0x05C1, 0x05C2, 1}, + {0x05C4, 0x0640, 0x0640 - 0x05C4}, + {0x064B, 0x0652, 1}, + {0x0660, 0x0669, 1}, + {0x0670, 0x0670, 1}, + {0x06D6, 0x06DC, 1}, + {0x06DD, 0x06DF, 1}, + {0x06E0, 0x06E4, 1}, + {0x06E7, 0x06E8, 1}, + {0x06EA, 0x06ED, 1}, + {0x06F0, 0x06F9, 1}, + {0x0901, 0x0903, 1}, + {0x093C, 0x093C, 1}, + {0x093E, 0x094C, 1}, + {0x094D, 0x094D, 1}, + {0x0951, 0x0954, 1}, + {0x0962, 0x0963, 1}, + {0x0966, 0x096F, 1}, + {0x0981, 0x0983, 1}, + {0x09BC, 0x09BC, 1}, + {0x09BE, 0x09BF, 1}, + {0x09C0, 0x09C4, 1}, + {0x09C7, 0x09C8, 1}, + {0x09CB, 0x09CD, 1}, + {0x09D7, 0x09D7, 1}, + {0x09E2, 0x09E3, 1}, + {0x09E6, 0x09EF, 1}, + {0x0A02, 0x0A3C, 0x3A}, + {0x0A3E, 0x0A3F, 1}, + {0x0A40, 0x0A42, 1}, + {0x0A47, 0x0A48, 1}, + {0x0A4B, 0x0A4D, 1}, + 
{0x0A66, 0x0A6F, 1}, + {0x0A70, 0x0A71, 1}, + {0x0A81, 0x0A83, 1}, + {0x0ABC, 0x0ABC, 1}, + {0x0ABE, 0x0AC5, 1}, + {0x0AC7, 0x0AC9, 1}, + {0x0ACB, 0x0ACD, 1}, + {0x0AE6, 0x0AEF, 1}, + {0x0B01, 0x0B03, 1}, + {0x0B3C, 0x0B3C, 1}, + {0x0B3E, 0x0B43, 1}, + {0x0B47, 0x0B48, 1}, + {0x0B4B, 0x0B4D, 1}, + {0x0B56, 0x0B57, 1}, + {0x0B66, 0x0B6F, 1}, + {0x0B82, 0x0B83, 1}, + {0x0BBE, 0x0BC2, 1}, + {0x0BC6, 0x0BC8, 1}, + {0x0BCA, 0x0BCD, 1}, + {0x0BD7, 0x0BD7, 1}, + {0x0BE7, 0x0BEF, 1}, + {0x0C01, 0x0C03, 1}, + {0x0C3E, 0x0C44, 1}, + {0x0C46, 0x0C48, 1}, + {0x0C4A, 0x0C4D, 1}, + {0x0C55, 0x0C56, 1}, + {0x0C66, 0x0C6F, 1}, + {0x0C82, 0x0C83, 1}, + {0x0CBE, 0x0CC4, 1}, + {0x0CC6, 0x0CC8, 1}, + {0x0CCA, 0x0CCD, 1}, + {0x0CD5, 0x0CD6, 1}, + {0x0CE6, 0x0CEF, 1}, + {0x0D02, 0x0D03, 1}, + {0x0D3E, 0x0D43, 1}, + {0x0D46, 0x0D48, 1}, + {0x0D4A, 0x0D4D, 1}, + {0x0D57, 0x0D57, 1}, + {0x0D66, 0x0D6F, 1}, + {0x0E31, 0x0E31, 1}, + {0x0E34, 0x0E3A, 1}, + {0x0E46, 0x0E46, 1}, + {0x0E47, 0x0E4E, 1}, + {0x0E50, 0x0E59, 1}, + {0x0EB1, 0x0EB1, 1}, + {0x0EB4, 0x0EB9, 1}, + {0x0EBB, 0x0EBC, 1}, + {0x0EC6, 0x0EC6, 1}, + {0x0EC8, 0x0ECD, 1}, + {0x0ED0, 0x0ED9, 1}, + {0x0F18, 0x0F19, 1}, + {0x0F20, 0x0F29, 1}, + {0x0F35, 0x0F39, 2}, + {0x0F3E, 0x0F3F, 1}, + {0x0F71, 0x0F84, 1}, + {0x0F86, 0x0F8B, 1}, + {0x0F90, 0x0F95, 1}, + {0x0F97, 0x0F97, 1}, + {0x0F99, 0x0FAD, 1}, + {0x0FB1, 0x0FB7, 1}, + {0x0FB9, 0x0FB9, 1}, + {0x20D0, 0x20DC, 1}, + {0x20E1, 0x3005, 0x3005 - 0x20E1}, + {0x302A, 0x302F, 1}, + {0x3031, 0x3035, 1}, + {0x3099, 0x309A, 1}, + {0x309D, 0x309E, 1}, + {0x30FC, 0x30FE, 1}, + }, +} diff --git a/vendor/github.com/antchfx/xpath/query.go b/vendor/github.com/antchfx/xpath/query.go new file mode 100644 index 00000000000..75c43361393 --- /dev/null +++ b/vendor/github.com/antchfx/xpath/query.go @@ -0,0 +1,1437 @@ +package xpath + +import ( + "bytes" + "fmt" + "hash/fnv" + "reflect" +) + +// The return type of the XPath expression. +type resultType int + +var xpathResultType = struct { + Boolean resultType + // A numeric value + Number resultType + String resultType + // A node collection. + NodeSet resultType + // Any of the XPath node types. + Any resultType +}{ + Boolean: 0, + Number: 1, + String: 2, + NodeSet: 3, + Any: 4, +} + +type queryProp int + +var queryProps = struct { + None queryProp + Position queryProp + Count queryProp + Cached queryProp + Reverse queryProp + Merge queryProp +}{ + None: 0, + Position: 1, + Count: 2, + Cached: 4, + Reverse: 8, + Merge: 16, +} + +type iterator interface { + Current() NodeNavigator +} + +// An XPath query interface. +type query interface { + // Select traversing iterator returns a query matched node NodeNavigator. + Select(iterator) NodeNavigator + + // Evaluate evaluates query and returns values of the current query. + Evaluate(iterator) interface{} + + Clone() query + + // ValueType returns the value type of the current query. + ValueType() resultType + + Properties() queryProp +} + +// nopQuery is an empty query that always return nil for any query. +type nopQuery struct{} + +func (nopQuery) Select(iterator) NodeNavigator { return nil } + +func (nopQuery) Evaluate(iterator) interface{} { return nil } + +func (nopQuery) Clone() query { return nopQuery{} } + +func (nopQuery) ValueType() resultType { return xpathResultType.NodeSet } + +func (nopQuery) Properties() queryProp { + return queryProps.Merge | queryProps.Position | queryProps.Count | queryProps.Cached +} + +// contextQuery is returns current node on the iterator object query. 
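+//
+// Editor's note on the query protocol above: Select is a pull-style iterator
+// that returns nil once exhausted, so callers drain a query q with
+//
+//	for node := q.Select(t); node != nil; node = q.Select(t) {
+//	    // use node
+//	}
+//
+// while Evaluate resets the query and returns it for node-set expressions.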
+type contextQuery struct { + count int +} + +func (c *contextQuery) Select(t iterator) NodeNavigator { + if c.count > 0 { + return nil + } + c.count++ + return t.Current().Copy() +} + +func (c *contextQuery) Evaluate(iterator) interface{} { + c.count = 0 + return c +} + +func (c *contextQuery) Clone() query { + return &contextQuery{} +} + +func (c *contextQuery) ValueType() resultType { + return xpathResultType.NodeSet +} + +func (c *contextQuery) Properties() queryProp { + return queryProps.Merge | queryProps.Position | queryProps.Count | queryProps.Cached +} + +type absoluteQuery struct { + count int +} + +func (a *absoluteQuery) Select(t iterator) (n NodeNavigator) { + if a.count > 0 { + return + } + a.count++ + n = t.Current().Copy() + n.MoveToRoot() + return +} + +func (a *absoluteQuery) Evaluate(t iterator) interface{} { + a.count = 0 + return a +} + +func (a *absoluteQuery) Clone() query { + return &absoluteQuery{} +} + +func (a *absoluteQuery) ValueType() resultType { + return xpathResultType.NodeSet +} + +func (a *absoluteQuery) Properties() queryProp { + return queryProps.Merge | queryProps.Position | queryProps.Count | queryProps.Cached +} + +// ancestorQuery is an XPath ancestor node query.(ancestor::*|ancestor-self::*) +type ancestorQuery struct { + name string + iterator func() NodeNavigator + table map[uint64]bool + + Self bool + Input query + Predicate func(NodeNavigator) bool +} + +func (a *ancestorQuery) Select(t iterator) NodeNavigator { + if a.table == nil { + a.table = make(map[uint64]bool) + } + + for { + if a.iterator == nil { + node := a.Input.Select(t) + if node == nil { + return nil + } + first := true + node = node.Copy() + a.iterator = func() NodeNavigator { + if first { + first = false + if a.Self && a.Predicate(node) { + return node + } + } + for node.MoveToParent() { + if a.Predicate(node) { + return node + } + } + return nil + } + } + + for node := a.iterator(); node != nil; node = a.iterator() { + node_id := getHashCode(node.Copy()) + if _, ok := a.table[node_id]; !ok { + a.table[node_id] = true + return node + } + } + a.iterator = nil + } +} + +func (a *ancestorQuery) Evaluate(t iterator) interface{} { + a.Input.Evaluate(t) + a.iterator = nil + return a +} + +func (a *ancestorQuery) Test(n NodeNavigator) bool { + return a.Predicate(n) +} + +func (a *ancestorQuery) Clone() query { + return &ancestorQuery{name: a.name, Self: a.Self, Input: a.Input.Clone(), Predicate: a.Predicate} +} + +func (a *ancestorQuery) ValueType() resultType { + return xpathResultType.NodeSet +} + +func (a *ancestorQuery) Properties() queryProp { + return queryProps.Position | queryProps.Count | queryProps.Cached | queryProps.Merge | queryProps.Reverse +} + +// attributeQuery is an XPath attribute node query.(@*) +type attributeQuery struct { + name string + iterator func() NodeNavigator + + Input query + Predicate func(NodeNavigator) bool +} + +func (a *attributeQuery) Select(t iterator) NodeNavigator { + for { + if a.iterator == nil { + node := a.Input.Select(t) + if node == nil { + return nil + } + node = node.Copy() + a.iterator = func() NodeNavigator { + for { + onAttr := node.MoveToNextAttribute() + if !onAttr { + return nil + } + if a.Predicate(node) { + return node + } + } + } + } + + if node := a.iterator(); node != nil { + return node + } + a.iterator = nil + } +} + +func (a *attributeQuery) Evaluate(t iterator) interface{} { + a.Input.Evaluate(t) + a.iterator = nil + return a +} + +func (a *attributeQuery) Test(n NodeNavigator) bool { + return a.Predicate(n) +} + +func (a 
*attributeQuery) Clone() query { + return &attributeQuery{name: a.name, Input: a.Input.Clone(), Predicate: a.Predicate} +} + +func (a *attributeQuery) ValueType() resultType { + return xpathResultType.NodeSet +} + +func (a *attributeQuery) Properties() queryProp { + return queryProps.Merge +} + +// childQuery is an XPath child node query.(child::*) +type childQuery struct { + name string + posit int + iterator func() NodeNavigator + + Input query + Predicate func(NodeNavigator) bool +} + +func (c *childQuery) Select(t iterator) NodeNavigator { + for { + if c.iterator == nil { + c.posit = 0 + node := c.Input.Select(t) + if node == nil { + return nil + } + node = node.Copy() + first := true + c.iterator = func() NodeNavigator { + for { + if (first && !node.MoveToChild()) || (!first && !node.MoveToNext()) { + return nil + } + first = false + if c.Predicate(node) { + return node + } + } + } + } + + if node := c.iterator(); node != nil { + c.posit++ + return node + } + c.iterator = nil + } +} + +func (c *childQuery) Evaluate(t iterator) interface{} { + c.Input.Evaluate(t) + c.iterator = nil + return c +} + +func (c *childQuery) Test(n NodeNavigator) bool { + return c.Predicate(n) +} + +func (c *childQuery) Clone() query { + return &childQuery{name: c.name, Input: c.Input.Clone(), Predicate: c.Predicate} +} + +func (c *childQuery) ValueType() resultType { + return xpathResultType.NodeSet +} + +func (c *childQuery) Properties() queryProp { + return queryProps.Merge +} + +// position returns a position of current NodeNavigator. +func (c *childQuery) position() int { + return c.posit +} + +type cachedChildQuery struct { + name string + posit int + iterator func() NodeNavigator + + Input query + Predicate func(NodeNavigator) bool +} + +func (c *cachedChildQuery) Select(t iterator) NodeNavigator { + for { + if c.iterator == nil { + c.posit = 0 + node := c.Input.Select(t) + if node == nil { + return nil + } + node = node.Copy() + first := true + c.iterator = func() NodeNavigator { + for { + if (first && !node.MoveToChild()) || (!first && !node.MoveToNext()) { + return nil + } + first = false + if c.Predicate(node) { + return node + } + } + } + } + + if node := c.iterator(); node != nil { + c.posit++ + return node + } + c.iterator = nil + } +} + +func (c *cachedChildQuery) Evaluate(t iterator) interface{} { + c.Input.Evaluate(t) + c.iterator = nil + return c +} + +func (c *cachedChildQuery) position() int { + return c.posit +} + +func (c *cachedChildQuery) Test(n NodeNavigator) bool { + return c.Predicate(n) +} + +func (c *cachedChildQuery) Clone() query { + return &childQuery{name: c.name, Input: c.Input.Clone(), Predicate: c.Predicate} +} + +func (c *cachedChildQuery) ValueType() resultType { + return xpathResultType.NodeSet +} + +func (c *cachedChildQuery) Properties() queryProp { + return queryProps.Merge +} + +// descendantQuery is an XPath descendant node query.(descendant::* | descendant-or-self::*) +type descendantQuery struct { + name string + iterator func() NodeNavigator + posit int + level int + + Self bool + Input query + Predicate func(NodeNavigator) bool +} + +func (d *descendantQuery) Select(t iterator) NodeNavigator { + for { + if d.iterator == nil { + d.posit = 0 + node := d.Input.Select(t) + if node == nil { + return nil + } + node = node.Copy() + d.level = 0 + first := true + d.iterator = func() NodeNavigator { + if first { + first = false + if d.Self && d.Predicate(node) { + return node + } + } + + for { + if node.MoveToChild() { + d.level = d.level + 1 + } else { + for { + if 
d.level == 0 { + return nil + } + if node.MoveToNext() { + break + } + node.MoveToParent() + d.level = d.level - 1 + } + } + if d.Predicate(node) { + return node + } + } + } + } + + if node := d.iterator(); node != nil { + d.posit++ + return node + } + d.iterator = nil + } +} + +func (d *descendantQuery) Evaluate(t iterator) interface{} { + d.Input.Evaluate(t) + d.iterator = nil + return d +} + +func (d *descendantQuery) Test(n NodeNavigator) bool { + return d.Predicate(n) +} + +// position returns a position of current NodeNavigator. +func (d *descendantQuery) position() int { + return d.posit +} + +func (d *descendantQuery) depth() int { + return d.level +} + +func (d *descendantQuery) Clone() query { + return &descendantQuery{name: d.name, Self: d.Self, Input: d.Input.Clone(), Predicate: d.Predicate} +} + +func (d *descendantQuery) ValueType() resultType { + return xpathResultType.NodeSet +} + +func (d *descendantQuery) Properties() queryProp { + return queryProps.Merge +} + +// followingQuery is an XPath following node query.(following::*|following-sibling::*) +type followingQuery struct { + posit int + iterator func() NodeNavigator + + Input query + Sibling bool // The matching sibling node of current node. + Predicate func(NodeNavigator) bool +} + +func (f *followingQuery) Select(t iterator) NodeNavigator { + for { + if f.iterator == nil { + f.posit = 0 + node := f.Input.Select(t) + if node == nil { + return nil + } + node = node.Copy() + if f.Sibling { + f.iterator = func() NodeNavigator { + for { + if !node.MoveToNext() { + return nil + } + if f.Predicate(node) { + f.posit++ + return node + } + } + } + } else { + var q *descendantQuery // descendant query + f.iterator = func() NodeNavigator { + for { + if q == nil { + for !node.MoveToNext() { + if !node.MoveToParent() { + return nil + } + } + q = &descendantQuery{ + Self: true, + Input: &contextQuery{}, + Predicate: f.Predicate, + } + t.Current().MoveTo(node) + } + if node := q.Select(t); node != nil { + f.posit = q.posit + return node + } + q = nil + } + } + } + } + + if node := f.iterator(); node != nil { + return node + } + f.iterator = nil + } +} + +func (f *followingQuery) Evaluate(t iterator) interface{} { + f.Input.Evaluate(t) + return f +} + +func (f *followingQuery) Test(n NodeNavigator) bool { + return f.Predicate(n) +} + +func (f *followingQuery) Clone() query { + return &followingQuery{Input: f.Input.Clone(), Sibling: f.Sibling, Predicate: f.Predicate} +} + +func (f *followingQuery) ValueType() resultType { + return xpathResultType.NodeSet +} + +func (f *followingQuery) Properties() queryProp { + return queryProps.Merge +} + +func (f *followingQuery) position() int { + return f.posit +} + +// precedingQuery is an XPath preceding node query.(preceding::*) +type precedingQuery struct { + iterator func() NodeNavigator + posit int + Input query + Sibling bool // The matching sibling node of current node. 
+ Predicate func(NodeNavigator) bool +} + +func (p *precedingQuery) Select(t iterator) NodeNavigator { + for { + if p.iterator == nil { + p.posit = 0 + node := p.Input.Select(t) + if node == nil { + return nil + } + node = node.Copy() + if p.Sibling { + p.iterator = func() NodeNavigator { + for { + for !node.MoveToPrevious() { + return nil + } + if p.Predicate(node) { + p.posit++ + return node + } + } + } + } else { + var q query + p.iterator = func() NodeNavigator { + for { + if q == nil { + for !node.MoveToPrevious() { + if !node.MoveToParent() { + return nil + } + p.posit = 0 + } + q = &descendantQuery{ + Self: true, + Input: &contextQuery{}, + Predicate: p.Predicate, + } + t.Current().MoveTo(node) + } + if node := q.Select(t); node != nil { + p.posit++ + return node + } + q = nil + } + } + } + } + if node := p.iterator(); node != nil { + return node + } + p.iterator = nil + } +} + +func (p *precedingQuery) Evaluate(t iterator) interface{} { + p.Input.Evaluate(t) + return p +} + +func (p *precedingQuery) Test(n NodeNavigator) bool { + return p.Predicate(n) +} + +func (p *precedingQuery) Clone() query { + return &precedingQuery{Input: p.Input.Clone(), Sibling: p.Sibling, Predicate: p.Predicate} +} + +func (p *precedingQuery) ValueType() resultType { + return xpathResultType.NodeSet +} + +func (p *precedingQuery) Properties() queryProp { + return queryProps.Merge | queryProps.Reverse +} + +func (p *precedingQuery) position() int { + return p.posit +} + +// parentQuery is an XPath parent node query.(parent::*) +type parentQuery struct { + Input query + Predicate func(NodeNavigator) bool +} + +func (p *parentQuery) Select(t iterator) NodeNavigator { + for { + node := p.Input.Select(t) + if node == nil { + return nil + } + node = node.Copy() + if node.MoveToParent() && p.Predicate(node) { + return node + } + } +} + +func (p *parentQuery) Evaluate(t iterator) interface{} { + p.Input.Evaluate(t) + return p +} + +func (p *parentQuery) Clone() query { + return &parentQuery{Input: p.Input.Clone(), Predicate: p.Predicate} +} + +func (p *parentQuery) ValueType() resultType { + return xpathResultType.NodeSet +} + +func (p *parentQuery) Properties() queryProp { + return queryProps.Position | queryProps.Count | queryProps.Cached | queryProps.Merge +} + +func (p *parentQuery) Test(n NodeNavigator) bool { + return p.Predicate(n) +} + +// selfQuery is an Self node query.(self::*) +type selfQuery struct { + Input query + Predicate func(NodeNavigator) bool +} + +func (s *selfQuery) Select(t iterator) NodeNavigator { + for { + node := s.Input.Select(t) + if node == nil { + return nil + } + + if s.Predicate(node) { + return node + } + } +} + +func (s *selfQuery) Evaluate(t iterator) interface{} { + s.Input.Evaluate(t) + return s +} + +func (s *selfQuery) Test(n NodeNavigator) bool { + return s.Predicate(n) +} + +func (s *selfQuery) Clone() query { + return &selfQuery{Input: s.Input.Clone(), Predicate: s.Predicate} +} + +func (s *selfQuery) ValueType() resultType { + return xpathResultType.NodeSet +} + +func (s *selfQuery) Properties() queryProp { + return queryProps.Merge +} + +// filterQuery is an XPath query for predicate filter. 
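+//
+// Editor's example: in //book[2] the predicate evaluates to the number 2, so
+// do() compares it against the input query's position (getNodePosition)
+// instead of coercing it to a boolean.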
+type filterQuery struct { + Input query + Predicate query + NoPosition bool + + posit int + positmap map[int]int +} + +func (f *filterQuery) do(t iterator) bool { + val := reflect.ValueOf(f.Predicate.Evaluate(t)) + switch val.Kind() { + case reflect.Bool: + return val.Bool() + case reflect.String: + return len(val.String()) > 0 + case reflect.Float64: + pt := getNodePosition(f.Input) + return int(val.Float()) == pt + default: + if f.Predicate != nil { + return f.Predicate.Select(t) != nil + } + } + return false +} + +func (f *filterQuery) position() int { + return f.posit +} + +func (f *filterQuery) Select(t iterator) NodeNavigator { + if f.positmap == nil { + f.positmap = make(map[int]int) + } + for { + + node := f.Input.Select(t) + if node == nil { + return nil + } + node = node.Copy() + + t.Current().MoveTo(node) + if f.do(t) { + // fix https://github.com/antchfx/htmlquery/issues/26 + // Calculate and keep the each of matching node's position in the same depth. + level := getNodeDepth(f.Input) + f.positmap[level]++ + f.posit = f.positmap[level] + return node + } + } +} + +func (f *filterQuery) Evaluate(t iterator) interface{} { + f.Input.Evaluate(t) + return f +} + +func (f *filterQuery) Clone() query { + return &filterQuery{Input: f.Input.Clone(), Predicate: f.Predicate.Clone()} +} + +func (f *filterQuery) ValueType() resultType { + return xpathResultType.NodeSet +} + +func (f *filterQuery) Properties() queryProp { + return (queryProps.Position | f.Input.Properties()) & (queryProps.Reverse | queryProps.Merge) +} + +// functionQuery is an XPath function that returns a computed value for +// the Evaluate call of the current NodeNavigator node. Select call isn't +// applicable for functionQuery. +type functionQuery struct { + Input query // Node Set + Func func(query, iterator) interface{} // The xpath function. +} + +func (f *functionQuery) Select(t iterator) NodeNavigator { + return nil +} + +// Evaluate call a specified function that will returns the +// following value type: number,string,boolean. +func (f *functionQuery) Evaluate(t iterator) interface{} { + return f.Func(f.Input, t) +} + +func (f *functionQuery) Clone() query { + if f.Input == nil { + return &functionQuery{Func: f.Func} + } + return &functionQuery{Input: f.Input.Clone(), Func: f.Func} +} + +func (f *functionQuery) ValueType() resultType { + return xpathResultType.Any +} + +func (f *functionQuery) Properties() queryProp { + return queryProps.Merge +} + +// transformFunctionQuery diffs from functionQuery where the latter computes a scalar +// value (number,string,boolean) for the current NodeNavigator node while the former +// (transformFunctionQuery) performs a mapping or transform of the current NodeNavigator +// and returns a new NodeNavigator. It is used for non-scalar XPath functions such as +// reverse(), remove(), subsequence(), unordered(), etc. 
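+//
+// For instance (editor's addition), reverse(//a) is wired to a
+// transformFunctionQuery whose Func is reverseFunc (shown earlier in this
+// patch), which buffers every match and replays it last-to-first.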
+type transformFunctionQuery struct { + Input query + Func func(query, iterator) func() NodeNavigator + iterator func() NodeNavigator +} + +func (f *transformFunctionQuery) Select(t iterator) NodeNavigator { + if f.iterator == nil { + f.iterator = f.Func(f.Input, t) + } + return f.iterator() +} + +func (f *transformFunctionQuery) Evaluate(t iterator) interface{} { + f.Input.Evaluate(t) + f.iterator = nil + return f +} + +func (f *transformFunctionQuery) Clone() query { + return &transformFunctionQuery{Input: f.Input.Clone(), Func: f.Func} +} + +func (f *transformFunctionQuery) ValueType() resultType { + return xpathResultType.Any +} + +func (f *transformFunctionQuery) Properties() queryProp { + return queryProps.Merge +} + +// constantQuery is an XPath constant operand. +type constantQuery struct { + Val interface{} +} + +func (c *constantQuery) Select(t iterator) NodeNavigator { + return nil +} + +func (c *constantQuery) Evaluate(t iterator) interface{} { + return c.Val +} + +func (c *constantQuery) Clone() query { + return c +} + +func (c *constantQuery) ValueType() resultType { + return getXPathType(c.Val) +} + +func (c *constantQuery) Properties() queryProp { + return queryProps.Position | queryProps.Count | queryProps.Cached | queryProps.Merge +} + +type groupQuery struct { + posit int + + Input query +} + +func (g *groupQuery) Select(t iterator) NodeNavigator { + node := g.Input.Select(t) + if node == nil { + return nil + } + g.posit++ + return node +} + +func (g *groupQuery) Evaluate(t iterator) interface{} { + return g.Input.Evaluate(t) +} + +func (g *groupQuery) Clone() query { + return &groupQuery{Input: g.Input.Clone()} +} + +func (g *groupQuery) ValueType() resultType { + return g.Input.ValueType() +} + +func (g *groupQuery) Properties() queryProp { + return queryProps.Position +} + +func (g *groupQuery) position() int { + return g.posit +} + +// logicalQuery is an XPath logical expression. +type logicalQuery struct { + Left, Right query + + Do func(iterator, interface{}, interface{}) interface{} +} + +func (l *logicalQuery) Select(t iterator) NodeNavigator { + // When a XPath expr is logical expression. + node := t.Current().Copy() + val := l.Evaluate(t) + switch val.(type) { + case bool: + if val.(bool) == true { + return node + } + } + return nil +} + +func (l *logicalQuery) Evaluate(t iterator) interface{} { + m := l.Left.Evaluate(t) + n := l.Right.Evaluate(t) + return l.Do(t, m, n) +} + +func (l *logicalQuery) Clone() query { + return &logicalQuery{Left: l.Left.Clone(), Right: l.Right.Clone(), Do: l.Do} +} + +func (l *logicalQuery) ValueType() resultType { + return xpathResultType.Boolean +} + +func (l *logicalQuery) Properties() queryProp { + return queryProps.Merge +} + +// numericQuery is an XPath numeric operator expression. 
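+//
+// Editor's example: 1 + 2*3 parses into nested numericQuery values whose Do
+// callbacks are plusFunc and mulFunc from operator.go, evaluating to 7.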
+type numericQuery struct {
+	Left, Right query
+
+	Do func(iterator, interface{}, interface{}) interface{}
+}
+
+func (n *numericQuery) Select(t iterator) NodeNavigator {
+	return nil
+}
+
+func (n *numericQuery) Evaluate(t iterator) interface{} {
+	m := n.Left.Evaluate(t)
+	k := n.Right.Evaluate(t)
+	return n.Do(t, m, k)
+}
+
+func (n *numericQuery) Clone() query {
+	return &numericQuery{Left: n.Left.Clone(), Right: n.Right.Clone(), Do: n.Do}
+}
+
+func (n *numericQuery) ValueType() resultType {
+	return xpathResultType.Number
+}
+
+func (n *numericQuery) Properties() queryProp {
+	return queryProps.Merge
+}
+
+type booleanQuery struct {
+	IsOr        bool
+	Left, Right query
+	iterator    func() NodeNavigator
+}
+
+func (b *booleanQuery) Select(t iterator) NodeNavigator {
+	if b.iterator == nil {
+		var list []NodeNavigator
+		i := 0
+		root := t.Current().Copy()
+		if b.IsOr {
+			for {
+				node := b.Left.Select(t)
+				if node == nil {
+					break
+				}
+				node = node.Copy()
+				list = append(list, node)
+			}
+			t.Current().MoveTo(root)
+			for {
+				node := b.Right.Select(t)
+				if node == nil {
+					break
+				}
+				node = node.Copy()
+				list = append(list, node)
+			}
+		} else {
+			var m []NodeNavigator
+			var n []NodeNavigator
+			for {
+				node := b.Left.Select(t)
+				if node == nil {
+					break
+				}
+				node = node.Copy()
+				m = append(m, node)
+			}
+			t.Current().MoveTo(root)
+			for {
+				node := b.Right.Select(t)
+				if node == nil {
+					break
+				}
+				node = node.Copy()
+				n = append(n, node)
+			}
+			// Keep only the nodes that appear in both branches (intersection).
+			for _, k := range m {
+				for _, j := range n {
+					if k == j {
+						list = append(list, k)
+					}
+				}
+			}
+		}
+
+		b.iterator = func() NodeNavigator {
+			if i >= len(list) {
+				return nil
+			}
+			node := list[i]
+			i++
+			return node
+		}
+	}
+	return b.iterator()
+}
+
+func (b *booleanQuery) Evaluate(t iterator) interface{} {
+	n := t.Current().Copy()
+
+	m := b.Left.Evaluate(t)
+	left := asBool(t, m)
+	if b.IsOr && left {
+		return true
+	} else if !b.IsOr && !left {
+		return false
+	}
+
+	t.Current().MoveTo(n)
+	m = b.Right.Evaluate(t)
+	return asBool(t, m)
+}
+
+func (b *booleanQuery) Clone() query {
+	return &booleanQuery{IsOr: b.IsOr, Left: b.Left.Clone(), Right: b.Right.Clone()}
+}
+
+func (b *booleanQuery) ValueType() resultType {
+	return xpathResultType.Boolean
+}
+
+func (b *booleanQuery) Properties() queryProp {
+	return queryProps.Merge
+}
+
+type unionQuery struct {
+	Left, Right query
+	iterator    func() NodeNavigator
+}
+
+func (u *unionQuery) Select(t iterator) NodeNavigator {
+	if u.iterator == nil {
+		var list []NodeNavigator
+		var m = make(map[uint64]bool)
+		root := t.Current().Copy()
+		for {
+			node := u.Left.Select(t)
+			if node == nil {
+				break
+			}
+			code := getHashCode(node.Copy())
+			if _, ok := m[code]; !ok {
+				m[code] = true
+				list = append(list, node.Copy())
+			}
+		}
+		t.Current().MoveTo(root)
+		for {
+			node := u.Right.Select(t)
+			if node == nil {
+				break
+			}
+			code := getHashCode(node.Copy())
+			if _, ok := m[code]; !ok {
+				m[code] = true
+				list = append(list, node.Copy())
+			}
+		}
+		var i int
+		u.iterator = func() NodeNavigator {
+			if i >= len(list) {
+				return nil
+			}
+			node := list[i]
+			i++
+			return node
+		}
+	}
+	return u.iterator()
+}
+
+func (u *unionQuery) Evaluate(t iterator) interface{} {
+	u.iterator = nil
+	u.Left.Evaluate(t)
+	u.Right.Evaluate(t)
+	return u
+}
+
+func (u *unionQuery) Clone() query {
+	return &unionQuery{Left: u.Left.Clone(), Right: u.Right.Clone()}
+}
+
+func (u *unionQuery) ValueType() resultType {
+	return xpathResultType.NodeSet
+}
+
+func (u *unionQuery) Properties() queryProp {
+	return queryProps.Merge
+}
+
+type
lastFuncQuery struct { + buffer []NodeNavigator + counted bool + + Input query +} + +func (q *lastFuncQuery) Select(t iterator) NodeNavigator { + return nil +} + +func (q *lastFuncQuery) Evaluate(t iterator) interface{} { + if !q.counted { + for { + node := q.Input.Select(t) + if node == nil { + break + } + q.buffer = append(q.buffer, node.Copy()) + } + q.counted = true + } + return float64(len(q.buffer)) +} + +func (q *lastFuncQuery) Clone() query { + return &lastFuncQuery{Input: q.Input.Clone()} +} + +func (q *lastFuncQuery) ValueType() resultType { + return xpathResultType.Number +} + +func (q *lastFuncQuery) Properties() queryProp { + return queryProps.Merge +} + +type descendantOverDescendantQuery struct { + name string + level int + posit int + currentNode NodeNavigator + + Input query + MatchSelf bool + Predicate func(NodeNavigator) bool +} + +func (d *descendantOverDescendantQuery) moveToFirstChild() bool { + if d.currentNode.MoveToChild() { + d.level++ + return true + } + return false +} + +func (d *descendantOverDescendantQuery) moveUpUntilNext() bool { + for !d.currentNode.MoveToNext() { + d.level-- + if d.level == 0 { + return false + } + d.currentNode.MoveToParent() + } + return true +} + +func (d *descendantOverDescendantQuery) Select(t iterator) NodeNavigator { + for { + if d.level == 0 { + node := d.Input.Select(t) + if node == nil { + return nil + } + d.currentNode = node.Copy() + d.posit = 0 + if d.MatchSelf && d.Predicate(d.currentNode) { + d.posit = 1 + return d.currentNode + } + d.moveToFirstChild() + } else if !d.moveUpUntilNext() { + continue + } + for ok := true; ok; ok = d.moveToFirstChild() { + if d.Predicate(d.currentNode) { + d.posit++ + return d.currentNode + } + } + } +} + +func (d *descendantOverDescendantQuery) Evaluate(t iterator) interface{} { + d.Input.Evaluate(t) + return d +} + +func (d *descendantOverDescendantQuery) Clone() query { + return &descendantOverDescendantQuery{Input: d.Input.Clone(), Predicate: d.Predicate, MatchSelf: d.MatchSelf} +} + +func (d *descendantOverDescendantQuery) ValueType() resultType { + return xpathResultType.NodeSet +} + +func (d *descendantOverDescendantQuery) Properties() queryProp { + return queryProps.Merge +} + +func (d *descendantOverDescendantQuery) position() int { + return d.posit +} + +type mergeQuery struct { + Input query + Child query + + iterator func() NodeNavigator +} + +func (m *mergeQuery) Select(t iterator) NodeNavigator { + for { + if m.iterator == nil { + root := m.Input.Select(t) + if root == nil { + return nil + } + m.Child.Evaluate(t) + root = root.Copy() + t.Current().MoveTo(root) + var list []NodeNavigator + for node := m.Child.Select(t); node != nil; node = m.Child.Select(t) { + list = append(list, node.Copy()) + } + i := 0 + m.iterator = func() NodeNavigator { + if i >= len(list) { + return nil + } + result := list[i] + i++ + return result + } + } + + if node := m.iterator(); node != nil { + return node + } + m.iterator = nil + } +} + +func (m *mergeQuery) Evaluate(t iterator) interface{} { + m.Input.Evaluate(t) + return m +} + +func (m *mergeQuery) Clone() query { + return &mergeQuery{Input: m.Input.Clone(), Child: m.Child.Clone()} +} + +func (m *mergeQuery) ValueType() resultType { + return xpathResultType.NodeSet +} + +func (m *mergeQuery) Properties() queryProp { + return queryProps.Position | queryProps.Count | queryProps.Cached | queryProps.Merge +} + +func getHashCode(n NodeNavigator) uint64 { + var sb bytes.Buffer + switch n.NodeType() { + case AttributeNode, TextNode, CommentNode: + 
sb.WriteString(fmt.Sprintf("%s=%s", n.LocalName(), n.Value()))
+		// https://github.com/antchfx/htmlquery/issues/25
+		d := 1
+		for n.MoveToPrevious() {
+			d++
+		}
+		sb.WriteString(fmt.Sprintf("-%d", d))
+		for n.MoveToParent() {
+			d = 1
+			for n.MoveToPrevious() {
+				d++
+			}
+			sb.WriteString(fmt.Sprintf("-%d", d))
+		}
+	case ElementNode:
+		sb.WriteString(n.Prefix() + n.LocalName())
+		d := 1
+		for n.MoveToPrevious() {
+			d++
+		}
+		sb.WriteString(fmt.Sprintf("-%d", d))
+
+		for n.MoveToParent() {
+			d = 1
+			for n.MoveToPrevious() {
+				d++
+			}
+			sb.WriteString(fmt.Sprintf("-%d", d))
+		}
+	}
+	h := fnv.New64a()
+	h.Write(sb.Bytes())
+	return h.Sum64()
+}
+
+func getNodePosition(q query) int {
+	type Position interface {
+		position() int
+	}
+	if count, ok := q.(Position); ok {
+		return count.position()
+	}
+	return 1
+}
+
+func getNodeDepth(q query) int {
+	type Depth interface {
+		depth() int
+	}
+	if count, ok := q.(Depth); ok {
+		return count.depth()
+	}
+	return 0
+}
+
+func getXPathType(i interface{}) resultType {
+	v := reflect.ValueOf(i)
+	switch v.Kind() {
+	case reflect.Float64:
+		return xpathResultType.Number
+	case reflect.String:
+		return xpathResultType.String
+	case reflect.Bool:
+		return xpathResultType.Boolean
+	default:
+		if _, ok := i.(query); ok {
+			return xpathResultType.NodeSet
+		}
+	}
+	panic(fmt.Errorf("xpath unknown value type: %v", v.Kind()))
+}
diff --git a/vendor/github.com/antchfx/xpath/xpath.go b/vendor/github.com/antchfx/xpath/xpath.go
new file mode 100644
index 00000000000..04bbe8d4c23
--- /dev/null
+++ b/vendor/github.com/antchfx/xpath/xpath.go
@@ -0,0 +1,176 @@
+package xpath
+
+import (
+	"errors"
+	"fmt"
+)
+
+// NodeType represents a type of XPath node.
+type NodeType int
+
+const (
+	// RootNode is a root node of the XML document or node tree.
+	RootNode NodeType = iota
+
+	// ElementNode is an element, such as <element>.
+	ElementNode
+
+	// AttributeNode is an attribute, such as id='123'.
+	AttributeNode
+
+	// TextNode is the text content of a node.
+	TextNode
+
+	// CommentNode is a comment node, such as <!-- my comment -->.
+	CommentNode
+
+	// allNode is any type of node; used by the xpath package only for
+	// predicate matching.
+	allNode
+)
+
+// NodeNavigator provides a cursor model for navigating XML data.
+type NodeNavigator interface {
+	// NodeType returns the XPathNodeType of the current node.
+	NodeType() NodeType
+
+	// LocalName gets the local name of the current node.
+	LocalName() string
+
+	// Prefix returns the namespace prefix associated with the current node.
+	Prefix() string
+
+	// Value gets the value of the current node.
+	Value() string
+
+	// Copy does a deep copy of the NodeNavigator and all its components.
+	Copy() NodeNavigator
+
+	// MoveToRoot moves the NodeNavigator to the root node of the current node.
+	MoveToRoot()
+
+	// MoveToParent moves the NodeNavigator to the parent node of the current node.
+	MoveToParent() bool
+
+	// MoveToNextAttribute moves the NodeNavigator to the next attribute on the current node.
+	MoveToNextAttribute() bool
+
+	// MoveToChild moves the NodeNavigator to the first child node of the current node.
+	MoveToChild() bool
+
+	// MoveToFirst moves the NodeNavigator to the first sibling node of the current node.
+	MoveToFirst() bool
+
+	// MoveToNext moves the NodeNavigator to the next sibling node of the current node.
+	MoveToNext() bool
+
+	// MoveToPrevious moves the NodeNavigator to the previous sibling node of the current node.
+	MoveToPrevious() bool
+
+	// MoveTo moves the NodeNavigator to the same position as the specified NodeNavigator.
+	MoveTo(NodeNavigator) bool
+}
+
+// NodeIterator holds all matched Node objects.
+type NodeIterator struct {
+	node  NodeNavigator
+	query query
+}
+
+// Current returns the current matched node.
+func (t *NodeIterator) Current() NodeNavigator {
+	return t.node
+}
+
+// MoveNext moves the Navigator to the next matched node.
+func (t *NodeIterator) MoveNext() bool {
+	n := t.query.Select(t)
+	if n == nil {
+		return false
+	}
+	if !t.node.MoveTo(n) {
+		t.node = n.Copy()
+	}
+	return true
+}
+
+// Select selects a node set using the specified XPath expression.
+//
+// Deprecated: use the Expr.Select() method instead.
+func Select(root NodeNavigator, expr string) *NodeIterator {
+	exp, err := Compile(expr)
+	if err != nil {
+		panic(err)
+	}
+	return exp.Select(root)
+}
+
+// Expr is an XPath expression for query.
+type Expr struct {
+	s string
+	q query
+}
+
+type iteratorFunc func() NodeNavigator
+
+func (f iteratorFunc) Current() NodeNavigator {
+	return f()
+}
+
+// Evaluate returns the result of the expression. The result type of the
+// expression is one of the following: bool, float64, string, *NodeIterator.
+func (expr *Expr) Evaluate(root NodeNavigator) interface{} {
+	val := expr.q.Evaluate(iteratorFunc(func() NodeNavigator { return root }))
+	switch val.(type) {
+	case query:
+		return &NodeIterator{query: expr.q.Clone(), node: root}
+	}
+	return val
+}
+
+// Select selects a node set using the specified XPath expression.
+func (expr *Expr) Select(root NodeNavigator) *NodeIterator {
+	return &NodeIterator{query: expr.q.Clone(), node: root}
+}
+
+// String returns the XPath expression string.
+func (expr *Expr) String() string {
+	return expr.s
+}
+
+// Compile compiles an XPath expression string.
+func Compile(expr string) (*Expr, error) {
+	if expr == "" {
+		return nil, errors.New("expr expression is nil")
+	}
+	qy, err := build(expr, nil)
+	if err != nil {
+		return nil, err
+	}
+	if qy == nil {
+		return nil, fmt.Errorf("undeclared variable in XPath expression: %s", expr)
+	}
+	return &Expr{s: expr, q: qy}, nil
+}
+
+// MustCompile compiles an XPath expression string and ignores compile errors;
+// on error it returns an Expr backed by a no-op query instead of panicking.
+func MustCompile(expr string) *Expr {
+	exp, err := Compile(expr)
+	if err != nil {
+		return &Expr{s: expr, q: nopQuery{}}
+	}
+	return exp
+}
+
+// CompileWithNS compiles an XPath expression string using the given namespaces map.
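+//
+// A minimal usage sketch (the prefix and namespace URI are illustrative):
+//
+//	expr, err := CompileWithNS("//ns:book", map[string]string{"ns": "http://example.com/ns"})
+//	if err != nil {
+//		// handle the compile error
+//	}
+//	_ = expr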
+func CompileWithNS(expr string, namespaces map[string]string) (*Expr, error) {
+	if expr == "" {
+		return nil, errors.New("expr expression is nil")
+	}
+	qy, err := build(expr, namespaces)
+	if err != nil {
+		return nil, err
+	}
+	if qy == nil {
+		return nil, fmt.Errorf("undeclared variable in XPath expression: %s", expr)
+	}
+	return &Expr{s: expr, q: qy}, nil
+}
diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/configuration.go b/vendor/github.com/apache/thrift/lib/go/thrift/configuration.go
index de27edd674c..a9565d399df 100644
--- a/vendor/github.com/apache/thrift/lib/go/thrift/configuration.go
+++ b/vendor/github.com/apache/thrift/lib/go/thrift/configuration.go
@@ -56,47 +56,47 @@ const (
 //
 // For example, say you want to migrate this old code into using TConfiguration:
 //
-// sccket, err := thrift.NewTSocketTimeout("host:port", time.Second, time.Second)
-// transFactory := thrift.NewTFramedTransportFactoryMaxLength(
-//     thrift.NewTTransportFactory(),
-//     1024 * 1024 * 256,
-// )
-// protoFactory := thrift.NewTBinaryProtocolFactory(true, true)
+//	socket, err := thrift.NewTSocketTimeout("host:port", time.Second, time.Second)
+//	transFactory := thrift.NewTFramedTransportFactoryMaxLength(
+//		thrift.NewTTransportFactory(),
+//		1024 * 1024 * 256,
+//	)
+//	protoFactory := thrift.NewTBinaryProtocolFactory(true, true)
 //
 // This is the wrong way to do it because in the end the TConfiguration used by
 // socket and transFactory will be overwritten by the one used by protoFactory
 // because of TConfiguration propagation:
 //
-// // bad example, DO NOT USE
-// sccket := thrift.NewTSocketConf("host:port", &thrift.TConfiguration{
-//     ConnectTimeout: time.Second,
-//     SocketTimeout:  time.Second,
-// })
-// transFactory := thrift.NewTFramedTransportFactoryConf(
-//     thrift.NewTTransportFactory(),
-//     &thrift.TConfiguration{
-//         MaxFrameSize: 1024 * 1024 * 256,
-//     },
-// )
-// protoFactory := thrift.NewTBinaryProtocolFactoryConf(&thrift.TConfiguration{
-//     TBinaryStrictRead:  thrift.BoolPtr(true),
-//     TBinaryStrictWrite: thrift.BoolPtr(true),
-// })
+//	// bad example, DO NOT USE
+//	socket := thrift.NewTSocketConf("host:port", &thrift.TConfiguration{
+//		ConnectTimeout: time.Second,
+//		SocketTimeout:  time.Second,
+//	})
+//	transFactory := thrift.NewTFramedTransportFactoryConf(
+//		thrift.NewTTransportFactory(),
+//		&thrift.TConfiguration{
+//			MaxFrameSize: 1024 * 1024 * 256,
+//		},
+//	)
+//	protoFactory := thrift.NewTBinaryProtocolFactoryConf(&thrift.TConfiguration{
+//		TBinaryStrictRead:  thrift.BoolPtr(true),
+//		TBinaryStrictWrite: thrift.BoolPtr(true),
+//	})
 //
 // This is the correct way to do it:
 //
-// conf := &thrift.TConfiguration{
-//     ConnectTimeout: time.Second,
-//     SocketTimeout:  time.Second,
+//	conf := &thrift.TConfiguration{
+//		ConnectTimeout: time.Second,
+//		SocketTimeout:  time.Second,
 //
-//     MaxFrameSize: 1024 * 1024 * 256,
+//		MaxFrameSize: 1024 * 1024 * 256,
 //
-//     TBinaryStrictRead:  thrift.BoolPtr(true),
-//     TBinaryStrictWrite: thrift.BoolPtr(true),
-// }
-// sccket := thrift.NewTSocketConf("host:port", conf)
-// transFactory := thrift.NewTFramedTransportFactoryConf(thrift.NewTTransportFactory(), conf)
-// protoFactory := thrift.NewTBinaryProtocolFactoryConf(conf)
+//		TBinaryStrictRead:  thrift.BoolPtr(true),
+//		TBinaryStrictWrite: thrift.BoolPtr(true),
+//	}
+//	socket := thrift.NewTSocketConf("host:port", conf)
+//	transFactory := thrift.NewTFramedTransportFactoryConf(thrift.NewTTransportFactory(), conf)
+//	protoFactory :=
thrift.NewTBinaryProtocolFactoryConf(conf) // // [1]: https://github.com/apache/thrift/blob/master/doc/specs/thrift-tconfiguration.md type TConfiguration struct { @@ -132,6 +132,8 @@ type TConfiguration struct { // THeaderProtocolIDPtr and THeaderProtocolIDPtrMust helper functions // are provided to help filling this value. THeaderProtocolID *THeaderProtocolID + // The write transforms to be applied to THeaderTransport. + THeaderTransforms []THeaderTransformID // Used internally by deprecated constructors, to avoid overriding // underlying TTransport/TProtocol's cfg by accidental propagations. @@ -245,6 +247,18 @@ func (tc *TConfiguration) GetTHeaderProtocolID() THeaderProtocolID { return protoID } +// GetTHeaderTransforms returns the THeaderTransformIDs to be applied on +// THeaderTransport writing. +// +// It's nil-safe. If tc is nil, empty slice will be returned (meaning no +// transforms to be applied). +func (tc *TConfiguration) GetTHeaderTransforms() []THeaderTransformID { + if tc == nil { + return nil + } + return tc.THeaderTransforms +} + // THeaderProtocolIDPtr validates and returns the pointer to id. // // If id is not a valid THeaderProtocolID, a pointer to THeaderProtocolDefault diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/exception.go b/vendor/github.com/apache/thrift/lib/go/thrift/exception.go index e2f1728eac5..5b4cad96faa 100644 --- a/vendor/github.com/apache/thrift/lib/go/thrift/exception.go +++ b/vendor/github.com/apache/thrift/lib/go/thrift/exception.go @@ -121,20 +121,20 @@ var _ TException = wrappedTException{} // // For a endpoint defined in thrift IDL like this: // -// service MyService { -// FooResponse foo(1: FooRequest request) throws ( -// 1: Exception1 error1, -// 2: Exception2 error2, -// ) -// } +// service MyService { +// FooResponse foo(1: FooRequest request) throws ( +// 1: Exception1 error1, +// 2: Exception2 error2, +// ) +// } // // The thrift compiler generated go code for the result TStruct would be like: // -// type MyServiceFooResult struct { -// Success *FooResponse `thrift:"success,0" db:"success" json:"success,omitempty"` -// Error1 *Exception1 `thrift:"error1,1" db:"error1" json:"error1,omitempty"` -// Error2 *Exception2 `thrift:"error2,2" db:"error2" json:"error2,omitempty"` -// } +// type MyServiceFooResult struct { +// Success *FooResponse `thrift:"success,0" db:"success" json:"success,omitempty"` +// Error1 *Exception1 `thrift:"error1,1" db:"error1" json:"error1,omitempty"` +// Error2 *Exception2 `thrift:"error2,2" db:"error2" json:"error2,omitempty"` +// } // // And this function extracts the first non-nil exception out of // *MyServiceFooResult. @@ -144,7 +144,7 @@ func ExtractExceptionFromResult(result TStruct) error { return nil } typ := v.Type() - for i := 0; i < v.NumField(); i++ { + for i := range v.NumField() { if typ.Field(i).Name == "Success" { continue } diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/header_protocol.go b/vendor/github.com/apache/thrift/lib/go/thrift/header_protocol.go index 36777b4ca00..bec84b85c49 100644 --- a/vendor/github.com/apache/thrift/lib/go/thrift/header_protocol.go +++ b/vendor/github.com/apache/thrift/lib/go/thrift/header_protocol.go @@ -119,6 +119,11 @@ func (p *THeaderProtocol) ClearWriteHeaders() { } // AddTransform add a transform for writing. +// +// Deprecated: This only applies to the next message written, and the next read +// message will cause write transforms to be reset from what's configured in +// TConfiguration. 
For sticky transforms, use TConfiguration.THeaderTransforms +// instead. func (p *THeaderProtocol) AddTransform(transform THeaderTransformID) error { return p.transport.AddTransform(transform) } diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/header_transport.go b/vendor/github.com/apache/thrift/lib/go/thrift/header_transport.go index 3aea5a9888d..d6d64160af3 100644 --- a/vendor/github.com/apache/thrift/lib/go/thrift/header_transport.go +++ b/vendor/github.com/apache/thrift/lib/go/thrift/header_transport.go @@ -128,7 +128,7 @@ var _ io.ReadCloser = (*TransformReader)(nil) // // If you don't know the closers capacity beforehand, just use // -// &TransformReader{Reader: baseReader} +// &TransformReader{Reader: baseReader} // // instead would be sufficient. func NewTransformReaderWithCapacity(baseReader io.Reader, capacity int) *TransformReader { @@ -151,6 +151,11 @@ func (tr *TransformReader) Close() error { } // AddTransform adds a transform. +// +// Deprecated: This only applies to the next message written, and the next read +// message will cause write transforms to be reset from what's configured in +// TConfiguration. For sticky transforms, use TConfiguration.THeaderTransforms +// instead. func (tr *TransformReader) AddTransform(id THeaderTransformID) error { switch id { default: @@ -206,6 +211,25 @@ func (tw *TransformWriter) Close() error { return nil } +var zlibDefaultLevelWriterPool = newPool( + func() *zlib.Writer { + return zlib.NewWriter(nil) + }, + nil, +) + +type zlibPoolCloser struct { + writer *zlib.Writer +} + +func (z *zlibPoolCloser) Close() error { + defer func() { + z.writer.Reset(nil) + zlibDefaultLevelWriterPool.put(&z.writer) + }() + return z.writer.Close() +} + // AddTransform adds a transform. func (tw *TransformWriter) AddTransform(id THeaderTransformID) error { switch id { @@ -217,9 +241,12 @@ func (tw *TransformWriter) AddTransform(id THeaderTransformID) error { case TransformNone: // no-op case TransformZlib: - writeCloser := zlib.NewWriter(tw.Writer) + writeCloser := zlibDefaultLevelWriterPool.get() + writeCloser.Reset(tw.Writer) tw.Writer = writeCloser - tw.closers = append(tw.closers, writeCloser) + tw.closers = append(tw.closers, &zlibPoolCloser{ + writer: writeCloser, + }) } return nil } @@ -300,11 +327,12 @@ func NewTHeaderTransportConf(trans TTransport, conf *TConfiguration) *THeaderTra } PropagateTConfiguration(trans, conf) return &THeaderTransport{ - transport: trans, - reader: bufio.NewReader(trans), - writeHeaders: make(THeaderMap), - protocolID: conf.GetTHeaderProtocolID(), - cfg: conf, + transport: trans, + reader: bufio.NewReader(trans), + writeHeaders: make(THeaderMap), + writeTransforms: conf.GetTHeaderTransforms(), + protocolID: conf.GetTHeaderProtocolID(), + cfg: conf, } } @@ -449,6 +477,11 @@ func (t *THeaderTransport) parseHeaders(ctx context.Context, frameSize uint32) e } t.protocolID = THeaderProtocolID(protoID) + // Reset writeTransforms to the ones from cfg, as we are going to add + // compression transforms from what we read, we don't want to accumulate + // different transforms read from different requests + t.writeTransforms = t.cfg.GetTHeaderTransforms() + var transformCount int32 transformCount, err = hp.readVarint32() if err != nil { @@ -461,12 +494,21 @@ func (t *THeaderTransport) parseHeaders(ctx context.Context, frameSize uint32) e ) t.frameReader = reader transformIDs := make([]THeaderTransformID, transformCount) - for i := 0; i < int(transformCount); i++ { + for i := range int(transformCount) { id, err := 
hp.readVarint32() if err != nil { return err } - transformIDs[i] = THeaderTransformID(id) + tID := THeaderTransformID(id) + transformIDs[i] = tID + + // For compression transforms, we should also add them + // to writeTransforms so that the response (assuming we + // are reading a request) would do the same compression. + switch tID { + case TransformZlib: + t.addWriteTransformsDedupe(tID) + } } // The transform IDs on the wire was added based on the order of // writing, so on the reading side we need to reverse the order. @@ -494,7 +536,7 @@ func (t *THeaderTransport) parseHeaders(ctx context.Context, frameSize uint32) e if err != nil { return err } - for i := 0; i < int(count); i++ { + for range int(count) { key, err := hp.ReadString(ctx) if err != nil { return err @@ -544,7 +586,7 @@ func (t *THeaderTransport) Read(p []byte) (read int, err error) { // the last Read finished the frame, do endOfFrame // handling here. err = t.endOfFrame() - } else if err == io.EOF { + } else if errors.Is(err, io.EOF) { err = t.endOfFrame() if err != nil { return @@ -726,6 +768,9 @@ func (t *THeaderTransport) ClearWriteHeaders() { } // AddTransform add a transform for writing. +// +// NOTE: This is provided as a low-level API, but in general you should use +// TConfiguration.THeaderTransforms to set transforms for writing instead. func (t *THeaderTransport) AddTransform(transform THeaderTransformID) error { if !supportedTransformIDs[transform] { return NewTProtocolExceptionWithType( @@ -758,6 +803,17 @@ func (t *THeaderTransport) isFramed() bool { } } +// addWriteTransformsDedupe adds id to writeTransforms only if it's not already +// there. +func (t *THeaderTransport) addWriteTransformsDedupe(id THeaderTransformID) { + for _, existingID := range t.writeTransforms { + if existingID == id { + return + } + } + t.writeTransforms = append(t.writeTransforms, id) +} + // SetTConfiguration implements TConfigurationSetter. 
func (t *THeaderTransport) SetTConfiguration(cfg *TConfiguration) { PropagateTConfiguration(t.transport, cfg) diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/protocol.go b/vendor/github.com/apache/thrift/lib/go/thrift/protocol.go index 2ee14caaad1..68cfe4aaa25 100644 --- a/vendor/github.com/apache/thrift/lib/go/thrift/protocol.go +++ b/vendor/github.com/apache/thrift/lib/go/thrift/protocol.go @@ -146,7 +146,7 @@ func Skip(ctx context.Context, self TProtocol, fieldType TType, maxDepth int) (e if err != nil { return err } - for i := 0; i < size; i++ { + for range size { err := Skip(ctx, self, keyType, maxDepth-1) if err != nil { return err @@ -163,7 +163,7 @@ func Skip(ctx context.Context, self TProtocol, fieldType TType, maxDepth int) (e if err != nil { return err } - for i := 0; i < size; i++ { + for range size { err := Skip(ctx, self, elemType, maxDepth-1) if err != nil { return err @@ -175,7 +175,7 @@ func Skip(ctx context.Context, self TProtocol, fieldType TType, maxDepth int) (e if err != nil { return err } - for i := 0; i < size; i++ { + for range size { err := Skip(ctx, self, elemType, maxDepth-1) if err != nil { return err diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/simple_json_protocol.go b/vendor/github.com/apache/thrift/lib/go/thrift/simple_json_protocol.go index 8b1284fd1b1..ec12991a1d7 100644 --- a/vendor/github.com/apache/thrift/lib/go/thrift/simple_json_protocol.go +++ b/vendor/github.com/apache/thrift/lib/go/thrift/simple_json_protocol.go @@ -30,6 +30,7 @@ import ( "io" "math" "strconv" + "strings" ) type _ParseContext int @@ -922,15 +923,7 @@ func (p *TSimpleJSONProtocol) ParseStringBody() (string, error) { if err != nil { return "", NewTProtocolException(err) } - l := len(line) - // count number of escapes to see if we need to keep going - i := 1 - for ; i < l; i++ { - if line[l-i-1] != '\\' { - break - } - } - if i&0x01 == 1 { + if endsWithoutEscapedQuote(line) { v, ok := jsonUnquote(string(JSON_QUOTE) + line) if !ok { return "", NewTProtocolException(err) @@ -951,27 +944,29 @@ func (p *TSimpleJSONProtocol) ParseStringBody() (string, error) { } func (p *TSimpleJSONProtocol) ParseQuotedStringBody() (string, error) { - line, err := p.reader.ReadString(JSON_QUOTE) - if err != nil { - return "", NewTProtocolException(err) + var sb strings.Builder + + for { + line, err := p.reader.ReadString(JSON_QUOTE) + if err != nil { + return "", NewTProtocolException(err) + } + sb.WriteString(line) + if endsWithoutEscapedQuote(line) { + return sb.String(), nil + } } - l := len(line) - // count number of escapes to see if we need to keep going +} + +func endsWithoutEscapedQuote(s string) bool { + l := len(s) i := 1 for ; i < l; i++ { - if line[l-i-1] != '\\' { + if s[l-i-1] != '\\' { break } } - if i&0x01 == 1 { - return line, nil - } - s, err := p.ParseQuotedStringBody() - if err != nil { - return "", NewTProtocolException(err) - } - v := line + s - return v, nil + return i&0x01 == 1 } func (p *TSimpleJSONProtocol) ParseBase64EncodedBody() ([]byte, error) { @@ -1200,7 +1195,7 @@ func (p *TSimpleJSONProtocol) readNumeric() (Numeric, error) { for continueFor { c, err := p.reader.ReadByte() if err != nil { - if err == io.EOF { + if errors.Is(err, io.EOF) { break } return NUMERIC_NULL, NewTProtocolException(err) @@ -1311,7 +1306,7 @@ func (p *TSimpleJSONProtocol) readNumeric() (Numeric, error) { // Safely peeks into the buffer, reading only what is necessary func (p *TSimpleJSONProtocol) safePeekContains(b []byte) bool { - for i := 0; i < len(b); i++ { + for i := 
range b { a, _ := p.reader.Peek(i + 1) if len(a) < (i+1) || a[i] != b[i] { return false diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/ssl_server_socket.go b/vendor/github.com/apache/thrift/lib/go/thrift/ssl_server_socket.go index 907afca326f..3f05ad93db6 100644 --- a/vendor/github.com/apache/thrift/lib/go/thrift/ssl_server_socket.go +++ b/vendor/github.com/apache/thrift/lib/go/thrift/ssl_server_socket.go @@ -93,6 +93,9 @@ func (p *TSSLServerSocket) Open() error { } func (p *TSSLServerSocket) Addr() net.Addr { + if p.listener != nil { + return p.listener.Addr() + } return p.addr } diff --git a/vendor/github.com/aws/aws-msk-iam-sasl-signer-go/LICENSE b/vendor/github.com/aws/aws-msk-iam-sasl-signer-go/LICENSE new file mode 100644 index 00000000000..67db8588217 --- /dev/null +++ b/vendor/github.com/aws/aws-msk-iam-sasl-signer-go/LICENSE @@ -0,0 +1,175 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
diff --git a/vendor/github.com/aws/aws-msk-iam-sasl-signer-go/NOTICE b/vendor/github.com/aws/aws-msk-iam-sasl-signer-go/NOTICE new file mode 100644 index 00000000000..616fc588945 --- /dev/null +++ b/vendor/github.com/aws/aws-msk-iam-sasl-signer-go/NOTICE @@ -0,0 +1 @@ +Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. diff --git a/vendor/github.com/aws/aws-msk-iam-sasl-signer-go/signer/msk_auth_token_provider.go b/vendor/github.com/aws/aws-msk-iam-sasl-signer-go/signer/msk_auth_token_provider.go new file mode 100644 index 00000000000..1c2670db2fa --- /dev/null +++ b/vendor/github.com/aws/aws-msk-iam-sasl-signer-go/signer/msk_auth_token_provider.go @@ -0,0 +1,305 @@ +package signer + +import ( + "context" + "crypto/sha256" + "encoding/base64" + "encoding/hex" + "fmt" + + "log" + "net/http" + "net/url" + "runtime" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/credentials" + "github.com/aws/aws-sdk-go-v2/service/sts" +) + +const ( + ActionType = "Action" // ActionType represents the key for the action type in the request. + ActionName = "kafka-cluster:Connect" // ActionName represents the specific action name for connecting to a Kafka cluster. + SigningName = "kafka-cluster" // SigningName represents the signing name for the Kafka cluster. + UserAgentKey = "User-Agent" // UserAgentKey represents the key for the User-Agent parameter in the request. + LibName = "aws-msk-iam-sasl-signer-go" // LibName represents the name of the library. + ExpiresQueryKey = "X-Amz-Expires" // ExpiresQueryKey represents the key for the expiration time in the query parameters. + DefaultSessionName = "MSKSASLDefaultSession" // DefaultSessionName represents the default session name for assuming a role. + DefaultExpirySeconds = 900 // DefaultExpirySeconds represents the default expiration time in seconds. +) + +var ( + endpointURLTemplate = "kafka.%s.amazonaws.com" // endpointURLTemplate represents the template for the Kafka endpoint URL + AwsDebugCreds = false // AwsDebugCreds flag indicates whether credentials should be debugged +) + +// GenerateAuthToken generates base64 encoded signed url as auth token from default credentials. +// Loads the IAM credentials from default credentials provider chain. +func GenerateAuthToken(ctx context.Context, region string) (string, int64, error) { + credentials, err := loadDefaultCredentials(ctx, region) + + if err != nil { + return "", 0, fmt.Errorf("failed to load credentials: %w", err) + } + + return constructAuthToken(ctx, region, credentials) +} + +// GenerateAuthTokenFromProfile generates base64 encoded signed url as auth token by loading IAM credentials from an AWS named profile. 
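+//
+// A minimal usage sketch (region and profile name are illustrative):
+//
+//	token, expiryMs, err := signer.GenerateAuthTokenFromProfile(ctx, "us-east-1", "my-profile")
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = token    // pass as the SASL/OAUTHBEARER token
+//	_ = expiryMs // token expiry in Unix epoch milliseconds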
+func GenerateAuthTokenFromProfile(ctx context.Context, region string, awsProfile string) (string, int64, error) {
+	credentials, err := loadCredentialsFromProfile(ctx, region, awsProfile)
+
+	if err != nil {
+		return "", 0, fmt.Errorf("failed to load credentials: %w", err)
+	}
+
+	return constructAuthToken(ctx, region, credentials)
+}
+
+// GenerateAuthTokenFromRole generates a base64-encoded signed URL as auth
+// token by loading IAM credentials from an AWS role ARN.
+func GenerateAuthTokenFromRole(
+	ctx context.Context, region string, roleArn string, stsSessionName string,
+) (string, int64, error) {
+	if stsSessionName == "" {
+		stsSessionName = DefaultSessionName
+	}
+	credentials, err := loadCredentialsFromRoleArn(ctx, region, roleArn, stsSessionName)
+
+	if err != nil {
+		return "", 0, fmt.Errorf("failed to load credentials: %w", err)
+	}
+
+	return constructAuthToken(ctx, region, credentials)
+}
+
+// GenerateAuthTokenFromCredentialsProvider generates a base64-encoded signed
+// URL as auth token by loading IAM credentials from an AWS credentials
+// provider.
+func GenerateAuthTokenFromCredentialsProvider(
+	ctx context.Context, region string, credentialsProvider aws.CredentialsProvider,
+) (string, int64, error) {
+	credentials, err := loadCredentialsFromCredentialsProvider(ctx, credentialsProvider)
+
+	if err != nil {
+		return "", 0, fmt.Errorf("failed to load credentials: %w", err)
+	}
+
+	return constructAuthToken(ctx, region, credentials)
+}
+
+// Loads credentials from the default credential chain.
+func loadDefaultCredentials(ctx context.Context, region string) (*aws.Credentials, error) {
+	cfg, err := config.LoadDefaultConfig(ctx, config.WithRegion(region))
+
+	if err != nil {
+		return nil, fmt.Errorf("unable to load SDK config: %w", err)
+	}
+
+	return loadCredentialsFromCredentialsProvider(ctx, cfg.Credentials)
+}
+
+// Loads credentials from a named AWS profile.
+func loadCredentialsFromProfile(ctx context.Context, region string, awsProfile string) (*aws.Credentials, error) {
+	cfg, err := config.LoadDefaultConfig(ctx,
+		config.WithRegion(region),
+		config.WithSharedConfigProfile(awsProfile),
+	)
+
+	if err != nil {
+		return nil, fmt.Errorf("unable to load SDK config: %w", err)
+	}
+
+	return loadCredentialsFromCredentialsProvider(ctx, cfg.Credentials)
+}
+
+// Loads credentials by assuming the passed role.
+// This implementation creates a new sts client for every call to get or
+// refresh the token. To avoid this, please use your own credentials provider.
+// If you wish to use a regional endpoint, please pass your own credentials provider.
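+//
+// A hedged sketch of that alternative, using the stscreds package from
+// aws-sdk-go-v2 (names and wiring are illustrative, not part of this file):
+//
+//	stsClient := sts.NewFromConfig(cfg)
+//	provider := stscreds.NewAssumeRoleProvider(stsClient, roleArn)
+//	token, _, err := signer.GenerateAuthTokenFromCredentialsProvider(ctx, region, aws.NewCredentialsCache(provider))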
+func loadCredentialsFromRoleArn( + ctx context.Context, region string, roleArn string, stsSessionName string, +) (*aws.Credentials, error) { + cfg, err := config.LoadDefaultConfig(ctx, config.WithRegion(region)) + + if err != nil { + return nil, fmt.Errorf("unable to load SDK config: %w", err) + } + + stsClient := sts.NewFromConfig(cfg) + + assumeRoleInput := &sts.AssumeRoleInput{ + RoleArn: aws.String(roleArn), + RoleSessionName: aws.String(stsSessionName), + } + assumeRoleOutput, err := stsClient.AssumeRole(ctx, assumeRoleInput) + if err != nil { + return nil, fmt.Errorf("unable to assume role, %s: %w", roleArn, err) + } + + //Create new aws.Credentials instance using the credentials from AssumeRoleOutput.Credentials + creds := aws.Credentials{ + AccessKeyID: *assumeRoleOutput.Credentials.AccessKeyId, + SecretAccessKey: *assumeRoleOutput.Credentials.SecretAccessKey, + SessionToken: *assumeRoleOutput.Credentials.SessionToken, + } + + return &creds, nil +} + +// Loads credentials from the credentials provider +func loadCredentialsFromCredentialsProvider( + ctx context.Context, credentialsProvider aws.CredentialsProvider, +) (*aws.Credentials, error) { + creds, err := credentialsProvider.Retrieve(ctx) + return &creds, err +} + +// Constructs Auth Token. +func constructAuthToken(ctx context.Context, region string, credentials *aws.Credentials) (string, int64, error) { + endpointURL := fmt.Sprintf(endpointURLTemplate, region) + + if credentials == nil || credentials.AccessKeyID == "" || credentials.SecretAccessKey == "" { + return "", 0, fmt.Errorf("aws credentials cannot be empty") + } + + if AwsDebugCreds { + logCallerIdentity(ctx, region, *credentials) + } + + req, err := buildRequest(DefaultExpirySeconds, endpointURL) + if err != nil { + return "", 0, fmt.Errorf("failed to build request for signing: %w", err) + } + + signedURL, err := signRequest(ctx, req, region, credentials) + if err != nil { + return "", 0, fmt.Errorf("failed to sign request with aws sig v4: %w", err) + } + + expirationTimeMs, err := getExpirationTimeMs(signedURL) + if err != nil { + return "", 0, fmt.Errorf("failed to extract expiration from signed url: %w", err) + } + + signedURLWithUserAgent, err := addUserAgent(signedURL) + if err != nil { + return "", 0, fmt.Errorf("failed to add user agent to the signed url: %w", err) + } + + return base64Encode(signedURLWithUserAgent), expirationTimeMs, nil +} + +// Build https request with query parameters in order to sign. +func buildRequest(expirySeconds int, endpointURL string) (*http.Request, error) { + query := url.Values{ + ActionType: {ActionName}, + ExpiresQueryKey: {strconv.FormatInt(int64(expirySeconds), 10)}, + } + + authURL := url.URL{ + Host: endpointURL, + Scheme: "https", + Path: "/", + RawQuery: query.Encode(), + } + + return http.NewRequest(http.MethodGet, authURL.String(), nil) +} + +// Sign request with aws sig v4. 
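+//
+// The presigned URL carries the standard SigV4 query parameters; its shape is
+// roughly (values illustrative, not real):
+//
+//	https://kafka.us-east-1.amazonaws.com/?Action=kafka-cluster%3AConnect&X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=...&X-Amz-Date=20240101T000000Z&X-Amz-Expires=900&X-Amz-SignedHeaders=host&X-Amz-Signature=...
+//
+// getExpirationTimeMs below parses X-Amz-Date and X-Amz-Expires out of this
+// query string to compute the token expiry.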
+func signRequest(ctx context.Context, req *http.Request, region string, credentials *aws.Credentials) (string, error) {
+	signer := v4.NewSigner()
+	signedURL, _, err := signer.PresignHTTP(ctx, *credentials, req,
+		calculateSHA256Hash(""),
+		SigningName,
+		region,
+		time.Now().UTC(),
+	)
+
+	return signedURL, err
+}
+
+// Parses the URL and gets the expiration time in millis associated with the signed url
+func getExpirationTimeMs(signedURL string) (int64, error) {
+	parsedURL, err := url.Parse(signedURL)
+
+	if err != nil {
+		return 0, fmt.Errorf("failed to parse the signed url: %w", err)
+	}
+
+	params := parsedURL.Query()
+	date, err := time.Parse("20060102T150405Z", params.Get("X-Amz-Date"))
+
+	if err != nil {
+		return 0, fmt.Errorf("failed to parse the 'X-Amz-Date' param from signed url: %w", err)
+	}
+
+	signingTimeMs := date.UnixNano() / int64(time.Millisecond)
+	expiryDurationSeconds, err := strconv.ParseInt(params.Get("X-Amz-Expires"), 10, 64)
+
+	if err != nil {
+		return 0, fmt.Errorf("failed to parse the 'X-Amz-Expires' param from signed url: %w", err)
+	}
+
+	expiryDurationMs := expiryDurationSeconds * 1000
+	expiryMs := signingTimeMs + expiryDurationMs
+	return expiryMs, nil
+}
+
+// Calculate sha256Hash and hex encode it.
+func calculateSHA256Hash(input string) string {
+	hash := sha256.Sum256([]byte(input))
+	return hex.EncodeToString(hash[:])
+}
+
+// Base64 encode with raw url encoding.
+func base64Encode(signedURL string) string {
+	signedURLBytes := []byte(signedURL)
+	return base64.RawURLEncoding.EncodeToString(signedURLBytes)
+}
+
+// Add user agent to the signed url
+func addUserAgent(signedURL string) (string, error) {
+	parsedSignedURL, err := url.Parse(signedURL)
+
+	if err != nil {
+		return "", fmt.Errorf("failed to parse signed url: %w", err)
+	}
+
+	query := parsedSignedURL.Query()
+	userAgent := strings.Join([]string{LibName, version, runtime.Version()}, "/")
+	query.Set(UserAgentKey, userAgent)
+	parsedSignedURL.RawQuery = query.Encode()
+
+	return parsedSignedURL.String(), nil
+}
+
+// Log caller identity to debug which credentials are being picked up
+func logCallerIdentity(ctx context.Context, region string, awsCredentials aws.Credentials) {
+	cfg, err := config.LoadDefaultConfig(ctx,
+		config.WithRegion(region),
+		config.WithCredentialsProvider(credentials.StaticCredentialsProvider{
+			Value: awsCredentials,
+		}),
+	)
+	if err != nil {
+		log.Printf("failed to load AWS configuration: %v", err)
+		// Return early: there is no usable config to query with.
+		return
+	}
+
+	stsClient := sts.NewFromConfig(cfg)
+
+	callerIdentity, err := stsClient.GetCallerIdentity(ctx, &sts.GetCallerIdentityInput{})
+
+	if err != nil {
+		log.Printf("failed to get caller identity: %v", err)
+		// Return early: callerIdentity is nil on error and would panic below.
+		return
+	}
+
+	log.Printf("Credentials Identity: {UserId: %s, Account: %s, Arn: %s}\n",
+		*callerIdentity.UserId,
+		*callerIdentity.Account,
+		*callerIdentity.Arn)
+}
diff --git a/vendor/github.com/aws/aws-msk-iam-sasl-signer-go/signer/version.go b/vendor/github.com/aws/aws-msk-iam-sasl-signer-go/signer/version.go
new file mode 100644
index 00000000000..d723e9085de
--- /dev/null
+++ b/vendor/github.com/aws/aws-msk-iam-sasl-signer-go/signer/version.go
@@ -0,0 +1,3 @@
+package signer
+
+const version = "1.0.0"
diff --git a/vendor/go.opentelemetry.io/collector/confmap/converter/expandconverter/LICENSE b/vendor/github.com/aws/aws-sdk-go-v2/LICENSE.txt
similarity index 100%
rename from vendor/go.opentelemetry.io/collector/confmap/converter/expandconverter/LICENSE
rename to vendor/github.com/aws/aws-sdk-go-v2/LICENSE.txt
diff --git
a/vendor/github.com/aws/aws-sdk-go-v2/NOTICE.txt b/vendor/github.com/aws/aws-sdk-go-v2/NOTICE.txt new file mode 100644 index 00000000000..899129ecc46 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/NOTICE.txt @@ -0,0 +1,3 @@ +AWS SDK for Go +Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. +Copyright 2014-2015 Stripe, Inc. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go new file mode 100644 index 00000000000..b361c13867c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go @@ -0,0 +1,197 @@ +package aws + +import ( + "net/http" + + smithybearer "github.com/aws/smithy-go/auth/bearer" + "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/middleware" +) + +// HTTPClient provides the interface to provide custom HTTPClients. Generally +// *http.Client is sufficient for most use cases. The HTTPClient should not +// follow 301 or 302 redirects. +type HTTPClient interface { + Do(*http.Request) (*http.Response, error) +} + +// A Config provides service configuration for service clients. +type Config struct { + // The region to send requests to. This parameter is required and must + // be configured globally or on a per-client basis unless otherwise + // noted. A full list of regions is found in the "Regions and Endpoints" + // document. + // + // See http://docs.aws.amazon.com/general/latest/gr/rande.html for + // information on AWS regions. + Region string + + // The credentials object to use when signing requests. + // Use the LoadDefaultConfig to load configuration from all the SDK's supported + // sources, and resolve credentials using the SDK's default credential chain. + Credentials CredentialsProvider + + // The Bearer Authentication token provider to use for authenticating API + // operation calls with a Bearer Authentication token. The API clients and + // operation must support Bearer Authentication scheme in order for the + // token provider to be used. API clients created with NewFromConfig will + // automatically be configured with this option, if the API client support + // Bearer Authentication. + // + // The SDK's config.LoadDefaultConfig can automatically populate this + // option for external configuration options such as SSO session. + // https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html + BearerAuthTokenProvider smithybearer.TokenProvider + + // The HTTP Client the SDK's API clients will use to invoke HTTP requests. + // The SDK defaults to a BuildableClient allowing API clients to create + // copies of the HTTP Client for service specific customizations. + // + // Use a (*http.Client) for custom behavior. Using a custom http.Client + // will prevent the SDK from modifying the HTTP client. + HTTPClient HTTPClient + + // An endpoint resolver that can be used to provide or override an endpoint + // for the given service and region. + // + // See the `aws.EndpointResolver` documentation for additional usage + // information. + // + // Deprecated: See Config.EndpointResolverWithOptions + EndpointResolver EndpointResolver + + // An endpoint resolver that can be used to provide or override an endpoint + // for the given service and region. + // + // When EndpointResolverWithOptions is specified, it will be used by a + // service client rather than using EndpointResolver if also specified. + // + // See the `aws.EndpointResolverWithOptions` documentation for additional + // usage information. 
+	//
+	// Deprecated: with the release of endpoint resolution v2 in API clients,
+	// EndpointResolver and EndpointResolverWithOptions are deprecated.
+	// Providing a value for this field will likely prevent you from using
+	// newer endpoint-related service features. See API client options
+	// EndpointResolverV2 and BaseEndpoint.
+	EndpointResolverWithOptions EndpointResolverWithOptions
+
+	// RetryMaxAttempts specifies the maximum number of attempts an API client
+	// will make on an operation that fails with a retryable error.
+	//
+	// API Clients will only use this value to construct a retryer if the
+	// Config.Retryer member is nil. This value will be ignored if
+	// Retryer is not nil.
+	RetryMaxAttempts int
+
+	// RetryMode specifies the retry model the API client will be created with.
+	//
+	// API Clients will only use this value to construct a retryer if the
+	// Config.Retryer member is nil. This value will be ignored if
+	// Retryer is not nil.
+	RetryMode RetryMode
+
+	// Retryer is a function that provides a Retryer implementation. A Retryer
+	// guides how HTTP requests should be retried in case of recoverable
+	// failures. When nil the API client will use a default retryer.
+	//
+	// In general, the provider function should return a new instance of a
+	// Retryer if you are attempting to provide a consistent Retryer
+	// configuration across all clients. This will ensure that each client will
+	// be provided a new instance of the Retryer implementation, and will avoid
+	// issues such as sharing the same retry token bucket across services.
+	//
+	// If not nil, RetryMaxAttempts and RetryMode will be ignored by API
+	// clients.
+	Retryer func() Retryer
+
+	// ConfigSources are the sources that were used to construct the Config.
+	// Allows for additional configuration to be loaded by clients.
+	ConfigSources []interface{}
+
+	// APIOptions provides the set of middleware mutations that modify how the
+	// API client requests will be handled. This is useful for adding
+	// additional tracing data to a request, or changing behavior of the SDK's
+	// client.
+	APIOptions []func(*middleware.Stack) error
+
+	// The logger writer interface to write logging messages to. Defaults to
+	// standard error.
+	Logger logging.Logger
+
+	// Configures the events that will be sent to the configured logger. This
+	// can be used to configure the logging of signing, retries, request, and
+	// responses of the SDK clients.
+	//
+	// See the ClientLogMode type documentation for the complete set of logging
+	// modes and available configuration.
+	ClientLogMode ClientLogMode
+
+	// The configured DefaultsMode. If not specified, service clients will
+	// default to legacy.
+	//
+	// Supported modes are: auto, cross-region, in-region, legacy, mobile,
+	// standard
+	DefaultsMode DefaultsMode
+
+	// The RuntimeEnvironment configuration, only populated if the DefaultsMode
+	// is set to DefaultsModeAuto and is initialized by
+	// `config.LoadDefaultConfig`. You should not populate this structure
+	// programmatically, or rely on the values here within your applications.
+	RuntimeEnvironment RuntimeEnvironment
+
+	// AppId is an optional application-specific identifier that can be set.
+	// When set it will be appended to the User-Agent header of every request
+	// in the form of App/{AppId}. This variable is sourced from environment
+	// variable AWS_SDK_UA_APP_ID or the shared config profile attribute sdk_ua_app_id.
+	// See https://docs.aws.amazon.com/sdkref/latest/guide/settings-reference.html for
+	// more information on environment variables and shared config settings.
+	AppID string
+
+	// BaseEndpoint is an intermediary transfer location to a service-specific
+	// BaseEndpoint on a service's Options.
+	BaseEndpoint *string
+}
+
+// NewConfig returns a new Config pointer that can be chained with builder
+// methods to set multiple configuration values inline without using pointers.
+func NewConfig() *Config {
+	return &Config{}
+}
+
+// Copy will return a shallow copy of the Config object.
+func (c Config) Copy() Config {
+	cp := c
+	return cp
+}
+
+// EndpointDiscoveryEnableState indicates if endpoint discovery is in an
+// enabled, disabled, auto, or unset state.
+//
+// Default behavior (Auto or Unset) indicates operations that require endpoint
+// discovery will use Endpoint Discovery by default. Operations that
+// optionally use Endpoint Discovery will not use Endpoint Discovery
+// unless EndpointDiscovery is explicitly enabled.
+type EndpointDiscoveryEnableState uint
+
+// Enumeration values for EndpointDiscoveryEnableState
+const (
+	// EndpointDiscoveryUnset represents EndpointDiscoveryEnableState is unset.
+	// Users do not need to use this value explicitly. The behavior for unset
+	// is the same as for EndpointDiscoveryAuto.
+	EndpointDiscoveryUnset EndpointDiscoveryEnableState = iota
+
+	// EndpointDiscoveryAuto represents an AUTO state that allows endpoint
+	// discovery only when required by the API. This is the default
+	// configuration resolved by the client if endpoint discovery is neither
+	// enabled nor disabled.
+	EndpointDiscoveryAuto // default state
+
+	// EndpointDiscoveryDisabled indicates the client MUST not perform endpoint
+	// discovery even when required.
+	EndpointDiscoveryDisabled
+
+	// EndpointDiscoveryEnabled indicates the client MUST always perform endpoint
+	// discovery if supported for the operation.
+	EndpointDiscoveryEnabled
+)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/context.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/context.go
new file mode 100644
index 00000000000..4d8e26ef321
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/context.go
@@ -0,0 +1,22 @@
+package aws
+
+import (
+	"context"
+	"time"
+)
+
+type suppressedContext struct {
+	context.Context
+}
+
+func (s *suppressedContext) Deadline() (deadline time.Time, ok bool) {
+	return time.Time{}, false
+}
+
+func (s *suppressedContext) Done() <-chan struct{} {
+	return nil
+}
+
+func (s *suppressedContext) Err() error {
+	return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/credential_cache.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/credential_cache.go
new file mode 100644
index 00000000000..781ac0ae2c0
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/credential_cache.go
@@ -0,0 +1,224 @@
+package aws
+
+import (
+	"context"
+	"fmt"
+	"sync/atomic"
+	"time"
+
+	sdkrand "github.com/aws/aws-sdk-go-v2/internal/rand"
+	"github.com/aws/aws-sdk-go-v2/internal/sync/singleflight"
+)
+
+// CredentialsCacheOptions are the options for configuring a CredentialsCache.
+type CredentialsCacheOptions struct {
+
+	// ExpiryWindow will allow the credentials to trigger refreshing prior to
+	// the credentials actually expiring. This is beneficial so race conditions
+	// with expiring credentials do not cause requests to fail unexpectedly
+	// due to ExpiredTokenException exceptions.
+	//
+	// An ExpiryWindow of 10s would cause calls to IsExpired() to return true
+	// 10 seconds before the credentials are actually expired. This can cause an
+	// increased number of requests to refresh the credentials to occur.
+	//
+	// If ExpiryWindow is 0 or less it will be ignored.
+	ExpiryWindow time.Duration
+
+	// ExpiryWindowJitterFrac provides a mechanism for randomizing the
+	// expiration of credentials within the configured ExpiryWindow by a random
+	// percentage. Valid values are between 0.0 and 1.0.
+	//
+	// As an example if ExpiryWindow is 60 seconds and ExpiryWindowJitterFrac
+	// is 0.5 then credentials will be set to expire between 30 to 60 seconds
+	// prior to their actual expiration time.
+	//
+	// If ExpiryWindow is 0 or less then ExpiryWindowJitterFrac is ignored.
+	// If ExpiryWindowJitterFrac is 0 then no randomization will be applied to the window.
+	// If ExpiryWindowJitterFrac < 0 the value will be treated as 0.
+	// If ExpiryWindowJitterFrac > 1 the value will be treated as 1.
+	ExpiryWindowJitterFrac float64
+}
+
+// CredentialsCache provides caching and concurrency safe credentials retrieval
+// via the provider's retrieve method.
+//
+// CredentialsCache will look for optional interfaces on the Provider to adjust
+// how the credential cache handles credentials caching.
+//
+//   - HandleFailRefreshCredentialsCacheStrategy - Allows the provider to handle
+//     credential refresh failures. This could return an updated Credentials
+//     value, or attempt another means of retrieving credentials.
+//
+//   - AdjustExpiresByCredentialsCacheStrategy - Allows the provider to adjust
+//     how the credentials Expires value is modified based on the
+//     CredentialsCache ExpiryWindow option, such as providing a floor below
+//     which Expires will not be reduced.
+type CredentialsCache struct {
+	provider CredentialsProvider
+
+	options CredentialsCacheOptions
+	creds   atomic.Value
+	sf      singleflight.Group
+}
+
+// NewCredentialsCache returns a CredentialsCache that wraps provider. Provider
+// is expected to not be nil. A variadic list of one or more functions can be
+// provided to modify the CredentialsCache configuration. This allows for
+// configuration of credential expiry window and jitter.
+func NewCredentialsCache(provider CredentialsProvider, optFns ...func(options *CredentialsCacheOptions)) *CredentialsCache {
+	options := CredentialsCacheOptions{}
+
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	if options.ExpiryWindow < 0 {
+		options.ExpiryWindow = 0
+	}
+
+	if options.ExpiryWindowJitterFrac < 0 {
+		options.ExpiryWindowJitterFrac = 0
+	} else if options.ExpiryWindowJitterFrac > 1 {
+		options.ExpiryWindowJitterFrac = 1
+	}
+
+	return &CredentialsCache{
+		provider: provider,
+		options:  options,
+	}
+}
+
+// Retrieve returns the credentials. If the credentials have already been
+// retrieved and have not expired, the cached credentials will be returned. If
+// the credentials have not been retrieved yet, or have expired, the provider's
+// Retrieve method will be called.
+//
+// Returns an error if the provider's retrieve method returns an error.
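+//
+// A minimal usage sketch (appCreds is a placeholder CredentialsProvider
+// implementation, not part of this package):
+//
+//	cache := aws.NewCredentialsCache(appCreds, func(o *aws.CredentialsCacheOptions) {
+//		o.ExpiryWindow = 5 * time.Minute
+//	})
+//	creds, err := cache.Retrieve(context.TODO())
+//	if err != nil {
+//		log.Fatalf("failed to retrieve credentials, %v", err)
+//	}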
+func (p *CredentialsCache) Retrieve(ctx context.Context) (Credentials, error) {
+	if creds, ok := p.getCreds(); ok && !creds.Expired() {
+		return creds, nil
+	}
+
+	resCh := p.sf.DoChan("", func() (interface{}, error) {
+		return p.singleRetrieve(&suppressedContext{ctx})
+	})
+	select {
+	case res := <-resCh:
+		return res.Val.(Credentials), res.Err
+	case <-ctx.Done():
+		return Credentials{}, &RequestCanceledError{Err: ctx.Err()}
+	}
+}
+
+func (p *CredentialsCache) singleRetrieve(ctx context.Context) (interface{}, error) {
+	currCreds, ok := p.getCreds()
+	if ok && !currCreds.Expired() {
+		return currCreds, nil
+	}
+
+	newCreds, err := p.provider.Retrieve(ctx)
+	if err != nil {
+		handleFailToRefresh := defaultHandleFailToRefresh
+		if cs, ok := p.provider.(HandleFailRefreshCredentialsCacheStrategy); ok {
+			handleFailToRefresh = cs.HandleFailToRefresh
+		}
+		newCreds, err = handleFailToRefresh(ctx, currCreds, err)
+		if err != nil {
+			return Credentials{}, fmt.Errorf("failed to refresh cached credentials, %w", err)
+		}
+	}
+
+	if newCreds.CanExpire && p.options.ExpiryWindow > 0 {
+		adjustExpiresBy := defaultAdjustExpiresBy
+		if cs, ok := p.provider.(AdjustExpiresByCredentialsCacheStrategy); ok {
+			adjustExpiresBy = cs.AdjustExpiresBy
+		}
+
+		randFloat64, err := sdkrand.CryptoRandFloat64()
+		if err != nil {
+			return Credentials{}, fmt.Errorf("failed to get random provider, %w", err)
+		}
+
+		var jitter time.Duration
+		if p.options.ExpiryWindowJitterFrac > 0 {
+			jitter = time.Duration(randFloat64 *
+				p.options.ExpiryWindowJitterFrac * float64(p.options.ExpiryWindow))
+		}
+
+		newCreds, err = adjustExpiresBy(newCreds, -(p.options.ExpiryWindow - jitter))
+		if err != nil {
+			return Credentials{}, fmt.Errorf("failed to adjust credentials expires, %w", err)
+		}
+	}
+
+	p.creds.Store(&newCreds)
+	return newCreds, nil
+}
+
+// getCreds returns the currently stored credentials and true. It returns
+// false if no credentials are stored.
+func (p *CredentialsCache) getCreds() (Credentials, bool) {
+	v := p.creds.Load()
+	if v == nil {
+		return Credentials{}, false
+	}
+
+	c := v.(*Credentials)
+	if c == nil || !c.HasKeys() {
+		return Credentials{}, false
+	}
+
+	return *c, true
+}
+
+// Invalidate will invalidate the cached credentials. The next call to Retrieve
+// will cause the provider's Retrieve method to be called.
+func (p *CredentialsCache) Invalidate() {
+	p.creds.Store((*Credentials)(nil))
+}
+
+// IsCredentialsProvider returns whether the credential provider wrapped by
+// CredentialsCache matches the target provider type.
+func (p *CredentialsCache) IsCredentialsProvider(target CredentialsProvider) bool {
+	return IsCredentialsProvider(p.provider, target)
+}
+
+// HandleFailRefreshCredentialsCacheStrategy is an interface for
+// CredentialsCache that allows a CredentialsProvider to control how a failure
+// to refresh credentials is handled.
+type HandleFailRefreshCredentialsCacheStrategy interface {
+	// Given the previously cached Credentials, if any, and the refresh error,
+	// may return a new or modified set of Credentials, or an error.
+	//
+	// Credential caches may use the default implementation if nil.
+	HandleFailToRefresh(context.Context, Credentials, error) (Credentials, error)
+}
+
+// defaultHandleFailToRefresh returns the passed in error.
+func defaultHandleFailToRefresh(ctx context.Context, _ Credentials, err error) (Credentials, error) {
+	return Credentials{}, err
+}
+
+// AdjustExpiresByCredentialsCacheStrategy is an interface for CredentialsCache
+// that allows a CredentialsProvider to intercept adjustments to the Credentials
+// expiry based on the expectations and use cases of the CredentialsProvider.
+//
+// Credential caches may use the default implementation if nil.
+type AdjustExpiresByCredentialsCacheStrategy interface {
+	// Given a Credentials value as input, applies any mutations and returns
+	// the potentially updated Credentials value, or an error.
+	AdjustExpiresBy(Credentials, time.Duration) (Credentials, error)
+}
+
+// defaultAdjustExpiresBy adds the duration to the passed in credentials Expires,
+// and returns the updated credentials value. If the Credentials value's CanExpire
+// is false, the passed in credentials are returned unchanged.
+func defaultAdjustExpiresBy(creds Credentials, dur time.Duration) (Credentials, error) {
+	if !creds.CanExpire {
+		return creds, nil
+	}
+
+	creds.Expires = creds.Expires.Add(dur)
+	return creds, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go
new file mode 100644
index 00000000000..714d4ad85cb
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go
@@ -0,0 +1,170 @@
+package aws
+
+import (
+	"context"
+	"fmt"
+	"reflect"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/internal/sdk"
+)
+
+// AnonymousCredentials provides a sentinel CredentialsProvider that should be
+// used to instruct the SDK's signing middleware to not sign the request.
+//
+// Using `nil` credentials when configuring an API client will achieve the same
+// result. The AnonymousCredentials type allows you to configure the SDK's
+// external config loading to not attempt to source credentials from the shared
+// config or environment.
+//
+// For example, you can use this CredentialsProvider with an API client's
+// Options to instruct the client not to sign a request for accessing public
+// S3 bucket objects.
+//
+// The following example demonstrates using the AnonymousCredentials to prevent
+// the SDK's external config loading from attempting to resolve credentials.
+//
+//	cfg, err := config.LoadDefaultConfig(context.TODO(),
+//		config.WithCredentialsProvider(aws.AnonymousCredentials{}),
+//	)
+//	if err != nil {
+//		log.Fatalf("failed to load config, %v", err)
+//	}
+//
+//	client := s3.NewFromConfig(cfg)
+//
+// Alternatively, you can leave the API client Options' `Credentials` member as
+// nil. If using the `NewFromConfig` constructor you'll need to explicitly set
+// the `Credentials` member to nil, if the external config resolved a
+// credential provider.
+//
+//	client := s3.New(s3.Options{
+//		// Credentials defaults to a nil value.
+//	})
+//
+// This can also be configured for specific operation calls too.
+//
+//	cfg, err := config.LoadDefaultConfig(context.TODO())
+//	if err != nil {
+//		log.Fatalf("failed to load config, %v", err)
+//	}
+//
+//	client := s3.NewFromConfig(cfg)
+//
+//	result, err := client.GetObject(context.TODO(), &s3.GetObjectInput{
+//		Bucket: aws.String("example-bucket"),
+//		Key:    aws.String("example-key"),
+//	}, func(o *s3.Options) {
+//		o.Credentials = nil
+//		// Or
+//		o.Credentials = aws.AnonymousCredentials{}
+//	})
+type AnonymousCredentials struct{}
+
+// Retrieve implements the CredentialsProvider interface, but will always
+// return an error, and cannot be used to sign a request.
+// The AnonymousCredentials type is used as a sentinel type instructing the AWS
+// request signing middleware to not sign a request.
+func (AnonymousCredentials) Retrieve(context.Context) (Credentials, error) {
+	return Credentials{Source: "AnonymousCredentials"},
+		fmt.Errorf("the AnonymousCredentials is not a valid credential provider, and cannot be used to sign AWS requests with")
+}
+
+// Credentials is the AWS credentials value, composed of the individual
+// credential fields.
+type Credentials struct {
+	// AWS Access key ID
+	AccessKeyID string
+
+	// AWS Secret Access Key
+	SecretAccessKey string
+
+	// AWS Session Token
+	SessionToken string
+
+	// Source of the credentials
+	Source string
+
+	// States whether the credentials can expire or not.
+	CanExpire bool
+
+	// The time the credentials will expire at. Should be ignored if CanExpire
+	// is false.
+	Expires time.Time
+}
+
+// Expired returns whether the credentials have expired.
+func (v Credentials) Expired() bool {
+	if v.CanExpire {
+		// Calling Round(0) on the current time will truncate the monotonic
+		// reading only. Ensures credential expiry time is always based on
+		// reported wall-clock time.
+		return !v.Expires.After(sdk.NowTime().Round(0))
+	}
+
+	return false
+}
+
+// HasKeys returns whether the credentials keys are set.
+func (v Credentials) HasKeys() bool {
+	return len(v.AccessKeyID) > 0 && len(v.SecretAccessKey) > 0
+}
+
+// A CredentialsProvider is the interface for any component which will provide
+// Credentials. A CredentialsProvider is required to manage its own
+// Expired state, and what being expired means.
+//
+// A credentials provider implementation can be wrapped with a CredentialsCache
+// to cache the credential value retrieved. Without the cache the SDK will
+// attempt to retrieve the credentials for every request.
+type CredentialsProvider interface {
+	// Retrieve returns nil if it successfully retrieved the value.
+	// An error is returned if the value was not obtainable, or empty.
+	Retrieve(ctx context.Context) (Credentials, error)
+}
+
+// CredentialsProviderFunc provides a helper wrapping a function value to
+// satisfy the CredentialsProvider interface.
+type CredentialsProviderFunc func(context.Context) (Credentials, error)
+
+// Retrieve delegates to the function value the CredentialsProviderFunc wraps.
+func (fn CredentialsProviderFunc) Retrieve(ctx context.Context) (Credentials, error) {
+	return fn(ctx)
+}
+
+type isCredentialsProvider interface {
+	IsCredentialsProvider(CredentialsProvider) bool
+}
+
+// IsCredentialsProvider returns whether the target CredentialsProvider is the
+// same type as provider when comparing the implementation type.
+//
+// If provider has a method IsCredentialsProvider(CredentialsProvider) bool, it
+// will be responsible for validating whether target matches the credential
+// provider type.
+//
+// When comparing the CredentialsProvider implementations provider and target
+// for equality, the following rules are used:
+//
+//	If provider is of type T and target is of type V, true if type *T is the same as type *V, otherwise false
+//	If provider is of type *T and target is of type V, true if type *T is the same as type *V, otherwise false
+//	If provider is of type T and target is of type *V, true if type *T is the same as type *V, otherwise false
+//	If provider is of type *T and target is of type *V, true if type *T is the same as type *V, otherwise false
+func IsCredentialsProvider(provider, target CredentialsProvider) bool {
+	if target == nil || provider == nil {
+		return provider == target
+	}
+
+	if x, ok := provider.(isCredentialsProvider); ok {
+		return x.IsCredentialsProvider(target)
+	}
+
+	targetType := reflect.TypeOf(target)
+	if targetType.Kind() != reflect.Ptr {
+		targetType = reflect.PtrTo(targetType)
+	}
+
+	providerType := reflect.TypeOf(provider)
+	if providerType.Kind() != reflect.Ptr {
+		providerType = reflect.PtrTo(providerType)
+	}
+
+	return targetType.AssignableTo(providerType)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/auto.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/auto.go
new file mode 100644
index 00000000000..fd408e51860
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/auto.go
@@ -0,0 +1,38 @@
+package defaults
+
+import (
+	"runtime"
+	"strings"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+)
+
+var getGOOS = func() string {
+	return runtime.GOOS
+}
+
+// ResolveDefaultsModeAuto is used to determine the effective aws.DefaultsMode when the mode
+// is set to aws.DefaultsModeAuto.
+func ResolveDefaultsModeAuto(region string, environment aws.RuntimeEnvironment) aws.DefaultsMode {
+	goos := getGOOS()
+	if goos == "android" || goos == "ios" {
+		return aws.DefaultsModeMobile
+	}
+
+	var currentRegion string
+	if len(environment.EnvironmentIdentifier) > 0 {
+		currentRegion = environment.Region
+	}
+
+	if len(currentRegion) == 0 && len(environment.EC2InstanceMetadataRegion) > 0 {
+		currentRegion = environment.EC2InstanceMetadataRegion
+	}
+
+	if len(region) > 0 && len(currentRegion) > 0 {
+		if strings.EqualFold(region, currentRegion) {
+			return aws.DefaultsModeInRegion
+		}
+		return aws.DefaultsModeCrossRegion
+	}
+
+	return aws.DefaultsModeStandard
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/configuration.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/configuration.go
new file mode 100644
index 00000000000..8b7e01fa29a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/configuration.go
@@ -0,0 +1,43 @@
+package defaults
+
+import (
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+)
+
+// Configuration is the set of SDK configuration options that are determined based
+// on the configured DefaultsMode.
+type Configuration struct {
+	// RetryMode is the configuration's default retry mode API clients should
+	// use for constructing a Retryer.
+	RetryMode aws.RetryMode
+
+	// ConnectTimeout is the maximum amount of time a dial will wait for
+	// a connect to complete.
+	//
+	// See https://pkg.go.dev/net#Dialer.Timeout
+	ConnectTimeout *time.Duration
+
+	// TLSNegotiationTimeout specifies the maximum amount of time to wait
+	// for a TLS handshake.
+	//
+	// See https://pkg.go.dev/net/http#Transport.TLSHandshakeTimeout
+	TLSNegotiationTimeout *time.Duration
+}
+
+// GetConnectTimeout returns the ConnectTimeout value, returns false if the value is not set.
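+//
+// A minimal sketch of reading the resolved value (assumes the standard
+// defaults mode):
+//
+//	cfg, err := defaults.GetModeConfiguration(aws.DefaultsModeStandard)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	if timeout, ok := cfg.GetConnectTimeout(); ok {
+//		fmt.Println("connect timeout:", timeout)
+//	}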
+func (c *Configuration) GetConnectTimeout() (time.Duration, bool) { + if c.ConnectTimeout == nil { + return 0, false + } + return *c.ConnectTimeout, true +} + +// GetTLSNegotiationTimeout returns the TLSNegotiationTimeout value, returns false if the value is not set. +func (c *Configuration) GetTLSNegotiationTimeout() (time.Duration, bool) { + if c.TLSNegotiationTimeout == nil { + return 0, false + } + return *c.TLSNegotiationTimeout, true +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/defaults.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/defaults.go new file mode 100644 index 00000000000..dbaa873dc89 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/defaults.go @@ -0,0 +1,50 @@ +// Code generated by github.com/aws/aws-sdk-go-v2/internal/codegen/cmd/defaultsconfig. DO NOT EDIT. + +package defaults + +import ( + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + "time" +) + +// GetModeConfiguration returns the default Configuration descriptor for the given mode. +// +// Supports the following modes: cross-region, in-region, mobile, standard +func GetModeConfiguration(mode aws.DefaultsMode) (Configuration, error) { + var mv aws.DefaultsMode + mv.SetFromString(string(mode)) + + switch mv { + case aws.DefaultsModeCrossRegion: + settings := Configuration{ + ConnectTimeout: aws.Duration(3100 * time.Millisecond), + RetryMode: aws.RetryMode("standard"), + TLSNegotiationTimeout: aws.Duration(3100 * time.Millisecond), + } + return settings, nil + case aws.DefaultsModeInRegion: + settings := Configuration{ + ConnectTimeout: aws.Duration(1100 * time.Millisecond), + RetryMode: aws.RetryMode("standard"), + TLSNegotiationTimeout: aws.Duration(1100 * time.Millisecond), + } + return settings, nil + case aws.DefaultsModeMobile: + settings := Configuration{ + ConnectTimeout: aws.Duration(30000 * time.Millisecond), + RetryMode: aws.RetryMode("standard"), + TLSNegotiationTimeout: aws.Duration(30000 * time.Millisecond), + } + return settings, nil + case aws.DefaultsModeStandard: + settings := Configuration{ + ConnectTimeout: aws.Duration(3100 * time.Millisecond), + RetryMode: aws.RetryMode("standard"), + TLSNegotiationTimeout: aws.Duration(3100 * time.Millisecond), + } + return settings, nil + default: + return Configuration{}, fmt.Errorf("unsupported defaults mode: %v", mode) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/doc.go new file mode 100644 index 00000000000..2d90011b426 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaults/doc.go @@ -0,0 +1,2 @@ +// Package defaults provides recommended configuration values for AWS SDKs and CLIs. +package defaults diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/defaultsmode.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaultsmode.go new file mode 100644 index 00000000000..fcf9387c281 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/defaultsmode.go @@ -0,0 +1,95 @@ +// Code generated by github.com/aws/aws-sdk-go-v2/internal/codegen/cmd/defaultsmode. DO NOT EDIT. + +package aws + +import ( + "strings" +) + +// DefaultsMode is the SDK defaults mode setting. +type DefaultsMode string + +// The DefaultsMode constants. +const ( + // DefaultsModeAuto is an experimental mode that builds on the standard mode. + // The SDK will attempt to discover the execution environment to determine the + // appropriate settings automatically. 
+	//
+	// Note that the auto detection is heuristics-based and does not guarantee 100%
+	// accuracy. STANDARD mode will be used if the execution environment cannot
+	// be determined. The auto detection might query EC2 Instance Metadata service
+	// (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html),
+	// which might introduce latency. Therefore we recommend choosing an explicit
+	// defaults_mode instead if startup latency is critical to your application.
+	DefaultsModeAuto DefaultsMode = "auto"
+
+	// DefaultsModeCrossRegion builds on the standard mode and includes optimizations
+	// tailored for applications which call AWS services in a different region.
+	//
+	// Note that the default values vended from this mode might change as best practices
+	// may evolve. As a result, it is encouraged to perform tests when upgrading
+	// the SDK.
+	DefaultsModeCrossRegion DefaultsMode = "cross-region"
+
+	// DefaultsModeInRegion builds on the standard mode and includes optimizations
+	// tailored for applications which call AWS services from within the same AWS
+	// region.
+	//
+	// Note that the default values vended from this mode might change as best practices
+	// may evolve. As a result, it is encouraged to perform tests when upgrading
+	// the SDK.
+	DefaultsModeInRegion DefaultsMode = "in-region"
+
+	// DefaultsModeLegacy provides default settings that vary per SDK and were used
+	// prior to the establishment of defaults_mode.
+	DefaultsModeLegacy DefaultsMode = "legacy"
+
+	// DefaultsModeMobile builds on the standard mode and includes optimizations
+	// tailored for mobile applications.
+	//
+	// Note that the default values vended from this mode might change as best practices
+	// may evolve. As a result, it is encouraged to perform tests when upgrading
+	// the SDK.
+	DefaultsModeMobile DefaultsMode = "mobile"
+
+	// DefaultsModeStandard provides the latest recommended default values that
+	// should be safe to run in most scenarios.
+	//
+	// Note that the default values vended from this mode might change as best practices
+	// may evolve. As a result, it is encouraged to perform tests when upgrading
+	// the SDK.
+	DefaultsModeStandard DefaultsMode = "standard"
+)
+
+// SetFromString sets the DefaultsMode value to one of the pre-defined constants that matches
+// the provided string when compared using EqualFold. If the value does not match a known
+// constant it will be set as-is and the function will return false. As a special case, if the
+// provided value is a zero-length string, the mode will be set to DefaultsModeLegacy.
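+//
+// A minimal sketch:
+//
+//	var mode aws.DefaultsMode
+//	if ok := mode.SetFromString("in-region"); !ok {
+//		// the string did not match a known mode; mode now holds the raw value
+//	}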
+func (d *DefaultsMode) SetFromString(v string) (ok bool) {
+	switch {
+	case strings.EqualFold(v, string(DefaultsModeAuto)):
+		*d = DefaultsModeAuto
+		ok = true
+	case strings.EqualFold(v, string(DefaultsModeCrossRegion)):
+		*d = DefaultsModeCrossRegion
+		ok = true
+	case strings.EqualFold(v, string(DefaultsModeInRegion)):
+		*d = DefaultsModeInRegion
+		ok = true
+	case strings.EqualFold(v, string(DefaultsModeLegacy)):
+		*d = DefaultsModeLegacy
+		ok = true
+	case strings.EqualFold(v, string(DefaultsModeMobile)):
+		*d = DefaultsModeMobile
+		ok = true
+	case strings.EqualFold(v, string(DefaultsModeStandard)):
+		*d = DefaultsModeStandard
+		ok = true
+	case len(v) == 0:
+		*d = DefaultsModeLegacy
+		ok = true
+	default:
+		*d = DefaultsMode(v)
+	}
+	return ok
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/doc.go
new file mode 100644
index 00000000000..d8b6e09e593
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/doc.go
@@ -0,0 +1,62 @@
+// Package aws provides the core SDK's utilities and shared types. Use this package's
+// utilities to simplify setting and reading API operations parameters.
+//
+// # Value and Pointer Conversion Utilities
+//
+// This package includes a helper conversion utility for each scalar type the SDK's
+// API use. These utilities make getting a pointer to a scalar, and dereferencing
+// a pointer, easier.
+//
+// Each conversion utility comes in two forms. Value to Pointer and Pointer to Value.
+// The Pointer to value form will safely dereference the pointer and return its value.
+// If the pointer was nil, the scalar's zero value will be returned.
+//
+// The value to pointer functions will be named after the scalar type. So to get a
+// *string from a string value, use the "String" function. This makes it easy
+// to get a pointer to a literal string value, because getting the address of a
+// literal requires assigning the value to a variable first.
+//
+//	var strPtr *string
+//
+//	// Without the SDK's conversion functions
+//	str := "my string"
+//	strPtr = &str
+//
+//	// With the SDK's conversion functions
+//	strPtr = aws.String("my string")
+//
+//	// Convert *string to string value
+//	str = aws.ToString(strPtr)
+//
+// In addition to scalars, the aws package also includes conversion utilities for
+// map and slice types commonly used in API parameters. The map and slice
+// conversion functions use a similar naming pattern as the scalar conversion
+// functions.
+//
+//	var strPtrs []*string
+//	var strs []string = []string{"Go", "Gophers", "Go"}
+//
+//	// Convert []string to []*string
+//	strPtrs = aws.StringSlice(strs)
+//
+//	// Convert []*string to []string
+//	strs = aws.ToStringSlice(strPtrs)
+//
+// # SDK Default HTTP Client
+//
+// The SDK will use the http.DefaultClient if an HTTP client is not provided to
+// the SDK's Session, or service client constructor. This means that if the
+// http.DefaultClient is modified by other components of your application the
+// modifications will be picked up by the SDK as well.
+//
+// In some cases this might be intended, but it is a better practice to create
+// a custom HTTP Client to share explicitly through your application. You can
+// configure the SDK to use the custom HTTP Client by setting the HTTPClient
+// value of the SDK's Config type when creating a Session or service client.
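+//
+// A minimal sketch of sharing an explicit HTTP client (the timeout is only
+// illustrative):
+//
+//	httpClient := &http.Client{Timeout: 30 * time.Second}
+//	cfg := aws.Config{
+//		Region:     "us-west-2",
+//		HTTPClient: httpClient,
+//	}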
+package aws
+
+// generate.go uses a build tag of "ignore", go run doesn't need to specify
+// this because go run ignores all build flags when running a go file directly.
+//go:generate go run -tags codegen generate.go
+//go:generate go run -tags codegen logging_generate.go
+//go:generate gofmt -w -s .
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints.go
new file mode 100644
index 00000000000..aa10a9b40f0
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints.go
@@ -0,0 +1,229 @@
+package aws
+
+import (
+	"fmt"
+)
+
+// DualStackEndpointState is a constant to describe the dual-stack endpoint resolution behavior.
+type DualStackEndpointState uint
+
+const (
+	// DualStackEndpointStateUnset is the default value behavior for dual-stack endpoint resolution.
+	DualStackEndpointStateUnset DualStackEndpointState = iota
+
+	// DualStackEndpointStateEnabled enables dual-stack endpoint resolution for service endpoints.
+	DualStackEndpointStateEnabled
+
+	// DualStackEndpointStateDisabled disables dual-stack endpoint resolution for endpoints.
+	DualStackEndpointStateDisabled
+)
+
+// GetUseDualStackEndpoint takes a service's EndpointResolverOptions and returns the UseDualStackEndpoint value.
+// Returns boolean false if the provided options do not have a method to retrieve the DualStackEndpointState.
+func GetUseDualStackEndpoint(options ...interface{}) (value DualStackEndpointState, found bool) {
+	type iface interface {
+		GetUseDualStackEndpoint() DualStackEndpointState
+	}
+	for _, option := range options {
+		if i, ok := option.(iface); ok {
+			value = i.GetUseDualStackEndpoint()
+			found = true
+			break
+		}
+	}
+	return value, found
+}
+
+// FIPSEndpointState is a constant to describe the FIPS endpoint resolution behavior.
+type FIPSEndpointState uint
+
+const (
+	// FIPSEndpointStateUnset is the default value behavior for FIPS endpoint resolution.
+	FIPSEndpointStateUnset FIPSEndpointState = iota
+
+	// FIPSEndpointStateEnabled enables FIPS endpoint resolution for service endpoints.
+	FIPSEndpointStateEnabled
+
+	// FIPSEndpointStateDisabled disables FIPS endpoint resolution for endpoints.
+	FIPSEndpointStateDisabled
+)
+
+// GetUseFIPSEndpoint takes a service's EndpointResolverOptions and returns the UseFIPSEndpoint value.
+// Returns boolean false if the provided options do not have a method to retrieve the FIPSEndpointState.
+func GetUseFIPSEndpoint(options ...interface{}) (value FIPSEndpointState, found bool) {
+	type iface interface {
+		GetUseFIPSEndpoint() FIPSEndpointState
+	}
+	for _, option := range options {
+		if i, ok := option.(iface); ok {
+			value = i.GetUseFIPSEndpoint()
+			found = true
+			break
+		}
+	}
+	return value, found
+}
+
+// Endpoint represents the endpoint a service client should make API operation
+// calls to.
+//
+// The SDK will automatically resolve these endpoints per API client using an
+// internal endpoint resolver. If you'd like to provide custom endpoint
+// resolving behavior you can implement the EndpointResolver interface.
+type Endpoint struct {
+	// The base URL endpoint the SDK API clients will use to make API calls to.
+	// The SDK will suffix URI path and query elements to this endpoint.
+	URL string
+
+	// Specifies if the endpoint's hostname can be modified by the SDK's API
+	// client.
+	//
+	// If the hostname is mutable the SDK API clients may modify any part of
+	// the hostname based on the requirements of the API (e.g.
adding or
+	// removing content from the hostname), such as the Amazon S3 API client
+	// prefixing the bucket name to the hostname, or changing the
+	// hostname service name component from "s3." to "s3-accesspoint.dualstack."
+	// for the dual-stack endpoint of an S3 Access Point resource.
+	//
+	// Care should be taken when providing a custom endpoint for an API. If the
+	// endpoint hostname is mutable, and the client cannot modify the endpoint
+	// correctly, the operation call will most likely fail, or have undefined
+	// behavior.
+	//
+	// If hostname is immutable, the SDK API clients will not modify the
+	// hostname of the URL. This may cause the API client not to function
+	// correctly if the API requires the operation-specific hostname values
+	// to be used by the client.
+	//
+	// This flag does not modify the API client's behavior if this endpoint
+	// will be used instead of Endpoint Discovery, or if the endpoint will be
+	// used to perform Endpoint Discovery. That behavior is configured via the
+	// API Client's Options.
+	HostnameImmutable bool
+
+	// The AWS partition the endpoint belongs to.
+	PartitionID string
+
+	// The service name that should be used for signing the requests to the
+	// endpoint.
+	SigningName string
+
+	// The region that should be used for signing the request to the endpoint.
+	SigningRegion string
+
+	// The signing method that should be used for signing the requests to the
+	// endpoint.
+	SigningMethod string
+
+	// The source of the Endpoint. By default, this will be EndpointSourceServiceMetadata.
+	// When providing a custom endpoint, you should set the source as EndpointSourceCustom.
+	// If source is not provided when providing a custom endpoint, the SDK may not
+	// perform required host mutations correctly. Source should be used along with
+	// the HostnameImmutable property as per the usage requirement.
+	Source EndpointSource
+}
+
+// EndpointSource is the endpoint source type.
+type EndpointSource int
+
+const (
+	// EndpointSourceServiceMetadata denotes service modeled endpoint metadata is used as Endpoint Source.
+	EndpointSourceServiceMetadata EndpointSource = iota
+
+	// EndpointSourceCustom denotes endpoint is a custom endpoint. This source should be used when
+	// the user provides a custom endpoint to be used by the SDK.
+	EndpointSourceCustom
+)
+
+// EndpointNotFoundError is a sentinel error to indicate that the
+// EndpointResolver implementation was unable to resolve an endpoint for the
+// given service and region. Resolvers should use this to indicate that an API
+// client should fall back and attempt to use its internal default resolver to
+// resolve the endpoint.
+type EndpointNotFoundError struct {
+	Err error
+}
+
+// Error is the error message.
+func (e *EndpointNotFoundError) Error() string {
+	return fmt.Sprintf("endpoint not found, %v", e.Err)
+}
+
+// Unwrap returns the underlying error.
+func (e *EndpointNotFoundError) Unwrap() error {
+	return e.Err
+}
+
+// EndpointResolver is an endpoint resolver that can be used to provide or
+// override an endpoint for the given service and region. API clients will
+// attempt to use the EndpointResolver first to resolve an endpoint if
+// available. If the EndpointResolver returns an EndpointNotFoundError error,
+// API clients will fall back to attempting to resolve the endpoint using its
+// internal default endpoint resolver.
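+//
+// A minimal sketch using the non-deprecated EndpointResolverWithOptionsFunc
+// defined below (the URL is a placeholder):
+//
+//	resolver := aws.EndpointResolverWithOptionsFunc(
+//		func(service, region string, options ...interface{}) (aws.Endpoint, error) {
+//			if service == "S3" {
+//				return aws.Endpoint{
+//					URL:    "https://storage.example.internal",
+//					Source: aws.EndpointSourceCustom,
+//				}, nil
+//			}
+//			// Returning EndpointNotFoundError falls back to default resolution.
+//			return aws.Endpoint{}, &aws.EndpointNotFoundError{}
+//		})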
+//
+// Deprecated: See EndpointResolverWithOptions
+type EndpointResolver interface {
+	ResolveEndpoint(service, region string) (Endpoint, error)
+}
+
+// EndpointResolverFunc wraps a function to satisfy the EndpointResolver interface.
+//
+// Deprecated: See EndpointResolverWithOptionsFunc
+type EndpointResolverFunc func(service, region string) (Endpoint, error)
+
+// ResolveEndpoint calls the wrapped function and returns the results.
+//
+// Deprecated: See EndpointResolverWithOptions.ResolveEndpoint
+func (e EndpointResolverFunc) ResolveEndpoint(service, region string) (Endpoint, error) {
+	return e(service, region)
+}
+
+// EndpointResolverWithOptions is an endpoint resolver that can be used to provide or
+// override an endpoint for the given service, region, and the service client's EndpointOptions. API clients will
+// attempt to use the EndpointResolverWithOptions first to resolve an endpoint if
+// available. If the EndpointResolverWithOptions returns an EndpointNotFoundError error,
+// API clients will fall back to attempting to resolve the endpoint using its
+// internal default endpoint resolver.
+type EndpointResolverWithOptions interface {
+	ResolveEndpoint(service, region string, options ...interface{}) (Endpoint, error)
+}
+
+// EndpointResolverWithOptionsFunc wraps a function to satisfy the EndpointResolverWithOptions interface.
+type EndpointResolverWithOptionsFunc func(service, region string, options ...interface{}) (Endpoint, error)
+
+// ResolveEndpoint calls the wrapped function and returns the results.
+func (e EndpointResolverWithOptionsFunc) ResolveEndpoint(service, region string, options ...interface{}) (Endpoint, error) {
+	return e(service, region, options...)
+}
+
+// GetDisableHTTPS takes a service's EndpointResolverOptions and returns the DisableHTTPS value.
+// Returns boolean false if the provided options do not have a method to retrieve the DisableHTTPS value.
+func GetDisableHTTPS(options ...interface{}) (value bool, found bool) {
+	type iface interface {
+		GetDisableHTTPS() bool
+	}
+	for _, option := range options {
+		if i, ok := option.(iface); ok {
+			value = i.GetDisableHTTPS()
+			found = true
+			break
+		}
+	}
+	return value, found
+}
+
+// GetResolvedRegion takes a service's EndpointResolverOptions and returns the ResolvedRegion value.
+// Returns boolean false if the provided options do not have a method to retrieve the ResolvedRegion value.
+func GetResolvedRegion(options ...interface{}) (value string, found bool) {
+	type iface interface {
+		GetResolvedRegion() string
+	}
+	for _, option := range options {
+		if i, ok := option.(iface); ok {
+			value = i.GetResolvedRegion()
+			found = true
+			break
+		}
+	}
+	return value, found
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/errors.go
new file mode 100644
index 00000000000..f390a08f9ff
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/errors.go
@@ -0,0 +1,9 @@
+package aws
+
+// MissingRegionError is an error that is returned if a region configuration
+// value was not found.
+type MissingRegionError struct{}
+
+func (*MissingRegionError) Error() string {
+	return "an AWS region is required, but was not found"
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/from_ptr.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/from_ptr.go
new file mode 100644
index 00000000000..2394418e9bd
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/from_ptr.go
@@ -0,0 +1,365 @@
+// Code generated by aws/generate.go DO NOT EDIT.
+ +package aws + +import ( + "github.com/aws/smithy-go/ptr" + "time" +) + +// ToBool returns bool value dereferenced if the passed +// in pointer was not nil. Returns a bool zero value if the +// pointer was nil. +func ToBool(p *bool) (v bool) { + return ptr.ToBool(p) +} + +// ToBoolSlice returns a slice of bool values, that are +// dereferenced if the passed in pointer was not nil. Returns a bool +// zero value if the pointer was nil. +func ToBoolSlice(vs []*bool) []bool { + return ptr.ToBoolSlice(vs) +} + +// ToBoolMap returns a map of bool values, that are +// dereferenced if the passed in pointer was not nil. The bool +// zero value is used if the pointer was nil. +func ToBoolMap(vs map[string]*bool) map[string]bool { + return ptr.ToBoolMap(vs) +} + +// ToByte returns byte value dereferenced if the passed +// in pointer was not nil. Returns a byte zero value if the +// pointer was nil. +func ToByte(p *byte) (v byte) { + return ptr.ToByte(p) +} + +// ToByteSlice returns a slice of byte values, that are +// dereferenced if the passed in pointer was not nil. Returns a byte +// zero value if the pointer was nil. +func ToByteSlice(vs []*byte) []byte { + return ptr.ToByteSlice(vs) +} + +// ToByteMap returns a map of byte values, that are +// dereferenced if the passed in pointer was not nil. The byte +// zero value is used if the pointer was nil. +func ToByteMap(vs map[string]*byte) map[string]byte { + return ptr.ToByteMap(vs) +} + +// ToString returns string value dereferenced if the passed +// in pointer was not nil. Returns a string zero value if the +// pointer was nil. +func ToString(p *string) (v string) { + return ptr.ToString(p) +} + +// ToStringSlice returns a slice of string values, that are +// dereferenced if the passed in pointer was not nil. Returns a string +// zero value if the pointer was nil. +func ToStringSlice(vs []*string) []string { + return ptr.ToStringSlice(vs) +} + +// ToStringMap returns a map of string values, that are +// dereferenced if the passed in pointer was not nil. The string +// zero value is used if the pointer was nil. +func ToStringMap(vs map[string]*string) map[string]string { + return ptr.ToStringMap(vs) +} + +// ToInt returns int value dereferenced if the passed +// in pointer was not nil. Returns a int zero value if the +// pointer was nil. +func ToInt(p *int) (v int) { + return ptr.ToInt(p) +} + +// ToIntSlice returns a slice of int values, that are +// dereferenced if the passed in pointer was not nil. Returns a int +// zero value if the pointer was nil. +func ToIntSlice(vs []*int) []int { + return ptr.ToIntSlice(vs) +} + +// ToIntMap returns a map of int values, that are +// dereferenced if the passed in pointer was not nil. The int +// zero value is used if the pointer was nil. +func ToIntMap(vs map[string]*int) map[string]int { + return ptr.ToIntMap(vs) +} + +// ToInt8 returns int8 value dereferenced if the passed +// in pointer was not nil. Returns a int8 zero value if the +// pointer was nil. +func ToInt8(p *int8) (v int8) { + return ptr.ToInt8(p) +} + +// ToInt8Slice returns a slice of int8 values, that are +// dereferenced if the passed in pointer was not nil. Returns a int8 +// zero value if the pointer was nil. +func ToInt8Slice(vs []*int8) []int8 { + return ptr.ToInt8Slice(vs) +} + +// ToInt8Map returns a map of int8 values, that are +// dereferenced if the passed in pointer was not nil. The int8 +// zero value is used if the pointer was nil. 
+func ToInt8Map(vs map[string]*int8) map[string]int8 { + return ptr.ToInt8Map(vs) +} + +// ToInt16 returns int16 value dereferenced if the passed +// in pointer was not nil. Returns a int16 zero value if the +// pointer was nil. +func ToInt16(p *int16) (v int16) { + return ptr.ToInt16(p) +} + +// ToInt16Slice returns a slice of int16 values, that are +// dereferenced if the passed in pointer was not nil. Returns a int16 +// zero value if the pointer was nil. +func ToInt16Slice(vs []*int16) []int16 { + return ptr.ToInt16Slice(vs) +} + +// ToInt16Map returns a map of int16 values, that are +// dereferenced if the passed in pointer was not nil. The int16 +// zero value is used if the pointer was nil. +func ToInt16Map(vs map[string]*int16) map[string]int16 { + return ptr.ToInt16Map(vs) +} + +// ToInt32 returns int32 value dereferenced if the passed +// in pointer was not nil. Returns a int32 zero value if the +// pointer was nil. +func ToInt32(p *int32) (v int32) { + return ptr.ToInt32(p) +} + +// ToInt32Slice returns a slice of int32 values, that are +// dereferenced if the passed in pointer was not nil. Returns a int32 +// zero value if the pointer was nil. +func ToInt32Slice(vs []*int32) []int32 { + return ptr.ToInt32Slice(vs) +} + +// ToInt32Map returns a map of int32 values, that are +// dereferenced if the passed in pointer was not nil. The int32 +// zero value is used if the pointer was nil. +func ToInt32Map(vs map[string]*int32) map[string]int32 { + return ptr.ToInt32Map(vs) +} + +// ToInt64 returns int64 value dereferenced if the passed +// in pointer was not nil. Returns a int64 zero value if the +// pointer was nil. +func ToInt64(p *int64) (v int64) { + return ptr.ToInt64(p) +} + +// ToInt64Slice returns a slice of int64 values, that are +// dereferenced if the passed in pointer was not nil. Returns a int64 +// zero value if the pointer was nil. +func ToInt64Slice(vs []*int64) []int64 { + return ptr.ToInt64Slice(vs) +} + +// ToInt64Map returns a map of int64 values, that are +// dereferenced if the passed in pointer was not nil. The int64 +// zero value is used if the pointer was nil. +func ToInt64Map(vs map[string]*int64) map[string]int64 { + return ptr.ToInt64Map(vs) +} + +// ToUint returns uint value dereferenced if the passed +// in pointer was not nil. Returns a uint zero value if the +// pointer was nil. +func ToUint(p *uint) (v uint) { + return ptr.ToUint(p) +} + +// ToUintSlice returns a slice of uint values, that are +// dereferenced if the passed in pointer was not nil. Returns a uint +// zero value if the pointer was nil. +func ToUintSlice(vs []*uint) []uint { + return ptr.ToUintSlice(vs) +} + +// ToUintMap returns a map of uint values, that are +// dereferenced if the passed in pointer was not nil. The uint +// zero value is used if the pointer was nil. +func ToUintMap(vs map[string]*uint) map[string]uint { + return ptr.ToUintMap(vs) +} + +// ToUint8 returns uint8 value dereferenced if the passed +// in pointer was not nil. Returns a uint8 zero value if the +// pointer was nil. +func ToUint8(p *uint8) (v uint8) { + return ptr.ToUint8(p) +} + +// ToUint8Slice returns a slice of uint8 values, that are +// dereferenced if the passed in pointer was not nil. Returns a uint8 +// zero value if the pointer was nil. +func ToUint8Slice(vs []*uint8) []uint8 { + return ptr.ToUint8Slice(vs) +} + +// ToUint8Map returns a map of uint8 values, that are +// dereferenced if the passed in pointer was not nil. The uint8 +// zero value is used if the pointer was nil. 
+func ToUint8Map(vs map[string]*uint8) map[string]uint8 { + return ptr.ToUint8Map(vs) +} + +// ToUint16 returns uint16 value dereferenced if the passed +// in pointer was not nil. Returns a uint16 zero value if the +// pointer was nil. +func ToUint16(p *uint16) (v uint16) { + return ptr.ToUint16(p) +} + +// ToUint16Slice returns a slice of uint16 values, that are +// dereferenced if the passed in pointer was not nil. Returns a uint16 +// zero value if the pointer was nil. +func ToUint16Slice(vs []*uint16) []uint16 { + return ptr.ToUint16Slice(vs) +} + +// ToUint16Map returns a map of uint16 values, that are +// dereferenced if the passed in pointer was not nil. The uint16 +// zero value is used if the pointer was nil. +func ToUint16Map(vs map[string]*uint16) map[string]uint16 { + return ptr.ToUint16Map(vs) +} + +// ToUint32 returns uint32 value dereferenced if the passed +// in pointer was not nil. Returns a uint32 zero value if the +// pointer was nil. +func ToUint32(p *uint32) (v uint32) { + return ptr.ToUint32(p) +} + +// ToUint32Slice returns a slice of uint32 values, that are +// dereferenced if the passed in pointer was not nil. Returns a uint32 +// zero value if the pointer was nil. +func ToUint32Slice(vs []*uint32) []uint32 { + return ptr.ToUint32Slice(vs) +} + +// ToUint32Map returns a map of uint32 values, that are +// dereferenced if the passed in pointer was not nil. The uint32 +// zero value is used if the pointer was nil. +func ToUint32Map(vs map[string]*uint32) map[string]uint32 { + return ptr.ToUint32Map(vs) +} + +// ToUint64 returns uint64 value dereferenced if the passed +// in pointer was not nil. Returns a uint64 zero value if the +// pointer was nil. +func ToUint64(p *uint64) (v uint64) { + return ptr.ToUint64(p) +} + +// ToUint64Slice returns a slice of uint64 values, that are +// dereferenced if the passed in pointer was not nil. Returns a uint64 +// zero value if the pointer was nil. +func ToUint64Slice(vs []*uint64) []uint64 { + return ptr.ToUint64Slice(vs) +} + +// ToUint64Map returns a map of uint64 values, that are +// dereferenced if the passed in pointer was not nil. The uint64 +// zero value is used if the pointer was nil. +func ToUint64Map(vs map[string]*uint64) map[string]uint64 { + return ptr.ToUint64Map(vs) +} + +// ToFloat32 returns float32 value dereferenced if the passed +// in pointer was not nil. Returns a float32 zero value if the +// pointer was nil. +func ToFloat32(p *float32) (v float32) { + return ptr.ToFloat32(p) +} + +// ToFloat32Slice returns a slice of float32 values, that are +// dereferenced if the passed in pointer was not nil. Returns a float32 +// zero value if the pointer was nil. +func ToFloat32Slice(vs []*float32) []float32 { + return ptr.ToFloat32Slice(vs) +} + +// ToFloat32Map returns a map of float32 values, that are +// dereferenced if the passed in pointer was not nil. The float32 +// zero value is used if the pointer was nil. +func ToFloat32Map(vs map[string]*float32) map[string]float32 { + return ptr.ToFloat32Map(vs) +} + +// ToFloat64 returns float64 value dereferenced if the passed +// in pointer was not nil. Returns a float64 zero value if the +// pointer was nil. +func ToFloat64(p *float64) (v float64) { + return ptr.ToFloat64(p) +} + +// ToFloat64Slice returns a slice of float64 values, that are +// dereferenced if the passed in pointer was not nil. Returns a float64 +// zero value if the pointer was nil. 
+func ToFloat64Slice(vs []*float64) []float64 { + return ptr.ToFloat64Slice(vs) +} + +// ToFloat64Map returns a map of float64 values, that are +// dereferenced if the passed in pointer was not nil. The float64 +// zero value is used if the pointer was nil. +func ToFloat64Map(vs map[string]*float64) map[string]float64 { + return ptr.ToFloat64Map(vs) +} + +// ToTime returns time.Time value dereferenced if the passed +// in pointer was not nil. Returns a time.Time zero value if the +// pointer was nil. +func ToTime(p *time.Time) (v time.Time) { + return ptr.ToTime(p) +} + +// ToTimeSlice returns a slice of time.Time values, that are +// dereferenced if the passed in pointer was not nil. Returns a time.Time +// zero value if the pointer was nil. +func ToTimeSlice(vs []*time.Time) []time.Time { + return ptr.ToTimeSlice(vs) +} + +// ToTimeMap returns a map of time.Time values, that are +// dereferenced if the passed in pointer was not nil. The time.Time +// zero value is used if the pointer was nil. +func ToTimeMap(vs map[string]*time.Time) map[string]time.Time { + return ptr.ToTimeMap(vs) +} + +// ToDuration returns time.Duration value dereferenced if the passed +// in pointer was not nil. Returns a time.Duration zero value if the +// pointer was nil. +func ToDuration(p *time.Duration) (v time.Duration) { + return ptr.ToDuration(p) +} + +// ToDurationSlice returns a slice of time.Duration values, that are +// dereferenced if the passed in pointer was not nil. Returns a time.Duration +// zero value if the pointer was nil. +func ToDurationSlice(vs []*time.Duration) []time.Duration { + return ptr.ToDurationSlice(vs) +} + +// ToDurationMap returns a map of time.Duration values, that are +// dereferenced if the passed in pointer was not nil. The time.Duration +// zero value is used if the pointer was nil. +func ToDurationMap(vs map[string]*time.Duration) map[string]time.Duration { + return ptr.ToDurationMap(vs) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go new file mode 100644 index 00000000000..a88bb2f7588 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go @@ -0,0 +1,6 @@ +// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. + +package aws + +// goModuleVersion is the tagged release for this module +const goModuleVersion = "1.22.2" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/logging.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/logging.go new file mode 100644 index 00000000000..91c94d987b1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/logging.go @@ -0,0 +1,119 @@ +// Code generated by aws/logging_generate.go DO NOT EDIT. + +package aws + +// ClientLogMode represents the logging mode of SDK clients. The client logging mode is a bit-field where +// each bit is a flag that describes the logging behavior for one or more client components. +// The entire 64-bit group is reserved for later expansion by the SDK. +// +// Example: Setting ClientLogMode to enable logging of retries and requests +// +// clientLogMode := aws.LogRetries | aws.LogRequest +// +// Example: Adding an additional log mode to an existing ClientLogMode value +// +// clientLogMode |= aws.LogResponse +type ClientLogMode uint64 + +// Supported ClientLogMode bits that can be configured to toggle logging of specific SDK events. 
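+//
+// A minimal sketch of clearing a previously enabled mode bit:
+//
+//	mode := aws.LogRequestWithBody | aws.LogResponseWithBody
+//	mode.ClearResponseWithBody()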
+const ( + LogSigning ClientLogMode = 1 << (64 - 1 - iota) + LogRetries + LogRequest + LogRequestWithBody + LogResponse + LogResponseWithBody + LogDeprecatedUsage + LogRequestEventMessage + LogResponseEventMessage +) + +// IsSigning returns whether the Signing logging mode bit is set +func (m ClientLogMode) IsSigning() bool { + return m&LogSigning != 0 +} + +// IsRetries returns whether the Retries logging mode bit is set +func (m ClientLogMode) IsRetries() bool { + return m&LogRetries != 0 +} + +// IsRequest returns whether the Request logging mode bit is set +func (m ClientLogMode) IsRequest() bool { + return m&LogRequest != 0 +} + +// IsRequestWithBody returns whether the RequestWithBody logging mode bit is set +func (m ClientLogMode) IsRequestWithBody() bool { + return m&LogRequestWithBody != 0 +} + +// IsResponse returns whether the Response logging mode bit is set +func (m ClientLogMode) IsResponse() bool { + return m&LogResponse != 0 +} + +// IsResponseWithBody returns whether the ResponseWithBody logging mode bit is set +func (m ClientLogMode) IsResponseWithBody() bool { + return m&LogResponseWithBody != 0 +} + +// IsDeprecatedUsage returns whether the DeprecatedUsage logging mode bit is set +func (m ClientLogMode) IsDeprecatedUsage() bool { + return m&LogDeprecatedUsage != 0 +} + +// IsRequestEventMessage returns whether the RequestEventMessage logging mode bit is set +func (m ClientLogMode) IsRequestEventMessage() bool { + return m&LogRequestEventMessage != 0 +} + +// IsResponseEventMessage returns whether the ResponseEventMessage logging mode bit is set +func (m ClientLogMode) IsResponseEventMessage() bool { + return m&LogResponseEventMessage != 0 +} + +// ClearSigning clears the Signing logging mode bit +func (m *ClientLogMode) ClearSigning() { + *m &^= LogSigning +} + +// ClearRetries clears the Retries logging mode bit +func (m *ClientLogMode) ClearRetries() { + *m &^= LogRetries +} + +// ClearRequest clears the Request logging mode bit +func (m *ClientLogMode) ClearRequest() { + *m &^= LogRequest +} + +// ClearRequestWithBody clears the RequestWithBody logging mode bit +func (m *ClientLogMode) ClearRequestWithBody() { + *m &^= LogRequestWithBody +} + +// ClearResponse clears the Response logging mode bit +func (m *ClientLogMode) ClearResponse() { + *m &^= LogResponse +} + +// ClearResponseWithBody clears the ResponseWithBody logging mode bit +func (m *ClientLogMode) ClearResponseWithBody() { + *m &^= LogResponseWithBody +} + +// ClearDeprecatedUsage clears the DeprecatedUsage logging mode bit +func (m *ClientLogMode) ClearDeprecatedUsage() { + *m &^= LogDeprecatedUsage +} + +// ClearRequestEventMessage clears the RequestEventMessage logging mode bit +func (m *ClientLogMode) ClearRequestEventMessage() { + *m &^= LogRequestEventMessage +} + +// ClearResponseEventMessage clears the ResponseEventMessage logging mode bit +func (m *ClientLogMode) ClearResponseEventMessage() { + *m &^= LogResponseEventMessage +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/logging_generate.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/logging_generate.go new file mode 100644 index 00000000000..6ecc2231a12 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/logging_generate.go @@ -0,0 +1,95 @@ +//go:build clientlogmode +// +build clientlogmode + +package main + +import ( + "fmt" + "log" + "os" + "strings" + "text/template" +) + +var config = struct { + ModeBits []string +}{ + // Items should be appended only to keep bit-flag positions stable + ModeBits: []string{ + "Signing", + 
"Retries", + "Request", + "RequestWithBody", + "Response", + "ResponseWithBody", + "DeprecatedUsage", + "RequestEventMessage", + "ResponseEventMessage", + }, +} + +func bitName(name string) string { + return strings.ToUpper(name[:1]) + name[1:] +} + +var tmpl = template.Must(template.New("ClientLogMode").Funcs(map[string]interface{}{ + "symbolName": func(name string) string { + return "Log" + bitName(name) + }, + "bitName": bitName, +}).Parse(`// Code generated by aws/logging_generate.go DO NOT EDIT. + +package aws + +// ClientLogMode represents the logging mode of SDK clients. The client logging mode is a bit-field where +// each bit is a flag that describes the logging behavior for one or more client components. +// The entire 64-bit group is reserved for later expansion by the SDK. +// +// Example: Setting ClientLogMode to enable logging of retries and requests +// clientLogMode := aws.LogRetries | aws.LogRequest +// +// Example: Adding an additional log mode to an existing ClientLogMode value +// clientLogMode |= aws.LogResponse +type ClientLogMode uint64 + +// Supported ClientLogMode bits that can be configured to toggle logging of specific SDK events. +const ( +{{- range $index, $field := .ModeBits }} + {{ (symbolName $field) }}{{- if (eq 0 $index) }} ClientLogMode = 1 << (64 - 1 - iota){{- end }} +{{- end }} +) +{{ range $_, $field := .ModeBits }} +// Is{{- bitName $field }} returns whether the {{ bitName $field }} logging mode bit is set +func (m ClientLogMode) Is{{- bitName $field }}() bool { + return m&{{- (symbolName $field) }} != 0 +} +{{ end }} +{{- range $_, $field := .ModeBits }} +// Clear{{- bitName $field }} clears the {{ bitName $field }} logging mode bit +func (m *ClientLogMode) Clear{{- bitName $field }}() { + *m &^= {{ (symbolName $field) }} +} +{{ end -}} +`)) + +func main() { + uniqueBitFields := make(map[string]struct{}) + + for _, bitName := range config.ModeBits { + if _, ok := uniqueBitFields[strings.ToLower(bitName)]; ok { + panic(fmt.Sprintf("duplicate bit field: %s", bitName)) + } + uniqueBitFields[bitName] = struct{}{} + } + + file, err := os.Create("logging.go") + if err != nil { + log.Fatal(err) + } + defer file.Close() + + err = tmpl.Execute(file, config) + if err != nil { + log.Fatal(err) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/metadata.go new file mode 100644 index 00000000000..2de15528c93 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/metadata.go @@ -0,0 +1,201 @@ +package middleware + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + + "github.com/aws/smithy-go/middleware" +) + +// RegisterServiceMetadata registers metadata about the service and operation into the middleware context +// so that it is available at runtime for other middleware to introspect. +type RegisterServiceMetadata struct { + ServiceID string + SigningName string + Region string + OperationName string +} + +// ID returns the middleware identifier. +func (s *RegisterServiceMetadata) ID() string { + return "RegisterServiceMetadata" +} + +// HandleInitialize registers service metadata information into the middleware context, allowing for introspection. 
+func (s RegisterServiceMetadata) HandleInitialize( + ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, +) (out middleware.InitializeOutput, metadata middleware.Metadata, err error) { + if len(s.ServiceID) > 0 { + ctx = SetServiceID(ctx, s.ServiceID) + } + if len(s.SigningName) > 0 { + ctx = SetSigningName(ctx, s.SigningName) + } + if len(s.Region) > 0 { + ctx = setRegion(ctx, s.Region) + } + if len(s.OperationName) > 0 { + ctx = setOperationName(ctx, s.OperationName) + } + return next.HandleInitialize(ctx, in) +} + +// service metadata keys for storing and lookup of runtime stack information. +type ( + serviceIDKey struct{} + signingNameKey struct{} + signingRegionKey struct{} + regionKey struct{} + operationNameKey struct{} + partitionIDKey struct{} + requiresLegacyEndpointsKey struct{} +) + +// GetServiceID retrieves the service id from the context. +// +// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues +// to clear all stack values. +func GetServiceID(ctx context.Context) (v string) { + v, _ = middleware.GetStackValue(ctx, serviceIDKey{}).(string) + return v +} + +// GetSigningName retrieves the service signing name from the context. +// +// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues +// to clear all stack values. +func GetSigningName(ctx context.Context) (v string) { + v, _ = middleware.GetStackValue(ctx, signingNameKey{}).(string) + return v +} + +// GetSigningRegion retrieves the region from the context. +// +// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues +// to clear all stack values. +func GetSigningRegion(ctx context.Context) (v string) { + v, _ = middleware.GetStackValue(ctx, signingRegionKey{}).(string) + return v +} + +// GetRegion retrieves the endpoint region from the context. +// +// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues +// to clear all stack values. +func GetRegion(ctx context.Context) (v string) { + v, _ = middleware.GetStackValue(ctx, regionKey{}).(string) + return v +} + +// GetOperationName retrieves the service operation metadata from the context. +// +// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues +// to clear all stack values. +func GetOperationName(ctx context.Context) (v string) { + v, _ = middleware.GetStackValue(ctx, operationNameKey{}).(string) + return v +} + +// GetPartitionID retrieves the endpoint partition id from the context. +// +// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues +// to clear all stack values. +func GetPartitionID(ctx context.Context) string { + v, _ := middleware.GetStackValue(ctx, partitionIDKey{}).(string) + return v +} + +// GetRequiresLegacyEndpoints the flag used to indicate if legacy endpoint +// customizations need to be executed. +// +// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues +// to clear all stack values. +func GetRequiresLegacyEndpoints(ctx context.Context) bool { + v, _ := middleware.GetStackValue(ctx, requiresLegacyEndpointsKey{}).(bool) + return v +} + +// SetRequiresLegacyEndpoints set or modifies the flag indicated that +// legacy endpoint customizations are needed. +// +// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues +// to clear all stack values. 
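+//
+// For example (editorial sketch, not upstream documentation):
+//
+//	ctx = SetRequiresLegacyEndpoints(ctx, true)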
+func SetRequiresLegacyEndpoints(ctx context.Context, value bool) context.Context {
+	return middleware.WithStackValue(ctx, requiresLegacyEndpointsKey{}, value)
+}
+
+// SetSigningName sets or modifies the signing name on the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func SetSigningName(ctx context.Context, value string) context.Context {
+	return middleware.WithStackValue(ctx, signingNameKey{}, value)
+}
+
+// SetSigningRegion sets or modifies the region on the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func SetSigningRegion(ctx context.Context, value string) context.Context {
+	return middleware.WithStackValue(ctx, signingRegionKey{}, value)
+}
+
+// SetServiceID sets the service id on the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func SetServiceID(ctx context.Context, value string) context.Context {
+	return middleware.WithStackValue(ctx, serviceIDKey{}, value)
+}
+
+// setRegion sets the endpoint region on the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func setRegion(ctx context.Context, value string) context.Context {
+	return middleware.WithStackValue(ctx, regionKey{}, value)
+}
+
+// setOperationName sets the service operation on the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func setOperationName(ctx context.Context, value string) context.Context {
+	return middleware.WithStackValue(ctx, operationNameKey{}, value)
+}
+
+// SetPartitionID sets the partition id of a resolved region on the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func SetPartitionID(ctx context.Context, value string) context.Context {
+	return middleware.WithStackValue(ctx, partitionIDKey{}, value)
+}
+
+// EndpointSource key
+type endpointSourceKey struct{}
+
+// GetEndpointSource returns the endpoint source if set on the context.
+func GetEndpointSource(ctx context.Context) (v aws.EndpointSource) {
+	v, _ = middleware.GetStackValue(ctx, endpointSourceKey{}).(aws.EndpointSource)
+	return v
+}
+
+// SetEndpointSource sets the endpoint source on the context.
+func SetEndpointSource(ctx context.Context, value aws.EndpointSource) context.Context {
+	return middleware.WithStackValue(ctx, endpointSourceKey{}, value)
+}
+
+type signingCredentialsKey struct{}
+
+// GetSigningCredentials returns the credentials that were used for signing if set on the context.
+func GetSigningCredentials(ctx context.Context) (v aws.Credentials) {
+	v, _ = middleware.GetStackValue(ctx, signingCredentialsKey{}).(aws.Credentials)
+	return v
+}
+
+// SetSigningCredentials sets the credentials used for signing on the context.
+func SetSigningCredentials(ctx context.Context, value aws.Credentials) context.Context { + return middleware.WithStackValue(ctx, signingCredentialsKey{}, value) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/middleware.go new file mode 100644 index 00000000000..9bd0dfb1508 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/middleware.go @@ -0,0 +1,168 @@ +package middleware + +import ( + "context" + "fmt" + "time" + + "github.com/aws/aws-sdk-go-v2/internal/rand" + "github.com/aws/aws-sdk-go-v2/internal/sdk" + "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/middleware" + smithyrand "github.com/aws/smithy-go/rand" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// ClientRequestID is a Smithy BuildMiddleware that will generate a unique ID for logical API operation +// invocation. +type ClientRequestID struct{} + +// ID the identifier for the ClientRequestID +func (r *ClientRequestID) ID() string { + return "ClientRequestID" +} + +// HandleBuild attaches a unique operation invocation id for the operation to the request +func (r ClientRequestID) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", req) + } + + invocationID, err := smithyrand.NewUUID(rand.Reader).GetUUID() + if err != nil { + return out, metadata, err + } + + const invocationIDHeader = "Amz-Sdk-Invocation-Id" + req.Header[invocationIDHeader] = append(req.Header[invocationIDHeader][:0], invocationID) + + return next.HandleBuild(ctx, in) +} + +// RecordResponseTiming records the response timing for the SDK client requests. +type RecordResponseTiming struct{} + +// ID is the middleware identifier +func (a *RecordResponseTiming) ID() string { + return "RecordResponseTiming" +} + +// HandleDeserialize calculates response metadata and clock skew +func (a RecordResponseTiming) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + responseAt := sdk.NowTime() + setResponseAt(&metadata, responseAt) + + var serverTime time.Time + + switch resp := out.RawResponse.(type) { + case *smithyhttp.Response: + respDateHeader := resp.Header.Get("Date") + if len(respDateHeader) == 0 { + break + } + var parseErr error + serverTime, parseErr = smithyhttp.ParseTime(respDateHeader) + if parseErr != nil { + logger := middleware.GetLogger(ctx) + logger.Logf(logging.Warn, "failed to parse response Date header value, got %v", + parseErr.Error()) + break + } + setServerTime(&metadata, serverTime) + } + + if !serverTime.IsZero() { + attemptSkew := serverTime.Sub(responseAt) + setAttemptSkew(&metadata, attemptSkew) + } + + return out, metadata, err +} + +type responseAtKey struct{} + +// GetResponseAt returns the time response was received at. +func GetResponseAt(metadata middleware.Metadata) (v time.Time, ok bool) { + v, ok = metadata.Get(responseAtKey{}).(time.Time) + return v, ok +} + +// setResponseAt sets the response time on the metadata. 
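+//
+// Editorial sketch (hypothetical caller, not upstream code): once recorded,
+// the timing values can be read back from the operation's returned
+// middleware.Metadata, e.g.
+//
+//	if skew, ok := GetAttemptSkew(md); ok {
+//		_ = skew // approximately serverTime - responseAt for the attempt
+//	}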
+func setResponseAt(metadata *middleware.Metadata, v time.Time) {
+	metadata.Set(responseAtKey{}, v)
+}
+
+type serverTimeKey struct{}
+
+// GetServerTime returns the server time for the response.
+func GetServerTime(metadata middleware.Metadata) (v time.Time, ok bool) {
+	v, ok = metadata.Get(serverTimeKey{}).(time.Time)
+	return v, ok
+}
+
+// setServerTime sets the server time on the metadata.
+func setServerTime(metadata *middleware.Metadata, v time.Time) {
+	metadata.Set(serverTimeKey{}, v)
+}
+
+type attemptSkewKey struct{}
+
+// GetAttemptSkew returns the attempt clock skew for the response from metadata.
+func GetAttemptSkew(metadata middleware.Metadata) (v time.Duration, ok bool) {
+	v, ok = metadata.Get(attemptSkewKey{}).(time.Duration)
+	return v, ok
+}
+
+// setAttemptSkew sets the attempt clock skew on the metadata.
+func setAttemptSkew(metadata *middleware.Metadata, v time.Duration) {
+	metadata.Set(attemptSkewKey{}, v)
+}
+
+// AddClientRequestIDMiddleware adds ClientRequestID to the middleware stack.
+func AddClientRequestIDMiddleware(stack *middleware.Stack) error {
+	return stack.Build.Add(&ClientRequestID{}, middleware.After)
+}
+
+// AddRecordResponseTiming adds RecordResponseTiming middleware to the
+// middleware stack.
+func AddRecordResponseTiming(stack *middleware.Stack) error {
+	return stack.Deserialize.Add(&RecordResponseTiming{}, middleware.After)
+}
+
+// rawResponseKey is the accessor key used to store and access the
+// raw response within the response metadata.
+type rawResponseKey struct{}
+
+// addRawResponse middleware adds the raw response onto the metadata.
+type addRawResponse struct{}
+
+// ID is the identifier for the addRawResponse middleware.
+func (m *addRawResponse) ID() string {
+	return "AddRawResponseToMetadata"
+}
+
+// HandleDeserialize adds the raw response to the middleware metadata.
+func (m addRawResponse) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	metadata.Set(rawResponseKey{}, out.RawResponse)
+	return out, metadata, err
+}
+
+// AddRawResponseToMetadata adds middleware to the middleware stack that
+// stores the raw response onto the metadata.
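+//
+// Editorial sketch (not upstream code): a later consumer can recover the raw
+// transport response from the metadata, e.g.
+//
+//	if resp, ok := GetRawResponse(md).(*smithyhttp.Response); ok {
+//		_ = resp.StatusCode
+//	}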
+func AddRawResponseToMetadata(stack *middleware.Stack) error { + return stack.Deserialize.Add(&addRawResponse{}, middleware.Before) +} + +// GetRawResponse returns raw response set on metadata +func GetRawResponse(metadata middleware.Metadata) interface{} { + return metadata.Get(rawResponseKey{}) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/osname.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/osname.go new file mode 100644 index 00000000000..ba262dadcd0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/osname.go @@ -0,0 +1,24 @@ +//go:build go1.16 +// +build go1.16 + +package middleware + +import "runtime" + +func getNormalizedOSName() (os string) { + switch runtime.GOOS { + case "android": + os = "android" + case "linux": + os = "linux" + case "windows": + os = "windows" + case "darwin": + os = "macos" + case "ios": + os = "ios" + default: + os = "other" + } + return os +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/osname_go115.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/osname_go115.go new file mode 100644 index 00000000000..e14a1e4ecb9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/osname_go115.go @@ -0,0 +1,24 @@ +//go:build !go1.16 +// +build !go1.16 + +package middleware + +import "runtime" + +func getNormalizedOSName() (os string) { + switch runtime.GOOS { + case "android": + os = "android" + case "linux": + os = "linux" + case "windows": + os = "windows" + case "darwin": + // Due to Apple M1 we can't distinguish between macOS and iOS when GOOS/GOARCH is darwin/amd64 + // For now declare this as "other" until we have a better detection mechanism. + fallthrough + default: + os = "other" + } + return os +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/recursion_detection.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/recursion_detection.go new file mode 100644 index 00000000000..3f6aaf231e1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/recursion_detection.go @@ -0,0 +1,94 @@ +package middleware + +import ( + "context" + "fmt" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "os" +) + +const envAwsLambdaFunctionName = "AWS_LAMBDA_FUNCTION_NAME" +const envAmznTraceID = "_X_AMZN_TRACE_ID" +const amznTraceIDHeader = "X-Amzn-Trace-Id" + +// AddRecursionDetection adds recursionDetection to the middleware stack +func AddRecursionDetection(stack *middleware.Stack) error { + return stack.Build.Add(&RecursionDetection{}, middleware.After) +} + +// RecursionDetection detects Lambda environment and sets its X-Ray trace ID to request header if absent +// to avoid recursion invocation in Lambda +type RecursionDetection struct{} + +// ID returns the middleware identifier +func (m *RecursionDetection) ID() string { + return "RecursionDetection" +} + +// HandleBuild detects Lambda environment and adds its trace ID to request header if absent +func (m *RecursionDetection) HandleBuild( + ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, +) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown request type %T", req) + } + + _, hasLambdaEnv := os.LookupEnv(envAwsLambdaFunctionName) + xAmznTraceID, hasTraceID := os.LookupEnv(envAmznTraceID) + value := req.Header.Get(amznTraceIDHeader) + // only set the X-Amzn-Trace-Id header when it is not set 
initially, the + // current environment is Lambda and the _X_AMZN_TRACE_ID env variable exists + if value != "" || !hasLambdaEnv || !hasTraceID { + return next.HandleBuild(ctx, in) + } + + req.Header.Set(amznTraceIDHeader, percentEncode(xAmznTraceID)) + return next.HandleBuild(ctx, in) +} + +func percentEncode(s string) string { + upperhex := "0123456789ABCDEF" + hexCount := 0 + for i := 0; i < len(s); i++ { + c := s[i] + if shouldEncode(c) { + hexCount++ + } + } + + if hexCount == 0 { + return s + } + + required := len(s) + 2*hexCount + t := make([]byte, required) + j := 0 + for i := 0; i < len(s); i++ { + if c := s[i]; shouldEncode(c) { + t[j] = '%' + t[j+1] = upperhex[c>>4] + t[j+2] = upperhex[c&15] + j += 3 + } else { + t[j] = c + j++ + } + } + return string(t) +} + +func shouldEncode(c byte) bool { + if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' { + return false + } + switch c { + case '-', '=', ';', ':', '+', '&', '[', ']', '{', '}', '"', '\'', ',': + return false + default: + return true + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id.go new file mode 100644 index 00000000000..dd3391fe41e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id.go @@ -0,0 +1,27 @@ +package middleware + +import ( + "github.com/aws/smithy-go/middleware" +) + +// requestIDKey is used to retrieve request id from response metadata +type requestIDKey struct{} + +// SetRequestIDMetadata sets the provided request id over middleware metadata +func SetRequestIDMetadata(metadata *middleware.Metadata, id string) { + metadata.Set(requestIDKey{}, id) +} + +// GetRequestIDMetadata retrieves the request id from middleware metadata +// returns string and bool indicating value of request id, whether request id was set. +func GetRequestIDMetadata(metadata middleware.Metadata) (string, bool) { + if !metadata.Has(requestIDKey{}) { + return "", false + } + + v, ok := metadata.Get(requestIDKey{}).(string) + if !ok { + return "", true + } + return v, true +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id_retriever.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id_retriever.go new file mode 100644 index 00000000000..7ce48c611cd --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id_retriever.go @@ -0,0 +1,49 @@ +package middleware + +import ( + "context" + + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// AddRequestIDRetrieverMiddleware adds request id retriever middleware +func AddRequestIDRetrieverMiddleware(stack *middleware.Stack) error { + // add error wrapper middleware before operation deserializers so that it can wrap the error response + // returned by operation deserializers + return stack.Deserialize.Insert(&requestIDRetriever{}, "OperationDeserializer", middleware.Before) +} + +type requestIDRetriever struct { +} + +// ID returns the middleware identifier +func (m *requestIDRetriever) ID() string { + return "RequestIDRetriever" +} + +func (m *requestIDRetriever) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + + resp, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + // No raw response to wrap with. 
+ return out, metadata, err + } + + // Different header which can map to request id + requestIDHeaderList := []string{"X-Amzn-Requestid", "X-Amz-RequestId"} + + for _, h := range requestIDHeaderList { + // check for headers known to contain Request id + if v := resp.Header.Get(h); len(v) != 0 { + // set reqID on metadata for successful responses. + SetRequestIDMetadata(&metadata, v) + break + } + } + + return out, metadata, err +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go new file mode 100644 index 00000000000..af3447ddc98 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go @@ -0,0 +1,261 @@ +package middleware + +import ( + "context" + "fmt" + "os" + "runtime" + "strings" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +var languageVersion = strings.TrimPrefix(runtime.Version(), "go") + +// SDKAgentKeyType is the metadata type to add to the SDK agent string +type SDKAgentKeyType int + +// The set of valid SDKAgentKeyType constants. If an unknown value is assigned for SDKAgentKeyType it will +// be mapped to AdditionalMetadata. +const ( + _ SDKAgentKeyType = iota + APIMetadata + OperatingSystemMetadata + LanguageMetadata + EnvironmentMetadata + FeatureMetadata + ConfigMetadata + FrameworkMetadata + AdditionalMetadata + ApplicationIdentifier +) + +func (k SDKAgentKeyType) string() string { + switch k { + case APIMetadata: + return "api" + case OperatingSystemMetadata: + return "os" + case LanguageMetadata: + return "lang" + case EnvironmentMetadata: + return "exec-env" + case FeatureMetadata: + return "ft" + case ConfigMetadata: + return "cfg" + case FrameworkMetadata: + return "lib" + case ApplicationIdentifier: + return "app" + case AdditionalMetadata: + fallthrough + default: + return "md" + } +} + +const execEnvVar = `AWS_EXECUTION_ENV` + +var validChars = map[rune]bool{ + '!': true, '#': true, '$': true, '%': true, '&': true, '\'': true, '*': true, '+': true, + '-': true, '.': true, '^': true, '_': true, '`': true, '|': true, '~': true, +} + +// requestUserAgent is a build middleware that set the User-Agent for the request. +type requestUserAgent struct { + sdkAgent, userAgent *smithyhttp.UserAgentBuilder +} + +// newRequestUserAgent returns a new requestUserAgent which will set the User-Agent and X-Amz-User-Agent for the +// request. 
+// +// User-Agent example: +// +// aws-sdk-go-v2/1.2.3 +// +// X-Amz-User-Agent example: +// +// aws-sdk-go-v2/1.2.3 md/GOOS/linux md/GOARCH/amd64 lang/go/1.15 +func newRequestUserAgent() *requestUserAgent { + userAgent, sdkAgent := smithyhttp.NewUserAgentBuilder(), smithyhttp.NewUserAgentBuilder() + addProductName(userAgent) + addProductName(sdkAgent) + + r := &requestUserAgent{ + sdkAgent: sdkAgent, + userAgent: userAgent, + } + + addSDKMetadata(r) + + return r +} + +func addSDKMetadata(r *requestUserAgent) { + r.AddSDKAgentKey(OperatingSystemMetadata, getNormalizedOSName()) + r.AddSDKAgentKeyValue(LanguageMetadata, "go", languageVersion) + r.AddSDKAgentKeyValue(AdditionalMetadata, "GOOS", runtime.GOOS) + r.AddSDKAgentKeyValue(AdditionalMetadata, "GOARCH", runtime.GOARCH) + if ev := os.Getenv(execEnvVar); len(ev) > 0 { + r.AddSDKAgentKey(EnvironmentMetadata, ev) + } +} + +func addProductName(builder *smithyhttp.UserAgentBuilder) { + builder.AddKeyValue(aws.SDKName, aws.SDKVersion) +} + +// AddUserAgentKey retrieves a requestUserAgent from the provided stack, or initializes one. +func AddUserAgentKey(key string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + requestUserAgent, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + requestUserAgent.AddUserAgentKey(key) + return nil + } +} + +// AddUserAgentKeyValue retrieves a requestUserAgent from the provided stack, or initializes one. +func AddUserAgentKeyValue(key, value string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + requestUserAgent, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + requestUserAgent.AddUserAgentKeyValue(key, value) + return nil + } +} + +// AddSDKAgentKey retrieves a requestUserAgent from the provided stack, or initializes one. +func AddSDKAgentKey(keyType SDKAgentKeyType, key string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + requestUserAgent, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + requestUserAgent.AddSDKAgentKey(keyType, key) + return nil + } +} + +// AddSDKAgentKeyValue retrieves a requestUserAgent from the provided stack, or initializes one. +func AddSDKAgentKeyValue(keyType SDKAgentKeyType, key, value string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + requestUserAgent, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + requestUserAgent.AddSDKAgentKeyValue(keyType, key, value) + return nil + } +} + +// AddRequestUserAgentMiddleware registers a requestUserAgent middleware on the stack if not present. +func AddRequestUserAgentMiddleware(stack *middleware.Stack) error { + _, err := getOrAddRequestUserAgent(stack) + return err +} + +func getOrAddRequestUserAgent(stack *middleware.Stack) (*requestUserAgent, error) { + id := (*requestUserAgent)(nil).ID() + bm, ok := stack.Build.Get(id) + if !ok { + bm = newRequestUserAgent() + err := stack.Build.Add(bm, middleware.After) + if err != nil { + return nil, err + } + } + + requestUserAgent, ok := bm.(*requestUserAgent) + if !ok { + return nil, fmt.Errorf("%T for %s middleware did not match expected type", bm, id) + } + + return requestUserAgent, nil +} + +// AddUserAgentKey adds the component identified by name to the User-Agent string. 
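+//
+// Editorial sketch (hypothetical usage, not upstream code): applications
+// normally reach this through the exported stack option, e.g.
+//
+//	cfg.APIOptions = append(cfg.APIOptions, AddUserAgentKey("my-app"))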
+func (u *requestUserAgent) AddUserAgentKey(key string) {
+	u.userAgent.AddKey(strings.Map(rules, key))
+}
+
+// AddUserAgentKeyValue adds the key identified by the given name and value to the User-Agent string.
+func (u *requestUserAgent) AddUserAgentKeyValue(key, value string) {
+	u.userAgent.AddKeyValue(strings.Map(rules, key), strings.Map(rules, value))
+}
+
+// AddSDKAgentKey adds the component identified by name to the SDK agent string.
+func (u *requestUserAgent) AddSDKAgentKey(keyType SDKAgentKeyType, key string) {
+	// TODO: should target sdkAgent
+	u.userAgent.AddKey(keyType.string() + "/" + strings.Map(rules, key))
+}
+
+// AddSDKAgentKeyValue adds the key identified by the given name and value to the SDK agent string.
+func (u *requestUserAgent) AddSDKAgentKeyValue(keyType SDKAgentKeyType, key, value string) {
+	// TODO: should target sdkAgent
+	u.userAgent.AddKeyValue(keyType.string(), strings.Map(rules, key)+"#"+strings.Map(rules, value))
+}
+
+// ID the name of the middleware.
+func (u *requestUserAgent) ID() string {
+	return "UserAgent"
+}
+
+// HandleBuild adds or appends the constructed user agent to the request.
+func (u *requestUserAgent) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) (
+	out middleware.BuildOutput, metadata middleware.Metadata, err error,
+) {
+	switch req := in.Request.(type) {
+	case *smithyhttp.Request:
+		u.addHTTPUserAgent(req)
+		// TODO: To be re-enabled
+		// u.addHTTPSDKAgent(req)
+	default:
+		return out, metadata, fmt.Errorf("unknown transport type %T", in)
+	}
+
+	return next.HandleBuild(ctx, in)
+}
+
+func (u *requestUserAgent) addHTTPUserAgent(request *smithyhttp.Request) {
+	const userAgent = "User-Agent"
+	updateHTTPHeader(request, userAgent, u.userAgent.Build())
+}
+
+func (u *requestUserAgent) addHTTPSDKAgent(request *smithyhttp.Request) {
+	const sdkAgent = "X-Amz-User-Agent"
+	updateHTTPHeader(request, sdkAgent, u.sdkAgent.Build())
+}
+
+func updateHTTPHeader(request *smithyhttp.Request, header string, value string) {
+	var current string
+	if v := request.Header[header]; len(v) > 0 {
+		current = v[0]
+	}
+	if len(current) > 0 {
+		current = value + " " + current
+	} else {
+		current = value
+	}
+	request.Header[header] = append(request.Header[header][:0], current)
+}
+
+func rules(r rune) rune {
+	switch {
+	case r >= '0' && r <= '9':
+		return r
+	case r >= 'A' && r <= 'Z' || r >= 'a' && r <= 'z':
+		return r
+	case validChars[r]:
+		return r
+	default:
+		return '-'
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/array.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/array.go
new file mode 100644
index 00000000000..47ebc0f5476
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/array.go
@@ -0,0 +1,72 @@
+package query
+
+import (
+	"fmt"
+	"net/url"
+)
+
+// Array represents the encoding of Query lists and sets. A Query array is a
+// representation of a list of values of a fixed type. A serialized array might
+// look like the following:
+//
+//	ListName.member.1=foo
+//	&ListName.member.2=bar
+//	&ListName.member.3=baz
+type Array struct {
+	// The query values to add the array to.
+	values url.Values
+	// The array's prefix, which includes the names of all parent structures
+	// and ends with the name of the list. For example, the prefix might be
+	// "ParentStructure.ListName". This prefix will be used to form the full
+	// keys for each element in the list. For example, an entry might have the
+	// key "ParentStructure.ListName.member.MemberName.1".
+	//
+	// While this is currently represented as a string that gets added to, it
+	// could also be represented as a stack that only gets condensed into a
+	// string when a finalized key is created. This could potentially reduce
+	// allocations.
+	prefix string
+	// Whether the list is flat or not. A list that is not flat will produce the
+	// following entry to the url.Values for a given entry:
+	//     ListName.MemberName.1=value
+	// A list that is flat will produce the following:
+	//     ListName.1=value
+	flat bool
+	// The location name of the member. In most cases this should be "member".
+	memberName string
+	// Elements are stored in values, so we keep track of the list size here.
+	size int32
+	// Empty lists are encoded as "="; if we add a value later we will
+	// remove this encoding.
+	emptyValue Value
+}
+
+func newArray(values url.Values, prefix string, flat bool, memberName string) *Array {
+	emptyValue := newValue(values, prefix, flat)
+	emptyValue.String("")
+
+	return &Array{
+		values:     values,
+		prefix:     prefix,
+		flat:       flat,
+		memberName: memberName,
+		emptyValue: emptyValue,
+	}
+}
+
+// Value adds a new element to the Query Array. Returns a Value type used to
+// encode the array element.
+func (a *Array) Value() Value {
+	if a.size == 0 {
+		delete(a.values, a.emptyValue.key)
+	}
+
+	// Query lists start at 1, so adjust the size first
+	a.size++
+	prefix := a.prefix
+	if !a.flat {
+		prefix = fmt.Sprintf("%s.%s", prefix, a.memberName)
+	}
+	// Lists can't have flat members
+	return newValue(a.values, fmt.Sprintf("%s.%d", prefix, a.size), false)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/encoder.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/encoder.go
new file mode 100644
index 00000000000..2ecf9241cdd
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/encoder.go
@@ -0,0 +1,80 @@
+package query
+
+import (
+	"io"
+	"net/url"
+	"sort"
+)
+
+// Encoder is a Query encoder that supports construction of Query body
+// values using methods.
+type Encoder struct {
+	// The query values that will be built up to manage encoding.
+	values url.Values
+	// The writer that the encoded body will be written to.
+	writer io.Writer
+	Value
+}
+
+// NewEncoder returns a new Query body encoder.
+func NewEncoder(writer io.Writer) *Encoder {
+	values := url.Values{}
+	return &Encoder{
+		values: values,
+		writer: writer,
+		Value:  newBaseValue(values),
+	}
+}
+
+// Encode writes the sorted, URL-escaped key/value pairs that represent the
+// current state of the Query encoder to the underlying writer.
+func (e Encoder) Encode() error {
+	ws, ok := e.writer.(interface{ WriteString(string) (int, error) })
+	if !ok {
+		// Fall back to less optimal byte slice casting if WriteString isn't available.
+ ws = &wrapWriteString{writer: e.writer} + } + + // Get the keys and sort them to have a stable output + keys := make([]string, 0, len(e.values)) + for k := range e.values { + keys = append(keys, k) + } + sort.Strings(keys) + isFirstEntry := true + for _, key := range keys { + queryValues := e.values[key] + escapedKey := url.QueryEscape(key) + for _, value := range queryValues { + if !isFirstEntry { + if _, err := ws.WriteString(`&`); err != nil { + return err + } + } else { + isFirstEntry = false + } + if _, err := ws.WriteString(escapedKey); err != nil { + return err + } + if _, err := ws.WriteString(`=`); err != nil { + return err + } + if _, err := ws.WriteString(url.QueryEscape(value)); err != nil { + return err + } + } + } + return nil +} + +// wrapWriteString wraps an io.Writer to provide a WriteString method +// where one is not available. +type wrapWriteString struct { + writer io.Writer +} + +// WriteString writes a string to the wrapped writer by casting it to +// a byte array first. +func (w wrapWriteString) WriteString(v string) (int, error) { + return w.writer.Write([]byte(v)) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/map.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/map.go new file mode 100644 index 00000000000..dea242b8b6d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/map.go @@ -0,0 +1,78 @@ +package query + +import ( + "fmt" + "net/url" +) + +// Map represents the encoding of Query maps. A Query map is a representation +// of a mapping of arbitrary string keys to arbitrary values of a fixed type. +// A Map differs from an Object in that the set of keys is not fixed, in that +// the values must all be of the same type, and that map entries are ordered. +// A serialized map might look like the following: +// +// MapName.entry.1.key=Foo +// &MapName.entry.1.value=spam +// &MapName.entry.2.key=Bar +// &MapName.entry.2.value=eggs +type Map struct { + // The query values to add the map to. + values url.Values + // The map's prefix, which includes the names of all parent structures + // and ends with the name of the object. For example, the prefix might be + // "ParentStructure.MapName". This prefix will be used to form the full + // keys for each key-value pair of the map. For example, a value might have + // the key "ParentStructure.MapName.1.value". + // + // While this is currently represented as a string that gets added to, it + // could also be represented as a stack that only gets condensed into a + // string when a finalized key is created. This could potentially reduce + // allocations. + prefix string + // Whether the map is flat or not. A map that is not flat will produce the + // following entries to the url.Values for a given key-value pair: + // MapName.entry.1.KeyLocationName=mykey + // MapName.entry.1.ValueLocationName=myvalue + // A map that is flat will produce the following: + // MapName.1.KeyLocationName=mykey + // MapName.1.ValueLocationName=myvalue + flat bool + // The location name of the key. In most cases this should be "key". + keyLocationName string + // The location name of the value. In most cases this should be "value". + valueLocationName string + // Elements are stored in values, so we keep track of the list size here. 
+	size int32
+}
+
+func newMap(values url.Values, prefix string, flat bool, keyLocationName string, valueLocationName string) *Map {
+	return &Map{
+		values:            values,
+		prefix:            prefix,
+		flat:              flat,
+		keyLocationName:   keyLocationName,
+		valueLocationName: valueLocationName,
+	}
+}
+
+// Key adds the given named key to the Query map.
+// Returns a Value encoder that should be used to encode a Query value type.
+func (m *Map) Key(name string) Value {
+	// Query map entries start at 1, so adjust the size first
+	m.size++
+	var key string
+	var value string
+	if m.flat {
+		key = fmt.Sprintf("%s.%d.%s", m.prefix, m.size, m.keyLocationName)
+		value = fmt.Sprintf("%s.%d.%s", m.prefix, m.size, m.valueLocationName)
+	} else {
+		key = fmt.Sprintf("%s.entry.%d.%s", m.prefix, m.size, m.keyLocationName)
+		value = fmt.Sprintf("%s.entry.%d.%s", m.prefix, m.size, m.valueLocationName)
+	}
+
+	// The key can only be a string, so we just go ahead and set it here
+	newValue(m.values, key, false).String(name)
+
+	// Maps can't have flat members
+	return newValue(m.values, value, false)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/middleware.go
new file mode 100644
index 00000000000..36034479113
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/middleware.go
@@ -0,0 +1,62 @@
+package query
+
+import (
+	"context"
+	"fmt"
+	"io/ioutil"
+
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// AddAsGetRequestMiddleware adds a middleware to the Serialize stack after the
+// operation serializer that will convert the query request body to a GET
+// operation with the query message in the HTTP request querystring.
+func AddAsGetRequestMiddleware(stack *middleware.Stack) error {
+	return stack.Serialize.Insert(&asGetRequest{}, "OperationSerializer", middleware.After)
+}
+
+type asGetRequest struct{}
+
+func (*asGetRequest) ID() string { return "Query:AsGetRequest" }
+
+func (m *asGetRequest) HandleSerialize(
+	ctx context.Context, input middleware.SerializeInput, next middleware.SerializeHandler,
+) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	req, ok := input.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("expect smithy HTTP Request, got %T", input.Request)
+	}
+
+	req.Method = "GET"
+
+	// If the stream is not set, nothing else to do.
+	stream := req.GetStream()
+	if stream == nil {
+		return next.HandleSerialize(ctx, input)
+	}
+
+	// Clear the stream since there will not be any body.
+	req.Header.Del("Content-Type")
+	req, err = req.SetStream(nil)
+	if err != nil {
+		return out, metadata, fmt.Errorf("unable to update request body, %w", err)
+	}
+	input.Request = req
+
+	// Update request query with the body's query string value.
+ delim := "" + if len(req.URL.RawQuery) != 0 { + delim = "&" + } + + b, err := ioutil.ReadAll(stream) + if err != nil { + return out, metadata, fmt.Errorf("unable to get request body %w", err) + } + req.URL.RawQuery += delim + string(b) + + return next.HandleSerialize(ctx, input) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/object.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/object.go new file mode 100644 index 00000000000..455b92515ca --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/object.go @@ -0,0 +1,69 @@ +package query + +import ( + "fmt" + "net/url" +) + +// Object represents the encoding of Query structures and unions. A Query +// object is a representation of a mapping of string keys to arbitrary +// values where there is a fixed set of keys whose values each have their +// own known type. A serialized object might look like the following: +// +// ObjectName.Foo=value +// &ObjectName.Bar=5 +type Object struct { + // The query values to add the object to. + values url.Values + // The object's prefix, which includes the names of all parent structures + // and ends with the name of the object. For example, the prefix might be + // "ParentStructure.ObjectName". This prefix will be used to form the full + // keys for each member of the object. For example, a member might have the + // key "ParentStructure.ObjectName.MemberName". + // + // While this is currently represented as a string that gets added to, it + // could also be represented as a stack that only gets condensed into a + // string when a finalized key is created. This could potentially reduce + // allocations. + prefix string +} + +func newObject(values url.Values, prefix string) *Object { + return &Object{ + values: values, + prefix: prefix, + } +} + +// Key adds the given named key to the Query object. +// Returns a Value encoder that should be used to encode a Query value type. +func (o *Object) Key(name string) Value { + return o.key(name, false) +} + +// KeyWithValues adds the given named key to the Query object. +// Returns a Value encoder that should be used to encode a Query list of values. +func (o *Object) KeyWithValues(name string) Value { + return o.keyWithValues(name, false) +} + +// FlatKey adds the given named key to the Query object. +// Returns a Value encoder that should be used to encode a Query value type. The +// value will be flattened if it is a map or array. +func (o *Object) FlatKey(name string) Value { + return o.key(name, true) +} + +func (o *Object) key(name string, flatValue bool) Value { + if o.prefix != "" { + return newValue(o.values, fmt.Sprintf("%s.%s", o.prefix, name), flatValue) + } + return newValue(o.values, name, flatValue) +} + +func (o *Object) keyWithValues(name string, flatValue bool) Value { + if o.prefix != "" { + return newAppendValue(o.values, fmt.Sprintf("%s.%s", o.prefix, name), flatValue) + } + return newAppendValue(o.values, name, flatValue) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/value.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/value.go new file mode 100644 index 00000000000..a9251521f12 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/value.go @@ -0,0 +1,115 @@ +package query + +import ( + "math/big" + "net/url" + + "github.com/aws/smithy-go/encoding/httpbinding" +) + +// Value represents a Query Value type. +type Value struct { + // The query values to add the value to. 
+ values url.Values + // The value's key, which will form the prefix for complex types. + key string + // Whether the value should be flattened or not if it's a flattenable type. + flat bool + queryValue httpbinding.QueryValue +} + +func newValue(values url.Values, key string, flat bool) Value { + return Value{ + values: values, + key: key, + flat: flat, + queryValue: httpbinding.NewQueryValue(values, key, false), + } +} + +func newAppendValue(values url.Values, key string, flat bool) Value { + return Value{ + values: values, + key: key, + flat: flat, + queryValue: httpbinding.NewQueryValue(values, key, true), + } +} + +func newBaseValue(values url.Values) Value { + return Value{ + values: values, + queryValue: httpbinding.NewQueryValue(nil, "", false), + } +} + +// Array returns a new Array encoder. +func (qv Value) Array(locationName string) *Array { + return newArray(qv.values, qv.key, qv.flat, locationName) +} + +// Object returns a new Object encoder. +func (qv Value) Object() *Object { + return newObject(qv.values, qv.key) +} + +// Map returns a new Map encoder. +func (qv Value) Map(keyLocationName string, valueLocationName string) *Map { + return newMap(qv.values, qv.key, qv.flat, keyLocationName, valueLocationName) +} + +// Base64EncodeBytes encodes v as a base64 query string value. +// This is intended to enable compatibility with the JSON encoder. +func (qv Value) Base64EncodeBytes(v []byte) { + qv.queryValue.Blob(v) +} + +// Boolean encodes v as a query string value +func (qv Value) Boolean(v bool) { + qv.queryValue.Boolean(v) +} + +// String encodes v as a query string value +func (qv Value) String(v string) { + qv.queryValue.String(v) +} + +// Byte encodes v as a query string value +func (qv Value) Byte(v int8) { + qv.queryValue.Byte(v) +} + +// Short encodes v as a query string value +func (qv Value) Short(v int16) { + qv.queryValue.Short(v) +} + +// Integer encodes v as a query string value +func (qv Value) Integer(v int32) { + qv.queryValue.Integer(v) +} + +// Long encodes v as a query string value +func (qv Value) Long(v int64) { + qv.queryValue.Long(v) +} + +// Float encodes v as a query string value +func (qv Value) Float(v float32) { + qv.queryValue.Float(v) +} + +// Double encodes v as a query string value +func (qv Value) Double(v float64) { + qv.queryValue.Double(v) +} + +// BigInteger encodes v as a query string value +func (qv Value) BigInteger(v *big.Int) { + qv.queryValue.BigInteger(v) +} + +// BigDecimal encodes v as a query string value +func (qv Value) BigDecimal(v *big.Float) { + qv.queryValue.BigDecimal(v) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/restjson/decoder_util.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/restjson/decoder_util.go new file mode 100644 index 00000000000..1bce78a4d45 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/restjson/decoder_util.go @@ -0,0 +1,85 @@ +package restjson + +import ( + "encoding/json" + "io" + "strings" + + "github.com/aws/smithy-go" +) + +// GetErrorInfo util looks for code, __type, and message members in the +// json body. These members are optionally available, and the function +// returns the value of member if it is available. This function is useful to +// identify the error code, msg in a REST JSON error response. 
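+//
+// Editorial sketch (hypothetical payload, not upstream code): a body such as
+//
+//	{"__type":"com.example#ThrottlingException","message":"slow down"}
+//
+// yields errorType "ThrottlingException" (after sanitization) and message
+// "slow down".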
+func GetErrorInfo(decoder *json.Decoder) (errorType string, message string, err error) { + var errInfo struct { + Code string + Type string `json:"__type"` + Message string + } + + err = decoder.Decode(&errInfo) + if err != nil { + if err == io.EOF { + return errorType, message, nil + } + return errorType, message, err + } + + // assign error type + if len(errInfo.Code) != 0 { + errorType = errInfo.Code + } else if len(errInfo.Type) != 0 { + errorType = errInfo.Type + } + + // assign error message + if len(errInfo.Message) != 0 { + message = errInfo.Message + } + + // sanitize error + if len(errorType) != 0 { + errorType = SanitizeErrorCode(errorType) + } + + return errorType, message, nil +} + +// SanitizeErrorCode sanitizes the errorCode string . +// The rule for sanitizing is if a `:` character is present, then take only the +// contents before the first : character in the value. +// If a # character is present, then take only the contents after the +// first # character in the value. +func SanitizeErrorCode(errorCode string) string { + if strings.ContainsAny(errorCode, ":") { + errorCode = strings.SplitN(errorCode, ":", 2)[0] + } + + if strings.ContainsAny(errorCode, "#") { + errorCode = strings.SplitN(errorCode, "#", 2)[1] + } + + return errorCode +} + +// GetSmithyGenericAPIError returns smithy generic api error and an error interface. +// Takes in json decoder, and error Code string as args. The function retrieves error message +// and error code from the decoder body. If errorCode of length greater than 0 is passed in as +// an argument, it is used instead. +func GetSmithyGenericAPIError(decoder *json.Decoder, errorCode string) (*smithy.GenericAPIError, error) { + errorType, message, err := GetErrorInfo(decoder) + if err != nil { + return nil, err + } + + if len(errorCode) == 0 { + errorCode = errorType + } + + return &smithy.GenericAPIError{ + Code: errorCode, + Message: message, + }, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/xml/error_utils.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/xml/error_utils.go new file mode 100644 index 00000000000..6975ce6524d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/xml/error_utils.go @@ -0,0 +1,48 @@ +package xml + +import ( + "encoding/xml" + "fmt" + "io" +) + +// ErrorComponents represents the error response fields +// that will be deserialized from an xml error response body +type ErrorComponents struct { + Code string + Message string + RequestID string +} + +// GetErrorResponseComponents returns the error fields from an xml error response body +func GetErrorResponseComponents(r io.Reader, noErrorWrapping bool) (ErrorComponents, error) { + if noErrorWrapping { + var errResponse noWrappedErrorResponse + if err := xml.NewDecoder(r).Decode(&errResponse); err != nil && err != io.EOF { + return ErrorComponents{}, fmt.Errorf("error while deserializing xml error response: %w", err) + } + return ErrorComponents(errResponse), nil + } + + var errResponse wrappedErrorResponse + if err := xml.NewDecoder(r).Decode(&errResponse); err != nil && err != io.EOF { + return ErrorComponents{}, fmt.Errorf("error while deserializing xml error response: %w", err) + } + return ErrorComponents(errResponse), nil +} + +// noWrappedErrorResponse represents the error response body with +// no internal Error wrapping +type noWrappedErrorResponse struct { + Code string `xml:"Code"` + Message string `xml:"Message"` + RequestID string `xml:"RequestId"` +} + +// wrappedErrorResponse represents the error response body 
+// wrapped within Error
+type wrappedErrorResponse struct {
+	Code      string `xml:"Error>Code"`
+	Message   string `xml:"Error>Message"`
+	RequestID string `xml:"RequestId"`
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_bucket.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_bucket.go
new file mode 100644
index 00000000000..974ef594f07
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_bucket.go
@@ -0,0 +1,96 @@
+package ratelimit
+
+import (
+	"sync"
+)
+
+// TokenBucket provides a concurrency safe utility for adding and removing
+// tokens from the available token bucket.
+type TokenBucket struct {
+	remainingTokens uint
+	maxCapacity     uint
+	minCapacity     uint
+	mu              sync.Mutex
+}
+
+// NewTokenBucket returns an initialized TokenBucket with the capacity
+// specified.
+func NewTokenBucket(i uint) *TokenBucket {
+	return &TokenBucket{
+		remainingTokens: i,
+		maxCapacity:     i,
+		minCapacity:     1,
+	}
+}
+
+// Retrieve attempts to reduce the available tokens by the amount requested.
+// If the requested amount is larger than the number of available tokens,
+// false will be returned along with the current number of available tokens.
+// Otherwise the available tokens will be reduced by the requested amount,
+// and the new remaining token count will be returned together with true to
+// indicate the tokens were successfully retrieved.
+func (t *TokenBucket) Retrieve(amount uint) (available uint, retrieved bool) {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+
+	if amount > t.remainingTokens {
+		return t.remainingTokens, false
+	}
+
+	t.remainingTokens -= amount
+	return t.remainingTokens, true
+}
+
+// Refund returns the amount of tokens back to the available token bucket, up
+// to the initial capacity.
+func (t *TokenBucket) Refund(amount uint) {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+
+	// Capacity cannot exceed max capacity.
+	t.remainingTokens = uintMin(t.remainingTokens+amount, t.maxCapacity)
+}
+
+// Capacity returns the maximum capacity of tokens that the bucket could
+// contain.
+func (t *TokenBucket) Capacity() uint {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+
+	return t.maxCapacity
+}
+
+// Remaining returns the number of tokens remaining in the bucket.
+func (t *TokenBucket) Remaining() uint {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+
+	return t.remainingTokens
+}
+
+// Resize adjusts the size of the token bucket. Returns the capacity remaining.
+func (t *TokenBucket) Resize(size uint) uint {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+
+	t.maxCapacity = uintMax(size, t.minCapacity)
+
+	// Capacity needs to be capped at max capacity, if max size reduced.
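+	// Worked example (editorial note): a bucket resized from 500 down to 100
+	// with 350 tokens remaining is clamped to 100 remaining tokens.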
+	t.remainingTokens = uintMin(t.remainingTokens, t.maxCapacity)
+
+	return t.remainingTokens
+}
+
+func uintMin(a, b uint) uint {
+	if a < b {
+		return a
+	}
+	return b
+}
+
+func uintMax(a, b uint) uint {
+	if a > b {
+		return a
+	}
+	return b
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_rate_limit.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_rate_limit.go
new file mode 100644
index 00000000000..d89090ad38e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_rate_limit.go
@@ -0,0 +1,83 @@
+package ratelimit
+
+import (
+	"context"
+	"fmt"
+)
+
+type rateToken struct {
+	tokenCost uint
+	bucket    *TokenBucket
+}
+
+func (t rateToken) release() error {
+	t.bucket.Refund(t.tokenCost)
+	return nil
+}
+
+// TokenRateLimit provides a Token Bucket RateLimiter implementation
+// that limits the overall number of retry attempts that can be made across
+// operation invocations.
+type TokenRateLimit struct {
+	bucket *TokenBucket
+}
+
+// NewTokenRateLimit returns a TokenRateLimit initialized with the provided
+// number of tokens as both the bucket capacity and the starting balance.
+func NewTokenRateLimit(tokens uint) *TokenRateLimit {
+	return &TokenRateLimit{
+		bucket: NewTokenBucket(tokens),
+	}
+}
+
+type canceledError struct {
+	Err error
+}
+
+func (c canceledError) CanceledError() bool { return true }
+func (c canceledError) Unwrap() error       { return c.Err }
+func (c canceledError) Error() string {
+	return fmt.Sprintf("canceled, %v", c.Err)
+}
+
+// GetToken may cause an available pool of retry quota to be
+// decremented. Returns an error if the requested cost cannot be
+// deducted from the remaining retry quota.
+func (l *TokenRateLimit) GetToken(ctx context.Context, cost uint) (func() error, error) {
+	select {
+	case <-ctx.Done():
+		return nil, canceledError{Err: ctx.Err()}
+	default:
+	}
+	if avail, ok := l.bucket.Retrieve(cost); !ok {
+		return nil, QuotaExceededError{Available: avail, Requested: cost}
+	}
+
+	return rateToken{
+		tokenCost: cost,
+		bucket:    l.bucket,
+	}.release, nil
+}
+
+// AddTokens increments the token bucket by a fixed amount.
+func (l *TokenRateLimit) AddTokens(v uint) error {
+	l.bucket.Refund(v)
+	return nil
+}
+
+// Remaining returns the number of remaining tokens in the bucket.
+func (l *TokenRateLimit) Remaining() uint {
+	return l.bucket.Remaining()
+}
+
+// QuotaExceededError provides the SDK error when the retries for a given
+// token bucket have been exhausted.
+type QuotaExceededError struct {
+	Available uint
+	Requested uint
+}
+
+func (e QuotaExceededError) Error() string {
+	return fmt.Sprintf("retry quota exceeded, %d available, %d requested",
+		e.Available, e.Requested)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/request.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/request.go
new file mode 100644
index 00000000000..d8d00e61582
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/request.go
@@ -0,0 +1,25 @@
+package aws
+
+import (
+	"fmt"
+)
+
+// TODO remove replace with smithy.CanceledError
+
+// RequestCanceledError is the error that will be returned by an API request
+// that was canceled. Requests given a Context may return this error when
+// canceled.
+type RequestCanceledError struct {
+	Err error
+}
+
+// CanceledError returns true to satisfy interfaces checking for canceled errors.
+func (*RequestCanceledError) CanceledError() bool { return true }
+
+// Unwrap returns the underlying error, if there was one.
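+//
+// Editorial sketch (not upstream code): callers typically detect cancellation
+// with errors.As:
+//
+//	var rce *RequestCanceledError
+//	if errors.As(err, &rce) {
+//		// the request was canceled; rce.Unwrap() is the cause
+//	}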
+func (e *RequestCanceledError) Unwrap() error { + return e.Err +} +func (e *RequestCanceledError) Error() string { + return fmt.Sprintf("request canceled, %v", e.Err) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive.go new file mode 100644 index 00000000000..4dfde857373 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive.go @@ -0,0 +1,156 @@ +package retry + +import ( + "context" + "fmt" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/sdk" +) + +const ( + // DefaultRequestCost is the cost of a single request from the adaptive + // rate limited token bucket. + DefaultRequestCost uint = 1 +) + +// DefaultThrottles provides the set of errors considered throttle errors that +// are checked by default. +var DefaultThrottles = []IsErrorThrottle{ + ThrottleErrorCode{ + Codes: DefaultThrottleErrorCodes, + }, +} + +// AdaptiveModeOptions provides the functional options for configuring the +// adaptive retry mode, and delay behavior. +type AdaptiveModeOptions struct { + // If the adaptive token bucket is empty, when an attempt will be made + // AdaptiveMode will sleep until a token is available. This can occur when + // attempts fail with throttle errors. Use this option to disable the sleep + // until token is available, and return error immediately. + FailOnNoAttemptTokens bool + + // The cost of an attempt from the AdaptiveMode's adaptive token bucket. + RequestCost uint + + // Set of strategies to determine if the attempt failed due to a throttle + // error. + // + // It is safe to append to this list in NewAdaptiveMode's functional options. + Throttles []IsErrorThrottle + + // Set of options for standard retry mode that AdaptiveMode is built on top + // of. AdaptiveMode may apply its own defaults to Standard retry mode that + // are different than the defaults of NewStandard. Use these options to + // override the default options. + StandardOptions []func(*StandardOptions) +} + +// AdaptiveMode provides an experimental retry strategy that expands on the +// Standard retry strategy, adding client attempt rate limits. The attempt rate +// limit is initially unrestricted, but becomes restricted when the attempt +// fails with for a throttle error. When restricted AdaptiveMode may need to +// sleep before an attempt is made, if too many throttles have been received. +// AdaptiveMode's sleep can be canceled with context cancel. Set +// AdaptiveModeOptions FailOnNoAttemptTokens to change the behavior from sleep, +// to fail fast. +// +// Eventually unrestricted attempt rate limit will be restored once attempts no +// longer are failing due to throttle errors. +type AdaptiveMode struct { + options AdaptiveModeOptions + throttles IsErrorThrottles + + retryer aws.RetryerV2 + rateLimit *adaptiveRateLimit +} + +// NewAdaptiveMode returns an initialized AdaptiveMode retry strategy. +func NewAdaptiveMode(optFns ...func(*AdaptiveModeOptions)) *AdaptiveMode { + o := AdaptiveModeOptions{ + RequestCost: DefaultRequestCost, + Throttles: append([]IsErrorThrottle{}, DefaultThrottles...), + } + for _, fn := range optFns { + fn(&o) + } + + return &AdaptiveMode{ + options: o, + throttles: IsErrorThrottles(o.Throttles), + retryer: NewStandard(o.StandardOptions...), + rateLimit: newAdaptiveRateLimit(), + } +} + +// IsErrorRetryable returns if the failed attempt is retryable. This check +// should determine if the error can be retried, or if the error is +// terminal. 
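+//
+// As an illustrative sketch only (not part of the upstream source), an
+// AdaptiveMode retryer is constructed via NewAdaptiveMode, for example
+// failing fast instead of sleeping when no attempt tokens remain:
+//
+//	retryer := retry.NewAdaptiveMode(func(o *retry.AdaptiveModeOptions) {
+//		o.FailOnNoAttemptTokens = true
+//	})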
+func (a *AdaptiveMode) IsErrorRetryable(err error) bool {
+	return a.retryer.IsErrorRetryable(err)
+}
+
+// MaxAttempts returns the maximum number of attempts that can be made for
+// an attempt before failing. A value of 0 implies that the attempt should
+// be retried until it succeeds if the errors are retryable.
+func (a *AdaptiveMode) MaxAttempts() int {
+	return a.retryer.MaxAttempts()
+}
+
+// RetryDelay returns the delay that should be used before retrying the
+// attempt. Will return an error if the delay could not be determined.
+func (a *AdaptiveMode) RetryDelay(attempt int, opErr error) (
+	time.Duration, error,
+) {
+	return a.retryer.RetryDelay(attempt, opErr)
+}
+
+// GetRetryToken attempts to deduct the retry cost from the retry token pool.
+// It returns the token release function, or an error.
+func (a *AdaptiveMode) GetRetryToken(ctx context.Context, opErr error) (
+	releaseToken func(error) error, err error,
+) {
+	return a.retryer.GetRetryToken(ctx, opErr)
+}
+
+// GetInitialToken returns the initial attempt token that can increment the
+// retry token pool if the attempt is successful.
+//
+// Deprecated: This method does not provide a way to block using Context,
+// nor can it return an error. Use RetryerV2, and GetAttemptToken instead. Only
+// present to implement Retryer interface.
+func (a *AdaptiveMode) GetInitialToken() (releaseToken func(error) error) {
+	return nopRelease
+}
+
+// GetAttemptToken returns the attempt token that can be used to rate limit
+// attempt calls. Will be used by the SDK's retry package's Attempt
+// middleware to get an attempt token prior to making the attempt, and to
+// release the attempt token after the attempt has been made.
+func (a *AdaptiveMode) GetAttemptToken(ctx context.Context) (func(error) error, error) {
+	for {
+		acquiredToken, waitTryAgain := a.rateLimit.AcquireToken(a.options.RequestCost)
+		if acquiredToken {
+			break
+		}
+		if a.options.FailOnNoAttemptTokens {
+			return nil, fmt.Errorf(
+				"unable to get attempt token, and FailOnNoAttemptTokens enabled")
+		}
+
+		if err := sdk.SleepWithContext(ctx, waitTryAgain); err != nil {
+			return nil, fmt.Errorf("failed to wait for token to be available, %w", err)
+		}
+	}
+
+	return a.handleResponse, nil
+}
+
+func (a *AdaptiveMode) handleResponse(opErr error) error {
+	throttled := a.throttles.IsErrorThrottle(opErr).Bool()
+
+	a.rateLimit.Update(throttled)
+	return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive_ratelimit.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive_ratelimit.go
new file mode 100644
index 00000000000..ad96d9b8c5d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive_ratelimit.go
@@ -0,0 +1,158 @@
+package retry
+
+import (
+	"math"
+	"sync"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/internal/sdk"
+)
+
+type adaptiveRateLimit struct {
+	tokenBucketEnabled bool
+
+	smooth        float64
+	beta          float64
+	scaleConstant float64
+	minFillRate   float64
+
+	fillRate         float64
+	calculatedRate   float64
+	lastRefilled     time.Time
+	measuredTxRate   float64
+	lastTxRateBucket float64
+	requestCount     int64
+	lastMaxRate      float64
+	lastThrottleTime time.Time
+	timeWindow       float64
+
+	tokenBucket *adaptiveTokenBucket
+
+	mu sync.Mutex
+}
+
+func newAdaptiveRateLimit() *adaptiveRateLimit {
+	now := sdk.NowTime()
+	return &adaptiveRateLimit{
+		smooth:        0.8,
+		beta:          0.7,
+		scaleConstant: 0.4,
+
+		minFillRate: 0.5,
+
+		lastTxRateBucket: math.Floor(timeFloat64Seconds(now)),
+		lastThrottleTime: now,
+
+		tokenBucket: newAdaptiveTokenBucket(0),
+	}
+}
+
+func (a *adaptiveRateLimit) Enable(v bool) {
+	a.mu.Lock()
+	defer a.mu.Unlock()
+
+	a.tokenBucketEnabled = v
+}
+
+func (a *adaptiveRateLimit) AcquireToken(amount uint) (
+	tokenAcquired bool, waitTryAgain time.Duration,
+) {
+	a.mu.Lock()
+	defer a.mu.Unlock()
+
+	if !a.tokenBucketEnabled {
+		return true, 0
+	}
+
+	a.tokenBucketRefill()
+
+	available, ok := a.tokenBucket.Retrieve(float64(amount))
+	if !ok {
+		waitDur := float64Seconds((float64(amount) - available) / a.fillRate)
+		return false, waitDur
+	}
+
+	return true, 0
+}
+
+func (a *adaptiveRateLimit) Update(throttled bool) {
+	a.mu.Lock()
+	defer a.mu.Unlock()
+
+	a.updateMeasuredRate()
+
+	if throttled {
+		rateToUse := a.measuredTxRate
+		if a.tokenBucketEnabled {
+			rateToUse = math.Min(a.measuredTxRate, a.fillRate)
+		}
+
+		a.lastMaxRate = rateToUse
+		a.calculateTimeWindow()
+		a.lastThrottleTime = sdk.NowTime()
+		a.calculatedRate = a.cubicThrottle(rateToUse)
+		a.tokenBucketEnabled = true
+	} else {
+		a.calculateTimeWindow()
+		a.calculatedRate = a.cubicSuccess(sdk.NowTime())
+	}
+
+	newRate := math.Min(a.calculatedRate, 2*a.measuredTxRate)
+	a.tokenBucketUpdateRate(newRate)
+}
+
+func (a *adaptiveRateLimit) cubicSuccess(t time.Time) float64 {
+	dt := secondsFloat64(t.Sub(a.lastThrottleTime))
+	return (a.scaleConstant * math.Pow(dt-a.timeWindow, 3)) + a.lastMaxRate
+}
+
+func (a *adaptiveRateLimit) cubicThrottle(rateToUse float64) float64 {
+	return rateToUse * a.beta
+}
+
+func (a *adaptiveRateLimit) calculateTimeWindow() {
+	a.timeWindow = math.Pow((a.lastMaxRate*(1.-a.beta))/a.scaleConstant, 1./3.)
+}
+
+func (a *adaptiveRateLimit) tokenBucketUpdateRate(newRPS float64) {
+	a.tokenBucketRefill()
+	a.fillRate = math.Max(newRPS, a.minFillRate)
+	a.tokenBucket.Resize(newRPS)
+}
+
+func (a *adaptiveRateLimit) updateMeasuredRate() {
+	now := sdk.NowTime()
+	timeBucket := math.Floor(timeFloat64Seconds(now)*2.) / 2.
+	a.requestCount++
+
+	if timeBucket > a.lastTxRateBucket {
+		currentRate := float64(a.requestCount) / (timeBucket - a.lastTxRateBucket)
+		a.measuredTxRate = (currentRate * a.smooth) + (a.measuredTxRate * (1. - a.smooth))
+		a.requestCount = 0
+		a.lastTxRateBucket = timeBucket
+	}
+}
+
+func (a *adaptiveRateLimit) tokenBucketRefill() {
+	now := sdk.NowTime()
+	if a.lastRefilled.IsZero() {
+		a.lastRefilled = now
+		return
+	}
+
+	fillAmount := secondsFloat64(now.Sub(a.lastRefilled)) * a.fillRate
+	a.tokenBucket.Refund(fillAmount)
+	a.lastRefilled = now
+}
+
+func float64Seconds(v float64) time.Duration {
+	return time.Duration(v * float64(time.Second))
+}
+
+func secondsFloat64(v time.Duration) float64 {
+	return float64(v) / float64(time.Second)
+}
+
+func timeFloat64Seconds(v time.Time) float64 {
+	return float64(v.UnixNano()) / float64(time.Second)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive_token_bucket.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive_token_bucket.go
new file mode 100644
index 00000000000..052723e8ed1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/adaptive_token_bucket.go
@@ -0,0 +1,83 @@
+package retry
+
+import (
+	"math"
+	"sync"
+)
+
+// adaptiveTokenBucket provides a concurrency safe utility for adding and
+// removing tokens from the available token bucket.
+type adaptiveTokenBucket struct {
+	remainingTokens float64
+	maxCapacity     float64
+	minCapacity     float64
+	mu              sync.Mutex
+}
+
+// newAdaptiveTokenBucket returns an initialized adaptiveTokenBucket with the
+// capacity specified.
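+//
+// Illustrative sketch (not part of the upstream source): requesting more
+// tokens than remain fails and reports the current balance, e.g.
+//
+//	b := newAdaptiveTokenBucket(2.5)
+//	avail, ok := b.Retrieve(3.0) // ok == false, avail == 2.5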
+func newAdaptiveTokenBucket(i float64) *adaptiveTokenBucket {
+	return &adaptiveTokenBucket{
+		remainingTokens: i,
+		maxCapacity:     i,
+		minCapacity:     1,
+	}
+}
+
+// Retrieve attempts to reduce the available tokens by the amount requested. If
+// there are tokens available true will be returned along with the number of
+// available tokens remaining. If amount requested is larger than the available
+// capacity, false will be returned along with the available capacity. If the
+// amount is less than the available capacity, the capacity will be reduced by
+// that amount, and the remaining capacity and true will be returned.
+func (t *adaptiveTokenBucket) Retrieve(amount float64) (available float64, retrieved bool) {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+
+	if amount > t.remainingTokens {
+		return t.remainingTokens, false
+	}
+
+	t.remainingTokens -= amount
+	return t.remainingTokens, true
+}
+
+// Refund returns the amount of tokens back to the available token bucket, up
+// to the initial capacity.
+func (t *adaptiveTokenBucket) Refund(amount float64) {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+
+	// Capacity cannot exceed max capacity.
+	t.remainingTokens = math.Min(t.remainingTokens+amount, t.maxCapacity)
+}
+
+// Capacity returns the maximum capacity of tokens that the bucket could
+// contain.
+func (t *adaptiveTokenBucket) Capacity() float64 {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+
+	return t.maxCapacity
+}
+
+// Remaining returns the number of tokens that remain in the bucket.
+func (t *adaptiveTokenBucket) Remaining() float64 {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+
+	return t.remainingTokens
+}
+
+// Resize adjusts the size of the token bucket. Returns the capacity remaining.
+func (t *adaptiveTokenBucket) Resize(size float64) float64 {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+
+	t.maxCapacity = math.Max(size, t.minCapacity)
+
+	// Capacity needs to be capped at max capacity, if max size reduced.
+	t.remainingTokens = math.Min(t.remainingTokens, t.maxCapacity)
+
+	return t.remainingTokens
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/doc.go
new file mode 100644
index 00000000000..3a08ebe0a72
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/doc.go
@@ -0,0 +1,80 @@
+// Package retry provides interfaces and implementations for SDK request retry behavior.
+//
+// # Retryer Interface and Implementations
+//
+// This package defines the Retryer interface that is used to either implement custom retry behavior
+// or to extend the existing retry implementations provided by the SDK. This package provides a single
+// retry implementation: Standard.
+//
+// # Standard
+//
+// Standard is the default retryer implementation used by service clients. The standard retryer is a rate limited
+// retryer that has a configurable max attempts to limit the number of retry attempts when a retryable error occurs.
+// In addition, the retryer uses a configurable token bucket to rate limit the retry attempts across the client,
+// and uses an additional delay policy to limit the time between a request's subsequent attempts.
+//
+// By default the standard retryer uses the DefaultRetryables slice of IsErrorRetryable types to determine whether
+// a given error is retryable. By default this list of retryables includes the following:
+//   - Retrying errors that implement the RetryableError method, and return true.
+//   - Connection Errors
+//   - Errors that implement a ConnectionError, Temporary, or Timeout method that returns true.
+//   - Connection Reset Errors.
+//   - net.OpErr types that are dialing errors or are temporary.
+//   - HTTP Status Codes: 500, 502, 503, and 504.
+//   - API Error Codes
+//   - RequestTimeout, RequestTimeoutException
+//   - Throttling, ThrottlingException, ThrottledException, RequestThrottledException, TooManyRequestsException,
+//     RequestThrottled, SlowDown, EC2ThrottledException
+//   - ProvisionedThroughputExceededException, RequestLimitExceeded, BandwidthLimitExceeded, LimitExceededException
+//   - TransactionInProgressException, PriorRequestNotComplete
+//
+// The standard retryer will not retry a request in the event the context associated with the request
+// has been cancelled. Applications must handle this case explicitly if they wish to retry with a different context
+// value.
+//
+// You can configure the standard retryer implementation to fit your application by constructing a standard retryer
+// using the NewStandard function, and providing one or more functional arguments that mutate the StandardOptions
+// structure. StandardOptions provides the ability to modify the token bucket rate limiter, retryable error conditions,
+// and the retry delay policy.
+//
+// For example, to modify the default retry attempts for the standard retryer:
+//
+//	// configure the custom retryer
+//	customRetry := retry.NewStandard(func(o *retry.StandardOptions) {
+//		o.MaxAttempts = 5
+//	})
+//
+//	// create a service client with the retryer
+//	s3.NewFromConfig(cfg, func(o *s3.Options) {
+//		o.Retryer = customRetry
+//	})
+//
+// # Utilities
+//
+// A number of package functions have been provided to easily wrap retryer implementations in an implementation-agnostic
+// way. These are:
+//
+//	AddWithErrorCodes      - Provides the ability to add additional API error codes that should be considered retryable
+//	                         in addition to those considered retryable by the provided retryer.
+//
+//	AddWithMaxAttempts     - Provides the ability to set the max number of attempts for retrying a request by wrapping
+//	                         a retryer implementation.
+//
+//	AddWithMaxBackoffDelay - Provides the ability to set the max back off delay that can occur before retrying a
+//	                         request by wrapping a retryer implementation.
+//
+// The following package functions have been provided to easily satisfy different retry interfaces to further customize
+// a given retryer's behavior:
+//
+//	BackoffDelayerFunc   - Can be used to wrap a function to satisfy the BackoffDelayer interface. For example,
+//	                       you can use this method to easily create custom back off policies to be used with the
+//	                       standard retryer.
+//
+//	IsErrorRetryableFunc - Can be used to wrap a function to satisfy the IsErrorRetryable interface. For example,
+//	                       this can be used to extend the standard retryer to add additional logic to determine if an
+//	                       error should be retried.
+//
+//	IsErrorTimeoutFunc   - Can be used to wrap a function to satisfy the IsErrorTimeout interface. For example,
+//	                       this can be used to extend the standard retryer to add additional logic to determine if an
+//	                       error should be considered a timeout.
package retry
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/errors.go
new file mode 100644
index 00000000000..3e432eefe77
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/errors.go
@@ -0,0 +1,20 @@
+package retry
+
+import "fmt"
+
+// MaxAttemptsError provides the error when the maximum number of attempts has
+// been exceeded.
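+//
+// Callers can detect this terminal condition with errors.As; a minimal
+// sketch (not part of the upstream source):
+//
+//	var maxErr *retry.MaxAttemptsError
+//	if errors.As(err, &maxErr) {
+//		log.Printf("gave up after %d attempts: %v", maxErr.Attempt, maxErr.Err)
+//	}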
+type MaxAttemptsError struct { + Attempt int + Err error +} + +func (e *MaxAttemptsError) Error() string { + return fmt.Sprintf("exceeded maximum number of attempts, %d, %v", e.Attempt, e.Err) +} + +// Unwrap returns the nested error causing the max attempts error. Provides the +// implementation for errors.Is and errors.As to unwrap nested errors. +func (e *MaxAttemptsError) Unwrap() error { + return e.Err +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/jitter_backoff.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/jitter_backoff.go new file mode 100644 index 00000000000..c266996dea2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/jitter_backoff.go @@ -0,0 +1,49 @@ +package retry + +import ( + "math" + "time" + + "github.com/aws/aws-sdk-go-v2/internal/rand" + "github.com/aws/aws-sdk-go-v2/internal/timeconv" +) + +// ExponentialJitterBackoff provides backoff delays with jitter based on the +// number of attempts. +type ExponentialJitterBackoff struct { + maxBackoff time.Duration + // precomputed number of attempts needed to reach max backoff. + maxBackoffAttempts float64 + + randFloat64 func() (float64, error) +} + +// NewExponentialJitterBackoff returns an ExponentialJitterBackoff configured +// for the max backoff. +func NewExponentialJitterBackoff(maxBackoff time.Duration) *ExponentialJitterBackoff { + return &ExponentialJitterBackoff{ + maxBackoff: maxBackoff, + maxBackoffAttempts: math.Log2( + float64(maxBackoff) / float64(time.Second)), + randFloat64: rand.CryptoRandFloat64, + } +} + +// BackoffDelay returns the duration to wait before the next attempt should be +// made. Returns an error if unable get a duration. +func (j *ExponentialJitterBackoff) BackoffDelay(attempt int, err error) (time.Duration, error) { + if attempt > int(j.maxBackoffAttempts) { + return j.maxBackoff, nil + } + + b, err := j.randFloat64() + if err != nil { + return 0, err + } + + // [0.0, 1.0) * 2 ^ attempts + ri := int64(1 << uint64(attempt)) + delaySeconds := b * float64(ri) + + return timeconv.FloatSecondsDur(delaySeconds), nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/metadata.go new file mode 100644 index 00000000000..7a3f1830186 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/metadata.go @@ -0,0 +1,52 @@ +package retry + +import ( + awsmiddle "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" +) + +// attemptResultsKey is a metadata accessor key to retrieve metadata +// for all request attempts. +type attemptResultsKey struct { +} + +// GetAttemptResults retrieves attempts results from middleware metadata. +func GetAttemptResults(metadata middleware.Metadata) (AttemptResults, bool) { + m, ok := metadata.Get(attemptResultsKey{}).(AttemptResults) + return m, ok +} + +// AttemptResults represents struct containing metadata returned by all request attempts. +type AttemptResults struct { + + // Results is a slice consisting attempt result from all request attempts. + // Results are stored in order request attempt is made. + Results []AttemptResult +} + +// AttemptResult represents attempt result returned by a single request attempt. +type AttemptResult struct { + + // Err is the error if received for the request attempt. + Err error + + // Retryable denotes if request may be retried. This states if an + // error is considered retryable. + Retryable bool + + // Retried indicates if this request was retried. 
+ Retried bool + + // ResponseMetadata is any existing metadata passed via the response middlewares. + ResponseMetadata middleware.Metadata +} + +// addAttemptResults adds attempt results to middleware metadata +func addAttemptResults(metadata *middleware.Metadata, v AttemptResults) { + metadata.Set(attemptResultsKey{}, v) +} + +// GetRawResponse returns raw response recorded for the attempt result +func (a AttemptResult) GetRawResponse() interface{} { + return awsmiddle.GetRawResponse(a.ResponseMetadata) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go new file mode 100644 index 00000000000..822fc920a75 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go @@ -0,0 +1,330 @@ +package retry + +import ( + "context" + "fmt" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddle "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/internal/sdk" + "github.com/aws/smithy-go/logging" + smithymiddle "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/transport/http" +) + +// RequestCloner is a function that can take an input request type and clone +// the request for use in a subsequent retry attempt. +type RequestCloner func(interface{}) interface{} + +type retryMetadata struct { + AttemptNum int + AttemptTime time.Time + MaxAttempts int + AttemptClockSkew time.Duration +} + +// Attempt is a Smithy Finalize middleware that handles retry attempts using +// the provided Retryer implementation. +type Attempt struct { + // Enable the logging of retry attempts performed by the SDK. This will + // include logging retry attempts, unretryable errors, and when max + // attempts are reached. + LogAttempts bool + + retryer aws.RetryerV2 + requestCloner RequestCloner +} + +// NewAttemptMiddleware returns a new Attempt retry middleware. +func NewAttemptMiddleware(retryer aws.Retryer, requestCloner RequestCloner, optFns ...func(*Attempt)) *Attempt { + m := &Attempt{ + retryer: wrapAsRetryerV2(retryer), + requestCloner: requestCloner, + } + for _, fn := range optFns { + fn(m) + } + return m +} + +// ID returns the middleware identifier +func (r *Attempt) ID() string { return "Retry" } + +func (r Attempt) logf(logger logging.Logger, classification logging.Classification, format string, v ...interface{}) { + if !r.LogAttempts { + return + } + logger.Logf(classification, format, v...) +} + +// HandleFinalize utilizes the provider Retryer implementation to attempt +// retries over the next handler +func (r *Attempt) HandleFinalize(ctx context.Context, in smithymiddle.FinalizeInput, next smithymiddle.FinalizeHandler) ( + out smithymiddle.FinalizeOutput, metadata smithymiddle.Metadata, err error, +) { + var attemptNum int + var attemptClockSkew time.Duration + var attemptResults AttemptResults + + maxAttempts := r.retryer.MaxAttempts() + releaseRetryToken := nopRelease + + for { + attemptNum++ + attemptInput := in + attemptInput.Request = r.requestCloner(attemptInput.Request) + + // Record the metadata for the for attempt being started. 
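+		// Descriptive note (added, not upstream): attemptNum is 1-based, so
+		// the first pass through this loop is the initial attempt rather than
+		// a retry.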
+ attemptCtx := setRetryMetadata(ctx, retryMetadata{ + AttemptNum: attemptNum, + AttemptTime: sdk.NowTime().UTC(), + MaxAttempts: maxAttempts, + AttemptClockSkew: attemptClockSkew, + }) + + var attemptResult AttemptResult + out, attemptResult, releaseRetryToken, err = r.handleAttempt(attemptCtx, attemptInput, releaseRetryToken, next) + attemptClockSkew, _ = awsmiddle.GetAttemptSkew(attemptResult.ResponseMetadata) + + // AttemptResult Retried states that the attempt was not successful, and + // should be retried. + shouldRetry := attemptResult.Retried + + // Add attempt metadata to list of all attempt metadata + attemptResults.Results = append(attemptResults.Results, attemptResult) + + if !shouldRetry { + // Ensure the last response's metadata is used as the bases for result + // metadata returned by the stack. The Slice of attempt results + // will be added to this cloned metadata. + metadata = attemptResult.ResponseMetadata.Clone() + + break + } + } + + addAttemptResults(&metadata, attemptResults) + return out, metadata, err +} + +// handleAttempt handles an individual request attempt. +func (r *Attempt) handleAttempt( + ctx context.Context, in smithymiddle.FinalizeInput, releaseRetryToken func(error) error, next smithymiddle.FinalizeHandler, +) ( + out smithymiddle.FinalizeOutput, attemptResult AttemptResult, _ func(error) error, err error, +) { + defer func() { + attemptResult.Err = err + }() + + // Short circuit if this attempt never can succeed because the context is + // canceled. This reduces the chance of token pools being modified for + // attempts that will not be made + select { + case <-ctx.Done(): + return out, attemptResult, nopRelease, ctx.Err() + default: + } + + //------------------------------ + // Get Attempt Token + //------------------------------ + releaseAttemptToken, err := r.retryer.GetAttemptToken(ctx) + if err != nil { + return out, attemptResult, nopRelease, fmt.Errorf( + "failed to get retry Send token, %w", err) + } + + //------------------------------ + // Send Attempt + //------------------------------ + logger := smithymiddle.GetLogger(ctx) + service, operation := awsmiddle.GetServiceID(ctx), awsmiddle.GetOperationName(ctx) + retryMetadata, _ := getRetryMetadata(ctx) + attemptNum := retryMetadata.AttemptNum + maxAttempts := retryMetadata.MaxAttempts + + // Following attempts must ensure the request payload stream starts in a + // rewound state. + if attemptNum > 1 { + if rewindable, ok := in.Request.(interface{ RewindStream() error }); ok { + if rewindErr := rewindable.RewindStream(); rewindErr != nil { + return out, attemptResult, nopRelease, fmt.Errorf( + "failed to rewind transport stream for retry, %w", rewindErr) + } + } + + r.logf(logger, logging.Debug, "retrying request %s/%s, attempt %d", + service, operation, attemptNum) + } + + var metadata smithymiddle.Metadata + out, metadata, err = next.HandleFinalize(ctx, in) + attemptResult.ResponseMetadata = metadata + + //------------------------------ + // Bookkeeping + //------------------------------ + // Release the retry token based on the state of the attempt's error (if any). + if releaseError := releaseRetryToken(err); releaseError != nil && err != nil { + return out, attemptResult, nopRelease, fmt.Errorf( + "failed to release retry token after request error, %w", err) + } + // Release the attempt token based on the state of the attempt's error (if any). 
+	if releaseError := releaseAttemptToken(err); releaseError != nil && err != nil {
+		return out, attemptResult, nopRelease, fmt.Errorf(
+			"failed to release initial token after request error, %w", err)
+	}
+	// If there was no error making the attempt, nothing further to do. There
+	// will be nothing to retry.
+	if err == nil {
+		return out, attemptResult, nopRelease, err
+	}
+
+	//------------------------------
+	// Is Retryable and Should Retry
+	//------------------------------
+	// If the attempt failed with an unretryable error, nothing further to do
+	// but return, and inform the caller about the terminal failure.
+	retryable := r.retryer.IsErrorRetryable(err)
+	if !retryable {
+		r.logf(logger, logging.Debug, "request failed with unretryable error %v", err)
+		return out, attemptResult, nopRelease, err
+	}
+
+	// set retryable to true
+	attemptResult.Retryable = true
+
+	// Once the maximum number of attempts has been exhausted there is nothing
+	// further to do other than inform the caller about the terminal failure.
+	if maxAttempts > 0 && attemptNum >= maxAttempts {
+		r.logf(logger, logging.Debug, "max retry attempts exhausted, max %d", maxAttempts)
+		err = &MaxAttemptsError{
+			Attempt: attemptNum,
+			Err:     err,
+		}
+		return out, attemptResult, nopRelease, err
+	}
+
+	//------------------------------
+	// Get Retry (aka Retry Quota) Token
+	//------------------------------
+	// Get a retry token that will be released after the next attempt completes.
+	releaseRetryToken, retryTokenErr := r.retryer.GetRetryToken(ctx, err)
+	if retryTokenErr != nil {
+		return out, attemptResult, nopRelease, retryTokenErr
+	}
+
+	//------------------------------
+	// Retry Delay and Sleep
+	//------------------------------
+	// Get the retry delay before another attempt can be made, and sleep for
+	// that time. Potentially exit early if the sleep is canceled via the
+	// context.
+	retryDelay, reqErr := r.retryer.RetryDelay(attemptNum, err)
+	if reqErr != nil {
+		return out, attemptResult, releaseRetryToken, reqErr
+	}
+	if reqErr = sdk.SleepWithContext(ctx, retryDelay); reqErr != nil {
+		err = &aws.RequestCanceledError{Err: reqErr}
+		return out, attemptResult, releaseRetryToken, err
+	}
+
+	// The request should be re-attempted.
+	attemptResult.Retried = true
+
+	return out, attemptResult, releaseRetryToken, err
+}
+
+// MetricsHeader attaches SDK request metric header for retries to the transport
+type MetricsHeader struct{}
+
+// ID returns the middleware identifier
+func (r *MetricsHeader) ID() string {
+	return "RetryMetricsHeader"
+}
+
+// HandleFinalize attaches the SDK request metric header to the transport layer
+func (r MetricsHeader) HandleFinalize(ctx context.Context, in smithymiddle.FinalizeInput, next smithymiddle.FinalizeHandler) (
+	out smithymiddle.FinalizeOutput, metadata smithymiddle.Metadata, err error,
+) {
+	retryMetadata, _ := getRetryMetadata(ctx)
+
+	const retryMetricHeader = "Amz-Sdk-Request"
+	var parts []string
+
+	parts = append(parts, "attempt="+strconv.Itoa(retryMetadata.AttemptNum))
+	if retryMetadata.MaxAttempts != 0 {
+		parts = append(parts, "max="+strconv.Itoa(retryMetadata.MaxAttempts))
+	}
+
+	var ttl time.Time
+	if deadline, ok := ctx.Deadline(); ok {
+		ttl = deadline
+	}
+
+	// Only append the TTL if it can be determined.
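+	// Descriptive note (added, not upstream): the TTL is the context deadline
+	// adjusted for the estimated clock skew between client and service; the
+	// final header value looks like "attempt=2; max=3; ttl=20240101T000000Z".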
+ if !ttl.IsZero() && retryMetadata.AttemptClockSkew > 0 { + const unixTimeFormat = "20060102T150405Z" + ttl = ttl.Add(retryMetadata.AttemptClockSkew) + parts = append(parts, "ttl="+ttl.Format(unixTimeFormat)) + } + + switch req := in.Request.(type) { + case *http.Request: + req.Header[retryMetricHeader] = append(req.Header[retryMetricHeader][:0], strings.Join(parts, "; ")) + default: + return out, metadata, fmt.Errorf("unknown transport type %T", req) + } + + return next.HandleFinalize(ctx, in) +} + +type retryMetadataKey struct{} + +// getRetryMetadata retrieves retryMetadata from the context and a bool +// indicating if it was set. +// +// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues +// to clear all stack values. +func getRetryMetadata(ctx context.Context) (metadata retryMetadata, ok bool) { + metadata, ok = smithymiddle.GetStackValue(ctx, retryMetadataKey{}).(retryMetadata) + return metadata, ok +} + +// setRetryMetadata sets the retryMetadata on the context. +// +// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues +// to clear all stack values. +func setRetryMetadata(ctx context.Context, metadata retryMetadata) context.Context { + return smithymiddle.WithStackValue(ctx, retryMetadataKey{}, metadata) +} + +// AddRetryMiddlewaresOptions is the set of options that can be passed to +// AddRetryMiddlewares for configuring retry associated middleware. +type AddRetryMiddlewaresOptions struct { + Retryer aws.Retryer + + // Enable the logging of retry attempts performed by the SDK. This will + // include logging retry attempts, unretryable errors, and when max + // attempts are reached. + LogRetryAttempts bool +} + +// AddRetryMiddlewares adds retry middleware to operation middleware stack +func AddRetryMiddlewares(stack *smithymiddle.Stack, options AddRetryMiddlewaresOptions) error { + attempt := NewAttemptMiddleware(options.Retryer, http.RequestCloner, func(middleware *Attempt) { + middleware.LogAttempts = options.LogRetryAttempts + }) + + if err := stack.Finalize.Add(attempt, smithymiddle.After); err != nil { + return err + } + if err := stack.Finalize.Add(&MetricsHeader{}, smithymiddle.After); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retry.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retry.go new file mode 100644 index 00000000000..af81635b3fd --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retry.go @@ -0,0 +1,90 @@ +package retry + +import ( + "context" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" +) + +// AddWithErrorCodes returns a Retryer with additional error codes considered +// for determining if the error should be retried. +func AddWithErrorCodes(r aws.Retryer, codes ...string) aws.Retryer { + retryable := &RetryableErrorCode{ + Codes: map[string]struct{}{}, + } + for _, c := range codes { + retryable.Codes[c] = struct{}{} + } + + return &withIsErrorRetryable{ + RetryerV2: wrapAsRetryerV2(r), + Retryable: retryable, + } +} + +type withIsErrorRetryable struct { + aws.RetryerV2 + Retryable IsErrorRetryable +} + +func (r *withIsErrorRetryable) IsErrorRetryable(err error) bool { + if v := r.Retryable.IsErrorRetryable(err); v != aws.UnknownTernary { + return v.Bool() + } + return r.RetryerV2.IsErrorRetryable(err) +} + +// AddWithMaxAttempts returns a Retryer with MaxAttempts set to the value +// specified. 
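+//
+// A usage sketch (illustrative, not part of the upstream source), capping a
+// standard retryer at five attempts:
+//
+//	retryer := retry.AddWithMaxAttempts(retry.NewStandard(), 5)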
+func AddWithMaxAttempts(r aws.Retryer, max int) aws.Retryer {
+	return &withMaxAttempts{
+		RetryerV2: wrapAsRetryerV2(r),
+		Max:       max,
+	}
+}
+
+type withMaxAttempts struct {
+	aws.RetryerV2
+	Max int
+}
+
+func (w *withMaxAttempts) MaxAttempts() int {
+	return w.Max
+}
+
+// AddWithMaxBackoffDelay returns a retryer wrapping the passed in retryer
+// overriding the RetryDelay behavior for an alternate minimum initial backoff
+// delay.
+func AddWithMaxBackoffDelay(r aws.Retryer, delay time.Duration) aws.Retryer {
+	return &withMaxBackoffDelay{
+		RetryerV2: wrapAsRetryerV2(r),
+		backoff:   NewExponentialJitterBackoff(delay),
+	}
+}
+
+type withMaxBackoffDelay struct {
+	aws.RetryerV2
+	backoff *ExponentialJitterBackoff
+}
+
+func (r *withMaxBackoffDelay) RetryDelay(attempt int, err error) (time.Duration, error) {
+	return r.backoff.BackoffDelay(attempt, err)
+}
+
+type wrappedAsRetryerV2 struct {
+	aws.Retryer
+}
+
+func wrapAsRetryerV2(r aws.Retryer) aws.RetryerV2 {
+	v, ok := r.(aws.RetryerV2)
+	if !ok {
+		v = wrappedAsRetryerV2{Retryer: r}
+	}
+
+	return v
+}
+
+func (w wrappedAsRetryerV2) GetAttemptToken(context.Context) (func(error) error, error) {
+	return w.Retryer.GetInitialToken(), nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go
new file mode 100644
index 00000000000..987affdde6f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go
@@ -0,0 +1,201 @@
+package retry
+
+import (
+	"errors"
+	"net"
+	"net/url"
+	"strings"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+)
+
+// IsErrorRetryable provides the interface of an implementation to determine if
+// an error as the result of an operation is retryable.
+type IsErrorRetryable interface {
+	IsErrorRetryable(error) aws.Ternary
+}
+
+// IsErrorRetryables is a collection of checks to determine if the error is
+// retryable. Iterates through the checks and returns the state of retryable
+// if any check returns something other than unknown.
+type IsErrorRetryables []IsErrorRetryable
+
+// IsErrorRetryable returns if the error is retryable if any of the checks in
+// the list return a value other than unknown.
+func (r IsErrorRetryables) IsErrorRetryable(err error) aws.Ternary {
+	for _, re := range r {
+		if v := re.IsErrorRetryable(err); v != aws.UnknownTernary {
+			return v
+		}
+	}
+	return aws.UnknownTernary
+}
+
+// IsErrorRetryableFunc wraps a function with the IsErrorRetryable interface.
+type IsErrorRetryableFunc func(error) aws.Ternary
+
+// IsErrorRetryable returns if the error is retryable.
+func (fn IsErrorRetryableFunc) IsErrorRetryable(err error) aws.Ternary {
+	return fn(err)
+}
+
+// RetryableError is an IsErrorRetryable implementation which uses the
+// optional interface Retryable on the error value to determine if the error is
+// retryable.
+type RetryableError struct{}
+
+// IsErrorRetryable returns if the error is retryable if it satisfies the
+// Retryable interface, and returns if the attempt should be retried.
+func (RetryableError) IsErrorRetryable(err error) aws.Ternary {
+	var v interface{ RetryableError() bool }
+
+	if !errors.As(err, &v) {
+		return aws.UnknownTernary
+	}
+
+	return aws.BoolTernary(v.RetryableError())
+}
+
+// NoRetryCanceledError detects if the error was a request canceled error and
+// returns if so.
+type NoRetryCanceledError struct{}
+
+// IsErrorRetryable returns that the error is not retryable if the request was
+// canceled.
+func (NoRetryCanceledError) IsErrorRetryable(err error) aws.Ternary { + var v interface{ CanceledError() bool } + + if !errors.As(err, &v) { + return aws.UnknownTernary + } + + if v.CanceledError() { + return aws.FalseTernary + } + return aws.UnknownTernary +} + +// RetryableConnectionError determines if the underlying error is an HTTP +// connection and returns if it should be retried. +// +// Includes errors such as connection reset, connection refused, net dial, +// temporary, and timeout errors. +type RetryableConnectionError struct{} + +// IsErrorRetryable returns if the error is caused by and HTTP connection +// error, and should be retried. +func (r RetryableConnectionError) IsErrorRetryable(err error) aws.Ternary { + if err == nil { + return aws.UnknownTernary + } + var retryable bool + + var conErr interface{ ConnectionError() bool } + var tempErr interface{ Temporary() bool } + var timeoutErr interface{ Timeout() bool } + var urlErr *url.Error + var netOpErr *net.OpError + var dnsError *net.DNSError + + if errors.As(err, &dnsError) { + // NXDOMAIN errors should not be retried + if dnsError.IsNotFound { + return aws.BoolTernary(false) + } + + // if !dnsError.Temporary(), error may or may not be temporary, + // (i.e. !Temporary() =/=> !retryable) so we should fall through to + // remaining checks + if dnsError.Temporary() { + return aws.BoolTernary(true) + } + } + + switch { + case errors.As(err, &conErr) && conErr.ConnectionError(): + retryable = true + + case strings.Contains(err.Error(), "connection reset"): + retryable = true + + case errors.As(err, &urlErr): + // Refused connections should be retried as the service may not yet be + // running on the port. Go TCP dial considers refused connections as + // not temporary. + if strings.Contains(urlErr.Error(), "connection refused") { + retryable = true + } else { + return r.IsErrorRetryable(errors.Unwrap(urlErr)) + } + + case errors.As(err, &netOpErr): + // Network dial, or temporary network errors are always retryable. + if strings.EqualFold(netOpErr.Op, "dial") || netOpErr.Temporary() { + retryable = true + } else { + return r.IsErrorRetryable(errors.Unwrap(netOpErr)) + } + + case errors.As(err, &tempErr) && tempErr.Temporary(): + // Fallback to the generic temporary check, with temporary errors + // retryable. + retryable = true + + case errors.As(err, &timeoutErr) && timeoutErr.Timeout(): + // Fallback to the generic timeout check, with timeout errors + // retryable. + retryable = true + + default: + return aws.UnknownTernary + } + + return aws.BoolTernary(retryable) + +} + +// RetryableHTTPStatusCode provides a IsErrorRetryable based on HTTP status +// codes. +type RetryableHTTPStatusCode struct { + Codes map[int]struct{} +} + +// IsErrorRetryable return if the passed in error is retryable based on the +// HTTP status code. +func (r RetryableHTTPStatusCode) IsErrorRetryable(err error) aws.Ternary { + var v interface{ HTTPStatusCode() int } + + if !errors.As(err, &v) { + return aws.UnknownTernary + } + + _, ok := r.Codes[v.HTTPStatusCode()] + if !ok { + return aws.UnknownTernary + } + + return aws.TrueTernary +} + +// RetryableErrorCode determines if an attempt should be retried based on the +// API error code. +type RetryableErrorCode struct { + Codes map[string]struct{} +} + +// IsErrorRetryable return if the error is retryable based on the error codes. +// Returns unknown if the error doesn't have a code or it is unknown. 
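+//
+// Illustrative sketch (not part of the upstream source): treating a
+// hypothetical custom API error code as retryable:
+//
+//	check := retry.RetryableErrorCode{Codes: map[string]struct{}{
+//		"MyTransientError": {}, // hypothetical code, for illustration only
+//	}}
+//	ternary := check.IsErrorRetryable(err)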
+func (r RetryableErrorCode) IsErrorRetryable(err error) aws.Ternary {
+	var v interface{ ErrorCode() string }
+
+	if !errors.As(err, &v) {
+		return aws.UnknownTernary
+	}
+
+	_, ok := r.Codes[v.ErrorCode()]
+	if !ok {
+		return aws.UnknownTernary
+	}
+
+	return aws.TrueTernary
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/standard.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/standard.go
new file mode 100644
index 00000000000..25abffc8128
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/standard.go
@@ -0,0 +1,258 @@
+package retry
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws/ratelimit"
+)
+
+// BackoffDelayer provides the interface for determining the delay before
+// retrying a request attempt that previously failed.
+type BackoffDelayer interface {
+	BackoffDelay(attempt int, err error) (time.Duration, error)
+}
+
+// BackoffDelayerFunc provides a wrapper around a function to determine the
+// backoff delay of an attempt retry.
+type BackoffDelayerFunc func(int, error) (time.Duration, error)
+
+// BackoffDelay returns the delay before an attempt to retry a request.
+func (fn BackoffDelayerFunc) BackoffDelay(attempt int, err error) (time.Duration, error) {
+	return fn(attempt, err)
+}
+
+const (
+	// DefaultMaxAttempts is the maximum number of attempts for an API request
+	DefaultMaxAttempts int = 3
+
+	// DefaultMaxBackoff is the maximum back off delay between attempts
+	DefaultMaxBackoff time.Duration = 20 * time.Second
+)
+
+// Default retry token quota values.
+const (
+	DefaultRetryRateTokens  uint = 500
+	DefaultRetryCost        uint = 5
+	DefaultRetryTimeoutCost uint = 10
+	DefaultNoRetryIncrement uint = 1
+)
+
+// DefaultRetryableHTTPStatusCodes is the default set of HTTP status codes the SDK
+// should consider as retryable errors.
+var DefaultRetryableHTTPStatusCodes = map[int]struct{}{
+	500: {},
+	502: {},
+	503: {},
+	504: {},
+}
+
+// DefaultRetryableErrorCodes provides the set of API error codes that should
+// be retried.
+var DefaultRetryableErrorCodes = map[string]struct{}{
+	"RequestTimeout":          {},
+	"RequestTimeoutException": {},
+}
+
+// DefaultThrottleErrorCodes provides the set of API error codes that are
+// considered throttle errors.
+var DefaultThrottleErrorCodes = map[string]struct{}{
+	"Throttling":                             {},
+	"ThrottlingException":                    {},
+	"ThrottledException":                     {},
+	"RequestThrottledException":              {},
+	"TooManyRequestsException":               {},
+	"ProvisionedThroughputExceededException": {},
+	"TransactionInProgressException":         {},
+	"RequestLimitExceeded":                   {},
+	"BandwidthLimitExceeded":                 {},
+	"LimitExceededException":                 {},
+	"RequestThrottled":                       {},
+	"SlowDown":                               {},
+	"PriorRequestNotComplete":                {},
+	"EC2ThrottledException":                  {},
+}
+
+// DefaultRetryables provides the set of retryable checks that are used by
+// default.
+var DefaultRetryables = []IsErrorRetryable{
+	NoRetryCanceledError{},
+	RetryableError{},
+	RetryableConnectionError{},
+	RetryableHTTPStatusCode{
+		Codes: DefaultRetryableHTTPStatusCodes,
+	},
+	RetryableErrorCode{
+		Codes: DefaultRetryableErrorCodes,
+	},
+	RetryableErrorCode{
+		Codes: DefaultThrottleErrorCodes,
+	},
+}
+
+// DefaultTimeouts provides the set of timeout checks that are used by default.
+var DefaultTimeouts = []IsErrorTimeout{
+	TimeouterError{},
+}
+
+// StandardOptions provides the functional options for configuring the standard
+// retryable, and delay behavior.
+type StandardOptions struct {
+	// Maximum number of attempts that should be made.
+	MaxAttempts int
+
+	// MaxBackoff duration between retried attempts.
+	MaxBackoff time.Duration
+
+	// Provides the backoff strategy the retryer will use to determine the
+	// delay between retry attempts.
+	Backoff BackoffDelayer
+
+	// Set of strategies to determine if the attempt should be retried based on
+	// the error response received.
+	//
+	// It is safe to append to this list in NewStandard's functional options.
+	Retryables []IsErrorRetryable
+
+	// Set of strategies to determine if the attempt failed due to a timeout
+	// error.
+	//
+	// It is safe to append to this list in NewStandard's functional options.
+	Timeouts []IsErrorTimeout
+
+	// Provides the rate limiting strategy for rate limiting attempt retries
+	// across all attempts the retryer is being used with.
+	RateLimiter RateLimiter
+
+	// The cost to deduct from the RateLimiter's token bucket per retry.
+	RetryCost uint
+
+	// The cost to deduct from the RateLimiter's token bucket per retry caused
+	// by timeout error.
+	RetryTimeoutCost uint
+
+	// The cost to pay back to the RateLimiter's token bucket for successful
+	// attempts.
+	NoRetryIncrement uint
+}
+
+// RateLimiter provides the interface for limiting the rate of attempt retries
+// allowed by the retryer.
+type RateLimiter interface {
+	GetToken(ctx context.Context, cost uint) (releaseToken func() error, err error)
+	AddTokens(uint) error
+}
+
+// Standard is the standard retry pattern for the SDK. It uses a set of
+// retryable checks to determine if the failed attempt should be retried, and
+// what retry delay should be used.
+type Standard struct {
+	options StandardOptions
+
+	timeout   IsErrorTimeout
+	retryable IsErrorRetryable
+	backoff   BackoffDelayer
+}
+
+// NewStandard initializes a standard retry behavior with defaults that can be
+// overridden via functional options.
+func NewStandard(fnOpts ...func(*StandardOptions)) *Standard {
+	o := StandardOptions{
+		MaxAttempts: DefaultMaxAttempts,
+		MaxBackoff:  DefaultMaxBackoff,
+		Retryables:  append([]IsErrorRetryable{}, DefaultRetryables...),
+		Timeouts:    append([]IsErrorTimeout{}, DefaultTimeouts...),
+
+		RateLimiter:      ratelimit.NewTokenRateLimit(DefaultRetryRateTokens),
+		RetryCost:        DefaultRetryCost,
+		RetryTimeoutCost: DefaultRetryTimeoutCost,
+		NoRetryIncrement: DefaultNoRetryIncrement,
+	}
+	for _, fn := range fnOpts {
+		fn(&o)
+	}
+	if o.MaxAttempts <= 0 {
+		o.MaxAttempts = DefaultMaxAttempts
+	}
+
+	backoff := o.Backoff
+	if backoff == nil {
+		backoff = NewExponentialJitterBackoff(o.MaxBackoff)
+	}
+
+	return &Standard{
+		options:   o,
+		backoff:   backoff,
+		retryable: IsErrorRetryables(o.Retryables),
+		timeout:   IsErrorTimeouts(o.Timeouts),
+	}
+}
+
+// MaxAttempts returns the maximum number of attempts that can be made for a
+// request before failing.
+func (s *Standard) MaxAttempts() int {
+	return s.options.MaxAttempts
+}
+
+// IsErrorRetryable returns if the error can be retried or not. Should not
+// consider the number of attempts made.
+func (s *Standard) IsErrorRetryable(err error) bool {
+	return s.retryable.IsErrorRetryable(err).Bool()
+}
+
+// RetryDelay returns the delay to use before another request attempt is made.
+func (s *Standard) RetryDelay(attempt int, err error) (time.Duration, error) {
+	return s.backoff.BackoffDelay(attempt, err)
+}
+
+// GetAttemptToken returns the token to be released after the attempt completes.
+// The release token will add NoRetryIncrement to the RateLimiter token pool if
+// the attempt was successful. If the attempt failed, nothing will be done.
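+//
+// Illustrative sketch (not part of the upstream source) of the release flow,
+// where doAttempt is a hypothetical operation:
+//
+//	release, _ := s.GetAttemptToken(ctx)
+//	err := doAttempt()
+//	_ = release(err) // pays back NoRetryIncrement only when err == nil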
+func (s *Standard) GetAttemptToken(context.Context) (func(error) error, error) {
+	return s.GetInitialToken(), nil
+}
+
+// GetInitialToken returns a token for adding the NoRetryIncrement to the
+// RateLimiter token if the attempt completed successfully without error.
+//
+// InitialToken applies to the result of each attempt, including the first.
+// Whereas the RetryToken applies to the result of subsequent attempts.
+//
+// Deprecated: use GetAttemptToken instead.
+func (s *Standard) GetInitialToken() func(error) error {
+	return releaseToken(s.noRetryIncrement).release
+}
+
+func (s *Standard) noRetryIncrement() error {
+	return s.options.RateLimiter.AddTokens(s.options.NoRetryIncrement)
+}
+
+// GetRetryToken attempts to deduct the retry cost from the retry token pool.
+// It returns the token release function, or an error.
+func (s *Standard) GetRetryToken(ctx context.Context, opErr error) (func(error) error, error) {
+	cost := s.options.RetryCost
+
+	if s.timeout.IsErrorTimeout(opErr).Bool() {
+		cost = s.options.RetryTimeoutCost
+	}
+
+	fn, err := s.options.RateLimiter.GetToken(ctx, cost)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get rate limit token, %w", err)
+	}
+
+	return releaseToken(fn).release, nil
+}
+
+func nopRelease(error) error { return nil }
+
+type releaseToken func() error
+
+func (f releaseToken) release(err error) error {
+	if err != nil {
+		return nil
+	}
+
+	return f()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/throttle_error.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/throttle_error.go
new file mode 100644
index 00000000000..c4b844d15f1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/throttle_error.go
@@ -0,0 +1,60 @@
+package retry
+
+import (
+	"errors"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+)
+
+// IsErrorThrottle provides the interface of an implementation to determine if
+// an error response from an operation is a throttling error.
+type IsErrorThrottle interface {
+	IsErrorThrottle(error) aws.Ternary
+}
+
+// IsErrorThrottles is a collection of checks to determine if the error is a
+// throttle error. Iterates through the checks and returns the state of
+// throttle if any check returns something other than unknown.
+type IsErrorThrottles []IsErrorThrottle
+
+// IsErrorThrottle returns if the error is a throttle error if any of the
+// checks in the list return a value other than unknown.
+func (r IsErrorThrottles) IsErrorThrottle(err error) aws.Ternary {
+	for _, re := range r {
+		if v := re.IsErrorThrottle(err); v != aws.UnknownTernary {
+			return v
+		}
+	}
+	return aws.UnknownTernary
+}
+
+// IsErrorThrottleFunc wraps a function with the IsErrorThrottle interface.
+type IsErrorThrottleFunc func(error) aws.Ternary
+
+// IsErrorThrottle returns if the error is a throttle error.
+func (fn IsErrorThrottleFunc) IsErrorThrottle(err error) aws.Ternary {
+	return fn(err)
+}
+
+// ThrottleErrorCode determines if an attempt should be retried based on the
+// API error code.
+type ThrottleErrorCode struct {
+	Codes map[string]struct{}
+}
+
+// IsErrorThrottle returns if the error is a throttle error based on the error
+// codes. Returns unknown if the error doesn't have a code or it is unknown.
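+//
+// Illustrative sketch (not part of the upstream source), using the package's
+// default throttle codes:
+//
+//	check := retry.ThrottleErrorCode{Codes: retry.DefaultThrottleErrorCodes}
+//	throttled := check.IsErrorThrottle(err) == aws.TrueTernary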
+func (r ThrottleErrorCode) IsErrorThrottle(err error) aws.Ternary {
+	var v interface{ ErrorCode() string }
+
+	if !errors.As(err, &v) {
+		return aws.UnknownTernary
+	}
+
+	_, ok := r.Codes[v.ErrorCode()]
+	if !ok {
+		return aws.UnknownTernary
+	}
+
+	return aws.TrueTernary
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/timeout_error.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/timeout_error.go
new file mode 100644
index 00000000000..3d47870d2dc
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/timeout_error.go
@@ -0,0 +1,52 @@
+package retry
+
+import (
+	"errors"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+)
+
+// IsErrorTimeout provides the interface of an implementation to determine if
+// an error is a timeout error.
+type IsErrorTimeout interface {
+	IsErrorTimeout(err error) aws.Ternary
+}
+
+// IsErrorTimeouts is a collection of checks to determine if the error is a
+// timeout. Iterates through the checks and returns the state of timeout
+// if any check returns something other than unknown.
+type IsErrorTimeouts []IsErrorTimeout
+
+// IsErrorTimeout returns if the error is a timeout if any of the checks in
+// the list return a value other than unknown.
+func (ts IsErrorTimeouts) IsErrorTimeout(err error) aws.Ternary {
+	for _, t := range ts {
+		if v := t.IsErrorTimeout(err); v != aws.UnknownTernary {
+			return v
+		}
+	}
+	return aws.UnknownTernary
+}
+
+// IsErrorTimeoutFunc wraps a function with the IsErrorTimeout interface.
+type IsErrorTimeoutFunc func(error) aws.Ternary
+
+// IsErrorTimeout returns if the error is a timeout.
+func (fn IsErrorTimeoutFunc) IsErrorTimeout(err error) aws.Ternary {
+	return fn(err)
+}
+
+// TimeouterError provides the IsErrorTimeout implementation for determining if
+// an error is a timeout based on a type with the Timeout method.
+type TimeouterError struct{}
+
+// IsErrorTimeout returns if the error is a timeout error.
+func (t TimeouterError) IsErrorTimeout(err error) aws.Ternary {
+	var v interface{ Timeout() bool }
+
+	if !errors.As(err, &v) {
+		return aws.UnknownTernary
+	}
+
+	return aws.BoolTernary(v.Timeout())
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retryer.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retryer.go
new file mode 100644
index 00000000000..b0ba4cb2f08
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retryer.go
@@ -0,0 +1,127 @@
+package aws
+
+import (
+	"context"
+	"fmt"
+	"time"
+)
+
+// RetryMode provides the mode the API client will use to create a retryer
+// based on.
+type RetryMode string
+
+const (
+	// RetryModeStandard model provides rate limited retry attempts with
+	// exponential backoff delay.
+	RetryModeStandard RetryMode = "standard"
+
+	// RetryModeAdaptive model provides attempt send rate limiting on throttle
+	// responses in addition to standard mode's retry rate limiting.
+	//
+	// Adaptive retry mode is experimental and is subject to change in the
+	// future.
+	RetryModeAdaptive RetryMode = "adaptive"
+)
+
+// ParseRetryMode attempts to parse a RetryMode from the given string.
+// Returning an error if the value is not a known RetryMode.
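+//
+// For example (illustrative, not part of the upstream source):
+//
+//	mode, err := aws.ParseRetryMode("adaptive")
+//	// mode == aws.RetryModeAdaptive, err == nil; unknown values return an error.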
+func ParseRetryMode(v string) (mode RetryMode, err error) {
+	switch v {
+	case "standard":
+		return RetryModeStandard, nil
+	case "adaptive":
+		return RetryModeAdaptive, nil
+	default:
+		return mode, fmt.Errorf("unknown RetryMode, %v", v)
+	}
+}
+
+func (m RetryMode) String() string { return string(m) }
+
+// Retryer is an interface to determine if a given error from an
+// attempt should be retried, and if so what backoff delay to apply. The
+// default implementation used by most services is the retry package's Standard
+// type, which contains basic retry logic using exponential backoff.
+type Retryer interface {
+	// IsErrorRetryable returns if the failed attempt is retryable. This check
+	// should determine if the error can be retried, or if the error is
+	// terminal.
+	IsErrorRetryable(error) bool
+
+	// MaxAttempts returns the maximum number of attempts that can be made for
+	// an attempt before failing. A value of 0 implies that the attempt should
+	// be retried until it succeeds if the errors are retryable.
+	MaxAttempts() int
+
+	// RetryDelay returns the delay that should be used before retrying the
+	// attempt. Will return an error if the delay could not be determined.
+	RetryDelay(attempt int, opErr error) (time.Duration, error)
+
+	// GetRetryToken attempts to deduct the retry cost from the retry token pool.
+	// It returns the token release function, or an error.
+	GetRetryToken(ctx context.Context, opErr error) (releaseToken func(error) error, err error)
+
+	// GetInitialToken returns the initial attempt token that can increment the
+	// retry token pool if the attempt is successful.
+	GetInitialToken() (releaseToken func(error) error)
+}
+
+// RetryerV2 is an interface to determine if a given error from an attempt
+// should be retried, and if so what backoff delay to apply. The default
+// implementation used by most services is the retry package's Standard type,
+// which contains basic retry logic using exponential backoff.
+//
+// RetryerV2 replaces the Retryer interface, deprecating the GetInitialToken
+// method in favor of GetAttemptToken which takes a context, and can return an error.
+//
+// The SDK's retry package's Attempt middleware, and utilities will always
+// wrap a Retryer as a RetryerV2. Delegating to GetInitialToken, only if
+// GetAttemptToken is not implemented.
+type RetryerV2 interface {
+	Retryer
+
+	// GetInitialToken returns the initial attempt token that can increment the
+	// retry token pool if the attempt is successful.
+	//
+	// Deprecated: This method does not provide a way to block using Context,
+	// nor can it return an error. Use RetryerV2, and GetAttemptToken instead.
+	GetInitialToken() (releaseToken func(error) error)
+
+	// GetAttemptToken returns the send token that can be used to rate limit
+	// attempt calls. Will be used by the SDK's retry package's Attempt
+	// middleware to get a send token prior to making the attempt, and to
+	// release the send token after the attempt has been made.
+	GetAttemptToken(context.Context) (func(error) error, error)
+}
+
+// NopRetryer provides a RequestRetryDecider implementation that will flag
+// all attempt errors as not retryable, with a max attempts of 1.
+type NopRetryer struct{}
+
+// IsErrorRetryable returns false for all error values.
+func (NopRetryer) IsErrorRetryable(error) bool { return false }
+
+// MaxAttempts always returns 1 for the original attempt.
+func (NopRetryer) MaxAttempts() int { return 1 }
+
+// RetryDelay is not valid for the NopRetryer. Will always return an error.
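+//
+// (Illustrative, not part of the upstream source: a client that must never
+// retry can be handed aws.NopRetryer{} wherever an aws.Retryer is accepted.)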
+func (NopRetryer) RetryDelay(int, error) (time.Duration, error) { + return 0, fmt.Errorf("not retrying any attempt errors") +} + +// GetRetryToken returns a stub function that does nothing. +func (NopRetryer) GetRetryToken(context.Context, error) (func(error) error, error) { + return nopReleaseToken, nil +} + +// GetInitialToken returns a stub function that does nothing. +func (NopRetryer) GetInitialToken() func(error) error { + return nopReleaseToken +} + +// GetAttemptToken returns a stub function that does nothing. +func (NopRetryer) GetAttemptToken(context.Context) (func(error) error, error) { + return nopReleaseToken, nil +} + +func nopReleaseToken(error) error { return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/runtime.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/runtime.go new file mode 100644 index 00000000000..3af9b2b3361 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/runtime.go @@ -0,0 +1,14 @@ +package aws + +// ExecutionEnvironmentID is the AWS execution environment runtime identifier. +type ExecutionEnvironmentID string + +// RuntimeEnvironment is a collection of values that are determined at runtime +// based on the environment that the SDK is executing in. Some of these values +// may or may not be present based on the executing environment and certain SDK +// configuration properties that drive whether these values are populated.. +type RuntimeEnvironment struct { + EnvironmentIdentifier ExecutionEnvironmentID + Region string + EC2InstanceMetadataRegion string +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/cache.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/cache.go new file mode 100644 index 00000000000..cbf22f1d0b0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/cache.go @@ -0,0 +1,115 @@ +package v4 + +import ( + "strings" + "sync" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" +) + +func lookupKey(service, region string) string { + var s strings.Builder + s.Grow(len(region) + len(service) + 3) + s.WriteString(region) + s.WriteRune('/') + s.WriteString(service) + return s.String() +} + +type derivedKey struct { + AccessKey string + Date time.Time + Credential []byte +} + +type derivedKeyCache struct { + values map[string]derivedKey + mutex sync.RWMutex +} + +func newDerivedKeyCache() derivedKeyCache { + return derivedKeyCache{ + values: make(map[string]derivedKey), + } +} + +func (s *derivedKeyCache) Get(credentials aws.Credentials, service, region string, signingTime SigningTime) []byte { + key := lookupKey(service, region) + s.mutex.RLock() + if cred, ok := s.get(key, credentials, signingTime.Time); ok { + s.mutex.RUnlock() + return cred + } + s.mutex.RUnlock() + + s.mutex.Lock() + if cred, ok := s.get(key, credentials, signingTime.Time); ok { + s.mutex.Unlock() + return cred + } + cred := deriveKey(credentials.SecretAccessKey, service, region, signingTime) + entry := derivedKey{ + AccessKey: credentials.AccessKeyID, + Date: signingTime.Time, + Credential: cred, + } + s.values[key] = entry + s.mutex.Unlock() + + return cred +} + +func (s *derivedKeyCache) get(key string, credentials aws.Credentials, signingTime time.Time) ([]byte, bool) { + cacheEntry, ok := s.retrieveFromCache(key) + if ok && cacheEntry.AccessKey == credentials.AccessKeyID && isSameDay(signingTime, cacheEntry.Date) { + return cacheEntry.Credential, true + } + return nil, false +} + +func (s *derivedKeyCache) retrieveFromCache(key string) (derivedKey, bool) { + if v, ok := s.values[key]; ok { + 
return v, true + } + return derivedKey{}, false +} + +// SigningKeyDeriver derives a signing key from a set of credentials +type SigningKeyDeriver struct { + cache derivedKeyCache +} + +// NewSigningKeyDeriver returns a new SigningKeyDeriver +func NewSigningKeyDeriver() *SigningKeyDeriver { + return &SigningKeyDeriver{ + cache: newDerivedKeyCache(), + } +} + +// DeriveKey returns a derived signing key from the given credentials to be used with SigV4 signing. +func (k *SigningKeyDeriver) DeriveKey(credential aws.Credentials, service, region string, signingTime SigningTime) []byte { + return k.cache.Get(credential, service, region, signingTime) +} + +func deriveKey(secret, service, region string, t SigningTime) []byte { + hmacDate := HMACSHA256([]byte("AWS4"+secret), []byte(t.ShortTimeFormat())) + hmacRegion := HMACSHA256(hmacDate, []byte(region)) + hmacService := HMACSHA256(hmacRegion, []byte(service)) + return HMACSHA256(hmacService, []byte("aws4_request")) +} + +func isSameDay(x, y time.Time) bool { + xYear, xMonth, xDay := x.Date() + yYear, yMonth, yDay := y.Date() + + if xYear != yYear { + return false + } + + if xMonth != yMonth { + return false + } + + return xDay == yDay +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/const.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/const.go new file mode 100644 index 00000000000..a23cb003bf7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/const.go @@ -0,0 +1,40 @@ +package v4 + +// Signature Version 4 (SigV4) Constants +const ( + // EmptyStringSHA256 is the hex encoded sha256 value of an empty string + EmptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855` + + // UnsignedPayload indicates that the request payload body is unsigned + UnsignedPayload = "UNSIGNED-PAYLOAD" + + // AmzAlgorithmKey indicates the signing algorithm + AmzAlgorithmKey = "X-Amz-Algorithm" + + // AmzSecurityTokenKey indicates the security token to be used with temporary credentials + AmzSecurityTokenKey = "X-Amz-Security-Token" + + // AmzDateKey is the UTC timestamp for the request in the format YYYYMMDD'T'HHMMSS'Z' + AmzDateKey = "X-Amz-Date" + + // AmzCredentialKey is the access key ID and credential scope + AmzCredentialKey = "X-Amz-Credential" + + // AmzSignedHeadersKey is the set of headers signed for the request + AmzSignedHeadersKey = "X-Amz-SignedHeaders" + + // AmzSignatureKey is the query parameter to store the SigV4 signature + AmzSignatureKey = "X-Amz-Signature" + + // TimeFormat is the time format to be used in the X-Amz-Date header or query parameter + TimeFormat = "20060102T150405Z" + + // ShortTimeFormat is the shorten time format used in the credential scope + ShortTimeFormat = "20060102" + + // ContentSHAKey is the SHA256 of request body + ContentSHAKey = "X-Amz-Content-Sha256" + + // StreamingEventsPayload indicates that the request payload body is a signed event stream. 
+	StreamingEventsPayload = "STREAMING-AWS4-HMAC-SHA256-EVENTS"
+)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/header_rules.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/header_rules.go
new file mode 100644
index 00000000000..c61955ad5b9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/header_rules.go
@@ -0,0 +1,82 @@
+package v4
+
+import (
+	sdkstrings "github.com/aws/aws-sdk-go-v2/internal/strings"
+)
+
+// Rules houses a set of Rule values needed for validation of a
+// string value.
+type Rules []Rule
+
+// Rule is the interface a validation rule implements; IsValid reports
+// whether or not a value adheres to that Rule.
+type Rule interface {
+	IsValid(value string) bool
+}
+
+// IsValid iterates through all rules and reports whether any rule
+// applies to the value. Nested rules are supported.
+func (r Rules) IsValid(value string) bool {
+	for _, rule := range r {
+		if rule.IsValid(value) {
+			return true
+		}
+	}
+	return false
+}
+
+// MapRule is a generic Rule for maps.
+type MapRule map[string]struct{}
+
+// IsValid for the map Rule reports whether the value exists in the map.
+func (m MapRule) IsValid(value string) bool {
+	_, ok := m[value]
+	return ok
+}
+
+// AllowList is a generic Rule for allow listing.
+type AllowList struct {
+	Rule
+}
+
+// IsValid for AllowList checks if the value is within the AllowList.
+func (w AllowList) IsValid(value string) bool {
+	return w.Rule.IsValid(value)
+}
+
+// ExcludeList is a generic Rule for exclude listing.
+type ExcludeList struct {
+	Rule
+}
+
+// IsValid for ExcludeList checks if the value is not within the wrapped Rule.
+func (b ExcludeList) IsValid(value string) bool {
+	return !b.Rule.IsValid(value)
+}
+
+// Patterns is a list of strings to match against.
+type Patterns []string
+
+// IsValid for Patterns checks each pattern and reports whether a match has
+// been found.
+func (p Patterns) IsValid(value string) bool {
+	for _, pattern := range p {
+		if sdkstrings.HasPrefixFold(value, pattern) {
+			return true
+		}
+	}
+	return false
+}
+
+// InclusiveRules allows rules to depend on one another.
+type InclusiveRules []Rule
+
+// IsValid returns true only if all rules are true.
+func (r InclusiveRules) IsValid(value string) bool {
+	for _, rule := range r {
+		if !rule.IsValid(value) {
+			return false
+		}
+	}
+	return true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/headers.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/headers.go
new file mode 100644
index 00000000000..ca738f234b3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/headers.go
@@ -0,0 +1,71 @@
+package v4
+
+// IgnoredHeaders is a list of headers that are ignored during signing.
+var IgnoredHeaders = Rules{
+	ExcludeList{
+		MapRule{
+			"Authorization":   struct{}{},
+			"User-Agent":      struct{}{},
+			"X-Amzn-Trace-Id": struct{}{},
+			"Expect":          struct{}{},
+		},
+	},
+}
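// Editorial sketch (not part of the vendored patch): the rule types above live
// in an internal package and cannot be imported directly, so this
// self-contained sketch re-implements the same pattern to show how
// InclusiveRules{ExcludeList{...}, Patterns{...}} composes, mirroring the
// AllowedQueryHoisting variable defined just below (HasPrefixFold is
// approximated with a lower-cased prefix check).
package main

import (
	"fmt"
	"strings"
)

type Rule interface{ IsValid(value string) bool }

type MapRule map[string]struct{}

func (m MapRule) IsValid(v string) bool { _, ok := m[v]; return ok }

type ExcludeList struct{ Rule }

func (b ExcludeList) IsValid(v string) bool { return !b.Rule.IsValid(v) }

type Patterns []string

func (p Patterns) IsValid(v string) bool {
	for _, pat := range p {
		if strings.HasPrefix(strings.ToLower(v), strings.ToLower(pat)) {
			return true
		}
	}
	return false
}

type InclusiveRules []Rule

func (r InclusiveRules) IsValid(v string) bool {
	for _, rule := range r {
		if !rule.IsValid(v) {
			return false
		}
	}
	return true
}

func main() {
	signed := MapRule{"Content-Type": {}, "X-Amz-Acl": {}}
	// A header may be hoisted to the query string only if it is NOT a
	// required signed header AND matches the "X-Amz-" prefix.
	hoistable := InclusiveRules{ExcludeList{signed}, Patterns{"X-Amz-"}}

	fmt.Println(hoistable.IsValid("X-Amz-Expires")) // true: X-Amz-* and not signed
	fmt.Println(hoistable.IsValid("X-Amz-Acl"))     // false: required signed header
	fmt.Println(hoistable.IsValid("Content-Md5"))   // false: not X-Amz-*
}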
+// RequiredSignedHeaders is an allow list for building canonical headers.
+var RequiredSignedHeaders = Rules{
+	AllowList{
+		MapRule{
+			"Cache-Control":                         struct{}{},
+			"Content-Disposition":                   struct{}{},
+			"Content-Encoding":                      struct{}{},
+			"Content-Language":                      struct{}{},
+			"Content-Md5":                           struct{}{},
+			"Content-Type":                          struct{}{},
+			"Expires":                               struct{}{},
+			"If-Match":                              struct{}{},
+			"If-Modified-Since":                     struct{}{},
+			"If-None-Match":                         struct{}{},
+			"If-Unmodified-Since":                   struct{}{},
+			"Range":                                 struct{}{},
+			"X-Amz-Acl":                             struct{}{},
+			"X-Amz-Copy-Source":                     struct{}{},
+			"X-Amz-Copy-Source-If-Match":            struct{}{},
+			"X-Amz-Copy-Source-If-Modified-Since":   struct{}{},
+			"X-Amz-Copy-Source-If-None-Match":       struct{}{},
+			"X-Amz-Copy-Source-If-Unmodified-Since": struct{}{},
+			"X-Amz-Copy-Source-Range":               struct{}{},
+			"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{},
+			"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key":       struct{}{},
+			"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5":   struct{}{},
+			"X-Amz-Expected-Bucket-Owner":                                 struct{}{},
+			"X-Amz-Grant-Full-control":                                    struct{}{},
+			"X-Amz-Grant-Read":                                            struct{}{},
+			"X-Amz-Grant-Read-Acp":                                        struct{}{},
+			"X-Amz-Grant-Write":                                           struct{}{},
+			"X-Amz-Grant-Write-Acp":                                       struct{}{},
+			"X-Amz-Metadata-Directive":                                    struct{}{},
+			"X-Amz-Mfa":                                                   struct{}{},
+			"X-Amz-Request-Payer":                                         struct{}{},
+			"X-Amz-Server-Side-Encryption":                                struct{}{},
+			"X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id":                 struct{}{},
+			"X-Amz-Server-Side-Encryption-Context":                        struct{}{},
+			"X-Amz-Server-Side-Encryption-Customer-Algorithm":             struct{}{},
+			"X-Amz-Server-Side-Encryption-Customer-Key":                   struct{}{},
+			"X-Amz-Server-Side-Encryption-Customer-Key-Md5":               struct{}{},
+			"X-Amz-Storage-Class":                                         struct{}{},
+			"X-Amz-Website-Redirect-Location":                             struct{}{},
+			"X-Amz-Content-Sha256":                                        struct{}{},
+			"X-Amz-Tagging":                                               struct{}{},
+		},
+	},
+	Patterns{"X-Amz-Object-Lock-"},
+	Patterns{"X-Amz-Meta-"},
+}
+
+// AllowedQueryHoisting is an allow list of headers that may be hoisted to a
+// presigned request's query string: not required-signed and X-Amz- prefixed.
+var AllowedQueryHoisting = InclusiveRules{
+	ExcludeList{RequiredSignedHeaders},
+	Patterns{"X-Amz-"},
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/hmac.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/hmac.go
new file mode 100644
index 00000000000..e7fa7a1b1e6
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/hmac.go
@@ -0,0 +1,13 @@
+package v4
+
+import (
+	"crypto/hmac"
+	"crypto/sha256"
+)
+
+// HMACSHA256 computes an HMAC-SHA256 of data given the provided key.
+func HMACSHA256(key []byte, data []byte) []byte {
+	hash := hmac.New(sha256.New, key)
+	hash.Write(data)
+	return hash.Sum(nil)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/host.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/host.go
new file mode 100644
index 00000000000..bf93659a43f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/host.go
@@ -0,0 +1,75 @@
+package v4
+
+import (
+	"net/http"
+	"strings"
+)
+
+// SanitizeHostForHeader removes the default port from host and updates request.Host.
+func SanitizeHostForHeader(r *http.Request) {
+	host := getHost(r)
+	port := portOnly(host)
+	if port != "" && isDefaultPort(r.URL.Scheme, port) {
+		r.Host = stripPort(host)
+	}
+}
+
+// getHost returns the host from the request.
+func getHost(r *http.Request) string {
+	if r.Host != "" {
+		return r.Host
+	}
+
+	return r.URL.Host
+}
+
+// Hostname returns u.Host, without any port number.
+// +// If Host is an IPv6 literal with a port number, Hostname returns the +// IPv6 literal without the square brackets. IPv6 literals may include +// a zone identifier. +// +// Copied from the Go 1.8 standard library (net/url) +func stripPort(hostport string) string { + colon := strings.IndexByte(hostport, ':') + if colon == -1 { + return hostport + } + if i := strings.IndexByte(hostport, ']'); i != -1 { + return strings.TrimPrefix(hostport[:i], "[") + } + return hostport[:colon] +} + +// Port returns the port part of u.Host, without the leading colon. +// If u.Host doesn't contain a port, Port returns an empty string. +// +// Copied from the Go 1.8 standard library (net/url) +func portOnly(hostport string) string { + colon := strings.IndexByte(hostport, ':') + if colon == -1 { + return "" + } + if i := strings.Index(hostport, "]:"); i != -1 { + return hostport[i+len("]:"):] + } + if strings.Contains(hostport, "]") { + return "" + } + return hostport[colon+len(":"):] +} + +// Returns true if the specified URI is using the standard port +// (i.e. port 80 for HTTP URIs or 443 for HTTPS URIs) +func isDefaultPort(scheme, port string) bool { + if port == "" { + return true + } + + lowerCaseScheme := strings.ToLower(scheme) + if (lowerCaseScheme == "http" && port == "80") || (lowerCaseScheme == "https" && port == "443") { + return true + } + + return false +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/scope.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/scope.go new file mode 100644 index 00000000000..fc7887909e2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/scope.go @@ -0,0 +1,13 @@ +package v4 + +import "strings" + +// BuildCredentialScope builds the Signature Version 4 (SigV4) signing scope +func BuildCredentialScope(signingTime SigningTime, region, service string) string { + return strings.Join([]string{ + signingTime.ShortTimeFormat(), + region, + service, + "aws4_request", + }, "/") +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/time.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/time.go new file mode 100644 index 00000000000..1de06a765d1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/time.go @@ -0,0 +1,36 @@ +package v4 + +import "time" + +// SigningTime provides a wrapper around a time.Time which provides cached values for SigV4 signing. +type SigningTime struct { + time.Time + timeFormat string + shortTimeFormat string +} + +// NewSigningTime creates a new SigningTime given a time.Time +func NewSigningTime(t time.Time) SigningTime { + return SigningTime{ + Time: t, + } +} + +// TimeFormat provides a time formatted in the X-Amz-Date format. +func (m *SigningTime) TimeFormat() string { + return m.format(&m.timeFormat, TimeFormat) +} + +// ShortTimeFormat provides a time formatted of 20060102. 
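// Editorial sketch (not part of the vendored patch): deriveKey, earlier in
// this patch, builds the SigV4 signing key as a four-step HMAC-SHA256 chain
// over the short date, region, service, and the literal "aws4_request"
// terminator that BuildCredentialScope also appends. A self-contained
// reproduction with the standard library, since the internal package cannot
// be imported; the secret is AWS's well-known documentation example key.
package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"time"
)

func hmacSHA256(key, data []byte) []byte {
	h := hmac.New(sha256.New, key)
	h.Write(data)
	return h.Sum(nil)
}

func main() {
	secret := "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"
	// The credential scope uses the 20060102 short date only, which is why
	// the derived key cache above can be keyed per day (see isSameDay).
	shortDate := time.Date(2015, 8, 30, 0, 0, 0, 0, time.UTC).Format("20060102")

	k := hmacSHA256([]byte("AWS4"+secret), []byte(shortDate))
	k = hmacSHA256(k, []byte("us-east-1"))
	k = hmacSHA256(k, []byte("iam"))
	k = hmacSHA256(k, []byte("aws4_request"))

	fmt.Println(hex.EncodeToString(k))
}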
+func (m *SigningTime) ShortTimeFormat() string { + return m.format(&m.shortTimeFormat, ShortTimeFormat) +} + +func (m *SigningTime) format(target *string, format string) string { + if len(*target) > 0 { + return *target + } + v := m.Time.Format(format) + *target = v + return v +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/util.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/util.go new file mode 100644 index 00000000000..d025dbaa060 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/util.go @@ -0,0 +1,80 @@ +package v4 + +import ( + "net/url" + "strings" +) + +const doubleSpace = " " + +// StripExcessSpaces will rewrite the passed in slice's string values to not +// contain multiple side-by-side spaces. +func StripExcessSpaces(str string) string { + var j, k, l, m, spaces int + // Trim trailing spaces + for j = len(str) - 1; j >= 0 && str[j] == ' '; j-- { + } + + // Trim leading spaces + for k = 0; k < j && str[k] == ' '; k++ { + } + str = str[k : j+1] + + // Strip multiple spaces. + j = strings.Index(str, doubleSpace) + if j < 0 { + return str + } + + buf := []byte(str) + for k, m, l = j, j, len(buf); k < l; k++ { + if buf[k] == ' ' { + if spaces == 0 { + // First space. + buf[m] = buf[k] + m++ + } + spaces++ + } else { + // End of multiple spaces. + spaces = 0 + buf[m] = buf[k] + m++ + } + } + + return string(buf[:m]) +} + +// GetURIPath returns the escaped URI component from the provided URL. +func GetURIPath(u *url.URL) string { + var uriPath string + + if len(u.Opaque) > 0 { + const schemeSep, pathSep, queryStart = "//", "/", "?" + + opaque := u.Opaque + // Cut off the query string if present. + if idx := strings.Index(opaque, queryStart); idx >= 0 { + opaque = opaque[:idx] + } + + // Cutout the scheme separator if present. + if strings.Index(opaque, schemeSep) == 0 { + opaque = opaque[len(schemeSep):] + } + + // capture URI path starting with first path separator. 
+ if idx := strings.Index(opaque, pathSep); idx >= 0 { + uriPath = opaque[idx:] + } + } else { + uriPath = u.EscapedPath() + } + + if len(uriPath) == 0 { + uriPath = "/" + } + + return uriPath +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go new file mode 100644 index 00000000000..0fb9b24e4ac --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go @@ -0,0 +1,408 @@ +package v4 + +import ( + "context" + "crypto/sha256" + "encoding/hex" + "fmt" + "io" + "net/http" + "strings" + + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + v4Internal "github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4" + internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" + "github.com/aws/aws-sdk-go-v2/internal/sdk" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +const computePayloadHashMiddlewareID = "ComputePayloadHash" + +// HashComputationError indicates an error occurred while computing the signing hash +type HashComputationError struct { + Err error +} + +// Error is the error message +func (e *HashComputationError) Error() string { + return fmt.Sprintf("failed to compute payload hash: %v", e.Err) +} + +// Unwrap returns the underlying error if one is set +func (e *HashComputationError) Unwrap() error { + return e.Err +} + +// SigningError indicates an error condition occurred while performing SigV4 signing +type SigningError struct { + Err error +} + +func (e *SigningError) Error() string { + return fmt.Sprintf("failed to sign request: %v", e.Err) +} + +// Unwrap returns the underlying error cause +func (e *SigningError) Unwrap() error { + return e.Err +} + +// UseDynamicPayloadSigningMiddleware swaps the compute payload sha256 middleware with a resolver middleware that +// switches between unsigned and signed payload based on TLS state for request. +// This middleware should not be used for AWS APIs that do not support unsigned payload signing auth. +// By default, SDK uses this middleware for known AWS APIs that support such TLS based auth selection . +// +// Usage example - +// S3 PutObject API allows unsigned payload signing auth usage when TLS is enabled, and uses this middleware to +// dynamically switch between unsigned and signed payload based on TLS state for request. +func UseDynamicPayloadSigningMiddleware(stack *middleware.Stack) error { + _, err := stack.Build.Swap(computePayloadHashMiddlewareID, &dynamicPayloadSigningMiddleware{}) + return err +} + +// dynamicPayloadSigningMiddleware dynamically resolves the middleware that computes and set payload sha256 middleware. +type dynamicPayloadSigningMiddleware struct { +} + +// ID returns the resolver identifier +func (m *dynamicPayloadSigningMiddleware) ID() string { + return computePayloadHashMiddlewareID +} + +// HandleBuild sets a resolver that directs to the payload sha256 compute handler. 
+func (m *dynamicPayloadSigningMiddleware) HandleBuild( + ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, +) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + // if TLS is enabled, use unsigned payload when supported + if req.IsHTTPS() { + return (&unsignedPayload{}).HandleBuild(ctx, in, next) + } + + // else fall back to signed payload + return (&computePayloadSHA256{}).HandleBuild(ctx, in, next) +} + +// unsignedPayload sets the SigV4 request payload hash to unsigned. +// +// Will not set the Unsigned Payload magic SHA value, if a SHA has already been +// stored in the context. (e.g. application pre-computed SHA256 before making +// API call). +// +// This middleware does not check the X-Amz-Content-Sha256 header, if that +// header is serialized a middleware must translate it into the context. +type unsignedPayload struct{} + +// AddUnsignedPayloadMiddleware adds unsignedPayload to the operation +// middleware stack +func AddUnsignedPayloadMiddleware(stack *middleware.Stack) error { + return stack.Build.Add(&unsignedPayload{}, middleware.After) +} + +// ID returns the unsignedPayload identifier +func (m *unsignedPayload) ID() string { + return computePayloadHashMiddlewareID +} + +// HandleBuild sets the payload hash to be an unsigned payload +func (m *unsignedPayload) HandleBuild( + ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, +) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + // This should not compute the content SHA256 if the value is already + // known. (e.g. application pre-computed SHA256 before making API call). + // Does not have any tight coupling to the X-Amz-Content-Sha256 header, if + // that header is provided a middleware must translate it into the context. + contentSHA := GetPayloadHash(ctx) + if len(contentSHA) == 0 { + contentSHA = v4Internal.UnsignedPayload + } + + ctx = SetPayloadHash(ctx, contentSHA) + return next.HandleBuild(ctx, in) +} + +// computePayloadSHA256 computes SHA256 payload hash to sign. +// +// Will not set the Unsigned Payload magic SHA value, if a SHA has already been +// stored in the context. (e.g. application pre-computed SHA256 before making +// API call). +// +// This middleware does not check the X-Amz-Content-Sha256 header, if that +// header is serialized a middleware must translate it into the context. 
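// Editorial sketch (not part of the vendored patch): the payload-hash
// middlewares in this file are registered on and swapped out of a smithy-go
// middleware stack by their shared ID, using the Add/Swap helpers defined
// below. A minimal sketch, assuming smithy-go's exported middleware.NewStack
// and the build step's List helper.
package main

import (
	"fmt"

	v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
	"github.com/aws/smithy-go/middleware"
)

func main() {
	stack := middleware.NewStack("example", nil) // no request constructor needed for this sketch

	// Register the default SHA-256 payload hashing middleware...
	if err := v4.AddComputePayloadSHA256Middleware(stack); err != nil {
		panic(err)
	}
	// ...then replace it with the UNSIGNED-PAYLOAD sentinel, as an S3 client
	// might for HTTPS requests.
	if err := v4.SwapComputePayloadSHA256ForUnsignedPayloadMiddleware(stack); err != nil {
		panic(err)
	}

	fmt.Println(stack.Build.List()) // [ComputePayloadHash]
}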
+type computePayloadSHA256 struct{} + +// AddComputePayloadSHA256Middleware adds computePayloadSHA256 to the +// operation middleware stack +func AddComputePayloadSHA256Middleware(stack *middleware.Stack) error { + return stack.Build.Add(&computePayloadSHA256{}, middleware.After) +} + +// RemoveComputePayloadSHA256Middleware removes computePayloadSHA256 from the +// operation middleware stack +func RemoveComputePayloadSHA256Middleware(stack *middleware.Stack) error { + _, err := stack.Build.Remove(computePayloadHashMiddlewareID) + return err +} + +// ID is the middleware name +func (m *computePayloadSHA256) ID() string { + return computePayloadHashMiddlewareID +} + +// HandleBuild compute the payload hash for the request payload +func (m *computePayloadSHA256) HandleBuild( + ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, +) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &HashComputationError{ + Err: fmt.Errorf("unexpected request middleware type %T", in.Request), + } + } + + // This should not compute the content SHA256 if the value is already + // known. (e.g. application pre-computed SHA256 before making API call) + // Does not have any tight coupling to the X-Amz-Content-Sha256 header, if + // that header is provided a middleware must translate it into the context. + if contentSHA := GetPayloadHash(ctx); len(contentSHA) != 0 { + return next.HandleBuild(ctx, in) + } + + hash := sha256.New() + if stream := req.GetStream(); stream != nil { + _, err = io.Copy(hash, stream) + if err != nil { + return out, metadata, &HashComputationError{ + Err: fmt.Errorf("failed to compute payload hash, %w", err), + } + } + + if err := req.RewindStream(); err != nil { + return out, metadata, &HashComputationError{ + Err: fmt.Errorf("failed to seek body to start, %w", err), + } + } + } + + ctx = SetPayloadHash(ctx, hex.EncodeToString(hash.Sum(nil))) + + return next.HandleBuild(ctx, in) +} + +// SwapComputePayloadSHA256ForUnsignedPayloadMiddleware replaces the +// ComputePayloadSHA256 middleware with the UnsignedPayload middleware. +// +// Use this to disable computing the Payload SHA256 checksum and instead use +// UNSIGNED-PAYLOAD for the SHA256 value. +func SwapComputePayloadSHA256ForUnsignedPayloadMiddleware(stack *middleware.Stack) error { + _, err := stack.Build.Swap(computePayloadHashMiddlewareID, &unsignedPayload{}) + return err +} + +// contentSHA256Header sets the X-Amz-Content-Sha256 header value to +// the Payload hash stored in the context. +type contentSHA256Header struct{} + +// AddContentSHA256HeaderMiddleware adds ContentSHA256Header to the +// operation middleware stack +func AddContentSHA256HeaderMiddleware(stack *middleware.Stack) error { + return stack.Build.Insert(&contentSHA256Header{}, computePayloadHashMiddlewareID, middleware.After) +} + +// RemoveContentSHA256HeaderMiddleware removes contentSHA256Header middleware +// from the operation middleware stack +func RemoveContentSHA256HeaderMiddleware(stack *middleware.Stack) error { + _, err := stack.Build.Remove((*contentSHA256Header)(nil).ID()) + return err +} + +// ID returns the ContentSHA256HeaderMiddleware identifier +func (m *contentSHA256Header) ID() string { + return "SigV4ContentSHA256Header" +} + +// HandleBuild sets the X-Amz-Content-Sha256 header value to the Payload hash +// stored in the context. 
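// Editorial sketch (not part of the vendored patch): every middleware in this
// file coordinates through the payload hash stored on the middleware stack
// context. A sketch using only GetPayloadHash / SetPayloadHash, exported at
// the end of this file: pre-seed the context so the compute middleware
// leaves the hash untouched.
package main

import (
	"context"
	"crypto/sha256"
	"encoding/hex"
	"fmt"

	v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
)

func main() {
	body := []byte(`{"hello":"world"}`)
	sum := sha256.Sum256(body)

	ctx := v4.SetPayloadHash(context.Background(), hex.EncodeToString(sum[:]))
	fmt.Println(v4.GetPayloadHash(ctx)) // the pre-computed hash the signer will use
}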
+func (m *contentSHA256Header) HandleBuild( + ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, +) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &HashComputationError{Err: fmt.Errorf("unexpected request middleware type %T", in.Request)} + } + + req.Header.Set(v4Internal.ContentSHAKey, GetPayloadHash(ctx)) + + return next.HandleBuild(ctx, in) +} + +// SignHTTPRequestMiddlewareOptions is the configuration options for the SignHTTPRequestMiddleware middleware. +type SignHTTPRequestMiddlewareOptions struct { + CredentialsProvider aws.CredentialsProvider + Signer HTTPSigner + LogSigning bool +} + +// SignHTTPRequestMiddleware is a `FinalizeMiddleware` implementation for SigV4 HTTP Signing +type SignHTTPRequestMiddleware struct { + credentialsProvider aws.CredentialsProvider + signer HTTPSigner + logSigning bool +} + +// NewSignHTTPRequestMiddleware constructs a SignHTTPRequestMiddleware using the given Signer for signing requests +func NewSignHTTPRequestMiddleware(options SignHTTPRequestMiddlewareOptions) *SignHTTPRequestMiddleware { + return &SignHTTPRequestMiddleware{ + credentialsProvider: options.CredentialsProvider, + signer: options.Signer, + logSigning: options.LogSigning, + } +} + +// ID is the SignHTTPRequestMiddleware identifier +func (s *SignHTTPRequestMiddleware) ID() string { + return "Signing" +} + +// HandleFinalize will take the provided input and sign the request using the SigV4 authentication scheme +func (s *SignHTTPRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + if !haveCredentialProvider(s.credentialsProvider) { + return next.HandleFinalize(ctx, in) + } + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &SigningError{Err: fmt.Errorf("unexpected request middleware type %T", in.Request)} + } + + signingName, signingRegion := awsmiddleware.GetSigningName(ctx), awsmiddleware.GetSigningRegion(ctx) + payloadHash := GetPayloadHash(ctx) + if len(payloadHash) == 0 { + return out, metadata, &SigningError{Err: fmt.Errorf("computed payload hash missing from context")} + } + + credentials, err := s.credentialsProvider.Retrieve(ctx) + if err != nil { + return out, metadata, &SigningError{Err: fmt.Errorf("failed to retrieve credentials: %w", err)} + } + + signerOptions := []func(o *SignerOptions){ + func(o *SignerOptions) { + o.Logger = middleware.GetLogger(ctx) + o.LogSigning = s.logSigning + }, + } + + // existing DisableURIPathEscaping is equivalent in purpose + // to authentication scheme property DisableDoubleEncoding + disableDoubleEncoding, overridden := internalauth.GetDisableDoubleEncoding(ctx) + if overridden { + signerOptions = append(signerOptions, func(o *SignerOptions) { + o.DisableURIPathEscaping = disableDoubleEncoding + }) + } + + err = s.signer.SignHTTP(ctx, credentials, req.Request, payloadHash, signingName, signingRegion, sdk.NowTime(), signerOptions...) + if err != nil { + return out, metadata, &SigningError{Err: fmt.Errorf("failed to sign http request, %w", err)} + } + + ctx = awsmiddleware.SetSigningCredentials(ctx, credentials) + + return next.HandleFinalize(ctx, in) +} + +type streamingEventsPayload struct{} + +// AddStreamingEventsPayload adds the streamingEventsPayload middleware to the stack. 
+func AddStreamingEventsPayload(stack *middleware.Stack) error { + return stack.Build.Add(&streamingEventsPayload{}, middleware.After) +} + +func (s *streamingEventsPayload) ID() string { + return computePayloadHashMiddlewareID +} + +func (s *streamingEventsPayload) HandleBuild( + ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, +) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + contentSHA := GetPayloadHash(ctx) + if len(contentSHA) == 0 { + contentSHA = v4Internal.StreamingEventsPayload + } + + ctx = SetPayloadHash(ctx, contentSHA) + + return next.HandleBuild(ctx, in) +} + +// GetSignedRequestSignature attempts to extract the signature of the request. +// Returning an error if the request is unsigned, or unable to extract the +// signature. +func GetSignedRequestSignature(r *http.Request) ([]byte, error) { + const authHeaderSignatureElem = "Signature=" + + if auth := r.Header.Get(authorizationHeader); len(auth) != 0 { + ps := strings.Split(auth, ", ") + for _, p := range ps { + if idx := strings.Index(p, authHeaderSignatureElem); idx >= 0 { + sig := p[len(authHeaderSignatureElem):] + if len(sig) == 0 { + return nil, fmt.Errorf("invalid request signature authorization header") + } + return hex.DecodeString(sig) + } + } + } + + if sig := r.URL.Query().Get("X-Amz-Signature"); len(sig) != 0 { + return hex.DecodeString(sig) + } + + return nil, fmt.Errorf("request not signed") +} + +func haveCredentialProvider(p aws.CredentialsProvider) bool { + if p == nil { + return false + } + + return !aws.IsCredentialsProvider(p, (*aws.AnonymousCredentials)(nil)) +} + +type payloadHashKey struct{} + +// GetPayloadHash retrieves the payload hash to use for signing +// +// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues +// to clear all stack values. +func GetPayloadHash(ctx context.Context) (v string) { + v, _ = middleware.GetStackValue(ctx, payloadHashKey{}).(string) + return v +} + +// SetPayloadHash sets the payload hash to be used for signing the request +// +// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues +// to clear all stack values. +func SetPayloadHash(ctx context.Context, hash string) context.Context { + return middleware.WithStackValue(ctx, payloadHashKey{}, hash) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/presign_middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/presign_middleware.go new file mode 100644 index 00000000000..e1a06651243 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/presign_middleware.go @@ -0,0 +1,127 @@ +package v4 + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/internal/sdk" + "github.com/aws/smithy-go/middleware" + smithyHTTP "github.com/aws/smithy-go/transport/http" +) + +// HTTPPresigner is an interface to a SigV4 signer that can sign create a +// presigned URL for a HTTP requests. +type HTTPPresigner interface { + PresignHTTP( + ctx context.Context, credentials aws.Credentials, r *http.Request, + payloadHash string, service string, region string, signingTime time.Time, + optFns ...func(*SignerOptions), + ) (url string, signedHeader http.Header, err error) +} + +// PresignedHTTPRequest provides the URL and signed headers that are included +// in the presigned URL. 
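// Editorial sketch (not part of the vendored patch): GetSignedRequestSignature
// above recovers a signature from either the Authorization header or the
// X-Amz-Signature query parameter. Here it is paired with PresignHTTP,
// defined later in this patch; credentials and URL are illustrative only.
package main

import (
	"context"
	"encoding/hex"
	"fmt"
	"net/http"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
)

func main() {
	const emptyPayloadHash = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"

	creds := aws.Credentials{AccessKeyID: "AKID", SecretAccessKey: "SECRET"}
	req, err := http.NewRequest(http.MethodGet, "https://examplebucket.s3.amazonaws.com/test.txt", nil)
	if err != nil {
		panic(err)
	}

	url, _, err := v4.NewSigner().PresignHTTP(context.Background(), creds, req,
		emptyPayloadHash, "s3", "us-east-1", time.Now().UTC())
	if err != nil {
		panic(err)
	}

	// Round-trip: parse the presigned URL and extract its signature.
	signed, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		panic(err)
	}
	sig, err := v4.GetSignedRequestSignature(signed)
	if err != nil {
		panic(err)
	}
	fmt.Println(hex.EncodeToString(sig))
}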
+type PresignedHTTPRequest struct { + URL string + Method string + SignedHeader http.Header +} + +// PresignHTTPRequestMiddlewareOptions is the options for the PresignHTTPRequestMiddleware middleware. +type PresignHTTPRequestMiddlewareOptions struct { + CredentialsProvider aws.CredentialsProvider + Presigner HTTPPresigner + LogSigning bool +} + +// PresignHTTPRequestMiddleware provides the Finalize middleware for creating a +// presigned URL for an HTTP request. +// +// Will short circuit the middleware stack and not forward onto the next +// Finalize handler. +type PresignHTTPRequestMiddleware struct { + credentialsProvider aws.CredentialsProvider + presigner HTTPPresigner + logSigning bool +} + +// NewPresignHTTPRequestMiddleware returns a new PresignHTTPRequestMiddleware +// initialized with the presigner. +func NewPresignHTTPRequestMiddleware(options PresignHTTPRequestMiddlewareOptions) *PresignHTTPRequestMiddleware { + return &PresignHTTPRequestMiddleware{ + credentialsProvider: options.CredentialsProvider, + presigner: options.Presigner, + logSigning: options.LogSigning, + } +} + +// ID provides the middleware ID. +func (*PresignHTTPRequestMiddleware) ID() string { return "PresignHTTPRequest" } + +// HandleFinalize will take the provided input and create a presigned url for +// the http request using the SigV4 presign authentication scheme. +// +// Since the signed request is not a valid HTTP request +func (s *PresignHTTPRequestMiddleware) HandleFinalize( + ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, +) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyHTTP.Request) + if !ok { + return out, metadata, &SigningError{ + Err: fmt.Errorf("unexpected request middleware type %T", in.Request), + } + } + + httpReq := req.Build(ctx) + if !haveCredentialProvider(s.credentialsProvider) { + out.Result = &PresignedHTTPRequest{ + URL: httpReq.URL.String(), + Method: httpReq.Method, + SignedHeader: http.Header{}, + } + + return out, metadata, nil + } + + signingName := awsmiddleware.GetSigningName(ctx) + signingRegion := awsmiddleware.GetSigningRegion(ctx) + payloadHash := GetPayloadHash(ctx) + if len(payloadHash) == 0 { + return out, metadata, &SigningError{ + Err: fmt.Errorf("computed payload hash missing from context"), + } + } + + credentials, err := s.credentialsProvider.Retrieve(ctx) + if err != nil { + return out, metadata, &SigningError{ + Err: fmt.Errorf("failed to retrieve credentials: %w", err), + } + } + + u, h, err := s.presigner.PresignHTTP(ctx, credentials, + httpReq, payloadHash, signingName, signingRegion, sdk.NowTime(), + func(o *SignerOptions) { + o.Logger = middleware.GetLogger(ctx) + o.LogSigning = s.logSigning + }) + if err != nil { + return out, metadata, &SigningError{ + Err: fmt.Errorf("failed to sign http request, %w", err), + } + } + + out.Result = &PresignedHTTPRequest{ + URL: u, + Method: httpReq.Method, + SignedHeader: h, + } + + return out, metadata, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/stream.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/stream.go new file mode 100644 index 00000000000..66aa2bd6ab0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/stream.go @@ -0,0 +1,86 @@ +package v4 + +import ( + "context" + "crypto/sha256" + "encoding/hex" + "github.com/aws/aws-sdk-go-v2/aws" + v4Internal "github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4" + "strings" + "time" +) + +// EventStreamSigner is an AWS 
EventStream protocol signer.
+type EventStreamSigner interface {
+	GetSignature(ctx context.Context, headers, payload []byte, signingTime time.Time, optFns ...func(*StreamSignerOptions)) ([]byte, error)
+}
+
+// StreamSignerOptions is the configuration options for StreamSigner.
+type StreamSignerOptions struct{}
+
+// StreamSigner implements Signature Version 4 (SigV4) signing of event stream encoded payloads.
+type StreamSigner struct {
+	options StreamSignerOptions
+
+	credentials aws.Credentials
+	service     string
+	region      string
+
+	prevSignature []byte
+
+	signingKeyDeriver *v4Internal.SigningKeyDeriver
+}
+
+// NewStreamSigner returns a new AWS EventStream protocol signer.
+func NewStreamSigner(credentials aws.Credentials, service, region string, seedSignature []byte, optFns ...func(*StreamSignerOptions)) *StreamSigner {
+	o := StreamSignerOptions{}
+
+	for _, fn := range optFns {
+		fn(&o)
+	}
+
+	return &StreamSigner{
+		options:           o,
+		credentials:       credentials,
+		service:           service,
+		region:            region,
+		signingKeyDeriver: v4Internal.NewSigningKeyDeriver(),
+		prevSignature:     seedSignature,
+	}
+}
+
+// GetSignature signs the provided header and payload bytes.
+func (s *StreamSigner) GetSignature(ctx context.Context, headers, payload []byte, signingTime time.Time, optFns ...func(*StreamSignerOptions)) ([]byte, error) {
+	options := s.options
+
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	prevSignature := s.prevSignature
+
+	st := v4Internal.NewSigningTime(signingTime)
+
+	sigKey := s.signingKeyDeriver.DeriveKey(s.credentials, s.service, s.region, st)
+
+	scope := v4Internal.BuildCredentialScope(st, s.region, s.service)
+
+	stringToSign := s.buildEventStreamStringToSign(headers, payload, prevSignature, scope, &st)
+
+	signature := v4Internal.HMACSHA256(sigKey, []byte(stringToSign))
+	s.prevSignature = signature
+
+	return signature, nil
+}
+
+func (s *StreamSigner) buildEventStreamStringToSign(headers, payload, previousSignature []byte, credentialScope string, signingTime *v4Internal.SigningTime) string {
+	hash := sha256.New()
+	return strings.Join([]string{
+		"AWS4-HMAC-SHA256-PAYLOAD",
+		signingTime.TimeFormat(),
+		credentialScope,
+		hex.EncodeToString(previousSignature),
+		hex.EncodeToString(makeHash(hash, headers)),
+		hex.EncodeToString(makeHash(hash, payload)),
+	}, "\n")
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go
new file mode 100644
index 00000000000..4d162556bbf
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go
@@ -0,0 +1,548 @@
+// Package v4 implements signing for the AWS V4 signer (SigV4).
+//
+// Provides request signing for requests that need to be signed with
+// AWS V4 Signatures.
+//
+// # Standalone Signer
+//
+// Generally, using the signer outside of the SDK should not require any
+// additional logic. The signer escapes the request URI by taking advantage of
+// the URL.EscapedPath method. If your request URI requires additional escaping
+// you may need to use URL.Opaque to define what the raw URI should be sent to
+// the service as.
+//
+// The signer will first check the URL.Opaque field, and use its value if set.
+// The signer does require the URL.Opaque field to be set in the form of:
+//
+//	"//<hostname>/<path>"
+//
+//	// e.g.
+//	"//example.com/some/path"
+//
+// The leading "//" and hostname are required or the URL.Opaque escaping will
+// not work correctly.
+//
+// If URL.Opaque is not set the signer will fall back to the URL.EscapedPath()
+// method, using the returned value.
+//
+// AWS v4 signature validation requires that the canonical string's URI path
+// element must be the URI-escaped form of the HTTP request's path.
+// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
+//
+// The Go HTTP client will perform escaping automatically on the request. Some
+// of this escaping may cause signature validation errors because the HTTP
+// request differs from the URI path or query that the signature was generated
+// for.
+// https://golang.org/pkg/net/url/#URL.EscapedPath
+//
+// Because of this, when using the signer outside of the SDK it is recommended
+// to explicitly escape the request prior to signing; this will help prevent
+// signature validation errors. This can be done by setting URL.Opaque or
+// URL.RawPath. The SDK will use URL.Opaque first and then call
+// URL.EscapedPath() if Opaque is not set.
+//
+// Test `TestStandaloneSign` provides a complete example of using the signer
+// outside of the SDK and pre-escaping the URI path.
+package v4
+
+import (
+	"context"
+	"crypto/sha256"
+	"encoding/hex"
+	"fmt"
+	"hash"
+	"net/http"
+	"net/textproto"
+	"net/url"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	v4Internal "github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4"
+	"github.com/aws/smithy-go/encoding/httpbinding"
+	"github.com/aws/smithy-go/logging"
+)
+
+const (
+	signingAlgorithm    = "AWS4-HMAC-SHA256"
+	authorizationHeader = "Authorization"
+)
+
+// HTTPSigner is an interface to a SigV4 signer that can sign HTTP requests.
+type HTTPSigner interface {
+	SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*SignerOptions)) error
+}
+
+type keyDerivator interface {
+	DeriveKey(credential aws.Credentials, service, region string, signingTime v4Internal.SigningTime) []byte
+}
+
+// SignerOptions is the SigV4 Signer options.
+type SignerOptions struct {
+	// Disables the Signer's moving HTTP header key/value pairs from the HTTP
+	// request header to the request's query string. This is most commonly used
+	// with pre-signed requests, preventing headers from being added to the
+	// request's query string.
+	DisableHeaderHoisting bool
+
+	// Disables the automatic escaping of the URI path of the request for the
+	// signature's canonical string's path. For services that do not need
+	// additional escaping, use this to disable the signer escaping the path.
+	//
+	// S3 is an example of a service that does not need additional escaping.
+	//
+	// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
+	DisableURIPathEscaping bool
+
+	// The logger to send log messages to.
+	Logger logging.Logger
+
+	// Enable logging of signed requests.
+	// This will enable logging of the canonical request, the string to sign,
+	// and, for presigning, the subsequent presigned URL.
+	LogSigning bool
+}
+
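// Editorial sketch (not part of the vendored patch): standalone signing with
// the Signer defined below, assuming static credentials; the endpoint,
// service, and region values are illustrative only.
package main

import (
	"context"
	"fmt"
	"net/http"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
)

func main() {
	// Hex SHA-256 of an empty body (EmptyStringSHA256 earlier in this patch).
	const emptyPayloadHash = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"

	creds := aws.Credentials{AccessKeyID: "AKID", SecretAccessKey: "SECRET"}
	req, err := http.NewRequest(http.MethodGet, "https://sqs.us-west-2.amazonaws.com/", nil)
	if err != nil {
		panic(err)
	}

	// Sign in place: the Authorization header carries the signature.
	if err := v4.NewSigner().SignHTTP(context.Background(), creds, req,
		emptyPayloadHash, "sqs", "us-west-2", time.Now().UTC()); err != nil {
		panic(err)
	}

	fmt.Println(req.Header.Get("Authorization"))
}

+// Signer applies AWS v4 signing to a given request. Use this to sign requests
+// that need to be signed with AWS V4 Signatures.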
+type Signer struct { + options SignerOptions + keyDerivator keyDerivator +} + +// NewSigner returns a new SigV4 Signer +func NewSigner(optFns ...func(signer *SignerOptions)) *Signer { + options := SignerOptions{} + + for _, fn := range optFns { + fn(&options) + } + + return &Signer{options: options, keyDerivator: v4Internal.NewSigningKeyDeriver()} +} + +type httpSigner struct { + Request *http.Request + ServiceName string + Region string + Time v4Internal.SigningTime + Credentials aws.Credentials + KeyDerivator keyDerivator + IsPreSign bool + + PayloadHash string + + DisableHeaderHoisting bool + DisableURIPathEscaping bool +} + +func (s *httpSigner) Build() (signedRequest, error) { + req := s.Request + + query := req.URL.Query() + headers := req.Header + + s.setRequiredSigningFields(headers, query) + + // Sort Each Query Key's Values + for key := range query { + sort.Strings(query[key]) + } + + v4Internal.SanitizeHostForHeader(req) + + credentialScope := s.buildCredentialScope() + credentialStr := s.Credentials.AccessKeyID + "/" + credentialScope + if s.IsPreSign { + query.Set(v4Internal.AmzCredentialKey, credentialStr) + } + + unsignedHeaders := headers + if s.IsPreSign && !s.DisableHeaderHoisting { + var urlValues url.Values + urlValues, unsignedHeaders = buildQuery(v4Internal.AllowedQueryHoisting, headers) + for k := range urlValues { + query[k] = urlValues[k] + } + } + + host := req.URL.Host + if len(req.Host) > 0 { + host = req.Host + } + + signedHeaders, signedHeadersStr, canonicalHeaderStr := s.buildCanonicalHeaders(host, v4Internal.IgnoredHeaders, unsignedHeaders, s.Request.ContentLength) + + if s.IsPreSign { + query.Set(v4Internal.AmzSignedHeadersKey, signedHeadersStr) + } + + var rawQuery strings.Builder + rawQuery.WriteString(strings.Replace(query.Encode(), "+", "%20", -1)) + + canonicalURI := v4Internal.GetURIPath(req.URL) + if !s.DisableURIPathEscaping { + canonicalURI = httpbinding.EscapePath(canonicalURI, false) + } + + canonicalString := s.buildCanonicalString( + req.Method, + canonicalURI, + rawQuery.String(), + signedHeadersStr, + canonicalHeaderStr, + ) + + strToSign := s.buildStringToSign(credentialScope, canonicalString) + signingSignature, err := s.buildSignature(strToSign) + if err != nil { + return signedRequest{}, err + } + + if s.IsPreSign { + rawQuery.WriteString("&X-Amz-Signature=") + rawQuery.WriteString(signingSignature) + } else { + headers[authorizationHeader] = append(headers[authorizationHeader][:0], buildAuthorizationHeader(credentialStr, signedHeadersStr, signingSignature)) + } + + req.URL.RawQuery = rawQuery.String() + + return signedRequest{ + Request: req, + SignedHeaders: signedHeaders, + CanonicalString: canonicalString, + StringToSign: strToSign, + PreSigned: s.IsPreSign, + }, nil +} + +func buildAuthorizationHeader(credentialStr, signedHeadersStr, signingSignature string) string { + const credential = "Credential=" + const signedHeaders = "SignedHeaders=" + const signature = "Signature=" + const commaSpace = ", " + + var parts strings.Builder + parts.Grow(len(signingAlgorithm) + 1 + + len(credential) + len(credentialStr) + 2 + + len(signedHeaders) + len(signedHeadersStr) + 2 + + len(signature) + len(signingSignature), + ) + parts.WriteString(signingAlgorithm) + parts.WriteRune(' ') + parts.WriteString(credential) + parts.WriteString(credentialStr) + parts.WriteString(commaSpace) + parts.WriteString(signedHeaders) + parts.WriteString(signedHeadersStr) + parts.WriteString(commaSpace) + parts.WriteString(signature) + 
parts.WriteString(signingSignature) + return parts.String() +} + +// SignHTTP signs AWS v4 requests with the provided payload hash, service name, region the +// request is made to, and time the request is signed at. The signTime allows +// you to specify that a request is signed for the future, and cannot be +// used until then. +// +// The payloadHash is the hex encoded SHA-256 hash of the request payload, and +// must be provided. Even if the request has no payload (aka body). If the +// request has no payload you should use the hex encoded SHA-256 of an empty +// string as the payloadHash value. +// +// "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" +// +// Some services such as Amazon S3 accept alternative values for the payload +// hash, such as "UNSIGNED-PAYLOAD" for requests where the body will not be +// included in the request signature. +// +// https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-header-based-auth.html +// +// Sign differs from Presign in that it will sign the request using HTTP +// header values. This type of signing is intended for http.Request values that +// will not be shared, or are shared in a way the header values on the request +// will not be lost. +// +// The passed in request will be modified in place. +func (s Signer) SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(options *SignerOptions)) error { + options := s.options + + for _, fn := range optFns { + fn(&options) + } + + signer := &httpSigner{ + Request: r, + PayloadHash: payloadHash, + ServiceName: service, + Region: region, + Credentials: credentials, + Time: v4Internal.NewSigningTime(signingTime.UTC()), + DisableHeaderHoisting: options.DisableHeaderHoisting, + DisableURIPathEscaping: options.DisableURIPathEscaping, + KeyDerivator: s.keyDerivator, + } + + signedRequest, err := signer.Build() + if err != nil { + return err + } + + logSigningInfo(ctx, options, &signedRequest, false) + + return nil +} + +// PresignHTTP signs AWS v4 requests with the payload hash, service name, region +// the request is made to, and time the request is signed at. The signTime +// allows you to specify that a request is signed for the future, and cannot +// be used until then. +// +// Returns the signed URL and the map of HTTP headers that were included in the +// signature or an error if signing the request failed. For presigned requests +// these headers and their values must be included on the HTTP request when it +// is made. This is helpful to know what header values need to be shared with +// the party the presigned request will be distributed to. +// +// The payloadHash is the hex encoded SHA-256 hash of the request payload, and +// must be provided. Even if the request has no payload (aka body). If the +// request has no payload you should use the hex encoded SHA-256 of an empty +// string as the payloadHash value. +// +// "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" +// +// Some services such as Amazon S3 accept alternative values for the payload +// hash, such as "UNSIGNED-PAYLOAD" for requests where the body will not be +// included in the request signature. +// +// https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-header-based-auth.html +// +// PresignHTTP differs from SignHTTP in that it will sign the request using +// query string instead of header values. 
This allows you to share the +// Presigned Request's URL with third parties, or distribute it throughout your +// system with minimal dependencies. +// +// PresignHTTP will not set the expires time of the presigned request +// automatically. To specify the expire duration for a request add the +// "X-Amz-Expires" query parameter on the request with the value as the +// duration in seconds the presigned URL should be considered valid for. This +// parameter is not used by all AWS services, and is most notable used by +// Amazon S3 APIs. +// +// expires := 20 * time.Minute +// query := req.URL.Query() +// query.Set("X-Amz-Expires", strconv.FormatInt(int64(expires/time.Second), 10)) +// req.URL.RawQuery = query.Encode() +// +// This method does not modify the provided request. +func (s *Signer) PresignHTTP( + ctx context.Context, credentials aws.Credentials, r *http.Request, + payloadHash string, service string, region string, signingTime time.Time, + optFns ...func(*SignerOptions), +) (signedURI string, signedHeaders http.Header, err error) { + options := s.options + + for _, fn := range optFns { + fn(&options) + } + + signer := &httpSigner{ + Request: r.Clone(r.Context()), + PayloadHash: payloadHash, + ServiceName: service, + Region: region, + Credentials: credentials, + Time: v4Internal.NewSigningTime(signingTime.UTC()), + IsPreSign: true, + DisableHeaderHoisting: options.DisableHeaderHoisting, + DisableURIPathEscaping: options.DisableURIPathEscaping, + KeyDerivator: s.keyDerivator, + } + + signedRequest, err := signer.Build() + if err != nil { + return "", nil, err + } + + logSigningInfo(ctx, options, &signedRequest, true) + + signedHeaders = make(http.Header) + + // For the signed headers we canonicalize the header keys in the returned map. + // This avoids situations where can standard library double headers like host header. For example the standard + // library will set the Host header, even if it is present in lower-case form. + for k, v := range signedRequest.SignedHeaders { + key := textproto.CanonicalMIMEHeaderKey(k) + signedHeaders[key] = append(signedHeaders[key], v...) + } + + return signedRequest.Request.URL.String(), signedHeaders, nil +} + +func (s *httpSigner) buildCredentialScope() string { + return v4Internal.BuildCredentialScope(s.Time, s.Region, s.ServiceName) +} + +func buildQuery(r v4Internal.Rule, header http.Header) (url.Values, http.Header) { + query := url.Values{} + unsignedHeaders := http.Header{} + for k, h := range header { + if r.IsValid(k) { + query[k] = h + } else { + unsignedHeaders[k] = h + } + } + + return query, unsignedHeaders +} + +func (s *httpSigner) buildCanonicalHeaders(host string, rule v4Internal.Rule, header http.Header, length int64) (signed http.Header, signedHeaders, canonicalHeadersStr string) { + signed = make(http.Header) + + var headers []string + const hostHeader = "host" + headers = append(headers, hostHeader) + signed[hostHeader] = append(signed[hostHeader], host) + + const contentLengthHeader = "content-length" + if length > 0 { + headers = append(headers, contentLengthHeader) + signed[contentLengthHeader] = append(signed[contentLengthHeader], strconv.FormatInt(length, 10)) + } + + for k, v := range header { + if !rule.IsValid(k) { + continue // ignored header + } + if strings.EqualFold(k, contentLengthHeader) { + // prevent signing already handled content-length header. 
+ continue + } + + lowerCaseKey := strings.ToLower(k) + if _, ok := signed[lowerCaseKey]; ok { + // include additional values + signed[lowerCaseKey] = append(signed[lowerCaseKey], v...) + continue + } + + headers = append(headers, lowerCaseKey) + signed[lowerCaseKey] = v + } + sort.Strings(headers) + + signedHeaders = strings.Join(headers, ";") + + var canonicalHeaders strings.Builder + n := len(headers) + const colon = ':' + for i := 0; i < n; i++ { + if headers[i] == hostHeader { + canonicalHeaders.WriteString(hostHeader) + canonicalHeaders.WriteRune(colon) + canonicalHeaders.WriteString(v4Internal.StripExcessSpaces(host)) + } else { + canonicalHeaders.WriteString(headers[i]) + canonicalHeaders.WriteRune(colon) + // Trim out leading, trailing, and dedup inner spaces from signed header values. + values := signed[headers[i]] + for j, v := range values { + cleanedValue := strings.TrimSpace(v4Internal.StripExcessSpaces(v)) + canonicalHeaders.WriteString(cleanedValue) + if j < len(values)-1 { + canonicalHeaders.WriteRune(',') + } + } + } + canonicalHeaders.WriteRune('\n') + } + canonicalHeadersStr = canonicalHeaders.String() + + return signed, signedHeaders, canonicalHeadersStr +} + +func (s *httpSigner) buildCanonicalString(method, uri, query, signedHeaders, canonicalHeaders string) string { + return strings.Join([]string{ + method, + uri, + query, + canonicalHeaders, + signedHeaders, + s.PayloadHash, + }, "\n") +} + +func (s *httpSigner) buildStringToSign(credentialScope, canonicalRequestString string) string { + return strings.Join([]string{ + signingAlgorithm, + s.Time.TimeFormat(), + credentialScope, + hex.EncodeToString(makeHash(sha256.New(), []byte(canonicalRequestString))), + }, "\n") +} + +func makeHash(hash hash.Hash, b []byte) []byte { + hash.Reset() + hash.Write(b) + return hash.Sum(nil) +} + +func (s *httpSigner) buildSignature(strToSign string) (string, error) { + key := s.KeyDerivator.DeriveKey(s.Credentials, s.ServiceName, s.Region, s.Time) + return hex.EncodeToString(v4Internal.HMACSHA256(key, []byte(strToSign))), nil +} + +func (s *httpSigner) setRequiredSigningFields(headers http.Header, query url.Values) { + amzDate := s.Time.TimeFormat() + + if s.IsPreSign { + query.Set(v4Internal.AmzAlgorithmKey, signingAlgorithm) + if sessionToken := s.Credentials.SessionToken; len(sessionToken) > 0 { + query.Set("X-Amz-Security-Token", sessionToken) + } + + query.Set(v4Internal.AmzDateKey, amzDate) + return + } + + headers[v4Internal.AmzDateKey] = append(headers[v4Internal.AmzDateKey][:0], amzDate) + + if len(s.Credentials.SessionToken) > 0 { + headers[v4Internal.AmzSecurityTokenKey] = append(headers[v4Internal.AmzSecurityTokenKey][:0], s.Credentials.SessionToken) + } +} + +func logSigningInfo(ctx context.Context, options SignerOptions, request *signedRequest, isPresign bool) { + if !options.LogSigning { + return + } + signedURLMsg := "" + if isPresign { + signedURLMsg = fmt.Sprintf(logSignedURLMsg, request.Request.URL.String()) + } + logger := logging.WithContext(ctx, options.Logger) + logger.Logf(logging.Debug, logSignInfoMsg, request.CanonicalString, request.StringToSign, signedURLMsg) +} + +type signedRequest struct { + Request *http.Request + SignedHeaders http.Header + CanonicalString string + StringToSign string + PreSigned bool +} + +const logSignInfoMsg = `Request Signature: +---[ CANONICAL STRING ]----------------------------- +%s +---[ STRING TO SIGN ]-------------------------------- +%s%s +-----------------------------------------------------` +const logSignedURLMsg = ` 
+---[ SIGNED URL ]------------------------------------ +%s` diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/to_ptr.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/to_ptr.go new file mode 100644 index 00000000000..f3fc4d610dc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/to_ptr.go @@ -0,0 +1,297 @@ +// Code generated by aws/generate.go DO NOT EDIT. + +package aws + +import ( + "github.com/aws/smithy-go/ptr" + "time" +) + +// Bool returns a pointer value for the bool value passed in. +func Bool(v bool) *bool { + return ptr.Bool(v) +} + +// BoolSlice returns a slice of bool pointers from the values +// passed in. +func BoolSlice(vs []bool) []*bool { + return ptr.BoolSlice(vs) +} + +// BoolMap returns a map of bool pointers from the values +// passed in. +func BoolMap(vs map[string]bool) map[string]*bool { + return ptr.BoolMap(vs) +} + +// Byte returns a pointer value for the byte value passed in. +func Byte(v byte) *byte { + return ptr.Byte(v) +} + +// ByteSlice returns a slice of byte pointers from the values +// passed in. +func ByteSlice(vs []byte) []*byte { + return ptr.ByteSlice(vs) +} + +// ByteMap returns a map of byte pointers from the values +// passed in. +func ByteMap(vs map[string]byte) map[string]*byte { + return ptr.ByteMap(vs) +} + +// String returns a pointer value for the string value passed in. +func String(v string) *string { + return ptr.String(v) +} + +// StringSlice returns a slice of string pointers from the values +// passed in. +func StringSlice(vs []string) []*string { + return ptr.StringSlice(vs) +} + +// StringMap returns a map of string pointers from the values +// passed in. +func StringMap(vs map[string]string) map[string]*string { + return ptr.StringMap(vs) +} + +// Int returns a pointer value for the int value passed in. +func Int(v int) *int { + return ptr.Int(v) +} + +// IntSlice returns a slice of int pointers from the values +// passed in. +func IntSlice(vs []int) []*int { + return ptr.IntSlice(vs) +} + +// IntMap returns a map of int pointers from the values +// passed in. +func IntMap(vs map[string]int) map[string]*int { + return ptr.IntMap(vs) +} + +// Int8 returns a pointer value for the int8 value passed in. +func Int8(v int8) *int8 { + return ptr.Int8(v) +} + +// Int8Slice returns a slice of int8 pointers from the values +// passed in. +func Int8Slice(vs []int8) []*int8 { + return ptr.Int8Slice(vs) +} + +// Int8Map returns a map of int8 pointers from the values +// passed in. +func Int8Map(vs map[string]int8) map[string]*int8 { + return ptr.Int8Map(vs) +} + +// Int16 returns a pointer value for the int16 value passed in. +func Int16(v int16) *int16 { + return ptr.Int16(v) +} + +// Int16Slice returns a slice of int16 pointers from the values +// passed in. +func Int16Slice(vs []int16) []*int16 { + return ptr.Int16Slice(vs) +} + +// Int16Map returns a map of int16 pointers from the values +// passed in. +func Int16Map(vs map[string]int16) map[string]*int16 { + return ptr.Int16Map(vs) +} + +// Int32 returns a pointer value for the int32 value passed in. +func Int32(v int32) *int32 { + return ptr.Int32(v) +} + +// Int32Slice returns a slice of int32 pointers from the values +// passed in. +func Int32Slice(vs []int32) []*int32 { + return ptr.Int32Slice(vs) +} + +// Int32Map returns a map of int32 pointers from the values +// passed in. +func Int32Map(vs map[string]int32) map[string]*int32 { + return ptr.Int32Map(vs) +} + +// Int64 returns a pointer value for the int64 value passed in. 
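+//
+// For example (illustrative sketch):
+//
+//	n := Int64(42)
+//	// *n == 42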
+func Int64(v int64) *int64 { + return ptr.Int64(v) +} + +// Int64Slice returns a slice of int64 pointers from the values +// passed in. +func Int64Slice(vs []int64) []*int64 { + return ptr.Int64Slice(vs) +} + +// Int64Map returns a map of int64 pointers from the values +// passed in. +func Int64Map(vs map[string]int64) map[string]*int64 { + return ptr.Int64Map(vs) +} + +// Uint returns a pointer value for the uint value passed in. +func Uint(v uint) *uint { + return ptr.Uint(v) +} + +// UintSlice returns a slice of uint pointers from the values +// passed in. +func UintSlice(vs []uint) []*uint { + return ptr.UintSlice(vs) +} + +// UintMap returns a map of uint pointers from the values +// passed in. +func UintMap(vs map[string]uint) map[string]*uint { + return ptr.UintMap(vs) +} + +// Uint8 returns a pointer value for the uint8 value passed in. +func Uint8(v uint8) *uint8 { + return ptr.Uint8(v) +} + +// Uint8Slice returns a slice of uint8 pointers from the values +// passed in. +func Uint8Slice(vs []uint8) []*uint8 { + return ptr.Uint8Slice(vs) +} + +// Uint8Map returns a map of uint8 pointers from the values +// passed in. +func Uint8Map(vs map[string]uint8) map[string]*uint8 { + return ptr.Uint8Map(vs) +} + +// Uint16 returns a pointer value for the uint16 value passed in. +func Uint16(v uint16) *uint16 { + return ptr.Uint16(v) +} + +// Uint16Slice returns a slice of uint16 pointers from the values +// passed in. +func Uint16Slice(vs []uint16) []*uint16 { + return ptr.Uint16Slice(vs) +} + +// Uint16Map returns a map of uint16 pointers from the values +// passed in. +func Uint16Map(vs map[string]uint16) map[string]*uint16 { + return ptr.Uint16Map(vs) +} + +// Uint32 returns a pointer value for the uint32 value passed in. +func Uint32(v uint32) *uint32 { + return ptr.Uint32(v) +} + +// Uint32Slice returns a slice of uint32 pointers from the values +// passed in. +func Uint32Slice(vs []uint32) []*uint32 { + return ptr.Uint32Slice(vs) +} + +// Uint32Map returns a map of uint32 pointers from the values +// passed in. +func Uint32Map(vs map[string]uint32) map[string]*uint32 { + return ptr.Uint32Map(vs) +} + +// Uint64 returns a pointer value for the uint64 value passed in. +func Uint64(v uint64) *uint64 { + return ptr.Uint64(v) +} + +// Uint64Slice returns a slice of uint64 pointers from the values +// passed in. +func Uint64Slice(vs []uint64) []*uint64 { + return ptr.Uint64Slice(vs) +} + +// Uint64Map returns a map of uint64 pointers from the values +// passed in. +func Uint64Map(vs map[string]uint64) map[string]*uint64 { + return ptr.Uint64Map(vs) +} + +// Float32 returns a pointer value for the float32 value passed in. +func Float32(v float32) *float32 { + return ptr.Float32(v) +} + +// Float32Slice returns a slice of float32 pointers from the values +// passed in. +func Float32Slice(vs []float32) []*float32 { + return ptr.Float32Slice(vs) +} + +// Float32Map returns a map of float32 pointers from the values +// passed in. +func Float32Map(vs map[string]float32) map[string]*float32 { + return ptr.Float32Map(vs) +} + +// Float64 returns a pointer value for the float64 value passed in. +func Float64(v float64) *float64 { + return ptr.Float64(v) +} + +// Float64Slice returns a slice of float64 pointers from the values +// passed in. +func Float64Slice(vs []float64) []*float64 { + return ptr.Float64Slice(vs) +} + +// Float64Map returns a map of float64 pointers from the values +// passed in. 
+func Float64Map(vs map[string]float64) map[string]*float64 {
+	return ptr.Float64Map(vs)
+}
+
+// Time returns a pointer value for the time.Time value passed in.
+func Time(v time.Time) *time.Time {
+	return ptr.Time(v)
+}
+
+// TimeSlice returns a slice of time.Time pointers from the values
+// passed in.
+func TimeSlice(vs []time.Time) []*time.Time {
+	return ptr.TimeSlice(vs)
+}
+
+// TimeMap returns a map of time.Time pointers from the values
+// passed in.
+func TimeMap(vs map[string]time.Time) map[string]*time.Time {
+	return ptr.TimeMap(vs)
+}
+
+// Duration returns a pointer value for the time.Duration value passed in.
+func Duration(v time.Duration) *time.Duration {
+	return ptr.Duration(v)
+}
+
+// DurationSlice returns a slice of time.Duration pointers from the values
+// passed in.
+func DurationSlice(vs []time.Duration) []*time.Duration {
+	return ptr.DurationSlice(vs)
+}
+
+// DurationMap returns a map of time.Duration pointers from the values
+// passed in.
+func DurationMap(vs map[string]time.Duration) map[string]*time.Duration {
+	return ptr.DurationMap(vs)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/client.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/client.go
new file mode 100644
index 00000000000..26d90719b2d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/client.go
@@ -0,0 +1,310 @@
+package http
+
+import (
+	"crypto/tls"
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"net"
+	"net/http"
+	"reflect"
+	"sync"
+	"time"
+)
+
+// Defaults for the HTTPTransportBuilder.
+var (
+	// Default connection pool options
+	DefaultHTTPTransportMaxIdleConns        = 100
+	DefaultHTTPTransportMaxIdleConnsPerHost = 10
+
+	// Default connection timeouts
+	DefaultHTTPTransportIdleConnTimeout       = 90 * time.Second
+	DefaultHTTPTransportTLSHandleshakeTimeout = 10 * time.Second
+	DefaultHTTPTransportExpectContinueTimeout = 1 * time.Second
+
+	// Default to TLS 1.2 for all HTTPS requests.
+	DefaultHTTPTransportTLSMinVersion uint16 = tls.VersionTLS12
+)
+
+// Timeouts for net.Dialer's network connection.
+var (
+	DefaultDialConnectTimeout   = 30 * time.Second
+	DefaultDialKeepAliveTimeout = 30 * time.Second
+)
+
+// BuildableClient provides an HTTPClient implementation with options to
+// create copies of the HTTPClient when additional configuration is provided.
+//
+// The client's methods will not share the http.Transport value between copies
+// of the BuildableClient. Only exported member values of the Transport and
+// optional Dialer will be copied between copies of BuildableClient.
+type BuildableClient struct {
+	transport *http.Transport
+	dialer    *net.Dialer
+
+	initOnce sync.Once
+
+	clientTimeout time.Duration
+	client        *http.Client
+}
+
+// NewBuildableClient returns an initialized client for invoking HTTP
+// requests.
+func NewBuildableClient() *BuildableClient {
+	return &BuildableClient{}
+}
+
+// Do implements the HTTPClient interface's Do method to invoke an HTTP
+// request, and receive the response. Uses the BuildableClient's current
+// configuration to invoke the http.Request.
+//
+// If connection pooling is enabled (aka HTTP KeepAlive) the client will only
+// share pooled connections with its own instance. Copies of the
+// BuildableClient will have their own connection pools.
+//
+// Redirect (3xx) responses will not be followed; the HTTP response received
+// will be returned instead.
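+//
+// A minimal usage sketch (illustrative; req is an assumed *http.Request):
+//
+//	client := NewBuildableClient().WithTimeout(10 * time.Second)
+//	resp, err := client.Do(req)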
+func (b *BuildableClient) Do(req *http.Request) (*http.Response, error) {
+	b.initOnce.Do(b.build)
+
+	return b.client.Do(req)
+}
+
+// Freeze returns a frozen aws.HTTPClient implementation that is no longer a BuildableClient.
+// Use this to prevent the SDK from applying DefaultMode configuration values to a buildable client.
+func (b *BuildableClient) Freeze() aws.HTTPClient {
+	cpy := b.clone()
+	cpy.build()
+	return cpy.client
+}
+
+func (b *BuildableClient) build() {
+	b.client = wrapWithLimitedRedirect(&http.Client{
+		Timeout:   b.clientTimeout,
+		Transport: b.GetTransport(),
+	})
+}
+
+func (b *BuildableClient) clone() *BuildableClient {
+	cpy := NewBuildableClient()
+	cpy.transport = b.GetTransport()
+	cpy.dialer = b.GetDialer()
+	cpy.clientTimeout = b.clientTimeout
+
+	return cpy
+}
+
+// WithTransportOptions copies the BuildableClient and returns it with the
+// http.Transport options applied.
+//
+// If something other than an *http.Transport was set as the round tripper,
+// the round tripper will be replaced with a default Transport value before
+// invoking the option functions.
+func (b *BuildableClient) WithTransportOptions(opts ...func(*http.Transport)) *BuildableClient {
+	cpy := b.clone()
+
+	tr := cpy.GetTransport()
+	for _, opt := range opts {
+		opt(tr)
+	}
+	cpy.transport = tr
+
+	return cpy
+}
+
+// WithDialerOptions copies the BuildableClient and returns it with the
+// net.Dialer options applied. Will set the client's http.Transport DialContext
+// member.
+func (b *BuildableClient) WithDialerOptions(opts ...func(*net.Dialer)) *BuildableClient {
+	cpy := b.clone()
+
+	dialer := cpy.GetDialer()
+	for _, opt := range opts {
+		opt(dialer)
+	}
+	cpy.dialer = dialer
+
+	tr := cpy.GetTransport()
+	tr.DialContext = cpy.dialer.DialContext
+	cpy.transport = tr
+
+	return cpy
+}
+
+// WithTimeout sets the timeout used by the client for all requests.
+func (b *BuildableClient) WithTimeout(timeout time.Duration) *BuildableClient {
+	cpy := b.clone()
+	cpy.clientTimeout = timeout
+	return cpy
+}
+
+// GetTransport returns a copy of the client's HTTP Transport.
+func (b *BuildableClient) GetTransport() *http.Transport {
+	var tr *http.Transport
+	if b.transport != nil {
+		tr = b.transport.Clone()
+	} else {
+		tr = defaultHTTPTransport()
+	}
+
+	return tr
+}
+
+// GetDialer returns a copy of the client's network dialer.
+func (b *BuildableClient) GetDialer() *net.Dialer {
+	var dialer *net.Dialer
+	if b.dialer != nil {
+		dialer = shallowCopyStruct(b.dialer).(*net.Dialer)
+	} else {
+		dialer = defaultDialer()
+	}
+
+	return dialer
+}
+
+// GetTimeout returns a copy of the client's timeout to cancel requests with.
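+//
+// For example (illustrative sketch):
+//
+//	d := NewBuildableClient().WithTimeout(5 * time.Second).GetTimeout()
+//	// d == 5 * time.Second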
+func (b *BuildableClient) GetTimeout() time.Duration {
+	return b.clientTimeout
+}
+
+func defaultDialer() *net.Dialer {
+	return &net.Dialer{
+		Timeout:   DefaultDialConnectTimeout,
+		KeepAlive: DefaultDialKeepAliveTimeout,
+		DualStack: true,
+	}
+}
+
+func defaultHTTPTransport() *http.Transport {
+	dialer := defaultDialer()
+
+	tr := &http.Transport{
+		Proxy:                 http.ProxyFromEnvironment,
+		DialContext:           dialer.DialContext,
+		TLSHandshakeTimeout:   DefaultHTTPTransportTLSHandleshakeTimeout,
+		MaxIdleConns:          DefaultHTTPTransportMaxIdleConns,
+		MaxIdleConnsPerHost:   DefaultHTTPTransportMaxIdleConnsPerHost,
+		IdleConnTimeout:       DefaultHTTPTransportIdleConnTimeout,
+		ExpectContinueTimeout: DefaultHTTPTransportExpectContinueTimeout,
+		ForceAttemptHTTP2:     true,
+		TLSClientConfig: &tls.Config{
+			MinVersion: DefaultHTTPTransportTLSMinVersion,
+		},
+	}
+
+	return tr
+}
+
+// shallowCopyStruct creates a shallow copy of the passed-in source struct, and
+// returns that copy of the same struct type.
+func shallowCopyStruct(src interface{}) interface{} {
+	srcVal := reflect.ValueOf(src)
+	srcValType := srcVal.Type()
+
+	var returnAsPtr bool
+	if srcValType.Kind() == reflect.Ptr {
+		srcVal = srcVal.Elem()
+		srcValType = srcValType.Elem()
+		returnAsPtr = true
+	}
+	dstVal := reflect.New(srcValType).Elem()
+
+	for i := 0; i < srcValType.NumField(); i++ {
+		ft := srcValType.Field(i)
+		if len(ft.PkgPath) != 0 {
+			// unexported fields have a PkgPath
+			continue
+		}
+
+		dstVal.Field(i).Set(srcVal.Field(i))
+	}
+
+	if returnAsPtr {
+		dstVal = dstVal.Addr()
+	}
+
+	return dstVal.Interface()
+}
+
+// wrapWithLimitedRedirect updates the Client's Transport and CheckRedirect to
+// not follow any redirect other than 307 and 308. No other redirect will be
+// followed.
+//
+// If the client does not have a Transport defined, a new SDK default
+// http.Transport configuration will be used.
+func wrapWithLimitedRedirect(c *http.Client) *http.Client {
+	tr := c.Transport
+	if tr == nil {
+		tr = defaultHTTPTransport()
+	}
+
+	cc := *c
+	cc.CheckRedirect = limitedRedirect
+	cc.Transport = suppressBadHTTPRedirectTransport{
+		tr: tr,
+	}
+
+	return &cc
+}
+
+// limitedRedirect is a CheckRedirect that prevents the client from following
+// any non 307/308 HTTP status code redirects.
+//
+// The 307 and 308 redirects are allowed because the client must use the
+// original HTTP method for the redirected-to location, whereas 301 and 302
+// allow the client to switch to GET for the redirect.
+//
+// Suppresses all redirect requests with a URL of badHTTPRedirectLocation.
+func limitedRedirect(r *http.Request, via []*http.Request) error {
+	// Request.Response, in CheckRedirect is the response that is triggering
+	// the redirect.
+	resp := r.Response
+	if r.URL.String() == badHTTPRedirectLocation {
+		resp.Header.Del(badHTTPRedirectLocation)
+		return http.ErrUseLastResponse
+	}
+
+	switch resp.StatusCode {
+	case 307, 308:
+		// Only allow 307 and 308 redirects as they preserve the method.
+		return nil
+	}
+
+	return http.ErrUseLastResponse
+}
+
+// suppressBadHTTPRedirectTransport provides an http.RoundTripper
+// implementation that wraps another http.RoundTripper to prevent the HTTP
+// client from receiving 301 and 302 redirect responses that are missing the
+// required Location header.
+//
+// Clients using this utility must have a CheckRedirect, e.g. limitedRedirect,
+// that checks for responses having a URL of badHTTPRedirectLocation, and
+// suppresses the redirect.
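+//
+// For example (illustrative), wrapWithLimitedRedirect pairs the two pieces:
+//
+//	client := wrapWithLimitedRedirect(&http.Client{})
+//	// client.CheckRedirect is limitedRedirect, and client.Transport is
+//	// wrapped by suppressBadHTTPRedirectTransport.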
+type suppressBadHTTPRedirectTransport struct {
+	tr http.RoundTripper
+}
+
+const badHTTPRedirectLocation = `https://amazonaws.com/badhttpredirectlocation`
+
+// RoundTrip backfills a stub location when a 301/302 response is received
+// without a location. This stub location is used by limitedRedirect to prevent
+// the HTTP client from failing when attempting to follow a redirect without a
+// location value.
+func (t suppressBadHTTPRedirectTransport) RoundTrip(r *http.Request) (*http.Response, error) {
+	resp, err := t.tr.RoundTrip(r)
+	if err != nil {
+		return resp, err
+	}
+
+	// S3 is the only known service to return 301 without location header.
+	// The Go standard library HTTP client will return an opaque error if it
+	// tries to follow a 301/302 response missing the location header.
+	switch resp.StatusCode {
+	case 301, 302:
+		if v := resp.Header.Get("Location"); len(v) == 0 {
+			resp.Header.Set("Location", badHTTPRedirectLocation)
+		}
+	}
+
+	return resp, err
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/content_type.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/content_type.go
new file mode 100644
index 00000000000..556f54a7f77
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/content_type.go
@@ -0,0 +1,42 @@
+package http
+
+import (
+	"context"
+	"fmt"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// removeContentTypeHeader is a build middleware that removes the content type
+// header if the content-length header is unset or is set to zero.
+type removeContentTypeHeader struct {
+}
+
+// ID the name of the middleware.
+func (m *removeContentTypeHeader) ID() string {
+	return "RemoveContentTypeHeader"
+}
+
+// HandleBuild removes the content-type header from the request when the
+// request has a content length of zero.
+func (m *removeContentTypeHeader) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) (
+	out middleware.BuildOutput, metadata middleware.Metadata, err error,
+) {
+	req, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown transport type %T", in)
+	}
+
+	// remove the content-type header when content-length is zero
+	if req.ContentLength == 0 {
+		req.Header.Del("content-type")
+	}
+
+	return next.HandleBuild(ctx, in)
+}
+
+// RemoveContentTypeHeader removes the content-type header if the
+// content length is unset or equal to zero.
+func RemoveContentTypeHeader(stack *middleware.Stack) error {
+	return stack.Build.Add(&removeContentTypeHeader{}, middleware.After)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error.go
new file mode 100644
index 00000000000..44651c9902d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error.go
@@ -0,0 +1,33 @@
+package http
+
+import (
+	"errors"
+	"fmt"
+
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// ResponseError provides the HTTP centric error type wrapping the underlying error
+// with the HTTP response value and the deserialized RequestID.
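+//
+// Callers can extract it with errors.As (illustrative sketch):
+//
+//	var re *ResponseError
+//	if errors.As(err, &re) {
+//		// re.Response.StatusCode and re.ServiceRequestID() are available here.
+//	}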
+type ResponseError struct { + *smithyhttp.ResponseError + + // RequestID associated with response error + RequestID string +} + +// ServiceRequestID returns the request id associated with Response Error +func (e *ResponseError) ServiceRequestID() string { return e.RequestID } + +// Error returns the formatted error +func (e *ResponseError) Error() string { + return fmt.Sprintf( + "https response error StatusCode: %d, RequestID: %s, %v", + e.Response.StatusCode, e.RequestID, e.Err) +} + +// As populates target and returns true if the type of target is a error type that +// the ResponseError embeds, (e.g.AWS HTTP ResponseError) +func (e *ResponseError) As(target interface{}) bool { + return errors.As(e.ResponseError, target) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error_middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error_middleware.go new file mode 100644 index 00000000000..8fd14cecd23 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error_middleware.go @@ -0,0 +1,54 @@ +package http + +import ( + "context" + + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// AddResponseErrorMiddleware adds response error wrapper middleware +func AddResponseErrorMiddleware(stack *middleware.Stack) error { + // add error wrapper middleware before request id retriever middleware so that it can wrap the error response + // returned by operation deserializers + return stack.Deserialize.Insert(&responseErrorWrapper{}, "RequestIDRetriever", middleware.Before) +} + +type responseErrorWrapper struct { +} + +// ID returns the middleware identifier +func (m *responseErrorWrapper) ID() string { + return "ResponseErrorWrapper" +} + +func (m *responseErrorWrapper) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err == nil { + // Nothing to do when there is no error. + return out, metadata, err + } + + resp, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + // No raw response to wrap with. + return out, metadata, err + } + + // look for request id in metadata + reqID, _ := awsmiddleware.GetRequestIDMetadata(metadata) + + // Wrap the returned smithy error with the request id retrieved from the metadata + err = &ResponseError{ + ResponseError: &smithyhttp.ResponseError{ + Response: resp, + Err: err, + }, + RequestID: reqID, + } + + return out, metadata, err +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/timeout_read_closer.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/timeout_read_closer.go new file mode 100644 index 00000000000..993929bd9b7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/timeout_read_closer.go @@ -0,0 +1,104 @@ +package http + +import ( + "context" + "fmt" + "io" + "time" + + "github.com/aws/smithy-go" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +type readResult struct { + n int + err error +} + +// ResponseTimeoutError is an error when the reads from the response are +// delayed longer than the timeout the read was configured for. 
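+//
+// For example (illustrative sketch), detecting a retryable read timeout:
+//
+//	var te *ResponseTimeoutError
+//	if errors.As(err, &te) && te.Timeout() {
+//		// the read timed out and may be retried
+//	}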
+type ResponseTimeoutError struct {
+	TimeoutDur time.Duration
+}
+
+// Timeout returns true, indicating the error was caused by a timeout and can
+// be retried.
+func (*ResponseTimeoutError) Timeout() bool { return true }
+
+func (e *ResponseTimeoutError) Error() string {
+	return fmt.Sprintf("read on body reached the timeout limit, %v", e.TimeoutDur)
+}
+
+// timeoutReadCloser will handle body reads that take too long.
+// We will return a ResponseTimeoutError if a timeout occurs.
+type timeoutReadCloser struct {
+	reader   io.ReadCloser
+	duration time.Duration
+}
+
+// Read will spin off a goroutine to call the reader's Read method. We will
+// select on the timer's channel or the read's channel. Whoever completes first
+// will be returned.
+func (r *timeoutReadCloser) Read(b []byte) (int, error) {
+	timer := time.NewTimer(r.duration)
+	c := make(chan readResult, 1)
+
+	go func() {
+		n, err := r.reader.Read(b)
+		timer.Stop()
+		c <- readResult{n: n, err: err}
+	}()
+
+	select {
+	case data := <-c:
+		return data.n, data.err
+	case <-timer.C:
+		return 0, &ResponseTimeoutError{TimeoutDur: r.duration}
+	}
+}
+
+func (r *timeoutReadCloser) Close() error {
+	return r.reader.Close()
+}
+
+// AddResponseReadTimeoutMiddleware adds a middleware to the stack that wraps the
+// response body so that a read that takes too long will return an error.
+func AddResponseReadTimeoutMiddleware(stack *middleware.Stack, duration time.Duration) error {
+	return stack.Deserialize.Add(&readTimeout{duration: duration}, middleware.After)
+}
+
+// readTimeout wraps the response body with a timeoutReadCloser
+type readTimeout struct {
+	duration time.Duration
+}
+
+// ID returns the id of the middleware
+func (*readTimeout) ID() string {
+	return "ReadResponseTimeout"
+}
+
+// HandleDeserialize implements the DeserializeMiddleware interface
+func (m *readTimeout) HandleDeserialize(
+	ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler,
+) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	response.Body = &timeoutReadCloser{
+		reader:   response.Body,
+		duration: m.duration,
+	}
+	out.RawResponse = response
+
+	return out, metadata, err
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/types.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/types.go
new file mode 100644
index 00000000000..cc3ae811402
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/types.go
@@ -0,0 +1,42 @@
+package aws
+
+import (
+	"fmt"
+)
+
+// Ternary is an enum allowing an unknown or none state in addition to a bool's
+// true and false.
+type Ternary int
+
+func (t Ternary) String() string {
+	switch t {
+	case UnknownTernary:
+		return "unknown"
+	case FalseTernary:
+		return "false"
+	case TrueTernary:
+		return "true"
+	default:
+		return fmt.Sprintf("unknown value, %d", int(t))
+	}
+}
+
+// Bool returns true if the value is TrueTernary, false otherwise.
+func (t Ternary) Bool() bool {
+	return t == TrueTernary
+}
+
+// Enumerations for the values of the Ternary type.
+const (
+	UnknownTernary Ternary = iota
+	FalseTernary
+	TrueTernary
+)
+
+// BoolTernary returns a true or false Ternary value for the bool provided.
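+//
+// For example (illustrative):
+//
+//	BoolTernary(true) == TrueTernary
+//	BoolTernary(false) == FalseTernary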
+func BoolTernary(v bool) Ternary { + if v { + return TrueTernary + } + return FalseTernary +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/version.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/version.go new file mode 100644 index 00000000000..5f729d45e1c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/version.go @@ -0,0 +1,8 @@ +// Package aws provides core functionality for making requests to AWS services. +package aws + +// SDKName is the name of this AWS SDK +const SDKName = "aws-sdk-go-v2" + +// SDKVersion is the version of this SDK +const SDKVersion = goModuleVersion diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md new file mode 100644 index 00000000000..cbe4ec7e50b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md @@ -0,0 +1,479 @@ +# v1.24.0 (2023-11-13) + +* **Feature**: Replace the legacy config parser with a modern, less-strict implementation. Parsing failures within a section will now simply ignore the invalid line rather than silently drop the entire section. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.23.0 (2023-11-09.2) + +* **Feature**: BREAKFIX: In order to support subproperty parsing, invalid property definitions must not be ignored +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.22.3 (2023-11-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.22.2 (2023-11-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.22.1 (2023-11-06) + +* No change notes available for this release. + +# v1.22.0 (2023-11-02) + +* **Feature**: Add env and shared config settings for disabling IMDSv1 fallback. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.21.0 (2023-11-01) + +* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.0 (2023-10-31) + +* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.19.1 (2023-10-24) + +* No change notes available for this release. + +# v1.19.0 (2023-10-16) + +* **Feature**: Modify logic of retrieving user agent appID from env config + +# v1.18.45 (2023-10-12) + +* **Bug Fix**: Fail to load config if an explicitly provided profile doesn't exist. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.44 (2023-10-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.43 (2023-10-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.42 (2023-09-22) + +* **Bug Fix**: Fixed a bug where merging `max_attempts` or `duration_seconds` fields across shared config files with invalid values would silently default them to 0. +* **Bug Fix**: Move type assertion of config values out of the parsing stage, which resolves an issue where the contents of a profile would silently be dropped with certain numeric formats. 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.41 (2023-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.40 (2023-09-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.39 (2023-09-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.38 (2023-08-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.37 (2023-08-23) + +* No change notes available for this release. + +# v1.18.36 (2023-08-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.35 (2023-08-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.34 (2023-08-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.33 (2023-08-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.32 (2023-08-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.31 (2023-07-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.30 (2023-07-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.29 (2023-07-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.28 (2023-07-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.27 (2023-06-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.26 (2023-06-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.25 (2023-05-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.24 (2023-05-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.23 (2023-05-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.22 (2023-04-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.21 (2023-04-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.20 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.19 (2023-03-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.18 (2023-03-16) + +* **Bug Fix**: Allow RoleARN to be set as functional option on STS WebIdentityRoleOptions. Fixes aws/aws-sdk-go-v2#2015. 
+
+# v1.18.17 (2023-03-14)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.16 (2023-03-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.15 (2023-02-22)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.14 (2023-02-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.13 (2023-02-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.12 (2023-02-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.11 (2023-02-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.10 (2023-01-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.9 (2023-01-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.8 (2023-01-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.7 (2022-12-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.6 (2022-12-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.5 (2022-12-15)
+
+* **Bug Fix**: Unify logic between shared config and credentials files in finding the home directory
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.4 (2022-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.3 (2022-11-22)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.2 (2022-11-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.1 (2022-11-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.0 (2022-11-11)
+
+* **Announcement**: When using the SSOTokenProvider, a previous implementation incorrectly compensated for invalid SSOTokenProvider configurations in the shared profile. This has been fixed via PR #1903 and tracked in issue #1846
+* **Feature**: Adds token refresh support (via SSOTokenProvider) when using the SSOCredentialProvider
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.11 (2022-11-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.10 (2022-10-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.9 (2022-10-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.8 (2022-09-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.7 (2022-09-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.6 (2022-09-14)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.5 (2022-09-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.4 (2022-08-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.3 (2022-08-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.2 (2022-08-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.1 (2022-08-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.0 (2022-08-14)
+
+* **Feature**: Add alternative mechanism for determining the user's `$HOME` or `%USERPROFILE%` location when the environment variables are not present.
+
+# v1.16.1 (2022-08-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.0 (2022-08-10)
+
+* **Feature**: Adds support for the following settings in the `~/.aws/credentials` file: `sso_account_id`, `sso_region`, `sso_role_name`, `sso_start_url`, and `ca_bundle`.
+
+# v1.15.17 (2022-08-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.16 (2022-08-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.15 (2022-08-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.14 (2022-07-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.13 (2022-07-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.12 (2022-06-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.11 (2022-06-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.10 (2022-06-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.9 (2022-05-26)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.8 (2022-05-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.7 (2022-05-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.6 (2022-05-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.5 (2022-05-09)
+
+* **Bug Fix**: Fixes a bug in LoadDefaultConfig to correctly assign ConfigSources so all config resolvers have access to the config sources. This fixes the feature/ec2/imds client not having configuration applied via config.LoadOptions such as EC2IMDSClientEnableState. PR [#1682](https://github.com/aws/aws-sdk-go-v2/pull/1682)
+
+# v1.15.4 (2022-04-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.3 (2022-03-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.2 (2022-03-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.1 (2022-03-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.0 (2022-03-08)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.0 (2022-02-24)
+
+* **Feature**: Adds support for loading RetryMaxAttempts and RetryMode from the environment and shared configuration files. These parameters drive how the SDK's API client will initialize its default retryer, if a custom retryer has not been specified. See the [config](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/config) module and [aws.Config](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws#Config) for more information about these new options and how to use them.
+* **Feature**: Adds support for the `ca_bundle` parameter in shared config and credentials files. The usage is the same as the `AWS_CA_BUNDLE` environment variable, but sourced from the shared config. Fixes [#1589](https://github.com/aws/aws-sdk-go-v2/issues/1589)
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.1 (2022-01-28)
+
+* **Bug Fix**: Fixes LoadDefaultConfig handling of errors returned by passed-in functional options. Previously errors returned from the LoadOptions passed into LoadDefaultConfig were incorrectly ignored. 
[#1562](https://github.com/aws/aws-sdk-go-v2/pull/1562). Thanks to [Pinglei Guo](https://github.com/pingleig) for submitting this PR.
+* **Bug Fix**: Fixes the SDK's handling of `duration_seconds` in the shared credentials file or specified in multiple shared config and shared credentials files under the same profile. [#1568](https://github.com/aws/aws-sdk-go-v2/pull/1568). Thanks to [Amir Szekely](https://github.com/kichik) for helping reproduce this bug.
+* **Bug Fix**: Updates the `config` module to use os.UserHomeDir instead of a hard-coded environment variable per OS. [#1563](https://github.com/aws/aws-sdk-go-v2/pull/1563)
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.0 (2022-01-14)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.0 (2022-01-07)
+
+* **Feature**: Add load option for CredentialCache. Adds a new member to the LoadOptions struct, CredentialsCacheOptions. This member allows specifying a function that will be used to configure the CredentialsCache. The CredentialsCacheOptions will only be used if the configuration loader will wrap the underlying credential provider in the CredentialsCache.
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.1 (2021-12-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.0 (2021-12-02)
+
+* **Feature**: Add support for specifying `EndpointResolverWithOptions` on `LoadOptions`, and associated `WithEndpointResolverWithOptions`.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.3 (2021-11-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.2 (2021-11-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.1 (2021-11-12)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.0 (2021-11-06)
+
+* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically.
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.0 (2021-10-21)
+
+* **Feature**: Updated to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.8.3 (2021-10-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.8.2 (2021-09-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.8.1 (2021-09-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.8.0 (2021-09-02)
+
+* **Feature**: Add support for S3 Multi-Region Access Point ARNs.
+
+# v1.7.0 (2021-08-27)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.6.1 (2021-08-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.6.0 (2021-08-04)
+
+* **Feature**: Adds error handling for deferred close calls
+* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.5.0 (2021-07-15)
+
+* **Feature**: Support has been added for EC2 IPv6-enabled Instance Metadata Service Endpoints.
+* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.1 (2021-07-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.0 (2021-06-25) + +* **Feature**: Adds configuration setting for enabling endpoint discovery. +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.0 (2021-05-20) + +* **Feature**: SSO credentials can now be defined alongside other credential providers within the same configuration profile. +* **Bug Fix**: Profile names were incorrectly normalized to lower-case, which could result in unexpected profile configurations. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.0 (2021-05-14) + +* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. +* **Dependency Update**: Updated to the latest SDK module versions + diff --git a/vendor/go.opentelemetry.io/collector/confmap/provider/envprovider/LICENSE b/vendor/github.com/aws/aws-sdk-go-v2/config/LICENSE.txt similarity index 100% rename from vendor/go.opentelemetry.io/collector/confmap/provider/envprovider/LICENSE rename to vendor/github.com/aws/aws-sdk-go-v2/config/LICENSE.txt diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/config.go b/vendor/github.com/aws/aws-sdk-go-v2/config/config.go new file mode 100644 index 00000000000..dfe62973221 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/config.go @@ -0,0 +1,213 @@ +package config + +import ( + "context" + "os" + + "github.com/aws/aws-sdk-go-v2/aws" +) + +// defaultAWSConfigResolvers are a slice of functions that will resolve external +// configuration values into AWS configuration values. +// +// This will setup the AWS configuration's Region, +var defaultAWSConfigResolvers = []awsConfigResolver{ + // Resolves the default configuration the SDK's aws.Config will be + // initialized with. + resolveDefaultAWSConfig, + + // Sets the logger to be used. Could be user provided logger, and client + // logging mode. + resolveLogger, + resolveClientLogMode, + + // Sets the HTTP client and configuration to use for making requests using + // the HTTP transport. + resolveHTTPClient, + resolveCustomCABundle, + + // Sets the endpoint resolving behavior the API Clients will use for making + // requests to. Clients default to their own clients this allows overrides + // to be specified. The resolveEndpointResolver option is deprecated, but + // we still need to set it for backwards compatibility on config + // construction. + resolveEndpointResolver, + resolveEndpointResolverWithOptions, + + // Sets the retry behavior API clients will use within their retry attempt + // middleware. Defaults to unset, allowing API clients to define their own + // retry behavior. + resolveRetryer, + + // Sets the region the API Clients should use for making requests to. + resolveRegion, + resolveEC2IMDSRegion, + resolveDefaultRegion, + + // Sets the additional set of middleware stack mutators that will custom + // API client request pipeline middleware. + resolveAPIOptions, + + // Resolves the DefaultsMode that should be used by SDK clients. If this + // mode is set to DefaultsModeAuto. + // + // Comes after HTTPClient and CustomCABundle to ensure the HTTP client is + // configured if provided before invoking IMDS if mode is auto. 
Comes
+	// before resolving credentials so that those subsequent clients use the
+	// configured auto mode.
+	resolveDefaultsModeOptions,
+
+	// Sets the resolved credentials the API clients will use for
+	// authentication. Provides the SDK's default credential chain.
+	//
+	// Should probably be the last step in the resolve chain to ensure that all
+	// other configurations are resolved first in case downstream credentials
+	// implementations depend on or can be configured with earlier resolved
+	// configuration options.
+	resolveCredentials,
+
+	// Sets the resolved bearer authentication token API clients will use for
+	// the httpBearerAuth authentication scheme.
+	resolveBearerAuthToken,
+
+	// Sets the SDK app ID if present in the shared config profile.
+	resolveAppID,
+
+	resolveBaseEndpoint,
+}
+
+// A Config represents a generic configuration value or set of values. This type
+// will be used by the AWSConfigResolvers to extract configuration data.
+//
+// Generally the Config type will use type assertion against the Provider interfaces
+// to extract specific data from the Config.
+type Config interface{}
+
+// A loader is used to load external configuration data and returns it as
+// a generic Config type.
+//
+// The loader should return an error if it fails to load the external configuration,
+// the configuration data is malformed, or required components are missing.
+type loader func(context.Context, configs) (Config, error)
+
+// An awsConfigResolver will extract configuration data from the configs slice
+// using the provider interfaces to extract specific functionality. The extracted
+// configuration values will be written to the AWS Config value.
+//
+// The resolver should return an error if it fails to extract the data, or the
+// data is malformed or incomplete.
+type awsConfigResolver func(ctx context.Context, cfg *aws.Config, configs configs) error
+
+// configs is a slice of Config values. These values will be used by the
+// AWSConfigResolvers to extract external configuration values to populate the
+// AWS Config type.
+//
+// Use AppendFromLoaders to add additional external Config values that are
+// loaded from external sources.
+//
+// Use ResolveAWSConfig after external Config values have been added or loaded
+// to extract the loaded configuration values into the AWS Config.
+type configs []Config
+
+// AppendFromLoaders iterates over the slice of loaders passed in, calling each
+// loader function in order. The external config value returned by the loader
+// will be added to the returned configs slice.
+//
+// If a loader returns an error this method will stop iterating and return
+// that error.
+func (cs configs) AppendFromLoaders(ctx context.Context, loaders []loader) (configs, error) {
+	for _, fn := range loaders {
+		cfg, err := fn(ctx, cs)
+		if err != nil {
+			return nil, err
+		}
+
+		cs = append(cs, cfg)
+	}
+
+	return cs, nil
+}
+
+// ResolveAWSConfig returns an AWS configuration populated with values by calling
+// the resolvers slice passed in. Each resolver is called in order. Any resolver
+// may overwrite the AWS Configuration value of a previous resolver.
+//
+// If a resolver returns an error this method will return that error, and stop
+// iterating over the resolvers.
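+//
+// A typical invocation (illustrative sketch):
+//
+//	cfg, err := cs.ResolveAWSConfig(ctx, defaultAWSConfigResolvers)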
+func (cs configs) ResolveAWSConfig(ctx context.Context, resolvers []awsConfigResolver) (aws.Config, error) {
+	var cfg aws.Config
+
+	for _, fn := range resolvers {
+		if err := fn(ctx, &cfg, cs); err != nil {
+			return aws.Config{}, err
+		}
+	}
+
+	return cfg, nil
+}
+
+// ResolveConfig calls the provided function, passing a slice of configuration sources.
+// This implements the aws.ConfigResolver interface.
+func (cs configs) ResolveConfig(f func(configs []interface{}) error) error {
+	var cfgs []interface{}
+	for i := range cs {
+		cfgs = append(cfgs, cs[i])
+	}
+	return f(cfgs)
+}
+
+// LoadDefaultConfig reads the SDK's default external configurations, and
+// populates an AWS Config with the values from the external configurations.
+//
+// An optional variadic set of additional Config values can be provided as input
+// that will be prepended to the configs slice. Use this to add custom configuration.
+// The custom configurations must satisfy the respective providers for their data
+// or the custom data will be ignored by the resolvers and config loaders.
+//
+//	cfg, err := config.LoadDefaultConfig( context.TODO(),
+//	   config.WithSharedConfigProfile("test-profile"),
+//	)
+//	if err != nil {
+//	   panic(fmt.Sprintf("failed loading config, %v", err))
+//	}
+//
+// The default configuration sources are:
+// * Environment Variables
+// * Shared Configuration and Shared Credentials files.
+func LoadDefaultConfig(ctx context.Context, optFns ...func(*LoadOptions) error) (cfg aws.Config, err error) {
+	var options LoadOptions
+	for _, optFn := range optFns {
+		if err := optFn(&options); err != nil {
+			return aws.Config{}, err
+		}
+	}
+
+	// assign Load Options to configs
+	var cfgCpy = configs{options}
+
+	cfgCpy, err = cfgCpy.AppendFromLoaders(ctx, resolveConfigLoaders(&options))
+	if err != nil {
+		return aws.Config{}, err
+	}
+
+	cfg, err = cfgCpy.ResolveAWSConfig(ctx, defaultAWSConfigResolvers)
+	if err != nil {
+		return aws.Config{}, err
+	}
+
+	return cfg, nil
+}
+
+func resolveConfigLoaders(options *LoadOptions) []loader {
+	loaders := make([]loader, 2)
+	loaders[0] = loadEnvConfig
+
+	// specification of a profile should cause a load failure if it doesn't exist
+	if os.Getenv(awsProfileEnvVar) != "" || options.SharedConfigProfile != "" {
+		loaders[1] = loadSharedConfig
+	} else {
+		loaders[1] = loadSharedConfigIgnoreNotExist
+	}
+
+	return loaders
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/defaultsmode.go b/vendor/github.com/aws/aws-sdk-go-v2/config/defaultsmode.go
new file mode 100644
index 00000000000..20b66367ffd
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/defaultsmode.go
@@ -0,0 +1,47 @@
+package config
+
+import (
+	"context"
+	"os"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
+)
+
+const execEnvVar = "AWS_EXECUTION_ENV"
+
+// DefaultsModeOptions is the set of options that are used to configure the
+// SDK's defaults mode.
+type DefaultsModeOptions struct {
+	// The SDK configuration defaults mode. Defaults to legacy if not specified.
+	//
+	// Supported modes are: auto, cross-region, in-region, legacy, mobile, standard
+	Mode aws.DefaultsMode
+
+	// The EC2 Instance Metadata Client that should be used when performing environment
+	// discovery when aws.DefaultsModeAuto is set.
+	//
+	// If not specified the SDK will construct a client if the instance metadata service has not been disabled by
+	// the AWS_EC2_METADATA_DISABLED environment variable.
+ IMDSClient *imds.Client +} + +func resolveDefaultsModeRuntimeEnvironment(ctx context.Context, envConfig *EnvConfig, client *imds.Client) (aws.RuntimeEnvironment, error) { + getRegionOutput, err := client.GetRegion(ctx, &imds.GetRegionInput{}) + // honor context timeouts, but if we couldn't talk to IMDS don't fail runtime environment introspection. + select { + case <-ctx.Done(): + return aws.RuntimeEnvironment{}, err + default: + } + + var imdsRegion string + if err == nil { + imdsRegion = getRegionOutput.Region + } + + return aws.RuntimeEnvironment{ + EnvironmentIdentifier: aws.ExecutionEnvironmentID(os.Getenv(execEnvVar)), + Region: envConfig.Region, + EC2InstanceMetadataRegion: imdsRegion, + }, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/config/doc.go new file mode 100644 index 00000000000..aab7164e283 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/doc.go @@ -0,0 +1,20 @@ +// Package config provides utilities for loading configuration from multiple +// sources that can be used to configure the SDK's API clients, and utilities. +// +// The config package will load configuration from environment variables, AWS +// shared configuration file (~/.aws/config), and AWS shared credentials file +// (~/.aws/credentials). +// +// Use the LoadDefaultConfig to load configuration from all the SDK's supported +// sources, and resolve credentials using the SDK's default credential chain. +// +// LoadDefaultConfig allows for a variadic list of additional Config sources that can +// provide one or more configuration values which can be used to programmatically control the resolution +// of a specific value, or allow for broader range of additional configuration sources not supported by the SDK. +// A Config source implements one or more provider interfaces defined in this package. Config sources passed in will +// take precedence over the default environment and shared config sources used by the SDK. If one or more Config sources +// implement the same provider interface, priority will be handled by the order in which the sources were passed in. +// +// A number of helpers (prefixed by “With“) are provided in this package that implement their respective provider +// interface. These helpers should be used for overriding configuration programmatically at runtime. +package config diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go b/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go new file mode 100644 index 00000000000..78bc1493372 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go @@ -0,0 +1,738 @@ +package config + +import ( + "bytes" + "context" + "fmt" + "io" + "io/ioutil" + "os" + "strconv" + "strings" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/feature/ec2/imds" +) + +// CredentialsSourceName provides a name of the provider when config is +// loaded from environment. +const CredentialsSourceName = "EnvConfigCredentials" + +// Environment variables that will be read for configuration values. 
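+//
+// For example (illustrative), a minimal static-credentials environment:
+//
+//	AWS_ACCESS_KEY_ID=AKID
+//	AWS_SECRET_ACCESS_KEY=SECRET
+//	AWS_REGION=us-west-2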
+const (
+	awsAccessKeyIDEnvVar = "AWS_ACCESS_KEY_ID"
+	awsAccessKeyEnvVar   = "AWS_ACCESS_KEY"
+
+	awsSecretAccessKeyEnvVar = "AWS_SECRET_ACCESS_KEY"
+	awsSecretKeyEnvVar       = "AWS_SECRET_KEY"
+
+	awsSessionTokenEnvVar = "AWS_SESSION_TOKEN"
+
+	awsContainerCredentialsEndpointEnvVar     = "AWS_CONTAINER_CREDENTIALS_FULL_URI"
+	awsContainerCredentialsRelativePathEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"
+	awsContainerPProviderAuthorizationEnvVar  = "AWS_CONTAINER_AUTHORIZATION_TOKEN"
+
+	awsRegionEnvVar        = "AWS_REGION"
+	awsDefaultRegionEnvVar = "AWS_DEFAULT_REGION"
+
+	awsProfileEnvVar        = "AWS_PROFILE"
+	awsDefaultProfileEnvVar = "AWS_DEFAULT_PROFILE"
+
+	awsSharedCredentialsFileEnvVar = "AWS_SHARED_CREDENTIALS_FILE"
+
+	awsConfigFileEnvVar = "AWS_CONFIG_FILE"
+
+	awsCustomCABundleEnvVar = "AWS_CA_BUNDLE"
+
+	awsWebIdentityTokenFilePathEnvVar = "AWS_WEB_IDENTITY_TOKEN_FILE"
+
+	awsRoleARNEnvVar         = "AWS_ROLE_ARN"
+	awsRoleSessionNameEnvVar = "AWS_ROLE_SESSION_NAME"
+
+	awsEnableEndpointDiscoveryEnvVar = "AWS_ENABLE_ENDPOINT_DISCOVERY"
+
+	awsS3UseARNRegionEnvVar = "AWS_S3_USE_ARN_REGION"
+
+	awsEc2MetadataServiceEndpointModeEnvVar = "AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE"
+
+	awsEc2MetadataServiceEndpointEnvVar = "AWS_EC2_METADATA_SERVICE_ENDPOINT"
+
+	awsEc2MetadataDisabled         = "AWS_EC2_METADATA_DISABLED"
+	awsEc2MetadataV1DisabledEnvVar = "AWS_EC2_METADATA_V1_DISABLED"
+
+	awsS3DisableMultiRegionAccessPointEnvVar = "AWS_S3_DISABLE_MULTIREGION_ACCESS_POINTS"
+
+	awsUseDualStackEndpoint = "AWS_USE_DUALSTACK_ENDPOINT"
+
+	awsUseFIPSEndpoint = "AWS_USE_FIPS_ENDPOINT"
+
+	awsDefaultMode = "AWS_DEFAULTS_MODE"
+
+	awsRetryMaxAttempts = "AWS_MAX_ATTEMPTS"
+	awsRetryMode        = "AWS_RETRY_MODE"
+	awsSdkAppID         = "AWS_SDK_UA_APP_ID"
+
+	awsIgnoreConfiguredEndpoints = "AWS_IGNORE_CONFIGURED_ENDPOINT_URLS"
+	awsEndpointURL               = "AWS_ENDPOINT_URL"
+)
+
+var (
+	credAccessEnvKeys = []string{
+		awsAccessKeyIDEnvVar,
+		awsAccessKeyEnvVar,
+	}
+	credSecretEnvKeys = []string{
+		awsSecretAccessKeyEnvVar,
+		awsSecretKeyEnvVar,
+	}
+	regionEnvKeys = []string{
+		awsRegionEnvVar,
+		awsDefaultRegionEnvVar,
+	}
+	profileEnvKeys = []string{
+		awsProfileEnvVar,
+		awsDefaultProfileEnvVar,
+	}
+)
+
+// EnvConfig is a collection of environment values the SDK will read its
+// configuration from. All environment values are optional, but some values,
+// such as credentials, require multiple values to be complete or the values
+// will be ignored.
+type EnvConfig struct {
+	// Environment configuration values. If set, both Access Key ID and Secret Access
+	// Key must be provided. A Session Token can optionally also be provided, but it is
+	// not required.
+	//
+	// # Access Key ID
+	// AWS_ACCESS_KEY_ID=AKID
+	// AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set.
+	//
+	// # Secret Access Key
+	// AWS_SECRET_ACCESS_KEY=SECRET
+	// AWS_SECRET_KEY=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set.
+	//
+	// # Session Token
+	// AWS_SESSION_TOKEN=TOKEN
+	Credentials aws.Credentials
+
+	// ContainerCredentialsEndpoint value is the HTTP enabled endpoint to retrieve credentials
+	// using the endpointcreds.Provider
+	ContainerCredentialsEndpoint string
+
+	// ContainerCredentialsRelativePath is the relative URI path that will be used when attempting to retrieve
+	// credentials from the container endpoint.
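+	//
+	// AWS_CONTAINER_CREDENTIALS_RELATIVE_URI=/task/credentials (illustrative value)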
+ ContainerCredentialsRelativePath string
+
+ // ContainerAuthorizationToken is the authorization token that will be included in the HTTP Authorization
+ // header when attempting to retrieve credentials from the container credentials endpoint.
+ ContainerAuthorizationToken string
+
+ // Region value will instruct the SDK where to make service API requests to. If it is
+ // not provided in the environment the region must be provided before a service
+ // client request is made.
+ //
+ // AWS_REGION=us-west-2
+ // AWS_DEFAULT_REGION=us-west-2
+ Region string
+
+ // Profile name the SDK should use when loading shared configuration from the
+ // shared configuration files. If not provided "default" will be used as the
+ // profile name.
+ //
+ // AWS_PROFILE=my_profile
+ // AWS_DEFAULT_PROFILE=my_profile
+ SharedConfigProfile string
+
+ // Shared credentials file path can be set to instruct the SDK to use an alternate
+ // file for the shared credentials. If not set the file will be loaded from
+ // $HOME/.aws/credentials on Linux/Unix based systems, and
+ // %USERPROFILE%\.aws\credentials on Windows.
+ //
+ // AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials
+ SharedCredentialsFile string
+
+ // Shared config file path can be set to instruct the SDK to use an alternate
+ // file for the shared config. If not set the file will be loaded from
+ // $HOME/.aws/config on Linux/Unix based systems, and
+ // %USERPROFILE%\.aws\config on Windows.
+ //
+ // AWS_CONFIG_FILE=$HOME/my_shared_config
+ SharedConfigFile string
+
+ // Sets the path to a custom Certificate Authority (CA) Bundle PEM file
+ // that the SDK will use instead of the system's root CA bundle.
+ // Only use this if you want to configure the SDK to use a custom set
+ // of CAs.
+ //
+ // Enabling this option will attempt to merge the Transport
+ // into the SDK's HTTP client. If the client's Transport is
+ // not an http.Transport an error will be returned. If the
+ // Transport's TLS config is set this option will cause the
+ // SDK to overwrite the Transport's TLS config's RootCAs value.
+ //
+ // Setting a custom HTTPClient in the aws.Config options will override this setting.
+ // To use this option and a custom HTTP client, the HTTP client needs to be provided
+ // when creating the config, not the service client.
+ //
+ // AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle
+ CustomCABundle string
+
+ // Enables endpoint discovery via environment variables.
+ //
+ // AWS_ENABLE_ENDPOINT_DISCOVERY=true
+ EnableEndpointDiscovery aws.EndpointDiscoveryEnableState
+
+ // Specifies the WebIdentity token the SDK should use to assume a role
+ // with.
+ //
+ // AWS_WEB_IDENTITY_TOKEN_FILE=file_path
+ WebIdentityTokenFilePath string
+
+ // Specifies the ARN of the IAM role to use when assuming a role.
+ //
+ // AWS_ROLE_ARN=role_arn
+ RoleARN string
+
+ // Specifies the IAM role session name to use when assuming a role.
+ //
+ // AWS_ROLE_SESSION_NAME=session_name
+ RoleSessionName string
+
+ // Specifies if the S3 service should allow ARNs to direct the region
+ // the client's requests are sent to.
+ //
+ // AWS_S3_USE_ARN_REGION=true
+ S3UseARNRegion *bool
+
+ // Specifies if the EC2 IMDS service client is enabled.
+ //
+ // AWS_EC2_METADATA_DISABLED=true
+ EC2IMDSClientEnableState imds.ClientEnableState
+
+ // Specifies if EC2 IMDSv1 fallback is disabled.
+ //
+ // AWS_EC2_METADATA_V1_DISABLED=true
+ EC2IMDSv1Disabled *bool
+
+ // Specifies the EC2 Instance Metadata Service default endpoint selection mode (IPv4 or IPv6)
+ //
+ // AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE=IPv6
+ EC2IMDSEndpointMode imds.EndpointModeState
+
+ // Specifies the EC2 Instance Metadata Service endpoint to use. If specified it overrides EC2IMDSEndpointMode.
+ //
+ // AWS_EC2_METADATA_SERVICE_ENDPOINT=http://fd00:ec2::254
+ EC2IMDSEndpoint string
+
+ // Specifies if the S3 service should disable multi-region access points
+ // support.
+ //
+ // AWS_S3_DISABLE_MULTIREGION_ACCESS_POINTS=true
+ S3DisableMultiRegionAccessPoints *bool
+
+ // Specifies that SDK clients must resolve a dual-stack endpoint for
+ // services.
+ //
+ // AWS_USE_DUALSTACK_ENDPOINT=true
+ UseDualStackEndpoint aws.DualStackEndpointState
+
+ // Specifies that SDK clients must resolve a FIPS endpoint for
+ // services.
+ //
+ // AWS_USE_FIPS_ENDPOINT=true
+ UseFIPSEndpoint aws.FIPSEndpointState
+
+ // Specifies the SDK Defaults Mode used by services.
+ //
+ // AWS_DEFAULTS_MODE=standard
+ DefaultsMode aws.DefaultsMode
+
+ // Specifies the maximum number of attempts an API client will call an
+ // operation that fails with a retryable error.
+ //
+ // AWS_MAX_ATTEMPTS=3
+ RetryMaxAttempts int
+
+ // Specifies the retry model the API client will be created with.
+ //
+ // AWS_RETRY_MODE=standard
+ RetryMode aws.RetryMode
+
+ // AWS SDK app ID that can be added to the User-Agent header string.
+ AppID string
+
+ // Flag used to disable configured endpoints.
+ IgnoreConfiguredEndpoints *bool
+
+ // Value to contain configured endpoints to be propagated to
+ // corresponding endpoint resolution field.
+ BaseEndpoint string
+}
+
+// loadEnvConfig reads configuration values from the OS's environment variables.
+// Returns a Config-typed EnvConfig to satisfy the ConfigLoader func type.
+func loadEnvConfig(ctx context.Context, cfgs configs) (Config, error) {
+ return NewEnvConfig()
+}
+
+// NewEnvConfig retrieves the SDK's environment configuration.
+// See `EnvConfig` for the values that will be retrieved.
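+//
+// A minimal usage sketch (illustrative only; the Region field and the
+// AWS_REGION/AWS_DEFAULT_REGION variables are the ones defined above in
+// this file):
+//
+//	envCfg, err := NewEnvConfig()
+//	if err != nil {
+//		// handle error
+//	}
+//	if envCfg.Region != "" {
+//		// AWS_REGION or AWS_DEFAULT_REGION was set in the environment.
+//	}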
+func NewEnvConfig() (EnvConfig, error) { + var cfg EnvConfig + + creds := aws.Credentials{ + Source: CredentialsSourceName, + } + setStringFromEnvVal(&creds.AccessKeyID, credAccessEnvKeys) + setStringFromEnvVal(&creds.SecretAccessKey, credSecretEnvKeys) + if creds.HasKeys() { + creds.SessionToken = os.Getenv(awsSessionTokenEnvVar) + cfg.Credentials = creds + } + + cfg.ContainerCredentialsEndpoint = os.Getenv(awsContainerCredentialsEndpointEnvVar) + cfg.ContainerCredentialsRelativePath = os.Getenv(awsContainerCredentialsRelativePathEnvVar) + cfg.ContainerAuthorizationToken = os.Getenv(awsContainerPProviderAuthorizationEnvVar) + + setStringFromEnvVal(&cfg.Region, regionEnvKeys) + setStringFromEnvVal(&cfg.SharedConfigProfile, profileEnvKeys) + + cfg.SharedCredentialsFile = os.Getenv(awsSharedCredentialsFileEnvVar) + cfg.SharedConfigFile = os.Getenv(awsConfigFileEnvVar) + + cfg.CustomCABundle = os.Getenv(awsCustomCABundleEnvVar) + + cfg.WebIdentityTokenFilePath = os.Getenv(awsWebIdentityTokenFilePathEnvVar) + + cfg.RoleARN = os.Getenv(awsRoleARNEnvVar) + cfg.RoleSessionName = os.Getenv(awsRoleSessionNameEnvVar) + + cfg.AppID = os.Getenv(awsSdkAppID) + + if err := setEndpointDiscoveryTypeFromEnvVal(&cfg.EnableEndpointDiscovery, []string{awsEnableEndpointDiscoveryEnvVar}); err != nil { + return cfg, err + } + + if err := setBoolPtrFromEnvVal(&cfg.S3UseARNRegion, []string{awsS3UseARNRegionEnvVar}); err != nil { + return cfg, err + } + + setEC2IMDSClientEnableState(&cfg.EC2IMDSClientEnableState, []string{awsEc2MetadataDisabled}) + if err := setEC2IMDSEndpointMode(&cfg.EC2IMDSEndpointMode, []string{awsEc2MetadataServiceEndpointModeEnvVar}); err != nil { + return cfg, err + } + cfg.EC2IMDSEndpoint = os.Getenv(awsEc2MetadataServiceEndpointEnvVar) + if err := setBoolPtrFromEnvVal(&cfg.EC2IMDSv1Disabled, []string{awsEc2MetadataV1DisabledEnvVar}); err != nil { + return cfg, err + } + + if err := setBoolPtrFromEnvVal(&cfg.S3DisableMultiRegionAccessPoints, []string{awsS3DisableMultiRegionAccessPointEnvVar}); err != nil { + return cfg, err + } + + if err := setUseDualStackEndpointFromEnvVal(&cfg.UseDualStackEndpoint, []string{awsUseDualStackEndpoint}); err != nil { + return cfg, err + } + + if err := setUseFIPSEndpointFromEnvVal(&cfg.UseFIPSEndpoint, []string{awsUseFIPSEndpoint}); err != nil { + return cfg, err + } + + if err := setDefaultsModeFromEnvVal(&cfg.DefaultsMode, []string{awsDefaultMode}); err != nil { + return cfg, err + } + + if err := setIntFromEnvVal(&cfg.RetryMaxAttempts, []string{awsRetryMaxAttempts}); err != nil { + return cfg, err + } + if err := setRetryModeFromEnvVal(&cfg.RetryMode, []string{awsRetryMode}); err != nil { + return cfg, err + } + + setStringFromEnvVal(&cfg.BaseEndpoint, []string{awsEndpointURL}) + + if err := setBoolPtrFromEnvVal(&cfg.IgnoreConfiguredEndpoints, []string{awsIgnoreConfiguredEndpoints}); err != nil { + return cfg, err + } + + return cfg, nil +} + +func (c EnvConfig) getDefaultsMode(ctx context.Context) (aws.DefaultsMode, bool, error) { + if len(c.DefaultsMode) == 0 { + return "", false, nil + } + return c.DefaultsMode, true, nil +} + +func (c EnvConfig) getAppID(context.Context) (string, bool, error) { + return c.AppID, len(c.AppID) > 0, nil +} + +// GetRetryMaxAttempts returns the value of AWS_MAX_ATTEMPTS if was specified, +// and not 0. 
+func (c EnvConfig) GetRetryMaxAttempts(ctx context.Context) (int, bool, error) { + if c.RetryMaxAttempts == 0 { + return 0, false, nil + } + return c.RetryMaxAttempts, true, nil +} + +// GetRetryMode returns the RetryMode of AWS_RETRY_MODE if was specified, and a +// valid value. +func (c EnvConfig) GetRetryMode(ctx context.Context) (aws.RetryMode, bool, error) { + if len(c.RetryMode) == 0 { + return "", false, nil + } + return c.RetryMode, true, nil +} + +func setEC2IMDSClientEnableState(state *imds.ClientEnableState, keys []string) { + for _, k := range keys { + value := os.Getenv(k) + if len(value) == 0 { + continue + } + switch { + case strings.EqualFold(value, "true"): + *state = imds.ClientDisabled + case strings.EqualFold(value, "false"): + *state = imds.ClientEnabled + default: + continue + } + break + } +} + +func setDefaultsModeFromEnvVal(mode *aws.DefaultsMode, keys []string) error { + for _, k := range keys { + if value := os.Getenv(k); len(value) > 0 { + if ok := mode.SetFromString(value); !ok { + return fmt.Errorf("invalid %s value: %s", k, value) + } + break + } + } + return nil +} + +func setRetryModeFromEnvVal(mode *aws.RetryMode, keys []string) (err error) { + for _, k := range keys { + if value := os.Getenv(k); len(value) > 0 { + *mode, err = aws.ParseRetryMode(value) + if err != nil { + return fmt.Errorf("invalid %s value, %w", k, err) + } + break + } + } + return nil +} + +func setEC2IMDSEndpointMode(mode *imds.EndpointModeState, keys []string) error { + for _, k := range keys { + value := os.Getenv(k) + if len(value) == 0 { + continue + } + if err := mode.SetFromString(value); err != nil { + return fmt.Errorf("invalid value for environment variable, %s=%s, %v", k, value, err) + } + } + return nil +} + +// GetRegion returns the AWS Region if set in the environment. Returns an empty +// string if not set. +func (c EnvConfig) getRegion(ctx context.Context) (string, bool, error) { + if len(c.Region) == 0 { + return "", false, nil + } + return c.Region, true, nil +} + +// GetSharedConfigProfile returns the shared config profile if set in the +// environment. Returns an empty string if not set. +func (c EnvConfig) getSharedConfigProfile(ctx context.Context) (string, bool, error) { + if len(c.SharedConfigProfile) == 0 { + return "", false, nil + } + + return c.SharedConfigProfile, true, nil +} + +// getSharedConfigFiles returns a slice of filenames set in the environment. +// +// Will return the filenames in the order of: +// * Shared Config +func (c EnvConfig) getSharedConfigFiles(context.Context) ([]string, bool, error) { + var files []string + if v := c.SharedConfigFile; len(v) > 0 { + files = append(files, v) + } + + if len(files) == 0 { + return nil, false, nil + } + return files, true, nil +} + +// getSharedCredentialsFiles returns a slice of filenames set in the environment. 
+//
+// Will return the filenames in the order of:
+// * Shared Credentials
+func (c EnvConfig) getSharedCredentialsFiles(context.Context) ([]string, bool, error) {
+ var files []string
+ if v := c.SharedCredentialsFile; len(v) > 0 {
+ files = append(files, v)
+ }
+ if len(files) == 0 {
+ return nil, false, nil
+ }
+ return files, true, nil
+}
+
+// getCustomCABundle returns the custom CA bundle's PEM bytes if the file path
+// was set in the environment.
+func (c EnvConfig) getCustomCABundle(context.Context) (io.Reader, bool, error) {
+ if len(c.CustomCABundle) == 0 {
+ return nil, false, nil
+ }
+
+ b, err := ioutil.ReadFile(c.CustomCABundle)
+ if err != nil {
+ return nil, false, err
+ }
+ return bytes.NewReader(b), true, nil
+}
+
+// GetIgnoreConfiguredEndpoints returns whether the configured endpoints
+// feature should be disabled.
+func (c EnvConfig) GetIgnoreConfiguredEndpoints(context.Context) (bool, bool, error) {
+ if c.IgnoreConfiguredEndpoints == nil {
+ return false, false, nil
+ }
+
+ return *c.IgnoreConfiguredEndpoints, true, nil
+}
+
+func (c EnvConfig) getBaseEndpoint(context.Context) (string, bool, error) {
+ return c.BaseEndpoint, len(c.BaseEndpoint) > 0, nil
+}
+
+// GetServiceBaseEndpoint returns the endpoint configured for the given
+// service's normalized SDK ID, read from the AWS_ENDPOINT_URL_<SDK_ID>
+// environment variable.
+func (c EnvConfig) GetServiceBaseEndpoint(ctx context.Context, sdkID string) (string, bool, error) {
+ if endpt := os.Getenv(fmt.Sprintf("%s_%s", awsEndpointURL, normalizeEnv(sdkID))); endpt != "" {
+ return endpt, true, nil
+ }
+ return "", false, nil
+}
+
+func normalizeEnv(sdkID string) string {
+ upper := strings.ToUpper(sdkID)
+ return strings.ReplaceAll(upper, " ", "_")
+}
+
+// GetS3UseARNRegion returns whether to allow ARNs to direct the region
+// the S3 client's requests are sent to.
+func (c EnvConfig) GetS3UseARNRegion(ctx context.Context) (value, ok bool, err error) {
+ if c.S3UseARNRegion == nil {
+ return false, false, nil
+ }
+
+ return *c.S3UseARNRegion, true, nil
+}
+
+// GetS3DisableMultiRegionAccessPoints returns whether to disable multi-region access point
+// support for the S3 client.
+func (c EnvConfig) GetS3DisableMultiRegionAccessPoints(ctx context.Context) (value, ok bool, err error) {
+ if c.S3DisableMultiRegionAccessPoints == nil {
+ return false, false, nil
+ }
+
+ return *c.S3DisableMultiRegionAccessPoints, true, nil
+}
+
+// GetUseDualStackEndpoint returns whether the service's dual-stack endpoint should be
+// used for requests.
+func (c EnvConfig) GetUseDualStackEndpoint(ctx context.Context) (value aws.DualStackEndpointState, found bool, err error) {
+ if c.UseDualStackEndpoint == aws.DualStackEndpointStateUnset {
+ return aws.DualStackEndpointStateUnset, false, nil
+ }
+
+ return c.UseDualStackEndpoint, true, nil
+}
+
+// GetUseFIPSEndpoint returns whether the service's FIPS endpoint should be
+// used for requests.
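+//
+// An illustrative sketch of the environment toggle this accessor reflects;
+// values other than true/false are rejected by setUseFIPSEndpointFromEnvVal
+// below:
+//
+//	AWS_USE_FIPS_ENDPOINT=true  // yields (aws.FIPSEndpointStateEnabled, true, nil)
+//	// unset                    // yields (aws.FIPSEndpointStateUnset, false, nil)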
+func (c EnvConfig) GetUseFIPSEndpoint(ctx context.Context) (value aws.FIPSEndpointState, found bool, err error) { + if c.UseFIPSEndpoint == aws.FIPSEndpointStateUnset { + return aws.FIPSEndpointStateUnset, false, nil + } + + return c.UseFIPSEndpoint, true, nil +} + +func setStringFromEnvVal(dst *string, keys []string) { + for _, k := range keys { + if v := os.Getenv(k); len(v) > 0 { + *dst = v + break + } + } +} + +func setIntFromEnvVal(dst *int, keys []string) error { + for _, k := range keys { + if v := os.Getenv(k); len(v) > 0 { + i, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("invalid value %s=%s, %w", k, v, err) + } + *dst = int(i) + break + } + } + + return nil +} + +func setBoolPtrFromEnvVal(dst **bool, keys []string) error { + for _, k := range keys { + value := os.Getenv(k) + if len(value) == 0 { + continue + } + + if *dst == nil { + *dst = new(bool) + } + + switch { + case strings.EqualFold(value, "false"): + **dst = false + case strings.EqualFold(value, "true"): + **dst = true + default: + return fmt.Errorf( + "invalid value for environment variable, %s=%s, need true or false", + k, value) + } + break + } + + return nil +} + +func setEndpointDiscoveryTypeFromEnvVal(dst *aws.EndpointDiscoveryEnableState, keys []string) error { + for _, k := range keys { + value := os.Getenv(k) + if len(value) == 0 { + continue // skip if empty + } + + switch { + case strings.EqualFold(value, endpointDiscoveryDisabled): + *dst = aws.EndpointDiscoveryDisabled + case strings.EqualFold(value, endpointDiscoveryEnabled): + *dst = aws.EndpointDiscoveryEnabled + case strings.EqualFold(value, endpointDiscoveryAuto): + *dst = aws.EndpointDiscoveryAuto + default: + return fmt.Errorf( + "invalid value for environment variable, %s=%s, need true, false or auto", + k, value) + } + } + return nil +} + +func setUseDualStackEndpointFromEnvVal(dst *aws.DualStackEndpointState, keys []string) error { + for _, k := range keys { + value := os.Getenv(k) + if len(value) == 0 { + continue // skip if empty + } + + switch { + case strings.EqualFold(value, "true"): + *dst = aws.DualStackEndpointStateEnabled + case strings.EqualFold(value, "false"): + *dst = aws.DualStackEndpointStateDisabled + default: + return fmt.Errorf( + "invalid value for environment variable, %s=%s, need true, false", + k, value) + } + } + return nil +} + +func setUseFIPSEndpointFromEnvVal(dst *aws.FIPSEndpointState, keys []string) error { + for _, k := range keys { + value := os.Getenv(k) + if len(value) == 0 { + continue // skip if empty + } + + switch { + case strings.EqualFold(value, "true"): + *dst = aws.FIPSEndpointStateEnabled + case strings.EqualFold(value, "false"): + *dst = aws.FIPSEndpointStateDisabled + default: + return fmt.Errorf( + "invalid value for environment variable, %s=%s, need true, false", + k, value) + } + } + return nil +} + +// GetEnableEndpointDiscovery returns resolved value for EnableEndpointDiscovery env variable setting. +func (c EnvConfig) GetEnableEndpointDiscovery(ctx context.Context) (value aws.EndpointDiscoveryEnableState, found bool, err error) { + if c.EnableEndpointDiscovery == aws.EndpointDiscoveryUnset { + return aws.EndpointDiscoveryUnset, false, nil + } + + return c.EnableEndpointDiscovery, true, nil +} + +// GetEC2IMDSClientEnableState implements a EC2IMDSClientEnableState options resolver interface. 
+func (c EnvConfig) GetEC2IMDSClientEnableState() (imds.ClientEnableState, bool, error) { + if c.EC2IMDSClientEnableState == imds.ClientDefaultEnableState { + return imds.ClientDefaultEnableState, false, nil + } + + return c.EC2IMDSClientEnableState, true, nil +} + +// GetEC2IMDSEndpointMode implements a EC2IMDSEndpointMode option resolver interface. +func (c EnvConfig) GetEC2IMDSEndpointMode() (imds.EndpointModeState, bool, error) { + if c.EC2IMDSEndpointMode == imds.EndpointModeStateUnset { + return imds.EndpointModeStateUnset, false, nil + } + + return c.EC2IMDSEndpointMode, true, nil +} + +// GetEC2IMDSEndpoint implements a EC2IMDSEndpoint option resolver interface. +func (c EnvConfig) GetEC2IMDSEndpoint() (string, bool, error) { + if len(c.EC2IMDSEndpoint) == 0 { + return "", false, nil + } + + return c.EC2IMDSEndpoint, true, nil +} + +// GetEC2IMDSV1FallbackDisabled implements an EC2IMDSV1FallbackDisabled option +// resolver interface. +func (c EnvConfig) GetEC2IMDSV1FallbackDisabled() (bool, bool) { + if c.EC2IMDSv1Disabled == nil { + return false, false + } + + return *c.EC2IMDSv1Disabled, true +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/generate.go b/vendor/github.com/aws/aws-sdk-go-v2/config/generate.go new file mode 100644 index 00000000000..654a7a77fb7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/generate.go @@ -0,0 +1,4 @@ +package config + +//go:generate go run -tags codegen ./codegen -output=provider_assert_test.go +//go:generate gofmt -s -w ./ diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go new file mode 100644 index 00000000000..1bdac9754d8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go @@ -0,0 +1,6 @@ +// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. + +package config + +// goModuleVersion is the tagged release for this module +const goModuleVersion = "1.24.0" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go b/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go new file mode 100644 index 00000000000..7480bb45ed1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go @@ -0,0 +1,1046 @@ +package config + +import ( + "context" + "io" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds" + "github.com/aws/aws-sdk-go-v2/credentials/endpointcreds" + "github.com/aws/aws-sdk-go-v2/credentials/processcreds" + "github.com/aws/aws-sdk-go-v2/credentials/ssocreds" + "github.com/aws/aws-sdk-go-v2/credentials/stscreds" + "github.com/aws/aws-sdk-go-v2/feature/ec2/imds" + smithybearer "github.com/aws/smithy-go/auth/bearer" + "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/middleware" +) + +// LoadOptionsFunc is a type alias for LoadOptions functional option +type LoadOptionsFunc func(*LoadOptions) error + +// LoadOptions are discrete set of options that are valid for loading the +// configuration +type LoadOptions struct { + + // Region is the region to send requests to. + Region string + + // Credentials object to use when signing requests. + Credentials aws.CredentialsProvider + + // Token provider for authentication operations with bearer authentication. + BearerAuthTokenProvider smithybearer.TokenProvider + + // HTTPClient the SDK's API clients will use to invoke HTTP requests. 
+ HTTPClient HTTPClient
+
+ // EndpointResolver that can be used to provide or override an endpoint for
+ // the given service and region.
+ //
+ // See the `aws.EndpointResolver` documentation on usage.
+ //
+ // Deprecated: See EndpointResolverWithOptions
+ EndpointResolver aws.EndpointResolver
+
+ // EndpointResolverWithOptions that can be used to provide or override an
+ // endpoint for the given service and region.
+ //
+ // See the `aws.EndpointResolverWithOptions` documentation on usage.
+ EndpointResolverWithOptions aws.EndpointResolverWithOptions
+
+ // RetryMaxAttempts specifies the maximum number of attempts an API client
+ // will call an operation that fails with a retryable error.
+ //
+ // This value will only be used if Retryer option is nil.
+ RetryMaxAttempts int
+
+ // RetryMode specifies the retry model the API client will be created with.
+ //
+ // This value will only be used if Retryer option is nil.
+ RetryMode aws.RetryMode
+
+ // Retryer is a function that provides a Retryer implementation. A Retryer
+ // guides how HTTP requests should be retried in case of recoverable
+ // failures.
+ //
+ // If not nil, RetryMaxAttempts and RetryMode will be ignored.
+ Retryer func() aws.Retryer
+
+ // APIOptions provides the set of middleware mutations that modify how the API
+ // client's requests will be handled. This is useful for adding additional
+ // tracing data to a request, or changing behavior of the SDK's client.
+ APIOptions []func(*middleware.Stack) error
+
+ // Logger writer interface to write logging messages to.
+ Logger logging.Logger
+
+ // ClientLogMode is used to configure the events that will be sent to the
+ // configured logger. This can be used to configure the logging of signing,
+ // retries, request, and responses of the SDK clients.
+ //
+ // See the ClientLogMode type documentation for the complete set of logging
+ // modes and available configuration.
+ ClientLogMode *aws.ClientLogMode
+
+ // SharedConfigProfile is the profile to be used when loading the SharedConfig
+ SharedConfigProfile string
+
+ // SharedConfigFiles is the slice of custom shared config files to use when
+ // loading the SharedConfig. A non-default profile used within a config file
+ // must have its name defined with the prefix 'profile '. eg [profile xyz]
+ // indicates a profile with name 'xyz'. To read more on the format of the
+ // config file, please refer to the documentation at
+ // https://docs.aws.amazon.com/credref/latest/refdocs/file-format.html#file-format-config
+ //
+ // If duplicate profiles are provided within the same, or across multiple
+ // shared config files, the next parsed profile will override only the
+ // properties that conflict with the previously defined profile. Note that
+ // if duplicate profiles are provided within the SharedCredentialsFiles and
+ // SharedConfigFiles, the properties defined in shared credentials file
+ // take precedence.
+ SharedConfigFiles []string
+
+ // SharedCredentialsFiles is the slice of custom shared credentials files to
+ // use when loading the SharedConfig. The profile name used within a
+ // credentials file must not be prefixed with 'profile '. eg [xyz] indicates a
+ // profile with name 'xyz'. Profile declared as [profile xyz] will be
+ // ignored. To read more on the format of the credentials file, please
+ // refer to the documentation at
+ // https://docs.aws.amazon.com/credref/latest/refdocs/file-format.html#file-format-creds
+ //
+ // If duplicate profiles are provided within the same, or across multiple
+ // shared credentials files, the next parsed profile will override only
+ // properties that conflict with the previously defined profile. Note that
+ // if duplicate profiles are provided within the SharedCredentialsFiles and
+ // SharedConfigFiles, the properties defined in shared credentials file
+ // take precedence.
+ SharedCredentialsFiles []string
+
+ // CustomCABundle is a CA bundle PEM bytes reader
+ CustomCABundle io.Reader
+
+ // DefaultRegion is the fallback region, used if a region was not resolved
+ // from other sources
+ DefaultRegion string
+
+ // UseEC2IMDSRegion indicates if the SDK should retrieve the region
+ // from the EC2 Metadata service
+ UseEC2IMDSRegion *UseEC2IMDSRegion
+
+ // CredentialsCacheOptions is a function for setting the
+ // aws.CredentialsCacheOptions
+ CredentialsCacheOptions func(*aws.CredentialsCacheOptions)
+
+ // BearerAuthTokenCacheOptions is a function for setting the smithy-go
+ // auth/bearer#TokenCacheOptions
+ BearerAuthTokenCacheOptions func(*smithybearer.TokenCacheOptions)
+
+ // SSOTokenProviderOptions is a function for setting the
+ // credentials/ssocreds.SSOTokenProviderOptions
+ SSOTokenProviderOptions func(*ssocreds.SSOTokenProviderOptions)
+
+ // ProcessCredentialOptions is a function for setting
+ // the processcreds.Options
+ ProcessCredentialOptions func(*processcreds.Options)
+
+ // EC2RoleCredentialOptions is a function for setting
+ // the ec2rolecreds.Options
+ EC2RoleCredentialOptions func(*ec2rolecreds.Options)
+
+ // EndpointCredentialOptions is a function for setting
+ // the endpointcreds.Options
+ EndpointCredentialOptions func(*endpointcreds.Options)
+
+ // WebIdentityRoleCredentialOptions is a function for setting
+ // the stscreds.WebIdentityRoleOptions
+ WebIdentityRoleCredentialOptions func(*stscreds.WebIdentityRoleOptions)
+
+ // AssumeRoleCredentialOptions is a function for setting the
+ // stscreds.AssumeRoleOptions
+ AssumeRoleCredentialOptions func(*stscreds.AssumeRoleOptions)
+
+ // SSOProviderOptions is a function for setting
+ // the ssocreds.Options
+ SSOProviderOptions func(options *ssocreds.Options)
+
+ // LogConfigurationWarnings when set to true, enables logging
+ // configuration warnings
+ LogConfigurationWarnings *bool
+
+ // S3UseARNRegion specifies if the S3 service should allow ARNs to direct
+ // the region the client's requests are sent to.
+ S3UseARNRegion *bool
+
+ // S3DisableMultiRegionAccessPoints specifies if the S3 service should disable
+ // the S3 Multi-Region access points feature.
+ S3DisableMultiRegionAccessPoints *bool
+
+ // EnableEndpointDiscovery specifies if endpoint discovery is enabled for
+ // the client.
+ EnableEndpointDiscovery aws.EndpointDiscoveryEnableState
+
+ // Specifies if the EC2 IMDS service client is enabled.
+ //
+ // AWS_EC2_METADATA_DISABLED=true
+ EC2IMDSClientEnableState imds.ClientEnableState
+
+ // Specifies the EC2 Instance Metadata Service default endpoint selection
+ // mode (IPv4 or IPv6)
+ EC2IMDSEndpointMode imds.EndpointModeState
+
+ // Specifies the EC2 Instance Metadata Service endpoint to use. If
+ // specified it overrides EC2IMDSEndpointMode.
+ EC2IMDSEndpoint string
+
+ // Specifies that SDK clients must resolve a dual-stack endpoint for
+ // services.
+ UseDualStackEndpoint aws.DualStackEndpointState
+
+ // Specifies that SDK clients must resolve a FIPS endpoint for
+ // services.
+ UseFIPSEndpoint aws.FIPSEndpointState
+
+ // Specifies the SDK configuration mode for defaults.
+ DefaultsModeOptions DefaultsModeOptions
+
+ // The SDK app ID, retrieved from an env var or shared config, to be added to the request User-Agent header
+ AppID string
+}
+
+func (o LoadOptions) getDefaultsMode(ctx context.Context) (aws.DefaultsMode, bool, error) {
+ if len(o.DefaultsModeOptions.Mode) == 0 {
+ return "", false, nil
+ }
+ return o.DefaultsModeOptions.Mode, true, nil
+}
+
+// GetRetryMaxAttempts returns the RetryMaxAttempts if specified in the
+// LoadOptions and not 0.
+func (o LoadOptions) GetRetryMaxAttempts(ctx context.Context) (int, bool, error) {
+ if o.RetryMaxAttempts == 0 {
+ return 0, false, nil
+ }
+ return o.RetryMaxAttempts, true, nil
+}
+
+// GetRetryMode returns the RetryMode specified in the LoadOptions.
+func (o LoadOptions) GetRetryMode(ctx context.Context) (aws.RetryMode, bool, error) {
+ if len(o.RetryMode) == 0 {
+ return "", false, nil
+ }
+ return o.RetryMode, true, nil
+}
+
+func (o LoadOptions) getDefaultsModeIMDSClient(ctx context.Context) (*imds.Client, bool, error) {
+ if o.DefaultsModeOptions.IMDSClient == nil {
+ return nil, false, nil
+ }
+ return o.DefaultsModeOptions.IMDSClient, true, nil
+}
+
+// getRegion returns Region from config's LoadOptions
+func (o LoadOptions) getRegion(ctx context.Context) (string, bool, error) {
+ if len(o.Region) == 0 {
+ return "", false, nil
+ }
+
+ return o.Region, true, nil
+}
+
+// getAppID returns AppID from config's LoadOptions
+func (o LoadOptions) getAppID(ctx context.Context) (string, bool, error) {
+ return o.AppID, len(o.AppID) > 0, nil
+}
+
+// WithRegion is a helper function to construct functional options
+// that sets Region on config's LoadOptions. Setting the region to
+// an empty string will result in the region value being ignored.
+// If multiple WithRegion calls are made, the last call overrides
+// the previous call values.
+func WithRegion(v string) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.Region = v
+ return nil
+ }
+}
+
+// WithAppID is a helper function to construct functional options
+// that sets AppID on config's LoadOptions.
+func WithAppID(ID string) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.AppID = ID
+ return nil
+ }
+}
+
+// getDefaultRegion returns DefaultRegion from config's LoadOptions
+func (o LoadOptions) getDefaultRegion(ctx context.Context) (string, bool, error) {
+ if len(o.DefaultRegion) == 0 {
+ return "", false, nil
+ }
+
+ return o.DefaultRegion, true, nil
+}
+
+// WithDefaultRegion is a helper function to construct functional options
+// that sets a DefaultRegion on config's LoadOptions. Setting the default
+// region to an empty string will result in the default region value
+// being ignored. If multiple WithDefaultRegion calls are made, the last
+// call overrides the previous call values. Note that both the WithRegion and
+// WithEC2IMDSRegion calls take precedence over the WithDefaultRegion call
+// when resolving the region.
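+//
+// An illustrative sketch (LoadDefaultConfig is this package's loader; the
+// region value is a placeholder):
+//
+//	cfg, err := config.LoadDefaultConfig(context.TODO(),
+//		config.WithDefaultRegion("us-east-1"))
+//	// cfg.Region is "us-east-1" only if no other source resolved a region.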
+func WithDefaultRegion(v string) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.DefaultRegion = v + return nil + } +} + +// getSharedConfigProfile returns SharedConfigProfile from config's LoadOptions +func (o LoadOptions) getSharedConfigProfile(ctx context.Context) (string, bool, error) { + if len(o.SharedConfigProfile) == 0 { + return "", false, nil + } + + return o.SharedConfigProfile, true, nil +} + +// WithSharedConfigProfile is a helper function to construct functional options +// that sets SharedConfigProfile on config's LoadOptions. Setting the shared +// config profile to an empty string, will result in the shared config profile +// value being ignored. +// If multiple WithSharedConfigProfile calls are made, the last call overrides +// the previous call values. +func WithSharedConfigProfile(v string) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.SharedConfigProfile = v + return nil + } +} + +// getSharedConfigFiles returns SharedConfigFiles set on config's LoadOptions +func (o LoadOptions) getSharedConfigFiles(ctx context.Context) ([]string, bool, error) { + if o.SharedConfigFiles == nil { + return nil, false, nil + } + + return o.SharedConfigFiles, true, nil +} + +// WithSharedConfigFiles is a helper function to construct functional options +// that sets slice of SharedConfigFiles on config's LoadOptions. +// Setting the shared config files to an nil string slice, will result in the +// shared config files value being ignored. +// If multiple WithSharedConfigFiles calls are made, the last call overrides +// the previous call values. +func WithSharedConfigFiles(v []string) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.SharedConfigFiles = v + return nil + } +} + +// getSharedCredentialsFiles returns SharedCredentialsFiles set on config's LoadOptions +func (o LoadOptions) getSharedCredentialsFiles(ctx context.Context) ([]string, bool, error) { + if o.SharedCredentialsFiles == nil { + return nil, false, nil + } + + return o.SharedCredentialsFiles, true, nil +} + +// WithSharedCredentialsFiles is a helper function to construct functional options +// that sets slice of SharedCredentialsFiles on config's LoadOptions. +// Setting the shared credentials files to an nil string slice, will result in the +// shared credentials files value being ignored. +// If multiple WithSharedCredentialsFiles calls are made, the last call overrides +// the previous call values. +func WithSharedCredentialsFiles(v []string) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.SharedCredentialsFiles = v + return nil + } +} + +// getCustomCABundle returns CustomCABundle from LoadOptions +func (o LoadOptions) getCustomCABundle(ctx context.Context) (io.Reader, bool, error) { + if o.CustomCABundle == nil { + return nil, false, nil + } + + return o.CustomCABundle, true, nil +} + +// WithCustomCABundle is a helper function to construct functional options +// that sets CustomCABundle on config's LoadOptions. Setting the custom CA Bundle +// to nil will result in custom CA Bundle value being ignored. +// If multiple WithCustomCABundle calls are made, the last call overrides the +// previous call values. +func WithCustomCABundle(v io.Reader) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.CustomCABundle = v + return nil + } +} + +// UseEC2IMDSRegion provides a regionProvider that retrieves the region +// from the EC2 Metadata service. +type UseEC2IMDSRegion struct { + // If unset will default to generic EC2 IMDS client. 
+ Client *imds.Client +} + +// getRegion attempts to retrieve the region from EC2 Metadata service. +func (p *UseEC2IMDSRegion) getRegion(ctx context.Context) (string, bool, error) { + if ctx == nil { + ctx = context.Background() + } + + client := p.Client + if client == nil { + client = imds.New(imds.Options{}) + } + + result, err := client.GetRegion(ctx, nil) + if err != nil { + return "", false, err + } + if len(result.Region) != 0 { + return result.Region, true, nil + } + return "", false, nil +} + +// getEC2IMDSRegion returns the value of EC2 IMDS region. +func (o LoadOptions) getEC2IMDSRegion(ctx context.Context) (string, bool, error) { + if o.UseEC2IMDSRegion == nil { + return "", false, nil + } + + return o.UseEC2IMDSRegion.getRegion(ctx) +} + +// WithEC2IMDSRegion is a helper function to construct functional options +// that enables resolving EC2IMDS region. The function takes +// in a UseEC2IMDSRegion functional option, and can be used to set the +// EC2IMDS client which will be used to resolve EC2IMDSRegion. +// If no functional option is provided, an EC2IMDS client is built and used +// by the resolver. If multiple WithEC2IMDSRegion calls are made, the last +// call overrides the previous call values. Note that the WithRegion calls takes +// precedence over WithEC2IMDSRegion when resolving region. +func WithEC2IMDSRegion(fnOpts ...func(o *UseEC2IMDSRegion)) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.UseEC2IMDSRegion = &UseEC2IMDSRegion{} + + for _, fn := range fnOpts { + fn(o.UseEC2IMDSRegion) + } + return nil + } +} + +// getCredentialsProvider returns the credentials value +func (o LoadOptions) getCredentialsProvider(ctx context.Context) (aws.CredentialsProvider, bool, error) { + if o.Credentials == nil { + return nil, false, nil + } + + return o.Credentials, true, nil +} + +// WithCredentialsProvider is a helper function to construct functional options +// that sets Credential provider value on config's LoadOptions. If credentials +// provider is set to nil, the credentials provider value will be ignored. +// If multiple WithCredentialsProvider calls are made, the last call overrides +// the previous call values. +func WithCredentialsProvider(v aws.CredentialsProvider) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.Credentials = v + return nil + } +} + +// getCredentialsCacheOptionsProvider returns the wrapped function to set aws.CredentialsCacheOptions +func (o LoadOptions) getCredentialsCacheOptions(ctx context.Context) (func(*aws.CredentialsCacheOptions), bool, error) { + if o.CredentialsCacheOptions == nil { + return nil, false, nil + } + + return o.CredentialsCacheOptions, true, nil +} + +// WithCredentialsCacheOptions is a helper function to construct functional +// options that sets a function to modify the aws.CredentialsCacheOptions the +// aws.CredentialsCache will be configured with, if the CredentialsCache is used +// by the configuration loader. +// +// If multiple WithCredentialsCacheOptions calls are made, the last call +// overrides the previous call values. 
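+//
+// A sketch, assuming the ExpiryWindow field on aws.CredentialsCacheOptions
+// from the aws module:
+//
+//	cfg, err := config.LoadDefaultConfig(context.TODO(),
+//		config.WithCredentialsCacheOptions(func(o *aws.CredentialsCacheOptions) {
+//			// Refresh credentials 10 minutes before they expire.
+//			o.ExpiryWindow = 10 * time.Minute
+//		}))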
+func WithCredentialsCacheOptions(v func(*aws.CredentialsCacheOptions)) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.CredentialsCacheOptions = v
+ return nil
+ }
+}
+
+// getBearerAuthTokenProvider returns the bearer auth token provider value
+func (o LoadOptions) getBearerAuthTokenProvider(ctx context.Context) (smithybearer.TokenProvider, bool, error) {
+ if o.BearerAuthTokenProvider == nil {
+ return nil, false, nil
+ }
+
+ return o.BearerAuthTokenProvider, true, nil
+}
+
+// WithBearerAuthTokenProvider is a helper function to construct functional options
+// that sets the BearerAuthTokenProvider value on config's LoadOptions. If the token
+// provider is set to nil, the token provider value will be ignored.
+// If multiple WithBearerAuthTokenProvider calls are made, the last call overrides
+// the previous call values.
+func WithBearerAuthTokenProvider(v smithybearer.TokenProvider) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.BearerAuthTokenProvider = v
+ return nil
+ }
+}
+
+// getBearerAuthTokenCacheOptionsProvider returns the wrapped function to set smithybearer.TokenCacheOptions
+func (o LoadOptions) getBearerAuthTokenCacheOptions(ctx context.Context) (func(*smithybearer.TokenCacheOptions), bool, error) {
+ if o.BearerAuthTokenCacheOptions == nil {
+ return nil, false, nil
+ }
+
+ return o.BearerAuthTokenCacheOptions, true, nil
+}
+
+// WithBearerAuthTokenCacheOptions is a helper function to construct functional options
+// that sets a function to modify the TokenCacheOptions the smithy-go
+// auth/bearer#TokenCache will be configured with, if the TokenCache is used by
+// the configuration loader.
+//
+// If multiple WithBearerAuthTokenCacheOptions calls are made, the last call overrides
+// the previous call values.
+func WithBearerAuthTokenCacheOptions(v func(*smithybearer.TokenCacheOptions)) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.BearerAuthTokenCacheOptions = v
+ return nil
+ }
+}
+
+// getSSOTokenProviderOptions returns the wrapped function to set ssocreds.SSOTokenProviderOptions
+func (o LoadOptions) getSSOTokenProviderOptions(ctx context.Context) (func(*ssocreds.SSOTokenProviderOptions), bool, error) {
+ if o.SSOTokenProviderOptions == nil {
+ return nil, false, nil
+ }
+
+ return o.SSOTokenProviderOptions, true, nil
+}
+
+// WithSSOTokenProviderOptions is a helper function to construct functional
+// options that sets a function to modify the SSOTokenProviderOptions the SDK's
+// credentials/ssocreds#SSOProvider will be configured with, if the
+// SSOTokenProvider is used by the configuration loader.
+//
+// If multiple WithSSOTokenProviderOptions calls are made, the last call overrides
+// the previous call values.
+func WithSSOTokenProviderOptions(v func(*ssocreds.SSOTokenProviderOptions)) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.SSOTokenProviderOptions = v
+ return nil
+ }
+}
+
+// getProcessCredentialOptions returns the wrapped function to set processcreds.Options
+func (o LoadOptions) getProcessCredentialOptions(ctx context.Context) (func(*processcreds.Options), bool, error) {
+ if o.ProcessCredentialOptions == nil {
+ return nil, false, nil
+ }
+
+ return o.ProcessCredentialOptions, true, nil
+}
+
+// WithProcessCredentialOptions is a helper function to construct functional options
+// that sets a function to use processcreds.Options on config's LoadOptions.
+// If process credential options is set to nil, the process credential value will
+// be ignored.
If multiple WithProcessCredentialOptions calls are made, the last call +// overrides the previous call values. +func WithProcessCredentialOptions(v func(*processcreds.Options)) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.ProcessCredentialOptions = v + return nil + } +} + +// getEC2RoleCredentialOptions returns the wrapped function to set the ec2rolecreds.Options +func (o LoadOptions) getEC2RoleCredentialOptions(ctx context.Context) (func(*ec2rolecreds.Options), bool, error) { + if o.EC2RoleCredentialOptions == nil { + return nil, false, nil + } + + return o.EC2RoleCredentialOptions, true, nil +} + +// WithEC2RoleCredentialOptions is a helper function to construct functional options +// that sets a function to use ec2rolecreds.Options on config's LoadOptions. If +// EC2 role credential options is set to nil, the EC2 role credential options value +// will be ignored. If multiple WithEC2RoleCredentialOptions calls are made, +// the last call overrides the previous call values. +func WithEC2RoleCredentialOptions(v func(*ec2rolecreds.Options)) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.EC2RoleCredentialOptions = v + return nil + } +} + +// getEndpointCredentialOptions returns the wrapped function to set endpointcreds.Options +func (o LoadOptions) getEndpointCredentialOptions(context.Context) (func(*endpointcreds.Options), bool, error) { + if o.EndpointCredentialOptions == nil { + return nil, false, nil + } + + return o.EndpointCredentialOptions, true, nil +} + +// WithEndpointCredentialOptions is a helper function to construct functional options +// that sets a function to use endpointcreds.Options on config's LoadOptions. If +// endpoint credential options is set to nil, the endpoint credential options +// value will be ignored. If multiple WithEndpointCredentialOptions calls are made, +// the last call overrides the previous call values. +func WithEndpointCredentialOptions(v func(*endpointcreds.Options)) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.EndpointCredentialOptions = v + return nil + } +} + +// getWebIdentityRoleCredentialOptions returns the wrapped function +func (o LoadOptions) getWebIdentityRoleCredentialOptions(context.Context) (func(*stscreds.WebIdentityRoleOptions), bool, error) { + if o.WebIdentityRoleCredentialOptions == nil { + return nil, false, nil + } + + return o.WebIdentityRoleCredentialOptions, true, nil +} + +// WithWebIdentityRoleCredentialOptions is a helper function to construct +// functional options that sets a function to use stscreds.WebIdentityRoleOptions +// on config's LoadOptions. If web identity role credentials options is set to nil, +// the web identity role credentials value will be ignored. If multiple +// WithWebIdentityRoleCredentialOptions calls are made, the last call +// overrides the previous call values. 
+func WithWebIdentityRoleCredentialOptions(v func(*stscreds.WebIdentityRoleOptions)) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.WebIdentityRoleCredentialOptions = v + return nil + } +} + +// getAssumeRoleCredentialOptions returns AssumeRoleCredentialOptions from LoadOptions +func (o LoadOptions) getAssumeRoleCredentialOptions(context.Context) (func(options *stscreds.AssumeRoleOptions), bool, error) { + if o.AssumeRoleCredentialOptions == nil { + return nil, false, nil + } + + return o.AssumeRoleCredentialOptions, true, nil +} + +// WithAssumeRoleCredentialOptions is a helper function to construct +// functional options that sets a function to use stscreds.AssumeRoleOptions +// on config's LoadOptions. If assume role credentials options is set to nil, +// the assume role credentials value will be ignored. If multiple +// WithAssumeRoleCredentialOptions calls are made, the last call overrides +// the previous call values. +func WithAssumeRoleCredentialOptions(v func(*stscreds.AssumeRoleOptions)) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.AssumeRoleCredentialOptions = v + return nil + } +} + +func (o LoadOptions) getHTTPClient(ctx context.Context) (HTTPClient, bool, error) { + if o.HTTPClient == nil { + return nil, false, nil + } + + return o.HTTPClient, true, nil +} + +// WithHTTPClient is a helper function to construct functional options +// that sets HTTPClient on LoadOptions. If HTTPClient is set to nil, +// the HTTPClient value will be ignored. +// If multiple WithHTTPClient calls are made, the last call overrides +// the previous call values. +func WithHTTPClient(v HTTPClient) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.HTTPClient = v + return nil + } +} + +func (o LoadOptions) getAPIOptions(ctx context.Context) ([]func(*middleware.Stack) error, bool, error) { + if o.APIOptions == nil { + return nil, false, nil + } + + return o.APIOptions, true, nil +} + +// WithAPIOptions is a helper function to construct functional options +// that sets APIOptions on LoadOptions. If APIOptions is set to nil, the +// APIOptions value is ignored. If multiple WithAPIOptions calls are +// made, the last call overrides the previous call values. +func WithAPIOptions(v []func(*middleware.Stack) error) LoadOptionsFunc { + return func(o *LoadOptions) error { + if v == nil { + return nil + } + + o.APIOptions = append(o.APIOptions, v...) + return nil + } +} + +func (o LoadOptions) getRetryMaxAttempts(ctx context.Context) (int, bool, error) { + if o.RetryMaxAttempts == 0 { + return 0, false, nil + } + + return o.RetryMaxAttempts, true, nil +} + +// WithRetryMaxAttempts is a helper function to construct functional options that sets +// RetryMaxAttempts on LoadOptions. If RetryMaxAttempts is unset, the RetryMaxAttempts value is +// ignored. If multiple WithRetryMaxAttempts calls are made, the last call overrides +// the previous call values. +// +// Will be ignored of LoadOptions.Retryer or WithRetryer are used. +func WithRetryMaxAttempts(v int) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.RetryMaxAttempts = v + return nil + } +} + +func (o LoadOptions) getRetryMode(ctx context.Context) (aws.RetryMode, bool, error) { + if o.RetryMode == "" { + return "", false, nil + } + + return o.RetryMode, true, nil +} + +// WithRetryMode is a helper function to construct functional options that sets +// RetryMode on LoadOptions. If RetryMode is unset, the RetryMode value is +// ignored. 
+// If multiple WithRetryMode calls are made, the last call overrides
+// the previous call values.
+//
+// Will be ignored if LoadOptions.Retryer or WithRetryer are used.
+func WithRetryMode(v aws.RetryMode) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.RetryMode = v
+ return nil
+ }
+}
+
+func (o LoadOptions) getRetryer(ctx context.Context) (func() aws.Retryer, bool, error) {
+ if o.Retryer == nil {
+ return nil, false, nil
+ }
+
+ return o.Retryer, true, nil
+}
+
+// WithRetryer is a helper function to construct functional options
+// that sets Retryer on LoadOptions. If Retryer is set to nil, the
+// Retryer value is ignored. If multiple WithRetryer calls are
+// made, the last call overrides the previous call values.
+func WithRetryer(v func() aws.Retryer) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.Retryer = v
+ return nil
+ }
+}
+
+func (o LoadOptions) getEndpointResolver(ctx context.Context) (aws.EndpointResolver, bool, error) {
+ if o.EndpointResolver == nil {
+ return nil, false, nil
+ }
+
+ return o.EndpointResolver, true, nil
+}
+
+// WithEndpointResolver is a helper function to construct functional options
+// that sets the EndpointResolver on LoadOptions. If the EndpointResolver is set to nil,
+// the EndpointResolver value is ignored. If multiple WithEndpointResolver calls
+// are made, the last call overrides the previous call values.
+//
+// Deprecated: See WithEndpointResolverWithOptions
+func WithEndpointResolver(v aws.EndpointResolver) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.EndpointResolver = v
+ return nil
+ }
+}
+
+func (o LoadOptions) getEndpointResolverWithOptions(ctx context.Context) (aws.EndpointResolverWithOptions, bool, error) {
+ if o.EndpointResolverWithOptions == nil {
+ return nil, false, nil
+ }
+
+ return o.EndpointResolverWithOptions, true, nil
+}
+
+// WithEndpointResolverWithOptions is a helper function to construct functional options
+// that sets the EndpointResolverWithOptions on LoadOptions. If the EndpointResolverWithOptions is set to nil,
+// the EndpointResolverWithOptions value is ignored. If multiple WithEndpointResolverWithOptions calls
+// are made, the last call overrides the previous call values.
+func WithEndpointResolverWithOptions(v aws.EndpointResolverWithOptions) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.EndpointResolverWithOptions = v
+ return nil
+ }
+}
+
+func (o LoadOptions) getLogger(ctx context.Context) (logging.Logger, bool, error) {
+ if o.Logger == nil {
+ return nil, false, nil
+ }
+
+ return o.Logger, true, nil
+}
+
+// WithLogger is a helper function to construct functional options
+// that sets Logger on LoadOptions. If Logger is set to nil, the
+// Logger value will be ignored. If multiple WithLogger calls are made,
+// the last call overrides the previous call values.
+func WithLogger(v logging.Logger) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.Logger = v
+ return nil
+ }
+}
+
+func (o LoadOptions) getClientLogMode(ctx context.Context) (aws.ClientLogMode, bool, error) {
+ if o.ClientLogMode == nil {
+ return 0, false, nil
+ }
+
+ return *o.ClientLogMode, true, nil
+}
+
+// WithClientLogMode is a helper function to construct functional options
+// that sets client log mode on LoadOptions. If client log mode is set to nil,
+// the client log mode value will be ignored. If multiple WithClientLogMode calls are made,
+// the last call overrides the previous call values.
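+//
+// A sketch, assuming the aws.ClientLogMode flag constants (for example
+// aws.LogRequestWithBody and aws.LogResponseWithBody) from the aws module:
+//
+//	cfg, err := config.LoadDefaultConfig(context.TODO(),
+//		config.WithClientLogMode(aws.LogRequestWithBody|aws.LogResponseWithBody))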
+func WithClientLogMode(v aws.ClientLogMode) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.ClientLogMode = &v + return nil + } +} + +func (o LoadOptions) getLogConfigurationWarnings(ctx context.Context) (v bool, found bool, err error) { + if o.LogConfigurationWarnings == nil { + return false, false, nil + } + return *o.LogConfigurationWarnings, true, nil +} + +// WithLogConfigurationWarnings is a helper function to construct +// functional options that can be used to set LogConfigurationWarnings +// on LoadOptions. +// +// If multiple WithLogConfigurationWarnings calls are made, the last call +// overrides the previous call values. +func WithLogConfigurationWarnings(v bool) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.LogConfigurationWarnings = &v + return nil + } +} + +// GetS3UseARNRegion returns whether to allow ARNs to direct the region +// the S3 client's requests are sent to. +func (o LoadOptions) GetS3UseARNRegion(ctx context.Context) (v bool, found bool, err error) { + if o.S3UseARNRegion == nil { + return false, false, nil + } + return *o.S3UseARNRegion, true, nil +} + +// WithS3UseARNRegion is a helper function to construct functional options +// that can be used to set S3UseARNRegion on LoadOptions. +// If multiple WithS3UseARNRegion calls are made, the last call overrides +// the previous call values. +func WithS3UseARNRegion(v bool) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.S3UseARNRegion = &v + return nil + } +} + +// GetS3DisableMultiRegionAccessPoints returns whether to disable +// the S3 multi-region access points feature. +func (o LoadOptions) GetS3DisableMultiRegionAccessPoints(ctx context.Context) (v bool, found bool, err error) { + if o.S3DisableMultiRegionAccessPoints == nil { + return false, false, nil + } + return *o.S3DisableMultiRegionAccessPoints, true, nil +} + +// WithS3DisableMultiRegionAccessPoints is a helper function to construct functional options +// that can be used to set S3DisableMultiRegionAccessPoints on LoadOptions. +// If multiple WithS3DisableMultiRegionAccessPoints calls are made, the last call overrides +// the previous call values. +func WithS3DisableMultiRegionAccessPoints(v bool) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.S3DisableMultiRegionAccessPoints = &v + return nil + } +} + +// GetEnableEndpointDiscovery returns if the EnableEndpointDiscovery flag is set. +func (o LoadOptions) GetEnableEndpointDiscovery(ctx context.Context) (value aws.EndpointDiscoveryEnableState, ok bool, err error) { + if o.EnableEndpointDiscovery == aws.EndpointDiscoveryUnset { + return aws.EndpointDiscoveryUnset, false, nil + } + return o.EnableEndpointDiscovery, true, nil +} + +// WithEndpointDiscovery is a helper function to construct functional options +// that can be used to enable endpoint discovery on LoadOptions for supported clients. +// If multiple WithEndpointDiscovery calls are made, the last call overrides +// the previous call values. 
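+//
+// A sketch (aws.EndpointDiscoveryEnabled is the same state used by the
+// environment resolver earlier in this package):
+//
+//	cfg, err := config.LoadDefaultConfig(context.TODO(),
+//		config.WithEndpointDiscovery(aws.EndpointDiscoveryEnabled))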
+func WithEndpointDiscovery(v aws.EndpointDiscoveryEnableState) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.EnableEndpointDiscovery = v
+ return nil
+ }
+}
+
+// getSSOProviderOptions returns SSOProviderOptions from LoadOptions
+func (o LoadOptions) getSSOProviderOptions(context.Context) (func(options *ssocreds.Options), bool, error) {
+ if o.SSOProviderOptions == nil {
+ return nil, false, nil
+ }
+
+ return o.SSOProviderOptions, true, nil
+}
+
+// WithSSOProviderOptions is a helper function to construct
+// functional options that sets a function to use ssocreds.Options
+// on config's LoadOptions. If the SSO credential provider options is set to nil,
+// the SSO provider options value will be ignored. If multiple
+// WithSSOProviderOptions calls are made, the last call overrides
+// the previous call values.
+func WithSSOProviderOptions(v func(*ssocreds.Options)) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.SSOProviderOptions = v
+ return nil
+ }
+}
+
+// GetEC2IMDSClientEnableState implements an EC2IMDSClientEnableState options resolver interface.
+func (o LoadOptions) GetEC2IMDSClientEnableState() (imds.ClientEnableState, bool, error) {
+ if o.EC2IMDSClientEnableState == imds.ClientDefaultEnableState {
+ return imds.ClientDefaultEnableState, false, nil
+ }
+
+ return o.EC2IMDSClientEnableState, true, nil
+}
+
+// GetEC2IMDSEndpointMode implements an EC2IMDSEndpointMode option resolver interface.
+func (o LoadOptions) GetEC2IMDSEndpointMode() (imds.EndpointModeState, bool, error) {
+ if o.EC2IMDSEndpointMode == imds.EndpointModeStateUnset {
+ return imds.EndpointModeStateUnset, false, nil
+ }
+
+ return o.EC2IMDSEndpointMode, true, nil
+}
+
+// GetEC2IMDSEndpoint implements an EC2IMDSEndpoint option resolver interface.
+func (o LoadOptions) GetEC2IMDSEndpoint() (string, bool, error) {
+ if len(o.EC2IMDSEndpoint) == 0 {
+ return "", false, nil
+ }
+
+ return o.EC2IMDSEndpoint, true, nil
+}
+
+// WithEC2IMDSClientEnableState is a helper function to construct functional options that sets the EC2IMDSClientEnableState.
+func WithEC2IMDSClientEnableState(v imds.ClientEnableState) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.EC2IMDSClientEnableState = v
+ return nil
+ }
+}
+
+// WithEC2IMDSEndpointMode is a helper function to construct functional options that sets the EC2IMDSEndpointMode.
+func WithEC2IMDSEndpointMode(v imds.EndpointModeState) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.EC2IMDSEndpointMode = v
+ return nil
+ }
+}
+
+// WithEC2IMDSEndpoint is a helper function to construct functional options that sets the EC2IMDSEndpoint.
+func WithEC2IMDSEndpoint(v string) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.EC2IMDSEndpoint = v
+ return nil
+ }
+}
+
+// WithUseDualStackEndpoint is a helper function to construct
+// functional options that can be used to set UseDualStackEndpoint on LoadOptions.
+func WithUseDualStackEndpoint(v aws.DualStackEndpointState) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.UseDualStackEndpoint = v
+ return nil
+ }
+}
+
+// GetUseDualStackEndpoint returns whether the service's dual-stack endpoint should be
+// used for requests.
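+//
+// A sketch pairing this accessor with WithUseDualStackEndpoint (defined above):
+//
+//	cfg, err := config.LoadDefaultConfig(context.TODO(),
+//		config.WithUseDualStackEndpoint(aws.DualStackEndpointStateEnabled))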
+func (o LoadOptions) GetUseDualStackEndpoint(ctx context.Context) (value aws.DualStackEndpointState, found bool, err error) {
+	if o.UseDualStackEndpoint == aws.DualStackEndpointStateUnset {
+		return aws.DualStackEndpointStateUnset, false, nil
+	}
+	return o.UseDualStackEndpoint, true, nil
+}
+
+// WithUseFIPSEndpoint is a helper function to construct
+// functional options that can be used to set UseFIPSEndpoint on LoadOptions.
+func WithUseFIPSEndpoint(v aws.FIPSEndpointState) LoadOptionsFunc {
+	return func(o *LoadOptions) error {
+		o.UseFIPSEndpoint = v
+		return nil
+	}
+}
+
+// GetUseFIPSEndpoint returns whether the service's FIPS endpoint should be
+// used for requests.
+func (o LoadOptions) GetUseFIPSEndpoint(ctx context.Context) (value aws.FIPSEndpointState, found bool, err error) {
+	if o.UseFIPSEndpoint == aws.FIPSEndpointStateUnset {
+		return aws.FIPSEndpointStateUnset, false, nil
+	}
+	return o.UseFIPSEndpoint, true, nil
+}
+
+// WithDefaultsMode sets the SDK defaults configuration mode to the value provided.
+//
+// Zero or more functional options may be provided to configure how environment
+// discovery is performed when using aws.DefaultsModeAuto.
+func WithDefaultsMode(mode aws.DefaultsMode, optFns ...func(options *DefaultsModeOptions)) LoadOptionsFunc {
+	do := DefaultsModeOptions{
+		Mode: mode,
+	}
+	for _, fn := range optFns {
+		fn(&do)
+	}
+	return func(options *LoadOptions) error {
+		options.DefaultsModeOptions = do
+		return nil
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/local.go b/vendor/github.com/aws/aws-sdk-go-v2/config/local.go
new file mode 100644
index 00000000000..b629137c821
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/local.go
@@ -0,0 +1,51 @@
+package config
+
+import (
+	"fmt"
+	"net"
+	"net/url"
+)
+
+var lookupHostFn = net.LookupHost
+
+func isLoopbackHost(host string) (bool, error) {
+	ip := net.ParseIP(host)
+	if ip != nil {
+		return ip.IsLoopback(), nil
+	}
+
+	// Host is not an IP address, perform lookup.
+	addrs, err := lookupHostFn(host)
+	if err != nil {
+		return false, err
+	}
+	if len(addrs) == 0 {
+		return false, fmt.Errorf("no addrs found for host, %s", host)
+	}
+
+	for _, addr := range addrs {
+		if !net.ParseIP(addr).IsLoopback() {
+			return false, nil
+		}
+	}
+
+	return true, nil
+}
+
+func validateLocalURL(v string) error {
+	u, err := url.Parse(v)
+	if err != nil {
+		return err
+	}
+
+	host := u.Hostname()
+	if len(host) == 0 {
+		return fmt.Errorf("unable to parse host from local HTTP cred provider URL")
+	} else if isLoopback, err := isLoopbackHost(host); err != nil {
+		return fmt.Errorf("failed to resolve host %q, %v", host, err)
+	} else if !isLoopback {
+		return fmt.Errorf("invalid endpoint host, %q, only hosts resolving to loopback addresses are allowed", host)
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go
new file mode 100644
index 00000000000..d5235846011
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go
@@ -0,0 +1,670 @@
+package config
+
+import (
+	"context"
+	"io"
+	"net/http"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds"
+	"github.com/aws/aws-sdk-go-v2/credentials/endpointcreds"
+	"github.com/aws/aws-sdk-go-v2/credentials/processcreds"
+	"github.com/aws/aws-sdk-go-v2/credentials/ssocreds"
+	"github.com/aws/aws-sdk-go-v2/credentials/stscreds"
+	"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
+	smithybearer "github.com/aws/smithy-go/auth/bearer"
"github.com/aws/smithy-go/auth/bearer" + "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/middleware" +) + +// sharedConfigProfileProvider provides access to the shared config profile +// name external configuration value. +type sharedConfigProfileProvider interface { + getSharedConfigProfile(ctx context.Context) (string, bool, error) +} + +// getSharedConfigProfile searches the configs for a sharedConfigProfileProvider +// and returns the value if found. Returns an error if a provider fails before a +// value is found. +func getSharedConfigProfile(ctx context.Context, configs configs) (value string, found bool, err error) { + for _, cfg := range configs { + if p, ok := cfg.(sharedConfigProfileProvider); ok { + value, found, err = p.getSharedConfigProfile(ctx) + if err != nil || found { + break + } + } + } + return +} + +// sharedConfigFilesProvider provides access to the shared config filesnames +// external configuration value. +type sharedConfigFilesProvider interface { + getSharedConfigFiles(ctx context.Context) ([]string, bool, error) +} + +// getSharedConfigFiles searches the configs for a sharedConfigFilesProvider +// and returns the value if found. Returns an error if a provider fails before a +// value is found. +func getSharedConfigFiles(ctx context.Context, configs configs) (value []string, found bool, err error) { + for _, cfg := range configs { + if p, ok := cfg.(sharedConfigFilesProvider); ok { + value, found, err = p.getSharedConfigFiles(ctx) + if err != nil || found { + break + } + } + } + + return +} + +// sharedCredentialsFilesProvider provides access to the shared credentials filesnames +// external configuration value. +type sharedCredentialsFilesProvider interface { + getSharedCredentialsFiles(ctx context.Context) ([]string, bool, error) +} + +// getSharedCredentialsFiles searches the configs for a sharedCredentialsFilesProvider +// and returns the value if found. Returns an error if a provider fails before a +// value is found. +func getSharedCredentialsFiles(ctx context.Context, configs configs) (value []string, found bool, err error) { + for _, cfg := range configs { + if p, ok := cfg.(sharedCredentialsFilesProvider); ok { + value, found, err = p.getSharedCredentialsFiles(ctx) + if err != nil || found { + break + } + } + } + + return +} + +// customCABundleProvider provides access to the custom CA bundle PEM bytes. +type customCABundleProvider interface { + getCustomCABundle(ctx context.Context) (io.Reader, bool, error) +} + +// getCustomCABundle searches the configs for a customCABundleProvider +// and returns the value if found. Returns an error if a provider fails before a +// value is found. +func getCustomCABundle(ctx context.Context, configs configs) (value io.Reader, found bool, err error) { + for _, cfg := range configs { + if p, ok := cfg.(customCABundleProvider); ok { + value, found, err = p.getCustomCABundle(ctx) + if err != nil || found { + break + } + } + } + + return +} + +// regionProvider provides access to the region external configuration value. +type regionProvider interface { + getRegion(ctx context.Context) (string, bool, error) +} + +// getRegion searches the configs for a regionProvider and returns the value +// if found. Returns an error if a provider fails before a value is found. 
+func getRegion(ctx context.Context, configs configs) (value string, found bool, err error) {
+	for _, cfg := range configs {
+		if p, ok := cfg.(regionProvider); ok {
+			value, found, err = p.getRegion(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// IgnoreConfiguredEndpointsProvider is needed to search for all providers
+// that provide a flag to disable configured endpoints.
+type IgnoreConfiguredEndpointsProvider interface {
+	GetIgnoreConfiguredEndpoints(ctx context.Context) (bool, bool, error)
+}
+
+// GetIgnoreConfiguredEndpoints is used to determine whether the configured
+// endpoints feature should be disabled.
+func GetIgnoreConfiguredEndpoints(ctx context.Context, configs []interface{}) (value bool, found bool, err error) {
+	for _, cfg := range configs {
+		if p, ok := cfg.(IgnoreConfiguredEndpointsProvider); ok {
+			value, found, err = p.GetIgnoreConfiguredEndpoints(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+type baseEndpointProvider interface {
+	getBaseEndpoint(ctx context.Context) (string, bool, error)
+}
+
+func getBaseEndpoint(ctx context.Context, configs configs) (value string, found bool, err error) {
+	for _, cfg := range configs {
+		if p, ok := cfg.(baseEndpointProvider); ok {
+			value, found, err = p.getBaseEndpoint(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+type servicesObjectProvider interface {
+	getServicesObject(ctx context.Context) (map[string]map[string]string, bool, error)
+}
+
+func getServicesObject(ctx context.Context, configs configs) (value map[string]map[string]string, found bool, err error) {
+	for _, cfg := range configs {
+		if p, ok := cfg.(servicesObjectProvider); ok {
+			value, found, err = p.getServicesObject(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// appIDProvider provides access to the SDK app ID value.
+type appIDProvider interface {
+	getAppID(ctx context.Context) (string, bool, error)
+}
+
+func getAppID(ctx context.Context, configs configs) (value string, found bool, err error) {
+	for _, cfg := range configs {
+		if p, ok := cfg.(appIDProvider); ok {
+			value, found, err = p.getAppID(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// ec2IMDSRegionProvider provides access to the ec2 imds region
+// configuration value.
+type ec2IMDSRegionProvider interface {
+	getEC2IMDSRegion(ctx context.Context) (string, bool, error)
+}
+
+// getEC2IMDSRegion searches the configs for an ec2IMDSRegionProvider and
+// returns the value if found. Returns an error if a provider fails before
+// a value is found.
+func getEC2IMDSRegion(ctx context.Context, configs configs) (region string, found bool, err error) {
+	for _, cfg := range configs {
+		if provider, ok := cfg.(ec2IMDSRegionProvider); ok {
+			region, found, err = provider.getEC2IMDSRegion(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// credentialsProviderProvider provides access to the credentials external
+// configuration value.
+type credentialsProviderProvider interface {
+	getCredentialsProvider(ctx context.Context) (aws.CredentialsProvider, bool, error)
+}
+
+// getCredentialsProvider searches the configs for a credentialsProviderProvider
+// and returns the value if found. Returns an error if a provider fails before a
+// value is found.
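+//
+// Illustrative sketch (assumed usage, not part of this file): supplying an
+// explicit provider this way short-circuits the credential chain resolution
+// performed later in this package:
+//
+//	cfg, err := config.LoadDefaultConfig(context.TODO(),
+//		config.WithCredentialsProvider(
+//			credentials.NewStaticCredentialsProvider("EXAMPLE_KEY_ID", "EXAMPLE_SECRET", ""),
+//		),
+//	)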
+func getCredentialsProvider(ctx context.Context, configs configs) (p aws.CredentialsProvider, found bool, err error) {
+	for _, cfg := range configs {
+		if provider, ok := cfg.(credentialsProviderProvider); ok {
+			p, found, err = provider.getCredentialsProvider(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// credentialsCacheOptionsProvider is an interface for retrieving a function for setting
+// the aws.CredentialsCacheOptions.
+type credentialsCacheOptionsProvider interface {
+	getCredentialsCacheOptions(ctx context.Context) (func(*aws.CredentialsCacheOptions), bool, error)
+}
+
+// getCredentialsCacheOptionsProvider searches the config sources for a
+// credentialsCacheOptionsProvider and returns the first function found.
+func getCredentialsCacheOptionsProvider(ctx context.Context, configs configs) (
+	f func(*aws.CredentialsCacheOptions), found bool, err error,
+) {
+	for _, config := range configs {
+		if p, ok := config.(credentialsCacheOptionsProvider); ok {
+			f, found, err = p.getCredentialsCacheOptions(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// bearerAuthTokenProviderProvider provides access to the bearer authentication
+// token external configuration value.
+type bearerAuthTokenProviderProvider interface {
+	getBearerAuthTokenProvider(context.Context) (smithybearer.TokenProvider, bool, error)
+}
+
+// getBearerAuthTokenProvider searches the config sources for a
+// bearerAuthTokenProviderProvider and returns the value if found. Returns an
+// error if a provider fails before a value is found.
+func getBearerAuthTokenProvider(ctx context.Context, configs configs) (p smithybearer.TokenProvider, found bool, err error) {
+	for _, cfg := range configs {
+		if provider, ok := cfg.(bearerAuthTokenProviderProvider); ok {
+			p, found, err = provider.getBearerAuthTokenProvider(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// bearerAuthTokenCacheOptionsProvider is an interface for retrieving a function for
+// setting the smithy-go auth/bearer#TokenCacheOptions.
+type bearerAuthTokenCacheOptionsProvider interface {
+	getBearerAuthTokenCacheOptions(context.Context) (func(*smithybearer.TokenCacheOptions), bool, error)
+}
+
+// getBearerAuthTokenCacheOptions searches the config sources for a
+// bearerAuthTokenCacheOptionsProvider and returns the first function found.
+func getBearerAuthTokenCacheOptions(ctx context.Context, configs configs) (
+	f func(*smithybearer.TokenCacheOptions), found bool, err error,
+) {
+	for _, config := range configs {
+		if p, ok := config.(bearerAuthTokenCacheOptionsProvider); ok {
+			f, found, err = p.getBearerAuthTokenCacheOptions(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// ssoTokenProviderOptionsProvider is an interface for retrieving a function for
+// setting the SDK's credentials/ssocreds#SSOTokenProviderOptions.
+type ssoTokenProviderOptionsProvider interface {
+	getSSOTokenProviderOptions(context.Context) (func(*ssocreds.SSOTokenProviderOptions), bool, error)
+}
+
+// getSSOTokenProviderOptions searches the config sources for an
+// ssoTokenProviderOptionsProvider and returns the first function found.
+func getSSOTokenProviderOptions(ctx context.Context, configs configs) (
+	f func(*ssocreds.SSOTokenProviderOptions), found bool, err error,
+) {
+	for _, config := range configs {
+		if p, ok := config.(ssoTokenProviderOptionsProvider); ok {
+			f, found, err = p.getSSOTokenProviderOptions(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// processCredentialOptions is an interface for retrieving a function for setting
+// the processcreds.Options.
+type processCredentialOptions interface {
+	getProcessCredentialOptions(ctx context.Context) (func(*processcreds.Options), bool, error)
+}
+
+// getProcessCredentialOptions searches the slice of configs and returns the first function found
+func getProcessCredentialOptions(ctx context.Context, configs configs) (f func(*processcreds.Options), found bool, err error) {
+	for _, config := range configs {
+		if p, ok := config.(processCredentialOptions); ok {
+			f, found, err = p.getProcessCredentialOptions(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// ec2RoleCredentialOptionsProvider is an interface for retrieving a function
+// for setting the ec2rolecreds.Provider options.
+type ec2RoleCredentialOptionsProvider interface {
+	getEC2RoleCredentialOptions(ctx context.Context) (func(*ec2rolecreds.Options), bool, error)
+}
+
+// getEC2RoleCredentialProviderOptions searches the slice of configs and returns the first function found
+func getEC2RoleCredentialProviderOptions(ctx context.Context, configs configs) (f func(*ec2rolecreds.Options), found bool, err error) {
+	for _, config := range configs {
+		if p, ok := config.(ec2RoleCredentialOptionsProvider); ok {
+			f, found, err = p.getEC2RoleCredentialOptions(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// defaultRegionProvider is an interface for retrieving a default region if a region was not resolved from other sources
+type defaultRegionProvider interface {
+	getDefaultRegion(ctx context.Context) (string, bool, error)
+}
+
+// getDefaultRegion searches the slice of configs and returns the first fallback region found
+func getDefaultRegion(ctx context.Context, configs configs) (value string, found bool, err error) {
+	for _, config := range configs {
+		if p, ok := config.(defaultRegionProvider); ok {
+			value, found, err = p.getDefaultRegion(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// endpointCredentialOptionsProvider is an interface for retrieving a function for setting
+// the endpointcreds.ProviderOptions.
+type endpointCredentialOptionsProvider interface {
+	getEndpointCredentialOptions(ctx context.Context) (func(*endpointcreds.Options), bool, error)
+}
+
+// getEndpointCredentialProviderOptions searches the slice of configs and returns the first function found
+func getEndpointCredentialProviderOptions(ctx context.Context, configs configs) (f func(*endpointcreds.Options), found bool, err error) {
+	for _, config := range configs {
+		if p, ok := config.(endpointCredentialOptionsProvider); ok {
+			f, found, err = p.getEndpointCredentialOptions(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// webIdentityRoleCredentialOptionsProvider is an interface for retrieving a function for setting
+// the stscreds.WebIdentityRoleProvider.
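+//
+// Illustrative sketch (assumed usage, not part of this file): the matching
+// functional option can adjust the web identity provider, for example:
+//
+//	cfg, err := config.LoadDefaultConfig(context.TODO(),
+//		config.WithWebIdentityRoleCredentialOptions(func(o *stscreds.WebIdentityRoleOptions) {
+//			o.RoleSessionName = "my-session" // hypothetical session name
+//		}),
+//	)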
+type webIdentityRoleCredentialOptionsProvider interface {
+	getWebIdentityRoleCredentialOptions(ctx context.Context) (func(*stscreds.WebIdentityRoleOptions), bool, error)
+}
+
+// getWebIdentityCredentialProviderOptions searches the slice of configs and returns the first function found
+func getWebIdentityCredentialProviderOptions(ctx context.Context, configs configs) (f func(*stscreds.WebIdentityRoleOptions), found bool, err error) {
+	for _, config := range configs {
+		if p, ok := config.(webIdentityRoleCredentialOptionsProvider); ok {
+			f, found, err = p.getWebIdentityRoleCredentialOptions(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// assumeRoleCredentialOptionsProvider is an interface for retrieving a function for setting
+// the stscreds.AssumeRoleOptions.
+type assumeRoleCredentialOptionsProvider interface {
+	getAssumeRoleCredentialOptions(ctx context.Context) (func(*stscreds.AssumeRoleOptions), bool, error)
+}
+
+// getAssumeRoleCredentialProviderOptions searches the slice of configs and returns the first function found
+func getAssumeRoleCredentialProviderOptions(ctx context.Context, configs configs) (f func(*stscreds.AssumeRoleOptions), found bool, err error) {
+	for _, config := range configs {
+		if p, ok := config.(assumeRoleCredentialOptionsProvider); ok {
+			f, found, err = p.getAssumeRoleCredentialOptions(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// HTTPClient is an HTTP client implementation
+type HTTPClient interface {
+	Do(*http.Request) (*http.Response, error)
+}
+
+// httpClientProvider is an interface for retrieving HTTPClient
+type httpClientProvider interface {
+	getHTTPClient(ctx context.Context) (HTTPClient, bool, error)
+}
+
+// getHTTPClient searches the slice of configs and returns the HTTPClient set on configs
+func getHTTPClient(ctx context.Context, configs configs) (client HTTPClient, found bool, err error) {
+	for _, config := range configs {
+		if p, ok := config.(httpClientProvider); ok {
+			client, found, err = p.getHTTPClient(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// apiOptionsProvider is an interface for retrieving APIOptions
+type apiOptionsProvider interface {
+	getAPIOptions(ctx context.Context) ([]func(*middleware.Stack) error, bool, error)
+}
+
+// getAPIOptions searches the slice of configs and returns the APIOptions set on configs
+func getAPIOptions(ctx context.Context, configs configs) (apiOptions []func(*middleware.Stack) error, found bool, err error) {
+	for _, config := range configs {
+		if p, ok := config.(apiOptionsProvider); ok {
+			// retrieve APIOptions from configs and set it on cfg
+			apiOptions, found, err = p.getAPIOptions(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// endpointResolverProvider is an interface for retrieving an aws.EndpointResolver from a configuration source
+type endpointResolverProvider interface {
+	getEndpointResolver(ctx context.Context) (aws.EndpointResolver, bool, error)
+}
+
+// getEndpointResolver searches the provided config sources for an EndpointResolverFunc that can be used
+// to configure the aws.Config.EndpointResolver value.
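+//
+// Illustrative sketch (assumed usage, not part of this file; this shows the
+// older aws.EndpointResolver form, and the URL is a placeholder):
+//
+//	resolver := aws.EndpointResolverFunc(func(service, region string) (aws.Endpoint, error) {
+//		return aws.Endpoint{URL: "http://localhost:4566"}, nil
+//	})
+//	cfg, err := config.LoadDefaultConfig(context.TODO(),
+//		config.WithEndpointResolver(resolver),
+//	)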
+func getEndpointResolver(ctx context.Context, configs configs) (f aws.EndpointResolver, found bool, err error) {
+	for _, c := range configs {
+		if p, ok := c.(endpointResolverProvider); ok {
+			f, found, err = p.getEndpointResolver(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// endpointResolverWithOptionsProvider is an interface for retrieving an aws.EndpointResolverWithOptions from a configuration source
+type endpointResolverWithOptionsProvider interface {
+	getEndpointResolverWithOptions(ctx context.Context) (aws.EndpointResolverWithOptions, bool, error)
+}
+
+// getEndpointResolverWithOptions searches the provided config sources for an EndpointResolverWithOptions that can be used
+// to configure the aws.Config.EndpointResolverWithOptions value.
+func getEndpointResolverWithOptions(ctx context.Context, configs configs) (f aws.EndpointResolverWithOptions, found bool, err error) {
+	for _, c := range configs {
+		if p, ok := c.(endpointResolverWithOptionsProvider); ok {
+			f, found, err = p.getEndpointResolverWithOptions(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// loggerProvider is an interface for retrieving a logging.Logger from a configuration source.
+type loggerProvider interface {
+	getLogger(ctx context.Context) (logging.Logger, bool, error)
+}
+
+// getLogger searches the provided config sources for a logging.Logger that can be used
+// to configure the aws.Config.Logger value.
+func getLogger(ctx context.Context, configs configs) (l logging.Logger, found bool, err error) {
+	for _, c := range configs {
+		if p, ok := c.(loggerProvider); ok {
+			l, found, err = p.getLogger(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// clientLogModeProvider is an interface for retrieving the aws.ClientLogMode from a configuration source.
+type clientLogModeProvider interface {
+	getClientLogMode(ctx context.Context) (aws.ClientLogMode, bool, error)
+}
+
+func getClientLogMode(ctx context.Context, configs configs) (m aws.ClientLogMode, found bool, err error) {
+	for _, c := range configs {
+		if p, ok := c.(clientLogModeProvider); ok {
+			m, found, err = p.getClientLogMode(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// retryProvider is a configuration provider for a custom Retryer.
+type retryProvider interface {
+	getRetryer(ctx context.Context) (func() aws.Retryer, bool, error)
+}
+
+func getRetryer(ctx context.Context, configs configs) (v func() aws.Retryer, found bool, err error) {
+	for _, c := range configs {
+		if p, ok := c.(retryProvider); ok {
+			v, found, err = p.getRetryer(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// logConfigurationWarningsProvider is a configuration provider for
+// retrieving a boolean indicating whether configuration issues should
+// be logged when loading from config sources.
+type logConfigurationWarningsProvider interface {
+	getLogConfigurationWarnings(ctx context.Context) (bool, bool, error)
+}
+
+func getLogConfigurationWarnings(ctx context.Context, configs configs) (v bool, found bool, err error) {
+	for _, c := range configs {
+		if p, ok := c.(logConfigurationWarningsProvider); ok {
+			v, found, err = p.getLogConfigurationWarnings(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// ssoCredentialOptionsProvider is an interface for retrieving a function for setting
+// the ssocreds.Options.
+type ssoCredentialOptionsProvider interface {
+	getSSOProviderOptions(context.Context) (func(*ssocreds.Options), bool, error)
+}
+
+func getSSOProviderOptions(ctx context.Context, configs configs) (v func(options *ssocreds.Options), found bool, err error) {
+	for _, c := range configs {
+		if p, ok := c.(ssoCredentialOptionsProvider); ok {
+			v, found, err = p.getSSOProviderOptions(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return v, found, err
+}
+
+type defaultsModeIMDSClientProvider interface {
+	getDefaultsModeIMDSClient(context.Context) (*imds.Client, bool, error)
+}
+
+func getDefaultsModeIMDSClient(ctx context.Context, configs configs) (v *imds.Client, found bool, err error) {
+	for _, c := range configs {
+		if p, ok := c.(defaultsModeIMDSClientProvider); ok {
+			v, found, err = p.getDefaultsModeIMDSClient(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return v, found, err
+}
+
+type defaultsModeProvider interface {
+	getDefaultsMode(context.Context) (aws.DefaultsMode, bool, error)
+}
+
+func getDefaultsMode(ctx context.Context, configs configs) (v aws.DefaultsMode, found bool, err error) {
+	for _, c := range configs {
+		if p, ok := c.(defaultsModeProvider); ok {
+			v, found, err = p.getDefaultsMode(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return v, found, err
+}
+
+type retryMaxAttemptsProvider interface {
+	GetRetryMaxAttempts(context.Context) (int, bool, error)
+}
+
+func getRetryMaxAttempts(ctx context.Context, configs configs) (v int, found bool, err error) {
+	for _, c := range configs {
+		if p, ok := c.(retryMaxAttemptsProvider); ok {
+			v, found, err = p.GetRetryMaxAttempts(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return v, found, err
+}
+
+type retryModeProvider interface {
+	GetRetryMode(context.Context) (aws.RetryMode, bool, error)
+}
+
+func getRetryMode(ctx context.Context, configs configs) (v aws.RetryMode, found bool, err error) {
+	for _, c := range configs {
+		if p, ok := c.(retryModeProvider); ok {
+			v, found, err = p.GetRetryMode(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return v, found, err
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go
new file mode 100644
index 00000000000..b5a74b23197
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go
@@ -0,0 +1,341 @@
+package config
+
+import (
+	"context"
+	"crypto/tls"
+	"crypto/x509"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"os"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
+	"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
+	"github.com/aws/smithy-go/logging"
+)
+
+// resolveDefaultAWSConfig will write default configuration values into the cfg
+// value. It will write the default values, overwriting any previous value.
+//
+// This should be used as the first resolver in the slice of resolvers when
+// resolving external configuration.
+func resolveDefaultAWSConfig(ctx context.Context, cfg *aws.Config, cfgs configs) error {
+	var sources []interface{}
+	for _, s := range cfgs {
+		sources = append(sources, s)
+	}
+
+	*cfg = aws.Config{
+		Credentials:   aws.AnonymousCredentials{},
+		Logger:        logging.NewStandardLogger(os.Stderr),
+		ConfigSources: sources,
+	}
+	return nil
+}
+
+// resolveCustomCABundle extracts the first instance of a custom CA bundle filename
+// from the external configurations. It will update the HTTP Client's builder
+// to be configured with the custom CA bundle.
+//
+// Config provider used:
+// * customCABundleProvider
+func resolveCustomCABundle(ctx context.Context, cfg *aws.Config, cfgs configs) error {
+	pemCerts, found, err := getCustomCABundle(ctx, cfgs)
+	if err != nil {
+		// TODO: error handling. Capture previous errors and continue;
+		// error out only if all sources fail.
+		return err
+	}
+	if !found {
+		return nil
+	}
+
+	if cfg.HTTPClient == nil {
+		cfg.HTTPClient = awshttp.NewBuildableClient()
+	}
+
+	trOpts, ok := cfg.HTTPClient.(*awshttp.BuildableClient)
+	if !ok {
+		return fmt.Errorf("unable to add custom RootCAs to HTTPClient, "+
+			"has no WithTransportOptions, %T", cfg.HTTPClient)
+	}
+
+	var appendErr error
+	client := trOpts.WithTransportOptions(func(tr *http.Transport) {
+		if tr.TLSClientConfig == nil {
+			tr.TLSClientConfig = &tls.Config{}
+		}
+		if tr.TLSClientConfig.RootCAs == nil {
+			tr.TLSClientConfig.RootCAs = x509.NewCertPool()
+		}
+
+		b, err := ioutil.ReadAll(pemCerts)
+		if err != nil {
+			appendErr = fmt.Errorf("failed to read custom CA bundle PEM file, %w", err)
+		}
+
+		if !tr.TLSClientConfig.RootCAs.AppendCertsFromPEM(b) {
+			appendErr = fmt.Errorf("failed to load custom CA bundle PEM file")
+		}
+	})
+	if appendErr != nil {
+		return appendErr
+	}
+
+	cfg.HTTPClient = client
+	return nil
+}
+
+// resolveRegion extracts the first instance of a Region from the configs slice.
+//
+// Config providers used:
+// * regionProvider
+func resolveRegion(ctx context.Context, cfg *aws.Config, configs configs) error {
+	v, found, err := getRegion(ctx, configs)
+	if err != nil {
+		// TODO: error handling. Capture previous errors and continue;
+		// error out only if all sources fail.
+		return err
+	}
+	if !found {
+		return nil
+	}
+
+	cfg.Region = v
+	return nil
+}
+
+func resolveBaseEndpoint(ctx context.Context, cfg *aws.Config, configs configs) error {
+	var downcastCfgSources []interface{}
+	for _, cs := range configs {
+		downcastCfgSources = append(downcastCfgSources, interface{}(cs))
+	}
+
+	if val, found, err := GetIgnoreConfiguredEndpoints(ctx, downcastCfgSources); found && val && err == nil {
+		cfg.BaseEndpoint = nil
+		return nil
+	}
+
+	v, found, err := getBaseEndpoint(ctx, configs)
+	if err != nil {
+		return err
+	}
+
+	if !found {
+		return nil
+	}
+	cfg.BaseEndpoint = aws.String(v)
+	return nil
+}
+
+// resolveAppID extracts the sdk app ID from the configs slice's SharedConfig or env var
+func resolveAppID(ctx context.Context, cfg *aws.Config, configs configs) error {
+	ID, _, err := getAppID(ctx, configs)
+	if err != nil {
+		return err
+	}
+
+	cfg.AppID = ID
+	return nil
+}
+
+// resolveDefaultRegion extracts the first instance of a default region and sets `aws.Config.Region` to the default
+// region if region had not been resolved from other sources.
+func resolveDefaultRegion(ctx context.Context, cfg *aws.Config, configs configs) error {
+	if len(cfg.Region) > 0 {
+		return nil
+	}
+
+	v, found, err := getDefaultRegion(ctx, configs)
+	if err != nil {
+		return err
+	}
+	if !found {
+		return nil
+	}
+
+	cfg.Region = v
+
+	return nil
+}
+
+// resolveHTTPClient extracts the first instance of an HTTPClient and sets `aws.Config.HTTPClient` to the HTTPClient instance
+// if one has not been resolved from other sources.
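+//
+// Illustrative sketch (assumed usage, not part of this file): any type with
+// a Do(*http.Request) (*http.Response, error) method satisfies HTTPClient,
+// including *http.Client:
+//
+//	cfg, err := config.LoadDefaultConfig(context.TODO(),
+//		config.WithHTTPClient(&http.Client{Timeout: 30 * time.Second}),
+//	)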
+func resolveHTTPClient(ctx context.Context, cfg *aws.Config, configs configs) error {
+	c, found, err := getHTTPClient(ctx, configs)
+	if err != nil {
+		return err
+	}
+	if !found {
+		return nil
+	}
+
+	cfg.HTTPClient = c
+	return nil
+}
+
+// resolveAPIOptions extracts the first instance of APIOptions and sets `aws.Config.APIOptions` to the resolved API options
+// if one has not been resolved from other sources.
+func resolveAPIOptions(ctx context.Context, cfg *aws.Config, configs configs) error {
+	o, found, err := getAPIOptions(ctx, configs)
+	if err != nil {
+		return err
+	}
+	if !found {
+		return nil
+	}
+
+	cfg.APIOptions = o
+
+	return nil
+}
+
+// resolveEndpointResolver extracts the first instance of an EndpointResolverFunc from the config slice
+// and sets the function's result on aws.Config.EndpointResolver.
+func resolveEndpointResolver(ctx context.Context, cfg *aws.Config, configs configs) error {
+	endpointResolver, found, err := getEndpointResolver(ctx, configs)
+	if err != nil {
+		return err
+	}
+	if !found {
+		return nil
+	}
+
+	cfg.EndpointResolver = endpointResolver
+
+	return nil
+}
+
+// resolveEndpointResolverWithOptions extracts the first instance of an EndpointResolverWithOptions from the config slice
+// and sets the function's result on aws.Config.EndpointResolverWithOptions.
+func resolveEndpointResolverWithOptions(ctx context.Context, cfg *aws.Config, configs configs) error {
+	endpointResolver, found, err := getEndpointResolverWithOptions(ctx, configs)
+	if err != nil {
+		return err
+	}
+	if !found {
+		return nil
+	}
+
+	cfg.EndpointResolverWithOptions = endpointResolver
+
+	return nil
+}
+
+func resolveLogger(ctx context.Context, cfg *aws.Config, configs configs) error {
+	logger, found, err := getLogger(ctx, configs)
+	if err != nil {
+		return err
+	}
+	if !found {
+		return nil
+	}
+
+	cfg.Logger = logger
+
+	return nil
+}
+
+func resolveClientLogMode(ctx context.Context, cfg *aws.Config, configs configs) error {
+	mode, found, err := getClientLogMode(ctx, configs)
+	if err != nil {
+		return err
+	}
+	if !found {
+		return nil
+	}
+
+	cfg.ClientLogMode = mode
+
+	return nil
+}
+
+func resolveRetryer(ctx context.Context, cfg *aws.Config, configs configs) error {
+	retryer, found, err := getRetryer(ctx, configs)
+	if err != nil {
+		return err
+	}
+
+	if found {
+		cfg.Retryer = retryer
+		return nil
+	}
+
+	// Only load the retry options if a custom retryer has not been specified.
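+	// This point is reached only when no custom retryer was found above;
+	// max_attempts and retry_mode from other sources are therefore ignored
+	// whenever a custom retryer is configured.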
+	if err = resolveRetryMaxAttempts(ctx, cfg, configs); err != nil {
+		return err
+	}
+	return resolveRetryMode(ctx, cfg, configs)
+}
+
+func resolveEC2IMDSRegion(ctx context.Context, cfg *aws.Config, configs configs) error {
+	if len(cfg.Region) > 0 {
+		return nil
+	}
+
+	region, found, err := getEC2IMDSRegion(ctx, configs)
+	if err != nil {
+		return err
+	}
+	if !found {
+		return nil
+	}
+
+	cfg.Region = region
+
+	return nil
+}
+
+func resolveDefaultsModeOptions(ctx context.Context, cfg *aws.Config, configs configs) error {
+	defaultsMode, found, err := getDefaultsMode(ctx, configs)
+	if err != nil {
+		return err
+	}
+	if !found {
+		defaultsMode = aws.DefaultsModeLegacy
+	}
+
+	var environment aws.RuntimeEnvironment
+	if defaultsMode == aws.DefaultsModeAuto {
+		envConfig, _, _ := getAWSConfigSources(configs)
+
+		client, found, err := getDefaultsModeIMDSClient(ctx, configs)
+		if err != nil {
+			return err
+		}
+		if !found {
+			client = imds.NewFromConfig(*cfg)
+		}
+
+		environment, err = resolveDefaultsModeRuntimeEnvironment(ctx, envConfig, client)
+		if err != nil {
+			return err
+		}
+	}
+
+	cfg.DefaultsMode = defaultsMode
+	cfg.RuntimeEnvironment = environment
+
+	return nil
+}
+
+func resolveRetryMaxAttempts(ctx context.Context, cfg *aws.Config, configs configs) error {
+	maxAttempts, found, err := getRetryMaxAttempts(ctx, configs)
+	if err != nil || !found {
+		return err
+	}
+	cfg.RetryMaxAttempts = maxAttempts
+
+	return nil
+}
+
+func resolveRetryMode(ctx context.Context, cfg *aws.Config, configs configs) error {
+	retryMode, found, err := getRetryMode(ctx, configs)
+	if err != nil || !found {
+		return err
+	}
+	cfg.RetryMode = retryMode
+
+	return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_bearer_token.go b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_bearer_token.go
new file mode 100644
index 00000000000..a8ebb3c0a39
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_bearer_token.go
@@ -0,0 +1,122 @@
+package config
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/credentials/ssocreds"
+	"github.com/aws/aws-sdk-go-v2/service/ssooidc"
+	smithybearer "github.com/aws/smithy-go/auth/bearer"
+)
+
+// resolveBearerAuthToken extracts a token provider from the config sources.
+//
+// If an explicit bearer authentication token provider is not found the
+// resolver will fall back to resolving a token provider via other config
+// sources such as SharedConfig.
+func resolveBearerAuthToken(ctx context.Context, cfg *aws.Config, configs configs) error {
+	found, err := resolveBearerAuthTokenProvider(ctx, cfg, configs)
+	if found || err != nil {
+		return err
+	}
+
+	return resolveBearerAuthTokenProviderChain(ctx, cfg, configs)
+}
+
+// resolveBearerAuthTokenProvider extracts the first instance of
+// BearerAuthTokenProvider from the config sources.
+//
+// The resolved BearerAuthTokenProvider will be wrapped in a cache to ensure
+// the Token is only refreshed when needed. This also protects the
+// TokenProvider so it can be used concurrently.
+//
+// Config providers used:
+// * bearerAuthTokenProviderProvider
+func resolveBearerAuthTokenProvider(ctx context.Context, cfg *aws.Config, configs configs) (bool, error) {
+	tokenProvider, found, err := getBearerAuthTokenProvider(ctx, configs)
+	if !found || err != nil {
+		return false, err
+	}
+
+	cfg.BearerAuthTokenProvider, err = wrapWithBearerAuthTokenCache(
+		ctx, configs, tokenProvider)
+	if err != nil {
+		return false, err
+	}
+
+	return true, nil
+}
+
+func resolveBearerAuthTokenProviderChain(ctx context.Context, cfg *aws.Config, configs configs) (err error) {
+	_, sharedConfig, _ := getAWSConfigSources(configs)
+
+	var provider smithybearer.TokenProvider
+
+	if sharedConfig.SSOSession != nil {
+		provider, err = resolveBearerAuthSSOTokenProvider(
+			ctx, cfg, sharedConfig.SSOSession, configs)
+	}
+
+	if err == nil && provider != nil {
+		cfg.BearerAuthTokenProvider, err = wrapWithBearerAuthTokenCache(
+			ctx, configs, provider)
+	}
+
+	return err
+}
+
+func resolveBearerAuthSSOTokenProvider(ctx context.Context, cfg *aws.Config, session *SSOSession, configs configs) (*ssocreds.SSOTokenProvider, error) {
+	ssoTokenProviderOptionsFn, found, err := getSSOTokenProviderOptions(ctx, configs)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get SSOTokenProviderOptions from config sources, %w", err)
+	}
+
+	var optFns []func(*ssocreds.SSOTokenProviderOptions)
+	if found {
+		optFns = append(optFns, ssoTokenProviderOptionsFn)
+	}
+
+	cachePath, err := ssocreds.StandardCachedTokenFilepath(session.Name)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get SSOTokenProvider's cache path, %w", err)
+	}
+
+	client := ssooidc.NewFromConfig(*cfg)
+	provider := ssocreds.NewSSOTokenProvider(client, cachePath, optFns...)
+
+	return provider, nil
+}
+
+// wrapWithBearerAuthTokenCache will wrap provider with a smithy-go
+// auth/bearer#TokenCache with the provided options if the provider is not
+// already a TokenCache.
+func wrapWithBearerAuthTokenCache(
+	ctx context.Context,
+	cfgs configs,
+	provider smithybearer.TokenProvider,
+	optFns ...func(*smithybearer.TokenCacheOptions),
+) (smithybearer.TokenProvider, error) {
+	_, ok := provider.(*smithybearer.TokenCache)
+	if ok {
+		return provider, nil
+	}
+
+	tokenCacheConfigOptions, optionsFound, err := getBearerAuthTokenCacheOptions(ctx, cfgs)
+	if err != nil {
+		return nil, err
+	}
+
+	opts := make([]func(*smithybearer.TokenCacheOptions), 0, 2+len(optFns))
+	opts = append(opts, func(o *smithybearer.TokenCacheOptions) {
+		o.RefreshBeforeExpires = 5 * time.Minute
+		o.RetrieveBearerTokenTimeout = 30 * time.Second
+	})
+	opts = append(opts, optFns...)
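+	// Options resolved from config sources are appended last so they take
+	// precedence over both the defaults above and the caller's optFns.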
+	if optionsFound {
+		opts = append(opts, tokenCacheConfigOptions)
+	}
+
+	return smithybearer.NewTokenCache(provider, opts...), nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_credentials.go b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_credentials.go
new file mode 100644
index 00000000000..b21cd30804d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_credentials.go
@@ -0,0 +1,499 @@
+package config
+
+import (
+	"context"
+	"fmt"
+	"net/url"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/credentials"
+	"github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds"
+	"github.com/aws/aws-sdk-go-v2/credentials/endpointcreds"
+	"github.com/aws/aws-sdk-go-v2/credentials/processcreds"
+	"github.com/aws/aws-sdk-go-v2/credentials/ssocreds"
+	"github.com/aws/aws-sdk-go-v2/credentials/stscreds"
+	"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
+	"github.com/aws/aws-sdk-go-v2/service/sso"
+	"github.com/aws/aws-sdk-go-v2/service/ssooidc"
+	"github.com/aws/aws-sdk-go-v2/service/sts"
+)
+
+const (
+	// valid credential source values
+	credSourceEc2Metadata  = "Ec2InstanceMetadata"
+	credSourceEnvironment  = "Environment"
+	credSourceECSContainer = "EcsContainer"
+)
+
+var (
+	ecsContainerEndpoint = "http://169.254.170.2" // not constant to allow for swapping during unit-testing
+)
+
+// resolveCredentials extracts a credential provider from the slice of config
+// sources.
+//
+// If an explicit credential provider is not found the resolver will fall back
+// to resolving credentials by extracting a credential provider from EnvConfig
+// and SharedConfig.
+func resolveCredentials(ctx context.Context, cfg *aws.Config, configs configs) error {
+	found, err := resolveCredentialProvider(ctx, cfg, configs)
+	if found || err != nil {
+		return err
+	}
+
+	return resolveCredentialChain(ctx, cfg, configs)
+}
+
+// resolveCredentialProvider extracts the first instance of Credentials from the
+// config slices.
+//
+// The resolved CredentialProvider will be wrapped in a cache to ensure the
+// credentials are only refreshed when needed. This also protects the
+// credential provider so it can be used concurrently.
+//
+// Config providers used:
+// * credentialsProviderProvider
+func resolveCredentialProvider(ctx context.Context, cfg *aws.Config, configs configs) (bool, error) {
+	credProvider, found, err := getCredentialsProvider(ctx, configs)
+	if !found || err != nil {
+		return false, err
+	}
+
+	cfg.Credentials, err = wrapWithCredentialsCache(ctx, configs, credProvider)
+	if err != nil {
+		return false, err
+	}
+
+	return true, nil
+}
+
+// resolveCredentialChain resolves a credential provider chain using EnvConfig
+// and SharedConfig if present in the slice of provided configs.
+//
+// The resolved CredentialProvider will be wrapped in a cache to ensure the
+// credentials are only refreshed when needed. This also protects the
+// credential provider so it can be used concurrently.
+func resolveCredentialChain(ctx context.Context, cfg *aws.Config, configs configs) (err error) {
+	envConfig, sharedConfig, other := getAWSConfigSources(configs)
+
+	// When checking if a profile was specified programmatically we should only consider the "other"
+	// configuration sources that have been provided. This ensures we correctly honor the expected credential
+	// hierarchy.
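+	// Resolution order below: a programmatically selected profile wins,
+	// then static environment credentials, then an environment web identity
+	// token, and finally the shared config profile fallback.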
+	_, sharedProfileSet, err := getSharedConfigProfile(ctx, other)
+	if err != nil {
+		return err
+	}
+
+	switch {
+	case sharedProfileSet:
+		err = resolveCredsFromProfile(ctx, cfg, envConfig, sharedConfig, other)
+	case envConfig.Credentials.HasKeys():
+		cfg.Credentials = credentials.StaticCredentialsProvider{Value: envConfig.Credentials}
+	case len(envConfig.WebIdentityTokenFilePath) > 0:
+		err = assumeWebIdentity(ctx, cfg, envConfig.WebIdentityTokenFilePath, envConfig.RoleARN, envConfig.RoleSessionName, configs)
+	default:
+		err = resolveCredsFromProfile(ctx, cfg, envConfig, sharedConfig, other)
+	}
+	if err != nil {
+		return err
+	}
+
+	// Wrap the resolved provider in a cache so the SDK will cache credentials.
+	cfg.Credentials, err = wrapWithCredentialsCache(ctx, configs, cfg.Credentials)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func resolveCredsFromProfile(ctx context.Context, cfg *aws.Config, envConfig *EnvConfig, sharedConfig *SharedConfig, configs configs) (err error) {
+
+	switch {
+	case sharedConfig.Source != nil:
+		// Assume IAM role with credentials source from a different profile.
+		err = resolveCredsFromProfile(ctx, cfg, envConfig, sharedConfig.Source, configs)
+
+	case sharedConfig.Credentials.HasKeys():
+		// Static Credentials from Shared Config/Credentials file.
+		cfg.Credentials = credentials.StaticCredentialsProvider{
+			Value: sharedConfig.Credentials,
+		}
+
+	case len(sharedConfig.CredentialSource) != 0:
+		err = resolveCredsFromSource(ctx, cfg, envConfig, sharedConfig, configs)
+
+	case len(sharedConfig.WebIdentityTokenFile) != 0:
+		// Credentials from Assume Web Identity token require an IAM Role, and
+		// that role will be assumed. May be wrapped with another assume role
+		// via SourceProfile.
+		return assumeWebIdentity(ctx, cfg, sharedConfig.WebIdentityTokenFile, sharedConfig.RoleARN, sharedConfig.RoleSessionName, configs)
+
+	case sharedConfig.hasSSOConfiguration():
+		err = resolveSSOCredentials(ctx, cfg, sharedConfig, configs)
+
+	case len(sharedConfig.CredentialProcess) != 0:
+		// Get credentials from CredentialProcess
+		err = processCredentials(ctx, cfg, sharedConfig, configs)
+
+	case len(envConfig.ContainerCredentialsEndpoint) != 0:
+		err = resolveLocalHTTPCredProvider(ctx, cfg, envConfig.ContainerCredentialsEndpoint, envConfig.ContainerAuthorizationToken, configs)
+
+	case len(envConfig.ContainerCredentialsRelativePath) != 0:
+		err = resolveHTTPCredProvider(ctx, cfg, ecsContainerURI(envConfig.ContainerCredentialsRelativePath), envConfig.ContainerAuthorizationToken, configs)
+
+	default:
+		err = resolveEC2RoleCredentials(ctx, cfg, configs)
+	}
+	if err != nil {
+		return err
+	}
+
+	if len(sharedConfig.RoleARN) > 0 {
+		return credsFromAssumeRole(ctx, cfg, sharedConfig, configs)
+	}
+
+	return nil
+}
+
+func resolveSSOCredentials(ctx context.Context, cfg *aws.Config, sharedConfig *SharedConfig, configs configs) error {
+	if err := sharedConfig.validateSSOConfiguration(); err != nil {
+		return err
+	}
+
+	var options []func(*ssocreds.Options)
+	v, found, err := getSSOProviderOptions(ctx, configs)
+	if err != nil {
+		return err
+	}
+	if found {
+		options = append(options, v)
+	}
+
+	cfgCopy := cfg.Copy()
+
+	if sharedConfig.SSOSession != nil {
+		ssoTokenProviderOptionsFn, found, err := getSSOTokenProviderOptions(ctx, configs)
+		if err != nil {
+			return fmt.Errorf("failed to get SSOTokenProviderOptions from config sources, %w", err)
+		}
+		var optFns []func(*ssocreds.SSOTokenProviderOptions)
+		if found {
+			optFns = append(optFns, ssoTokenProviderOptionsFn)
+		}
+		cfgCopy.Region = sharedConfig.SSOSession.SSORegion
+		cachedPath, err := ssocreds.StandardCachedTokenFilepath(sharedConfig.SSOSession.Name)
+		if err != nil {
+			return err
+		}
+		oidcClient := ssooidc.NewFromConfig(cfgCopy)
+		tokenProvider := ssocreds.NewSSOTokenProvider(oidcClient, cachedPath, optFns...)
+		options = append(options, func(o *ssocreds.Options) {
+			o.SSOTokenProvider = tokenProvider
+			o.CachedTokenFilepath = cachedPath
+		})
+	} else {
+		cfgCopy.Region = sharedConfig.SSORegion
+	}
+
+	cfg.Credentials = ssocreds.New(sso.NewFromConfig(cfgCopy), sharedConfig.SSOAccountID, sharedConfig.SSORoleName, sharedConfig.SSOStartURL, options...)
+
+	return nil
+}
+
+func ecsContainerURI(path string) string {
+	return fmt.Sprintf("%s%s", ecsContainerEndpoint, path)
+}
+
+func processCredentials(ctx context.Context, cfg *aws.Config, sharedConfig *SharedConfig, configs configs) error {
+	var opts []func(*processcreds.Options)
+
+	options, found, err := getProcessCredentialOptions(ctx, configs)
+	if err != nil {
+		return err
+	}
+	if found {
+		opts = append(opts, options)
+	}
+
+	cfg.Credentials = processcreds.NewProvider(sharedConfig.CredentialProcess, opts...)
+
+	return nil
+}
+
+func resolveLocalHTTPCredProvider(ctx context.Context, cfg *aws.Config, endpointURL, authToken string, configs configs) error {
+	var resolveErr error
+
+	parsed, err := url.Parse(endpointURL)
+	if err != nil {
+		resolveErr = fmt.Errorf("invalid URL, %w", err)
+	} else {
+		host := parsed.Hostname()
+		if len(host) == 0 {
+			resolveErr = fmt.Errorf("unable to parse host from local HTTP cred provider URL")
+		} else if isLoopback, loopbackErr := isLoopbackHost(host); loopbackErr != nil {
+			resolveErr = fmt.Errorf("failed to resolve host %q, %v", host, loopbackErr)
+		} else if !isLoopback {
+			resolveErr = fmt.Errorf("invalid endpoint host, %q, only loopback hosts are allowed", host)
+		}
+	}
+
+	if resolveErr != nil {
+		return resolveErr
+	}
+
+	return resolveHTTPCredProvider(ctx, cfg, endpointURL, authToken, configs)
+}
+
+func resolveHTTPCredProvider(ctx context.Context, cfg *aws.Config, url, authToken string, configs configs) error {
+	optFns := []func(*endpointcreds.Options){
+		func(options *endpointcreds.Options) {
+			if len(authToken) != 0 {
+				options.AuthorizationToken = authToken
+			}
+			options.APIOptions = cfg.APIOptions
+			if cfg.Retryer != nil {
+				options.Retryer = cfg.Retryer()
+			}
+		},
+	}
+
+	optFn, found, err := getEndpointCredentialProviderOptions(ctx, configs)
+	if err != nil {
+		return err
+	}
+	if found {
+		optFns = append(optFns, optFn)
+	}
+
+	provider := endpointcreds.New(url, optFns...)
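+	// Wrap the endpoint credentials provider in a cache configured to
+	// refresh credentials five minutes before they expire.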
+
+	cfg.Credentials, err = wrapWithCredentialsCache(ctx, configs, provider, func(options *aws.CredentialsCacheOptions) {
+		options.ExpiryWindow = 5 * time.Minute
+	})
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func resolveCredsFromSource(ctx context.Context, cfg *aws.Config, envConfig *EnvConfig, sharedCfg *SharedConfig, configs configs) (err error) {
+	switch sharedCfg.CredentialSource {
+	case credSourceEc2Metadata:
+		return resolveEC2RoleCredentials(ctx, cfg, configs)
+
+	case credSourceEnvironment:
+		cfg.Credentials = credentials.StaticCredentialsProvider{Value: envConfig.Credentials}
+
+	case credSourceECSContainer:
+		if len(envConfig.ContainerCredentialsRelativePath) == 0 {
+			return fmt.Errorf("EcsContainer was specified as the credential_source, but 'AWS_CONTAINER_CREDENTIALS_RELATIVE_URI' was not set")
+		}
+		return resolveHTTPCredProvider(ctx, cfg, ecsContainerURI(envConfig.ContainerCredentialsRelativePath), envConfig.ContainerAuthorizationToken, configs)
+
+	default:
+		return fmt.Errorf("credential_source values must be EcsContainer, Ec2InstanceMetadata, or Environment")
+	}
+
+	return nil
+}
+
+func resolveEC2RoleCredentials(ctx context.Context, cfg *aws.Config, configs configs) error {
+	optFns := make([]func(*ec2rolecreds.Options), 0, 2)
+
+	optFn, found, err := getEC2RoleCredentialProviderOptions(ctx, configs)
+	if err != nil {
+		return err
+	}
+	if found {
+		optFns = append(optFns, optFn)
+	}
+
+	optFns = append(optFns, func(o *ec2rolecreds.Options) {
+		// Only define a client from config if not already defined.
+		if o.Client == nil {
+			o.Client = imds.NewFromConfig(*cfg)
+		}
+	})
+
+	provider := ec2rolecreds.New(optFns...)
+
+	cfg.Credentials, err = wrapWithCredentialsCache(ctx, configs, provider)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func getAWSConfigSources(cfgs configs) (*EnvConfig, *SharedConfig, configs) {
+	var (
+		envConfig    *EnvConfig
+		sharedConfig *SharedConfig
+		other        configs
+	)
+
+	for i := range cfgs {
+		switch c := cfgs[i].(type) {
+		case EnvConfig:
+			if envConfig == nil {
+				envConfig = &c
+			}
+		case *EnvConfig:
+			if envConfig == nil {
+				envConfig = c
+			}
+		case SharedConfig:
+			if sharedConfig == nil {
+				sharedConfig = &c
+			}
+		case *SharedConfig:
+			if sharedConfig == nil {
+				sharedConfig = c
+			}
+		default:
+			other = append(other, c)
+		}
+	}
+
+	if envConfig == nil {
+		envConfig = &EnvConfig{}
+	}
+
+	if sharedConfig == nil {
+		sharedConfig = &SharedConfig{}
+	}
+
+	return envConfig, sharedConfig, other
+}
+
+// AssumeRoleTokenProviderNotSetError is an error returned when creating a
+// session if the MFAToken option is not set, but the shared config is
+// configured to assume a role with an MFA token.
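+//
+// Illustrative sketch (assumed usage, not part of this file): supplying a
+// token provider avoids this error when mfa_serial is configured:
+//
+//	cfg, err := config.LoadDefaultConfig(context.TODO(),
+//		config.WithAssumeRoleCredentialOptions(func(o *stscreds.AssumeRoleOptions) {
+//			o.TokenProvider = stscreds.StdinTokenProvider
+//		}),
+//	)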
+type AssumeRoleTokenProviderNotSetError struct{}
+
+// Error is the error message
+func (e AssumeRoleTokenProviderNotSetError) Error() string {
+	return "assume role with MFA enabled, but AssumeRoleTokenProvider session option not set."
+}
+
+func assumeWebIdentity(ctx context.Context, cfg *aws.Config, filepath string, roleARN, sessionName string, configs configs) error {
+	if len(filepath) == 0 {
+		return fmt.Errorf("token file path is not set")
+	}
+
+	optFns := []func(*stscreds.WebIdentityRoleOptions){
+		func(options *stscreds.WebIdentityRoleOptions) {
+			options.RoleSessionName = sessionName
+		},
+	}
+
+	optFn, found, err := getWebIdentityCredentialProviderOptions(ctx, configs)
+	if err != nil {
+		return err
+	}
+
+	if found {
+		optFns = append(optFns, optFn)
+	}
+
+	opts := stscreds.WebIdentityRoleOptions{
+		RoleARN: roleARN,
+	}
+
+	for _, fn := range optFns {
+		fn(&opts)
+	}
+
+	if len(opts.RoleARN) == 0 {
+		return fmt.Errorf("role ARN is not set")
+	}
+
+	client := opts.Client
+	if client == nil {
+		client = sts.NewFromConfig(*cfg)
+	}
+
+	provider := stscreds.NewWebIdentityRoleProvider(client, roleARN, stscreds.IdentityTokenFile(filepath), optFns...)
+
+	cfg.Credentials = provider
+
+	return nil
+}
+
+func credsFromAssumeRole(ctx context.Context, cfg *aws.Config, sharedCfg *SharedConfig, configs configs) (err error) {
+	optFns := []func(*stscreds.AssumeRoleOptions){
+		func(options *stscreds.AssumeRoleOptions) {
+			options.RoleSessionName = sharedCfg.RoleSessionName
+			if sharedCfg.RoleDurationSeconds != nil {
+				if *sharedCfg.RoleDurationSeconds/time.Minute > 15 {
+					options.Duration = *sharedCfg.RoleDurationSeconds
+				}
+			}
+			// Assume role with external ID
+			if len(sharedCfg.ExternalID) > 0 {
+				options.ExternalID = aws.String(sharedCfg.ExternalID)
+			}
+
+			// Assume role with MFA
+			if len(sharedCfg.MFASerial) != 0 {
+				options.SerialNumber = aws.String(sharedCfg.MFASerial)
+			}
+		},
+	}
+
+	optFn, found, err := getAssumeRoleCredentialProviderOptions(ctx, configs)
+	if err != nil {
+		return err
+	}
+	if found {
+		optFns = append(optFns, optFn)
+	}
+
+	{
+		// Synthesize options early to surface configuration errors sooner,
+		// ensuring a token provider is present if the SerialNumber was set.
+		var o stscreds.AssumeRoleOptions
+		for _, fn := range optFns {
+			fn(&o)
+		}
+		if o.TokenProvider == nil && o.SerialNumber != nil {
+			return AssumeRoleTokenProviderNotSetError{}
+		}
+	}
+
+	cfg.Credentials = stscreds.NewAssumeRoleProvider(sts.NewFromConfig(*cfg), sharedCfg.RoleARN, optFns...)
+
+	return nil
+}
+
+// wrapWithCredentialsCache will wrap provider with an aws.CredentialsCache
+// with the provided options if the provider is not already an
+// aws.CredentialsCache.
+func wrapWithCredentialsCache(
+	ctx context.Context,
+	cfgs configs,
+	provider aws.CredentialsProvider,
+	optFns ...func(options *aws.CredentialsCacheOptions),
+) (aws.CredentialsProvider, error) {
+	_, ok := provider.(*aws.CredentialsCache)
+	if ok {
+		return provider, nil
+	}
+
+	credCacheOptions, optionsFound, err := getCredentialsCacheOptionsProvider(ctx, cfgs)
+	if err != nil {
+		return nil, err
+	}
+
+	// Force allocation of a new slice if the additional options are
+	// needed, to prevent overwriting the passed in slice of options.
+	optFns = optFns[:len(optFns):len(optFns)]
+	if optionsFound {
+		optFns = append(optFns, credCacheOptions)
+	}
+
+	return aws.NewCredentialsCache(provider, optFns...), nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go b/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go
new file mode 100644
index 00000000000..20683bf5f07
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go
@@ -0,0 +1,1492 @@
+package config
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
+	"github.com/aws/aws-sdk-go-v2/internal/ini"
+	"github.com/aws/aws-sdk-go-v2/internal/shareddefaults"
+	"github.com/aws/smithy-go/logging"
+)
+
+const (
+	// Prefix to use for filtering profiles. The profile prefix should only
+	// exist in the shared config file, not the credentials file.
+	profilePrefix = `profile `
+
+	// Prefix to be used for SSO sections. These are supposed to only exist in
+	// the shared config file, not the credentials file.
+	ssoSectionPrefix = `sso-session `
+
+	// Prefix for services section. It is referenced in profile via the services
+	// parameter to configure clients for service-specific parameters.
+	servicesPrefix = `services`
+
+	// string equivalent for boolean
+	endpointDiscoveryDisabled = `false`
+	endpointDiscoveryEnabled  = `true`
+	endpointDiscoveryAuto     = `auto`
+
+	// Static Credentials group
+	accessKeyIDKey  = `aws_access_key_id`     // group required
+	secretAccessKey = `aws_secret_access_key` // group required
+	sessionTokenKey = `aws_session_token`     // optional
+
+	// Assume Role Credentials group
+	roleArnKey             = `role_arn`          // group required
+	sourceProfileKey       = `source_profile`    // group required
+	credentialSourceKey    = `credential_source` // group required (or source_profile)
+	externalIDKey          = `external_id`       // optional
+	mfaSerialKey           = `mfa_serial`        // optional
+	roleSessionNameKey     = `role_session_name` // optional
+	roleDurationSecondsKey = "duration_seconds"  // optional
+
+	// AWS Single Sign-On (AWS SSO) group
+	ssoSessionNameKey = "sso_session"
+
+	ssoRegionKey   = "sso_region"
+	ssoStartURLKey = "sso_start_url"
+
+	ssoAccountIDKey = "sso_account_id"
+	ssoRoleNameKey  = "sso_role_name"
+
+	// Additional Config fields
+	regionKey = `region`
+
+	// endpoint discovery group
+	enableEndpointDiscoveryKey = `endpoint_discovery_enabled` // optional
+
+	// External Credential process
+	credentialProcessKey = `credential_process` // optional
+
+	// Web Identity Token File
+	webIdentityTokenFileKey = `web_identity_token_file` // optional
+
+	// S3 ARN Region Usage
+	s3UseARNRegionKey = "s3_use_arn_region"
+
+	ec2MetadataServiceEndpointModeKey = "ec2_metadata_service_endpoint_mode"
+
+	ec2MetadataServiceEndpointKey = "ec2_metadata_service_endpoint"
+
+	ec2MetadataV1DisabledKey = "ec2_metadata_v1_disabled"
+
+	// Use DualStack Endpoint Resolution
+	useDualStackEndpoint = "use_dualstack_endpoint"
+
+	// DefaultSharedConfigProfile is the default profile to be used when
+	// loading configuration from the config files if another profile name
+	// is not provided.
+	DefaultSharedConfigProfile = `default`
+
+	// S3 Disable Multi-Region AccessPoints
+	s3DisableMultiRegionAccessPointsKey = `s3_disable_multiregion_access_points`
+
+	useFIPSEndpointKey = "use_fips_endpoint"
+
+	defaultsModeKey = "defaults_mode"
+
+	// Retry options
+	retryMaxAttemptsKey = "max_attempts"
+	retryModeKey        = "retry_mode"
+
+	caBundleKey = "ca_bundle"
+
+	sdkAppID = "sdk_ua_app_id"
+
+	ignoreConfiguredEndpoints = "ignore_configured_endpoint_urls"
+
+	endpointURL = "endpoint_url"
+)
+
+// defaultSharedConfigProfile allows for swapping the default profile for testing
+var defaultSharedConfigProfile = DefaultSharedConfigProfile
+
+// DefaultSharedCredentialsFilename returns the SDK's default file path
+// for the shared credentials file.
+//
+// Builds the shared config file path based on the OS's platform.
+//
+//   - Linux/Unix: $HOME/.aws/credentials
+//   - Windows: %USERPROFILE%\.aws\credentials
+func DefaultSharedCredentialsFilename() string {
+	return filepath.Join(shareddefaults.UserHomeDir(), ".aws", "credentials")
+}
+
+// DefaultSharedConfigFilename returns the SDK's default file path for
+// the shared config file.
+//
+// Builds the shared config file path based on the OS's platform.
+//
+//   - Linux/Unix: $HOME/.aws/config
+//   - Windows: %USERPROFILE%\.aws\config
+func DefaultSharedConfigFilename() string {
+	return filepath.Join(shareddefaults.UserHomeDir(), ".aws", "config")
+}
+
+// DefaultSharedConfigFiles is a slice of the default shared config files that
+// will be used in order to load the SharedConfig.
+var DefaultSharedConfigFiles = []string{
+	DefaultSharedConfigFilename(),
+}
+
+// DefaultSharedCredentialsFiles is a slice of the default shared credentials
+// files that will be used in order to load the SharedConfig.
+var DefaultSharedCredentialsFiles = []string{
+	DefaultSharedCredentialsFilename(),
+}
+
+// SSOSession provides the shared configuration parameters of the sso-session
+// section.
+type SSOSession struct {
+	Name        string
+	SSORegion   string
+	SSOStartURL string
+}
+
+func (s *SSOSession) setFromIniSection(section ini.Section) {
+	updateString(&s.Name, section, ssoSessionNameKey)
+	updateString(&s.SSORegion, section, ssoRegionKey)
+	updateString(&s.SSOStartURL, section, ssoStartURLKey)
+}
+
+// Services contains values configured in the services section
+// of the AWS configuration file.
+type Services struct {
+	// Services section values
+	// {"serviceId": {"key": "value"}}
+	// e.g. {"s3": {"endpoint_url": "example.com"}}
+	ServiceValues map[string]map[string]string
+}
+
+func (s *Services) setFromIniSection(section ini.Section) {
+	if s.ServiceValues == nil {
+		s.ServiceValues = make(map[string]map[string]string)
+	}
+	for _, service := range section.List() {
+		s.ServiceValues[service] = section.Map(service)
+	}
+}
+
+// SharedConfig represents the configuration fields of the SDK config files.
+type SharedConfig struct {
+	Profile string
+
+	// Credentials values from the config file. Both aws_access_key_id
+	// and aws_secret_access_key must be provided together in the same file
+	// to be considered valid. The values will be ignored if not a complete group.
+	// aws_session_token is an optional field that can be provided if both of the
+	// other two fields are also provided.
+	//
+	//	aws_access_key_id
+	//	aws_secret_access_key
+	//	aws_session_token
+	Credentials aws.Credentials
+
+	CredentialSource     string
+	CredentialProcess    string
+	WebIdentityTokenFile string
+
+	// SSO session options
+	SSOSessionName string
+	SSOSession     *SSOSession
+
+	// Legacy SSO session options
+	SSORegion   string
+	SSOStartURL string
+
+	// SSO fields not used
+	SSOAccountID string
+	SSORoleName  string
+
+	RoleARN             string
+	ExternalID          string
+	MFASerial           string
+	RoleSessionName     string
+	RoleDurationSeconds *time.Duration
+
+	SourceProfileName string
+	Source            *SharedConfig
+
+	// Region is the region the SDK should use for looking up AWS service endpoints
+	// and signing requests.
+	//
+	//	region = us-west-2
+	Region string
+
+	// EnableEndpointDiscovery can be enabled or disabled in the shared config
+	// by setting endpoint_discovery_enabled to true, or false respectively.
+	//
+	//	endpoint_discovery_enabled = true
+	EnableEndpointDiscovery aws.EndpointDiscoveryEnableState
+
+	// Specifies if the S3 service should allow ARNs to direct the region
+	// the client's requests are sent to.
+	//
+	//	s3_use_arn_region=true
+	S3UseARNRegion *bool
+
+	// Specifies the EC2 Instance Metadata Service default endpoint selection
+	// mode (IPv4 or IPv6)
+	//
+	//	ec2_metadata_service_endpoint_mode=IPv6
+	EC2IMDSEndpointMode imds.EndpointModeState
+
+	// Specifies the EC2 Instance Metadata Service endpoint to use. If
+	// specified it overrides EC2IMDSEndpointMode.
+	//
+	//	ec2_metadata_service_endpoint=http://fd00:ec2::254
+	EC2IMDSEndpoint string
+
+	// Specifies that IMDS clients should not fallback to IMDSv1 if token
+	// requests fail.
+	//
+	//	ec2_metadata_v1_disabled=true
+	EC2IMDSv1Disabled *bool
+
+	// Specifies if the S3 service should disable support for Multi-Region
+	// access-points
+	//
+	//	s3_disable_multiregion_access_points=true
+	S3DisableMultiRegionAccessPoints *bool
+
+	// Specifies that SDK clients must resolve a dual-stack endpoint for
+	// services.
+	//
+	//	use_dualstack_endpoint=true
+	UseDualStackEndpoint aws.DualStackEndpointState
+
+	// Specifies that SDK clients must resolve a FIPS endpoint for
+	// services.
+	//
+	//	use_fips_endpoint=true
+	UseFIPSEndpoint aws.FIPSEndpointState
+
+	// Specifies which defaults mode should be used by services.
+	//
+	//	defaults_mode=standard
+	DefaultsMode aws.DefaultsMode
+
+	// Specifies the maximum number of attempts an API client will make calling an
+	// operation that fails with a retryable error.
+	//
+	//	max_attempts=3
+	RetryMaxAttempts int
+
+	// Specifies the retry model the API client will be created with.
+	//
+	//	retry_mode=standard
+	RetryMode aws.RetryMode
+
+	// Sets the path to a custom Certificate Authority (CA) Bundle PEM file
+	// that the SDK will use instead of the system's root CA bundle. Only use
+	// this if you want to configure the SDK to use a custom set of CAs.
+	//
+	// Enabling this option will attempt to merge the Transport into the SDK's
+	// HTTP client. If the client's Transport is not a http.Transport an error
+	// will be returned. If the Transport's TLS config is set this option will
+	// cause the SDK to overwrite the Transport's TLS config's RootCAs value.
+	//
+	// Setting a custom HTTPClient in the aws.Config options will override this
+	// setting. To use this option and a custom HTTP client, the HTTP client
+	// needs to be provided when creating the config, not the service client.
+	//
+	//	ca_bundle=$HOME/my_custom_ca_bundle
+	CustomCABundle string
+
+	// aws sdk app ID that can be added to user agent header string
+	AppID string
+
+	// Flag used to disable configured endpoints.
+	IgnoreConfiguredEndpoints *bool
+
+	// Value to contain configured endpoints to be propagated to
+	// corresponding endpoint resolution field.
+	BaseEndpoint string
+
+	// Value to contain services section content.
+	Services Services
+}
+
+func (c SharedConfig) getDefaultsMode(ctx context.Context) (value aws.DefaultsMode, ok bool, err error) {
+	if len(c.DefaultsMode) == 0 {
+		return "", false, nil
+	}
+
+	return c.DefaultsMode, true, nil
+}
+
+// GetRetryMaxAttempts returns the maximum number of attempts an API client
+// created Retryer should make for an operation call before failing.
+func (c SharedConfig) GetRetryMaxAttempts(ctx context.Context) (value int, ok bool, err error) {
+	if c.RetryMaxAttempts == 0 {
+		return 0, false, nil
+	}
+
+	return c.RetryMaxAttempts, true, nil
+}
+
+// GetRetryMode returns the retry mode the API client should create its
+// Retryer with.
+func (c SharedConfig) GetRetryMode(ctx context.Context) (value aws.RetryMode, ok bool, err error) {
+	if len(c.RetryMode) == 0 {
+		return "", false, nil
+	}
+
+	return c.RetryMode, true, nil
+}
+
+// GetS3UseARNRegion returns if the S3 service should allow ARNs to direct the region
+// the client's requests are sent to.
+func (c SharedConfig) GetS3UseARNRegion(ctx context.Context) (value, ok bool, err error) {
+	if c.S3UseARNRegion == nil {
+		return false, false, nil
+	}
+
+	return *c.S3UseARNRegion, true, nil
+}
+
+// GetEnableEndpointDiscovery returns if endpoint_discovery_enabled is set.
+func (c SharedConfig) GetEnableEndpointDiscovery(ctx context.Context) (value aws.EndpointDiscoveryEnableState, ok bool, err error) {
+	if c.EnableEndpointDiscovery == aws.EndpointDiscoveryUnset {
+		return aws.EndpointDiscoveryUnset, false, nil
+	}
+
+	return c.EnableEndpointDiscovery, true, nil
+}
+
+// GetS3DisableMultiRegionAccessPoints returns if the S3 service should disable support for Multi-Region
+// access-points.
+func (c SharedConfig) GetS3DisableMultiRegionAccessPoints(ctx context.Context) (value, ok bool, err error) {
+	if c.S3DisableMultiRegionAccessPoints == nil {
+		return false, false, nil
+	}
+
+	return *c.S3DisableMultiRegionAccessPoints, true, nil
+}
+
+// getRegion returns the region for the profile if a region is set.
+func (c SharedConfig) getRegion(ctx context.Context) (string, bool, error) {
+	if len(c.Region) == 0 {
+		return "", false, nil
+	}
+	return c.Region, true, nil
+}
+
+// getCredentialsProvider returns the credentials for a profile if they were set.
+func (c SharedConfig) getCredentialsProvider() (aws.Credentials, bool, error) {
+	return c.Credentials, true, nil
+}
+
+// GetEC2IMDSEndpointMode implements an EC2IMDSEndpointMode option resolver interface.
+func (c SharedConfig) GetEC2IMDSEndpointMode() (imds.EndpointModeState, bool, error) {
+	if c.EC2IMDSEndpointMode == imds.EndpointModeStateUnset {
+		return imds.EndpointModeStateUnset, false, nil
+	}
+
+	return c.EC2IMDSEndpointMode, true, nil
+}
+
+// GetEC2IMDSEndpoint implements an EC2IMDSEndpoint option resolver interface.
+func (c SharedConfig) GetEC2IMDSEndpoint() (string, bool, error) {
+	if len(c.EC2IMDSEndpoint) == 0 {
+		return "", false, nil
+	}
+
+	return c.EC2IMDSEndpoint, true, nil
+}
+
+// GetEC2IMDSV1FallbackDisabled implements an EC2IMDSV1FallbackDisabled option
+// resolver interface.
+func (c SharedConfig) GetEC2IMDSV1FallbackDisabled() (bool, bool) {
+	if c.EC2IMDSv1Disabled == nil {
+		return false, false
+	}
+
+	return *c.EC2IMDSv1Disabled, true
+}
+
+// GetUseDualStackEndpoint returns whether the service's dual-stack endpoint should be
+// used for requests.
+func (c SharedConfig) GetUseDualStackEndpoint(ctx context.Context) (value aws.DualStackEndpointState, found bool, err error) {
+	if c.UseDualStackEndpoint == aws.DualStackEndpointStateUnset {
+		return aws.DualStackEndpointStateUnset, false, nil
+	}
+
+	return c.UseDualStackEndpoint, true, nil
+}
+
+// GetUseFIPSEndpoint returns whether the service's FIPS endpoint should be
+// used for requests.
+func (c SharedConfig) GetUseFIPSEndpoint(ctx context.Context) (value aws.FIPSEndpointState, found bool, err error) {
+	if c.UseFIPSEndpoint == aws.FIPSEndpointStateUnset {
+		return aws.FIPSEndpointStateUnset, false, nil
+	}
+
+	return c.UseFIPSEndpoint, true, nil
+}
+
+// getCustomCABundle returns the custom CA bundle's PEM bytes if the file path
+// is set.
+func (c SharedConfig) getCustomCABundle(context.Context) (io.Reader, bool, error) {
+	if len(c.CustomCABundle) == 0 {
+		return nil, false, nil
+	}
+
+	b, err := ioutil.ReadFile(c.CustomCABundle)
+	if err != nil {
+		return nil, false, err
+	}
+	return bytes.NewReader(b), true, nil
+}
+
+// getAppID returns the sdk app ID if set in shared config profile
+func (c SharedConfig) getAppID(context.Context) (string, bool, error) {
+	return c.AppID, len(c.AppID) > 0, nil
+}
+
+// GetIgnoreConfiguredEndpoints is used in knowing when to disable configured
+// endpoints feature.
+func (c SharedConfig) GetIgnoreConfiguredEndpoints(context.Context) (bool, bool, error) {
+	if c.IgnoreConfiguredEndpoints == nil {
+		return false, false, nil
+	}
+
+	return *c.IgnoreConfiguredEndpoints, true, nil
+}
+
+func (c SharedConfig) getBaseEndpoint(context.Context) (string, bool, error) {
+	return c.BaseEndpoint, len(c.BaseEndpoint) > 0, nil
+}
+
+// GetServiceBaseEndpoint returns the configured endpoint URL, if any, for the
+// service identified by the normalized SDK ID.
+func (c SharedConfig) GetServiceBaseEndpoint(ctx context.Context, sdkID string) (string, bool, error) {
+	if service, ok := c.Services.ServiceValues[normalizeShared(sdkID)]; ok {
+		if endpt, ok := service[endpointURL]; ok {
+			return endpt, true, nil
+		}
+	}
+	return "", false, nil
+}
+
+func normalizeShared(sdkID string) string {
+	lower := strings.ToLower(sdkID)
+	return strings.ReplaceAll(lower, " ", "_")
+}
+
+func (c SharedConfig) getServicesObject(context.Context) (map[string]map[string]string, bool, error) {
+	return c.Services.ServiceValues, c.Services.ServiceValues != nil, nil
+}
+
+// loadSharedConfigIgnoreNotExist is an alias for loadSharedConfig with the
+// addition of ignoring when none of the files exist or when the profile
+// is not found in any of the files.
+func loadSharedConfigIgnoreNotExist(ctx context.Context, configs configs) (Config, error) {
+	cfg, err := loadSharedConfig(ctx, configs)
+	if err != nil {
+		if _, ok := err.(SharedConfigProfileNotExistError); ok {
+			return SharedConfig{}, nil
+		}
+		return nil, err
+	}
+
+	return cfg, nil
+}
+
+// loadSharedConfig uses the configs passed in to load the SharedConfig from file.
+// The file names and profile name are sourced from the configs.
+//
+// If profile name is not provided DefaultSharedConfigProfile (default) will
+// be used.
+//
+// If shared config filenames are not provided DefaultSharedConfigFiles will
+// be used.
+//
+// Config providers used:
+// * sharedConfigProfileProvider
+// * sharedConfigFilesProvider
+func loadSharedConfig(ctx context.Context, configs configs) (Config, error) {
+	var profile string
+	var configFiles []string
+	var credentialsFiles []string
+	var ok bool
+	var err error
+
+	profile, ok, err = getSharedConfigProfile(ctx, configs)
+	if err != nil {
+		return nil, err
+	}
+	if !ok {
+		profile = defaultSharedConfigProfile
+	}
+
+	configFiles, ok, err = getSharedConfigFiles(ctx, configs)
+	if err != nil {
+		return nil, err
+	}
+
+	credentialsFiles, ok, err = getSharedCredentialsFiles(ctx, configs)
+	if err != nil {
+		return nil, err
+	}
+
+	// setup logger if log configuration warning is set
+	var logger logging.Logger
+	logWarnings, found, err := getLogConfigurationWarnings(ctx, configs)
+	if err != nil {
+		return SharedConfig{}, err
+	}
+	if found && logWarnings {
+		logger, found, err = getLogger(ctx, configs)
+		if err != nil {
+			return SharedConfig{}, err
+		}
+		if !found {
+			logger = logging.NewStandardLogger(os.Stderr)
+		}
+	}
+
+	return LoadSharedConfigProfile(ctx, profile,
+		func(o *LoadSharedConfigOptions) {
+			o.Logger = logger
+			o.ConfigFiles = configFiles
+			o.CredentialsFiles = credentialsFiles
+		},
+	)
+}
+
+// LoadSharedConfigOptions struct contains optional values that can be used to load the config.
+type LoadSharedConfigOptions struct {
+
+	// CredentialsFiles are the shared credentials files
+	CredentialsFiles []string
+
+	// ConfigFiles are the shared config files
+	ConfigFiles []string
+
+	// Logger is the logger used to log shared config behavior
+	Logger logging.Logger
+}
+
+// LoadSharedConfigProfile retrieves the configuration from the list of files
+// using the profile provided. The order the files are listed will determine
+// precedence. Values in subsequent files will overwrite values defined in
+// earlier files.
+//
+// For example, given two files A and B. Both define credentials. If the order
+// of the files are A then B, B's credential values will be used instead of A's.
+//
+// If config files are not set, SDK will default to using a file at location `.aws/config` if present.
+// If credentials files are not set, SDK will default to using a file at location `.aws/credentials` if present.
+// If the file lists are set to an empty slice, no default files will be used.
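+//
+// For example (an illustrative sketch, not part of this file), loading a
+// profile named "dev" from a custom config file location:
+//
+//	cfg, err := config.LoadSharedConfigProfile(ctx, "dev",
+//		func(o *config.LoadSharedConfigOptions) {
+//			o.ConfigFiles = []string{"/custom/path/config"}
+//		})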
+// +// You can read more about shared config and credentials file location at +// https://docs.aws.amazon.com/credref/latest/refdocs/file-location.html#file-location +func LoadSharedConfigProfile(ctx context.Context, profile string, optFns ...func(*LoadSharedConfigOptions)) (SharedConfig, error) { + var option LoadSharedConfigOptions + for _, fn := range optFns { + fn(&option) + } + + if option.ConfigFiles == nil { + option.ConfigFiles = DefaultSharedConfigFiles + } + + if option.CredentialsFiles == nil { + option.CredentialsFiles = DefaultSharedCredentialsFiles + } + + // load shared configuration sections from shared configuration INI options + configSections, err := loadIniFiles(option.ConfigFiles) + if err != nil { + return SharedConfig{}, err + } + + // check for profile prefix and drop duplicates or invalid profiles + err = processConfigSections(ctx, &configSections, option.Logger) + if err != nil { + return SharedConfig{}, err + } + + // load shared credentials sections from shared credentials INI options + credentialsSections, err := loadIniFiles(option.CredentialsFiles) + if err != nil { + return SharedConfig{}, err + } + + // check for profile prefix and drop duplicates or invalid profiles + err = processCredentialsSections(ctx, &credentialsSections, option.Logger) + if err != nil { + return SharedConfig{}, err + } + + err = mergeSections(&configSections, credentialsSections) + if err != nil { + return SharedConfig{}, err + } + + cfg := SharedConfig{} + profiles := map[string]struct{}{} + + if err = cfg.setFromIniSections(profiles, profile, configSections, option.Logger); err != nil { + return SharedConfig{}, err + } + + return cfg, nil +} + +func processConfigSections(ctx context.Context, sections *ini.Sections, logger logging.Logger) error { + skipSections := map[string]struct{}{} + + for _, section := range sections.List() { + if _, ok := skipSections[section]; ok { + continue + } + + // drop sections from config file that do not have expected prefixes. + switch { + case strings.HasPrefix(section, profilePrefix): + // Rename sections to remove "profile " prefixing to match with + // credentials file. If default is already present, it will be + // dropped. + newName, err := renameProfileSection(section, sections, logger) + if err != nil { + return fmt.Errorf("failed to rename profile section, %w", err) + } + skipSections[newName] = struct{}{} + + case strings.HasPrefix(section, ssoSectionPrefix): + case strings.HasPrefix(section, servicesPrefix): + case strings.EqualFold(section, "default"): + default: + // drop this section, as invalid profile name + sections.DeleteSection(section) + + if logger != nil { + logger.Logf(logging.Debug, "A profile defined with name `%v` is ignored. "+ + "For use within a shared configuration file, "+ + "a non-default profile must have `profile ` "+ + "prefixed to the profile name.", + section, + ) + } + } + } + return nil +} + +func renameProfileSection(section string, sections *ini.Sections, logger logging.Logger) (string, error) { + v, ok := sections.GetSection(section) + if !ok { + return "", fmt.Errorf("error processing profiles within the shared configuration files") + } + + // delete section with profile as prefix + sections.DeleteSection(section) + + // set the value to non-prefixed name in sections. 
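+	// e.g. a "[profile dev]" section in the config file becomes "dev",
+	// matching the "[dev]" section naming used by the credentials file.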
+	section = strings.TrimPrefix(section, profilePrefix)
+	if sections.HasSection(section) {
+		oldSection, _ := sections.GetSection(section)
+		v.Logs = append(v.Logs,
+			fmt.Sprintf("A non-default profile not prefixed with `profile ` found in %s, "+
+				"overriding non-default profile from %s",
+				v.SourceFile, oldSection.SourceFile))
+		sections.DeleteSection(section)
+	}
+
+	// assign non-prefixed name to section
+	v.Name = section
+	sections.SetSection(section, v)
+
+	return section, nil
+}
+
+func processCredentialsSections(ctx context.Context, sections *ini.Sections, logger logging.Logger) error {
+	for _, section := range sections.List() {
+		// drop profiles with prefix for credential files
+		if strings.HasPrefix(section, profilePrefix) {
+			// drop this section, as invalid profile name
+			sections.DeleteSection(section)
+
+			if logger != nil {
+				logger.Logf(logging.Debug,
+					"The profile defined with name `%v` is ignored. A profile with the `profile ` prefix is invalid "+
+						"for the shared credentials file.\n",
+					section,
+				)
+			}
+		}
+	}
+	return nil
+}
+
+func loadIniFiles(filenames []string) (ini.Sections, error) {
+	mergedSections := ini.NewSections()
+
+	for _, filename := range filenames {
+		sections, err := ini.OpenFile(filename)
+		var v *ini.UnableToReadFile
+		if ok := errors.As(err, &v); ok {
+			// Skip files which can't be opened and read for whatever reason.
+			// We treat such files as empty, and do not fall back to other locations.
+			continue
+		} else if err != nil {
+			return ini.Sections{}, SharedConfigLoadError{Filename: filename, Err: err}
+		}
+
+		// mergeSections into mergedSections
+		err = mergeSections(&mergedSections, sections)
+		if err != nil {
+			return ini.Sections{}, SharedConfigLoadError{Filename: filename, Err: err}
+		}
+	}
+
+	return mergedSections, nil
+}
+
+// mergeSections merges source section properties into destination section properties
+func mergeSections(dst *ini.Sections, src ini.Sections) error {
+	for _, sectionName := range src.List() {
+		srcSection, _ := src.GetSection(sectionName)
+
+		if (!srcSection.Has(accessKeyIDKey) && srcSection.Has(secretAccessKey)) ||
+			(srcSection.Has(accessKeyIDKey) && !srcSection.Has(secretAccessKey)) {
+			srcSection.Errors = append(srcSection.Errors,
+				fmt.Errorf("partial credentials found for profile %v", sectionName))
+		}
+
+		if !dst.HasSection(sectionName) {
+			dst.SetSection(sectionName, srcSection)
+			continue
+		}
+
+		// merge with the destination section
+		dstSection, _ := dst.GetSection(sectionName)
+
+		// errors should be overridden if any
+		dstSection.Errors = srcSection.Errors
+
+		// Access key id update
+		if srcSection.Has(accessKeyIDKey) && srcSection.Has(secretAccessKey) {
+			accessKey := srcSection.String(accessKeyIDKey)
+			secretKey := srcSection.String(secretAccessKey)
+
+			if dstSection.Has(accessKeyIDKey) {
+				dstSection.Logs = append(dstSection.Logs, newMergeKeyLogMessage(sectionName, accessKeyIDKey,
+					dstSection.SourceFile[accessKeyIDKey], srcSection.SourceFile[accessKeyIDKey]))
+			}
+
+			// update access key
+			v, err := ini.NewStringValue(accessKey)
+			if err != nil {
+				return fmt.Errorf("error merging access key, %w", err)
+			}
+			dstSection.UpdateValue(accessKeyIDKey, v)
+
+			// update secret key
+			v, err = ini.NewStringValue(secretKey)
+			if err != nil {
+				return fmt.Errorf("error merging secret key, %w", err)
+			}
+			dstSection.UpdateValue(secretAccessKey, v)
+
+			// update session token
+			if err = mergeStringKey(&srcSection, &dstSection, sectionName, sessionTokenKey); err != nil {
+				return err
+			}
+
+			// update source file to reflect where the static creds came from
+			dstSection.UpdateSourceFile(accessKeyIDKey, srcSection.SourceFile[accessKeyIDKey])
+			dstSection.UpdateSourceFile(secretAccessKey, srcSection.SourceFile[secretAccessKey])
+		}
+
+		stringKeys := []string{
+			roleArnKey,
+			sourceProfileKey,
+			credentialSourceKey,
+			externalIDKey,
+			mfaSerialKey,
+			roleSessionNameKey,
+			regionKey,
+			enableEndpointDiscoveryKey,
+			credentialProcessKey,
+			webIdentityTokenFileKey,
+			s3UseARNRegionKey,
+			s3DisableMultiRegionAccessPointsKey,
+			ec2MetadataServiceEndpointModeKey,
+			ec2MetadataServiceEndpointKey,
+			ec2MetadataV1DisabledKey,
+			useDualStackEndpoint,
+			useFIPSEndpointKey,
+			defaultsModeKey,
+			retryModeKey,
+			caBundleKey,
+			roleDurationSecondsKey,
+			retryMaxAttemptsKey,
+
+			ssoSessionNameKey,
+			ssoAccountIDKey,
+			ssoRegionKey,
+			ssoRoleNameKey,
+			ssoStartURLKey,
+		}
+		for i := range stringKeys {
+			if err := mergeStringKey(&srcSection, &dstSection, sectionName, stringKeys[i]); err != nil {
+				return err
+			}
+		}
+
+		// set srcSection on dst srcSection
+		*dst = dst.SetSection(sectionName, dstSection)
+	}
+
+	return nil
+}
+
+func mergeStringKey(srcSection *ini.Section, dstSection *ini.Section, sectionName, key string) error {
+	if srcSection.Has(key) {
+		srcValue := srcSection.String(key)
+		val, err := ini.NewStringValue(srcValue)
+		if err != nil {
+			return fmt.Errorf("error merging %s, %w", key, err)
+		}
+
+		if dstSection.Has(key) {
+			dstSection.Logs = append(dstSection.Logs, newMergeKeyLogMessage(sectionName, key,
+				dstSection.SourceFile[key], srcSection.SourceFile[key]))
+		}
+
+		dstSection.UpdateValue(key, val)
+		dstSection.UpdateSourceFile(key, srcSection.SourceFile[key])
+	}
+	return nil
+}
+
+func newMergeKeyLogMessage(sectionName, key, dstSourceFile, srcSourceFile string) string {
+	return fmt.Sprintf("For profile: %v, overriding %v value, defined in %v "+
+		"with a %v value found in a duplicate profile defined at file %v. \n",
+		sectionName, key, dstSourceFile, key, srcSourceFile)
+}
+
+// Returns an error if all of the files fail to load. If at least one file is
+// successfully loaded and contains the profile, no error will be returned.
+func (c *SharedConfig) setFromIniSections(profiles map[string]struct{}, profile string,
+	sections ini.Sections, logger logging.Logger) error {
+	c.Profile = profile
+
+	section, ok := sections.GetSection(profile)
+	if !ok {
+		return SharedConfigProfileNotExistError{
+			Profile: profile,
+		}
+	}
+
+	// if logs are appended to the section, log them
+	if section.Logs != nil && logger != nil {
+		for _, log := range section.Logs {
+			logger.Logf(logging.Debug, log)
+		}
+	}
+
+	// set config from the provided INI section
+	err := c.setFromIniSection(profile, section)
+	if err != nil {
+		return fmt.Errorf("error fetching config from profile, %v, %w", profile, err)
+	}
+
+	if _, ok := profiles[profile]; ok {
+		// if this is the second instance of the profile the Assume Role
+		// options must be cleared because they are only valid for the
+		// first reference of a profile. The self linked instance of the
+		// profile only has credential provider options.
+		c.clearAssumeRoleOptions()
+	} else {
+		// First time a profile has been seen. Assert that if the credential
+		// type requires a role ARN, the ARN is also set.
+		if err := c.validateCredentialsConfig(profile); err != nil {
+			return err
+		}
+	}
+
+	// if not top level profile and has credentials, return with credentials.
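+	// e.g. when following a source_profile chain, a linked profile that
+	// already carries static access keys ends the chain here; only the
+	// top-level profile continues through the remaining validation.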
+ if len(profiles) != 0 && c.Credentials.HasKeys() { + return nil + } + + profiles[profile] = struct{}{} + + // validate no colliding credentials type are present + if err := c.validateCredentialType(); err != nil { + return err + } + + // Link source profiles for assume roles + if len(c.SourceProfileName) != 0 { + // Linked profile via source_profile ignore credential provider + // options, the source profile must provide the credentials. + c.clearCredentialOptions() + + srcCfg := &SharedConfig{} + err := srcCfg.setFromIniSections(profiles, c.SourceProfileName, sections, logger) + if err != nil { + // SourceProfileName that doesn't exist is an error in configuration. + if _, ok := err.(SharedConfigProfileNotExistError); ok { + err = SharedConfigAssumeRoleError{ + RoleARN: c.RoleARN, + Profile: c.SourceProfileName, + Err: err, + } + } + return err + } + + if !srcCfg.hasCredentials() { + return SharedConfigAssumeRoleError{ + RoleARN: c.RoleARN, + Profile: c.SourceProfileName, + } + } + + c.Source = srcCfg + } + + // If the profile contains an SSO session parameter, the session MUST exist + // as a section in the config file. Load the SSO session using the name + // provided. If the session section is not found or incomplete an error + // will be returned. + if c.hasSSOTokenProviderConfiguration() { + section, ok := sections.GetSection(ssoSectionPrefix + strings.TrimSpace(c.SSOSessionName)) + if !ok { + return fmt.Errorf("failed to find SSO session section, %v", c.SSOSessionName) + } + var ssoSession SSOSession + ssoSession.setFromIniSection(section) + ssoSession.Name = c.SSOSessionName + c.SSOSession = &ssoSession + } + + for _, sectionName := range sections.List() { + if strings.HasPrefix(sectionName, servicesPrefix) { + section, ok := sections.GetSection(sectionName) + if ok { + var svcs Services + svcs.setFromIniSection(section) + c.Services = svcs + } + } + } + + return nil +} + +// setFromIniSection loads the configuration from the profile section defined in +// the provided INI file. A SharedConfig pointer type value is used so that +// multiple config file loadings can be chained. +// +// Only loads complete logically grouped values, and will not set fields in cfg +// for incomplete grouped values in the config. Such as credentials. For example +// if a config file only includes aws_access_key_id but no aws_secret_access_key +// the aws_access_key_id will be ignored. 
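+//
+// For example, a profile like the following (values illustrative) leaves
+// cfg's credentials unset because the secret key is missing:
+//
+//	[incomplete]
+//	aws_access_key_id = EXAMPLEKEYID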
+func (c *SharedConfig) setFromIniSection(profile string, section ini.Section) error {
+	if len(section.Name) == 0 {
+		sources := make([]string, 0)
+		for _, v := range section.SourceFile {
+			sources = append(sources, v)
+		}
+
+		return fmt.Errorf("parsing error: could not find profile section name after processing files: %v", sources)
+	}
+
+	if len(section.Errors) != 0 {
+		var errStatement string
+		for i, e := range section.Errors {
+			errStatement += fmt.Sprintf("%d, %v\n", i+1, e.Error())
+		}
+		return fmt.Errorf("error using profile: \n %v", errStatement)
+	}
+
+	// Assume Role
+	updateString(&c.RoleARN, section, roleArnKey)
+	updateString(&c.ExternalID, section, externalIDKey)
+	updateString(&c.MFASerial, section, mfaSerialKey)
+	updateString(&c.RoleSessionName, section, roleSessionNameKey)
+	updateString(&c.SourceProfileName, section, sourceProfileKey)
+	updateString(&c.CredentialSource, section, credentialSourceKey)
+	updateString(&c.Region, section, regionKey)
+
+	// AWS Single Sign-On (AWS SSO)
+	// SSO session options
+	updateString(&c.SSOSessionName, section, ssoSessionNameKey)
+
+	// Legacy SSO session options
+	updateString(&c.SSORegion, section, ssoRegionKey)
+	updateString(&c.SSOStartURL, section, ssoStartURLKey)
+
+	// SSO fields not used
+	updateString(&c.SSOAccountID, section, ssoAccountIDKey)
+	updateString(&c.SSORoleName, section, ssoRoleNameKey)
+
+	// we're retaining a behavioral quirk with this field that existed before
+	// the removal of literal parsing for #2276:
+	// - if the key is missing, the config field will not be set
+	// - if the key is set to a non-numeric, the config field will be set to 0
+	if section.Has(roleDurationSecondsKey) {
+		if v, ok := section.Int(roleDurationSecondsKey); ok {
+			c.RoleDurationSeconds = aws.Duration(time.Duration(v) * time.Second)
+		} else {
+			c.RoleDurationSeconds = aws.Duration(time.Duration(0))
+		}
+	}
+
+	updateString(&c.CredentialProcess, section, credentialProcessKey)
+	updateString(&c.WebIdentityTokenFile, section, webIdentityTokenFileKey)
+
+	updateEndpointDiscoveryType(&c.EnableEndpointDiscovery, section, enableEndpointDiscoveryKey)
+	updateBoolPtr(&c.S3UseARNRegion, section, s3UseARNRegionKey)
+	updateBoolPtr(&c.S3DisableMultiRegionAccessPoints, section, s3DisableMultiRegionAccessPointsKey)
+
+	if err := updateEC2MetadataServiceEndpointMode(&c.EC2IMDSEndpointMode, section, ec2MetadataServiceEndpointModeKey); err != nil {
+		return fmt.Errorf("failed to load %s from shared config, %v", ec2MetadataServiceEndpointModeKey, err)
+	}
+	updateString(&c.EC2IMDSEndpoint, section, ec2MetadataServiceEndpointKey)
+	updateBoolPtr(&c.EC2IMDSv1Disabled, section, ec2MetadataV1DisabledKey)
+
+	updateUseDualStackEndpoint(&c.UseDualStackEndpoint, section, useDualStackEndpoint)
+	updateUseFIPSEndpoint(&c.UseFIPSEndpoint, section, useFIPSEndpointKey)
+
+	if err := updateDefaultsMode(&c.DefaultsMode, section, defaultsModeKey); err != nil {
+		return fmt.Errorf("failed to load %s from shared config, %w", defaultsModeKey, err)
+	}
+
+	if err := updateInt(&c.RetryMaxAttempts, section, retryMaxAttemptsKey); err != nil {
+		return fmt.Errorf("failed to load %s from shared config, %w", retryMaxAttemptsKey, err)
+	}
+	if err := updateRetryMode(&c.RetryMode, section, retryModeKey); err != nil {
+		return fmt.Errorf("failed to load %s from shared config, %w", retryModeKey, err)
+	}
+
+	updateString(&c.CustomCABundle, section, caBundleKey)
+
+	// user agent app ID added to request User-Agent header
+	updateString(&c.AppID, section, sdkAppID)
+
+	updateBoolPtr(&c.IgnoreConfiguredEndpoints, section, ignoreConfiguredEndpoints)
+
+	updateString(&c.BaseEndpoint, section, endpointURL)
+
+	// Shared Credentials
+	creds := aws.Credentials{
+		AccessKeyID:     section.String(accessKeyIDKey),
+		SecretAccessKey: section.String(secretAccessKey),
+		SessionToken:    section.String(sessionTokenKey),
+		Source:          fmt.Sprintf("SharedConfigCredentials: %s", section.SourceFile[accessKeyIDKey]),
+	}
+
+	if creds.HasKeys() {
+		c.Credentials = creds
+	}
+
+	return nil
+}
+
+func updateDefaultsMode(mode *aws.DefaultsMode, section ini.Section, key string) error {
+	if !section.Has(key) {
+		return nil
+	}
+	value := section.String(key)
+	if ok := mode.SetFromString(value); !ok {
+		return fmt.Errorf("invalid value: %s", value)
+	}
+	return nil
+}
+
+func updateRetryMode(mode *aws.RetryMode, section ini.Section, key string) (err error) {
+	if !section.Has(key) {
+		return nil
+	}
+	value := section.String(key)
+	if *mode, err = aws.ParseRetryMode(value); err != nil {
+		return err
+	}
+	return nil
+}
+
+func updateEC2MetadataServiceEndpointMode(endpointMode *imds.EndpointModeState, section ini.Section, key string) error {
+	if !section.Has(key) {
+		return nil
+	}
+	value := section.String(key)
+	return endpointMode.SetFromString(value)
+}
+
+func (c *SharedConfig) validateCredentialsConfig(profile string) error {
+	if err := c.validateCredentialsRequireARN(profile); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (c *SharedConfig) validateCredentialsRequireARN(profile string) error {
+	var credSource string
+
+	switch {
+	case len(c.SourceProfileName) != 0:
+		credSource = sourceProfileKey
+	case len(c.CredentialSource) != 0:
+		credSource = credentialSourceKey
+	case len(c.WebIdentityTokenFile) != 0:
+		credSource = webIdentityTokenFileKey
+	}
+
+	if len(credSource) != 0 && len(c.RoleARN) == 0 {
+		return CredentialRequiresARNError{
+			Type:    credSource,
+			Profile: profile,
+		}
+	}
+
+	return nil
+}
+
+func (c *SharedConfig) validateCredentialType() error {
+	// Only one or no credential type can be defined.
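+	// e.g. a profile setting both source_profile and credential_process
+	// fails this check; a profile setting none of them passes.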
+	if !oneOrNone(
+		len(c.SourceProfileName) != 0,
+		len(c.CredentialSource) != 0,
+		len(c.CredentialProcess) != 0,
+		len(c.WebIdentityTokenFile) != 0,
+	) {
+		return fmt.Errorf("only one credential type may be specified per profile: source profile, credential source, credential process, web identity token")
+	}
+
+	return nil
+}
+
+func (c *SharedConfig) validateSSOConfiguration() error {
+	if c.hasSSOTokenProviderConfiguration() {
+		err := c.validateSSOTokenProviderConfiguration()
+		if err != nil {
+			return err
+		}
+		return nil
+	}
+
+	if c.hasLegacySSOConfiguration() {
+		err := c.validateLegacySSOConfiguration()
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (c *SharedConfig) validateSSOTokenProviderConfiguration() error {
+	var missing []string
+
+	if len(c.SSOSessionName) == 0 {
+		missing = append(missing, ssoSessionNameKey)
+	}
+
+	if c.SSOSession == nil {
+		missing = append(missing, ssoSectionPrefix)
+	} else {
+		if len(c.SSOSession.SSORegion) == 0 {
+			missing = append(missing, ssoRegionKey)
+		}
+
+		if len(c.SSOSession.SSOStartURL) == 0 {
+			missing = append(missing, ssoStartURLKey)
+		}
+	}
+
+	if len(missing) > 0 {
+		return fmt.Errorf("profile %q is configured to use SSO but is missing required configuration: %s",
+			c.Profile, strings.Join(missing, ", "))
+	}
+
+	if len(c.SSORegion) > 0 && c.SSORegion != c.SSOSession.SSORegion {
+		return fmt.Errorf("%s in profile %q must match %s in %s", ssoRegionKey, c.Profile, ssoRegionKey, ssoSectionPrefix)
+	}
+
+	if len(c.SSOStartURL) > 0 && c.SSOStartURL != c.SSOSession.SSOStartURL {
+		return fmt.Errorf("%s in profile %q must match %s in %s", ssoStartURLKey, c.Profile, ssoStartURLKey, ssoSectionPrefix)
+	}
+
+	return nil
+}
+
+func (c *SharedConfig) validateLegacySSOConfiguration() error {
+	var missing []string
+
+	if len(c.SSORegion) == 0 {
+		missing = append(missing, ssoRegionKey)
+	}
+
+	if len(c.SSOStartURL) == 0 {
+		missing = append(missing, ssoStartURLKey)
+	}
+
+	if len(c.SSOAccountID) == 0 {
+		missing = append(missing, ssoAccountIDKey)
+	}
+
+	if len(c.SSORoleName) == 0 {
+		missing = append(missing, ssoRoleNameKey)
+	}
+
+	if len(missing) > 0 {
+		return fmt.Errorf("profile %q is configured to use SSO but is missing required configuration: %s",
+			c.Profile, strings.Join(missing, ", "))
+	}
+	return nil
+}
+
+func (c *SharedConfig) hasCredentials() bool {
+	switch {
+	case len(c.SourceProfileName) != 0:
+	case len(c.CredentialSource) != 0:
+	case len(c.CredentialProcess) != 0:
+	case len(c.WebIdentityTokenFile) != 0:
+	case c.hasSSOConfiguration():
+	case c.Credentials.HasKeys():
+	default:
+		return false
+	}
+
+	return true
+}
+
+func (c *SharedConfig) hasSSOConfiguration() bool {
+	return c.hasSSOTokenProviderConfiguration() || c.hasLegacySSOConfiguration()
+}
+
+func (c *SharedConfig) hasSSOTokenProviderConfiguration() bool {
+	return len(c.SSOSessionName) > 0
+}
+
+func (c *SharedConfig) hasLegacySSOConfiguration() bool {
+	return len(c.SSORegion) > 0 || len(c.SSOAccountID) > 0 || len(c.SSOStartURL) > 0 || len(c.SSORoleName) > 0
+}
+
+func (c *SharedConfig) clearAssumeRoleOptions() {
+	c.RoleARN = ""
+	c.ExternalID = ""
+	c.MFASerial = ""
+	c.RoleSessionName = ""
+	c.SourceProfileName = ""
+}
+
+func (c *SharedConfig) clearCredentialOptions() {
+	c.CredentialSource = ""
+	c.CredentialProcess = ""
+	c.WebIdentityTokenFile = ""
+	c.Credentials = aws.Credentials{}
+	c.SSOAccountID = ""
+	c.SSORegion = ""
+	c.SSORoleName = ""
+	c.SSOStartURL = ""
+}
+
+// SharedConfigLoadError is an error for when the shared config file fails
+// to load.
+type SharedConfigLoadError struct {
+	Filename string
+	Err      error
+}
+
+// Unwrap returns the underlying error that caused the failure.
+func (e SharedConfigLoadError) Unwrap() error {
+	return e.Err
+}
+
+func (e SharedConfigLoadError) Error() string {
+	return fmt.Sprintf("failed to load shared config file, %s, %v", e.Filename, e.Err)
+}
+
+// SharedConfigProfileNotExistError is an error for the shared config when
+// the profile was not found in the config file.
+type SharedConfigProfileNotExistError struct {
+	Filename []string
+	Profile  string
+	Err      error
+}
+
+// Unwrap returns the underlying error that caused the failure.
+func (e SharedConfigProfileNotExistError) Unwrap() error {
+	return e.Err
+}
+
+func (e SharedConfigProfileNotExistError) Error() string {
+	return fmt.Sprintf("failed to get shared config profile, %s", e.Profile)
+}
+
+// SharedConfigAssumeRoleError is an error for the shared config when the
+// profile contains assume role information, but that information is invalid
+// or not complete.
+type SharedConfigAssumeRoleError struct {
+	Profile string
+	RoleARN string
+	Err     error
+}
+
+// Unwrap returns the underlying error that caused the failure.
+func (e SharedConfigAssumeRoleError) Unwrap() error {
+	return e.Err
+}
+
+func (e SharedConfigAssumeRoleError) Error() string {
+	return fmt.Sprintf("failed to load assume role %s, of profile %s, %v",
+		e.RoleARN, e.Profile, e.Err)
+}
+
+// CredentialRequiresARNError provides the error for shared config credentials
+// that are incorrectly configured in the shared config or credentials file.
+type CredentialRequiresARNError struct {
+	// type of credentials that were configured.
+	Type string
+
+	// Profile name the credentials were in.
+	Profile string
+}
+
+// Error satisfies the error interface.
+func (e CredentialRequiresARNError) Error() string {
+	return fmt.Sprintf(
+		"credential type %s requires role_arn, profile %s",
+		e.Type, e.Profile,
+	)
+}
+
+func oneOrNone(bs ...bool) bool {
+	var count int
+
+	for _, b := range bs {
+		if b {
+			count++
+			if count > 1 {
+				return false
+			}
+		}
+	}
+
+	return true
+}
+
+// updateString will only update the dst with the value in the section, if the
+// key is present in the section.
+func updateString(dst *string, section ini.Section, key string) {
+	if !section.Has(key) {
+		return
+	}
+	*dst = section.String(key)
+}
+
+// updateInt will only update the dst with the value in the section, if the
+// key is present in the section.
+//
+// Down casts the INI integer value from an int64 to an int, which could be a
+// different bit size depending on platform.
+func updateInt(dst *int, section ini.Section, key string) error {
+	if !section.Has(key) {
+		return nil
+	}
+
+	v, ok := section.Int(key)
+	if !ok {
+		return fmt.Errorf("invalid value %s=%s, expect integer", key, section.String(key))
+	}
+
+	*dst = int(v)
+	return nil
+}
+
+// updateBool will only update the dst with the value in the section, if the
+// key is present in the section.
+func updateBool(dst *bool, section ini.Section, key string) {
+	if !section.Has(key) {
+		return
+	}
+
+	// retains pre-#2276 behavior where non-bool value would resolve to false
+	v, _ := section.Bool(key)
+	*dst = v
+}
+
+// updateBoolPtr will only update the dst with the value in the section, if
+// the key is present in the section.
+func updateBoolPtr(dst **bool, section ini.Section, key string) {
+	if !section.Has(key) {
+		return
+	}
+
+	// retains pre-#2276 behavior where non-bool value would resolve to false
+	v, _ := section.Bool(key)
+	*dst = new(bool)
+	**dst = v
+}
+
+// updateEndpointDiscoveryType will only update the dst with the value in the section, if
+// a valid key and corresponding EndpointDiscoveryType is found.
+func updateEndpointDiscoveryType(dst *aws.EndpointDiscoveryEnableState, section ini.Section, key string) {
+	if !section.Has(key) {
+		return
+	}
+
+	value := section.String(key)
+	if len(value) == 0 {
+		return
+	}
+
+	switch {
+	case strings.EqualFold(value, endpointDiscoveryDisabled):
+		*dst = aws.EndpointDiscoveryDisabled
+	case strings.EqualFold(value, endpointDiscoveryEnabled):
+		*dst = aws.EndpointDiscoveryEnabled
+	case strings.EqualFold(value, endpointDiscoveryAuto):
+		*dst = aws.EndpointDiscoveryAuto
+	}
+}
+
+// updateUseDualStackEndpoint will only update the dst with the value in the section, if
+// a valid key and corresponding DualStackEndpointState is found.
+func updateUseDualStackEndpoint(dst *aws.DualStackEndpointState, section ini.Section, key string) {
+	if !section.Has(key) {
+		return
+	}
+
+	// retains pre-#2276 behavior where non-bool value would resolve to false
+	if v, _ := section.Bool(key); v {
+		*dst = aws.DualStackEndpointStateEnabled
+	} else {
+		*dst = aws.DualStackEndpointStateDisabled
+	}
+}
+
+// updateUseFIPSEndpoint will only update the dst with the value in the section, if
+// a valid key and corresponding FIPSEndpointState is found.
+func updateUseFIPSEndpoint(dst *aws.FIPSEndpointState, section ini.Section, key string) {
+	if !section.Has(key) {
+		return
+	}
+
+	// retains pre-#2276 behavior where non-bool value would resolve to false
+	if v, _ := section.Bool(key); v {
+		*dst = aws.FIPSEndpointStateEnabled
+	} else {
+		*dst = aws.FIPSEndpointStateDisabled
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md
new file mode 100644
index 00000000000..65c8de85381
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md
@@ -0,0 +1,409 @@
+# v1.15.2 (2023-11-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.1 (2023-11-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.0 (2023-11-01)
+
+* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.0 (2023-10-31)
+
+* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/).
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.43 (2023-10-12) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.42 (2023-10-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.41 (2023-10-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.40 (2023-09-22) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.39 (2023-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.38 (2023-09-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.37 (2023-09-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.36 (2023-08-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.35 (2023-08-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.34 (2023-08-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.33 (2023-08-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.32 (2023-08-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.31 (2023-08-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.30 (2023-07-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.29 (2023-07-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.28 (2023-07-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.27 (2023-07-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.26 (2023-06-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.25 (2023-06-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.24 (2023-05-09) + +* No change notes available for this release. + +# v1.13.23 (2023-05-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.22 (2023-05-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.21 (2023-04-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.20 (2023-04-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.19 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.18 (2023-03-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.17 (2023-03-14) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.16 (2023-03-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.15 (2023-02-22) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.14 (2023-02-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.13 (2023-02-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.12 (2023-02-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.11 (2023-02-01) + +* No change notes available for this release. 
+ +# v1.13.10 (2023-01-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.9 (2023-01-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.8 (2023-01-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.7 (2022-12-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.6 (2022-12-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.5 (2022-12-15) + +* **Bug Fix**: Unify logic between shared config and in finding home directory +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.4 (2022-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.3 (2022-11-22) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.2 (2022-11-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.1 (2022-11-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.0 (2022-11-11) + +* **Announcement**: When using the SSOTokenProvider, a previous implementation incorrectly compensated for invalid SSOTokenProvider configurations in the shared profile. This has been fixed via PR #1903 and tracked in issue #1846 +* **Feature**: Adds token refresh support (via SSOTokenProvider) when using the SSOCredentialProvider + +# v1.12.24 (2022-11-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.23 (2022-10-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.22 (2022-10-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.21 (2022-09-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.20 (2022-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.19 (2022-09-14) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.18 (2022-09-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.17 (2022-08-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.16 (2022-08-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.15 (2022-08-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.14 (2022-08-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.13 (2022-08-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.12 (2022-08-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.11 (2022-08-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.10 (2022-08-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.9 (2022-07-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.8 (2022-07-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.7 (2022-06-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.6 (2022-06-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.5 (2022-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.4 (2022-05-26) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.3 (2022-05-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.2 
(2022-05-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.1 (2022-05-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.0 (2022-04-25)
+
+* **Feature**: Adds Duration and Policy options that can be used when creating stscreds.WebIdentityRoleProvider credentials provider.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.2 (2022-03-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.1 (2022-03-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.0 (2022-03-23)
+
+* **Feature**: Update `ec2rolecreds` package's `Provider` to implement support for CredentialsCache new optional caching strategy interfaces, HandleFailRefreshCredentialsCacheStrategy and AdjustExpiresByCredentialsCacheStrategy.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.0 (2022-03-08)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.0 (2022-02-24)
+
+* **Feature**: Adds support for `SourceIdentity` to `stscreds.AssumeRoleProvider` [#1588](https://github.com/aws/aws-sdk-go-v2/pull/1588). Fixes [#1575](https://github.com/aws/aws-sdk-go-v2/issues/1575)
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.8.0 (2022-01-14)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.0 (2022-01-07)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.6.5 (2021-12-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.6.4 (2021-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.6.3 (2021-11-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.6.2 (2021-11-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.6.1 (2021-11-12)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.6.0 (2021-11-06)
+
+* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically.
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.5.0 (2021-10-21)
+
+* **Feature**: Updated to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.3 (2021-10-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.2 (2021-09-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.1 (2021-09-10)
+
+* **Documentation**: Fixes the AssumeRoleProvider's documentation for using custom TokenProviders.
+
+# v1.4.0 (2021-08-27)
+
+* **Feature**: Adds support for Tags and TransitiveTagKeys to stscreds.AssumeRoleProvider. Closes https://github.com/aws/aws-sdk-go-v2/issues/723
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.3 (2021-08-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.2 (2021-08-04)
+
+* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.1 (2021-07-15)
+
+* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.0 (2021-06-25)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Bug Fix**: Fixed example usages of aws.CredentialsCache ([#1275](https://github.com/aws/aws-sdk-go-v2/pull/1275))
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.1 (2021-05-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.0 (2021-05-14)
+
+* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting.
+* **Dependency Update**: Updated to the latest SDK module versions
+
diff --git a/vendor/go.opentelemetry.io/collector/confmap/provider/fileprovider/LICENSE b/vendor/github.com/aws/aws-sdk-go-v2/credentials/LICENSE.txt
similarity index 100%
rename from vendor/go.opentelemetry.io/collector/confmap/provider/fileprovider/LICENSE
rename to vendor/github.com/aws/aws-sdk-go-v2/credentials/LICENSE.txt
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/doc.go
new file mode 100644
index 00000000000..f6e2873ab90
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/doc.go
@@ -0,0 +1,4 @@
+/*
+Package credentials provides types for retrieving credentials from credentials sources.
+*/
+package credentials
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/doc.go
new file mode 100644
index 00000000000..6ed71b42b28
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/doc.go
@@ -0,0 +1,58 @@
+// Package ec2rolecreds provides the credentials provider implementation for
+// retrieving AWS credentials from Amazon EC2 Instance Roles via Amazon EC2 IMDS.
+//
+// # Concurrency and caching
+//
+// The Provider is not safe to be used concurrently, and does not provide any
+// caching of credentials retrieved. You should wrap the Provider with an
+// `aws.CredentialsCache` to provide concurrency safety, and caching of
+// credentials.
+//
+// # Loading credentials with the SDK's AWS Config
+//
+// The EC2 Instance role credentials provider will automatically be the resolved
+// credential provider in the credential chain if no other credential provider is
+// resolved first.
+//
+// To explicitly instruct the SDK's credential resolution to use the EC2 Instance
+// role for credentials, you specify a `credential_source` property in the config
+// profile the SDK will load.
+//
+//	[default]
+//	credential_source = Ec2InstanceMetadata
+//
+// # Loading credentials with the Provider directly
+//
+// Another way to use the EC2 Instance role credentials provider is to create it
+// directly and assign it as the credentials provider for an API client.
+//
+// The following example creates a credentials provider for a command, and wraps
+// it with the CredentialsCache before assigning the provider to the Amazon S3 API
+// client's Credentials option.
+//
+//	provider := ec2rolecreds.New()
+//
+//	// Create the service client value configured for credentials.
+// svc := s3.New(s3.Options{
+//     Credentials: aws.NewCredentialsCache(provider),
+// })
+//
+// If you need more control, you can set the configuration options on the
+// credentials provider using the Options type to configure the EC2 IMDS API
+// client used to retrieve the credentials.
+//
+// provider := ec2rolecreds.New(func(o *ec2rolecreds.Options) {
+//     // See the Options type's documentation for more options available.
+//     o.Client = imds.New(imds.Options{
+//         HTTPClient: customHTTPClient,
+//     })
+// })
+//
+// # EC2 IMDS API Client
+//
+// See the github.com/aws/aws-sdk-go-v2/feature/ec2/imds module for more details on
+// configuring the client, and options available.
+package ec2rolecreds
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/provider.go
new file mode 100644
index 00000000000..5c699f16650
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/provider.go
@@ -0,0 +1,229 @@
+package ec2rolecreds
+
+import (
+	"bufio"
+	"context"
+	"encoding/json"
+	"fmt"
+	"math"
+	"path"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
+	sdkrand "github.com/aws/aws-sdk-go-v2/internal/rand"
+	"github.com/aws/aws-sdk-go-v2/internal/sdk"
+	"github.com/aws/smithy-go"
+	"github.com/aws/smithy-go/logging"
+	"github.com/aws/smithy-go/middleware"
+)
+
+// ProviderName provides a name of the EC2Role provider
+const ProviderName = "EC2RoleProvider"
+
+// GetMetadataAPIClient provides the interface for an EC2 IMDS API client for the
+// GetMetadata operation.
+type GetMetadataAPIClient interface {
+	GetMetadata(context.Context, *imds.GetMetadataInput, ...func(*imds.Options)) (*imds.GetMetadataOutput, error)
+}
+
+// A Provider retrieves credentials from the EC2 service, and keeps track of
+// whether those credentials are expired.
+//
+// The New function must be used to create the Provider with a custom EC2 IMDS
+// client.
+//
+// p := ec2rolecreds.New(func(o *ec2rolecreds.Options) {
+//     o.Client = imds.New(imds.Options{/* custom options */})
+// })
+type Provider struct {
+	options Options
+}
+
+// Options is a list of user-settable options for setting the behavior of the Provider.
+type Options struct {
+	// The API client that will be used by the provider to make GetMetadata API
+	// calls to EC2 IMDS.
+	//
+	// If nil, the provider will default to the EC2 IMDS client.
+	Client GetMetadataAPIClient
+}
+
+// New returns an initialized Provider value configured to retrieve
+// credentials from the EC2 Instance Metadata service.
+func New(optFns ...func(*Options)) *Provider {
+	options := Options{}
+
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	if options.Client == nil {
+		options.Client = imds.New(imds.Options{})
+	}
+
+	return &Provider{
+		options: options,
+	}
+}
+
+// Retrieve retrieves credentials from the EC2 service. An error will be
+// returned if the request fails, or if the desired credentials cannot be
+// extracted.
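+//
+// A minimal usage sketch (illustrative only, not part of the upstream
+// documentation): Retrieve is normally invoked indirectly through an
+// aws.CredentialsCache wrapper rather than called directly.
+//
+// provider := ec2rolecreds.New()
+// cache := aws.NewCredentialsCache(provider)
+// creds, err := cache.Retrieve(context.TODO())
+// if err != nil {
+//     // handle the IMDS lookup failure
+// }
+// _ = creds.AccessKeyID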
+func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) {
+	credsList, err := requestCredList(ctx, p.options.Client)
+	if err != nil {
+		return aws.Credentials{Source: ProviderName}, err
+	}
+
+	if len(credsList) == 0 {
+		return aws.Credentials{Source: ProviderName},
+			fmt.Errorf("unexpected empty EC2 IMDS role list")
+	}
+	credsName := credsList[0]
+
+	roleCreds, err := requestCred(ctx, p.options.Client, credsName)
+	if err != nil {
+		return aws.Credentials{Source: ProviderName}, err
+	}
+
+	creds := aws.Credentials{
+		AccessKeyID:     roleCreds.AccessKeyID,
+		SecretAccessKey: roleCreds.SecretAccessKey,
+		SessionToken:    roleCreds.Token,
+		Source:          ProviderName,
+
+		CanExpire: true,
+		Expires:   roleCreds.Expiration,
+	}
+
+	// Cap role credentials Expires to 1 hour so they can be refreshed more
+	// often. Jitter will be applied by the credentials cache if one is being
+	// used.
+	if anHour := sdk.NowTime().Add(1 * time.Hour); creds.Expires.After(anHour) {
+		creds.Expires = anHour
+	}
+
+	return creds, nil
+}
+
+// HandleFailToRefresh will extend the credentials Expires time if it is
+// expired. If the credentials will not expire within the minimum time, they
+// will be returned.
+//
+// If the credentials cannot expire, the original error will be returned.
+func (p *Provider) HandleFailToRefresh(ctx context.Context, prevCreds aws.Credentials, err error) (
+	aws.Credentials, error,
+) {
+	if !prevCreds.CanExpire {
+		return aws.Credentials{}, err
+	}
+
+	if prevCreds.Expires.After(sdk.NowTime().Add(5 * time.Minute)) {
+		return prevCreds, nil
+	}
+
+	newCreds := prevCreds
+	randFloat64, err := sdkrand.CryptoRandFloat64()
+	if err != nil {
+		return aws.Credentials{}, fmt.Errorf("failed to get random float, %w", err)
+	}
+
+	// Random distribution of [5,15) minutes.
+	expireOffset := time.Duration(randFloat64*float64(10*time.Minute)) + 5*time.Minute
+	newCreds.Expires = sdk.NowTime().Add(expireOffset)
+
+	logger := middleware.GetLogger(ctx)
+	logger.Logf(logging.Warn, "Attempting credential expiration extension due to a credential service availability issue. A refresh of these credentials will be attempted again in %v minutes.", math.Floor(expireOffset.Minutes()))
+
+	return newCreds, nil
+}
+
+// AdjustExpiresBy will add the passed-in duration to the passed-in
+// credential's Expires time, unless the time until Expires is less than 15
+// minutes. Returns the credentials, even if not updated.
+func (p *Provider) AdjustExpiresBy(creds aws.Credentials, dur time.Duration) (
+	aws.Credentials, error,
+) {
+	if !creds.CanExpire {
+		return creds, nil
+	}
+	if creds.Expires.Before(sdk.NowTime().Add(15 * time.Minute)) {
+		return creds, nil
+	}
+
+	creds.Expires = creds.Expires.Add(dur)
+	return creds, nil
+}
+
+// ec2RoleCredRespBody provides the shape for unmarshaling credential
+// request responses.
+type ec2RoleCredRespBody struct {
+	// Success State
+	Expiration      time.Time
+	AccessKeyID     string
+	SecretAccessKey string
+	Token           string
+
+	// Error state
+	Code    string
+	Message string
+}
+
+const iamSecurityCredsPath = "/iam/security-credentials/"
+
+// requestCredList requests a list of credentials from the EC2 service.
+// If there are no credentials, or there is an error making or receiving the
+// request, an error will be returned.
+func requestCredList(ctx context.Context, client GetMetadataAPIClient) ([]string, error) {
+	resp, err := client.GetMetadata(ctx, &imds.GetMetadataInput{
+		Path: iamSecurityCredsPath,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("no EC2 IMDS role found, %w", err)
+	}
+	defer resp.Content.Close()
+
+	credsList := []string{}
+	s := bufio.NewScanner(resp.Content)
+	for s.Scan() {
+		credsList = append(credsList, s.Text())
+	}
+
+	if err := s.Err(); err != nil {
+		return nil, fmt.Errorf("failed to read EC2 IMDS role, %w", err)
+	}
+
+	return credsList, nil
+}
+
+// requestCred requests the credentials for a specific role from the EC2
+// service.
+//
+// If the credentials cannot be found, or there is an error reading the
+// response, an error will be returned.
+func requestCred(ctx context.Context, client GetMetadataAPIClient, credsName string) (ec2RoleCredRespBody, error) {
+	resp, err := client.GetMetadata(ctx, &imds.GetMetadataInput{
+		Path: path.Join(iamSecurityCredsPath, credsName),
+	})
+	if err != nil {
+		return ec2RoleCredRespBody{},
+			fmt.Errorf("failed to get %s EC2 IMDS role credentials, %w",
+				credsName, err)
+	}
+	defer resp.Content.Close()
+
+	var respCreds ec2RoleCredRespBody
+	if err := json.NewDecoder(resp.Content).Decode(&respCreds); err != nil {
+		return ec2RoleCredRespBody{},
+			fmt.Errorf("failed to decode %s EC2 IMDS role credentials, %w",
+				credsName, err)
+	}
+
+	if !strings.EqualFold(respCreds.Code, "Success") {
+		// If an error code was returned something failed requesting the role.
+		return ec2RoleCredRespBody{},
+			fmt.Errorf("failed to get %s EC2 IMDS role credentials, %w",
+				credsName,
+				&smithy.GenericAPIError{Code: respCreds.Code, Message: respCreds.Message})
+	}
+
+	return respCreds, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/client.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/client.go
new file mode 100644
index 00000000000..60b8298f86f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/client.go
@@ -0,0 +1,148 @@
+package client
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/aws/retry"
+	awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
+	"github.com/aws/smithy-go"
+	smithymiddleware "github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// ServiceID is the client identifier
+const ServiceID = "endpoint-credentials"
+
+// HTTPClient is a client for sending HTTP requests
+type HTTPClient interface {
+	Do(*http.Request) (*http.Response, error)
+}
+
+// Options is the endpoint client's configurable options
+type Options struct {
+	// The endpoint to retrieve credentials from
+	Endpoint string
+
+	// The HTTP client to invoke API calls with. Defaults to the client's default
+	// HTTP implementation if nil.
+	HTTPClient HTTPClient
+
+	// Retryer guides how HTTP requests should be retried in case of recoverable
+	// failures. When nil the API client will use a default retryer.
+	Retryer aws.Retryer
+
+	// Set of options to modify how the credentials operation is invoked.
+	APIOptions []func(*smithymiddleware.Stack) error
+}
+
+// Copy creates a copy of the API options.
+func (o Options) Copy() Options {
+	to := o
+	to.APIOptions = make([]func(*smithymiddleware.Stack) error, len(o.APIOptions))
+	copy(to.APIOptions, o.APIOptions)
+	return to
+}
+
+// Client is a client for retrieving AWS credentials from an endpoint
+type Client struct {
+	options Options
+}
+
+// New constructs a new Client from the given options
+func New(options Options, optFns ...func(*Options)) *Client {
+	options = options.Copy()
+
+	if options.HTTPClient == nil {
+		options.HTTPClient = awshttp.NewBuildableClient()
+	}
+
+	if options.Retryer == nil {
+		options.Retryer = retry.NewStandard()
+	}
+
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	client := &Client{
+		options: options,
+	}
+
+	return client
+}
+
+// GetCredentialsInput is the input to send with the endpoint service to receive credentials.
+type GetCredentialsInput struct {
+	AuthorizationToken string
+}
+
+// GetCredentials retrieves credentials from the credential endpoint
+func (c *Client) GetCredentials(ctx context.Context, params *GetCredentialsInput, optFns ...func(*Options)) (*GetCredentialsOutput, error) {
+	stack := smithymiddleware.NewStack("GetCredentials", smithyhttp.NewStackRequest)
+	options := c.options.Copy()
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	stack.Serialize.Add(&serializeOpGetCredential{}, smithymiddleware.After)
+	stack.Build.Add(&buildEndpoint{Endpoint: options.Endpoint}, smithymiddleware.After)
+	stack.Deserialize.Add(&deserializeOpGetCredential{}, smithymiddleware.After)
+	retry.AddRetryMiddlewares(stack, retry.AddRetryMiddlewaresOptions{Retryer: options.Retryer})
+	middleware.AddSDKAgentKey(middleware.FeatureMetadata, ServiceID)
+	smithyhttp.AddErrorCloseResponseBodyMiddleware(stack)
+	smithyhttp.AddCloseResponseBodyMiddleware(stack)
+
+	for _, fn := range options.APIOptions {
+		if err := fn(stack); err != nil {
+			return nil, err
+		}
+	}
+
+	handler := smithymiddleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack)
+	result, _, err := handler.Handle(ctx, params)
+	if err != nil {
+		return nil, err
+	}
+
+	return result.(*GetCredentialsOutput), err
+}
+
+// GetCredentialsOutput is the response from the credential endpoint
+type GetCredentialsOutput struct {
+	Expiration      *time.Time
+	AccessKeyID     string
+	SecretAccessKey string
+	Token           string
+}
+
+// EndpointError is an error returned from the endpoint service
+type EndpointError struct {
+	Code    string            `json:"code"`
+	Message string            `json:"message"`
+	Fault   smithy.ErrorFault `json:"-"`
+}
+
+// Error is the error message string
+func (e *EndpointError) Error() string {
+	return fmt.Sprintf("%s: %s", e.Code, e.Message)
+}
+
+// ErrorCode is the error code returned by the endpoint
+func (e *EndpointError) ErrorCode() string {
+	return e.Code
+}
+
+// ErrorMessage is the error message returned by the endpoint
+func (e *EndpointError) ErrorMessage() string {
+	return e.Message
+}
+
+// ErrorFault indicates error fault classification
+func (e *EndpointError) ErrorFault() smithy.ErrorFault {
+	return e.Fault
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/middleware.go
new file mode 100644
index 00000000000..40747a53c18
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/middleware.go
@@ -0,0 +1,120 @@
+package client
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"net/url"
+
+	"github.com/aws/smithy-go"
+	smithymiddleware
"github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +type buildEndpoint struct { + Endpoint string +} + +func (b *buildEndpoint) ID() string { + return "BuildEndpoint" +} + +func (b *buildEndpoint) HandleBuild(ctx context.Context, in smithymiddleware.BuildInput, next smithymiddleware.BuildHandler) ( + out smithymiddleware.BuildOutput, metadata smithymiddleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport, %T", in.Request) + } + + if len(b.Endpoint) == 0 { + return out, metadata, fmt.Errorf("endpoint not provided") + } + + parsed, err := url.Parse(b.Endpoint) + if err != nil { + return out, metadata, fmt.Errorf("failed to parse endpoint, %w", err) + } + + request.URL = parsed + + return next.HandleBuild(ctx, in) +} + +type serializeOpGetCredential struct{} + +func (s *serializeOpGetCredential) ID() string { + return "OperationSerializer" +} + +func (s *serializeOpGetCredential) HandleSerialize(ctx context.Context, in smithymiddleware.SerializeInput, next smithymiddleware.SerializeHandler) ( + out smithymiddleware.SerializeOutput, metadata smithymiddleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type, %T", in.Request) + } + + params, ok := in.Parameters.(*GetCredentialsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters, %T", in.Parameters) + } + + const acceptHeader = "Accept" + request.Header[acceptHeader] = append(request.Header[acceptHeader][:0], "application/json") + + if len(params.AuthorizationToken) > 0 { + const authHeader = "Authorization" + request.Header[authHeader] = append(request.Header[authHeader][:0], params.AuthorizationToken) + } + + return next.HandleSerialize(ctx, in) +} + +type deserializeOpGetCredential struct{} + +func (d *deserializeOpGetCredential) ID() string { + return "OperationDeserializer" +} + +func (d *deserializeOpGetCredential) HandleDeserialize(ctx context.Context, in smithymiddleware.DeserializeInput, next smithymiddleware.DeserializeHandler) ( + out smithymiddleware.DeserializeOutput, metadata smithymiddleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, deserializeError(response) + } + + var shape *GetCredentialsOutput + if err = json.NewDecoder(response.Body).Decode(&shape); err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to deserialize json response, %w", err)} + } + + out.Result = shape + return out, metadata, err +} + +func deserializeError(response *smithyhttp.Response) error { + var errShape *EndpointError + err := json.NewDecoder(response.Body).Decode(&errShape) + if err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to decode error message, %w", err)} + } + + if response.StatusCode >= 500 { + errShape.Fault = smithy.FaultServer + } else { + errShape.Fault = smithy.FaultClient + } + + return errShape +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/provider.go 
b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/provider.go new file mode 100644 index 00000000000..adc7fc6b000 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/provider.go @@ -0,0 +1,136 @@ +// Package endpointcreds provides support for retrieving credentials from an +// arbitrary HTTP endpoint. +// +// The credentials endpoint Provider can receive both static and refreshable +// credentials that will expire. Credentials are static when an "Expiration" +// value is not provided in the endpoint's response. +// +// Static credentials will never expire once they have been retrieved. The format +// of the static credentials response: +// +// { +// "AccessKeyId" : "MUA...", +// "SecretAccessKey" : "/7PC5om....", +// } +// +// Refreshable credentials will expire within the "ExpiryWindow" of the Expiration +// value in the response. The format of the refreshable credentials response: +// +// { +// "AccessKeyId" : "MUA...", +// "SecretAccessKey" : "/7PC5om....", +// "Token" : "AQoDY....=", +// "Expiration" : "2016-02-25T06:03:31Z" +// } +// +// Errors should be returned in the following format and only returned with 400 +// or 500 HTTP status codes. +// +// { +// "code": "ErrorCode", +// "message": "Helpful error message." +// } +package endpointcreds + +import ( + "context" + "fmt" + "net/http" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client" + "github.com/aws/smithy-go/middleware" +) + +// ProviderName is the name of the credentials provider. +const ProviderName = `CredentialsEndpointProvider` + +type getCredentialsAPIClient interface { + GetCredentials(context.Context, *client.GetCredentialsInput, ...func(*client.Options)) (*client.GetCredentialsOutput, error) +} + +// Provider satisfies the aws.CredentialsProvider interface, and is a client to +// retrieve credentials from an arbitrary endpoint. +type Provider struct { + // The AWS Client to make HTTP requests to the endpoint with. The endpoint + // the request will be made to is provided by the aws.Config's + // EndpointResolver. + client getCredentialsAPIClient + + options Options +} + +// HTTPClient is a client for sending HTTP requests +type HTTPClient interface { + Do(*http.Request) (*http.Response, error) +} + +// Options is structure of configurable options for Provider +type Options struct { + // Endpoint to retrieve credentials from. Required + Endpoint string + + // HTTPClient to handle sending HTTP requests to the target endpoint. + HTTPClient HTTPClient + + // Set of options to modify how the credentials operation is invoked. + APIOptions []func(*middleware.Stack) error + + // The Retryer to be used for determining whether a failed requested should be retried + Retryer aws.Retryer + + // Optional authorization token value if set will be used as the value of + // the Authorization header of the endpoint credential request. + AuthorizationToken string +} + +// New returns a credentials Provider for retrieving AWS credentials +// from arbitrary endpoint. +func New(endpoint string, optFns ...func(*Options)) *Provider { + o := Options{ + Endpoint: endpoint, + } + + for _, fn := range optFns { + fn(&o) + } + + p := &Provider{ + client: client.New(client.Options{ + HTTPClient: o.HTTPClient, + Endpoint: o.Endpoint, + APIOptions: o.APIOptions, + Retryer: o.Retryer, + }), + options: o, + } + + return p +} + +// Retrieve will attempt to request the credentials from the endpoint the Provider +// was configured for. 
An error will be returned if the retrieval fails.
+func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) {
+	resp, err := p.getCredentials(ctx)
+	if err != nil {
+		return aws.Credentials{}, fmt.Errorf("failed to load credentials, %w", err)
+	}
+
+	creds := aws.Credentials{
+		AccessKeyID:     resp.AccessKeyID,
+		SecretAccessKey: resp.SecretAccessKey,
+		SessionToken:    resp.Token,
+		Source:          ProviderName,
+	}
+
+	if resp.Expiration != nil {
+		creds.CanExpire = true
+		creds.Expires = *resp.Expiration
+	}
+
+	return creds, nil
+}
+
+func (p *Provider) getCredentials(ctx context.Context) (*client.GetCredentialsOutput, error) {
+	return p.client.GetCredentials(ctx, &client.GetCredentialsInput{AuthorizationToken: p.options.AuthorizationToken})
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go
new file mode 100644
index 00000000000..365a8554529
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go
@@ -0,0 +1,6 @@
+// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
+
+package credentials
+
+// goModuleVersion is the tagged release for this module
+const goModuleVersion = "1.15.2"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/doc.go
new file mode 100644
index 00000000000..a3137b8fa9b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/doc.go
@@ -0,0 +1,92 @@
+// Package processcreds is a credentials provider to retrieve credentials from
+// an external CLI-invoked process.
+//
+// WARNING: The following describes a method of sourcing credentials from an external
+// process. This can potentially be dangerous, so proceed with caution. Other
+// credential providers should be preferred if at all possible. If using this
+// option, you should make sure that the config file is as locked down as possible
+// using security best practices for your operating system.
+//
+// # Concurrency and caching
+//
+// The Provider is not safe to be used concurrently, and does not provide any
+// caching of credentials retrieved. You should wrap the Provider with an
+// `aws.CredentialsCache` to provide concurrency safety, and caching of
+// credentials.
+//
+// # Loading credentials with the SDK's AWS Config
+//
+// You can use credentials from an AWS shared config `credential_process` in a
+// variety of ways.
+//
+// One way is to setup your shared config file, located in the default
+// location, with the `credential_process` key and the command you want to be
+// called. You also need to set the AWS_SDK_LOAD_CONFIG environment variable
+// (e.g., `export AWS_SDK_LOAD_CONFIG=1`) to use the shared config file.
+//
+// [default]
+// credential_process = /command/to/call
+//
+// Loading configuration using external config sources will use the credential
+// process to retrieve credentials. NOTE: If there are credentials in the
+// profile you are using, the credential process will not be used.
+//
+// // Load the SDK's configuration, which initializes the credentials.
+// cfg, _ := config.LoadDefaultConfig(context.TODO())
+//
+// // Create S3 service client to use the credentials.
+// svc := s3.NewFromConfig(cfg)
+//
+// # Loading credentials with the Provider directly
+//
+// Another way to use the credentials process provider is by using the
+// `NewProvider` constructor to create the provider and providing it with a
+// command to be executed to retrieve credentials.
+//
+// The following example creates a credentials provider for a command, and wraps
+// it with the CredentialsCache before assigning the provider to the Amazon S3 API
+// client's Credentials option.
+//
+// // Create credentials using the Provider.
+// provider := processcreds.NewProvider("/path/to/command")
+//
+// // Create the service client value configured for credentials.
+// svc := s3.New(s3.Options{
+//     Credentials: aws.NewCredentialsCache(provider),
+// })
+//
+// If you need more control, you can set any configurable options in the
+// credentials using one or more option functions.
+//
+// provider := processcreds.NewProvider("/path/to/command",
+//     func(o *processcreds.Options) {
+//         // Override the provider's default timeout
+//         o.Timeout = 2 * time.Minute
+//     })
+//
+// You can also use your own `exec.Cmd` value by providing a value that
+// satisfies the `NewCommandBuilder` interface to the `NewProviderCommand`
+// constructor.
+//
+// // Create an exec.Cmd
+// cmdBuilder := processcreds.NewCommandBuilderFunc(
+//     func(ctx context.Context) (*exec.Cmd, error) {
+//         cmd := exec.CommandContext(ctx,
+//             "customCLICommand",
+//             "-a", "argument",
+//         )
+//         cmd.Env = []string{
+//             "ENV_VAR_FOO=value",
+//             "ENV_VAR_BAR=other_value",
+//         }
+//
+//         return cmd, nil
+//     },
+// )
+//
+// // Create credentials using your exec.Cmd and custom timeout
+// provider := processcreds.NewProviderCommand(cmdBuilder,
+//     func(opt *processcreds.Options) {
+//         // optionally override the provider's default timeout
+//         opt.Timeout = 1 * time.Second
+//     })
+package processcreds
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/provider.go
new file mode 100644
index 00000000000..fe9345e287c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/provider.go
@@ -0,0 +1,281 @@
+package processcreds
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+	"os"
+	"os/exec"
+	"runtime"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/internal/sdkio"
+)
+
+const (
+	// ProviderName is the name this credentials provider will label any
+	// returned credentials Value with.
+	ProviderName = `ProcessProvider`
+
+	// DefaultTimeout is the default limit on time a process can run.
+	DefaultTimeout = time.Duration(1) * time.Minute
+)
+
+// ProviderError is an error indicating failure initializing or executing the
+// process credentials provider
+type ProviderError struct {
+	Err error
+}
+
+// Error returns the error message.
+func (e *ProviderError) Error() string {
+	return fmt.Sprintf("process provider error: %v", e.Err)
+}
+
+// Unwrap returns the underlying error the provider error wraps.
+func (e *ProviderError) Unwrap() error {
+	return e.Err
+}
+
+// Provider satisfies the aws.CredentialsProvider interface, and is a
+// client to retrieve credentials from a process.
+type Provider struct {
+	// Provides a constructor for the exec.Cmd values that are invoked by the
+	// provider for retrieving credentials. Use this to provide custom creation
+	// of exec.Cmd with things like environment variables, or other
+	// configuration.
+	//
+	// The provider defaults to the DefaultNewCommandBuilder.
+	commandBuilder NewCommandBuilder
+
+	options Options
+}
+
+// Options is the configuration options for configuring the Provider.
+type Options struct {
+	// Timeout limits the time a process can run.
+	Timeout time.Duration
+}
+
+// NewCommandBuilder provides the interface for specifying how the command will
+// be created that the Provider will use to retrieve credentials with.
+type NewCommandBuilder interface {
+	NewCommand(context.Context) (*exec.Cmd, error)
+}
+
+// NewCommandBuilderFunc provides a wrapper type around a function pointer to
+// satisfy the NewCommandBuilder interface.
+type NewCommandBuilderFunc func(context.Context) (*exec.Cmd, error)
+
+// NewCommand calls the underlying function pointer the builder was initialized with.
+func (fn NewCommandBuilderFunc) NewCommand(ctx context.Context) (*exec.Cmd, error) {
+	return fn(ctx)
+}
+
+// DefaultNewCommandBuilder provides the default NewCommandBuilder
+// implementation used by the provider. It takes a command and arguments to
+// invoke. The command will also be initialized with the current process
+// environment variables, stderr, and stdin pipes.
+type DefaultNewCommandBuilder struct {
+	Args []string
+}
+
+// NewCommand returns an initialized exec.Cmd with the builder's initialized
+// Args. The command is also initialized with the current process environment
+// variables, stderr, and stdin pipes.
+func (b DefaultNewCommandBuilder) NewCommand(ctx context.Context) (*exec.Cmd, error) {
+	var cmdArgs []string
+	if runtime.GOOS == "windows" {
+		cmdArgs = []string{"cmd.exe", "/C"}
+	} else {
+		cmdArgs = []string{"sh", "-c"}
+	}
+
+	if len(b.Args) == 0 {
+		return nil, &ProviderError{
+			Err: fmt.Errorf("failed to prepare command: command must not be empty"),
+		}
+	}
+
+	cmdArgs = append(cmdArgs, b.Args...)
+	cmd := exec.CommandContext(ctx, cmdArgs[0], cmdArgs[1:]...)
+	cmd.Env = os.Environ()
+
+	cmd.Stderr = os.Stderr // display stderr on console for MFA
+	cmd.Stdin = os.Stdin   // enable stdin for MFA
+
+	return cmd, nil
+}
+
+// NewProvider returns a pointer to a new Provider wrapping the given command.
+//
+// The provider defaults to the DefaultNewCommandBuilder for creating the
+// command the Provider will use to retrieve credentials with.
+func NewProvider(command string, options ...func(*Options)) *Provider {
+	var args []string
+
+	// Ensure that the command arguments are not set if the provided command is
+	// empty. This will error out when the command is executed since no
+	// arguments are specified.
+	if len(command) > 0 {
+		args = []string{command}
+	}
+
+	commandBuilder := DefaultNewCommandBuilder{
+		Args: args,
+	}
+	return NewProviderCommand(commandBuilder, options...)
+}
+
+// NewProviderCommand returns a pointer to a new Provider with the specified
+// command builder, and default timeout duration. Use this to provide custom
+// creation of exec.Cmd for options like environment variables, or other
+// configuration.
+func NewProviderCommand(builder NewCommandBuilder, options ...func(*Options)) *Provider {
+	p := &Provider{
+		commandBuilder: builder,
+		options: Options{
+			Timeout: DefaultTimeout,
+		},
+	}
+
+	for _, option := range options {
+		option(&p.options)
+	}
+
+	return p
+}
+
+// A CredentialProcessResponse is the AWS credentials format that must be
+// returned when executing an external credential_process.
+type CredentialProcessResponse struct {
+	// As of this writing, the Version key must be set to 1.
This might + // increment over time as the structure evolves. + Version int + + // The access key ID that identifies the temporary security credentials. + AccessKeyID string `json:"AccessKeyId"` + + // The secret access key that can be used to sign requests. + SecretAccessKey string + + // The token that users must pass to the service API to use the temporary credentials. + SessionToken string + + // The date on which the current credentials expire. + Expiration *time.Time +} + +// Retrieve executes the credential process command and returns the +// credentials, or error if the command fails. +func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) { + out, err := p.executeCredentialProcess(ctx) + if err != nil { + return aws.Credentials{Source: ProviderName}, err + } + + // Serialize and validate response + resp := &CredentialProcessResponse{} + if err = json.Unmarshal(out, resp); err != nil { + return aws.Credentials{Source: ProviderName}, &ProviderError{ + Err: fmt.Errorf("parse failed of process output: %s, error: %w", out, err), + } + } + + if resp.Version != 1 { + return aws.Credentials{Source: ProviderName}, &ProviderError{ + Err: fmt.Errorf("wrong version in process output (not 1)"), + } + } + + if len(resp.AccessKeyID) == 0 { + return aws.Credentials{Source: ProviderName}, &ProviderError{ + Err: fmt.Errorf("missing AccessKeyId in process output"), + } + } + + if len(resp.SecretAccessKey) == 0 { + return aws.Credentials{Source: ProviderName}, &ProviderError{ + Err: fmt.Errorf("missing SecretAccessKey in process output"), + } + } + + creds := aws.Credentials{ + Source: ProviderName, + AccessKeyID: resp.AccessKeyID, + SecretAccessKey: resp.SecretAccessKey, + SessionToken: resp.SessionToken, + } + + // Handle expiration + if resp.Expiration != nil { + creds.CanExpire = true + creds.Expires = *resp.Expiration + } + + return creds, nil +} + +// executeCredentialProcess starts the credential process on the OS and +// returns the results or an error. 
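+//
+// For illustration only (the values below are placeholders, not upstream
+// documentation): the external process is expected to print a JSON document
+// matching CredentialProcessResponse to stdout, for example:
+//
+// {
+//     "Version": 1,
+//     "AccessKeyId": "AKIDEXAMPLE",
+//     "SecretAccessKey": "secretEXAMPLE",
+//     "SessionToken": "tokenEXAMPLE",
+//     "Expiration": "2030-01-01T00:00:00Z"
+// }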
+func (p *Provider) executeCredentialProcess(ctx context.Context) ([]byte, error) { + if p.options.Timeout >= 0 { + var cancelFunc func() + ctx, cancelFunc = context.WithTimeout(ctx, p.options.Timeout) + defer cancelFunc() + } + + cmd, err := p.commandBuilder.NewCommand(ctx) + if err != nil { + return nil, err + } + + // get creds json on process's stdout + output := bytes.NewBuffer(make([]byte, 0, int(8*sdkio.KibiByte))) + if cmd.Stdout != nil { + cmd.Stdout = io.MultiWriter(cmd.Stdout, output) + } else { + cmd.Stdout = output + } + + execCh := make(chan error, 1) + go executeCommand(cmd, execCh) + + select { + case execError := <-execCh: + if execError == nil { + break + } + select { + case <-ctx.Done(): + return output.Bytes(), &ProviderError{ + Err: fmt.Errorf("credential process timed out: %w", execError), + } + default: + return output.Bytes(), &ProviderError{ + Err: fmt.Errorf("error in credential_process: %w", execError), + } + } + } + + out := output.Bytes() + if runtime.GOOS == "windows" { + // windows adds slashes to quotes + out = bytes.ReplaceAll(out, []byte(`\"`), []byte(`"`)) + } + + return out, nil +} + +func executeCommand(cmd *exec.Cmd, exec chan error) { + // Start the command + err := cmd.Start() + if err == nil { + err = cmd.Wait() + } + + exec <- err +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/doc.go new file mode 100644 index 00000000000..ece1e65f73b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/doc.go @@ -0,0 +1,81 @@ +// Package ssocreds provides a credential provider for retrieving temporary AWS +// credentials using an SSO access token. +// +// IMPORTANT: The provider in this package does not initiate or perform the AWS +// SSO login flow. The SDK provider expects that you have already performed the +// SSO login flow using AWS CLI using the "aws sso login" command, or by some +// other mechanism. The provider must find a valid non-expired access token for +// the AWS SSO user portal URL in ~/.aws/sso/cache. If a cached token is not +// found, it is expired, or the file is malformed an error will be returned. +// +// # Loading AWS SSO credentials with the AWS shared configuration file +// +// You can use configure AWS SSO credentials from the AWS shared configuration file by +// specifying the required keys in the profile and referencing an sso-session: +// +// sso_session +// sso_account_id +// sso_role_name +// +// For example, the following defines a profile "devsso" and specifies the AWS +// SSO parameters that defines the target account, role, sign-on portal, and +// the region where the user portal is located. Note: all SSO arguments must be +// provided, or an error will be returned. +// +// [profile devsso] +// sso_session = dev-session +// sso_role_name = SSOReadOnlyRole +// sso_account_id = 123456789012 +// +// [sso-session dev-session] +// sso_start_url = https://my-sso-portal.awsapps.com/start +// sso_region = us-east-1 +// sso_registration_scopes = sso:account:access +// +// Using the config module, you can load the AWS SDK shared configuration, and +// specify that this profile be used to retrieve credentials. 
For example: +// +// config, err := config.LoadDefaultConfig(context.TODO(), config.WithSharedConfigProfile("devsso")) +// if err != nil { +// return err +// } +// +// # Programmatically loading AWS SSO credentials directly +// +// You can programmatically construct the AWS SSO Provider in your application, +// and provide the necessary information to load and retrieve temporary +// credentials using an access token from ~/.aws/sso/cache. +// +// ssoClient := sso.NewFromConfig(cfg) +// ssoOidcClient := ssooidc.NewFromConfig(cfg) +// tokenPath, err := ssocreds.StandardCachedTokenFilepath("dev-session") +// if err != nil { +// return err +// } +// +// var provider aws.CredentialsProvider +// provider = ssocreds.New(ssoClient, "123456789012", "SSOReadOnlyRole", "https://my-sso-portal.awsapps.com/start", func(options *ssocreds.Options) { +// options.SSOTokenProvider = ssocreds.NewSSOTokenProvider(ssoOidcClient, tokenPath) +// }) +// +// // Wrap the provider with aws.CredentialsCache to cache the credentials until their expire time +// provider = aws.NewCredentialsCache(provider) +// +// credentials, err := provider.Retrieve(context.TODO()) +// if err != nil { +// return err +// } +// +// It is important that you wrap the Provider with aws.CredentialsCache if you +// are programmatically constructing the provider directly. This prevents your +// application from accessing the cached access token and requesting new +// credentials each time the credentials are used. +// +// # Additional Resources +// +// Configuring the AWS CLI to use AWS Single Sign-On: +// https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html +// +// AWS Single Sign-On User Guide: +// https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html +package ssocreds diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_cached_token.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_cached_token.go new file mode 100644 index 00000000000..3b97e6dd406 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_cached_token.go @@ -0,0 +1,233 @@ +package ssocreds + +import ( + "crypto/sha1" + "encoding/hex" + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go-v2/internal/sdk" + "github.com/aws/aws-sdk-go-v2/internal/shareddefaults" +) + +var osUserHomeDur = shareddefaults.UserHomeDir + +// StandardCachedTokenFilepath returns the filepath for the cached SSO token file, or +// error if unable get derive the path. Key that will be used to compute a SHA1 +// value that is hex encoded. 
+// +// Derives the filepath using the Key as: +// +// ~/.aws/sso/cache/.json +func StandardCachedTokenFilepath(key string) (string, error) { + homeDir := osUserHomeDur() + if len(homeDir) == 0 { + return "", fmt.Errorf("unable to get USER's home directory for cached token") + } + hash := sha1.New() + if _, err := hash.Write([]byte(key)); err != nil { + return "", fmt.Errorf("unable to compute cached token filepath key SHA1 hash, %w", err) + } + + cacheFilename := strings.ToLower(hex.EncodeToString(hash.Sum(nil))) + ".json" + + return filepath.Join(homeDir, ".aws", "sso", "cache", cacheFilename), nil +} + +type tokenKnownFields struct { + AccessToken string `json:"accessToken,omitempty"` + ExpiresAt *rfc3339 `json:"expiresAt,omitempty"` + + RefreshToken string `json:"refreshToken,omitempty"` + ClientID string `json:"clientId,omitempty"` + ClientSecret string `json:"clientSecret,omitempty"` +} + +type token struct { + tokenKnownFields + UnknownFields map[string]interface{} `json:"-"` +} + +func (t token) MarshalJSON() ([]byte, error) { + fields := map[string]interface{}{} + + setTokenFieldString(fields, "accessToken", t.AccessToken) + setTokenFieldRFC3339(fields, "expiresAt", t.ExpiresAt) + + setTokenFieldString(fields, "refreshToken", t.RefreshToken) + setTokenFieldString(fields, "clientId", t.ClientID) + setTokenFieldString(fields, "clientSecret", t.ClientSecret) + + for k, v := range t.UnknownFields { + if _, ok := fields[k]; ok { + return nil, fmt.Errorf("unknown token field %v, duplicates known field", k) + } + fields[k] = v + } + + return json.Marshal(fields) +} + +func setTokenFieldString(fields map[string]interface{}, key, value string) { + if value == "" { + return + } + fields[key] = value +} +func setTokenFieldRFC3339(fields map[string]interface{}, key string, value *rfc3339) { + if value == nil { + return + } + fields[key] = value +} + +func (t *token) UnmarshalJSON(b []byte) error { + var fields map[string]interface{} + if err := json.Unmarshal(b, &fields); err != nil { + return nil + } + + t.UnknownFields = map[string]interface{}{} + + for k, v := range fields { + var err error + switch k { + case "accessToken": + err = getTokenFieldString(v, &t.AccessToken) + case "expiresAt": + err = getTokenFieldRFC3339(v, &t.ExpiresAt) + case "refreshToken": + err = getTokenFieldString(v, &t.RefreshToken) + case "clientId": + err = getTokenFieldString(v, &t.ClientID) + case "clientSecret": + err = getTokenFieldString(v, &t.ClientSecret) + default: + t.UnknownFields[k] = v + } + + if err != nil { + return fmt.Errorf("field %q, %w", k, err) + } + } + + return nil +} + +func getTokenFieldString(v interface{}, value *string) error { + var ok bool + *value, ok = v.(string) + if !ok { + return fmt.Errorf("expect value to be string, got %T", v) + } + return nil +} + +func getTokenFieldRFC3339(v interface{}, value **rfc3339) error { + var stringValue string + if err := getTokenFieldString(v, &stringValue); err != nil { + return err + } + + timeValue, err := parseRFC3339(stringValue) + if err != nil { + return err + } + + *value = &timeValue + return nil +} + +func loadCachedToken(filename string) (token, error) { + fileBytes, err := ioutil.ReadFile(filename) + if err != nil { + return token{}, fmt.Errorf("failed to read cached SSO token file, %w", err) + } + + var t token + if err := json.Unmarshal(fileBytes, &t); err != nil { + return token{}, fmt.Errorf("failed to parse cached SSO token file, %w", err) + } + + if len(t.AccessToken) == 0 || t.ExpiresAt == nil || time.Time(*t.ExpiresAt).IsZero() { 
+ return token{}, fmt.Errorf( + "cached SSO token must contain accessToken and expiresAt fields") + } + + return t, nil +} + +func storeCachedToken(filename string, t token, fileMode os.FileMode) (err error) { + tmpFilename := filename + ".tmp-" + strconv.FormatInt(sdk.NowTime().UnixNano(), 10) + if err := writeCacheFile(tmpFilename, fileMode, t); err != nil { + return err + } + + if err := os.Rename(tmpFilename, filename); err != nil { + return fmt.Errorf("failed to replace old cached SSO token file, %w", err) + } + + return nil +} + +func writeCacheFile(filename string, fileMode os.FileMode, t token) (err error) { + var f *os.File + f, err = os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_RDWR, fileMode) + if err != nil { + return fmt.Errorf("failed to create cached SSO token file %w", err) + } + + defer func() { + closeErr := f.Close() + if err == nil && closeErr != nil { + err = fmt.Errorf("failed to close cached SSO token file, %w", closeErr) + } + }() + + encoder := json.NewEncoder(f) + + if err = encoder.Encode(t); err != nil { + return fmt.Errorf("failed to serialize cached SSO token, %w", err) + } + + return nil +} + +type rfc3339 time.Time + +func parseRFC3339(v string) (rfc3339, error) { + parsed, err := time.Parse(time.RFC3339, v) + if err != nil { + return rfc3339{}, fmt.Errorf("expected RFC3339 timestamp: %w", err) + } + + return rfc3339(parsed), nil +} + +func (r *rfc3339) UnmarshalJSON(bytes []byte) (err error) { + var value string + + // Use JSON unmarshal to unescape the quoted value making use of JSON's + // unquoting rules. + if err = json.Unmarshal(bytes, &value); err != nil { + return err + } + + *r, err = parseRFC3339(value) + + return nil +} + +func (r *rfc3339) MarshalJSON() ([]byte, error) { + value := time.Time(*r).Format(time.RFC3339) + + // Use JSON unmarshal to unescape the quoted value making use of JSON's + // quoting rules. + return json.Marshal(value) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_credentials_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_credentials_provider.go new file mode 100644 index 00000000000..b3cf7853e76 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_credentials_provider.go @@ -0,0 +1,152 @@ +package ssocreds + +import ( + "context" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/sdk" + "github.com/aws/aws-sdk-go-v2/service/sso" +) + +// ProviderName is the name of the provider used to specify the source of +// credentials. +const ProviderName = "SSOProvider" + +// GetRoleCredentialsAPIClient is a API client that implements the +// GetRoleCredentials operation. +type GetRoleCredentialsAPIClient interface { + GetRoleCredentials(context.Context, *sso.GetRoleCredentialsInput, ...func(*sso.Options)) ( + *sso.GetRoleCredentialsOutput, error, + ) +} + +// Options is the Provider options structure. +type Options struct { + // The Client which is configured for the AWS Region where the AWS SSO user + // portal is located. + Client GetRoleCredentialsAPIClient + + // The AWS account that is assigned to the user. + AccountID string + + // The role name that is assigned to the user. + RoleName string + + // The URL that points to the organization's AWS Single Sign-On (AWS SSO) + // user portal. + StartURL string + + // The filepath the cached token will be retrieved from. If unset Provider will + // use the startURL to determine the filepath at. 
+ // + // ~/.aws/sso/cache/.json + // + // If custom cached token filepath is used, the Provider's startUrl + // parameter will be ignored. + CachedTokenFilepath string + + // Used by the SSOCredentialProvider if a token configuration + // profile is used in the shared config + SSOTokenProvider *SSOTokenProvider +} + +// Provider is an AWS credential provider that retrieves temporary AWS +// credentials by exchanging an SSO login token. +type Provider struct { + options Options + + cachedTokenFilepath string +} + +// New returns a new AWS Single Sign-On (AWS SSO) credential provider. The +// provided client is expected to be configured for the AWS Region where the +// AWS SSO user portal is located. +func New(client GetRoleCredentialsAPIClient, accountID, roleName, startURL string, optFns ...func(options *Options)) *Provider { + options := Options{ + Client: client, + AccountID: accountID, + RoleName: roleName, + StartURL: startURL, + } + + for _, fn := range optFns { + fn(&options) + } + + return &Provider{ + options: options, + cachedTokenFilepath: options.CachedTokenFilepath, + } +} + +// Retrieve retrieves temporary AWS credentials from the configured Amazon +// Single Sign-On (AWS SSO) user portal by exchanging the accessToken present +// in ~/.aws/sso/cache. However, if a token provider configuration exists +// in the shared config, then we ought to use the token provider rather then +// direct access on the cached token. +func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) { + var accessToken *string + if p.options.SSOTokenProvider != nil { + token, err := p.options.SSOTokenProvider.RetrieveBearerToken(ctx) + if err != nil { + return aws.Credentials{}, err + } + accessToken = &token.Value + } else { + if p.cachedTokenFilepath == "" { + cachedTokenFilepath, err := StandardCachedTokenFilepath(p.options.StartURL) + if err != nil { + return aws.Credentials{}, &InvalidTokenError{Err: err} + } + p.cachedTokenFilepath = cachedTokenFilepath + } + + tokenFile, err := loadCachedToken(p.cachedTokenFilepath) + if err != nil { + return aws.Credentials{}, &InvalidTokenError{Err: err} + } + + if tokenFile.ExpiresAt == nil || sdk.NowTime().After(time.Time(*tokenFile.ExpiresAt)) { + return aws.Credentials{}, &InvalidTokenError{} + } + accessToken = &tokenFile.AccessToken + } + + output, err := p.options.Client.GetRoleCredentials(ctx, &sso.GetRoleCredentialsInput{ + AccessToken: accessToken, + AccountId: &p.options.AccountID, + RoleName: &p.options.RoleName, + }) + if err != nil { + return aws.Credentials{}, err + } + + return aws.Credentials{ + AccessKeyID: aws.ToString(output.RoleCredentials.AccessKeyId), + SecretAccessKey: aws.ToString(output.RoleCredentials.SecretAccessKey), + SessionToken: aws.ToString(output.RoleCredentials.SessionToken), + CanExpire: true, + Expires: time.Unix(0, output.RoleCredentials.Expiration*int64(time.Millisecond)).UTC(), + Source: ProviderName, + }, nil +} + +// InvalidTokenError is the error type that is returned if loaded token has +// expired or is otherwise invalid. To refresh the SSO session run AWS SSO +// login with the corresponding profile. 
+type InvalidTokenError struct {
+	Err error
+}
+
+func (i *InvalidTokenError) Unwrap() error {
+	return i.Err
+}
+
+func (i *InvalidTokenError) Error() string {
+	const msg = "the SSO session has expired or is invalid"
+	if i.Err == nil {
+		return msg
+	}
+	return msg + ": " + i.Err.Error()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_token_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_token_provider.go
new file mode 100644
index 00000000000..7f4fc546772
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_token_provider.go
@@ -0,0 +1,147 @@
+package ssocreds
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/internal/sdk"
+	"github.com/aws/aws-sdk-go-v2/service/ssooidc"
+	"github.com/aws/smithy-go/auth/bearer"
+)
+
+// CreateTokenAPIClient provides the interface for the SSOTokenProvider's API
+// client for calling the CreateToken operation to refresh the SSO token.
+type CreateTokenAPIClient interface {
+	CreateToken(context.Context, *ssooidc.CreateTokenInput, ...func(*ssooidc.Options)) (
+		*ssooidc.CreateTokenOutput, error,
+	)
+}
+
+// SSOTokenProviderOptions provides the options for configuring the
+// SSOTokenProvider.
+type SSOTokenProviderOptions struct {
+	// Client that can be overridden
+	Client CreateTokenAPIClient
+
+	// The set of API Client options to be applied when invoking the
+	// CreateToken operation.
+	ClientOptions []func(*ssooidc.Options)
+
+	// The path the file containing the cached SSO token will be read from.
+	// Initialized by the NewSSOTokenProvider's cachedTokenFilepath parameter.
+	CachedTokenFilepath string
+}
+
+// SSOTokenProvider provides a utility for refreshing SSO AccessTokens for
+// Bearer Authentication. The SSOTokenProvider can only be used to refresh
+// already cached SSO Tokens. This utility cannot perform the initial SSO
+// create token.
+//
+// The SSOTokenProvider is not safe to use concurrently. It must be wrapped in
+// a utility such as smithy-go's auth/bearer#TokenCache. The SDK's
+// config.LoadDefaultConfig will automatically wrap the SSOTokenProvider with
+// the smithy-go TokenCache, if the loaded external configuration is configured
+// for an SSO session.
+//
+// The initial SSO create token should be performed with the AWS CLI before the
+// Go application using the SSOTokenProvider will need to retrieve the SSO
+// token. If the AWS CLI has not created the token cache file, this provider
+// will return an error when attempting to retrieve the cached token.
+//
+// This provider will attempt to refresh the cached SSO token periodically if
+// needed when RetrieveBearerToken is called.
+//
+// A utility such as the AWS CLI must be used to initially create the SSO
+// session and cached token file.
+// https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html
+type SSOTokenProvider struct {
+	options SSOTokenProviderOptions
+}
+
+var _ bearer.TokenProvider = (*SSOTokenProvider)(nil)
+
+// NewSSOTokenProvider returns an initialized SSOTokenProvider that will
+// periodically refresh the cached SSO token stored in the cachedTokenFilepath.
+// The cachedTokenFilepath file's content will be rewritten by the token
+// provider when the token is refreshed.
+//
+// The client must be configured for the AWS region the SSO token was created for.
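+//
+// A hedged construction sketch (illustrative only; assumes cfg is an
+// already-loaded aws.Config for that region, and that the "dev-session"
+// sso-session name from the example above is in use):
+//
+// ssoOidcClient := ssooidc.NewFromConfig(cfg)
+// tokenPath, err := ssocreds.StandardCachedTokenFilepath("dev-session")
+// if err != nil {
+//     return err
+// }
+// tokenProvider := ssocreds.NewSSOTokenProvider(ssoOidcClient, tokenPath)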
+func NewSSOTokenProvider(client CreateTokenAPIClient, cachedTokenFilepath string, optFns ...func(o *SSOTokenProviderOptions)) *SSOTokenProvider { + options := SSOTokenProviderOptions{ + Client: client, + CachedTokenFilepath: cachedTokenFilepath, + } + for _, fn := range optFns { + fn(&options) + } + + provider := &SSOTokenProvider{ + options: options, + } + + return provider +} + +// RetrieveBearerToken returns the SSO token stored in the cachedTokenFilepath +// the SSOTokenProvider was created with. If the token has expired +// RetrieveBearerToken will attempt to refresh it. If the token cannot be +// refreshed or is not present an error will be returned. +// +// A utility such as the AWS CLI must be used to initially create the SSO +// session and cached token file. https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html +func (p SSOTokenProvider) RetrieveBearerToken(ctx context.Context) (bearer.Token, error) { + cachedToken, err := loadCachedToken(p.options.CachedTokenFilepath) + if err != nil { + return bearer.Token{}, err + } + + if cachedToken.ExpiresAt != nil && sdk.NowTime().After(time.Time(*cachedToken.ExpiresAt)) { + cachedToken, err = p.refreshToken(ctx, cachedToken) + if err != nil { + return bearer.Token{}, fmt.Errorf("refresh cached SSO token failed, %w", err) + } + } + + expiresAt := aws.ToTime((*time.Time)(cachedToken.ExpiresAt)) + return bearer.Token{ + Value: cachedToken.AccessToken, + CanExpire: !expiresAt.IsZero(), + Expires: expiresAt, + }, nil +} + +func (p SSOTokenProvider) refreshToken(ctx context.Context, cachedToken token) (token, error) { + if cachedToken.ClientSecret == "" || cachedToken.ClientID == "" || cachedToken.RefreshToken == "" { + return token{}, fmt.Errorf("cached SSO token is expired, or not present, and cannot be refreshed") + } + + createResult, err := p.options.Client.CreateToken(ctx, &ssooidc.CreateTokenInput{ + ClientId: &cachedToken.ClientID, + ClientSecret: &cachedToken.ClientSecret, + RefreshToken: &cachedToken.RefreshToken, + GrantType: aws.String("refresh_token"), + }, p.options.ClientOptions...) + if err != nil { + return token{}, fmt.Errorf("unable to refresh SSO token, %w", err) + } + + expiresAt := sdk.NowTime().Add(time.Duration(createResult.ExpiresIn) * time.Second) + + cachedToken.AccessToken = aws.ToString(createResult.AccessToken) + cachedToken.ExpiresAt = (*rfc3339)(&expiresAt) + cachedToken.RefreshToken = aws.ToString(createResult.RefreshToken) + + fileInfo, err := os.Stat(p.options.CachedTokenFilepath) + if err != nil { + return token{}, fmt.Errorf("failed to stat cached SSO token file %w", err) + } + + if err = storeCachedToken(p.options.CachedTokenFilepath, cachedToken, fileInfo.Mode()); err != nil { + return token{}, fmt.Errorf("unable to cache refreshed SSO token, %w", err) + } + + return cachedToken, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/static_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/static_provider.go new file mode 100644 index 00000000000..d525cac0960 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/static_provider.go @@ -0,0 +1,53 @@ +package credentials + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" +) + +const ( + // StaticCredentialsName provides a name of Static provider + StaticCredentialsName = "StaticCredentials" +) + +// StaticCredentialsEmptyError is emitted when static credentials are empty. 
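+//
+// Illustrative sketch (not part of the upstream documentation): Retrieve
+// returns this error when either the access key ID or the secret access key
+// is empty.
+//
+// provider := credentials.NewStaticCredentialsProvider("", "", "")
+// _, err := provider.Retrieve(context.TODO())
+// var emptyErr *credentials.StaticCredentialsEmptyError
+// if errors.As(err, &emptyErr) {
+//     // static credentials were not configured
+// }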
+type StaticCredentialsEmptyError struct{}

+func (*StaticCredentialsEmptyError) Error() string {
+	return "static credentials are empty"
+}
+
+// A StaticCredentialsProvider is a set of credentials which are set, and will
+// never expire.
+type StaticCredentialsProvider struct {
+	Value aws.Credentials
+}
+
+// NewStaticCredentialsProvider returns a StaticCredentialsProvider initialized
+// with the AWS credentials passed in.
+func NewStaticCredentialsProvider(key, secret, session string) StaticCredentialsProvider {
+	return StaticCredentialsProvider{
+		Value: aws.Credentials{
+			AccessKeyID:     key,
+			SecretAccessKey: secret,
+			SessionToken:    session,
+		},
+	}
+}
+
+// Retrieve returns the credentials or an error if the credentials are invalid.
+func (s StaticCredentialsProvider) Retrieve(_ context.Context) (aws.Credentials, error) {
+	v := s.Value
+	if v.AccessKeyID == "" || v.SecretAccessKey == "" {
+		return aws.Credentials{
+			Source: StaticCredentialsName,
+		}, &StaticCredentialsEmptyError{}
+	}
+
+	if len(v.Source) == 0 {
+		v.Source = StaticCredentialsName
+	}
+
+	return v, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/assume_role_provider.go
new file mode 100644
index 00000000000..289707b6de4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/assume_role_provider.go
@@ -0,0 +1,320 @@
+// Package stscreds are credential Providers to retrieve STS AWS credentials.
+//
+// STS provides multiple ways to retrieve credentials which can be used when making
+// future AWS service API operation calls.
+//
+// The SDK will ensure that, per instance of credentials.Credentials, all
+// requests to refresh the credentials will be synchronized. But, the SDK is
+// unable to ensure synchronous usage of the AssumeRoleProvider if the value is
+// shared between multiple Credentials or service clients.
+//
+// # Assume Role
+//
+// To assume an IAM role using STS with the SDK you can create a new Credentials
+// with the SDK's stscreds package.
+//
+// // Initial credentials loaded from SDK's default credential chain. Such as
+// // the environment, shared credentials (~/.aws/credentials), or EC2 Instance
+// // Role. These credentials will be used to make the STS Assume Role API call.
+// cfg, err := config.LoadDefaultConfig(context.TODO())
+// if err != nil {
+//     panic(err)
+// }
+//
+// // Create the credentials from AssumeRoleProvider to assume the role
+// // referenced by the "myRoleARN" ARN.
+// stsSvc := sts.NewFromConfig(cfg)
+// creds := stscreds.NewAssumeRoleProvider(stsSvc, "myRoleArn")
+//
+// cfg.Credentials = aws.NewCredentialsCache(creds)
+//
+// // Create service client value configured for credentials
+// // from assumed role.
+// svc := s3.NewFromConfig(cfg)
+//
+// # Assume Role with custom MFA Token provider
+//
+// To assume an IAM role with a MFA token you can either specify a custom MFA
+// token provider or use the SDK's built-in StdinTokenProvider that will prompt
+// the user for a token code each time the credentials need to be refreshed.
+// Specifying a custom token provider allows you to control where the token
+// code is retrieved from, and how it is refreshed.
+//
+// With a custom token provider, the provider is responsible for refreshing the
+// token code when called.
+//
+// cfg, err := config.LoadDefaultConfig(context.TODO())
+// if err != nil {
+// panic(err)
+// }
+//
+// staticTokenProvider := func() (string, error) {
+// return someTokenCode, nil
+// }
+//
+// // Create the credentials from AssumeRoleProvider to assume the role
+// // referenced by the "myRoleARN" ARN using the MFA token code provided.
+// creds := stscreds.NewAssumeRoleProvider(sts.NewFromConfig(cfg), "myRoleArn", func(o *stscreds.AssumeRoleOptions) {
+// o.SerialNumber = aws.String("myTokenSerialNumber")
+// o.TokenProvider = staticTokenProvider
+// })
+//
+// cfg.Credentials = aws.NewCredentialsCache(creds)
+//
+// // Create service client value configured for credentials
+// // from assumed role.
+// svc := s3.NewFromConfig(cfg)
+//
+// # Assume Role with MFA Token Provider
+//
+// To assume an IAM role with MFA for longer running tasks where the credentials
+// may need to be refreshed, setting the TokenProvider field of AssumeRoleProvider
+// will allow the credential provider to prompt for a new MFA token code when the
+// role's credentials need to be refreshed.
+//
+// The StdinTokenProvider function is available to prompt on stdin to retrieve
+// the MFA token code from the user. You can also implement custom prompts by
+// satisfying the TokenProvider function signature.
+//
+// Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will
+// have undesirable results as the StdinTokenProvider will not be synchronized. A
+// single Credentials with an AssumeRoleProvider can be shared safely.
+//
+// cfg, err := config.LoadDefaultConfig(context.TODO())
+// if err != nil {
+// panic(err)
+// }
+//
+// // Create the credentials from AssumeRoleProvider to assume the role
+// // referenced by the "myRoleARN" ARN using the MFA token code provided.
+// creds := stscreds.NewAssumeRoleProvider(sts.NewFromConfig(cfg), "myRoleArn", func(o *stscreds.AssumeRoleOptions) {
+// o.SerialNumber = aws.String("myTokenSerialNumber")
+// o.TokenProvider = stscreds.StdinTokenProvider
+// })
+//
+// cfg.Credentials = aws.NewCredentialsCache(creds)
+//
+// // Create service client value configured for credentials
+// // from assumed role.
+// svc := s3.NewFromConfig(cfg)
+package stscreds
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/service/sts"
+ "github.com/aws/aws-sdk-go-v2/service/sts/types"
+)
+
+// StdinTokenProvider will prompt on stdout and read from stdin for a string value.
+// An error is returned if reading from stdin fails.
+//
+// Use this function to read MFA tokens from stdin. The function makes no attempt
+// to make atomic prompts from stdin across multiple goroutines.
+//
+// Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will
+// have undesirable results as the StdinTokenProvider will not be synchronized. A
+// single Credentials with an AssumeRoleProvider can be shared safely.
+//
+// Will wait forever until something is provided on stdin.
+func StdinTokenProvider() (string, error) {
+ var v string
+ fmt.Printf("Assume Role MFA token code: ")
+ _, err := fmt.Scanln(&v)
+
+ return v, err
+}
+
+// ProviderName provides the name of the AssumeRole provider
+const ProviderName = "AssumeRoleProvider"
+
+// AssumeRoleAPIClient is a client capable of the STS AssumeRole operation.
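+//
+// *sts.Client satisfies this interface. A minimal test-double sketch (the
+// stubSTS name is assumed, not part of the SDK):
+//
+// type stubSTS struct{ out *sts.AssumeRoleOutput }
+//
+// func (s stubSTS) AssumeRole(ctx context.Context, in *sts.AssumeRoleInput, optFns ...func(*sts.Options)) (*sts.AssumeRoleOutput, error) {
+// return s.out, nil
+// }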
+type AssumeRoleAPIClient interface { + AssumeRole(ctx context.Context, params *sts.AssumeRoleInput, optFns ...func(*sts.Options)) (*sts.AssumeRoleOutput, error) +} + +// DefaultDuration is the default amount of time in minutes that the +// credentials will be valid for. This value is only used by AssumeRoleProvider +// for specifying the default expiry duration of an assume role. +// +// Other providers such as WebIdentityRoleProvider do not use this value, and +// instead rely on STS API's default parameter handing to assign a default +// value. +var DefaultDuration = time.Duration(15) * time.Minute + +// AssumeRoleProvider retrieves temporary credentials from the STS service, and +// keeps track of their expiration time. +// +// This credential provider will be used by the SDKs default credential change +// when shared configuration is enabled, and the shared config or shared credentials +// file configure assume role. See Session docs for how to do this. +// +// AssumeRoleProvider does not provide any synchronization and it is not safe +// to share this value across multiple Credentials, Sessions, or service clients +// without also sharing the same Credentials instance. +type AssumeRoleProvider struct { + options AssumeRoleOptions +} + +// AssumeRoleOptions is the configurable options for AssumeRoleProvider +type AssumeRoleOptions struct { + // Client implementation of the AssumeRole operation. Required + Client AssumeRoleAPIClient + + // IAM Role ARN to be assumed. Required + RoleARN string + + // Session name, if you wish to uniquely identify this session. + RoleSessionName string + + // Expiry duration of the STS credentials. Defaults to 15 minutes if not set. + Duration time.Duration + + // Optional ExternalID to pass along, defaults to nil if not set. + ExternalID *string + + // The policy plain text must be 2048 bytes or shorter. However, an internal + // conversion compresses it into a packed binary format with a separate limit. + // The PackedPolicySize response element indicates by percentage how close to + // the upper size limit the policy is, with 100% equaling the maximum allowed + // size. + Policy *string + + // The ARNs of IAM managed policies you want to use as managed session policies. + // The policies must exist in the same account as the role. + // + // This parameter is optional. You can provide up to 10 managed policy ARNs. + // However, the plain text that you use for both inline and managed session + // policies can't exceed 2,048 characters. + // + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. + // + // Passing policies to this operation returns new temporary credentials. The + // resulting session's permissions are the intersection of the role's identity-based + // policy and the session policies. You can use the role's temporary credentials + // in subsequent AWS API calls to access resources in the account that owns + // the role. You cannot use session policies to grant more permissions than + // those allowed by the identity-based policy of the role that is being assumed. 
+ // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+ // in the IAM User Guide.
+ PolicyARNs []types.PolicyDescriptorType
+
+ // The identification number of the MFA device that is associated with the user
+ // who is making the AssumeRole call. Specify this value if the trust policy
+ // of the role being assumed includes a condition that requires MFA authentication.
+ // The value is either the serial number for a hardware device (such as GAHT12345678)
+ // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user).
+ SerialNumber *string
+
+ // The source identity specified by the principal that is calling the AssumeRole
+ // operation. You can require users to specify a source identity when they assume a
+ // role. You do this by using the sts:SourceIdentity condition key in a role trust
+ // policy. You can use source identity information in CloudTrail logs to determine
+ // who took actions with a role. You can use the aws:SourceIdentity condition key
+ // to further control access to Amazon Web Services resources based on the value of
+ // source identity. For more information about using source identity, see Monitor
+ // and control actions taken with assumed roles
+ // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html)
+ // in the IAM User Guide.
+ SourceIdentity *string
+
+ // Async method of providing MFA token code for assuming an IAM role with MFA.
+ // The value returned by the function will be used as the TokenCode in the Retrieve
+ // call. See StdinTokenProvider for a provider that prompts and reads from stdin.
+ //
+ // This token provider will be called whenever the assumed role's
+ // credentials need to be refreshed when SerialNumber is set.
+ TokenProvider func() (string, error)
+
+ // A list of session tags that you want to pass. Each session tag consists of a key
+ // name and an associated value. For more information about session tags, see
+ // Tagging STS Sessions
+ // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) in the
+ // IAM User Guide. This parameter is optional. You can pass up to 50 session tags.
+ Tags []types.Tag
+
+ // A list of keys for session tags that you want to set as transitive. If you set a
+ // tag key as transitive, the corresponding key and value passes to subsequent
+ // sessions in a role chain. For more information, see Chaining Roles with Session
+ // Tags
+ // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining)
+ // in the IAM User Guide. This parameter is optional.
+ TransitiveTagKeys []string
+}
+
+// NewAssumeRoleProvider constructs and returns a credentials provider that
+// will retrieve credentials by assuming an IAM role using STS.
+func NewAssumeRoleProvider(client AssumeRoleAPIClient, roleARN string, optFns ...func(*AssumeRoleOptions)) *AssumeRoleProvider {
+ o := AssumeRoleOptions{
+ Client: client,
+ RoleARN: roleARN,
+ }
+
+ for _, fn := range optFns {
+ fn(&o)
+ }
+
+ return &AssumeRoleProvider{
+ options: o,
+ }
+}
+
+// Retrieve generates a new set of temporary credentials using STS.
+func (p *AssumeRoleProvider) Retrieve(ctx context.Context) (aws.Credentials, error) {
+ // Apply defaults where parameters are not set.
+ if len(p.options.RoleSessionName) == 0 {
+ // Try to work out a role name that will hopefully end up unique.
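+ // The generated name below is derived from the current Unix time in
+ // nanoseconds, e.g. "aws-go-sdk-1700000000000000000", so sessions
+ // started at different instants get distinct names.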
+ p.options.RoleSessionName = fmt.Sprintf("aws-go-sdk-%d", time.Now().UTC().UnixNano()) + } + if p.options.Duration == 0 { + // Expire as often as AWS permits. + p.options.Duration = DefaultDuration + } + input := &sts.AssumeRoleInput{ + DurationSeconds: aws.Int32(int32(p.options.Duration / time.Second)), + PolicyArns: p.options.PolicyARNs, + RoleArn: aws.String(p.options.RoleARN), + RoleSessionName: aws.String(p.options.RoleSessionName), + ExternalId: p.options.ExternalID, + SourceIdentity: p.options.SourceIdentity, + Tags: p.options.Tags, + TransitiveTagKeys: p.options.TransitiveTagKeys, + } + if p.options.Policy != nil { + input.Policy = p.options.Policy + } + if p.options.SerialNumber != nil { + if p.options.TokenProvider != nil { + input.SerialNumber = p.options.SerialNumber + code, err := p.options.TokenProvider() + if err != nil { + return aws.Credentials{}, err + } + input.TokenCode = aws.String(code) + } else { + return aws.Credentials{}, fmt.Errorf("assume role with MFA enabled, but TokenProvider is not set") + } + } + + resp, err := p.options.Client.AssumeRole(ctx, input) + if err != nil { + return aws.Credentials{Source: ProviderName}, err + } + + return aws.Credentials{ + AccessKeyID: *resp.Credentials.AccessKeyId, + SecretAccessKey: *resp.Credentials.SecretAccessKey, + SessionToken: *resp.Credentials.SessionToken, + Source: ProviderName, + + CanExpire: true, + Expires: *resp.Credentials.Expiration, + }, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/web_identity_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/web_identity_provider.go new file mode 100644 index 00000000000..ddaf6df6ce1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/web_identity_provider.go @@ -0,0 +1,150 @@ +package stscreds + +import ( + "context" + "fmt" + "io/ioutil" + "strconv" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/retry" + "github.com/aws/aws-sdk-go-v2/internal/sdk" + "github.com/aws/aws-sdk-go-v2/service/sts" + "github.com/aws/aws-sdk-go-v2/service/sts/types" +) + +var invalidIdentityTokenExceptionCode = (&types.InvalidIdentityTokenException{}).ErrorCode() + +const ( + // WebIdentityProviderName is the web identity provider name + WebIdentityProviderName = "WebIdentityCredentials" +) + +// AssumeRoleWithWebIdentityAPIClient is a client capable of the STS AssumeRoleWithWebIdentity operation. +type AssumeRoleWithWebIdentityAPIClient interface { + AssumeRoleWithWebIdentity(ctx context.Context, params *sts.AssumeRoleWithWebIdentityInput, optFns ...func(*sts.Options)) (*sts.AssumeRoleWithWebIdentityOutput, error) +} + +// WebIdentityRoleProvider is used to retrieve credentials using +// an OIDC token. +type WebIdentityRoleProvider struct { + options WebIdentityRoleOptions +} + +// WebIdentityRoleOptions is a structure of configurable options for WebIdentityRoleProvider +type WebIdentityRoleOptions struct { + // Client implementation of the AssumeRoleWithWebIdentity operation. Required + Client AssumeRoleWithWebIdentityAPIClient + + // JWT Token Provider. Required + TokenRetriever IdentityTokenRetriever + + // IAM Role ARN to assume. Required + RoleARN string + + // Session name, if you wish to uniquely identify this session. + RoleSessionName string + + // Expiry duration of the STS credentials. STS will assign a default expiry + // duration if this value is unset. 
This is different from the Duration
+ // option of AssumeRoleProvider, which automatically assigns 15 minutes if
+ // Duration is unset.
+ //
+ // See the STS AssumeRoleWithWebIdentity API reference guide for more
+ // information on defaults.
+ // https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRoleWithWebIdentity.html
+ Duration time.Duration
+
+ // An IAM policy in JSON format that you want to use as an inline session policy.
+ Policy *string
+
+ // The Amazon Resource Names (ARNs) of the IAM managed policies that you
+ // want to use as managed session policies. The policies must exist in the
+ // same account as the role.
+ PolicyARNs []types.PolicyDescriptorType
+}
+
+// IdentityTokenRetriever is an interface for retrieving a JWT.
+type IdentityTokenRetriever interface {
+ GetIdentityToken() ([]byte, error)
+}
+
+// IdentityTokenFile is for retrieving an identity token from the given file name.
+type IdentityTokenFile string
+
+// GetIdentityToken retrieves the JWT token from the file and returns the contents as a []byte
+func (j IdentityTokenFile) GetIdentityToken() ([]byte, error) {
+ b, err := ioutil.ReadFile(string(j))
+ if err != nil {
+ return nil, fmt.Errorf("unable to read file at %s: %v", string(j), err)
+ }
+
+ return b, nil
+}
+
+// NewWebIdentityRoleProvider will return a new WebIdentityRoleProvider with the
+// provided AssumeRoleWithWebIdentityAPIClient.
+func NewWebIdentityRoleProvider(client AssumeRoleWithWebIdentityAPIClient, roleARN string, tokenRetriever IdentityTokenRetriever, optFns ...func(*WebIdentityRoleOptions)) *WebIdentityRoleProvider {
+ o := WebIdentityRoleOptions{
+ Client: client,
+ RoleARN: roleARN,
+ TokenRetriever: tokenRetriever,
+ }
+
+ for _, fn := range optFns {
+ fn(&o)
+ }
+
+ return &WebIdentityRoleProvider{options: o}
+}
+
+// Retrieve will attempt to assume a role using the identity token read by the
+// provider's IdentityTokenRetriever. An error is returned if the token cannot
+// be retrieved or the role cannot be assumed.
+func (p *WebIdentityRoleProvider) Retrieve(ctx context.Context) (aws.Credentials, error) {
+ b, err := p.options.TokenRetriever.GetIdentityToken()
+ if err != nil {
+ return aws.Credentials{}, fmt.Errorf("failed to retrieve jwt from provided source, %w", err)
+ }
+
+ sessionName := p.options.RoleSessionName
+ if len(sessionName) == 0 {
+ // session name is used to uniquely identify a session. This simply
+ // uses unix time in nanoseconds to uniquely identify sessions.
+ sessionName = strconv.FormatInt(sdk.NowTime().UnixNano(), 10)
+ }
+ input := &sts.AssumeRoleWithWebIdentityInput{
+ PolicyArns: p.options.PolicyARNs,
+ RoleArn: &p.options.RoleARN,
+ RoleSessionName: &sessionName,
+ WebIdentityToken: aws.String(string(b)),
+ }
+ if p.options.Duration != 0 {
+ // If set use the value, otherwise STS will assign a default expiration duration.
+ input.DurationSeconds = aws.Int32(int32(p.options.Duration / time.Second))
+ }
+ if p.options.Policy != nil {
+ input.Policy = p.options.Policy
+ }
+
+ resp, err := p.options.Client.AssumeRoleWithWebIdentity(ctx, input, func(options *sts.Options) {
+ options.Retryer = retry.AddWithErrorCodes(options.Retryer, invalidIdentityTokenExceptionCode)
+ })
+ if err != nil {
+ return aws.Credentials{}, fmt.Errorf("failed to retrieve credentials, %w", err)
+ }
+
+ // InvalidIdentityToken error is a temporary error that can occur
+ // when assuming a role with a JWT web identity token.
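+ // The options function passed to AssumeRoleWithWebIdentity above adds
+ // that error code to the retryer via retry.AddWithErrorCodes, so such
+ // transient failures are retried instead of being returned immediately.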
+ + value := aws.Credentials{ + AccessKeyID: aws.ToString(resp.Credentials.AccessKeyId), + SecretAccessKey: aws.ToString(resp.Credentials.SecretAccessKey), + SessionToken: aws.ToString(resp.Credentials.SessionToken), + Source: WebIdentityProviderName, + CanExpire: true, + Expires: *resp.Credentials.Expiration, + } + return value, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md new file mode 100644 index 00000000000..c566941f9e8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md @@ -0,0 +1,253 @@ +# v1.14.3 (2023-11-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.2 (2023-11-02) + +* No change notes available for this release. + +# v1.14.1 (2023-11-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.0 (2023-10-31) + +* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.13 (2023-10-12) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.12 (2023-10-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.11 (2023-08-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.10 (2023-08-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.9 (2023-08-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.8 (2023-08-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.7 (2023-07-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.6 (2023-07-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.5 (2023-07-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.4 (2023-06-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.3 (2023-04-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.2 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.1 (2023-03-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.0 (2023-03-14) + +* **Feature**: Add flag to disable IMDSv1 fallback + +# v1.12.24 (2023-03-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.23 (2023-02-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.22 (2023-02-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.21 (2022-12-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.20 (2022-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.19 (2022-10-24) + +* **Bug Fix**: Fixes an issue that prevented logging of the API request or responses when the respective log modes were enabled. 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.18 (2022-10-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.17 (2022-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.16 (2022-09-14) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.15 (2022-09-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.14 (2022-08-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.13 (2022-08-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.12 (2022-08-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.11 (2022-08-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.10 (2022-08-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.9 (2022-08-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.8 (2022-07-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.7 (2022-06-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.6 (2022-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.5 (2022-05-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.4 (2022-04-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.3 (2022-03-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.2 (2022-03-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.1 (2022-03-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.0 (2022-03-08) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.0 (2022-02-24) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.10.0 (2022-01-14) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.0 (2022-01-07) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.8.2 (2021-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.8.1 (2021-11-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.8.0 (2021-11-06) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.0 (2021-10-21) + +* **Feature**: Updated to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.0 (2021-10-11) + +* **Feature**: Respect passed in Context Deadline/Timeout. Updates the IMDS Client operations to not override the passed in Context's Deadline or Timeout options. If an Client operation is called with a Context with a Deadline or Timeout, the client will no longer override it with the client's default timeout. +* **Bug Fix**: Fix IMDS client's response handling and operation timeout race. 
Fixes #1253 +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.1 (2021-09-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.0 (2021-08-27) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.1 (2021-08-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.0 (2021-08-04) + +* **Feature**: adds error handling for defered close calls +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.0 (2021-07-15) + +* **Feature**: Support has been added for EC2 IPv6-enabled Instance Metadata Service Endpoints. +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.0 (2021-06-25) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.1 (2021-05-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.0 (2021-05-14) + +* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. +* **Dependency Update**: Updated to the latest SDK module versions + diff --git a/vendor/go.opentelemetry.io/collector/confmap/provider/httpprovider/LICENSE b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/LICENSE.txt similarity index 100% rename from vendor/go.opentelemetry.io/collector/confmap/provider/httpprovider/LICENSE rename to vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/LICENSE.txt diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_client.go new file mode 100644 index 00000000000..46e144d9363 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_client.go @@ -0,0 +1,348 @@ +package imds + +import ( + "context" + "fmt" + "net" + "net/http" + "os" + "strings" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/retry" + awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" + internalconfig "github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config" + "github.com/aws/smithy-go" + "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// ServiceID provides the unique name of this API client +const ServiceID = "ec2imds" + +// Client provides the API client for interacting with the Amazon EC2 Instance +// Metadata Service API. +type Client struct { + options Options +} + +// ClientEnableState provides an enumeration if the client is enabled, +// disabled, or default behavior. +type ClientEnableState = internalconfig.ClientEnableState + +// Enumeration values for ClientEnableState +const ( + ClientDefaultEnableState ClientEnableState = internalconfig.ClientDefaultEnableState // default behavior + ClientDisabled ClientEnableState = internalconfig.ClientDisabled // client disabled + ClientEnabled ClientEnableState = internalconfig.ClientEnabled // client enabled +) + +// EndpointModeState is an enum configuration variable describing the client endpoint mode. +// Not configurable directly, but used when using the NewFromConfig. 
+type EndpointModeState = internalconfig.EndpointModeState + +// Enumeration values for EndpointModeState +const ( + EndpointModeStateUnset EndpointModeState = internalconfig.EndpointModeStateUnset + EndpointModeStateIPv4 EndpointModeState = internalconfig.EndpointModeStateIPv4 + EndpointModeStateIPv6 EndpointModeState = internalconfig.EndpointModeStateIPv6 +) + +const ( + disableClientEnvVar = "AWS_EC2_METADATA_DISABLED" + + // Client endpoint options + endpointEnvVar = "AWS_EC2_METADATA_SERVICE_ENDPOINT" + + defaultIPv4Endpoint = "http://169.254.169.254" + defaultIPv6Endpoint = "http://[fd00:ec2::254]" +) + +// New returns an initialized Client based on the functional options. Provide +// additional functional options to further configure the behavior of the client, +// such as changing the client's endpoint or adding custom middleware behavior. +func New(options Options, optFns ...func(*Options)) *Client { + options = options.Copy() + + for _, fn := range optFns { + fn(&options) + } + + options.HTTPClient = resolveHTTPClient(options.HTTPClient) + + if options.Retryer == nil { + options.Retryer = retry.NewStandard() + } + options.Retryer = retry.AddWithMaxBackoffDelay(options.Retryer, 1*time.Second) + + if options.ClientEnableState == ClientDefaultEnableState { + if v := os.Getenv(disableClientEnvVar); strings.EqualFold(v, "true") { + options.ClientEnableState = ClientDisabled + } + } + + if len(options.Endpoint) == 0 { + if v := os.Getenv(endpointEnvVar); len(v) != 0 { + options.Endpoint = v + } + } + + client := &Client{ + options: options, + } + + if client.options.tokenProvider == nil && !client.options.disableAPIToken { + client.options.tokenProvider = newTokenProvider(client, defaultTokenTTL) + } + + return client +} + +// NewFromConfig returns an initialized Client based the AWS SDK config, and +// functional options. Provide additional functional options to further +// configure the behavior of the client, such as changing the client's endpoint +// or adding custom middleware behavior. +func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { + opts := Options{ + APIOptions: append([]func(*middleware.Stack) error{}, cfg.APIOptions...), + HTTPClient: cfg.HTTPClient, + ClientLogMode: cfg.ClientLogMode, + Logger: cfg.Logger, + } + + if cfg.Retryer != nil { + opts.Retryer = cfg.Retryer() + } + + resolveClientEnableState(cfg, &opts) + resolveEndpointConfig(cfg, &opts) + resolveEndpointModeConfig(cfg, &opts) + resolveEnableFallback(cfg, &opts) + + return New(opts, optFns...) +} + +// Options provides the fields for configuring the API client's behavior. +type Options struct { + // Set of options to modify how an operation is invoked. These apply to all + // operations invoked for this client. Use functional options on operation + // call to modify this list for per operation behavior. + APIOptions []func(*middleware.Stack) error + + // The endpoint the client will use to retrieve EC2 instance metadata. + // + // Specifies the EC2 Instance Metadata Service endpoint to use. If specified it overrides EndpointMode. + // + // If unset, and the environment variable AWS_EC2_METADATA_SERVICE_ENDPOINT + // has a value the client will use the value of the environment variable as + // the endpoint for operation calls. + // + // AWS_EC2_METADATA_SERVICE_ENDPOINT=http://[::1] + Endpoint string + + // The endpoint selection mode the client will use if no explicit endpoint is provided using the Endpoint field. 
+ //
+ // Setting EndpointMode to EndpointModeStateIPv4 will configure the client to use the default EC2 IPv4 endpoint.
+ // Setting EndpointMode to EndpointModeStateIPv6 will configure the client to use the default EC2 IPv6 endpoint.
+ //
+ // By default, if EndpointMode is not set (EndpointModeStateUnset), the client uses the default endpoint selection mode EndpointModeStateIPv4.
+ EndpointMode EndpointModeState
+
+ // The HTTP client to invoke API calls with. Defaults to the client's default
+ // HTTP implementation if nil.
+ HTTPClient HTTPClient
+
+ // Retryer guides how HTTP requests should be retried in case of recoverable
+ // failures. When nil the API client will use a default retryer.
+ Retryer aws.Retryer
+
+ // Controls whether the EC2 Instance Metadata client is enabled. The client
+ // will default to enabled if not set to ClientDisabled. When the client is
+ // disabled it will return an error for all operation calls.
+ //
+ // If ClientEnableState value is ClientDefaultEnableState (default value),
+ // and the environment variable "AWS_EC2_METADATA_DISABLED" is set to
+ // "true", the client will be disabled.
+ //
+ // AWS_EC2_METADATA_DISABLED=true
+ ClientEnableState ClientEnableState
+
+ // Configures the events that will be sent to the configured logger.
+ ClientLogMode aws.ClientLogMode
+
+ // The logger writer interface to write logging messages to.
+ Logger logging.Logger
+
+ // Configure IMDSv1 fallback behavior. By default, the client will attempt
+ // to fall back to IMDSv1 as needed for backwards compatibility. When set to [aws.FalseTernary]
+ // the client will return any errors encountered from attempting to fetch a token
+ // instead of silently using the insecure data flow of IMDSv1.
+ //
+ // See [configuring IMDS] for more information.
+ //
+ // [configuring IMDS]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html
+ EnableFallback aws.Ternary
+
+ // provides the caching of API tokens used for operation calls. If unset,
+ // the API token will not be retrieved for the operation.
+ tokenProvider *tokenProvider
+
+ // option to disable the API token provider for testing.
+ disableAPIToken bool
+}
+
+// HTTPClient provides the interface for a client making HTTP requests with the
+// API.
+type HTTPClient interface {
+ Do(*http.Request) (*http.Response, error)
+}
+
+// Copy creates a copy of the API options.
+func (o Options) Copy() Options {
+ to := o
+ to.APIOptions = append([]func(*middleware.Stack) error{}, o.APIOptions...)
+ return to
+}
+
+// WithAPIOptions wraps the API middleware functions, as a functional option
+// for the API Client Options. Use this helper to add additional functional
+// options to the API client, or operation calls.
+func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) {
+ return func(o *Options) {
+ o.APIOptions = append(o.APIOptions, optFns...)
+ } +} + +func (c *Client) invokeOperation( + ctx context.Context, opID string, params interface{}, optFns []func(*Options), + stackFns ...func(*middleware.Stack, Options) error, +) ( + result interface{}, metadata middleware.Metadata, err error, +) { + stack := middleware.NewStack(opID, smithyhttp.NewStackRequest) + options := c.options.Copy() + for _, fn := range optFns { + fn(&options) + } + + if options.ClientEnableState == ClientDisabled { + return nil, metadata, &smithy.OperationError{ + ServiceID: ServiceID, + OperationName: opID, + Err: fmt.Errorf( + "access disabled to EC2 IMDS via client option, or %q environment variable", + disableClientEnvVar), + } + } + + for _, fn := range stackFns { + if err := fn(stack, options); err != nil { + return nil, metadata, err + } + } + + for _, fn := range options.APIOptions { + if err := fn(stack); err != nil { + return nil, metadata, err + } + } + + handler := middleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack) + result, metadata, err = handler.Handle(ctx, params) + if err != nil { + return nil, metadata, &smithy.OperationError{ + ServiceID: ServiceID, + OperationName: opID, + Err: err, + } + } + + return result, metadata, err +} + +const ( + // HTTP client constants + defaultDialerTimeout = 250 * time.Millisecond + defaultResponseHeaderTimeout = 500 * time.Millisecond +) + +func resolveHTTPClient(client HTTPClient) HTTPClient { + if client == nil { + client = awshttp.NewBuildableClient() + } + + if c, ok := client.(*awshttp.BuildableClient); ok { + client = c. + WithDialerOptions(func(d *net.Dialer) { + // Use a custom Dial timeout for the EC2 Metadata service to account + // for the possibility the application might not be running in an + // environment with the service present. The client should fail fast in + // this case. + d.Timeout = defaultDialerTimeout + }). + WithTransportOptions(func(tr *http.Transport) { + // Use a custom Transport timeout for the EC2 Metadata service to + // account for the possibility that the application might be running in + // a container, and EC2Metadata service drops the connection after a + // single IP Hop. The client should fail fast in this case. 
+ tr.ResponseHeaderTimeout = defaultResponseHeaderTimeout + }) + } + + return client +} + +func resolveClientEnableState(cfg aws.Config, options *Options) error { + if options.ClientEnableState != ClientDefaultEnableState { + return nil + } + value, found, err := internalconfig.ResolveClientEnableState(cfg.ConfigSources) + if err != nil || !found { + return err + } + options.ClientEnableState = value + return nil +} + +func resolveEndpointModeConfig(cfg aws.Config, options *Options) error { + if options.EndpointMode != EndpointModeStateUnset { + return nil + } + value, found, err := internalconfig.ResolveEndpointModeConfig(cfg.ConfigSources) + if err != nil || !found { + return err + } + options.EndpointMode = value + return nil +} + +func resolveEndpointConfig(cfg aws.Config, options *Options) error { + if len(options.Endpoint) != 0 { + return nil + } + value, found, err := internalconfig.ResolveEndpointConfig(cfg.ConfigSources) + if err != nil || !found { + return err + } + options.Endpoint = value + return nil +} + +func resolveEnableFallback(cfg aws.Config, options *Options) { + if options.EnableFallback != aws.UnknownTernary { + return + } + + disabled, ok := internalconfig.ResolveV1FallbackDisabled(cfg.ConfigSources) + if !ok { + return + } + + if disabled { + options.EnableFallback = aws.FalseTernary + } else { + options.EnableFallback = aws.TrueTernary + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetDynamicData.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetDynamicData.go new file mode 100644 index 00000000000..9e3bdb0e66e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetDynamicData.go @@ -0,0 +1,76 @@ +package imds + +import ( + "context" + "fmt" + "io" + + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +const getDynamicDataPath = "/latest/dynamic" + +// GetDynamicData uses the path provided to request information from the EC2 +// instance metadata service for dynamic data. The content will be returned +// as a string, or error if the request failed. +func (c *Client) GetDynamicData(ctx context.Context, params *GetDynamicDataInput, optFns ...func(*Options)) (*GetDynamicDataOutput, error) { + if params == nil { + params = &GetDynamicDataInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetDynamicData", params, optFns, + addGetDynamicDataMiddleware, + ) + if err != nil { + return nil, err + } + + out := result.(*GetDynamicDataOutput) + out.ResultMetadata = metadata + return out, nil +} + +// GetDynamicDataInput provides the input parameters for the GetDynamicData +// operation. +type GetDynamicDataInput struct { + // The relative dynamic data path to retrieve. Can be empty string to + // retrieve a response containing a new line separated list of dynamic data + // resources available. + // + // Must not include the dynamic data base path. + // + // May include leading slash. If Path includes trailing slash the trailing + // slash will be included in the request for the resource. + Path string +} + +// GetDynamicDataOutput provides the output parameters for the GetDynamicData +// operation. 
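+//
+// Content is an io.ReadCloser that the caller must drain and close. A
+// minimal consumption sketch (client and ctx are assumed to be in scope):
+//
+// out, err := client.GetDynamicData(ctx, &imds.GetDynamicDataInput{
+// Path: "instance-identity/document",
+// })
+// if err != nil {
+// // handle error
+// }
+// defer out.Content.Close()
+// body, err := io.ReadAll(out.Content)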
+type GetDynamicDataOutput struct {
+ Content io.ReadCloser
+
+ ResultMetadata middleware.Metadata
+}
+
+func addGetDynamicDataMiddleware(stack *middleware.Stack, options Options) error {
+ return addAPIRequestMiddleware(stack,
+ options,
+ buildGetDynamicDataPath,
+ buildGetDynamicDataOutput)
+}
+
+func buildGetDynamicDataPath(params interface{}) (string, error) {
+ p, ok := params.(*GetDynamicDataInput)
+ if !ok {
+ return "", fmt.Errorf("unknown parameter type %T", params)
+ }
+
+ return appendURIPath(getDynamicDataPath, p.Path), nil
+}
+
+func buildGetDynamicDataOutput(resp *smithyhttp.Response) (interface{}, error) {
+ return &GetDynamicDataOutput{
+ Content: resp.Body,
+ }, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetIAMInfo.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetIAMInfo.go
new file mode 100644
index 00000000000..24845dccd6d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetIAMInfo.go
@@ -0,0 +1,102 @@
+package imds
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "strings"
+ "time"
+
+ "github.com/aws/smithy-go"
+ smithyio "github.com/aws/smithy-go/io"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+const getIAMInfoPath = getMetadataPath + "/iam/info"
+
+// GetIAMInfo retrieves the IAM information associated with the instance. An
+// error is returned if the request fails or the response cannot be parsed.
+func (c *Client) GetIAMInfo(
+ ctx context.Context, params *GetIAMInfoInput, optFns ...func(*Options),
+) (
+ *GetIAMInfoOutput, error,
+) {
+ if params == nil {
+ params = &GetIAMInfoInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetIAMInfo", params, optFns,
+ addGetIAMInfoMiddleware,
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetIAMInfoOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+// GetIAMInfoInput provides the input parameters for GetIAMInfo operation.
+type GetIAMInfoInput struct{}
+
+// GetIAMInfoOutput provides the output parameters for GetIAMInfo operation.
+type GetIAMInfoOutput struct {
+ IAMInfo
+
+ ResultMetadata middleware.Metadata
+}
+
+func addGetIAMInfoMiddleware(stack *middleware.Stack, options Options) error {
+ return addAPIRequestMiddleware(stack,
+ options,
+ buildGetIAMInfoPath,
+ buildGetIAMInfoOutput,
+ )
+}
+
+func buildGetIAMInfoPath(params interface{}) (string, error) {
+ return getIAMInfoPath, nil
+}
+
+func buildGetIAMInfoOutput(resp *smithyhttp.Response) (v interface{}, err error) {
+ defer func() {
+ closeErr := resp.Body.Close()
+ if err == nil {
+ err = closeErr
+ } else if closeErr != nil {
+ err = fmt.Errorf("response body close error: %v, original error: %w", closeErr, err)
+ }
+ }()
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(resp.Body, ringBuffer)
+
+ imdsResult := &GetIAMInfoOutput{}
+ if err = json.NewDecoder(body).Decode(&imdsResult.IAMInfo); err != nil {
+ return nil, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode IAM info, %w", err),
+ Snapshot: ringBuffer.Bytes(),
+ }
+ }
+ // Any code other than success is an error
+ if !strings.EqualFold(imdsResult.Code, "success") {
+ return nil, fmt.Errorf("failed to get EC2 IMDS IAM info, %s",
+ imdsResult.Code)
+ }
+
+ return imdsResult, nil
+}
+
+// IAMInfo provides the shape for unmarshaling IAM info from the metadata
+// API.
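+//
+// An illustrative response body (all field values are made up) has the form:
+//
+// {
+// "Code" : "Success",
+// "LastUpdated" : "2024-01-01T00:00:00Z",
+// "InstanceProfileArn" : "arn:aws:iam::123456789012:instance-profile/example",
+// "InstanceProfileId" : "AIPAEXAMPLE"
+// }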
+type IAMInfo struct { + Code string + LastUpdated time.Time + InstanceProfileArn string + InstanceProfileID string +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetInstanceIdentityDocument.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetInstanceIdentityDocument.go new file mode 100644 index 00000000000..a87758ed302 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetInstanceIdentityDocument.go @@ -0,0 +1,109 @@ +package imds + +import ( + "context" + "encoding/json" + "fmt" + "io" + "time" + + "github.com/aws/smithy-go" + smithyio "github.com/aws/smithy-go/io" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +const getInstanceIdentityDocumentPath = getDynamicDataPath + "/instance-identity/document" + +// GetInstanceIdentityDocument retrieves an identity document describing an +// instance. Error is returned if the request fails or is unable to parse +// the response. +func (c *Client) GetInstanceIdentityDocument( + ctx context.Context, params *GetInstanceIdentityDocumentInput, optFns ...func(*Options), +) ( + *GetInstanceIdentityDocumentOutput, error, +) { + if params == nil { + params = &GetInstanceIdentityDocumentInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetInstanceIdentityDocument", params, optFns, + addGetInstanceIdentityDocumentMiddleware, + ) + if err != nil { + return nil, err + } + + out := result.(*GetInstanceIdentityDocumentOutput) + out.ResultMetadata = metadata + return out, nil +} + +// GetInstanceIdentityDocumentInput provides the input parameters for +// GetInstanceIdentityDocument operation. +type GetInstanceIdentityDocumentInput struct{} + +// GetInstanceIdentityDocumentOutput provides the output parameters for +// GetInstanceIdentityDocument operation. 
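+//
+// A minimal usage sketch (client and ctx are assumed to be in scope):
+//
+// doc, err := client.GetInstanceIdentityDocument(ctx, nil)
+// if err != nil {
+// // handle error
+// }
+// fmt.Println(doc.Region, doc.InstanceID)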
+type GetInstanceIdentityDocumentOutput struct { + InstanceIdentityDocument + + ResultMetadata middleware.Metadata +} + +func addGetInstanceIdentityDocumentMiddleware(stack *middleware.Stack, options Options) error { + return addAPIRequestMiddleware(stack, + options, + buildGetInstanceIdentityDocumentPath, + buildGetInstanceIdentityDocumentOutput, + ) +} + +func buildGetInstanceIdentityDocumentPath(params interface{}) (string, error) { + return getInstanceIdentityDocumentPath, nil +} + +func buildGetInstanceIdentityDocumentOutput(resp *smithyhttp.Response) (v interface{}, err error) { + defer func() { + closeErr := resp.Body.Close() + if err == nil { + err = closeErr + } else if closeErr != nil { + err = fmt.Errorf("response body close error: %v, original error: %w", closeErr, err) + } + }() + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(resp.Body, ringBuffer) + + output := &GetInstanceIdentityDocumentOutput{} + if err = json.NewDecoder(body).Decode(&output.InstanceIdentityDocument); err != nil { + return nil, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode instance identity document, %w", err), + Snapshot: ringBuffer.Bytes(), + } + } + + return output, nil +} + +// InstanceIdentityDocument provides the shape for unmarshaling +// an instance identity document +type InstanceIdentityDocument struct { + DevpayProductCodes []string `json:"devpayProductCodes"` + MarketplaceProductCodes []string `json:"marketplaceProductCodes"` + AvailabilityZone string `json:"availabilityZone"` + PrivateIP string `json:"privateIp"` + Version string `json:"version"` + Region string `json:"region"` + InstanceID string `json:"instanceId"` + BillingProducts []string `json:"billingProducts"` + InstanceType string `json:"instanceType"` + AccountID string `json:"accountId"` + PendingTime time.Time `json:"pendingTime"` + ImageID string `json:"imageId"` + KernelID string `json:"kernelId"` + RamdiskID string `json:"ramdiskId"` + Architecture string `json:"architecture"` +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetMetadata.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetMetadata.go new file mode 100644 index 00000000000..cb0ce4c0004 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetMetadata.go @@ -0,0 +1,76 @@ +package imds + +import ( + "context" + "fmt" + "io" + + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +const getMetadataPath = "/latest/meta-data" + +// GetMetadata uses the path provided to request information from the Amazon +// EC2 Instance Metadata Service. The content will be returned as a string, or +// error if the request failed. +func (c *Client) GetMetadata(ctx context.Context, params *GetMetadataInput, optFns ...func(*Options)) (*GetMetadataOutput, error) { + if params == nil { + params = &GetMetadataInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetMetadata", params, optFns, + addGetMetadataMiddleware, + ) + if err != nil { + return nil, err + } + + out := result.(*GetMetadataOutput) + out.ResultMetadata = metadata + return out, nil +} + +// GetMetadataInput provides the input parameters for the GetMetadata +// operation. +type GetMetadataInput struct { + // The relative metadata path to retrieve. Can be empty string to retrieve + // a response containing a new line separated list of metadata resources + // available. + // + // Must not include the metadata base path. 
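+ //
+ // For example, a Path of "instance-id" requests
+ // "/latest/meta-data/instance-id".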
+ //
+ // May include a leading slash. If Path includes a trailing slash, the trailing slash
+ // will be included in the request for the resource.
+ Path string
+}
+
+// GetMetadataOutput provides the output parameters for the GetMetadata
+// operation.
+type GetMetadataOutput struct {
+ Content io.ReadCloser
+
+ ResultMetadata middleware.Metadata
+}
+
+func addGetMetadataMiddleware(stack *middleware.Stack, options Options) error {
+ return addAPIRequestMiddleware(stack,
+ options,
+ buildGetMetadataPath,
+ buildGetMetadataOutput)
+}
+
+func buildGetMetadataPath(params interface{}) (string, error) {
+ p, ok := params.(*GetMetadataInput)
+ if !ok {
+ return "", fmt.Errorf("unknown parameter type %T", params)
+ }
+
+ return appendURIPath(getMetadataPath, p.Path), nil
+}
+
+func buildGetMetadataOutput(resp *smithyhttp.Response) (interface{}, error) {
+ return &GetMetadataOutput{
+ Content: resp.Body,
+ }, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetRegion.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetRegion.go
new file mode 100644
index 00000000000..7b9b48912af
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetRegion.go
@@ -0,0 +1,72 @@
+package imds
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// GetRegion retrieves the region the instance is running in, as reported by
+// the instance identity document. An error is returned if the request fails
+// or the response cannot be parsed.
+func (c *Client) GetRegion(
+ ctx context.Context, params *GetRegionInput, optFns ...func(*Options),
+) (
+ *GetRegionOutput, error,
+) {
+ if params == nil {
+ params = &GetRegionInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetRegion", params, optFns,
+ addGetRegionMiddleware,
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetRegionOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+// GetRegionInput provides the input parameters for GetRegion operation.
+type GetRegionInput struct{}
+
+// GetRegionOutput provides the output parameters for GetRegion operation.
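+//
+// A minimal usage sketch (client and ctx are assumed to be in scope):
+//
+// out, err := client.GetRegion(ctx, nil)
+// if err != nil {
+// // handle error
+// }
+// region := out.Region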
+type GetRegionOutput struct { + Region string + + ResultMetadata middleware.Metadata +} + +func addGetRegionMiddleware(stack *middleware.Stack, options Options) error { + return addAPIRequestMiddleware(stack, + options, + buildGetInstanceIdentityDocumentPath, + buildGetRegionOutput, + ) +} + +func buildGetRegionOutput(resp *smithyhttp.Response) (interface{}, error) { + out, err := buildGetInstanceIdentityDocumentOutput(resp) + if err != nil { + return nil, err + } + + result, ok := out.(*GetInstanceIdentityDocumentOutput) + if !ok { + return nil, fmt.Errorf("unexpected instance identity document type, %T", out) + } + + region := result.Region + if len(region) == 0 { + return "", fmt.Errorf("instance metadata did not return a region value") + } + + return &GetRegionOutput{ + Region: region, + }, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetToken.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetToken.go new file mode 100644 index 00000000000..841f802c1a3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetToken.go @@ -0,0 +1,118 @@ +package imds + +import ( + "context" + "fmt" + "io" + "strconv" + "strings" + "time" + + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +const getTokenPath = "/latest/api/token" +const tokenTTLHeader = "X-Aws-Ec2-Metadata-Token-Ttl-Seconds" + +// getToken uses the duration to return a token for EC2 IMDS, or an error if +// the request failed. +func (c *Client) getToken(ctx context.Context, params *getTokenInput, optFns ...func(*Options)) (*getTokenOutput, error) { + if params == nil { + params = &getTokenInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "getToken", params, optFns, + addGetTokenMiddleware, + ) + if err != nil { + return nil, err + } + + out := result.(*getTokenOutput) + out.ResultMetadata = metadata + return out, nil +} + +type getTokenInput struct { + TokenTTL time.Duration +} + +type getTokenOutput struct { + Token string + TokenTTL time.Duration + + ResultMetadata middleware.Metadata +} + +func addGetTokenMiddleware(stack *middleware.Stack, options Options) error { + err := addRequestMiddleware(stack, + options, + "PUT", + buildGetTokenPath, + buildGetTokenOutput) + if err != nil { + return err + } + + err = stack.Serialize.Add(&tokenTTLRequestHeader{}, middleware.After) + if err != nil { + return err + } + + return nil +} + +func buildGetTokenPath(interface{}) (string, error) { + return getTokenPath, nil +} + +func buildGetTokenOutput(resp *smithyhttp.Response) (v interface{}, err error) { + defer func() { + closeErr := resp.Body.Close() + if err == nil { + err = closeErr + } else if closeErr != nil { + err = fmt.Errorf("response body close error: %v, original error: %w", closeErr, err) + } + }() + + ttlHeader := resp.Header.Get(tokenTTLHeader) + tokenTTL, err := strconv.ParseInt(ttlHeader, 10, 64) + if err != nil { + return nil, fmt.Errorf("unable to parse API token, %w", err) + } + + var token strings.Builder + if _, err = io.Copy(&token, resp.Body); err != nil { + return nil, fmt.Errorf("unable to read API token, %w", err) + } + + return &getTokenOutput{ + Token: token.String(), + TokenTTL: time.Duration(tokenTTL) * time.Second, + }, nil +} + +type tokenTTLRequestHeader struct{} + +func (*tokenTTLRequestHeader) ID() string { return "tokenTTLRequestHeader" } +func (*tokenTTLRequestHeader) HandleSerialize( + ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, 
+) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ req, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("expect HTTP transport, got %T", in.Request)
+ }
+
+ input, ok := in.Parameters.(*getTokenInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("expect getTokenInput, got %T", in.Parameters)
+ }
+
+ req.Header.Set(tokenTTLHeader, strconv.Itoa(int(input.TokenTTL/time.Second)))
+
+ return next.HandleSerialize(ctx, in)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetUserData.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetUserData.go
new file mode 100644
index 00000000000..88aa61e9ad9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetUserData.go
@@ -0,0 +1,60 @@
+package imds
+
+import (
+ "context"
+ "io"
+
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+const getUserDataPath = "/latest/user-data"
+
+// GetUserData requests the user data made available to the instance. The
+// content is returned as an io.ReadCloser, or an error if the request failed.
+func (c *Client) GetUserData(ctx context.Context, params *GetUserDataInput, optFns ...func(*Options)) (*GetUserDataOutput, error) {
+ if params == nil {
+ params = &GetUserDataInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetUserData", params, optFns,
+ addGetUserDataMiddleware,
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetUserDataOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+// GetUserDataInput provides the input parameters for the GetUserData
+// operation.
+type GetUserDataInput struct{}
+
+// GetUserDataOutput provides the output parameters for the GetUserData
+// operation.
+type GetUserDataOutput struct {
+ Content io.ReadCloser
+
+ ResultMetadata middleware.Metadata
+}
+
+func addGetUserDataMiddleware(stack *middleware.Stack, options Options) error {
+ return addAPIRequestMiddleware(stack,
+ options,
+ buildGetUserDataPath,
+ buildGetUserDataOutput)
+}
+
+func buildGetUserDataPath(params interface{}) (string, error) {
+ return getUserDataPath, nil
+}
+
+func buildGetUserDataOutput(resp *smithyhttp.Response) (interface{}, error) {
+ return &GetUserDataOutput{
+ Content: resp.Body,
+ }, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/doc.go
new file mode 100644
index 00000000000..bacdb5d21f2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/doc.go
@@ -0,0 +1,11 @@
+// Package imds provides the API client for interacting with the Amazon EC2
+// Instance Metadata Service.
+//
+// All Client operation calls have a default timeout. If the operation is not
+// completed before this timeout expires, the operation will be canceled. This
+// timeout can be overridden by providing a Context with a timeout or deadline
+// when calling the client's operations.
+//
+// See the EC2 IMDS user guide for more information on using the API.
+// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html
+package imds
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go
new file mode 100644
index 00000000000..eea163103ef
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go
@@ -0,0 +1,6 @@
+// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
+
+package imds
+
+// goModuleVersion is the tagged release for this module
+const goModuleVersion = "1.14.3"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config/resolvers.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config/resolvers.go
new file mode 100644
index 00000000000..ce774558932
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config/resolvers.go
@@ -0,0 +1,114 @@
+package config
+
+import (
+ "fmt"
+ "strings"
+)
+
+// ClientEnableState provides an enumeration of whether the client is enabled,
+// disabled, or uses the default behavior.
+type ClientEnableState uint
+
+// Enumeration values for ClientEnableState
+const (
+ ClientDefaultEnableState ClientEnableState = iota
+ ClientDisabled
+ ClientEnabled
+)
+
+// EndpointModeState is the EC2 IMDS Endpoint Configuration Mode
+type EndpointModeState uint
+
+// Enumeration values for EndpointModeState
+const (
+ EndpointModeStateUnset EndpointModeState = iota
+ EndpointModeStateIPv4
+ EndpointModeStateIPv6
+)
+
+// SetFromString sets the EndpointModeState based on the provided string value. An empty value sets EndpointModeStateUnset; unrecognized values return an error.
+func (e *EndpointModeState) SetFromString(v string) error {
+ v = strings.TrimSpace(v)
+
+ switch {
+ case len(v) == 0:
+ *e = EndpointModeStateUnset
+ case strings.EqualFold(v, "IPv6"):
+ *e = EndpointModeStateIPv6
+ case strings.EqualFold(v, "IPv4"):
+ *e = EndpointModeStateIPv4
+ default:
+ return fmt.Errorf("unknown EC2 IMDS endpoint mode, must be either IPv6 or IPv4")
+ }
+ return nil
+}
+
+// ClientEnableStateResolver is a config resolver interface for retrieving whether the IMDS client is disabled.
+type ClientEnableStateResolver interface {
+ GetEC2IMDSClientEnableState() (ClientEnableState, bool, error)
+}
+
+// EndpointModeResolver is a config resolver interface for retrieving the EndpointModeState configuration.
+type EndpointModeResolver interface {
+ GetEC2IMDSEndpointMode() (EndpointModeState, bool, error)
+}
+
+// EndpointResolver is a config resolver interface for retrieving the endpoint.
+type EndpointResolver interface {
+ GetEC2IMDSEndpoint() (string, bool, error)
+}
+
+type v1FallbackDisabledResolver interface {
+ GetEC2IMDSV1FallbackDisabled() (bool, bool)
+}
+
+// ResolveClientEnableState resolves the ClientEnableState from a list of configuration sources.
+func ResolveClientEnableState(sources []interface{}) (value ClientEnableState, found bool, err error) {
+ for _, source := range sources {
+ if resolver, ok := source.(ClientEnableStateResolver); ok {
+ value, found, err = resolver.GetEC2IMDSClientEnableState()
+ if err != nil || found {
+ return value, found, err
+ }
+ }
+ }
+ return value, found, err
+}
+
+// ResolveEndpointModeConfig resolves the EndpointModeState from a list of configuration sources.
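+//
+// Sources are consulted in order; the first source that reports the value as
+// found, or that returns an error, ends the search.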
+func ResolveEndpointModeConfig(sources []interface{}) (value EndpointModeState, found bool, err error) {
+	for _, source := range sources {
+		if resolver, ok := source.(EndpointModeResolver); ok {
+			value, found, err = resolver.GetEC2IMDSEndpointMode()
+			if err != nil || found {
+				return value, found, err
+			}
+		}
+	}
+	return value, found, err
+}
+
+// ResolveEndpointConfig resolves the endpoint from a list of configuration sources.
+func ResolveEndpointConfig(sources []interface{}) (value string, found bool, err error) {
+	for _, source := range sources {
+		if resolver, ok := source.(EndpointResolver); ok {
+			value, found, err = resolver.GetEC2IMDSEndpoint()
+			if err != nil || found {
+				return value, found, err
+			}
+		}
+	}
+	return value, found, err
+}
+
+// ResolveV1FallbackDisabled resolves whether the IMDSv1 fallback has been
+// disabled from a list of configuration sources.
+func ResolveV1FallbackDisabled(sources []interface{}) (bool, bool) {
+	for _, source := range sources {
+		if resolver, ok := source.(v1FallbackDisabledResolver); ok {
+			if v, found := resolver.GetEC2IMDSV1FallbackDisabled(); found {
+				return v, true
+			}
+		}
+	}
+	return false, false
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/request_middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/request_middleware.go
new file mode 100644
index 00000000000..c8abd64916c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/request_middleware.go
@@ -0,0 +1,285 @@
+package imds
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"io/ioutil"
+	"net/url"
+	"path"
+	"time"
+
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/aws/retry"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+func addAPIRequestMiddleware(stack *middleware.Stack,
+	options Options,
+	getPath func(interface{}) (string, error),
+	getOutput func(*smithyhttp.Response) (interface{}, error),
+) (err error) {
+	err = addRequestMiddleware(stack, options, "GET", getPath, getOutput)
+	if err != nil {
+		return err
+	}
+
+	// Token serializer build and state management.
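+	// The token provider is registered in two steps: in the Finalize step,
+	// after the retry attempt middleware, so each attempt injects a current
+	// token; and in the Deserialize step, before the operation deserializer,
+	// so 401 responses can re-enable token usage (see token_provider.go).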
+ if !options.disableAPIToken { + err = stack.Finalize.Insert(options.tokenProvider, (*retry.Attempt)(nil).ID(), middleware.After) + if err != nil { + return err + } + + err = stack.Deserialize.Insert(options.tokenProvider, "OperationDeserializer", middleware.Before) + if err != nil { + return err + } + } + + return nil +} + +func addRequestMiddleware(stack *middleware.Stack, + options Options, + method string, + getPath func(interface{}) (string, error), + getOutput func(*smithyhttp.Response) (interface{}, error), +) (err error) { + err = awsmiddleware.AddSDKAgentKey(awsmiddleware.FeatureMetadata, "ec2-imds")(stack) + if err != nil { + return err + } + + // Operation timeout + err = stack.Initialize.Add(&operationTimeout{ + DefaultTimeout: defaultOperationTimeout, + }, middleware.Before) + if err != nil { + return err + } + + // Operation Serializer + err = stack.Serialize.Add(&serializeRequest{ + GetPath: getPath, + Method: method, + }, middleware.After) + if err != nil { + return err + } + + // Operation endpoint resolver + err = stack.Serialize.Insert(&resolveEndpoint{ + Endpoint: options.Endpoint, + EndpointMode: options.EndpointMode, + }, "OperationSerializer", middleware.Before) + if err != nil { + return err + } + + // Operation Deserializer + err = stack.Deserialize.Add(&deserializeResponse{ + GetOutput: getOutput, + }, middleware.After) + if err != nil { + return err + } + + err = stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{ + LogRequest: options.ClientLogMode.IsRequest(), + LogRequestWithBody: options.ClientLogMode.IsRequestWithBody(), + LogResponse: options.ClientLogMode.IsResponse(), + LogResponseWithBody: options.ClientLogMode.IsResponseWithBody(), + }, middleware.After) + if err != nil { + return err + } + + err = addSetLoggerMiddleware(stack, options) + if err != nil { + return err + } + + // Retry support + return retry.AddRetryMiddlewares(stack, retry.AddRetryMiddlewaresOptions{ + Retryer: options.Retryer, + LogRetryAttempts: options.ClientLogMode.IsRetries(), + }) +} + +func addSetLoggerMiddleware(stack *middleware.Stack, o Options) error { + return middleware.AddSetLoggerMiddleware(stack, o.Logger) +} + +type serializeRequest struct { + GetPath func(interface{}) (string, error) + Method string +} + +func (*serializeRequest) ID() string { + return "OperationSerializer" +} + +func (m *serializeRequest) HandleSerialize( + ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, +) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + reqPath, err := m.GetPath(in.Parameters) + if err != nil { + return out, metadata, fmt.Errorf("unable to get request URL path, %w", err) + } + + request.Request.URL.Path = reqPath + request.Request.Method = m.Method + + return next.HandleSerialize(ctx, in) +} + +type deserializeResponse struct { + GetOutput func(*smithyhttp.Response) (interface{}, error) +} + +func (*deserializeResponse) ID() string { + return "OperationDeserializer" +} + +func (m *deserializeResponse) HandleDeserialize( + ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler, +) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + resp, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + 
return out, metadata, fmt.Errorf(
+			"unexpected transport response type, %T, want %T", out.RawResponse, resp)
+	}
+	defer resp.Body.Close()
+
+	// Read the full body so that the operation timeout cleanup does not race
+	// the body still being read.
+	body, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return out, metadata, fmt.Errorf("read response body failed, %w", err)
+	}
+	resp.Body = ioutil.NopCloser(bytes.NewReader(body))
+
+	// Any status outside the 2xx range (200 <= code < 300) is an error.
+	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
+		return out, metadata, &smithyhttp.ResponseError{
+			Response: resp,
+			Err:      fmt.Errorf("request to EC2 IMDS failed"),
+		}
+	}
+
+	result, err := m.GetOutput(resp)
+	if err != nil {
+		return out, metadata, fmt.Errorf(
+			"unable to get deserialized result for response, %w", err,
+		)
+	}
+	out.Result = result
+
+	return out, metadata, err
+}
+
+type resolveEndpoint struct {
+	Endpoint     string
+	EndpointMode EndpointModeState
+}
+
+func (*resolveEndpoint) ID() string {
+	return "ResolveEndpoint"
+}
+
+func (m *resolveEndpoint) HandleSerialize(
+	ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler,
+) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	req, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
+	}
+
+	var endpoint string
+	if len(m.Endpoint) > 0 {
+		endpoint = m.Endpoint
+	} else {
+		switch m.EndpointMode {
+		case EndpointModeStateIPv6:
+			endpoint = defaultIPv6Endpoint
+		case EndpointModeStateIPv4:
+			fallthrough
+		case EndpointModeStateUnset:
+			endpoint = defaultIPv4Endpoint
+		default:
+			return out, metadata, fmt.Errorf("unsupported IMDS endpoint mode")
+		}
+	}
+
+	req.URL, err = url.Parse(endpoint)
+	if err != nil {
+		return out, metadata, fmt.Errorf("failed to parse endpoint URL: %w", err)
+	}
+
+	return next.HandleSerialize(ctx, in)
+}
+
+const (
+	defaultOperationTimeout = 5 * time.Second
+)
+
+// operationTimeout adds a timeout on the middleware stack if the Context the
+// stack was called with does not have a deadline. The next middleware must
+// complete before the timeout, or the context will be canceled.
+//
+// If DefaultTimeout is zero, no default timeout will be used if the Context
+// does not have a timeout.
+//
+// The next middleware must also ensure that any resources that are also
+// canceled by the stack's context are completely consumed before returning.
+// Otherwise the timeout cleanup will race the resource being consumed
+// upstream.
+type operationTimeout struct {
+	DefaultTimeout time.Duration
+}
+
+func (*operationTimeout) ID() string { return "OperationTimeout" }
+
+func (m *operationTimeout) HandleInitialize(
+	ctx context.Context, input middleware.InitializeInput, next middleware.InitializeHandler,
+) (
+	output middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+	if _, ok := ctx.Deadline(); !ok && m.DefaultTimeout != 0 {
+		var cancelFn func()
+		ctx, cancelFn = context.WithTimeout(ctx, m.DefaultTimeout)
+		defer cancelFn()
+	}
+
+	return next.HandleInitialize(ctx, input)
+}
+
+// appendURIPath joins a URI path component to the existing path with `/`
+// separators between the path components. If the path being added ends with a
+// trailing `/`, that slash will be maintained.
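+//
+// For example (a sketch of the behavior described above):
+//
+//	appendURIPath("/latest/meta-data", "iam/") // "/latest/meta-data/iam/"
+//	appendURIPath("/latest", "user-data")      // "/latest/user-data"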
+func appendURIPath(base, add string) string {
+	reqPath := path.Join(base, add)
+	if len(add) != 0 && add[len(add)-1] == '/' {
+		reqPath += "/"
+	}
+	return reqPath
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/token_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/token_provider.go
new file mode 100644
index 00000000000..5703c6e16ad
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/token_provider.go
@@ -0,0 +1,261 @@
+package imds
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net/http"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/smithy-go"
+	"github.com/aws/smithy-go/logging"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+const (
+	// Headers for Token and TTL
+	tokenHeader     = "x-aws-ec2-metadata-token"
+	defaultTokenTTL = 5 * time.Minute
+)
+
+type tokenProvider struct {
+	client   *Client
+	tokenTTL time.Duration
+
+	token    *apiToken
+	tokenMux sync.RWMutex
+
+	disabled uint32 // Updated atomically
+}
+
+func newTokenProvider(client *Client, ttl time.Duration) *tokenProvider {
+	return &tokenProvider{
+		client:   client,
+		tokenTTL: ttl,
+	}
+}
+
+// apiToken provides the API token used by all operation calls for the EC2
+// Instance Metadata Service.
+type apiToken struct {
+	token   string
+	expires time.Time
+}
+
+var timeNow = time.Now
+
+// Expired returns if the token is expired.
+func (t *apiToken) Expired() bool {
+	// Calling Round(0) on the current time will truncate the monotonic
+	// reading only. This ensures the credential expiry time is always based
+	// on reported wall-clock time.
+	return timeNow().Round(0).After(t.expires)
+}
+
+func (t *tokenProvider) ID() string { return "APITokenProvider" }
+
+// HandleFinalize is the finalize stack middleware that, if the token provider
+// is enabled, attempts to add the cached API token to the request. If the API
+// token is not cached, it will be retrieved in a separate API call, getToken.
+//
+// For retry attempts, the handler must be added after the attempt retryer.
+//
+// If the getToken request fails, the token provider may be disabled for
+// future requests, depending on the response status code.
+func (t *tokenProvider) HandleFinalize(
+	ctx context.Context, input middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+	out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	if t.fallbackEnabled() && !t.enabled() {
+		// Short-circuit to the insecure data flow if the token provider is
+		// disabled.
+		return next.HandleFinalize(ctx, input)
+	}
+
+	req, ok := input.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("unexpected transport request type %T", input.Request)
+	}
+
+	tok, err := t.getToken(ctx)
+	if err != nil {
+		// If the error allows the token to downgrade to the insecure flow,
+		// allow that.
+		var bypassErr *bypassTokenRetrievalError
+		if errors.As(err, &bypassErr) {
+			return next.HandleFinalize(ctx, input)
+		}
+
+		return out, metadata, fmt.Errorf("failed to get API token, %w", err)
+	}
+
+	req.Header.Set(tokenHeader, tok.token)
+
+	return next.HandleFinalize(ctx, input)
+}
+
+// HandleDeserialize is the deserialize stack middleware for determining if
+// the operation the token provider is decorating failed because of a 401
+// Unauthorized status code. If the operation failed for that reason, the
+// token provider needs to be re-enabled so that it can start adding the API
+// token to operation calls.
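+//
+// Concretely: if token requests previously failed with 403/404/405 (which
+// disables the provider and falls back to IMDSv1) and IMDS later answers an
+// operation with 401 Unauthorized, the provider re-enables itself and marks
+// the attempt retryable so the next attempt runs the secure token flow again.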
+func (t *tokenProvider) HandleDeserialize(
+	ctx context.Context, input middleware.DeserializeInput, next middleware.DeserializeHandler,
+) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, input)
+	if err == nil {
+		return out, metadata, err
+	}
+
+	resp, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, fmt.Errorf("expect HTTP transport, got %T", out.RawResponse)
+	}
+
+	if resp.StatusCode == http.StatusUnauthorized {
+		t.enable()
+		err = &retryableError{Err: err, isRetryable: true}
+	}
+
+	return out, metadata, err
+}
+
+func (t *tokenProvider) getToken(ctx context.Context) (tok *apiToken, err error) {
+	if t.fallbackEnabled() && !t.enabled() {
+		return nil, &bypassTokenRetrievalError{
+			Err: fmt.Errorf("cannot get API token, provider disabled"),
+		}
+	}
+
+	t.tokenMux.RLock()
+	tok = t.token
+	t.tokenMux.RUnlock()
+
+	if tok != nil && !tok.Expired() {
+		return tok, nil
+	}
+
+	tok, err = t.updateToken(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	return tok, nil
+}
+
+func (t *tokenProvider) updateToken(ctx context.Context) (*apiToken, error) {
+	t.tokenMux.Lock()
+	defer t.tokenMux.Unlock()
+
+	// Prevent concurrent requests from re-retrieving the token.
+	if t.token != nil && !t.token.Expired() {
+		tok := t.token
+		return tok, nil
+	}
+
+	result, err := t.client.getToken(ctx, &getTokenInput{
+		TokenTTL: t.tokenTTL,
+	})
+	if err != nil {
+		var statusErr interface{ HTTPStatusCode() int }
+		if errors.As(err, &statusErr) {
+			switch statusErr.HTTPStatusCode() {
+			// Disable future token fetches if the request failed with 403,
+			// 404, or 405.
+			case http.StatusForbidden,
+				http.StatusNotFound,
+				http.StatusMethodNotAllowed:
+
+				if t.fallbackEnabled() {
+					logger := middleware.GetLogger(ctx)
+					logger.Logf(logging.Warn, "falling back to IMDSv1: %v", err)
+					t.disable()
+				}
+
+			// 400 errors are terminal, and need to be upstreamed.
+			case http.StatusBadRequest:
+				return nil, err
+			}
+		}
+
+		// Disable if the request failed to send or timed out waiting for a
+		// response.
+		var re *smithyhttp.RequestSendError
+		var ce *smithy.CanceledError
+		if errors.As(err, &re) || errors.As(err, &ce) {
+			atomic.StoreUint32(&t.disabled, 1)
+		}
+
+		if !t.fallbackEnabled() {
+			// NOTE: getToken() is an implementation detail of some outer
+			// operation (e.g. GetMetadata). It has its own retries that have
+			// already been exhausted. Mark the underlying error as a terminal
+			// error.
+			err = &retryableError{Err: err, isRetryable: false}
+			return nil, err
+		}
+
+		// The token couldn't be retrieved; fall back to the IMDSv1 insecure
+		// flow for this request and allow the request to proceed. Future
+		// requests _may_ re-attempt fetching a token if not disabled.
+		return nil, &bypassTokenRetrievalError{Err: err}
+	}
+
+	tok := &apiToken{
+		token:   result.Token,
+		expires: timeNow().Add(result.TokenTTL),
+	}
+	t.token = tok
+
+	return tok, nil
+}
+
+// enabled returns whether the token provider is currently enabled.
+func (t *tokenProvider) enabled() bool {
+	return atomic.LoadUint32(&t.disabled) == 0
+}
+
+// fallbackEnabled returns false if EnableFallback is [aws.FalseTernary], true otherwise.
+func (t *tokenProvider) fallbackEnabled() bool {
+	switch t.client.options.EnableFallback {
+	case aws.FalseTernary:
+		return false
+	default:
+		return true
+	}
+}
+
+// disable disables the token provider so that it will no longer attempt to
+// inject the token, nor request updates.
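+//
+// Note that callers can also opt out of the IMDSv1 fallback entirely via the
+// client options; for illustration (a hedged sketch, from within this
+// package):
+//
+//	client := New(Options{EnableFallback: aws.FalseTernary})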
+func (t *tokenProvider) disable() {
+	atomic.StoreUint32(&t.disabled, 1)
+}
+
+// enable enables the token provider to start refreshing tokens, and adding
+// them to pending requests.
+func (t *tokenProvider) enable() {
+	t.tokenMux.Lock()
+	t.token = nil
+	t.tokenMux.Unlock()
+	atomic.StoreUint32(&t.disabled, 0)
+}
+
+type bypassTokenRetrievalError struct {
+	Err error
+}
+
+func (e *bypassTokenRetrievalError) Error() string {
+	return fmt.Sprintf("bypass token retrieval, %v", e.Err)
+}
+
+func (e *bypassTokenRetrievalError) Unwrap() error { return e.Err }
+
+type retryableError struct {
+	Err         error
+	isRetryable bool
+}
+
+func (e *retryableError) RetryableError() bool { return e.isRetryable }
+
+func (e *retryableError) Error() string { return e.Err.Error() }
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/scheme.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/scheme.go
new file mode 100644
index 00000000000..ff229c048ff
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/scheme.go
@@ -0,0 +1,186 @@
+package auth
+
+import (
+	"context"
+	"fmt"
+
+	smithy "github.com/aws/smithy-go"
+	"github.com/aws/smithy-go/middleware"
+)
+
+// SigV4 is a constant representing
+// Authentication Scheme Signature Version 4
+const SigV4 = "sigv4"
+
+// SigV4A is a constant representing
+// Authentication Scheme Signature Version 4A
+const SigV4A = "sigv4a"
+
+// None is a constant representing the
+// None Authentication Scheme
+const None = "none"
+
+// SupportedSchemes is the set of supported AWS
+// authentication schemes.
+var SupportedSchemes = map[string]bool{
+	SigV4:  true,
+	SigV4A: true,
+	None:   true,
+}
+
+// AuthenticationScheme is a representation of
+// AWS authentication schemes
+type AuthenticationScheme interface {
+	isAuthenticationScheme()
+}
+
+// AuthenticationSchemeV4 is an AWS SigV4 representation
+type AuthenticationSchemeV4 struct {
+	Name                  string
+	SigningName           *string
+	SigningRegion         *string
+	DisableDoubleEncoding *bool
+}
+
+func (a *AuthenticationSchemeV4) isAuthenticationScheme() {}
+
+// AuthenticationSchemeV4A is an AWS SigV4A representation
+type AuthenticationSchemeV4A struct {
+	Name                  string
+	SigningName           *string
+	SigningRegionSet      []string
+	DisableDoubleEncoding *bool
+}
+
+func (a *AuthenticationSchemeV4A) isAuthenticationScheme() {}
+
+// AuthenticationSchemeNone is a representation for the none auth scheme
+type AuthenticationSchemeNone struct{}
+
+func (a *AuthenticationSchemeNone) isAuthenticationScheme() {}
+
+// NoAuthenticationSchemesFoundError is used in signaling
+// that no authentication schemes have been specified.
+type NoAuthenticationSchemesFoundError struct{}
+
+func (e *NoAuthenticationSchemesFoundError) Error() string {
+	return fmt.Sprint("No authentication schemes specified.")
+}
+
+// UnSupportedAuthenticationSchemeSpecifiedError is used in
+// signaling that only unsupported authentication schemes
+// were specified.
+type UnSupportedAuthenticationSchemeSpecifiedError struct {
+	UnsupportedSchemes []string
+}
+
+func (e *UnSupportedAuthenticationSchemeSpecifiedError) Error() string {
+	return fmt.Sprint("Unsupported authentication scheme specified.")
+}
+
+// GetAuthenticationSchemes extracts the relevant authentication scheme data
+// into a custom strongly typed Go data structure.
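+//
+// For illustration, a hedged sketch of the expected property shape (the
+// property values shown here are assumptions for the example only):
+//
+//	var props smithy.Properties
+//	props.Set("authSchemes", []interface{}{
+//		map[string]interface{}{
+//			"name":          "sigv4",
+//			"signingName":   "s3",
+//			"signingRegion": "us-east-1",
+//		},
+//	})
+//	schemes, err := GetAuthenticationSchemes(&props)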
+func GetAuthenticationSchemes(p *smithy.Properties) ([]AuthenticationScheme, error) { + var result []AuthenticationScheme + if !p.Has("authSchemes") { + return nil, &NoAuthenticationSchemesFoundError{} + } + + authSchemes, _ := p.Get("authSchemes").([]interface{}) + + var unsupportedSchemes []string + for _, scheme := range authSchemes { + authScheme, _ := scheme.(map[string]interface{}) + + switch authScheme["name"] { + case SigV4: + v4Scheme := AuthenticationSchemeV4{ + Name: SigV4, + SigningName: getSigningName(authScheme), + SigningRegion: getSigningRegion(authScheme), + DisableDoubleEncoding: getDisableDoubleEncoding(authScheme), + } + result = append(result, AuthenticationScheme(&v4Scheme)) + case SigV4A: + v4aScheme := AuthenticationSchemeV4A{ + Name: SigV4A, + SigningName: getSigningName(authScheme), + SigningRegionSet: getSigningRegionSet(authScheme), + DisableDoubleEncoding: getDisableDoubleEncoding(authScheme), + } + result = append(result, AuthenticationScheme(&v4aScheme)) + case None: + noneScheme := AuthenticationSchemeNone{} + result = append(result, AuthenticationScheme(&noneScheme)) + default: + unsupportedSchemes = append(unsupportedSchemes, authScheme["name"].(string)) + continue + } + } + + if len(result) == 0 { + return nil, &UnSupportedAuthenticationSchemeSpecifiedError{ + UnsupportedSchemes: unsupportedSchemes, + } + } + + return result, nil +} + +type disableDoubleEncoding struct{} + +// SetDisableDoubleEncoding sets or modifies the disable double encoding option +// on the context. +// +// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues +// to clear all stack values. +func SetDisableDoubleEncoding(ctx context.Context, value bool) context.Context { + return middleware.WithStackValue(ctx, disableDoubleEncoding{}, value) +} + +// GetDisableDoubleEncoding retrieves the disable double encoding option +// from the context. +// +// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues +// to clear all stack values. 
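+//
+// For illustration, paired with SetDisableDoubleEncoding (a hedged sketch
+// from inside a middleware handler):
+//
+//	ctx = SetDisableDoubleEncoding(ctx, true)
+//	if v, ok := GetDisableDoubleEncoding(ctx); ok && v {
+//		// double encoding is disabled for this stack
+//	}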
+func GetDisableDoubleEncoding(ctx context.Context) (value bool, ok bool) { + value, ok = middleware.GetStackValue(ctx, disableDoubleEncoding{}).(bool) + return value, ok +} + +func getSigningName(authScheme map[string]interface{}) *string { + signingName, ok := authScheme["signingName"].(string) + if !ok || signingName == "" { + return nil + } + return &signingName +} + +func getSigningRegionSet(authScheme map[string]interface{}) []string { + untypedSigningRegionSet, ok := authScheme["signingRegionSet"].([]interface{}) + if !ok { + return nil + } + signingRegionSet := []string{} + for _, item := range untypedSigningRegionSet { + signingRegionSet = append(signingRegionSet, item.(string)) + } + return signingRegionSet +} + +func getSigningRegion(authScheme map[string]interface{}) *string { + signingRegion, ok := authScheme["signingRegion"].(string) + if !ok || signingRegion == "" { + return nil + } + return &signingRegion +} + +func getDisableDoubleEncoding(authScheme map[string]interface{}) *bool { + disableDoubleEncoding, ok := authScheme["disableDoubleEncoding"].(bool) + if !ok { + return nil + } + return &disableDoubleEncoding +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md new file mode 100644 index 00000000000..d260b444dca --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md @@ -0,0 +1,223 @@ +# v1.2.2 (2023-11-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.1 (2023-11-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.0 (2023-10-31) + +* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.43 (2023-10-12) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.42 (2023-10-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.41 (2023-08-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.40 (2023-08-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.39 (2023-08-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.38 (2023-08-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.37 (2023-07-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.36 (2023-07-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.35 (2023-07-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.34 (2023-06-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.33 (2023-04-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.32 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.31 (2023-03-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.30 (2023-03-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.29 (2023-02-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.28 (2023-02-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.27 (2022-12-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.26 (2022-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.25 (2022-10-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.24 (2022-10-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.23 (2022-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.22 (2022-09-14) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.21 (2022-09-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.20 (2022-08-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.19 (2022-08-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.18 (2022-08-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.17 (2022-08-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.16 (2022-08-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.15 (2022-08-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.14 (2022-07-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.13 (2022-06-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.12 (2022-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.11 (2022-05-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.10 (2022-04-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.9 (2022-03-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.8 (2022-03-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.7 (2022-03-23) + +* 
**Dependency Update**: Updated to the latest SDK module versions + +# v1.1.6 (2022-03-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.5 (2022-02-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.4 (2022-01-14) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.3 (2022-01-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.2 (2021-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.1 (2021-11-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.0 (2021-11-06) + +* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.7 (2021-10-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.6 (2021-10-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.5 (2021-09-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.4 (2021-08-27) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.3 (2021-08-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.2 (2021-08-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.1 (2021-07-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.0 (2021-06-25) + +* **Release**: Release new modules +* **Dependency Update**: Updated to the latest SDK module versions + diff --git a/vendor/go.opentelemetry.io/collector/confmap/provider/httpsprovider/LICENSE b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/LICENSE.txt similarity index 100% rename from vendor/go.opentelemetry.io/collector/confmap/provider/httpsprovider/LICENSE rename to vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/LICENSE.txt diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/config.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/config.go new file mode 100644 index 00000000000..cd4d19b8982 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/config.go @@ -0,0 +1,65 @@ +package configsources + +import ( + "context" + "github.com/aws/aws-sdk-go-v2/aws" +) + +// EnableEndpointDiscoveryProvider is an interface for retrieving external configuration value +// for Enable Endpoint Discovery +type EnableEndpointDiscoveryProvider interface { + GetEnableEndpointDiscovery(ctx context.Context) (value aws.EndpointDiscoveryEnableState, found bool, err error) +} + +// ResolveEnableEndpointDiscovery extracts the first instance of a EnableEndpointDiscoveryProvider from the config slice. +// Additionally returns a aws.EndpointDiscoveryEnableState to indicate if the value was found in provided configs, +// and error if one is encountered. 
+func ResolveEnableEndpointDiscovery(ctx context.Context, configs []interface{}) (value aws.EndpointDiscoveryEnableState, found bool, err error) {
+	for _, cfg := range configs {
+		if p, ok := cfg.(EnableEndpointDiscoveryProvider); ok {
+			value, found, err = p.GetEnableEndpointDiscovery(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// UseDualStackEndpointProvider is an interface for retrieving external configuration values for UseDualStackEndpoint
+type UseDualStackEndpointProvider interface {
+	GetUseDualStackEndpoint(context.Context) (value aws.DualStackEndpointState, found bool, err error)
+}
+
+// ResolveUseDualStackEndpoint extracts the first instance of a UseDualStackEndpointProvider from the config slice.
+// Additionally returns a boolean to indicate if the value was found in provided configs, and error if one is encountered.
+func ResolveUseDualStackEndpoint(ctx context.Context, configs []interface{}) (value aws.DualStackEndpointState, found bool, err error) {
+	for _, cfg := range configs {
+		if p, ok := cfg.(UseDualStackEndpointProvider); ok {
+			value, found, err = p.GetUseDualStackEndpoint(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// UseFIPSEndpointProvider is an interface for retrieving external configuration values for UseFIPSEndpoint
+type UseFIPSEndpointProvider interface {
+	GetUseFIPSEndpoint(context.Context) (value aws.FIPSEndpointState, found bool, err error)
+}
+
+// ResolveUseFIPSEndpoint extracts the first instance of a UseFIPSEndpointProvider from the config slice.
+// Additionally, returns a boolean to indicate if the value was found in provided configs, and error if one is encountered.
+func ResolveUseFIPSEndpoint(ctx context.Context, configs []interface{}) (value aws.FIPSEndpointState, found bool, err error) {
+	for _, cfg := range configs {
+		if p, ok := cfg.(UseFIPSEndpointProvider); ok {
+			value, found, err = p.GetUseFIPSEndpoint(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/endpoints.go
new file mode 100644
index 00000000000..e7835f85241
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/endpoints.go
@@ -0,0 +1,57 @@
+package configsources
+
+import (
+	"context"
+)
+
+// ServiceBaseEndpointProvider is needed to search for all providers
+// that provide a configured service endpoint
+type ServiceBaseEndpointProvider interface {
+	GetServiceBaseEndpoint(ctx context.Context, sdkID string) (string, bool, error)
+}
+
+// IgnoreConfiguredEndpointsProvider is needed to search for all providers
+// that provide a flag to disable configured endpoints.
+//
+// Currently duplicated from github.com/aws/aws-sdk-go-v2/config because
+// service packages cannot import github.com/aws/aws-sdk-go-v2/config
+// due to a resulting import cycle.
+type IgnoreConfiguredEndpointsProvider interface {
+	GetIgnoreConfiguredEndpoints(ctx context.Context) (bool, bool, error)
+}
+
+// GetIgnoreConfiguredEndpoints is used to know when to disable the
+// configured endpoints feature.
+//
+// Currently duplicated from github.com/aws/aws-sdk-go-v2/config because
+// service packages cannot import github.com/aws/aws-sdk-go-v2/config
+// due to a resulting import cycle.
+func GetIgnoreConfiguredEndpoints(ctx context.Context, configs []interface{}) (value bool, found bool, err error) {
+	for _, cfg := range configs {
+		if p, ok := cfg.(IgnoreConfiguredEndpointsProvider); ok {
+			value, found, err = p.GetIgnoreConfiguredEndpoints(ctx)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
+
+// ResolveServiceBaseEndpoint is used to retrieve service endpoints from
+// configured sources while allowing for configured endpoints to be disabled.
+func ResolveServiceBaseEndpoint(ctx context.Context, sdkID string, configs []interface{}) (value string, found bool, err error) {
+	if val, found, _ := GetIgnoreConfiguredEndpoints(ctx, configs); found && val {
+		return "", false, nil
+	}
+
+	for _, cs := range configs {
+		if p, ok := cs.(ServiceBaseEndpointProvider); ok {
+			value, found, err = p.GetServiceBaseEndpoint(context.Background(), sdkID)
+			if err != nil || found {
+				break
+			}
+		}
+	}
+	return
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go
new file mode 100644
index 00000000000..991b7b84abe
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go
@@ -0,0 +1,6 @@
+// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
+
+package configsources
+
+// goModuleVersion is the tagged release for this module
+const goModuleVersion = "1.2.2"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/arn.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/arn.go
new file mode 100644
index 00000000000..e6223dd3b3e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/arn.go
@@ -0,0 +1,94 @@
+package awsrulesfn
+
+import (
+	"strings"
+)
+
+// ARN provides AWS ARN components broken out into a data structure.
+type ARN struct {
+	Partition  string
+	Service    string
+	Region     string
+	AccountId  string
+	ResourceId OptionalStringSlice
+}
+
+const (
+	arnDelimiters      = ":"
+	resourceDelimiters = "/:"
+	arnSections        = 6
+	arnPrefix          = "arn:"
+
+	// zero-indexed
+	sectionPartition = 1
+	sectionService   = 2
+	sectionRegion    = 3
+	sectionAccountID = 4
+	sectionResource  = 5
+)
+
+// ParseARN returns an [ARN] value parsed from the input string provided. If
+// the ARN cannot be parsed, nil will be returned.
+func ParseARN(input string) *ARN {
+	if !strings.HasPrefix(input, arnPrefix) {
+		return nil
+	}
+
+	sections := strings.SplitN(input, arnDelimiters, arnSections)
+	if numSections := len(sections); numSections != arnSections {
+		return nil
+	}
+
+	if sections[sectionPartition] == "" {
+		return nil
+	}
+	if sections[sectionService] == "" {
+		return nil
+	}
+	if sections[sectionResource] == "" {
+		return nil
+	}
+
+	return &ARN{
+		Partition:  sections[sectionPartition],
+		Service:    sections[sectionService],
+		Region:     sections[sectionRegion],
+		AccountId:  sections[sectionAccountID],
+		ResourceId: splitResource(sections[sectionResource]),
+	}
+}
+
+// splitResource splits the resource components by the ARN resource delimiters.
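+//
+// For example (a sketch of the expected splits):
+//
+//	splitResource("bucket/key/name") // []string{"bucket", "key", "name"}
+//	splitResource("role/path:name")  // []string{"role", "path", "name"}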
+func splitResource(v string) []string { + var parts []string + var offset int + + for offset <= len(v) { + idx := strings.IndexAny(v[offset:], "/:") + if idx < 0 { + parts = append(parts, v[offset:]) + break + } + parts = append(parts, v[offset:idx+offset]) + offset += idx + 1 + } + + return parts +} + +// OptionalStringSlice provides a helper to safely get the index of a string +// slice that may be out of bounds. Returns pointer to string if index is +// valid. Otherwise returns nil. +type OptionalStringSlice []string + +// Get returns a string pointer of the string at index i if the index is valid. +// Otherwise returns nil. +func (s OptionalStringSlice) Get(i int) *string { + if i < 0 || i >= len(s) { + return nil + } + + v := s[i] + return &v +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/doc.go new file mode 100644 index 00000000000..d5a365853f8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/doc.go @@ -0,0 +1,3 @@ +// Package awsrulesfn provides AWS focused endpoint rule functions for +// evaluating endpoint resolution rules. +package awsrulesfn diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/generate.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/generate.go new file mode 100644 index 00000000000..df72da97ce3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/generate.go @@ -0,0 +1,7 @@ +//go:build codegen +// +build codegen + +package awsrulesfn + +//go:generate go run -tags codegen ./internal/partition/codegen.go -model partitions.json -output partitions.go +//go:generate gofmt -w -s . diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/host.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/host.go new file mode 100644 index 00000000000..637e5fc18e4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/host.go @@ -0,0 +1,51 @@ +package awsrulesfn + +import ( + "net" + "strings" + + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// IsVirtualHostableS3Bucket returns if the input is a DNS compatible bucket +// name and can be used with Amazon S3 virtual hosted style addressing. Similar +// to [rulesfn.IsValidHostLabel] with the added restriction that the length of label +// must be [3:63] characters long, all lowercase, and not formatted as an IP +// address. 
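+//
+// For example (a sketch of the checks described above):
+//
+//	IsVirtualHostableS3Bucket("my-bucket", false) // true
+//	IsVirtualHostableS3Bucket("MyBucket", false)  // false: uppercase letters
+//	IsVirtualHostableS3Bucket("127.0.0.1", false) // false: formatted as an IP address
+//	IsVirtualHostableS3Bucket("a.b", true)        // false: labels shorter than 3 characters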
+func IsVirtualHostableS3Bucket(input string, allowSubDomains bool) bool { + // input should not be formatted as an IP address + // NOTE: this will technically trip up on IPv6 hosts with zone IDs, but + // validation further down will catch that anyway (it's guaranteed to have + // unfriendly characters % and : if that's the case) + if net.ParseIP(input) != nil { + return false + } + + var labels []string + if allowSubDomains { + labels = strings.Split(input, ".") + } else { + labels = []string{input} + } + + for _, label := range labels { + // validate special length constraints + if l := len(label); l < 3 || l > 63 { + return false + } + + // Validate no capital letters + for _, r := range label { + if r >= 'A' && r <= 'Z' { + return false + } + } + + // Validate valid host label + if !smithyhttp.ValidHostLabel(label) { + return false + } + } + + return true +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partition.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partition.go new file mode 100644 index 00000000000..ba6032758a5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partition.go @@ -0,0 +1,75 @@ +package awsrulesfn + +import "regexp" + +// Partition provides the metadata describing an AWS partition. +type Partition struct { + ID string `json:"id"` + Regions map[string]RegionOverrides `json:"regions"` + RegionRegex string `json:"regionRegex"` + DefaultConfig PartitionConfig `json:"outputs"` +} + +// PartitionConfig provides the endpoint metadata for an AWS region or partition. +type PartitionConfig struct { + Name string `json:"name"` + DnsSuffix string `json:"dnsSuffix"` + DualStackDnsSuffix string `json:"dualStackDnsSuffix"` + SupportsFIPS bool `json:"supportsFIPS"` + SupportsDualStack bool `json:"supportsDualStack"` +} + +type RegionOverrides struct { + Name *string `json:"name"` + DnsSuffix *string `json:"dnsSuffix"` + DualStackDnsSuffix *string `json:"dualStackDnsSuffix"` + SupportsFIPS *bool `json:"supportsFIPS"` + SupportsDualStack *bool `json:"supportsDualStack"` +} + +const defaultPartition = "aws" + +func getPartition(partitions []Partition, region string) *PartitionConfig { + for _, partition := range partitions { + if v, ok := partition.Regions[region]; ok { + p := mergeOverrides(partition.DefaultConfig, v) + return &p + } + } + + for _, partition := range partitions { + regionRegex := regexp.MustCompile(partition.RegionRegex) + if regionRegex.MatchString(region) { + v := partition.DefaultConfig + return &v + } + } + + for _, partition := range partitions { + if partition.ID == defaultPartition { + v := partition.DefaultConfig + return &v + } + } + + return nil +} + +func mergeOverrides(into PartitionConfig, from RegionOverrides) PartitionConfig { + if from.Name != nil { + into.Name = *from.Name + } + if from.DnsSuffix != nil { + into.DnsSuffix = *from.DnsSuffix + } + if from.DualStackDnsSuffix != nil { + into.DualStackDnsSuffix = *from.DualStackDnsSuffix + } + if from.SupportsFIPS != nil { + into.SupportsFIPS = *from.SupportsFIPS + } + if from.SupportsDualStack != nil { + into.SupportsDualStack = *from.SupportsDualStack + } + return into +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go new file mode 100644 index 00000000000..7ea49d4ea40 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go @@ -0,0 +1,343 
@@ +// Code generated by endpoint/awsrulesfn/internal/partition. DO NOT EDIT. + +package awsrulesfn + +// GetPartition returns an AWS [Partition] for the region provided. If the +// partition cannot be determined nil will be returned. +func GetPartition(region string) *PartitionConfig { + return getPartition(partitions, region) +} + +var partitions = []Partition{ + { + ID: "aws", + RegionRegex: "^(us|eu|ap|sa|ca|me|af)\\-\\w+\\-\\d+$", + DefaultConfig: PartitionConfig{ + Name: "aws", + DnsSuffix: "amazonaws.com", + DualStackDnsSuffix: "api.aws", + SupportsFIPS: true, + SupportsDualStack: true, + }, + Regions: map[string]RegionOverrides{ + "af-south-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "ap-east-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "ap-northeast-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "ap-northeast-2": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "ap-northeast-3": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "ap-south-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "ap-south-2": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "ap-southeast-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "ap-southeast-2": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "ap-southeast-3": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "aws-global": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "ca-central-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "eu-central-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "eu-central-2": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "eu-north-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "eu-south-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "eu-south-2": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "eu-west-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "eu-west-2": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "eu-west-3": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "me-central-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "me-south-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "sa-east-1": { + Name: nil, + DnsSuffix: nil, + 
DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "us-east-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "us-east-2": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "us-west-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "us-west-2": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + }, + }, + { + ID: "aws-cn", + RegionRegex: "^cn\\-\\w+\\-\\d+$", + DefaultConfig: PartitionConfig{ + Name: "aws-cn", + DnsSuffix: "amazonaws.com.cn", + DualStackDnsSuffix: "api.amazonwebservices.com.cn", + SupportsFIPS: true, + SupportsDualStack: true, + }, + Regions: map[string]RegionOverrides{ + "aws-cn-global": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "cn-north-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "cn-northwest-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + }, + }, + { + ID: "aws-us-gov", + RegionRegex: "^us\\-gov\\-\\w+\\-\\d+$", + DefaultConfig: PartitionConfig{ + Name: "aws-us-gov", + DnsSuffix: "amazonaws.com", + DualStackDnsSuffix: "api.aws", + SupportsFIPS: true, + SupportsDualStack: true, + }, + Regions: map[string]RegionOverrides{ + "aws-us-gov-global": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "us-gov-east-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "us-gov-west-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + }, + }, + { + ID: "aws-iso", + RegionRegex: "^us\\-iso\\-\\w+\\-\\d+$", + DefaultConfig: PartitionConfig{ + Name: "aws-iso", + DnsSuffix: "c2s.ic.gov", + DualStackDnsSuffix: "c2s.ic.gov", + SupportsFIPS: true, + SupportsDualStack: false, + }, + Regions: map[string]RegionOverrides{ + "aws-iso-global": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "us-iso-east-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "us-iso-west-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + }, + }, + { + ID: "aws-iso-b", + RegionRegex: "^us\\-isob\\-\\w+\\-\\d+$", + DefaultConfig: PartitionConfig{ + Name: "aws-iso-b", + DnsSuffix: "sc2s.sgov.gov", + DualStackDnsSuffix: "sc2s.sgov.gov", + SupportsFIPS: true, + SupportsDualStack: false, + }, + Regions: map[string]RegionOverrides{ + "aws-iso-b-global": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "us-isob-east-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + }, + }, +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json new file mode 100644 index 00000000000..ab107ca5511 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json @@ -0,0 
+1,213 @@ +{ + "partitions" : [ { + "id" : "aws", + "outputs" : { + "dnsSuffix" : "amazonaws.com", + "dualStackDnsSuffix" : "api.aws", + "implicitGlobalRegion" : "us-east-1", + "name" : "aws", + "supportsDualStack" : true, + "supportsFIPS" : true + }, + "regionRegex" : "^(us|eu|ap|sa|ca|me|af|il)\\-\\w+\\-\\d+$", + "regions" : { + "af-south-1" : { + "description" : "Africa (Cape Town)" + }, + "ap-east-1" : { + "description" : "Asia Pacific (Hong Kong)" + }, + "ap-northeast-1" : { + "description" : "Asia Pacific (Tokyo)" + }, + "ap-northeast-2" : { + "description" : "Asia Pacific (Seoul)" + }, + "ap-northeast-3" : { + "description" : "Asia Pacific (Osaka)" + }, + "ap-south-1" : { + "description" : "Asia Pacific (Mumbai)" + }, + "ap-south-2" : { + "description" : "Asia Pacific (Hyderabad)" + }, + "ap-southeast-1" : { + "description" : "Asia Pacific (Singapore)" + }, + "ap-southeast-2" : { + "description" : "Asia Pacific (Sydney)" + }, + "ap-southeast-3" : { + "description" : "Asia Pacific (Jakarta)" + }, + "ap-southeast-4" : { + "description" : "Asia Pacific (Melbourne)" + }, + "aws-global" : { + "description" : "AWS Standard global region" + }, + "ca-central-1" : { + "description" : "Canada (Central)" + }, + "eu-central-1" : { + "description" : "Europe (Frankfurt)" + }, + "eu-central-2" : { + "description" : "Europe (Zurich)" + }, + "eu-north-1" : { + "description" : "Europe (Stockholm)" + }, + "eu-south-1" : { + "description" : "Europe (Milan)" + }, + "eu-south-2" : { + "description" : "Europe (Spain)" + }, + "eu-west-1" : { + "description" : "Europe (Ireland)" + }, + "eu-west-2" : { + "description" : "Europe (London)" + }, + "eu-west-3" : { + "description" : "Europe (Paris)" + }, + "il-central-1" : { + "description" : "Israel (Tel Aviv)" + }, + "me-central-1" : { + "description" : "Middle East (UAE)" + }, + "me-south-1" : { + "description" : "Middle East (Bahrain)" + }, + "sa-east-1" : { + "description" : "South America (Sao Paulo)" + }, + "us-east-1" : { + "description" : "US East (N. Virginia)" + }, + "us-east-2" : { + "description" : "US East (Ohio)" + }, + "us-west-1" : { + "description" : "US West (N. 
California)" + }, + "us-west-2" : { + "description" : "US West (Oregon)" + } + } + }, { + "id" : "aws-cn", + "outputs" : { + "dnsSuffix" : "amazonaws.com.cn", + "dualStackDnsSuffix" : "api.amazonwebservices.com.cn", + "implicitGlobalRegion" : "cn-northwest-1", + "name" : "aws-cn", + "supportsDualStack" : true, + "supportsFIPS" : true + }, + "regionRegex" : "^cn\\-\\w+\\-\\d+$", + "regions" : { + "aws-cn-global" : { + "description" : "AWS China global region" + }, + "cn-north-1" : { + "description" : "China (Beijing)" + }, + "cn-northwest-1" : { + "description" : "China (Ningxia)" + } + } + }, { + "id" : "aws-us-gov", + "outputs" : { + "dnsSuffix" : "amazonaws.com", + "dualStackDnsSuffix" : "api.aws", + "implicitGlobalRegion" : "us-gov-west-1", + "name" : "aws-us-gov", + "supportsDualStack" : true, + "supportsFIPS" : true + }, + "regionRegex" : "^us\\-gov\\-\\w+\\-\\d+$", + "regions" : { + "aws-us-gov-global" : { + "description" : "AWS GovCloud (US) global region" + }, + "us-gov-east-1" : { + "description" : "AWS GovCloud (US-East)" + }, + "us-gov-west-1" : { + "description" : "AWS GovCloud (US-West)" + } + } + }, { + "id" : "aws-iso", + "outputs" : { + "dnsSuffix" : "c2s.ic.gov", + "dualStackDnsSuffix" : "c2s.ic.gov", + "implicitGlobalRegion" : "us-iso-east-1", + "name" : "aws-iso", + "supportsDualStack" : false, + "supportsFIPS" : true + }, + "regionRegex" : "^us\\-iso\\-\\w+\\-\\d+$", + "regions" : { + "aws-iso-global" : { + "description" : "AWS ISO (US) global region" + }, + "us-iso-east-1" : { + "description" : "US ISO East" + }, + "us-iso-west-1" : { + "description" : "US ISO WEST" + } + } + }, { + "id" : "aws-iso-b", + "outputs" : { + "dnsSuffix" : "sc2s.sgov.gov", + "dualStackDnsSuffix" : "sc2s.sgov.gov", + "implicitGlobalRegion" : "us-isob-east-1", + "name" : "aws-iso-b", + "supportsDualStack" : false, + "supportsFIPS" : true + }, + "regionRegex" : "^us\\-isob\\-\\w+\\-\\d+$", + "regions" : { + "aws-iso-b-global" : { + "description" : "AWS ISOB (US) global region" + }, + "us-isob-east-1" : { + "description" : "US ISOB East (Ohio)" + } + } + }, { + "id" : "aws-iso-e", + "outputs" : { + "dnsSuffix" : "cloud.adc-e.uk", + "dualStackDnsSuffix" : "cloud.adc-e.uk", + "implicitGlobalRegion" : "eu-isoe-west-1", + "name" : "aws-iso-e", + "supportsDualStack" : false, + "supportsFIPS" : true + }, + "regionRegex" : "^eu\\-isoe\\-\\w+\\-\\d+$", + "regions" : { } + }, { + "id" : "aws-iso-f", + "outputs" : { + "dnsSuffix" : "csp.hci.ic.gov", + "dualStackDnsSuffix" : "csp.hci.ic.gov", + "implicitGlobalRegion" : "us-isof-south-1", + "name" : "aws-iso-f", + "supportsDualStack" : false, + "supportsFIPS" : true + }, + "regionRegex" : "^us\\-isof\\-\\w+\\-\\d+$", + "regions" : { } + } ], + "version" : "1.1" +} \ No newline at end of file diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md new file mode 100644 index 00000000000..1a188a5715b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md @@ -0,0 +1,196 @@ +# v2.5.2 (2023-11-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.5.1 (2023-11-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.5.0 (2023-10-31) + +* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.37 (2023-10-12) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.36 (2023-10-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.35 (2023-08-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.34 (2023-08-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.33 (2023-08-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.32 (2023-08-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.31 (2023-07-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.30 (2023-07-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.29 (2023-07-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.28 (2023-06-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.27 (2023-04-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.26 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.25 (2023-03-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.24 (2023-03-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.23 (2023-02-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.22 (2023-02-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.21 (2022-12-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.20 (2022-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.19 (2022-10-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.18 (2022-10-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.17 (2022-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.16 (2022-09-14) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.15 (2022-09-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.14 (2022-08-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.13 (2022-08-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.12 (2022-08-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.11 (2022-08-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.10 (2022-08-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.9 (2022-08-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.8 (2022-07-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.7 (2022-06-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.6 (2022-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.5 (2022-05-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.4 (2022-04-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.3 (2022-03-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.2 (2022-03-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.1 (2022-03-23) + +* 
**Dependency Update**: Updated to the latest SDK module versions + +# v2.4.0 (2022-03-08) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.3.0 (2022-02-24) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.2.0 (2022-01-14) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.1.0 (2022-01-07) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.0.2 (2021-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.0.1 (2021-11-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.0.0 (2021-11-06) + +* **Release**: Endpoint Variant Model Support +* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + diff --git a/vendor/go.opentelemetry.io/collector/confmap/provider/yamlprovider/LICENSE b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/LICENSE.txt similarity index 100% rename from vendor/go.opentelemetry.io/collector/confmap/provider/yamlprovider/LICENSE rename to vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/LICENSE.txt diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/endpoints.go new file mode 100644 index 00000000000..32251a7e3cc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/endpoints.go @@ -0,0 +1,302 @@ +package endpoints + +import ( + "fmt" + "github.com/aws/smithy-go/logging" + "regexp" + "strings" + + "github.com/aws/aws-sdk-go-v2/aws" +) + +// DefaultKey is a compound map key of a variant and other values. +type DefaultKey struct { + Variant EndpointVariant + ServiceVariant ServiceVariant +} + +// EndpointKey is a compound map key of a region and associated variant value. +type EndpointKey struct { + Region string + Variant EndpointVariant + ServiceVariant ServiceVariant +} + +// EndpointVariant is a bit field to describe the endpoints attributes. +type EndpointVariant uint64 + +const ( + // FIPSVariant indicates that the endpoint is FIPS capable. + FIPSVariant EndpointVariant = 1 << (64 - 1 - iota) + + // DualStackVariant indicates that the endpoint is DualStack capable. + DualStackVariant +) + +// ServiceVariant is a bit field to describe the service endpoint attributes. +type ServiceVariant uint64 + +const ( + defaultProtocol = "https" + defaultSigner = "v4" +) + +var ( + protocolPriority = []string{"https", "http"} + signerPriority = []string{"v4", "s3v4"} +) + +// Options provide configuration needed to direct how endpoints are resolved. +type Options struct { + // Logger is a logging implementation that log events should be sent to. + Logger logging.Logger + + // LogDeprecated indicates that deprecated endpoints should be logged to the provided logger. + LogDeprecated bool + + // ResolvedRegion is the resolved region string. If provided (non-zero length) it takes priority + // over the region name passed to the ResolveEndpoint call. 
+ ResolvedRegion string + + // Disable usage of HTTPS (TLS / SSL) + DisableHTTPS bool + + // Instruct the resolver to use a service endpoint that supports dual-stack. + // If a service does not have a dual-stack endpoint an error will be returned by the resolver. + UseDualStackEndpoint aws.DualStackEndpointState + + // Instruct the resolver to use a service endpoint that supports FIPS. + // If a service does not have a FIPS endpoint an error will be returned by the resolver. + UseFIPSEndpoint aws.FIPSEndpointState + + // ServiceVariant is a bitfield of service specified endpoint variant data. + ServiceVariant ServiceVariant +} + +// GetEndpointVariant returns the EndpointVariant for the variant associated with the options. +func (o Options) GetEndpointVariant() (v EndpointVariant) { + if o.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled { + v |= DualStackVariant + } + if o.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled { + v |= FIPSVariant + } + return v +} + +// Partitions is a slice of Partition. +type Partitions []Partition + +// ResolveEndpoint resolves a service endpoint for the given region and options. +func (ps Partitions) ResolveEndpoint(region string, opts Options) (aws.Endpoint, error) { + if len(ps) == 0 { + return aws.Endpoint{}, fmt.Errorf("no partitions found") + } + + if opts.Logger == nil { + opts.Logger = logging.Nop{} + } + + if len(opts.ResolvedRegion) > 0 { + region = opts.ResolvedRegion + } + + for i := 0; i < len(ps); i++ { + if !ps[i].canResolveEndpoint(region, opts) { + continue + } + + return ps[i].ResolveEndpoint(region, opts) + } + + // fall back to the first partition's format when resolving the endpoint. + return ps[0].ResolveEndpoint(region, opts) +} + +// Partition is an AWS partition description for a service and its region endpoints. +type Partition struct { + ID string + RegionRegex *regexp.Regexp + PartitionEndpoint string + IsRegionalized bool + Defaults map[DefaultKey]Endpoint + Endpoints Endpoints +} + +func (p Partition) canResolveEndpoint(region string, opts Options) bool { + _, ok := p.Endpoints[EndpointKey{ + Region: region, + Variant: opts.GetEndpointVariant(), + }] + return ok || p.RegionRegex.MatchString(region) +} + +// ResolveEndpoint resolves a service endpoint for the given region and options. +func (p Partition) ResolveEndpoint(region string, options Options) (resolved aws.Endpoint, err error) { + if len(region) == 0 && len(p.PartitionEndpoint) != 0 { + region = p.PartitionEndpoint + } + + endpoints := p.Endpoints + + variant := options.GetEndpointVariant() + serviceVariant := options.ServiceVariant + + defaults := p.Defaults[DefaultKey{ + Variant: variant, + ServiceVariant: serviceVariant, + }] + + return p.endpointForRegion(region, variant, serviceVariant, endpoints).resolve(p.ID, region, defaults, options) +} + +func (p Partition) endpointForRegion(region string, variant EndpointVariant, serviceVariant ServiceVariant, endpoints Endpoints) Endpoint { + key := EndpointKey{ + Region: region, + Variant: variant, + } + + if e, ok := endpoints[key]; ok { + return e + } + + if !p.IsRegionalized { + return endpoints[EndpointKey{ + Region: p.PartitionEndpoint, + Variant: variant, + ServiceVariant: serviceVariant, + }] + } + + // Unable to find any matching endpoint, return + // blank that will be used for generic endpoint creation. 
+ return Endpoint{} +} + +// Endpoints is a map of service config regions to endpoints +type Endpoints map[EndpointKey]Endpoint + +// CredentialScope is the credential scope of a region and service +type CredentialScope struct { + Region string + Service string +} + +// Endpoint is a service endpoint description +type Endpoint struct { + // True if the endpoint cannot be resolved for this partition/region/service + Unresolveable aws.Ternary + + Hostname string + Protocols []string + + CredentialScope CredentialScope + + SignatureVersions []string + + // Indicates that this endpoint is deprecated. + Deprecated aws.Ternary +} + +// IsZero returns whether the endpoint structure is an empty (zero) value. +func (e Endpoint) IsZero() bool { + switch { + case e.Unresolveable != aws.UnknownTernary: + return false + case len(e.Hostname) != 0: + return false + case len(e.Protocols) != 0: + return false + case e.CredentialScope != (CredentialScope{}): + return false + case len(e.SignatureVersions) != 0: + return false + } + return true +} + +func (e Endpoint) resolve(partition, region string, def Endpoint, options Options) (aws.Endpoint, error) { + var merged Endpoint + merged.mergeIn(def) + merged.mergeIn(e) + e = merged + + if e.IsZero() { + return aws.Endpoint{}, fmt.Errorf("unable to resolve endpoint for region: %v", region) + } + + var u string + if e.Unresolveable != aws.TrueTernary { + // Only attempt to resolve the endpoint if it can be resolved. + hostname := strings.Replace(e.Hostname, "{region}", region, 1) + + scheme := getEndpointScheme(e.Protocols, options.DisableHTTPS) + u = scheme + "://" + hostname + } + + signingRegion := e.CredentialScope.Region + if len(signingRegion) == 0 { + signingRegion = region + } + signingName := e.CredentialScope.Service + + if e.Deprecated == aws.TrueTernary && options.LogDeprecated { + options.Logger.Logf(logging.Warn, "endpoint identifier %q, url %q marked as deprecated", region, u) + } + + return aws.Endpoint{ + URL: u, + PartitionID: partition, + SigningRegion: signingRegion, + SigningName: signingName, + SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner), + }, nil +} + +func (e *Endpoint) mergeIn(other Endpoint) { + if other.Unresolveable != aws.UnknownTernary { + e.Unresolveable = other.Unresolveable + } + if len(other.Hostname) > 0 { + e.Hostname = other.Hostname + } + if len(other.Protocols) > 0 { + e.Protocols = other.Protocols + } + if len(other.CredentialScope.Region) > 0 { + e.CredentialScope.Region = other.CredentialScope.Region + } + if len(other.CredentialScope.Service) > 0 { + e.CredentialScope.Service = other.CredentialScope.Service + } + if len(other.SignatureVersions) > 0 { + e.SignatureVersions = other.SignatureVersions + } + if other.Deprecated != aws.UnknownTernary { + e.Deprecated = other.Deprecated + } +} + +func getEndpointScheme(protocols []string, disableHTTPS bool) string { + if disableHTTPS { + return "http" + } + + return getByPriority(protocols, protocolPriority, defaultProtocol) +} + +func getByPriority(s []string, p []string, def string) string { + if len(s) == 0 { + return def + } + + for i := 0; i < len(p); i++ { + for j := 0; j < len(s); j++ { + if s[j] == p[i] { + return s[j] + } + } + } + + return s[0] +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go new file mode 100644 index 00000000000..adb6f69921b --- /dev/null +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go @@ -0,0 +1,6 @@ +// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. + +package endpoints + +// goModuleVersion is the tagged release for this module +const goModuleVersion = "2.5.2" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md new file mode 100644 index 00000000000..a04ce9b6f77 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md @@ -0,0 +1,255 @@ +# v1.7.0 (2023-11-13) + +* **Feature**: Replace the legacy config parser with a modern, less-strict implementation. Parsing failures within a section will now simply ignore the invalid line rather than silently drop the entire section. + +# v1.6.0 (2023-11-09.2) + +* **Feature**: BREAKFIX: In order to support subproperty parsing, invalid property definitions must not be ignored + +# v1.5.2 (2023-11-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.1 (2023-11-07) + +* **Bug Fix**: Fix subproperty performance regression + +# v1.5.0 (2023-11-01) + +* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.0 (2023-10-31) + +* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.45 (2023-10-12) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.44 (2023-10-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.43 (2023-09-22) + +* **Bug Fix**: Fixed a bug where merging `max_attempts` or `duration_seconds` fields across shared config files with invalid values would silently default them to 0. +* **Bug Fix**: Move type assertion of config values out of the parsing stage, which resolves an issue where the contents of a profile would silently be dropped with certain numeric formats. 
+ +# v1.3.42 (2023-08-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.41 (2023-08-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.40 (2023-08-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.39 (2023-08-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.38 (2023-07-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.37 (2023-07-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.36 (2023-07-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.35 (2023-06-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.34 (2023-04-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.33 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.32 (2023-03-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.31 (2023-03-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.30 (2023-02-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.29 (2023-02-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.28 (2022-12-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.27 (2022-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.26 (2022-10-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.25 (2022-10-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.24 (2022-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.23 (2022-09-14) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.22 (2022-09-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.21 (2022-08-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.20 (2022-08-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.19 (2022-08-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.18 (2022-08-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.17 (2022-08-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.16 (2022-08-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.15 (2022-07-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.14 (2022-06-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.13 (2022-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.12 (2022-05-17) + +* **Bug Fix**: Removes the fuzz testing files from the module, as they are invalid and not used. 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.11 (2022-04-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.10 (2022-03-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.9 (2022-03-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.8 (2022-03-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.7 (2022-03-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.6 (2022-02-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.5 (2022-01-28) + +* **Bug Fix**: Fixes the SDK's handling of `duration_sections` in the shared credentials file or specified in multiple shared config and shared credentials files under the same profile. [#1568](https://github.com/aws/aws-sdk-go-v2/pull/1568). Thanks to [Amir Szekely](https://github.com/kichik) for help reproduce this bug. + +# v1.3.4 (2022-01-14) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.3 (2022-01-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.2 (2021-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.1 (2021-11-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.0 (2021-11-06) + +* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.5 (2021-10-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.4 (2021-10-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.3 (2021-09-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.2 (2021-08-27) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.1 (2021-08-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.0 (2021-08-04) + +* **Feature**: adds error handling for defered close calls +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.1 (2021-07-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.0 (2021-07-01) + +* **Feature**: Support for `:`, `=`, `[`, `]` being present in expression values. + +# v1.0.1 (2021-06-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.0 (2021-05-20) + +* **Release**: The `github.com/aws/aws-sdk-go-v2/internal/ini` package is now a Go Module. +* **Dependency Update**: Updated to the latest SDK module versions + diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/LICENSE.txt new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
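For orientation, the files vendored below are the ini package's shared config parser described in its CHANGELOG above. A minimal usage sketch under stated assumptions: the caller, file path, profile, and property names are hypothetical, and `internal/ini` is a Go internal package, so only aws-sdk-go-v2 itself can actually import it.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/internal/ini"
)

func main() {
	// Parse a shared credentials file into named sections.
	sections, err := ini.OpenFile("/home/user/.aws/credentials")
	if err != nil {
		panic(err)
	}

	// Each profile becomes a section; properties are plain string values.
	if section, ok := sections.GetSection("default"); ok {
		fmt.Println(section.String("aws_access_key_id"))
	}
}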
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/errors.go new file mode 100644 index 00000000000..0f278d55e6c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/errors.go @@ -0,0 +1,22 @@ +package ini + +import "fmt" + +// UnableToReadFile is an error indicating that an ini file could not be read +type UnableToReadFile struct { + Err error +} + +// Error returns an error message and the underlying error message if present +func (e *UnableToReadFile) Error() string { + base := "unable to read file" + if e.Err == nil { + return base + } + return fmt.Sprintf("%s: %v", base, e.Err) +} + +// Unwrap returns the underlying error +func (e *UnableToReadFile) Unwrap() error { + return e.Err +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go new file mode 100644 index 00000000000..9ffee6eedaf --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go @@ -0,0 +1,6 @@ +// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. + +package ini + +// goModuleVersion is the tagged release for this module +const goModuleVersion = "1.7.0" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini.go new file mode 100644 index 00000000000..cefcce91e76 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini.go @@ -0,0 +1,56 @@ +// Package ini implements parsing of the AWS shared config file. +// +// Example: +// sections, err := ini.OpenFile("/path/to/file") +// if err != nil { +// panic(err) +// } +// +// profile := "foo" +// section, ok := sections.GetSection(profile) +// if !ok { +// fmt.Printf("section %q could not be found", profile) +// } +package ini + +import ( + "fmt" + "io" + "os" + "strings" +) + +// OpenFile parses shared config from the given file path. +func OpenFile(path string) (sections Sections, err error) { + f, oerr := os.Open(path) + if oerr != nil { + return Sections{}, &UnableToReadFile{Err: oerr} + } + + defer func() { + closeErr := f.Close() + if err == nil { + err = closeErr + } else if closeErr != nil { + err = fmt.Errorf("close error: %v, original error: %w", closeErr, err) + } + }() + + return Parse(f, path) +} + +// Parse parses shared config from the given reader. 
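+//
+// A hypothetical inline use; the path argument is only recorded as the
+// provenance of each parsed property:
+//
+//	sections, err := ini.Parse(strings.NewReader(data), "inline")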
+func Parse(r io.Reader, path string) (Sections, error) { + contents, err := io.ReadAll(r) + if err != nil { + return Sections{}, fmt.Errorf("read all: %v", err) + } + + lines := strings.Split(string(contents), "\n") + tokens, err := tokenize(lines) + if err != nil { + return Sections{}, fmt.Errorf("tokenize: %v", err) + } + + return parse(tokens, path), nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse.go new file mode 100644 index 00000000000..0fcb8ec0763 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse.go @@ -0,0 +1,109 @@ +package ini + +import ( + "fmt" + "strings" +) + +func parse(tokens []lineToken, path string) Sections { + parser := &parser{ + path: path, + sections: NewSections(), + } + parser.parse(tokens) + return parser.sections +} + +type parser struct { + csection, ckey string // current state + path string // source file path + sections Sections // parse result +} + +func (p *parser) parse(tokens []lineToken) { + for _, otok := range tokens { + switch tok := otok.(type) { + case *lineTokenProfile: + p.handleProfile(tok) + case *lineTokenProperty: + p.handleProperty(tok) + case *lineTokenSubProperty: + p.handleSubProperty(tok) + case *lineTokenContinuation: + p.handleContinuation(tok) + } + } +} + +func (p *parser) handleProfile(tok *lineTokenProfile) { + name := tok.Name + if tok.Type != "" { + name = fmt.Sprintf("%s %s", tok.Type, tok.Name) + } + p.ckey = "" + p.csection = name + if _, ok := p.sections.container[name]; !ok { + p.sections.container[name] = NewSection(name) + } +} + +func (p *parser) handleProperty(tok *lineTokenProperty) { + if p.csection == "" { + return // LEGACY: don't error on "global" properties + } + + p.ckey = tok.Key + if _, ok := p.sections.container[p.csection].values[tok.Key]; ok { + section := p.sections.container[p.csection] + section.Logs = append(p.sections.container[p.csection].Logs, + fmt.Sprintf( + "For profile: %v, overriding %v value, with a %v value found in a duplicate profile defined later in the same file %v. \n", + p.csection, tok.Key, tok.Key, p.path, + ), + ) + p.sections.container[p.csection] = section + } + + p.sections.container[p.csection].values[tok.Key] = Value{ + str: tok.Value, + } + p.sections.container[p.csection].SourceFile[tok.Key] = p.path +} + +func (p *parser) handleSubProperty(tok *lineTokenSubProperty) { + if p.csection == "" { + return // LEGACY: don't error on "global" properties + } + + if p.ckey == "" || p.sections.container[p.csection].values[p.ckey].str != "" { + // This is an "orphaned" subproperty, either because it's at + // the beginning of a section or because the last property's + // value isn't empty. Either way we're lenient here and + // "promote" this to a normal property. 
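+	// A hypothetical example: given the input below, the indented
+	// "endpoint_url" line is promoted because "region" already holds a
+	// non-empty value:
+	//
+	//	region = us-east-1
+	//	  endpoint_url = http://localhost:4566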
+ p.handleProperty(&lineTokenProperty{ + Key: tok.Key, + Value: strings.TrimSpace(trimComment(tok.Value)), + }) + return + } + + if p.sections.container[p.csection].values[p.ckey].mp == nil { + p.sections.container[p.csection].values[p.ckey] = Value{ + mp: map[string]string{}, + } + } + p.sections.container[p.csection].values[p.ckey].mp[tok.Key] = tok.Value +} + +func (p *parser) handleContinuation(tok *lineTokenContinuation) { + if p.ckey == "" { + return + } + + value, _ := p.sections.container[p.csection].values[p.ckey] + if value.str != "" && value.mp == nil { + value.str = fmt.Sprintf("%s\n%s", value.str, tok.Value) + } + + p.sections.container[p.csection].values[p.ckey] = value +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/sections.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/sections.go new file mode 100644 index 00000000000..dd89848e696 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/sections.go @@ -0,0 +1,157 @@ +package ini + +import ( + "sort" +) + +// Sections is a map of Section structures that represent +// a configuration. +type Sections struct { + container map[string]Section +} + +// NewSections returns empty ini Sections +func NewSections() Sections { + return Sections{ + container: make(map[string]Section, 0), + } +} + +// GetSection will return section p. If section p does not exist, +// false will be returned in the second parameter. +func (t Sections) GetSection(p string) (Section, bool) { + v, ok := t.container[p] + return v, ok +} + +// HasSection denotes whether Sections contains a section with the +// provided name. +func (t Sections) HasSection(p string) bool { + _, ok := t.container[p] + return ok +} + +// SetSection sets a section value for the provided section name. +func (t Sections) SetSection(p string, v Section) Sections { + t.container[p] = v + return t +} + +// DeleteSection deletes a section entry/value for the provided section name. +func (t Sections) DeleteSection(p string) { + delete(t.container, p) +} + +// values represents a map of union values. +type values map[string]Value + +// List will return a list of all sections that were successfully +// parsed. +func (t Sections) List() []string { + keys := make([]string, len(t.container)) + i := 0 + for k := range t.container { + keys[i] = k + i++ + } + + sort.Strings(keys) + return keys +} + +// Section contains a name and values. This represents +// a sectioned entry in a configuration file. +type Section struct { + // Name is the Section profile name + Name string + + // values are the values within parsed profile + values values + + // Errors is the list of errors + Errors []error + + // Logs is the list of logs + Logs []string + + // SourceFile is the INI Source file from where this section + // was retrieved. The key is the property; the value is the + // source file the property was retrieved from. + SourceFile map[string]string +} + +// NewSection returns an initialized section for the name +func NewSection(name string) Section { + return Section{ + Name: name, + values: values{}, + SourceFile: map[string]string{}, + } +} + +// List will return a list of all +// properties in values +func (t Section) List() []string { + keys := make([]string, len(t.values)) + i := 0 + for k := range t.values { + keys[i] = k + i++ + } + + sort.Strings(keys) + return keys +} + +// UpdateSourceFile updates the source file for a property to the provided filepath. 
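+//
+// Hypothetical usage, recording that a property came from the shared
+// config file:
+//
+//	sec.UpdateSourceFile("region", "/home/user/.aws/config")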
+func (t Section) UpdateSourceFile(property string, filepath string) { + t.SourceFile[property] = filepath +} + +// UpdateValue updates the value for a provided key with the provided value +func (t Section) UpdateValue(k string, v Value) error { + t.values[k] = v + return nil +} + +// Has will return whether or not an entry exists in a given section +func (t Section) Has(k string) bool { + _, ok := t.values[k] + return ok +} + +// ValueType will return what type the union is set to. If +// k was not found, NoneType will be returned. +func (t Section) ValueType(k string) (ValueType, bool) { + v, ok := t.values[k] + return v.Type, ok +} + +// Bool returns a bool value at k +func (t Section) Bool(k string) (bool, bool) { + return t.values[k].BoolValue() +} + +// Int returns an integer value at k +func (t Section) Int(k string) (int64, bool) { + return t.values[k].IntValue() +} + +// Map returns a map value at k +func (t Section) Map(k string) map[string]string { + return t.values[k].MapValue() +} + +// Float64 returns a float value at k +func (t Section) Float64(k string) (float64, bool) { + return t.values[k].FloatValue() +} + +// String returns the string value at k +func (t Section) String(k string) string { + _, ok := t.values[k] + if !ok { + return "" + } + return t.values[k].StringValue() +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/strings.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/strings.go new file mode 100644 index 00000000000..478239a2505 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/strings.go @@ -0,0 +1,83 @@ +package ini + +import "strings" + +func trimComment(v string) string { + rest, _, _ := strings.Cut(v, "#") + rest, _, _ = strings.Cut(rest, ";") + return rest +} + +// assumes no surrounding comment +func splitProperty(s string) (string, string, bool) { + equalsi := strings.Index(s, "=") + coloni := strings.Index(s, ":") // LEGACY: also supported for property assignment + sep := "=" + if equalsi == -1 || coloni != -1 && coloni < equalsi { + sep = ":" + } + + k, v, ok := strings.Cut(s, sep) + if !ok { + return "", "", false + } + return strings.TrimSpace(k), strings.TrimSpace(v), true +} + +// assumes no surrounding comment, whitespace, or profile brackets +func splitProfile(s string) (string, string) { + var first int + for i, r := range s { + if isLineSpace(r) { + if first == 0 { + first = i + } + } else { + if first != 0 { + return s[:first], s[i:] + } + } + } + if first == 0 { + return "", s // type component is effectively blank + } + return "", "" +} + +func isLineSpace(r rune) bool { + return r == ' ' || r == '\t' +} + +func unquote(s string) string { + if isSingleQuoted(s) || isDoubleQuoted(s) { + return s[1 : len(s)-1] + } + return s +} + +// applies various legacy conversions to property values: +// - remove wrapping single/double quotes +// - expand escaped quote and newline sequences +func legacyStrconv(s string) string { + s = unquote(s) + s = strings.ReplaceAll(s, `\"`, `"`) + s = strings.ReplaceAll(s, `\'`, `'`) + s = strings.ReplaceAll(s, `\n`, "\n") + return s +} + +func isSingleQuoted(s string) bool { + return hasAffixes(s, "'", "'") +} + +func isDoubleQuoted(s string) bool { + return hasAffixes(s, `"`, `"`) +} + +func isBracketed(s string) bool { + return hasAffixes(s, "[", "]") +} + +func hasAffixes(s, left, right string) bool { + return strings.HasPrefix(s, left) && strings.HasSuffix(s, right) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/token.go 
b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/token.go new file mode 100644 index 00000000000..6e9a03744e0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/token.go @@ -0,0 +1,32 @@ +package ini + +type lineToken interface { + isLineToken() +} + +type lineTokenProfile struct { + Type string + Name string +} + +func (*lineTokenProfile) isLineToken() {} + +type lineTokenProperty struct { + Key string + Value string +} + +func (*lineTokenProperty) isLineToken() {} + +type lineTokenContinuation struct { + Value string +} + +func (*lineTokenContinuation) isLineToken() {} + +type lineTokenSubProperty struct { + Key string + Value string +} + +func (*lineTokenSubProperty) isLineToken() {} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/tokenize.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/tokenize.go new file mode 100644 index 00000000000..9778a1738b3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/tokenize.go @@ -0,0 +1,91 @@ +package ini + +import ( + "strings" +) + +func tokenize(lines []string) ([]lineToken, error) { + tokens := make([]lineToken, 0, len(lines)) + for _, line := range lines { + if len(strings.TrimSpace(line)) == 0 || isLineComment(line) { + continue + } + + if tok := asProfile(line); tok != nil { + tokens = append(tokens, tok) + } else if tok := asProperty(line); tok != nil { + tokens = append(tokens, tok) + } else if tok := asSubProperty(line); tok != nil { + tokens = append(tokens, tok) + } else if tok := asContinuation(line); tok != nil { + tokens = append(tokens, tok) + } // unrecognized tokens are effectively ignored + } + return tokens, nil +} + +func isLineComment(line string) bool { + trimmed := strings.TrimLeft(line, " \t") + return strings.HasPrefix(trimmed, "#") || strings.HasPrefix(trimmed, ";") +} + +func asProfile(line string) *lineTokenProfile { // " [ type name ] ; comment" + trimmed := strings.TrimSpace(trimComment(line)) // "[ type name ]" + if !isBracketed(trimmed) { + return nil + } + trimmed = trimmed[1 : len(trimmed)-1] // " type name " (or just " name ") + trimmed = strings.TrimSpace(trimmed) // "type name" / "name" + typ, name := splitProfile(trimmed) + return &lineTokenProfile{ + Type: typ, + Name: name, + } +} + +func asProperty(line string) *lineTokenProperty { + if isLineSpace(rune(line[0])) { + return nil + } + + trimmed := strings.TrimRight(trimComment(line), " \t") + k, v, ok := splitProperty(trimmed) + if !ok { + return nil + } + + return &lineTokenProperty{ + Key: strings.ToLower(k), // LEGACY: normalize key case + Value: legacyStrconv(v), // LEGACY: see func docs + } +} + +func asSubProperty(line string) *lineTokenSubProperty { + if !isLineSpace(rune(line[0])) { + return nil + } + + // comments on sub-properties are included in the value + trimmed := strings.TrimLeft(line, " \t") + k, v, ok := splitProperty(trimmed) + if !ok { + return nil + } + + return &lineTokenSubProperty{ // same LEGACY constraints as in normal property + Key: strings.ToLower(k), + Value: legacyStrconv(v), + } +} + +func asContinuation(line string) *lineTokenContinuation { + if !isLineSpace(rune(line[0])) { + return nil + } + + // includes comments like sub-properties + trimmed := strings.TrimLeft(line, " \t") + return &lineTokenContinuation{ + Value: trimmed, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/value.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/value.go new file mode 100644 index 00000000000..ade75bf34e4 --- /dev/null +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/value.go @@ -0,0 +1,104 @@ +package ini + +import ( + "fmt" + "strconv" + "strings" +) + +// ValueType is an enum that will signify what type +// the Value is +type ValueType int + +func (v ValueType) String() string { + switch v { + case NoneType: + return "NONE" + case StringType: + return "STRING" + } + + return "" +} + +// ValueType enums +const ( + NoneType = ValueType(iota) + StringType + QuotedStringType +) + +// Value is a union container +type Value struct { + Type ValueType + + str string + mp map[string]string +} + +// NewStringValue returns a Value type generated using a string input. +func NewStringValue(str string) (Value, error) { + return Value{str: str}, nil +} + +func (v Value) String() string { + switch v.Type { + case StringType: + return fmt.Sprintf("string: %s", string(v.str)) + case QuotedStringType: + return fmt.Sprintf("quoted string: %s", string(v.str)) + default: + return "union not set" + } +} + +// MapValue returns a map value for sub properties +func (v Value) MapValue() map[string]string { + newlineParts := strings.Split(string(v.str), "\n") + mp := make(map[string]string) + for _, part := range newlineParts { + operandParts := strings.Split(part, "=") + if len(operandParts) < 2 { + continue + } + key := strings.TrimSpace(operandParts[0]) + val := strings.TrimSpace(operandParts[1]) + mp[key] = val + } + return mp +} + +// IntValue returns an integer value +func (v Value) IntValue() (int64, bool) { + i, err := strconv.ParseInt(string(v.str), 0, 64) + if err != nil { + return 0, false + } + return i, true +} + +// FloatValue returns a float value +func (v Value) FloatValue() (float64, bool) { + f, err := strconv.ParseFloat(string(v.str), 64) + if err != nil { + return 0, false + } + return f, true +} + +// BoolValue returns a bool value +func (v Value) BoolValue() (bool, bool) { + // we don't use ParseBool as it recognizes more than what we've + // historically supported + if strings.EqualFold(v.str, "true") { + return true, true + } else if strings.EqualFold(v.str, "false") { + return false, true + } + return false, false +} + +// StringValue returns the string value +func (v Value) StringValue() string { + return v.str +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/rand/rand.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/rand/rand.go new file mode 100644 index 00000000000..c8484dcd759 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/rand/rand.go @@ -0,0 +1,33 @@ +package rand + +import ( + "crypto/rand" + "fmt" + "io" + "math/big" +) + +func init() { + Reader = rand.Reader +} + +// Reader provides a random reader that can reset during testing. +var Reader io.Reader + +var floatMaxBigInt = big.NewInt(1 << 53) + +// Float64 returns a float64 read from an io.Reader source. The returned float will be between [0.0, 1.0). +func Float64(reader io.Reader) (float64, error) { + bi, err := rand.Int(reader, floatMaxBigInt) + if err != nil { + return 0, fmt.Errorf("failed to read random value, %v", err) + } + + return float64(bi.Int64()) / (1 << 53), nil +} + +// CryptoRandFloat64 returns a random float64 obtained from the crypto rand +// source. 
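+//
+// A hypothetical use for randomized jitter; the returned value lies in
+// [0.0, 1.0):
+//
+//	f, err := rand.CryptoRandFloat64()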
+func CryptoRandFloat64() (float64, error) { + return Float64(Reader) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/interfaces.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/interfaces.go new file mode 100644 index 00000000000..2b42cbe6421 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/interfaces.go @@ -0,0 +1,9 @@ +package sdk + +// Invalidator provides access to a type's invalidate method to make it +// invalidate its cache. +// +// e.g. aws.SafeCredentialsProvider's Invalidate method. +type Invalidator interface { + Invalidate() +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/time.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/time.go new file mode 100644 index 00000000000..8e8dabad548 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/sdk/time.go @@ -0,0 +1,74 @@ +package sdk + +import ( + "context" + "time" +) + +func init() { + NowTime = time.Now + Sleep = time.Sleep + SleepWithContext = sleepWithContext +} + +// NowTime is a value for getting the current time. This value can be overridden +// for testing, mocking out the current time. +var NowTime func() time.Time + +// Sleep is a value for sleeping for a duration. This value can be overridden +// for testing and mocking out sleep duration. +var Sleep func(time.Duration) + +// SleepWithContext will wait for the timer duration to expire or the context +// to be canceled, whichever happens first. If the context is canceled, the +// Context's error will be returned. +// +// This value can be overridden for testing and mocking out sleep duration. +var SleepWithContext func(context.Context, time.Duration) error + +// sleepWithContext will wait for the timer duration to expire or the context +// to be canceled, whichever happens first. If the context is canceled, the +// Context's error will be returned. +func sleepWithContext(ctx context.Context, dur time.Duration) error { + t := time.NewTimer(dur) + defer t.Stop() + + select { + case <-t.C: + break + case <-ctx.Done(): + return ctx.Err() + } + + return nil +} + +// noOpSleepWithContext does nothing, returns immediately. +func noOpSleepWithContext(context.Context, time.Duration) error { + return nil +} + +func noOpSleep(time.Duration) {} + +// TestingUseNopSleep is a utility for disabling sleep across the SDK for +// testing. +func TestingUseNopSleep() func() { + SleepWithContext = noOpSleepWithContext + Sleep = noOpSleep + + return func() { + SleepWithContext = sleepWithContext + Sleep = time.Sleep + } +} + +// TestingUseReferenceTime is a utility for swapping the time function across the SDK to return a specific reference time +// for testing purposes. 
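+//
+// Hypothetical test usage; the returned func restores the real clock:
+//
+//	restore := sdk.TestingUseReferenceTime(time.Unix(1700000000, 0))
+//	defer restore()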
+func TestingUseReferenceTime(referenceTime time.Time) func() { + NowTime = func() time.Time { + return referenceTime + } + return func() { + NowTime = time.Now + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/sdkio/byte.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/sdkio/byte.go new file mode 100644 index 00000000000..6c443988bbc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/sdkio/byte.go @@ -0,0 +1,12 @@ +package sdkio + +const ( + // Byte is 8 bits + Byte int64 = 1 + // KibiByte (KiB) is 1024 Bytes + KibiByte = Byte * 1024 + // MebiByte (MiB) is 1024 KiB + MebiByte = KibiByte * 1024 + // GibiByte (GiB) is 1024 MiB + GibiByte = MebiByte * 1024 +) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/shareddefaults/shared_config.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/shareddefaults/shared_config.go new file mode 100644 index 00000000000..c96b717e08a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/shareddefaults/shared_config.go @@ -0,0 +1,47 @@ +package shareddefaults + +import ( + "os" + "os/user" + "path/filepath" +) + +// SharedCredentialsFilename returns the SDK's default file path +// for the shared credentials file. +// +// Builds the shared config file path based on the OS's platform. +// +// - Linux/Unix: $HOME/.aws/credentials +// - Windows: %USERPROFILE%\.aws\credentials +func SharedCredentialsFilename() string { + return filepath.Join(UserHomeDir(), ".aws", "credentials") +} + +// SharedConfigFilename returns the SDK's default file path for +// the shared config file. +// +// Builds the shared config file path based on the OS's platform. +// +// - Linux/Unix: $HOME/.aws/config +// - Windows: %USERPROFILE%\.aws\config +func SharedConfigFilename() string { + return filepath.Join(UserHomeDir(), ".aws", "config") +} + +// UserHomeDir returns the home directory for the user the process is +// running under. +func UserHomeDir() string { + // Ignore errors since we only care about Windows and *nix. + home, _ := os.UserHomeDir() + + if len(home) > 0 { + return home + } + + currUser, _ := user.Current() + if currUser != nil { + home = currUser.HomeDir + } + + return home +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/strings/strings.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/strings/strings.go new file mode 100644 index 00000000000..d008ae27cb3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/strings/strings.go @@ -0,0 +1,11 @@ +package strings + +import ( + "strings" +) + +// HasPrefixFold tests whether the string s begins with prefix, interpreted as UTF-8 strings, +// under Unicode case-folding. +func HasPrefixFold(s, prefix string) bool { + return len(s) >= len(prefix) && strings.EqualFold(s[0:len(prefix)], prefix) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/LICENSE b/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/LICENSE new file mode 100644 index 00000000000..fe6a62006a5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
* Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/docs.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/docs.go new file mode 100644 index 00000000000..cb70616e802 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/docs.go @@ -0,0 +1,7 @@ +// Package singleflight provides a duplicate function call suppression +// mechanism. This package is a fork of the Go golang.org/x/sync/singleflight +// package. The package is forked because the package is a part of the unstable +// and unversioned golang.org/x/sync module. +// +// https://github.com/golang/sync/tree/67f06af15bc961c363a7260195bcd53487529a21/singleflight +package singleflight diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/singleflight.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/singleflight.go new file mode 100644 index 00000000000..e8a1b17d564 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/singleflight.go @@ -0,0 +1,210 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package singleflight + +import ( + "bytes" + "errors" + "fmt" + "runtime" + "runtime/debug" + "sync" +) + +// errGoexit indicates the runtime.Goexit was called in +// the user given function. +var errGoexit = errors.New("runtime.Goexit was called") + +// A panicError is an arbitrary value recovered from a panic +// with the stack trace during the execution of the given function. +type panicError struct { + value interface{} + stack []byte +} + +// Error implements the error interface. +func (p *panicError) Error() string { + return fmt.Sprintf("%v\n\n%s", p.value, p.stack) +} + +func newPanicError(v interface{}) error { + stack := debug.Stack() + + // The first line of the stack trace is of the form "goroutine N [status]:" + // but by the time the panic reaches Do the goroutine may no longer exist + // and its status will have changed. Trim out the misleading line. 
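+	// (e.g. a dropped first line looks like "goroutine 7 [running]:")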
+ if line := bytes.IndexByte(stack[:], '\n'); line >= 0 { + stack = stack[line+1:] + } + return &panicError{value: v, stack: stack} +} + +// call is an in-flight or completed singleflight.Do call +type call struct { + wg sync.WaitGroup + + // These fields are written once before the WaitGroup is done + // and are only read after the WaitGroup is done. + val interface{} + err error + + // forgotten indicates whether Forget was called with this call's key + // while the call was still in flight. + forgotten bool + + // These fields are read and written with the singleflight + // mutex held before the WaitGroup is done, and are read but + // not written after the WaitGroup is done. + dups int + chans []chan<- Result +} + +// Group represents a class of work and forms a namespace in +// which units of work can be executed with duplicate suppression. +type Group struct { + mu sync.Mutex // protects m + m map[string]*call // lazily initialized +} + +// Result holds the results of Do, so they can be passed +// on a channel. +type Result struct { + Val interface{} + Err error + Shared bool +} + +// Do executes and returns the results of the given function, making +// sure that only one execution is in-flight for a given key at a +// time. If a duplicate comes in, the duplicate caller waits for the +// original to complete and receives the same results. +// The return value shared indicates whether v was given to multiple callers. +func (g *Group) Do(key string, fn func() (interface{}, error)) (v interface{}, err error, shared bool) { + g.mu.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + g.mu.Unlock() + c.wg.Wait() + + if e, ok := c.err.(*panicError); ok { + panic(e) + } else if c.err == errGoexit { + runtime.Goexit() + } + return c.val, c.err, true + } + c := new(call) + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + g.doCall(c, key, fn) + return c.val, c.err, c.dups > 0 +} + +// DoChan is like Do but returns a channel that will receive the +// results when they are ready. +// +// The returned channel will not be closed. +func (g *Group) DoChan(key string, fn func() (interface{}, error)) <-chan Result { + ch := make(chan Result, 1) + g.mu.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + c.chans = append(c.chans, ch) + g.mu.Unlock() + return ch + } + c := &call{chans: []chan<- Result{ch}} + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + go g.doCall(c, key, fn) + + return ch +} + +// doCall handles the single call for a key. +func (g *Group) doCall(c *call, key string, fn func() (interface{}, error)) { + normalReturn := false + recovered := false + + // use double-defer to distinguish panic from runtime.Goexit, + // more details see https://golang.org/cl/134395 + defer func() { + // the given function invoked runtime.Goexit + if !normalReturn && !recovered { + c.err = errGoexit + } + + c.wg.Done() + g.mu.Lock() + defer g.mu.Unlock() + if !c.forgotten { + delete(g.m, key) + } + + if e, ok := c.err.(*panicError); ok { + // In order to prevent the waiting channels from being blocked forever, + // needs to ensure that this panic cannot be recovered. + if len(c.chans) > 0 { + go panic(e) + select {} // Keep this goroutine around so that it will appear in the crash dump. 
+			} else {
+				panic(e)
+			}
+		} else if c.err == errGoexit {
+			// Already in the process of goexit, no need to call again
+		} else {
+			// Normal return
+			for _, ch := range c.chans {
+				ch <- Result{c.val, c.err, c.dups > 0}
+			}
+		}
+	}()
+
+	func() {
+		defer func() {
+			if !normalReturn {
+				// Ideally, we would wait to take a stack trace until we've determined
+				// whether this is a panic or a runtime.Goexit.
+				//
+				// Unfortunately, the only way we can distinguish the two is to see
+				// whether the recover stopped the goroutine from terminating, and by
+				// the time we know that, the part of the stack trace relevant to the
+				// panic has been discarded.
+				if r := recover(); r != nil {
+					c.err = newPanicError(r)
+				}
+			}
+		}()
+
+		c.val, c.err = fn()
+		normalReturn = true
+	}()
+
+	if !normalReturn {
+		recovered = true
+	}
+}
+
+// Forget tells the singleflight to forget about a key. Future calls
+// to Do for this key will call the function rather than waiting for
+// an earlier call to complete.
+func (g *Group) Forget(key string) {
+	g.mu.Lock()
+	if c, ok := g.m[key]; ok {
+		c.forgotten = true
+	}
+	delete(g.m, key)
+	g.mu.Unlock()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/timeconv/duration.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/timeconv/duration.go
new file mode 100644
index 00000000000..5d69db5f249
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/timeconv/duration.go
@@ -0,0 +1,13 @@
+package timeconv
+
+import "time"
+
+// FloatSecondsDur converts fractional seconds to a time.Duration.
+func FloatSecondsDur(v float64) time.Duration {
+	return time.Duration(v * float64(time.Second))
+}
+
+// DurSecondsFloat converts a duration into fractional seconds.
+func DurSecondsFloat(d time.Duration) float64 {
+	return float64(d) / float64(time.Second)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md
new file mode 100644
index 00000000000..99a54769af8
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md
@@ -0,0 +1,240 @@
+# v1.10.2 (2023-11-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.1 (2023-11-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.0 (2023-10-31)
+
+* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/).
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.37 (2023-10-12) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.36 (2023-10-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.35 (2023-08-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.34 (2023-08-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.33 (2023-08-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.32 (2023-08-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.31 (2023-07-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.30 (2023-07-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.29 (2023-07-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.28 (2023-06-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.27 (2023-04-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.26 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.25 (2023-03-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.24 (2023-03-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.23 (2023-02-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.22 (2023-02-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.21 (2022-12-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.20 (2022-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.19 (2022-10-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.18 (2022-10-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.17 (2022-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.16 (2022-09-14) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.15 (2022-09-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.14 (2022-08-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.13 (2022-08-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.12 (2022-08-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.11 (2022-08-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.10 (2022-08-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.9 (2022-08-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.8 (2022-07-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.7 (2022-06-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.6 (2022-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.5 (2022-05-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.4 (2022-04-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.3 (2022-03-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.2 (2022-03-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.1 (2022-03-23) + +* 
**Dependency Update**: Updated to the latest SDK module versions + +# v1.9.0 (2022-03-08) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.8.0 (2022-02-24) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.0 (2022-01-14) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.0 (2022-01-07) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.2 (2021-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.1 (2021-11-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.0 (2021-11-06) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.0 (2021-10-21) + +* **Feature**: Updated to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.2 (2021-10-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.1 (2021-09-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.0 (2021-08-27) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.3 (2021-08-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.2 (2021-08-04) + +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.1 (2021-07-15) + +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.0 (2021-06-25) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.1 (2021-05-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.0 (2021-05-14) + +* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. +* **Dependency Update**: Updated to the latest SDK module versions + diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/LICENSE.txt new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
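The singleflight package vendored above suppresses duplicate concurrent calls that share a key: per its doc comments, `Group.Do` runs `fn` at most once per in-flight key and hands every waiter the same result, and `DoChan` offers the same semantics over a channel. A minimal usage sketch (the key name and workload are illustrative; since the import path is internal to the aws-sdk-go-v2 module, this compiles only inside that module):

```go
package main

import (
	"fmt"
	"sync"

	"github.com/aws/aws-sdk-go-v2/internal/sync/singleflight"
)

func main() {
	var g singleflight.Group
	var wg sync.WaitGroup

	// Ten concurrent callers share the key "cfg"; fn executes once per
	// in-flight key and every caller receives the same value. shared
	// reports whether the result was handed to more than one caller.
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			v, err, shared := g.Do("cfg", func() (interface{}, error) {
				return "loaded-once", nil
			})
			fmt.Println(v, err, shared)
		}()
	}
	wg.Wait()
}
```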
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/context.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/context.go new file mode 100644 index 00000000000..cc919701a06 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/context.go @@ -0,0 +1,48 @@ +package presignedurl + +import ( + "context" + + "github.com/aws/smithy-go/middleware" +) + +// WithIsPresigning adds the isPresigning sentinel value to a context to signal +// that the middleware stack is using the presign flow. +// +// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues +// to clear all stack values. +func WithIsPresigning(ctx context.Context) context.Context { + return middleware.WithStackValue(ctx, isPresigningKey{}, true) +} + +// GetIsPresigning returns if the context contains the isPresigning sentinel +// value for presigning flows. +// +// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues +// to clear all stack values. +func GetIsPresigning(ctx context.Context) bool { + v, _ := middleware.GetStackValue(ctx, isPresigningKey{}).(bool) + return v +} + +type isPresigningKey struct{} + +// AddAsIsPresigingMiddleware adds a middleware to the head of the stack that +// will update the stack's context to be flagged as being invoked for the +// purpose of presigning. +func AddAsIsPresigingMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(asIsPresigningMiddleware{}, middleware.Before) +} + +type asIsPresigningMiddleware struct{} + +func (asIsPresigningMiddleware) ID() string { return "AsIsPresigningMiddleware" } + +func (asIsPresigningMiddleware) HandleInitialize( + ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, +) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + ctx = WithIsPresigning(ctx) + return next.HandleInitialize(ctx, in) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/doc.go new file mode 100644 index 00000000000..1b85375cf80 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/doc.go @@ -0,0 +1,3 @@ +// Package presignedurl provides the customizations for API clients to fill in +// presigned URLs into input parameters. +package presignedurl diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go new file mode 100644 index 00000000000..66b8acd87c5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go @@ -0,0 +1,6 @@ +// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. 
+
+package presignedurl
+
+// goModuleVersion is the tagged release for this module
+const goModuleVersion = "1.10.2"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/middleware.go
new file mode 100644
index 00000000000..1e2f5c8122a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/middleware.go
@@ -0,0 +1,110 @@
+package presignedurl
+
+import (
+	"context"
+	"fmt"
+
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+
+	"github.com/aws/smithy-go/middleware"
+)
+
+// URLPresigner provides the interface to presign the input parameters into a
+// presigned URL.
+type URLPresigner interface {
+	// PresignURL presigns a URL.
+	PresignURL(ctx context.Context, srcRegion string, params interface{}) (*v4.PresignedHTTPRequest, error)
+}
+
+// ParameterAccessor provides a collection of accessors for retrieving and
+// setting the values needed for presigned URL generation.
+type ParameterAccessor struct {
+	// GetPresignedURL accessor points to a function that retrieves a presigned url if present
+	GetPresignedURL func(interface{}) (string, bool, error)
+
+	// GetSourceRegion accessor points to a function that retrieves the source region for a presigned url
+	GetSourceRegion func(interface{}) (string, bool, error)
+
+	// CopyInput accessor points to a function that takes in an input, and returns a copy.
+	CopyInput func(interface{}) (interface{}, error)
+
+	// SetDestinationRegion accessor points to a function that sets the destination region on an api input struct
+	SetDestinationRegion func(interface{}, string) error
+
+	// SetPresignedURL accessor points to a function that sets the presigned url on an api input struct
+	SetPresignedURL func(interface{}, string) error
+}
+
+// Options provides the set of options needed by the presigned URL middleware.
+type Options struct {
+	// Accessor is the set of parameter accessors used by this middleware
+	Accessor ParameterAccessor
+
+	// Presigner is the URLPresigner used by the middleware
+	Presigner URLPresigner
+}
+
+// AddMiddleware adds the Presign URL middleware to the middleware stack.
+func AddMiddleware(stack *middleware.Stack, opts Options) error {
+	return stack.Initialize.Add(&presign{options: opts}, middleware.Before)
+}
+
+// RemoveMiddleware removes the Presign URL middleware from the stack.
+func RemoveMiddleware(stack *middleware.Stack) error {
+	_, err := stack.Initialize.Remove((*presign)(nil).ID())
+	return err
+}
+
+type presign struct {
+	options Options
+}
+
+func (m *presign) ID() string { return "Presign" }
+
+func (m *presign) HandleInitialize(
+	ctx context.Context, input middleware.InitializeInput, next middleware.InitializeHandler,
+) (
+	out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+	// If PresignedURL is already set, ignore this middleware.
+	if _, ok, err := m.options.Accessor.GetPresignedURL(input.Parameters); err != nil {
+		return out, metadata, fmt.Errorf("presign middleware failed, %w", err)
+	} else if ok {
+		return next.HandleInitialize(ctx, input)
+	}
+
+	// If the source region is not set, ignore this middleware.
+ srcRegion, ok, err := m.options.Accessor.GetSourceRegion(input.Parameters) + if err != nil { + return out, metadata, fmt.Errorf("presign middleware failed, %w", err) + } else if !ok || len(srcRegion) == 0 { + return next.HandleInitialize(ctx, input) + } + + // Create a copy of the original input so the destination region value can + // be added. This ensures that value does not leak into the original + // request parameters. + paramCpy, err := m.options.Accessor.CopyInput(input.Parameters) + if err != nil { + return out, metadata, fmt.Errorf("unable to create presigned URL, %w", err) + } + + // Destination region is the API client's configured region. + dstRegion := awsmiddleware.GetRegion(ctx) + if err = m.options.Accessor.SetDestinationRegion(paramCpy, dstRegion); err != nil { + return out, metadata, fmt.Errorf("presign middleware failed, %w", err) + } + + presignedReq, err := m.options.Presigner.PresignURL(ctx, srcRegion, paramCpy) + if err != nil { + return out, metadata, fmt.Errorf("unable to create presigned URL, %w", err) + } + + // Update the original input with the presigned URL value. + if err = m.options.Accessor.SetPresignedURL(input.Parameters, presignedReq.URL); err != nil { + return out, metadata, fmt.Errorf("presign middleware failed, %w", err) + } + + return next.HandleInitialize(ctx, input) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md new file mode 100644 index 00000000000..a4c1d645969 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md @@ -0,0 +1,327 @@ +# v1.17.1 (2023-11-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.0 (2023-11-01) + +* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.0 (2023-10-31) + +* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.2 (2023-10-12) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.1 (2023-10-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.0 (2023-10-02) + +* **Feature**: Fix FIPS Endpoints in aws-us-gov. + +# v1.14.1 (2023-09-22) + +* No change notes available for this release. + +# v1.14.0 (2023-09-18) + +* **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* **Feature**: Adds several endpoint ruleset changes across all models: smaller rulesets, removed non-unique regional endpoints, fixes FIPS and DualStack endpoints, and make region not required in SDK::Endpoint. Additional breakfix to cognito-sync field. + +# v1.13.6 (2023-08-31) + +* No change notes available for this release. + +# v1.13.5 (2023-08-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.4 (2023-08-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.3 (2023-08-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.2 (2023-08-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.1 (2023-08-01) + +* No change notes available for this release. 
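The presign middleware vendored above funnels all signing through the `URLPresigner` interface and touches the operation input only via the `ParameterAccessor` callbacks, so it can be exercised without a real signer. A sketch of a stub presigner satisfying that interface (the fabricated URL is a placeholder for testing, not how the SDK signs requests):

```go
package main

import (
	"context"
	"fmt"

	v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
)

// stubPresigner satisfies presignedurl.URLPresigner for illustration only:
// instead of SigV4-signing a real request it fabricates a URL that encodes
// the source region, so a test can assert the middleware passed it through.
type stubPresigner struct{}

func (stubPresigner) PresignURL(ctx context.Context, srcRegion string, params interface{}) (*v4.PresignedHTTPRequest, error) {
	return &v4.PresignedHTTPRequest{
		Method: "GET",
		URL:    fmt.Sprintf("https://example.com/presigned?region=%s", srcRegion),
	}, nil
}

func main() {
	req, _ := stubPresigner{}.PresignURL(context.Background(), "us-west-2", nil)
	fmt.Println(req.Method, req.URL)
}
```

Wiring it in would mean passing `Options{Presigner: stubPresigner{}, Accessor: ...}` to `AddMiddleware`; the accessor functions are operation-specific and generated per service.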
+
+# v1.13.0 (2023-07-31)
+
+* **Feature**: Adds support for smithy-modeled endpoint resolution. A new rules-based endpoint resolution will be added to the SDK which will supersede and deprecate existing endpoint resolution. Specifically, EndpointResolver will be deprecated while BaseEndpoint and EndpointResolverV2 will take its place. For more information, please see the Endpoints section in our Developer Guide.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.14 (2023-07-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.13 (2023-07-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.12 (2023-06-15)
+
+* No change notes available for this release.
+
+# v1.12.11 (2023-06-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.10 (2023-05-04)
+
+* No change notes available for this release.
+
+# v1.12.9 (2023-04-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.8 (2023-04-10)
+
+* No change notes available for this release.
+
+# v1.12.7 (2023-04-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.6 (2023-03-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.5 (2023-03-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.4 (2023-02-22)
+
+* **Bug Fix**: Prevent nil pointer dereference when retrieving error codes.
+
+# v1.12.3 (2023-02-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.2 (2023-02-15)
+
+* **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+* **Bug Fix**: Correct error type parsing for restJson services.
+
+# v1.12.1 (2023-02-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.0 (2023-01-05)
+
+* **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+
+# v1.11.28 (2022-12-20)
+
+* No change notes available for this release.
+
+# v1.11.27 (2022-12-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.26 (2022-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.25 (2022-10-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.24 (2022-10-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.23 (2022-09-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.22 (2022-09-14)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.21 (2022-09-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.20 (2022-08-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.19 (2022-08-30)
+
+* **Documentation**: Documentation updates for the AWS IAM Identity Center Portal CLI Reference.
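As the v1.13.0 entry above notes, rules-based resolution expresses a custom endpoint through the `BaseEndpoint` client option instead of a custom `EndpointResolver`. A sketch against the sso client vendored later in this patch (the endpoint URL is a placeholder):

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sso"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.Background())
	if err != nil {
		log.Fatal(err)
	}

	// BaseEndpoint feeds the rules-based EndpointResolverV2; the deprecated
	// EndpointResolver path stays nil. The URL is a placeholder for a
	// private or test endpoint.
	client := sso.NewFromConfig(cfg, func(o *sso.Options) {
		o.BaseEndpoint = aws.String("https://sso.example.internal")
	})
	_ = client
}
```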
+
+# v1.11.18 (2022-08-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.17 (2022-08-15)
+
+* **Documentation**: Documentation updates to reflect service rename - AWS IAM Identity Center (successor to AWS Single Sign-On)
+
+# v1.11.16 (2022-08-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.15 (2022-08-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.14 (2022-08-08)
+
+* **Documentation**: Documentation updates to reflect service rename - AWS IAM Identity Center (successor to AWS Single Sign-On)
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.13 (2022-08-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.12 (2022-07-11)
+
+* No change notes available for this release.
+
+# v1.11.11 (2022-07-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.10 (2022-06-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.9 (2022-06-16)
+
+* No change notes available for this release.
+
+# v1.11.8 (2022-06-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.7 (2022-05-26)
+
+* No change notes available for this release.
+
+# v1.11.6 (2022-05-25)
+
+* No change notes available for this release.
+
+# v1.11.5 (2022-05-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.4 (2022-04-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.3 (2022-03-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.2 (2022-03-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.1 (2022-03-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.0 (2022-03-08)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.0 (2022-02-24)
+
+* **Feature**: API client updated
+* **Feature**: Adds RetryMaxAttempts and RetryMode to API client Options. This allows the API clients' default Retryer to be configured from the shared configuration files or environment variables. Adding a new Retry mode of `Adaptive`. `Adaptive` retry mode is an experimental mode, adding client rate limiting when throttle responses are received from an API. See [retry.AdaptiveMode](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws/retry#AdaptiveMode) for more details, and configuration options.
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.0 (2022-01-14)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Documentation**: Updated API models
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.8.0 (2022-01-07)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.0 (2021-12-21)
+
+* **Feature**: API Paginators now support specifying the initial starting token, and support stopping on empty string tokens.
+
+# v1.6.2 (2021-12-02)
+
+* **Bug Fix**: Fixes a bug that prevented aws.EndpointResolverWithOptions from being used by the service client.
([#1514](https://github.com/aws/aws-sdk-go-v2/pull/1514)) +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.1 (2021-11-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.0 (2021-11-06) + +* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Feature**: Updated service to latest API model. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.0 (2021-10-21) + +* **Feature**: Updated to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.2 (2021-10-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.1 (2021-09-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.0 (2021-08-27) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.3 (2021-08-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.2 (2021-08-04) + +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.1 (2021-07-15) + +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.0 (2021-06-25) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.1 (2021-05-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.0 (2021-05-14) + +* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. +* **Dependency Update**: Updated to the latest SDK module versions + diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/LICENSE.txt new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go new file mode 100644 index 00000000000..a2579227393 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go @@ -0,0 +1,526 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+
+package sso
+
+import (
+	"context"
+	"fmt"
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/aws/defaults"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/aws/retry"
+	"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+	awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
+	internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources"
+	smithy "github.com/aws/smithy-go"
+	smithydocument "github.com/aws/smithy-go/document"
+	"github.com/aws/smithy-go/logging"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+	"net"
+	"net/http"
+	"time"
+)
+
+const ServiceID = "SSO"
+const ServiceAPIVersion = "2019-06-10"
+
+// Client provides the API client to make operation calls for AWS Single Sign-On.
+type Client struct {
+	options Options
+}
+
+// New returns an initialized Client based on the functional options. Provide
+// additional functional options to further configure the behavior of the client,
+// such as changing the client's endpoint or adding custom middleware behavior.
+func New(options Options, optFns ...func(*Options)) *Client {
+	options = options.Copy()
+
+	resolveDefaultLogger(&options)
+
+	setResolvedDefaultsMode(&options)
+
+	resolveRetryer(&options)
+
+	resolveHTTPClient(&options)
+
+	resolveHTTPSignerV4(&options)
+
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	client := &Client{
+		options: options,
+	}
+
+	return client
+}
+
+type Options struct {
+	// Set of options to modify how an operation is invoked. These apply to all
+	// operations invoked for this client. Use functional options on the operation
+	// call to modify this list for per-operation behavior.
+	APIOptions []func(*middleware.Stack) error
+
+	// The optional application-specific identifier appended to the User-Agent header.
+	AppID string
+
+	// This endpoint will be given as input to an EndpointResolverV2. It is used for
+	// providing a custom base endpoint that is subject to modifications by the
+	// processing EndpointResolverV2.
+	BaseEndpoint *string
+
+	// Configures the events that will be sent to the configured logger.
+	ClientLogMode aws.ClientLogMode
+
+	// The credentials object to use when signing requests.
+	Credentials aws.CredentialsProvider
+
+	// The configuration DefaultsMode that the SDK should use when constructing the
+	// client's initial default settings.
+	DefaultsMode aws.DefaultsMode
+
+	// The endpoint options to be used when attempting to resolve an endpoint.
+	EndpointOptions EndpointResolverOptions
+
+	// The service endpoint resolver.
+	//
+	// Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for
+	// this field will likely prevent you from using any endpoint-related service
+	// features released after the introduction of EndpointResolverV2 and
+	// BaseEndpoint. To migrate an EndpointResolver implementation that uses a custom
+	// endpoint, set the client option BaseEndpoint instead.
+	EndpointResolver EndpointResolver
+
+	// Resolves the endpoint used for a particular service. This should be used over
+	// the deprecated EndpointResolver.
+	EndpointResolverV2 EndpointResolverV2
+
+	// Signature Version 4 (SigV4) Signer
+	HTTPSignerV4 HTTPSignerV4
+
+	// The logger writer interface to write logging messages to.
+	Logger logging.Logger
+
+	// The region to send requests to.
(Required)
+	Region string
+
+	// RetryMaxAttempts specifies the maximum number of attempts an API client will
+	// call an operation that fails with a retryable error. A value of 0 is ignored,
+	// and will not be used to configure the API client created default retryer, or
+	// modify per operation call's retry max attempts. When creating new API clients,
+	// this member will only be used if the Retryer Options member is nil. This value
+	// will be ignored if Retryer is not nil. If specified in an operation call's
+	// functional options with a value that is different than the constructed client's
+	// Options, the Client's Retryer will be wrapped to use the operation's specific
+	// RetryMaxAttempts value.
+	RetryMaxAttempts int
+
+	// RetryMode specifies the retry mode the API client will be created with, if the
+	// Retryer option is not also specified. When creating new API clients, this
+	// member will only be used if the Retryer Options member is nil. This value will
+	// be ignored if Retryer is not nil. Currently does not support per-operation call
+	// overrides; it may in the future.
+	RetryMode aws.RetryMode
+
+	// Retryer guides how HTTP requests should be retried in case of recoverable
+	// failures. When nil the API client will use a default retryer. The kind of
+	// default retry created by the API client can be changed with the RetryMode
+	// option.
+	Retryer aws.Retryer
+
+	// The RuntimeEnvironment configuration, only populated if the DefaultsMode is set
+	// to DefaultsModeAuto and is initialized using config.LoadDefaultConfig. You
+	// should not populate this structure programmatically, or rely on the values here
+	// within your applications.
+	RuntimeEnvironment aws.RuntimeEnvironment
+
+	// The initial DefaultsMode used when the client options were constructed. If the
+	// DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved
+	// value was at that point in time. Currently does not support per-operation call
+	// overrides; it may in the future.
+	resolvedDefaultsMode aws.DefaultsMode
+
+	// The HTTP client to invoke API calls with. Defaults to client's default HTTP
+	// implementation if nil.
+	HTTPClient HTTPClient
+}
+
+// WithAPIOptions returns a functional option for setting the Client's APIOptions
+// option.
+func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) {
+	return func(o *Options) {
+		o.APIOptions = append(o.APIOptions, optFns...)
+	}
+}
+
+// Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for
+// this field will likely prevent you from using any endpoint-related service
+// features released after the introduction of EndpointResolverV2 and BaseEndpoint.
+// To migrate an EndpointResolver implementation that uses a custom endpoint, set
+// the client option BaseEndpoint instead.
+func WithEndpointResolver(v EndpointResolver) func(*Options) {
+	return func(o *Options) {
+		o.EndpointResolver = v
+	}
+}
+
+// WithEndpointResolverV2 returns a functional option for setting the Client's
+// EndpointResolverV2 option.
+func WithEndpointResolverV2(v EndpointResolverV2) func(*Options) {
+	return func(o *Options) {
+		o.EndpointResolverV2 = v
+	}
+}
+
+type HTTPClient interface {
+	Do(*http.Request) (*http.Response, error)
+}
+
+// Copy creates a clone where the APIOptions list is deep copied.
+func (o Options) Copy() Options { + to := o + to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions)) + copy(to.APIOptions, o.APIOptions) + + return to +} +func (c *Client) invokeOperation(ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error) (result interface{}, metadata middleware.Metadata, err error) { + ctx = middleware.ClearStackValues(ctx) + stack := middleware.NewStack(opID, smithyhttp.NewStackRequest) + options := c.options.Copy() + resolveEndpointResolverV2(&options) + + for _, fn := range optFns { + fn(&options) + } + + finalizeRetryMaxAttemptOptions(&options, *c) + + finalizeClientEndpointResolverOptions(&options) + + for _, fn := range stackFns { + if err := fn(stack, options); err != nil { + return nil, metadata, err + } + } + + for _, fn := range options.APIOptions { + if err := fn(stack); err != nil { + return nil, metadata, err + } + } + + handler := middleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack) + result, metadata, err = handler.Handle(ctx, params) + if err != nil { + err = &smithy.OperationError{ + ServiceID: ServiceID, + OperationName: opID, + Err: err, + } + } + return result, metadata, err +} + +type noSmithyDocumentSerde = smithydocument.NoSerde + +type legacyEndpointContextSetter struct { + LegacyResolver EndpointResolver +} + +func (*legacyEndpointContextSetter) ID() string { + return "legacyEndpointContextSetter" +} + +func (m *legacyEndpointContextSetter) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + if m.LegacyResolver != nil { + ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, true) + } + + return next.HandleInitialize(ctx, in) + +} +func addlegacyEndpointContextSetter(stack *middleware.Stack, o Options) error { + return stack.Initialize.Add(&legacyEndpointContextSetter{ + LegacyResolver: o.EndpointResolver, + }, middleware.Before) +} + +func resolveDefaultLogger(o *Options) { + if o.Logger != nil { + return + } + o.Logger = logging.Nop{} +} + +func addSetLoggerMiddleware(stack *middleware.Stack, o Options) error { + return middleware.AddSetLoggerMiddleware(stack, o.Logger) +} + +func setResolvedDefaultsMode(o *Options) { + if len(o.resolvedDefaultsMode) > 0 { + return + } + + var mode aws.DefaultsMode + mode.SetFromString(string(o.DefaultsMode)) + + if mode == aws.DefaultsModeAuto { + mode = defaults.ResolveDefaultsModeAuto(o.Region, o.RuntimeEnvironment) + } + + o.resolvedDefaultsMode = mode +} + +// NewFromConfig returns a new client from the provided config. +func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { + opts := Options{ + Region: cfg.Region, + DefaultsMode: cfg.DefaultsMode, + RuntimeEnvironment: cfg.RuntimeEnvironment, + HTTPClient: cfg.HTTPClient, + Credentials: cfg.Credentials, + APIOptions: cfg.APIOptions, + Logger: cfg.Logger, + ClientLogMode: cfg.ClientLogMode, + AppID: cfg.AppID, + } + resolveAWSRetryerProvider(cfg, &opts) + resolveAWSRetryMaxAttempts(cfg, &opts) + resolveAWSRetryMode(cfg, &opts) + resolveAWSEndpointResolver(cfg, &opts) + resolveUseDualStackEndpoint(cfg, &opts) + resolveUseFIPSEndpoint(cfg, &opts) + resolveBaseEndpoint(cfg, &opts) + return New(opts, optFns...) 
+} + +func resolveHTTPClient(o *Options) { + var buildable *awshttp.BuildableClient + + if o.HTTPClient != nil { + var ok bool + buildable, ok = o.HTTPClient.(*awshttp.BuildableClient) + if !ok { + return + } + } else { + buildable = awshttp.NewBuildableClient() + } + + modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) + if err == nil { + buildable = buildable.WithDialerOptions(func(dialer *net.Dialer) { + if dialerTimeout, ok := modeConfig.GetConnectTimeout(); ok { + dialer.Timeout = dialerTimeout + } + }) + + buildable = buildable.WithTransportOptions(func(transport *http.Transport) { + if tlsHandshakeTimeout, ok := modeConfig.GetTLSNegotiationTimeout(); ok { + transport.TLSHandshakeTimeout = tlsHandshakeTimeout + } + }) + } + + o.HTTPClient = buildable +} + +func resolveRetryer(o *Options) { + if o.Retryer != nil { + return + } + + if len(o.RetryMode) == 0 { + modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) + if err == nil { + o.RetryMode = modeConfig.RetryMode + } + } + if len(o.RetryMode) == 0 { + o.RetryMode = aws.RetryModeStandard + } + + var standardOptions []func(*retry.StandardOptions) + if v := o.RetryMaxAttempts; v != 0 { + standardOptions = append(standardOptions, func(so *retry.StandardOptions) { + so.MaxAttempts = v + }) + } + + switch o.RetryMode { + case aws.RetryModeAdaptive: + var adaptiveOptions []func(*retry.AdaptiveModeOptions) + if len(standardOptions) != 0 { + adaptiveOptions = append(adaptiveOptions, func(ao *retry.AdaptiveModeOptions) { + ao.StandardOptions = append(ao.StandardOptions, standardOptions...) + }) + } + o.Retryer = retry.NewAdaptiveMode(adaptiveOptions...) + + default: + o.Retryer = retry.NewStandard(standardOptions...) + } +} + +func resolveAWSRetryerProvider(cfg aws.Config, o *Options) { + if cfg.Retryer == nil { + return + } + o.Retryer = cfg.Retryer() +} + +func resolveAWSRetryMode(cfg aws.Config, o *Options) { + if len(cfg.RetryMode) == 0 { + return + } + o.RetryMode = cfg.RetryMode +} +func resolveAWSRetryMaxAttempts(cfg aws.Config, o *Options) { + if cfg.RetryMaxAttempts == 0 { + return + } + o.RetryMaxAttempts = cfg.RetryMaxAttempts +} + +func finalizeRetryMaxAttemptOptions(o *Options, client Client) { + if v := o.RetryMaxAttempts; v == 0 || v == client.options.RetryMaxAttempts { + return + } + + o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts) +} + +func resolveAWSEndpointResolver(cfg aws.Config, o *Options) { + if cfg.EndpointResolver == nil && cfg.EndpointResolverWithOptions == nil { + return + } + o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, cfg.EndpointResolverWithOptions) +} + +func addClientUserAgent(stack *middleware.Stack, options Options) error { + if err := awsmiddleware.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "sso", goModuleVersion)(stack); err != nil { + return err + } + + if len(options.AppID) > 0 { + return awsmiddleware.AddSDKAgentKey(awsmiddleware.ApplicationIdentifier, options.AppID)(stack) + } + + return nil +} + +func addHTTPSignerV4Middleware(stack *middleware.Stack, o Options) error { + mw := v4.NewSignHTTPRequestMiddleware(v4.SignHTTPRequestMiddlewareOptions{ + CredentialsProvider: o.Credentials, + Signer: o.HTTPSignerV4, + LogSigning: o.ClientLogMode.IsSigning(), + }) + return stack.Finalize.Add(mw, middleware.After) +} + +type HTTPSignerV4 interface { + SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns 
...func(*v4.SignerOptions)) error +} + +func resolveHTTPSignerV4(o *Options) { + if o.HTTPSignerV4 != nil { + return + } + o.HTTPSignerV4 = newDefaultV4Signer(*o) +} + +func newDefaultV4Signer(o Options) *v4.Signer { + return v4.NewSigner(func(so *v4.SignerOptions) { + so.Logger = o.Logger + so.LogSigning = o.ClientLogMode.IsSigning() + }) +} + +func addRetryMiddlewares(stack *middleware.Stack, o Options) error { + mo := retry.AddRetryMiddlewaresOptions{ + Retryer: o.Retryer, + LogRetryAttempts: o.ClientLogMode.IsRetries(), + } + return retry.AddRetryMiddlewares(stack, mo) +} + +// resolves dual-stack endpoint configuration +func resolveUseDualStackEndpoint(cfg aws.Config, o *Options) error { + if len(cfg.ConfigSources) == 0 { + return nil + } + value, found, err := internalConfig.ResolveUseDualStackEndpoint(context.Background(), cfg.ConfigSources) + if err != nil { + return err + } + if found { + o.EndpointOptions.UseDualStackEndpoint = value + } + return nil +} + +// resolves FIPS endpoint configuration +func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error { + if len(cfg.ConfigSources) == 0 { + return nil + } + value, found, err := internalConfig.ResolveUseFIPSEndpoint(context.Background(), cfg.ConfigSources) + if err != nil { + return err + } + if found { + o.EndpointOptions.UseFIPSEndpoint = value + } + return nil +} + +func addRequestIDRetrieverMiddleware(stack *middleware.Stack) error { + return awsmiddleware.AddRequestIDRetrieverMiddleware(stack) +} + +func addResponseErrorMiddleware(stack *middleware.Stack) error { + return awshttp.AddResponseErrorMiddleware(stack) +} + +func addRequestResponseLogging(stack *middleware.Stack, o Options) error { + return stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{ + LogRequest: o.ClientLogMode.IsRequest(), + LogRequestWithBody: o.ClientLogMode.IsRequestWithBody(), + LogResponse: o.ClientLogMode.IsResponse(), + LogResponseWithBody: o.ClientLogMode.IsResponseWithBody(), + }, middleware.After) +} + +type endpointDisableHTTPSMiddleware struct { + EndpointDisableHTTPS bool +} + +func (*endpointDisableHTTPSMiddleware) ID() string { + return "endpointDisableHTTPSMiddleware" +} + +func (m *endpointDisableHTTPSMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.EndpointDisableHTTPS && !smithyhttp.GetHostnameImmutable(ctx) { + req.URL.Scheme = "http" + } + + return next.HandleSerialize(ctx, in) + +} +func addendpointDisableHTTPSMiddleware(stack *middleware.Stack, o Options) error { + return stack.Serialize.Insert(&endpointDisableHTTPSMiddleware{ + EndpointDisableHTTPS: o.EndpointOptions.DisableHTTPS, + }, "OperationSerializer", middleware.Before) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go new file mode 100644 index 00000000000..0383bb0bd05 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go @@ -0,0 +1,266 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
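
A minimal construction sketch against the client wiring above, assuming the SDK's companion config module and a placeholder region. Note that New applies the resolvers before the functional options, so an option function that wants different retry behavior should set Retryer (consulted at invoke time) rather than RetryMode (already consumed by resolveRetryer by the time the option runs):

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws/retry"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sso"
)

func main() {
	ctx := context.Background()

	// Shared config carries Region, Credentials, HTTPClient, and friends;
	// NewFromConfig copies these fields into sso.Options before New runs.
	cfg, err := config.LoadDefaultConfig(ctx, config.WithRegion("us-east-1"))
	if err != nil {
		log.Fatal(err)
	}

	client := sso.NewFromConfig(cfg, func(o *sso.Options) {
		// Retryer is read per invokeOperation, so overriding it here takes
		// effect even though resolveRetryer has already run inside New.
		o.Retryer = retry.NewStandard(func(so *retry.StandardOptions) {
			so.MaxAttempts = 5
		})
	})
	_ = client
}

Per-operation overrides take the same shape: passing func(o *sso.Options) { o.RetryMaxAttempts = 1 } to an operation call is picked up by finalizeRetryMaxAttemptOptions, which wraps the client's retryer for that call only.
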
+ +package sso + +import ( + "context" + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" + "github.com/aws/aws-sdk-go-v2/service/sso/types" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns the STS short-term credentials for a given role name that is assigned +// to the user. +func (c *Client) GetRoleCredentials(ctx context.Context, params *GetRoleCredentialsInput, optFns ...func(*Options)) (*GetRoleCredentialsOutput, error) { + if params == nil { + params = &GetRoleCredentialsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetRoleCredentials", params, optFns, c.addOperationGetRoleCredentialsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetRoleCredentialsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetRoleCredentialsInput struct { + + // The token issued by the CreateToken API call. For more information, see + // CreateToken (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) + // in the IAM Identity Center OIDC API Reference Guide. + // + // This member is required. + AccessToken *string + + // The identifier for the AWS account that is assigned to the user. + // + // This member is required. + AccountId *string + + // The friendly name of the role that is assigned to the user. + // + // This member is required. + RoleName *string + + noSmithyDocumentSerde +} + +type GetRoleCredentialsOutput struct { + + // The credentials for the role that is assigned to the user. + RoleCredentials *types.RoleCredentials + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetRoleCredentialsMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpGetRoleCredentials{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpGetRoleCredentials{}, middleware.After) + if err != nil { + return err + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addGetRoleCredentialsResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addOpGetRoleCredentialsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetRoleCredentials(options.Region), middleware.Before); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetRoleCredentials(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetRoleCredentials", + } +} + +type opGetRoleCredentialsResolveEndpointMiddleware struct { + EndpointResolver EndpointResolverV2 + BuiltInResolver builtInParameterResolver +} + +func (*opGetRoleCredentialsResolveEndpointMiddleware) ID() string { + return "ResolveEndpointV2" +} + +func (m *opGetRoleCredentialsResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { + return next.HandleSerialize(ctx, in) + } + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.EndpointResolver == nil { + return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") + } + + params := EndpointParameters{} + + m.BuiltInResolver.ResolveBuiltIns(¶ms) + + var resolvedEndpoint smithyendpoints.Endpoint + resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) + if err != nil { + return out, metadata, fmt.Errorf("failed to resolve 
service endpoint, %w", err) + } + + req.URL = &resolvedEndpoint.URI + + for k := range resolvedEndpoint.Headers { + req.Header.Set( + k, + resolvedEndpoint.Headers.Get(k), + ) + } + + authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) + if err != nil { + var nfe *internalauth.NoAuthenticationSchemesFoundError + if errors.As(err, &nfe) { + // if no auth scheme is found, default to sigv4 + signingName := "awsssoportal" + signingRegion := m.BuiltInResolver.(*builtInResolver).Region + ctx = awsmiddleware.SetSigningName(ctx, signingName) + ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) + + } + var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError + if errors.As(err, &ue) { + return out, metadata, fmt.Errorf( + "This operation requests signer version(s) %v but the client only supports %v", + ue.UnsupportedSchemes, + internalauth.SupportedSchemes, + ) + } + } + + for _, authScheme := range authSchemes { + switch authScheme.(type) { + case *internalauth.AuthenticationSchemeV4: + v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) + var signingName, signingRegion string + if v4Scheme.SigningName == nil { + signingName = "awsssoportal" + } else { + signingName = *v4Scheme.SigningName + } + if v4Scheme.SigningRegion == nil { + signingRegion = m.BuiltInResolver.(*builtInResolver).Region + } else { + signingRegion = *v4Scheme.SigningRegion + } + if v4Scheme.DisableDoubleEncoding != nil { + // The signer sets an equivalent value at client initialization time. + // Setting this context value will cause the signer to extract it + // and override the value set at client initialization time. + ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) + } + ctx = awsmiddleware.SetSigningName(ctx, signingName) + ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) + break + case *internalauth.AuthenticationSchemeV4A: + v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) + if v4aScheme.SigningName == nil { + v4aScheme.SigningName = aws.String("awsssoportal") + } + if v4aScheme.DisableDoubleEncoding != nil { + // The signer sets an equivalent value at client initialization time. + // Setting this context value will cause the signer to extract it + // and override the value set at client initialization time. 
+ ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) + } + ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) + ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) + break + case *internalauth.AuthenticationSchemeNone: + break + } + } + + return next.HandleSerialize(ctx, in) +} + +func addGetRoleCredentialsResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { + return stack.Serialize.Insert(&opGetRoleCredentialsResolveEndpointMiddleware{ + EndpointResolver: options.EndpointResolverV2, + BuiltInResolver: &builtInResolver{ + Region: options.Region, + UseDualStack: options.EndpointOptions.UseDualStackEndpoint, + UseFIPS: options.EndpointOptions.UseFIPSEndpoint, + Endpoint: options.BaseEndpoint, + }, + }, "ResolveEndpoint", middleware.After) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go new file mode 100644 index 00000000000..cc28543f8c3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go @@ -0,0 +1,361 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sso + +import ( + "context" + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" + "github.com/aws/aws-sdk-go-v2/service/sso/types" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Lists all roles that are assigned to the user for a given AWS account. +func (c *Client) ListAccountRoles(ctx context.Context, params *ListAccountRolesInput, optFns ...func(*Options)) (*ListAccountRolesOutput, error) { + if params == nil { + params = &ListAccountRolesInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListAccountRoles", params, optFns, c.addOperationListAccountRolesMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListAccountRolesOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListAccountRolesInput struct { + + // The token issued by the CreateToken API call. For more information, see + // CreateToken (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) + // in the IAM Identity Center OIDC API Reference Guide. + // + // This member is required. + AccessToken *string + + // The identifier for the AWS account that is assigned to the user. + // + // This member is required. + AccountId *string + + // The number of items that clients can request per page. + MaxResults *int32 + + // The page token from the previous response output when you request subsequent + // pages. + NextToken *string + + noSmithyDocumentSerde +} + +type ListAccountRolesOutput struct { + + // The page token client that is used to retrieve the list of accounts. + NextToken *string + + // A paginated response with the list of roles and the next token if more results + // are available. + RoleList []types.RoleInfo + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListAccountRolesMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpListAccountRoles{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpListAccountRoles{}, middleware.After) + if err != nil { + return err + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addListAccountRolesResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addOpListAccountRolesValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListAccountRoles(options.Region), middleware.Before); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + return nil +} + +// ListAccountRolesAPIClient is a client that implements the ListAccountRoles +// operation. +type ListAccountRolesAPIClient interface { + ListAccountRoles(context.Context, *ListAccountRolesInput, ...func(*Options)) (*ListAccountRolesOutput, error) +} + +var _ ListAccountRolesAPIClient = (*Client)(nil) + +// ListAccountRolesPaginatorOptions is the paginator options for ListAccountRoles +type ListAccountRolesPaginatorOptions struct { + // The number of items that clients can request per page. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. 
+ StopOnDuplicateToken bool +} + +// ListAccountRolesPaginator is a paginator for ListAccountRoles +type ListAccountRolesPaginator struct { + options ListAccountRolesPaginatorOptions + client ListAccountRolesAPIClient + params *ListAccountRolesInput + nextToken *string + firstPage bool +} + +// NewListAccountRolesPaginator returns a new ListAccountRolesPaginator +func NewListAccountRolesPaginator(client ListAccountRolesAPIClient, params *ListAccountRolesInput, optFns ...func(*ListAccountRolesPaginatorOptions)) *ListAccountRolesPaginator { + if params == nil { + params = &ListAccountRolesInput{} + } + + options := ListAccountRolesPaginatorOptions{} + if params.MaxResults != nil { + options.Limit = *params.MaxResults + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListAccountRolesPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.NextToken, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListAccountRolesPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next ListAccountRoles page. +func (p *ListAccountRolesPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListAccountRolesOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.NextToken = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxResults = limit + + result, err := p.client.ListAccountRoles(ctx, ¶ms, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextToken + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +func newServiceMetadataMiddleware_opListAccountRoles(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ListAccountRoles", + } +} + +type opListAccountRolesResolveEndpointMiddleware struct { + EndpointResolver EndpointResolverV2 + BuiltInResolver builtInParameterResolver +} + +func (*opListAccountRolesResolveEndpointMiddleware) ID() string { + return "ResolveEndpointV2" +} + +func (m *opListAccountRolesResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { + return next.HandleSerialize(ctx, in) + } + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.EndpointResolver == nil { + return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") + } + + params := EndpointParameters{} + + m.BuiltInResolver.ResolveBuiltIns(¶ms) + + var resolvedEndpoint smithyendpoints.Endpoint + resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) + if err != nil { + return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) + } + + req.URL = &resolvedEndpoint.URI + + for k := range resolvedEndpoint.Headers { + req.Header.Set( + k, + resolvedEndpoint.Headers.Get(k), + ) + } + + authSchemes, err := 
internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) + if err != nil { + var nfe *internalauth.NoAuthenticationSchemesFoundError + if errors.As(err, &nfe) { + // if no auth scheme is found, default to sigv4 + signingName := "awsssoportal" + signingRegion := m.BuiltInResolver.(*builtInResolver).Region + ctx = awsmiddleware.SetSigningName(ctx, signingName) + ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) + + } + var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError + if errors.As(err, &ue) { + return out, metadata, fmt.Errorf( + "This operation requests signer version(s) %v but the client only supports %v", + ue.UnsupportedSchemes, + internalauth.SupportedSchemes, + ) + } + } + + for _, authScheme := range authSchemes { + switch authScheme.(type) { + case *internalauth.AuthenticationSchemeV4: + v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) + var signingName, signingRegion string + if v4Scheme.SigningName == nil { + signingName = "awsssoportal" + } else { + signingName = *v4Scheme.SigningName + } + if v4Scheme.SigningRegion == nil { + signingRegion = m.BuiltInResolver.(*builtInResolver).Region + } else { + signingRegion = *v4Scheme.SigningRegion + } + if v4Scheme.DisableDoubleEncoding != nil { + // The signer sets an equivalent value at client initialization time. + // Setting this context value will cause the signer to extract it + // and override the value set at client initialization time. + ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) + } + ctx = awsmiddleware.SetSigningName(ctx, signingName) + ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) + break + case *internalauth.AuthenticationSchemeV4A: + v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) + if v4aScheme.SigningName == nil { + v4aScheme.SigningName = aws.String("awsssoportal") + } + if v4aScheme.DisableDoubleEncoding != nil { + // The signer sets an equivalent value at client initialization time. + // Setting this context value will cause the signer to extract it + // and override the value set at client initialization time. + ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) + } + ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) + ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) + break + case *internalauth.AuthenticationSchemeNone: + break + } + } + + return next.HandleSerialize(ctx, in) +} + +func addListAccountRolesResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { + return stack.Serialize.Insert(&opListAccountRolesResolveEndpointMiddleware{ + EndpointResolver: options.EndpointResolverV2, + BuiltInResolver: &builtInResolver{ + Region: options.Region, + UseDualStack: options.EndpointOptions.UseDualStackEndpoint, + UseFIPS: options.EndpointOptions.UseFIPSEndpoint, + Endpoint: options.BaseEndpoint, + }, + }, "ResolveEndpoint", middleware.After) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go new file mode 100644 index 00000000000..567f6c6691e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go @@ -0,0 +1,358 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
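
The ListAccountRolesPaginator above owns the NextToken round-tripping, including the StopOnDuplicateToken guard; a short usage sketch, with the access token and account ID as placeholder inputs:

package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sso"
)

func printRoles(ctx context.Context, client *sso.Client, accessToken, accountID string) error {
	p := sso.NewListAccountRolesPaginator(client, &sso.ListAccountRolesInput{
		AccessToken: aws.String(accessToken),
		AccountId:   aws.String(accountID),
		MaxResults:  aws.Int32(20), // copied into the paginator's Limit
	})
	for p.HasMorePages() {
		page, err := p.NextPage(ctx)
		if err != nil {
			return err
		}
		for _, role := range page.RoleList {
			fmt.Println(aws.ToString(role.RoleName))
		}
	}
	return nil
}
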
+ +package sso + +import ( + "context" + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" + "github.com/aws/aws-sdk-go-v2/service/sso/types" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Lists all AWS accounts assigned to the user. These AWS accounts are assigned by +// the administrator of the account. For more information, see Assign User Access (https://docs.aws.amazon.com/singlesignon/latest/userguide/useraccess.html#assignusers) +// in the IAM Identity Center User Guide. This operation returns a paginated +// response. +func (c *Client) ListAccounts(ctx context.Context, params *ListAccountsInput, optFns ...func(*Options)) (*ListAccountsOutput, error) { + if params == nil { + params = &ListAccountsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListAccounts", params, optFns, c.addOperationListAccountsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListAccountsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListAccountsInput struct { + + // The token issued by the CreateToken API call. For more information, see + // CreateToken (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) + // in the IAM Identity Center OIDC API Reference Guide. + // + // This member is required. + AccessToken *string + + // This is the number of items clients can request per page. + MaxResults *int32 + + // (Optional) When requesting subsequent pages, this is the page token from the + // previous response output. + NextToken *string + + noSmithyDocumentSerde +} + +type ListAccountsOutput struct { + + // A paginated response with the list of account information and the next token if + // more results are available. + AccountList []types.AccountInfo + + // The page token client that is used to retrieve the list of accounts. + NextToken *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListAccountsMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpListAccounts{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpListAccounts{}, middleware.After) + if err != nil { + return err + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addListAccountsResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addOpListAccountsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListAccounts(options.Region), middleware.Before); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + return nil +} + +// ListAccountsAPIClient is a client that implements the ListAccounts operation. +type ListAccountsAPIClient interface { + ListAccounts(context.Context, *ListAccountsInput, ...func(*Options)) (*ListAccountsOutput, error) +} + +var _ ListAccountsAPIClient = (*Client)(nil) + +// ListAccountsPaginatorOptions is the paginator options for ListAccounts +type ListAccountsPaginatorOptions struct { + // This is the number of items clients can request per page. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. 
+ StopOnDuplicateToken bool +} + +// ListAccountsPaginator is a paginator for ListAccounts +type ListAccountsPaginator struct { + options ListAccountsPaginatorOptions + client ListAccountsAPIClient + params *ListAccountsInput + nextToken *string + firstPage bool +} + +// NewListAccountsPaginator returns a new ListAccountsPaginator +func NewListAccountsPaginator(client ListAccountsAPIClient, params *ListAccountsInput, optFns ...func(*ListAccountsPaginatorOptions)) *ListAccountsPaginator { + if params == nil { + params = &ListAccountsInput{} + } + + options := ListAccountsPaginatorOptions{} + if params.MaxResults != nil { + options.Limit = *params.MaxResults + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListAccountsPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.NextToken, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListAccountsPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next ListAccounts page. +func (p *ListAccountsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListAccountsOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.NextToken = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxResults = limit + + result, err := p.client.ListAccounts(ctx, ¶ms, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextToken + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +func newServiceMetadataMiddleware_opListAccounts(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ListAccounts", + } +} + +type opListAccountsResolveEndpointMiddleware struct { + EndpointResolver EndpointResolverV2 + BuiltInResolver builtInParameterResolver +} + +func (*opListAccountsResolveEndpointMiddleware) ID() string { + return "ResolveEndpointV2" +} + +func (m *opListAccountsResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { + return next.HandleSerialize(ctx, in) + } + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.EndpointResolver == nil { + return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") + } + + params := EndpointParameters{} + + m.BuiltInResolver.ResolveBuiltIns(¶ms) + + var resolvedEndpoint smithyendpoints.Endpoint + resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) + if err != nil { + return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) + } + + req.URL = &resolvedEndpoint.URI + + for k := range resolvedEndpoint.Headers { + req.Header.Set( + k, + resolvedEndpoint.Headers.Get(k), + ) + } + + authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) + if err != nil { + var nfe *internalauth.NoAuthenticationSchemesFoundError + if 
errors.As(err, &nfe) { + // if no auth scheme is found, default to sigv4 + signingName := "awsssoportal" + signingRegion := m.BuiltInResolver.(*builtInResolver).Region + ctx = awsmiddleware.SetSigningName(ctx, signingName) + ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) + + } + var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError + if errors.As(err, &ue) { + return out, metadata, fmt.Errorf( + "This operation requests signer version(s) %v but the client only supports %v", + ue.UnsupportedSchemes, + internalauth.SupportedSchemes, + ) + } + } + + for _, authScheme := range authSchemes { + switch authScheme.(type) { + case *internalauth.AuthenticationSchemeV4: + v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) + var signingName, signingRegion string + if v4Scheme.SigningName == nil { + signingName = "awsssoportal" + } else { + signingName = *v4Scheme.SigningName + } + if v4Scheme.SigningRegion == nil { + signingRegion = m.BuiltInResolver.(*builtInResolver).Region + } else { + signingRegion = *v4Scheme.SigningRegion + } + if v4Scheme.DisableDoubleEncoding != nil { + // The signer sets an equivalent value at client initialization time. + // Setting this context value will cause the signer to extract it + // and override the value set at client initialization time. + ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) + } + ctx = awsmiddleware.SetSigningName(ctx, signingName) + ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) + break + case *internalauth.AuthenticationSchemeV4A: + v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) + if v4aScheme.SigningName == nil { + v4aScheme.SigningName = aws.String("awsssoportal") + } + if v4aScheme.DisableDoubleEncoding != nil { + // The signer sets an equivalent value at client initialization time. + // Setting this context value will cause the signer to extract it + // and override the value set at client initialization time. + ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) + } + ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) + ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) + break + case *internalauth.AuthenticationSchemeNone: + break + } + } + + return next.HandleSerialize(ctx, in) +} + +func addListAccountsResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { + return stack.Serialize.Insert(&opListAccountsResolveEndpointMiddleware{ + EndpointResolver: options.EndpointResolverV2, + BuiltInResolver: &builtInResolver{ + Region: options.Region, + UseDualStack: options.EndpointOptions.UseDualStackEndpoint, + UseFIPS: options.EndpointOptions.UseFIPSEndpoint, + Endpoint: options.BaseEndpoint, + }, + }, "ResolveEndpoint", middleware.After) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go new file mode 100644 index 00000000000..c30da0296f4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go @@ -0,0 +1,261 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
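
ListAccounts composes naturally with GetRoleCredentials from earlier in this diff: enumerate the accounts visible to the token, then mint short-term credentials for a role in each. A sketch under the same assumptions as above; the role name is purely illustrative:

package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sso"
)

func mintAll(ctx context.Context, client *sso.Client, accessToken string) error {
	accounts := sso.NewListAccountsPaginator(client, &sso.ListAccountsInput{
		AccessToken: aws.String(accessToken),
	})
	for accounts.HasMorePages() {
		page, err := accounts.NextPage(ctx)
		if err != nil {
			return err
		}
		for _, acct := range page.AccountList {
			out, err := client.GetRoleCredentials(ctx, &sso.GetRoleCredentialsInput{
				AccessToken: aws.String(accessToken),
				AccountId:   acct.AccountId,
				RoleName:    aws.String("ReadOnlyAccess"), // illustrative role name
			})
			if err != nil {
				return err
			}
			fmt.Printf("%s: access key %s (expires %d)\n",
				aws.ToString(acct.AccountName),
				aws.ToString(out.RoleCredentials.AccessKeyId),
				out.RoleCredentials.Expiration)
		}
	}
	return nil
}
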
+ +package sso + +import ( + "context" + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Removes the locally stored SSO tokens from the client-side cache and sends an +// API call to the IAM Identity Center service to invalidate the corresponding +// server-side IAM Identity Center sign in session. If a user uses IAM Identity +// Center to access the AWS CLI, the user’s IAM Identity Center sign in session is +// used to obtain an IAM session, as specified in the corresponding IAM Identity +// Center permission set. More specifically, IAM Identity Center assumes an IAM +// role in the target account on behalf of the user, and the corresponding +// temporary AWS credentials are returned to the client. After user logout, any +// existing IAM role sessions that were created by using IAM Identity Center +// permission sets continue based on the duration configured in the permission set. +// For more information, see User authentications (https://docs.aws.amazon.com/singlesignon/latest/userguide/authconcept.html) +// in the IAM Identity Center User Guide. +func (c *Client) Logout(ctx context.Context, params *LogoutInput, optFns ...func(*Options)) (*LogoutOutput, error) { + if params == nil { + params = &LogoutInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "Logout", params, optFns, c.addOperationLogoutMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*LogoutOutput) + out.ResultMetadata = metadata + return out, nil +} + +type LogoutInput struct { + + // The token issued by the CreateToken API call. For more information, see + // CreateToken (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) + // in the IAM Identity Center OIDC API Reference Guide. + // + // This member is required. + AccessToken *string + + noSmithyDocumentSerde +} + +type LogoutOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationLogoutMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpLogout{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpLogout{}, middleware.After) + if err != nil { + return err + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addLogoutResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addOpLogoutValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opLogout(options.Region), middleware.Before); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opLogout(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "Logout", + } +} + +type opLogoutResolveEndpointMiddleware struct { + EndpointResolver EndpointResolverV2 + BuiltInResolver builtInParameterResolver +} + +func (*opLogoutResolveEndpointMiddleware) ID() string { + return "ResolveEndpointV2" +} + +func (m *opLogoutResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { + return next.HandleSerialize(ctx, in) + } + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.EndpointResolver == nil { + return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") + } + + params := EndpointParameters{} + + m.BuiltInResolver.ResolveBuiltIns(¶ms) + + var resolvedEndpoint smithyendpoints.Endpoint + resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) + if err != nil { + return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) + } + + req.URL = &resolvedEndpoint.URI + + for k := range resolvedEndpoint.Headers { + req.Header.Set( 
+ k, + resolvedEndpoint.Headers.Get(k), + ) + } + + authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) + if err != nil { + var nfe *internalauth.NoAuthenticationSchemesFoundError + if errors.As(err, &nfe) { + // if no auth scheme is found, default to sigv4 + signingName := "awsssoportal" + signingRegion := m.BuiltInResolver.(*builtInResolver).Region + ctx = awsmiddleware.SetSigningName(ctx, signingName) + ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) + + } + var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError + if errors.As(err, &ue) { + return out, metadata, fmt.Errorf( + "This operation requests signer version(s) %v but the client only supports %v", + ue.UnsupportedSchemes, + internalauth.SupportedSchemes, + ) + } + } + + for _, authScheme := range authSchemes { + switch authScheme.(type) { + case *internalauth.AuthenticationSchemeV4: + v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) + var signingName, signingRegion string + if v4Scheme.SigningName == nil { + signingName = "awsssoportal" + } else { + signingName = *v4Scheme.SigningName + } + if v4Scheme.SigningRegion == nil { + signingRegion = m.BuiltInResolver.(*builtInResolver).Region + } else { + signingRegion = *v4Scheme.SigningRegion + } + if v4Scheme.DisableDoubleEncoding != nil { + // The signer sets an equivalent value at client initialization time. + // Setting this context value will cause the signer to extract it + // and override the value set at client initialization time. + ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) + } + ctx = awsmiddleware.SetSigningName(ctx, signingName) + ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) + break + case *internalauth.AuthenticationSchemeV4A: + v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) + if v4aScheme.SigningName == nil { + v4aScheme.SigningName = aws.String("awsssoportal") + } + if v4aScheme.DisableDoubleEncoding != nil { + // The signer sets an equivalent value at client initialization time. + // Setting this context value will cause the signer to extract it + // and override the value set at client initialization time. + ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) + } + ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) + ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) + break + case *internalauth.AuthenticationSchemeNone: + break + } + } + + return next.HandleSerialize(ctx, in) +} + +func addLogoutResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { + return stack.Serialize.Insert(&opLogoutResolveEndpointMiddleware{ + EndpointResolver: options.EndpointResolverV2, + BuiltInResolver: &builtInResolver{ + Region: options.Region, + UseDualStack: options.EndpointOptions.UseDualStackEndpoint, + UseFIPS: options.EndpointOptions.UseFIPSEndpoint, + Endpoint: options.BaseEndpoint, + }, + }, "ResolveEndpoint", middleware.After) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/deserializers.go new file mode 100644 index 00000000000..8bba205f435 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/deserializers.go @@ -0,0 +1,1151 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
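
Given the Logout semantics documented above (the server-side sign-in session is invalidated, while role sessions already minted from permission sets run out their configured duration), the call itself is a one-liner around the cached token:

package main

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sso"
)

func logout(ctx context.Context, client *sso.Client, accessToken string) error {
	_, err := client.Logout(ctx, &sso.LogoutInput{
		AccessToken: aws.String(accessToken),
	})
	return err
}

The deserializers in the file that opens here map each wire-level error code (InvalidRequestException, ResourceNotFoundException, TooManyRequestsException, UnauthorizedException) to a typed exception, so callers can branch with errors.As instead of matching strings. A sketch, assuming the generated exception types from the sso/types package that these deserializers target:

package main

import (
	"errors"

	"github.com/aws/aws-sdk-go-v2/service/sso/types"
)

// classify maps the typed exceptions produced by the deserializers
// to a coarse action for the caller.
func classify(err error) string {
	var unauthorized *types.UnauthorizedException
	if errors.As(err, &unauthorized) {
		return "re-authenticate" // token invalid or expired
	}
	var throttled *types.TooManyRequestsException
	if errors.As(err, &throttled) {
		return "back off" // beyond what the client retryer already did
	}
	return "unhandled"
}
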
+ +package sso + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws/protocol/restjson" + "github.com/aws/aws-sdk-go-v2/service/sso/types" + smithy "github.com/aws/smithy-go" + smithyio "github.com/aws/smithy-go/io" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" + "io" + "io/ioutil" + "strings" +) + +type awsRestjson1_deserializeOpGetRoleCredentials struct { +} + +func (*awsRestjson1_deserializeOpGetRoleCredentials) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpGetRoleCredentials) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorGetRoleCredentials(response, &metadata) + } + output := &GetRoleCredentialsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentGetRoleCredentialsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorGetRoleCredentials(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidRequestException", 
errorCode): + return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("TooManyRequestsException", errorCode): + return awsRestjson1_deserializeErrorTooManyRequestsException(response, errorBody) + + case strings.EqualFold("UnauthorizedException", errorCode): + return awsRestjson1_deserializeErrorUnauthorizedException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentGetRoleCredentialsOutput(v **GetRoleCredentialsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *GetRoleCredentialsOutput + if *v == nil { + sv = &GetRoleCredentialsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "roleCredentials": + if err := awsRestjson1_deserializeDocumentRoleCredentials(&sv.RoleCredentials, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpListAccountRoles struct { +} + +func (*awsRestjson1_deserializeOpListAccountRoles) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpListAccountRoles) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorListAccountRoles(response, &metadata) + } + output := &ListAccountRolesOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentListAccountRolesOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorListAccountRoles(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := 
bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidRequestException", errorCode): + return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("TooManyRequestsException", errorCode): + return awsRestjson1_deserializeErrorTooManyRequestsException(response, errorBody) + + case strings.EqualFold("UnauthorizedException", errorCode): + return awsRestjson1_deserializeErrorUnauthorizedException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentListAccountRolesOutput(v **ListAccountRolesOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListAccountRolesOutput + if *v == nil { + sv = &ListAccountRolesOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "nextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NextTokenType to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + case "roleList": + if err := awsRestjson1_deserializeDocumentRoleListType(&sv.RoleList, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpListAccounts struct { +} + +func (*awsRestjson1_deserializeOpListAccounts) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpListAccounts) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorListAccounts(response, &metadata) + } + output := &ListAccountsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body 
:= io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentListAccountsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorListAccounts(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidRequestException", errorCode): + return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("TooManyRequestsException", errorCode): + return awsRestjson1_deserializeErrorTooManyRequestsException(response, errorBody) + + case strings.EqualFold("UnauthorizedException", errorCode): + return awsRestjson1_deserializeErrorUnauthorizedException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentListAccountsOutput(v **ListAccountsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListAccountsOutput + if *v == nil { + sv = &ListAccountsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "accountList": + if err := awsRestjson1_deserializeDocumentAccountListType(&sv.AccountList, value); err != nil { + return err + } + + case "nextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NextTokenType to be of type string, got %T 
instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpLogout struct { +} + +func (*awsRestjson1_deserializeOpLogout) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpLogout) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorLogout(response, &metadata) + } + output := &LogoutOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorLogout(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidRequestException", errorCode): + return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody) + + case strings.EqualFold("TooManyRequestsException", errorCode): + return awsRestjson1_deserializeErrorTooManyRequestsException(response, errorBody) + + case strings.EqualFold("UnauthorizedException", errorCode): + return awsRestjson1_deserializeErrorUnauthorizedException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeErrorInvalidRequestException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.InvalidRequestException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, 
ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentInvalidRequestException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorResourceNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.ResourceNotFoundException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentResourceNotFoundException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorTooManyRequestsException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.TooManyRequestsException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentTooManyRequestsException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorUnauthorizedException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.UnauthorizedException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentUnauthorizedException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + 
errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeDocumentAccountInfo(v **types.AccountInfo, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.AccountInfo + if *v == nil { + sv = &types.AccountInfo{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "accountId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected AccountIdType to be of type string, got %T instead", value) + } + sv.AccountId = ptr.String(jtv) + } + + case "accountName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected AccountNameType to be of type string, got %T instead", value) + } + sv.AccountName = ptr.String(jtv) + } + + case "emailAddress": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected EmailAddressType to be of type string, got %T instead", value) + } + sv.EmailAddress = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentAccountListType(v *[]types.AccountInfo, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.AccountInfo + if *v == nil { + cv = []types.AccountInfo{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.AccountInfo + destAddr := &col + if err := awsRestjson1_deserializeDocumentAccountInfo(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsRestjson1_deserializeDocumentInvalidRequestException(v **types.InvalidRequestException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InvalidRequestException + if *v == nil { + sv = &types.InvalidRequestException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentResourceNotFoundException(v **types.ResourceNotFoundException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ResourceNotFoundException + if *v == nil { + sv = &types.ResourceNotFoundException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + 
+func awsRestjson1_deserializeDocumentRoleCredentials(v **types.RoleCredentials, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.RoleCredentials + if *v == nil { + sv = &types.RoleCredentials{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "accessKeyId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected AccessKeyType to be of type string, got %T instead", value) + } + sv.AccessKeyId = ptr.String(jtv) + } + + case "expiration": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected ExpirationTimestampType to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.Expiration = i64 + } + + case "secretAccessKey": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected SecretAccessKeyType to be of type string, got %T instead", value) + } + sv.SecretAccessKey = ptr.String(jtv) + } + + case "sessionToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected SessionTokenType to be of type string, got %T instead", value) + } + sv.SessionToken = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentRoleInfo(v **types.RoleInfo, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.RoleInfo + if *v == nil { + sv = &types.RoleInfo{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "accountId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected AccountIdType to be of type string, got %T instead", value) + } + sv.AccountId = ptr.String(jtv) + } + + case "roleName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RoleNameType to be of type string, got %T instead", value) + } + sv.RoleName = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentRoleListType(v *[]types.RoleInfo, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.RoleInfo + if *v == nil { + cv = []types.RoleInfo{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.RoleInfo + destAddr := &col + if err := awsRestjson1_deserializeDocumentRoleInfo(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsRestjson1_deserializeDocumentTooManyRequestsException(v **types.TooManyRequestsException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.TooManyRequestsException + if *v == nil { + sv = 
&types.TooManyRequestsException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "message":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value)
+				}
+				sv.Message = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsRestjson1_deserializeDocumentUnauthorizedException(v **types.UnauthorizedException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.UnauthorizedException
+	if *v == nil {
+		sv = &types.UnauthorizedException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "message":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value)
+				}
+				sv.Message = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/doc.go
new file mode 100644
index 00000000000..59456d5dc27
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/doc.go
@@ -0,0 +1,21 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+// Package sso provides the API client, operations, and parameter types for AWS
+// Single Sign-On.
+//
+// AWS IAM Identity Center (successor to AWS Single Sign-On) Portal is a web
+// service that makes it easy for you to assign user access to IAM Identity Center
+// resources such as the AWS access portal. Users can get AWS account applications
+// and roles assigned to them and get federated into the application. Although AWS
+// Single Sign-On was renamed, the sso and identitystore API namespaces will
+// continue to retain their original name for backward compatibility purposes. For
+// more information, see IAM Identity Center rename (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html#renamed)
+// . This reference guide describes the IAM Identity Center Portal operations that
+// you can call programmatically and includes detailed information on data types and
+// errors. AWS provides SDKs that consist of libraries and sample code for various
+// programming languages and platforms, such as Java, Ruby, .Net, iOS, or Android.
+// The SDKs provide a convenient way to create programmatic access to IAM Identity
+// Center and other AWS services. For more information about the AWS SDKs,
+// including how to download and install them, see Tools for Amazon Web Services (http://aws.amazon.com/tools/)
+// .
+package sso
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go
new file mode 100644
index 00000000000..11538705946
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go
@@ -0,0 +1,519 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
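The package documentation above is the only prose in this vendored module, so one point of orientation: the generated serializer and deserializer middleware in deserializers.go is never invoked directly; it is installed in the client's middleware stack, and callers see only the typed inputs and outputs. A minimal sketch of that call path, assuming a valid IAM Identity Center session; the token, account ID, and role name are placeholders, not values from this patch:

    package main

    import (
        "context"
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/config"
        "github.com/aws/aws-sdk-go-v2/service/sso"
    )

    func main() {
        ctx := context.Background()
        cfg, err := config.LoadDefaultConfig(ctx, config.WithRegion("us-east-1"))
        if err != nil {
            log.Fatal(err)
        }
        client := sso.NewFromConfig(cfg)

        // GetRoleCredentials passes through the generated middleware: the
        // serializer binds the token to the X-Amz-Sso_bearer_token header,
        // and awsRestjson1_deserializeOpGetRoleCredentials decodes the body.
        out, err := client.GetRoleCredentials(ctx, &sso.GetRoleCredentialsInput{
            AccessToken: aws.String("<sso-access-token>"), // placeholder
            AccountId:   aws.String("123456789012"),       // placeholder
            RoleName:    aws.String("ReadOnly"),           // placeholder
        })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(aws.ToString(out.RoleCredentials.AccessKeyId))
    }

On a non-2xx status, the error switch in awsRestjson1_deserializeOpErrorGetRoleCredentials maps the response to one of the typed errors deserialized above before it ever reaches the caller.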
+
+package sso
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"github.com/aws/aws-sdk-go-v2/aws"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources"
+	"github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn"
+	internalendpoints "github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints"
+	smithyendpoints "github.com/aws/smithy-go/endpoints"
+	"github.com/aws/smithy-go/middleware"
+	"github.com/aws/smithy-go/ptr"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+	"net/http"
+	"net/url"
+	"os"
+	"strings"
+)
+
+// EndpointResolverOptions is the service endpoint resolver options
+type EndpointResolverOptions = internalendpoints.Options
+
+// EndpointResolver interface for resolving service endpoints.
+type EndpointResolver interface {
+	ResolveEndpoint(region string, options EndpointResolverOptions) (aws.Endpoint, error)
+}
+
+var _ EndpointResolver = &internalendpoints.Resolver{}
+
+// NewDefaultEndpointResolver constructs a new service endpoint resolver
+func NewDefaultEndpointResolver() *internalendpoints.Resolver {
+	return internalendpoints.New()
+}
+
+// EndpointResolverFunc is a helper utility that wraps a function so it satisfies
+// the EndpointResolver interface. This is useful when you want to add additional
+// endpoint resolving logic, or stub out specific endpoints with custom values.
+type EndpointResolverFunc func(region string, options EndpointResolverOptions) (aws.Endpoint, error)
+
+func (fn EndpointResolverFunc) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) {
+	return fn(region, options)
+}
+
+// EndpointResolverFromURL returns an EndpointResolver configured using the
+// provided endpoint url. By default, the resolved endpoint resolver uses the
+// client region as signing region, and the endpoint source is set to
+// EndpointSourceCustom. You can provide functional options to configure endpoint
+// values for the resolved endpoint.
+func EndpointResolverFromURL(url string, optFns ...func(*aws.Endpoint)) EndpointResolver { + e := aws.Endpoint{URL: url, Source: aws.EndpointSourceCustom} + for _, fn := range optFns { + fn(&e) + } + + return EndpointResolverFunc( + func(region string, options EndpointResolverOptions) (aws.Endpoint, error) { + if len(e.SigningRegion) == 0 { + e.SigningRegion = region + } + return e, nil + }, + ) +} + +type ResolveEndpoint struct { + Resolver EndpointResolver + Options EndpointResolverOptions +} + +func (*ResolveEndpoint) ID() string { + return "ResolveEndpoint" +} + +func (m *ResolveEndpoint) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + if !awsmiddleware.GetRequiresLegacyEndpoints(ctx) { + return next.HandleSerialize(ctx, in) + } + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.Resolver == nil { + return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") + } + + eo := m.Options + eo.Logger = middleware.GetLogger(ctx) + + var endpoint aws.Endpoint + endpoint, err = m.Resolver.ResolveEndpoint(awsmiddleware.GetRegion(ctx), eo) + if err != nil { + nf := (&aws.EndpointNotFoundError{}) + if errors.As(err, &nf) { + ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, false) + return next.HandleSerialize(ctx, in) + } + return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) + } + + req.URL, err = url.Parse(endpoint.URL) + if err != nil { + return out, metadata, fmt.Errorf("failed to parse endpoint URL: %w", err) + } + + if len(awsmiddleware.GetSigningName(ctx)) == 0 { + signingName := endpoint.SigningName + if len(signingName) == 0 { + signingName = "awsssoportal" + } + ctx = awsmiddleware.SetSigningName(ctx, signingName) + } + ctx = awsmiddleware.SetEndpointSource(ctx, endpoint.Source) + ctx = smithyhttp.SetHostnameImmutable(ctx, endpoint.HostnameImmutable) + ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion) + ctx = awsmiddleware.SetPartitionID(ctx, endpoint.PartitionID) + return next.HandleSerialize(ctx, in) +} +func addResolveEndpointMiddleware(stack *middleware.Stack, o Options) error { + return stack.Serialize.Insert(&ResolveEndpoint{ + Resolver: o.EndpointResolver, + Options: o.EndpointOptions, + }, "OperationSerializer", middleware.Before) +} + +func removeResolveEndpointMiddleware(stack *middleware.Stack) error { + _, err := stack.Serialize.Remove((&ResolveEndpoint{}).ID()) + return err +} + +type wrappedEndpointResolver struct { + awsResolver aws.EndpointResolverWithOptions +} + +func (w *wrappedEndpointResolver) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { + return w.awsResolver.ResolveEndpoint(ServiceID, region, options) +} + +type awsEndpointResolverAdaptor func(service, region string) (aws.Endpoint, error) + +func (a awsEndpointResolverAdaptor) ResolveEndpoint(service, region string, options ...interface{}) (aws.Endpoint, error) { + return a(service, region) +} + +var _ aws.EndpointResolverWithOptions = awsEndpointResolverAdaptor(nil) + +// withEndpointResolver returns an aws.EndpointResolverWithOptions that first delegates endpoint resolution to the awsResolver. 
+// If awsResolver returns aws.EndpointNotFoundError error, the v1 resolver middleware will swallow the error, +// and set an appropriate context flag such that fallback will occur when EndpointResolverV2 is invoked +// via its middleware. +// +// If another error (besides aws.EndpointNotFoundError) is returned, then that error will be propagated. +func withEndpointResolver(awsResolver aws.EndpointResolver, awsResolverWithOptions aws.EndpointResolverWithOptions) EndpointResolver { + var resolver aws.EndpointResolverWithOptions + + if awsResolverWithOptions != nil { + resolver = awsResolverWithOptions + } else if awsResolver != nil { + resolver = awsEndpointResolverAdaptor(awsResolver.ResolveEndpoint) + } + + return &wrappedEndpointResolver{ + awsResolver: resolver, + } +} + +func finalizeClientEndpointResolverOptions(options *Options) { + options.EndpointOptions.LogDeprecated = options.ClientLogMode.IsDeprecatedUsage() + + if len(options.EndpointOptions.ResolvedRegion) == 0 { + const fipsInfix = "-fips-" + const fipsPrefix = "fips-" + const fipsSuffix = "-fips" + + if strings.Contains(options.Region, fipsInfix) || + strings.Contains(options.Region, fipsPrefix) || + strings.Contains(options.Region, fipsSuffix) { + options.EndpointOptions.ResolvedRegion = strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll( + options.Region, fipsInfix, "-"), fipsPrefix, ""), fipsSuffix, "") + options.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateEnabled + } + } + +} + +func resolveEndpointResolverV2(options *Options) { + if options.EndpointResolverV2 == nil { + options.EndpointResolverV2 = NewDefaultEndpointResolverV2() + } +} + +func resolveBaseEndpoint(cfg aws.Config, o *Options) { + if cfg.BaseEndpoint != nil { + o.BaseEndpoint = cfg.BaseEndpoint + } + + _, g := os.LookupEnv("AWS_ENDPOINT_URL") + _, s := os.LookupEnv("AWS_ENDPOINT_URL_SSO") + + if g && !s { + return + } + + value, found, err := internalConfig.ResolveServiceBaseEndpoint(context.Background(), "SSO", cfg.ConfigSources) + if found && err == nil { + o.BaseEndpoint = &value + } +} + +// Utility function to aid with translating pseudo-regions to classical regions +// with the appropriate setting indicated by the pseudo-region +func mapPseudoRegion(pr string) (region string, fips aws.FIPSEndpointState) { + const fipsInfix = "-fips-" + const fipsPrefix = "fips-" + const fipsSuffix = "-fips" + + if strings.Contains(pr, fipsInfix) || + strings.Contains(pr, fipsPrefix) || + strings.Contains(pr, fipsSuffix) { + region = strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll( + pr, fipsInfix, "-"), fipsPrefix, ""), fipsSuffix, "") + fips = aws.FIPSEndpointStateEnabled + } else { + region = pr + } + + return region, fips +} + +// builtInParameterResolver is the interface responsible for resolving BuiltIn +// values during the sourcing of EndpointParameters +type builtInParameterResolver interface { + ResolveBuiltIns(*EndpointParameters) error +} + +// builtInResolver resolves modeled BuiltIn values using only the members defined +// below. +type builtInResolver struct { + // The AWS region used to dispatch the request. + Region string + + // Sourced BuiltIn value in a historical enabled or disabled state. + UseDualStack aws.DualStackEndpointState + + // Sourced BuiltIn value in a historical enabled or disabled state. + UseFIPS aws.FIPSEndpointState + + // Base endpoint that can potentially be modified during Endpoint resolution. + Endpoint *string +} + +// Invoked at runtime to resolve BuiltIn Values. 
Only resolution code specific to
+// each BuiltIn value is generated.
+func (b *builtInResolver) ResolveBuiltIns(params *EndpointParameters) error {
+
+	region, _ := mapPseudoRegion(b.Region)
+	if len(region) == 0 {
+		return fmt.Errorf("Could not resolve AWS::Region")
+	} else {
+		params.Region = aws.String(region)
+	}
+	if b.UseDualStack == aws.DualStackEndpointStateEnabled {
+		params.UseDualStack = aws.Bool(true)
+	} else {
+		params.UseDualStack = aws.Bool(false)
+	}
+	if b.UseFIPS == aws.FIPSEndpointStateEnabled {
+		params.UseFIPS = aws.Bool(true)
+	} else {
+		params.UseFIPS = aws.Bool(false)
+	}
+	params.Endpoint = b.Endpoint
+	return nil
+}
+
+// EndpointParameters provides the parameters that influence how endpoints are
+// resolved.
+type EndpointParameters struct {
+	// The AWS region used to dispatch the request.
+	//
+	// Parameter is
+	// required.
+	//
+	// AWS::Region
+	Region *string
+
+	// When true, use the dual-stack endpoint. If the configured endpoint does not
+	// support dual-stack, dispatching the request MAY return an error.
+	//
+	// Defaults to
+	// false if no value is provided.
+	//
+	// AWS::UseDualStack
+	UseDualStack *bool
+
+	// When true, send this request to the FIPS-compliant regional endpoint. If the
+	// configured endpoint does not have a FIPS compliant endpoint, dispatching the
+	// request will return an error.
+	//
+	// Defaults to false if no value is
+	// provided.
+	//
+	// AWS::UseFIPS
+	UseFIPS *bool
+
+	// Override the endpoint used to send this request
+	//
+	// Parameter is
+	// required.
+	//
+	// SDK::Endpoint
+	Endpoint *string
+}
+
+// ValidateRequired validates required parameters are set.
+func (p EndpointParameters) ValidateRequired() error {
+	if p.UseDualStack == nil {
+		return fmt.Errorf("parameter UseDualStack is required")
+	}
+
+	if p.UseFIPS == nil {
+		return fmt.Errorf("parameter UseFIPS is required")
+	}
+
+	return nil
+}
+
+// WithDefaults returns a shallow copy of EndpointParameters with default values
+// applied to members where applicable.
+func (p EndpointParameters) WithDefaults() EndpointParameters {
+	if p.UseDualStack == nil {
+		p.UseDualStack = ptr.Bool(false)
+	}
+
+	if p.UseFIPS == nil {
+		p.UseFIPS = ptr.Bool(false)
+	}
+	return p
+}
+
+// EndpointResolverV2 provides the interface for resolving service endpoints.
+type EndpointResolverV2 interface {
+	// ResolveEndpoint attempts to resolve the endpoint with the provided options,
+	// returning the endpoint if found. Otherwise an error is returned.
+	ResolveEndpoint(ctx context.Context, params EndpointParameters) (
+		smithyendpoints.Endpoint, error,
+	)
+}
+
+// resolver provides the implementation for resolving endpoints.
+type resolver struct{}
+
+func NewDefaultEndpointResolverV2() EndpointResolverV2 {
+	return &resolver{}
+}
+
+// ResolveEndpoint attempts to resolve the endpoint with the provided options,
+// returning the endpoint if found. Otherwise an error is returned.
+func (r *resolver) ResolveEndpoint( + ctx context.Context, params EndpointParameters, +) ( + endpoint smithyendpoints.Endpoint, err error, +) { + params = params.WithDefaults() + if err = params.ValidateRequired(); err != nil { + return endpoint, fmt.Errorf("endpoint parameters are not valid, %w", err) + } + _UseDualStack := *params.UseDualStack + _UseFIPS := *params.UseFIPS + + if exprVal := params.Endpoint; exprVal != nil { + _Endpoint := *exprVal + _ = _Endpoint + if _UseFIPS == true { + return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: FIPS and custom endpoint are not supported") + } + if _UseDualStack == true { + return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Dualstack and custom endpoint are not supported") + } + uriString := _Endpoint + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + if exprVal := params.Region; exprVal != nil { + _Region := *exprVal + _ = _Region + if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { + _PartitionResult := *exprVal + _ = _PartitionResult + if _UseFIPS == true { + if _UseDualStack == true { + if true == _PartitionResult.SupportsFIPS { + if true == _PartitionResult.SupportsDualStack { + uriString := func() string { + var out strings.Builder + out.WriteString("https://portal.sso-fips.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DualStackDnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS and DualStack are enabled, but this partition does not support one or both") + } + } + if _UseFIPS == true { + if true == _PartitionResult.SupportsFIPS { + if "aws-us-gov" == _PartitionResult.Name { + uriString := func() string { + var out strings.Builder + out.WriteString("https://portal.sso.") + out.WriteString(_Region) + out.WriteString(".amazonaws.com") + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://portal.sso-fips.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS is enabled but this partition does not support FIPS") + } + if _UseDualStack == true { + if true == _PartitionResult.SupportsDualStack { + uriString := func() string { + var out strings.Builder + out.WriteString("https://portal.sso.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DualStackDnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: 
*uri, + Headers: http.Header{}, + }, nil + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "DualStack is enabled but this partition does not support DualStack") + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://portal.sso.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Missing Region") +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json new file mode 100644 index 00000000000..8e618418710 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json @@ -0,0 +1,33 @@ +{ + "dependencies": { + "github.com/aws/aws-sdk-go-v2": "v1.4.0", + "github.com/aws/aws-sdk-go-v2/internal/configsources": "v0.0.0-00010101000000-000000000000", + "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2": "v2.0.0-00010101000000-000000000000", + "github.com/aws/smithy-go": "v1.4.0", + "github.com/google/go-cmp": "v0.5.4" + }, + "files": [ + "api_client.go", + "api_client_test.go", + "api_op_GetRoleCredentials.go", + "api_op_ListAccountRoles.go", + "api_op_ListAccounts.go", + "api_op_Logout.go", + "deserializers.go", + "doc.go", + "endpoints.go", + "endpoints_config_test.go", + "endpoints_test.go", + "generated.json", + "internal/endpoints/endpoints.go", + "internal/endpoints/endpoints_test.go", + "protocol_test.go", + "serializers.go", + "types/errors.go", + "types/types.go", + "validators.go" + ], + "go": "1.15", + "module": "github.com/aws/aws-sdk-go-v2/service/sso", + "unstable": false +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go new file mode 100644 index 00000000000..cd496510f1c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go @@ -0,0 +1,6 @@ +// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. + +package sso + +// goModuleVersion is the tagged release for this module +const goModuleVersion = "1.17.1" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go new file mode 100644 index 00000000000..f044afde47c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go @@ -0,0 +1,526 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package endpoints + +import ( + "github.com/aws/aws-sdk-go-v2/aws" + endpoints "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2" + "github.com/aws/smithy-go/logging" + "regexp" +) + +// Options is the endpoint resolver configuration options +type Options struct { + // Logger is a logging implementation that log events should be sent to. + Logger logging.Logger + + // LogDeprecated indicates that deprecated endpoints should be logged to the + // provided logger. 
+	LogDeprecated bool
+
+	// ResolvedRegion is used to override the region to be resolved, rather than
+	// using the value passed to the ResolveEndpoint method. This value is used by the
+	// SDK to translate regions like fips-us-east-1 or us-east-1-fips to an alternative
+	// name. You must not set this value directly in your application.
+	ResolvedRegion string
+
+	// DisableHTTPS informs the resolver to return an endpoint that does not use the
+	// HTTPS scheme.
+	DisableHTTPS bool
+
+	// UseDualStackEndpoint specifies the resolver must resolve a dual-stack endpoint.
+	UseDualStackEndpoint aws.DualStackEndpointState
+
+	// UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint.
+	UseFIPSEndpoint aws.FIPSEndpointState
+}
+
+func (o Options) GetResolvedRegion() string {
+	return o.ResolvedRegion
+}
+
+func (o Options) GetDisableHTTPS() bool {
+	return o.DisableHTTPS
+}
+
+func (o Options) GetUseDualStackEndpoint() aws.DualStackEndpointState {
+	return o.UseDualStackEndpoint
+}
+
+func (o Options) GetUseFIPSEndpoint() aws.FIPSEndpointState {
+	return o.UseFIPSEndpoint
+}
+
+func transformToSharedOptions(options Options) endpoints.Options {
+	return endpoints.Options{
+		Logger:               options.Logger,
+		LogDeprecated:        options.LogDeprecated,
+		ResolvedRegion:       options.ResolvedRegion,
+		DisableHTTPS:         options.DisableHTTPS,
+		UseDualStackEndpoint: options.UseDualStackEndpoint,
+		UseFIPSEndpoint:      options.UseFIPSEndpoint,
+	}
+}
+
+// Resolver SSO endpoint resolver
+type Resolver struct {
+	partitions endpoints.Partitions
+}
+
+// ResolveEndpoint resolves the service endpoint for the given region and options
+func (r *Resolver) ResolveEndpoint(region string, options Options) (endpoint aws.Endpoint, err error) {
+	if len(region) == 0 {
+		return endpoint, &aws.MissingRegionError{}
+	}
+
+	opt := transformToSharedOptions(options)
+	return r.partitions.ResolveEndpoint(region, opt)
+}
+
+// New returns a new Resolver
+func New() *Resolver {
+	return &Resolver{
+		partitions: defaultPartitions,
+	}
+}
+
+var partitionRegexp = struct {
+	Aws      *regexp.Regexp
+	AwsCn    *regexp.Regexp
+	AwsIso   *regexp.Regexp
+	AwsIsoB  *regexp.Regexp
+	AwsIsoE  *regexp.Regexp
+	AwsIsoF  *regexp.Regexp
+	AwsUsGov *regexp.Regexp
+}{
+
+	Aws:      regexp.MustCompile("^(us|eu|ap|sa|ca|me|af|il)\\-\\w+\\-\\d+$"),
+	AwsCn:    regexp.MustCompile("^cn\\-\\w+\\-\\d+$"),
+	AwsIso:   regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"),
+	AwsIsoB:  regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"),
+	AwsIsoE:  regexp.MustCompile("^eu\\-isoe\\-\\w+\\-\\d+$"),
+	AwsIsoF:  regexp.MustCompile("^us\\-isof\\-\\w+\\-\\d+$"),
+	AwsUsGov: regexp.MustCompile("^us\\-gov\\-\\w+\\-\\d+$"),
+}
+
+var defaultPartitions = endpoints.Partitions{
+	{
+		ID: "aws",
+		Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
+			{
+				Variant: endpoints.DualStackVariant,
+			}: {
+				Hostname:          "portal.sso.{region}.api.aws",
+				Protocols:         []string{"https"},
+				SignatureVersions: []string{"v4"},
+			},
+			{
+				Variant: endpoints.FIPSVariant,
+			}: {
+				Hostname:          "portal.sso-fips.{region}.amazonaws.com",
+				Protocols:         []string{"https"},
+				SignatureVersions: []string{"v4"},
+			},
+			{
+				Variant: endpoints.FIPSVariant | endpoints.DualStackVariant,
+			}: {
+				Hostname:          "portal.sso-fips.{region}.api.aws",
+				Protocols:         []string{"https"},
+				SignatureVersions: []string{"v4"},
+			},
+			{
+				Variant: 0,
+			}: {
+				Hostname:          "portal.sso.{region}.amazonaws.com",
+				Protocols:         []string{"https"},
+				SignatureVersions: []string{"v4"},
+			},
+		},
+		RegionRegex:    partitionRegexp.Aws,
+		IsRegionalized: true,
+		Endpoints:
endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "af-south-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.af-south-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "af-south-1", + }, + }, + endpoints.EndpointKey{ + Region: "ap-east-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.ap-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-east-1", + }, + }, + endpoints.EndpointKey{ + Region: "ap-northeast-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.ap-northeast-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-northeast-1", + }, + }, + endpoints.EndpointKey{ + Region: "ap-northeast-2", + }: endpoints.Endpoint{ + Hostname: "portal.sso.ap-northeast-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-northeast-2", + }, + }, + endpoints.EndpointKey{ + Region: "ap-northeast-3", + }: endpoints.Endpoint{ + Hostname: "portal.sso.ap-northeast-3.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-northeast-3", + }, + }, + endpoints.EndpointKey{ + Region: "ap-south-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.ap-south-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-south-1", + }, + }, + endpoints.EndpointKey{ + Region: "ap-southeast-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.ap-southeast-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-southeast-1", + }, + }, + endpoints.EndpointKey{ + Region: "ap-southeast-2", + }: endpoints.Endpoint{ + Hostname: "portal.sso.ap-southeast-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-southeast-2", + }, + }, + endpoints.EndpointKey{ + Region: "ap-southeast-3", + }: endpoints.Endpoint{ + Hostname: "portal.sso.ap-southeast-3.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-southeast-3", + }, + }, + endpoints.EndpointKey{ + Region: "ca-central-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.ca-central-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ca-central-1", + }, + }, + endpoints.EndpointKey{ + Region: "eu-central-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.eu-central-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-central-1", + }, + }, + endpoints.EndpointKey{ + Region: "eu-central-2", + }: endpoints.Endpoint{ + Hostname: "portal.sso.eu-central-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-central-2", + }, + }, + endpoints.EndpointKey{ + Region: "eu-north-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.eu-north-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-north-1", + }, + }, + endpoints.EndpointKey{ + Region: "eu-south-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.eu-south-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-south-1", + }, + }, + endpoints.EndpointKey{ + Region: "eu-west-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.eu-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-west-1", + }, + }, + endpoints.EndpointKey{ + Region: "eu-west-2", + }: endpoints.Endpoint{ + Hostname: "portal.sso.eu-west-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-west-2", + }, + }, + endpoints.EndpointKey{ + Region: "eu-west-3", + }: endpoints.Endpoint{ + Hostname: "portal.sso.eu-west-3.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + 
Region: "eu-west-3", + }, + }, + endpoints.EndpointKey{ + Region: "il-central-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.il-central-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "il-central-1", + }, + }, + endpoints.EndpointKey{ + Region: "me-south-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.me-south-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "me-south-1", + }, + }, + endpoints.EndpointKey{ + Region: "sa-east-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.sa-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "sa-east-1", + }, + }, + endpoints.EndpointKey{ + Region: "us-east-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.us-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-1", + }, + }, + endpoints.EndpointKey{ + Region: "us-east-2", + }: endpoints.Endpoint{ + Hostname: "portal.sso.us-east-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-2", + }, + }, + endpoints.EndpointKey{ + Region: "us-west-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.us-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-west-1", + }, + }, + endpoints.EndpointKey{ + Region: "us-west-2", + }: endpoints.Endpoint{ + Hostname: "portal.sso.us-west-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + { + ID: "aws-cn", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "portal.sso.{region}.api.amazonwebservices.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "portal.sso-fips.{region}.amazonaws.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "portal.sso-fips.{region}.api.amazonwebservices.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "portal.sso.{region}.amazonaws.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsCn, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "cn-north-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.cn-north-1.amazonaws.com.cn", + CredentialScope: endpoints.CredentialScope{ + Region: "cn-north-1", + }, + }, + endpoints.EndpointKey{ + Region: "cn-northwest-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.cn-northwest-1.amazonaws.com.cn", + CredentialScope: endpoints.CredentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + { + ID: "aws-iso", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "portal.sso-fips.{region}.c2s.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "portal.sso.{region}.c2s.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIso, + IsRegionalized: true, + }, + { + ID: "aws-iso-b", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "portal.sso-fips.{region}.sc2s.sgov.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + 
}: { + Hostname: "portal.sso.{region}.sc2s.sgov.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoB, + IsRegionalized: true, + }, + { + ID: "aws-iso-e", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "portal.sso-fips.{region}.cloud.adc-e.uk", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "portal.sso.{region}.cloud.adc-e.uk", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoE, + IsRegionalized: true, + }, + { + ID: "aws-iso-f", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "portal.sso-fips.{region}.csp.hci.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "portal.sso.{region}.csp.hci.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoF, + IsRegionalized: true, + }, + { + ID: "aws-us-gov", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "portal.sso.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "portal.sso-fips.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "portal.sso-fips.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "portal.sso.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsUsGov, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "us-gov-east-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.us-gov-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-east-1", + }, + }, + endpoints.EndpointKey{ + Region: "us-gov-west-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.us-gov-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/serializers.go new file mode 100644 index 00000000000..02e31411566 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/serializers.go @@ -0,0 +1,284 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
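The partition table above is what the v1 resolver consults: hostname templates are keyed by variant, so enabling FIPS or dual-stack swaps the template (for example portal.sso-fips.{region}.amazonaws.com) before {region} is substituted, and the rule-based resolver in endpoints.go derives the same hostnames for the V2 path. Continuing the earlier sketch, a hedged example of driving those variants from configuration; the localhost URL is a placeholder, not a value from this patch:

    // FIPS variant: resolves to portal.sso-fips.us-east-1.amazonaws.com.
    // Passing the pseudo-region "fips-us-east-1" has the same effect, via
    // mapPseudoRegion and finalizeClientEndpointResolverOptions above.
    cfg, err := config.LoadDefaultConfig(ctx,
        config.WithRegion("us-east-1"),
        config.WithUseFIPSEndpoint(aws.FIPSEndpointStateEnabled),
    )
    if err != nil {
        log.Fatal(err)
    }

    // Custom endpoint: bypasses the partition table entirely. The V2 rule
    // set rejects combining a custom endpoint with FIPS or dual-stack, and
    // resolveBaseEndpoint would pick the same value up from the
    // AWS_ENDPOINT_URL_SSO environment variable.
    client := sso.NewFromConfig(cfg, func(o *sso.Options) {
        o.BaseEndpoint = aws.String("http://localhost:8080") // placeholder
    })
    _ = client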
+ +package sso + +import ( + "context" + "fmt" + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/encoding/httpbinding" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +type awsRestjson1_serializeOpGetRoleCredentials struct { +} + +func (*awsRestjson1_serializeOpGetRoleCredentials) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpGetRoleCredentials) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetRoleCredentialsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/federation/credentials") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsGetRoleCredentialsInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsGetRoleCredentialsInput(v *GetRoleCredentialsInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.AccessToken != nil && len(*v.AccessToken) > 0 { + locationName := "X-Amz-Sso_bearer_token" + encoder.SetHeader(locationName).String(*v.AccessToken) + } + + if v.AccountId != nil { + encoder.SetQuery("account_id").String(*v.AccountId) + } + + if v.RoleName != nil { + encoder.SetQuery("role_name").String(*v.RoleName) + } + + return nil +} + +type awsRestjson1_serializeOpListAccountRoles struct { +} + +func (*awsRestjson1_serializeOpListAccountRoles) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpListAccountRoles) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListAccountRolesInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/assignment/roles") + request.URL.Path = 
smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsListAccountRolesInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsListAccountRolesInput(v *ListAccountRolesInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.AccessToken != nil && len(*v.AccessToken) > 0 { + locationName := "X-Amz-Sso_bearer_token" + encoder.SetHeader(locationName).String(*v.AccessToken) + } + + if v.AccountId != nil { + encoder.SetQuery("account_id").String(*v.AccountId) + } + + if v.MaxResults != nil { + encoder.SetQuery("max_result").Integer(*v.MaxResults) + } + + if v.NextToken != nil { + encoder.SetQuery("next_token").String(*v.NextToken) + } + + return nil +} + +type awsRestjson1_serializeOpListAccounts struct { +} + +func (*awsRestjson1_serializeOpListAccounts) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpListAccounts) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListAccountsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/assignment/accounts") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsListAccountsInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func 
awsRestjson1_serializeOpHttpBindingsListAccountsInput(v *ListAccountsInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.AccessToken != nil && len(*v.AccessToken) > 0 { + locationName := "X-Amz-Sso_bearer_token" + encoder.SetHeader(locationName).String(*v.AccessToken) + } + + if v.MaxResults != nil { + encoder.SetQuery("max_result").Integer(*v.MaxResults) + } + + if v.NextToken != nil { + encoder.SetQuery("next_token").String(*v.NextToken) + } + + return nil +} + +type awsRestjson1_serializeOpLogout struct { +} + +func (*awsRestjson1_serializeOpLogout) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpLogout) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*LogoutInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/logout") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "POST" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsLogoutInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsLogoutInput(v *LogoutInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.AccessToken != nil && len(*v.AccessToken) > 0 { + locationName := "X-Amz-Sso_bearer_token" + encoder.SetHeader(locationName).String(*v.AccessToken) + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/errors.go new file mode 100644 index 00000000000..e97a126e8bb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/errors.go @@ -0,0 +1,115 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package types + +import ( + "fmt" + smithy "github.com/aws/smithy-go" +) + +// Indicates that a problem occurred with the input to the request. For example, a +// required parameter might be missing or out of range. 
+type InvalidRequestException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *InvalidRequestException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidRequestException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidRequestException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidRequestException" + } + return *e.ErrorCodeOverride +} +func (e *InvalidRequestException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The specified resource doesn't exist. +type ResourceNotFoundException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *ResourceNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ResourceNotFoundException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ResourceNotFoundException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "ResourceNotFoundException" + } + return *e.ErrorCodeOverride +} +func (e *ResourceNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Indicates that the request is being made too frequently and is more than what +// the server can handle. +type TooManyRequestsException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *TooManyRequestsException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *TooManyRequestsException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *TooManyRequestsException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "TooManyRequestsException" + } + return *e.ErrorCodeOverride +} +func (e *TooManyRequestsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Indicates that the request is not authorized. This can happen due to an invalid +// access token in the request. +type UnauthorizedException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *UnauthorizedException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *UnauthorizedException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *UnauthorizedException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "UnauthorizedException" + } + return *e.ErrorCodeOverride +} +func (e *UnauthorizedException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/types.go new file mode 100644 index 00000000000..8dc02296b11 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/types.go @@ -0,0 +1,61 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package types + +import ( + smithydocument "github.com/aws/smithy-go/document" +) + +// Provides information about your AWS account. +type AccountInfo struct { + + // The identifier of the AWS account that is assigned to the user. + AccountId *string + + // The display name of the AWS account that is assigned to the user. 
+ AccountName *string + + // The email address of the AWS account that is assigned to the user. + EmailAddress *string + + noSmithyDocumentSerde +} + +// Provides information about the role credentials that are assigned to the user. +type RoleCredentials struct { + + // The identifier used for the temporary security credentials. For more + // information, see Using Temporary Security Credentials to Request Access to AWS + // Resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) + // in the AWS IAM User Guide. + AccessKeyId *string + + // The date on which temporary security credentials expire. + Expiration int64 + + // The key that is used to sign the request. For more information, see Using + // Temporary Security Credentials to Request Access to AWS Resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) + // in the AWS IAM User Guide. + SecretAccessKey *string + + // The token used for temporary credentials. For more information, see Using + // Temporary Security Credentials to Request Access to AWS Resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) + // in the AWS IAM User Guide. + SessionToken *string + + noSmithyDocumentSerde +} + +// Provides information about the role that is assigned to the user. +type RoleInfo struct { + + // The identifier of the AWS account assigned to the user. + AccountId *string + + // The friendly name of the role that is assigned to the user. + RoleName *string + + noSmithyDocumentSerde +} + +type noSmithyDocumentSerde = smithydocument.NoSerde diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/validators.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/validators.go new file mode 100644 index 00000000000..f6bf461f74b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/validators.go @@ -0,0 +1,175 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
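The RoleCredentials shape above pairs the temporary key material with an integer Expiration. Below is a stdlib-only sketch of turning it into an expiring credential value; it assumes Expiration carries Unix epoch milliseconds (the usual interpretation for this field, but treat that as an assumption to verify), and the local types are illustrative mirrors, not the SDK's.

package main

import (
	"fmt"
	"time"
)

// roleCredentials mirrors types.RoleCredentials above, with the pointer
// fields flattened to values for brevity.
type roleCredentials struct {
	AccessKeyID     string
	SecretAccessKey string
	SessionToken    string
	Expiration      int64 // assumed: Unix epoch milliseconds
}

// expiresAt converts the integer Expiration into a time.Time so a caller can
// decide when the credentials need refreshing.
func expiresAt(rc roleCredentials) time.Time {
	return time.UnixMilli(rc.Expiration)
}

func main() {
	rc := roleCredentials{
		AccessKeyID: "ASIAEXAMPLE",
		Expiration:  time.Now().Add(time.Hour).UnixMilli(),
	}
	exp := expiresAt(rc)
	fmt.Println("expires:", exp.UTC(), "still valid:", time.Until(exp) > 0)
}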
+ +package sso + +import ( + "context" + "fmt" + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/middleware" +) + +type validateOpGetRoleCredentials struct { +} + +func (*validateOpGetRoleCredentials) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetRoleCredentials) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetRoleCredentialsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetRoleCredentialsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListAccountRoles struct { +} + +func (*validateOpListAccountRoles) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListAccountRoles) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListAccountRolesInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListAccountRolesInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListAccounts struct { +} + +func (*validateOpListAccounts) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListAccounts) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListAccountsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListAccountsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpLogout struct { +} + +func (*validateOpLogout) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpLogout) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*LogoutInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpLogoutInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +func addOpGetRoleCredentialsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetRoleCredentials{}, middleware.After) +} + +func addOpListAccountRolesValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpListAccountRoles{}, middleware.After) +} + +func addOpListAccountsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpListAccounts{}, middleware.After) +} + +func addOpLogoutValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpLogout{}, middleware.After) +} + +func validateOpGetRoleCredentialsInput(v *GetRoleCredentialsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetRoleCredentialsInput"} + if v.RoleName == 
nil { + invalidParams.Add(smithy.NewErrParamRequired("RoleName")) + } + if v.AccountId == nil { + invalidParams.Add(smithy.NewErrParamRequired("AccountId")) + } + if v.AccessToken == nil { + invalidParams.Add(smithy.NewErrParamRequired("AccessToken")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListAccountRolesInput(v *ListAccountRolesInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListAccountRolesInput"} + if v.AccessToken == nil { + invalidParams.Add(smithy.NewErrParamRequired("AccessToken")) + } + if v.AccountId == nil { + invalidParams.Add(smithy.NewErrParamRequired("AccountId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListAccountsInput(v *ListAccountsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListAccountsInput"} + if v.AccessToken == nil { + invalidParams.Add(smithy.NewErrParamRequired("AccessToken")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpLogoutInput(v *LogoutInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "LogoutInput"} + if v.AccessToken == nil { + invalidParams.Add(smithy.NewErrParamRequired("AccessToken")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md new file mode 100644 index 00000000000..b47827d7243 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md @@ -0,0 +1,317 @@ +# v1.19.1 (2023-11-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.19.0 (2023-11-01) + +* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.0 (2023-10-31) + +* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.3 (2023-10-12) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.2 (2023-10-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.1 (2023-09-22) + +* No change notes available for this release. + +# v1.17.0 (2023-09-20) + +* **Feature**: Update FIPS endpoints in aws-us-gov. + +# v1.16.0 (2023-09-18) + +* **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* **Feature**: Adds several endpoint ruleset changes across all models: smaller rulesets, removed non-unique regional endpoints, fixes FIPS and DualStack endpoints, and make region not required in SDK::Endpoint. Additional breakfix to cognito-sync field. + +# v1.15.6 (2023-09-05) + +* No change notes available for this release. 
+ +# v1.15.5 (2023-08-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.4 (2023-08-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.3 (2023-08-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.2 (2023-08-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.1 (2023-08-01) + +* No change notes available for this release. + +# v1.15.0 (2023-07-31) + +* **Feature**: Adds support for smithy-modeled endpoint resolution. A new rules-based endpoint resolution will be added to the SDK which will supersede and deprecate existing endpoint resolution. Specifically, EndpointResolver will be deprecated while BaseEndpoint and EndpointResolverV2 will take its place. For more information, please see the Endpoints section in our Developer Guide. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.14 (2023-07-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.13 (2023-07-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.12 (2023-06-15) + +* No change notes available for this release. + +# v1.14.11 (2023-06-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.10 (2023-05-04) + +* No change notes available for this release. + +# v1.14.9 (2023-04-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.8 (2023-04-10) + +* No change notes available for this release. + +# v1.14.7 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.6 (2023-03-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.5 (2023-03-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.4 (2023-02-22) + +* **Bug Fix**: Prevent nil pointer dereference when retrieving error codes. + +# v1.14.3 (2023-02-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.2 (2023-02-15) + +* **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910. +* **Bug Fix**: Correct error type parsing for restJson services. + +# v1.14.1 (2023-02-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.0 (2023-01-05) + +* **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). + +# v1.13.11 (2022-12-19) + +* No change notes available for this release. + +# v1.13.10 (2022-12-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.9 (2022-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.8 (2022-10-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.7 (2022-10-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.6 (2022-09-30) + +* **Documentation**: Documentation updates for the IAM Identity Center OIDC CLI Reference.
+ +# v1.13.5 (2022-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.4 (2022-09-14) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.3 (2022-09-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.2 (2022-08-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.1 (2022-08-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.0 (2022-08-25) + +* **Feature**: Updated required request parameters on IAM Identity Center's OIDC CreateToken action. + +# v1.12.14 (2022-08-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.13 (2022-08-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.12 (2022-08-08) + +* **Documentation**: Documentation updates to reflect service rename - AWS IAM Identity Center (successor to AWS Single Sign-On) +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.11 (2022-08-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.10 (2022-07-11) + +* No change notes available for this release. + +# v1.12.9 (2022-07-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.8 (2022-06-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.7 (2022-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.6 (2022-05-27) + +* No change notes available for this release. + +# v1.12.5 (2022-05-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.4 (2022-04-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.3 (2022-03-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.2 (2022-03-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.1 (2022-03-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.0 (2022-03-08) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.0 (2022-02-24) + +* **Feature**: API client updated +* **Feature**: Adds RetryMaxAttempts and RetryMode to API client Options. This allows the API clients' default Retryer to be configured from the shared configuration files or environment variables. Adding a new Retry mode of `Adaptive`. `Adaptive` retry mode is an experimental mode, adding client rate limiting when throttled responses are received from an API. See [retry.AdaptiveMode](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws/retry#AdaptiveMode) for more details, and configuration options. +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.10.0 (2022-01-14) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.0 (2022-01-07) + +* **Feature**: API client updated +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.8.2 (2021-12-02) + +* **Bug Fix**: Fixes a bug that prevented aws.EndpointResolverWithOptions from being used by the service client.
([#1514](https://github.com/aws/aws-sdk-go-v2/pull/1514)) +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.8.1 (2021-11-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.8.0 (2021-11-06) + +* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.0 (2021-10-21) + +* **Feature**: Updated to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.0 (2021-10-11) + +* **Feature**: API client updated +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.0 (2021-09-17) + +* **Feature**: Updated API client and endpoints to latest revision. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.0 (2021-08-27) + +* **Feature**: Updated API model to latest revision. +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.3 (2021-08-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.2 (2021-08-04) + +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.1 (2021-07-15) + +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.0 (2021-06-25) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.1 (2021-05-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.0 (2021-05-14) + +* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. +* **Dependency Update**: Updated to the latest SDK module versions + diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/LICENSE.txt new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go new file mode 100644 index 00000000000..6a56093d8ac --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go @@ -0,0 +1,526 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package ssooidc + +import ( + "context" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/defaults" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/retry" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" + internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" + smithy "github.com/aws/smithy-go" + smithydocument "github.com/aws/smithy-go/document" + "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "net" + "net/http" + "time" +) + +const ServiceID = "SSO OIDC" +const ServiceAPIVersion = "2019-06-10" + +// Client provides the API client to make operations call for AWS SSO OIDC. +type Client struct { + options Options +} + +// New returns an initialized Client based on the functional options. Provide +// additional functional options to further configure the behavior of the client, +// such as changing the client's endpoint or adding custom middleware behavior. +func New(options Options, optFns ...func(*Options)) *Client { + options = options.Copy() + + resolveDefaultLogger(&options) + + setResolvedDefaultsMode(&options) + + resolveRetryer(&options) + + resolveHTTPClient(&options) + + resolveHTTPSignerV4(&options) + + for _, fn := range optFns { + fn(&options) + } + + client := &Client{ + options: options, + } + + return client +} + +type Options struct { + // Set of options to modify how an operation is invoked. These apply to all + // operations invoked for this client. Use functional options on operation call to + // modify this list for per operation behavior. + APIOptions []func(*middleware.Stack) error + + // The optional application specific identifier appended to the User-Agent header. + AppID string + + // This endpoint will be given as input to an EndpointResolverV2. It is used for + // providing a custom base endpoint that is subject to modifications by the + // processing EndpointResolverV2. + BaseEndpoint *string + + // Configures the events that will be sent to the configured logger. + ClientLogMode aws.ClientLogMode + + // The credentials object to use when signing requests. + Credentials aws.CredentialsProvider + + // The configuration DefaultsMode that the SDK should use when constructing the + // clients initial default settings. + DefaultsMode aws.DefaultsMode + + // The endpoint options to be used when attempting to resolve an endpoint. + EndpointOptions EndpointResolverOptions + + // The service endpoint resolver. + // + // Deprecated: Deprecated: EndpointResolver and WithEndpointResolver. Providing a + // value for this field will likely prevent you from using any endpoint-related + // service features released after the introduction of EndpointResolverV2 and + // BaseEndpoint. To migrate an EndpointResolver implementation that uses a custom + // endpoint, set the client option BaseEndpoint instead. + EndpointResolver EndpointResolver + + // Resolves the endpoint used for a particular service. This should be used over + // the deprecated EndpointResolver + EndpointResolverV2 EndpointResolverV2 + + // Signature Version 4 (SigV4) Signer + HTTPSignerV4 HTTPSignerV4 + + // The logger writer interface to write logging messages to. + Logger logging.Logger + + // The region to send requests to. 
(Required) + Region string + + // RetryMaxAttempts specifies the maximum number of attempts an API client will + // make when calling an operation that fails with a retryable error. A value of 0 is ignored, and + // will not be used to configure the API client's default retryer, or modify + // per operation call's retry max attempts. When creating a new API client, this + // member will only be used if the Retryer Options member is nil. This value will + // be ignored if Retryer is not nil. If specified in an operation call's functional + // options with a value that is different from the constructed client's Options, + // the Client's Retryer will be wrapped to use the operation's specific + // RetryMaxAttempts value. + RetryMaxAttempts int + + // RetryMode specifies the retry mode the API client will be created with, if + // Retryer option is not also specified. When creating a new API client, this + // member will only be used if the Retryer Options member is nil. This value will + // be ignored if Retryer is not nil. Currently does not support per operation call + // overrides, may in the future. + RetryMode aws.RetryMode + + // Retryer guides how HTTP requests should be retried in case of recoverable + // failures. When nil the API client will use a default retryer. The kind of + // default retry created by the API client can be changed with the RetryMode + // option. + Retryer aws.Retryer + + // The RuntimeEnvironment configuration, only populated if the DefaultsMode is set + // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig . You + // should not populate this structure programmatically, or rely on the values here + // within your applications. + RuntimeEnvironment aws.RuntimeEnvironment + + // The initial DefaultsMode used when the client options were constructed. If the + // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved + // value was at that point in time. Currently does not support per operation call + // overrides, may in the future. + resolvedDefaultsMode aws.DefaultsMode + + // The HTTP client to invoke API calls with. Defaults to client's default HTTP + // implementation if nil. + HTTPClient HTTPClient +} + +// WithAPIOptions returns a functional option for setting the Client's APIOptions +// option. +func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { + return func(o *Options) { + o.APIOptions = append(o.APIOptions, optFns...) + } +} + +// Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for +// this field will likely prevent you from using any endpoint-related service +// features released after the introduction of EndpointResolverV2 and BaseEndpoint. +// To migrate an EndpointResolver implementation that uses a custom endpoint, set +// the client option BaseEndpoint instead. +func WithEndpointResolver(v EndpointResolver) func(*Options) { + return func(o *Options) { + o.EndpointResolver = v + } +} + +// WithEndpointResolverV2 returns a functional option for setting the Client's +// EndpointResolverV2 option. +func WithEndpointResolverV2(v EndpointResolverV2) func(*Options) { + return func(o *Options) { + o.EndpointResolverV2 = v + } +} + +type HTTPClient interface { + Do(*http.Request) (*http.Response, error) +} + +// Copy creates a clone where the APIOptions list is deep copied.
+func (o Options) Copy() Options { + to := o + to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions)) + copy(to.APIOptions, o.APIOptions) + + return to +} +func (c *Client) invokeOperation(ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error) (result interface{}, metadata middleware.Metadata, err error) { + ctx = middleware.ClearStackValues(ctx) + stack := middleware.NewStack(opID, smithyhttp.NewStackRequest) + options := c.options.Copy() + resolveEndpointResolverV2(&options) + + for _, fn := range optFns { + fn(&options) + } + + finalizeRetryMaxAttemptOptions(&options, *c) + + finalizeClientEndpointResolverOptions(&options) + + for _, fn := range stackFns { + if err := fn(stack, options); err != nil { + return nil, metadata, err + } + } + + for _, fn := range options.APIOptions { + if err := fn(stack); err != nil { + return nil, metadata, err + } + } + + handler := middleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack) + result, metadata, err = handler.Handle(ctx, params) + if err != nil { + err = &smithy.OperationError{ + ServiceID: ServiceID, + OperationName: opID, + Err: err, + } + } + return result, metadata, err +} + +type noSmithyDocumentSerde = smithydocument.NoSerde + +type legacyEndpointContextSetter struct { + LegacyResolver EndpointResolver +} + +func (*legacyEndpointContextSetter) ID() string { + return "legacyEndpointContextSetter" +} + +func (m *legacyEndpointContextSetter) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + if m.LegacyResolver != nil { + ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, true) + } + + return next.HandleInitialize(ctx, in) + +} +func addlegacyEndpointContextSetter(stack *middleware.Stack, o Options) error { + return stack.Initialize.Add(&legacyEndpointContextSetter{ + LegacyResolver: o.EndpointResolver, + }, middleware.Before) +} + +func resolveDefaultLogger(o *Options) { + if o.Logger != nil { + return + } + o.Logger = logging.Nop{} +} + +func addSetLoggerMiddleware(stack *middleware.Stack, o Options) error { + return middleware.AddSetLoggerMiddleware(stack, o.Logger) +} + +func setResolvedDefaultsMode(o *Options) { + if len(o.resolvedDefaultsMode) > 0 { + return + } + + var mode aws.DefaultsMode + mode.SetFromString(string(o.DefaultsMode)) + + if mode == aws.DefaultsModeAuto { + mode = defaults.ResolveDefaultsModeAuto(o.Region, o.RuntimeEnvironment) + } + + o.resolvedDefaultsMode = mode +} + +// NewFromConfig returns a new client from the provided config. +func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { + opts := Options{ + Region: cfg.Region, + DefaultsMode: cfg.DefaultsMode, + RuntimeEnvironment: cfg.RuntimeEnvironment, + HTTPClient: cfg.HTTPClient, + Credentials: cfg.Credentials, + APIOptions: cfg.APIOptions, + Logger: cfg.Logger, + ClientLogMode: cfg.ClientLogMode, + AppID: cfg.AppID, + } + resolveAWSRetryerProvider(cfg, &opts) + resolveAWSRetryMaxAttempts(cfg, &opts) + resolveAWSRetryMode(cfg, &opts) + resolveAWSEndpointResolver(cfg, &opts) + resolveUseDualStackEndpoint(cfg, &opts) + resolveUseFIPSEndpoint(cfg, &opts) + resolveBaseEndpoint(cfg, &opts) + return New(opts, optFns...) 
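// Usage sketch: callers typically reach NewFromConfig through the shared
// configuration loader and then override options per client. This assumes the
// real aws-sdk-go-v2 config module; error handling is elided.
//
//	cfg, err := config.LoadDefaultConfig(context.TODO())
//	if err != nil {
//		log.Fatal(err)
//	}
//	client := ssooidc.NewFromConfig(cfg, func(o *ssooidc.Options) {
//		o.Region = "us-east-1"
//	})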
+} + +func resolveHTTPClient(o *Options) { + var buildable *awshttp.BuildableClient + + if o.HTTPClient != nil { + var ok bool + buildable, ok = o.HTTPClient.(*awshttp.BuildableClient) + if !ok { + return + } + } else { + buildable = awshttp.NewBuildableClient() + } + + modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) + if err == nil { + buildable = buildable.WithDialerOptions(func(dialer *net.Dialer) { + if dialerTimeout, ok := modeConfig.GetConnectTimeout(); ok { + dialer.Timeout = dialerTimeout + } + }) + + buildable = buildable.WithTransportOptions(func(transport *http.Transport) { + if tlsHandshakeTimeout, ok := modeConfig.GetTLSNegotiationTimeout(); ok { + transport.TLSHandshakeTimeout = tlsHandshakeTimeout + } + }) + } + + o.HTTPClient = buildable +} + +func resolveRetryer(o *Options) { + if o.Retryer != nil { + return + } + + if len(o.RetryMode) == 0 { + modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) + if err == nil { + o.RetryMode = modeConfig.RetryMode + } + } + if len(o.RetryMode) == 0 { + o.RetryMode = aws.RetryModeStandard + } + + var standardOptions []func(*retry.StandardOptions) + if v := o.RetryMaxAttempts; v != 0 { + standardOptions = append(standardOptions, func(so *retry.StandardOptions) { + so.MaxAttempts = v + }) + } + + switch o.RetryMode { + case aws.RetryModeAdaptive: + var adaptiveOptions []func(*retry.AdaptiveModeOptions) + if len(standardOptions) != 0 { + adaptiveOptions = append(adaptiveOptions, func(ao *retry.AdaptiveModeOptions) { + ao.StandardOptions = append(ao.StandardOptions, standardOptions...) + }) + } + o.Retryer = retry.NewAdaptiveMode(adaptiveOptions...) + + default: + o.Retryer = retry.NewStandard(standardOptions...) + } +} + +func resolveAWSRetryerProvider(cfg aws.Config, o *Options) { + if cfg.Retryer == nil { + return + } + o.Retryer = cfg.Retryer() +} + +func resolveAWSRetryMode(cfg aws.Config, o *Options) { + if len(cfg.RetryMode) == 0 { + return + } + o.RetryMode = cfg.RetryMode +} +func resolveAWSRetryMaxAttempts(cfg aws.Config, o *Options) { + if cfg.RetryMaxAttempts == 0 { + return + } + o.RetryMaxAttempts = cfg.RetryMaxAttempts +} + +func finalizeRetryMaxAttemptOptions(o *Options, client Client) { + if v := o.RetryMaxAttempts; v == 0 || v == client.options.RetryMaxAttempts { + return + } + + o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts) +} + +func resolveAWSEndpointResolver(cfg aws.Config, o *Options) { + if cfg.EndpointResolver == nil && cfg.EndpointResolverWithOptions == nil { + return + } + o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, cfg.EndpointResolverWithOptions) +} + +func addClientUserAgent(stack *middleware.Stack, options Options) error { + if err := awsmiddleware.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "ssooidc", goModuleVersion)(stack); err != nil { + return err + } + + if len(options.AppID) > 0 { + return awsmiddleware.AddSDKAgentKey(awsmiddleware.ApplicationIdentifier, options.AppID)(stack) + } + + return nil +} + +func addHTTPSignerV4Middleware(stack *middleware.Stack, o Options) error { + mw := v4.NewSignHTTPRequestMiddleware(v4.SignHTTPRequestMiddlewareOptions{ + CredentialsProvider: o.Credentials, + Signer: o.HTTPSignerV4, + LogSigning: o.ClientLogMode.IsSigning(), + }) + return stack.Finalize.Add(mw, middleware.After) +} + +type HTTPSignerV4 interface { + SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns 
...func(*v4.SignerOptions)) error +} + +func resolveHTTPSignerV4(o *Options) { + if o.HTTPSignerV4 != nil { + return + } + o.HTTPSignerV4 = newDefaultV4Signer(*o) +} + +func newDefaultV4Signer(o Options) *v4.Signer { + return v4.NewSigner(func(so *v4.SignerOptions) { + so.Logger = o.Logger + so.LogSigning = o.ClientLogMode.IsSigning() + }) +} + +func addRetryMiddlewares(stack *middleware.Stack, o Options) error { + mo := retry.AddRetryMiddlewaresOptions{ + Retryer: o.Retryer, + LogRetryAttempts: o.ClientLogMode.IsRetries(), + } + return retry.AddRetryMiddlewares(stack, mo) +} + +// resolves dual-stack endpoint configuration +func resolveUseDualStackEndpoint(cfg aws.Config, o *Options) error { + if len(cfg.ConfigSources) == 0 { + return nil + } + value, found, err := internalConfig.ResolveUseDualStackEndpoint(context.Background(), cfg.ConfigSources) + if err != nil { + return err + } + if found { + o.EndpointOptions.UseDualStackEndpoint = value + } + return nil +} + +// resolves FIPS endpoint configuration +func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error { + if len(cfg.ConfigSources) == 0 { + return nil + } + value, found, err := internalConfig.ResolveUseFIPSEndpoint(context.Background(), cfg.ConfigSources) + if err != nil { + return err + } + if found { + o.EndpointOptions.UseFIPSEndpoint = value + } + return nil +} + +func addRequestIDRetrieverMiddleware(stack *middleware.Stack) error { + return awsmiddleware.AddRequestIDRetrieverMiddleware(stack) +} + +func addResponseErrorMiddleware(stack *middleware.Stack) error { + return awshttp.AddResponseErrorMiddleware(stack) +} + +func addRequestResponseLogging(stack *middleware.Stack, o Options) error { + return stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{ + LogRequest: o.ClientLogMode.IsRequest(), + LogRequestWithBody: o.ClientLogMode.IsRequestWithBody(), + LogResponse: o.ClientLogMode.IsResponse(), + LogResponseWithBody: o.ClientLogMode.IsResponseWithBody(), + }, middleware.After) +} + +type endpointDisableHTTPSMiddleware struct { + EndpointDisableHTTPS bool +} + +func (*endpointDisableHTTPSMiddleware) ID() string { + return "endpointDisableHTTPSMiddleware" +} + +func (m *endpointDisableHTTPSMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.EndpointDisableHTTPS && !smithyhttp.GetHostnameImmutable(ctx) { + req.URL.Scheme = "http" + } + + return next.HandleSerialize(ctx, in) + +} +func addendpointDisableHTTPSMiddleware(stack *middleware.Stack, o Options) error { + return stack.Serialize.Insert(&endpointDisableHTTPSMiddleware{ + EndpointDisableHTTPS: o.EndpointOptions.DisableHTTPS, + }, "OperationSerializer", middleware.Before) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go new file mode 100644 index 00000000000..43df6256cf0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go @@ -0,0 +1,316 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
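The client construction in api_client.go above follows the functional-options idiom: New copies the incoming Options, resolves defaults (logger, defaults mode, retryer, HTTP client, SigV4 signer), and only then applies the caller's option functions, so explicit overrides win. Here is a self-contained sketch of that construction shape; every name in it is illustrative, not the SDK's.

package main

import "fmt"

// options is a pared-down stand-in for the generated Options struct.
type options struct {
	region     string
	maxRetries int
}

type client struct{ opts options }

// withRegion and withMaxRetries are sketches of functional options.
func withRegion(r string) func(*options) { return func(o *options) { o.region = r } }

func withMaxRetries(n int) func(*options) { return func(o *options) { o.maxRetries = n } }

// newClient copies the base options, fills defaults, then applies overrides
// last, mirroring the order used by New above.
func newClient(base options, optFns ...func(*options)) *client {
	o := base // value copy keeps the caller's struct unmodified (cf. Options.Copy)
	if o.maxRetries == 0 {
		o.maxRetries = 3 // stand-in for resolveRetryer's defaults
	}
	for _, fn := range optFns {
		fn(&o)
	}
	return &client{opts: o}
}

func main() {
	c := newClient(options{}, withRegion("us-gov-west-1"), withMaxRetries(5))
	fmt.Printf("%+v\n", c.opts)
}

Copying by value before mutation is what lets one base configuration be shared safely across many clients, the same reason the generated code deep-copies the APIOptions slice in Options.Copy.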
+ +package ssooidc + +import ( + "context" + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Creates and returns an access token for the authorized client. The access token +// issued will be used to fetch short-term credentials for the assigned roles in +// the AWS account. +func (c *Client) CreateToken(ctx context.Context, params *CreateTokenInput, optFns ...func(*Options)) (*CreateTokenOutput, error) { + if params == nil { + params = &CreateTokenInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CreateToken", params, optFns, c.addOperationCreateTokenMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CreateTokenOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CreateTokenInput struct { + + // The unique identifier string for each client. This value should come from the + // persisted result of the RegisterClient API. + // + // This member is required. + ClientId *string + + // A secret string generated for the client. This value should come from the + // persisted result of the RegisterClient API. + // + // This member is required. + ClientSecret *string + + // Supports grant types for the authorization code, refresh token, and device code + // request. For device code requests, specify the following value: + // urn:ietf:params:oauth:grant-type:device_code For information about how to + // obtain the device code, see the StartDeviceAuthorization topic. + // + // This member is required. + GrantType *string + + // The authorization code received from the authorization service. This parameter + // is required to perform an authorization grant request to get access to a token. + Code *string + + // Used only when calling this API for the device code grant type. This short-term + // code is used to identify this authentication attempt. This should come from an + // in-memory reference to the result of the StartDeviceAuthorization API. + DeviceCode *string + + // The location of the application that will receive the authorization code. Users + // authorize the service to send the request to this location. + RedirectUri *string + + // Currently, refreshToken is not yet implemented and is not supported. For more + // information about the features and limitations of the current IAM Identity + // Center OIDC implementation, see Considerations for Using this Guide in the IAM + // Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html) + // . The token used to obtain an access token in the event that the access token is + // invalid or expired. + RefreshToken *string + + // The list of scopes that is defined by the client. Upon authorization, this list + // is used to restrict permissions when granting an access token. + Scope []string + + noSmithyDocumentSerde +} + +type CreateTokenOutput struct { + + // An opaque token to access IAM Identity Center resources assigned to a user. + AccessToken *string + + // Indicates the time in seconds when an access token will expire. + ExpiresIn int32 + + // Currently, idToken is not yet implemented and is not supported. 
For more + // information about the features and limitations of the current IAM Identity + // Center OIDC implementation, see Considerations for Using this Guide in the IAM + // Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html) + // . The identifier of the user associated with the access token, if present. + IdToken *string + + // Currently, refreshToken is not yet implemented and is not supported. For more + // information about the features and limitations of the current IAM Identity + // Center OIDC implementation, see Considerations for Using this Guide in the IAM + // Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html) + // . A token that, if present, can be used to refresh a previously issued access + // token that might have expired. + RefreshToken *string + + // Used to notify the client that the returned token is an access token. The + // supported type is BearerToken . + TokenType *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCreateTokenMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpCreateToken{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpCreateToken{}, middleware.After) + if err != nil { + return err + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addCreateTokenResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addOpCreateTokenValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateToken(options.Region), middleware.Before); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opCreateToken(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "CreateToken", + } +} + +type opCreateTokenResolveEndpointMiddleware struct { + EndpointResolver
EndpointResolverV2 + BuiltInResolver builtInParameterResolver +} + +func (*opCreateTokenResolveEndpointMiddleware) ID() string { + return "ResolveEndpointV2" +} + +func (m *opCreateTokenResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { + return next.HandleSerialize(ctx, in) + } + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.EndpointResolver == nil { + return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") + } + + params := EndpointParameters{} + + m.BuiltInResolver.ResolveBuiltIns(¶ms) + + var resolvedEndpoint smithyendpoints.Endpoint + resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) + if err != nil { + return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) + } + + req.URL = &resolvedEndpoint.URI + + for k := range resolvedEndpoint.Headers { + req.Header.Set( + k, + resolvedEndpoint.Headers.Get(k), + ) + } + + authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) + if err != nil { + var nfe *internalauth.NoAuthenticationSchemesFoundError + if errors.As(err, &nfe) { + // if no auth scheme is found, default to sigv4 + signingName := "awsssooidc" + signingRegion := m.BuiltInResolver.(*builtInResolver).Region + ctx = awsmiddleware.SetSigningName(ctx, signingName) + ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) + + } + var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError + if errors.As(err, &ue) { + return out, metadata, fmt.Errorf( + "This operation requests signer version(s) %v but the client only supports %v", + ue.UnsupportedSchemes, + internalauth.SupportedSchemes, + ) + } + } + + for _, authScheme := range authSchemes { + switch authScheme.(type) { + case *internalauth.AuthenticationSchemeV4: + v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) + var signingName, signingRegion string + if v4Scheme.SigningName == nil { + signingName = "awsssooidc" + } else { + signingName = *v4Scheme.SigningName + } + if v4Scheme.SigningRegion == nil { + signingRegion = m.BuiltInResolver.(*builtInResolver).Region + } else { + signingRegion = *v4Scheme.SigningRegion + } + if v4Scheme.DisableDoubleEncoding != nil { + // The signer sets an equivalent value at client initialization time. + // Setting this context value will cause the signer to extract it + // and override the value set at client initialization time. + ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) + } + ctx = awsmiddleware.SetSigningName(ctx, signingName) + ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) + break + case *internalauth.AuthenticationSchemeV4A: + v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) + if v4aScheme.SigningName == nil { + v4aScheme.SigningName = aws.String("awsssooidc") + } + if v4aScheme.DisableDoubleEncoding != nil { + // The signer sets an equivalent value at client initialization time. + // Setting this context value will cause the signer to extract it + // and override the value set at client initialization time. 
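// Both in this switch and in the NoAuthenticationSchemesFoundError branch
// above, the fallback is SigV4 with signing name "awsssooidc" and the
// built-in resolver's region, so the request remains signable even when the
// resolved endpoint carries no auth scheme properties.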
+ ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) + } + ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) + ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) + break + case *internalauth.AuthenticationSchemeNone: + break + } + } + + return next.HandleSerialize(ctx, in) +} + +func addCreateTokenResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { + return stack.Serialize.Insert(&opCreateTokenResolveEndpointMiddleware{ + EndpointResolver: options.EndpointResolverV2, + BuiltInResolver: &builtInResolver{ + Region: options.Region, + UseDualStack: options.EndpointOptions.UseDualStackEndpoint, + UseFIPS: options.EndpointOptions.UseFIPSEndpoint, + Endpoint: options.BaseEndpoint, + }, + }, "ResolveEndpoint", middleware.After) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go new file mode 100644 index 00000000000..b88ebb7067f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go @@ -0,0 +1,281 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ssooidc + +import ( + "context" + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Registers a client with IAM Identity Center. This allows clients to initiate +// device authorization. The output should be persisted for reuse through many +// authentication requests. +func (c *Client) RegisterClient(ctx context.Context, params *RegisterClientInput, optFns ...func(*Options)) (*RegisterClientOutput, error) { + if params == nil { + params = &RegisterClientInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "RegisterClient", params, optFns, c.addOperationRegisterClientMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*RegisterClientOutput) + out.ResultMetadata = metadata + return out, nil +} + +type RegisterClientInput struct { + + // The friendly name of the client. + // + // This member is required. + ClientName *string + + // The type of client. The service supports only public as a client type. Anything + // other than public will be rejected by the service. + // + // This member is required. + ClientType *string + + // The list of scopes that are defined by the client. Upon authorization, this + // list is used to restrict permissions when granting an access token. + Scopes []string + + noSmithyDocumentSerde +} + +type RegisterClientOutput struct { + + // The endpoint where the client can request authorization. + AuthorizationEndpoint *string + + // The unique identifier string for each client. This client uses this identifier + // to get authenticated by the service in subsequent calls. + ClientId *string + + // Indicates the time at which the clientId and clientSecret were issued. + ClientIdIssuedAt int64 + + // A secret string generated for the client. The client will use this string to + // get authenticated by the service in subsequent calls. + ClientSecret *string + + // Indicates the time at which the clientId and clientSecret will become invalid. + ClientSecretExpiresAt int64 + + // The endpoint where the client can get an access token. 
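// As the RegisterClient doc above notes, this output should be persisted and
// reused across authentication requests; a typical caller caches
// ClientId/ClientSecret until ClientSecretExpiresAt. A hedged, illustrative
// sketch (field values are placeholders, not part of this patch):
//
//	rc, err := client.RegisterClient(ctx, &ssooidc.RegisterClientInput{
//		ClientName: aws.String("my-cli"), // illustrative name
//		ClientType: aws.String("public"), // the only supported type
//	})
//	// ...persist rc.ClientId, rc.ClientSecret, rc.ClientSecretExpiresAt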
+ TokenEndpoint *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationRegisterClientMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpRegisterClient{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpRegisterClient{}, middleware.After) + if err != nil { + return err + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addRegisterClientResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addOpRegisterClientValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opRegisterClient(options.Region), middleware.Before); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opRegisterClient(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "RegisterClient", + } +} + +type opRegisterClientResolveEndpointMiddleware struct { + EndpointResolver EndpointResolverV2 + BuiltInResolver builtInParameterResolver +} + +func (*opRegisterClientResolveEndpointMiddleware) ID() string { + return "ResolveEndpointV2" +} + +func (m *opRegisterClientResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { + return next.HandleSerialize(ctx, in) + } + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.EndpointResolver == nil { + return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") + } + + params := EndpointParameters{} + + m.BuiltInResolver.ResolveBuiltIns(¶ms) + + var resolvedEndpoint smithyendpoints.Endpoint + resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) + if err != nil { + return out, 
metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) + } + + req.URL = &resolvedEndpoint.URI + + for k := range resolvedEndpoint.Headers { + req.Header.Set( + k, + resolvedEndpoint.Headers.Get(k), + ) + } + + authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) + if err != nil { + var nfe *internalauth.NoAuthenticationSchemesFoundError + if errors.As(err, &nfe) { + // if no auth scheme is found, default to sigv4 + signingName := "awsssooidc" + signingRegion := m.BuiltInResolver.(*builtInResolver).Region + ctx = awsmiddleware.SetSigningName(ctx, signingName) + ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) + + } + var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError + if errors.As(err, &ue) { + return out, metadata, fmt.Errorf( + "This operation requests signer version(s) %v but the client only supports %v", + ue.UnsupportedSchemes, + internalauth.SupportedSchemes, + ) + } + } + + for _, authScheme := range authSchemes { + switch authScheme.(type) { + case *internalauth.AuthenticationSchemeV4: + v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) + var signingName, signingRegion string + if v4Scheme.SigningName == nil { + signingName = "awsssooidc" + } else { + signingName = *v4Scheme.SigningName + } + if v4Scheme.SigningRegion == nil { + signingRegion = m.BuiltInResolver.(*builtInResolver).Region + } else { + signingRegion = *v4Scheme.SigningRegion + } + if v4Scheme.DisableDoubleEncoding != nil { + // The signer sets an equivalent value at client initialization time. + // Setting this context value will cause the signer to extract it + // and override the value set at client initialization time. + ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) + } + ctx = awsmiddleware.SetSigningName(ctx, signingName) + ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) + break + case *internalauth.AuthenticationSchemeV4A: + v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) + if v4aScheme.SigningName == nil { + v4aScheme.SigningName = aws.String("awsssooidc") + } + if v4aScheme.DisableDoubleEncoding != nil { + // The signer sets an equivalent value at client initialization time. + // Setting this context value will cause the signer to extract it + // and override the value set at client initialization time. 
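// This endpoint-resolution middleware is emitted once per operation by
// smithy-go codegen; apart from its type name and the operation it is
// registered for, it is identical to the CreateToken and
// StartDeviceAuthorization variants in this package.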
+ ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) + } + ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) + ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) + break + case *internalauth.AuthenticationSchemeNone: + break + } + } + + return next.HandleSerialize(ctx, in) +} + +func addRegisterClientResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { + return stack.Serialize.Insert(&opRegisterClientResolveEndpointMiddleware{ + EndpointResolver: options.EndpointResolverV2, + BuiltInResolver: &builtInResolver{ + Region: options.Region, + UseDualStack: options.EndpointOptions.UseDualStackEndpoint, + UseFIPS: options.EndpointOptions.UseFIPSEndpoint, + Endpoint: options.BaseEndpoint, + }, + }, "ResolveEndpoint", middleware.After) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go new file mode 100644 index 00000000000..327da5f7373 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go @@ -0,0 +1,289 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ssooidc + +import ( + "context" + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Initiates device authorization by requesting a pair of verification codes from +// the authorization service. +func (c *Client) StartDeviceAuthorization(ctx context.Context, params *StartDeviceAuthorizationInput, optFns ...func(*Options)) (*StartDeviceAuthorizationOutput, error) { + if params == nil { + params = &StartDeviceAuthorizationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "StartDeviceAuthorization", params, optFns, c.addOperationStartDeviceAuthorizationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*StartDeviceAuthorizationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type StartDeviceAuthorizationInput struct { + + // The unique identifier string for the client that is registered with IAM + // Identity Center. This value should come from the persisted result of the + // RegisterClient API operation. + // + // This member is required. + ClientId *string + + // A secret string that is generated for the client. This value should come from + // the persisted result of the RegisterClient API operation. + // + // This member is required. + ClientSecret *string + + // The URL for the AWS access portal. For more information, see Using the AWS + // access portal (https://docs.aws.amazon.com/singlesignon/latest/userguide/using-the-portal.html) + // in the IAM Identity Center User Guide. + // + // This member is required. + StartUrl *string + + noSmithyDocumentSerde +} + +type StartDeviceAuthorizationOutput struct { + + // The short-lived code that is used by the device when polling for a session + // token. + DeviceCode *string + + // Indicates the number of seconds in which the verification code will become + // invalid. + ExpiresIn int32 + + // Indicates the number of seconds the client must wait between attempts when + // polling for a session. + Interval int32 + + // A one-time user verification code. 
This is needed to authorize an in-use device. + UserCode *string + + // The URI of the verification page that takes the userCode to authorize the + // device. + VerificationUri *string + + // An alternate URL that the client can use to automatically launch a browser. + // This process skips the manual step in which the user visits the verification + // page and enters their code. + VerificationUriComplete *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationStartDeviceAuthorizationMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpStartDeviceAuthorization{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpStartDeviceAuthorization{}, middleware.After) + if err != nil { + return err + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addStartDeviceAuthorizationResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addOpStartDeviceAuthorizationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opStartDeviceAuthorization(options.Region), middleware.Before); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opStartDeviceAuthorization(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "StartDeviceAuthorization", + } +} + +type opStartDeviceAuthorizationResolveEndpointMiddleware struct { + EndpointResolver EndpointResolverV2 + BuiltInResolver builtInParameterResolver +} + +func (*opStartDeviceAuthorizationResolveEndpointMiddleware) ID() string { + return "ResolveEndpointV2" +} + +func (m *opStartDeviceAuthorizationResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { + return next.HandleSerialize(ctx, 
in) + } + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.EndpointResolver == nil { + return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") + } + + params := EndpointParameters{} + + m.BuiltInResolver.ResolveBuiltIns(¶ms) + + var resolvedEndpoint smithyendpoints.Endpoint + resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) + if err != nil { + return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) + } + + req.URL = &resolvedEndpoint.URI + + for k := range resolvedEndpoint.Headers { + req.Header.Set( + k, + resolvedEndpoint.Headers.Get(k), + ) + } + + authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) + if err != nil { + var nfe *internalauth.NoAuthenticationSchemesFoundError + if errors.As(err, &nfe) { + // if no auth scheme is found, default to sigv4 + signingName := "awsssooidc" + signingRegion := m.BuiltInResolver.(*builtInResolver).Region + ctx = awsmiddleware.SetSigningName(ctx, signingName) + ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) + + } + var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError + if errors.As(err, &ue) { + return out, metadata, fmt.Errorf( + "This operation requests signer version(s) %v but the client only supports %v", + ue.UnsupportedSchemes, + internalauth.SupportedSchemes, + ) + } + } + + for _, authScheme := range authSchemes { + switch authScheme.(type) { + case *internalauth.AuthenticationSchemeV4: + v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) + var signingName, signingRegion string + if v4Scheme.SigningName == nil { + signingName = "awsssooidc" + } else { + signingName = *v4Scheme.SigningName + } + if v4Scheme.SigningRegion == nil { + signingRegion = m.BuiltInResolver.(*builtInResolver).Region + } else { + signingRegion = *v4Scheme.SigningRegion + } + if v4Scheme.DisableDoubleEncoding != nil { + // The signer sets an equivalent value at client initialization time. + // Setting this context value will cause the signer to extract it + // and override the value set at client initialization time. + ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) + } + ctx = awsmiddleware.SetSigningName(ctx, signingName) + ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) + break + case *internalauth.AuthenticationSchemeV4A: + v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) + if v4aScheme.SigningName == nil { + v4aScheme.SigningName = aws.String("awsssooidc") + } + if v4aScheme.DisableDoubleEncoding != nil { + // The signer sets an equivalent value at client initialization time. + // Setting this context value will cause the signer to extract it + // and override the value set at client initialization time. 
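// Taken together, the three operations in this package implement the OAuth
// device-code grant. A hedged end-to-end sketch (identifiers and the start
// URL are illustrative; assumes errors, time, aws, ssooidc, and its types
// package are imported):
//
//	rc, _ := client.RegisterClient(ctx, &ssooidc.RegisterClientInput{
//		ClientName: aws.String("my-cli"), ClientType: aws.String("public"),
//	})
//	da, _ := client.StartDeviceAuthorization(ctx, &ssooidc.StartDeviceAuthorizationInput{
//		ClientId: rc.ClientId, ClientSecret: rc.ClientSecret,
//		StartUrl: aws.String("https://example.awsapps.com/start"), // illustrative
//	})
//	// Send the user to *da.VerificationUriComplete, then poll at the
//	// server-provided interval until authorization completes.
//	for {
//		time.Sleep(time.Duration(da.Interval) * time.Second)
//		tok, err := client.CreateToken(ctx, &ssooidc.CreateTokenInput{
//			ClientId: rc.ClientId, ClientSecret: rc.ClientSecret,
//			GrantType:  aws.String("urn:ietf:params:oauth:grant-type:device_code"),
//			DeviceCode: da.DeviceCode,
//		})
//		var pending *types.AuthorizationPendingException
//		if errors.As(err, &pending) {
//			continue // user has not finished authorizing yet
//		}
//		if err != nil {
//			break // handle SlowDownException, ExpiredTokenException, etc.
//		}
//		_ = tok.AccessToken // use the access token
//		break
//	}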
+ ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) + } + ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) + ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) + break + case *internalauth.AuthenticationSchemeNone: + break + } + } + + return next.HandleSerialize(ctx, in) +} + +func addStartDeviceAuthorizationResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { + return stack.Serialize.Insert(&opStartDeviceAuthorizationResolveEndpointMiddleware{ + EndpointResolver: options.EndpointResolverV2, + BuiltInResolver: &builtInResolver{ + Region: options.Region, + UseDualStack: options.EndpointOptions.UseDualStackEndpoint, + UseFIPS: options.EndpointOptions.UseFIPSEndpoint, + Endpoint: options.BaseEndpoint, + }, + }, "ResolveEndpoint", middleware.After) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go new file mode 100644 index 00000000000..ca30d22f97b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go @@ -0,0 +1,1689 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ssooidc + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws/protocol/restjson" + "github.com/aws/aws-sdk-go-v2/service/ssooidc/types" + smithy "github.com/aws/smithy-go" + smithyio "github.com/aws/smithy-go/io" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" + "io" + "strings" +) + +type awsRestjson1_deserializeOpCreateToken struct { +} + +func (*awsRestjson1_deserializeOpCreateToken) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpCreateToken) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorCreateToken(response, &metadata) + } + output := &CreateTokenOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentCreateTokenOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorCreateToken(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + 
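// This and the error deserializers that follow share one smithy-go pattern:
// the body is copied into a buffer so it can be read twice (once by the
// generic restjson error probe, then again by the typed deserializer after
// the Seek(0, io.SeekStart) rewind); a 1 KiB RingBuffer tees off the most
// recently read bytes so a decode failure can attach a payload snapshot to
// the DeserializationError; and the X-Amzn-ErrorType header takes precedence
// over any error code found in the JSON body.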
if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody) + + case strings.EqualFold("AuthorizationPendingException", errorCode): + return awsRestjson1_deserializeErrorAuthorizationPendingException(response, errorBody) + + case strings.EqualFold("ExpiredTokenException", errorCode): + return awsRestjson1_deserializeErrorExpiredTokenException(response, errorBody) + + case strings.EqualFold("InternalServerException", errorCode): + return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidClientException", errorCode): + return awsRestjson1_deserializeErrorInvalidClientException(response, errorBody) + + case strings.EqualFold("InvalidGrantException", errorCode): + return awsRestjson1_deserializeErrorInvalidGrantException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody) + + case strings.EqualFold("InvalidScopeException", errorCode): + return awsRestjson1_deserializeErrorInvalidScopeException(response, errorBody) + + case strings.EqualFold("SlowDownException", errorCode): + return awsRestjson1_deserializeErrorSlowDownException(response, errorBody) + + case strings.EqualFold("UnauthorizedClientException", errorCode): + return awsRestjson1_deserializeErrorUnauthorizedClientException(response, errorBody) + + case strings.EqualFold("UnsupportedGrantTypeException", errorCode): + return awsRestjson1_deserializeErrorUnsupportedGrantTypeException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentCreateTokenOutput(v **CreateTokenOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *CreateTokenOutput + if *v == nil { + sv = &CreateTokenOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "accessToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected AccessToken to be of type string, got %T instead", value) + } + sv.AccessToken = 
ptr.String(jtv) + } + + case "expiresIn": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected ExpirationInSeconds to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.ExpiresIn = int32(i64) + } + + case "idToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected IdToken to be of type string, got %T instead", value) + } + sv.IdToken = ptr.String(jtv) + } + + case "refreshToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RefreshToken to be of type string, got %T instead", value) + } + sv.RefreshToken = ptr.String(jtv) + } + + case "tokenType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TokenType to be of type string, got %T instead", value) + } + sv.TokenType = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpRegisterClient struct { +} + +func (*awsRestjson1_deserializeOpRegisterClient) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpRegisterClient) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorRegisterClient(response, &metadata) + } + output := &RegisterClientOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentRegisterClientOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorRegisterClient(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, 
message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidClientMetadataException", errorCode): + return awsRestjson1_deserializeErrorInvalidClientMetadataException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody) + + case strings.EqualFold("InvalidScopeException", errorCode): + return awsRestjson1_deserializeErrorInvalidScopeException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentRegisterClientOutput(v **RegisterClientOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *RegisterClientOutput + if *v == nil { + sv = &RegisterClientOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "authorizationEndpoint": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected URI to be of type string, got %T instead", value) + } + sv.AuthorizationEndpoint = ptr.String(jtv) + } + + case "clientId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ClientId to be of type string, got %T instead", value) + } + sv.ClientId = ptr.String(jtv) + } + + case "clientIdIssuedAt": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected LongTimeStampType to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.ClientIdIssuedAt = i64 + } + + case "clientSecret": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ClientSecret to be of type string, got %T instead", value) + } + sv.ClientSecret = ptr.String(jtv) + } + + case "clientSecretExpiresAt": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected LongTimeStampType to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.ClientSecretExpiresAt = i64 + } + + case "tokenEndpoint": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected URI to be of type string, got %T instead", value) + } + sv.TokenEndpoint = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpStartDeviceAuthorization struct { +} + +func (*awsRestjson1_deserializeOpStartDeviceAuthorization) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpStartDeviceAuthorization) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next 
middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorStartDeviceAuthorization(response, &metadata) + } + output := &StartDeviceAuthorizationOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentStartDeviceAuthorizationOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorStartDeviceAuthorization(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidClientException", errorCode): + return awsRestjson1_deserializeErrorInvalidClientException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody) + + case strings.EqualFold("SlowDownException", errorCode): + return awsRestjson1_deserializeErrorSlowDownException(response, errorBody) + + case strings.EqualFold("UnauthorizedClientException", errorCode): + return awsRestjson1_deserializeErrorUnauthorizedClientException(response, errorBody) + + default: + genericError 
:= &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentStartDeviceAuthorizationOutput(v **StartDeviceAuthorizationOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *StartDeviceAuthorizationOutput + if *v == nil { + sv = &StartDeviceAuthorizationOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "deviceCode": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DeviceCode to be of type string, got %T instead", value) + } + sv.DeviceCode = ptr.String(jtv) + } + + case "expiresIn": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected ExpirationInSeconds to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.ExpiresIn = int32(i64) + } + + case "interval": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected IntervalInSeconds to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.Interval = int32(i64) + } + + case "userCode": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected UserCode to be of type string, got %T instead", value) + } + sv.UserCode = ptr.String(jtv) + } + + case "verificationUri": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected URI to be of type string, got %T instead", value) + } + sv.VerificationUri = ptr.String(jtv) + } + + case "verificationUriComplete": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected URI to be of type string, got %T instead", value) + } + sv.VerificationUriComplete = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeErrorAccessDeniedException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.AccessDeniedException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentAccessDeniedException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorAuthorizationPendingException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.AuthorizationPendingException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err 
!= nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentAuthorizationPendingException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorExpiredTokenException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.ExpiredTokenException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentExpiredTokenException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorInternalServerException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.InternalServerException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentInternalServerException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorInvalidClientException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.InvalidClientException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentInvalidClientException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", 
err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorInvalidClientMetadataException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.InvalidClientMetadataException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentInvalidClientMetadataException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorInvalidGrantException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.InvalidGrantException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentInvalidGrantException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorInvalidRequestException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.InvalidRequestException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentInvalidRequestException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorInvalidScopeException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.InvalidScopeException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + 
if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentInvalidScopeException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorSlowDownException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.SlowDownException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentSlowDownException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorUnauthorizedClientException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.UnauthorizedClientException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentUnauthorizedClientException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorUnsupportedGrantTypeException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.UnsupportedGrantTypeException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentUnsupportedGrantTypeException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: 
fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeDocumentAccessDeniedException(v **types.AccessDeniedException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.AccessDeniedException + if *v == nil { + sv = &types.AccessDeniedException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "error": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Error to be of type string, got %T instead", value) + } + sv.Error_ = ptr.String(jtv) + } + + case "error_description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Error_description = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentAuthorizationPendingException(v **types.AuthorizationPendingException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.AuthorizationPendingException + if *v == nil { + sv = &types.AuthorizationPendingException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "error": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Error to be of type string, got %T instead", value) + } + sv.Error_ = ptr.String(jtv) + } + + case "error_description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Error_description = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentExpiredTokenException(v **types.ExpiredTokenException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ExpiredTokenException + if *v == nil { + sv = &types.ExpiredTokenException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "error": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Error to be of type string, got %T instead", value) + } + sv.Error_ = ptr.String(jtv) + } + + case "error_description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Error_description = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentInternalServerException(v **types.InternalServerException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return 
fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InternalServerException + if *v == nil { + sv = &types.InternalServerException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "error": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Error to be of type string, got %T instead", value) + } + sv.Error_ = ptr.String(jtv) + } + + case "error_description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Error_description = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentInvalidClientException(v **types.InvalidClientException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InvalidClientException + if *v == nil { + sv = &types.InvalidClientException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "error": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Error to be of type string, got %T instead", value) + } + sv.Error_ = ptr.String(jtv) + } + + case "error_description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Error_description = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentInvalidClientMetadataException(v **types.InvalidClientMetadataException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InvalidClientMetadataException + if *v == nil { + sv = &types.InvalidClientMetadataException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "error": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Error to be of type string, got %T instead", value) + } + sv.Error_ = ptr.String(jtv) + } + + case "error_description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Error_description = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentInvalidGrantException(v **types.InvalidGrantException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InvalidGrantException + if *v == nil { + sv = &types.InvalidGrantException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "error": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Error to be of type string, got %T instead", value) + } + sv.Error_ = ptr.String(jtv) + } + + case "error_description": + if value != nil { + 
jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Error_description = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentInvalidRequestException(v **types.InvalidRequestException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InvalidRequestException + if *v == nil { + sv = &types.InvalidRequestException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "error": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Error to be of type string, got %T instead", value) + } + sv.Error_ = ptr.String(jtv) + } + + case "error_description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Error_description = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentInvalidScopeException(v **types.InvalidScopeException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InvalidScopeException + if *v == nil { + sv = &types.InvalidScopeException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "error": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Error to be of type string, got %T instead", value) + } + sv.Error_ = ptr.String(jtv) + } + + case "error_description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Error_description = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentSlowDownException(v **types.SlowDownException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.SlowDownException + if *v == nil { + sv = &types.SlowDownException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "error": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Error to be of type string, got %T instead", value) + } + sv.Error_ = ptr.String(jtv) + } + + case "error_description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Error_description = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentUnauthorizedClientException(v **types.UnauthorizedClientException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := 
value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.UnauthorizedClientException + if *v == nil { + sv = &types.UnauthorizedClientException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "error": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Error to be of type string, got %T instead", value) + } + sv.Error_ = ptr.String(jtv) + } + + case "error_description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Error_description = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentUnsupportedGrantTypeException(v **types.UnsupportedGrantTypeException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.UnsupportedGrantTypeException + if *v == nil { + sv = &types.UnsupportedGrantTypeException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "error": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Error to be of type string, got %T instead", value) + } + sv.Error_ = ptr.String(jtv) + } + + case "error_description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Error_description = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go new file mode 100644 index 00000000000..2239427d889 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go @@ -0,0 +1,36 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +// Package ssooidc provides the API client, operations, and parameter types for +// AWS SSO OIDC. +// +// AWS IAM Identity Center (successor to AWS Single Sign-On) OpenID Connect (OIDC) +// is a web service that enables a client (such as AWS CLI or a native application) +// to register with IAM Identity Center. The service also enables the client to +// fetch the user’s access token upon successful authentication and authorization +// with IAM Identity Center. Although AWS Single Sign-On was renamed, the sso and +// identitystore API namespaces will continue to retain their original name for +// backward compatibility purposes. For more information, see IAM Identity Center +// rename (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html#renamed) +// . Considerations for Using This Guide Before you begin using this guide, we +// recommend that you first review the following important information about how +// the IAM Identity Center OIDC service works. +// - The IAM Identity Center OIDC service currently implements only the portions +// of the OAuth 2.0 Device Authorization Grant standard ( +// https://tools.ietf.org/html/rfc8628 (https://tools.ietf.org/html/rfc8628) ) +// that are necessary to enable single sign-on authentication with the AWS CLI. 
+// Support for other OIDC flows frequently needed for native applications, such as +// Authorization Code Flow (+ PKCE), will be addressed in future releases. +// - The service emits only OIDC access tokens, such that obtaining a new token +// (for example, token refresh) requires explicit user re-authentication. +// - The access tokens provided by this service grant access to all AWS account +// entitlements assigned to an IAM Identity Center user, not just a particular +// application. +// - The documentation in this guide does not describe the mechanism to convert +// the access token into AWS Auth (“sigv4”) credentials for use with IAM-protected +// AWS service endpoints. For more information, see GetRoleCredentials (https://docs.aws.amazon.com/singlesignon/latest/PortalAPIReference/API_GetRoleCredentials.html) +// in the IAM Identity Center Portal API Reference Guide. +// +// For general information about IAM Identity Center, see What is IAM Identity +// Center? (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html) +// in the IAM Identity Center User Guide. +package ssooidc diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/endpoints.go new file mode 100644 index 00000000000..50b490cbdb9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/endpoints.go @@ -0,0 +1,519 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ssooidc + +import ( + "context" + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" + "github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn" + internalendpoints "github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" + "net/http" + "net/url" + "os" + "strings" +) + +// EndpointResolverOptions is the service endpoint resolver options +type EndpointResolverOptions = internalendpoints.Options + +// EndpointResolver interface for resolving service endpoints. +type EndpointResolver interface { + ResolveEndpoint(region string, options EndpointResolverOptions) (aws.Endpoint, error) +} + +var _ EndpointResolver = &internalendpoints.Resolver{} + +// NewDefaultEndpointResolver constructs a new service endpoint resolver +func NewDefaultEndpointResolver() *internalendpoints.Resolver { + return internalendpoints.New() +} + +// EndpointResolverFunc is a helper utility that wraps a function so it satisfies +// the EndpointResolver interface. This is useful when you want to add additional +// endpoint resolving logic, or stub out specific endpoints with custom values. +type EndpointResolverFunc func(region string, options EndpointResolverOptions) (aws.Endpoint, error) + +func (fn EndpointResolverFunc) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { + return fn(region, options) +} + +// EndpointResolverFromURL returns an EndpointResolver configured using the +// provided endpoint url. By default, the resolved endpoint resolver uses the +// client region as signing region, and the endpoint source is set to +// EndpointSourceCustom. You can provide functional options to configure endpoint +// values for the resolved endpoint.
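+//
+// Illustrative sketch (editor's addition, not part of the generated file):
+// pointing the client at a custom endpoint. The URL is a placeholder; the
+// Options fields shown are the ones consumed by this package's middleware.
+//
+//    resolver := EndpointResolverFromURL("https://oidc.example.internal",
+//        func(e *aws.Endpoint) { e.HostnameImmutable = true },
+//    )
+//    client := New(Options{Region: "us-east-1", EndpointResolver: resolver})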
+func EndpointResolverFromURL(url string, optFns ...func(*aws.Endpoint)) EndpointResolver { + e := aws.Endpoint{URL: url, Source: aws.EndpointSourceCustom} + for _, fn := range optFns { + fn(&e) + } + + return EndpointResolverFunc( + func(region string, options EndpointResolverOptions) (aws.Endpoint, error) { + if len(e.SigningRegion) == 0 { + e.SigningRegion = region + } + return e, nil + }, + ) +} + +type ResolveEndpoint struct { + Resolver EndpointResolver + Options EndpointResolverOptions +} + +func (*ResolveEndpoint) ID() string { + return "ResolveEndpoint" +} + +func (m *ResolveEndpoint) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + if !awsmiddleware.GetRequiresLegacyEndpoints(ctx) { + return next.HandleSerialize(ctx, in) + } + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.Resolver == nil { + return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") + } + + eo := m.Options + eo.Logger = middleware.GetLogger(ctx) + + var endpoint aws.Endpoint + endpoint, err = m.Resolver.ResolveEndpoint(awsmiddleware.GetRegion(ctx), eo) + if err != nil { + nf := (&aws.EndpointNotFoundError{}) + if errors.As(err, &nf) { + ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, false) + return next.HandleSerialize(ctx, in) + } + return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) + } + + req.URL, err = url.Parse(endpoint.URL) + if err != nil { + return out, metadata, fmt.Errorf("failed to parse endpoint URL: %w", err) + } + + if len(awsmiddleware.GetSigningName(ctx)) == 0 { + signingName := endpoint.SigningName + if len(signingName) == 0 { + signingName = "awsssooidc" + } + ctx = awsmiddleware.SetSigningName(ctx, signingName) + } + ctx = awsmiddleware.SetEndpointSource(ctx, endpoint.Source) + ctx = smithyhttp.SetHostnameImmutable(ctx, endpoint.HostnameImmutable) + ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion) + ctx = awsmiddleware.SetPartitionID(ctx, endpoint.PartitionID) + return next.HandleSerialize(ctx, in) +} +func addResolveEndpointMiddleware(stack *middleware.Stack, o Options) error { + return stack.Serialize.Insert(&ResolveEndpoint{ + Resolver: o.EndpointResolver, + Options: o.EndpointOptions, + }, "OperationSerializer", middleware.Before) +} + +func removeResolveEndpointMiddleware(stack *middleware.Stack) error { + _, err := stack.Serialize.Remove((&ResolveEndpoint{}).ID()) + return err +} + +type wrappedEndpointResolver struct { + awsResolver aws.EndpointResolverWithOptions +} + +func (w *wrappedEndpointResolver) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { + return w.awsResolver.ResolveEndpoint(ServiceID, region, options) +} + +type awsEndpointResolverAdaptor func(service, region string) (aws.Endpoint, error) + +func (a awsEndpointResolverAdaptor) ResolveEndpoint(service, region string, options ...interface{}) (aws.Endpoint, error) { + return a(service, region) +} + +var _ aws.EndpointResolverWithOptions = awsEndpointResolverAdaptor(nil) + +// withEndpointResolver returns an aws.EndpointResolverWithOptions that first delegates endpoint resolution to the awsResolver. 
+// If awsResolver returns aws.EndpointNotFoundError error, the v1 resolver middleware will swallow the error, +// and set an appropriate context flag such that fallback will occur when EndpointResolverV2 is invoked +// via its middleware. +// +// If another error (besides aws.EndpointNotFoundError) is returned, then that error will be propagated. +func withEndpointResolver(awsResolver aws.EndpointResolver, awsResolverWithOptions aws.EndpointResolverWithOptions) EndpointResolver { + var resolver aws.EndpointResolverWithOptions + + if awsResolverWithOptions != nil { + resolver = awsResolverWithOptions + } else if awsResolver != nil { + resolver = awsEndpointResolverAdaptor(awsResolver.ResolveEndpoint) + } + + return &wrappedEndpointResolver{ + awsResolver: resolver, + } +} + +func finalizeClientEndpointResolverOptions(options *Options) { + options.EndpointOptions.LogDeprecated = options.ClientLogMode.IsDeprecatedUsage() + + if len(options.EndpointOptions.ResolvedRegion) == 0 { + const fipsInfix = "-fips-" + const fipsPrefix = "fips-" + const fipsSuffix = "-fips" + + if strings.Contains(options.Region, fipsInfix) || + strings.Contains(options.Region, fipsPrefix) || + strings.Contains(options.Region, fipsSuffix) { + options.EndpointOptions.ResolvedRegion = strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll( + options.Region, fipsInfix, "-"), fipsPrefix, ""), fipsSuffix, "") + options.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateEnabled + } + } + +} + +func resolveEndpointResolverV2(options *Options) { + if options.EndpointResolverV2 == nil { + options.EndpointResolverV2 = NewDefaultEndpointResolverV2() + } +} + +func resolveBaseEndpoint(cfg aws.Config, o *Options) { + if cfg.BaseEndpoint != nil { + o.BaseEndpoint = cfg.BaseEndpoint + } + + _, g := os.LookupEnv("AWS_ENDPOINT_URL") + _, s := os.LookupEnv("AWS_ENDPOINT_URL_SSO_OIDC") + + if g && !s { + return + } + + value, found, err := internalConfig.ResolveServiceBaseEndpoint(context.Background(), "SSO OIDC", cfg.ConfigSources) + if found && err == nil { + o.BaseEndpoint = &value + } +} + +// Utility function to aid with translating pseudo-regions to classical regions +// with the appropriate setting indicated by the pseudo-region +func mapPseudoRegion(pr string) (region string, fips aws.FIPSEndpointState) { + const fipsInfix = "-fips-" + const fipsPrefix = "fips-" + const fipsSuffix = "-fips" + + if strings.Contains(pr, fipsInfix) || + strings.Contains(pr, fipsPrefix) || + strings.Contains(pr, fipsSuffix) { + region = strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll( + pr, fipsInfix, "-"), fipsPrefix, ""), fipsSuffix, "") + fips = aws.FIPSEndpointStateEnabled + } else { + region = pr + } + + return region, fips +} + +// builtInParameterResolver is the interface responsible for resolving BuiltIn +// values during the sourcing of EndpointParameters +type builtInParameterResolver interface { + ResolveBuiltIns(*EndpointParameters) error +} + +// builtInResolver resolves modeled BuiltIn values using only the members defined +// below. +type builtInResolver struct { + // The AWS region used to dispatch the request. + Region string + + // Sourced BuiltIn value in a historical enabled or disabled state. + UseDualStack aws.DualStackEndpointState + + // Sourced BuiltIn value in a historical enabled or disabled state. + UseFIPS aws.FIPSEndpointState + + // Base endpoint that can potentially be modified during Endpoint resolution. + Endpoint *string +} + +// Invoked at runtime to resolve BuiltIn Values. 
Only resolution code specific to +// each BuiltIn value is generated. +func (b *builtInResolver) ResolveBuiltIns(params *EndpointParameters) error { + + region, _ := mapPseudoRegion(b.Region) + if len(region) == 0 { + return fmt.Errorf("Could not resolve AWS::Region") + } else { + params.Region = aws.String(region) + } + if b.UseDualStack == aws.DualStackEndpointStateEnabled { + params.UseDualStack = aws.Bool(true) + } else { + params.UseDualStack = aws.Bool(false) + } + if b.UseFIPS == aws.FIPSEndpointStateEnabled { + params.UseFIPS = aws.Bool(true) + } else { + params.UseFIPS = aws.Bool(false) + } + params.Endpoint = b.Endpoint + return nil +} + +// EndpointParameters provides the parameters that influence how endpoints are +// resolved. +type EndpointParameters struct { + // The AWS region used to dispatch the request. + // + // Parameter is + // required. + // + // AWS::Region + Region *string + + // When true, use the dual-stack endpoint. If the configured endpoint does not + // support dual-stack, dispatching the request MAY return an error. + // + // Defaults to + // false if no value is provided. + // + // AWS::UseDualStack + UseDualStack *bool + + // When true, send this request to the FIPS-compliant regional endpoint. If the + // configured endpoint does not have a FIPS compliant endpoint, dispatching the + // request will return an error. + // + // Defaults to false if no value is + // provided. + // + // AWS::UseFIPS + UseFIPS *bool + + // Override the endpoint used to send this request + // + // Parameter is + // required. + // + // SDK::Endpoint + Endpoint *string +} + +// ValidateRequired validates required parameters are set. +func (p EndpointParameters) ValidateRequired() error { + if p.UseDualStack == nil { + return fmt.Errorf("parameter UseDualStack is required") + } + + if p.UseFIPS == nil { + return fmt.Errorf("parameter UseFIPS is required") + } + + return nil +} + +// WithDefaults returns a shallow copy of EndpointParameters with default values +// applied to members where applicable. +func (p EndpointParameters) WithDefaults() EndpointParameters { + if p.UseDualStack == nil { + p.UseDualStack = ptr.Bool(false) + } + + if p.UseFIPS == nil { + p.UseFIPS = ptr.Bool(false) + } + return p +} + +// EndpointResolverV2 provides the interface for resolving service endpoints. +type EndpointResolverV2 interface { + // ResolveEndpoint attempts to resolve the endpoint with the provided options, + // returning the endpoint if found. Otherwise an error is returned. + ResolveEndpoint(ctx context.Context, params EndpointParameters) ( + smithyendpoints.Endpoint, error, + ) +} + +// resolver provides the implementation for resolving endpoints. +type resolver struct{} + +func NewDefaultEndpointResolverV2() EndpointResolverV2 { + return &resolver{} +} + +// ResolveEndpoint attempts to resolve the endpoint with the provided options, +// returning the endpoint if found. Otherwise an error is returned.
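+//
+// Illustrative sketch (editor's addition, not part of the generated file):
+// exercising the V2 resolver directly, using only names defined in this file
+// plus the aws helpers:
+//
+//    params := EndpointParameters{Region: aws.String("us-west-2"), UseFIPS: aws.Bool(true)}.WithDefaults()
+//    ep, err := NewDefaultEndpointResolverV2().ResolveEndpoint(context.Background(), params)
+//    // Under the rules below, err is nil and ep.URI is https://oidc-fips.us-west-2.amazonaws.com.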
+func (r *resolver) ResolveEndpoint( + ctx context.Context, params EndpointParameters, +) ( + endpoint smithyendpoints.Endpoint, err error, +) { + params = params.WithDefaults() + if err = params.ValidateRequired(); err != nil { + return endpoint, fmt.Errorf("endpoint parameters are not valid, %w", err) + } + _UseDualStack := *params.UseDualStack + _UseFIPS := *params.UseFIPS + + if exprVal := params.Endpoint; exprVal != nil { + _Endpoint := *exprVal + _ = _Endpoint + if _UseFIPS == true { + return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: FIPS and custom endpoint are not supported") + } + if _UseDualStack == true { + return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Dualstack and custom endpoint are not supported") + } + uriString := _Endpoint + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + if exprVal := params.Region; exprVal != nil { + _Region := *exprVal + _ = _Region + if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { + _PartitionResult := *exprVal + _ = _PartitionResult + if _UseFIPS == true { + if _UseDualStack == true { + if true == _PartitionResult.SupportsFIPS { + if true == _PartitionResult.SupportsDualStack { + uriString := func() string { + var out strings.Builder + out.WriteString("https://oidc-fips.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DualStackDnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS and DualStack are enabled, but this partition does not support one or both") + } + } + if _UseFIPS == true { + if true == _PartitionResult.SupportsFIPS { + if "aws-us-gov" == _PartitionResult.Name { + uriString := func() string { + var out strings.Builder + out.WriteString("https://oidc.") + out.WriteString(_Region) + out.WriteString(".amazonaws.com") + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://oidc-fips.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS is enabled but this partition does not support FIPS") + } + if _UseDualStack == true { + if true == _PartitionResult.SupportsDualStack { + uriString := func() string { + var out strings.Builder + out.WriteString("https://oidc.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DualStackDnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: 
http.Header{}, + }, nil + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "DualStack is enabled but this partition does not support DualStack") + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://oidc.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Missing Region") +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json new file mode 100644 index 00000000000..403fac7c5af --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json @@ -0,0 +1,32 @@ +{ + "dependencies": { + "github.com/aws/aws-sdk-go-v2": "v1.4.0", + "github.com/aws/aws-sdk-go-v2/internal/configsources": "v0.0.0-00010101000000-000000000000", + "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2": "v2.0.0-00010101000000-000000000000", + "github.com/aws/smithy-go": "v1.4.0", + "github.com/google/go-cmp": "v0.5.4" + }, + "files": [ + "api_client.go", + "api_client_test.go", + "api_op_CreateToken.go", + "api_op_RegisterClient.go", + "api_op_StartDeviceAuthorization.go", + "deserializers.go", + "doc.go", + "endpoints.go", + "endpoints_config_test.go", + "endpoints_test.go", + "generated.json", + "internal/endpoints/endpoints.go", + "internal/endpoints/endpoints_test.go", + "protocol_test.go", + "serializers.go", + "types/errors.go", + "types/types.go", + "validators.go" + ], + "go": "1.15", + "module": "github.com/aws/aws-sdk-go-v2/service/ssooidc", + "unstable": false +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go new file mode 100644 index 00000000000..bcd16fdeac3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go @@ -0,0 +1,6 @@ +// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. + +package ssooidc + +// goModuleVersion is the tagged release for this module +const goModuleVersion = "1.19.1" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go new file mode 100644 index 00000000000..c48da8b88a6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go @@ -0,0 +1,526 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package endpoints + +import ( + "github.com/aws/aws-sdk-go-v2/aws" + endpoints "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2" + "github.com/aws/smithy-go/logging" + "regexp" +) + +// Options is the endpoint resolver configuration options +type Options struct { + // Logger is a logging implementation that log events should be sent to. + Logger logging.Logger + + // LogDeprecated indicates that deprecated endpoints should be logged to the + // provided logger. 
+ LogDeprecated bool + + // ResolvedRegion is used to override the region to be resolved, rather than + // using the value passed to the ResolveEndpoint method. This value is used by the + // SDK to translate regions like fips-us-east-1 or us-east-1-fips to an alternative + // name. You must not set this value directly in your application. + ResolvedRegion string + + // DisableHTTPS informs the resolver to return an endpoint that does not use the + // HTTPS scheme. + DisableHTTPS bool + + // UseDualStackEndpoint specifies the resolver must resolve a dual-stack endpoint. + UseDualStackEndpoint aws.DualStackEndpointState + + // UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint. + UseFIPSEndpoint aws.FIPSEndpointState +} + +func (o Options) GetResolvedRegion() string { + return o.ResolvedRegion +} + +func (o Options) GetDisableHTTPS() bool { + return o.DisableHTTPS +} + +func (o Options) GetUseDualStackEndpoint() aws.DualStackEndpointState { + return o.UseDualStackEndpoint +} + +func (o Options) GetUseFIPSEndpoint() aws.FIPSEndpointState { + return o.UseFIPSEndpoint +} + +func transformToSharedOptions(options Options) endpoints.Options { + return endpoints.Options{ + Logger: options.Logger, + LogDeprecated: options.LogDeprecated, + ResolvedRegion: options.ResolvedRegion, + DisableHTTPS: options.DisableHTTPS, + UseDualStackEndpoint: options.UseDualStackEndpoint, + UseFIPSEndpoint: options.UseFIPSEndpoint, + } +} + +// Resolver SSO OIDC endpoint resolver +type Resolver struct { + partitions endpoints.Partitions +} + +// ResolveEndpoint resolves the service endpoint for the given region and options +func (r *Resolver) ResolveEndpoint(region string, options Options) (endpoint aws.Endpoint, err error) { + if len(region) == 0 { + return endpoint, &aws.MissingRegionError{} + } + + opt := transformToSharedOptions(options) + return r.partitions.ResolveEndpoint(region, opt) +} + +// New returns a new Resolver +func New() *Resolver { + return &Resolver{ + partitions: defaultPartitions, + } +} + +var partitionRegexp = struct { + Aws *regexp.Regexp + AwsCn *regexp.Regexp + AwsIso *regexp.Regexp + AwsIsoB *regexp.Regexp + AwsIsoE *regexp.Regexp + AwsIsoF *regexp.Regexp + AwsUsGov *regexp.Regexp +}{ + + Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af|il)\\-\\w+\\-\\d+$"), + AwsCn: regexp.MustCompile("^cn\\-\\w+\\-\\d+$"), + AwsIso: regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"), + AwsIsoB: regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"), + AwsIsoE: regexp.MustCompile("^eu\\-isoe\\-\\w+\\-\\d+$"), + AwsIsoF: regexp.MustCompile("^us\\-isof\\-\\w+\\-\\d+$"), + AwsUsGov: regexp.MustCompile("^us\\-gov\\-\\w+\\-\\d+$"), +} + +var defaultPartitions = endpoints.Partitions{ + { + ID: "aws", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "oidc.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "oidc-fips.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "oidc-fips.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "oidc.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.Aws, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ +
endpoints.EndpointKey{ + Region: "af-south-1", + }: endpoints.Endpoint{ + Hostname: "oidc.af-south-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "af-south-1", + }, + }, + endpoints.EndpointKey{ + Region: "ap-east-1", + }: endpoints.Endpoint{ + Hostname: "oidc.ap-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-east-1", + }, + }, + endpoints.EndpointKey{ + Region: "ap-northeast-1", + }: endpoints.Endpoint{ + Hostname: "oidc.ap-northeast-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-northeast-1", + }, + }, + endpoints.EndpointKey{ + Region: "ap-northeast-2", + }: endpoints.Endpoint{ + Hostname: "oidc.ap-northeast-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-northeast-2", + }, + }, + endpoints.EndpointKey{ + Region: "ap-northeast-3", + }: endpoints.Endpoint{ + Hostname: "oidc.ap-northeast-3.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-northeast-3", + }, + }, + endpoints.EndpointKey{ + Region: "ap-south-1", + }: endpoints.Endpoint{ + Hostname: "oidc.ap-south-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-south-1", + }, + }, + endpoints.EndpointKey{ + Region: "ap-southeast-1", + }: endpoints.Endpoint{ + Hostname: "oidc.ap-southeast-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-southeast-1", + }, + }, + endpoints.EndpointKey{ + Region: "ap-southeast-2", + }: endpoints.Endpoint{ + Hostname: "oidc.ap-southeast-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-southeast-2", + }, + }, + endpoints.EndpointKey{ + Region: "ap-southeast-3", + }: endpoints.Endpoint{ + Hostname: "oidc.ap-southeast-3.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-southeast-3", + }, + }, + endpoints.EndpointKey{ + Region: "ca-central-1", + }: endpoints.Endpoint{ + Hostname: "oidc.ca-central-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ca-central-1", + }, + }, + endpoints.EndpointKey{ + Region: "eu-central-1", + }: endpoints.Endpoint{ + Hostname: "oidc.eu-central-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-central-1", + }, + }, + endpoints.EndpointKey{ + Region: "eu-central-2", + }: endpoints.Endpoint{ + Hostname: "oidc.eu-central-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-central-2", + }, + }, + endpoints.EndpointKey{ + Region: "eu-north-1", + }: endpoints.Endpoint{ + Hostname: "oidc.eu-north-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-north-1", + }, + }, + endpoints.EndpointKey{ + Region: "eu-south-1", + }: endpoints.Endpoint{ + Hostname: "oidc.eu-south-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-south-1", + }, + }, + endpoints.EndpointKey{ + Region: "eu-west-1", + }: endpoints.Endpoint{ + Hostname: "oidc.eu-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-west-1", + }, + }, + endpoints.EndpointKey{ + Region: "eu-west-2", + }: endpoints.Endpoint{ + Hostname: "oidc.eu-west-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-west-2", + }, + }, + endpoints.EndpointKey{ + Region: "eu-west-3", + }: endpoints.Endpoint{ + Hostname: "oidc.eu-west-3.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-west-3", + }, + }, + endpoints.EndpointKey{ + Region: "il-central-1", + }: endpoints.Endpoint{ + Hostname: 
"oidc.il-central-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "il-central-1", + }, + }, + endpoints.EndpointKey{ + Region: "me-south-1", + }: endpoints.Endpoint{ + Hostname: "oidc.me-south-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "me-south-1", + }, + }, + endpoints.EndpointKey{ + Region: "sa-east-1", + }: endpoints.Endpoint{ + Hostname: "oidc.sa-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "sa-east-1", + }, + }, + endpoints.EndpointKey{ + Region: "us-east-1", + }: endpoints.Endpoint{ + Hostname: "oidc.us-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-1", + }, + }, + endpoints.EndpointKey{ + Region: "us-east-2", + }: endpoints.Endpoint{ + Hostname: "oidc.us-east-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-2", + }, + }, + endpoints.EndpointKey{ + Region: "us-west-1", + }: endpoints.Endpoint{ + Hostname: "oidc.us-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-west-1", + }, + }, + endpoints.EndpointKey{ + Region: "us-west-2", + }: endpoints.Endpoint{ + Hostname: "oidc.us-west-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + { + ID: "aws-cn", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "oidc.{region}.api.amazonwebservices.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "oidc-fips.{region}.amazonaws.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "oidc-fips.{region}.api.amazonwebservices.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "oidc.{region}.amazonaws.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsCn, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "cn-north-1", + }: endpoints.Endpoint{ + Hostname: "oidc.cn-north-1.amazonaws.com.cn", + CredentialScope: endpoints.CredentialScope{ + Region: "cn-north-1", + }, + }, + endpoints.EndpointKey{ + Region: "cn-northwest-1", + }: endpoints.Endpoint{ + Hostname: "oidc.cn-northwest-1.amazonaws.com.cn", + CredentialScope: endpoints.CredentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + { + ID: "aws-iso", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "oidc-fips.{region}.c2s.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "oidc.{region}.c2s.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIso, + IsRegionalized: true, + }, + { + ID: "aws-iso-b", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "oidc-fips.{region}.sc2s.sgov.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "oidc.{region}.sc2s.sgov.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoB, + IsRegionalized: true, + }, + { + ID: 
"aws-iso-e", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "oidc-fips.{region}.cloud.adc-e.uk", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "oidc.{region}.cloud.adc-e.uk", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoE, + IsRegionalized: true, + }, + { + ID: "aws-iso-f", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "oidc-fips.{region}.csp.hci.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "oidc.{region}.csp.hci.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoF, + IsRegionalized: true, + }, + { + ID: "aws-us-gov", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "oidc.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "oidc-fips.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "oidc-fips.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "oidc.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsUsGov, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "us-gov-east-1", + }: endpoints.Endpoint{ + Hostname: "oidc.us-gov-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-east-1", + }, + }, + endpoints.EndpointKey{ + Region: "us-gov-west-1", + }: endpoints.Endpoint{ + Hostname: "oidc.us-gov-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/serializers.go new file mode 100644 index 00000000000..efca8b25079 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/serializers.go @@ -0,0 +1,309 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package ssooidc + +import ( + "bytes" + "context" + "fmt" + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/encoding/httpbinding" + smithyjson "github.com/aws/smithy-go/encoding/json" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +type awsRestjson1_serializeOpCreateToken struct { +} + +func (*awsRestjson1_serializeOpCreateToken) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpCreateToken) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CreateTokenInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/token") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "POST" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + restEncoder.SetHeader("Content-Type").String("application/json") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsRestjson1_serializeOpDocumentCreateTokenInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsCreateTokenInput(v *CreateTokenInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + return nil +} + +func awsRestjson1_serializeOpDocumentCreateTokenInput(v *CreateTokenInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ClientId != nil { + ok := object.Key("clientId") + ok.String(*v.ClientId) + } + + if v.ClientSecret != nil { + ok := object.Key("clientSecret") + ok.String(*v.ClientSecret) + } + + if v.Code != nil { + ok := object.Key("code") + ok.String(*v.Code) + } + + if v.DeviceCode != nil { + ok := object.Key("deviceCode") + ok.String(*v.DeviceCode) + } + + if v.GrantType != nil { + ok := object.Key("grantType") + ok.String(*v.GrantType) + } + + if v.RedirectUri != nil { + ok := object.Key("redirectUri") + ok.String(*v.RedirectUri) + } + + if v.RefreshToken != nil { + ok := object.Key("refreshToken") + ok.String(*v.RefreshToken) + } + + if v.Scope != nil { + ok := object.Key("scope") + if err := awsRestjson1_serializeDocumentScopes(v.Scope, ok); err != nil { + 
return err + } + } + + return nil +} + +type awsRestjson1_serializeOpRegisterClient struct { +} + +func (*awsRestjson1_serializeOpRegisterClient) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpRegisterClient) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*RegisterClientInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/client/register") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "POST" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + restEncoder.SetHeader("Content-Type").String("application/json") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsRestjson1_serializeOpDocumentRegisterClientInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsRegisterClientInput(v *RegisterClientInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + return nil +} + +func awsRestjson1_serializeOpDocumentRegisterClientInput(v *RegisterClientInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ClientName != nil { + ok := object.Key("clientName") + ok.String(*v.ClientName) + } + + if v.ClientType != nil { + ok := object.Key("clientType") + ok.String(*v.ClientType) + } + + if v.Scopes != nil { + ok := object.Key("scopes") + if err := awsRestjson1_serializeDocumentScopes(v.Scopes, ok); err != nil { + return err + } + } + + return nil +} + +type awsRestjson1_serializeOpStartDeviceAuthorization struct { +} + +func (*awsRestjson1_serializeOpStartDeviceAuthorization) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpStartDeviceAuthorization) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := 
in.Parameters.(*StartDeviceAuthorizationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/device_authorization") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "POST" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + restEncoder.SetHeader("Content-Type").String("application/json") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsRestjson1_serializeOpDocumentStartDeviceAuthorizationInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsStartDeviceAuthorizationInput(v *StartDeviceAuthorizationInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + return nil +} + +func awsRestjson1_serializeOpDocumentStartDeviceAuthorizationInput(v *StartDeviceAuthorizationInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ClientId != nil { + ok := object.Key("clientId") + ok.String(*v.ClientId) + } + + if v.ClientSecret != nil { + ok := object.Key("clientSecret") + ok.String(*v.ClientSecret) + } + + if v.StartUrl != nil { + ok := object.Key("startUrl") + ok.String(*v.StartUrl) + } + + return nil +} + +func awsRestjson1_serializeDocumentScopes(v []string, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/errors.go new file mode 100644 index 00000000000..115a51a9eb3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/errors.go @@ -0,0 +1,366 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package types + +import ( + "fmt" + smithy "github.com/aws/smithy-go" +) + +// You do not have sufficient access to perform this action. 
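+//
+// Illustrative sketch (editor's addition, not part of the generated file):
+// from the caller's side, these modeled errors are matched with errors.As;
+// the client, context, and input values are placeholders:
+//
+//    _, err := client.CreateToken(ctx, &ssooidc.CreateTokenInput{})
+//    var ade *types.AccessDeniedException
+//    if errors.As(err, &ade) {
+//        log.Println(ade.ErrorCode(), ade.ErrorMessage())
+//    }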
+type AccessDeniedException struct { + Message *string + + ErrorCodeOverride *string + + Error_ *string + Error_description *string + + noSmithyDocumentSerde +} + +func (e *AccessDeniedException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *AccessDeniedException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *AccessDeniedException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "AccessDeniedException" + } + return *e.ErrorCodeOverride +} +func (e *AccessDeniedException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Indicates that a request to authorize a client with an access user session +// token is pending. +type AuthorizationPendingException struct { + Message *string + + ErrorCodeOverride *string + + Error_ *string + Error_description *string + + noSmithyDocumentSerde +} + +func (e *AuthorizationPendingException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *AuthorizationPendingException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *AuthorizationPendingException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "AuthorizationPendingException" + } + return *e.ErrorCodeOverride +} +func (e *AuthorizationPendingException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Indicates that the token issued by the service is expired and is no longer +// valid. +type ExpiredTokenException struct { + Message *string + + ErrorCodeOverride *string + + Error_ *string + Error_description *string + + noSmithyDocumentSerde +} + +func (e *ExpiredTokenException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ExpiredTokenException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ExpiredTokenException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "ExpiredTokenException" + } + return *e.ErrorCodeOverride +} +func (e *ExpiredTokenException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Indicates that an error from the service occurred while trying to process a +// request. +type InternalServerException struct { + Message *string + + ErrorCodeOverride *string + + Error_ *string + Error_description *string + + noSmithyDocumentSerde +} + +func (e *InternalServerException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InternalServerException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InternalServerException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InternalServerException" + } + return *e.ErrorCodeOverride +} +func (e *InternalServerException) ErrorFault() smithy.ErrorFault { return smithy.FaultServer } + +// Indicates that the clientId or clientSecret in the request is invalid. For +// example, this can occur when a client sends an incorrect clientId or an expired +// clientSecret . 
+type InvalidClientException struct { + Message *string + + ErrorCodeOverride *string + + Error_ *string + Error_description *string + + noSmithyDocumentSerde +} + +func (e *InvalidClientException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidClientException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidClientException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidClientException" + } + return *e.ErrorCodeOverride +} +func (e *InvalidClientException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Indicates that the client information sent in the request during registration +// is invalid. +type InvalidClientMetadataException struct { + Message *string + + ErrorCodeOverride *string + + Error_ *string + Error_description *string + + noSmithyDocumentSerde +} + +func (e *InvalidClientMetadataException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidClientMetadataException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidClientMetadataException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidClientMetadataException" + } + return *e.ErrorCodeOverride +} +func (e *InvalidClientMetadataException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Indicates that a request contains an invalid grant. This can occur if a client +// makes a CreateToken request with an invalid grant type. +type InvalidGrantException struct { + Message *string + + ErrorCodeOverride *string + + Error_ *string + Error_description *string + + noSmithyDocumentSerde +} + +func (e *InvalidGrantException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidGrantException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidGrantException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidGrantException" + } + return *e.ErrorCodeOverride +} +func (e *InvalidGrantException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Indicates that something is wrong with the input to the request. For example, a +// required parameter might be missing or out of range. +type InvalidRequestException struct { + Message *string + + ErrorCodeOverride *string + + Error_ *string + Error_description *string + + noSmithyDocumentSerde +} + +func (e *InvalidRequestException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidRequestException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidRequestException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidRequestException" + } + return *e.ErrorCodeOverride +} +func (e *InvalidRequestException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Indicates that the scope provided in the request is invalid. 
+type InvalidScopeException struct { + Message *string + + ErrorCodeOverride *string + + Error_ *string + Error_description *string + + noSmithyDocumentSerde +} + +func (e *InvalidScopeException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidScopeException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidScopeException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidScopeException" + } + return *e.ErrorCodeOverride +} +func (e *InvalidScopeException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Indicates that the client is making the request too frequently and is more than +// the service can handle. +type SlowDownException struct { + Message *string + + ErrorCodeOverride *string + + Error_ *string + Error_description *string + + noSmithyDocumentSerde +} + +func (e *SlowDownException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *SlowDownException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *SlowDownException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "SlowDownException" + } + return *e.ErrorCodeOverride +} +func (e *SlowDownException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Indicates that the client is not currently authorized to make the request. This +// can happen when a clientId is not issued for a public client. +type UnauthorizedClientException struct { + Message *string + + ErrorCodeOverride *string + + Error_ *string + Error_description *string + + noSmithyDocumentSerde +} + +func (e *UnauthorizedClientException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *UnauthorizedClientException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *UnauthorizedClientException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "UnauthorizedClientException" + } + return *e.ErrorCodeOverride +} +func (e *UnauthorizedClientException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Indicates that the grant type in the request is not supported by the service. +type UnsupportedGrantTypeException struct { + Message *string + + ErrorCodeOverride *string + + Error_ *string + Error_description *string + + noSmithyDocumentSerde +} + +func (e *UnsupportedGrantTypeException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *UnsupportedGrantTypeException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *UnsupportedGrantTypeException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "UnsupportedGrantTypeException" + } + return *e.ErrorCodeOverride +} +func (e *UnsupportedGrantTypeException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/types.go new file mode 100644 index 00000000000..0ec0789f8d9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/types.go @@ -0,0 +1,9 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package types + +import ( + smithydocument "github.com/aws/smithy-go/document" +) + +type noSmithyDocumentSerde = smithydocument.NoSerde diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/validators.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/validators.go new file mode 100644 index 00000000000..5a309484e01 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/validators.go @@ -0,0 +1,142 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ssooidc + +import ( + "context" + "fmt" + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/middleware" +) + +type validateOpCreateToken struct { +} + +func (*validateOpCreateToken) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCreateToken) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreateTokenInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCreateTokenInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpRegisterClient struct { +} + +func (*validateOpRegisterClient) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpRegisterClient) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*RegisterClientInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpRegisterClientInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpStartDeviceAuthorization struct { +} + +func (*validateOpStartDeviceAuthorization) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpStartDeviceAuthorization) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*StartDeviceAuthorizationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpStartDeviceAuthorizationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +func addOpCreateTokenValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCreateToken{}, middleware.After) +} + +func addOpRegisterClientValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpRegisterClient{}, middleware.After) +} + +func addOpStartDeviceAuthorizationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpStartDeviceAuthorization{}, middleware.After) +} + +func validateOpCreateTokenInput(v *CreateTokenInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateTokenInput"} + if v.ClientId == nil { + invalidParams.Add(smithy.NewErrParamRequired("ClientId")) + } + if v.ClientSecret == nil { + invalidParams.Add(smithy.NewErrParamRequired("ClientSecret")) + } + if v.GrantType == nil { + invalidParams.Add(smithy.NewErrParamRequired("GrantType")) + 
} + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpRegisterClientInput(v *RegisterClientInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "RegisterClientInput"} + if v.ClientName == nil { + invalidParams.Add(smithy.NewErrParamRequired("ClientName")) + } + if v.ClientType == nil { + invalidParams.Add(smithy.NewErrParamRequired("ClientType")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpStartDeviceAuthorizationInput(v *StartDeviceAuthorizationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "StartDeviceAuthorizationInput"} + if v.ClientId == nil { + invalidParams.Add(smithy.NewErrParamRequired("ClientId")) + } + if v.ClientSecret == nil { + invalidParams.Add(smithy.NewErrParamRequired("ClientSecret")) + } + if v.StartUrl == nil { + invalidParams.Add(smithy.NewErrParamRequired("StartUrl")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md new file mode 100644 index 00000000000..eefcd873062 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md @@ -0,0 +1,338 @@ +# v1.25.1 (2023-11-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.25.0 (2023-11-01) + +* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.0 (2023-10-31) + +* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.23.2 (2023-10-12) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.23.1 (2023-10-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.23.0 (2023-10-02) + +* **Feature**: STS API updates for assumeRole + +# v1.22.0 (2023-09-18) + +* **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* **Feature**: Adds several endpoint ruleset changes across all models: smaller rulesets, removed non-unique regional endpoints, fixes FIPS and DualStack endpoints, and make region not required in SDK::Endpoint. Additional breakfix to cognito-sync field. + +# v1.21.5 (2023-08-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.21.4 (2023-08-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.21.3 (2023-08-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.21.2 (2023-08-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.21.1 (2023-08-01) + +* No change notes available for this release. + +# v1.21.0 (2023-07-31) + +* **Feature**: Adds support for smithy-modeled endpoint resolution. A new rules-based endpoint resolution will be added to the SDK which will supersede and deprecate existing endpoint resolution. Specifically, EndpointResolver will be deprecated while BaseEndpoint and EndpointResolverV2 will take its place.
For more information, please see the Endpoints section in our Developer Guide. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.1 (2023-07-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.0 (2023-07-25) + +* **Feature**: API updates for the AWS Security Token Service + +# v1.19.3 (2023-07-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.19.2 (2023-06-15) + +* No change notes available for this release. + +# v1.19.1 (2023-06-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.19.0 (2023-05-08) + +* **Feature**: Documentation updates for AWS Security Token Service. + +# v1.18.11 (2023-05-04) + +* No change notes available for this release. + +# v1.18.10 (2023-04-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.9 (2023-04-10) + +* No change notes available for this release. + +# v1.18.8 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.7 (2023-03-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.6 (2023-03-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.5 (2023-02-22) + +* **Bug Fix**: Prevent nil pointer dereference when retrieving error codes. + +# v1.18.4 (2023-02-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.3 (2023-02-03) + +* **Dependency Update**: Updated to the latest SDK module versions +* **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization. + +# v1.18.2 (2023-01-25) + +* **Documentation**: Doc only change to update wording in a key topic + +# v1.18.1 (2023-01-23) + +* No change notes available for this release. + +# v1.18.0 (2023-01-05) + +* **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401). + +# v1.17.7 (2022-12-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.6 (2022-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.5 (2022-11-22) + +* No change notes available for this release. + +# v1.17.4 (2022-11-17) + +* **Documentation**: Documentation updates for AWS Security Token Service. + +# v1.17.3 (2022-11-16) + +* No change notes available for this release. + +# v1.17.2 (2022-11-10) + +* No change notes available for this release. + +# v1.17.1 (2022-10-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.0 (2022-10-21) + +* **Feature**: Add presign functionality for sts:AssumeRole operation +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.19 (2022-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.18 (2022-09-14) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.17 (2022-09-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.16 (2022-08-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.15 (2022-08-30) + +* No change notes available for this release. 
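+
+A minimal migration sketch for the smithy-modeled endpoint resolution change noted in v1.21.0 above, assuming only the `BaseEndpoint` client option documented in this module's generated client; the endpoint URL and option wiring are illustrative placeholders, not a definitive implementation.
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/config"
+	"github.com/aws/aws-sdk-go-v2/service/sts"
+)
+
+func main() {
+	cfg, err := config.LoadDefaultConfig(context.TODO())
+	if err != nil {
+		log.Fatal(err)
+	}
+	// Set a custom base endpoint instead of implementing the deprecated
+	// EndpointResolver interface; the rules-based EndpointResolverV2 then
+	// applies its modifications on top of this base.
+	client := sts.NewFromConfig(cfg, func(o *sts.Options) {
+		o.BaseEndpoint = aws.String("https://sts.example.internal") // placeholder URL
+	})
+	_ = client
+}
+```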
+ +# v1.16.14 (2022-08-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.13 (2022-08-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.12 (2022-08-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.11 (2022-08-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.10 (2022-08-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.9 (2022-07-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.8 (2022-06-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.7 (2022-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.6 (2022-05-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.5 (2022-05-16) + +* **Documentation**: Documentation updates for AWS Security Token Service. + +# v1.16.4 (2022-04-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.3 (2022-03-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.2 (2022-03-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.1 (2022-03-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.0 (2022-03-08) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Documentation**: Updated service client model to latest release. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.0 (2022-02-24) + +* **Feature**: API client updated +* **Feature**: Adds RetryMaxAttempts and RetryMode to API client Options. This allows the API clients' default Retryer to be configured from the shared configuration files or environment variables. Adding a new Retry mode of `Adaptive`. `Adaptive` retry mode is an experimental mode, adding client rate limiting when throttle responses are received from an API. See [retry.AdaptiveMode](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws/retry#AdaptiveMode) for more details, and configuration options. +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.0 (2022-01-14) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.0 (2022-01-07) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.0 (2021-12-21) + +* **Feature**: Updated to latest service endpoints + +# v1.11.1 (2021-12-02) + +* **Bug Fix**: Fixes a bug that prevented aws.EndpointResolverWithOptions from being used by the service client. ([#1514](https://github.com/aws/aws-sdk-go-v2/pull/1514)) +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.0 (2021-11-30) + +* **Feature**: API client updated + +# v1.10.1 (2021-11-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.10.0 (2021-11-12) + +* **Feature**: Service clients now support custom endpoints that have an initial URI path defined. + +# v1.9.0 (2021-11-06) + +* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically.
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.8.0 (2021-10-21) + +* **Feature**: API client updated +* **Feature**: Updated to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.2 (2021-10-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.1 (2021-09-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.0 (2021-08-27) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.2 (2021-08-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.1 (2021-08-04) + +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.0 (2021-07-15) + +* **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model. +* **Documentation**: Updated service model to latest revision. +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.0 (2021-06-25) + +* **Feature**: API client updated +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.1 (2021-05-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.0 (2021-05-14) + +* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. +* **Dependency Update**: Updated to the latest SDK module versions + diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/LICENSE.txt new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go new file mode 100644 index 00000000000..c29d8cad17a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go @@ -0,0 +1,630 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package sts + +import ( + "context" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/defaults" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/protocol/query" + "github.com/aws/aws-sdk-go-v2/aws/retry" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" + internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" + presignedurlcust "github.com/aws/aws-sdk-go-v2/service/internal/presigned-url" + smithy "github.com/aws/smithy-go" + smithydocument "github.com/aws/smithy-go/document" + "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "net" + "net/http" + "time" +) + +const ServiceID = "STS" +const ServiceAPIVersion = "2011-06-15" + +// Client provides the API client to make operations call for AWS Security Token +// Service. +type Client struct { + options Options +} + +// New returns an initialized Client based on the functional options. Provide +// additional functional options to further configure the behavior of the client, +// such as changing the client's endpoint or adding custom middleware behavior. +func New(options Options, optFns ...func(*Options)) *Client { + options = options.Copy() + + resolveDefaultLogger(&options) + + setResolvedDefaultsMode(&options) + + resolveRetryer(&options) + + resolveHTTPClient(&options) + + resolveHTTPSignerV4(&options) + + for _, fn := range optFns { + fn(&options) + } + + client := &Client{ + options: options, + } + + return client +} + +type Options struct { + // Set of options to modify how an operation is invoked. These apply to all + // operations invoked for this client. Use functional options on operation call to + // modify this list for per operation behavior. + APIOptions []func(*middleware.Stack) error + + // The optional application specific identifier appended to the User-Agent header. + AppID string + + // This endpoint will be given as input to an EndpointResolverV2. It is used for + // providing a custom base endpoint that is subject to modifications by the + // processing EndpointResolverV2. + BaseEndpoint *string + + // Configures the events that will be sent to the configured logger. + ClientLogMode aws.ClientLogMode + + // The credentials object to use when signing requests. + Credentials aws.CredentialsProvider + + // The configuration DefaultsMode that the SDK should use when constructing the + // clients initial default settings. + DefaultsMode aws.DefaultsMode + + // The endpoint options to be used when attempting to resolve an endpoint. + EndpointOptions EndpointResolverOptions + + // The service endpoint resolver. + // + // Deprecated: Deprecated: EndpointResolver and WithEndpointResolver. Providing a + // value for this field will likely prevent you from using any endpoint-related + // service features released after the introduction of EndpointResolverV2 and + // BaseEndpoint. To migrate an EndpointResolver implementation that uses a custom + // endpoint, set the client option BaseEndpoint instead. + EndpointResolver EndpointResolver + + // Resolves the endpoint used for a particular service. This should be used over + // the deprecated EndpointResolver + EndpointResolverV2 EndpointResolverV2 + + // Signature Version 4 (SigV4) Signer + HTTPSignerV4 HTTPSignerV4 + + // The logger writer interface to write logging messages to. + Logger logging.Logger + + // The region to send requests to. 
(Required) + Region string + + // RetryMaxAttempts specifies the maximum number attempts an API client will call + // an operation that fails with a retryable error. A value of 0 is ignored, and + // will not be used to configure the API client created default retryer, or modify + // per operation call's retry max attempts. When creating a new API Clients this + // member will only be used if the Retryer Options member is nil. This value will + // be ignored if Retryer is not nil. If specified in an operation call's functional + // options with a value that is different than the constructed client's Options, + // the Client's Retryer will be wrapped to use the operation's specific + // RetryMaxAttempts value. + RetryMaxAttempts int + + // RetryMode specifies the retry mode the API client will be created with, if + // Retryer option is not also specified. When creating a new API Clients this + // member will only be used if the Retryer Options member is nil. This value will + // be ignored if Retryer is not nil. Currently does not support per operation call + // overrides, may in the future. + RetryMode aws.RetryMode + + // Retryer guides how HTTP requests should be retried in case of recoverable + // failures. When nil the API client will use a default retryer. The kind of + // default retry created by the API client can be changed with the RetryMode + // option. + Retryer aws.Retryer + + // The RuntimeEnvironment configuration, only populated if the DefaultsMode is set + // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig . You + // should not populate this structure programmatically, or rely on the values here + // within your applications. + RuntimeEnvironment aws.RuntimeEnvironment + + // The initial DefaultsMode used when the client options were constructed. If the + // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved + // value was at that point in time. Currently does not support per operation call + // overrides, may in the future. + resolvedDefaultsMode aws.DefaultsMode + + // The HTTP client to invoke API calls with. Defaults to client's default HTTP + // implementation if nil. + HTTPClient HTTPClient +} + +// WithAPIOptions returns a functional option for setting the Client's APIOptions +// option. +func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { + return func(o *Options) { + o.APIOptions = append(o.APIOptions, optFns...) + } +} + +// Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for +// this field will likely prevent you from using any endpoint-related service +// features released after the introduction of EndpointResolverV2 and BaseEndpoint. +// To migrate an EndpointResolver implementation that uses a custom endpoint, set +// the client option BaseEndpoint instead. +func WithEndpointResolver(v EndpointResolver) func(*Options) { + return func(o *Options) { + o.EndpointResolver = v + } +} + +// WithEndpointResolverV2 returns a functional option for setting the Client's +// EndpointResolverV2 option. +func WithEndpointResolverV2(v EndpointResolverV2) func(*Options) { + return func(o *Options) { + o.EndpointResolverV2 = v + } +} + +type HTTPClient interface { + Do(*http.Request) (*http.Response, error) +} + +// Copy creates a clone where the APIOptions list is deep copied. 
+func (o Options) Copy() Options { + to := o + to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions)) + copy(to.APIOptions, o.APIOptions) + + return to +} +func (c *Client) invokeOperation(ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error) (result interface{}, metadata middleware.Metadata, err error) { + ctx = middleware.ClearStackValues(ctx) + stack := middleware.NewStack(opID, smithyhttp.NewStackRequest) + options := c.options.Copy() + resolveEndpointResolverV2(&options) + + for _, fn := range optFns { + fn(&options) + } + + finalizeRetryMaxAttemptOptions(&options, *c) + + finalizeClientEndpointResolverOptions(&options) + + for _, fn := range stackFns { + if err := fn(stack, options); err != nil { + return nil, metadata, err + } + } + + for _, fn := range options.APIOptions { + if err := fn(stack); err != nil { + return nil, metadata, err + } + } + + handler := middleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack) + result, metadata, err = handler.Handle(ctx, params) + if err != nil { + err = &smithy.OperationError{ + ServiceID: ServiceID, + OperationName: opID, + Err: err, + } + } + return result, metadata, err +} + +type noSmithyDocumentSerde = smithydocument.NoSerde + +type legacyEndpointContextSetter struct { + LegacyResolver EndpointResolver +} + +func (*legacyEndpointContextSetter) ID() string { + return "legacyEndpointContextSetter" +} + +func (m *legacyEndpointContextSetter) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + if m.LegacyResolver != nil { + ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, true) + } + + return next.HandleInitialize(ctx, in) + +} +func addlegacyEndpointContextSetter(stack *middleware.Stack, o Options) error { + return stack.Initialize.Add(&legacyEndpointContextSetter{ + LegacyResolver: o.EndpointResolver, + }, middleware.Before) +} + +func resolveDefaultLogger(o *Options) { + if o.Logger != nil { + return + } + o.Logger = logging.Nop{} +} + +func addSetLoggerMiddleware(stack *middleware.Stack, o Options) error { + return middleware.AddSetLoggerMiddleware(stack, o.Logger) +} + +func setResolvedDefaultsMode(o *Options) { + if len(o.resolvedDefaultsMode) > 0 { + return + } + + var mode aws.DefaultsMode + mode.SetFromString(string(o.DefaultsMode)) + + if mode == aws.DefaultsModeAuto { + mode = defaults.ResolveDefaultsModeAuto(o.Region, o.RuntimeEnvironment) + } + + o.resolvedDefaultsMode = mode +} + +// NewFromConfig returns a new client from the provided config. +func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { + opts := Options{ + Region: cfg.Region, + DefaultsMode: cfg.DefaultsMode, + RuntimeEnvironment: cfg.RuntimeEnvironment, + HTTPClient: cfg.HTTPClient, + Credentials: cfg.Credentials, + APIOptions: cfg.APIOptions, + Logger: cfg.Logger, + ClientLogMode: cfg.ClientLogMode, + AppID: cfg.AppID, + } + resolveAWSRetryerProvider(cfg, &opts) + resolveAWSRetryMaxAttempts(cfg, &opts) + resolveAWSRetryMode(cfg, &opts) + resolveAWSEndpointResolver(cfg, &opts) + resolveUseDualStackEndpoint(cfg, &opts) + resolveUseFIPSEndpoint(cfg, &opts) + resolveBaseEndpoint(cfg, &opts) + return New(opts, optFns...) 
+} + +func resolveHTTPClient(o *Options) { + var buildable *awshttp.BuildableClient + + if o.HTTPClient != nil { + var ok bool + buildable, ok = o.HTTPClient.(*awshttp.BuildableClient) + if !ok { + return + } + } else { + buildable = awshttp.NewBuildableClient() + } + + modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) + if err == nil { + buildable = buildable.WithDialerOptions(func(dialer *net.Dialer) { + if dialerTimeout, ok := modeConfig.GetConnectTimeout(); ok { + dialer.Timeout = dialerTimeout + } + }) + + buildable = buildable.WithTransportOptions(func(transport *http.Transport) { + if tlsHandshakeTimeout, ok := modeConfig.GetTLSNegotiationTimeout(); ok { + transport.TLSHandshakeTimeout = tlsHandshakeTimeout + } + }) + } + + o.HTTPClient = buildable +} + +func resolveRetryer(o *Options) { + if o.Retryer != nil { + return + } + + if len(o.RetryMode) == 0 { + modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) + if err == nil { + o.RetryMode = modeConfig.RetryMode + } + } + if len(o.RetryMode) == 0 { + o.RetryMode = aws.RetryModeStandard + } + + var standardOptions []func(*retry.StandardOptions) + if v := o.RetryMaxAttempts; v != 0 { + standardOptions = append(standardOptions, func(so *retry.StandardOptions) { + so.MaxAttempts = v + }) + } + + switch o.RetryMode { + case aws.RetryModeAdaptive: + var adaptiveOptions []func(*retry.AdaptiveModeOptions) + if len(standardOptions) != 0 { + adaptiveOptions = append(adaptiveOptions, func(ao *retry.AdaptiveModeOptions) { + ao.StandardOptions = append(ao.StandardOptions, standardOptions...) + }) + } + o.Retryer = retry.NewAdaptiveMode(adaptiveOptions...) + + default: + o.Retryer = retry.NewStandard(standardOptions...) + } +} + +func resolveAWSRetryerProvider(cfg aws.Config, o *Options) { + if cfg.Retryer == nil { + return + } + o.Retryer = cfg.Retryer() +} + +func resolveAWSRetryMode(cfg aws.Config, o *Options) { + if len(cfg.RetryMode) == 0 { + return + } + o.RetryMode = cfg.RetryMode +} +func resolveAWSRetryMaxAttempts(cfg aws.Config, o *Options) { + if cfg.RetryMaxAttempts == 0 { + return + } + o.RetryMaxAttempts = cfg.RetryMaxAttempts +} + +func finalizeRetryMaxAttemptOptions(o *Options, client Client) { + if v := o.RetryMaxAttempts; v == 0 || v == client.options.RetryMaxAttempts { + return + } + + o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts) +} + +func resolveAWSEndpointResolver(cfg aws.Config, o *Options) { + if cfg.EndpointResolver == nil && cfg.EndpointResolverWithOptions == nil { + return + } + o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, cfg.EndpointResolverWithOptions) +} + +func addClientUserAgent(stack *middleware.Stack, options Options) error { + if err := awsmiddleware.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "sts", goModuleVersion)(stack); err != nil { + return err + } + + if len(options.AppID) > 0 { + return awsmiddleware.AddSDKAgentKey(awsmiddleware.ApplicationIdentifier, options.AppID)(stack) + } + + return nil +} + +func addHTTPSignerV4Middleware(stack *middleware.Stack, o Options) error { + mw := v4.NewSignHTTPRequestMiddleware(v4.SignHTTPRequestMiddlewareOptions{ + CredentialsProvider: o.Credentials, + Signer: o.HTTPSignerV4, + LogSigning: o.ClientLogMode.IsSigning(), + }) + return stack.Finalize.Add(mw, middleware.After) +} + +type HTTPSignerV4 interface { + SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns 
...func(*v4.SignerOptions)) error +} + +func resolveHTTPSignerV4(o *Options) { + if o.HTTPSignerV4 != nil { + return + } + o.HTTPSignerV4 = newDefaultV4Signer(*o) +} + +func newDefaultV4Signer(o Options) *v4.Signer { + return v4.NewSigner(func(so *v4.SignerOptions) { + so.Logger = o.Logger + so.LogSigning = o.ClientLogMode.IsSigning() + }) +} + +func addRetryMiddlewares(stack *middleware.Stack, o Options) error { + mo := retry.AddRetryMiddlewaresOptions{ + Retryer: o.Retryer, + LogRetryAttempts: o.ClientLogMode.IsRetries(), + } + return retry.AddRetryMiddlewares(stack, mo) +} + +// resolves dual-stack endpoint configuration +func resolveUseDualStackEndpoint(cfg aws.Config, o *Options) error { + if len(cfg.ConfigSources) == 0 { + return nil + } + value, found, err := internalConfig.ResolveUseDualStackEndpoint(context.Background(), cfg.ConfigSources) + if err != nil { + return err + } + if found { + o.EndpointOptions.UseDualStackEndpoint = value + } + return nil +} + +// resolves FIPS endpoint configuration +func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error { + if len(cfg.ConfigSources) == 0 { + return nil + } + value, found, err := internalConfig.ResolveUseFIPSEndpoint(context.Background(), cfg.ConfigSources) + if err != nil { + return err + } + if found { + o.EndpointOptions.UseFIPSEndpoint = value + } + return nil +} + +func addRequestIDRetrieverMiddleware(stack *middleware.Stack) error { + return awsmiddleware.AddRequestIDRetrieverMiddleware(stack) +} + +func addResponseErrorMiddleware(stack *middleware.Stack) error { + return awshttp.AddResponseErrorMiddleware(stack) +} + +// HTTPPresignerV4 represents presigner interface used by presign url client +type HTTPPresignerV4 interface { + PresignHTTP( + ctx context.Context, credentials aws.Credentials, r *http.Request, + payloadHash string, service string, region string, signingTime time.Time, + optFns ...func(*v4.SignerOptions), + ) (url string, signedHeader http.Header, err error) +} + +// PresignOptions represents the presign client options +type PresignOptions struct { + + // ClientOptions are list of functional options to mutate client options used by + // the presign client. + ClientOptions []func(*Options) + + // Presigner is the presigner used by the presign url client + Presigner HTTPPresignerV4 +} + +func (o PresignOptions) copy() PresignOptions { + clientOptions := make([]func(*Options), len(o.ClientOptions)) + copy(clientOptions, o.ClientOptions) + o.ClientOptions = clientOptions + return o +} + +// WithPresignClientFromClientOptions is a helper utility to retrieve a function +// that takes PresignOption as input +func WithPresignClientFromClientOptions(optFns ...func(*Options)) func(*PresignOptions) { + return withPresignClientFromClientOptions(optFns).options +} + +type withPresignClientFromClientOptions []func(*Options) + +func (w withPresignClientFromClientOptions) options(o *PresignOptions) { + o.ClientOptions = append(o.ClientOptions, w...) +} + +// PresignClient represents the presign url client +type PresignClient struct { + client *Client + options PresignOptions +} + +// NewPresignClient generates a presign client using provided API Client and +// presign options +func NewPresignClient(c *Client, optFns ...func(*PresignOptions)) *PresignClient { + var options PresignOptions + for _, fn := range optFns { + fn(&options) + } + if len(options.ClientOptions) != 0 { + c = New(c.options, options.ClientOptions...) 
+ } + + if options.Presigner == nil { + options.Presigner = newDefaultV4Signer(c.options) + } + + return &PresignClient{ + client: c, + options: options, + } +} + +func withNopHTTPClientAPIOption(o *Options) { + o.HTTPClient = smithyhttp.NopClient{} +} + +type presignConverter PresignOptions + +func (c presignConverter) convertToPresignMiddleware(stack *middleware.Stack, options Options) (err error) { + stack.Finalize.Clear() + stack.Deserialize.Clear() + stack.Build.Remove((*awsmiddleware.ClientRequestID)(nil).ID()) + stack.Build.Remove("UserAgent") + pmw := v4.NewPresignHTTPRequestMiddleware(v4.PresignHTTPRequestMiddlewareOptions{ + CredentialsProvider: options.Credentials, + Presigner: c.Presigner, + LogSigning: options.ClientLogMode.IsSigning(), + }) + err = stack.Finalize.Add(pmw, middleware.After) + if err != nil { + return err + } + if err = smithyhttp.AddNoPayloadDefaultContentTypeRemover(stack); err != nil { + return err + } + // convert request to a GET request + err = query.AddAsGetRequestMiddleware(stack) + if err != nil { + return err + } + err = presignedurlcust.AddAsIsPresigingMiddleware(stack) + if err != nil { + return err + } + return nil +} + +func addRequestResponseLogging(stack *middleware.Stack, o Options) error { + return stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{ + LogRequest: o.ClientLogMode.IsRequest(), + LogRequestWithBody: o.ClientLogMode.IsRequestWithBody(), + LogResponse: o.ClientLogMode.IsResponse(), + LogResponseWithBody: o.ClientLogMode.IsResponseWithBody(), + }, middleware.After) +} + +type endpointDisableHTTPSMiddleware struct { + EndpointDisableHTTPS bool +} + +func (*endpointDisableHTTPSMiddleware) ID() string { + return "endpointDisableHTTPSMiddleware" +} + +func (m *endpointDisableHTTPSMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.EndpointDisableHTTPS && !smithyhttp.GetHostnameImmutable(ctx) { + req.URL.Scheme = "http" + } + + return next.HandleSerialize(ctx, in) + +} +func addendpointDisableHTTPSMiddleware(stack *middleware.Stack, o Options) error { + return stack.Serialize.Insert(&endpointDisableHTTPSMiddleware{ + EndpointDisableHTTPS: o.EndpointOptions.DisableHTTPS, + }, "OperationSerializer", middleware.Before) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go new file mode 100644 index 00000000000..0ef7affc598 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go @@ -0,0 +1,558 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sts + +import ( + "context" + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" + "github.com/aws/aws-sdk-go-v2/service/sts/types" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns a set of temporary security credentials that you can use to access +// Amazon Web Services resources. 
These temporary credentials consist of an access +// key ID, a secret access key, and a security token. Typically, you use AssumeRole +// within your account or for cross-account access. For a comparison of AssumeRole +// with other API operations that produce temporary credentials, see Requesting +// Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. Permissions The temporary security credentials created by +// AssumeRole can be used to make API calls to any Amazon Web Services service +// with the following exception: You cannot call the Amazon Web Services STS +// GetFederationToken or GetSessionToken API operations. (Optional) You can pass +// inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// to this operation. You can pass a single JSON policy document to use as an +// inline session policy. You can also specify up to 10 managed policy Amazon +// Resource Names (ARNs) to use as managed session policies. The plaintext that you +// use for both inline and managed session policies can't exceed 2,048 characters. +// Passing policies to this operation returns new temporary credentials. The +// resulting session's permissions are the intersection of the role's +// identity-based policy and the session policies. You can use the role's temporary +// credentials in subsequent Amazon Web Services API calls to access resources in +// the account that owns the role. You cannot use session policies to grant more +// permissions than those allowed by the identity-based policy of the role that is +// being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// in the IAM User Guide. When you create a role, you create two policies: a role +// trust policy that specifies who can assume the role, and a permissions policy +// that specifies what can be done with the role. You specify the trusted principal +// that is allowed to assume the role in the role trust policy. To assume a role +// from a different account, your Amazon Web Services account must be trusted by +// the role. The trust relationship is defined in the role's trust policy when the +// role is created. That trust policy states which accounts are allowed to delegate +// that access to users in the account. A user who wants to access a role in a +// different account must also have permissions that are delegated from the account +// administrator. The administrator must attach a policy that allows the user to +// call AssumeRole for the ARN of the role in the other account. To allow a user +// to assume a role in the same account, you can do either of the following: +// - Attach a policy to the user that allows the user to call AssumeRole (as long +// as the role's trust policy trusts the account). +// - Add the user as a principal directly in the role's trust policy. +// +// You can do either because the role’s trust policy acts as an IAM resource-based +// policy. When a resource-based policy grants access to a principal in the same +// account, no additional identity-based policy is required. 
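+// As an illustration only (not upstream documentation; the account ID is a
+// placeholder), a minimal trust policy granting another account permission to
+// call AssumeRole might look like:
+//
+//	{
+//	  "Version": "2012-10-17",
+//	  "Statement": [{
+//	    "Effect": "Allow",
+//	    "Principal": { "AWS": "arn:aws:iam::111122223333:root" },
+//	    "Action": "sts:AssumeRole"
+//	  }]
+//	}
+//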
For more information
+// about trust policies and resource-based policies, see IAM Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html)
+// in the IAM User Guide. Tags (Optional) You can pass tag key-value pairs to your
+// session. These tags are called session tags. For more information about session
+// tags, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
+// in the IAM User Guide. An administrator must grant you the permissions necessary
+// to pass session tags. The administrator can also create granular permissions to
+// allow you to pass only specific session tags. For more information, see
+// Tutorial: Using Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html)
+// in the IAM User Guide. You can set the session tags as transitive. Transitive
+// tags persist during role chaining. For more information, see Chaining Roles
+// with Session Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining)
+// in the IAM User Guide. Using MFA with AssumeRole (Optional) You can include
+// multi-factor authentication (MFA) information when you call AssumeRole . This is
+// useful for cross-account scenarios to ensure that the user that assumes the role
+// has been authenticated with an Amazon Web Services MFA device. In that scenario,
+// the trust policy of the role being assumed includes a condition that tests for
+// MFA authentication. If the caller does not include valid MFA information, the
+// request to assume the role is denied. The condition in a trust policy that tests
+// for MFA authentication might look like the following example. "Condition":
+// {"Bool": {"aws:MultiFactorAuthPresent": true}} For more information, see
+// Configuring MFA-Protected API Access (https://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html)
+// in the IAM User Guide. To use MFA with AssumeRole , you pass values for
+// the SerialNumber and TokenCode parameters. The SerialNumber value identifies
+// the user's hardware or virtual MFA device. The TokenCode is the time-based
+// one-time password (TOTP) that the MFA device produces.
+func (c *Client) AssumeRole(ctx context.Context, params *AssumeRoleInput, optFns ...func(*Options)) (*AssumeRoleOutput, error) {
+	if params == nil {
+		params = &AssumeRoleInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "AssumeRole", params, optFns, c.addOperationAssumeRoleMiddlewares)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*AssumeRoleOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+type AssumeRoleInput struct {
+
+	// The Amazon Resource Name (ARN) of the role to assume.
+	//
+	// This member is required.
+	RoleArn *string
+
+	// An identifier for the assumed role session. Use the role session name to
+	// uniquely identify a session when the same role is assumed by different
+	// principals or for different reasons. In cross-account scenarios, the role
+	// session name is visible to, and can be logged by the account that owns the role.
+	// The role session name is also used in the ARN of the assumed role principal.
+	// This means that subsequent cross-account API requests that use the temporary
+	// security credentials will expose the role session name to the external account
+	// in their CloudTrail logs.
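+	// As an illustration only (placeholder values, not upstream documentation):
+	// a session named "example-session" on a role named "demo" in account
+	// 123456789012 appears as the assumed-role principal
+	// arn:aws:sts::123456789012:assumed-role/demo/example-session.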
The regex used to validate this parameter is a string + // of characters consisting of upper- and lower-case alphanumeric characters with + // no spaces. You can also include underscores or any of the following characters: + // =,.@- + // + // This member is required. + RoleSessionName *string + + // The duration, in seconds, of the role session. The value specified can range + // from 900 seconds (15 minutes) up to the maximum session duration set for the + // role. The maximum session duration setting can have a value from 1 hour to 12 + // hours. If you specify a value higher than this setting or the administrator + // setting (whichever is lower), the operation fails. For example, if you specify a + // session duration of 12 hours, but your administrator set the maximum session + // duration to 6 hours, your operation fails. Role chaining limits your Amazon Web + // Services CLI or Amazon Web Services API role session to a maximum of one hour. + // When you use the AssumeRole API operation to assume a role, you can specify the + // duration of your role session with the DurationSeconds parameter. You can + // specify a parameter value of up to 43200 seconds (12 hours), depending on the + // maximum session duration setting for your role. However, if you assume a role + // using role chaining and provide a DurationSeconds parameter value greater than + // one hour, the operation fails. To learn how to view the maximum value for your + // role, see View the Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // in the IAM User Guide. By default, the value is set to 3600 seconds. The + // DurationSeconds parameter is separate from the duration of a console session + // that you might request using the returned credentials. The request to the + // federation endpoint for a console sign-in token takes a SessionDuration + // parameter that specifies the maximum length of the console session. For more + // information, see Creating a URL that Enables Federated Users to Access the + // Amazon Web Services Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // in the IAM User Guide. + DurationSeconds *int32 + + // A unique identifier that might be required when you assume a role in another + // account. If the administrator of the account to which the role belongs provided + // you with an external ID, then provide that value in the ExternalId parameter. + // This value can be any string, such as a passphrase or account number. A + // cross-account role is usually set up to trust everyone in an account. Therefore, + // the administrator of the trusting account might send an external ID to the + // administrator of the trusted account. That way, only someone with the ID can + // assume the role, rather than everyone in the account. For more information about + // the external ID, see How to Use an External ID When Granting Access to Your + // Amazon Web Services Resources to a Third Party (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html) + // in the IAM User Guide. The regex used to validate this parameter is a string of + // characters consisting of upper- and lower-case alphanumeric characters with no + // spaces. 
You can also include underscores or any of the following characters: + // =,.@:/- + ExternalId *string + + // An IAM policy in JSON format that you want to use as an inline session policy. + // This parameter is optional. Passing policies to this operation returns new + // temporary credentials. The resulting session's permissions are the intersection + // of the role's identity-based policy and the session policies. You can use the + // role's temporary credentials in subsequent Amazon Web Services API calls to + // access resources in the account that owns the role. You cannot use session + // policies to grant more permissions than those allowed by the identity-based + // policy of the role that is being assumed. For more information, see Session + // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. The plaintext that you use for both inline and managed + // session policies can't exceed 2,048 characters. The JSON policy characters can + // be any ASCII character from the space character to the end of the valid + // character list (\u0020 through \u00FF). It can also include the tab (\u0009), + // linefeed (\u000A), and carriage return (\u000D) characters. An Amazon Web + // Services conversion compresses the passed inline session policy, managed policy + // ARNs, and session tags into a packed binary format that has a separate limit. + // Your request can fail for this limit even if your plaintext meets the other + // requirements. The PackedPolicySize response element indicates by percentage how + // close the policies and tags for your request are to the upper size limit. + Policy *string + + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want to + // use as managed session policies. The policies must exist in the same account as + // the role. This parameter is optional. You can provide up to 10 managed policy + // ARNs. However, the plaintext that you use for both inline and managed session + // policies can't exceed 2,048 characters. For more information about ARNs, see + // Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the Amazon Web Services General Reference. An Amazon Web Services conversion + // compresses the passed inline session policy, managed policy ARNs, and session + // tags into a packed binary format that has a separate limit. Your request can + // fail for this limit even if your plaintext meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. Passing policies to this + // operation returns new temporary credentials. The resulting session's permissions + // are the intersection of the role's identity-based policy and the session + // policies. You can use the role's temporary credentials in subsequent Amazon Web + // Services API calls to access resources in the account that owns the role. You + // cannot use session policies to grant more permissions than those allowed by the + // identity-based policy of the role that is being assumed. For more information, + // see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + PolicyArns []types.PolicyDescriptorType + + // Reserved for future use. 
+ ProvidedContexts []types.ProvidedContext + + // The identification number of the MFA device that is associated with the user + // who is making the AssumeRole call. Specify this value if the trust policy of + // the role being assumed includes a condition that requires MFA authentication. + // The value is either the serial number for a hardware device (such as + // GAHT12345678 ) or an Amazon Resource Name (ARN) for a virtual device (such as + // arn:aws:iam::123456789012:mfa/user ). The regex used to validate this parameter + // is a string of characters consisting of upper- and lower-case alphanumeric + // characters with no spaces. You can also include underscores or any of the + // following characters: =,.@- + SerialNumber *string + + // The source identity specified by the principal that is calling the AssumeRole + // operation. You can require users to specify a source identity when they assume a + // role. You do this by using the sts:SourceIdentity condition key in a role trust + // policy. You can use source identity information in CloudTrail logs to determine + // who took actions with a role. You can use the aws:SourceIdentity condition key + // to further control access to Amazon Web Services resources based on the value of + // source identity. For more information about using source identity, see Monitor + // and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) + // in the IAM User Guide. The regex used to validate this parameter is a string of + // characters consisting of upper- and lower-case alphanumeric characters with no + // spaces. You can also include underscores or any of the following characters: + // =,.@-. You cannot use a value that begins with the text aws: . This prefix is + // reserved for Amazon Web Services internal use. + SourceIdentity *string + + // A list of session tags that you want to pass. Each session tag consists of a + // key name and an associated value. For more information about session tags, see + // Tagging Amazon Web Services STS Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) + // in the IAM User Guide. This parameter is optional. You can pass up to 50 session + // tags. The plaintext session tag keys can’t exceed 128 characters, and the values + // can’t exceed 256 characters. For these and additional limits, see IAM and STS + // Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // in the IAM User Guide. An Amazon Web Services conversion compresses the passed + // inline session policy, managed policy ARNs, and session tags into a packed + // binary format that has a separate limit. Your request can fail for this limit + // even if your plaintext meets the other requirements. The PackedPolicySize + // response element indicates by percentage how close the policies and tags for + // your request are to the upper size limit. You can pass a session tag with the + // same key as a tag that is already attached to the role. When you do, session + // tags override a role tag with the same key. Tag key–value pairs are not case + // sensitive, but case is preserved. This means that you cannot have separate + // Department and department tag keys. Assume that the role has the Department = + // Marketing tag and you pass the department = engineering session tag. 
Department + // and department are not saved as separate tags, and the session tag passed in + // the request takes precedence over the role tag. Additionally, if you used + // temporary credentials to perform this operation, the new session inherits any + // transitive session tags from the calling session. If you pass a session tag with + // the same key as an inherited tag, the operation fails. To view the inherited + // tags for a session, see the CloudTrail logs. For more information, see Viewing + // Session Tags in CloudTrail (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_ctlogs) + // in the IAM User Guide. + Tags []types.Tag + + // The value provided by the MFA device, if the trust policy of the role being + // assumed requires MFA. (In other words, if the policy includes a condition that + // tests for MFA). If the role being assumed requires MFA and if the TokenCode + // value is missing or expired, the AssumeRole call returns an "access denied" + // error. The format for this parameter, as described by its regex pattern, is a + // sequence of six numeric digits. + TokenCode *string + + // A list of keys for session tags that you want to set as transitive. If you set + // a tag key as transitive, the corresponding key and value passes to subsequent + // sessions in a role chain. For more information, see Chaining Roles with Session + // Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) + // in the IAM User Guide. This parameter is optional. When you set session tags as + // transitive, the session policy and session tags packed binary limit is not + // affected. If you choose not to specify a transitive tag key, then no tags are + // passed from this session to any subsequent sessions. + TransitiveTagKeys []string + + noSmithyDocumentSerde +} + +// Contains the response to a successful AssumeRole request, including temporary +// Amazon Web Services credentials that can be used to make Amazon Web Services +// requests. +type AssumeRoleOutput struct { + + // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers + // that you can use to refer to the resulting temporary security credentials. For + // example, you can reference these credentials as a principal in a resource-based + // policy by using the ARN or assumed role ID. The ARN and ID include the + // RoleSessionName that you specified when you called AssumeRole . + AssumedRoleUser *types.AssumedRoleUser + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. The size of the security token + // that STS API operations return is not fixed. We strongly recommend that you make + // no assumptions about the maximum size. + Credentials *types.Credentials + + // A percentage value that indicates the packed size of the session policies and + // session tags combined passed in the request. The request fails if the packed + // size is greater than 100 percent, which means the policies and tags exceeded the + // allowed space. + PackedPolicySize *int32 + + // The source identity specified by the principal that is calling the AssumeRole + // operation. You can require users to specify a source identity when they assume a + // role. You do this by using the sts:SourceIdentity condition key in a role trust + // policy. You can use source identity information in CloudTrail logs to determine + // who took actions with a role. 
You can use the aws:SourceIdentity condition key + // to further control access to Amazon Web Services resources based on the value of + // source identity. For more information about using source identity, see Monitor + // and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) + // in the IAM User Guide. The regex used to validate this parameter is a string of + // characters consisting of upper- and lower-case alphanumeric characters with no + // spaces. You can also include underscores or any of the following characters: + // =,.@- + SourceIdentity *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationAssumeRoleMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsquery_serializeOpAssumeRole{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsquery_deserializeOpAssumeRole{}, middleware.After) + if err != nil { + return err + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addAssumeRoleResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addOpAssumeRoleValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opAssumeRole(options.Region), middleware.Before); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opAssumeRole(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "sts", + OperationName: "AssumeRole", + } +} + +// PresignAssumeRole is used to generate a presigned HTTP Request which contains +// presigned URL, signed headers and HTTP method used. 
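+//
+// A minimal usage sketch, for illustration only (not upstream documentation;
+// the client value and input fields are placeholders):
+//
+//	presigner := sts.NewPresignClient(client)
+//	req, err := presigner.PresignAssumeRole(ctx, &sts.AssumeRoleInput{
+//		RoleArn:         aws.String("arn:aws:iam::123456789012:role/demo"),
+//		RoleSessionName: aws.String("example-session"),
+//	})
+//	// On success, req.URL, req.Method, and req.SignedHeader describe the
+//	// signed request.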
+func (c *PresignClient) PresignAssumeRole(ctx context.Context, params *AssumeRoleInput, optFns ...func(*PresignOptions)) (*v4.PresignedHTTPRequest, error) { + if params == nil { + params = &AssumeRoleInput{} + } + options := c.options.copy() + for _, fn := range optFns { + fn(&options) + } + clientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption) + + result, _, err := c.client.invokeOperation(ctx, "AssumeRole", params, clientOptFns, + c.client.addOperationAssumeRoleMiddlewares, + presignConverter(options).convertToPresignMiddleware, + ) + if err != nil { + return nil, err + } + + out := result.(*v4.PresignedHTTPRequest) + return out, nil +} + +type opAssumeRoleResolveEndpointMiddleware struct { + EndpointResolver EndpointResolverV2 + BuiltInResolver builtInParameterResolver +} + +func (*opAssumeRoleResolveEndpointMiddleware) ID() string { + return "ResolveEndpointV2" +} + +func (m *opAssumeRoleResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { + return next.HandleSerialize(ctx, in) + } + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.EndpointResolver == nil { + return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") + } + + params := EndpointParameters{} + + m.BuiltInResolver.ResolveBuiltIns(¶ms) + + var resolvedEndpoint smithyendpoints.Endpoint + resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) + if err != nil { + return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) + } + + req.URL = &resolvedEndpoint.URI + + for k := range resolvedEndpoint.Headers { + req.Header.Set( + k, + resolvedEndpoint.Headers.Get(k), + ) + } + + authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) + if err != nil { + var nfe *internalauth.NoAuthenticationSchemesFoundError + if errors.As(err, &nfe) { + // if no auth scheme is found, default to sigv4 + signingName := "sts" + signingRegion := m.BuiltInResolver.(*builtInResolver).Region + ctx = awsmiddleware.SetSigningName(ctx, signingName) + ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) + + } + var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError + if errors.As(err, &ue) { + return out, metadata, fmt.Errorf( + "This operation requests signer version(s) %v but the client only supports %v", + ue.UnsupportedSchemes, + internalauth.SupportedSchemes, + ) + } + } + + for _, authScheme := range authSchemes { + switch authScheme.(type) { + case *internalauth.AuthenticationSchemeV4: + v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) + var signingName, signingRegion string + if v4Scheme.SigningName == nil { + signingName = "sts" + } else { + signingName = *v4Scheme.SigningName + } + if v4Scheme.SigningRegion == nil { + signingRegion = m.BuiltInResolver.(*builtInResolver).Region + } else { + signingRegion = *v4Scheme.SigningRegion + } + if v4Scheme.DisableDoubleEncoding != nil { + // The signer sets an equivalent value at client initialization time. + // Setting this context value will cause the signer to extract it + // and override the value set at client initialization time. 
+ ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) + } + ctx = awsmiddleware.SetSigningName(ctx, signingName) + ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) + break + case *internalauth.AuthenticationSchemeV4A: + v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) + if v4aScheme.SigningName == nil { + v4aScheme.SigningName = aws.String("sts") + } + if v4aScheme.DisableDoubleEncoding != nil { + // The signer sets an equivalent value at client initialization time. + // Setting this context value will cause the signer to extract it + // and override the value set at client initialization time. + ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) + } + ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) + ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) + break + case *internalauth.AuthenticationSchemeNone: + break + } + } + + return next.HandleSerialize(ctx, in) +} + +func addAssumeRoleResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { + return stack.Serialize.Insert(&opAssumeRoleResolveEndpointMiddleware{ + EndpointResolver: options.EndpointResolverV2, + BuiltInResolver: &builtInResolver{ + Region: options.Region, + UseDualStack: options.EndpointOptions.UseDualStackEndpoint, + UseFIPS: options.EndpointOptions.UseFIPSEndpoint, + Endpoint: options.BaseEndpoint, + }, + }, "ResolveEndpoint", middleware.After) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go new file mode 100644 index 00000000000..9c33720d41a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go @@ -0,0 +1,482 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sts + +import ( + "context" + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" + "github.com/aws/aws-sdk-go-v2/service/sts/types" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns a set of temporary security credentials for users who have been +// authenticated via a SAML authentication response. This operation provides a +// mechanism for tying an enterprise identity store or directory to role-based +// Amazon Web Services access without user-specific credentials or configuration. +// For a comparison of AssumeRoleWithSAML with the other API operations that +// produce temporary credentials, see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. The temporary security credentials returned by this +// operation consist of an access key ID, a secret access key, and a security +// token. Applications can use these temporary security credentials to sign calls +// to Amazon Web Services services. Session Duration By default, the temporary +// security credentials created by AssumeRoleWithSAML last for one hour. However, +// you can use the optional DurationSeconds parameter to specify the duration of +// your session. 
Your role session lasts for the duration that you specify, or +// until the time specified in the SAML authentication response's +// SessionNotOnOrAfter value, whichever is shorter. You can provide a +// DurationSeconds value from 900 seconds (15 minutes) up to the maximum session +// duration setting for the role. This setting can have a value from 1 hour to 12 +// hours. To learn how to view the maximum value for your role, see View the +// Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) +// in the IAM User Guide. The maximum session duration limit applies when you use +// the AssumeRole* API operations or the assume-role* CLI commands. However the +// limit does not apply when you use those operations to create a console URL. For +// more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) +// in the IAM User Guide. Role chaining (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html#iam-term-role-chaining) +// limits your CLI or Amazon Web Services API role session to a maximum of one +// hour. When you use the AssumeRole API operation to assume a role, you can +// specify the duration of your role session with the DurationSeconds parameter. +// You can specify a parameter value of up to 43200 seconds (12 hours), depending +// on the maximum session duration setting for your role. However, if you assume a +// role using role chaining and provide a DurationSeconds parameter value greater +// than one hour, the operation fails. Permissions The temporary security +// credentials created by AssumeRoleWithSAML can be used to make API calls to any +// Amazon Web Services service with the following exception: you cannot call the +// STS GetFederationToken or GetSessionToken API operations. (Optional) You can +// pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// to this operation. You can pass a single JSON policy document to use as an +// inline session policy. You can also specify up to 10 managed policy Amazon +// Resource Names (ARNs) to use as managed session policies. The plaintext that you +// use for both inline and managed session policies can't exceed 2,048 characters. +// Passing policies to this operation returns new temporary credentials. The +// resulting session's permissions are the intersection of the role's +// identity-based policy and the session policies. You can use the role's temporary +// credentials in subsequent Amazon Web Services API calls to access resources in +// the account that owns the role. You cannot use session policies to grant more +// permissions than those allowed by the identity-based policy of the role that is +// being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// in the IAM User Guide. Calling AssumeRoleWithSAML does not require the use of +// Amazon Web Services security credentials. The identity of the caller is +// validated by using keys in the metadata document that is uploaded for the SAML +// provider entity for your identity provider. Calling AssumeRoleWithSAML can +// result in an entry in your CloudTrail logs. The entry includes the value in the +// NameID element of the SAML assertion. 
We recommend that you use a NameIDType +// that is not associated with any personally identifiable information (PII). For +// example, you could instead use the persistent identifier ( +// urn:oasis:names:tc:SAML:2.0:nameid-format:persistent ). Tags (Optional) You can +// configure your IdP to pass attributes into your SAML assertion as session tags. +// Each session tag consists of a key name and an associated value. For more +// information about session tags, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. You can pass up to 50 session tags. The plaintext session +// tag keys can’t exceed 128 characters and the values can’t exceed 256 characters. +// For these and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) +// in the IAM User Guide. An Amazon Web Services conversion compresses the passed +// inline session policy, managed policy ARNs, and session tags into a packed +// binary format that has a separate limit. Your request can fail for this limit +// even if your plaintext meets the other requirements. The PackedPolicySize +// response element indicates by percentage how close the policies and tags for +// your request are to the upper size limit. You can pass a session tag with the +// same key as a tag that is attached to the role. When you do, session tags +// override the role's tags with the same key. An administrator must grant you the +// permissions necessary to pass session tags. The administrator can also create +// granular permissions to allow you to pass only specific session tags. For more +// information, see Tutorial: Using Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) +// in the IAM User Guide. You can set the session tags as transitive. Transitive +// tags persist during role chaining. For more information, see Chaining Roles +// with Session Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) +// in the IAM User Guide. SAML Configuration Before your application can call +// AssumeRoleWithSAML , you must configure your SAML identity provider (IdP) to +// issue the claims required by Amazon Web Services. Additionally, you must use +// Identity and Access Management (IAM) to create a SAML provider entity in your +// Amazon Web Services account that represents your identity provider. You must +// also create an IAM role that specifies this SAML provider in its trust policy. +// For more information, see the following resources: +// - About SAML 2.0-based Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html) +// in the IAM User Guide. +// - Creating SAML Identity Providers (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html) +// in the IAM User Guide. +// - Configuring a Relying Party and Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html) +// in the IAM User Guide. +// - Creating a Role for SAML 2.0 Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html) +// in the IAM User Guide. 
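+//
+// A minimal call sketch, for illustration only (not upstream documentation;
+// the ARNs and assertion value are placeholders):
+//
+//	out, err := client.AssumeRoleWithSAML(ctx, &sts.AssumeRoleWithSAMLInput{
+//		PrincipalArn:  aws.String("arn:aws:iam::123456789012:saml-provider/MyIdP"),
+//		RoleArn:       aws.String("arn:aws:iam::123456789012:role/demo"),
+//		SAMLAssertion: aws.String(encodedAssertion), // base64-encoded SAML response
+//	})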
+func (c *Client) AssumeRoleWithSAML(ctx context.Context, params *AssumeRoleWithSAMLInput, optFns ...func(*Options)) (*AssumeRoleWithSAMLOutput, error) { + if params == nil { + params = &AssumeRoleWithSAMLInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "AssumeRoleWithSAML", params, optFns, c.addOperationAssumeRoleWithSAMLMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*AssumeRoleWithSAMLOutput) + out.ResultMetadata = metadata + return out, nil +} + +type AssumeRoleWithSAMLInput struct { + + // The Amazon Resource Name (ARN) of the SAML provider in IAM that describes the + // IdP. + // + // This member is required. + PrincipalArn *string + + // The Amazon Resource Name (ARN) of the role that the caller is assuming. + // + // This member is required. + RoleArn *string + + // The base64 encoded SAML authentication response provided by the IdP. For more + // information, see Configuring a Relying Party and Adding Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html) + // in the IAM User Guide. + // + // This member is required. + SAMLAssertion *string + + // The duration, in seconds, of the role session. Your role session lasts for the + // duration that you specify for the DurationSeconds parameter, or until the time + // specified in the SAML authentication response's SessionNotOnOrAfter value, + // whichever is shorter. You can provide a DurationSeconds value from 900 seconds + // (15 minutes) up to the maximum session duration setting for the role. This + // setting can have a value from 1 hour to 12 hours. If you specify a value higher + // than this setting, the operation fails. For example, if you specify a session + // duration of 12 hours, but your administrator set the maximum session duration to + // 6 hours, your operation fails. To learn how to view the maximum value for your + // role, see View the Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // in the IAM User Guide. By default, the value is set to 3600 seconds. The + // DurationSeconds parameter is separate from the duration of a console session + // that you might request using the returned credentials. The request to the + // federation endpoint for a console sign-in token takes a SessionDuration + // parameter that specifies the maximum length of the console session. For more + // information, see Creating a URL that Enables Federated Users to Access the + // Amazon Web Services Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // in the IAM User Guide. + DurationSeconds *int32 + + // An IAM policy in JSON format that you want to use as an inline session policy. + // This parameter is optional. Passing policies to this operation returns new + // temporary credentials. The resulting session's permissions are the intersection + // of the role's identity-based policy and the session policies. You can use the + // role's temporary credentials in subsequent Amazon Web Services API calls to + // access resources in the account that owns the role. You cannot use session + // policies to grant more permissions than those allowed by the identity-based + // policy of the role that is being assumed. For more information, see Session + // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. 
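+	// As an illustration only (not upstream documentation; the bucket name is a
+	// placeholder), a minimal inline session policy might look like:
+	//
+	//	{"Version": "2012-10-17", "Statement": [{"Effect": "Allow",
+	//	"Action": "s3:GetObject", "Resource": "arn:aws:s3:::example-bucket/*"}]}
+	//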
The plaintext that you use for both inline and managed + // session policies can't exceed 2,048 characters. The JSON policy characters can + // be any ASCII character from the space character to the end of the valid + // character list (\u0020 through \u00FF). It can also include the tab (\u0009), + // linefeed (\u000A), and carriage return (\u000D) characters. An Amazon Web + // Services conversion compresses the passed inline session policy, managed policy + // ARNs, and session tags into a packed binary format that has a separate limit. + // Your request can fail for this limit even if your plaintext meets the other + // requirements. The PackedPolicySize response element indicates by percentage how + // close the policies and tags for your request are to the upper size limit. + Policy *string + + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want to + // use as managed session policies. The policies must exist in the same account as + // the role. This parameter is optional. You can provide up to 10 managed policy + // ARNs. However, the plaintext that you use for both inline and managed session + // policies can't exceed 2,048 characters. For more information about ARNs, see + // Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the Amazon Web Services General Reference. An Amazon Web Services conversion + // compresses the passed inline session policy, managed policy ARNs, and session + // tags into a packed binary format that has a separate limit. Your request can + // fail for this limit even if your plaintext meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. Passing policies to this + // operation returns new temporary credentials. The resulting session's permissions + // are the intersection of the role's identity-based policy and the session + // policies. You can use the role's temporary credentials in subsequent Amazon Web + // Services API calls to access resources in the account that owns the role. You + // cannot use session policies to grant more permissions than those allowed by the + // identity-based policy of the role that is being assumed. For more information, + // see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + PolicyArns []types.PolicyDescriptorType + + noSmithyDocumentSerde +} + +// Contains the response to a successful AssumeRoleWithSAML request, including +// temporary Amazon Web Services credentials that can be used to make Amazon Web +// Services requests. +type AssumeRoleWithSAMLOutput struct { + + // The identifiers for the temporary security credentials that the operation + // returns. + AssumedRoleUser *types.AssumedRoleUser + + // The value of the Recipient attribute of the SubjectConfirmationData element of + // the SAML assertion. + Audience *string + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. The size of the security token + // that STS API operations return is not fixed. We strongly recommend that you make + // no assumptions about the maximum size. + Credentials *types.Credentials + + // The value of the Issuer element of the SAML assertion. 
+ Issuer *string + + // A hash value based on the concatenation of the following: + // - The Issuer response value. + // - The Amazon Web Services account ID. + // - The friendly name (the last part of the ARN) of the SAML provider in IAM. + // The combination of NameQualifier and Subject can be used to uniquely identify a + // user. The following pseudocode shows how the hash value is calculated: BASE64 ( + // SHA1 ( "https://example.com/saml" + "123456789012" + "/MySAMLIdP" ) ) + NameQualifier *string + + // A percentage value that indicates the packed size of the session policies and + // session tags combined passed in the request. The request fails if the packed + // size is greater than 100 percent, which means the policies and tags exceeded the + // allowed space. + PackedPolicySize *int32 + + // The value in the SourceIdentity attribute in the SAML assertion. You can + // require users to set a source identity value when they assume a role. You do + // this by using the sts:SourceIdentity condition key in a role trust policy. That + // way, actions that are taken with the role are associated with that user. After + // the source identity is set, the value cannot be changed. It is present in the + // request for all actions that are taken by the role and persists across chained + // role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining) + // sessions. You can configure your SAML identity provider to use an attribute + // associated with your users, like user name or email, as the source identity when + // calling AssumeRoleWithSAML . You do this by adding an attribute to the SAML + // assertion. For more information about using source identity, see Monitor and + // control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) + // in the IAM User Guide. The regex used to validate this parameter is a string of + // characters consisting of upper- and lower-case alphanumeric characters with no + // spaces. You can also include underscores or any of the following characters: + // =,.@- + SourceIdentity *string + + // The value of the NameID element in the Subject element of the SAML assertion. + Subject *string + + // The format of the name ID, as defined by the Format attribute in the NameID + // element of the SAML assertion. Typical examples of the format are transient or + // persistent . If the format includes the prefix + // urn:oasis:names:tc:SAML:2.0:nameid-format , that prefix is removed. For example, + // urn:oasis:names:tc:SAML:2.0:nameid-format:transient is returned as transient . + // If the format includes any other prefix, the format is returned with no + // modifications. + SubjectType *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationAssumeRoleWithSAMLMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsquery_serializeOpAssumeRoleWithSAML{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsquery_deserializeOpAssumeRoleWithSAML{}, middleware.After) + if err != nil { + return err + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addAssumeRoleWithSAMLResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addOpAssumeRoleWithSAMLValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opAssumeRoleWithSAML(options.Region), middleware.Before); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opAssumeRoleWithSAML(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "sts", + OperationName: "AssumeRoleWithSAML", + } +} + +type opAssumeRoleWithSAMLResolveEndpointMiddleware struct { + EndpointResolver EndpointResolverV2 + BuiltInResolver builtInParameterResolver +} + +func (*opAssumeRoleWithSAMLResolveEndpointMiddleware) ID() string { + return "ResolveEndpointV2" +} + +func (m *opAssumeRoleWithSAMLResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { + return next.HandleSerialize(ctx, in) + } + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.EndpointResolver == nil { + return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") + } + + params := EndpointParameters{} + + m.BuiltInResolver.ResolveBuiltIns(¶ms) + + var resolvedEndpoint smithyendpoints.Endpoint + resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) + if err != nil { + return out, metadata, 
fmt.Errorf("failed to resolve service endpoint, %w", err) + } + + req.URL = &resolvedEndpoint.URI + + for k := range resolvedEndpoint.Headers { + req.Header.Set( + k, + resolvedEndpoint.Headers.Get(k), + ) + } + + authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) + if err != nil { + var nfe *internalauth.NoAuthenticationSchemesFoundError + if errors.As(err, &nfe) { + // if no auth scheme is found, default to sigv4 + signingName := "sts" + signingRegion := m.BuiltInResolver.(*builtInResolver).Region + ctx = awsmiddleware.SetSigningName(ctx, signingName) + ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) + + } + var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError + if errors.As(err, &ue) { + return out, metadata, fmt.Errorf( + "This operation requests signer version(s) %v but the client only supports %v", + ue.UnsupportedSchemes, + internalauth.SupportedSchemes, + ) + } + } + + for _, authScheme := range authSchemes { + switch authScheme.(type) { + case *internalauth.AuthenticationSchemeV4: + v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) + var signingName, signingRegion string + if v4Scheme.SigningName == nil { + signingName = "sts" + } else { + signingName = *v4Scheme.SigningName + } + if v4Scheme.SigningRegion == nil { + signingRegion = m.BuiltInResolver.(*builtInResolver).Region + } else { + signingRegion = *v4Scheme.SigningRegion + } + if v4Scheme.DisableDoubleEncoding != nil { + // The signer sets an equivalent value at client initialization time. + // Setting this context value will cause the signer to extract it + // and override the value set at client initialization time. + ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) + } + ctx = awsmiddleware.SetSigningName(ctx, signingName) + ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) + break + case *internalauth.AuthenticationSchemeV4A: + v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) + if v4aScheme.SigningName == nil { + v4aScheme.SigningName = aws.String("sts") + } + if v4aScheme.DisableDoubleEncoding != nil { + // The signer sets an equivalent value at client initialization time. + // Setting this context value will cause the signer to extract it + // and override the value set at client initialization time. 
+ ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) + } + ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) + ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) + break + case *internalauth.AuthenticationSchemeNone: + break + } + } + + return next.HandleSerialize(ctx, in) +} + +func addAssumeRoleWithSAMLResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { + return stack.Serialize.Insert(&opAssumeRoleWithSAMLResolveEndpointMiddleware{ + EndpointResolver: options.EndpointResolverV2, + BuiltInResolver: &builtInResolver{ + Region: options.Region, + UseDualStack: options.EndpointOptions.UseDualStackEndpoint, + UseFIPS: options.EndpointOptions.UseFIPSEndpoint, + Endpoint: options.BaseEndpoint, + }, + }, "ResolveEndpoint", middleware.After) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go new file mode 100644 index 00000000000..fa4a6084591 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go @@ -0,0 +1,501 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sts + +import ( + "context" + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" + "github.com/aws/aws-sdk-go-v2/service/sts/types" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns a set of temporary security credentials for users who have been +// authenticated in a mobile or web application with a web identity provider. +// Example providers include the OAuth 2.0 providers Login with Amazon and +// Facebook, or any OpenID Connect-compatible identity provider such as Google or +// Amazon Cognito federated identities (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html) +// . For mobile applications, we recommend that you use Amazon Cognito. You can use +// Amazon Cognito with the Amazon Web Services SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/) +// and the Amazon Web Services SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/) +// to uniquely identify a user. You can also supply the user with a consistent +// identity throughout the lifetime of an application. To learn more about Amazon +// Cognito, see Amazon Cognito identity pools (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html) +// in Amazon Cognito Developer Guide. Calling AssumeRoleWithWebIdentity does not +// require the use of Amazon Web Services security credentials. Therefore, you can +// distribute an application (for example, on mobile devices) that requests +// temporary security credentials without including long-term Amazon Web Services +// credentials in the application. You also don't need to deploy server-based proxy +// services that use long-term Amazon Web Services credentials. Instead, the +// identity of the caller is validated by using a token from the web identity +// provider. 
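+// A minimal call sketch, for illustration only (not upstream documentation;
+// the ARN and token value are placeholders):
+//
+//	out, err := client.AssumeRoleWithWebIdentity(ctx, &sts.AssumeRoleWithWebIdentityInput{
+//		RoleArn:          aws.String("arn:aws:iam::123456789012:role/demo"),
+//		RoleSessionName:  aws.String("example-session"),
+//		WebIdentityToken: aws.String(idToken), // token issued by the identity provider
+//	})
+//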
For a comparison of AssumeRoleWithWebIdentity with the other API +// operations that produce temporary credentials, see Requesting Temporary +// Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. The temporary security credentials returned by this API +// consist of an access key ID, a secret access key, and a security token. +// Applications can use these temporary security credentials to sign calls to +// Amazon Web Services service API operations. Session Duration By default, the +// temporary security credentials created by AssumeRoleWithWebIdentity last for +// one hour. However, you can use the optional DurationSeconds parameter to +// specify the duration of your session. You can provide a value from 900 seconds +// (15 minutes) up to the maximum session duration setting for the role. This +// setting can have a value from 1 hour to 12 hours. To learn how to view the +// maximum value for your role, see View the Maximum Session Duration Setting for +// a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) +// in the IAM User Guide. The maximum session duration limit applies when you use +// the AssumeRole* API operations or the assume-role* CLI commands. However the +// limit does not apply when you use those operations to create a console URL. For +// more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) +// in the IAM User Guide. Permissions The temporary security credentials created by +// AssumeRoleWithWebIdentity can be used to make API calls to any Amazon Web +// Services service with the following exception: you cannot call the STS +// GetFederationToken or GetSessionToken API operations. (Optional) You can pass +// inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// to this operation. You can pass a single JSON policy document to use as an +// inline session policy. You can also specify up to 10 managed policy Amazon +// Resource Names (ARNs) to use as managed session policies. The plaintext that you +// use for both inline and managed session policies can't exceed 2,048 characters. +// Passing policies to this operation returns new temporary credentials. The +// resulting session's permissions are the intersection of the role's +// identity-based policy and the session policies. You can use the role's temporary +// credentials in subsequent Amazon Web Services API calls to access resources in +// the account that owns the role. You cannot use session policies to grant more +// permissions than those allowed by the identity-based policy of the role that is +// being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// in the IAM User Guide. Tags (Optional) You can configure your IdP to pass +// attributes into your web identity token as session tags. Each session tag +// consists of a key name and an associated value. For more information about +// session tags, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. You can pass up to 50 session tags. 
The plaintext session +// tag keys can’t exceed 128 characters and the values can’t exceed 256 characters. +// For these and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) +// in the IAM User Guide. An Amazon Web Services conversion compresses the passed +// inline session policy, managed policy ARNs, and session tags into a packed +// binary format that has a separate limit. Your request can fail for this limit +// even if your plaintext meets the other requirements. The PackedPolicySize +// response element indicates by percentage how close the policies and tags for +// your request are to the upper size limit. You can pass a session tag with the +// same key as a tag that is attached to the role. When you do, the session tag +// overrides the role tag with the same key. An administrator must grant you the +// permissions necessary to pass session tags. The administrator can also create +// granular permissions to allow you to pass only specific session tags. For more +// information, see Tutorial: Using Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) +// in the IAM User Guide. You can set the session tags as transitive. Transitive +// tags persist during role chaining. For more information, see Chaining Roles +// with Session Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) +// in the IAM User Guide. Identities Before your application can call +// AssumeRoleWithWebIdentity , you must have an identity token from a supported +// identity provider and create a role that the application can assume. The role +// that your application assumes must trust the identity provider that is +// associated with the identity token. In other words, the identity provider must +// be specified in the role's trust policy. Calling AssumeRoleWithWebIdentity can +// result in an entry in your CloudTrail logs. The entry includes the Subject (http://openid.net/specs/openid-connect-core-1_0.html#Claims) +// of the provided web identity token. We recommend that you avoid using any +// personally identifiable information (PII) in this field. For example, you could +// instead use a GUID or a pairwise identifier, as suggested in the OIDC +// specification (http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes) +// . For more information about how to use web identity federation and the +// AssumeRoleWithWebIdentity API, see the following resources: +// - Using Web Identity Federation API Operations for Mobile Apps (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html) +// and Federation Through a Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity) +// . +// - Web Identity Federation Playground (https://aws.amazon.com/blogs/aws/the-aws-web-identity-federation-playground/) +// . Walk through the process of authenticating through Login with Amazon, +// Facebook, or Google, getting temporary security credentials, and then using +// those credentials to make a request to Amazon Web Services. +// - Amazon Web Services SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/) +// and Amazon Web Services SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/) +// . 
These toolkits contain sample apps that show how to invoke the identity +// providers. The toolkits then show how to use the information from these +// providers to get and use temporary security credentials. +// - Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications) +// . This article discusses web identity federation and shows an example of how to +// use web identity federation to get access to content in Amazon S3. +func (c *Client) AssumeRoleWithWebIdentity(ctx context.Context, params *AssumeRoleWithWebIdentityInput, optFns ...func(*Options)) (*AssumeRoleWithWebIdentityOutput, error) { + if params == nil { + params = &AssumeRoleWithWebIdentityInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "AssumeRoleWithWebIdentity", params, optFns, c.addOperationAssumeRoleWithWebIdentityMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*AssumeRoleWithWebIdentityOutput) + out.ResultMetadata = metadata + return out, nil +} + +type AssumeRoleWithWebIdentityInput struct { + + // The Amazon Resource Name (ARN) of the role that the caller is assuming. + // + // This member is required. + RoleArn *string + + // An identifier for the assumed role session. Typically, you pass the name or + // identifier that is associated with the user who is using your application. That + // way, the temporary security credentials that your application will use are + // associated with that user. This session name is included as part of the ARN and + // assumed role ID in the AssumedRoleUser response element. The regex used to + // validate this parameter is a string of characters consisting of upper- and + // lower-case alphanumeric characters with no spaces. You can also include + // underscores or any of the following characters: =,.@- + // + // This member is required. + RoleSessionName *string + + // The OAuth 2.0 access token or OpenID Connect ID token that is provided by the + // identity provider. Your application must get this token by authenticating the + // user who is using your application with a web identity provider before the + // application makes an AssumeRoleWithWebIdentity call. Only tokens with RSA + // algorithms (RS256) are supported. + // + // This member is required. + WebIdentityToken *string + + // The duration, in seconds, of the role session. The value can range from 900 + // seconds (15 minutes) up to the maximum session duration setting for the role. + // This setting can have a value from 1 hour to 12 hours. If you specify a value + // higher than this setting, the operation fails. For example, if you specify a + // session duration of 12 hours, but your administrator set the maximum session + // duration to 6 hours, your operation fails. To learn how to view the maximum + // value for your role, see View the Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // in the IAM User Guide. By default, the value is set to 3600 seconds. The + // DurationSeconds parameter is separate from the duration of a console session + // that you might request using the returned credentials. The request to the + // federation endpoint for a console sign-in token takes a SessionDuration + // parameter that specifies the maximum length of the console session. 
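+	//
+	//	// Illustrative: request a 2-hour session (assuming the role's maximum
+	//	// session duration allows it):
+	//	// DurationSeconds: aws.Int32(7200),
+	//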
For more + // information, see Creating a URL that Enables Federated Users to Access the + // Amazon Web Services Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // in the IAM User Guide. + DurationSeconds *int32 + + // An IAM policy in JSON format that you want to use as an inline session policy. + // This parameter is optional. Passing policies to this operation returns new + // temporary credentials. The resulting session's permissions are the intersection + // of the role's identity-based policy and the session policies. You can use the + // role's temporary credentials in subsequent Amazon Web Services API calls to + // access resources in the account that owns the role. You cannot use session + // policies to grant more permissions than those allowed by the identity-based + // policy of the role that is being assumed. For more information, see Session + // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. The plaintext that you use for both inline and managed + // session policies can't exceed 2,048 characters. The JSON policy characters can + // be any ASCII character from the space character to the end of the valid + // character list (\u0020 through \u00FF). It can also include the tab (\u0009), + // linefeed (\u000A), and carriage return (\u000D) characters. An Amazon Web + // Services conversion compresses the passed inline session policy, managed policy + // ARNs, and session tags into a packed binary format that has a separate limit. + // Your request can fail for this limit even if your plaintext meets the other + // requirements. The PackedPolicySize response element indicates by percentage how + // close the policies and tags for your request are to the upper size limit. + Policy *string + + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want to + // use as managed session policies. The policies must exist in the same account as + // the role. This parameter is optional. You can provide up to 10 managed policy + // ARNs. However, the plaintext that you use for both inline and managed session + // policies can't exceed 2,048 characters. For more information about ARNs, see + // Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the Amazon Web Services General Reference. An Amazon Web Services conversion + // compresses the passed inline session policy, managed policy ARNs, and session + // tags into a packed binary format that has a separate limit. Your request can + // fail for this limit even if your plaintext meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. Passing policies to this + // operation returns new temporary credentials. The resulting session's permissions + // are the intersection of the role's identity-based policy and the session + // policies. You can use the role's temporary credentials in subsequent Amazon Web + // Services API calls to access resources in the account that owns the role. You + // cannot use session policies to grant more permissions than those allowed by the + // identity-based policy of the role that is being assumed. 
For more information, + // see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + PolicyArns []types.PolicyDescriptorType + + // The fully qualified host component of the domain name of the OAuth 2.0 identity + // provider. Do not specify this value for an OpenID Connect identity provider. + // Currently www.amazon.com and graph.facebook.com are the only supported identity + // providers for OAuth 2.0 access tokens. Do not include URL schemes and port + // numbers. Do not specify this value for OpenID Connect ID tokens. + ProviderId *string + + noSmithyDocumentSerde +} + +// Contains the response to a successful AssumeRoleWithWebIdentity request, +// including temporary Amazon Web Services credentials that can be used to make +// Amazon Web Services requests. +type AssumeRoleWithWebIdentityOutput struct { + + // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers + // that you can use to refer to the resulting temporary security credentials. For + // example, you can reference these credentials as a principal in a resource-based + // policy by using the ARN or assumed role ID. The ARN and ID include the + // RoleSessionName that you specified when you called AssumeRole . + AssumedRoleUser *types.AssumedRoleUser + + // The intended audience (also known as client ID) of the web identity token. This + // is traditionally the client identifier issued to the application that requested + // the web identity token. + Audience *string + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security token. The size of the security token that STS API + // operations return is not fixed. We strongly recommend that you make no + // assumptions about the maximum size. + Credentials *types.Credentials + + // A percentage value that indicates the packed size of the session policies and + // session tags combined passed in the request. The request fails if the packed + // size is greater than 100 percent, which means the policies and tags exceeded the + // allowed space. + PackedPolicySize *int32 + + // The issuing authority of the web identity token presented. For OpenID Connect + // ID tokens, this contains the value of the iss field. For OAuth 2.0 access + // tokens, this contains the value of the ProviderId parameter that was passed in + // the AssumeRoleWithWebIdentity request. + Provider *string + + // The value of the source identity that is returned in the JSON web token (JWT) + // from the identity provider. You can require users to set a source identity value + // when they assume a role. You do this by using the sts:SourceIdentity condition + // key in a role trust policy. That way, actions that are taken with the role are + // associated with that user. After the source identity is set, the value cannot be + // changed. It is present in the request for all actions that are taken by the role + // and persists across chained role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining) + // sessions. You can configure your identity provider to use an attribute + // associated with your users, like user name or email, as the source identity when + // calling AssumeRoleWithWebIdentity . You do this by adding a claim to the JSON + // web token. 
To learn more about OIDC tokens and claims, see Using Tokens with + // User Pools (https://docs.aws.amazon.com/cognito/latest/developerguide/amazon-cognito-user-pools-using-tokens-with-identity-providers.html) + // in the Amazon Cognito Developer Guide. For more information about using source + // identity, see Monitor and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) + // in the IAM User Guide. The regex used to validate this parameter is a string of + // characters consisting of upper- and lower-case alphanumeric characters with no + // spaces. You can also include underscores or any of the following characters: + // =,.@- + SourceIdentity *string + + // The unique user identifier that is returned by the identity provider. This + // identifier is associated with the WebIdentityToken that was submitted with the + // AssumeRoleWithWebIdentity call. The identifier is typically unique to the user + // and the application that acquired the WebIdentityToken (pairwise identifier). + // For OpenID Connect ID tokens, this field contains the value returned by the + // identity provider as the token's sub (Subject) claim. + SubjectFromWebIdentityToken *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationAssumeRoleWithWebIdentityMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsquery_serializeOpAssumeRoleWithWebIdentity{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsquery_deserializeOpAssumeRoleWithWebIdentity{}, middleware.After) + if err != nil { + return err + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addAssumeRoleWithWebIdentityResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addOpAssumeRoleWithWebIdentityValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opAssumeRoleWithWebIdentity(options.Region), middleware.Before); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func 
newServiceMetadataMiddleware_opAssumeRoleWithWebIdentity(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "sts", + OperationName: "AssumeRoleWithWebIdentity", + } +} + +type opAssumeRoleWithWebIdentityResolveEndpointMiddleware struct { + EndpointResolver EndpointResolverV2 + BuiltInResolver builtInParameterResolver +} + +func (*opAssumeRoleWithWebIdentityResolveEndpointMiddleware) ID() string { + return "ResolveEndpointV2" +} + +func (m *opAssumeRoleWithWebIdentityResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { + return next.HandleSerialize(ctx, in) + } + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.EndpointResolver == nil { + return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") + } + + params := EndpointParameters{} + + m.BuiltInResolver.ResolveBuiltIns(¶ms) + + var resolvedEndpoint smithyendpoints.Endpoint + resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) + if err != nil { + return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) + } + + req.URL = &resolvedEndpoint.URI + + for k := range resolvedEndpoint.Headers { + req.Header.Set( + k, + resolvedEndpoint.Headers.Get(k), + ) + } + + authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) + if err != nil { + var nfe *internalauth.NoAuthenticationSchemesFoundError + if errors.As(err, &nfe) { + // if no auth scheme is found, default to sigv4 + signingName := "sts" + signingRegion := m.BuiltInResolver.(*builtInResolver).Region + ctx = awsmiddleware.SetSigningName(ctx, signingName) + ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) + + } + var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError + if errors.As(err, &ue) { + return out, metadata, fmt.Errorf( + "This operation requests signer version(s) %v but the client only supports %v", + ue.UnsupportedSchemes, + internalauth.SupportedSchemes, + ) + } + } + + for _, authScheme := range authSchemes { + switch authScheme.(type) { + case *internalauth.AuthenticationSchemeV4: + v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) + var signingName, signingRegion string + if v4Scheme.SigningName == nil { + signingName = "sts" + } else { + signingName = *v4Scheme.SigningName + } + if v4Scheme.SigningRegion == nil { + signingRegion = m.BuiltInResolver.(*builtInResolver).Region + } else { + signingRegion = *v4Scheme.SigningRegion + } + if v4Scheme.DisableDoubleEncoding != nil { + // The signer sets an equivalent value at client initialization time. + // Setting this context value will cause the signer to extract it + // and override the value set at client initialization time. 
+ ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) + } + ctx = awsmiddleware.SetSigningName(ctx, signingName) + ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) + break + case *internalauth.AuthenticationSchemeV4A: + v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) + if v4aScheme.SigningName == nil { + v4aScheme.SigningName = aws.String("sts") + } + if v4aScheme.DisableDoubleEncoding != nil { + // The signer sets an equivalent value at client initialization time. + // Setting this context value will cause the signer to extract it + // and override the value set at client initialization time. + ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) + } + ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) + ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) + break + case *internalauth.AuthenticationSchemeNone: + break + } + } + + return next.HandleSerialize(ctx, in) +} + +func addAssumeRoleWithWebIdentityResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { + return stack.Serialize.Insert(&opAssumeRoleWithWebIdentityResolveEndpointMiddleware{ + EndpointResolver: options.EndpointResolverV2, + BuiltInResolver: &builtInResolver{ + Region: options.Region, + UseDualStack: options.EndpointOptions.UseDualStackEndpoint, + UseFIPS: options.EndpointOptions.UseFIPSEndpoint, + Endpoint: options.BaseEndpoint, + }, + }, "ResolveEndpoint", middleware.After) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go new file mode 100644 index 00000000000..baf2f96866b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go @@ -0,0 +1,285 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sts + +import ( + "context" + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Decodes additional information about the authorization status of a request from +// an encoded message returned in response to an Amazon Web Services request. For +// example, if a user is not authorized to perform an operation that he or she has +// requested, the request returns a Client.UnauthorizedOperation response (an HTTP +// 403 response). Some Amazon Web Services operations additionally return an +// encoded message that can provide details about this authorization failure. Only +// certain Amazon Web Services operations return an encoded authorization message. +// The documentation for an individual operation indicates whether that operation +// returns an encoded message in addition to returning an HTTP code. The message is +// encoded because the details of the authorization status can contain privileged +// information that the user who requested the operation should not see. To decode +// an authorization status message, a user must be granted permissions through an +// IAM policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) +// to request the DecodeAuthorizationMessage ( sts:DecodeAuthorizationMessage ) +// action. 
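+//
+// A minimal usage sketch (illustrative; client is an *sts.Client from
+// sts.NewFromConfig, and encodedMessage is the encoded authorization message
+// returned by a denied request):
+//
+//	out, err := client.DecodeAuthorizationMessage(ctx, &sts.DecodeAuthorizationMessageInput{
+//		EncodedMessage: aws.String(encodedMessage),
+//	})
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = out.DecodedMessage // decoded details of the authorization decision
+//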
The decoded message includes the following type of information: +// - Whether the request was denied due to an explicit deny or due to the +// absence of an explicit allow. For more information, see Determining Whether a +// Request is Allowed or Denied (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow) +// in the IAM User Guide. +// - The principal who made the request. +// - The requested action. +// - The requested resource. +// - The values of condition keys in the context of the user's request. +func (c *Client) DecodeAuthorizationMessage(ctx context.Context, params *DecodeAuthorizationMessageInput, optFns ...func(*Options)) (*DecodeAuthorizationMessageOutput, error) { + if params == nil { + params = &DecodeAuthorizationMessageInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DecodeAuthorizationMessage", params, optFns, c.addOperationDecodeAuthorizationMessageMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DecodeAuthorizationMessageOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DecodeAuthorizationMessageInput struct { + + // The encoded message that was returned with the response. + // + // This member is required. + EncodedMessage *string + + noSmithyDocumentSerde +} + +// A document that contains additional information about the authorization status +// of a request from an encoded message that is returned in response to an Amazon +// Web Services request. +type DecodeAuthorizationMessageOutput struct { + + // The API returns a response with the decoded message. + DecodedMessage *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDecodeAuthorizationMessageMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsquery_serializeOpDecodeAuthorizationMessage{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsquery_deserializeOpDecodeAuthorizationMessage{}, middleware.After) + if err != nil { + return err + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addDecodeAuthorizationMessageResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addOpDecodeAuthorizationMessageValidationMiddleware(stack); err != nil { + return err + } + if err = 
stack.Initialize.Add(newServiceMetadataMiddleware_opDecodeAuthorizationMessage(options.Region), middleware.Before); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDecodeAuthorizationMessage(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "sts", + OperationName: "DecodeAuthorizationMessage", + } +} + +type opDecodeAuthorizationMessageResolveEndpointMiddleware struct { + EndpointResolver EndpointResolverV2 + BuiltInResolver builtInParameterResolver +} + +func (*opDecodeAuthorizationMessageResolveEndpointMiddleware) ID() string { + return "ResolveEndpointV2" +} + +func (m *opDecodeAuthorizationMessageResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { + return next.HandleSerialize(ctx, in) + } + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.EndpointResolver == nil { + return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") + } + + params := EndpointParameters{} + + m.BuiltInResolver.ResolveBuiltIns(¶ms) + + var resolvedEndpoint smithyendpoints.Endpoint + resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) + if err != nil { + return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) + } + + req.URL = &resolvedEndpoint.URI + + for k := range resolvedEndpoint.Headers { + req.Header.Set( + k, + resolvedEndpoint.Headers.Get(k), + ) + } + + authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) + if err != nil { + var nfe *internalauth.NoAuthenticationSchemesFoundError + if errors.As(err, &nfe) { + // if no auth scheme is found, default to sigv4 + signingName := "sts" + signingRegion := m.BuiltInResolver.(*builtInResolver).Region + ctx = awsmiddleware.SetSigningName(ctx, signingName) + ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) + + } + var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError + if errors.As(err, &ue) { + return out, metadata, fmt.Errorf( + "This operation requests signer version(s) %v but the client only supports %v", + ue.UnsupportedSchemes, + internalauth.SupportedSchemes, + ) + } + } + + for _, authScheme := range authSchemes { + switch authScheme.(type) { + case *internalauth.AuthenticationSchemeV4: + v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) + var signingName, signingRegion string + if v4Scheme.SigningName == nil { + signingName = "sts" + } else { + signingName = *v4Scheme.SigningName + } + if v4Scheme.SigningRegion == nil { + signingRegion = m.BuiltInResolver.(*builtInResolver).Region + } else { + signingRegion = *v4Scheme.SigningRegion + } + if v4Scheme.DisableDoubleEncoding != nil { + // The signer sets an equivalent value at client initialization time. 
+ // Setting this context value will cause the signer to extract it + // and override the value set at client initialization time. + ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) + } + ctx = awsmiddleware.SetSigningName(ctx, signingName) + ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) + break + case *internalauth.AuthenticationSchemeV4A: + v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) + if v4aScheme.SigningName == nil { + v4aScheme.SigningName = aws.String("sts") + } + if v4aScheme.DisableDoubleEncoding != nil { + // The signer sets an equivalent value at client initialization time. + // Setting this context value will cause the signer to extract it + // and override the value set at client initialization time. + ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) + } + ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) + ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) + break + case *internalauth.AuthenticationSchemeNone: + break + } + } + + return next.HandleSerialize(ctx, in) +} + +func addDecodeAuthorizationMessageResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { + return stack.Serialize.Insert(&opDecodeAuthorizationMessageResolveEndpointMiddleware{ + EndpointResolver: options.EndpointResolverV2, + BuiltInResolver: &builtInResolver{ + Region: options.Region, + UseDualStack: options.EndpointOptions.UseDualStackEndpoint, + UseFIPS: options.EndpointOptions.UseFIPSEndpoint, + Endpoint: options.BaseEndpoint, + }, + }, "ResolveEndpoint", middleware.After) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go new file mode 100644 index 00000000000..f1dd167da93 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go @@ -0,0 +1,278 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sts + +import ( + "context" + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns the account identifier for the specified access key ID. Access keys +// consist of two parts: an access key ID (for example, AKIAIOSFODNN7EXAMPLE ) and +// a secret access key (for example, wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY ). +// For more information about access keys, see Managing Access Keys for IAM Users (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html) +// in the IAM User Guide. When you pass an access key ID to this operation, it +// returns the ID of the Amazon Web Services account to which the keys belong. +// Access key IDs beginning with AKIA are long-term credentials for an IAM user or +// the Amazon Web Services account root user. Access key IDs beginning with ASIA +// are temporary credentials that are created using STS operations. If the account +// in the response belongs to you, you can sign in as the root user and review your +// root user access keys. Then, you can pull a credentials report (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html) +// to learn which IAM user owns the keys. 
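+//
+// A minimal usage sketch (illustrative; client is an *sts.Client from
+// sts.NewFromConfig, and the access key ID is the documentation example
+// value):
+//
+//	out, err := client.GetAccessKeyInfo(ctx, &sts.GetAccessKeyInfoInput{
+//		AccessKeyId: aws.String("AKIAIOSFODNN7EXAMPLE"),
+//	})
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = out.Account // the account that owns the access key
+//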
To learn who requested the temporary +// credentials for an ASIA access key, view the STS events in your CloudTrail logs (https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html) +// in the IAM User Guide. This operation does not indicate the state of the access +// key. The key might be active, inactive, or deleted. Active keys might not have +// permissions to perform an operation. Providing a deleted access key might return +// an error that the key doesn't exist. +func (c *Client) GetAccessKeyInfo(ctx context.Context, params *GetAccessKeyInfoInput, optFns ...func(*Options)) (*GetAccessKeyInfoOutput, error) { + if params == nil { + params = &GetAccessKeyInfoInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetAccessKeyInfo", params, optFns, c.addOperationGetAccessKeyInfoMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetAccessKeyInfoOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetAccessKeyInfoInput struct { + + // The identifier of an access key. This parameter allows (through its regex + // pattern) a string of characters that can consist of any upper- or lowercase + // letter or digit. + // + // This member is required. + AccessKeyId *string + + noSmithyDocumentSerde +} + +type GetAccessKeyInfoOutput struct { + + // The number used to identify the Amazon Web Services account. + Account *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetAccessKeyInfoMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsquery_serializeOpGetAccessKeyInfo{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsquery_deserializeOpGetAccessKeyInfo{}, middleware.After) + if err != nil { + return err + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addGetAccessKeyInfoResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addOpGetAccessKeyInfoValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetAccessKeyInfo(options.Region), middleware.Before); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = 
addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetAccessKeyInfo(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "sts", + OperationName: "GetAccessKeyInfo", + } +} + +type opGetAccessKeyInfoResolveEndpointMiddleware struct { + EndpointResolver EndpointResolverV2 + BuiltInResolver builtInParameterResolver +} + +func (*opGetAccessKeyInfoResolveEndpointMiddleware) ID() string { + return "ResolveEndpointV2" +} + +func (m *opGetAccessKeyInfoResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { + return next.HandleSerialize(ctx, in) + } + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.EndpointResolver == nil { + return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") + } + + params := EndpointParameters{} + + m.BuiltInResolver.ResolveBuiltIns(¶ms) + + var resolvedEndpoint smithyendpoints.Endpoint + resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) + if err != nil { + return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) + } + + req.URL = &resolvedEndpoint.URI + + for k := range resolvedEndpoint.Headers { + req.Header.Set( + k, + resolvedEndpoint.Headers.Get(k), + ) + } + + authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) + if err != nil { + var nfe *internalauth.NoAuthenticationSchemesFoundError + if errors.As(err, &nfe) { + // if no auth scheme is found, default to sigv4 + signingName := "sts" + signingRegion := m.BuiltInResolver.(*builtInResolver).Region + ctx = awsmiddleware.SetSigningName(ctx, signingName) + ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) + + } + var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError + if errors.As(err, &ue) { + return out, metadata, fmt.Errorf( + "This operation requests signer version(s) %v but the client only supports %v", + ue.UnsupportedSchemes, + internalauth.SupportedSchemes, + ) + } + } + + for _, authScheme := range authSchemes { + switch authScheme.(type) { + case *internalauth.AuthenticationSchemeV4: + v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) + var signingName, signingRegion string + if v4Scheme.SigningName == nil { + signingName = "sts" + } else { + signingName = *v4Scheme.SigningName + } + if v4Scheme.SigningRegion == nil { + signingRegion = m.BuiltInResolver.(*builtInResolver).Region + } else { + signingRegion = *v4Scheme.SigningRegion + } + if v4Scheme.DisableDoubleEncoding != nil { + // The signer sets an equivalent value at client initialization time. + // Setting this context value will cause the signer to extract it + // and override the value set at client initialization time. 
+ ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) + } + ctx = awsmiddleware.SetSigningName(ctx, signingName) + ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) + break + case *internalauth.AuthenticationSchemeV4A: + v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) + if v4aScheme.SigningName == nil { + v4aScheme.SigningName = aws.String("sts") + } + if v4aScheme.DisableDoubleEncoding != nil { + // The signer sets an equivalent value at client initialization time. + // Setting this context value will cause the signer to extract it + // and override the value set at client initialization time. + ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) + } + ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) + ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) + break + case *internalauth.AuthenticationSchemeNone: + break + } + } + + return next.HandleSerialize(ctx, in) +} + +func addGetAccessKeyInfoResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { + return stack.Serialize.Insert(&opGetAccessKeyInfoResolveEndpointMiddleware{ + EndpointResolver: options.EndpointResolverV2, + BuiltInResolver: &builtInResolver{ + Region: options.Region, + UseDualStack: options.EndpointOptions.UseDualStackEndpoint, + UseFIPS: options.EndpointOptions.UseFIPSEndpoint, + Endpoint: options.BaseEndpoint, + }, + }, "ResolveEndpoint", middleware.After) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go new file mode 100644 index 00000000000..66e5d99d491 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go @@ -0,0 +1,294 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sts + +import ( + "context" + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns details about the IAM user or role whose credentials are used to call +// the operation. No permissions are required to perform this operation. If an +// administrator attaches a policy to your identity that explicitly denies access +// to the sts:GetCallerIdentity action, you can still perform this operation. +// Permissions are not required because the same information is returned when +// access is denied. To view an example response, see I Am Not Authorized to +// Perform: iam:DeleteVirtualMFADevice (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_access-denied-delete-mfa) +// in the IAM User Guide. 
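+//
+// A minimal usage sketch (illustrative; client is an *sts.Client from
+// sts.NewFromConfig; the input takes no parameters):
+//
+//	out, err := client.GetCallerIdentity(ctx, &sts.GetCallerIdentityInput{})
+//	if err != nil {
+//		// handle error
+//	}
+//	_, _, _ = out.Account, out.Arn, out.UserId // identify the calling entity
+//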
+func (c *Client) GetCallerIdentity(ctx context.Context, params *GetCallerIdentityInput, optFns ...func(*Options)) (*GetCallerIdentityOutput, error) { + if params == nil { + params = &GetCallerIdentityInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetCallerIdentity", params, optFns, c.addOperationGetCallerIdentityMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetCallerIdentityOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetCallerIdentityInput struct { + noSmithyDocumentSerde +} + +// Contains the response to a successful GetCallerIdentity request, including +// information about the entity making the request. +type GetCallerIdentityOutput struct { + + // The Amazon Web Services account ID number of the account that owns or contains + // the calling entity. + Account *string + + // The Amazon Web Services ARN associated with the calling entity. + Arn *string + + // The unique identifier of the calling entity. The exact value depends on the + // type of entity that is making the call. The values returned are those listed in + // the aws:userid column in the Principal table (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable) + // found on the Policy Variables reference page in the IAM User Guide. + UserId *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetCallerIdentityMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsquery_serializeOpGetCallerIdentity{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsquery_deserializeOpGetCallerIdentity{}, middleware.After) + if err != nil { + return err + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addGetCallerIdentityResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetCallerIdentity(options.Region), middleware.Before); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if 
err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetCallerIdentity(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "sts", + OperationName: "GetCallerIdentity", + } +} + +// PresignGetCallerIdentity is used to generate a presigned HTTP Request which +// contains presigned URL, signed headers and HTTP method used. +func (c *PresignClient) PresignGetCallerIdentity(ctx context.Context, params *GetCallerIdentityInput, optFns ...func(*PresignOptions)) (*v4.PresignedHTTPRequest, error) { + if params == nil { + params = &GetCallerIdentityInput{} + } + options := c.options.copy() + for _, fn := range optFns { + fn(&options) + } + clientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption) + + result, _, err := c.client.invokeOperation(ctx, "GetCallerIdentity", params, clientOptFns, + c.client.addOperationGetCallerIdentityMiddlewares, + presignConverter(options).convertToPresignMiddleware, + ) + if err != nil { + return nil, err + } + + out := result.(*v4.PresignedHTTPRequest) + return out, nil +} + +type opGetCallerIdentityResolveEndpointMiddleware struct { + EndpointResolver EndpointResolverV2 + BuiltInResolver builtInParameterResolver +} + +func (*opGetCallerIdentityResolveEndpointMiddleware) ID() string { + return "ResolveEndpointV2" +} + +func (m *opGetCallerIdentityResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { + return next.HandleSerialize(ctx, in) + } + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.EndpointResolver == nil { + return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") + } + + params := EndpointParameters{} + + m.BuiltInResolver.ResolveBuiltIns(¶ms) + + var resolvedEndpoint smithyendpoints.Endpoint + resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) + if err != nil { + return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) + } + + req.URL = &resolvedEndpoint.URI + + for k := range resolvedEndpoint.Headers { + req.Header.Set( + k, + resolvedEndpoint.Headers.Get(k), + ) + } + + authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) + if err != nil { + var nfe *internalauth.NoAuthenticationSchemesFoundError + if errors.As(err, &nfe) { + // if no auth scheme is found, default to sigv4 + signingName := "sts" + signingRegion := m.BuiltInResolver.(*builtInResolver).Region + ctx = awsmiddleware.SetSigningName(ctx, signingName) + ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) + + } + var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError + if errors.As(err, &ue) { + return out, metadata, fmt.Errorf( + "This operation requests signer version(s) %v but the client only supports %v", + ue.UnsupportedSchemes, + internalauth.SupportedSchemes, + ) + } + } + + for _, authScheme := range authSchemes { + switch authScheme.(type) { + case *internalauth.AuthenticationSchemeV4: + v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) + var signingName, signingRegion string + if v4Scheme.SigningName == nil { + signingName = "sts" + } else { + 
signingName = *v4Scheme.SigningName + } + if v4Scheme.SigningRegion == nil { + signingRegion = m.BuiltInResolver.(*builtInResolver).Region + } else { + signingRegion = *v4Scheme.SigningRegion + } + if v4Scheme.DisableDoubleEncoding != nil { + // The signer sets an equivalent value at client initialization time. + // Setting this context value will cause the signer to extract it + // and override the value set at client initialization time. + ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) + } + ctx = awsmiddleware.SetSigningName(ctx, signingName) + ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) + break + case *internalauth.AuthenticationSchemeV4A: + v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) + if v4aScheme.SigningName == nil { + v4aScheme.SigningName = aws.String("sts") + } + if v4aScheme.DisableDoubleEncoding != nil { + // The signer sets an equivalent value at client initialization time. + // Setting this context value will cause the signer to extract it + // and override the value set at client initialization time. + ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) + } + ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) + ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) + break + case *internalauth.AuthenticationSchemeNone: + break + } + } + + return next.HandleSerialize(ctx, in) +} + +func addGetCallerIdentityResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { + return stack.Serialize.Insert(&opGetCallerIdentityResolveEndpointMiddleware{ + EndpointResolver: options.EndpointResolverV2, + BuiltInResolver: &builtInResolver{ + Region: options.Region, + UseDualStack: options.EndpointOptions.UseDualStackEndpoint, + UseFIPS: options.EndpointOptions.UseFIPSEndpoint, + Endpoint: options.BaseEndpoint, + }, + }, "ResolveEndpoint", middleware.After) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go new file mode 100644 index 00000000000..d577ef686e9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go @@ -0,0 +1,445 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sts + +import ( + "context" + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" + "github.com/aws/aws-sdk-go-v2/service/sts/types" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns a set of temporary security credentials (consisting of an access key +// ID, a secret access key, and a security token) for a user. A typical use is in a +// proxy application that gets temporary security credentials on behalf of +// distributed applications inside a corporate network. You must call the +// GetFederationToken operation using the long-term security credentials of an IAM +// user. As a result, this call is appropriate in contexts where those credentials +// can be safeguarded, usually in a server-based application. 
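+//
+// A minimal usage sketch (illustrative; client is an *sts.Client from
+// sts.NewFromConfig, "Bob" is the documentation example federated user name,
+// and sessionPolicyJSON is a hypothetical inline session policy document):
+//
+//	out, err := client.GetFederationToken(ctx, &sts.GetFederationTokenInput{
+//		Name:   aws.String("Bob"),
+//		Policy: aws.String(sessionPolicyJSON),
+//	})
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = out.Credentials // temporary credentials for the federated user
+//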
For a comparison of GetFederationToken with the other API operations that
+// produce temporary credentials, see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide.
+//
+// Although it is possible to call GetFederationToken using the security
+// credentials of an Amazon Web Services account root user rather than an IAM
+// user that you create for the purpose of a proxy application, we do not
+// recommend it. For more information, see Safeguard your root user credentials
+// and don't use them for everyday tasks (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#lock-away-credentials)
+// in the IAM User Guide.
+//
+// You can create a mobile-based or browser-based app that can authenticate
+// users using a web identity provider like Login with Amazon, Facebook, Google,
+// or an OpenID Connect-compatible identity provider. In this case, we recommend
+// that you use Amazon Cognito (http://aws.amazon.com/cognito/) or
+// AssumeRoleWithWebIdentity. For more information, see Federation Through a
+// Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity)
+// in the IAM User Guide.
+//
+// Session duration
+//
+// The temporary credentials are valid for the specified duration, from 900
+// seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours). The
+// default session duration is 43,200 seconds (12 hours). Temporary credentials
+// obtained by using the root user credentials have a maximum duration of 3,600
+// seconds (1 hour).
+//
+// Permissions
+//
+// You can use the temporary credentials created by GetFederationToken in any
+// Amazon Web Services service with the following exceptions:
+//   - You cannot call any IAM operations using the CLI or the Amazon Web
+//     Services API. This limitation does not apply to console sessions.
+//   - You cannot call any STS operations except GetCallerIdentity.
+//
+// You can use temporary credentials for single sign-on (SSO) to the console.
+//
+// You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+// to this operation. You can pass a single JSON policy document to use as an
+// inline session policy. You can also specify up to 10 managed policy Amazon
+// Resource Names (ARNs) to use as managed session policies. The plaintext that
+// you use for both inline and managed session policies can't exceed 2,048
+// characters. Though the session policy parameters are optional, if you do not
+// pass a policy, then the resulting federated user session has no permissions.
+// When you pass session policies, the session permissions are the intersection
+// of the IAM user policies and the session policies that you pass. This gives
+// you a way to further restrict the permissions for a federated user. You
+// cannot use session policies to grant more permissions than those that are
+// defined in the permissions policy of the IAM user. For more information, see
+// Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+// in the IAM User Guide.
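+//
+// A minimal usage sketch (an editor's illustration, not part of the generated
+// code; ctx is an assumed context.Context, cfg an aws.Config loaded elsewhere,
+// for example via config.LoadDefaultConfig, and sessionPolicyJSON a
+// hypothetical inline session policy document):
+//
+//	client := sts.NewFromConfig(cfg)
+//	out, err := client.GetFederationToken(ctx, &sts.GetFederationTokenInput{
+//		Name:            aws.String("Bob"),
+//		DurationSeconds: aws.Int32(3600),
+//		Policy:          aws.String(sessionPolicyJSON),
+//	})
+//	if err != nil {
+//		// handle error
+//	}
+//	creds := out.Credentials // temporary access key ID, secret key, session token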
+// For information about using GetFederationToken to create temporary security
+// credentials, see GetFederationToken—Federation Through a Custom Identity Broker (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken).
+//
+// You can use the credentials to access a resource that has a resource-based
+// policy. If that policy specifically references the federated user session in
+// the Principal element of the policy, the session has the permissions allowed
+// by the policy. These permissions are granted in addition to the permissions
+// granted by the session policies.
+//
+// Tags (Optional)
+//
+// You can pass tag key-value pairs to your session. These are called session
+// tags. For more information about session tags, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
+// in the IAM User Guide.
+//
+// You can create a mobile-based or browser-based app that can authenticate
+// users using a web identity provider like Login with Amazon, Facebook, Google,
+// or an OpenID Connect-compatible identity provider. In this case, we recommend
+// that you use Amazon Cognito (http://aws.amazon.com/cognito/) or
+// AssumeRoleWithWebIdentity. For more information, see Federation Through a
+// Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity)
+// in the IAM User Guide.
+//
+// An administrator must grant you the permissions necessary to pass session
+// tags. The administrator can also create granular permissions to allow you to
+// pass only specific session tags. For more information, see Tutorial: Using
+// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html)
+// in the IAM User Guide.
+//
+// Tag key–value pairs are not case sensitive, but case is preserved. This means
+// that you cannot have separate Department and department tag keys. Assume that
+// the user that you are federating has the Department = Marketing tag and you
+// pass the department = engineering session tag. Department and department are
+// not saved as separate tags, and the session tag passed in the request takes
+// precedence over the user tag.
+func (c *Client) GetFederationToken(ctx context.Context, params *GetFederationTokenInput, optFns ...func(*Options)) (*GetFederationTokenOutput, error) {
+	if params == nil {
+		params = &GetFederationTokenInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "GetFederationToken", params, optFns, c.addOperationGetFederationTokenMiddlewares)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*GetFederationTokenOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+type GetFederationTokenInput struct {
+
+	// The name of the federated user. The name is used as an identifier for the
+	// temporary security credentials (such as Bob). For example, you can reference
+	// the federated user name in a resource-based policy, such as in an Amazon S3
+	// bucket policy. The regex used to validate this parameter is a string of
+	// characters consisting of upper- and lower-case alphanumeric characters with no
+	// spaces. You can also include underscores or any of the following characters:
+	// =,.@-
+	//
+	// This member is required.
+	Name *string
+
+	// The duration, in seconds, that the session should last.
Acceptable durations + // for federation sessions range from 900 seconds (15 minutes) to 129,600 seconds + // (36 hours), with 43,200 seconds (12 hours) as the default. Sessions obtained + // using root user credentials are restricted to a maximum of 3,600 seconds (one + // hour). If the specified duration is longer than one hour, the session obtained + // by using root user credentials defaults to one hour. + DurationSeconds *int32 + + // An IAM policy in JSON format that you want to use as an inline session policy. + // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // to this operation. You can pass a single JSON policy document to use as an + // inline session policy. You can also specify up to 10 managed policy Amazon + // Resource Names (ARNs) to use as managed session policies. This parameter is + // optional. However, if you do not pass any session policies, then the resulting + // federated user session has no permissions. When you pass session policies, the + // session permissions are the intersection of the IAM user policies and the + // session policies that you pass. This gives you a way to further restrict the + // permissions for a federated user. You cannot use session policies to grant more + // permissions than those that are defined in the permissions policy of the IAM + // user. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. The resulting credentials can be used to access a + // resource that has a resource-based policy. If that policy specifically + // references the federated user session in the Principal element of the policy, + // the session has the permissions allowed by the policy. These permissions are + // granted in addition to the permissions that are granted by the session policies. + // The plaintext that you use for both inline and managed session policies can't + // exceed 2,048 characters. The JSON policy characters can be any ASCII character + // from the space character to the end of the valid character list (\u0020 through + // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage + // return (\u000D) characters. An Amazon Web Services conversion compresses the + // passed inline session policy, managed policy ARNs, and session tags into a + // packed binary format that has a separate limit. Your request can fail for this + // limit even if your plaintext meets the other requirements. The PackedPolicySize + // response element indicates by percentage how close the policies and tags for + // your request are to the upper size limit. + Policy *string + + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want to + // use as a managed session policy. The policies must exist in the same account as + // the IAM user that is requesting federated access. You must pass an inline or + // managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // to this operation. You can pass a single JSON policy document to use as an + // inline session policy. You can also specify up to 10 managed policy Amazon + // Resource Names (ARNs) to use as managed session policies. The plaintext that you + // use for both inline and managed session policies can't exceed 2,048 characters. + // You can provide up to 10 managed policy ARNs. 
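For example, a managed policy ARN has the form
+	// arn:aws:iam::aws:policy/ReadOnlyAccess (an illustrative AWS managed policy).
+	//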
For more information about ARNs, + // see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the Amazon Web Services General Reference. This parameter is optional. + // However, if you do not pass any session policies, then the resulting federated + // user session has no permissions. When you pass session policies, the session + // permissions are the intersection of the IAM user policies and the session + // policies that you pass. This gives you a way to further restrict the permissions + // for a federated user. You cannot use session policies to grant more permissions + // than those that are defined in the permissions policy of the IAM user. For more + // information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. The resulting credentials can be used to access a + // resource that has a resource-based policy. If that policy specifically + // references the federated user session in the Principal element of the policy, + // the session has the permissions allowed by the policy. These permissions are + // granted in addition to the permissions that are granted by the session policies. + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has a + // separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the upper + // size limit. + PolicyArns []types.PolicyDescriptorType + + // A list of session tags. Each session tag consists of a key name and an + // associated value. For more information about session tags, see Passing Session + // Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) + // in the IAM User Guide. This parameter is optional. You can pass up to 50 session + // tags. The plaintext session tag keys can’t exceed 128 characters and the values + // can’t exceed 256 characters. For these and additional limits, see IAM and STS + // Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // in the IAM User Guide. An Amazon Web Services conversion compresses the passed + // inline session policy, managed policy ARNs, and session tags into a packed + // binary format that has a separate limit. Your request can fail for this limit + // even if your plaintext meets the other requirements. The PackedPolicySize + // response element indicates by percentage how close the policies and tags for + // your request are to the upper size limit. You can pass a session tag with the + // same key as a tag that is already attached to the user you are federating. When + // you do, session tags override a user tag with the same key. Tag key–value pairs + // are not case sensitive, but case is preserved. This means that you cannot have + // separate Department and department tag keys. Assume that the role has the + // Department = Marketing tag and you pass the department = engineering session + // tag. Department and department are not saved as separate tags, and the session + // tag passed in the request takes precedence over the role tag. 
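+	//
+	// For example (an editor's illustration; the Department/Engineering values are
+	// placeholders, and types.Tag is the tag type defined in this package):
+	//
+	//	Tags: []types.Tag{
+	//		{Key: aws.String("Department"), Value: aws.String("Engineering")},
+	//	}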
+ Tags []types.Tag + + noSmithyDocumentSerde +} + +// Contains the response to a successful GetFederationToken request, including +// temporary Amazon Web Services credentials that can be used to make Amazon Web +// Services requests. +type GetFederationTokenOutput struct { + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. The size of the security token + // that STS API operations return is not fixed. We strongly recommend that you make + // no assumptions about the maximum size. + Credentials *types.Credentials + + // Identifiers for the federated user associated with the credentials (such as + // arn:aws:sts::123456789012:federated-user/Bob or 123456789012:Bob ). You can use + // the federated user's ARN in your resource-based policies, such as an Amazon S3 + // bucket policy. + FederatedUser *types.FederatedUser + + // A percentage value that indicates the packed size of the session policies and + // session tags combined passed in the request. The request fails if the packed + // size is greater than 100 percent, which means the policies and tags exceeded the + // allowed space. + PackedPolicySize *int32 + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetFederationTokenMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsquery_serializeOpGetFederationToken{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsquery_deserializeOpGetFederationToken{}, middleware.After) + if err != nil { + return err + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addGetFederationTokenResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addOpGetFederationTokenValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetFederationToken(options.Region), middleware.Before); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = 
addendpointDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetFederationToken(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "sts", + OperationName: "GetFederationToken", + } +} + +type opGetFederationTokenResolveEndpointMiddleware struct { + EndpointResolver EndpointResolverV2 + BuiltInResolver builtInParameterResolver +} + +func (*opGetFederationTokenResolveEndpointMiddleware) ID() string { + return "ResolveEndpointV2" +} + +func (m *opGetFederationTokenResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { + return next.HandleSerialize(ctx, in) + } + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.EndpointResolver == nil { + return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") + } + + params := EndpointParameters{} + + m.BuiltInResolver.ResolveBuiltIns(¶ms) + + var resolvedEndpoint smithyendpoints.Endpoint + resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) + if err != nil { + return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) + } + + req.URL = &resolvedEndpoint.URI + + for k := range resolvedEndpoint.Headers { + req.Header.Set( + k, + resolvedEndpoint.Headers.Get(k), + ) + } + + authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) + if err != nil { + var nfe *internalauth.NoAuthenticationSchemesFoundError + if errors.As(err, &nfe) { + // if no auth scheme is found, default to sigv4 + signingName := "sts" + signingRegion := m.BuiltInResolver.(*builtInResolver).Region + ctx = awsmiddleware.SetSigningName(ctx, signingName) + ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) + + } + var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError + if errors.As(err, &ue) { + return out, metadata, fmt.Errorf( + "This operation requests signer version(s) %v but the client only supports %v", + ue.UnsupportedSchemes, + internalauth.SupportedSchemes, + ) + } + } + + for _, authScheme := range authSchemes { + switch authScheme.(type) { + case *internalauth.AuthenticationSchemeV4: + v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) + var signingName, signingRegion string + if v4Scheme.SigningName == nil { + signingName = "sts" + } else { + signingName = *v4Scheme.SigningName + } + if v4Scheme.SigningRegion == nil { + signingRegion = m.BuiltInResolver.(*builtInResolver).Region + } else { + signingRegion = *v4Scheme.SigningRegion + } + if v4Scheme.DisableDoubleEncoding != nil { + // The signer sets an equivalent value at client initialization time. + // Setting this context value will cause the signer to extract it + // and override the value set at client initialization time. 
+ ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) + } + ctx = awsmiddleware.SetSigningName(ctx, signingName) + ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) + break + case *internalauth.AuthenticationSchemeV4A: + v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) + if v4aScheme.SigningName == nil { + v4aScheme.SigningName = aws.String("sts") + } + if v4aScheme.DisableDoubleEncoding != nil { + // The signer sets an equivalent value at client initialization time. + // Setting this context value will cause the signer to extract it + // and override the value set at client initialization time. + ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) + } + ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) + ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) + break + case *internalauth.AuthenticationSchemeNone: + break + } + } + + return next.HandleSerialize(ctx, in) +} + +func addGetFederationTokenResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { + return stack.Serialize.Insert(&opGetFederationTokenResolveEndpointMiddleware{ + EndpointResolver: options.EndpointResolverV2, + BuiltInResolver: &builtInResolver{ + Region: options.Region, + UseDualStack: options.EndpointOptions.UseDualStackEndpoint, + UseFIPS: options.EndpointOptions.UseFIPSEndpoint, + Endpoint: options.BaseEndpoint, + }, + }, "ResolveEndpoint", middleware.After) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go new file mode 100644 index 00000000000..7a2345e8031 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go @@ -0,0 +1,328 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sts + +import ( + "context" + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" + "github.com/aws/aws-sdk-go-v2/service/sts/types" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns a set of temporary credentials for an Amazon Web Services account or +// IAM user. The credentials consist of an access key ID, a secret access key, and +// a security token. Typically, you use GetSessionToken if you want to use MFA to +// protect programmatic calls to specific Amazon Web Services API operations like +// Amazon EC2 StopInstances . MFA-enabled IAM users must call GetSessionToken and +// submit an MFA code that is associated with their MFA device. Using the temporary +// security credentials that the call returns, IAM users can then make programmatic +// calls to API operations that require MFA authentication. An incorrect MFA code +// causes the API to return an access denied error. For a comparison of +// GetSessionToken with the other API operations that produce temporary +// credentials, see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. 
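+//
+// A minimal MFA-protected usage sketch (an editor's illustration, not part of
+// the generated code; ctx is an assumed context.Context, cfg an assumed
+// aws.Config, and the serial number and token code are placeholders):
+//
+//	client := sts.NewFromConfig(cfg)
+//	out, err := client.GetSessionToken(ctx, &sts.GetSessionTokenInput{
+//		DurationSeconds: aws.Int32(3600),
+//		SerialNumber:    aws.String("arn:aws:iam::123456789012:mfa/user"),
+//		TokenCode:       aws.String("123456"),
+//	})
+//	if err != nil {
+//		// handle error
+//	}
+//	creds := out.Credentials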
+// No permissions are required for users to perform this operation. The purpose
+// of the sts:GetSessionToken operation is to authenticate the user using MFA.
+// You cannot use policies to control authentication operations. For more
+// information, see Permissions for GetSessionToken (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getsessiontoken.html)
+// in the IAM User Guide.
+//
+// Session Duration
+//
+// The GetSessionToken operation must be called by using the long-term Amazon
+// Web Services security credentials of an IAM user. Credentials that are
+// created by IAM users are valid for the duration that you specify. This
+// duration can range from 900 seconds (15 minutes) up to a maximum of 129,600
+// seconds (36 hours), with a default of 43,200 seconds (12 hours). Credentials
+// based on account credentials can range from 900 seconds (15 minutes) up to
+// 3,600 seconds (1 hour), with a default of 1 hour.
+//
+// Permissions
+//
+// The temporary security credentials created by GetSessionToken can be used to
+// make API calls to any Amazon Web Services service with the following
+// exceptions:
+//   - You cannot call any IAM API operations unless MFA authentication
+//     information is included in the request.
+//   - You cannot call any STS API except AssumeRole or GetCallerIdentity.
+//
+// The credentials that GetSessionToken returns are based on permissions
+// associated with the IAM user whose credentials were used to call the
+// operation. The temporary credentials have the same permissions as the IAM
+// user.
+//
+// Although it is possible to call GetSessionToken using the security
+// credentials of an Amazon Web Services account root user rather than an IAM
+// user, we do not recommend it. If GetSessionToken is called using root user
+// credentials, the temporary credentials have root user permissions. For more
+// information, see Safeguard your root user credentials and don't use them for
+// everyday tasks (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#lock-away-credentials)
+// in the IAM User Guide.
+//
+// For more information about using GetSessionToken to create temporary
+// credentials, see Temporary Credentials for Users in Untrusted Environments (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken)
+// in the IAM User Guide.
+func (c *Client) GetSessionToken(ctx context.Context, params *GetSessionTokenInput, optFns ...func(*Options)) (*GetSessionTokenOutput, error) {
+	if params == nil {
+		params = &GetSessionTokenInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "GetSessionToken", params, optFns, c.addOperationGetSessionTokenMiddlewares)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*GetSessionTokenOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+type GetSessionTokenInput struct {
+
+	// The duration, in seconds, that the credentials should remain valid. Acceptable
+	// durations for IAM user sessions range from 900 seconds (15 minutes) to 129,600
+	// seconds (36 hours), with 43,200 seconds (12 hours) as the default. Sessions for
+	// Amazon Web Services account owners are restricted to a maximum of 3,600 seconds
+	// (one hour). If the duration is longer than one hour, the session for Amazon Web
+	// Services account owners defaults to one hour.
+	DurationSeconds *int32
+
+	// The identification number of the MFA device that is associated with the IAM
+	// user who is making the GetSessionToken call.
Specify this value if the IAM user + // has a policy that requires MFA authentication. The value is either the serial + // number for a hardware device (such as GAHT12345678 ) or an Amazon Resource Name + // (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user ). You + // can find the device for an IAM user by going to the Amazon Web Services + // Management Console and viewing the user's security credentials. The regex used + // to validate this parameter is a string of characters consisting of upper- and + // lower-case alphanumeric characters with no spaces. You can also include + // underscores or any of the following characters: =,.@:/- + SerialNumber *string + + // The value provided by the MFA device, if MFA is required. If any policy + // requires the IAM user to submit an MFA code, specify this value. If MFA + // authentication is required, the user must provide a code when requesting a set + // of temporary security credentials. A user who fails to provide the code receives + // an "access denied" response when requesting resources that require MFA + // authentication. The format for this parameter, as described by its regex + // pattern, is a sequence of six numeric digits. + TokenCode *string + + noSmithyDocumentSerde +} + +// Contains the response to a successful GetSessionToken request, including +// temporary Amazon Web Services credentials that can be used to make Amazon Web +// Services requests. +type GetSessionTokenOutput struct { + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. The size of the security token + // that STS API operations return is not fixed. We strongly recommend that you make + // no assumptions about the maximum size. + Credentials *types.Credentials + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetSessionTokenMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsquery_serializeOpGetSessionToken{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsquery_deserializeOpGetSessionToken{}, middleware.After) + if err != nil { + return err + } + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addGetSessionTokenResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetSessionToken(options.Region), middleware.Before); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetSessionToken(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "sts", + OperationName: "GetSessionToken", + } +} + +type opGetSessionTokenResolveEndpointMiddleware struct { + EndpointResolver EndpointResolverV2 + BuiltInResolver builtInParameterResolver +} + +func (*opGetSessionTokenResolveEndpointMiddleware) ID() string { + return "ResolveEndpointV2" +} + +func (m *opGetSessionTokenResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { + return next.HandleSerialize(ctx, in) + } + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.EndpointResolver == nil { + return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") + } + + params := EndpointParameters{} + + m.BuiltInResolver.ResolveBuiltIns(¶ms) + + var resolvedEndpoint smithyendpoints.Endpoint + resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) + if 
err != nil { + return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) + } + + req.URL = &resolvedEndpoint.URI + + for k := range resolvedEndpoint.Headers { + req.Header.Set( + k, + resolvedEndpoint.Headers.Get(k), + ) + } + + authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) + if err != nil { + var nfe *internalauth.NoAuthenticationSchemesFoundError + if errors.As(err, &nfe) { + // if no auth scheme is found, default to sigv4 + signingName := "sts" + signingRegion := m.BuiltInResolver.(*builtInResolver).Region + ctx = awsmiddleware.SetSigningName(ctx, signingName) + ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) + + } + var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError + if errors.As(err, &ue) { + return out, metadata, fmt.Errorf( + "This operation requests signer version(s) %v but the client only supports %v", + ue.UnsupportedSchemes, + internalauth.SupportedSchemes, + ) + } + } + + for _, authScheme := range authSchemes { + switch authScheme.(type) { + case *internalauth.AuthenticationSchemeV4: + v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) + var signingName, signingRegion string + if v4Scheme.SigningName == nil { + signingName = "sts" + } else { + signingName = *v4Scheme.SigningName + } + if v4Scheme.SigningRegion == nil { + signingRegion = m.BuiltInResolver.(*builtInResolver).Region + } else { + signingRegion = *v4Scheme.SigningRegion + } + if v4Scheme.DisableDoubleEncoding != nil { + // The signer sets an equivalent value at client initialization time. + // Setting this context value will cause the signer to extract it + // and override the value set at client initialization time. + ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) + } + ctx = awsmiddleware.SetSigningName(ctx, signingName) + ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) + break + case *internalauth.AuthenticationSchemeV4A: + v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) + if v4aScheme.SigningName == nil { + v4aScheme.SigningName = aws.String("sts") + } + if v4aScheme.DisableDoubleEncoding != nil { + // The signer sets an equivalent value at client initialization time. + // Setting this context value will cause the signer to extract it + // and override the value set at client initialization time. 
+ ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) + } + ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) + ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) + break + case *internalauth.AuthenticationSchemeNone: + break + } + } + + return next.HandleSerialize(ctx, in) +} + +func addGetSessionTokenResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { + return stack.Serialize.Insert(&opGetSessionTokenResolveEndpointMiddleware{ + EndpointResolver: options.EndpointResolverV2, + BuiltInResolver: &builtInResolver{ + Region: options.Region, + UseDualStack: options.EndpointOptions.UseDualStackEndpoint, + UseFIPS: options.EndpointOptions.UseFIPSEndpoint, + Endpoint: options.BaseEndpoint, + }, + }, "ResolveEndpoint", middleware.After) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go new file mode 100644 index 00000000000..5d634ce35c8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go @@ -0,0 +1,2507 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package sts + +import ( + "bytes" + "context" + "encoding/xml" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + awsxml "github.com/aws/aws-sdk-go-v2/aws/protocol/xml" + "github.com/aws/aws-sdk-go-v2/service/sts/types" + smithy "github.com/aws/smithy-go" + smithyxml "github.com/aws/smithy-go/encoding/xml" + smithyio "github.com/aws/smithy-go/io" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithytime "github.com/aws/smithy-go/time" + smithyhttp "github.com/aws/smithy-go/transport/http" + "io" + "strconv" + "strings" +) + +type awsAwsquery_deserializeOpAssumeRole struct { +} + +func (*awsAwsquery_deserializeOpAssumeRole) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsquery_deserializeOpAssumeRole) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsquery_deserializeOpErrorAssumeRole(response, &metadata) + } + output := &AssumeRoleOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("AssumeRoleResult") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + decoder = 
smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeOpDocumentAssumeRoleOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsquery_deserializeOpErrorAssumeRole(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) + if err != nil { + return err + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("ExpiredTokenException", errorCode): + return awsAwsquery_deserializeErrorExpiredTokenException(response, errorBody) + + case strings.EqualFold("MalformedPolicyDocument", errorCode): + return awsAwsquery_deserializeErrorMalformedPolicyDocumentException(response, errorBody) + + case strings.EqualFold("PackedPolicyTooLarge", errorCode): + return awsAwsquery_deserializeErrorPackedPolicyTooLargeException(response, errorBody) + + case strings.EqualFold("RegionDisabledException", errorCode): + return awsAwsquery_deserializeErrorRegionDisabledException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsquery_deserializeOpAssumeRoleWithSAML struct { +} + +func (*awsAwsquery_deserializeOpAssumeRoleWithSAML) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsquery_deserializeOpAssumeRoleWithSAML) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsquery_deserializeOpErrorAssumeRoleWithSAML(response, &metadata) + } + output := &AssumeRoleWithSAMLOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = 
decoder.GetElement("AssumeRoleWithSAMLResult") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeOpDocumentAssumeRoleWithSAMLOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsquery_deserializeOpErrorAssumeRoleWithSAML(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) + if err != nil { + return err + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("ExpiredTokenException", errorCode): + return awsAwsquery_deserializeErrorExpiredTokenException(response, errorBody) + + case strings.EqualFold("IDPRejectedClaim", errorCode): + return awsAwsquery_deserializeErrorIDPRejectedClaimException(response, errorBody) + + case strings.EqualFold("InvalidIdentityToken", errorCode): + return awsAwsquery_deserializeErrorInvalidIdentityTokenException(response, errorBody) + + case strings.EqualFold("MalformedPolicyDocument", errorCode): + return awsAwsquery_deserializeErrorMalformedPolicyDocumentException(response, errorBody) + + case strings.EqualFold("PackedPolicyTooLarge", errorCode): + return awsAwsquery_deserializeErrorPackedPolicyTooLargeException(response, errorBody) + + case strings.EqualFold("RegionDisabledException", errorCode): + return awsAwsquery_deserializeErrorRegionDisabledException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsquery_deserializeOpAssumeRoleWithWebIdentity struct { +} + +func (*awsAwsquery_deserializeOpAssumeRoleWithWebIdentity) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsquery_deserializeOpAssumeRoleWithWebIdentity) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsquery_deserializeOpErrorAssumeRoleWithWebIdentity(response, &metadata) + } + output := 
&AssumeRoleWithWebIdentityOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("AssumeRoleWithWebIdentityResult") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeOpDocumentAssumeRoleWithWebIdentityOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsquery_deserializeOpErrorAssumeRoleWithWebIdentity(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) + if err != nil { + return err + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("ExpiredTokenException", errorCode): + return awsAwsquery_deserializeErrorExpiredTokenException(response, errorBody) + + case strings.EqualFold("IDPCommunicationError", errorCode): + return awsAwsquery_deserializeErrorIDPCommunicationErrorException(response, errorBody) + + case strings.EqualFold("IDPRejectedClaim", errorCode): + return awsAwsquery_deserializeErrorIDPRejectedClaimException(response, errorBody) + + case strings.EqualFold("InvalidIdentityToken", errorCode): + return awsAwsquery_deserializeErrorInvalidIdentityTokenException(response, errorBody) + + case strings.EqualFold("MalformedPolicyDocument", errorCode): + return awsAwsquery_deserializeErrorMalformedPolicyDocumentException(response, errorBody) + + case strings.EqualFold("PackedPolicyTooLarge", errorCode): + return awsAwsquery_deserializeErrorPackedPolicyTooLargeException(response, errorBody) + + case strings.EqualFold("RegionDisabledException", errorCode): + return awsAwsquery_deserializeErrorRegionDisabledException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsquery_deserializeOpDecodeAuthorizationMessage struct { +} + +func (*awsAwsquery_deserializeOpDecodeAuthorizationMessage) 
ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsquery_deserializeOpDecodeAuthorizationMessage) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsquery_deserializeOpErrorDecodeAuthorizationMessage(response, &metadata) + } + output := &DecodeAuthorizationMessageOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("DecodeAuthorizationMessageResult") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeOpDocumentDecodeAuthorizationMessageOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsquery_deserializeOpErrorDecodeAuthorizationMessage(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) + if err != nil { + return err + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("InvalidAuthorizationMessageException", errorCode): + return awsAwsquery_deserializeErrorInvalidAuthorizationMessageException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsquery_deserializeOpGetAccessKeyInfo struct { +} + +func (*awsAwsquery_deserializeOpGetAccessKeyInfo) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsquery_deserializeOpGetAccessKeyInfo) 
HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsquery_deserializeOpErrorGetAccessKeyInfo(response, &metadata) + } + output := &GetAccessKeyInfoOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("GetAccessKeyInfoResult") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeOpDocumentGetAccessKeyInfoOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsquery_deserializeOpErrorGetAccessKeyInfo(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) + if err != nil { + return err + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsquery_deserializeOpGetCallerIdentity struct { +} + +func (*awsAwsquery_deserializeOpGetCallerIdentity) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsquery_deserializeOpGetCallerIdentity) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := 
out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsquery_deserializeOpErrorGetCallerIdentity(response, &metadata) + } + output := &GetCallerIdentityOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("GetCallerIdentityResult") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeOpDocumentGetCallerIdentityOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsquery_deserializeOpErrorGetCallerIdentity(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) + if err != nil { + return err + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsquery_deserializeOpGetFederationToken struct { +} + +func (*awsAwsquery_deserializeOpGetFederationToken) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsquery_deserializeOpGetFederationToken) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsquery_deserializeOpErrorGetFederationToken(response, 
&metadata) + } + output := &GetFederationTokenOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("GetFederationTokenResult") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeOpDocumentGetFederationTokenOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsquery_deserializeOpErrorGetFederationToken(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) + if err != nil { + return err + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("MalformedPolicyDocument", errorCode): + return awsAwsquery_deserializeErrorMalformedPolicyDocumentException(response, errorBody) + + case strings.EqualFold("PackedPolicyTooLarge", errorCode): + return awsAwsquery_deserializeErrorPackedPolicyTooLargeException(response, errorBody) + + case strings.EqualFold("RegionDisabledException", errorCode): + return awsAwsquery_deserializeErrorRegionDisabledException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsquery_deserializeOpGetSessionToken struct { +} + +func (*awsAwsquery_deserializeOpGetSessionToken) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsquery_deserializeOpGetSessionToken) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if 
response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsquery_deserializeOpErrorGetSessionToken(response, &metadata) + } + output := &GetSessionTokenOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("GetSessionTokenResult") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeOpDocumentGetSessionTokenOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsquery_deserializeOpErrorGetSessionToken(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) + if err != nil { + return err + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("RegionDisabledException", errorCode): + return awsAwsquery_deserializeErrorRegionDisabledException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsAwsquery_deserializeErrorExpiredTokenException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.ExpiredTokenException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(errorBody, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return output + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("Error") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + 
Snapshot: snapshot.Bytes(), + } + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeDocumentExpiredTokenException(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return output +} + +func awsAwsquery_deserializeErrorIDPCommunicationErrorException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.IDPCommunicationErrorException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(errorBody, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return output + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("Error") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeDocumentIDPCommunicationErrorException(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return output +} + +func awsAwsquery_deserializeErrorIDPRejectedClaimException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.IDPRejectedClaimException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(errorBody, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return output + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("Error") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeDocumentIDPRejectedClaimException(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return output +} + +func awsAwsquery_deserializeErrorInvalidAuthorizationMessageException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.InvalidAuthorizationMessageException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(errorBody, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return output + } + if err != nil 
{ + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("Error") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeDocumentInvalidAuthorizationMessageException(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return output +} + +func awsAwsquery_deserializeErrorInvalidIdentityTokenException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.InvalidIdentityTokenException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(errorBody, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return output + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("Error") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeDocumentInvalidIdentityTokenException(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return output +} + +func awsAwsquery_deserializeErrorMalformedPolicyDocumentException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.MalformedPolicyDocumentException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(errorBody, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return output + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("Error") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeDocumentMalformedPolicyDocumentException(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + 
Snapshot: snapshot.Bytes(), + } + } + + return output +} + +func awsAwsquery_deserializeErrorPackedPolicyTooLargeException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.PackedPolicyTooLargeException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(errorBody, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return output + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("Error") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeDocumentPackedPolicyTooLargeException(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return output +} + +func awsAwsquery_deserializeErrorRegionDisabledException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.RegionDisabledException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(errorBody, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return output + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("Error") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeDocumentRegionDisabledException(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return output +} + +func awsAwsquery_deserializeDocumentAssumedRoleUser(v **types.AssumedRoleUser, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.AssumedRoleUser + if *v == nil { + sv = &types.AssumedRoleUser{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Arn", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Arn = ptr.String(xtv) + } + + case strings.EqualFold("AssumedRoleId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return 
err + } + if val == nil { + break + } + { + xtv := string(val) + sv.AssumedRoleId = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeDocumentCredentials(v **types.Credentials, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.Credentials + if *v == nil { + sv = &types.Credentials{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("AccessKeyId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.AccessKeyId = ptr.String(xtv) + } + + case strings.EqualFold("Expiration", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + t, err := smithytime.ParseDateTime(xtv) + if err != nil { + return err + } + sv.Expiration = ptr.Time(t) + } + + case strings.EqualFold("SecretAccessKey", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.SecretAccessKey = ptr.String(xtv) + } + + case strings.EqualFold("SessionToken", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.SessionToken = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeDocumentExpiredTokenException(v **types.ExpiredTokenException, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.ExpiredTokenException + if *v == nil { + sv = &types.ExpiredTokenException{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("message", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Message = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeDocumentFederatedUser(v **types.FederatedUser, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.FederatedUser + if *v == nil { + sv = &types.FederatedUser{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Arn", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Arn = 
ptr.String(xtv) + } + + case strings.EqualFold("FederatedUserId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.FederatedUserId = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeDocumentIDPCommunicationErrorException(v **types.IDPCommunicationErrorException, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.IDPCommunicationErrorException + if *v == nil { + sv = &types.IDPCommunicationErrorException{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("message", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Message = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeDocumentIDPRejectedClaimException(v **types.IDPRejectedClaimException, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.IDPRejectedClaimException + if *v == nil { + sv = &types.IDPRejectedClaimException{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("message", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Message = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeDocumentInvalidAuthorizationMessageException(v **types.InvalidAuthorizationMessageException, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.InvalidAuthorizationMessageException + if *v == nil { + sv = &types.InvalidAuthorizationMessageException{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("message", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Message = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeDocumentInvalidIdentityTokenException(v **types.InvalidIdentityTokenException, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected 
nil of type %T", v) + } + var sv *types.InvalidIdentityTokenException + if *v == nil { + sv = &types.InvalidIdentityTokenException{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("message", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Message = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeDocumentMalformedPolicyDocumentException(v **types.MalformedPolicyDocumentException, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.MalformedPolicyDocumentException + if *v == nil { + sv = &types.MalformedPolicyDocumentException{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("message", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Message = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeDocumentPackedPolicyTooLargeException(v **types.PackedPolicyTooLargeException, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.PackedPolicyTooLargeException + if *v == nil { + sv = &types.PackedPolicyTooLargeException{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("message", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Message = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeDocumentRegionDisabledException(v **types.RegionDisabledException, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.RegionDisabledException + if *v == nil { + sv = &types.RegionDisabledException{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("message", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Message = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = 
decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeOpDocumentAssumeRoleOutput(v **AssumeRoleOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *AssumeRoleOutput + if *v == nil { + sv = &AssumeRoleOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("AssumedRoleUser", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsAwsquery_deserializeDocumentAssumedRoleUser(&sv.AssumedRoleUser, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Credentials", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsAwsquery_deserializeDocumentCredentials(&sv.Credentials, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("PackedPolicySize", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.PackedPolicySize = ptr.Int32(int32(i64)) + } + + case strings.EqualFold("SourceIdentity", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.SourceIdentity = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeOpDocumentAssumeRoleWithSAMLOutput(v **AssumeRoleWithSAMLOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *AssumeRoleWithSAMLOutput + if *v == nil { + sv = &AssumeRoleWithSAMLOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("AssumedRoleUser", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsAwsquery_deserializeDocumentAssumedRoleUser(&sv.AssumedRoleUser, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Audience", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Audience = ptr.String(xtv) + } + + case strings.EqualFold("Credentials", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsAwsquery_deserializeDocumentCredentials(&sv.Credentials, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Issuer", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Issuer = ptr.String(xtv) + } + + case strings.EqualFold("NameQualifier", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.NameQualifier = ptr.String(xtv) + } + + case strings.EqualFold("PackedPolicySize", t.Name.Local): + val, err := 
decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.PackedPolicySize = ptr.Int32(int32(i64)) + } + + case strings.EqualFold("SourceIdentity", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.SourceIdentity = ptr.String(xtv) + } + + case strings.EqualFold("Subject", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Subject = ptr.String(xtv) + } + + case strings.EqualFold("SubjectType", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.SubjectType = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeOpDocumentAssumeRoleWithWebIdentityOutput(v **AssumeRoleWithWebIdentityOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *AssumeRoleWithWebIdentityOutput + if *v == nil { + sv = &AssumeRoleWithWebIdentityOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("AssumedRoleUser", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsAwsquery_deserializeDocumentAssumedRoleUser(&sv.AssumedRoleUser, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Audience", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Audience = ptr.String(xtv) + } + + case strings.EqualFold("Credentials", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsAwsquery_deserializeDocumentCredentials(&sv.Credentials, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("PackedPolicySize", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.PackedPolicySize = ptr.Int32(int32(i64)) + } + + case strings.EqualFold("Provider", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Provider = ptr.String(xtv) + } + + case strings.EqualFold("SourceIdentity", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.SourceIdentity = ptr.String(xtv) + } + + case strings.EqualFold("SubjectFromWebIdentityToken", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.SubjectFromWebIdentityToken = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func 
awsAwsquery_deserializeOpDocumentDecodeAuthorizationMessageOutput(v **DecodeAuthorizationMessageOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *DecodeAuthorizationMessageOutput + if *v == nil { + sv = &DecodeAuthorizationMessageOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("DecodedMessage", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.DecodedMessage = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeOpDocumentGetAccessKeyInfoOutput(v **GetAccessKeyInfoOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetAccessKeyInfoOutput + if *v == nil { + sv = &GetAccessKeyInfoOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Account", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Account = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeOpDocumentGetCallerIdentityOutput(v **GetCallerIdentityOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetCallerIdentityOutput + if *v == nil { + sv = &GetCallerIdentityOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Account", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Account = ptr.String(xtv) + } + + case strings.EqualFold("Arn", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Arn = ptr.String(xtv) + } + + case strings.EqualFold("UserId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.UserId = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeOpDocumentGetFederationTokenOutput(v **GetFederationTokenOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetFederationTokenOutput + if *v == nil { + sv = &GetFederationTokenOutput{} + } else { + sv = *v + } + + for { 
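+ // Descriptive note, not part of the generated code: every document
+ // deserializer in this file follows the same loop shape. It pulls the next
+ // XML token, dispatches on the element name with strings.EqualFold, and
+ // calls Skip on anything unrecognized, so unknown response fields are
+ // tolerated instead of failing the decode.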
+ t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Credentials", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsAwsquery_deserializeDocumentCredentials(&sv.Credentials, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("FederatedUser", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsAwsquery_deserializeDocumentFederatedUser(&sv.FederatedUser, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("PackedPolicySize", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.PackedPolicySize = ptr.Int32(int32(i64)) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeOpDocumentGetSessionTokenOutput(v **GetSessionTokenOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetSessionTokenOutput + if *v == nil { + sv = &GetSessionTokenOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Credentials", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsAwsquery_deserializeDocumentCredentials(&sv.Credentials, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go new file mode 100644 index 00000000000..d963fd8d19a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go @@ -0,0 +1,11 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +// Package sts provides the API client, operations, and parameter types for AWS +// Security Token Service. +// +// Security Token Service Security Token Service (STS) enables you to request +// temporary, limited-privilege credentials for users. This guide provides +// descriptions of the STS API. For more information about using this service, see +// Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html) +// . +package sts diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go new file mode 100644 index 00000000000..cb5d56fd9c6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go @@ -0,0 +1,996 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
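+// Illustrative sketch, not produced by the code generator: doc.go above
+// describes the STS client at a high level. A minimal, assumed end-to-end
+// use of the client, which exercises the generated deserializers in this
+// package (error handling elided), might look like:
+//
+//	cfg, err := config.LoadDefaultConfig(context.TODO()) // github.com/aws/aws-sdk-go-v2/config
+//	if err != nil {
+//		panic(err)
+//	}
+//	client := sts.NewFromConfig(cfg)
+//	out, err := client.GetCallerIdentity(context.TODO(), &sts.GetCallerIdentityInput{})
+//	if err == nil {
+//		fmt.Println(aws.ToString(out.Account), aws.ToString(out.Arn))
+//	}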
+ +package sts + +import ( + "context" + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" + "github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn" + internalendpoints "github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints" + smithy "github.com/aws/smithy-go" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" + "net/http" + "net/url" + "os" + "strings" +) + +// EndpointResolverOptions is the service endpoint resolver options +type EndpointResolverOptions = internalendpoints.Options + +// EndpointResolver interface for resolving service endpoints. +type EndpointResolver interface { + ResolveEndpoint(region string, options EndpointResolverOptions) (aws.Endpoint, error) +} + +var _ EndpointResolver = &internalendpoints.Resolver{} + +// NewDefaultEndpointResolver constructs a new service endpoint resolver +func NewDefaultEndpointResolver() *internalendpoints.Resolver { + return internalendpoints.New() +} + +// EndpointResolverFunc is a helper utility that wraps a function so it satisfies +// the EndpointResolver interface. This is useful when you want to add additional +// endpoint resolving logic, or stub out specific endpoints with custom values. +type EndpointResolverFunc func(region string, options EndpointResolverOptions) (aws.Endpoint, error) + +func (fn EndpointResolverFunc) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { + return fn(region, options) +} + +// EndpointResolverFromURL returns an EndpointResolver configured using the +// provided endpoint url. By default, the resolved endpoint resolver uses the +// client region as signing region, and the endpoint source is set to +// EndpointSourceCustom.You can provide functional options to configure endpoint +// values for the resolved endpoint. 
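+//
+// As an illustrative, assumed usage (the URL below is hypothetical, e.g. a
+// local STS emulator), a fixed endpoint can be wired in via the client
+// options:
+//
+//	client := sts.NewFromConfig(cfg, func(o *sts.Options) {
+//		o.EndpointResolver = sts.EndpointResolverFromURL("http://localhost:4566")
+//	})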
+func EndpointResolverFromURL(url string, optFns ...func(*aws.Endpoint)) EndpointResolver { + e := aws.Endpoint{URL: url, Source: aws.EndpointSourceCustom} + for _, fn := range optFns { + fn(&e) + } + + return EndpointResolverFunc( + func(region string, options EndpointResolverOptions) (aws.Endpoint, error) { + if len(e.SigningRegion) == 0 { + e.SigningRegion = region + } + return e, nil + }, + ) +} + +type ResolveEndpoint struct { + Resolver EndpointResolver + Options EndpointResolverOptions +} + +func (*ResolveEndpoint) ID() string { + return "ResolveEndpoint" +} + +func (m *ResolveEndpoint) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + if !awsmiddleware.GetRequiresLegacyEndpoints(ctx) { + return next.HandleSerialize(ctx, in) + } + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.Resolver == nil { + return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") + } + + eo := m.Options + eo.Logger = middleware.GetLogger(ctx) + + var endpoint aws.Endpoint + endpoint, err = m.Resolver.ResolveEndpoint(awsmiddleware.GetRegion(ctx), eo) + if err != nil { + nf := (&aws.EndpointNotFoundError{}) + if errors.As(err, &nf) { + ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, false) + return next.HandleSerialize(ctx, in) + } + return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) + } + + req.URL, err = url.Parse(endpoint.URL) + if err != nil { + return out, metadata, fmt.Errorf("failed to parse endpoint URL: %w", err) + } + + if len(awsmiddleware.GetSigningName(ctx)) == 0 { + signingName := endpoint.SigningName + if len(signingName) == 0 { + signingName = "sts" + } + ctx = awsmiddleware.SetSigningName(ctx, signingName) + } + ctx = awsmiddleware.SetEndpointSource(ctx, endpoint.Source) + ctx = smithyhttp.SetHostnameImmutable(ctx, endpoint.HostnameImmutable) + ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion) + ctx = awsmiddleware.SetPartitionID(ctx, endpoint.PartitionID) + return next.HandleSerialize(ctx, in) +} +func addResolveEndpointMiddleware(stack *middleware.Stack, o Options) error { + return stack.Serialize.Insert(&ResolveEndpoint{ + Resolver: o.EndpointResolver, + Options: o.EndpointOptions, + }, "OperationSerializer", middleware.Before) +} + +func removeResolveEndpointMiddleware(stack *middleware.Stack) error { + _, err := stack.Serialize.Remove((&ResolveEndpoint{}).ID()) + return err +} + +type wrappedEndpointResolver struct { + awsResolver aws.EndpointResolverWithOptions +} + +func (w *wrappedEndpointResolver) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { + return w.awsResolver.ResolveEndpoint(ServiceID, region, options) +} + +type awsEndpointResolverAdaptor func(service, region string) (aws.Endpoint, error) + +func (a awsEndpointResolverAdaptor) ResolveEndpoint(service, region string, options ...interface{}) (aws.Endpoint, error) { + return a(service, region) +} + +var _ aws.EndpointResolverWithOptions = awsEndpointResolverAdaptor(nil) + +// withEndpointResolver returns an aws.EndpointResolverWithOptions that first delegates endpoint resolution to the awsResolver. 
+// If awsResolver returns aws.EndpointNotFoundError error, the v1 resolver middleware will swallow the error, +// and set an appropriate context flag such that fallback will occur when EndpointResolverV2 is invoked +// via its middleware. +// +// If another error (besides aws.EndpointNotFoundError) is returned, then that error will be propagated. +func withEndpointResolver(awsResolver aws.EndpointResolver, awsResolverWithOptions aws.EndpointResolverWithOptions) EndpointResolver { + var resolver aws.EndpointResolverWithOptions + + if awsResolverWithOptions != nil { + resolver = awsResolverWithOptions + } else if awsResolver != nil { + resolver = awsEndpointResolverAdaptor(awsResolver.ResolveEndpoint) + } + + return &wrappedEndpointResolver{ + awsResolver: resolver, + } +} + +func finalizeClientEndpointResolverOptions(options *Options) { + options.EndpointOptions.LogDeprecated = options.ClientLogMode.IsDeprecatedUsage() + + if len(options.EndpointOptions.ResolvedRegion) == 0 { + const fipsInfix = "-fips-" + const fipsPrefix = "fips-" + const fipsSuffix = "-fips" + + if strings.Contains(options.Region, fipsInfix) || + strings.Contains(options.Region, fipsPrefix) || + strings.Contains(options.Region, fipsSuffix) { + options.EndpointOptions.ResolvedRegion = strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll( + options.Region, fipsInfix, "-"), fipsPrefix, ""), fipsSuffix, "") + options.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateEnabled + } + } + +} + +func resolveEndpointResolverV2(options *Options) { + if options.EndpointResolverV2 == nil { + options.EndpointResolverV2 = NewDefaultEndpointResolverV2() + } +} + +func resolveBaseEndpoint(cfg aws.Config, o *Options) { + if cfg.BaseEndpoint != nil { + o.BaseEndpoint = cfg.BaseEndpoint + } + + _, g := os.LookupEnv("AWS_ENDPOINT_URL") + _, s := os.LookupEnv("AWS_ENDPOINT_URL_STS") + + if g && !s { + return + } + + value, found, err := internalConfig.ResolveServiceBaseEndpoint(context.Background(), "STS", cfg.ConfigSources) + if found && err == nil { + o.BaseEndpoint = &value + } +} + +// Utility function to aid with translating pseudo-regions to classical regions +// with the appropriate setting indicated by the pseudo-region +func mapPseudoRegion(pr string) (region string, fips aws.FIPSEndpointState) { + const fipsInfix = "-fips-" + const fipsPrefix = "fips-" + const fipsSuffix = "-fips" + + if strings.Contains(pr, fipsInfix) || + strings.Contains(pr, fipsPrefix) || + strings.Contains(pr, fipsSuffix) { + region = strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll( + pr, fipsInfix, "-"), fipsPrefix, ""), fipsSuffix, "") + fips = aws.FIPSEndpointStateEnabled + } else { + region = pr + } + + return region, fips +} + +// builtInParameterResolver is the interface responsible for resolving BuiltIn +// values during the sourcing of EndpointParameters +type builtInParameterResolver interface { + ResolveBuiltIns(*EndpointParameters) error +} + +// builtInResolver resolves modeled BuiltIn values using only the members defined +// below. +type builtInResolver struct { + // The AWS region used to dispatch the request. + Region string + + // Sourced BuiltIn value in a historical enabled or disabled state. + UseDualStack aws.DualStackEndpointState + + // Sourced BuiltIn value in a historical enabled or disabled state. + UseFIPS aws.FIPSEndpointState + + // Base endpoint that can potentially be modified during Endpoint resolution. 
+ Endpoint *string + + // Whether the global endpoint should be used, rather then the regional endpoint + // for us-east-1. + UseGlobalEndpoint bool +} + +// Invoked at runtime to resolve BuiltIn Values. Only resolution code specific to +// each BuiltIn value is generated. +func (b *builtInResolver) ResolveBuiltIns(params *EndpointParameters) error { + + region, _ := mapPseudoRegion(b.Region) + if len(region) == 0 { + return fmt.Errorf("Could not resolve AWS::Region") + } else { + params.Region = aws.String(region) + } + if b.UseDualStack == aws.DualStackEndpointStateEnabled { + params.UseDualStack = aws.Bool(true) + } else { + params.UseDualStack = aws.Bool(false) + } + if b.UseFIPS == aws.FIPSEndpointStateEnabled { + params.UseFIPS = aws.Bool(true) + } else { + params.UseFIPS = aws.Bool(false) + } + params.Endpoint = b.Endpoint + params.UseGlobalEndpoint = aws.Bool(b.UseGlobalEndpoint) + return nil +} + +// EndpointParameters provides the parameters that influence how endpoints are +// resolved. +type EndpointParameters struct { + // The AWS region used to dispatch the request. + // + // Parameter is + // required. + // + // AWS::Region + Region *string + + // When true, use the dual-stack endpoint. If the configured endpoint does not + // support dual-stack, dispatching the request MAY return an error. + // + // Defaults to + // false if no value is provided. + // + // AWS::UseDualStack + UseDualStack *bool + + // When true, send this request to the FIPS-compliant regional endpoint. If the + // configured endpoint does not have a FIPS compliant endpoint, dispatching the + // request will return an error. + // + // Defaults to false if no value is + // provided. + // + // AWS::UseFIPS + UseFIPS *bool + + // Override the endpoint used to send this request + // + // Parameter is + // required. + // + // SDK::Endpoint + Endpoint *string + + // Whether the global endpoint should be used, rather then the regional endpoint + // for us-east-1. + // + // Defaults to false if no value is + // provided. + // + // AWS::STS::UseGlobalEndpoint + UseGlobalEndpoint *bool +} + +// ValidateRequired validates required parameters are set. +func (p EndpointParameters) ValidateRequired() error { + if p.UseDualStack == nil { + return fmt.Errorf("parameter UseDualStack is required") + } + + if p.UseFIPS == nil { + return fmt.Errorf("parameter UseFIPS is required") + } + + if p.UseGlobalEndpoint == nil { + return fmt.Errorf("parameter UseGlobalEndpoint is required") + } + + return nil +} + +// WithDefaults returns a shallow copy of EndpointParameterswith default values +// applied to members where applicable. +func (p EndpointParameters) WithDefaults() EndpointParameters { + if p.UseDualStack == nil { + p.UseDualStack = ptr.Bool(false) + } + + if p.UseFIPS == nil { + p.UseFIPS = ptr.Bool(false) + } + + if p.UseGlobalEndpoint == nil { + p.UseGlobalEndpoint = ptr.Bool(false) + } + return p +} + +// EndpointResolverV2 provides the interface for resolving service endpoints. +type EndpointResolverV2 interface { + // ResolveEndpoint attempts to resolve the endpoint with the provided options, + // returning the endpoint if found. Otherwise an error is returned. + ResolveEndpoint(ctx context.Context, params EndpointParameters) ( + smithyendpoints.Endpoint, error, + ) +} + +// resolver provides the implementation for resolving endpoints. 
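+//
+// A small illustrative sketch, with assumed values and not generated, of the
+// parameter flow defined above: WithDefaults fills the three boolean knobs,
+// after which ValidateRequired is guaranteed to pass:
+//
+//	params := sts.EndpointParameters{Region: aws.String("us-east-1")}.WithDefaults()
+//	_ = params.ValidateRequired() // returns nil: UseDualStack, UseFIPS, UseGlobalEndpoint all defaulted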
+type resolver struct{} + +func NewDefaultEndpointResolverV2() EndpointResolverV2 { + return &resolver{} +} + +// ResolveEndpoint attempts to resolve the endpoint with the provided options, +// returning the endpoint if found. Otherwise an error is returned. +func (r *resolver) ResolveEndpoint( + ctx context.Context, params EndpointParameters, +) ( + endpoint smithyendpoints.Endpoint, err error, +) { + params = params.WithDefaults() + if err = params.ValidateRequired(); err != nil { + return endpoint, fmt.Errorf("endpoint parameters are not valid, %w", err) + } + _UseDualStack := *params.UseDualStack + _UseFIPS := *params.UseFIPS + _UseGlobalEndpoint := *params.UseGlobalEndpoint + + if _UseGlobalEndpoint == true { + if !(params.Endpoint != nil) { + if exprVal := params.Region; exprVal != nil { + _Region := *exprVal + _ = _Region + if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { + _PartitionResult := *exprVal + _ = _PartitionResult + if _UseFIPS == false { + if _UseDualStack == false { + if _Region == "ap-northeast-1" { + uriString := "https://sts.amazonaws.com" + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("authSchemes", []interface{}{ + map[string]interface{}{ + "name": "sigv4", + "signingName": "sts", + "signingRegion": "us-east-1", + }, + }) + return out + }(), + }, nil + } + if _Region == "ap-south-1" { + uriString := "https://sts.amazonaws.com" + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("authSchemes", []interface{}{ + map[string]interface{}{ + "name": "sigv4", + "signingName": "sts", + "signingRegion": "us-east-1", + }, + }) + return out + }(), + }, nil + } + if _Region == "ap-southeast-1" { + uriString := "https://sts.amazonaws.com" + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("authSchemes", []interface{}{ + map[string]interface{}{ + "name": "sigv4", + "signingName": "sts", + "signingRegion": "us-east-1", + }, + }) + return out + }(), + }, nil + } + if _Region == "ap-southeast-2" { + uriString := "https://sts.amazonaws.com" + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("authSchemes", []interface{}{ + map[string]interface{}{ + "name": "sigv4", + "signingName": "sts", + "signingRegion": "us-east-1", + }, + }) + return out + }(), + }, nil + } + if _Region == "aws-global" { + uriString := "https://sts.amazonaws.com" + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("authSchemes", []interface{}{ + 
map[string]interface{}{ + "name": "sigv4", + "signingName": "sts", + "signingRegion": "us-east-1", + }, + }) + return out + }(), + }, nil + } + if _Region == "ca-central-1" { + uriString := "https://sts.amazonaws.com" + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("authSchemes", []interface{}{ + map[string]interface{}{ + "name": "sigv4", + "signingName": "sts", + "signingRegion": "us-east-1", + }, + }) + return out + }(), + }, nil + } + if _Region == "eu-central-1" { + uriString := "https://sts.amazonaws.com" + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("authSchemes", []interface{}{ + map[string]interface{}{ + "name": "sigv4", + "signingName": "sts", + "signingRegion": "us-east-1", + }, + }) + return out + }(), + }, nil + } + if _Region == "eu-north-1" { + uriString := "https://sts.amazonaws.com" + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("authSchemes", []interface{}{ + map[string]interface{}{ + "name": "sigv4", + "signingName": "sts", + "signingRegion": "us-east-1", + }, + }) + return out + }(), + }, nil + } + if _Region == "eu-west-1" { + uriString := "https://sts.amazonaws.com" + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("authSchemes", []interface{}{ + map[string]interface{}{ + "name": "sigv4", + "signingName": "sts", + "signingRegion": "us-east-1", + }, + }) + return out + }(), + }, nil + } + if _Region == "eu-west-2" { + uriString := "https://sts.amazonaws.com" + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("authSchemes", []interface{}{ + map[string]interface{}{ + "name": "sigv4", + "signingName": "sts", + "signingRegion": "us-east-1", + }, + }) + return out + }(), + }, nil + } + if _Region == "eu-west-3" { + uriString := "https://sts.amazonaws.com" + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("authSchemes", []interface{}{ + map[string]interface{}{ + "name": "sigv4", + "signingName": "sts", + "signingRegion": "us-east-1", + }, + }) + return out + }(), + }, nil + } + if _Region == "sa-east-1" { + uriString := "https://sts.amazonaws.com" + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return 
smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("authSchemes", []interface{}{ + map[string]interface{}{ + "name": "sigv4", + "signingName": "sts", + "signingRegion": "us-east-1", + }, + }) + return out + }(), + }, nil + } + if _Region == "us-east-1" { + uriString := "https://sts.amazonaws.com" + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("authSchemes", []interface{}{ + map[string]interface{}{ + "name": "sigv4", + "signingName": "sts", + "signingRegion": "us-east-1", + }, + }) + return out + }(), + }, nil + } + if _Region == "us-east-2" { + uriString := "https://sts.amazonaws.com" + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("authSchemes", []interface{}{ + map[string]interface{}{ + "name": "sigv4", + "signingName": "sts", + "signingRegion": "us-east-1", + }, + }) + return out + }(), + }, nil + } + if _Region == "us-west-1" { + uriString := "https://sts.amazonaws.com" + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("authSchemes", []interface{}{ + map[string]interface{}{ + "name": "sigv4", + "signingName": "sts", + "signingRegion": "us-east-1", + }, + }) + return out + }(), + }, nil + } + if _Region == "us-west-2" { + uriString := "https://sts.amazonaws.com" + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("authSchemes", []interface{}{ + map[string]interface{}{ + "name": "sigv4", + "signingName": "sts", + "signingRegion": "us-east-1", + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://sts.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("authSchemes", []interface{}{ + map[string]interface{}{ + "name": "sigv4", + "signingName": "sts", + "signingRegion": _Region, + }, + }) + return out + }(), + }, nil + } + } + } + } + } + } + if exprVal := params.Endpoint; exprVal != nil { + _Endpoint := *exprVal + _ = _Endpoint + if _UseFIPS == true { + return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: FIPS and custom endpoint are not supported") + } + if _UseDualStack == true { + return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Dualstack and custom endpoint are not supported") + } + uriString := _Endpoint 
+ + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + if exprVal := params.Region; exprVal != nil { + _Region := *exprVal + _ = _Region + if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { + _PartitionResult := *exprVal + _ = _PartitionResult + if _UseFIPS == true { + if _UseDualStack == true { + if true == _PartitionResult.SupportsFIPS { + if true == _PartitionResult.SupportsDualStack { + uriString := func() string { + var out strings.Builder + out.WriteString("https://sts-fips.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DualStackDnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS and DualStack are enabled, but this partition does not support one or both") + } + } + if _UseFIPS == true { + if true == _PartitionResult.SupportsFIPS { + if "aws-us-gov" == _PartitionResult.Name { + uriString := func() string { + var out strings.Builder + out.WriteString("https://sts.") + out.WriteString(_Region) + out.WriteString(".amazonaws.com") + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://sts-fips.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS is enabled but this partition does not support FIPS") + } + if _UseDualStack == true { + if true == _PartitionResult.SupportsDualStack { + uriString := func() string { + var out strings.Builder + out.WriteString("https://sts.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_PartitionResult.DualStackDnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "DualStack is enabled but this partition does not support DualStack") + } + if _Region == "aws-global" { + uriString := "https://sts.amazonaws.com" + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("authSchemes", []interface{}{ + map[string]interface{}{ + "name": "sigv4", + "signingName": "sts", + "signingRegion": "us-east-1", + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://sts.") + out.WriteString(_Region) + 
out.WriteString(".") + out.WriteString(_PartitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } + return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Missing Region") +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json new file mode 100644 index 00000000000..e44e7d149c5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json @@ -0,0 +1,38 @@ +{ + "dependencies": { + "github.com/aws/aws-sdk-go-v2": "v1.4.0", + "github.com/aws/aws-sdk-go-v2/internal/configsources": "v0.0.0-00010101000000-000000000000", + "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2": "v2.0.0-00010101000000-000000000000", + "github.com/aws/aws-sdk-go-v2/service/internal/presigned-url": "v1.0.7", + "github.com/aws/smithy-go": "v1.4.0", + "github.com/google/go-cmp": "v0.5.4" + }, + "files": [ + "api_client.go", + "api_client_test.go", + "api_op_AssumeRole.go", + "api_op_AssumeRoleWithSAML.go", + "api_op_AssumeRoleWithWebIdentity.go", + "api_op_DecodeAuthorizationMessage.go", + "api_op_GetAccessKeyInfo.go", + "api_op_GetCallerIdentity.go", + "api_op_GetFederationToken.go", + "api_op_GetSessionToken.go", + "deserializers.go", + "doc.go", + "endpoints.go", + "endpoints_config_test.go", + "endpoints_test.go", + "generated.json", + "internal/endpoints/endpoints.go", + "internal/endpoints/endpoints_test.go", + "protocol_test.go", + "serializers.go", + "types/errors.go", + "types/types.go", + "validators.go" + ], + "go": "1.15", + "module": "github.com/aws/aws-sdk-go-v2/service/sts", + "unstable": false +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go new file mode 100644 index 00000000000..f934c18f6a5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go @@ -0,0 +1,6 @@ +// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. + +package sts + +// goModuleVersion is the tagged release for this module +const goModuleVersion = "1.25.1" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go new file mode 100644 index 00000000000..ca4c881909a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go @@ -0,0 +1,509 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package endpoints + +import ( + "github.com/aws/aws-sdk-go-v2/aws" + endpoints "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2" + "github.com/aws/smithy-go/logging" + "regexp" +) + +// Options is the endpoint resolver configuration options +type Options struct { + // Logger is a logging implementation that log events should be sent to. + Logger logging.Logger + + // LogDeprecated indicates that deprecated endpoints should be logged to the + // provided logger. + LogDeprecated bool + + // ResolvedRegion is used to override the region to be resolved, rather then the + // using the value passed to the ResolveEndpoint method. 
This value is used by the + // SDK to translate regions like fips-us-east-1 or us-east-1-fips to an alternative + // name. You must not set this value directly in your application. + ResolvedRegion string + + // DisableHTTPS informs the resolver to return an endpoint that does not use the + // HTTPS scheme. + DisableHTTPS bool + + // UseDualStackEndpoint specifies the resolver must resolve a dual-stack endpoint. + UseDualStackEndpoint aws.DualStackEndpointState + + // UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint. + UseFIPSEndpoint aws.FIPSEndpointState +} + +func (o Options) GetResolvedRegion() string { + return o.ResolvedRegion +} + +func (o Options) GetDisableHTTPS() bool { + return o.DisableHTTPS +} + +func (o Options) GetUseDualStackEndpoint() aws.DualStackEndpointState { + return o.UseDualStackEndpoint +} + +func (o Options) GetUseFIPSEndpoint() aws.FIPSEndpointState { + return o.UseFIPSEndpoint +} + +func transformToSharedOptions(options Options) endpoints.Options { + return endpoints.Options{ + Logger: options.Logger, + LogDeprecated: options.LogDeprecated, + ResolvedRegion: options.ResolvedRegion, + DisableHTTPS: options.DisableHTTPS, + UseDualStackEndpoint: options.UseDualStackEndpoint, + UseFIPSEndpoint: options.UseFIPSEndpoint, + } +} + +// Resolver STS endpoint resolver +type Resolver struct { + partitions endpoints.Partitions +} + +// ResolveEndpoint resolves the service endpoint for the given region and options +func (r *Resolver) ResolveEndpoint(region string, options Options) (endpoint aws.Endpoint, err error) { + if len(region) == 0 { + return endpoint, &aws.MissingRegionError{} + } + + opt := transformToSharedOptions(options) + return r.partitions.ResolveEndpoint(region, opt) +} + +// New returns a new Resolver +func New() *Resolver { + return &Resolver{ + partitions: defaultPartitions, + } +} + +var partitionRegexp = struct { + Aws *regexp.Regexp + AwsCn *regexp.Regexp + AwsIso *regexp.Regexp + AwsIsoB *regexp.Regexp + AwsIsoE *regexp.Regexp + AwsIsoF *regexp.Regexp + AwsUsGov *regexp.Regexp +}{ + + Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af|il)\\-\\w+\\-\\d+$"), + AwsCn: regexp.MustCompile("^cn\\-\\w+\\-\\d+$"), + AwsIso: regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"), + AwsIsoB: regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"), + AwsIsoE: regexp.MustCompile("^eu\\-isoe\\-\\w+\\-\\d+$"), + AwsIsoF: regexp.MustCompile("^us\\-isof\\-\\w+\\-\\d+$"), + AwsUsGov: regexp.MustCompile("^us\\-gov\\-\\w+\\-\\d+$"), +} + +var defaultPartitions = endpoints.Partitions{ + { + ID: "aws", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "sts.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts-fips.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "sts-fips.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "sts.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.Aws, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "af-south-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-east-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + 
Region: "ap-northeast-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-northeast-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-northeast-3", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-south-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-south-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-southeast-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-southeast-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-southeast-3", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-southeast-4", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "aws-global", + }: endpoints.Endpoint{ + Hostname: "sts.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-1", + }, + }, + endpoints.EndpointKey{ + Region: "ca-central-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-central-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-central-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-north-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-south-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-south-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-west-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-west-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-west-3", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "il-central-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "me-central-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "me-south-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "sa-east-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-east-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-east-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts-fips.us-east-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-east-1-fips", + }: endpoints.Endpoint{ + Hostname: "sts-fips.us-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "us-east-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-east-2", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts-fips.us-east-2.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-east-2-fips", + }: endpoints.Endpoint{ + Hostname: "sts-fips.us-east-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-2", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "us-west-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-west-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts-fips.us-west-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-west-1-fips", + }: endpoints.Endpoint{ + Hostname: "sts-fips.us-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-west-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "us-west-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-west-2", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts-fips.us-west-2.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-west-2-fips", + }: endpoints.Endpoint{ + Hostname: 
"sts-fips.us-west-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-west-2", + }, + Deprecated: aws.TrueTernary, + }, + }, + }, + { + ID: "aws-cn", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "sts.{region}.api.amazonwebservices.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts-fips.{region}.amazonaws.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "sts-fips.{region}.api.amazonwebservices.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "sts.{region}.amazonaws.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsCn, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "cn-north-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "cn-northwest-1", + }: endpoints.Endpoint{}, + }, + }, + { + ID: "aws-iso", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts-fips.{region}.c2s.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "sts.{region}.c2s.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIso, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "us-iso-east-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-iso-west-1", + }: endpoints.Endpoint{}, + }, + }, + { + ID: "aws-iso-b", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts-fips.{region}.sc2s.sgov.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "sts.{region}.sc2s.sgov.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoB, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "us-isob-east-1", + }: endpoints.Endpoint{}, + }, + }, + { + ID: "aws-iso-e", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts-fips.{region}.cloud.adc-e.uk", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "sts.{region}.cloud.adc-e.uk", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoE, + IsRegionalized: true, + }, + { + ID: "aws-iso-f", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts-fips.{region}.csp.hci.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "sts.{region}.csp.hci.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoF, + IsRegionalized: true, + }, + { + ID: "aws-us-gov", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "sts.{region}.api.aws", + Protocols: 
[]string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "sts-fips.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "sts.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsUsGov, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "us-gov-east-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-gov-east-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts.us-gov-east-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-gov-east-1-fips", + }: endpoints.Endpoint{ + Hostname: "sts.us-gov-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "us-gov-west-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-gov-west-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "sts.us-gov-west-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-gov-west-1-fips", + }: endpoints.Endpoint{ + Hostname: "sts.us-gov-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: aws.TrueTernary, + }, + }, + }, +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/serializers.go new file mode 100644 index 00000000000..4c08061c0c9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/serializers.go @@ -0,0 +1,862 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package sts + +import ( + "bytes" + "context" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws/protocol/query" + "github.com/aws/aws-sdk-go-v2/service/sts/types" + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/encoding/httpbinding" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "path" +) + +type awsAwsquery_serializeOpAssumeRole struct { +} + +func (*awsAwsquery_serializeOpAssumeRole) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsquery_serializeOpAssumeRole) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*AssumeRoleInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") + + bodyWriter := bytes.NewBuffer(nil) + bodyEncoder := query.NewEncoder(bodyWriter) + body := bodyEncoder.Object() + body.Key("Action").String("AssumeRole") + body.Key("Version").String("2011-06-15") + + if err := awsAwsquery_serializeOpDocumentAssumeRoleInput(input, bodyEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + err = bodyEncoder.Encode() + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsquery_serializeOpAssumeRoleWithSAML struct { +} + +func (*awsAwsquery_serializeOpAssumeRoleWithSAML) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsquery_serializeOpAssumeRoleWithSAML) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*AssumeRoleWithSAMLInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = 
path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") + + bodyWriter := bytes.NewBuffer(nil) + bodyEncoder := query.NewEncoder(bodyWriter) + body := bodyEncoder.Object() + body.Key("Action").String("AssumeRoleWithSAML") + body.Key("Version").String("2011-06-15") + + if err := awsAwsquery_serializeOpDocumentAssumeRoleWithSAMLInput(input, bodyEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + err = bodyEncoder.Encode() + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsquery_serializeOpAssumeRoleWithWebIdentity struct { +} + +func (*awsAwsquery_serializeOpAssumeRoleWithWebIdentity) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsquery_serializeOpAssumeRoleWithWebIdentity) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*AssumeRoleWithWebIdentityInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") + + bodyWriter := bytes.NewBuffer(nil) + bodyEncoder := query.NewEncoder(bodyWriter) + body := bodyEncoder.Object() + body.Key("Action").String("AssumeRoleWithWebIdentity") + body.Key("Version").String("2011-06-15") + + if err := awsAwsquery_serializeOpDocumentAssumeRoleWithWebIdentityInput(input, bodyEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + err = bodyEncoder.Encode() + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = 
httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsquery_serializeOpDecodeAuthorizationMessage struct { +} + +func (*awsAwsquery_serializeOpDecodeAuthorizationMessage) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsquery_serializeOpDecodeAuthorizationMessage) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DecodeAuthorizationMessageInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") + + bodyWriter := bytes.NewBuffer(nil) + bodyEncoder := query.NewEncoder(bodyWriter) + body := bodyEncoder.Object() + body.Key("Action").String("DecodeAuthorizationMessage") + body.Key("Version").String("2011-06-15") + + if err := awsAwsquery_serializeOpDocumentDecodeAuthorizationMessageInput(input, bodyEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + err = bodyEncoder.Encode() + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsquery_serializeOpGetAccessKeyInfo struct { +} + +func (*awsAwsquery_serializeOpGetAccessKeyInfo) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsquery_serializeOpGetAccessKeyInfo) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetAccessKeyInfoInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && 
operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") + + bodyWriter := bytes.NewBuffer(nil) + bodyEncoder := query.NewEncoder(bodyWriter) + body := bodyEncoder.Object() + body.Key("Action").String("GetAccessKeyInfo") + body.Key("Version").String("2011-06-15") + + if err := awsAwsquery_serializeOpDocumentGetAccessKeyInfoInput(input, bodyEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + err = bodyEncoder.Encode() + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsquery_serializeOpGetCallerIdentity struct { +} + +func (*awsAwsquery_serializeOpGetCallerIdentity) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsquery_serializeOpGetCallerIdentity) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetCallerIdentityInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") + + bodyWriter := bytes.NewBuffer(nil) + bodyEncoder := query.NewEncoder(bodyWriter) + body := bodyEncoder.Object() + body.Key("Action").String("GetCallerIdentity") + body.Key("Version").String("2011-06-15") + + err = bodyEncoder.Encode() + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsquery_serializeOpGetFederationToken struct { +} + +func (*awsAwsquery_serializeOpGetFederationToken) ID() string { + return "OperationSerializer" +} + 
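(Editor's aside, not part of the generated file.) Every operation serializer in
this file follows the same pattern visible above: the request becomes a POST to
"/" with a form-encoded body carrying the protocol constants Action and Version
plus the operation's members. As an illustration (field values invented, key
order may differ), a GetFederationToken call would put roughly this on the wire:

	POST / HTTP/1.1
	Content-Type: application/x-www-form-urlencoded

	Action=GetFederationToken&Version=2011-06-15&DurationSeconds=3600&Name=example-user
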
+func (m *awsAwsquery_serializeOpGetFederationToken) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetFederationTokenInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") + + bodyWriter := bytes.NewBuffer(nil) + bodyEncoder := query.NewEncoder(bodyWriter) + body := bodyEncoder.Object() + body.Key("Action").String("GetFederationToken") + body.Key("Version").String("2011-06-15") + + if err := awsAwsquery_serializeOpDocumentGetFederationTokenInput(input, bodyEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + err = bodyEncoder.Encode() + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsquery_serializeOpGetSessionToken struct { +} + +func (*awsAwsquery_serializeOpGetSessionToken) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsquery_serializeOpGetSessionToken) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetSessionTokenInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") + + 
bodyWriter := bytes.NewBuffer(nil) + bodyEncoder := query.NewEncoder(bodyWriter) + body := bodyEncoder.Object() + body.Key("Action").String("GetSessionToken") + body.Key("Version").String("2011-06-15") + + if err := awsAwsquery_serializeOpDocumentGetSessionTokenInput(input, bodyEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + err = bodyEncoder.Encode() + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsAwsquery_serializeDocumentPolicyDescriptorListType(v []types.PolicyDescriptorType, value query.Value) error { + array := value.Array("member") + + for i := range v { + av := array.Value() + if err := awsAwsquery_serializeDocumentPolicyDescriptorType(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsquery_serializeDocumentPolicyDescriptorType(v *types.PolicyDescriptorType, value query.Value) error { + object := value.Object() + _ = object + + if v.Arn != nil { + objectKey := object.Key("arn") + objectKey.String(*v.Arn) + } + + return nil +} + +func awsAwsquery_serializeDocumentProvidedContext(v *types.ProvidedContext, value query.Value) error { + object := value.Object() + _ = object + + if v.ContextAssertion != nil { + objectKey := object.Key("ContextAssertion") + objectKey.String(*v.ContextAssertion) + } + + if v.ProviderArn != nil { + objectKey := object.Key("ProviderArn") + objectKey.String(*v.ProviderArn) + } + + return nil +} + +func awsAwsquery_serializeDocumentProvidedContextsListType(v []types.ProvidedContext, value query.Value) error { + array := value.Array("member") + + for i := range v { + av := array.Value() + if err := awsAwsquery_serializeDocumentProvidedContext(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsquery_serializeDocumentTag(v *types.Tag, value query.Value) error { + object := value.Object() + _ = object + + if v.Key != nil { + objectKey := object.Key("Key") + objectKey.String(*v.Key) + } + + if v.Value != nil { + objectKey := object.Key("Value") + objectKey.String(*v.Value) + } + + return nil +} + +func awsAwsquery_serializeDocumentTagKeyListType(v []string, value query.Value) error { + array := value.Array("member") + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + +func awsAwsquery_serializeDocumentTagListType(v []types.Tag, value query.Value) error { + array := value.Array("member") + + for i := range v { + av := array.Value() + if err := awsAwsquery_serializeDocumentTag(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsquery_serializeOpDocumentAssumeRoleInput(v *AssumeRoleInput, value query.Value) error { + object := value.Object() + _ = object + + if v.DurationSeconds != nil { + objectKey := object.Key("DurationSeconds") + objectKey.Integer(*v.DurationSeconds) + } + + if v.ExternalId != nil { + objectKey := object.Key("ExternalId") + objectKey.String(*v.ExternalId) + } + + if v.Policy != nil { + objectKey := object.Key("Policy") + objectKey.String(*v.Policy) + } + + if v.PolicyArns != nil { + objectKey := object.Key("PolicyArns") + if err := 
awsAwsquery_serializeDocumentPolicyDescriptorListType(v.PolicyArns, objectKey); err != nil { + return err + } + } + + if v.ProvidedContexts != nil { + objectKey := object.Key("ProvidedContexts") + if err := awsAwsquery_serializeDocumentProvidedContextsListType(v.ProvidedContexts, objectKey); err != nil { + return err + } + } + + if v.RoleArn != nil { + objectKey := object.Key("RoleArn") + objectKey.String(*v.RoleArn) + } + + if v.RoleSessionName != nil { + objectKey := object.Key("RoleSessionName") + objectKey.String(*v.RoleSessionName) + } + + if v.SerialNumber != nil { + objectKey := object.Key("SerialNumber") + objectKey.String(*v.SerialNumber) + } + + if v.SourceIdentity != nil { + objectKey := object.Key("SourceIdentity") + objectKey.String(*v.SourceIdentity) + } + + if v.Tags != nil { + objectKey := object.Key("Tags") + if err := awsAwsquery_serializeDocumentTagListType(v.Tags, objectKey); err != nil { + return err + } + } + + if v.TokenCode != nil { + objectKey := object.Key("TokenCode") + objectKey.String(*v.TokenCode) + } + + if v.TransitiveTagKeys != nil { + objectKey := object.Key("TransitiveTagKeys") + if err := awsAwsquery_serializeDocumentTagKeyListType(v.TransitiveTagKeys, objectKey); err != nil { + return err + } + } + + return nil +} + +func awsAwsquery_serializeOpDocumentAssumeRoleWithSAMLInput(v *AssumeRoleWithSAMLInput, value query.Value) error { + object := value.Object() + _ = object + + if v.DurationSeconds != nil { + objectKey := object.Key("DurationSeconds") + objectKey.Integer(*v.DurationSeconds) + } + + if v.Policy != nil { + objectKey := object.Key("Policy") + objectKey.String(*v.Policy) + } + + if v.PolicyArns != nil { + objectKey := object.Key("PolicyArns") + if err := awsAwsquery_serializeDocumentPolicyDescriptorListType(v.PolicyArns, objectKey); err != nil { + return err + } + } + + if v.PrincipalArn != nil { + objectKey := object.Key("PrincipalArn") + objectKey.String(*v.PrincipalArn) + } + + if v.RoleArn != nil { + objectKey := object.Key("RoleArn") + objectKey.String(*v.RoleArn) + } + + if v.SAMLAssertion != nil { + objectKey := object.Key("SAMLAssertion") + objectKey.String(*v.SAMLAssertion) + } + + return nil +} + +func awsAwsquery_serializeOpDocumentAssumeRoleWithWebIdentityInput(v *AssumeRoleWithWebIdentityInput, value query.Value) error { + object := value.Object() + _ = object + + if v.DurationSeconds != nil { + objectKey := object.Key("DurationSeconds") + objectKey.Integer(*v.DurationSeconds) + } + + if v.Policy != nil { + objectKey := object.Key("Policy") + objectKey.String(*v.Policy) + } + + if v.PolicyArns != nil { + objectKey := object.Key("PolicyArns") + if err := awsAwsquery_serializeDocumentPolicyDescriptorListType(v.PolicyArns, objectKey); err != nil { + return err + } + } + + if v.ProviderId != nil { + objectKey := object.Key("ProviderId") + objectKey.String(*v.ProviderId) + } + + if v.RoleArn != nil { + objectKey := object.Key("RoleArn") + objectKey.String(*v.RoleArn) + } + + if v.RoleSessionName != nil { + objectKey := object.Key("RoleSessionName") + objectKey.String(*v.RoleSessionName) + } + + if v.WebIdentityToken != nil { + objectKey := object.Key("WebIdentityToken") + objectKey.String(*v.WebIdentityToken) + } + + return nil +} + +func awsAwsquery_serializeOpDocumentDecodeAuthorizationMessageInput(v *DecodeAuthorizationMessageInput, value query.Value) error { + object := value.Object() + _ = object + + if v.EncodedMessage != nil { + objectKey := object.Key("EncodedMessage") + objectKey.String(*v.EncodedMessage) + } + + return nil +} + 
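(Editor's aside, not part of the generated file.) The document serializers in
this file flatten nested structures into dotted query keys, with list elements
indexed under a "member" key, as in the AWS Query protocol. For example (values
invented), an AssumeRole input with one session policy ARN and one tag
serializes roughly as:

	RoleArn=arn:aws:iam::123456789012:role/example
	&RoleSessionName=example-session
	&PolicyArns.member.1.arn=arn:aws:iam::aws:policy/ReadOnlyAccess
	&Tags.member.1.Key=team
	&Tags.member.1.Value=observability
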
+func awsAwsquery_serializeOpDocumentGetAccessKeyInfoInput(v *GetAccessKeyInfoInput, value query.Value) error { + object := value.Object() + _ = object + + if v.AccessKeyId != nil { + objectKey := object.Key("AccessKeyId") + objectKey.String(*v.AccessKeyId) + } + + return nil +} + +func awsAwsquery_serializeOpDocumentGetCallerIdentityInput(v *GetCallerIdentityInput, value query.Value) error { + object := value.Object() + _ = object + + return nil +} + +func awsAwsquery_serializeOpDocumentGetFederationTokenInput(v *GetFederationTokenInput, value query.Value) error { + object := value.Object() + _ = object + + if v.DurationSeconds != nil { + objectKey := object.Key("DurationSeconds") + objectKey.Integer(*v.DurationSeconds) + } + + if v.Name != nil { + objectKey := object.Key("Name") + objectKey.String(*v.Name) + } + + if v.Policy != nil { + objectKey := object.Key("Policy") + objectKey.String(*v.Policy) + } + + if v.PolicyArns != nil { + objectKey := object.Key("PolicyArns") + if err := awsAwsquery_serializeDocumentPolicyDescriptorListType(v.PolicyArns, objectKey); err != nil { + return err + } + } + + if v.Tags != nil { + objectKey := object.Key("Tags") + if err := awsAwsquery_serializeDocumentTagListType(v.Tags, objectKey); err != nil { + return err + } + } + + return nil +} + +func awsAwsquery_serializeOpDocumentGetSessionTokenInput(v *GetSessionTokenInput, value query.Value) error { + object := value.Object() + _ = object + + if v.DurationSeconds != nil { + objectKey := object.Key("DurationSeconds") + objectKey.Integer(*v.DurationSeconds) + } + + if v.SerialNumber != nil { + objectKey := object.Key("SerialNumber") + objectKey.String(*v.SerialNumber) + } + + if v.TokenCode != nil { + objectKey := object.Key("TokenCode") + objectKey.String(*v.TokenCode) + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go new file mode 100644 index 00000000000..097875b279b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go @@ -0,0 +1,244 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package types + +import ( + "fmt" + smithy "github.com/aws/smithy-go" +) + +// The web identity token that was passed is expired or is not valid. Get a new +// identity token from the identity provider and then retry the request. +type ExpiredTokenException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *ExpiredTokenException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ExpiredTokenException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ExpiredTokenException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "ExpiredTokenException" + } + return *e.ErrorCodeOverride +} +func (e *ExpiredTokenException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request could not be fulfilled because the identity provider (IDP) that was +// asked to verify the incoming identity token could not be reached. This is often +// a transient error caused by network conditions. Retry the request a limited +// number of times so that you don't exceed the request rate. If the error +// persists, the identity provider might be down or not responding. 
+type IDPCommunicationErrorException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *IDPCommunicationErrorException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *IDPCommunicationErrorException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *IDPCommunicationErrorException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "IDPCommunicationError" + } + return *e.ErrorCodeOverride +} +func (e *IDPCommunicationErrorException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The identity provider (IdP) reported that authentication failed. This might be +// because the claim is invalid. If this error is returned for the +// AssumeRoleWithWebIdentity operation, it can also mean that the claim has expired +// or has been explicitly revoked. +type IDPRejectedClaimException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *IDPRejectedClaimException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *IDPRejectedClaimException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *IDPRejectedClaimException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "IDPRejectedClaim" + } + return *e.ErrorCodeOverride +} +func (e *IDPRejectedClaimException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The error returned if the message passed to DecodeAuthorizationMessage was +// invalid. This can happen if the token contains invalid characters, such as +// linebreaks. +type InvalidAuthorizationMessageException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *InvalidAuthorizationMessageException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidAuthorizationMessageException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidAuthorizationMessageException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidAuthorizationMessageException" + } + return *e.ErrorCodeOverride +} +func (e *InvalidAuthorizationMessageException) ErrorFault() smithy.ErrorFault { + return smithy.FaultClient +} + +// The web identity token that was passed could not be validated by Amazon Web +// Services. Get a new identity token from the identity provider and then retry the +// request. +type InvalidIdentityTokenException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *InvalidIdentityTokenException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidIdentityTokenException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidIdentityTokenException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidIdentityToken" + } + return *e.ErrorCodeOverride +} +func (e *InvalidIdentityTokenException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. 
+type MalformedPolicyDocumentException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *MalformedPolicyDocumentException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *MalformedPolicyDocumentException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *MalformedPolicyDocumentException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "MalformedPolicyDocument" + } + return *e.ErrorCodeOverride +} +func (e *MalformedPolicyDocumentException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because the total packed size of the session policies +// and session tags combined was too large. An Amazon Web Services conversion +// compresses the session policy document, session policy ARNs, and session tags +// into a packed binary format that has a separate limit. The error message +// indicates by percentage how close the policies and tags are to the upper size +// limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. You could receive this error even though you meet other +// defined session policy and session tag limits. For more information, see IAM +// and STS Entity Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length) +// in the IAM User Guide. +type PackedPolicyTooLargeException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *PackedPolicyTooLargeException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *PackedPolicyTooLargeException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *PackedPolicyTooLargeException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "PackedPolicyTooLarge" + } + return *e.ErrorCodeOverride +} +func (e *PackedPolicyTooLargeException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating Amazon Web Services STS in an Amazon Web Services Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. 
+type RegionDisabledException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *RegionDisabledException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *RegionDisabledException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *RegionDisabledException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "RegionDisabledException" + } + return *e.ErrorCodeOverride +} +func (e *RegionDisabledException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go new file mode 100644 index 00000000000..572a7051225 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go @@ -0,0 +1,130 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package types + +import ( + smithydocument "github.com/aws/smithy-go/document" + "time" +) + +// The identifiers for the temporary security credentials that the operation +// returns. +type AssumedRoleUser struct { + + // The ARN of the temporary security credentials that are returned from the + // AssumeRole action. For more information about ARNs and how to use them in + // policies, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) + // in the IAM User Guide. + // + // This member is required. + Arn *string + + // A unique identifier that contains the role ID and the role session name of the + // role that is being assumed. The role ID is generated by Amazon Web Services when + // the role is created. + // + // This member is required. + AssumedRoleId *string + + noSmithyDocumentSerde +} + +// Amazon Web Services credentials for API authentication. +type Credentials struct { + + // The access key ID that identifies the temporary security credentials. + // + // This member is required. + AccessKeyId *string + + // The date on which the current credentials expire. + // + // This member is required. + Expiration *time.Time + + // The secret access key that can be used to sign requests. + // + // This member is required. + SecretAccessKey *string + + // The token that users must pass to the service API to use the temporary + // credentials. + // + // This member is required. + SessionToken *string + + noSmithyDocumentSerde +} + +// Identifiers for the federated user that is associated with the credentials. +type FederatedUser struct { + + // The ARN that specifies the federated user that is associated with the + // credentials. For more information about ARNs and how to use them in policies, + // see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) + // in the IAM User Guide. + // + // This member is required. + Arn *string + + // The string that identifies the federated user associated with the credentials, + // similar to the unique ID of an IAM user. + // + // This member is required. + FederatedUserId *string + + noSmithyDocumentSerde +} + +// A reference to the IAM managed policy that is passed as a session policy for a +// role session or a federated user session. +type PolicyDescriptorType struct { + + // The Amazon Resource Name (ARN) of the IAM managed policy to use as a session + // policy for the role. 
For more information about ARNs, see Amazon Resource Names + // (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the Amazon Web Services General Reference. + Arn *string + + noSmithyDocumentSerde +} + +// Reserved for future use. +type ProvidedContext struct { + + // Reserved for future use. + ContextAssertion *string + + // Reserved for future use. + ProviderArn *string + + noSmithyDocumentSerde +} + +// You can pass custom key-value pair attributes when you assume a role or +// federate a user. These are called session tags. You can then use the session +// tags to control access to resources. For more information, see Tagging Amazon +// Web Services STS Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +type Tag struct { + + // The key for a session tag. You can pass up to 50 session tags. The plain text + // session tag keys can’t exceed 128 characters. For these and additional limits, + // see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // in the IAM User Guide. + // + // This member is required. + Key *string + + // The value for a session tag. You can pass up to 50 session tags. The plain text + // session tag values can’t exceed 256 characters. For these and additional limits, + // see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // in the IAM User Guide. + // + // This member is required. + Value *string + + noSmithyDocumentSerde +} + +type noSmithyDocumentSerde = smithydocument.NoSerde diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/validators.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/validators.go new file mode 100644 index 00000000000..3e4bad2a925 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/validators.go @@ -0,0 +1,305 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package sts + +import ( + "context" + "fmt" + "github.com/aws/aws-sdk-go-v2/service/sts/types" + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/middleware" +) + +type validateOpAssumeRole struct { +} + +func (*validateOpAssumeRole) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpAssumeRole) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*AssumeRoleInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpAssumeRoleInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpAssumeRoleWithSAML struct { +} + +func (*validateOpAssumeRoleWithSAML) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpAssumeRoleWithSAML) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*AssumeRoleWithSAMLInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpAssumeRoleWithSAMLInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpAssumeRoleWithWebIdentity struct { +} + +func (*validateOpAssumeRoleWithWebIdentity) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpAssumeRoleWithWebIdentity) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*AssumeRoleWithWebIdentityInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpAssumeRoleWithWebIdentityInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDecodeAuthorizationMessage struct { +} + +func (*validateOpDecodeAuthorizationMessage) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDecodeAuthorizationMessage) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DecodeAuthorizationMessageInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDecodeAuthorizationMessageInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetAccessKeyInfo struct { +} + +func (*validateOpGetAccessKeyInfo) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetAccessKeyInfo) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetAccessKeyInfoInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetAccessKeyInfoInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + 
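+// exampleValidateGetFederationToken is an editor-added sketch, not generated
+// code: it shows the effect of the validators in this file. Required fields
+// are checked client-side, and every violation is collected into a single
+// smithy.InvalidParamsError before any request is serialized or sent.
+func exampleValidateGetFederationToken() error {
+	in := &GetFederationTokenInput{ // required Name left nil
+		Tags: []types.Tag{{}}, // Tag.Key and Tag.Value are required too
+	}
+	// Returns a smithy.InvalidParamsError naming the missing Name and the
+	// nested Tags[0] Key/Value parameters; no HTTP request is made.
+	return validateOpGetFederationTokenInput(in)
+}
+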
+type validateOpGetFederationToken struct { +} + +func (*validateOpGetFederationToken) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetFederationToken) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetFederationTokenInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetFederationTokenInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +func addOpAssumeRoleValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpAssumeRole{}, middleware.After) +} + +func addOpAssumeRoleWithSAMLValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpAssumeRoleWithSAML{}, middleware.After) +} + +func addOpAssumeRoleWithWebIdentityValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpAssumeRoleWithWebIdentity{}, middleware.After) +} + +func addOpDecodeAuthorizationMessageValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDecodeAuthorizationMessage{}, middleware.After) +} + +func addOpGetAccessKeyInfoValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetAccessKeyInfo{}, middleware.After) +} + +func addOpGetFederationTokenValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetFederationToken{}, middleware.After) +} + +func validateTag(v *types.Tag) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "Tag"} + if v.Key == nil { + invalidParams.Add(smithy.NewErrParamRequired("Key")) + } + if v.Value == nil { + invalidParams.Add(smithy.NewErrParamRequired("Value")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateTagListType(v []types.Tag) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "TagListType"} + for i := range v { + if err := validateTag(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpAssumeRoleInput(v *AssumeRoleInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "AssumeRoleInput"} + if v.RoleArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("RoleArn")) + } + if v.RoleSessionName == nil { + invalidParams.Add(smithy.NewErrParamRequired("RoleSessionName")) + } + if v.Tags != nil { + if err := validateTagListType(v.Tags); err != nil { + invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpAssumeRoleWithSAMLInput(v *AssumeRoleWithSAMLInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "AssumeRoleWithSAMLInput"} + if v.RoleArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("RoleArn")) + } + if v.PrincipalArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("PrincipalArn")) + } + if v.SAMLAssertion == nil { + invalidParams.Add(smithy.NewErrParamRequired("SAMLAssertion")) + } + if 
invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpAssumeRoleWithWebIdentityInput(v *AssumeRoleWithWebIdentityInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "AssumeRoleWithWebIdentityInput"} + if v.RoleArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("RoleArn")) + } + if v.RoleSessionName == nil { + invalidParams.Add(smithy.NewErrParamRequired("RoleSessionName")) + } + if v.WebIdentityToken == nil { + invalidParams.Add(smithy.NewErrParamRequired("WebIdentityToken")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDecodeAuthorizationMessageInput(v *DecodeAuthorizationMessageInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DecodeAuthorizationMessageInput"} + if v.EncodedMessage == nil { + invalidParams.Add(smithy.NewErrParamRequired("EncodedMessage")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetAccessKeyInfoInput(v *GetAccessKeyInfoInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetAccessKeyInfoInput"} + if v.AccessKeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("AccessKeyId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetFederationTokenInput(v *GetFederationTokenInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetFederationTokenInput"} + if v.Name == nil { + invalidParams.Add(smithy.NewErrParamRequired("Name")) + } + if v.Tags != nil { + if err := validateTagListType(v.Tags); err != nil { + invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} diff --git a/vendor/github.com/aws/smithy-go/.gitignore b/vendor/github.com/aws/smithy-go/.gitignore new file mode 100644 index 00000000000..c92d6105eb3 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/.gitignore @@ -0,0 +1,26 @@ +# Eclipse +.classpath +.project +.settings/ + +# Intellij +.idea/ +*.iml +*.iws + +# Mac +.DS_Store + +# Maven +target/ +**/dependency-reduced-pom.xml + +# Gradle +/.gradle +build/ +*/out/ +*/*/out/ + +# VS Code +bin/ +.vscode/ diff --git a/vendor/github.com/aws/smithy-go/.travis.yml b/vendor/github.com/aws/smithy-go/.travis.yml new file mode 100644 index 00000000000..f8d1035cc33 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/.travis.yml @@ -0,0 +1,28 @@ +language: go +sudo: true +dist: bionic + +branches: + only: + - main + +os: + - linux + - osx + # Travis doesn't work with windows and Go tip + #- windows + +go: + - tip + +matrix: + allow_failures: + - go: tip + +before_install: + - if [ "$TRAVIS_OS_NAME" = "windows" ]; then choco install make; fi + - (cd /tmp/; go get golang.org/x/lint/golint) + +script: + - make go test -v ./...; + diff --git a/vendor/github.com/aws/smithy-go/CHANGELOG.md b/vendor/github.com/aws/smithy-go/CHANGELOG.md new file mode 100644 index 00000000000..9cca07b5563 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/CHANGELOG.md @@ -0,0 +1,194 @@ +# Release (2023-10-31) + +## Module Highlights +* `github.com/aws/smithy-go`: v1.16.0 + * **Feature**: **LANG**: Bump minimum go version to 1.19. 
+
+# Release (2023-10-06)
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.15.0
+  * **Feature**: Add `http.WithHeaderComment` middleware.
+
+# Release (2023-08-18)
+
+* No change notes available for this release.
+
+# Release (2023-08-07)
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.14.1
+  * **Bug Fix**: Prevent duplicated error returns in EndpointResolverV2 default implementation.
+
+# Release (2023-07-31)
+
+## General Highlights
+* **Feature**: Adds support for smithy-modeled endpoint resolution.
+
+# Release (2022-12-02)
+
+* No change notes available for this release.
+
+# Release (2022-10-24)
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.13.4
+  * **Bug Fix**: fixed document type checking for encoding nested types
+
+# Release (2022-09-14)
+
+* No change notes available for this release.
+
+# Release (v1.13.2)
+
+* No change notes available for this release.
+
+# Release (v1.13.1)
+
+* No change notes available for this release.
+
+# Release (v1.13.0)
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.13.0
+  * **Feature**: Adds support for the Smithy httpBearerAuth authentication trait to smithy-go. This allows the SDK to support the bearer authentication flow for API operations decorated with httpBearerAuth. An API client will need to be provided with its own bearer.TokenProvider implementation or use the bearer.StaticTokenProvider implementation.
+
+# Release (v1.12.1)
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.12.1
+  * **Bug Fix**: Fixes a bug where JSON object keys were not escaped.
+
+# Release (v1.12.0)
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.12.0
+  * **Feature**: `transport/http`: Add utility for setting context metadata when operation serializer automatically assigns content-type default value.
+
+# Release (v1.11.3)
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.11.3
+  * **Dependency Update**: Updates smithy-go unit test dependency go-cmp to 0.5.8.
+
+# Release (v1.11.2)
+
+* No change notes available for this release.
+
+# Release (v1.11.1)
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.11.1
+  * **Bug Fix**: Updates the smithy-go HTTP Request to correctly handle building the request to an http.Request. Related to [aws/aws-sdk-go-v2#1583](https://github.com/aws/aws-sdk-go-v2/issues/1583)
+
+# Release (v1.11.0)
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.11.0
+  * **Feature**: Updates deserialization of header list to support quoted strings
+
+# Release (v1.10.0)
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.10.0
+  * **Feature**: Add `ptr.Duration`, `ptr.ToDuration`, `ptr.DurationSlice`, `ptr.ToDurationSlice`, `ptr.DurationMap`, and `ptr.ToDurationMap` functions for the `time.Duration` type.
+
+# Release (v1.9.1)
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.9.1
+  * **Documentation**: Fixes various typos in Go package documentation.
+
+# Release (v1.9.0)
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.9.0
+  * **Feature**: sync: OnceErr can be used to concurrently record a signal when an error has occurred.
+  * **Bug Fix**: `transport/http`: CloseResponseBody and ErrorCloseResponseBody middleware have been updated to ensure that the body is fully drained before closing.
+
+# Release v1.8.1
+
+### Smithy Go Module
+* **Bug Fix**: Fixed an issue that would cause the HTTP Content-Length to be set to 0 if the stream body was not set.
+  * Fixes [aws/aws-sdk-go-v2#1418](https://github.com/aws/aws-sdk-go-v2/issues/1418)
+
+# Release v1.8.0
+
+### Smithy Go Module
+
+* `time`: Add support for parsing additional DateTime timestamp format ([#324](https://github.com/aws/smithy-go/pull/324))
+  * Adds support for parsing DateTime timestamp formatted time similar to RFC 3339, but without the `Z` character or UTC offset.
+  * Fixes [#1387](https://github.com/aws/aws-sdk-go-v2/issues/1387)
+
+# Release v1.7.0
+
+### Smithy Go Module
+* `ptr`: Handle error for deferred file close call ([#314](https://github.com/aws/smithy-go/pull/314))
+  * Handle error for defer close call
+* `middleware`: Add Clone to Metadata ([#318](https://github.com/aws/smithy-go/pull/318))
+  * Adds a new Clone method to the middleware Metadata type. This provides a shallow clone of the entries in the Metadata.
+* `document`: Add new package for document shape serialization support ([#310](https://github.com/aws/smithy-go/pull/310))
+
+### Codegen
+* Add Smithy Document Shape Support ([#310](https://github.com/aws/smithy-go/pull/310))
+  * Adds support for Smithy Document shapes and supporting types for protocols to implement support
+
+# Release v1.6.0 (2021-07-15)
+
+### Smithy Go Module
+* `encoding/httpbinding`: Support has been added for encoding `float32` and `float64` values that are `NaN`, `Infinity`, or `-Infinity`. ([#316](https://github.com/aws/smithy-go/pull/316))
+
+### Codegen
+* Adds support for handling `float32` and `float64` `NaN` values in HTTP Protocol Unit Tests. ([#316](https://github.com/aws/smithy-go/pull/316))
+* Adds support for protocol generator implementations to override the error code string returned by `ErrorCode` methods on generated error types. ([#315](https://github.com/aws/smithy-go/pull/315))
+
+# Release v1.5.0 (2021-06-25)
+
+### Smithy Go module
+* `time`: Update time parsing to not be as strict for HTTPDate and DateTime ([#307](https://github.com/aws/smithy-go/pull/307))
+  * Fixes [#302](https://github.com/aws/smithy-go/issues/302) by changing time to UTC before formatting so no local offset time is lost.
+
+### Codegen
+* Adds support for integrating client members via plugins ([#301](https://github.com/aws/smithy-go/pull/301))
+* Fix serialization of enum types marked with payload trait ([#296](https://github.com/aws/smithy-go/pull/296))
+* Update generation of API client modules to include a manifest of files generated ([#283](https://github.com/aws/smithy-go/pull/283))
+* Update Group Java group ID for smithy-go generator ([#298](https://github.com/aws/smithy-go/pull/298))
+* Support the delegation of determining the errors that can occur for an operation ([#304](https://github.com/aws/smithy-go/pull/304))
+* Support for marking and documenting deprecated client config fields. ([#303](https://github.com/aws/smithy-go/pull/303))
+
+# Release v1.4.0 (2021-05-06)
+
+### Smithy Go module
+* `encoding/xml`: Fix escaping of Next Line and Line Start in XML Encoder ([#267](https://github.com/aws/smithy-go/pull/267))
+
+### Codegen
+* Add support for Smithy 1.7 ([#289](https://github.com/aws/smithy-go/pull/289))
+* Add support for httpQueryParams location
+* Add support for model renaming conflict resolution with service closure
+
+# Release v1.3.1 (2021-04-08)
+
+### Smithy Go module
+* `transport/http`: Loosen endpoint hostname validation to allow specifying port numbers. ([#279](https://github.com/aws/smithy-go/pull/279))
+* `io`: Fix RingBuffer panics due to out of bounds index.
([#282](https://github.com/aws/smithy-go/pull/282)) + +# Release v1.3.0 (2021-04-01) + +### Smithy Go module +* `transport/http`: Add utility to safely join string to url path, and url raw query. + +### Codegen +* Update HttpBindingProtocolGenerator to use http/transport JoinPath and JoinQuery utility. + +# Release v1.2.0 (2021-03-12) + +### Smithy Go module +* Fix support for parsing shortened year format in HTTP Date header. +* Fix GitHub APIDiff action workflow to get gorelease tool correctly. +* Fix codegen artifact unit test for Go 1.16 + +### Codegen +* Fix generating paginator nil parameter handling before usage. +* Fix Serialize unboxed members decorated as required. +* Add ability to define resolvers at both client construction and operation invocation. +* Support for extending paginators with custom runtime trait diff --git a/vendor/github.com/aws/smithy-go/CODE_OF_CONDUCT.md b/vendor/github.com/aws/smithy-go/CODE_OF_CONDUCT.md new file mode 100644 index 00000000000..5b627cfa60b --- /dev/null +++ b/vendor/github.com/aws/smithy-go/CODE_OF_CONDUCT.md @@ -0,0 +1,4 @@ +## Code of Conduct +This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). +For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact +opensource-codeofconduct@amazon.com with any additional questions or comments. diff --git a/vendor/github.com/aws/smithy-go/CONTRIBUTING.md b/vendor/github.com/aws/smithy-go/CONTRIBUTING.md new file mode 100644 index 00000000000..c4b6a1c5081 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/CONTRIBUTING.md @@ -0,0 +1,59 @@ +# Contributing Guidelines + +Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional +documentation, we greatly value feedback and contributions from our community. + +Please read through this document before submitting any issues or pull requests to ensure we have all the necessary +information to effectively respond to your bug report or contribution. + + +## Reporting Bugs/Feature Requests + +We welcome you to use the GitHub issue tracker to report bugs or suggest features. + +When filing an issue, please check existing open, or recently closed, issues to make sure somebody else hasn't already +reported the issue. Please try to include as much information as you can. Details like these are incredibly useful: + +* A reproducible test case or series of steps +* The version of our code being used +* Any modifications you've made relevant to the bug +* Anything unusual about your environment or deployment + + +## Contributing via Pull Requests +Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that: + +1. You are working against the latest source on the *main* branch. +2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already. +3. You open an issue to discuss any significant work - we would hate for your time to be wasted. + +To send us a pull request, please: + +1. Fork the repository. +2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change. +3. Ensure local tests pass. +4. Commit to your fork using clear commit messages. +5. Send us a pull request, answering any default questions in the pull request interface. +6. 
Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation. + +GitHub provides additional document on [forking a repository](https://help.github.com/articles/fork-a-repo/) and +[creating a pull request](https://help.github.com/articles/creating-a-pull-request/). + + +## Finding contributions to work on +Looking at the existing issues is a great way to find something to contribute on. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any 'help wanted' issues is a great place to start. + + +## Code of Conduct +This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). +For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact +opensource-codeofconduct@amazon.com with any additional questions or comments. + + +## Security issue notifications +If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public github issue. + + +## Licensing + +See the [LICENSE](LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution. diff --git a/vendor/github.com/aws/smithy-go/LICENSE b/vendor/github.com/aws/smithy-go/LICENSE new file mode 100644 index 00000000000..67db8588217 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/LICENSE @@ -0,0 +1,175 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. diff --git a/vendor/github.com/aws/smithy-go/Makefile b/vendor/github.com/aws/smithy-go/Makefile new file mode 100644 index 00000000000..4b3c209373c --- /dev/null +++ b/vendor/github.com/aws/smithy-go/Makefile @@ -0,0 +1,97 @@ +PRE_RELEASE_VERSION ?= + +RELEASE_MANIFEST_FILE ?= +RELEASE_CHGLOG_DESC_FILE ?= + +REPOTOOLS_VERSION ?= latest +REPOTOOLS_MODULE = github.com/awslabs/aws-go-multi-module-repository-tools +REPOTOOLS_CMD_CALCULATE_RELEASE = ${REPOTOOLS_MODULE}/cmd/calculaterelease@${REPOTOOLS_VERSION} +REPOTOOLS_CMD_CALCULATE_RELEASE_ADDITIONAL_ARGS ?= +REPOTOOLS_CMD_UPDATE_REQUIRES = ${REPOTOOLS_MODULE}/cmd/updaterequires@${REPOTOOLS_VERSION} +REPOTOOLS_CMD_UPDATE_MODULE_METADATA = ${REPOTOOLS_MODULE}/cmd/updatemodulemeta@${REPOTOOLS_VERSION} +REPOTOOLS_CMD_GENERATE_CHANGELOG = ${REPOTOOLS_MODULE}/cmd/generatechangelog@${REPOTOOLS_VERSION} +REPOTOOLS_CMD_CHANGELOG = ${REPOTOOLS_MODULE}/cmd/changelog@${REPOTOOLS_VERSION} +REPOTOOLS_CMD_TAG_RELEASE = ${REPOTOOLS_MODULE}/cmd/tagrelease@${REPOTOOLS_VERSION} +REPOTOOLS_CMD_MODULE_VERSION = ${REPOTOOLS_MODULE}/cmd/moduleversion@${REPOTOOLS_VERSION} + +UNIT_TEST_TAGS= +BUILD_TAGS= + +ifneq ($(PRE_RELEASE_VERSION),) + REPOTOOLS_CMD_CALCULATE_RELEASE_ADDITIONAL_ARGS += -preview=${PRE_RELEASE_VERSION} +endif + +smithy-publish-local: + cd codegen && ./gradlew publishToMavenLocal + +smithy-build: + cd codegen && ./gradlew build + +smithy-clean: + cd codegen && ./gradlew clean + +################## +# Linting/Verify # +################## +.PHONY: verify vet + +verify: vet + +vet: + go vet ${BUILD_TAGS} --all ./... + +################ +# Unit Testing # +################ +.PHONY: unit unit-race unit-test unit-race-test + +unit: verify + go vet ${BUILD_TAGS} --all ./... && \ + go test ${BUILD_TAGS} ${RUN_NONE} ./... && \ + go test -timeout=1m ${UNIT_TEST_TAGS} ./... + +unit-race: verify + go vet ${BUILD_TAGS} --all ./... && \ + go test ${BUILD_TAGS} ${RUN_NONE} ./... && \ + go test -timeout=1m ${UNIT_TEST_TAGS} -race -cpu=4 ./... + +unit-test: verify + go test -timeout=1m ${UNIT_TEST_TAGS} ./... + +unit-race-test: verify + go test -timeout=1m ${UNIT_TEST_TAGS} -race -cpu=4 ./... 
+ +##################### +# Release Process # +##################### +.PHONY: preview-release pre-release-validation release + +preview-release: + go run ${REPOTOOLS_CMD_CALCULATE_RELEASE} ${REPOTOOLS_CMD_CALCULATE_RELEASE_ADDITIONAL_ARGS} + +pre-release-validation: + @if [[ -z "${RELEASE_MANIFEST_FILE}" ]]; then \ + echo "RELEASE_MANIFEST_FILE is required to specify the file to write the release manifest" && false; \ + fi + @if [[ -z "${RELEASE_CHGLOG_DESC_FILE}" ]]; then \ + echo "RELEASE_CHGLOG_DESC_FILE is required to specify the file to write the release notes" && false; \ + fi + +release: pre-release-validation + go run ${REPOTOOLS_CMD_CALCULATE_RELEASE} -o ${RELEASE_MANIFEST_FILE} ${REPOTOOLS_CMD_CALCULATE_RELEASE_ADDITIONAL_ARGS} + go run ${REPOTOOLS_CMD_UPDATE_REQUIRES} -release ${RELEASE_MANIFEST_FILE} + go run ${REPOTOOLS_CMD_UPDATE_MODULE_METADATA} -release ${RELEASE_MANIFEST_FILE} + go run ${REPOTOOLS_CMD_GENERATE_CHANGELOG} -release ${RELEASE_MANIFEST_FILE} -o ${RELEASE_CHGLOG_DESC_FILE} + go run ${REPOTOOLS_CMD_CHANGELOG} rm -all + go run ${REPOTOOLS_CMD_TAG_RELEASE} -release ${RELEASE_MANIFEST_FILE} + +module-version: + @go run ${REPOTOOLS_CMD_MODULE_VERSION} . + +############## +# Repo Tools # +############## +.PHONY: install-changelog + +install-changelog: + go install ${REPOTOOLS_MODULE}/cmd/changelog@${REPOTOOLS_VERSION} diff --git a/vendor/github.com/aws/smithy-go/NOTICE b/vendor/github.com/aws/smithy-go/NOTICE new file mode 100644 index 00000000000..616fc588945 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/NOTICE @@ -0,0 +1 @@ +Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. diff --git a/vendor/github.com/aws/smithy-go/README.md b/vendor/github.com/aws/smithy-go/README.md new file mode 100644 index 00000000000..c374f69283b --- /dev/null +++ b/vendor/github.com/aws/smithy-go/README.md @@ -0,0 +1,27 @@ +## Smithy Go + +[![Go Build Status](https://github.com/aws/smithy-go/actions/workflows/go.yml/badge.svg?branch=main)](https://github.com/aws/smithy-go/actions/workflows/go.yml)[![Codegen Build Status](https://github.com/aws/smithy-go/actions/workflows/codegen.yml/badge.svg?branch=main)](https://github.com/aws/smithy-go/actions/workflows/codegen.yml) + +[Smithy](https://smithy.io/) code generators for Go. + +**WARNING: All interfaces are subject to change.** + +## Can I use this? + +In order to generate a usable smithy client you must provide a [protocol definition](https://github.com/aws/smithy-go/blob/main/codegen/smithy-go-codegen/src/main/java/software/amazon/smithy/go/codegen/integration/ProtocolGenerator.java), +such as [AWS restJson1](https://smithy.io/2.0/aws/protocols/aws-restjson1-protocol.html), +in order to generate transport mechanisms and serialization/deserialization +code ("serde") accordingly. + +The code generator does not currently support any protocols out of the box, +therefore the useability of this project on its own is currently limited. +Support for all [AWS protocols](https://smithy.io/2.0/aws/protocols/index.html) +exists in [aws-sdk-go-v2](https://github.com/aws/aws-sdk-go-v2). We are +tracking the movement of those out of the SDK into smithy-go in +[#458](https://github.com/aws/smithy-go/issues/458), but there's currently no +timeline for doing so. + +## License + +This project is licensed under the Apache-2.0 License. 
+
diff --git a/vendor/github.com/aws/smithy-go/auth/bearer/docs.go b/vendor/github.com/aws/smithy-go/auth/bearer/docs.go
new file mode 100644
index 00000000000..1c9b9715cb0
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/auth/bearer/docs.go
@@ -0,0 +1,3 @@
+// Package bearer provides middleware and utilities for authenticating API
+// operation calls with a Bearer Token.
+package bearer
diff --git a/vendor/github.com/aws/smithy-go/auth/bearer/middleware.go b/vendor/github.com/aws/smithy-go/auth/bearer/middleware.go
new file mode 100644
index 00000000000..8c7d7209959
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/auth/bearer/middleware.go
@@ -0,0 +1,104 @@
+package bearer
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Message is the middleware stack's request transport message value.
+type Message interface{}
+
+// Signer provides an interface for implementations to decorate a request
+// message with a bearer token. The signer is responsible for validating that
+// the message type is compatible with the signer.
+type Signer interface {
+	SignWithBearerToken(context.Context, Token, Message) (Message, error)
+}
+
+// AuthenticationMiddleware provides the Finalize middleware step for signing
+// a request message with a bearer token.
+type AuthenticationMiddleware struct {
+	signer        Signer
+	tokenProvider TokenProvider
+}
+
+// AddAuthenticationMiddleware helper adds the AuthenticationMiddleware to the
+// middleware Stack in the Finalize step with the options provided.
+func AddAuthenticationMiddleware(s *middleware.Stack, signer Signer, tokenProvider TokenProvider) error {
+	return s.Finalize.Add(
+		NewAuthenticationMiddleware(signer, tokenProvider),
+		middleware.After,
+	)
+}
+
+// NewAuthenticationMiddleware returns an initialized AuthenticationMiddleware.
+func NewAuthenticationMiddleware(signer Signer, tokenProvider TokenProvider) *AuthenticationMiddleware {
+	return &AuthenticationMiddleware{
+		signer:        signer,
+		tokenProvider: tokenProvider,
+	}
+}
+
+const authenticationMiddlewareID = "BearerTokenAuthentication"
+
+// ID returns the resolver identifier.
+func (m *AuthenticationMiddleware) ID() string {
+	return authenticationMiddlewareID
+}
+
+// HandleFinalize implements the FinalizeMiddleware interface in order to
+// update the request with bearer token authentication.
+func (m *AuthenticationMiddleware) HandleFinalize(
+	ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+	out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	token, err := m.tokenProvider.RetrieveBearerToken(ctx)
+	if err != nil {
+		return out, metadata, fmt.Errorf("failed AuthenticationMiddleware wrap message, %w", err)
+	}
+
+	signedMessage, err := m.signer.SignWithBearerToken(ctx, token, in.Request)
+	if err != nil {
+		return out, metadata, fmt.Errorf("failed AuthenticationMiddleware sign message, %w", err)
+	}
+
+	in.Request = signedMessage
+	return next.HandleFinalize(ctx, in)
+}
+
+// SignHTTPSMessage provides a bearer token authentication implementation that
+// will sign the message with the provided bearer token.
+//
+// Will fail if the message is not a smithy-go HTTP request or the request is
+// not HTTPS.
+type SignHTTPSMessage struct{}
+
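+// exampleSignRequest is an editor-added sketch (not part of the upstream
+// file) showing the SignHTTPSMessage signer in isolation: a static bearer
+// token is retrieved and applied to a smithy-go HTTP request. The token
+// value is a placeholder.
+func exampleSignRequest(ctx context.Context, req *smithyhttp.Request) (Message, error) {
+	provider := StaticTokenProvider{Token: Token{Value: "example-token"}}
+	token, err := provider.RetrieveBearerToken(ctx)
+	if err != nil {
+		return nil, err
+	}
+	// Fails unless req.IsHTTPS(); the signer clones the request and sets
+	// the "Authorization: Bearer ..." header on the copy.
+	return SignHTTPSMessage{}.SignWithBearerToken(ctx, token, req)
+}
+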
+// NewSignHTTPSMessage returns an initialized signer for HTTP messages.
+func NewSignHTTPSMessage() *SignHTTPSMessage {
+	return &SignHTTPSMessage{}
+}
+
+// SignWithBearerToken returns a copy of the HTTP request with the bearer token
+// added via the "Authorization" header, per RFC 6750, https://datatracker.ietf.org/doc/html/rfc6750.
+//
+// Returns an error if the request's URL scheme is not HTTPS, or the request
+// message is not a smithy-go HTTP Request pointer type.
+func (SignHTTPSMessage) SignWithBearerToken(ctx context.Context, token Token, message Message) (Message, error) {
+	req, ok := message.(*smithyhttp.Request)
+	if !ok {
+		return nil, fmt.Errorf("expect smithy-go HTTP Request, got %T", message)
+	}
+
+	if !req.IsHTTPS() {
+		return nil, fmt.Errorf("bearer token with HTTP request requires HTTPS")
+	}
+
+	reqClone := req.Clone()
+	reqClone.Header.Set("Authorization", "Bearer "+token.Value)
+
+	return reqClone, nil
+}
diff --git a/vendor/github.com/aws/smithy-go/auth/bearer/token.go b/vendor/github.com/aws/smithy-go/auth/bearer/token.go
new file mode 100644
index 00000000000..be260d4c764
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/auth/bearer/token.go
@@ -0,0 +1,50 @@
+package bearer
+
+import (
+	"context"
+	"time"
+)
+
+// Token provides a type wrapping a bearer token and expiration metadata.
+type Token struct {
+	Value string
+
+	CanExpire bool
+	Expires   time.Time
+}
+
+// Expired returns whether the token's Expires time is before or equal to the
+// time provided. If CanExpire is false, Expired will always return false.
+func (t Token) Expired(now time.Time) bool {
+	if !t.CanExpire {
+		return false
+	}
+	now = now.Round(0)
+	return now.Equal(t.Expires) || now.After(t.Expires)
+}
+
+// TokenProvider provides an interface for retrieving bearer tokens.
+type TokenProvider interface {
+	RetrieveBearerToken(context.Context) (Token, error)
+}
+
+// TokenProviderFunc provides a helper utility to wrap a function as a type
+// that implements the TokenProvider interface.
+type TokenProviderFunc func(context.Context) (Token, error)
+
+// RetrieveBearerToken calls the wrapped function, returning the Token or
+// error.
+func (fn TokenProviderFunc) RetrieveBearerToken(ctx context.Context) (Token, error) {
+	return fn(ctx)
+}
+
+// StaticTokenProvider provides a utility for wrapping a static bearer token
+// value within an implementation of a token provider.
+type StaticTokenProvider struct {
+	Token Token
+}
+
+// RetrieveBearerToken returns the static token specified.
+func (s StaticTokenProvider) RetrieveBearerToken(context.Context) (Token, error) {
+	return s.Token, nil
+}
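+
+// exampleProvider is an editor-added sketch (not part of the upstream file)
+// showing how an inline function can act as a TokenProvider via
+// TokenProviderFunc; the token value and lifetime are placeholders.
+var exampleProvider TokenProvider = TokenProviderFunc(
+	func(ctx context.Context) (Token, error) {
+		// A real provider would fetch and refresh the token here.
+		return Token{
+			Value:     "example-token",
+			CanExpire: true,
+			Expires:   time.Now().Add(15 * time.Minute),
+		}, nil
+	},
+)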
diff --git a/vendor/github.com/aws/smithy-go/auth/bearer/token_cache.go b/vendor/github.com/aws/smithy-go/auth/bearer/token_cache.go
new file mode 100644
index 00000000000..223ddf52bba
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/auth/bearer/token_cache.go
@@ -0,0 +1,208 @@
+package bearer
+
+import (
+	"context"
+	"fmt"
+	"sync/atomic"
+	"time"
+
+	smithycontext "github.com/aws/smithy-go/context"
+	"github.com/aws/smithy-go/internal/sync/singleflight"
+)
+
+// package variable that can be overridden in unit tests.
+var timeNow = time.Now
+
+// TokenCacheOptions provides a set of optional configuration options for the
+// TokenCache TokenProvider.
+type TokenCacheOptions struct {
+	// The duration before the token expires at which the credentials will be
+	// refreshed. If DisableAsyncRefresh is true, the RetrieveBearerToken calls
+	// will be blocking.
+	//
+	// Asynchronous refreshes are deduplicated, and only one will be in-flight
+	// at a time. If the token expires while an asynchronous refresh is in
+	// flight, the next call to RetrieveBearerToken will block on that refresh
+	// to return.
+	RefreshBeforeExpires time.Duration
+
+	// The timeout the underlying TokenProvider's RetrieveBearerToken call must
+	// return within, or will be canceled. Defaults to 0, no timeout.
+	//
+	// If the timeout is 0, it's possible for the underlying tokenProvider's
+	// RetrieveBearerToken call to block forever, preventing subsequent
+	// TokenCache attempts to refresh the token.
+	//
+	// If this timeout is reached all pending deduplicated calls to
+	// TokenCache RetrieveBearerToken will fail with an error.
+	RetrieveBearerTokenTimeout time.Duration
+
+	// The minimum duration between asynchronous refresh attempts. If the most
+	// recent asynchronous refresh attempt was within the minimum delay
+	// duration, the call to retrieve will return the current cached token, if
+	// not expired.
+	//
+	// The asynchronous retrieve is deduplicated across multiple calls when
+	// RetrieveBearerToken is called. The asynchronous retrieve is not a
+	// periodic task. It is only performed when the token has not yet expired,
+	// and the current item is within the RefreshBeforeExpires window, and the
+	// TokenCache's RetrieveBearerToken method is called.
+	//
+	// If 0 (the default), there will be no minimum delay between asynchronous
+	// refresh attempts.
+	//
+	// If DisableAsyncRefresh is true, this option is ignored.
+	AsyncRefreshMinimumDelay time.Duration
+
+	// Sets whether the TokenCache will attempt to refresh the token in the
+	// background asynchronously instead of blocking for credentials to be
+	// refreshed. If disabled, token refresh will be blocking.
+	//
+	// The first call to RetrieveBearerToken will always be blocking, because
+	// there is no cached token.
+	DisableAsyncRefresh bool
+}
+
+// TokenCache provides a utility to cache Bearer Authentication tokens from a
+// wrapped TokenProvider. The TokenCache has options to configure the cache's
+// early and asynchronous refresh of the token.
+type TokenCache struct {
+	options  TokenCacheOptions
+	provider TokenProvider
+
+	cachedToken            atomic.Value
+	lastRefreshAttemptTime atomic.Value
+	sfGroup                singleflight.Group
+}
+
+// NewTokenCache returns an initialized TokenCache that implements the
+// TokenProvider interface, wrapping the provider passed in. It also takes a
+// set of optional functional option parameters to configure the token cache.
+func NewTokenCache(provider TokenProvider, optFns ...func(*TokenCacheOptions)) *TokenCache {
+	var options TokenCacheOptions
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	return &TokenCache{
+		options:  options,
+		provider: provider,
+	}
+}
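+
+// exampleNewTokenCache is an editor-added sketch (not part of the upstream
+// file) showing a TokenCache configured to refresh five minutes before
+// expiry with a bounded retrieve timeout; both durations are placeholders.
+func exampleNewTokenCache(provider TokenProvider) *TokenCache {
+	return NewTokenCache(provider, func(o *TokenCacheOptions) {
+		o.RefreshBeforeExpires = 5 * time.Minute
+		o.RetrieveBearerTokenTimeout = 30 * time.Second
+	})
+}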
+
+// RetrieveBearerToken returns the token if it could be obtained, or error if a
+// valid token could not be retrieved.
+//
+// The passed in Context's cancel/deadline/timeout will impact only this
+// individual retrieve call and not any other already queued up calls. This
+// means the underlying provider's RetrieveBearerToken calls could block
+// forever, and not be canceled with the Context. Set
+// RetrieveBearerTokenTimeout to provide a timeout, preventing the underlying
+// TokenProvider blocking forever.
+//
+// By default, if the passed in Context is canceled, all of its values will be
+// considered expired. The wrapped TokenProvider will not be able to look up
+// the values from the Context once it is expired. This is done to protect
+// against expired values no longer being valid. To disable this behavior, use
+// smithy-go's context.WithPreserveExpiredValues to add a value to the Context
+// before calling RetrieveBearerToken to enable support for expired values.
+//
+// Without RetrieveBearerTokenTimeout there is the potential for an underlying
+// provider's RetrieveBearerToken call to block forever, stalling subsequent
+// attempts at refreshing the token.
+func (p *TokenCache) RetrieveBearerToken(ctx context.Context) (Token, error) {
+	cachedToken, ok := p.getCachedToken()
+	if !ok || cachedToken.Expired(timeNow()) {
+		return p.refreshBearerToken(ctx)
+	}
+
+	// Check if the token should be refreshed before it expires.
+	refreshToken := cachedToken.Expired(timeNow().Add(p.options.RefreshBeforeExpires))
+	if !refreshToken {
+		return cachedToken, nil
+	}
+
+	if p.options.DisableAsyncRefresh {
+		return p.refreshBearerToken(ctx)
+	}
+
+	p.tryAsyncRefresh(ctx)
+
+	return cachedToken, nil
+}
+
+// tryAsyncRefresh attempts to asynchronously refresh the token, returning the
+// already cached token. If the AsyncRefreshMinimumDelay option is not zero,
+// and the duration since the last refresh is less than that value, nothing
+// will be done.
+func (p *TokenCache) tryAsyncRefresh(ctx context.Context) {
+	if p.options.AsyncRefreshMinimumDelay != 0 {
+		var lastRefreshAttempt time.Time
+		if v := p.lastRefreshAttemptTime.Load(); v != nil {
+			lastRefreshAttempt = v.(time.Time)
+		}
+
+		if timeNow().Before(lastRefreshAttempt.Add(p.options.AsyncRefreshMinimumDelay)) {
+			return
+		}
+	}
+
+	// Ignore the returned channel so this won't be blocking, and limit the
+	// number of additional goroutines created.
+	p.sfGroup.DoChan("async-refresh", func() (interface{}, error) {
+		res, err := p.refreshBearerToken(ctx)
+		if p.options.AsyncRefreshMinimumDelay != 0 {
+			var refreshAttempt time.Time
+			if err != nil {
+				refreshAttempt = timeNow()
+			}
+			p.lastRefreshAttemptTime.Store(refreshAttempt)
+		}
+
+		return res, err
+	})
+}
+
+func (p *TokenCache) refreshBearerToken(ctx context.Context) (Token, error) {
+	resCh := p.sfGroup.DoChan("refresh-token", func() (interface{}, error) {
+		ctx := smithycontext.WithSuppressCancel(ctx)
+		if v := p.options.RetrieveBearerTokenTimeout; v != 0 {
+			var cancel func()
+			ctx, cancel = context.WithTimeout(ctx, v)
+			defer cancel()
+		}
+		return p.singleRetrieve(ctx)
+	})
+
+	select {
+	case res := <-resCh:
+		return res.Val.(Token), res.Err
+	case <-ctx.Done():
+		return Token{}, fmt.Errorf("retrieve bearer token canceled, %w", ctx.Err())
+	}
+}
+
+func (p *TokenCache) singleRetrieve(ctx context.Context) (interface{}, error) {
+	token, err := p.provider.RetrieveBearerToken(ctx)
+	if err != nil {
+		return Token{}, fmt.Errorf("failed to retrieve bearer token, %w", err)
+	}
+
+	p.cachedToken.Store(&token)
+	return token, nil
+}
+// getCachedToken returns the currently cached token and true if found. Returns
+// false if no token is cached.
+func (p *TokenCache) getCachedToken() (Token, bool) {
+	v := p.cachedToken.Load()
+	if v == nil {
+		return Token{}, false
+	}
+
+	t := v.(*Token)
+	if t == nil || t.Value == "" {
+		return Token{}, false
+	}
+
+	return *t, true
+}
diff --git a/vendor/github.com/aws/smithy-go/context/suppress_expired.go b/vendor/github.com/aws/smithy-go/context/suppress_expired.go
new file mode 100644
index 00000000000..a39b84a2784
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/context/suppress_expired.go
@@ -0,0 +1,81 @@
+package context
+
+import "context"
+
+// valueOnlyContext provides a utility to preserve only the values of a
+// Context, suppressing any cancellation or deadline on that context being
+// propagated downstream of this value.
+//
+// If preserveExpiredValues is false (default), and the valueCtx is canceled,
+// calls to look up values with the Values method will always return nil.
+// Setting preserveExpiredValues to true will allow the valueOnlyContext to
+// look up values in valueCtx even if valueCtx is canceled.
+//
+// Based on the Go standard library's net/lookup.go onlyValuesCtx utility.
+// https://github.com/golang/go/blob/da2773fe3e2f6106634673a38dc3a6eb875fe7d8/src/net/lookup.go
+type valueOnlyContext struct {
+	context.Context
+
+	preserveExpiredValues bool
+	valuesCtx             context.Context
+}
+
+var _ context.Context = (*valueOnlyContext)(nil)
+
+// Value looks up the key, returning its value. If configured to not preserve
+// values of expired contexts, and the wrapping context is canceled, nil will
+// be returned.
+func (v *valueOnlyContext) Value(key interface{}) interface{} {
+	if !v.preserveExpiredValues {
+		select {
+		case <-v.valuesCtx.Done():
+			return nil
+		default:
+		}
+	}
+
+	return v.valuesCtx.Value(key)
+}
+
+// WithSuppressCancel wraps the Context value, suppressing its deadline and
+// cancellation events being propagated downstream to consumers of the returned
+// context.
+//
+// By default the wrapped Context's Values are available downstream until the
+// wrapped Context is canceled. Once the wrapped Context is canceled, the
+// Values method called on the returned context will no longer look up any
+// key, as the values are now considered expired.
+//
+// To override this behavior, use WithPreserveExpiredValues on the Context
+// before it is wrapped by WithSuppressCancel. This will make the Context
+// returned by WithSuppressCancel allow lookup of expired values.
+func WithSuppressCancel(ctx context.Context) context.Context {
+	return &valueOnlyContext{
+		Context:   context.Background(),
+		valuesCtx: ctx,
+
+		preserveExpiredValues: GetPreserveExpiredValues(ctx),
+	}
+}
+
+type preserveExpiredValuesKey struct{}
+
+// WithPreserveExpiredValues adds a Value to the Context if expired values
+// should be preserved, and looked up by a Context wrapped by
+// WithSuppressCancel.
+//
+// WithPreserveExpiredValues must be added as a value to a Context before that
+// Context is wrapped by WithSuppressCancel.
+func WithPreserveExpiredValues(ctx context.Context, enable bool) context.Context {
+	return context.WithValue(ctx, preserveExpiredValuesKey{}, enable)
+}
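A short sketch of how WithPreserveExpiredValues and WithSuppressCancel compose; the requestIDKey type is illustrative and not part of smithy-go:

    package main

    import (
    	"context"
    	"fmt"

    	smithycontext "github.com/aws/smithy-go/context"
    )

    // requestIDKey is an illustrative context key type.
    type requestIDKey struct{}

    func main() {
    	ctx, cancel := context.WithCancel(context.Background())
    	ctx = context.WithValue(ctx, requestIDKey{}, "req-123")

    	// Opt in to value lookup surviving cancellation, then detach the
    	// context from its parent's cancellation and deadline.
    	ctx = smithycontext.WithPreserveExpiredValues(ctx, true)
    	detached := smithycontext.WithSuppressCancel(ctx)

    	cancel()

    	// The detached context is not canceled, and the value stays visible.
    	fmt.Println(detached.Err())                 // <nil>
    	fmt.Println(detached.Value(requestIDKey{})) // req-123
    }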
+func GetPreserveExpiredValues(ctx context.Context) bool {
+	v := ctx.Value(preserveExpiredValuesKey{})
+	if v != nil {
+		return v.(bool)
+	}
+	return false
+}
diff --git a/vendor/github.com/aws/smithy-go/doc.go b/vendor/github.com/aws/smithy-go/doc.go
new file mode 100644
index 00000000000..87b0c74b75c
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/doc.go
@@ -0,0 +1,2 @@
+// Package smithy provides the core components for a Smithy SDK.
+package smithy
diff --git a/vendor/github.com/aws/smithy-go/document.go b/vendor/github.com/aws/smithy-go/document.go
new file mode 100644
index 00000000000..dec498c57bf
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/document.go
@@ -0,0 +1,10 @@
+package smithy
+
+// Document provides access to loosely structured data in a document-like
+// format.
+//
+// Deprecated: See the github.com/aws/smithy-go/document package.
+type Document interface {
+	UnmarshalDocument(interface{}) error
+	GetValue() (interface{}, error)
+}
diff --git a/vendor/github.com/aws/smithy-go/document/doc.go b/vendor/github.com/aws/smithy-go/document/doc.go
new file mode 100644
index 00000000000..03055b7a1c2
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/document/doc.go
@@ -0,0 +1,12 @@
+// Package document provides interface definitions and error types for document types.
+//
+// A document is a protocol-agnostic type which supports a JSON-like data-model. You can use this type to send
+// UTF-8 strings, arbitrary precision numbers, booleans, nulls, a list of these values, and a map of UTF-8
+// strings to these values.
+//
+// API Clients expose document constructors in their respective client document packages which must be used to
+// Marshal and Unmarshal Go types to and from their respective protocol representations.
+//
+// See the Marshaler and Unmarshaler type documentation for more details on how Go types can be converted to and from
+// document types.
+package document
diff --git a/vendor/github.com/aws/smithy-go/document/document.go b/vendor/github.com/aws/smithy-go/document/document.go
new file mode 100644
index 00000000000..8f852d95c69
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/document/document.go
@@ -0,0 +1,153 @@
+package document
+
+import (
+	"fmt"
+	"math/big"
+	"strconv"
+)
+
+// Marshaler is an interface for a type that marshals a document to its protocol-specific byte representation and
+// returns the resulting bytes. A non-nil error will be returned if an error is encountered during marshaling.
+//
+// Marshal supports basic scalars (int, uint, float, bool, string), big.Int, and big.Float, maps, slices, and structs.
+// Anonymous nested types are flattened based on Go anonymous type visibility.
+//
+// When defining struct types, the `document` struct tag can be used to control how the value will be
+// marshaled into the resulting protocol document.
+//
+//	// Field is ignored
+//	Field int `document:"-"`
+//
+//	// Field object of key "myName"
+//	Field int `document:"myName"`
+//
+//	// Field object key of key "myName", and
+//	// Field is omitted if the field is a zero value for the type.
+//	Field int `document:"myName,omitempty"`
+//
+//	// Field object key of "Field", and
+//	// Field is omitted if the field is a zero value for the type.
+//	Field int `document:",omitempty"`
+//
+// All struct fields, including anonymous fields, are marshaled unless
+// any of the following conditions are met.
+//
+// - the field is not exported
+// - document field tag is "-"
+// - document field tag specifies "omitempty", and is a zero value.
+//
+// Pointer and interface values are encoded as the value pointed to or
+// contained in the interface. A nil value encodes as a null
+// value unless the `omitempty` struct tag is provided.
+//
+// Channel, complex, and function values are not encoded and will be skipped
+// when walking the value to be marshaled.
+//
+// time.Time is not supported and will cause the Marshaler to return an error. These values should be represented
+// by your application as a string or numerical representation.
+//
+// Errors that occur when marshaling will stop the marshaler, and return the error.
+//
+// Marshal cannot represent cyclic data structures and will not handle them.
+// Passing cyclic structures to Marshal will result in an infinite recursion.
+type Marshaler interface {
+	MarshalSmithyDocument() ([]byte, error)
+}
+
+// Unmarshaler is an interface for a type that unmarshals a document from its protocol-specific representation, and
+// stores the result into the value pointed to by v. If v is nil or not a pointer then InvalidUnmarshalError will be
+// returned.
+//
+// Unmarshaler supports the same encodings produced by a document Marshaler. This includes support for the `document`
+// struct field tag for controlling how struct fields are unmarshaled.
+//
+// Both generic interface{} and concrete types are valid unmarshal destination types. When unmarshaling a document
+// into an empty interface the Unmarshaler will store one of these values:
+//	bool,                   for boolean values
+//	document.Number,        for arbitrary-precision numbers (int64, float64, big.Int, big.Float)
+//	string,                 for string values
+//	[]interface{},          for array values
+//	map[string]interface{}, for objects
+//	nil,                    for null values
+//
+// When unmarshaling, any error that occurs will halt the unmarshal and return the error.
+type Unmarshaler interface {
+	UnmarshalSmithyDocument(v interface{}) error
+}
+
+type noSerde interface {
+	noSmithyDocumentSerde()
+}
+
+// NoSerde is a sentinel value to indicate that a given type should not be marshaled or unmarshaled
+// into a protocol document.
+type NoSerde struct{}
+
+func (n NoSerde) noSmithyDocumentSerde() {}
+
+var _ noSerde = (*NoSerde)(nil)
+
+// IsNoSerde returns whether the given type implements the no smithy document serde interface.
+func IsNoSerde(x interface{}) bool {
+	_, ok := x.(noSerde)
+	return ok
+}
+
+// Number is an arbitrary precision numerical value.
+type Number string
+
+// String returns the number as a string.
+func (n Number) String() string {
+	return string(n)
+}
+
+// Int64 returns the number as an int64.
+func (n Number) Int64() (int64, error) {
+	return n.intOfBitSize(64)
+}
+
+func (n Number) intOfBitSize(bitSize int) (int64, error) {
+	return strconv.ParseInt(string(n), 10, bitSize)
+}
+
+// Uint64 returns the number as a uint64.
+func (n Number) Uint64() (uint64, error) {
+	return n.uintOfBitSize(64)
+}
+
+func (n Number) uintOfBitSize(bitSize int) (uint64, error) {
+	return strconv.ParseUint(string(n), 10, bitSize)
+}
+
+// Float32 returns the number parsed as a 32-bit float; note that the parsed
+// value is returned as a float64.
+func (n Number) Float32() (float64, error) {
+	return n.floatOfBitSize(32)
+}
+
+// Float64 returns the number as a float64.
+func (n Number) Float64() (float64, error) {
+	return n.floatOfBitSize(64)
+}
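A quick sketch of Number's accessors, assuming the package imports as github.com/aws/smithy-go/document; the arbitrary-precision fallback uses the BigInt helper defined just below:

    package main

    import (
    	"fmt"

    	"github.com/aws/smithy-go/document"
    )

    func main() {
    	n := document.Number("12345")
    	i, err := n.Int64()
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(i + 1) // 12346

    	// A value outside the int64 range falls back to arbitrary precision.
    	wide := document.Number("123456789012345678901234567890")
    	if _, err := wide.Int64(); err != nil {
    		v, err := wide.BigInt()
    		if err != nil {
    			panic(err)
    		}
    		fmt.Println(v.String())
    	}
    }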
+// floatOfBitSize parses the number as a float of the given bit size,
+// returning the parsed value as a float64.
+func (n Number) floatOfBitSize(bitSize int) (float64, error) {
+	return strconv.ParseFloat(string(n), bitSize)
+}
+
+// BigFloat attempts to convert the number to a big.Float, returns an error if the operation fails.
+func (n Number) BigFloat() (*big.Float, error) {
+	f, ok := (&big.Float{}).SetString(string(n))
+	if !ok {
+		return nil, fmt.Errorf("failed to convert to big.Float")
+	}
+	return f, nil
+}
+
+// BigInt attempts to convert the number to a big.Int, returns an error if the operation fails.
+func (n Number) BigInt() (*big.Int, error) {
+	f, ok := (&big.Int{}).SetString(string(n), 10)
+	if !ok {
+		return nil, fmt.Errorf("failed to convert to big.Int")
+	}
+	return f, nil
+}
diff --git a/vendor/github.com/aws/smithy-go/document/errors.go b/vendor/github.com/aws/smithy-go/document/errors.go
new file mode 100644
index 00000000000..046a7a76531
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/document/errors.go
@@ -0,0 +1,75 @@
+package document
+
+import (
+	"fmt"
+	"reflect"
+)
+
+// UnmarshalTypeError is an error type representing an error
+// unmarshaling a Smithy document to a Go value type. This is different
+// from UnmarshalError in that it does not wrap an underlying error type.
+type UnmarshalTypeError struct {
+	Value string
+	Type  reflect.Type
+}
+
+// Error returns the string representation of the error,
+// satisfying the error interface.
+func (e *UnmarshalTypeError) Error() string {
+	return fmt.Sprintf("unmarshal failed, cannot unmarshal %s into Go value type %s",
+		e.Value, e.Type.String())
+}
+
+// An InvalidUnmarshalError is an error type representing an invalid type
+// encountered while unmarshaling a Smithy document to a Go value type.
+type InvalidUnmarshalError struct {
+	Type reflect.Type
+}
+
+// Error returns the string representation of the error,
+// satisfying the error interface.
+func (e *InvalidUnmarshalError) Error() string {
+	var msg string
+	if e.Type == nil {
+		msg = "cannot unmarshal to nil value"
+	} else if e.Type.Kind() != reflect.Ptr {
+		msg = fmt.Sprintf("cannot unmarshal to non-pointer value, got %s", e.Type.String())
+	} else {
+		msg = fmt.Sprintf("cannot unmarshal to nil value, %s", e.Type.String())
+	}
+
+	return fmt.Sprintf("unmarshal failed, %s", msg)
+}
+
+// An UnmarshalError wraps an error that occurred while unmarshaling a
+// Smithy document into a Go type. This is different from
+// UnmarshalTypeError in that it wraps the underlying error that occurred.
+type UnmarshalError struct {
+	Err   error
+	Value string
+	Type  reflect.Type
+}
+
+// Unwrap returns the underlying unmarshaling error.
+func (e *UnmarshalError) Unwrap() error {
+	return e.Err
+}
+
+// Error returns the string representation of the error,
+// satisfying the error interface.
+func (e *UnmarshalError) Error() string {
+	return fmt.Sprintf("unmarshal failed, cannot unmarshal %q into %s, %v",
+		e.Value, e.Type.String(), e.Err)
+}
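Because these error types are concrete, callers can distinguish them with errors.As; a small sketch using the exported types above:

    package main

    import (
    	"errors"
    	"fmt"
    	"reflect"

    	"github.com/aws/smithy-go/document"
    )

    func describe(err error) string {
    	var invalid *document.InvalidUnmarshalError
    	var typeErr *document.UnmarshalTypeError
    	switch {
    	case errors.As(err, &invalid):
    		return "bad destination: " + invalid.Error()
    	case errors.As(err, &typeErr):
    		return "type mismatch: " + typeErr.Error()
    	default:
    		return "other: " + err.Error()
    	}
    }

    func main() {
    	err := &document.UnmarshalTypeError{Value: "string", Type: reflect.TypeOf(0)}
    	fmt.Println(describe(err))
    }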
+// An InvalidMarshalError is an error type representing an error
+// occurring when marshaling a Go value type.
+type InvalidMarshalError struct {
+	Message string
+}
+
+// Error returns the string representation of the error,
+// satisfying the error interface.
+func (e *InvalidMarshalError) Error() string {
+	return fmt.Sprintf("marshal failed, %s", e.Message)
+}
diff --git a/vendor/github.com/aws/smithy-go/encoding/doc.go b/vendor/github.com/aws/smithy-go/encoding/doc.go
new file mode 100644
index 00000000000..792fdfa08b3
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/doc.go
@@ -0,0 +1,4 @@
+// Package encoding provides utilities for encoding values for specific
+// document encodings.
+
+package encoding
diff --git a/vendor/github.com/aws/smithy-go/encoding/encoding.go b/vendor/github.com/aws/smithy-go/encoding/encoding.go
new file mode 100644
index 00000000000..2fdfb522502
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/encoding.go
@@ -0,0 +1,40 @@
+package encoding
+
+import (
+	"fmt"
+	"math"
+	"strconv"
+)
+
+// EncodeFloat encodes a float value as per the stdlib encoders for the json and xml protocols.
+// This encodes a float value into dst while attempting to conform to ES6 ToString for Numbers.
+//
+// Based on encoding/json floatEncoder from the Go Standard Library
+// https://golang.org/src/encoding/json/encode.go
+func EncodeFloat(dst []byte, v float64, bits int) []byte {
+	if math.IsInf(v, 0) || math.IsNaN(v) {
+		panic(fmt.Sprintf("invalid float value: %s", strconv.FormatFloat(v, 'g', -1, bits)))
+	}
+
+	abs := math.Abs(v)
+	fmt := byte('f')
+
+	if abs != 0 {
+		if bits == 64 && (abs < 1e-6 || abs >= 1e21) || bits == 32 && (float32(abs) < 1e-6 || float32(abs) >= 1e21) {
+			fmt = 'e'
+		}
+	}
+
+	dst = strconv.AppendFloat(dst, v, fmt, -1, bits)
+
+	if fmt == 'e' {
+		// clean up e-09 to e-9
+		n := len(dst)
+		if n >= 4 && dst[n-4] == 'e' && dst[n-3] == '-' && dst[n-2] == '0' {
+			dst[n-2] = dst[n-1]
+			dst = dst[:n-1]
+		}
+	}
+
+	return dst
+}
diff --git a/vendor/github.com/aws/smithy-go/encoding/httpbinding/encode.go b/vendor/github.com/aws/smithy-go/encoding/httpbinding/encode.go
new file mode 100644
index 00000000000..543e7cf0387
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/httpbinding/encode.go
@@ -0,0 +1,123 @@
+package httpbinding
+
+import (
+	"fmt"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+)
+
+const (
+	contentLengthHeader = "Content-Length"
+	floatNaN            = "NaN"
+	floatInfinity       = "Infinity"
+	floatNegInfinity    = "-Infinity"
+)
+
+// An Encoder provides encoding of REST URI path, query, and header components
+// of an HTTP request. Can also encode a stream as the payload.
+//
+// Does not support SetFields.
+type Encoder struct {
+	path, rawPath, pathBuffer []byte
+
+	query  url.Values
+	header http.Header
+}
+
+// NewEncoder creates a new encoder from the passed in request. It assumes that
+// the raw path contains no valuable information at this point, so it passes in
+// path as both path and raw path for subsequent transformations.
+func NewEncoder(path, query string, headers http.Header) (*Encoder, error) {
+	return NewEncoderWithRawPath(path, path, query, headers)
+}
+
+// NewEncoderWithRawPath creates a new encoder from the passed in request. All
+// query and header values will be added on top of the request's existing
+// values, overwriting duplicate values.
+func NewEncoderWithRawPath(path, rawPath, query string, headers http.Header) (*Encoder, error) {
+	parseQuery, err := url.ParseQuery(query)
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse query string: %w", err)
+	}
+
+	e := &Encoder{
+		path:    []byte(path),
+		rawPath: []byte(rawPath),
+		query:   parseQuery,
+		header:  headers.Clone(),
+	}
+
+	return e, nil
+}
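Stepping back briefly to encoding.EncodeFloat defined above, a small sketch of its ES6-style output, including the trailing exponent-zero cleanup step:

    package main

    import (
    	"fmt"

    	"github.com/aws/smithy-go/encoding"
    )

    func main() {
    	// Values in the "normal" range use fixed notation.
    	fmt.Println(string(encoding.EncodeFloat(nil, 3.14, 64))) // 3.14

    	// Very large and very small magnitudes switch to exponent notation,
    	// and a leading zero in the exponent is trimmed (e-07 becomes e-7).
    	fmt.Println(string(encoding.EncodeFloat(nil, 1e21, 64))) // 1e+21
    	fmt.Println(string(encoding.EncodeFloat(nil, 1e-7, 64))) // 1e-7
    }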
+// Encode applies the encoded REST protocol bindings to the passed in request.
+//
+// Because net/http requires `Content-Length` to be specified on the
+// http.Request.ContentLength field directly, Encode will look for whether the
+// header is present, and if so will remove it and set the respective value on
+// the http.Request.
+//
+// Returns any error occurring during encoding.
+func (e *Encoder) Encode(req *http.Request) (*http.Request, error) {
+	req.URL.Path, req.URL.RawPath = string(e.path), string(e.rawPath)
+	req.URL.RawQuery = e.query.Encode()
+
+	// net/http ignores the Content-Length header and requires it to be set on the http.Request
+	if v := e.header.Get(contentLengthHeader); len(v) > 0 {
+		iv, err := strconv.ParseInt(v, 10, 64)
+		if err != nil {
+			return nil, err
+		}
+		req.ContentLength = iv
+		e.header.Del(contentLengthHeader)
+	}
+
+	req.Header = e.header
+
+	return req, nil
+}
+
+// AddHeader returns a HeaderValue for appending to the given header name
+func (e *Encoder) AddHeader(key string) HeaderValue {
+	return newHeaderValue(e.header, key, true)
+}
+
+// SetHeader returns a HeaderValue for setting the given header name
+func (e *Encoder) SetHeader(key string) HeaderValue {
+	return newHeaderValue(e.header, key, false)
+}
+
+// Headers returns a Headers value used for encoding headers with the given prefix
+func (e *Encoder) Headers(prefix string) Headers {
+	return Headers{
+		header: e.header,
+		prefix: strings.TrimSpace(prefix),
+	}
+}
+
+// HasHeader returns whether a header with the specified key exists with one or
+// more values.
+func (e Encoder) HasHeader(key string) bool {
+	return len(e.header[key]) != 0
+}
+
+// SetURI returns a URIValue used for setting the given path key
+func (e *Encoder) SetURI(key string) URIValue {
+	return newURIValue(&e.path, &e.rawPath, &e.pathBuffer, key)
+}
+
+// SetQuery returns a QueryValue used for setting the given query key
+func (e *Encoder) SetQuery(key string) QueryValue {
+	return NewQueryValue(e.query, key, false)
+}
+
+// AddQuery returns a QueryValue used for appending the given query key
+func (e *Encoder) AddQuery(key string) QueryValue {
+	return NewQueryValue(e.query, key, true)
+}
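Pulling these pieces together, a hedged sketch of a typical encode flow; the path template and key names are illustrative:

    package main

    import (
    	"fmt"
    	"net/http"

    	"github.com/aws/smithy-go/encoding/httpbinding"
    )

    func main() {
    	req, _ := http.NewRequest("PUT", "https://example.com", nil)

    	// "/items/{id}" is an illustrative Smithy HTTP binding path template.
    	encoder, err := httpbinding.NewEncoder("/items/{id}", "", http.Header{})
    	if err != nil {
    		panic(err)
    	}

    	// Fill the {id} label, a query parameter, and a header.
    	if err := encoder.SetURI("id").String("abc 123"); err != nil {
    		panic(err)
    	}
    	encoder.SetQuery("limit").Integer(10)
    	encoder.SetHeader("X-Request-Id").String("req-1")

    	req, err = encoder.Encode(req)
    	if err != nil {
    		panic(err)
    	}

    	fmt.Println(req.URL.RawPath)                // /items/abc%20123
    	fmt.Println(req.URL.RawQuery)               // limit=10
    	fmt.Println(req.Header.Get("X-Request-Id")) // req-1
    }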
+// HasQuery returns whether a query with the specified key exists with one or
+// more values.
+func (e *Encoder) HasQuery(key string) bool {
+	return len(e.query.Get(key)) != 0
+}
diff --git a/vendor/github.com/aws/smithy-go/encoding/httpbinding/header.go b/vendor/github.com/aws/smithy-go/encoding/httpbinding/header.go
new file mode 100644
index 00000000000..f9256e175fc
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/httpbinding/header.go
@@ -0,0 +1,122 @@
+package httpbinding
+
+import (
+	"encoding/base64"
+	"math"
+	"math/big"
+	"net/http"
+	"strconv"
+	"strings"
+)
+
+// Headers is used to encode header keys using a provided prefix
+type Headers struct {
+	header http.Header
+	prefix string
+}
+
+// AddHeader returns a HeaderValue used to append values to prefix+key
+func (h Headers) AddHeader(key string) HeaderValue {
+	return h.newHeaderValue(key, true)
+}
+
+// SetHeader returns a HeaderValue used to set the value of prefix+key
+func (h Headers) SetHeader(key string) HeaderValue {
+	return h.newHeaderValue(key, false)
+}
+
+func (h Headers) newHeaderValue(key string, append bool) HeaderValue {
+	return newHeaderValue(h.header, h.prefix+strings.TrimSpace(key), append)
+}
+
+// HeaderValue is used to encode values to an HTTP header
+type HeaderValue struct {
+	header http.Header
+	key    string
+	append bool
+}
+
+func newHeaderValue(header http.Header, key string, append bool) HeaderValue {
+	return HeaderValue{header: header, key: strings.TrimSpace(key), append: append}
+}
+
+func (h HeaderValue) modifyHeader(value string) {
+	if h.append {
+		h.header[h.key] = append(h.header[h.key], value)
+	} else {
+		h.header[h.key] = append(h.header[h.key][:0], value)
+	}
+}
+
+// String encodes the value v as the header string value
+func (h HeaderValue) String(v string) {
+	h.modifyHeader(v)
+}
+
+// Byte encodes the value v as a header string value
+func (h HeaderValue) Byte(v int8) {
+	h.Long(int64(v))
+}
+
+// Short encodes the value v as a header string value
+func (h HeaderValue) Short(v int16) {
+	h.Long(int64(v))
+}
+
+// Integer encodes the value v as the header string value
+func (h HeaderValue) Integer(v int32) {
+	h.Long(int64(v))
+}
+
+// Long encodes the value v as the header string value
+func (h HeaderValue) Long(v int64) {
+	h.modifyHeader(strconv.FormatInt(v, 10))
+}
+
+// Boolean encodes the value v as a header string value
+func (h HeaderValue) Boolean(v bool) {
+	h.modifyHeader(strconv.FormatBool(v))
+}
+
+// Float encodes the value v as a header string value
+func (h HeaderValue) Float(v float32) {
+	h.float(float64(v), 32)
+}
+
+// Double encodes the value v as a header string value
+func (h HeaderValue) Double(v float64) {
+	h.float(v, 64)
+}
+
+func (h HeaderValue) float(v float64, bitSize int) {
+	switch {
+	case math.IsNaN(v):
+		h.String(floatNaN)
+	case math.IsInf(v, 1):
+		h.String(floatInfinity)
+	case math.IsInf(v, -1):
+		h.String(floatNegInfinity)
+	default:
+		h.modifyHeader(strconv.FormatFloat(v, 'f', -1, bitSize))
+	}
+}
+
+// BigInteger encodes the value v as a header string value
+func (h HeaderValue) BigInteger(v *big.Int) {
+	h.modifyHeader(v.String())
+}
+
+// BigDecimal encodes the value v as a header string value
+func (h HeaderValue) BigDecimal(v *big.Float) {
+	if i, accuracy := v.Int64(); accuracy == big.Exact {
+		h.Long(i)
+		return
+	}
+	h.modifyHeader(v.Text('e', -1))
+}
+
+// Blob encodes the value v as a base64 header string value
+func (h HeaderValue) Blob(v []byte) {
+	encodeToString := base64.StdEncoding.EncodeToString(v)
+	h.modifyHeader(encodeToString)
+}
diff --git
a/vendor/github.com/aws/smithy-go/encoding/httpbinding/path_replace.go b/vendor/github.com/aws/smithy-go/encoding/httpbinding/path_replace.go new file mode 100644 index 00000000000..e78926c9a56 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/encoding/httpbinding/path_replace.go @@ -0,0 +1,108 @@ +package httpbinding + +import ( + "bytes" + "fmt" +) + +const ( + uriTokenStart = '{' + uriTokenStop = '}' + uriTokenSkip = '+' +) + +func bufCap(b []byte, n int) []byte { + if cap(b) < n { + return make([]byte, 0, n) + } + + return b[0:0] +} + +// replacePathElement replaces a single element in the path []byte. +// Escape is used to control whether the value will be escaped using Amazon path escape style. +func replacePathElement(path, fieldBuf []byte, key, val string, escape bool) ([]byte, []byte, error) { + fieldBuf = bufCap(fieldBuf, len(key)+3) // { [+] } + fieldBuf = append(fieldBuf, uriTokenStart) + fieldBuf = append(fieldBuf, key...) + + start := bytes.Index(path, fieldBuf) + end := start + len(fieldBuf) + if start < 0 || len(path[end:]) == 0 { + // TODO what to do about error? + return path, fieldBuf, fmt.Errorf("invalid path index, start=%d,end=%d. %s", start, end, path) + } + + encodeSep := true + if path[end] == uriTokenSkip { + // '+' token means do not escape slashes + encodeSep = false + end++ + } + + if escape { + val = EscapePath(val, encodeSep) + } + + if path[end] != uriTokenStop { + return path, fieldBuf, fmt.Errorf("invalid path element, does not contain token stop, %s", path) + } + end++ + + fieldBuf = bufCap(fieldBuf, len(val)) + fieldBuf = append(fieldBuf, val...) + + keyLen := end - start + valLen := len(fieldBuf) + + if keyLen == valLen { + copy(path[start:], fieldBuf) + return path, fieldBuf, nil + } + + newLen := len(path) + (valLen - keyLen) + if len(path) < newLen { + path = path[:cap(path)] + } + if cap(path) < newLen { + newURI := make([]byte, newLen) + copy(newURI, path) + path = newURI + } + + // shift + copy(path[start+valLen:], path[end:]) + path = path[:newLen] + copy(path[start:], fieldBuf) + + return path, fieldBuf, nil +} + +// EscapePath escapes part of a URL path in Amazon style. +func EscapePath(path string, encodeSep bool) string { + var buf bytes.Buffer + for i := 0; i < len(path); i++ { + c := path[i] + if noEscape[c] || (c == '/' && !encodeSep) { + buf.WriteByte(c) + } else { + fmt.Fprintf(&buf, "%%%02X", c) + } + } + return buf.String() +} + +var noEscape [256]bool + +func init() { + for i := 0; i < len(noEscape); i++ { + // AWS expects every character except these to be escaped + noEscape[i] = (i >= 'A' && i <= 'Z') || + (i >= 'a' && i <= 'z') || + (i >= '0' && i <= '9') || + i == '-' || + i == '.' || + i == '_' || + i == '~' + } +} diff --git a/vendor/github.com/aws/smithy-go/encoding/httpbinding/query.go b/vendor/github.com/aws/smithy-go/encoding/httpbinding/query.go new file mode 100644 index 00000000000..c2e7d0a20f4 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/encoding/httpbinding/query.go @@ -0,0 +1,107 @@ +package httpbinding + +import ( + "encoding/base64" + "math" + "math/big" + "net/url" + "strconv" +) + +// QueryValue is used to encode query key values +type QueryValue struct { + query url.Values + key string + append bool +} + +// NewQueryValue creates a new QueryValue which enables encoding +// a query value into the given url.Values. 
+func NewQueryValue(query url.Values, key string, append bool) QueryValue { + return QueryValue{ + query: query, + key: key, + append: append, + } +} + +func (qv QueryValue) updateKey(value string) { + if qv.append { + qv.query.Add(qv.key, value) + } else { + qv.query.Set(qv.key, value) + } +} + +// Blob encodes v as a base64 query string value +func (qv QueryValue) Blob(v []byte) { + encodeToString := base64.StdEncoding.EncodeToString(v) + qv.updateKey(encodeToString) +} + +// Boolean encodes v as a query string value +func (qv QueryValue) Boolean(v bool) { + qv.updateKey(strconv.FormatBool(v)) +} + +// String encodes v as a query string value +func (qv QueryValue) String(v string) { + qv.updateKey(v) +} + +// Byte encodes v as a query string value +func (qv QueryValue) Byte(v int8) { + qv.Long(int64(v)) +} + +// Short encodes v as a query string value +func (qv QueryValue) Short(v int16) { + qv.Long(int64(v)) +} + +// Integer encodes v as a query string value +func (qv QueryValue) Integer(v int32) { + qv.Long(int64(v)) +} + +// Long encodes v as a query string value +func (qv QueryValue) Long(v int64) { + qv.updateKey(strconv.FormatInt(v, 10)) +} + +// Float encodes v as a query string value +func (qv QueryValue) Float(v float32) { + qv.float(float64(v), 32) +} + +// Double encodes v as a query string value +func (qv QueryValue) Double(v float64) { + qv.float(v, 64) +} + +func (qv QueryValue) float(v float64, bitSize int) { + switch { + case math.IsNaN(v): + qv.String(floatNaN) + case math.IsInf(v, 1): + qv.String(floatInfinity) + case math.IsInf(v, -1): + qv.String(floatNegInfinity) + default: + qv.updateKey(strconv.FormatFloat(v, 'f', -1, bitSize)) + } +} + +// BigInteger encodes v as a query string value +func (qv QueryValue) BigInteger(v *big.Int) { + qv.updateKey(v.String()) +} + +// BigDecimal encodes v as a query string value +func (qv QueryValue) BigDecimal(v *big.Float) { + if i, accuracy := v.Int64(); accuracy == big.Exact { + qv.Long(i) + return + } + qv.updateKey(v.Text('e', -1)) +} diff --git a/vendor/github.com/aws/smithy-go/encoding/httpbinding/uri.go b/vendor/github.com/aws/smithy-go/encoding/httpbinding/uri.go new file mode 100644 index 00000000000..f04e11984ac --- /dev/null +++ b/vendor/github.com/aws/smithy-go/encoding/httpbinding/uri.go @@ -0,0 +1,111 @@ +package httpbinding + +import ( + "math" + "math/big" + "strconv" + "strings" +) + +// URIValue is used to encode named URI parameters +type URIValue struct { + path, rawPath, buffer *[]byte + + key string +} + +func newURIValue(path *[]byte, rawPath *[]byte, buffer *[]byte, key string) URIValue { + return URIValue{path: path, rawPath: rawPath, buffer: buffer, key: key} +} + +func (u URIValue) modifyURI(value string) (err error) { + *u.path, *u.buffer, err = replacePathElement(*u.path, *u.buffer, u.key, value, false) + if err != nil { + return err + } + *u.rawPath, *u.buffer, err = replacePathElement(*u.rawPath, *u.buffer, u.key, value, true) + return err +} + +// Boolean encodes v as a URI string value +func (u URIValue) Boolean(v bool) error { + return u.modifyURI(strconv.FormatBool(v)) +} + +// String encodes v as a URI string value +func (u URIValue) String(v string) error { + return u.modifyURI(v) +} + +// Byte encodes v as a URI string value +func (u URIValue) Byte(v int8) error { + return u.Long(int64(v)) +} + +// Short encodes v as a URI string value +func (u URIValue) Short(v int16) error { + return u.Long(int64(v)) +} + +// Integer encodes v as a URI string value +func (u URIValue) Integer(v int32) error { 
+ return u.Long(int64(v)) +} + +// Long encodes v as a URI string value +func (u URIValue) Long(v int64) error { + return u.modifyURI(strconv.FormatInt(v, 10)) +} + +// Float encodes v as a query string value +func (u URIValue) Float(v float32) error { + return u.float(float64(v), 32) +} + +// Double encodes v as a query string value +func (u URIValue) Double(v float64) error { + return u.float(v, 64) +} + +func (u URIValue) float(v float64, bitSize int) error { + switch { + case math.IsNaN(v): + return u.String(floatNaN) + case math.IsInf(v, 1): + return u.String(floatInfinity) + case math.IsInf(v, -1): + return u.String(floatNegInfinity) + default: + return u.modifyURI(strconv.FormatFloat(v, 'f', -1, bitSize)) + } +} + +// BigInteger encodes v as a query string value +func (u URIValue) BigInteger(v *big.Int) error { + return u.modifyURI(v.String()) +} + +// BigDecimal encodes v as a query string value +func (u URIValue) BigDecimal(v *big.Float) error { + if i, accuracy := v.Int64(); accuracy == big.Exact { + return u.Long(i) + } + return u.modifyURI(v.Text('e', -1)) +} + +// SplitURI parses a Smithy HTTP binding trait URI +func SplitURI(uri string) (path, query string) { + queryStart := strings.IndexRune(uri, '?') + if queryStart == -1 { + path = uri + return path, query + } + + path = uri[:queryStart] + if queryStart+1 >= len(uri) { + return path, query + } + query = uri[queryStart+1:] + + return path, query +} diff --git a/vendor/github.com/aws/smithy-go/encoding/json/array.go b/vendor/github.com/aws/smithy-go/encoding/json/array.go new file mode 100644 index 00000000000..7a232f660f1 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/encoding/json/array.go @@ -0,0 +1,35 @@ +package json + +import ( + "bytes" +) + +// Array represents the encoding of a JSON Array +type Array struct { + w *bytes.Buffer + writeComma bool + scratch *[]byte +} + +func newArray(w *bytes.Buffer, scratch *[]byte) *Array { + w.WriteRune(leftBracket) + return &Array{w: w, scratch: scratch} +} + +// Value adds a new element to the JSON Array. +// Returns a Value type that is used to encode +// the array element. +func (a *Array) Value() Value { + if a.writeComma { + a.w.WriteRune(comma) + } else { + a.writeComma = true + } + + return newValue(a.w, a.scratch) +} + +// Close encodes the end of the JSON Array +func (a *Array) Close() { + a.w.WriteRune(rightBracket) +} diff --git a/vendor/github.com/aws/smithy-go/encoding/json/constants.go b/vendor/github.com/aws/smithy-go/encoding/json/constants.go new file mode 100644 index 00000000000..91044092aef --- /dev/null +++ b/vendor/github.com/aws/smithy-go/encoding/json/constants.go @@ -0,0 +1,15 @@ +package json + +const ( + leftBrace = '{' + rightBrace = '}' + + leftBracket = '[' + rightBracket = ']' + + comma = ',' + quote = '"' + colon = ':' + + null = "null" +) diff --git a/vendor/github.com/aws/smithy-go/encoding/json/decoder_util.go b/vendor/github.com/aws/smithy-go/encoding/json/decoder_util.go new file mode 100644 index 00000000000..7050c85b3c6 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/encoding/json/decoder_util.go @@ -0,0 +1,139 @@ +package json + +import ( + "bytes" + "encoding/json" + "fmt" + "io" +) + +// DiscardUnknownField discards unknown fields from a decoder body. +// This function is useful while deserializing a JSON body with additional +// unknown information that should be discarded. 
+func DiscardUnknownField(decoder *json.Decoder) error { + // This deliberately does not share logic with CollectUnknownField, even + // though it could, because if we were to delegate to that then we'd incur + // extra allocations and general memory usage. + v, err := decoder.Token() + if err == io.EOF { + return nil + } + if err != nil { + return err + } + + if _, ok := v.(json.Delim); ok { + for decoder.More() { + err = DiscardUnknownField(decoder) + } + endToken, err := decoder.Token() + if err != nil { + return err + } + if _, ok := endToken.(json.Delim); !ok { + return fmt.Errorf("invalid JSON : expected json delimiter, found %T %v", + endToken, endToken) + } + } + + return nil +} + +// CollectUnknownField grabs the contents of unknown fields from the decoder body +// and returns them as a byte slice. This is useful for skipping unknown fields without +// completely discarding them. +func CollectUnknownField(decoder *json.Decoder) ([]byte, error) { + result, err := collectUnknownField(decoder) + if err != nil { + return nil, err + } + + buff := bytes.NewBuffer(nil) + encoder := json.NewEncoder(buff) + + if err := encoder.Encode(result); err != nil { + return nil, err + } + + return buff.Bytes(), nil +} + +func collectUnknownField(decoder *json.Decoder) (interface{}, error) { + // Grab the initial value. This could either be a concrete value like a string or a a + // delimiter. + token, err := decoder.Token() + if err == io.EOF { + return nil, nil + } + if err != nil { + return nil, err + } + + // If it's an array or object, we'll need to recurse. + delim, ok := token.(json.Delim) + if ok { + var result interface{} + if delim == '{' { + result, err = collectUnknownObject(decoder) + if err != nil { + return nil, err + } + } else { + result, err = collectUnknownArray(decoder) + if err != nil { + return nil, err + } + } + + // Discard the closing token. decoder.Token handles checking for matching delimiters + if _, err := decoder.Token(); err != nil { + return nil, err + } + return result, nil + } + + return token, nil +} + +func collectUnknownArray(decoder *json.Decoder) ([]interface{}, error) { + // We need to create an empty array here instead of a nil array, since by getting + // into this function at all we necessarily have seen a non-nil list. + array := []interface{}{} + + for decoder.More() { + value, err := collectUnknownField(decoder) + if err != nil { + return nil, err + } + array = append(array, value) + } + + return array, nil +} + +func collectUnknownObject(decoder *json.Decoder) (map[string]interface{}, error) { + object := make(map[string]interface{}) + + for decoder.More() { + key, err := collectUnknownField(decoder) + if err != nil { + return nil, err + } + + // Keys have to be strings, which is particularly important as the encoder + // won't except a map with interface{} keys + stringKey, ok := key.(string) + if !ok { + return nil, fmt.Errorf("expected string key, found %T", key) + } + + value, err := collectUnknownField(decoder) + if err != nil { + return nil, err + } + + object[stringKey] = value + } + + return object, nil +} diff --git a/vendor/github.com/aws/smithy-go/encoding/json/encoder.go b/vendor/github.com/aws/smithy-go/encoding/json/encoder.go new file mode 100644 index 00000000000..8772953f1e6 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/encoding/json/encoder.go @@ -0,0 +1,30 @@ +package json + +import ( + "bytes" +) + +// Encoder is JSON encoder that supports construction of JSON values +// using methods. 
+type Encoder struct { + w *bytes.Buffer + Value +} + +// NewEncoder returns a new JSON encoder +func NewEncoder() *Encoder { + writer := bytes.NewBuffer(nil) + scratch := make([]byte, 64) + + return &Encoder{w: writer, Value: newValue(writer, &scratch)} +} + +// String returns the String output of the JSON encoder +func (e Encoder) String() string { + return e.w.String() +} + +// Bytes returns the []byte slice of the JSON encoder +func (e Encoder) Bytes() []byte { + return e.w.Bytes() +} diff --git a/vendor/github.com/aws/smithy-go/encoding/json/escape.go b/vendor/github.com/aws/smithy-go/encoding/json/escape.go new file mode 100644 index 00000000000..d984d0cdca1 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/encoding/json/escape.go @@ -0,0 +1,198 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Copied and modified from Go 1.8 stdlib's encoding/json/#safeSet + +package json + +import ( + "bytes" + "unicode/utf8" +) + +// safeSet holds the value true if the ASCII character with the given array +// position can be represented inside a JSON string without any further +// escaping. +// +// All values are true except for the ASCII control characters (0-31), the +// double quote ("), and the backslash character ("\"). +var safeSet = [utf8.RuneSelf]bool{ + ' ': true, + '!': true, + '"': false, + '#': true, + '$': true, + '%': true, + '&': true, + '\'': true, + '(': true, + ')': true, + '*': true, + '+': true, + ',': true, + '-': true, + '.': true, + '/': true, + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + ':': true, + ';': true, + '<': true, + '=': true, + '>': true, + '?': true, + '@': true, + 'A': true, + 'B': true, + 'C': true, + 'D': true, + 'E': true, + 'F': true, + 'G': true, + 'H': true, + 'I': true, + 'J': true, + 'K': true, + 'L': true, + 'M': true, + 'N': true, + 'O': true, + 'P': true, + 'Q': true, + 'R': true, + 'S': true, + 'T': true, + 'U': true, + 'V': true, + 'W': true, + 'X': true, + 'Y': true, + 'Z': true, + '[': true, + '\\': false, + ']': true, + '^': true, + '_': true, + '`': true, + 'a': true, + 'b': true, + 'c': true, + 'd': true, + 'e': true, + 'f': true, + 'g': true, + 'h': true, + 'i': true, + 'j': true, + 'k': true, + 'l': true, + 'm': true, + 'n': true, + 'o': true, + 'p': true, + 'q': true, + 'r': true, + 's': true, + 't': true, + 'u': true, + 'v': true, + 'w': true, + 'x': true, + 'y': true, + 'z': true, + '{': true, + '|': true, + '}': true, + '~': true, + '\u007f': true, +} + +// copied from Go 1.8 stdlib's encoding/json/#hex +var hex = "0123456789abcdef" + +// escapeStringBytes escapes and writes the passed in string bytes to the dst +// buffer +// +// Copied and modifed from Go 1.8 stdlib's encodeing/json/#encodeState.stringBytes +func escapeStringBytes(e *bytes.Buffer, s []byte) { + e.WriteByte('"') + start := 0 + for i := 0; i < len(s); { + if b := s[i]; b < utf8.RuneSelf { + if safeSet[b] { + i++ + continue + } + if start < i { + e.Write(s[start:i]) + } + switch b { + case '\\', '"': + e.WriteByte('\\') + e.WriteByte(b) + case '\n': + e.WriteByte('\\') + e.WriteByte('n') + case '\r': + e.WriteByte('\\') + e.WriteByte('r') + case '\t': + e.WriteByte('\\') + e.WriteByte('t') + default: + // This encodes bytes < 0x20 except for \t, \n and \r. 
+ // If escapeHTML is set, it also escapes <, >, and & + // because they can lead to security holes when + // user-controlled strings are rendered into JSON + // and served to some browsers. + e.WriteString(`\u00`) + e.WriteByte(hex[b>>4]) + e.WriteByte(hex[b&0xF]) + } + i++ + start = i + continue + } + c, size := utf8.DecodeRune(s[i:]) + if c == utf8.RuneError && size == 1 { + if start < i { + e.Write(s[start:i]) + } + e.WriteString(`\ufffd`) + i += size + start = i + continue + } + // U+2028 is LINE SEPARATOR. + // U+2029 is PARAGRAPH SEPARATOR. + // They are both technically valid characters in JSON strings, + // but don't work in JSONP, which has to be evaluated as JavaScript, + // and can lead to security holes there. It is valid JSON to + // escape them, so we do so unconditionally. + // See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion. + if c == '\u2028' || c == '\u2029' { + if start < i { + e.Write(s[start:i]) + } + e.WriteString(`\u202`) + e.WriteByte(hex[c&0xF]) + i += size + start = i + continue + } + i += size + } + if start < len(s) { + e.Write(s[start:]) + } + e.WriteByte('"') +} diff --git a/vendor/github.com/aws/smithy-go/encoding/json/object.go b/vendor/github.com/aws/smithy-go/encoding/json/object.go new file mode 100644 index 00000000000..722346d0358 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/encoding/json/object.go @@ -0,0 +1,40 @@ +package json + +import ( + "bytes" +) + +// Object represents the encoding of a JSON Object type +type Object struct { + w *bytes.Buffer + writeComma bool + scratch *[]byte +} + +func newObject(w *bytes.Buffer, scratch *[]byte) *Object { + w.WriteRune(leftBrace) + return &Object{w: w, scratch: scratch} +} + +func (o *Object) writeKey(key string) { + escapeStringBytes(o.w, []byte(key)) + o.w.WriteRune(colon) +} + +// Key adds the given named key to the JSON object. +// Returns a Value encoder that should be used to encode +// a JSON value type. 
+func (o *Object) Key(name string) Value { + if o.writeComma { + o.w.WriteRune(comma) + } else { + o.writeComma = true + } + o.writeKey(name) + return newValue(o.w, o.scratch) +} + +// Close encodes the end of the JSON Object +func (o *Object) Close() { + o.w.WriteRune(rightBrace) +} diff --git a/vendor/github.com/aws/smithy-go/encoding/json/value.go b/vendor/github.com/aws/smithy-go/encoding/json/value.go new file mode 100644 index 00000000000..b41ff1e15c2 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/encoding/json/value.go @@ -0,0 +1,149 @@ +package json + +import ( + "bytes" + "encoding/base64" + "math/big" + "strconv" + + "github.com/aws/smithy-go/encoding" +) + +// Value represents a JSON Value type +// JSON Value types: Object, Array, String, Number, Boolean, and Null +type Value struct { + w *bytes.Buffer + scratch *[]byte +} + +// newValue returns a new Value encoder +func newValue(w *bytes.Buffer, scratch *[]byte) Value { + return Value{w: w, scratch: scratch} +} + +// String encodes v as a JSON string +func (jv Value) String(v string) { + escapeStringBytes(jv.w, []byte(v)) +} + +// Byte encodes v as a JSON number +func (jv Value) Byte(v int8) { + jv.Long(int64(v)) +} + +// Short encodes v as a JSON number +func (jv Value) Short(v int16) { + jv.Long(int64(v)) +} + +// Integer encodes v as a JSON number +func (jv Value) Integer(v int32) { + jv.Long(int64(v)) +} + +// Long encodes v as a JSON number +func (jv Value) Long(v int64) { + *jv.scratch = strconv.AppendInt((*jv.scratch)[:0], v, 10) + jv.w.Write(*jv.scratch) +} + +// ULong encodes v as a JSON number +func (jv Value) ULong(v uint64) { + *jv.scratch = strconv.AppendUint((*jv.scratch)[:0], v, 10) + jv.w.Write(*jv.scratch) +} + +// Float encodes v as a JSON number +func (jv Value) Float(v float32) { + jv.float(float64(v), 32) +} + +// Double encodes v as a JSON number +func (jv Value) Double(v float64) { + jv.float(v, 64) +} + +func (jv Value) float(v float64, bits int) { + *jv.scratch = encoding.EncodeFloat((*jv.scratch)[:0], v, bits) + jv.w.Write(*jv.scratch) +} + +// Boolean encodes v as a JSON boolean +func (jv Value) Boolean(v bool) { + *jv.scratch = strconv.AppendBool((*jv.scratch)[:0], v) + jv.w.Write(*jv.scratch) +} + +// Base64EncodeBytes writes v as a base64 value in JSON string +func (jv Value) Base64EncodeBytes(v []byte) { + encodeByteSlice(jv.w, (*jv.scratch)[:0], v) +} + +// Write writes v directly to the JSON document +func (jv Value) Write(v []byte) { + jv.w.Write(v) +} + +// Array returns a new Array encoder +func (jv Value) Array() *Array { + return newArray(jv.w, jv.scratch) +} + +// Object returns a new Object encoder +func (jv Value) Object() *Object { + return newObject(jv.w, jv.scratch) +} + +// Null encodes a null JSON value +func (jv Value) Null() { + jv.w.WriteString(null) +} + +// BigInteger encodes v as JSON value +func (jv Value) BigInteger(v *big.Int) { + jv.w.Write([]byte(v.Text(10))) +} + +// BigDecimal encodes v as JSON value +func (jv Value) BigDecimal(v *big.Float) { + if i, accuracy := v.Int64(); accuracy == big.Exact { + jv.Long(i) + return + } + // TODO: Should this try to match ES6 ToString similar to stdlib JSON? 
+	jv.w.Write([]byte(v.Text('e', -1)))
+}
+
+// Based on encoding/json encodeByteSlice from the Go Standard Library
+// https://golang.org/src/encoding/json/encode.go
+func encodeByteSlice(w *bytes.Buffer, scratch []byte, v []byte) {
+	if v == nil {
+		w.WriteString(null)
+		return
+	}
+
+	w.WriteRune(quote)
+
+	encodedLen := base64.StdEncoding.EncodedLen(len(v))
+	if encodedLen <= len(scratch) {
+		// If the encoded bytes fit in e.scratch, avoid an extra
+		// allocation and use the cheaper Encoding.Encode.
+		dst := scratch[:encodedLen]
+		base64.StdEncoding.Encode(dst, v)
+		w.Write(dst)
+	} else if encodedLen <= 1024 {
+		// The encoded bytes are short enough to allocate for, and
+		// Encoding.Encode is still cheaper.
+		dst := make([]byte, encodedLen)
+		base64.StdEncoding.Encode(dst, v)
+		w.Write(dst)
+	} else {
+		// The encoded bytes are too long to cheaply allocate, and
+		// Encoding.Encode is no longer noticeably cheaper.
+		enc := base64.NewEncoder(base64.StdEncoding, w)
+		enc.Write(v)
+		enc.Close()
+	}
+
+	w.WriteRune(quote)
+}
diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/array.go b/vendor/github.com/aws/smithy-go/encoding/xml/array.go
new file mode 100644
index 00000000000..508f3c997ec
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/xml/array.go
@@ -0,0 +1,49 @@
+package xml
+
+// arrayMemberWrapper is the default member wrapper tag name for XML Array type
+var arrayMemberWrapper = StartElement{
+	Name: Name{Local: "member"},
+}
+
+// Array represents the encoding of a XML array type
+type Array struct {
+	w       writer
+	scratch *[]byte
+
+	// member start element is the array member wrapper start element
+	memberStartElement StartElement
+
+	// isFlattened indicates if the array is a flattened array.
+	isFlattened bool
+}
+
+// newArray returns an array encoder.
+// It also takes in the member start element and the array start element.
+// It takes in an isFlattened bool, indicating whether the array is a flattened array.
+//
+// A wrapped array ["value1", "value2"] is represented as
+// `<List><member>value1</member><member>value2</member></List>`.
+//
+// A flattened array `someList: ["value1", "value2"]` is represented as
+// `<someList>value1</someList><someList>value2</someList>`.
+func newArray(w writer, scratch *[]byte, memberStartElement StartElement, arrayStartElement StartElement, isFlattened bool) *Array {
+	var memberWrapper = memberStartElement
+	if isFlattened {
+		memberWrapper = arrayStartElement
+	}
+
+	return &Array{
+		w:                  w,
+		scratch:            scratch,
+		memberStartElement: memberWrapper,
+		isFlattened:        isFlattened,
+	}
+}
+
+// Member adds a new member to the XML array.
+// It returns a Value encoder.
+func (a *Array) Member() Value {
+	v := newValue(a.w, a.scratch, a.memberStartElement)
+	v.isFlattened = a.isFlattened
+	return v
+}
diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/constants.go b/vendor/github.com/aws/smithy-go/encoding/xml/constants.go
new file mode 100644
index 00000000000..ccee90a636b
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/xml/constants.go
@@ -0,0 +1,10 @@
+package xml
+
+const (
+	leftAngleBracket  = '<'
+	rightAngleBracket = '>'
+	forwardSlash      = '/'
+	colon             = ':'
+	equals            = '='
+	quote             = '"'
+)
diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/doc.go b/vendor/github.com/aws/smithy-go/encoding/xml/doc.go
new file mode 100644
index 00000000000..f9200093e87
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/xml/doc.go
@@ -0,0 +1,49 @@
+/*
+Package xml holds the XML encoder utility. This utility is written in accordance with our design to delegate to
+a shape serializer function in which an xml.Value will be passed around.
+
+Resources followed: https://smithy.io/2.0/spec/protocol-traits.html#xml-bindings
+
+Member Element
+
+Member elements should be used to encode xml shapes into xml elements except for flattened xml shapes. Member elements
+write their own element start tag. These elements should always be closed.
+
+Flattened Element
+
+Flattened elements should be used to encode shapes marked with the flattened trait into xml elements. Flattened elements
+do not write a start tag, and thus should not be closed.
+
+Simple types encoding
+
+All simple type methods on value such as String(), Long(), etc. auto close the associated member element.
+
+Array
+
+Array returns the collection encoder. It has two modes, wrapped and flattened encoding.
+
+Wrapped arrays have two methods Array() and ArrayWithCustomName() which facilitate array member wrapping.
+By default, wrapped array members are wrapped with a `member` named start element.
+
+	<wrappedArray><member>apple</member><member>tree</member></wrappedArray>
+
+Flattened arrays rely on Value being marked as flattened.
+If a shape is marked as flattened, Array() will use the shape element name as the wrapper for array elements.
+
+	<flattenedArray>apple</flattenedArray><flattenedArray>tree</flattenedArray>
+
+Map
+
+Map is the map encoder. It has two modes, wrapped and flattened encoding.
+
+A wrapped map has the Array() method, which facilitates map member wrapping.
+By default, wrapped map members are wrapped with an `entry` named start element.
+
+	<wrappedMap><entry><key>apple</key><value>tree</value></entry><entry><key>snow</key><value>ice</value></entry></wrappedMap>
+
+Flattened maps rely on Value being marked as flattened.
+If a shape is marked as flattened, Map() will use the shape element name as the wrapper for map entry elements.
+
+	<flattenedMap><key>apple</key><value>tree</value></flattenedMap><flattenedMap><key>snow</key><value>ice</value></flattenedMap>
+*/
+package xml
diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/element.go b/vendor/github.com/aws/smithy-go/encoding/xml/element.go
new file mode 100644
index 00000000000..ae84e7999ed
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/xml/element.go
@@ -0,0 +1,91 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Copied and modified from Go 1.14 stdlib's encoding/xml
+
+package xml
+
+// A Name represents an XML name (Local) annotated
+// with a name space identifier (Space).
+// In tokens returned by Decoder.Token, the Space identifier
+// is given as a canonical URL, not the short prefix used
+// in the document being parsed.
+type Name struct {
+	Space, Local string
+}
+
+// An Attr represents an attribute in an XML element (Name=Value).
+type Attr struct {
+	Name  Name
+	Value string
+}
+
+/*
+NewAttribute returns an Attr.
+It takes in a local name aka attribute name, and a value
+representing the attribute value.
+*/
+func NewAttribute(local, value string) Attr {
+	return Attr{
+		Name: Name{
+			Local: local,
+		},
+		Value: value,
+	}
+}
+
+/*
+NewNamespaceAttribute returns an Attr.
+It takes in a local name aka attribute name, and a value
+representing the attribute value.
+
+NewNamespaceAttribute appends `xmlns:` in front of the namespace
+prefix.
+
+For creating a name space attribute representing
+`xmlns:prefix="http://example.com"`, the breakdown would be:
+local = "prefix"
+value = "http://example.com"
+*/
+func NewNamespaceAttribute(local, value string) Attr {
+	attr := NewAttribute(local, value)
+
+	// default name space identifier
+	attr.Name.Space = "xmlns"
+	return attr
+}
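A small sketch of NewNamespaceAttribute in use, together with the encoder and RootElement defined further along in this package; the element and prefix names are illustrative:

    package main

    import (
    	"bytes"
    	"fmt"

    	smithyxml "github.com/aws/smithy-go/encoding/xml"
    )

    func main() {
    	var buf bytes.Buffer
    	encoder := smithyxml.NewEncoder(&buf)

    	// Declares xmlns:ns="https://example.com/ns" on the root element.
    	root := encoder.RootElement(smithyxml.StartElement{
    		Name: smithyxml.Name{Local: "Doc"},
    		Attr: []smithyxml.Attr{
    			smithyxml.NewNamespaceAttribute("ns", "https://example.com/ns"),
    		},
    	})
    	root.Close()

    	fmt.Println(encoder.String()) // <Doc xmlns:ns="https://example.com/ns"></Doc>
    }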
+// A StartElement represents an XML start element.
+type StartElement struct {
+	Name Name
+	Attr []Attr
+}
+
+// Copy creates a new copy of StartElement.
+func (e StartElement) Copy() StartElement {
+	attrs := make([]Attr, len(e.Attr))
+	copy(attrs, e.Attr)
+	e.Attr = attrs
+	return e
+}
+
+// End returns the corresponding XML end element.
+func (e StartElement) End() EndElement {
+	return EndElement{e.Name}
+}
+
+// returns true if the start element's local name is empty
+func (e StartElement) isZero() bool {
+	return len(e.Name.Local) == 0
+}
+
+// An EndElement represents an XML end element.
+type EndElement struct {
+	Name Name
+}
+
+// returns true if the end element's local name is empty
+func (e EndElement) isZero() bool {
+	return len(e.Name.Local) == 0
+}
diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/encoder.go b/vendor/github.com/aws/smithy-go/encoding/xml/encoder.go
new file mode 100644
index 00000000000..16fb3dddb0a
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/xml/encoder.go
@@ -0,0 +1,51 @@
+package xml
+
+// writer is the interface used by the xml encoder to write an encoded xml
+// document into a writer.
+type writer interface {
+
+	// Write takes in a byte slice and returns the number of bytes written and an error
+	Write(p []byte) (n int, err error)
+
+	// WriteRune takes in a rune and returns the number of bytes written and an error
+	WriteRune(r rune) (n int, err error)
+
+	// WriteString takes in a string and returns the number of bytes written and an error
+	WriteString(s string) (n int, err error)
+
+	// String method returns a string
+	String() string
+
+	// Bytes returns a byte slice.
+	Bytes() []byte
+}
+
+// Encoder is an XML encoder that supports construction of XML values
+// using methods. The encoder takes in a writer and maintains a scratch buffer.
+type Encoder struct {
+	w       writer
+	scratch *[]byte
+}
+
+// NewEncoder returns an XML encoder
+func NewEncoder(w writer) *Encoder {
+	scratch := make([]byte, 64)
+
+	return &Encoder{w: w, scratch: &scratch}
+}
+
+// String returns the string output of the XML encoder
+func (e Encoder) String() string {
+	return e.w.String()
+}
+
+// Bytes returns the []byte slice of the XML encoder
+func (e Encoder) Bytes() []byte {
+	return e.w.Bytes()
+}
+// RootElement builds a root element encoding.
+// It writes its start element tag. The value should be closed.
+func (e Encoder) RootElement(element StartElement) Value {
+	return newValue(e.w, e.scratch, element)
+}
diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/error_utils.go b/vendor/github.com/aws/smithy-go/encoding/xml/error_utils.go
new file mode 100644
index 00000000000..f3db6ccca85
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/xml/error_utils.go
@@ -0,0 +1,51 @@
+package xml
+
+import (
+	"encoding/xml"
+	"fmt"
+	"io"
+)
+
+// ErrorComponents represents the error response fields
+// that will be deserialized from an xml error response body
+type ErrorComponents struct {
+	Code    string
+	Message string
+}
+
+// GetErrorResponseComponents returns the error fields from an xml error response body
+func GetErrorResponseComponents(r io.Reader, noErrorWrapping bool) (ErrorComponents, error) {
+	if noErrorWrapping {
+		var errResponse noWrappedErrorResponse
+		if err := xml.NewDecoder(r).Decode(&errResponse); err != nil && err != io.EOF {
+			return ErrorComponents{}, fmt.Errorf("error while deserializing xml error response: %w", err)
+		}
+		return ErrorComponents{
+			Code:    errResponse.Code,
+			Message: errResponse.Message,
+		}, nil
+	}
+
+	var errResponse wrappedErrorResponse
+	if err := xml.NewDecoder(r).Decode(&errResponse); err != nil && err != io.EOF {
+		return ErrorComponents{}, fmt.Errorf("error while deserializing xml error response: %w", err)
+	}
+	return ErrorComponents{
+		Code:    errResponse.Code,
+		Message: errResponse.Message,
+	}, nil
+}
+
+// noWrappedErrorResponse represents the error response body with
+// no internal <Error></Error> wrapping
+type noWrappedErrorResponse struct {
+	Code    string `xml:"Code"`
+	Message string `xml:"Message"`
+}
+
+// wrappedErrorResponse represents the error response body
+// wrapped within <Error>...</Error>
+type wrappedErrorResponse struct {
+	Code    string `xml:"Error>Code"`
+	Message string `xml:"Error>Message"`
+}
diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/escape.go b/vendor/github.com/aws/smithy-go/encoding/xml/escape.go
new file mode 100644
index 00000000000..1c5479af677
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/xml/escape.go
@@ -0,0 +1,137 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Copied and modified from Go 1.14 stdlib's encoding/xml
+
+package xml
+
+import (
+	"unicode/utf8"
+)
+
+// Copied from Go 1.14 stdlib's encoding/xml
+var (
+	escQuot = []byte("&#34;") // shorter than "&quot;"
+	escApos = []byte("&#39;") // shorter than "&apos;"
+	escAmp  = []byte("&amp;")
+	escLT   = []byte("&lt;")
+	escGT   = []byte("&gt;")
+	escTab  = []byte("&#x9;")
+	escNL   = []byte("&#xA;")
+	escCR   = []byte("&#xD;")
+	escFFFD = []byte("\uFFFD") // Unicode replacement character
+
+	// Additional Escapes
+	escNextLine = []byte("&#x85;")
+	escLS       = []byte("&#x2028;")
+)
+
+// Decide whether the given rune is in the XML Character Range, per
+// the Char production of https://www.xml.com/axml/testaxml.htm,
+// Section 2.2 Characters.
+func isInCharacterRange(r rune) (inrange bool) {
+	return r == 0x09 ||
+		r == 0x0A ||
+		r == 0x0D ||
+		r >= 0x20 && r <= 0xD7FF ||
+		r >= 0xE000 && r <= 0xFFFD ||
+		r >= 0x10000 && r <= 0x10FFFF
+}
+
+// TODO: When do we need to escape the string?
+// Based on encoding/xml escapeString from the Go Standard Library.
+// https://golang.org/src/encoding/xml/xml.go
+func escapeString(e writer, s string) {
+	var esc []byte
+	last := 0
+	for i := 0; i < len(s); {
+		r, width := utf8.DecodeRuneInString(s[i:])
+		i += width
+		switch r {
+		case '"':
+			esc = escQuot
+		case '\'':
+			esc = escApos
+		case '&':
+			esc = escAmp
+		case '<':
+			esc = escLT
+		case '>':
+			esc = escGT
+		case '\t':
+			esc = escTab
+		case '\n':
+			esc = escNL
+		case '\r':
+			esc = escCR
+		case '\u0085':
+			// Not escaped by stdlib
+			esc = escNextLine
+		case '\u2028':
+			// Not escaped by stdlib
+			esc = escLS
+		default:
+			if !isInCharacterRange(r) || (r == 0xFFFD && width == 1) {
+				esc = escFFFD
+				break
+			}
+			continue
+		}
+		e.WriteString(s[last : i-width])
+		e.Write(esc)
+		last = i
+	}
+	e.WriteString(s[last:])
+}
+
+// escapeText writes to w the properly escaped XML equivalent
+// of the plain text data s. If escapeNewline is true, newline
+// characters will be escaped.
+//
+// Based on encoding/xml escapeText from the Go Standard Library.
+// https://golang.org/src/encoding/xml/xml.go
+func escapeText(e writer, s []byte) {
+	var esc []byte
+	last := 0
+	for i := 0; i < len(s); {
+		r, width := utf8.DecodeRune(s[i:])
+		i += width
+		switch r {
+		case '"':
+			esc = escQuot
+		case '\'':
+			esc = escApos
+		case '&':
+			esc = escAmp
+		case '<':
+			esc = escLT
+		case '>':
+			esc = escGT
+		case '\t':
+			esc = escTab
+		case '\n':
+			// This always escapes newline, which is different than stdlib's optional
+			// escape of new line.
+			esc = escNL
+		case '\r':
+			esc = escCR
+		case '\u0085':
+			// Not escaped by stdlib
+			esc = escNextLine
+		case '\u2028':
+			// Not escaped by stdlib
+			esc = escLS
+		default:
+			if !isInCharacterRange(r) || (r == 0xFFFD && width == 1) {
+				esc = escFFFD
+				break
+			}
+			continue
+		}
+		e.Write(s[last : i-width])
+		e.Write(esc)
+		last = i
+	}
+	e.Write(s[last:])
+}
diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/map.go b/vendor/github.com/aws/smithy-go/encoding/xml/map.go
new file mode 100644
index 00000000000..e42858965cc
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/xml/map.go
@@ -0,0 +1,53 @@
+package xml
+
+// mapEntryWrapper is the default member wrapper start element for XML Map entry
+var mapEntryWrapper = StartElement{
+	Name: Name{Local: "entry"},
+}
+
+// Map represents the encoding of a XML map type
+type Map struct {
+	w       writer
+	scratch *[]byte
+
+	// member start element is the map entry wrapper start element
+	memberStartElement StartElement
+
+	// isFlattened returns true if the map is a flattened map
+	isFlattened bool
+}
+
+// newMap returns a map encoder which sets the default map
+// entry wrapper to `entry`.
+//
+// A map `someMap : {{key:"abc", value:"123"}}` is represented as
+// `<someMap><entry><key>abc</key><value>123</value></entry></someMap>`.
+func newMap(w writer, scratch *[]byte) *Map {
+	return &Map{
+		w:                  w,
+		scratch:            scratch,
+		memberStartElement: mapEntryWrapper,
+	}
+}
+
+// newFlattenedMap returns a map encoder which sets the map
+// entry wrapper to the passed in memberWrapper.
+//
+// A flattened map `someMap : {{key:"abc", value:"123"}}` is represented as
+// `<someMap><key>abc</key><value>123</value></someMap>`.
func newFlattenedMap(w writer, scratch *[]byte, memberWrapper StartElement) *Map {
+	return &Map{
+		w:                  w,
+		scratch:            scratch,
+		memberStartElement: memberWrapper,
+		isFlattened:        true,
+	}
+}
+
+// Entry returns a Value encoder with map's element.
+// It writes the member wrapper start tag for each entry.
+func (m *Map) Entry() Value {
+	v := newValue(m.w, m.scratch, m.memberStartElement)
+	v.isFlattened = m.isFlattened
+	return v
+}
diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/value.go b/vendor/github.com/aws/smithy-go/encoding/xml/value.go
new file mode 100644
index 00000000000..09434b2c0b5
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/xml/value.go
@@ -0,0 +1,302 @@
+package xml
+
+import (
+	"encoding/base64"
+	"fmt"
+	"math/big"
+	"strconv"
+
+	"github.com/aws/smithy-go/encoding"
+)
+
+// Value represents an XML Value type.
+// XML Value types: Object, Array, Map, String, Number, Boolean.
+type Value struct {
+	w       writer
+	scratch *[]byte
+
+	// xml start element is the associated start element for the Value
+	startElement StartElement
+
+	// indicates if the Value represents a flattened shape
+	isFlattened bool
+}
+
+// newFlattenedValue returns a Value encoder. newFlattenedValue does NOT write the start element tag
+func newFlattenedValue(w writer, scratch *[]byte, startElement StartElement) Value {
+	return Value{
+		w:            w,
+		scratch:      scratch,
+		startElement: startElement,
+	}
+}
+
+// newValue writes the start element xml tag and returns a Value
+func newValue(w writer, scratch *[]byte, startElement StartElement) Value {
+	writeStartElement(w, startElement)
+	return Value{w: w, scratch: scratch, startElement: startElement}
+}
+
+// writeStartElement takes in a start element and writes it.
+// It handles namespace, attributes in start element.
+func writeStartElement(w writer, el StartElement) error {
+	if el.isZero() {
+		return fmt.Errorf("xml start element cannot be nil")
+	}
+
+	w.WriteRune(leftAngleBracket)
+
+	if len(el.Name.Space) != 0 {
+		escapeString(w, el.Name.Space)
+		w.WriteRune(colon)
+	}
+	escapeString(w, el.Name.Local)
+	for _, attr := range el.Attr {
+		w.WriteRune(' ')
+		writeAttribute(w, &attr)
+	}
+
+	w.WriteRune(rightAngleBracket)
+	return nil
+}
+
+// writeAttribute writes an attribute from a provided Attribute
+// For a namespace attribute, the attr.Name.Space must be defined as "xmlns".
+// https://www.w3.org/TR/REC-xml-names/#NT-DefaultAttName
+func writeAttribute(w writer, attr *Attr) {
+	// if local, space both are not empty
+	if len(attr.Name.Space) != 0 && len(attr.Name.Local) != 0 {
+		escapeString(w, attr.Name.Space)
+		w.WriteRune(colon)
+	}
+
+	// if prefix is empty, the default `xmlns` space should be used as prefix.
+	if len(attr.Name.Local) == 0 {
+		attr.Name.Local = attr.Name.Space
+	}
+
+	escapeString(w, attr.Name.Local)
+	w.WriteRune(equals)
+	w.WriteRune(quote)
+	escapeString(w, attr.Value)
+	w.WriteRune(quote)
+}
+
+// writeEndElement takes in an end element and writes it.
+func writeEndElement(w writer, el EndElement) error {
+	if el.isZero() {
+		return fmt.Errorf("xml end element cannot be nil")
+	}
+
+	w.WriteRune(leftAngleBracket)
+	w.WriteRune(forwardSlash)
+
+	if len(el.Name.Space) != 0 {
+		escapeString(w, el.Name.Space)
+		w.WriteRune(colon)
+	}
+	escapeString(w, el.Name.Local)
+	w.WriteRune(rightAngleBracket)
+
+	return nil
+}
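+
+// Editor's note: an illustrative sketch (not part of upstream smithy-go) of
+// the start/end element helpers above. For a hypothetical writer w:
+//
+//	el := StartElement{
+//		Name: Name{Space: "ns", Local: "foo"},
+//		Attr: []Attr{{Name: Name{Local: "id"}, Value: "1"}},
+//	}
+//	writeStartElement(w, el)     // writes: <ns:foo id="1">
+//	writeEndElement(w, el.End()) // writes: </ns:foo>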
+
+// String encodes v as an XML string.
+// It will auto close the parent xml element tag.
+func (xv Value) String(v string) {
+	escapeString(xv.w, v)
+	xv.Close()
+}
+
+// Byte encodes v as an XML number.
+// It will auto close the parent xml element tag.
+func (xv Value) Byte(v int8) {
+	xv.Long(int64(v))
+}
+
+// Short encodes v as an XML number.
+// It will auto close the parent xml element tag.
+func (xv Value) Short(v int16) {
+	xv.Long(int64(v))
+}
+
+// Integer encodes v as an XML number.
+// It will auto close the parent xml element tag.
+func (xv Value) Integer(v int32) {
+	xv.Long(int64(v))
+}
+
+// Long encodes v as an XML number.
+// It will auto close the parent xml element tag.
+func (xv Value) Long(v int64) {
+	*xv.scratch = strconv.AppendInt((*xv.scratch)[:0], v, 10)
+	xv.w.Write(*xv.scratch)
+
+	xv.Close()
+}
+
+// Float encodes v as an XML number.
+// It will auto close the parent xml element tag.
+func (xv Value) Float(v float32) {
+	xv.float(float64(v), 32)
+	xv.Close()
+}
+
+// Double encodes v as an XML number.
+// It will auto close the parent xml element tag.
+func (xv Value) Double(v float64) {
+	xv.float(v, 64)
+	xv.Close()
+}
+
+func (xv Value) float(v float64, bits int) {
+	*xv.scratch = encoding.EncodeFloat((*xv.scratch)[:0], v, bits)
+	xv.w.Write(*xv.scratch)
+}
+
+// Boolean encodes v as an XML boolean.
+// It will auto close the parent xml element tag.
+func (xv Value) Boolean(v bool) {
+	*xv.scratch = strconv.AppendBool((*xv.scratch)[:0], v)
+	xv.w.Write(*xv.scratch)
+
+	xv.Close()
+}
+
+// Base64EncodeBytes writes v as a base64 value in XML string.
+// It will auto close the parent xml element tag.
+func (xv Value) Base64EncodeBytes(v []byte) {
+	encodeByteSlice(xv.w, (*xv.scratch)[:0], v)
+	xv.Close()
+}
+
+// BigInteger encodes v big.Int as an XML value.
+// It will auto close the parent xml element tag.
+func (xv Value) BigInteger(v *big.Int) {
+	xv.w.Write([]byte(v.Text(10)))
+	xv.Close()
+}
+
+// BigDecimal encodes v big.Float as an XML value.
+// It will auto close the parent xml element tag.
+func (xv Value) BigDecimal(v *big.Float) {
+	if i, accuracy := v.Int64(); accuracy == big.Exact {
+		xv.Long(i)
+		return
+	}
+
+	xv.w.Write([]byte(v.Text('e', -1)))
+	xv.Close()
+}
+
+// Write writes v directly to the xml document.
+// If escapeXMLText is set to true, Write will escape the text.
+// It will auto close the parent xml element tag.
+func (xv Value) Write(v []byte, escapeXMLText bool) {
+	// escape and write xml text
+	if escapeXMLText {
+		escapeText(xv.w, v)
+	} else {
+		// write xml directly
+		xv.w.Write(v)
+	}
+
+	xv.Close()
+}
+
+// MemberElement does member element encoding. It returns a Value.
+// MemberElement should be used for all shapes except flattened shapes.
+//
+// A call to MemberElement will write nested element tags directly using the
+// provided start element. The value returned by MemberElement should be closed.
+func (xv Value) MemberElement(element StartElement) Value {
+	return newValue(xv.w, xv.scratch, element)
+}
+
+// FlattenedElement returns flattened element encoding. It returns a Value.
+// This method should be used for flattened shapes.
+//
+// Unlike MemberElement, flattened element will NOT write element tags
+// directly for the associated start element.
+//
+// The value returned by the FlattenedElement does not need to be closed.
+func (xv Value) FlattenedElement(element StartElement) Value {
+	v := newFlattenedValue(xv.w, xv.scratch, element)
+	v.isFlattened = true
+	return v
+}
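+
+// Editor's note: an illustrative sketch (not part of upstream smithy-go) of
+// the difference between the two element encoders above:
+//
+//	// MemberElement writes the nested start tag immediately:
+//	v.MemberElement(StartElement{Name: Name{Local: "name"}}).String("abc")
+//	// writes: <name>abc</name>
+//
+//	// FlattenedElement does not write the tag up front; the associated start
+//	// element is used later to wrap each member of a flattened array or map:
+//	fv := v.FlattenedElement(StartElement{Name: Name{Local: "item"}})
+//	fv.Array() // members are wrapped as <item>...</item> instead of <member>...</member>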
+
+// Array returns an array encoder. By default, the members of array are
+// wrapped with `<member>` element tag.
+// If value is marked as flattened, the start element is used to wrap the members instead of
+// the `<member>` element.
+func (xv Value) Array() *Array {
+	return newArray(xv.w, xv.scratch, arrayMemberWrapper, xv.startElement, xv.isFlattened)
+}
+
+/*
+ArrayWithCustomName returns an array encoder.
+
+It takes a named start element as an argument; the named start element will be used to wrap xml array entries.
+For example, `<someList><customName>entry1</customName></someList>`.
+Here the `customName` named start element will be wrapped on each array member.
+*/
+func (xv Value) ArrayWithCustomName(element StartElement) *Array {
+	return newArray(xv.w, xv.scratch, element, xv.startElement, xv.isFlattened)
+}
+
+/*
+Map returns a map encoder. By default, the map entries are
+wrapped with `<entry>` element tag.
+
+If value is marked as flattened, the start element is used to wrap the entry instead of
+the `<entry>` element.
+*/
+func (xv Value) Map() *Map {
+	// flattened map
+	if xv.isFlattened {
+		return newFlattenedMap(xv.w, xv.scratch, xv.startElement)
+	}
+
+	// un-flattened map
+	return newMap(xv.w, xv.scratch)
+}
+
+// encodeByteSlice is a modified copy of the json encoder's encodeByteSlice.
+// It is used to base64 encode a byte slice.
+func encodeByteSlice(w writer, scratch []byte, v []byte) {
+	if v == nil {
+		return
+	}
+
+	encodedLen := base64.StdEncoding.EncodedLen(len(v))
+	if encodedLen <= len(scratch) {
+		// If the encoded bytes fit in e.scratch, avoid an extra
+		// allocation and use the cheaper Encoding.Encode.
+		dst := scratch[:encodedLen]
+		base64.StdEncoding.Encode(dst, v)
+		w.Write(dst)
+	} else if encodedLen <= 1024 {
+		// The encoded bytes are short enough to allocate for, and
+		// Encoding.Encode is still cheaper.
+		dst := make([]byte, encodedLen)
+		base64.StdEncoding.Encode(dst, v)
+		w.Write(dst)
+	} else {
+		// The encoded bytes are too long to cheaply allocate, and
+		// Encoding.Encode is no longer noticeably cheaper.
+		enc := base64.NewEncoder(base64.StdEncoding, w)
+		enc.Write(v)
+		enc.Close()
+	}
+}
+
+// IsFlattened returns true if value is for flattened shape.
+func (xv Value) IsFlattened() bool {
+	return xv.isFlattened
+}
+
+// Close closes the value.
+func (xv Value) Close() {
+	writeEndElement(xv.w, xv.startElement.End())
+}
diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/xml_decoder.go b/vendor/github.com/aws/smithy-go/encoding/xml/xml_decoder.go
new file mode 100644
index 00000000000..dc4eebdffa7
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/encoding/xml/xml_decoder.go
@@ -0,0 +1,154 @@
+package xml
+
+import (
+	"encoding/xml"
+	"fmt"
+	"strings"
+)
+
+// NodeDecoder is an XML decoder wrapper that is responsible for decoding
+// a single XML Node element and its nested member elements. This wrapper decoder
+// takes in the start element of the top level node being decoded.
+type NodeDecoder struct {
+	Decoder *xml.Decoder
+	StartEl xml.StartElement
+}
+
+// WrapNodeDecoder returns an initialized NodeDecoder.
+func WrapNodeDecoder(decoder *xml.Decoder, startEl xml.StartElement) NodeDecoder {
+	return NodeDecoder{
+		Decoder: decoder,
+		StartEl: startEl,
+	}
+}
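+
+// Editor's note: an illustrative decode loop (editor's sketch, not part of
+// upstream smithy-go) using the NodeDecoder above; xmlDecoder is a
+// hypothetical *xml.Decoder positioned at the response body:
+//
+//	root, _ := FetchRootElement(xmlDecoder)
+//	nd := WrapNodeDecoder(xmlDecoder, root)
+//	for {
+//		tok, done, err := nd.Token()
+//		if err != nil || done {
+//			break
+//		}
+//		// dispatch on tok.Name.Local to decode each member element
+//	}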
+
+// Token on a NodeDecoder returns an xml.StartElement. It also returns a boolean
+// that indicates whether the token is the node decoder's end node token, and an
+// error for any error that occurred while retrieving the start element.
+func (d NodeDecoder) Token() (t xml.StartElement, done bool, err error) {
+	for {
+		token, e := d.Decoder.Token()
+		if e != nil {
+			return t, done, e
+		}
+
+		// check if we reach end of the node being decoded
+		if el, ok := token.(xml.EndElement); ok {
+			return t, el == d.StartEl.End(), err
+		}
+
+		if t, ok := token.(xml.StartElement); ok {
+			return restoreAttrNamespaces(t), false, err
+		}
+
+		// skip token if it is a comment or preamble or empty space value due to indentation
+		// or if it's a value and is not expected
+	}
+}
+
+// restoreAttrNamespaces updates XML attributes to restore the short namespaces found within
+// the raw XML document.
+func restoreAttrNamespaces(node xml.StartElement) xml.StartElement {
+	if len(node.Attr) == 0 {
+		return node
+	}
+
+	// Generate a mapping of XML namespace values to their short names.
+	ns := map[string]string{}
+	for _, a := range node.Attr {
+		if a.Name.Space == "xmlns" {
+			ns[a.Value] = a.Name.Local
+			break
+		}
+	}
+
+	for i, a := range node.Attr {
+		if a.Name.Space == "xmlns" {
+			continue
+		}
+		// By default, xml.Decoder will fully resolve these namespaces. So if you had
+		// <foo xmlns:bar="baz" bar:bin="hi"/>
+		// then by default the second attribute would have the `Name.Space` resolved to `baz`. But we need it to
+		// continue to resolve as `bar` so we can easily identify it later on.
+		if v, ok := ns[node.Attr[i].Name.Space]; ok {
+			node.Attr[i].Name.Space = v
+		}
+	}
+	return node
+}
+
+// GetElement looks for the given tag name at the current level, skipping over
+// non-matching elements. Returns an error if the node is not found, or if an error occurs while walking
+// the document.
+func (d NodeDecoder) GetElement(name string) (t xml.StartElement, err error) {
+	for {
+		token, done, err := d.Token()
+		if err != nil {
+			return t, err
+		}
+		if done {
+			return t, fmt.Errorf("%s node not found", name)
+		}
+		switch {
+		case strings.EqualFold(name, token.Name.Local):
+			return token, nil
+		default:
+			err = d.Decoder.Skip()
+			if err != nil {
+				return t, err
+			}
+		}
+	}
+}
+
+// Value provides an abstraction to retrieve char data value within an xml element.
+// The method will return an error if it encounters a nested xml element instead of char data.
+// This method should only be used to retrieve simple type or blob shape values as []byte.
+func (d NodeDecoder) Value() (c []byte, err error) {
+	t, e := d.Decoder.Token()
+	if e != nil {
+		return c, e
+	}
+
+	endElement := d.StartEl.End()
+
+	switch ev := t.(type) {
+	case xml.CharData:
+		c = ev.Copy()
+	case xml.EndElement: // end tag or self-closing
+		if ev == endElement {
+			return []byte{}, err
+		}
+		return c, fmt.Errorf("expected value for %v element, got %T type %v instead", d.StartEl.Name.Local, t, t)
+	default:
+		return c, fmt.Errorf("expected value for %v element, got %T type %v instead", d.StartEl.Name.Local, t, t)
+	}
+
+	t, e = d.Decoder.Token()
+	if e != nil {
+		return c, e
+	}
+
+	if ev, ok := t.(xml.EndElement); ok {
+		if ev == endElement {
+			return c, err
+		}
+	}
+
+	return c, fmt.Errorf("expected end element %v, got %T type %v instead", endElement, t, t)
+}
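+
+// Editor's note: an illustrative sketch (not part of upstream smithy-go)
+// combining GetElement and Value to pull one child's char data:
+//
+//	// given <Person><name>Jorge</name></Person>, with nd decoding <Person>
+//	nameEl, err := nd.GetElement("name")
+//	if err == nil {
+//		data, _ := WrapNodeDecoder(nd.Decoder, nameEl).Value() // data == []byte("Jorge")
+//	}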
+
+// FetchRootElement takes in a decoder and returns the first start element within the xml body.
+// This function is useful for fetching the start element of an XML response, ignoring the
+// comments and preamble.
+func FetchRootElement(decoder *xml.Decoder) (startElement xml.StartElement, err error) {
+	for {
+		t, e := decoder.Token()
+		if e != nil {
+			return startElement, e
+		}
+
+		if startElement, ok := t.(xml.StartElement); ok {
+			return startElement, err
+		}
+	}
+}
diff --git a/vendor/github.com/aws/smithy-go/endpoints/endpoint.go b/vendor/github.com/aws/smithy-go/endpoints/endpoint.go
new file mode 100644
index 00000000000..a9352839748
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/endpoints/endpoint.go
@@ -0,0 +1,23 @@
+package transport
+
+import (
+	"net/http"
+	"net/url"
+
+	"github.com/aws/smithy-go"
+)
+
+// Endpoint is the endpoint object returned by Endpoint resolution V2
+type Endpoint struct {
+	// The complete URL minimally specifying the scheme and host.
+	// May optionally specify the port and base path component.
+	URI url.URL
+
+	// An optional set of headers to be sent using transport layer headers.
+	Headers http.Header
+
+	// A grab-bag property map of endpoint attributes. The
+	// values present here are subject to change, or may be added or removed at any
+	// time.
+	Properties smithy.Properties
+}
diff --git a/vendor/github.com/aws/smithy-go/errors.go b/vendor/github.com/aws/smithy-go/errors.go
new file mode 100644
index 00000000000..d6948d02062
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/errors.go
@@ -0,0 +1,137 @@
+package smithy
+
+import "fmt"
+
+// APIError provides the generic API and protocol agnostic error type all SDK
+// generated exception types will implement.
+type APIError interface {
+	error
+
+	// ErrorCode returns the error code for the API exception.
+	ErrorCode() string
+	// ErrorMessage returns the error message for the API exception.
+	ErrorMessage() string
+	// ErrorFault returns the fault for the API exception.
+	ErrorFault() ErrorFault
+}
+
+// GenericAPIError provides a generic concrete API error type that SDKs can use
+// to deserialize error responses into. Should be used for unmodeled or untyped
+// errors.
+type GenericAPIError struct {
+	Code    string
+	Message string
+	Fault   ErrorFault
+}
+
+// ErrorCode returns the error code for the API exception.
+func (e *GenericAPIError) ErrorCode() string { return e.Code }
+
+// ErrorMessage returns the error message for the API exception.
+func (e *GenericAPIError) ErrorMessage() string { return e.Message }
+
+// ErrorFault returns the fault for the API exception.
+func (e *GenericAPIError) ErrorFault() ErrorFault { return e.Fault }
+
+func (e *GenericAPIError) Error() string {
+	return fmt.Sprintf("api error %s: %s", e.Code, e.Message)
+}
+
+var _ APIError = (*GenericAPIError)(nil)
+
+// OperationError decorates an underlying error which occurred while invoking
+// an operation with names of the operation and API.
+type OperationError struct {
+	ServiceID     string
+	OperationName string
+	Err           error
+}
+
+// Service returns the name of the API service the error occurred with.
+func (e *OperationError) Service() string { return e.ServiceID }
+
+// Operation returns the name of the API operation the error occurred with.
+func (e *OperationError) Operation() string { return e.OperationName }
+
+// Unwrap returns the nested error if any, or nil.
+func (e *OperationError) Unwrap() error { return e.Err } + +func (e *OperationError) Error() string { + return fmt.Sprintf("operation error %s: %s, %v", e.ServiceID, e.OperationName, e.Err) +} + +// DeserializationError provides a wrapper for an error that occurs during +// deserialization. +type DeserializationError struct { + Err error // original error + Snapshot []byte +} + +// Error returns a formatted error for DeserializationError +func (e *DeserializationError) Error() string { + const msg = "deserialization failed" + if e.Err == nil { + return msg + } + return fmt.Sprintf("%s, %v", msg, e.Err) +} + +// Unwrap returns the underlying Error in DeserializationError +func (e *DeserializationError) Unwrap() error { return e.Err } + +// ErrorFault provides the type for a Smithy API error fault. +type ErrorFault int + +// ErrorFault enumeration values +const ( + FaultUnknown ErrorFault = iota + FaultServer + FaultClient +) + +func (f ErrorFault) String() string { + switch f { + case FaultServer: + return "server" + case FaultClient: + return "client" + default: + return "unknown" + } +} + +// SerializationError represents an error that occurred while attempting to serialize a request +type SerializationError struct { + Err error // original error +} + +// Error returns a formatted error for SerializationError +func (e *SerializationError) Error() string { + const msg = "serialization failed" + if e.Err == nil { + return msg + } + return fmt.Sprintf("%s: %v", msg, e.Err) +} + +// Unwrap returns the underlying Error in SerializationError +func (e *SerializationError) Unwrap() error { return e.Err } + +// CanceledError is the error that will be returned by an API request that was +// canceled. API operations given a Context may return this error when +// canceled. +type CanceledError struct { + Err error +} + +// CanceledError returns true to satisfy interfaces checking for canceled errors. +func (*CanceledError) CanceledError() bool { return true } + +// Unwrap returns the underlying error, if there was one. +func (e *CanceledError) Unwrap() error { + return e.Err +} + +func (e *CanceledError) Error() string { + return fmt.Sprintf("canceled, %v", e.Err) +} diff --git a/vendor/github.com/aws/smithy-go/go_module_metadata.go b/vendor/github.com/aws/smithy-go/go_module_metadata.go new file mode 100644 index 00000000000..d96be806df6 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/go_module_metadata.go @@ -0,0 +1,6 @@ +// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. + +package smithy + +// goModuleVersion is the tagged release for this module +const goModuleVersion = "1.16.0" diff --git a/vendor/github.com/aws/smithy-go/internal/sync/singleflight/LICENSE b/vendor/github.com/aws/smithy-go/internal/sync/singleflight/LICENSE new file mode 100644 index 00000000000..fe6a62006a5 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/internal/sync/singleflight/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. 
nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/vendor/github.com/aws/smithy-go/internal/sync/singleflight/docs.go b/vendor/github.com/aws/smithy-go/internal/sync/singleflight/docs.go
new file mode 100644
index 00000000000..9c9d02b94b9
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/internal/sync/singleflight/docs.go
@@ -0,0 +1,8 @@
+// Package singleflight provides a duplicate function call suppression
+// mechanism. This package is a fork of the Go golang.org/x/sync/singleflight
+// package. The package is forked because it is part of the unstable
+// and unversioned golang.org/x/sync module.
+//
+// https://github.com/golang/sync/tree/67f06af15bc961c363a7260195bcd53487529a21/singleflight
+
+package singleflight
diff --git a/vendor/github.com/aws/smithy-go/internal/sync/singleflight/singleflight.go b/vendor/github.com/aws/smithy-go/internal/sync/singleflight/singleflight.go
new file mode 100644
index 00000000000..e8a1b17d564
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/internal/sync/singleflight/singleflight.go
@@ -0,0 +1,210 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package singleflight
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"runtime"
+	"runtime/debug"
+	"sync"
+)
+
+// errGoexit indicates the runtime.Goexit was called in
+// the user given function.
+var errGoexit = errors.New("runtime.Goexit was called")
+
+// A panicError is an arbitrary value recovered from a panic
+// with the stack trace during the execution of given function.
+type panicError struct {
+	value interface{}
+	stack []byte
+}
+
+// Error implements error interface.
+func (p *panicError) Error() string {
+	return fmt.Sprintf("%v\n\n%s", p.value, p.stack)
+}
+
+func newPanicError(v interface{}) error {
+	stack := debug.Stack()
+
+	// The first line of the stack trace is of the form "goroutine N [status]:"
+	// but by the time the panic reaches Do the goroutine may no longer exist
+	// and its status will have changed. Trim out the misleading line.
+	if line := bytes.IndexByte(stack[:], '\n'); line >= 0 {
+		stack = stack[line+1:]
+	}
+	return &panicError{value: v, stack: stack}
+}
+
+// call is an in-flight or completed singleflight.Do call
+type call struct {
+	wg sync.WaitGroup
+
+	// These fields are written once before the WaitGroup is done
+	// and are only read after the WaitGroup is done.
+	val interface{}
+	err error
+
+	// forgotten indicates whether Forget was called with this call's key
+	// while the call was still in flight.
+ forgotten bool + + // These fields are read and written with the singleflight + // mutex held before the WaitGroup is done, and are read but + // not written after the WaitGroup is done. + dups int + chans []chan<- Result +} + +// Group represents a class of work and forms a namespace in +// which units of work can be executed with duplicate suppression. +type Group struct { + mu sync.Mutex // protects m + m map[string]*call // lazily initialized +} + +// Result holds the results of Do, so they can be passed +// on a channel. +type Result struct { + Val interface{} + Err error + Shared bool +} + +// Do executes and returns the results of the given function, making +// sure that only one execution is in-flight for a given key at a +// time. If a duplicate comes in, the duplicate caller waits for the +// original to complete and receives the same results. +// The return value shared indicates whether v was given to multiple callers. +func (g *Group) Do(key string, fn func() (interface{}, error)) (v interface{}, err error, shared bool) { + g.mu.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + g.mu.Unlock() + c.wg.Wait() + + if e, ok := c.err.(*panicError); ok { + panic(e) + } else if c.err == errGoexit { + runtime.Goexit() + } + return c.val, c.err, true + } + c := new(call) + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + g.doCall(c, key, fn) + return c.val, c.err, c.dups > 0 +} + +// DoChan is like Do but returns a channel that will receive the +// results when they are ready. +// +// The returned channel will not be closed. +func (g *Group) DoChan(key string, fn func() (interface{}, error)) <-chan Result { + ch := make(chan Result, 1) + g.mu.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + c.chans = append(c.chans, ch) + g.mu.Unlock() + return ch + } + c := &call{chans: []chan<- Result{ch}} + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + go g.doCall(c, key, fn) + + return ch +} + +// doCall handles the single call for a key. +func (g *Group) doCall(c *call, key string, fn func() (interface{}, error)) { + normalReturn := false + recovered := false + + // use double-defer to distinguish panic from runtime.Goexit, + // more details see https://golang.org/cl/134395 + defer func() { + // the given function invoked runtime.Goexit + if !normalReturn && !recovered { + c.err = errGoexit + } + + c.wg.Done() + g.mu.Lock() + defer g.mu.Unlock() + if !c.forgotten { + delete(g.m, key) + } + + if e, ok := c.err.(*panicError); ok { + // In order to prevent the waiting channels from being blocked forever, + // needs to ensure that this panic cannot be recovered. + if len(c.chans) > 0 { + go panic(e) + select {} // Keep this goroutine around so that it will appear in the crash dump. + } else { + panic(e) + } + } else if c.err == errGoexit { + // Already in the process of goexit, no need to call again + } else { + // Normal return + for _, ch := range c.chans { + ch <- Result{c.val, c.err, c.dups > 0} + } + } + }() + + func() { + defer func() { + if !normalReturn { + // Ideally, we would wait to take a stack trace until we've determined + // whether this is a panic or a runtime.Goexit. + // + // Unfortunately, the only way we can distinguish the two is to see + // whether the recover stopped the goroutine from terminating, and by + // the time we know that, the part of the stack trace relevant to the + // panic has been discarded. 
+			if r := recover(); r != nil {
+				c.err = newPanicError(r)
+			}
+		}()
+
+		c.val, c.err = fn()
+		normalReturn = true
+	}()
+
+	if !normalReturn {
+		recovered = true
+	}
+}
+
+// Forget tells the singleflight to forget about a key. Future calls
+// to Do for this key will call the function rather than waiting for
+// an earlier call to complete.
+func (g *Group) Forget(key string) {
+	g.mu.Lock()
+	if c, ok := g.m[key]; ok {
+		c.forgotten = true
+	}
+	delete(g.m, key)
+	g.mu.Unlock()
+}
diff --git a/vendor/github.com/aws/smithy-go/io/byte.go b/vendor/github.com/aws/smithy-go/io/byte.go
new file mode 100644
index 00000000000..f8417c15b85
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/io/byte.go
@@ -0,0 +1,12 @@
+package io
+
+const (
+	// Byte is 8 bits
+	Byte int64 = 1
+	// KibiByte (KiB) is 1024 Bytes
+	KibiByte = Byte * 1024
+	// MebiByte (MiB) is 1024 KiB
+	MebiByte = KibiByte * 1024
+	// GibiByte (GiB) is 1024 MiB
+	GibiByte = MebiByte * 1024
+)
diff --git a/vendor/github.com/aws/smithy-go/io/doc.go b/vendor/github.com/aws/smithy-go/io/doc.go
new file mode 100644
index 00000000000..a6a33eaf567
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/io/doc.go
@@ -0,0 +1,2 @@
+// Package io provides utilities for Smithy generated API clients.
+package io
diff --git a/vendor/github.com/aws/smithy-go/io/reader.go b/vendor/github.com/aws/smithy-go/io/reader.go
new file mode 100644
index 00000000000..07063f2960d
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/io/reader.go
@@ -0,0 +1,16 @@
+package io
+
+import (
+	"io"
+)
+
+// ReadSeekNopCloser wraps an io.ReadSeeker with an additional Close method
+// that does nothing.
+type ReadSeekNopCloser struct {
+	io.ReadSeeker
+}
+
+// Close does nothing.
+func (ReadSeekNopCloser) Close() error {
+	return nil
+}
diff --git a/vendor/github.com/aws/smithy-go/io/ringbuffer.go b/vendor/github.com/aws/smithy-go/io/ringbuffer.go
new file mode 100644
index 00000000000..06b476add8a
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/io/ringbuffer.go
@@ -0,0 +1,94 @@
+package io
+
+import (
+	"bytes"
+	"io"
+)
+
+// RingBuffer struct satisfies the io.ReadWriter interface.
+//
+// RingBuffer is a revolving buffer data structure, which can be used to store snapshots of data in a
+// revolving window.
+type RingBuffer struct {
+	slice []byte
+	start int
+	end   int
+	size  int
+}
+
+// NewRingBuffer method takes in a byte slice as an input and returns a RingBuffer.
+func NewRingBuffer(slice []byte) *RingBuffer {
+	ringBuf := RingBuffer{
+		slice: slice,
+	}
+	return &ringBuf
+}
+
+// Write method inserts the elements in a byte slice, and returns the number of bytes written along with any error.
+func (r *RingBuffer) Write(p []byte) (int, error) {
+	for _, b := range p {
+		// check if end points to invalid index, we need to circle back
+		if r.end == len(r.slice) {
+			r.end = 0
+		}
+		// check if start points to invalid index, we need to circle back
+		if r.start == len(r.slice) {
+			r.start = 0
+		}
+		// if ring buffer is filled, increment the start index
+		if r.size == len(r.slice) {
+			r.size--
+			r.start++
+		}
+
+		r.slice[r.end] = b
+		r.end++
+		r.size++
+	}
+	return len(p), nil
+}
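+
+// Editor's note: an illustrative sketch (not part of upstream smithy-go) of
+// the revolving behavior implemented by Write above: once the backing slice
+// is full, the oldest byte is dropped to make room for the newest.
+//
+//	rb := NewRingBuffer(make([]byte, 3))
+//	rb.Write([]byte("abcd")) // 'a' is overwritten; buffer now holds "bcd"
+//	p := make([]byte, 3)
+//	n, _ := rb.Read(p) // n == 3, p == []byte("bcd")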
+
+// Read copies the data on the ring buffer into the byte slice provided to the method.
+// Returns the read count along with any error encountered while reading.
+func (r *RingBuffer) Read(p []byte) (int, error) {
+	// readCount keeps track of the number of bytes read
+	var readCount int
+	for j := 0; j < len(p); j++ {
+		// if ring buffer is empty or completely read
+		// return EOF error.
+		if r.size == 0 {
+			return readCount, io.EOF
+		}
+
+		if r.start == len(r.slice) {
+			r.start = 0
+		}
+
+		p[j] = r.slice[r.start]
+		readCount++
+		// increment the start pointer for ring buffer
+		r.start++
+		// decrement the size of ring buffer
+		r.size--
+	}
+	return readCount, nil
+}
+
+// Len returns the number of unread bytes in the buffer.
+func (r *RingBuffer) Len() int {
+	return r.size
+}
+
+// Bytes returns a copy of the RingBuffer's bytes.
+func (r RingBuffer) Bytes() []byte {
+	var b bytes.Buffer
+	io.Copy(&b, &r)
+	return b.Bytes()
+}
+
+// Reset resets the ring buffer.
+func (r *RingBuffer) Reset() {
+	*r = RingBuffer{
+		slice: r.slice,
+	}
+}
diff --git a/vendor/github.com/aws/smithy-go/local-mod-replace.sh b/vendor/github.com/aws/smithy-go/local-mod-replace.sh
new file mode 100644
index 00000000000..800bf376954
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/local-mod-replace.sh
@@ -0,0 +1,39 @@
+#!/usr/bin/env bash
+
+PROJECT_DIR=""
+SMITHY_SOURCE_DIR=$(cd `dirname $0` && pwd)
+
+usage() {
+	echo "Usage: $0 [-s SMITHY_SOURCE_DIR] [-d PROJECT_DIR]" 1>&2
+	exit 1
+}
+
+while getopts "hs:d:" options; do
+	case "${options}" in
+	s)
+		SMITHY_SOURCE_DIR=${OPTARG}
+		if [ "$SMITHY_SOURCE_DIR" == "" ]; then
+			echo "path to smithy-go source directory is required" || exit
+			usage
+		fi
+		;;
+	d)
+		PROJECT_DIR=${OPTARG}
+		;;
+	h)
+		usage
+		;;
+	*)
+		usage
+		;;
+	esac
+done
+
+if [ "$PROJECT_DIR" != "" ]; then
+	cd $PROJECT_DIR || exit
+fi
+
+go mod graph | awk '{print $1}' | cut -d '@' -f 1 | sort | uniq | grep "github.com/aws/smithy-go" | while read x; do
+	repPath=${x/github.com\/aws\/smithy-go/${SMITHY_SOURCE_DIR}}
+	echo -replace $x=$repPath
+done | xargs go mod edit
diff --git a/vendor/github.com/aws/smithy-go/logging/logger.go b/vendor/github.com/aws/smithy-go/logging/logger.go
new file mode 100644
index 00000000000..2071924bd30
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/logging/logger.go
@@ -0,0 +1,82 @@
+package logging
+
+import (
+	"context"
+	"io"
+	"log"
+)
+
+// Classification is the type of the log entry's classification name.
+type Classification string
+
+// Set of standard classifications that can be used by clients and middleware
+const (
+	Warn  Classification = "WARN"
+	Debug Classification = "DEBUG"
+)
+
+// Logger is an interface for logging entries at certain classifications.
+type Logger interface {
+	// Logf is expected to support the standard fmt package "verbs".
+	Logf(classification Classification, format string, v ...interface{})
+}
+
+// LoggerFunc is a wrapper around a function to satisfy the Logger interface.
+type LoggerFunc func(classification Classification, format string, v ...interface{})
+
+// Logf delegates the logging request to the wrapped function.
+func (f LoggerFunc) Logf(classification Classification, format string, v ...interface{}) {
+	f(classification, format, v...)
+}
+
+// ContextLogger is an optional interface a Logger implementation may expose that provides
+// the ability to create context aware log entries.
+type ContextLogger interface {
+	WithContext(context.Context) Logger
+}
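+
+// Editor's note: an illustrative sketch (not part of upstream smithy-go)
+// showing LoggerFunc adapting a plain function into a Logger:
+//
+//	logger := LoggerFunc(func(c Classification, format string, v ...interface{}) {
+//		log.Printf(string(c)+" "+format, v...)
+//	})
+//	logger.Logf(Debug, "retrying request, attempt %d", 2)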
+
+// WithContext will pass the provided context to logger if it implements the ContextLogger interface and return the resulting
+// logger. Otherwise the logger will be returned as is. As a special case if a nil logger is provided, a Nop logger will
+// be returned to the caller.
+func WithContext(ctx context.Context, logger Logger) Logger {
+	if logger == nil {
+		return Nop{}
+	}
+
+	cl, ok := logger.(ContextLogger)
+	if !ok {
+		return logger
+	}
+
+	return cl.WithContext(ctx)
+}
+
+// Nop is a Logger implementation that simply does not perform any logging.
+type Nop struct{}
+
+// Logf simply returns without performing any action
+func (n Nop) Logf(Classification, string, ...interface{}) {
+	return
+}
+
+// StandardLogger is a Logger implementation that wraps the standard library logger, and delegates logging to its
+// Printf method.
+type StandardLogger struct {
+	Logger *log.Logger
+}
+
+// Logf logs the given classification and message to the underlying logger.
+func (s StandardLogger) Logf(classification Classification, format string, v ...interface{}) {
+	if len(classification) != 0 {
+		format = string(classification) + " " + format
+	}
+
+	s.Logger.Printf(format, v...)
+}
+
+// NewStandardLogger returns a new StandardLogger
+func NewStandardLogger(writer io.Writer) *StandardLogger {
+	return &StandardLogger{
+		Logger: log.New(writer, "SDK ", log.LstdFlags),
+	}
+}
diff --git a/vendor/github.com/aws/smithy-go/middleware/doc.go b/vendor/github.com/aws/smithy-go/middleware/doc.go
new file mode 100644
index 00000000000..9858928a7f8
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/middleware/doc.go
@@ -0,0 +1,67 @@
+// Package middleware provides transport agnostic middleware for decorating SDK
+// handlers.
+//
+// The Smithy middleware stack provides ordered behavior to be invoked on an
+// underlying handler. The stack is separated into steps that are invoked in a
+// static order. A step is a collection of middleware that are injected into an
+// ordered list defined by the user. The user may add, insert, swap, and remove a
+// step's middleware. When the stack is invoked the step middleware become static,
+// and their order cannot be modified.
+//
+// A stack and its step middleware are **not** safe to modify concurrently.
+//
+// A stack will use the ordered list of middleware to decorate an underlying
+// handler. A handler could be something like an HTTP Client that round trips an
+// API operation over HTTP.
+//
+// Smithy Middleware Stack
+//
+// A Stack is a collection of middleware that wrap a handler. The stack can be
+// broken down into discrete steps. Each step may contain zero or more middleware
+// specific to that stack's step.
+//
+// A Stack Step is a predefined set of middleware that are invoked in a static
+// order by the Stack. These steps represent fixed points in the middleware stack
+// for organizing specific behavior, such as serialize and build. A Stack Step is
+// composed of zero or more middleware that are specific to that step. A step may
+// define its own set of input/output parameters the generic input/output
+// parameters are cast from. A step calls its middleware recursively, before
+// calling the next step in the stack returning the result or error of the step
+// middleware decorating the underlying handler.
+//
+// * Initialize: Prepares the input, and sets any default parameters as needed,
+// (e.g. idempotency token, and presigned URLs).
+//
+// * Serialize: Serializes the prepared input into a data structure that can be
+// consumed by the target transport's message, (e.g. REST-JSON serialization).
+//
+// * Build: Adds additional metadata to the serialized transport message, (e.g.
+// HTTP's Content-Length header, or body checksum). Decorations and
+// modifications to the message should be copied to all message attempts.
+//
+// * Finalize: Performs final preparations needed before sending the message. The
+// message should already be complete by this stage, and is only altered to
+// meet the expectations of the recipient, (e.g. Retry and AWS SigV4 request
+// signing).
+//
+// * Deserialize: Reacts to the handler's response returned by the recipient of
+// the request message. Deserializes the response into a structured type or
+// error above stacks can react to.
+//
+// Adding Middleware to a Stack Step
+//
+// Middleware can be added to a step front or back, or relative, by name, to an
+// existing middleware in that stack. If a middleware does not have a name, a
+// unique name will be generated for the middleware when it is added to the step.
+//
+//	// Create middleware stack
+//	stack := middleware.NewStack()
+//
+//	// Add middleware to stack steps
+//	stack.Initialize.Add(paramValidationMiddleware, middleware.After)
+//	stack.Serialize.Add(marshalOperationFoo, middleware.After)
+//	stack.Deserialize.Add(unmarshalOperationFoo, middleware.After)
+//
+//	// Invoke middleware on handler.
+//	resp, err := stack.HandleMiddleware(ctx, req.Input, clientHandler)
+package middleware
diff --git a/vendor/github.com/aws/smithy-go/middleware/logging.go b/vendor/github.com/aws/smithy-go/middleware/logging.go
new file mode 100644
index 00000000000..c2f0dbb6bda
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/middleware/logging.go
@@ -0,0 +1,46 @@
+package middleware
+
+import (
+	"context"
+
+	"github.com/aws/smithy-go/logging"
+)
+
+// loggerKey is the context value key for which the logger is associated with.
+type loggerKey struct{}
+
+// GetLogger takes a context to retrieve a Logger from. If no logger is present on the context a logging.Nop logger
+// is returned. If the logger retrieved from context supports the ContextLogger interface, the context will be passed
+// to the WithContext method and the resulting logger will be returned. Otherwise the stored logger is returned as is.
+func GetLogger(ctx context.Context) logging.Logger {
+	logger, ok := ctx.Value(loggerKey{}).(logging.Logger)
+	if !ok || logger == nil {
+		return logging.Nop{}
+	}
+
+	return logging.WithContext(ctx, logger)
+}
+
+// SetLogger sets the provided logger value on the provided ctx.
+func SetLogger(ctx context.Context, logger logging.Logger) context.Context {
+	return context.WithValue(ctx, loggerKey{}, logger)
+}
+
+type setLogger struct {
+	Logger logging.Logger
+}
+
+// AddSetLoggerMiddleware adds a middleware that will add the provided logger to the middleware context.
+func AddSetLoggerMiddleware(stack *Stack, logger logging.Logger) error {
+	return stack.Initialize.Add(&setLogger{Logger: logger}, After)
+}
+
+func (a *setLogger) ID() string {
+	return "SetLogger"
+}
+
+func (a *setLogger) HandleInitialize(ctx context.Context, in InitializeInput, next InitializeHandler) (
+	out InitializeOutput, metadata Metadata, err error,
+) {
+	return next.HandleInitialize(SetLogger(ctx, a.Logger), in)
+}
diff --git a/vendor/github.com/aws/smithy-go/middleware/metadata.go b/vendor/github.com/aws/smithy-go/middleware/metadata.go
new file mode 100644
index 00000000000..7bb7dbcf5a0
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/middleware/metadata.go
@@ -0,0 +1,65 @@
+package middleware
+
+// MetadataReader provides an interface for reading metadata from the
+// underlying metadata container.
+type MetadataReader interface {
+	Get(key interface{}) interface{}
+}
+
+// Metadata provides storing and reading metadata values. Keys may be any
+// comparable value type. Get and set will panic if key is not a comparable
+// value type.
+//
+// Metadata uses lazy initialization, and the Set method must be called on an
+// addressable value, or pointer. Not doing so may cause a key/value pair to not
+// be set.
+type Metadata struct {
+	values map[interface{}]interface{}
+}
+
+// Get attempts to retrieve the value the key points to. Returns nil if the
+// key was not found.
+//
+// Panics if key type is not comparable.
+func (m Metadata) Get(key interface{}) interface{} {
+	return m.values[key]
+}
+
+// Clone creates a shallow copy of Metadata entries, returning a new Metadata
+// value with the original entries copied into it.
+func (m Metadata) Clone() Metadata {
+	vs := make(map[interface{}]interface{}, len(m.values))
+	for k, v := range m.values {
+		vs[k] = v
+	}
+
+	return Metadata{
+		values: vs,
+	}
+}
+
+// Set stores the value pointed to by the key. If a value already exists at
+// that key it will be replaced with the new value.
+//
+// Set must be called on an addressable value, or pointer. If Set is not
+// called on an addressable value or pointer, the key value pair being set may
+// be lost.
+//
+// Panics if the key type is not comparable.
+func (m *Metadata) Set(key, value interface{}) {
+	if m.values == nil {
+		m.values = map[interface{}]interface{}{}
+	}
+	m.values[key] = value
+}
+
+// Has returns whether the key exists in the metadata.
+//
+// Panics if the key type is not comparable.
+func (m Metadata) Has(key interface{}) bool {
+	if m.values == nil {
+		return false
+	}
+	_, ok := m.values[key]
+	return ok
+}
diff --git a/vendor/github.com/aws/smithy-go/middleware/middleware.go b/vendor/github.com/aws/smithy-go/middleware/middleware.go
new file mode 100644
index 00000000000..803b7c75184
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/middleware/middleware.go
@@ -0,0 +1,71 @@
+package middleware
+
+import (
+	"context"
+)
+
+// Handler provides the interface for performing the logic to obtain an output,
+// or error for the given input.
+type Handler interface {
+	// Handle performs logic to obtain an output for the given input. Handler
+	// should be decorated with middleware to perform input specific behavior.
+	Handle(ctx context.Context, input interface{}) (
+		output interface{}, metadata Metadata, err error,
+	)
+}
+
+// HandlerFunc provides a wrapper around a function pointer to be used as a
+// middleware handler.
+type HandlerFunc func(ctx context.Context, input interface{}) (
+	output interface{}, metadata Metadata, err error,
+)
+
+// Handle invokes the underlying function, returning the result.
+func (fn HandlerFunc) Handle(ctx context.Context, input interface{}) (
+	output interface{}, metadata Metadata, err error,
+) {
+	return fn(ctx, input)
+}
+
+// Middleware provides the interface to call handlers in a chain.
+type Middleware interface {
+	// ID provides a unique identifier for the middleware.
+	ID() string
+
+	// HandleMiddleware performs the middleware's handling of the input, returning the output,
+	// or error. The middleware can invoke the next Handler if handling should
+	// continue.
+	HandleMiddleware(ctx context.Context, input interface{}, next Handler) (
+		output interface{}, metadata Metadata, err error,
+	)
+}
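+
+// Editor's note: an illustrative Middleware implementation (editor's sketch,
+// not part of upstream smithy-go); timingMiddleware, "Timing", and the time
+// import are hypothetical:
+//
+//	type timingMiddleware struct{}
+//
+//	func (timingMiddleware) ID() string { return "Timing" }
+//
+//	func (timingMiddleware) HandleMiddleware(ctx context.Context, input interface{}, next Handler) (
+//		output interface{}, metadata Metadata, err error,
+//	) {
+//		start := time.Now()
+//		output, metadata, err = next.Handle(ctx, input)
+//		metadata.Set("duration", time.Since(start)) // Metadata.Set needs an addressable value
+//		return output, metadata, err
+//	}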
+type decoratedHandler struct { + // The next handler to be called. + Next Handler + + // The current middleware decorating the handler. + With Middleware +} + +// Handle implements the Handler interface to handle a operation invocation. +func (m decoratedHandler) Handle(ctx context.Context, input interface{}) ( + output interface{}, metadata Metadata, err error, +) { + return m.With.HandleMiddleware(ctx, input, m.Next) +} + +// DecorateHandler decorates a handler with a middleware. Wrapping the handler +// with the middleware. +func DecorateHandler(h Handler, with ...Middleware) Handler { + for i := len(with) - 1; i >= 0; i-- { + h = decoratedHandler{ + Next: h, + With: with[i], + } + } + + return h +} diff --git a/vendor/github.com/aws/smithy-go/middleware/ordered_group.go b/vendor/github.com/aws/smithy-go/middleware/ordered_group.go new file mode 100644 index 00000000000..4b195308c59 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/middleware/ordered_group.go @@ -0,0 +1,268 @@ +package middleware + +import "fmt" + +// RelativePosition provides specifying the relative position of a middleware +// in an ordered group. +type RelativePosition int + +// Relative position for middleware in steps. +const ( + After RelativePosition = iota + Before +) + +type ider interface { + ID() string +} + +// orderedIDs provides an ordered collection of items with relative ordering +// by name. +type orderedIDs struct { + order *relativeOrder + items map[string]ider +} + +const baseOrderedItems = 5 + +func newOrderedIDs() *orderedIDs { + return &orderedIDs{ + order: newRelativeOrder(), + items: make(map[string]ider, baseOrderedItems), + } +} + +// Add injects the item to the relative position of the item group. Returns an +// error if the item already exists. +func (g *orderedIDs) Add(m ider, pos RelativePosition) error { + id := m.ID() + if len(id) == 0 { + return fmt.Errorf("empty ID, ID must not be empty") + } + + if err := g.order.Add(pos, id); err != nil { + return err + } + + g.items[id] = m + return nil +} + +// Insert injects the item relative to an existing item id. Returns an error if +// the original item does not exist, or the item being added already exists. +func (g *orderedIDs) Insert(m ider, relativeTo string, pos RelativePosition) error { + if len(m.ID()) == 0 { + return fmt.Errorf("insert ID must not be empty") + } + if len(relativeTo) == 0 { + return fmt.Errorf("relative to ID must not be empty") + } + + if err := g.order.Insert(relativeTo, pos, m.ID()); err != nil { + return err + } + + g.items[m.ID()] = m + return nil +} + +// Get returns the ider identified by id. If ider is not present, returns false. +func (g *orderedIDs) Get(id string) (ider, bool) { + v, ok := g.items[id] + return v, ok +} + +// Swap removes the item by id, replacing it with the new item. Returns an error +// if the original item doesn't exist. +func (g *orderedIDs) Swap(id string, m ider) (ider, error) { + if len(id) == 0 { + return nil, fmt.Errorf("swap from ID must not be empty") + } + + iderID := m.ID() + if len(iderID) == 0 { + return nil, fmt.Errorf("swap to ID must not be empty") + } + + if err := g.order.Swap(id, iderID); err != nil { + return nil, err + } + + removed := g.items[id] + + delete(g.items, id) + g.items[iderID] = m + + return removed, nil +} + +// Remove removes the item by id. Returns an error if the item +// doesn't exist. 
+func (g *orderedIDs) Remove(id string) (ider, error) { + if len(id) == 0 { + return nil, fmt.Errorf("remove ID must not be empty") + } + + if err := g.order.Remove(id); err != nil { + return nil, err + } + + removed := g.items[id] + delete(g.items, id) + return removed, nil +} + +func (g *orderedIDs) List() []string { + items := g.order.List() + order := make([]string, len(items)) + copy(order, items) + return order +} + +// Clear removes all entries and slots. +func (g *orderedIDs) Clear() { + g.order.Clear() + g.items = map[string]ider{} +} + +// GetOrder returns the item in the order it should be invoked in. +func (g *orderedIDs) GetOrder() []interface{} { + order := g.order.List() + ordered := make([]interface{}, len(order)) + for i := 0; i < len(order); i++ { + ordered[i] = g.items[order[i]] + } + + return ordered +} + +// relativeOrder provides ordering of item +type relativeOrder struct { + order []string +} + +func newRelativeOrder() *relativeOrder { + return &relativeOrder{ + order: make([]string, 0, baseOrderedItems), + } +} + +// Add inserts an item into the order relative to the position provided. +func (s *relativeOrder) Add(pos RelativePosition, ids ...string) error { + if len(ids) == 0 { + return nil + } + + for _, id := range ids { + if _, ok := s.has(id); ok { + return fmt.Errorf("already exists, %v", id) + } + } + + switch pos { + case Before: + return s.insert(0, Before, ids...) + + case After: + s.order = append(s.order, ids...) + + default: + return fmt.Errorf("invalid position, %v", int(pos)) + } + + return nil +} + +// Insert injects an item before or after the relative item. Returns +// an error if the relative item does not exist. +func (s *relativeOrder) Insert(relativeTo string, pos RelativePosition, ids ...string) error { + if len(ids) == 0 { + return nil + } + + for _, id := range ids { + if _, ok := s.has(id); ok { + return fmt.Errorf("already exists, %v", id) + } + } + + i, ok := s.has(relativeTo) + if !ok { + return fmt.Errorf("not found, %v", relativeTo) + } + + return s.insert(i, pos, ids...) +} + +// Swap will replace the item id with the to item. Returns an +// error if the original item id does not exist. Allows swapping out an +// item for another item with the same id. +func (s *relativeOrder) Swap(id, to string) error { + i, ok := s.has(id) + if !ok { + return fmt.Errorf("not found, %v", id) + } + + if _, ok = s.has(to); ok && id != to { + return fmt.Errorf("already exists, %v", to) + } + + s.order[i] = to + return nil +} + +func (s *relativeOrder) Remove(id string) error { + i, ok := s.has(id) + if !ok { + return fmt.Errorf("not found, %v", id) + } + + s.order = append(s.order[:i], s.order[i+1:]...) + return nil +} + +func (s *relativeOrder) List() []string { + return s.order +} + +func (s *relativeOrder) Clear() { + s.order = s.order[0:0] +} + +func (s *relativeOrder) insert(i int, pos RelativePosition, ids ...string) error { + switch pos { + case Before: + n := len(ids) + var src []string + if n <= cap(s.order)-len(s.order) { + s.order = s.order[:len(s.order)+n] + src = s.order + } else { + src = s.order + s.order = make([]string, len(s.order)+n) + copy(s.order[:i], src[:i]) // only when allocating a new slice do we need to copy the front half + } + copy(s.order[i+n:], src[i:]) + copy(s.order[i:], ids) + case After: + if i == len(s.order)-1 || len(s.order) == 0 { + s.order = append(s.order, ids...) + } else { + s.order = append(s.order[:i+1], append(ids, s.order[i+1:]...)...) 
+		}
+
+	default:
+		return fmt.Errorf("invalid position, %v", int(pos))
+	}
+
+	return nil
+}
+
+func (s *relativeOrder) has(id string) (i int, found bool) {
+	for i := 0; i < len(s.order); i++ {
+		if s.order[i] == id {
+			return i, true
+		}
+	}
+	return 0, false
+}
diff --git a/vendor/github.com/aws/smithy-go/middleware/stack.go b/vendor/github.com/aws/smithy-go/middleware/stack.go
new file mode 100644
index 00000000000..45ccb5b93c9
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/middleware/stack.go
@@ -0,0 +1,209 @@
+package middleware
+
+import (
+	"context"
+	"io"
+	"strings"
+)
+
+// Stack provides a protocol and transport agnostic set of middleware split into
+// distinct steps. Steps have specific transitions between them, that are
+// managed by the individual step.
+//
+// Steps are composed as middleware around the underlying handler in the
+// following order:
+//
+//	Initialize -> Serialize -> Build -> Finalize -> Deserialize -> Handler
+//
+// Any middleware within the chain may choose to stop and return an error or
+// response. Since the middleware decorate the handler like a call stack, each
+// middleware will receive the result of the next middleware in the chain.
+// Middleware that does not need to react to an input, or result must forward
+// along the input down the chain, or return the result back up the chain.
+//
+//	Initialize <- Serialize -> Build -> Finalize <- Deserialize <- Handler
+type Stack struct {
+	// Initialize prepares the input, and sets any default parameters as
+	// needed, (e.g. idempotency token, and presigned URLs).
+	//
+	// Takes Input Parameters, and returns result or error.
+	//
+	// Receives result or error from Serialize step.
+	Initialize *InitializeStep
+
+	// Serialize serializes the prepared input into a data structure that can be consumed
+	// by the target transport's message, (e.g. REST-JSON serialization)
+	//
+	// Converts Input Parameters into a Request, and returns the result or error.
+	//
+	// Receives result or error from Build step.
+	Serialize *SerializeStep
+
+	// Build adds additional metadata to the serialized transport message
+	// (e.g. HTTP's Content-Length header, or body checksum). Decorations and
+	// modifications to the message should be copied to all message attempts.
+	//
+	// Takes Request, and returns result or error.
+	//
+	// Receives result or error from Finalize step.
+	Build *BuildStep
+
+	// Finalize performs final preparations needed before sending the message. The
+	// message should already be complete by this stage, and is only altered
+	// to meet the expectations of the recipient (e.g. Retry and AWS SigV4
+	// request signing)
+	//
+	// Takes Request, and returns result or error.
+	//
+	// Receives result or error from Deserialize step.
+	Finalize *FinalizeStep
+
+	// Deserialize reacts to the handler's response returned by the recipient of the request
+	// message. Deserializes the response into a structured type or error above
+	// stacks can react to.
+	//
+	// Should only forward Request to underlying handler.
+	//
+	// Takes Request, and returns result or error.
+	//
+	// Receives raw response, or error from underlying handler.
+	Deserialize *DeserializeStep
+
+	id string
+}
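+
+// Editor's note: an illustrative sketch (not part of upstream smithy-go) of
+// wiring the steps above; newRequestFn, paramValidator, contentLength, input,
+// and transportHandler are hypothetical values:
+//
+//	stack := NewStack("operation Foo", newRequestFn)
+//	_ = stack.Initialize.Add(paramValidator, After)
+//	_ = stack.Build.Add(contentLength, After)
+//	out, _, err := stack.HandleMiddleware(ctx, input, transportHandler)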
+
+// NewStack returns an initialized empty stack.
+func NewStack(id string, newRequestFn func() interface{}) *Stack {
+	return &Stack{
+		id:          id,
+		Initialize:  NewInitializeStep(),
+		Serialize:   NewSerializeStep(newRequestFn),
+		Build:       NewBuildStep(),
+		Finalize:    NewFinalizeStep(),
+		Deserialize: NewDeserializeStep(),
+	}
+}
+
+// ID returns the unique ID for the stack as a middleware.
+func (s *Stack) ID() string { return s.id }
+
+// HandleMiddleware invokes the middleware stack decorating the next handler.
+// Each step of the stack will be invoked in order before calling the next step,
+// with the next handler called last.
+//
+// The input value must be the input parameters of the operation being
+// performed.
+//
+// Will return the result of the operation, or error.
+func (s *Stack) HandleMiddleware(ctx context.Context, input interface{}, next Handler) (
+	output interface{}, metadata Metadata, err error,
+) {
+	h := DecorateHandler(next,
+		s.Initialize,
+		s.Serialize,
+		s.Build,
+		s.Finalize,
+		s.Deserialize,
+	)
+
+	return h.Handle(ctx, input)
+}
+
+// List returns a list of all middleware in the stack by step.
+func (s *Stack) List() []string {
+	var l []string
+	l = append(l, s.id)
+
+	l = append(l, s.Initialize.ID())
+	l = append(l, s.Initialize.List()...)
+
+	l = append(l, s.Serialize.ID())
+	l = append(l, s.Serialize.List()...)
+
+	l = append(l, s.Build.ID())
+	l = append(l, s.Build.List()...)
+
+	l = append(l, s.Finalize.ID())
+	l = append(l, s.Finalize.List()...)
+
+	l = append(l, s.Deserialize.ID())
+	l = append(l, s.Deserialize.List()...)
+
+	return l
+}
+
+func (s *Stack) String() string {
+	var b strings.Builder
+
+	w := &indentWriter{w: &b}
+
+	w.WriteLine(s.id)
+	w.Push()
+
+	writeStepItems(w, s.Initialize)
+	writeStepItems(w, s.Serialize)
+	writeStepItems(w, s.Build)
+	writeStepItems(w, s.Finalize)
+	writeStepItems(w, s.Deserialize)
+
+	return b.String()
+}
+
+type stackStepper interface {
+	ID() string
+	List() []string
+}
+
+func writeStepItems(w *indentWriter, s stackStepper) {
+	type lister interface {
+		List() []string
+	}
+
+	w.WriteLine(s.ID())
+	w.Push()
+
+	defer w.Pop()
+
+	// ignore stack to prevent circular iterations
+	if _, ok := s.(*Stack); ok {
+		return
+	}
+
+	for _, id := range s.List() {
+		w.WriteLine(id)
+	}
+}
+
+type stringWriter interface {
+	io.Writer
+	WriteString(string) (int, error)
+	WriteRune(rune) (int, error)
+}
+
+type indentWriter struct {
+	w     stringWriter
+	depth int
+}
+
+const indentDepth = "\t\t\t\t\t\t\t\t\t\t"
+
+func (w *indentWriter) Push() {
+	w.depth++
+}
+
+func (w *indentWriter) Pop() {
+	w.depth--
+	if w.depth < 0 {
+		w.depth = 0
+	}
+}
+
+func (w *indentWriter) WriteLine(v string) {
+	w.w.WriteString(indentDepth[:w.depth])
+
+	v = strings.ReplaceAll(v, "\n", "\\n")
+	v = strings.ReplaceAll(v, "\r", "\\r")
+
+	w.w.WriteString(v)
+	w.w.WriteRune('\n')
+}
diff --git a/vendor/github.com/aws/smithy-go/middleware/stack_values.go b/vendor/github.com/aws/smithy-go/middleware/stack_values.go
new file mode 100644
index 00000000000..ef96009ba18
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/middleware/stack_values.go
@@ -0,0 +1,100 @@
+package middleware
+
+import (
+	"context"
+	"reflect"
+	"strings"
+)
+
+// WithStackValue adds a key value pair to the context that is intended to be
+// scoped to a stack. Use ClearStackValues to get a new context with all stack
+// values cleared.
+func WithStackValue(ctx context.Context, key, value interface{}) context.Context {
+	md, _ := ctx.Value(stackValuesKey{}).(*stackValues)
+
+	md = withStackValue(md, key, value)
+	return context.WithValue(ctx, stackValuesKey{}, md)
+}
+
+// ClearStackValues returns a context without any stack values.
+func ClearStackValues(ctx context.Context) context.Context {
+	return context.WithValue(ctx, stackValuesKey{}, nil)
+}
+
+// GetStackValue returns the value pointed to by the key within the stack
+// values, if it is present.
+func GetStackValue(ctx context.Context, key interface{}) interface{} {
+	md, _ := ctx.Value(stackValuesKey{}).(*stackValues)
+	if md == nil {
+		return nil
+	}
+
+	return md.Value(key)
+}
+
+type stackValuesKey struct{}
+
+type stackValues struct {
+	key    interface{}
+	value  interface{}
+	parent *stackValues
+}
+
+func withStackValue(parent *stackValues, key, value interface{}) *stackValues {
+	if key == nil {
+		panic("nil key")
+	}
+	if !reflect.TypeOf(key).Comparable() {
+		panic("key is not comparable")
+	}
+	return &stackValues{key: key, value: value, parent: parent}
+}
+
+func (m *stackValues) Value(key interface{}) interface{} {
+	if key == m.key {
+		return m.value
+	}
+
+	if m.parent == nil {
+		return nil
+	}
+
+	return m.parent.Value(key)
+}
+
+func (c *stackValues) String() string {
+	var str strings.Builder
+
+	// Walk the chain of entries from newest to oldest, printing each
+	// key/value pair.
+	cc := c
+	for cc != nil {
+		str.WriteString("(" +
+			reflect.TypeOf(cc.key).String() +
+			": " +
+			stringify(cc.value) +
+			")")
+		if cc.parent != nil {
+			str.WriteString(" -> ")
+		}
+		cc = cc.parent
+	}
+	str.WriteRune('}')
+
+	return str.String()
+}
+
+type stringer interface {
+	String() string
+}
+
+// stringify tries a bit to stringify v, without using fmt, since we don't
+// want context depending on the unicode tables. This is only used by
+// *stackValues.String().
+func stringify(v interface{}) string {
+	switch s := v.(type) {
+	case stringer:
+		return s.String()
+	case string:
+		return s
+	}
+	return ""
+}
diff --git a/vendor/github.com/aws/smithy-go/middleware/step_build.go b/vendor/github.com/aws/smithy-go/middleware/step_build.go
new file mode 100644
index 00000000000..7e1d94caeef
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/middleware/step_build.go
@@ -0,0 +1,211 @@
+package middleware
+
+import (
+	"context"
+)
+
+// BuildInput provides the input parameters for the BuildMiddleware to consume.
+// BuildMiddleware may modify the Request value before forwarding the input
+// along to the next BuildHandler.
+type BuildInput struct {
+	Request interface{}
+}
+
+// BuildOutput provides the result returned by the next BuildHandler.
+type BuildOutput struct {
+	Result interface{}
+}
+
+// BuildHandler provides the interface for the next handler the
+// BuildMiddleware will call in the middleware chain.
+type BuildHandler interface {
+	HandleBuild(ctx context.Context, in BuildInput) (
+		out BuildOutput, metadata Metadata, err error,
+	)
+}
+
+// BuildMiddleware provides the interface for middleware specific to the
+// build step. Delegates to the next BuildHandler for further
+// processing.
+type BuildMiddleware interface {
+	// ID returns a unique ID for the middleware in the BuildStep. The step does not
+	// allow duplicate IDs.
+	ID() string
+
+	// HandleBuild invokes the middleware behavior which must delegate to the next handler
+	// for the middleware chain to continue. The method must return a result or
+	// error to its caller.
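+	//
+	// A typical implementation (editorial sketch, not prescribed by this
+	// interface) decorates the in-flight request and then delegates:
+	//
+	//	// decorate in.Request (e.g. stamp a header) before delegating
+	//	return next.HandleBuild(ctx, in)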
+ HandleBuild(ctx context.Context, in BuildInput, next BuildHandler) ( + out BuildOutput, metadata Metadata, err error, + ) +} + +// BuildMiddlewareFunc returns a BuildMiddleware with the unique ID provided, +// and the func to be invoked. +func BuildMiddlewareFunc(id string, fn func(context.Context, BuildInput, BuildHandler) (BuildOutput, Metadata, error)) BuildMiddleware { + return buildMiddlewareFunc{ + id: id, + fn: fn, + } +} + +type buildMiddlewareFunc struct { + // Unique ID for the middleware. + id string + + // Middleware function to be called. + fn func(context.Context, BuildInput, BuildHandler) (BuildOutput, Metadata, error) +} + +// ID returns the unique ID for the middleware. +func (s buildMiddlewareFunc) ID() string { return s.id } + +// HandleBuild invokes the middleware Fn. +func (s buildMiddlewareFunc) HandleBuild(ctx context.Context, in BuildInput, next BuildHandler) ( + out BuildOutput, metadata Metadata, err error, +) { + return s.fn(ctx, in, next) +} + +var _ BuildMiddleware = (buildMiddlewareFunc{}) + +// BuildStep provides the ordered grouping of BuildMiddleware to be invoked on +// a handler. +type BuildStep struct { + ids *orderedIDs +} + +// NewBuildStep returns a BuildStep ready to have middleware for +// initialization added to it. +func NewBuildStep() *BuildStep { + return &BuildStep{ + ids: newOrderedIDs(), + } +} + +var _ Middleware = (*BuildStep)(nil) + +// ID returns the unique name of the step as a middleware. +func (s *BuildStep) ID() string { + return "Build stack step" +} + +// HandleMiddleware invokes the middleware by decorating the next handler +// provided. Returns the result of the middleware and handler being invoked. +// +// Implements Middleware interface. +func (s *BuildStep) HandleMiddleware(ctx context.Context, in interface{}, next Handler) ( + out interface{}, metadata Metadata, err error, +) { + order := s.ids.GetOrder() + + var h BuildHandler = buildWrapHandler{Next: next} + for i := len(order) - 1; i >= 0; i-- { + h = decoratedBuildHandler{ + Next: h, + With: order[i].(BuildMiddleware), + } + } + + sIn := BuildInput{ + Request: in, + } + + res, metadata, err := h.HandleBuild(ctx, sIn) + return res.Result, metadata, err +} + +// Get retrieves the middleware identified by id. If the middleware is not present, returns false. +func (s *BuildStep) Get(id string) (BuildMiddleware, bool) { + get, ok := s.ids.Get(id) + if !ok { + return nil, false + } + return get.(BuildMiddleware), ok +} + +// Add injects the middleware to the relative position of the middleware group. +// Returns an error if the middleware already exists. +func (s *BuildStep) Add(m BuildMiddleware, pos RelativePosition) error { + return s.ids.Add(m, pos) +} + +// Insert injects the middleware relative to an existing middleware id. +// Returns an error if the original middleware does not exist, or the middleware +// being added already exists. +func (s *BuildStep) Insert(m BuildMiddleware, relativeTo string, pos RelativePosition) error { + return s.ids.Insert(m, relativeTo, pos) +} + +// Swap removes the middleware by id, replacing it with the new middleware. +// Returns the middleware removed, or an error if the middleware to be removed +// doesn't exist. +func (s *BuildStep) Swap(id string, m BuildMiddleware) (BuildMiddleware, error) { + removed, err := s.ids.Swap(id, m) + if err != nil { + return nil, err + } + + return removed.(BuildMiddleware), nil +} + +// Remove removes the middleware by id. Returns error if the middleware +// doesn't exist. 
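+//
+// For example (editorial sketch; "RequestLogger" is a hypothetical ID):
+//
+//	if _, err := stack.Build.Remove("RequestLogger"); err != nil {
+//		// no middleware was registered under that ID
+//	}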
+func (s *BuildStep) Remove(id string) (BuildMiddleware, error) {
+	removed, err := s.ids.Remove(id)
+	if err != nil {
+		return nil, err
+	}
+
+	return removed.(BuildMiddleware), nil
+}
+
+// List returns a list of the middleware in the step.
+func (s *BuildStep) List() []string {
+	return s.ids.List()
+}
+
+// Clear removes all middleware in the step.
+func (s *BuildStep) Clear() {
+	s.ids.Clear()
+}
+
+type buildWrapHandler struct {
+	Next Handler
+}
+
+var _ BuildHandler = (*buildWrapHandler)(nil)
+
+// HandleBuild implements BuildHandler, converts types and delegates to the
+// underlying generic handler.
+func (w buildWrapHandler) HandleBuild(ctx context.Context, in BuildInput) (
+	out BuildOutput, metadata Metadata, err error,
+) {
+	res, metadata, err := w.Next.Handle(ctx, in.Request)
+	return BuildOutput{
+		Result: res,
+	}, metadata, err
+}
+
+type decoratedBuildHandler struct {
+	Next BuildHandler
+	With BuildMiddleware
+}
+
+var _ BuildHandler = (*decoratedBuildHandler)(nil)
+
+func (h decoratedBuildHandler) HandleBuild(ctx context.Context, in BuildInput) (
+	out BuildOutput, metadata Metadata, err error,
+) {
+	return h.With.HandleBuild(ctx, in, h.Next)
+}
+
+// BuildHandlerFunc provides a wrapper around a function to be used as a build middleware handler.
+type BuildHandlerFunc func(context.Context, BuildInput) (BuildOutput, Metadata, error)
+
+// HandleBuild invokes the wrapped function with the provided arguments.
+func (b BuildHandlerFunc) HandleBuild(ctx context.Context, in BuildInput) (BuildOutput, Metadata, error) {
+	return b(ctx, in)
+}
+
+var _ BuildHandler = BuildHandlerFunc(nil)
diff --git a/vendor/github.com/aws/smithy-go/middleware/step_deserialize.go b/vendor/github.com/aws/smithy-go/middleware/step_deserialize.go
new file mode 100644
index 00000000000..44860721571
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/middleware/step_deserialize.go
@@ -0,0 +1,217 @@
+package middleware
+
+import (
+	"context"
+)
+
+// DeserializeInput provides the input parameters for the DeserializeMiddleware to
+// consume. DeserializeMiddleware should not modify the Request, and instead
+// forward it along to the next DeserializeHandler.
+type DeserializeInput struct {
+	Request interface{}
+}
+
+// DeserializeOutput provides the result returned by the next
+// DeserializeHandler. The DeserializeMiddleware should deserialize the
+// RawResponse into a Result that can be consumed by middleware higher up in
+// the stack.
+type DeserializeOutput struct {
+	RawResponse interface{}
+	Result      interface{}
+}
+
+// DeserializeHandler provides the interface for the next handler the
+// DeserializeMiddleware will call in the middleware chain.
+type DeserializeHandler interface {
+	HandleDeserialize(ctx context.Context, in DeserializeInput) (
+		out DeserializeOutput, metadata Metadata, err error,
+	)
+}
+
+// DeserializeMiddleware provides the interface for middleware specific to the
+// deserialize step. Delegates to the next DeserializeHandler for further
+// processing.
+type DeserializeMiddleware interface {
+	// ID returns a unique ID for the middleware in the DeserializeStep. The step does not
+	// allow duplicate IDs.
+	ID() string
+
+	// HandleDeserialize invokes the middleware behavior which must delegate to the next handler
+	// for the middleware chain to continue. The method must return a result or
+	// error to its caller.
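+	//
+	// A typical implementation (editorial sketch, not prescribed by this
+	// interface) delegates first, then decodes the raw response on the way
+	// back up the stack:
+	//
+	//	out, metadata, err := next.HandleDeserialize(ctx, in)
+	//	if err == nil {
+	//		// decode out.RawResponse and assign the decoded value to out.Result
+	//	}
+	//	return out, metadata, err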
+ HandleDeserialize(ctx context.Context, in DeserializeInput, next DeserializeHandler) ( + out DeserializeOutput, metadata Metadata, err error, + ) +} + +// DeserializeMiddlewareFunc returns a DeserializeMiddleware with the unique ID +// provided, and the func to be invoked. +func DeserializeMiddlewareFunc(id string, fn func(context.Context, DeserializeInput, DeserializeHandler) (DeserializeOutput, Metadata, error)) DeserializeMiddleware { + return deserializeMiddlewareFunc{ + id: id, + fn: fn, + } +} + +type deserializeMiddlewareFunc struct { + // Unique ID for the middleware. + id string + + // Middleware function to be called. + fn func(context.Context, DeserializeInput, DeserializeHandler) ( + DeserializeOutput, Metadata, error, + ) +} + +// ID returns the unique ID for the middleware. +func (s deserializeMiddlewareFunc) ID() string { return s.id } + +// HandleDeserialize invokes the middleware Fn. +func (s deserializeMiddlewareFunc) HandleDeserialize(ctx context.Context, in DeserializeInput, next DeserializeHandler) ( + out DeserializeOutput, metadata Metadata, err error, +) { + return s.fn(ctx, in, next) +} + +var _ DeserializeMiddleware = (deserializeMiddlewareFunc{}) + +// DeserializeStep provides the ordered grouping of DeserializeMiddleware to be +// invoked on a handler. +type DeserializeStep struct { + ids *orderedIDs +} + +// NewDeserializeStep returns a DeserializeStep ready to have middleware for +// initialization added to it. +func NewDeserializeStep() *DeserializeStep { + return &DeserializeStep{ + ids: newOrderedIDs(), + } +} + +var _ Middleware = (*DeserializeStep)(nil) + +// ID returns the unique ID of the step as a middleware. +func (s *DeserializeStep) ID() string { + return "Deserialize stack step" +} + +// HandleMiddleware invokes the middleware by decorating the next handler +// provided. Returns the result of the middleware and handler being invoked. +// +// Implements Middleware interface. +func (s *DeserializeStep) HandleMiddleware(ctx context.Context, in interface{}, next Handler) ( + out interface{}, metadata Metadata, err error, +) { + order := s.ids.GetOrder() + + var h DeserializeHandler = deserializeWrapHandler{Next: next} + for i := len(order) - 1; i >= 0; i-- { + h = decoratedDeserializeHandler{ + Next: h, + With: order[i].(DeserializeMiddleware), + } + } + + sIn := DeserializeInput{ + Request: in, + } + + res, metadata, err := h.HandleDeserialize(ctx, sIn) + return res.Result, metadata, err +} + +// Get retrieves the middleware identified by id. If the middleware is not present, returns false. +func (s *DeserializeStep) Get(id string) (DeserializeMiddleware, bool) { + get, ok := s.ids.Get(id) + if !ok { + return nil, false + } + return get.(DeserializeMiddleware), ok +} + +// Add injects the middleware to the relative position of the middleware group. +// Returns an error if the middleware already exists. +func (s *DeserializeStep) Add(m DeserializeMiddleware, pos RelativePosition) error { + return s.ids.Add(m, pos) +} + +// Insert injects the middleware relative to an existing middleware ID. +// Returns error if the original middleware does not exist, or the middleware +// being added already exists. +func (s *DeserializeStep) Insert(m DeserializeMiddleware, relativeTo string, pos RelativePosition) error { + return s.ids.Insert(m, relativeTo, pos) +} + +// Swap removes the middleware by id, replacing it with the new middleware. +// Returns the middleware removed, or error if the middleware to be removed +// doesn't exist. 
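+//
+// For example (editorial sketch; the ID and middleware names are
+// hypothetical):
+//
+//	old, err := stack.Deserialize.Swap("ResponseDecoder", newDecoderMW)
+//	if err != nil {
+//		// no middleware registered under "ResponseDecoder"
+//	}
+//	_ = old // the middleware that was replaced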
+func (s *DeserializeStep) Swap(id string, m DeserializeMiddleware) (DeserializeMiddleware, error) {
+	removed, err := s.ids.Swap(id, m)
+	if err != nil {
+		return nil, err
+	}
+
+	return removed.(DeserializeMiddleware), nil
+}
+
+// Remove removes the middleware by id. Returns error if the middleware
+// doesn't exist.
+func (s *DeserializeStep) Remove(id string) (DeserializeMiddleware, error) {
+	removed, err := s.ids.Remove(id)
+	if err != nil {
+		return nil, err
+	}
+
+	return removed.(DeserializeMiddleware), nil
+}
+
+// List returns a list of the middleware in the step.
+func (s *DeserializeStep) List() []string {
+	return s.ids.List()
+}
+
+// Clear removes all middleware in the step.
+func (s *DeserializeStep) Clear() {
+	s.ids.Clear()
+}
+
+type deserializeWrapHandler struct {
+	Next Handler
+}
+
+var _ DeserializeHandler = (*deserializeWrapHandler)(nil)
+
+// HandleDeserialize implements DeserializeHandler, converts types and delegates to the
+// underlying generic handler.
+func (w deserializeWrapHandler) HandleDeserialize(ctx context.Context, in DeserializeInput) (
+	out DeserializeOutput, metadata Metadata, err error,
+) {
+	resp, metadata, err := w.Next.Handle(ctx, in.Request)
+	return DeserializeOutput{
+		RawResponse: resp,
+	}, metadata, err
+}
+
+type decoratedDeserializeHandler struct {
+	Next DeserializeHandler
+	With DeserializeMiddleware
+}
+
+var _ DeserializeHandler = (*decoratedDeserializeHandler)(nil)
+
+func (h decoratedDeserializeHandler) HandleDeserialize(ctx context.Context, in DeserializeInput) (
+	out DeserializeOutput, metadata Metadata, err error,
+) {
+	return h.With.HandleDeserialize(ctx, in, h.Next)
+}
+
+// DeserializeHandlerFunc provides a wrapper around a function to be used as a deserialize middleware handler.
+type DeserializeHandlerFunc func(context.Context, DeserializeInput) (DeserializeOutput, Metadata, error)
+
+// HandleDeserialize invokes the wrapped function with the given arguments.
+func (d DeserializeHandlerFunc) HandleDeserialize(ctx context.Context, in DeserializeInput) (DeserializeOutput, Metadata, error) {
+	return d(ctx, in)
+}
+
+var _ DeserializeHandler = DeserializeHandlerFunc(nil)
diff --git a/vendor/github.com/aws/smithy-go/middleware/step_finalize.go b/vendor/github.com/aws/smithy-go/middleware/step_finalize.go
new file mode 100644
index 00000000000..065e3885de9
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/middleware/step_finalize.go
@@ -0,0 +1,211 @@
+package middleware
+
+import "context"
+
+// FinalizeInput provides the input parameters for the FinalizeMiddleware to
+// consume. FinalizeMiddleware may modify the Request value before forwarding
+// the FinalizeInput along to the next FinalizeHandler.
+type FinalizeInput struct {
+	Request interface{}
+}
+
+// FinalizeOutput provides the result returned by the next FinalizeHandler.
+type FinalizeOutput struct {
+	Result interface{}
+}
+
+// FinalizeHandler provides the interface for the next handler the
+// FinalizeMiddleware will call in the middleware chain.
+type FinalizeHandler interface {
+	HandleFinalize(ctx context.Context, in FinalizeInput) (
+		out FinalizeOutput, metadata Metadata, err error,
+	)
+}
+
+// FinalizeMiddleware provides the interface for middleware specific to the
+// finalize step. Delegates to the next FinalizeHandler for further
+// processing.
+type FinalizeMiddleware interface {
+	// ID returns a unique ID for the middleware in the FinalizeStep. The step does not
+	// allow duplicate IDs.
+ ID() string + + // HandleFinalize invokes the middleware behavior which must delegate to the next handler + // for the middleware chain to continue. The method must return a result or + // error to its caller. + HandleFinalize(ctx context.Context, in FinalizeInput, next FinalizeHandler) ( + out FinalizeOutput, metadata Metadata, err error, + ) +} + +// FinalizeMiddlewareFunc returns a FinalizeMiddleware with the unique ID +// provided, and the func to be invoked. +func FinalizeMiddlewareFunc(id string, fn func(context.Context, FinalizeInput, FinalizeHandler) (FinalizeOutput, Metadata, error)) FinalizeMiddleware { + return finalizeMiddlewareFunc{ + id: id, + fn: fn, + } +} + +type finalizeMiddlewareFunc struct { + // Unique ID for the middleware. + id string + + // Middleware function to be called. + fn func(context.Context, FinalizeInput, FinalizeHandler) ( + FinalizeOutput, Metadata, error, + ) +} + +// ID returns the unique ID for the middleware. +func (s finalizeMiddlewareFunc) ID() string { return s.id } + +// HandleFinalize invokes the middleware Fn. +func (s finalizeMiddlewareFunc) HandleFinalize(ctx context.Context, in FinalizeInput, next FinalizeHandler) ( + out FinalizeOutput, metadata Metadata, err error, +) { + return s.fn(ctx, in, next) +} + +var _ FinalizeMiddleware = (finalizeMiddlewareFunc{}) + +// FinalizeStep provides the ordered grouping of FinalizeMiddleware to be +// invoked on a handler. +type FinalizeStep struct { + ids *orderedIDs +} + +// NewFinalizeStep returns a FinalizeStep ready to have middleware for +// initialization added to it. +func NewFinalizeStep() *FinalizeStep { + return &FinalizeStep{ + ids: newOrderedIDs(), + } +} + +var _ Middleware = (*FinalizeStep)(nil) + +// ID returns the unique id of the step as a middleware. +func (s *FinalizeStep) ID() string { + return "Finalize stack step" +} + +// HandleMiddleware invokes the middleware by decorating the next handler +// provided. Returns the result of the middleware and handler being invoked. +// +// Implements Middleware interface. +func (s *FinalizeStep) HandleMiddleware(ctx context.Context, in interface{}, next Handler) ( + out interface{}, metadata Metadata, err error, +) { + order := s.ids.GetOrder() + + var h FinalizeHandler = finalizeWrapHandler{Next: next} + for i := len(order) - 1; i >= 0; i-- { + h = decoratedFinalizeHandler{ + Next: h, + With: order[i].(FinalizeMiddleware), + } + } + + sIn := FinalizeInput{ + Request: in, + } + + res, metadata, err := h.HandleFinalize(ctx, sIn) + return res.Result, metadata, err +} + +// Get retrieves the middleware identified by id. If the middleware is not present, returns false. +func (s *FinalizeStep) Get(id string) (FinalizeMiddleware, bool) { + get, ok := s.ids.Get(id) + if !ok { + return nil, false + } + return get.(FinalizeMiddleware), ok +} + +// Add injects the middleware to the relative position of the middleware group. +// Returns an error if the middleware already exists. +func (s *FinalizeStep) Add(m FinalizeMiddleware, pos RelativePosition) error { + return s.ids.Add(m, pos) +} + +// Insert injects the middleware relative to an existing middleware ID. +// Returns error if the original middleware does not exist, or the middleware +// being added already exists. +func (s *FinalizeStep) Insert(m FinalizeMiddleware, relativeTo string, pos RelativePosition) error { + return s.ids.Insert(m, relativeTo, pos) +} + +// Swap removes the middleware by id, replacing it with the new middleware. 
+// Returns the middleware removed, or error if the middleware to be removed +// doesn't exist. +func (s *FinalizeStep) Swap(id string, m FinalizeMiddleware) (FinalizeMiddleware, error) { + removed, err := s.ids.Swap(id, m) + if err != nil { + return nil, err + } + + return removed.(FinalizeMiddleware), nil +} + +// Remove removes the middleware by id. Returns error if the middleware +// doesn't exist. +func (s *FinalizeStep) Remove(id string) (FinalizeMiddleware, error) { + removed, err := s.ids.Remove(id) + if err != nil { + return nil, err + } + + return removed.(FinalizeMiddleware), nil +} + +// List returns a list of the middleware in the step. +func (s *FinalizeStep) List() []string { + return s.ids.List() +} + +// Clear removes all middleware in the step. +func (s *FinalizeStep) Clear() { + s.ids.Clear() +} + +type finalizeWrapHandler struct { + Next Handler +} + +var _ FinalizeHandler = (*finalizeWrapHandler)(nil) + +// HandleFinalize implements FinalizeHandler, converts types and delegates to underlying +// generic handler. +func (w finalizeWrapHandler) HandleFinalize(ctx context.Context, in FinalizeInput) ( + out FinalizeOutput, metadata Metadata, err error, +) { + res, metadata, err := w.Next.Handle(ctx, in.Request) + return FinalizeOutput{ + Result: res, + }, metadata, err +} + +type decoratedFinalizeHandler struct { + Next FinalizeHandler + With FinalizeMiddleware +} + +var _ FinalizeHandler = (*decoratedFinalizeHandler)(nil) + +func (h decoratedFinalizeHandler) HandleFinalize(ctx context.Context, in FinalizeInput) ( + out FinalizeOutput, metadata Metadata, err error, +) { + return h.With.HandleFinalize(ctx, in, h.Next) +} + +// FinalizeHandlerFunc provides a wrapper around a function to be used as a finalize middleware handler. +type FinalizeHandlerFunc func(context.Context, FinalizeInput) (FinalizeOutput, Metadata, error) + +// HandleFinalize invokes the wrapped function with the given arguments. +func (f FinalizeHandlerFunc) HandleFinalize(ctx context.Context, in FinalizeInput) (FinalizeOutput, Metadata, error) { + return f(ctx, in) +} + +var _ FinalizeHandler = FinalizeHandlerFunc(nil) diff --git a/vendor/github.com/aws/smithy-go/middleware/step_initialize.go b/vendor/github.com/aws/smithy-go/middleware/step_initialize.go new file mode 100644 index 00000000000..fe359144d24 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/middleware/step_initialize.go @@ -0,0 +1,211 @@ +package middleware + +import "context" + +// InitializeInput wraps the input parameters for the InitializeMiddlewares to +// consume. InitializeMiddleware may modify the parameter value before +// forwarding it along to the next InitializeHandler. +type InitializeInput struct { + Parameters interface{} +} + +// InitializeOutput provides the result returned by the next InitializeHandler. +type InitializeOutput struct { + Result interface{} +} + +// InitializeHandler provides the interface for the next handler the +// InitializeMiddleware will call in the middleware chain. +type InitializeHandler interface { + HandleInitialize(ctx context.Context, in InitializeInput) ( + out InitializeOutput, metadata Metadata, err error, + ) +} + +// InitializeMiddleware provides the interface for middleware specific to the +// initialize step. Delegates to the next InitializeHandler for further +// processing. +type InitializeMiddleware interface { + // ID returns a unique ID for the middleware in the InitializeStep. The step does not + // allow duplicate IDs. 
+ ID() string + + // HandleInitialize invokes the middleware behavior which must delegate to the next handler + // for the middleware chain to continue. The method must return a result or + // error to its caller. + HandleInitialize(ctx context.Context, in InitializeInput, next InitializeHandler) ( + out InitializeOutput, metadata Metadata, err error, + ) +} + +// InitializeMiddlewareFunc returns a InitializeMiddleware with the unique ID provided, +// and the func to be invoked. +func InitializeMiddlewareFunc(id string, fn func(context.Context, InitializeInput, InitializeHandler) (InitializeOutput, Metadata, error)) InitializeMiddleware { + return initializeMiddlewareFunc{ + id: id, + fn: fn, + } +} + +type initializeMiddlewareFunc struct { + // Unique ID for the middleware. + id string + + // Middleware function to be called. + fn func(context.Context, InitializeInput, InitializeHandler) ( + InitializeOutput, Metadata, error, + ) +} + +// ID returns the unique ID for the middleware. +func (s initializeMiddlewareFunc) ID() string { return s.id } + +// HandleInitialize invokes the middleware Fn. +func (s initializeMiddlewareFunc) HandleInitialize(ctx context.Context, in InitializeInput, next InitializeHandler) ( + out InitializeOutput, metadata Metadata, err error, +) { + return s.fn(ctx, in, next) +} + +var _ InitializeMiddleware = (initializeMiddlewareFunc{}) + +// InitializeStep provides the ordered grouping of InitializeMiddleware to be +// invoked on a handler. +type InitializeStep struct { + ids *orderedIDs +} + +// NewInitializeStep returns an InitializeStep ready to have middleware for +// initialization added to it. +func NewInitializeStep() *InitializeStep { + return &InitializeStep{ + ids: newOrderedIDs(), + } +} + +var _ Middleware = (*InitializeStep)(nil) + +// ID returns the unique ID of the step as a middleware. +func (s *InitializeStep) ID() string { + return "Initialize stack step" +} + +// HandleMiddleware invokes the middleware by decorating the next handler +// provided. Returns the result of the middleware and handler being invoked. +// +// Implements Middleware interface. +func (s *InitializeStep) HandleMiddleware(ctx context.Context, in interface{}, next Handler) ( + out interface{}, metadata Metadata, err error, +) { + order := s.ids.GetOrder() + + var h InitializeHandler = initializeWrapHandler{Next: next} + for i := len(order) - 1; i >= 0; i-- { + h = decoratedInitializeHandler{ + Next: h, + With: order[i].(InitializeMiddleware), + } + } + + sIn := InitializeInput{ + Parameters: in, + } + + res, metadata, err := h.HandleInitialize(ctx, sIn) + return res.Result, metadata, err +} + +// Get retrieves the middleware identified by id. If the middleware is not present, returns false. +func (s *InitializeStep) Get(id string) (InitializeMiddleware, bool) { + get, ok := s.ids.Get(id) + if !ok { + return nil, false + } + return get.(InitializeMiddleware), ok +} + +// Add injects the middleware to the relative position of the middleware group. +// Returns an error if the middleware already exists. +func (s *InitializeStep) Add(m InitializeMiddleware, pos RelativePosition) error { + return s.ids.Add(m, pos) +} + +// Insert injects the middleware relative to an existing middleware ID. +// Returns error if the original middleware does not exist, or the middleware +// being added already exists. 
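+//
+// For example (editorial sketch; traceMW and the "AddToken" ID are
+// hypothetical), running a middleware immediately after an already
+// registered one:
+//
+//	err := stack.Initialize.Insert(traceMW, "AddToken", After)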
+func (s *InitializeStep) Insert(m InitializeMiddleware, relativeTo string, pos RelativePosition) error {
+	return s.ids.Insert(m, relativeTo, pos)
+}
+
+// Swap removes the middleware by id, replacing it with the new middleware.
+// Returns the middleware removed, or error if the middleware to be removed
+// doesn't exist.
+func (s *InitializeStep) Swap(id string, m InitializeMiddleware) (InitializeMiddleware, error) {
+	removed, err := s.ids.Swap(id, m)
+	if err != nil {
+		return nil, err
+	}
+
+	return removed.(InitializeMiddleware), nil
+}
+
+// Remove removes the middleware by id. Returns error if the middleware
+// doesn't exist.
+func (s *InitializeStep) Remove(id string) (InitializeMiddleware, error) {
+	removed, err := s.ids.Remove(id)
+	if err != nil {
+		return nil, err
+	}
+
+	return removed.(InitializeMiddleware), nil
+}
+
+// List returns a list of the middleware in the step.
+func (s *InitializeStep) List() []string {
+	return s.ids.List()
+}
+
+// Clear removes all middleware in the step.
+func (s *InitializeStep) Clear() {
+	s.ids.Clear()
+}
+
+type initializeWrapHandler struct {
+	Next Handler
+}
+
+var _ InitializeHandler = (*initializeWrapHandler)(nil)
+
+// HandleInitialize implements InitializeHandler, converts types and delegates to the
+// underlying generic handler.
+func (w initializeWrapHandler) HandleInitialize(ctx context.Context, in InitializeInput) (
+	out InitializeOutput, metadata Metadata, err error,
+) {
+	res, metadata, err := w.Next.Handle(ctx, in.Parameters)
+	return InitializeOutput{
+		Result: res,
+	}, metadata, err
+}
+
+type decoratedInitializeHandler struct {
+	Next InitializeHandler
+	With InitializeMiddleware
+}
+
+var _ InitializeHandler = (*decoratedInitializeHandler)(nil)
+
+func (h decoratedInitializeHandler) HandleInitialize(ctx context.Context, in InitializeInput) (
+	out InitializeOutput, metadata Metadata, err error,
+) {
+	return h.With.HandleInitialize(ctx, in, h.Next)
+}
+
+// InitializeHandlerFunc provides a wrapper around a function to be used as an initialize middleware handler.
+type InitializeHandlerFunc func(context.Context, InitializeInput) (InitializeOutput, Metadata, error)
+
+// HandleInitialize calls the wrapped function with the provided arguments.
+func (i InitializeHandlerFunc) HandleInitialize(ctx context.Context, in InitializeInput) (InitializeOutput, Metadata, error) {
+	return i(ctx, in)
+}
+
+var _ InitializeHandler = InitializeHandlerFunc(nil)
diff --git a/vendor/github.com/aws/smithy-go/middleware/step_serialize.go b/vendor/github.com/aws/smithy-go/middleware/step_serialize.go
new file mode 100644
index 00000000000..114bafcedea
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/middleware/step_serialize.go
@@ -0,0 +1,219 @@
+package middleware
+
+import "context"
+
+// SerializeInput provides the input parameters for the SerializeMiddleware to
+// consume. SerializeMiddleware may modify the Request value before forwarding
+// SerializeInput along to the next SerializeHandler. The Parameters member
+// should not be modified by SerializeMiddleware; InitializeMiddleware is
+// responsible for modifying the provided Parameters value.
+type SerializeInput struct {
+	Parameters interface{}
+	Request    interface{}
+}
+
+// SerializeOutput provides the result returned by the next SerializeHandler.
+type SerializeOutput struct {
+	Result interface{}
+}
+
+// SerializeHandler provides the interface for the next handler the
+// SerializeMiddleware will call in the middleware chain.
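+//
+// A serialize middleware that calls this handler (editorial sketch)
+// typically asserts in.Request to its concrete transport type, encodes
+// in.Parameters into it, and delegates; the HTTP request type here is an
+// assumption:
+//
+//	req, ok := in.Request.(*smithyhttp.Request)
+//	if !ok {
+//		return out, metadata, fmt.Errorf("unexpected request type %T", in.Request)
+//	}
+//	// encode in.Parameters into req's URL and body, then:
+//	return next.HandleSerialize(ctx, in)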
+type SerializeHandler interface { + HandleSerialize(ctx context.Context, in SerializeInput) ( + out SerializeOutput, metadata Metadata, err error, + ) +} + +// SerializeMiddleware provides the interface for middleware specific to the +// serialize step. Delegates to the next SerializeHandler for further +// processing. +type SerializeMiddleware interface { + // ID returns a unique ID for the middleware in the SerializeStep. The step does not + // allow duplicate IDs. + ID() string + + // HandleSerialize invokes the middleware behavior which must delegate to the next handler + // for the middleware chain to continue. The method must return a result or + // error to its caller. + HandleSerialize(ctx context.Context, in SerializeInput, next SerializeHandler) ( + out SerializeOutput, metadata Metadata, err error, + ) +} + +// SerializeMiddlewareFunc returns a SerializeMiddleware with the unique ID +// provided, and the func to be invoked. +func SerializeMiddlewareFunc(id string, fn func(context.Context, SerializeInput, SerializeHandler) (SerializeOutput, Metadata, error)) SerializeMiddleware { + return serializeMiddlewareFunc{ + id: id, + fn: fn, + } +} + +type serializeMiddlewareFunc struct { + // Unique ID for the middleware. + id string + + // Middleware function to be called. + fn func(context.Context, SerializeInput, SerializeHandler) ( + SerializeOutput, Metadata, error, + ) +} + +// ID returns the unique ID for the middleware. +func (s serializeMiddlewareFunc) ID() string { return s.id } + +// HandleSerialize invokes the middleware Fn. +func (s serializeMiddlewareFunc) HandleSerialize(ctx context.Context, in SerializeInput, next SerializeHandler) ( + out SerializeOutput, metadata Metadata, err error, +) { + return s.fn(ctx, in, next) +} + +var _ SerializeMiddleware = (serializeMiddlewareFunc{}) + +// SerializeStep provides the ordered grouping of SerializeMiddleware to be +// invoked on a handler. +type SerializeStep struct { + newRequest func() interface{} + ids *orderedIDs +} + +// NewSerializeStep returns a SerializeStep ready to have middleware for +// initialization added to it. The newRequest func parameter is used to +// initialize the transport specific request for the stack SerializeStep to +// serialize the input parameters into. +func NewSerializeStep(newRequest func() interface{}) *SerializeStep { + return &SerializeStep{ + ids: newOrderedIDs(), + newRequest: newRequest, + } +} + +var _ Middleware = (*SerializeStep)(nil) + +// ID returns the unique ID of the step as a middleware. +func (s *SerializeStep) ID() string { + return "Serialize stack step" +} + +// HandleMiddleware invokes the middleware by decorating the next handler +// provided. Returns the result of the middleware and handler being invoked. +// +// Implements Middleware interface. +func (s *SerializeStep) HandleMiddleware(ctx context.Context, in interface{}, next Handler) ( + out interface{}, metadata Metadata, err error, +) { + order := s.ids.GetOrder() + + var h SerializeHandler = serializeWrapHandler{Next: next} + for i := len(order) - 1; i >= 0; i-- { + h = decoratedSerializeHandler{ + Next: h, + With: order[i].(SerializeMiddleware), + } + } + + sIn := SerializeInput{ + Parameters: in, + Request: s.newRequest(), + } + + res, metadata, err := h.HandleSerialize(ctx, sIn) + return res.Result, metadata, err +} + +// Get retrieves the middleware identified by id. If the middleware is not present, returns false. 
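+//
+// For example (editorial sketch; "OperationSerializer" is a hypothetical ID):
+//
+//	if mw, ok := stack.Serialize.Get("OperationSerializer"); ok {
+//		_ = mw // inspect or wrap the registered middleware
+//	}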
+func (s *SerializeStep) Get(id string) (SerializeMiddleware, bool) { + get, ok := s.ids.Get(id) + if !ok { + return nil, false + } + return get.(SerializeMiddleware), ok +} + +// Add injects the middleware to the relative position of the middleware group. +// Returns an error if the middleware already exists. +func (s *SerializeStep) Add(m SerializeMiddleware, pos RelativePosition) error { + return s.ids.Add(m, pos) +} + +// Insert injects the middleware relative to an existing middleware ID. +// Returns error if the original middleware does not exist, or the middleware +// being added already exists. +func (s *SerializeStep) Insert(m SerializeMiddleware, relativeTo string, pos RelativePosition) error { + return s.ids.Insert(m, relativeTo, pos) +} + +// Swap removes the middleware by id, replacing it with the new middleware. +// Returns the middleware removed, or error if the middleware to be removed +// doesn't exist. +func (s *SerializeStep) Swap(id string, m SerializeMiddleware) (SerializeMiddleware, error) { + removed, err := s.ids.Swap(id, m) + if err != nil { + return nil, err + } + + return removed.(SerializeMiddleware), nil +} + +// Remove removes the middleware by id. Returns error if the middleware +// doesn't exist. +func (s *SerializeStep) Remove(id string) (SerializeMiddleware, error) { + removed, err := s.ids.Remove(id) + if err != nil { + return nil, err + } + + return removed.(SerializeMiddleware), nil +} + +// List returns a list of the middleware in the step. +func (s *SerializeStep) List() []string { + return s.ids.List() +} + +// Clear removes all middleware in the step. +func (s *SerializeStep) Clear() { + s.ids.Clear() +} + +type serializeWrapHandler struct { + Next Handler +} + +var _ SerializeHandler = (*serializeWrapHandler)(nil) + +// Implements SerializeHandler, converts types and delegates to underlying +// generic handler. +func (w serializeWrapHandler) HandleSerialize(ctx context.Context, in SerializeInput) ( + out SerializeOutput, metadata Metadata, err error, +) { + res, metadata, err := w.Next.Handle(ctx, in.Request) + return SerializeOutput{ + Result: res, + }, metadata, err +} + +type decoratedSerializeHandler struct { + Next SerializeHandler + With SerializeMiddleware +} + +var _ SerializeHandler = (*decoratedSerializeHandler)(nil) + +func (h decoratedSerializeHandler) HandleSerialize(ctx context.Context, in SerializeInput) ( + out SerializeOutput, metadata Metadata, err error, +) { + return h.With.HandleSerialize(ctx, in, h.Next) +} + +// SerializeHandlerFunc provides a wrapper around a function to be used as a serialize middleware handler. +type SerializeHandlerFunc func(context.Context, SerializeInput) (SerializeOutput, Metadata, error) + +// HandleSerialize calls the wrapped function with the provided arguments. 
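+//
+// For example (editorial sketch), a SerializeHandlerFunc can act as the
+// terminal handler when exercising middleware in isolation:
+//
+//	h := SerializeHandlerFunc(func(ctx context.Context, in SerializeInput) (SerializeOutput, Metadata, error) {
+//		return SerializeOutput{Result: in.Request}, Metadata{}, nil
+//	})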
+func (s SerializeHandlerFunc) HandleSerialize(ctx context.Context, in SerializeInput) (SerializeOutput, Metadata, error) { + return s(ctx, in) +} + +var _ SerializeHandler = SerializeHandlerFunc(nil) diff --git a/vendor/github.com/aws/smithy-go/modman.toml b/vendor/github.com/aws/smithy-go/modman.toml new file mode 100644 index 00000000000..20295cdd2aa --- /dev/null +++ b/vendor/github.com/aws/smithy-go/modman.toml @@ -0,0 +1,11 @@ +[dependencies] + "github.com/google/go-cmp" = "v0.5.8" + "github.com/jmespath/go-jmespath" = "v0.4.0" + +[modules] + + [modules.codegen] + no_tag = true + + [modules."codegen/smithy-go-codegen/build/test-generated/go/internal/testmodule"] + no_tag = true diff --git a/vendor/github.com/aws/smithy-go/properties.go b/vendor/github.com/aws/smithy-go/properties.go new file mode 100644 index 00000000000..17d659c539e --- /dev/null +++ b/vendor/github.com/aws/smithy-go/properties.go @@ -0,0 +1,52 @@ +package smithy + +// PropertiesReader provides an interface for reading metadata from the +// underlying metadata container. +type PropertiesReader interface { + Get(key interface{}) interface{} +} + +// Properties provides storing and reading metadata values. Keys may be any +// comparable value type. Get and set will panic if key is not a comparable +// value type. +// +// Properties uses lazy initialization, and Set method must be called as an +// addressable value, or pointer. Not doing so may cause key/value pair to not +// be set. +type Properties struct { + values map[interface{}]interface{} +} + +// Get attempts to retrieve the value the key points to. Returns nil if the +// key was not found. +// +// Panics if key type is not comparable. +func (m *Properties) Get(key interface{}) interface{} { + return m.values[key] +} + +// Set stores the value pointed to by the key. If a value already exists at +// that key it will be replaced with the new value. +// +// Set method must be called as an addressable value, or pointer. If Set is not +// called as an addressable value or pointer, the key value pair being set may +// be lost. +// +// Panics if the key type is not comparable. +func (m *Properties) Set(key, value interface{}) { + if m.values == nil { + m.values = map[interface{}]interface{}{} + } + m.values[key] = value +} + +// Has returns whether the key exists in the metadata. +// +// Panics if the key type is not comparable. +func (m *Properties) Has(key interface{}) bool { + if m.values == nil { + return false + } + _, ok := m.values[key] + return ok +} diff --git a/vendor/github.com/aws/smithy-go/ptr/doc.go b/vendor/github.com/aws/smithy-go/ptr/doc.go new file mode 100644 index 00000000000..bc1f6996161 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/ptr/doc.go @@ -0,0 +1,5 @@ +// Package ptr provides utilities for converting scalar literal type values to and from pointers inline. +package ptr + +//go:generate go run -tags codegen generate.go +//go:generate gofmt -w -s . diff --git a/vendor/github.com/aws/smithy-go/ptr/from_ptr.go b/vendor/github.com/aws/smithy-go/ptr/from_ptr.go new file mode 100644 index 00000000000..a2845bb2c80 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/ptr/from_ptr.go @@ -0,0 +1,601 @@ +// Code generated by smithy-go/ptr/generate.go DO NOT EDIT. +package ptr + +import ( + "time" +) + +// ToBool returns bool value dereferenced if the passed +// in pointer was not nil. Returns a bool zero value if the +// pointer was nil. 
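+//
+// For example (editorial sketch), round-tripping a scalar through the ptr
+// package from a caller's perspective:
+//
+//	p := ptr.Bool(true)  // *bool
+//	v := ptr.ToBool(p)   // true
+//	z := ptr.ToBool(nil) // false, the bool zero value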
+func ToBool(p *bool) (v bool) { + if p == nil { + return v + } + + return *p +} + +// ToBoolSlice returns a slice of bool values, that are +// dereferenced if the passed in pointer was not nil. Returns a bool +// zero value if the pointer was nil. +func ToBoolSlice(vs []*bool) []bool { + ps := make([]bool, len(vs)) + for i, v := range vs { + ps[i] = ToBool(v) + } + + return ps +} + +// ToBoolMap returns a map of bool values, that are +// dereferenced if the passed in pointer was not nil. The bool +// zero value is used if the pointer was nil. +func ToBoolMap(vs map[string]*bool) map[string]bool { + ps := make(map[string]bool, len(vs)) + for k, v := range vs { + ps[k] = ToBool(v) + } + + return ps +} + +// ToByte returns byte value dereferenced if the passed +// in pointer was not nil. Returns a byte zero value if the +// pointer was nil. +func ToByte(p *byte) (v byte) { + if p == nil { + return v + } + + return *p +} + +// ToByteSlice returns a slice of byte values, that are +// dereferenced if the passed in pointer was not nil. Returns a byte +// zero value if the pointer was nil. +func ToByteSlice(vs []*byte) []byte { + ps := make([]byte, len(vs)) + for i, v := range vs { + ps[i] = ToByte(v) + } + + return ps +} + +// ToByteMap returns a map of byte values, that are +// dereferenced if the passed in pointer was not nil. The byte +// zero value is used if the pointer was nil. +func ToByteMap(vs map[string]*byte) map[string]byte { + ps := make(map[string]byte, len(vs)) + for k, v := range vs { + ps[k] = ToByte(v) + } + + return ps +} + +// ToString returns string value dereferenced if the passed +// in pointer was not nil. Returns a string zero value if the +// pointer was nil. +func ToString(p *string) (v string) { + if p == nil { + return v + } + + return *p +} + +// ToStringSlice returns a slice of string values, that are +// dereferenced if the passed in pointer was not nil. Returns a string +// zero value if the pointer was nil. +func ToStringSlice(vs []*string) []string { + ps := make([]string, len(vs)) + for i, v := range vs { + ps[i] = ToString(v) + } + + return ps +} + +// ToStringMap returns a map of string values, that are +// dereferenced if the passed in pointer was not nil. The string +// zero value is used if the pointer was nil. +func ToStringMap(vs map[string]*string) map[string]string { + ps := make(map[string]string, len(vs)) + for k, v := range vs { + ps[k] = ToString(v) + } + + return ps +} + +// ToInt returns int value dereferenced if the passed +// in pointer was not nil. Returns a int zero value if the +// pointer was nil. +func ToInt(p *int) (v int) { + if p == nil { + return v + } + + return *p +} + +// ToIntSlice returns a slice of int values, that are +// dereferenced if the passed in pointer was not nil. Returns a int +// zero value if the pointer was nil. +func ToIntSlice(vs []*int) []int { + ps := make([]int, len(vs)) + for i, v := range vs { + ps[i] = ToInt(v) + } + + return ps +} + +// ToIntMap returns a map of int values, that are +// dereferenced if the passed in pointer was not nil. The int +// zero value is used if the pointer was nil. +func ToIntMap(vs map[string]*int) map[string]int { + ps := make(map[string]int, len(vs)) + for k, v := range vs { + ps[k] = ToInt(v) + } + + return ps +} + +// ToInt8 returns int8 value dereferenced if the passed +// in pointer was not nil. Returns a int8 zero value if the +// pointer was nil. 
+func ToInt8(p *int8) (v int8) { + if p == nil { + return v + } + + return *p +} + +// ToInt8Slice returns a slice of int8 values, that are +// dereferenced if the passed in pointer was not nil. Returns a int8 +// zero value if the pointer was nil. +func ToInt8Slice(vs []*int8) []int8 { + ps := make([]int8, len(vs)) + for i, v := range vs { + ps[i] = ToInt8(v) + } + + return ps +} + +// ToInt8Map returns a map of int8 values, that are +// dereferenced if the passed in pointer was not nil. The int8 +// zero value is used if the pointer was nil. +func ToInt8Map(vs map[string]*int8) map[string]int8 { + ps := make(map[string]int8, len(vs)) + for k, v := range vs { + ps[k] = ToInt8(v) + } + + return ps +} + +// ToInt16 returns int16 value dereferenced if the passed +// in pointer was not nil. Returns a int16 zero value if the +// pointer was nil. +func ToInt16(p *int16) (v int16) { + if p == nil { + return v + } + + return *p +} + +// ToInt16Slice returns a slice of int16 values, that are +// dereferenced if the passed in pointer was not nil. Returns a int16 +// zero value if the pointer was nil. +func ToInt16Slice(vs []*int16) []int16 { + ps := make([]int16, len(vs)) + for i, v := range vs { + ps[i] = ToInt16(v) + } + + return ps +} + +// ToInt16Map returns a map of int16 values, that are +// dereferenced if the passed in pointer was not nil. The int16 +// zero value is used if the pointer was nil. +func ToInt16Map(vs map[string]*int16) map[string]int16 { + ps := make(map[string]int16, len(vs)) + for k, v := range vs { + ps[k] = ToInt16(v) + } + + return ps +} + +// ToInt32 returns int32 value dereferenced if the passed +// in pointer was not nil. Returns a int32 zero value if the +// pointer was nil. +func ToInt32(p *int32) (v int32) { + if p == nil { + return v + } + + return *p +} + +// ToInt32Slice returns a slice of int32 values, that are +// dereferenced if the passed in pointer was not nil. Returns a int32 +// zero value if the pointer was nil. +func ToInt32Slice(vs []*int32) []int32 { + ps := make([]int32, len(vs)) + for i, v := range vs { + ps[i] = ToInt32(v) + } + + return ps +} + +// ToInt32Map returns a map of int32 values, that are +// dereferenced if the passed in pointer was not nil. The int32 +// zero value is used if the pointer was nil. +func ToInt32Map(vs map[string]*int32) map[string]int32 { + ps := make(map[string]int32, len(vs)) + for k, v := range vs { + ps[k] = ToInt32(v) + } + + return ps +} + +// ToInt64 returns int64 value dereferenced if the passed +// in pointer was not nil. Returns a int64 zero value if the +// pointer was nil. +func ToInt64(p *int64) (v int64) { + if p == nil { + return v + } + + return *p +} + +// ToInt64Slice returns a slice of int64 values, that are +// dereferenced if the passed in pointer was not nil. Returns a int64 +// zero value if the pointer was nil. +func ToInt64Slice(vs []*int64) []int64 { + ps := make([]int64, len(vs)) + for i, v := range vs { + ps[i] = ToInt64(v) + } + + return ps +} + +// ToInt64Map returns a map of int64 values, that are +// dereferenced if the passed in pointer was not nil. The int64 +// zero value is used if the pointer was nil. +func ToInt64Map(vs map[string]*int64) map[string]int64 { + ps := make(map[string]int64, len(vs)) + for k, v := range vs { + ps[k] = ToInt64(v) + } + + return ps +} + +// ToUint returns uint value dereferenced if the passed +// in pointer was not nil. Returns a uint zero value if the +// pointer was nil. 
+func ToUint(p *uint) (v uint) { + if p == nil { + return v + } + + return *p +} + +// ToUintSlice returns a slice of uint values, that are +// dereferenced if the passed in pointer was not nil. Returns a uint +// zero value if the pointer was nil. +func ToUintSlice(vs []*uint) []uint { + ps := make([]uint, len(vs)) + for i, v := range vs { + ps[i] = ToUint(v) + } + + return ps +} + +// ToUintMap returns a map of uint values, that are +// dereferenced if the passed in pointer was not nil. The uint +// zero value is used if the pointer was nil. +func ToUintMap(vs map[string]*uint) map[string]uint { + ps := make(map[string]uint, len(vs)) + for k, v := range vs { + ps[k] = ToUint(v) + } + + return ps +} + +// ToUint8 returns uint8 value dereferenced if the passed +// in pointer was not nil. Returns a uint8 zero value if the +// pointer was nil. +func ToUint8(p *uint8) (v uint8) { + if p == nil { + return v + } + + return *p +} + +// ToUint8Slice returns a slice of uint8 values, that are +// dereferenced if the passed in pointer was not nil. Returns a uint8 +// zero value if the pointer was nil. +func ToUint8Slice(vs []*uint8) []uint8 { + ps := make([]uint8, len(vs)) + for i, v := range vs { + ps[i] = ToUint8(v) + } + + return ps +} + +// ToUint8Map returns a map of uint8 values, that are +// dereferenced if the passed in pointer was not nil. The uint8 +// zero value is used if the pointer was nil. +func ToUint8Map(vs map[string]*uint8) map[string]uint8 { + ps := make(map[string]uint8, len(vs)) + for k, v := range vs { + ps[k] = ToUint8(v) + } + + return ps +} + +// ToUint16 returns uint16 value dereferenced if the passed +// in pointer was not nil. Returns a uint16 zero value if the +// pointer was nil. +func ToUint16(p *uint16) (v uint16) { + if p == nil { + return v + } + + return *p +} + +// ToUint16Slice returns a slice of uint16 values, that are +// dereferenced if the passed in pointer was not nil. Returns a uint16 +// zero value if the pointer was nil. +func ToUint16Slice(vs []*uint16) []uint16 { + ps := make([]uint16, len(vs)) + for i, v := range vs { + ps[i] = ToUint16(v) + } + + return ps +} + +// ToUint16Map returns a map of uint16 values, that are +// dereferenced if the passed in pointer was not nil. The uint16 +// zero value is used if the pointer was nil. +func ToUint16Map(vs map[string]*uint16) map[string]uint16 { + ps := make(map[string]uint16, len(vs)) + for k, v := range vs { + ps[k] = ToUint16(v) + } + + return ps +} + +// ToUint32 returns uint32 value dereferenced if the passed +// in pointer was not nil. Returns a uint32 zero value if the +// pointer was nil. +func ToUint32(p *uint32) (v uint32) { + if p == nil { + return v + } + + return *p +} + +// ToUint32Slice returns a slice of uint32 values, that are +// dereferenced if the passed in pointer was not nil. Returns a uint32 +// zero value if the pointer was nil. +func ToUint32Slice(vs []*uint32) []uint32 { + ps := make([]uint32, len(vs)) + for i, v := range vs { + ps[i] = ToUint32(v) + } + + return ps +} + +// ToUint32Map returns a map of uint32 values, that are +// dereferenced if the passed in pointer was not nil. The uint32 +// zero value is used if the pointer was nil. +func ToUint32Map(vs map[string]*uint32) map[string]uint32 { + ps := make(map[string]uint32, len(vs)) + for k, v := range vs { + ps[k] = ToUint32(v) + } + + return ps +} + +// ToUint64 returns uint64 value dereferenced if the passed +// in pointer was not nil. Returns a uint64 zero value if the +// pointer was nil. 
+func ToUint64(p *uint64) (v uint64) { + if p == nil { + return v + } + + return *p +} + +// ToUint64Slice returns a slice of uint64 values, that are +// dereferenced if the passed in pointer was not nil. Returns a uint64 +// zero value if the pointer was nil. +func ToUint64Slice(vs []*uint64) []uint64 { + ps := make([]uint64, len(vs)) + for i, v := range vs { + ps[i] = ToUint64(v) + } + + return ps +} + +// ToUint64Map returns a map of uint64 values, that are +// dereferenced if the passed in pointer was not nil. The uint64 +// zero value is used if the pointer was nil. +func ToUint64Map(vs map[string]*uint64) map[string]uint64 { + ps := make(map[string]uint64, len(vs)) + for k, v := range vs { + ps[k] = ToUint64(v) + } + + return ps +} + +// ToFloat32 returns float32 value dereferenced if the passed +// in pointer was not nil. Returns a float32 zero value if the +// pointer was nil. +func ToFloat32(p *float32) (v float32) { + if p == nil { + return v + } + + return *p +} + +// ToFloat32Slice returns a slice of float32 values, that are +// dereferenced if the passed in pointer was not nil. Returns a float32 +// zero value if the pointer was nil. +func ToFloat32Slice(vs []*float32) []float32 { + ps := make([]float32, len(vs)) + for i, v := range vs { + ps[i] = ToFloat32(v) + } + + return ps +} + +// ToFloat32Map returns a map of float32 values, that are +// dereferenced if the passed in pointer was not nil. The float32 +// zero value is used if the pointer was nil. +func ToFloat32Map(vs map[string]*float32) map[string]float32 { + ps := make(map[string]float32, len(vs)) + for k, v := range vs { + ps[k] = ToFloat32(v) + } + + return ps +} + +// ToFloat64 returns float64 value dereferenced if the passed +// in pointer was not nil. Returns a float64 zero value if the +// pointer was nil. +func ToFloat64(p *float64) (v float64) { + if p == nil { + return v + } + + return *p +} + +// ToFloat64Slice returns a slice of float64 values, that are +// dereferenced if the passed in pointer was not nil. Returns a float64 +// zero value if the pointer was nil. +func ToFloat64Slice(vs []*float64) []float64 { + ps := make([]float64, len(vs)) + for i, v := range vs { + ps[i] = ToFloat64(v) + } + + return ps +} + +// ToFloat64Map returns a map of float64 values, that are +// dereferenced if the passed in pointer was not nil. The float64 +// zero value is used if the pointer was nil. +func ToFloat64Map(vs map[string]*float64) map[string]float64 { + ps := make(map[string]float64, len(vs)) + for k, v := range vs { + ps[k] = ToFloat64(v) + } + + return ps +} + +// ToTime returns time.Time value dereferenced if the passed +// in pointer was not nil. Returns a time.Time zero value if the +// pointer was nil. +func ToTime(p *time.Time) (v time.Time) { + if p == nil { + return v + } + + return *p +} + +// ToTimeSlice returns a slice of time.Time values, that are +// dereferenced if the passed in pointer was not nil. Returns a time.Time +// zero value if the pointer was nil. +func ToTimeSlice(vs []*time.Time) []time.Time { + ps := make([]time.Time, len(vs)) + for i, v := range vs { + ps[i] = ToTime(v) + } + + return ps +} + +// ToTimeMap returns a map of time.Time values, that are +// dereferenced if the passed in pointer was not nil. The time.Time +// zero value is used if the pointer was nil. 
+func ToTimeMap(vs map[string]*time.Time) map[string]time.Time { + ps := make(map[string]time.Time, len(vs)) + for k, v := range vs { + ps[k] = ToTime(v) + } + + return ps +} + +// ToDuration returns time.Duration value dereferenced if the passed +// in pointer was not nil. Returns a time.Duration zero value if the +// pointer was nil. +func ToDuration(p *time.Duration) (v time.Duration) { + if p == nil { + return v + } + + return *p +} + +// ToDurationSlice returns a slice of time.Duration values, that are +// dereferenced if the passed in pointer was not nil. Returns a time.Duration +// zero value if the pointer was nil. +func ToDurationSlice(vs []*time.Duration) []time.Duration { + ps := make([]time.Duration, len(vs)) + for i, v := range vs { + ps[i] = ToDuration(v) + } + + return ps +} + +// ToDurationMap returns a map of time.Duration values, that are +// dereferenced if the passed in pointer was not nil. The time.Duration +// zero value is used if the pointer was nil. +func ToDurationMap(vs map[string]*time.Duration) map[string]time.Duration { + ps := make(map[string]time.Duration, len(vs)) + for k, v := range vs { + ps[k] = ToDuration(v) + } + + return ps +} diff --git a/vendor/github.com/aws/smithy-go/ptr/gen_scalars.go b/vendor/github.com/aws/smithy-go/ptr/gen_scalars.go new file mode 100644 index 00000000000..97f01011e7e --- /dev/null +++ b/vendor/github.com/aws/smithy-go/ptr/gen_scalars.go @@ -0,0 +1,83 @@ +//go:build codegen +// +build codegen + +package ptr + +import "strings" + +func GetScalars() Scalars { + return Scalars{ + {Type: "bool"}, + {Type: "byte"}, + {Type: "string"}, + {Type: "int"}, + {Type: "int8"}, + {Type: "int16"}, + {Type: "int32"}, + {Type: "int64"}, + {Type: "uint"}, + {Type: "uint8"}, + {Type: "uint16"}, + {Type: "uint32"}, + {Type: "uint64"}, + {Type: "float32"}, + {Type: "float64"}, + {Type: "Time", Import: &Import{Path: "time"}}, + {Type: "Duration", Import: &Import{Path: "time"}}, + } +} + +// Import provides the import path and optional alias +type Import struct { + Path string + Alias string +} + +// Package returns the Go package name for the import. Returns alias if set. +func (i Import) Package() string { + if v := i.Alias; len(v) != 0 { + return v + } + + if v := i.Path; len(v) != 0 { + parts := strings.Split(v, "/") + pkg := parts[len(parts)-1] + return pkg + } + + return "" +} + +// Scalar provides the definition of a type to generate pointer utilities for. +type Scalar struct { + Type string + Import *Import +} + +// Name returns the exported function name for the type. +func (t Scalar) Name() string { + return strings.Title(t.Type) +} + +// Symbol returns the scalar's Go symbol with path if needed. +func (t Scalar) Symbol() string { + if t.Import != nil { + return t.Import.Package() + "." + t.Type + } + return t.Type +} + +// Scalars is a list of scalars. +type Scalars []Scalar + +// Imports returns all imports for the scalars. +func (ts Scalars) Imports() []*Import { + imports := []*Import{} + for _, t := range ts { + if v := t.Import; v != nil { + imports = append(imports, v) + } + } + + return imports +} diff --git a/vendor/github.com/aws/smithy-go/ptr/to_ptr.go b/vendor/github.com/aws/smithy-go/ptr/to_ptr.go new file mode 100644 index 00000000000..0bfbbecbdce --- /dev/null +++ b/vendor/github.com/aws/smithy-go/ptr/to_ptr.go @@ -0,0 +1,499 @@ +// Code generated by smithy-go/ptr/generate.go DO NOT EDIT. +package ptr + +import ( + "time" +) + +// Bool returns a pointer value for the bool value passed in. 
+func Bool(v bool) *bool { + return &v +} + +// BoolSlice returns a slice of bool pointers from the values +// passed in. +func BoolSlice(vs []bool) []*bool { + ps := make([]*bool, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// BoolMap returns a map of bool pointers from the values +// passed in. +func BoolMap(vs map[string]bool) map[string]*bool { + ps := make(map[string]*bool, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Byte returns a pointer value for the byte value passed in. +func Byte(v byte) *byte { + return &v +} + +// ByteSlice returns a slice of byte pointers from the values +// passed in. +func ByteSlice(vs []byte) []*byte { + ps := make([]*byte, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// ByteMap returns a map of byte pointers from the values +// passed in. +func ByteMap(vs map[string]byte) map[string]*byte { + ps := make(map[string]*byte, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// String returns a pointer value for the string value passed in. +func String(v string) *string { + return &v +} + +// StringSlice returns a slice of string pointers from the values +// passed in. +func StringSlice(vs []string) []*string { + ps := make([]*string, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// StringMap returns a map of string pointers from the values +// passed in. +func StringMap(vs map[string]string) map[string]*string { + ps := make(map[string]*string, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Int returns a pointer value for the int value passed in. +func Int(v int) *int { + return &v +} + +// IntSlice returns a slice of int pointers from the values +// passed in. +func IntSlice(vs []int) []*int { + ps := make([]*int, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// IntMap returns a map of int pointers from the values +// passed in. +func IntMap(vs map[string]int) map[string]*int { + ps := make(map[string]*int, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Int8 returns a pointer value for the int8 value passed in. +func Int8(v int8) *int8 { + return &v +} + +// Int8Slice returns a slice of int8 pointers from the values +// passed in. +func Int8Slice(vs []int8) []*int8 { + ps := make([]*int8, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// Int8Map returns a map of int8 pointers from the values +// passed in. +func Int8Map(vs map[string]int8) map[string]*int8 { + ps := make(map[string]*int8, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Int16 returns a pointer value for the int16 value passed in. +func Int16(v int16) *int16 { + return &v +} + +// Int16Slice returns a slice of int16 pointers from the values +// passed in. +func Int16Slice(vs []int16) []*int16 { + ps := make([]*int16, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// Int16Map returns a map of int16 pointers from the values +// passed in. +func Int16Map(vs map[string]int16) map[string]*int16 { + ps := make(map[string]*int16, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Int32 returns a pointer value for the int32 value passed in. +func Int32(v int32) *int32 { + return &v +} + +// Int32Slice returns a slice of int32 pointers from the values +// passed in. 
+func Int32Slice(vs []int32) []*int32 { + ps := make([]*int32, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// Int32Map returns a map of int32 pointers from the values +// passed in. +func Int32Map(vs map[string]int32) map[string]*int32 { + ps := make(map[string]*int32, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Int64 returns a pointer value for the int64 value passed in. +func Int64(v int64) *int64 { + return &v +} + +// Int64Slice returns a slice of int64 pointers from the values +// passed in. +func Int64Slice(vs []int64) []*int64 { + ps := make([]*int64, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// Int64Map returns a map of int64 pointers from the values +// passed in. +func Int64Map(vs map[string]int64) map[string]*int64 { + ps := make(map[string]*int64, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Uint returns a pointer value for the uint value passed in. +func Uint(v uint) *uint { + return &v +} + +// UintSlice returns a slice of uint pointers from the values +// passed in. +func UintSlice(vs []uint) []*uint { + ps := make([]*uint, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// UintMap returns a map of uint pointers from the values +// passed in. +func UintMap(vs map[string]uint) map[string]*uint { + ps := make(map[string]*uint, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Uint8 returns a pointer value for the uint8 value passed in. +func Uint8(v uint8) *uint8 { + return &v +} + +// Uint8Slice returns a slice of uint8 pointers from the values +// passed in. +func Uint8Slice(vs []uint8) []*uint8 { + ps := make([]*uint8, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// Uint8Map returns a map of uint8 pointers from the values +// passed in. +func Uint8Map(vs map[string]uint8) map[string]*uint8 { + ps := make(map[string]*uint8, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Uint16 returns a pointer value for the uint16 value passed in. +func Uint16(v uint16) *uint16 { + return &v +} + +// Uint16Slice returns a slice of uint16 pointers from the values +// passed in. +func Uint16Slice(vs []uint16) []*uint16 { + ps := make([]*uint16, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// Uint16Map returns a map of uint16 pointers from the values +// passed in. +func Uint16Map(vs map[string]uint16) map[string]*uint16 { + ps := make(map[string]*uint16, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Uint32 returns a pointer value for the uint32 value passed in. +func Uint32(v uint32) *uint32 { + return &v +} + +// Uint32Slice returns a slice of uint32 pointers from the values +// passed in. +func Uint32Slice(vs []uint32) []*uint32 { + ps := make([]*uint32, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// Uint32Map returns a map of uint32 pointers from the values +// passed in. +func Uint32Map(vs map[string]uint32) map[string]*uint32 { + ps := make(map[string]*uint32, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Uint64 returns a pointer value for the uint64 value passed in. +func Uint64(v uint64) *uint64 { + return &v +} + +// Uint64Slice returns a slice of uint64 pointers from the values +// passed in. 
+func Uint64Slice(vs []uint64) []*uint64 { + ps := make([]*uint64, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// Uint64Map returns a map of uint64 pointers from the values +// passed in. +func Uint64Map(vs map[string]uint64) map[string]*uint64 { + ps := make(map[string]*uint64, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Float32 returns a pointer value for the float32 value passed in. +func Float32(v float32) *float32 { + return &v +} + +// Float32Slice returns a slice of float32 pointers from the values +// passed in. +func Float32Slice(vs []float32) []*float32 { + ps := make([]*float32, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// Float32Map returns a map of float32 pointers from the values +// passed in. +func Float32Map(vs map[string]float32) map[string]*float32 { + ps := make(map[string]*float32, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Float64 returns a pointer value for the float64 value passed in. +func Float64(v float64) *float64 { + return &v +} + +// Float64Slice returns a slice of float64 pointers from the values +// passed in. +func Float64Slice(vs []float64) []*float64 { + ps := make([]*float64, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// Float64Map returns a map of float64 pointers from the values +// passed in. +func Float64Map(vs map[string]float64) map[string]*float64 { + ps := make(map[string]*float64, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Time returns a pointer value for the time.Time value passed in. +func Time(v time.Time) *time.Time { + return &v +} + +// TimeSlice returns a slice of time.Time pointers from the values +// passed in. +func TimeSlice(vs []time.Time) []*time.Time { + ps := make([]*time.Time, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// TimeMap returns a map of time.Time pointers from the values +// passed in. +func TimeMap(vs map[string]time.Time) map[string]*time.Time { + ps := make(map[string]*time.Time, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} + +// Duration returns a pointer value for the time.Duration value passed in. +func Duration(v time.Duration) *time.Duration { + return &v +} + +// DurationSlice returns a slice of time.Duration pointers from the values +// passed in. +func DurationSlice(vs []time.Duration) []*time.Duration { + ps := make([]*time.Duration, len(vs)) + for i, v := range vs { + vv := v + ps[i] = &vv + } + + return ps +} + +// DurationMap returns a map of time.Duration pointers from the values +// passed in. +func DurationMap(vs map[string]time.Duration) map[string]*time.Duration { + ps := make(map[string]*time.Duration, len(vs)) + for k, v := range vs { + vv := v + ps[k] = &vv + } + + return ps +} diff --git a/vendor/github.com/aws/smithy-go/rand/doc.go b/vendor/github.com/aws/smithy-go/rand/doc.go new file mode 100644 index 00000000000..f8b25d56259 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/rand/doc.go @@ -0,0 +1,3 @@ +// Package rand provides utilities for creating and working with random value +// generators. 
+package rand
diff --git a/vendor/github.com/aws/smithy-go/rand/rand.go b/vendor/github.com/aws/smithy-go/rand/rand.go
new file mode 100644
index 00000000000..9c479f62b59
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/rand/rand.go
@@ -0,0 +1,31 @@
+package rand
+
+import (
+	"crypto/rand"
+	"fmt"
+	"io"
+	"math/big"
+)
+
+func init() {
+	Reader = rand.Reader
+}
+
+// Reader provides a random reader that can be reset during testing.
+var Reader io.Reader
+
+// Int63n returns an int64 between zero and the value of max, read from an io.Reader source.
+func Int63n(reader io.Reader, max int64) (int64, error) {
+	bi, err := rand.Int(reader, big.NewInt(max))
+	if err != nil {
+		return 0, fmt.Errorf("failed to read random value, %w", err)
+	}
+
+	return bi.Int64(), nil
+}
+
+// CryptoRandInt63n returns a random int64 between zero and the value of max
+// obtained from the crypto rand source.
+func CryptoRandInt63n(max int64) (int64, error) {
+	return Int63n(Reader, max)
+}
diff --git a/vendor/github.com/aws/smithy-go/rand/uuid.go b/vendor/github.com/aws/smithy-go/rand/uuid.go
new file mode 100644
index 00000000000..dc81cbc68ac
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/rand/uuid.go
@@ -0,0 +1,87 @@
+package rand
+
+import (
+	"encoding/hex"
+	"io"
+)
+
+const dash byte = '-'
+
+// UUIDIdempotencyToken provides a utility to get idempotency tokens in the
+// UUID format.
+type UUIDIdempotencyToken struct {
+	uuid *UUID
+}
+
+// NewUUIDIdempotencyToken returns an idempotency token provider returning
+// tokens in the UUID random format using the reader provided.
+func NewUUIDIdempotencyToken(r io.Reader) *UUIDIdempotencyToken {
+	return &UUIDIdempotencyToken{uuid: NewUUID(r)}
+}
+
+// GetIdempotencyToken returns a random UUID value for the idempotency token.
+func (u UUIDIdempotencyToken) GetIdempotencyToken() (string, error) {
+	return u.uuid.GetUUID()
+}
+
+// UUID computes random UUID version 4 values from a random source
+// reader.
+type UUID struct {
+	randSrc io.Reader
+}
+
+// NewUUID returns an initialized UUID value that can be used to retrieve
+// random UUID version 4 values.
+func NewUUID(r io.Reader) *UUID {
+	return &UUID{randSrc: r}
+}
+
+// GetUUID returns a random UUID version 4 string representation sourced from the random reader the
+// UUID was created with. Returns an error if unable to compute the UUID.
+func (r *UUID) GetUUID() (string, error) {
+	var b [16]byte
+	if _, err := io.ReadFull(r.randSrc, b[:]); err != nil {
+		return "", err
+	}
+	r.makeUUIDv4(b[:])
+	return format(b), nil
+}
+
+// GetBytes returns a byte slice containing a random UUID version 4 sourced from the random reader the
+// UUID was created with. Returns an error if unable to compute the UUID.
+func (r *UUID) GetBytes() (u []byte, err error) {
+	u = make([]byte, 16)
+	if _, err = io.ReadFull(r.randSrc, u); err != nil {
+		return u, err
+	}
+	r.makeUUIDv4(u)
+	return u, nil
+}
+
+func (r *UUID) makeUUIDv4(u []byte) {
+	// 13th character is "4"
+	u[6] = (u[6] & 0x0f) | 0x40 // Version 4
+	// 17th character is "8", "9", "a", or "b"
+	u[8] = (u[8] & 0x3f) | 0x80 // Variant most significant bits are 10x where x can be either 1 or 0
+}
+
+// format returns the canonical text representation of a UUID.
+// This implementation is optimized to not use fmt.
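+// The 16 random bytes are hex-encoded into a fixed 36-byte scratch buffer,
+// with dashes written directly at offsets 8, 13, 18, and 23.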
+// Example: 82e42f16-b6cc-4d5b-95f5-d403c4befd3d
+func format(u [16]byte) string {
+	// https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_.28random.29
+
+	var scratch [36]byte
+
+	hex.Encode(scratch[:8], u[0:4])
+	scratch[8] = dash
+	hex.Encode(scratch[9:13], u[4:6])
+	scratch[13] = dash
+	hex.Encode(scratch[14:18], u[6:8])
+	scratch[18] = dash
+	hex.Encode(scratch[19:23], u[8:10])
+	scratch[23] = dash
+	hex.Encode(scratch[24:], u[10:])
+
+	return string(scratch[:])
+}
diff --git a/vendor/github.com/aws/smithy-go/time/time.go b/vendor/github.com/aws/smithy-go/time/time.go
new file mode 100644
index 00000000000..b552a09f8a8
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/time/time.go
@@ -0,0 +1,134 @@
+package time
+
+import (
+	"context"
+	"fmt"
+	"math/big"
+	"strings"
+	"time"
+)
+
+const (
+	// dateTimeFormat is an RFC 3339 section 5.6 date-time format.
+	dateTimeFormatInput = "2006-01-02T15:04:05.999999999Z"
+	dateTimeFormatInputNoZ = "2006-01-02T15:04:05.999999999"
+	dateTimeFormatOutput = "2006-01-02T15:04:05.999Z"
+
+	// httpDateFormat is a date time defined by RFC 7231#section-7.1.1.1
+	// IMF-fixdate with no UTC offset.
+	httpDateFormat = "Mon, 02 Jan 2006 15:04:05 GMT"
+	// Additional formats needed for compatibility.
+	httpDateFormatSingleDigitDay = "Mon, _2 Jan 2006 15:04:05 GMT"
+	httpDateFormatSingleDigitDayTwoDigitYear = "Mon, _2 Jan 06 15:04:05 GMT"
+)
+
+var millisecondFloat = big.NewFloat(1e3)
+
+// FormatDateTime formats value as a date-time, (RFC3339 section 5.6)
+//
+// Example: 1985-04-12T23:20:50.52Z
+func FormatDateTime(value time.Time) string {
+	return value.UTC().Format(dateTimeFormatOutput)
+}
+
+// ParseDateTime parses a string as a date-time, (RFC3339 section 5.6)
+//
+// Example: 1985-04-12T23:20:50.52Z
+func ParseDateTime(value string) (time.Time, error) {
+	return tryParse(value,
+		dateTimeFormatInput,
+		dateTimeFormatInputNoZ,
+		time.RFC3339Nano,
+		time.RFC3339,
+	)
+}
+
+// FormatHTTPDate formats value as a http-date, (RFC 7231#section-7.1.1.1 IMF-fixdate)
+//
+// Example: Tue, 29 Apr 2014 18:30:38 GMT
+func FormatHTTPDate(value time.Time) string {
+	return value.UTC().Format(httpDateFormat)
+}
+
+// ParseHTTPDate parses a string as a http-date, (RFC 7231#section-7.1.1.1 IMF-fixdate)
+//
+// Example: Tue, 29 Apr 2014 18:30:38 GMT
+func ParseHTTPDate(value string) (time.Time, error) {
+	return tryParse(value,
+		httpDateFormat,
+		httpDateFormatSingleDigitDay,
+		httpDateFormatSingleDigitDayTwoDigitYear,
+		time.RFC850,
+		time.ANSIC,
+	)
+}
+
+// FormatEpochSeconds returns value as a Unix time in seconds with decimal precision
+//
+// Example: 1515531081.123
+func FormatEpochSeconds(value time.Time) float64 {
+	ms := value.UnixNano() / int64(time.Millisecond)
+	return float64(ms) / 1e3
+}
+
+// ParseEpochSeconds parses value as a Unix time in seconds with decimal precision
+//
+// Example: 1515531081.123
+func ParseEpochSeconds(value float64) time.Time {
+	f := big.NewFloat(value)
+	f = f.Mul(f, millisecondFloat)
+	i, _ := f.Int64()
+	// Offset to `UTC` because time.Unix returns the time value based on the
+	// system's local setting.
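+	// The scaled value (milliseconds, truncated toward zero by Int64) is
+	// converted to nanoseconds for time.Unix below.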
+	return time.Unix(0, i*1e6).UTC()
+}
+
+func tryParse(v string, formats ...string) (time.Time, error) {
+	var errs parseErrors
+	for _, f := range formats {
+		t, err := time.Parse(f, v)
+		if err != nil {
+			errs = append(errs, parseError{
+				Format: f,
+				Err:    err,
+			})
+			continue
+		}
+		return t, nil
+	}
+
+	return time.Time{}, fmt.Errorf("unable to parse time string, %w", errs)
+}
+
+type parseErrors []parseError
+
+func (es parseErrors) Error() string {
+	var s strings.Builder
+	for _, e := range es {
+		fmt.Fprintf(&s, "\n * %q: %v", e.Format, e.Err)
+	}
+
+	return "parse errors:" + s.String()
+}
+
+type parseError struct {
+	Format string
+	Err    error
+}
+
+// SleepWithContext will wait for the timer duration to expire, or until the
+// context is canceled, whichever happens first. If the context is canceled,
+// the Context's error will be returned.
+func SleepWithContext(ctx context.Context, dur time.Duration) error {
+	t := time.NewTimer(dur)
+	defer t.Stop()
+
+	select {
+	case <-t.C:
+		break
+	case <-ctx.Done():
+		return ctx.Err()
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/checksum_middleware.go b/vendor/github.com/aws/smithy-go/transport/http/checksum_middleware.go
new file mode 100644
index 00000000000..bc4ad6e7973
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/checksum_middleware.go
@@ -0,0 +1,70 @@
+package http
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/aws/smithy-go/middleware"
+)
+
+const contentMD5Header = "Content-Md5"
+
+// contentMD5Checksum provides a middleware to compute and set the
+// content-md5 checksum for an HTTP request
+type contentMD5Checksum struct {
+}
+
+// AddContentChecksumMiddleware adds checksum middleware to the middleware
+// stack's build step.
+func AddContentChecksumMiddleware(stack *middleware.Stack) error {
+	// This middleware must be executed before request body is set.
+	return stack.Build.Add(&contentMD5Checksum{}, middleware.Before)
+}
+
+// ID returns the identifier for the checksum middleware
+func (m *contentMD5Checksum) ID() string { return "ContentChecksum" }
+
+// HandleBuild adds behavior to compute the md5 checksum and add the
+// content-md5 header to the HTTP request
+func (m *contentMD5Checksum) HandleBuild(
+	ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler,
+) (
+	out middleware.BuildOutput, metadata middleware.Metadata, err error,
+) {
+	req, ok := in.Request.(*Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown request type %T", req)
+	}
+
+	// if Content-MD5 header is already present, return
+	if v := req.Header.Get(contentMD5Header); len(v) != 0 {
+		return next.HandleBuild(ctx, in)
+	}
+
+	// fetch the request stream.
+	stream := req.GetStream()
+	// compute checksum if payload is explicit
+	if stream != nil {
+		if !req.IsStreamSeekable() {
+			return out, metadata, fmt.Errorf(
+				"unseekable stream is not supported for computing md5 checksum")
+		}
+
+		v, err := computeMD5Checksum(stream)
+		if err != nil {
+			return out, metadata, fmt.Errorf("error computing md5 checksum, %w", err)
+		}
+
+		// reset the request stream
+		if err := req.RewindStream(); err != nil {
+			return out, metadata, fmt.Errorf(
+				"error rewinding request stream after computing md5 checksum, %w", err)
+		}
+
+		// set the 'Content-MD5' header
+		req.Header.Set(contentMD5Header, string(v))
+	}
+
+	// set md5 header value
+	return next.HandleBuild(ctx, in)
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/client.go b/vendor/github.com/aws/smithy-go/transport/http/client.go
new file mode 100644
index 00000000000..e691c69bf44
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/client.go
@@ -0,0 +1,120 @@
+package http
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+
+	smithy "github.com/aws/smithy-go"
+	"github.com/aws/smithy-go/middleware"
+)
+
+// ClientDo provides the interface for custom HTTP client implementations.
+type ClientDo interface {
+	Do(*http.Request) (*http.Response, error)
+}
+
+// ClientDoFunc provides a helper to wrap a function as an HTTP client for
+// round tripping requests.
+type ClientDoFunc func(*http.Request) (*http.Response, error)
+
+// Do will invoke the underlying func, returning the result.
+func (fn ClientDoFunc) Do(r *http.Request) (*http.Response, error) {
+	return fn(r)
+}
+
+// ClientHandler wraps a client that implements the HTTP Do method. Standard
+// implementation is http.Client.
+type ClientHandler struct {
+	client ClientDo
+}
+
+// NewClientHandler returns an initialized middleware handler for the client.
+func NewClientHandler(client ClientDo) ClientHandler {
+	return ClientHandler{
+		client: client,
+	}
+}
+
+// Handle implements the middleware Handler interface and invokes the
+// underlying HTTP client. Requires the input to be a Smithy *Request. Returns
+// a smithy *Response, or error if the request failed.
+func (c ClientHandler) Handle(ctx context.Context, input interface{}) (
+	out interface{}, metadata middleware.Metadata, err error,
+) {
+	req, ok := input.(*Request)
+	if !ok {
+		return nil, metadata, fmt.Errorf("expect Smithy http.Request value as input, got unsupported type %T", input)
+	}
+
+	builtRequest := req.Build(ctx)
+	if err := ValidateEndpointHost(builtRequest.Host); err != nil {
+		return nil, metadata, err
+	}
+
+	resp, err := c.client.Do(builtRequest)
+	if resp == nil {
+		// Ensure an HTTP response value is always present to prevent
+		// unexpected panics.
+		resp = &http.Response{
+			Header: http.Header{},
+			Body:   http.NoBody,
+		}
+	}
+	if err != nil {
+		err = &RequestSendError{Err: err}
+
+		// Override the error with a context canceled error, if the context was canceled.
+		select {
+		case <-ctx.Done():
+			err = &smithy.CanceledError{Err: ctx.Err()}
+		default:
+		}
+	}
+
+	// The HTTP RoundTripper *should* close the request body, but this may not happen in a timely
+	// manner. So instead the Smithy *Request Build wraps the body to be sent in a safe closer that
+	// will clear the stream reference so that it can be safely reused.
+	if builtRequest.Body != nil {
+		_ = builtRequest.Body.Close()
+	}
+
+	return &Response{Response: resp}, metadata, err
+}
+
+// RequestSendError provides a generic request transport error.
This error
+// should wrap errors encountered while making HTTP client requests.
+//
+// The ClientHandler will wrap the HTTP client's error if the client request
+// fails and did not fail because the context was canceled.
+type RequestSendError struct {
+	Err error
+}
+
+// ConnectionError returns that the error is related to not being able to send
+// the request, or receive a response from the service.
+func (e *RequestSendError) ConnectionError() bool {
+	return true
+}
+
+// Unwrap returns the underlying error, if there was one.
+func (e *RequestSendError) Unwrap() error {
+	return e.Err
+}
+
+func (e *RequestSendError) Error() string {
+	return fmt.Sprintf("request send failed, %v", e.Err)
+}
+
+// NopClient provides a client that ignores the request, and returns an empty
+// successful HTTP response value.
+type NopClient struct{}
+
+// Do ignores the request and returns a 200 status empty response.
+func (NopClient) Do(r *http.Request) (*http.Response, error) {
+	return &http.Response{
+		StatusCode: 200,
+		Header:     http.Header{},
+		Body:       http.NoBody,
+	}, nil
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/doc.go b/vendor/github.com/aws/smithy-go/transport/http/doc.go
new file mode 100644
index 00000000000..07366ac85a8
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/doc.go
@@ -0,0 +1,5 @@
+/*
+Package http provides the HTTP transport client and request/response types
+needed to round trip API operation calls with a service.
+*/
+package http
diff --git a/vendor/github.com/aws/smithy-go/transport/http/headerlist.go b/vendor/github.com/aws/smithy-go/transport/http/headerlist.go
new file mode 100644
index 00000000000..cbc9deb4df0
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/headerlist.go
@@ -0,0 +1,163 @@
+package http
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+	"unicode"
+)
+
+func splitHeaderListValues(vs []string, splitFn func(string) ([]string, error)) ([]string, error) {
+	values := make([]string, 0, len(vs))
+
+	for i := 0; i < len(vs); i++ {
+		parts, err := splitFn(vs[i])
+		if err != nil {
+			return nil, err
+		}
+		values = append(values, parts...)
+	}
+
+	return values, nil
+}
+
+// SplitHeaderListValues attempts to split the elements of the slice by commas,
+// and returns a list of all separated values. Returns an error if unable to
+// separate the values.
+func SplitHeaderListValues(vs []string) ([]string, error) {
+	return splitHeaderListValues(vs, quotedCommaSplit)
+}
+
+func quotedCommaSplit(v string) (parts []string, err error) {
+	v = strings.TrimSpace(v)
+
+	expectMore := true
+	for i := 0; i < len(v); i++ {
+		if unicode.IsSpace(rune(v[i])) {
+			continue
+		}
+		expectMore = false
+
+		// leading space in part is ignored.
+		// Start of value must be non-space, or quote.
+		//
+		// - If quote, enter quoted mode, find next non-escaped quote to
+		//   terminate the value.
+		// - Otherwise, find next comma to terminate value.
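+		//
+		// Illustrative example (assumed input, not from this file's tests):
+		// `a, "b,c", d` splits into ["a", "b,c", "d"].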
+
+		remaining := v[i:]
+
+		var value string
+		var valueLen int
+		if remaining[0] == '"' {
+			//------------------------------
+			// Quoted value
+			//------------------------------
+			var j int
+			var skipQuote bool
+			for j += 1; j < len(remaining); j++ {
+				if remaining[j] == '\\' || (remaining[j] != '\\' && skipQuote) {
+					skipQuote = !skipQuote
+					continue
+				}
+				if remaining[j] == '"' {
+					break
+				}
+			}
+			if j == len(remaining) || j == 1 {
+				return nil, fmt.Errorf("value %v missing closing double quote",
+					remaining)
+			}
+			valueLen = j + 1
+
+			tail := remaining[valueLen:]
+			var k int
+			for ; k < len(tail); k++ {
+				if !unicode.IsSpace(rune(tail[k])) && tail[k] != ',' {
+					return nil, fmt.Errorf("value %v has non-space trailing characters",
+						remaining)
+				}
+				if tail[k] == ',' {
+					expectMore = true
+					break
+				}
+			}
+			value = remaining[:valueLen]
+			value, err = strconv.Unquote(value)
+			if err != nil {
+				return nil, fmt.Errorf("failed to unquote value %v, %w", value, err)
+			}
+
+			// Pad valueLen to include trailing space(s) so `i` is updated correctly.
+			valueLen += k
+
+		} else {
+			//------------------------------
+			// Unquoted value
+			//------------------------------
+
+			// Index of the next comma is the length of the value, or end of string.
+			valueLen = strings.Index(remaining, ",")
+			if valueLen != -1 {
+				expectMore = true
+			} else {
+				valueLen = len(remaining)
+			}
+			value = strings.TrimSpace(remaining[:valueLen])
+		}
+
+		i += valueLen
+		parts = append(parts, value)
+
+	}
+
+	if expectMore {
+		parts = append(parts, "")
+	}
+
+	return parts, nil
+}
+
+// SplitHTTPDateTimestampHeaderListValues attempts to split the HTTP-Date
+// timestamp values in the slice by commas, and returns a list of all separated
+// values. The split is aware of the HTTP-Date timestamp format, and will skip
+// a comma within the timestamp value. Returns an error if unable to split the
+// timestamp values.
+func SplitHTTPDateTimestampHeaderListValues(vs []string) ([]string, error) {
+	return splitHeaderListValues(vs, splitHTTPDateHeaderValue)
+}
+
+func splitHTTPDateHeaderValue(v string) ([]string, error) {
+	if n := strings.Count(v, ","); n <= 1 {
+		// Nothing to do if the value contains no commas, or a single HTTPDate value
+		return []string{v}, nil
+	} else if n%2 == 0 {
+		return nil, fmt.Errorf("invalid timestamp HTTPDate header comma separations, %q", v)
+	}
+
+	var parts []string
+	var i, j int
+
+	var doSplit bool
+	for ; i < len(v); i++ {
+		if v[i] == ',' {
+			if doSplit {
+				doSplit = false
+				parts = append(parts, strings.TrimSpace(v[j:i]))
+				j = i + 1
+			} else {
+				// Skip the first comma in the timestamp value since that
+				// separates the day from the rest of the timestamp.
+				//
+				// Tue, 17 Dec 2019 23:48:18 GMT
+				doSplit = true
+			}
+		}
+	}
+	// Add final part
+	if j < len(v) {
+		parts = append(parts, strings.TrimSpace(v[j:]))
+	}
+
+	return parts, nil
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/host.go b/vendor/github.com/aws/smithy-go/transport/http/host.go
new file mode 100644
index 00000000000..6b290fec030
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/host.go
@@ -0,0 +1,89 @@
+package http
+
+import (
+	"fmt"
+	"net"
+	"strconv"
+	"strings"
+)
+
+// ValidateEndpointHost validates that the host string passed in is a valid RFC
+// 3986 host. Returns an error if the host is not valid.
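+//
+// For example (illustrative): "example.com:8080" and "localhost" pass, while
+// "bad_label.example.com" is rejected because '_' is not a valid host label
+// character.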
+func ValidateEndpointHost(host string) error {
+	var errors strings.Builder
+	var hostname string
+	var port string
+	var err error
+
+	if strings.Contains(host, ":") {
+		hostname, port, err = net.SplitHostPort(host)
+		if err != nil {
+			errors.WriteString(fmt.Sprintf("\n endpoint %v, failed to parse, got ", host))
+			errors.WriteString(err.Error())
+		}
+
+		if !ValidPortNumber(port) {
+			errors.WriteString(fmt.Sprintf("port number should be in range [0-65535], got %v", port))
+		}
+	} else {
+		hostname = host
+	}
+
+	labels := strings.Split(hostname, ".")
+	for i, label := range labels {
+		if i == len(labels)-1 && len(label) == 0 {
+			// Allow trailing dot for FQDN hosts.
+			continue
+		}
+
+		if !ValidHostLabel(label) {
+			errors.WriteString("\nendpoint host domain labels must match \"[a-zA-Z0-9-]{1,63}\", but found: ")
+			errors.WriteString(label)
+		}
+	}
+
+	if len(hostname) == 0 && len(port) != 0 {
+		errors.WriteString("\nendpoint host with port must not be empty")
+	}
+
+	if len(hostname) > 255 {
+		errors.WriteString(fmt.Sprintf("\nendpoint host must be less than 255 characters, but was %d", len(hostname)))
+	}
+
+	if len(errors.String()) > 0 {
+		return fmt.Errorf("invalid endpoint host%s", errors.String())
+	}
+	return nil
+}
+
+// ValidPortNumber returns whether the port is a valid RFC 3986 port.
+func ValidPortNumber(port string) bool {
+	i, err := strconv.Atoi(port)
+	if err != nil {
+		return false
+	}
+
+	if i < 0 || i > 65535 {
+		return false
+	}
+	return true
+}
+
+// ValidHostLabel returns whether the label is a valid RFC 3986 host label.
+func ValidHostLabel(label string) bool {
+	if l := len(label); l == 0 || l > 63 {
+		return false
+	}
+	for _, r := range label {
+		switch {
+		case r >= '0' && r <= '9':
+		case r >= 'A' && r <= 'Z':
+		case r >= 'a' && r <= 'z':
+		case r == '-':
+		default:
+			return false
+		}
+	}
+
+	return true
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/internal/io/safe.go b/vendor/github.com/aws/smithy-go/transport/http/internal/io/safe.go
new file mode 100644
index 00000000000..941a8d6b512
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/internal/io/safe.go
@@ -0,0 +1,75 @@
+package io
+
+import (
+	"io"
+	"sync"
+)
+
+// NewSafeReadCloser returns a new safeReadCloser that wraps readCloser.
+func NewSafeReadCloser(readCloser io.ReadCloser) io.ReadCloser {
+	sr := &safeReadCloser{
+		readCloser: readCloser,
+	}
+
+	if _, ok := readCloser.(io.WriterTo); ok {
+		return &safeWriteToReadCloser{safeReadCloser: sr}
+	}
+
+	return sr
+}
+
+// safeWriteToReadCloser wraps a safeReadCloser but exposes a WriteTo interface implementation. This will panic
+// if the underlying io.ReadCloser does not support WriteTo. Use NewSafeReadCloser to ensure the proper handling of this
+// type.
+type safeWriteToReadCloser struct {
+	*safeReadCloser
+}
+
+// WriteTo implements the io.WriterTo interface.
+func (r *safeWriteToReadCloser) WriteTo(w io.Writer) (int64, error) {
+	r.safeReadCloser.mtx.Lock()
+	defer r.safeReadCloser.mtx.Unlock()
+
+	if r.safeReadCloser.closed {
+		return 0, io.EOF
+	}
+
+	return r.safeReadCloser.readCloser.(io.WriterTo).WriteTo(w)
+}
+
+// safeReadCloser wraps an io.ReadCloser and presents an io.ReadCloser interface. When Close is called on safeReadCloser
+// the underlying Close method will be executed, and then the reference to the reader will be dropped. This type
+// is meant to be used with the net/http library which will retain a reference to the request body for the lifetime
+// of a goroutine connection.
Wrapping in this manner will ensure that no data race conditions are falsely reported.
+// This type is thread-safe.
+type safeReadCloser struct {
+	readCloser io.ReadCloser
+	closed     bool
+	mtx        sync.Mutex
+}
+
+// Read reads up to len(p) bytes into p from the underlying reader. If the reader is closed io.EOF will be returned.
+func (r *safeReadCloser) Read(p []byte) (n int, err error) {
+	r.mtx.Lock()
+	defer r.mtx.Unlock()
+	if r.closed {
+		return 0, io.EOF
+	}
+
+	return r.readCloser.Read(p)
+}
+
+// Close calls the underlying io.ReadCloser's Close method, removes the reference to the reader, and returns any error
+// reported from Close. Subsequent calls to Close will always return a nil error.
+func (r *safeReadCloser) Close() error {
+	r.mtx.Lock()
+	defer r.mtx.Unlock()
+	if r.closed {
+		return nil
+	}
+
+	r.closed = true
+	rc := r.readCloser
+	r.readCloser = nil
+	return rc.Close()
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/md5_checksum.go b/vendor/github.com/aws/smithy-go/transport/http/md5_checksum.go
new file mode 100644
index 00000000000..5d6a4b23a27
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/md5_checksum.go
@@ -0,0 +1,25 @@
+package http
+
+import (
+	"crypto/md5"
+	"encoding/base64"
+	"fmt"
+	"io"
+)
+
+// computeMD5Checksum computes the base64 MD5 checksum of an io.Reader's contents.
+// Returns the checksum as a byte slice, or an error.
+func computeMD5Checksum(r io.Reader) ([]byte, error) {
+	h := md5.New()
+	// copy errors may be assumed to be from the body.
+	_, err := io.Copy(h, r)
+	if err != nil {
+		return nil, fmt.Errorf("failed to read body: %w", err)
+	}
+
+	// encode the md5 checksum in base64.
+	sum := h.Sum(nil)
+	sum64 := make([]byte, base64.StdEncoding.EncodedLen(len(sum)))
+	base64.StdEncoding.Encode(sum64, sum)
+	return sum64, nil
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_close_response_body.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_close_response_body.go
new file mode 100644
index 00000000000..1d3b218a127
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/middleware_close_response_body.go
@@ -0,0 +1,79 @@
+package http
+
+import (
+	"context"
+	"github.com/aws/smithy-go/logging"
+	"github.com/aws/smithy-go/middleware"
+	"io"
+	"io/ioutil"
+)
+
+// AddErrorCloseResponseBodyMiddleware adds the middleware to automatically
+// close the response body of an operation request if the request response
+// failed.
+func AddErrorCloseResponseBodyMiddleware(stack *middleware.Stack) error {
+	return stack.Deserialize.Insert(&errorCloseResponseBodyMiddleware{}, "OperationDeserializer", middleware.Before)
+}
+
+type errorCloseResponseBodyMiddleware struct{}
+
+func (*errorCloseResponseBodyMiddleware) ID() string {
+	return "ErrorCloseResponseBody"
+}
+
+func (m *errorCloseResponseBodyMiddleware) HandleDeserialize(
+	ctx context.Context, input middleware.DeserializeInput, next middleware.DeserializeHandler,
+) (
+	output middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err := next.HandleDeserialize(ctx, input)
+	if err != nil {
+		if resp, ok := out.RawResponse.(*Response); ok && resp != nil && resp.Body != nil {
+			// Consume the full body to prevent TCP connection resets on some platforms
+			_, _ = io.Copy(ioutil.Discard, resp.Body)
+			// Do not validate that the response closes successfully.
+			resp.Body.Close()
+		}
+	}
+
+	return out, metadata, err
+}
+
+// AddCloseResponseBodyMiddleware adds the middleware to automatically close
+// the response body of an operation request, after the response has been
+// deserialized.
+func AddCloseResponseBodyMiddleware(stack *middleware.Stack) error {
+	return stack.Deserialize.Insert(&closeResponseBody{}, "OperationDeserializer", middleware.Before)
+}
+
+type closeResponseBody struct{}
+
+func (*closeResponseBody) ID() string {
+	return "CloseResponseBody"
+}
+
+func (m *closeResponseBody) HandleDeserialize(
+	ctx context.Context, input middleware.DeserializeInput, next middleware.DeserializeHandler,
+) (
+	output middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err := next.HandleDeserialize(ctx, input)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	if resp, ok := out.RawResponse.(*Response); ok {
+		// Consume the full body to prevent TCP connection resets on some platforms
+		_, copyErr := io.Copy(ioutil.Discard, resp.Body)
+		if copyErr != nil {
+			middleware.GetLogger(ctx).Logf(logging.Warn, "failed to discard remaining HTTP response body, this may affect connection reuse")
+		}
+
+		closeErr := resp.Body.Close()
+		if closeErr != nil {
+			middleware.GetLogger(ctx).Logf(logging.Warn, "failed to close HTTP response body, this may affect connection reuse")
+		}
+	}
+
+	return out, metadata, err
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_content_length.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_content_length.go
new file mode 100644
index 00000000000..9969389bb29
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/middleware_content_length.go
@@ -0,0 +1,84 @@
+package http
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/aws/smithy-go/middleware"
+)
+
+// ComputeContentLength provides a middleware to set the content-length
+// header for the length of a serialized request body.
+type ComputeContentLength struct {
+}
+
+// AddComputeContentLengthMiddleware adds ComputeContentLength to the middleware
+// stack's Build step.
+func AddComputeContentLengthMiddleware(stack *middleware.Stack) error {
+	return stack.Build.Add(&ComputeContentLength{}, middleware.After)
+}
+
+// ID returns the identifier for the ComputeContentLength.
+func (m *ComputeContentLength) ID() string { return "ComputeContentLength" }
+
+// HandleBuild adds the length of the serialized request to the HTTP header
+// if the length can be determined.
+func (m *ComputeContentLength) HandleBuild(
+	ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler,
+) (
+	out middleware.BuildOutput, metadata middleware.Metadata, err error,
+) {
+	req, ok := in.Request.(*Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown request type %T", req)
+	}
+
+	// do nothing if request content-length was set to 0 or above.
+	if req.ContentLength >= 0 {
+		return next.HandleBuild(ctx, in)
+	}
+
+	// attempt to compute stream length
+	if n, ok, err := req.StreamLength(); err != nil {
+		return out, metadata, fmt.Errorf(
+			"failed getting length of request stream, %w", err)
+	} else if ok {
+		req.ContentLength = n
+	}
+
+	return next.HandleBuild(ctx, in)
+}
+
+// validateContentLength provides a middleware to validate that the
+// content-length is valid (at least zero) for the serialized request payload.
+type validateContentLength struct{}
+
+// ValidateContentLengthHeader adds middleware that validates the request
+// content-length is set to a value of at least zero.
+func ValidateContentLengthHeader(stack *middleware.Stack) error {
+	return stack.Build.Add(&validateContentLength{}, middleware.After)
+}
+
+// ID returns the identifier for the validateContentLength middleware.
+func (m *validateContentLength) ID() string { return "ValidateContentLength" }
+
+// HandleBuild validates that the request's content-length is set to a
+// non-negative value.
+func (m *validateContentLength) HandleBuild(
+	ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler,
+) (
+	out middleware.BuildOutput, metadata middleware.Metadata, err error,
+) {
+	req, ok := in.Request.(*Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown request type %T", req)
+	}
+
+	// if request content-length was set to less than 0, return an error
+	if req.ContentLength < 0 {
+		return out, metadata, fmt.Errorf(
+			"content length for payload is required and must be at least 0")
+	}
+
+	return next.HandleBuild(ctx, in)
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_header_comment.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_header_comment.go
new file mode 100644
index 00000000000..855c2272031
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/middleware_header_comment.go
@@ -0,0 +1,81 @@
+package http
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+
+	"github.com/aws/smithy-go/middleware"
+)
+
+// WithHeaderComment instruments a middleware stack to append an HTTP field
+// comment to the given header as specified in RFC 9110
+// (https://www.rfc-editor.org/rfc/rfc9110#name-comments).
+//
+// The header is case-insensitive. If the provided header exists when the
+// middleware runs, the content will be inserted as-is enclosed in parentheses.
+//
+// Note that per the HTTP specification, comments are only allowed in fields
+// containing "comment" as part of their field value definition, but this API
+// will NOT verify whether the provided header is one of them.
+//
+// WithHeaderComment MAY be applied more than once to a middleware stack and/or
+// more than once per header.
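+//
+// For example (illustrative), applying
+//
+//	WithHeaderComment("User-Agent", "retry-attempt=2")
+//
+// to a stack whose request already has a User-Agent value results in
+// "<existing value> (retry-attempt=2)".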
+func WithHeaderComment(header, content string) func(*middleware.Stack) error {
+	return func(s *middleware.Stack) error {
+		m, err := getOrAddHeaderComment(s)
+		if err != nil {
+			return fmt.Errorf("get or add header comment: %v", err)
+		}
+
+		m.values.Add(header, content)
+		return nil
+	}
+}
+
+type headerCommentMiddleware struct {
+	values http.Header // hijack case-insensitive access APIs
+}
+
+func (*headerCommentMiddleware) ID() string {
+	return "headerComment"
+}
+
+func (m *headerCommentMiddleware) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) (
+	out middleware.BuildOutput, metadata middleware.Metadata, err error,
+) {
+	r, ok := in.Request.(*Request)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
+	}
+
+	for h, contents := range m.values {
+		for _, c := range contents {
+			if existing := r.Header.Get(h); existing != "" {
+				r.Header.Set(h, fmt.Sprintf("%s (%s)", existing, c))
+			}
+		}
+	}
+
+	return next.HandleBuild(ctx, in)
+}
+
+func getOrAddHeaderComment(s *middleware.Stack) (*headerCommentMiddleware, error) {
+	id := (*headerCommentMiddleware)(nil).ID()
+	m, ok := s.Build.Get(id)
+	if !ok {
+		m := &headerCommentMiddleware{values: http.Header{}}
+		if err := s.Build.Add(m, middleware.After); err != nil {
+			return nil, fmt.Errorf("add build: %v", err)
+		}
+
+		return m, nil
+	}
+
+	hc, ok := m.(*headerCommentMiddleware)
+	if !ok {
+		return nil, fmt.Errorf("existing middleware w/ id %s is not *headerCommentMiddleware", id)
+	}
+
+	return hc, nil
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_headers.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_headers.go
new file mode 100644
index 00000000000..eac32b4babd
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/middleware_headers.go
@@ -0,0 +1,167 @@
+package http
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/aws/smithy-go/middleware"
+)
+
+type isContentTypeAutoSet struct{}
+
+// SetIsContentTypeDefaultValue returns a Context specifying if the request's
+// content-type header was set to a default value.
+func SetIsContentTypeDefaultValue(ctx context.Context, isDefault bool) context.Context {
+	return context.WithValue(ctx, isContentTypeAutoSet{}, isDefault)
+}
+
+// GetIsContentTypeDefaultValue returns whether the content-type HTTP header on
+// the request is a default value that was auto assigned by an operation
+// serializer. Allows middleware post serialization to know if the content-type
+// was auto set to a default value or not.
+//
+// Also returns false if the Context value was never updated to include if
+// content-type was set to a default value.
+func GetIsContentTypeDefaultValue(ctx context.Context) bool {
+	v, _ := ctx.Value(isContentTypeAutoSet{}).(bool)
+	return v
+}
+
+// AddNoPayloadDefaultContentTypeRemover adds the DefaultContentTypeRemover
+// middleware to the stack after the operation serializer. This middleware will
+// remove the content-type header from the request if it was set as a default
+// value, and no request payload is present.
+//
+// Returns an error if unable to add the middleware.
+func AddNoPayloadDefaultContentTypeRemover(stack *middleware.Stack) (err error) { + err = stack.Serialize.Insert(removeDefaultContentType{}, + "OperationSerializer", middleware.After) + if err != nil { + return fmt.Errorf("failed to add %s serialize middleware, %w", + removeDefaultContentType{}.ID(), err) + } + + return nil +} + +// RemoveNoPayloadDefaultContentTypeRemover removes the +// DefaultContentTypeRemover middleware from the stack. Returns an error if +// unable to remove the middleware. +func RemoveNoPayloadDefaultContentTypeRemover(stack *middleware.Stack) (err error) { + _, err = stack.Serialize.Remove(removeDefaultContentType{}.ID()) + if err != nil { + return fmt.Errorf("failed to remove %s serialize middleware, %w", + removeDefaultContentType{}.ID(), err) + + } + return nil +} + +// removeDefaultContentType provides after serialization middleware that will +// remove the content-type header from an HTTP request if the header was set as +// a default value by the operation serializer, and there is no request payload. +type removeDefaultContentType struct{} + +// ID returns the middleware ID +func (removeDefaultContentType) ID() string { return "RemoveDefaultContentType" } + +// HandleSerialize implements the serialization middleware. +func (removeDefaultContentType) HandleSerialize( + ctx context.Context, input middleware.SerializeInput, next middleware.SerializeHandler, +) ( + out middleware.SerializeOutput, meta middleware.Metadata, err error, +) { + req, ok := input.Request.(*Request) + if !ok { + return out, meta, fmt.Errorf( + "unexpected request type %T for removeDefaultContentType middleware", + input.Request) + } + + if GetIsContentTypeDefaultValue(ctx) && req.GetStream() == nil { + req.Header.Del("Content-Type") + input.Request = req + } + + return next.HandleSerialize(ctx, input) +} + +type headerValue struct { + header string + value string + append bool +} + +type headerValueHelper struct { + headerValues []headerValue +} + +func (h *headerValueHelper) addHeaderValue(value headerValue) { + h.headerValues = append(h.headerValues, value) +} + +func (h *headerValueHelper) ID() string { + return "HTTPHeaderHelper" +} + +func (h *headerValueHelper) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) (out middleware.BuildOutput, metadata middleware.Metadata, err error) { + req, ok := in.Request.(*Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + for _, value := range h.headerValues { + if value.append { + req.Header.Add(value.header, value.value) + } else { + req.Header.Set(value.header, value.value) + } + } + + return next.HandleBuild(ctx, in) +} + +func getOrAddHeaderValueHelper(stack *middleware.Stack) (*headerValueHelper, error) { + id := (*headerValueHelper)(nil).ID() + m, ok := stack.Build.Get(id) + if !ok { + m = &headerValueHelper{} + err := stack.Build.Add(m, middleware.After) + if err != nil { + return nil, err + } + } + + requestUserAgent, ok := m.(*headerValueHelper) + if !ok { + return nil, fmt.Errorf("%T for %s middleware did not match expected type", m, id) + } + + return requestUserAgent, nil +} + +// AddHeaderValue returns a stack mutator that adds the header value pair to header. +// Appends to any existing values if present. 
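+//
+// The returned mutator can be applied directly to a stack, e.g.
+// (illustrative):
+//
+//	err := AddHeaderValue("X-Debug", "on")(stack)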
+func AddHeaderValue(header string, value string) func(stack *middleware.Stack) error {
+	return func(stack *middleware.Stack) error {
+		helper, err := getOrAddHeaderValueHelper(stack)
+		if err != nil {
+			return err
+		}
+		helper.addHeaderValue(headerValue{header: header, value: value, append: true})
+		return nil
+	}
+}
+
+// SetHeaderValue returns a stack mutator that sets the header value pair on
+// the header, replacing any existing values.
+func SetHeaderValue(header string, value string) func(stack *middleware.Stack) error {
+	return func(stack *middleware.Stack) error {
+		helper, err := getOrAddHeaderValueHelper(stack)
+		if err != nil {
+			return err
+		}
+		helper.addHeaderValue(headerValue{header: header, value: value, append: false})
+		return nil
+	}
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_http_logging.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_http_logging.go
new file mode 100644
index 00000000000..d5909b0a242
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/middleware_http_logging.go
@@ -0,0 +1,75 @@
+package http
+
+import (
+	"context"
+	"fmt"
+	"net/http/httputil"
+
+	"github.com/aws/smithy-go/logging"
+	"github.com/aws/smithy-go/middleware"
+)
+
+// RequestResponseLogger is a deserialize middleware that will log the request and response HTTP messages and optionally
+// their respective bodies. Will not perform any logging if none of the options are set.
+type RequestResponseLogger struct {
+	LogRequest         bool
+	LogRequestWithBody bool
+
+	LogResponse         bool
+	LogResponseWithBody bool
+}
+
+// ID is the middleware identifier.
+func (r *RequestResponseLogger) ID() string {
+	return "RequestResponseLogger"
+}
+
+// HandleDeserialize will log the request and response HTTP messages if configured accordingly.
+func (r *RequestResponseLogger) HandleDeserialize( + ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler, +) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + logger := middleware.GetLogger(ctx) + + if r.LogRequest || r.LogRequestWithBody { + smithyRequest, ok := in.Request.(*Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in) + } + + rc := smithyRequest.Build(ctx) + reqBytes, err := httputil.DumpRequestOut(rc, r.LogRequestWithBody) + if err != nil { + return out, metadata, err + } + + logger.Logf(logging.Debug, "Request\n%v", string(reqBytes)) + + if r.LogRequestWithBody { + smithyRequest, err = smithyRequest.SetStream(rc.Body) + if err != nil { + return out, metadata, err + } + in.Request = smithyRequest + } + } + + out, metadata, err = next.HandleDeserialize(ctx, in) + + if (err == nil) && (r.LogResponse || r.LogResponseWithBody) { + smithyResponse, ok := out.RawResponse.(*Response) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", out.RawResponse) + } + + respBytes, err := httputil.DumpResponse(smithyResponse.Response, r.LogResponseWithBody) + if err != nil { + return out, metadata, fmt.Errorf("failed to dump response %w", err) + } + + logger.Logf(logging.Debug, "Response\n%v", string(respBytes)) + } + + return out, metadata, err +} diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_metadata.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_metadata.go new file mode 100644 index 00000000000..d6079b25950 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/transport/http/middleware_metadata.go @@ -0,0 +1,51 @@ +package http + +import ( + "context" + + "github.com/aws/smithy-go/middleware" +) + +type ( + hostnameImmutableKey struct{} + hostPrefixDisableKey struct{} +) + +// GetHostnameImmutable retrieves whether the endpoint hostname should be considered +// immutable or not. +// +// Scoped to stack values. Use middleware#ClearStackValues to clear all stack +// values. +func GetHostnameImmutable(ctx context.Context) (v bool) { + v, _ = middleware.GetStackValue(ctx, hostnameImmutableKey{}).(bool) + return v +} + +// SetHostnameImmutable sets or modifies whether the request's endpoint hostname +// should be considered immutable or not. +// +// Scoped to stack values. Use middleware#ClearStackValues to clear all stack +// values. +func SetHostnameImmutable(ctx context.Context, value bool) context.Context { + return middleware.WithStackValue(ctx, hostnameImmutableKey{}, value) +} + +// IsEndpointHostPrefixDisabled retrieves whether the hostname prefixing is +// disabled. +// +// Scoped to stack values. Use middleware#ClearStackValues to clear all stack +// values. +func IsEndpointHostPrefixDisabled(ctx context.Context) (v bool) { + v, _ = middleware.GetStackValue(ctx, hostPrefixDisableKey{}).(bool) + return v +} + +// DisableEndpointHostPrefix sets or modifies whether the request's endpoint host +// prefixing should be disabled. If value is true, endpoint host prefixing +// will be disabled. +// +// Scoped to stack values. Use middleware#ClearStackValues to clear all stack +// values. 
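+//
+// For example (illustrative):
+//
+//	ctx = DisableEndpointHostPrefix(ctx, true)
+//	// IsEndpointHostPrefixDisabled(ctx) now reports true.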
+func DisableEndpointHostPrefix(ctx context.Context, value bool) context.Context {
+	return middleware.WithStackValue(ctx, hostPrefixDisableKey{}, value)
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_min_proto.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_min_proto.go
new file mode 100644
index 00000000000..326cb8a6cab
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/middleware_min_proto.go
@@ -0,0 +1,79 @@
+package http
+
+import (
+	"context"
+	"fmt"
+	"github.com/aws/smithy-go/middleware"
+	"strings"
+)
+
+// MinimumProtocolError is an error type indicating that the established connection did not meet the expected minimum
+// HTTP protocol version.
+type MinimumProtocolError struct {
+	proto              string
+	expectedProtoMajor int
+	expectedProtoMinor int
+}
+
+// Error returns the error message.
+func (m *MinimumProtocolError) Error() string {
+	return fmt.Sprintf("operation requires minimum HTTP protocol of HTTP/%d.%d, but was %s",
+		m.expectedProtoMajor, m.expectedProtoMinor, m.proto)
+}
+
+// RequireMinimumProtocol is a deserialization middleware that asserts that the established HTTP connection
+// meets the minimum major and minor version.
+type RequireMinimumProtocol struct {
+	ProtoMajor int
+	ProtoMinor int
+}
+
+// AddRequireMinimumProtocol adds the RequireMinimumProtocol middleware to the stack using the provided minimum
+// protocol major and minor version.
+func AddRequireMinimumProtocol(stack *middleware.Stack, major, minor int) error {
+	return stack.Deserialize.Insert(&RequireMinimumProtocol{
+		ProtoMajor: major,
+		ProtoMinor: minor,
+	}, "OperationDeserializer", middleware.Before)
+}
+
+// ID returns the middleware identifier string.
+func (r *RequireMinimumProtocol) ID() string {
+	return "RequireMinimumProtocol"
+}
+
+// HandleDeserialize asserts that the established connection is an HTTP connection with the minimum major and minor
+// protocol version.
+func (r *RequireMinimumProtocol) HandleDeserialize(
+	ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler,
+) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*Response)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown transport type: %T", out.RawResponse)
+	}
+
+	if !strings.HasPrefix(response.Proto, "HTTP") {
+		return out, metadata, &MinimumProtocolError{
+			proto:              response.Proto,
+			expectedProtoMajor: r.ProtoMajor,
+			expectedProtoMinor: r.ProtoMinor,
+		}
+	}
+
+	if response.ProtoMajor < r.ProtoMajor || response.ProtoMinor < r.ProtoMinor {
+		return out, metadata, &MinimumProtocolError{
+			proto:              response.Proto,
+			expectedProtoMajor: r.ProtoMajor,
+			expectedProtoMinor: r.ProtoMinor,
+		}
+	}
+
+	return out, metadata, err
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/request.go b/vendor/github.com/aws/smithy-go/transport/http/request.go
new file mode 100644
index 00000000000..7177d6f957c
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/request.go
@@ -0,0 +1,189 @@
+package http
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"strings"
+
+	iointernal "github.com/aws/smithy-go/transport/http/internal/io"
+)
+
+// Request provides the HTTP specific request structure for HTTP specific
+// middleware steps to use to serialize input, and send an operation's request.
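+//
+// The body stream is tracked separately from http.Request.Body so that it can
+// be rewound and safely rebuilt for retried attempts.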
diff --git a/vendor/github.com/aws/smithy-go/transport/http/request.go b/vendor/github.com/aws/smithy-go/transport/http/request.go
new file mode 100644
index 00000000000..7177d6f957c
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/request.go
@@ -0,0 +1,189 @@
+package http
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"strings"
+
+	iointernal "github.com/aws/smithy-go/transport/http/internal/io"
+)
+
+// Request provides the HTTP specific request structure for HTTP specific
+// middleware steps to use to serialize input, and send an operation's request.
+type Request struct {
+	*http.Request
+	stream           io.Reader
+	isStreamSeekable bool
+	streamStartPos   int64
+}
+
+// NewStackRequest returns an initialized request ready to be populated with the
+// HTTP request details. Returns empty interface so the function can be used as
+// a parameter to the Smithy middleware Stack constructor.
+func NewStackRequest() interface{} {
+	return &Request{
+		Request: &http.Request{
+			URL:           &url.URL{},
+			Header:        http.Header{},
+			ContentLength: -1, // default to unknown length
+		},
+	}
+}
+
+// IsHTTPS returns whether the request is HTTPS. Returns false if no endpoint URL is set.
+func (r *Request) IsHTTPS() bool {
+	if r.URL == nil {
+		return false
+	}
+	return strings.EqualFold(r.URL.Scheme, "https")
+}
+
+// Clone returns a deep copy of the Request for the new context. A reference to
+// the Stream is copied, but the underlying stream is not copied.
+func (r *Request) Clone() *Request {
+	rc := *r
+	rc.Request = rc.Request.Clone(context.TODO())
+	return &rc
+}
+
+// StreamLength returns the number of bytes of the serialized stream attached
+// to the request, with ok set to true. If the length cannot be determined, ok
+// is set to false; an error is returned only if inspecting a seekable stream fails.
+func (r *Request) StreamLength() (size int64, ok bool, err error) {
+	return streamLength(r.stream, r.isStreamSeekable, r.streamStartPos)
+}
+
+func streamLength(stream io.Reader, seekable bool, startPos int64) (size int64, ok bool, err error) {
+	if stream == nil {
+		return 0, true, nil
+	}
+
+	if l, ok := stream.(interface{ Len() int }); ok {
+		return int64(l.Len()), true, nil
+	}
+
+	if !seekable {
+		return 0, false, nil
+	}
+
+	s := stream.(io.Seeker)
+	endOffset, err := s.Seek(0, io.SeekEnd)
+	if err != nil {
+		return 0, false, err
+	}
+
+	// The reason to seek to streamStartPos instead of 0 is to ensure that the
+	// SDK only sends the stream from the starting position the user's
+	// application provided it to the SDK at. For example, an application opens
+	// a file and wants to skip the first N bytes, uploading the rest. The
+	// application would move the file's offset N bytes, then hand it off to
+	// the SDK to send the remaining. The SDK should respect that initial offset.
+	_, err = s.Seek(startPos, io.SeekStart)
+	if err != nil {
+		return 0, false, err
+	}
+
+	return endOffset - startPos, true, nil
+}
+
+// RewindStream will rewind the io.Reader to the relative start position if it
+// is an io.Seeker.
+func (r *Request) RewindStream() error {
+	// If there is no stream there is nothing to rewind.
+	if r.stream == nil {
+		return nil
+	}
+
+	if !r.isStreamSeekable {
+		return fmt.Errorf("request stream is not seekable")
+	}
+	_, err := r.stream.(io.Seeker).Seek(r.streamStartPos, io.SeekStart)
+	return err
+}
+
+// GetStream returns the request stream io.Reader if a stream is set. If no
+// stream is present nil will be returned.
+func (r *Request) GetStream() io.Reader {
+	return r.stream
+}
+
+// IsStreamSeekable returns whether the stream is seekable.
+func (r *Request) IsStreamSeekable() bool {
+	return r.isStreamSeekable
+}
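
A minimal sketch of the stream lifecycle these methods describe (seekability detection in SetStream, StreamLength, and RewindStream between retry attempts), assuming only the exported types above; the payload string is illustrative:

```go
package main

import (
	"fmt"
	"strings"

	smithyhttp "github.com/aws/smithy-go/transport/http"
)

func main() {
	req := smithyhttp.NewStackRequest().(*smithyhttp.Request)

	// strings.Reader implements io.Seeker, so the returned clone records the
	// stream as seekable and remembers its starting offset.
	req, err := req.SetStream(strings.NewReader("hello world"))
	if err != nil {
		panic(err)
	}

	size, ok, _ := req.StreamLength()
	fmt.Println(size, ok, req.IsStreamSeekable()) // 11 true true

	// After an attempt consumes the body, RewindStream seeks back to the
	// recorded start position so the request can be retried.
	if err := req.RewindStream(); err != nil {
		panic(err)
	}
}
```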
+
+// SetStream returns a clone of the request with the stream set to the provided
+// reader. May return an error if seeking the provided reader fails.
+func (r *Request) SetStream(reader io.Reader) (rc *Request, err error) {
+	rc = r.Clone()
+
+	if reader == http.NoBody {
+		reader = nil
+	}
+
+	var isStreamSeekable bool
+	var streamStartPos int64
+	switch v := reader.(type) {
+	case io.Seeker:
+		n, err := v.Seek(0, io.SeekCurrent)
+		if err != nil {
+			return r, err
+		}
+		isStreamSeekable = true
+		streamStartPos = n
+	default:
+		// If the stream length can be determined, and is determined to be empty,
+		// use a nil stream to prevent confusion between empty vs not-empty
+		// streams.
+		length, ok, err := streamLength(reader, false, 0)
+		if err != nil {
+			return nil, err
+		} else if ok && length == 0 {
+			reader = nil
+		}
+	}
+
+	rc.stream = reader
+	rc.isStreamSeekable = isStreamSeekable
+	rc.streamStartPos = streamStartPos
+
+	return rc, err
+}
+
+// Build returns a standard HTTP request value built from the Smithy request.
+// The request's stream is wrapped in a safe container that allows it to be
+// reused for subsequent attempts.
+func (r *Request) Build(ctx context.Context) *http.Request {
+	req := r.Request.Clone(ctx)
+
+	if r.stream == nil && req.ContentLength == -1 {
+		req.ContentLength = 0
+	}
+
+	switch stream := r.stream.(type) {
+	case *io.PipeReader:
+		req.Body = ioutil.NopCloser(stream)
+		req.ContentLength = -1
+	default:
+		// HTTP Client Request must only have a non-nil body if the
+		// ContentLength is explicitly unknown (-1) or non-zero. The HTTP
+		// Client will interpret a non-nil body and ContentLength 0 as
+		// "unknown". This is unwanted behavior.
+		if req.ContentLength != 0 && r.stream != nil {
+			req.Body = iointernal.NewSafeReadCloser(ioutil.NopCloser(stream))
+		}
+	}
+
+	return req
+}
+
+// RequestCloner is a function that can take an input request type and clone the request
+// for use in a subsequent retry attempt.
+func RequestCloner(v interface{}) interface{} {
+	return v.(*Request).Clone()
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/response.go b/vendor/github.com/aws/smithy-go/transport/http/response.go
new file mode 100644
index 00000000000..0c13bfcc8e2
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/response.go
@@ -0,0 +1,34 @@
+package http
+
+import (
+	"fmt"
+	"net/http"
+)
+
+// Response provides the HTTP specific response structure for HTTP specific
+// middleware steps to use to deserialize the response from an operation call.
+type Response struct {
+	*http.Response
+}
+
+// ResponseError provides the HTTP centric error type wrapping the underlying
+// error with the HTTP response value.
+type ResponseError struct {
+	Response *Response
+	Err      error
+}
+
+// HTTPStatusCode returns the HTTP response status code received from the service.
+func (e *ResponseError) HTTPStatusCode() int { return e.Response.StatusCode }
+
+// HTTPResponse returns the HTTP response received from the service.
+func (e *ResponseError) HTTPResponse() *Response { return e.Response }
+
+// Unwrap returns the nested error if any, or nil.
+func (e *ResponseError) Unwrap() error { return e.Err }
+
+func (e *ResponseError) Error() string {
+	return fmt.Sprintf(
+		"http response error StatusCode: %d, %v",
+		e.Response.StatusCode, e.Err)
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/time.go b/vendor/github.com/aws/smithy-go/transport/http/time.go
new file mode 100644
index 00000000000..607b196a8bd
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/time.go
@@ -0,0 +1,13 @@
+package http
+
+import (
+	"time"
+
+	smithytime "github.com/aws/smithy-go/time"
+)
+
+// ParseTime parses a time string like the HTTP Date header. This uses a more
+// relaxed rule set for date parsing compared to the standard library.
+func ParseTime(text string) (t time.Time, err error) {
+	return smithytime.ParseHTTPDate(text)
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/url.go b/vendor/github.com/aws/smithy-go/transport/http/url.go
new file mode 100644
index 00000000000..60a5fc1002a
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/url.go
@@ -0,0 +1,44 @@
+package http
+
+import "strings"
+
+// JoinPath returns an absolute URL path composed of the two paths provided.
+// Enforces that the returned path begins with '/'. If the added path is empty,
+// the returned path will match the first parameter, including its suffix.
+func JoinPath(a, b string) string {
+	if len(a) == 0 {
+		a = "/"
+	} else if a[0] != '/' {
+		a = "/" + a
+	}
+
+	if len(b) != 0 && b[0] == '/' {
+		b = b[1:]
+	}
+
+	if len(b) != 0 && len(a) > 1 && a[len(a)-1] != '/' {
+		a = a + "/"
+	}
+
+	return a + b
+}
+
+// JoinRawQuery returns an absolute raw query expression. Any duplicate '&'
+// will be collapsed to a single separator between values.
+func JoinRawQuery(a, b string) string {
+	a = strings.TrimFunc(a, isAmpersand)
+	b = strings.TrimFunc(b, isAmpersand)
+
+	if len(a) == 0 {
+		return b
+	}
+	if len(b) == 0 {
+		return a
+	}
+
+	return a + "&" + b
+}
+
+func isAmpersand(v rune) bool {
+	return v == '&'
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/user_agent.go b/vendor/github.com/aws/smithy-go/transport/http/user_agent.go
new file mode 100644
index 00000000000..71a7e0d8af5
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/user_agent.go
@@ -0,0 +1,37 @@
+package http
+
+import (
+	"strings"
+)
+
+// UserAgentBuilder is a builder for an HTTP User-Agent string.
+type UserAgentBuilder struct {
+	sb strings.Builder
+}
+
+// NewUserAgentBuilder returns a new UserAgentBuilder.
+func NewUserAgentBuilder() *UserAgentBuilder {
+	return &UserAgentBuilder{sb: strings.Builder{}}
+}
+
+// AddKey adds the named component/product to the agent string.
+func (u *UserAgentBuilder) AddKey(key string) {
+	u.appendTo(key)
+}
+
+// AddKeyValue adds the named key to the agent string with the given value.
+func (u *UserAgentBuilder) AddKeyValue(key, value string) {
+	u.appendTo(key + "/" + value)
+}
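
The helpers above are small but easy to misread, so here is a quick sketch of their observable behavior, assuming only the exported smithy-go transport/http API; the product name and version in the User-Agent are illustrative (Build is documented just below):

```go
package main

import (
	"fmt"

	smithyhttp "github.com/aws/smithy-go/transport/http"
)

func main() {
	// JoinPath normalizes the leading '/' and the separator between segments.
	fmt.Println(smithyhttp.JoinPath("v1/buckets", "/objects")) // /v1/buckets/objects
	fmt.Println(smithyhttp.JoinPath("", ""))                   // /

	// JoinRawQuery trims stray '&' separators before joining.
	fmt.Println(smithyhttp.JoinRawQuery("a=1&", "&b=2")) // a=1&b=2

	// UserAgentBuilder assembles space-separated product tokens.
	b := smithyhttp.NewUserAgentBuilder()
	b.AddKeyValue("smithy-go", "1.22.1") // illustrative product/version
	b.AddKey("custom")
	fmt.Println(b.Build()) // smithy-go/1.22.1 custom
}
```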
+
+// Build returns the constructed User-Agent string. May be called multiple times.
+func (u *UserAgentBuilder) Build() string {
+	return u.sb.String()
+}
+
+func (u *UserAgentBuilder) appendTo(value string) {
+	if u.sb.Len() > 0 {
+		u.sb.WriteRune(' ')
+	}
+	u.sb.WriteString(value)
+}
diff --git a/vendor/github.com/aws/smithy-go/validation.go b/vendor/github.com/aws/smithy-go/validation.go
new file mode 100644
index 00000000000..b5eedc1f90a
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/validation.go
@@ -0,0 +1,140 @@
+package smithy
+
+import (
+	"bytes"
+	"fmt"
+	"strings"
+)
+
+// An InvalidParamsError provides wrapping of invalid parameter errors found when
+// validating API operation input parameters.
+type InvalidParamsError struct {
+	// Context is the base context of the invalid parameter group.
+	Context string
+	errs    []InvalidParamError
+}
+
+// Add adds a new invalid parameter error to the collection of invalid
+// parameters. The context of the invalid parameter will be updated to reflect
+// this collection.
+func (e *InvalidParamsError) Add(err InvalidParamError) {
+	err.SetContext(e.Context)
+	e.errs = append(e.errs, err)
+}
+
+// AddNested adds the invalid parameter errors from another InvalidParamsError
+// value into this collection. The nested errors will have their nested context
+// updated and base context to reflect the merging.
+//
+// Use for nested validation errors.
+func (e *InvalidParamsError) AddNested(nestedCtx string, nested InvalidParamsError) {
+	for _, err := range nested.errs {
+		err.SetContext(e.Context)
+		err.AddNestedContext(nestedCtx)
+		e.errs = append(e.errs, err)
+	}
+}
+
+// Len returns the number of invalid parameter errors.
+func (e *InvalidParamsError) Len() int {
+	return len(e.errs)
+}
+
+// Error returns the string formatted form of the invalid parameters.
+func (e InvalidParamsError) Error() string {
+	w := &bytes.Buffer{}
+	fmt.Fprintf(w, "%d validation error(s) found.\n", len(e.errs))
+
+	for _, err := range e.errs {
+		fmt.Fprintf(w, "- %s\n", err.Error())
+	}
+
+	return w.String()
+}
+
+// Errs returns a slice of the invalid parameter errors.
+func (e InvalidParamsError) Errs() []error {
+	errs := make([]error, len(e.errs))
+	for i := 0; i < len(errs); i++ {
+		errs[i] = e.errs[i]
+	}
+
+	return errs
+}
+
+// An InvalidParamError represents an invalid parameter error type.
+type InvalidParamError interface {
+	error
+
+	// Field name the error occurred on.
+	Field() string
+
+	// SetContext updates the context of the error.
+	SetContext(string)
+
+	// AddNestedContext updates the error's context to include a nested level.
+	AddNestedContext(string)
+}
+
+type invalidParamError struct {
+	context       string
+	nestedContext string
+	field         string
+	reason        string
+}
+
+// Error returns the string version of the invalid parameter error.
+func (e invalidParamError) Error() string {
+	return fmt.Sprintf("%s, %s.", e.reason, e.Field())
+}
+
+// Field returns the field and the context in which the error occurred.
+func (e invalidParamError) Field() string {
+	sb := &strings.Builder{}
+	sb.WriteString(e.context)
+	if sb.Len() > 0 {
+		if len(e.nestedContext) == 0 || (len(e.nestedContext) > 0 && e.nestedContext[:1] != "[") {
+			sb.WriteRune('.')
+		}
+	}
+	if len(e.nestedContext) > 0 {
+		sb.WriteString(e.nestedContext)
+		sb.WriteRune('.')
+	}
+	sb.WriteString(e.field)
+	return sb.String()
+}
+
+// SetContext updates the base context of the error.
+func (e *invalidParamError) SetContext(ctx string) {
+	e.context = ctx
+}
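
A minimal sketch of how these pieces compose, assuming only the exported smithy-go API; the `PutObjectInput` and `Checksum` context names are illustrative, not tied to any real operation:

```go
package main

import (
	"fmt"

	smithy "github.com/aws/smithy-go"
)

func main() {
	var errs smithy.InvalidParamsError
	errs.Context = "PutObjectInput" // illustrative base context

	// Add stamps the collection's context onto the error.
	errs.Add(smithy.NewErrParamRequired("Bucket"))

	// AddNested re-contexts errors gathered while validating a nested member.
	var nested smithy.InvalidParamsError
	nested.Add(smithy.NewErrParamRequired("Algorithm"))
	errs.AddNested("Checksum", nested)

	fmt.Print(errs.Error())
	// 2 validation error(s) found.
	// - missing required field, PutObjectInput.Bucket.
	// - missing required field, PutObjectInput.Checksum.Algorithm.
}
```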
+
+// AddNestedContext prepends a context to the field's path.
+func (e *invalidParamError) AddNestedContext(ctx string) {
+	if len(e.nestedContext) == 0 {
+		e.nestedContext = ctx
+		return
+	}
+	// Check if our nested context is an index into a slice or map
+	if e.nestedContext[:1] != "[" {
+		e.nestedContext = fmt.Sprintf("%s.%s", ctx, e.nestedContext)
+		return
+	}
+	e.nestedContext = ctx + e.nestedContext
+}
+
+// A ParamRequiredError represents a required parameter error.
+type ParamRequiredError struct {
+	invalidParamError
+}
+
+// NewErrParamRequired creates a new required parameter error.
+func NewErrParamRequired(field string) *ParamRequiredError {
+	return &ParamRequiredError{
+		invalidParamError{
+			field:  field,
+			reason: "missing required field",
+		},
+	}
+}
diff --git a/vendor/github.com/ebitengine/purego/.gitignore b/vendor/github.com/ebitengine/purego/.gitignore
new file mode 100644
index 00000000000..b25c15b81fa
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/.gitignore
@@ -0,0 +1 @@
+*~
diff --git a/vendor/github.com/ebitengine/purego/LICENSE b/vendor/github.com/ebitengine/purego/LICENSE
new file mode 100644
index 00000000000..8dada3edaf5
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/ebitengine/purego/README.md b/vendor/github.com/ebitengine/purego/README.md new file mode 100644 index 00000000000..f1ff9053ace --- /dev/null +++ b/vendor/github.com/ebitengine/purego/README.md @@ -0,0 +1,97 @@ +# purego +[![Go Reference](https://pkg.go.dev/badge/github.com/ebitengine/purego?GOOS=darwin.svg)](https://pkg.go.dev/github.com/ebitengine/purego?GOOS=darwin) + +A library for calling C functions from Go without Cgo. + +> This is beta software so expect bugs and potentially API breaking changes +> but each release will be tagged to avoid breaking people's code. +> Bug reports are encouraged. + +## Motivation + +The [Ebitengine](https://github.com/hajimehoshi/ebiten) game engine was ported to use only Go on Windows. This enabled +cross-compiling to Windows from any other operating system simply by setting `GOOS=windows`. The purego project was +born to bring that same vision to the other platforms supported by Ebitengine. 
+
+## Benefits
+
+- **Simple Cross-Compilation**: No C means you can build for other platforms easily without a C compiler.
+- **Faster Compilation**: Efficiently cache your all-Go builds.
+- **Smaller Binaries**: Using Cgo generates a C wrapper function for each C function called. Purego doesn't!
+- **Dynamic Linking**: Load symbols at runtime and use them as a plugin system.
+- **Foreign Function Interface**: Call into other languages that are compiled into shared objects.
+- **Cgo Fallback**: Works even with CGO_ENABLED=1 so incremental porting is possible.
+This also means unsupported GOARCHs (freebsd/riscv64, linux/mips, etc.) will still work
+except for float arguments and return values.
+
+## Supported Platforms
+
+- **FreeBSD**: amd64, arm64
+- **Linux**: amd64, arm64
+- **macOS / iOS**: amd64, arm64
+- **Windows**: 386*, amd64, arm*, arm64
+
+`*` These architectures only support SyscallN and NewCallback
+
+## Example
+
+The example below showcases purego use only for macOS and Linux. The other platforms require special handling, which can
+be seen in the complete example at [examples/libc](https://github.com/ebitengine/purego/tree/main/examples/libc), which supports Windows and FreeBSD.
+
+```go
+package main
+
+import (
+	"fmt"
+	"runtime"
+
+	"github.com/ebitengine/purego"
+)
+
+func getSystemLibrary() string {
+	switch runtime.GOOS {
+	case "darwin":
+		return "/usr/lib/libSystem.B.dylib"
+	case "linux":
+		return "libc.so.6"
+	default:
+		panic(fmt.Errorf("GOOS=%s is not supported", runtime.GOOS))
+	}
+}
+
+func main() {
+	libc, err := purego.Dlopen(getSystemLibrary(), purego.RTLD_NOW|purego.RTLD_GLOBAL)
+	if err != nil {
+		panic(err)
+	}
+	var puts func(string)
+	purego.RegisterLibFunc(&puts, libc, "puts")
+	puts("Calling C from Go without Cgo!")
+}
+```
+
+Then to run: `CGO_ENABLED=0 go run main.go`
+
+## Questions
+
+If you have questions about how to incorporate purego in your project or want to discuss
+how it works, join the [Discord](https://discord.gg/HzGZVD6BkY)!
+
+### External Code
+
+Purego uses code that originates from the Go runtime. These files are under the BSD-3
+License that can be found [in the Go Source](https://github.com/golang/go/blob/master/LICENSE).
+This is a list of the copied files:
+
+* `abi_*.h` from package `runtime/cgo`
+* `zcallback_darwin_*.s` from package `runtime`
+* `internal/fakecgo/abi_*.h` from package `runtime/cgo`
+* `internal/fakecgo/asm_GOARCH.s` from package `runtime/cgo`
+* `internal/fakecgo/callbacks.go` from package `runtime/cgo`
+* `internal/fakecgo/go_GOOS_GOARCH.go` from package `runtime/cgo`
+* `internal/fakecgo/iscgo.go` from package `runtime/cgo`
+* `internal/fakecgo/setenv.go` from package `runtime/cgo`
+* `internal/fakecgo/freebsd.go` from package `runtime/cgo`
+
+The files `abi_*.h` and `internal/fakecgo/abi_*.h` are the same because Bazel does not support cross-package use of
+`#include` so we need each one once per package. (cf. [issue](https://github.com/bazelbuild/rules_go/issues/3636))
diff --git a/vendor/github.com/ebitengine/purego/abi_amd64.h b/vendor/github.com/ebitengine/purego/abi_amd64.h
new file mode 100644
index 00000000000..9949435fe9e
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/abi_amd64.h
@@ -0,0 +1,99 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Macros for transitioning from the host ABI to Go ABI0.
+// +// These save the frame pointer, so in general, functions that use +// these should have zero frame size to suppress the automatic frame +// pointer, though it's harmless to not do this. + +#ifdef GOOS_windows + +// REGS_HOST_TO_ABI0_STACK is the stack bytes used by +// PUSH_REGS_HOST_TO_ABI0. +#define REGS_HOST_TO_ABI0_STACK (28*8 + 8) + +// PUSH_REGS_HOST_TO_ABI0 prepares for transitioning from +// the host ABI to Go ABI0 code. It saves all registers that are +// callee-save in the host ABI and caller-save in Go ABI0 and prepares +// for entry to Go. +// +// Save DI SI BP BX R12 R13 R14 R15 X6-X15 registers and the DF flag. +// Clear the DF flag for the Go ABI. +// MXCSR matches the Go ABI, so we don't have to set that, +// and Go doesn't modify it, so we don't have to save it. +#define PUSH_REGS_HOST_TO_ABI0() \ + PUSHFQ \ + CLD \ + ADJSP $(REGS_HOST_TO_ABI0_STACK - 8) \ + MOVQ DI, (0*0)(SP) \ + MOVQ SI, (1*8)(SP) \ + MOVQ BP, (2*8)(SP) \ + MOVQ BX, (3*8)(SP) \ + MOVQ R12, (4*8)(SP) \ + MOVQ R13, (5*8)(SP) \ + MOVQ R14, (6*8)(SP) \ + MOVQ R15, (7*8)(SP) \ + MOVUPS X6, (8*8)(SP) \ + MOVUPS X7, (10*8)(SP) \ + MOVUPS X8, (12*8)(SP) \ + MOVUPS X9, (14*8)(SP) \ + MOVUPS X10, (16*8)(SP) \ + MOVUPS X11, (18*8)(SP) \ + MOVUPS X12, (20*8)(SP) \ + MOVUPS X13, (22*8)(SP) \ + MOVUPS X14, (24*8)(SP) \ + MOVUPS X15, (26*8)(SP) + +#define POP_REGS_HOST_TO_ABI0() \ + MOVQ (0*0)(SP), DI \ + MOVQ (1*8)(SP), SI \ + MOVQ (2*8)(SP), BP \ + MOVQ (3*8)(SP), BX \ + MOVQ (4*8)(SP), R12 \ + MOVQ (5*8)(SP), R13 \ + MOVQ (6*8)(SP), R14 \ + MOVQ (7*8)(SP), R15 \ + MOVUPS (8*8)(SP), X6 \ + MOVUPS (10*8)(SP), X7 \ + MOVUPS (12*8)(SP), X8 \ + MOVUPS (14*8)(SP), X9 \ + MOVUPS (16*8)(SP), X10 \ + MOVUPS (18*8)(SP), X11 \ + MOVUPS (20*8)(SP), X12 \ + MOVUPS (22*8)(SP), X13 \ + MOVUPS (24*8)(SP), X14 \ + MOVUPS (26*8)(SP), X15 \ + ADJSP $-(REGS_HOST_TO_ABI0_STACK - 8) \ + POPFQ + +#else +// SysV ABI + +#define REGS_HOST_TO_ABI0_STACK (6*8) + +// SysV MXCSR matches the Go ABI, so we don't have to set that, +// and Go doesn't modify it, so we don't have to save it. +// Both SysV and Go require DF to be cleared, so that's already clear. +// The SysV and Go frame pointer conventions are compatible. +#define PUSH_REGS_HOST_TO_ABI0() \ + ADJSP $(REGS_HOST_TO_ABI0_STACK) \ + MOVQ BP, (5*8)(SP) \ + LEAQ (5*8)(SP), BP \ + MOVQ BX, (0*8)(SP) \ + MOVQ R12, (1*8)(SP) \ + MOVQ R13, (2*8)(SP) \ + MOVQ R14, (3*8)(SP) \ + MOVQ R15, (4*8)(SP) + +#define POP_REGS_HOST_TO_ABI0() \ + MOVQ (0*8)(SP), BX \ + MOVQ (1*8)(SP), R12 \ + MOVQ (2*8)(SP), R13 \ + MOVQ (3*8)(SP), R14 \ + MOVQ (4*8)(SP), R15 \ + MOVQ (5*8)(SP), BP \ + ADJSP $-(REGS_HOST_TO_ABI0_STACK) + +#endif diff --git a/vendor/github.com/ebitengine/purego/abi_arm64.h b/vendor/github.com/ebitengine/purego/abi_arm64.h new file mode 100644 index 00000000000..5d5061ec1db --- /dev/null +++ b/vendor/github.com/ebitengine/purego/abi_arm64.h @@ -0,0 +1,39 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Macros for transitioning from the host ABI to Go ABI0. +// +// These macros save and restore the callee-saved registers +// from the stack, but they don't adjust stack pointer, so +// the user should prepare stack space in advance. +// SAVE_R19_TO_R28(offset) saves R19 ~ R28 to the stack space +// of ((offset)+0*8)(RSP) ~ ((offset)+9*8)(RSP). +// +// SAVE_F8_TO_F15(offset) saves F8 ~ F15 to the stack space +// of ((offset)+0*8)(RSP) ~ ((offset)+7*8)(RSP). 
+// +// R29 is not saved because Go will save and restore it. + +#define SAVE_R19_TO_R28(offset) \ + STP (R19, R20), ((offset)+0*8)(RSP) \ + STP (R21, R22), ((offset)+2*8)(RSP) \ + STP (R23, R24), ((offset)+4*8)(RSP) \ + STP (R25, R26), ((offset)+6*8)(RSP) \ + STP (R27, g), ((offset)+8*8)(RSP) +#define RESTORE_R19_TO_R28(offset) \ + LDP ((offset)+0*8)(RSP), (R19, R20) \ + LDP ((offset)+2*8)(RSP), (R21, R22) \ + LDP ((offset)+4*8)(RSP), (R23, R24) \ + LDP ((offset)+6*8)(RSP), (R25, R26) \ + LDP ((offset)+8*8)(RSP), (R27, g) /* R28 */ +#define SAVE_F8_TO_F15(offset) \ + FSTPD (F8, F9), ((offset)+0*8)(RSP) \ + FSTPD (F10, F11), ((offset)+2*8)(RSP) \ + FSTPD (F12, F13), ((offset)+4*8)(RSP) \ + FSTPD (F14, F15), ((offset)+6*8)(RSP) +#define RESTORE_F8_TO_F15(offset) \ + FLDPD ((offset)+0*8)(RSP), (F8, F9) \ + FLDPD ((offset)+2*8)(RSP), (F10, F11) \ + FLDPD ((offset)+4*8)(RSP), (F12, F13) \ + FLDPD ((offset)+6*8)(RSP), (F14, F15) diff --git a/vendor/github.com/ebitengine/purego/cgo.go b/vendor/github.com/ebitengine/purego/cgo.go new file mode 100644 index 00000000000..7d5abef3499 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/cgo.go @@ -0,0 +1,19 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build cgo && (darwin || freebsd || linux) + +package purego + +// if CGO_ENABLED=1 import the Cgo runtime to ensure that it is set up properly. +// This is required since some frameworks need TLS setup the C way which Go doesn't do. +// We currently don't support ios in fakecgo mode so force Cgo or fail +// Even if CGO_ENABLED=1 the Cgo runtime is not imported unless `import "C"` is used. +// which will import this package automatically. Normally this isn't an issue since it +// usually isn't possible to call into C without using that import. However, with purego +// it is since we don't use `import "C"`! +import ( + _ "runtime/cgo" + + _ "github.com/ebitengine/purego/internal/cgo" +) diff --git a/vendor/github.com/ebitengine/purego/dlerror.go b/vendor/github.com/ebitengine/purego/dlerror.go new file mode 100644 index 00000000000..95cdfe16f24 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/dlerror.go @@ -0,0 +1,17 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 The Ebitengine Authors + +//go:build darwin || freebsd || linux + +package purego + +// Dlerror represents an error value returned from Dlopen, Dlsym, or Dlclose. +// +// This type is not available on Windows as there is no counterpart to it on Windows. 
+type Dlerror struct {
+	s string
+}
+
+func (e Dlerror) Error() string {
+	return e.s
+}
diff --git a/vendor/github.com/ebitengine/purego/dlfcn.go b/vendor/github.com/ebitengine/purego/dlfcn.go
new file mode 100644
index 00000000000..f70a24584d6
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/dlfcn.go
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+//go:build (darwin || freebsd || linux) && !android && !faketime
+
+package purego
+
+import (
+	"unsafe"
+)
+
+// Unix Specification for dlfcn.h: https://pubs.opengroup.org/onlinepubs/7908799/xsh/dlfcn.h.html
+
+var (
+	fnDlopen  func(path string, mode int) uintptr
+	fnDlsym   func(handle uintptr, name string) uintptr
+	fnDlerror func() string
+	fnDlclose func(handle uintptr) bool
+)
+
+func init() {
+	RegisterFunc(&fnDlopen, dlopenABI0)
+	RegisterFunc(&fnDlsym, dlsymABI0)
+	RegisterFunc(&fnDlerror, dlerrorABI0)
+	RegisterFunc(&fnDlclose, dlcloseABI0)
+}
+
+// Dlopen examines the dynamic library or bundle file specified by path. If the file is compatible
+// with the current process and has not already been loaded into the
+// current process, it is loaded and linked. After being linked, if it contains
+// any initializer functions, they are called before Dlopen
+// returns. It returns a handle that can be used with Dlsym and Dlclose.
+// A second call to Dlopen with the same path will return the same handle, but the internal
+// reference count for the handle will be incremented. Therefore, all
+// Dlopen calls should be balanced with a Dlclose call.
+//
+// This function is not available on Windows.
+// Use [golang.org/x/sys/windows.LoadLibrary], [golang.org/x/sys/windows.LoadLibraryEx],
+// [golang.org/x/sys/windows.NewLazyDLL], or [golang.org/x/sys/windows.NewLazySystemDLL] for Windows instead.
+func Dlopen(path string, mode int) (uintptr, error) {
+	u := fnDlopen(path, mode)
+	if u == 0 {
+		return 0, Dlerror{fnDlerror()}
+	}
+	return u, nil
+}
+
+// Dlsym takes a "handle" of a dynamic library returned by Dlopen and the symbol name.
+// It returns the address where that symbol is loaded into memory. If the symbol is not found
+// in the specified library or any of the libraries that were automatically loaded by Dlopen
+// when that library was loaded, Dlsym returns zero.
+//
+// This function is not available on Windows.
+// Use [golang.org/x/sys/windows.GetProcAddress] for Windows instead.
+func Dlsym(handle uintptr, name string) (uintptr, error) {
+	u := fnDlsym(handle, name)
+	if u == 0 {
+		return 0, Dlerror{fnDlerror()}
+	}
+	return u, nil
+}
+
+// Dlclose decrements the reference count on the dynamic library handle.
+// If the reference count drops to zero and no other loaded libraries
+// use symbols in it, then the dynamic library is unloaded.
+//
+// This function is not available on Windows.
+// Use [golang.org/x/sys/windows.FreeLibrary] for Windows instead.
+func Dlclose(handle uintptr) error {
+	if fnDlclose(handle) {
+		return Dlerror{fnDlerror()}
+	}
+	return nil
+}
+
+func loadSymbol(handle uintptr, name string) (uintptr, error) {
+	return Dlsym(handle, name)
+}
+
+// these functions exist in dlfcn_stubs.s and are calling C functions linked to in dlfcn_GOOS.go
+// the indirection is necessary because a function is actually a pointer to the pointer to the code.
+// sadly, I do not know of any way to remove the assembly stubs entirely because //go:linkname doesn't
+// appear to work if you link directly to the C function on darwin arm64.
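
A round-trip through this API might look like the sketch below. It assumes a Linux host where libc is available as `libc.so.6` (see getSystemLibrary in the README for other platforms); `getpid` is just a convenient symbol to resolve:

```go
package main

import (
	"fmt"

	"github.com/ebitengine/purego"
)

func main() {
	libc, err := purego.Dlopen("libc.so.6", purego.RTLD_NOW|purego.RTLD_GLOBAL)
	if err != nil {
		panic(err)
	}
	// Dlopen reference counts handles, so balance it with Dlclose.
	defer purego.Dlclose(libc)

	addr, err := purego.Dlsym(libc, "getpid")
	if err != nil {
		panic(err)
	}
	fmt.Printf("getpid loaded at %#x\n", addr)

	// RegisterFunc turns the raw address into a typed Go function.
	var getpid func() int32
	purego.RegisterFunc(&getpid, addr)
	fmt.Println("pid:", getpid())
}
```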
+ +//go:linkname dlopen dlopen +var dlopen uintptr +var dlopenABI0 = uintptr(unsafe.Pointer(&dlopen)) + +//go:linkname dlsym dlsym +var dlsym uintptr +var dlsymABI0 = uintptr(unsafe.Pointer(&dlsym)) + +//go:linkname dlclose dlclose +var dlclose uintptr +var dlcloseABI0 = uintptr(unsafe.Pointer(&dlclose)) + +//go:linkname dlerror dlerror +var dlerror uintptr +var dlerrorABI0 = uintptr(unsafe.Pointer(&dlerror)) diff --git a/vendor/github.com/ebitengine/purego/dlfcn_android.go b/vendor/github.com/ebitengine/purego/dlfcn_android.go new file mode 100644 index 00000000000..0d5341764ed --- /dev/null +++ b/vendor/github.com/ebitengine/purego/dlfcn_android.go @@ -0,0 +1,34 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2024 The Ebitengine Authors + +package purego + +import "github.com/ebitengine/purego/internal/cgo" + +// Source for constants: https://android.googlesource.com/platform/bionic/+/refs/heads/main/libc/include/dlfcn.h + +const ( + is64bit = 1 << (^uintptr(0) >> 63) / 2 + is32bit = 1 - is64bit + RTLD_DEFAULT = is32bit * 0xffffffff + RTLD_LAZY = 0x00000001 + RTLD_NOW = is64bit * 0x00000002 + RTLD_LOCAL = 0x00000000 + RTLD_GLOBAL = is64bit*0x00100 | is32bit*0x00000002 +) + +func Dlopen(path string, mode int) (uintptr, error) { + return cgo.Dlopen(path, mode) +} + +func Dlsym(handle uintptr, name string) (uintptr, error) { + return cgo.Dlsym(handle, name) +} + +func Dlclose(handle uintptr) error { + return cgo.Dlclose(handle) +} + +func loadSymbol(handle uintptr, name string) (uintptr, error) { + return Dlsym(handle, name) +} diff --git a/vendor/github.com/ebitengine/purego/dlfcn_darwin.go b/vendor/github.com/ebitengine/purego/dlfcn_darwin.go new file mode 100644 index 00000000000..5f876278a3e --- /dev/null +++ b/vendor/github.com/ebitengine/purego/dlfcn_darwin.go @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +package purego + +// Source for constants: https://opensource.apple.com/source/dyld/dyld-360.14/include/dlfcn.h.auto.html + +const ( + RTLD_DEFAULT = 1<<64 - 2 // Pseudo-handle for dlsym so search for any loaded symbol + RTLD_LAZY = 0x1 // Relocations are performed at an implementation-dependent time. + RTLD_NOW = 0x2 // Relocations are performed when the object is loaded. + RTLD_LOCAL = 0x4 // All symbols are not made available for relocation processing by other modules. + RTLD_GLOBAL = 0x8 // All symbols are available for relocation processing of other modules. 
+)
+
+//go:cgo_import_dynamic purego_dlopen dlopen "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic purego_dlsym dlsym "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic purego_dlerror dlerror "/usr/lib/libSystem.B.dylib"
+//go:cgo_import_dynamic purego_dlclose dlclose "/usr/lib/libSystem.B.dylib"
diff --git a/vendor/github.com/ebitengine/purego/dlfcn_freebsd.go b/vendor/github.com/ebitengine/purego/dlfcn_freebsd.go
new file mode 100644
index 00000000000..6b371620d96
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/dlfcn_freebsd.go
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+package purego
+
+// Constants as defined in https://github.com/freebsd/freebsd-src/blob/main/include/dlfcn.h
+const (
+	intSize = 32 << (^uint(0) >> 63) // 32 or 64
+	RTLD_DEFAULT = 1< C)
+//
+//	string <=> char*
+//	bool <=> _Bool
+//	uintptr <=> uintptr_t
+//	uint <=> uint32_t or uint64_t
+//	uint8 <=> uint8_t
+//	uint16 <=> uint16_t
+//	uint32 <=> uint32_t
+//	uint64 <=> uint64_t
+//	int <=> int32_t or int64_t
+//	int8 <=> int8_t
+//	int16 <=> int16_t
+//	int32 <=> int32_t
+//	int64 <=> int64_t
+//	float32 <=> float
+//	float64 <=> double
+//	struct <=> struct (WIP - darwin only)
+//	func <=> C function
+//	unsafe.Pointer, *T <=> void*
+//	[]T => void*
+//
+// There is a special case when the last argument of fptr is a variadic interface (or []interface{});
+// it will be expanded into a call to the C function as if it had the arguments in that slice.
+// This means that using arg ...interface{} is like a cast to the function with the arguments inside arg.
+// This is not the same as C variadic.
+//
+// # Memory
+//
+// In general it is not possible for purego to guarantee the lifetimes of objects returned or received from
+// calling functions using RegisterFunc. For arguments to a C function it is important that the C function doesn't
+// hold onto a reference to Go memory. This is the same as the [Cgo rules].
+//
+// However, there are some special cases. When passing a string as an argument if the string does not end in a null
+// terminated byte (\x00) then the string will be copied into memory maintained by purego. The memory is only valid for
+// that specific call. Therefore, if the C code keeps a reference to that string it may become invalid at some
+// undefined time. However, if the string does already contain a null-terminated byte then no copy is done.
+// It is then the responsibility of the caller to ensure the string stays alive as long as it's needed in C memory.
+// This can be done using runtime.KeepAlive or allocating the string in C memory using malloc. When a C function
+// returns a null-terminated pointer to char a Go string can be used. Purego will allocate a new string in Go memory
+// and copy the data over. This string will be garbage collected whenever Go decides it's no longer referenced.
+// This C created string will not be freed by purego. If the pointer to char is not null-terminated or must continue
+// to point to C memory (because it's a buffer for example) then use a pointer to byte and then convert that to a slice
+// using unsafe.Slice.
Doing this means that it becomes the responsibility of the caller to care about the lifetime +// of the pointer +// +// # Structs +// +// Purego can handle the most common structs that have fields of builtin types like int8, uint16, float32, etc. However, +// it does not support aligning fields properly. It is therefore the responsibility of the caller to ensure +// that all padding is added to the Go struct to match the C one. See `BoolStructFn` in struct_test.go for an example. +// +// # Example +// +// All functions below call this C function: +// +// char *foo(char *str); +// +// // Let purego convert types +// var foo func(s string) string +// goString := foo("copied") +// // Go will garbage collect this string +// +// // Manually, handle allocations +// var foo2 func(b string) *byte +// mustFree := foo2("not copied\x00") +// defer free(mustFree) +// +// [Cgo rules]: https://pkg.go.dev/cmd/cgo#hdr-Go_references_to_C +func RegisterFunc(fptr interface{}, cfn uintptr) { + fn := reflect.ValueOf(fptr).Elem() + ty := fn.Type() + if ty.Kind() != reflect.Func { + panic("purego: fptr must be a function pointer") + } + if ty.NumOut() > 1 { + panic("purego: function can only return zero or one values") + } + if cfn == 0 { + panic("purego: cfn is nil") + } + if ty.NumOut() == 1 && (ty.Out(0).Kind() == reflect.Float32 || ty.Out(0).Kind() == reflect.Float64) && + runtime.GOARCH != "arm64" && runtime.GOARCH != "amd64" { + panic("purego: float returns are not supported") + } + { + // this code checks how many registers and stack this function will use + // to avoid crashing with too many arguments + var ints int + var floats int + var stack int + for i := 0; i < ty.NumIn(); i++ { + arg := ty.In(i) + switch arg.Kind() { + case reflect.Func: + // This only does preliminary testing to ensure the CDecl argument + // is the first argument. Full testing is done when the callback is actually + // created in NewCallback. 
+ for j := 0; j < arg.NumIn(); j++ { + in := arg.In(j) + if !in.AssignableTo(reflect.TypeOf(CDecl{})) { + continue + } + if j != 0 { + panic("purego: CDecl must be the first argument") + } + } + case reflect.String, reflect.Uintptr, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Ptr, reflect.UnsafePointer, + reflect.Slice, reflect.Bool: + if ints < numOfIntegerRegisters() { + ints++ + } else { + stack++ + } + case reflect.Float32, reflect.Float64: + const is32bit = unsafe.Sizeof(uintptr(0)) == 4 + if is32bit { + panic("purego: floats only supported on 64bit platforms") + } + if floats < numOfFloats { + floats++ + } else { + stack++ + } + case reflect.Struct: + if runtime.GOOS != "darwin" || (runtime.GOARCH != "amd64" && runtime.GOARCH != "arm64") { + panic("purego: struct arguments are only supported on darwin amd64 & arm64") + } + if arg.Size() == 0 { + continue + } + addInt := func(u uintptr) { + ints++ + } + addFloat := func(u uintptr) { + floats++ + } + addStack := func(u uintptr) { + stack++ + } + _ = addStruct(reflect.New(arg).Elem(), &ints, &floats, &stack, addInt, addFloat, addStack, nil) + default: + panic("purego: unsupported kind " + arg.Kind().String()) + } + } + if ty.NumOut() == 1 && ty.Out(0).Kind() == reflect.Struct { + if runtime.GOOS != "darwin" { + panic("purego: struct return values only supported on darwin arm64 & amd64") + } + outType := ty.Out(0) + checkStructFieldsSupported(outType) + if runtime.GOARCH == "amd64" && outType.Size() > maxRegAllocStructSize { + // on amd64 if struct is bigger than 16 bytes allocate the return struct + // and pass it in as a hidden first argument. + ints++ + } + } + sizeOfStack := maxArgs - numOfIntegerRegisters() + if stack > sizeOfStack { + panic("purego: too many arguments") + } + } + v := reflect.MakeFunc(ty, func(args []reflect.Value) (results []reflect.Value) { + if len(args) > 0 { + if variadic, ok := args[len(args)-1].Interface().([]interface{}); ok { + // subtract one from args bc the last argument in args is []interface{} + // which we are currently expanding + tmp := make([]reflect.Value, len(args)-1+len(variadic)) + n := copy(tmp, args[:len(args)-1]) + for i, v := range variadic { + tmp[n+i] = reflect.ValueOf(v) + } + args = tmp + } + } + var sysargs [maxArgs]uintptr + stack := sysargs[numOfIntegerRegisters():] + var floats [numOfFloats]uintptr + var numInts int + var numFloats int + var numStack int + var addStack, addInt, addFloat func(x uintptr) + if runtime.GOARCH == "arm64" || runtime.GOOS != "windows" { + // Windows arm64 uses the same calling convention as macOS and Linux + addStack = func(x uintptr) { + stack[numStack] = x + numStack++ + } + addInt = func(x uintptr) { + if numInts >= numOfIntegerRegisters() { + addStack(x) + } else { + sysargs[numInts] = x + numInts++ + } + } + addFloat = func(x uintptr) { + if numFloats < len(floats) { + floats[numFloats] = x + numFloats++ + } else { + addStack(x) + } + } + } else { + // On Windows amd64 the arguments are passed in the numbered registered. + // So the first int is in the first integer register and the first float + // is in the second floating register if there is already a first int. + // This is in contrast to how macOS and Linux pass arguments which + // tries to use as many registers as possible in the calling convention. 
+ addStack = func(x uintptr) { + sysargs[numStack] = x + numStack++ + } + addInt = addStack + addFloat = addStack + } + + var keepAlive []interface{} + defer func() { + runtime.KeepAlive(keepAlive) + runtime.KeepAlive(args) + }() + var syscall syscall15Args + if ty.NumOut() == 1 && ty.Out(0).Kind() == reflect.Struct { + outType := ty.Out(0) + if runtime.GOARCH == "amd64" && outType.Size() > maxRegAllocStructSize { + val := reflect.New(outType) + keepAlive = append(keepAlive, val) + addInt(val.Pointer()) + } else if runtime.GOARCH == "arm64" && outType.Size() > maxRegAllocStructSize { + isAllFloats, numFields := isAllSameFloat(outType) + if !isAllFloats || numFields > 4 { + val := reflect.New(outType) + keepAlive = append(keepAlive, val) + syscall.arm64_r8 = val.Pointer() + } + } + } + for _, v := range args { + switch v.Kind() { + case reflect.String: + ptr := strings.CString(v.String()) + keepAlive = append(keepAlive, ptr) + addInt(uintptr(unsafe.Pointer(ptr))) + case reflect.Uintptr, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + addInt(uintptr(v.Uint())) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + addInt(uintptr(v.Int())) + case reflect.Ptr, reflect.UnsafePointer, reflect.Slice: + // There is no need to keepAlive this pointer separately because it is kept alive in the args variable + addInt(v.Pointer()) + case reflect.Func: + addInt(NewCallback(v.Interface())) + case reflect.Bool: + if v.Bool() { + addInt(1) + } else { + addInt(0) + } + case reflect.Float32: + addFloat(uintptr(math.Float32bits(float32(v.Float())))) + case reflect.Float64: + addFloat(uintptr(math.Float64bits(v.Float()))) + case reflect.Struct: + keepAlive = addStruct(v, &numInts, &numFloats, &numStack, addInt, addFloat, addStack, keepAlive) + default: + panic("purego: unsupported kind: " + v.Kind().String()) + } + } + if runtime.GOARCH == "arm64" || runtime.GOOS != "windows" { + // Use the normal arm64 calling convention even on Windows + syscall = syscall15Args{ + cfn, + sysargs[0], sysargs[1], sysargs[2], sysargs[3], sysargs[4], sysargs[5], + sysargs[6], sysargs[7], sysargs[8], sysargs[9], sysargs[10], sysargs[11], + sysargs[12], sysargs[13], sysargs[14], + floats[0], floats[1], floats[2], floats[3], floats[4], floats[5], floats[6], floats[7], + syscall.arm64_r8, + } + runtime_cgocall(syscall15XABI0, unsafe.Pointer(&syscall)) + } else { + // This is a fallback for Windows amd64, 386, and arm. Note this may not support floats + syscall.a1, syscall.a2, _ = syscall_syscall15X(cfn, sysargs[0], sysargs[1], sysargs[2], sysargs[3], sysargs[4], + sysargs[5], sysargs[6], sysargs[7], sysargs[8], sysargs[9], sysargs[10], sysargs[11], + sysargs[12], sysargs[13], sysargs[14]) + syscall.f1 = syscall.a2 // on amd64 a2 stores the float return. 
On 32bit platforms floats aren't supported.
+	}
+	if ty.NumOut() == 0 {
+		return nil
+	}
+	outType := ty.Out(0)
+	v := reflect.New(outType).Elem()
+	switch outType.Kind() {
+	case reflect.Uintptr, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		v.SetUint(uint64(syscall.a1))
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		v.SetInt(int64(syscall.a1))
+	case reflect.Bool:
+		v.SetBool(byte(syscall.a1) != 0)
+	case reflect.UnsafePointer:
+		// We take the address and then dereference it to trick go vet from creating a possible misuse of unsafe.Pointer
+		v.SetPointer(*(*unsafe.Pointer)(unsafe.Pointer(&syscall.a1)))
+	case reflect.Ptr:
+		v = reflect.NewAt(outType, unsafe.Pointer(&syscall.a1)).Elem()
+	case reflect.Func:
+		// wrap this C function in a nicely typed Go function
+		v = reflect.New(outType)
+		RegisterFunc(v.Interface(), syscall.a1)
+	case reflect.String:
+		v.SetString(strings.GoString(syscall.a1))
+	case reflect.Float32:
+		// NOTE: syscall.r2 is only the floating return value on 64bit platforms.
+		// On 32bit platforms syscall.r2 is the upper part of a 64bit return.
+		v.SetFloat(float64(math.Float32frombits(uint32(syscall.f1))))
+	case reflect.Float64:
+		// NOTE: syscall.r2 is only the floating return value on 64bit platforms.
+		// On 32bit platforms syscall.r2 is the upper part of a 64bit return.
+		v.SetFloat(math.Float64frombits(uint64(syscall.f1)))
+	case reflect.Struct:
+		v = getStruct(outType, syscall)
+	default:
+		panic("purego: unsupported return kind: " + outType.Kind().String())
+	}
+	return []reflect.Value{v}
+	})
+	fn.Set(v)
+}
+
+// maxRegAllocStructSize is the biggest a struct can be while still fitting in registers.
+// if it is bigger than this then enough space must be allocated on the heap and then passed into
+// the function as the first parameter on amd64 or in R8 on arm64.
+//
+// If you change this, make sure to update it in objc_runtime_darwin.go.
+const maxRegAllocStructSize = 16
+
+func isAllSameFloat(ty reflect.Type) (allFloats bool, numFields int) {
+	allFloats = true
+	root := ty.Field(0).Type
+	for root.Kind() == reflect.Struct {
+		root = root.Field(0).Type
+	}
+	first := root.Kind()
+	if first != reflect.Float32 && first != reflect.Float64 {
+		allFloats = false
+	}
+	for i := 0; i < ty.NumField(); i++ {
+		f := ty.Field(i).Type
+		if f.Kind() == reflect.Struct {
+			var structNumFields int
+			allFloats, structNumFields = isAllSameFloat(f)
+			numFields += structNumFields
+			continue
+		}
+		numFields++
+		if f.Kind() != first {
+			allFloats = false
+		}
+	}
+	return allFloats, numFields
+}
+
+func checkStructFieldsSupported(ty reflect.Type) {
+	for i := 0; i < ty.NumField(); i++ {
+		f := ty.Field(i).Type
+		if f.Kind() == reflect.Array {
+			f = f.Elem()
+		} else if f.Kind() == reflect.Struct {
+			checkStructFieldsSupported(f)
+			continue
+		}
+		switch f.Kind() {
+		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+			reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
+			reflect.Uintptr, reflect.Ptr, reflect.UnsafePointer, reflect.Float64, reflect.Float32:
+		default:
+			panic(fmt.Sprintf("purego: struct field type %s is not supported", f))
+		}
+	}
+}
+
+func roundUpTo8(val uintptr) uintptr {
+	return (val + 7) &^ 7
+}
+
+func numOfIntegerRegisters() int {
+	switch runtime.GOARCH {
+	case "arm64":
+		return 8
+	case "amd64":
+		return 6
+	default:
+		// This platform isn't supported and can therefore only access
+		// integer registers, so it is fine to return maxArgs.
+		return maxArgs
+	}
+}
diff --git a/vendor/github.com/ebitengine/purego/go_runtime.go b/vendor/github.com/ebitengine/purego/go_runtime.go
new file mode 100644
index 00000000000..13671ff23f2
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/go_runtime.go
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+//go:build darwin || freebsd || linux || windows
+
+package purego
+
+import (
+	"unsafe"
+)
+
+//go:linkname runtime_cgocall runtime.cgocall
+func runtime_cgocall(fn uintptr, arg unsafe.Pointer) int32 // from runtime/sys_libc.go
diff --git a/vendor/github.com/ebitengine/purego/internal/cgo/dlfcn_cgo_unix.go b/vendor/github.com/ebitengine/purego/internal/cgo/dlfcn_cgo_unix.go
new file mode 100644
index 00000000000..b09ecac1cfe
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/cgo/dlfcn_cgo_unix.go
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2024 The Ebitengine Authors
+
+//go:build freebsd || linux
+
+package cgo
+
+/*
+ #cgo LDFLAGS: -ldl
+
+#include <dlfcn.h>
+#include <stdlib.h>
+*/
+import "C"
+
+import (
+	"errors"
+	"unsafe"
+)
+
+func Dlopen(filename string, flag int) (uintptr, error) {
+	cfilename := C.CString(filename)
+	defer C.free(unsafe.Pointer(cfilename))
+	handle := C.dlopen(cfilename, C.int(flag))
+	if handle == nil {
+		return 0, errors.New(C.GoString(C.dlerror()))
+	}
+	return uintptr(handle), nil
+}
+
+func Dlsym(handle uintptr, symbol string) (uintptr, error) {
+	csymbol := C.CString(symbol)
+	defer C.free(unsafe.Pointer(csymbol))
+	symbolAddr := C.dlsym(*(*unsafe.Pointer)(unsafe.Pointer(&handle)), csymbol)
+	if symbolAddr == nil {
+		return 0, errors.New(C.GoString(C.dlerror()))
+	}
+	return uintptr(symbolAddr), nil
+}
+
+func Dlclose(handle uintptr) error {
+	result := C.dlclose(*(*unsafe.Pointer)(unsafe.Pointer(&handle)))
+	if result != 0 {
+		return errors.New(C.GoString(C.dlerror()))
+	}
+	return nil
+}
+
+// All that is needed is to assign each dl function; its symbol will then
+// be made available to the linker and linked to inside dlfcn.go.
+var (
+	_ = C.dlopen
+	_ = C.dlsym
+	_ = C.dlerror
+	_ = C.dlclose
+)
diff --git a/vendor/github.com/ebitengine/purego/internal/cgo/empty.go b/vendor/github.com/ebitengine/purego/internal/cgo/empty.go
new file mode 100644
index 00000000000..1d7cffe2a7e
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/cgo/empty.go
@@ -0,0 +1,6 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2024 The Ebitengine Authors
+
+package cgo
+
+// Empty so that importing this package doesn't cause issues on certain platforms.
diff --git a/vendor/github.com/ebitengine/purego/internal/cgo/syscall_cgo_unix.go b/vendor/github.com/ebitengine/purego/internal/cgo/syscall_cgo_unix.go
new file mode 100644
index 00000000000..37ff24d5c1d
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/cgo/syscall_cgo_unix.go
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+//go:build freebsd || (linux && !(arm64 || amd64))
+
+package cgo
+
+// This file is placed inside internal/cgo and not package purego
+// because Cgo and assembly files can't be in the same package.
+
+/*
+ #cgo LDFLAGS: -ldl
+
+#include <assert.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdlib.h>
+
+typedef struct syscall15Args {
+	uintptr_t fn;
+	uintptr_t a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15;
+	uintptr_t f1, f2, f3, f4, f5, f6, f7, f8;
+	uintptr_t err;
+} syscall15Args;
+
+void syscall15(struct syscall15Args *args) {
+	assert((args->f1|args->f2|args->f3|args->f4|args->f5|args->f6|args->f7|args->f8) == 0);
+	uintptr_t (*func_name)(uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4, uintptr_t a5, uintptr_t a6,
+		uintptr_t a7, uintptr_t a8, uintptr_t a9, uintptr_t a10, uintptr_t a11, uintptr_t a12,
+		uintptr_t a13, uintptr_t a14, uintptr_t a15);
+	*(void**)(&func_name) = (void*)(args->fn);
+	uintptr_t r1 = func_name(args->a1,args->a2,args->a3,args->a4,args->a5,args->a6,args->a7,args->a8,args->a9,
+		args->a10,args->a11,args->a12,args->a13,args->a14,args->a15);
+	args->a1 = r1;
+	args->err = errno;
+}
+
+*/
+import "C"
+import "unsafe"
+
+// Assign purego.syscall15XABI0 to the C version of this function.
+var Syscall15XABI0 = unsafe.Pointer(C.syscall15)
+
+//go:nosplit
+func Syscall15X(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15 uintptr) (r1, r2, err uintptr) {
+	args := C.syscall15Args{
+		C.uintptr_t(fn), C.uintptr_t(a1), C.uintptr_t(a2), C.uintptr_t(a3),
+		C.uintptr_t(a4), C.uintptr_t(a5), C.uintptr_t(a6),
+		C.uintptr_t(a7), C.uintptr_t(a8), C.uintptr_t(a9), C.uintptr_t(a10), C.uintptr_t(a11), C.uintptr_t(a12),
+		C.uintptr_t(a13), C.uintptr_t(a14), C.uintptr_t(a15), 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	}
+	C.syscall15(&args)
+	return uintptr(args.a1), 0, uintptr(args.err)
+}
diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/abi_amd64.h b/vendor/github.com/ebitengine/purego/internal/fakecgo/abi_amd64.h
new file mode 100644
index 00000000000..9949435fe9e
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/abi_amd64.h
@@ -0,0 +1,99 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Macros for transitioning from the host ABI to Go ABI0.
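+// (ABI0 is Go's original calling convention, which passes all arguments
+// and results on the stack.)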
+// +// These save the frame pointer, so in general, functions that use +// these should have zero frame size to suppress the automatic frame +// pointer, though it's harmless to not do this. + +#ifdef GOOS_windows + +// REGS_HOST_TO_ABI0_STACK is the stack bytes used by +// PUSH_REGS_HOST_TO_ABI0. +#define REGS_HOST_TO_ABI0_STACK (28*8 + 8) + +// PUSH_REGS_HOST_TO_ABI0 prepares for transitioning from +// the host ABI to Go ABI0 code. It saves all registers that are +// callee-save in the host ABI and caller-save in Go ABI0 and prepares +// for entry to Go. +// +// Save DI SI BP BX R12 R13 R14 R15 X6-X15 registers and the DF flag. +// Clear the DF flag for the Go ABI. +// MXCSR matches the Go ABI, so we don't have to set that, +// and Go doesn't modify it, so we don't have to save it. +#define PUSH_REGS_HOST_TO_ABI0() \ + PUSHFQ \ + CLD \ + ADJSP $(REGS_HOST_TO_ABI0_STACK - 8) \ + MOVQ DI, (0*0)(SP) \ + MOVQ SI, (1*8)(SP) \ + MOVQ BP, (2*8)(SP) \ + MOVQ BX, (3*8)(SP) \ + MOVQ R12, (4*8)(SP) \ + MOVQ R13, (5*8)(SP) \ + MOVQ R14, (6*8)(SP) \ + MOVQ R15, (7*8)(SP) \ + MOVUPS X6, (8*8)(SP) \ + MOVUPS X7, (10*8)(SP) \ + MOVUPS X8, (12*8)(SP) \ + MOVUPS X9, (14*8)(SP) \ + MOVUPS X10, (16*8)(SP) \ + MOVUPS X11, (18*8)(SP) \ + MOVUPS X12, (20*8)(SP) \ + MOVUPS X13, (22*8)(SP) \ + MOVUPS X14, (24*8)(SP) \ + MOVUPS X15, (26*8)(SP) + +#define POP_REGS_HOST_TO_ABI0() \ + MOVQ (0*0)(SP), DI \ + MOVQ (1*8)(SP), SI \ + MOVQ (2*8)(SP), BP \ + MOVQ (3*8)(SP), BX \ + MOVQ (4*8)(SP), R12 \ + MOVQ (5*8)(SP), R13 \ + MOVQ (6*8)(SP), R14 \ + MOVQ (7*8)(SP), R15 \ + MOVUPS (8*8)(SP), X6 \ + MOVUPS (10*8)(SP), X7 \ + MOVUPS (12*8)(SP), X8 \ + MOVUPS (14*8)(SP), X9 \ + MOVUPS (16*8)(SP), X10 \ + MOVUPS (18*8)(SP), X11 \ + MOVUPS (20*8)(SP), X12 \ + MOVUPS (22*8)(SP), X13 \ + MOVUPS (24*8)(SP), X14 \ + MOVUPS (26*8)(SP), X15 \ + ADJSP $-(REGS_HOST_TO_ABI0_STACK - 8) \ + POPFQ + +#else +// SysV ABI + +#define REGS_HOST_TO_ABI0_STACK (6*8) + +// SysV MXCSR matches the Go ABI, so we don't have to set that, +// and Go doesn't modify it, so we don't have to save it. +// Both SysV and Go require DF to be cleared, so that's already clear. +// The SysV and Go frame pointer conventions are compatible. +#define PUSH_REGS_HOST_TO_ABI0() \ + ADJSP $(REGS_HOST_TO_ABI0_STACK) \ + MOVQ BP, (5*8)(SP) \ + LEAQ (5*8)(SP), BP \ + MOVQ BX, (0*8)(SP) \ + MOVQ R12, (1*8)(SP) \ + MOVQ R13, (2*8)(SP) \ + MOVQ R14, (3*8)(SP) \ + MOVQ R15, (4*8)(SP) + +#define POP_REGS_HOST_TO_ABI0() \ + MOVQ (0*8)(SP), BX \ + MOVQ (1*8)(SP), R12 \ + MOVQ (2*8)(SP), R13 \ + MOVQ (3*8)(SP), R14 \ + MOVQ (4*8)(SP), R15 \ + MOVQ (5*8)(SP), BP \ + ADJSP $-(REGS_HOST_TO_ABI0_STACK) + +#endif diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/abi_arm64.h b/vendor/github.com/ebitengine/purego/internal/fakecgo/abi_arm64.h new file mode 100644 index 00000000000..5d5061ec1db --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/abi_arm64.h @@ -0,0 +1,39 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Macros for transitioning from the host ABI to Go ABI0. +// +// These macros save and restore the callee-saved registers +// from the stack, but they don't adjust stack pointer, so +// the user should prepare stack space in advance. +// SAVE_R19_TO_R28(offset) saves R19 ~ R28 to the stack space +// of ((offset)+0*8)(RSP) ~ ((offset)+9*8)(RSP). 
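+// (R28 is the g register on arm64, which is why g is saved and restored
+// alongside R19 ~ R27.)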
+// +// SAVE_F8_TO_F15(offset) saves F8 ~ F15 to the stack space +// of ((offset)+0*8)(RSP) ~ ((offset)+7*8)(RSP). +// +// R29 is not saved because Go will save and restore it. + +#define SAVE_R19_TO_R28(offset) \ + STP (R19, R20), ((offset)+0*8)(RSP) \ + STP (R21, R22), ((offset)+2*8)(RSP) \ + STP (R23, R24), ((offset)+4*8)(RSP) \ + STP (R25, R26), ((offset)+6*8)(RSP) \ + STP (R27, g), ((offset)+8*8)(RSP) +#define RESTORE_R19_TO_R28(offset) \ + LDP ((offset)+0*8)(RSP), (R19, R20) \ + LDP ((offset)+2*8)(RSP), (R21, R22) \ + LDP ((offset)+4*8)(RSP), (R23, R24) \ + LDP ((offset)+6*8)(RSP), (R25, R26) \ + LDP ((offset)+8*8)(RSP), (R27, g) /* R28 */ +#define SAVE_F8_TO_F15(offset) \ + FSTPD (F8, F9), ((offset)+0*8)(RSP) \ + FSTPD (F10, F11), ((offset)+2*8)(RSP) \ + FSTPD (F12, F13), ((offset)+4*8)(RSP) \ + FSTPD (F14, F15), ((offset)+6*8)(RSP) +#define RESTORE_F8_TO_F15(offset) \ + FLDPD ((offset)+0*8)(RSP), (F8, F9) \ + FLDPD ((offset)+2*8)(RSP), (F10, F11) \ + FLDPD ((offset)+4*8)(RSP), (F12, F13) \ + FLDPD ((offset)+6*8)(RSP), (F14, F15) diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/asm_amd64.s b/vendor/github.com/ebitengine/purego/internal/fakecgo/asm_amd64.s new file mode 100644 index 00000000000..2b7eb57f8ae --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/asm_amd64.s @@ -0,0 +1,39 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" +#include "abi_amd64.h" + +// Called by C code generated by cmd/cgo. +// func crosscall2(fn, a unsafe.Pointer, n int32, ctxt uintptr) +// Saves C callee-saved registers and calls cgocallback with three arguments. +// fn is the PC of a func(a unsafe.Pointer) function. +// This signature is known to SWIG, so we can't change it. +TEXT crosscall2(SB), NOSPLIT, $0-0 + PUSH_REGS_HOST_TO_ABI0() + + // Make room for arguments to cgocallback. + ADJSP $0x18 + +#ifndef GOOS_windows + MOVQ DI, 0x0(SP) // fn + MOVQ SI, 0x8(SP) // arg + + // Skip n in DX. + MOVQ CX, 0x10(SP) // ctxt + +#else + MOVQ CX, 0x0(SP) // fn + MOVQ DX, 0x8(SP) // arg + + // Skip n in R8. + MOVQ R9, 0x10(SP) // ctxt + +#endif + + CALL runtime·cgocallback(SB) + + ADJSP $-0x18 + POP_REGS_HOST_TO_ABI0() + RET diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/asm_arm64.s b/vendor/github.com/ebitengine/purego/internal/fakecgo/asm_arm64.s new file mode 100644 index 00000000000..50e5261d922 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/asm_arm64.s @@ -0,0 +1,36 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" +#include "abi_arm64.h" + +// Called by C code generated by cmd/cgo. +// func crosscall2(fn, a unsafe.Pointer, n int32, ctxt uintptr) +// Saves C callee-saved registers and calls cgocallback with three arguments. +// fn is the PC of a func(a unsafe.Pointer) function. +TEXT crosscall2(SB), NOSPLIT|NOFRAME, $0 +/* + * We still need to save all callee save register as before, and then + * push 3 args for fn (R0, R1, R3), skipping R2. + * Also note that at procedure entry in gc world, 8(RSP) will be the + * first arg. 
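+ * R2 carries n, which cgocallback does not need, so it is deliberately
+ * skipped.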
+ */
+	SUB	$(8*24), RSP
+	STP	(R0, R1), (8*1)(RSP)
+	MOVD	R3, (8*3)(RSP)
+
+	SAVE_R19_TO_R28(8*4)
+	SAVE_F8_TO_F15(8*14)
+	STP	(R29, R30), (8*22)(RSP)
+
+	// Initialize Go ABI environment
+	BL	runtime·load_g(SB)
+	BL	runtime·cgocallback(SB)
+
+	RESTORE_R19_TO_R28(8*4)
+	RESTORE_F8_TO_F15(8*14)
+	LDP	(8*22)(RSP), (R29, R30)
+
+	ADD	$(8*24), RSP
+	RET
diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/callbacks.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/callbacks.go
new file mode 100644
index 00000000000..f29e690cc15
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/callbacks.go
@@ -0,0 +1,93 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !cgo && (darwin || freebsd || linux)
+
+package fakecgo
+
+import (
+	_ "unsafe"
+)
+
+// TODO: decide if we need _runtime_cgo_panic_internal
+
+//go:linkname x_cgo_init_trampoline x_cgo_init_trampoline
+//go:linkname _cgo_init _cgo_init
+var x_cgo_init_trampoline byte
+var _cgo_init = &x_cgo_init_trampoline
+
+// Creates a new system thread without updating any Go state.
+//
+// This method is invoked during shared library loading to create a new OS
+// thread to perform the runtime initialization. This method is similar to
+// _cgo_sys_thread_start except that it doesn't update any Go state.
+
+//go:linkname x_cgo_thread_start_trampoline x_cgo_thread_start_trampoline
+//go:linkname _cgo_thread_start _cgo_thread_start
+var x_cgo_thread_start_trampoline byte
+var _cgo_thread_start = &x_cgo_thread_start_trampoline
+
+// Notifies that the runtime has been initialized.
+//
+// We currently block at every CGO entry point (via _cgo_wait_runtime_init_done)
+// to ensure that the runtime has been initialized before the CGO call is
+// executed. This is necessary for shared libraries, where we kick off runtime
+// initialization in a separate thread and return without waiting for this
+// thread to complete the init.
+
+//go:linkname x_cgo_notify_runtime_init_done_trampoline x_cgo_notify_runtime_init_done_trampoline
+//go:linkname _cgo_notify_runtime_init_done _cgo_notify_runtime_init_done
+var x_cgo_notify_runtime_init_done_trampoline byte
+var _cgo_notify_runtime_init_done = &x_cgo_notify_runtime_init_done_trampoline
+
+// Indicates whether a dummy thread key has been created or not.
+//
+// When calling a Go exported function from C, we register a destructor
+// callback for a dummy thread key by using pthread_key_create.
+
+//go:linkname _cgo_pthread_key_created _cgo_pthread_key_created
+var x_cgo_pthread_key_created uintptr
+var _cgo_pthread_key_created = &x_cgo_pthread_key_created
+
+// Set the x_crosscall2_ptr C function pointer variable to point to crosscall2.
+// It's for the runtime package to call at init time.
+func set_crosscall2() {
+	// nothing needs to be done here for fakecgo
+	// because it's possible to just call cgocallback directly
+}
+
+//go:linkname _set_crosscall2 runtime.set_crosscall2
+var _set_crosscall2 = set_crosscall2
+
+// Store the g into the thread-specific value,
+// so that pthread_key_destructor will dropm when the thread is exiting.
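+//
+// Illustrative sketch (editorial, not part of upstream purego): this is the
+// plumbing that lets a callback created with purego.NewCallback be invoked
+// safely from a C-created thread, e.g.:
+//
+//	cb := purego.NewCallback(func(x uintptr) uintptr { return x + 1 })
+//	// cb can now be handed to C code as a function pointer.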
+
+//go:linkname x_cgo_bindm_trampoline x_cgo_bindm_trampoline
+//go:linkname _cgo_bindm _cgo_bindm
+var x_cgo_bindm_trampoline byte
+var _cgo_bindm = &x_cgo_bindm_trampoline
+
+// TODO: decide if we need x_cgo_set_context_function
+// TODO: decide if we need _cgo_yield
+
+var (
+	// In Go 1.20 the race detector was rewritten in pure Go
+	// on darwin. This means that when CGO_ENABLED=0 is set,
+	// fakecgo is built with race detector code. This is not
+	// good since this code is pretending to be C. The go:norace
+	// pragma is not enough, since it only applies to the native
+	// ABIInternal function. The ABI0 wrapper (which is necessary,
+	// since all references to text symbols from assembly will use it)
+	// does not inherit the go:norace pragma, so it will still be
+	// instrumented by the race detector.
+	//
+	// To circumvent this issue, we use closure calls in the
+	// assembly, which force the compiler to use the ABIInternal
+	// native implementation (which has go:norace) instead.
+	threadentry_call        = threadentry
+	x_cgo_init_call         = x_cgo_init
+	x_cgo_setenv_call       = x_cgo_setenv
+	x_cgo_unsetenv_call     = x_cgo_unsetenv
+	x_cgo_thread_start_call = x_cgo_thread_start
+)
diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/doc.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/doc.go
new file mode 100644
index 00000000000..be82f7dfca9
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/doc.go
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+//go:build !cgo && (darwin || freebsd || linux)
+
+// Package fakecgo implements the Cgo runtime (runtime/cgo) entirely in Go.
+// This allows code that calls into C to function properly when CGO_ENABLED=0.
+//
+// # Goals
+//
+// fakecgo attempts to replicate the same naming structure as in the runtime.
+// For example, functions that have the prefix "gcc_*" are named "go_*".
+// This makes it easier to port other GOOSs and GOARCHs as well as to keep
+// it in sync with runtime/cgo.
+//
+// # Support
+//
+// Currently, fakecgo only supports macOS on amd64 & arm64. It also cannot
+// be used with -buildmode=c-archive because that requires special initialization
+// that fakecgo does not implement at the moment.
+//
+// # Usage
+//
+// Using fakecgo is easy: just import _ "github.com/ebitengine/purego" and then
+// set the environment variable CGO_ENABLED=0.
+// The recommended usage is to prefer runtime/cgo if possible, but if
+// cross-compiling or fast build times are important, fakecgo is available.
+// Purego will pick whichever Cgo runtime is available, preferring the one
+// that comes with Go (runtime/cgo).
package fakecgo
+
+//go:generate go run gen.go
diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/freebsd.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/freebsd.go
new file mode 100644
index 00000000000..bb73a709e69
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/freebsd.go
@@ -0,0 +1,27 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build freebsd && !cgo
+
+package fakecgo
+
+import _ "unsafe" // for go:linkname
+
+// Supply environ and __progname, because we don't
+// link against the standard FreeBSD crt0.o and the
+// libc dynamic library needs them.
+ +// Note: when building with cross-compiling or CGO_ENABLED=0, add +// the following argument to `go` so that these symbols are defined by +// making fakecgo the Cgo. +// -gcflags="github.com/ebitengine/purego/internal/fakecgo=-std" + +//go:linkname _environ environ +//go:linkname _progname __progname + +//go:cgo_export_dynamic environ +//go:cgo_export_dynamic __progname + +var _environ uintptr +var _progname uintptr diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_darwin_amd64.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_darwin_amd64.go new file mode 100644 index 00000000000..39f5ff1f06a --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_darwin_amd64.go @@ -0,0 +1,73 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !cgo + +package fakecgo + +import "unsafe" + +//go:nosplit +//go:norace +func _cgo_sys_thread_start(ts *ThreadStart) { + var attr pthread_attr_t + var ign, oset sigset_t + var p pthread_t + var size size_t + var err int + + sigfillset(&ign) + pthread_sigmask(SIG_SETMASK, &ign, &oset) + + size = pthread_get_stacksize_np(pthread_self()) + pthread_attr_init(&attr) + pthread_attr_setstacksize(&attr, size) + // Leave stacklo=0 and set stackhi=size; mstart will do the rest. + ts.g.stackhi = uintptr(size) + + err = _cgo_try_pthread_create(&p, &attr, unsafe.Pointer(threadentry_trampolineABI0), ts) + + pthread_sigmask(SIG_SETMASK, &oset, nil) + + if err != 0 { + print("fakecgo: pthread_create failed: ") + println(err) + abort() + } +} + +// threadentry_trampolineABI0 maps the C ABI to Go ABI then calls the Go function +// +//go:linkname x_threadentry_trampoline threadentry_trampoline +var x_threadentry_trampoline byte +var threadentry_trampolineABI0 = &x_threadentry_trampoline + +//go:nosplit +//go:norace +func threadentry(v unsafe.Pointer) unsafe.Pointer { + ts := *(*ThreadStart)(v) + free(v) + + setg_trampoline(setg_func, uintptr(unsafe.Pointer(ts.g))) + + // faking funcs in go is a bit a... involved - but the following works :) + fn := uintptr(unsafe.Pointer(&ts.fn)) + (*(*func())(unsafe.Pointer(&fn)))() + + return nil +} + +// here we will store a pointer to the provided setg func +var setg_func uintptr + +//go:nosplit +//go:norace +func x_cgo_init(g *G, setg uintptr) { + var size size_t + + setg_func = setg + + size = pthread_get_stacksize_np(pthread_self()) + g.stacklo = uintptr(unsafe.Add(unsafe.Pointer(&size), -size+4096)) +} diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_darwin_arm64.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_darwin_arm64.go new file mode 100644 index 00000000000..d0868f0f790 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_darwin_arm64.go @@ -0,0 +1,88 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !cgo + +package fakecgo + +import "unsafe" + +//go:nosplit +//go:norace +func _cgo_sys_thread_start(ts *ThreadStart) { + var attr pthread_attr_t + var ign, oset sigset_t + var p pthread_t + var size size_t + var err int + + sigfillset(&ign) + pthread_sigmask(SIG_SETMASK, &ign, &oset) + + size = pthread_get_stacksize_np(pthread_self()) + pthread_attr_init(&attr) + pthread_attr_setstacksize(&attr, size) + // Leave stacklo=0 and set stackhi=size; mstart will do the rest. 
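+	// (mstart on the new thread derives the usable stack bounds from
+	// stackhi once it starts executing.)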
+	ts.g.stackhi = uintptr(size)
+
+	err = _cgo_try_pthread_create(&p, &attr, unsafe.Pointer(threadentry_trampolineABI0), ts)
+
+	pthread_sigmask(SIG_SETMASK, &oset, nil)
+
+	if err != 0 {
+		print("fakecgo: pthread_create failed: ")
+		println(err)
+		abort()
+	}
+}
+
+// threadentry_trampolineABI0 maps the C ABI to the Go ABI and then calls the Go function
+//
+//go:linkname x_threadentry_trampoline threadentry_trampoline
+var x_threadentry_trampoline byte
+var threadentry_trampolineABI0 = &x_threadentry_trampoline
+
+//go:nosplit
+//go:norace
+func threadentry(v unsafe.Pointer) unsafe.Pointer {
+	ts := *(*ThreadStart)(v)
+	free(v)
+
+	// TODO: support ios
+	//#if TARGET_OS_IPHONE
+	//	darwin_arm_init_thread_exception_port();
+	//#endif
+	setg_trampoline(setg_func, uintptr(unsafe.Pointer(ts.g)))
+
+	// faking funcs in go is a bit involved - but the following works :)
+	fn := uintptr(unsafe.Pointer(&ts.fn))
+	(*(*func())(unsafe.Pointer(&fn)))()
+
+	return nil
+}
+
+// here we will store a pointer to the provided setg func
+var setg_func uintptr
+
+// x_cgo_init(G *g, void (*setg)(void*)) (runtime/cgo/gcc_linux_amd64.c)
+// This gets called during startup, adjusts stacklo, and provides a pointer to setg_gcc for us.
+// Additionally, if we set _cgo_init to non-null, go won't do its own TLS setup.
+// This function can't be go:systemstack since go is not in a state where the system check would work.
+//
+//go:nosplit
+//go:norace
+func x_cgo_init(g *G, setg uintptr) {
+	var size size_t
+
+	setg_func = setg
+	size = pthread_get_stacksize_np(pthread_self())
+	g.stacklo = uintptr(unsafe.Add(unsafe.Pointer(&size), -size+4096))
+
+	//TODO: support ios
+	//#if TARGET_OS_IPHONE
+	//	darwin_arm_init_mach_exception_handler();
+	//	darwin_arm_init_thread_exception_port();
+	//	init_working_dir();
+	//#endif
+}
diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_freebsd_amd64.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_freebsd_amd64.go
new file mode 100644
index 00000000000..c9ff7156a89
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_freebsd_amd64.go
@@ -0,0 +1,95 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !cgo
+
+package fakecgo
+
+import "unsafe"
+
+//go:nosplit
+func _cgo_sys_thread_start(ts *ThreadStart) {
+	var attr pthread_attr_t
+	var ign, oset sigset_t
+	var p pthread_t
+	var size size_t
+	var err int
+
+	//fprintf(stderr, "runtime/cgo: _cgo_sys_thread_start: fn=%p, g=%p\n", ts->fn, ts->g); // debug
+	sigfillset(&ign)
+	pthread_sigmask(SIG_SETMASK, &ign, &oset)
+
+	pthread_attr_init(&attr)
+	pthread_attr_getstacksize(&attr, &size)
+	// Leave stacklo=0 and set stackhi=size; mstart will do the rest.
+	ts.g.stackhi = uintptr(size)
+
+	err = _cgo_try_pthread_create(&p, &attr, unsafe.Pointer(threadentry_trampolineABI0), ts)
+
+	pthread_sigmask(SIG_SETMASK, &oset, nil)
+
+	if err != 0 {
+		print("fakecgo: pthread_create failed: ")
+		println(err)
+		abort()
+	}
+}
+
+// threadentry_trampolineABI0 maps the C ABI to the Go ABI and then calls the Go function
+//
+//go:linkname x_threadentry_trampoline threadentry_trampoline
+var x_threadentry_trampoline byte
+var threadentry_trampolineABI0 = &x_threadentry_trampoline
+
+//go:nosplit
+func threadentry(v unsafe.Pointer) unsafe.Pointer {
+	ts := *(*ThreadStart)(v)
+	free(v)
+
+	setg_trampoline(setg_func, uintptr(unsafe.Pointer(ts.g)))
+
+	// faking funcs in go is a bit 
involved - but the following works :) + fn := uintptr(unsafe.Pointer(&ts.fn)) + (*(*func())(unsafe.Pointer(&fn)))() + + return nil +} + +// here we will store a pointer to the provided setg func +var setg_func uintptr + +//go:nosplit +func x_cgo_init(g *G, setg uintptr) { + var size size_t + var attr *pthread_attr_t + + /* The memory sanitizer distributed with versions of clang + before 3.8 has a bug: if you call mmap before malloc, mmap + may return an address that is later overwritten by the msan + library. Avoid this problem by forcing a call to malloc + here, before we ever call malloc. + + This is only required for the memory sanitizer, so it's + unfortunate that we always run it. It should be possible + to remove this when we no longer care about versions of + clang before 3.8. The test for this is + misc/cgo/testsanitizers. + + GCC works hard to eliminate a seemingly unnecessary call to + malloc, so we actually use the memory we allocate. */ + + setg_func = setg + attr = (*pthread_attr_t)(malloc(unsafe.Sizeof(*attr))) + if attr == nil { + println("fakecgo: malloc failed") + abort() + } + pthread_attr_init(attr) + pthread_attr_getstacksize(attr, &size) + // runtime/cgo uses __builtin_frame_address(0) instead of `uintptr(unsafe.Pointer(&size))` + // but this should be OK since we are taking the address of the first variable in this function. + g.stacklo = uintptr(unsafe.Pointer(&size)) - uintptr(size) + 4096 + pthread_attr_destroy(attr) + free(unsafe.Pointer(attr)) +} diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_freebsd_arm64.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_freebsd_arm64.go new file mode 100644 index 00000000000..e3a060b9350 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_freebsd_arm64.go @@ -0,0 +1,98 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !cgo + +package fakecgo + +import "unsafe" + +//go:nosplit +func _cgo_sys_thread_start(ts *ThreadStart) { + var attr pthread_attr_t + var ign, oset sigset_t + var p pthread_t + var size size_t + var err int + + // fprintf(stderr, "runtime/cgo: _cgo_sys_thread_start: fn=%p, g=%p\n", ts->fn, ts->g); // debug + sigfillset(&ign) + pthread_sigmask(SIG_SETMASK, &ign, &oset) + + pthread_attr_init(&attr) + pthread_attr_getstacksize(&attr, &size) + // Leave stacklo=0 and set stackhi=size; mstart will do the rest. + ts.g.stackhi = uintptr(size) + + err = _cgo_try_pthread_create(&p, &attr, unsafe.Pointer(threadentry_trampolineABI0), ts) + + pthread_sigmask(SIG_SETMASK, &oset, nil) + + if err != 0 { + print("fakecgo: pthread_create failed: ") + println(err) + abort() + } +} + +// threadentry_trampolineABI0 maps the C ABI to Go ABI then calls the Go function +// +//go:linkname x_threadentry_trampoline threadentry_trampoline +var x_threadentry_trampoline byte +var threadentry_trampolineABI0 = &x_threadentry_trampoline + +//go:nosplit +func threadentry(v unsafe.Pointer) unsafe.Pointer { + ts := *(*ThreadStart)(v) + free(v) + + setg_trampoline(setg_func, uintptr(unsafe.Pointer(ts.g))) + + // faking funcs in go is a bit a... 
involved - but the following works :) + fn := uintptr(unsafe.Pointer(&ts.fn)) + (*(*func())(unsafe.Pointer(&fn)))() + + return nil +} + +// here we will store a pointer to the provided setg func +var setg_func uintptr + +// x_cgo_init(G *g, void (*setg)(void*)) (runtime/cgo/gcc_linux_amd64.c) +// This get's called during startup, adjusts stacklo, and provides a pointer to setg_gcc for us +// Additionally, if we set _cgo_init to non-null, go won't do it's own TLS setup +// This function can't be go:systemstack since go is not in a state where the systemcheck would work. +// +//go:nosplit +func x_cgo_init(g *G, setg uintptr) { + var size size_t + var attr *pthread_attr_t + + /* The memory sanitizer distributed with versions of clang + before 3.8 has a bug: if you call mmap before malloc, mmap + may return an address that is later overwritten by the msan + library. Avoid this problem by forcing a call to malloc + here, before we ever call malloc. + + This is only required for the memory sanitizer, so it's + unfortunate that we always run it. It should be possible + to remove this when we no longer care about versions of + clang before 3.8. The test for this is + misc/cgo/testsanitizers. + + GCC works hard to eliminate a seemingly unnecessary call to + malloc, so we actually use the memory we allocate. */ + + setg_func = setg + attr = (*pthread_attr_t)(malloc(unsafe.Sizeof(*attr))) + if attr == nil { + println("fakecgo: malloc failed") + abort() + } + pthread_attr_init(attr) + pthread_attr_getstacksize(attr, &size) + g.stacklo = uintptr(unsafe.Pointer(&size)) - uintptr(size) + 4096 + pthread_attr_destroy(attr) + free(unsafe.Pointer(attr)) +} diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_libinit.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_libinit.go new file mode 100644 index 00000000000..e5cb46be455 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_libinit.go @@ -0,0 +1,66 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build !cgo && (darwin || freebsd || linux) + +package fakecgo + +import ( + "syscall" + "unsafe" +) + +var ( + pthread_g pthread_key_t + + runtime_init_cond = PTHREAD_COND_INITIALIZER + runtime_init_mu = PTHREAD_MUTEX_INITIALIZER + runtime_init_done int +) + +//go:nosplit +func x_cgo_notify_runtime_init_done() { + pthread_mutex_lock(&runtime_init_mu) + runtime_init_done = 1 + pthread_cond_broadcast(&runtime_init_cond) + pthread_mutex_unlock(&runtime_init_mu) +} + +// Store the g into a thread-specific value associated with the pthread key pthread_g. +// And pthread_key_destructor will dropm when the thread is exiting. +func x_cgo_bindm(g unsafe.Pointer) { + // We assume this will always succeed, otherwise, there might be extra M leaking, + // when a C thread exits after a cgo call. + // We only invoke this function once per thread in runtime.needAndBindM, + // and the next calls just reuse the bound m. + pthread_setspecific(pthread_g, g) +} + +// _cgo_try_pthread_create retries pthread_create if it fails with +// EAGAIN. +// +//go:nosplit +//go:norace +func _cgo_try_pthread_create(thread *pthread_t, attr *pthread_attr_t, pfn unsafe.Pointer, arg *ThreadStart) int { + var ts syscall.Timespec + // tries needs to be the same type as syscall.Timespec.Nsec + // but the fields are int32 on 32bit and int64 on 64bit. + // tries is assigned to syscall.Timespec.Nsec in order to match its type. 
+ tries := ts.Nsec + var err int + + for tries = 0; tries < 20; tries++ { + err = int(pthread_create(thread, attr, pfn, unsafe.Pointer(arg))) + if err == 0 { + pthread_detach(*thread) + return 0 + } + if err != int(syscall.EAGAIN) { + return err + } + ts.Sec = 0 + ts.Nsec = (tries + 1) * 1000 * 1000 // Milliseconds. + nanosleep(&ts, nil) + } + return int(syscall.EAGAIN) +} diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_linux_amd64.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_linux_amd64.go new file mode 100644 index 00000000000..c9ff7156a89 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_linux_amd64.go @@ -0,0 +1,95 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !cgo + +package fakecgo + +import "unsafe" + +//go:nosplit +func _cgo_sys_thread_start(ts *ThreadStart) { + var attr pthread_attr_t + var ign, oset sigset_t + var p pthread_t + var size size_t + var err int + + //fprintf(stderr, "runtime/cgo: _cgo_sys_thread_start: fn=%p, g=%p\n", ts->fn, ts->g); // debug + sigfillset(&ign) + pthread_sigmask(SIG_SETMASK, &ign, &oset) + + pthread_attr_init(&attr) + pthread_attr_getstacksize(&attr, &size) + // Leave stacklo=0 and set stackhi=size; mstart will do the rest. + ts.g.stackhi = uintptr(size) + + err = _cgo_try_pthread_create(&p, &attr, unsafe.Pointer(threadentry_trampolineABI0), ts) + + pthread_sigmask(SIG_SETMASK, &oset, nil) + + if err != 0 { + print("fakecgo: pthread_create failed: ") + println(err) + abort() + } +} + +// threadentry_trampolineABI0 maps the C ABI to Go ABI then calls the Go function +// +//go:linkname x_threadentry_trampoline threadentry_trampoline +var x_threadentry_trampoline byte +var threadentry_trampolineABI0 = &x_threadentry_trampoline + +//go:nosplit +func threadentry(v unsafe.Pointer) unsafe.Pointer { + ts := *(*ThreadStart)(v) + free(v) + + setg_trampoline(setg_func, uintptr(unsafe.Pointer(ts.g))) + + // faking funcs in go is a bit a... involved - but the following works :) + fn := uintptr(unsafe.Pointer(&ts.fn)) + (*(*func())(unsafe.Pointer(&fn)))() + + return nil +} + +// here we will store a pointer to the provided setg func +var setg_func uintptr + +//go:nosplit +func x_cgo_init(g *G, setg uintptr) { + var size size_t + var attr *pthread_attr_t + + /* The memory sanitizer distributed with versions of clang + before 3.8 has a bug: if you call mmap before malloc, mmap + may return an address that is later overwritten by the msan + library. Avoid this problem by forcing a call to malloc + here, before we ever call malloc. + + This is only required for the memory sanitizer, so it's + unfortunate that we always run it. It should be possible + to remove this when we no longer care about versions of + clang before 3.8. The test for this is + misc/cgo/testsanitizers. + + GCC works hard to eliminate a seemingly unnecessary call to + malloc, so we actually use the memory we allocate. */ + + setg_func = setg + attr = (*pthread_attr_t)(malloc(unsafe.Sizeof(*attr))) + if attr == nil { + println("fakecgo: malloc failed") + abort() + } + pthread_attr_init(attr) + pthread_attr_getstacksize(attr, &size) + // runtime/cgo uses __builtin_frame_address(0) instead of `uintptr(unsafe.Pointer(&size))` + // but this should be OK since we are taking the address of the first variable in this function. 
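+	// (The +4096 raises the computed stack floor by one page as a safety margin.)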
+ g.stacklo = uintptr(unsafe.Pointer(&size)) - uintptr(size) + 4096 + pthread_attr_destroy(attr) + free(unsafe.Pointer(attr)) +} diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_linux_arm64.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_linux_arm64.go new file mode 100644 index 00000000000..a3b1cca59a0 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_linux_arm64.go @@ -0,0 +1,98 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !cgo + +package fakecgo + +import "unsafe" + +//go:nosplit +func _cgo_sys_thread_start(ts *ThreadStart) { + var attr pthread_attr_t + var ign, oset sigset_t + var p pthread_t + var size size_t + var err int + + //fprintf(stderr, "runtime/cgo: _cgo_sys_thread_start: fn=%p, g=%p\n", ts->fn, ts->g); // debug + sigfillset(&ign) + pthread_sigmask(SIG_SETMASK, &ign, &oset) + + pthread_attr_init(&attr) + pthread_attr_getstacksize(&attr, &size) + // Leave stacklo=0 and set stackhi=size; mstart will do the rest. + ts.g.stackhi = uintptr(size) + + err = _cgo_try_pthread_create(&p, &attr, unsafe.Pointer(threadentry_trampolineABI0), ts) + + pthread_sigmask(SIG_SETMASK, &oset, nil) + + if err != 0 { + print("fakecgo: pthread_create failed: ") + println(err) + abort() + } +} + +// threadentry_trampolineABI0 maps the C ABI to Go ABI then calls the Go function +// +//go:linkname x_threadentry_trampoline threadentry_trampoline +var x_threadentry_trampoline byte +var threadentry_trampolineABI0 = &x_threadentry_trampoline + +//go:nosplit +func threadentry(v unsafe.Pointer) unsafe.Pointer { + ts := *(*ThreadStart)(v) + free(v) + + setg_trampoline(setg_func, uintptr(unsafe.Pointer(ts.g))) + + // faking funcs in go is a bit a... involved - but the following works :) + fn := uintptr(unsafe.Pointer(&ts.fn)) + (*(*func())(unsafe.Pointer(&fn)))() + + return nil +} + +// here we will store a pointer to the provided setg func +var setg_func uintptr + +// x_cgo_init(G *g, void (*setg)(void*)) (runtime/cgo/gcc_linux_amd64.c) +// This get's called during startup, adjusts stacklo, and provides a pointer to setg_gcc for us +// Additionally, if we set _cgo_init to non-null, go won't do it's own TLS setup +// This function can't be go:systemstack since go is not in a state where the systemcheck would work. +// +//go:nosplit +func x_cgo_init(g *G, setg uintptr) { + var size size_t + var attr *pthread_attr_t + + /* The memory sanitizer distributed with versions of clang + before 3.8 has a bug: if you call mmap before malloc, mmap + may return an address that is later overwritten by the msan + library. Avoid this problem by forcing a call to malloc + here, before we ever call malloc. + + This is only required for the memory sanitizer, so it's + unfortunate that we always run it. It should be possible + to remove this when we no longer care about versions of + clang before 3.8. The test for this is + misc/cgo/testsanitizers. + + GCC works hard to eliminate a seemingly unnecessary call to + malloc, so we actually use the memory we allocate. 
*/
+
+	setg_func = setg
+	attr = (*pthread_attr_t)(malloc(unsafe.Sizeof(*attr)))
+	if attr == nil {
+		println("fakecgo: malloc failed")
+		abort()
+	}
+	pthread_attr_init(attr)
+	pthread_attr_getstacksize(attr, &size)
+	g.stacklo = uintptr(unsafe.Pointer(&size)) - uintptr(size) + 4096
+	pthread_attr_destroy(attr)
+	free(unsafe.Pointer(attr))
+}
diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_setenv.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_setenv.go
new file mode 100644
index 00000000000..e42d84f0b75
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_setenv.go
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+//go:build !cgo && (darwin || freebsd || linux)
+
+package fakecgo
+
+//go:nosplit
+//go:norace
+func x_cgo_setenv(arg *[2]*byte) {
+	setenv(arg[0], arg[1], 1)
+}
+
+//go:nosplit
+//go:norace
+func x_cgo_unsetenv(arg *[1]*byte) {
+	unsetenv(arg[0])
+}
diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/go_util.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_util.go
new file mode 100644
index 00000000000..0ac10d1f157
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/go_util.go
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2022 The Ebitengine Authors
+
+//go:build !cgo && (darwin || freebsd || linux)
+
+package fakecgo
+
+import "unsafe"
+
+// _cgo_thread_start is split into three parts in cgo since only one part is system dependent (keep it here for easier handling)
+
+// _cgo_thread_start(ThreadStart *arg) (runtime/cgo/gcc_util.c)
+// This gets called instead of the Go code for creating new threads
+// -> pthread_* stuff is used, so threads are set up correctly for C.
+// If this is missing, TLS is only set up correctly on thread 1!
+// This function should be go:systemstack instead of go:nosplit (but that requires runtime)
+//
+//go:nosplit
+//go:norace
+func x_cgo_thread_start(arg *ThreadStart) {
+	var ts *ThreadStart
+	// Make our own copy that can persist after we return.
+	// _cgo_tsan_acquire();
+	ts = (*ThreadStart)(malloc(unsafe.Sizeof(*ts)))
+	// _cgo_tsan_release();
+	if ts == nil {
+		println("fakecgo: out of memory in thread_start")
+		abort()
+	}
+	// *ts = *arg would cause a write barrier, so copy using slices instead.
+	s1 := unsafe.Slice((*uintptr)(unsafe.Pointer(ts)), unsafe.Sizeof(*ts)/8)
+	s2 := unsafe.Slice((*uintptr)(unsafe.Pointer(arg)), unsafe.Sizeof(*arg)/8)
+	for i := range s2 {
+		s1[i] = s2[i]
+	}
+	_cgo_sys_thread_start(ts) // OS-dependent half
+}
diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/iscgo.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/iscgo.go
new file mode 100644
index 00000000000..28af41cc640
--- /dev/null
+++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/iscgo.go
@@ -0,0 +1,19 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !cgo && (darwin || freebsd || linux)
+
+// The runtime package contains an uninitialized definition
+// for runtime·iscgo. Override it to tell the runtime we're here.
+// There are various function pointers that should be set too,
+// but those depend on dynamic linker magic to get initialized
+// correctly, and sometimes they break. This variable is a
+// backup: it depends only on old C-style static linking rules.
+ +package fakecgo + +import _ "unsafe" // for go:linkname + +//go:linkname _iscgo runtime.iscgo +var _iscgo bool = true diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo.go new file mode 100644 index 00000000000..74626c64a0e --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo.go @@ -0,0 +1,35 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build !cgo && (darwin || freebsd || linux) + +package fakecgo + +type ( + size_t uintptr + sigset_t [128]byte + pthread_attr_t [64]byte + pthread_t int + pthread_key_t uint64 +) + +// for pthread_sigmask: + +type sighow int32 + +const ( + SIG_BLOCK sighow = 0 + SIG_UNBLOCK sighow = 1 + SIG_SETMASK sighow = 2 +) + +type G struct { + stacklo uintptr + stackhi uintptr +} + +type ThreadStart struct { + g *G + tls *uintptr + fn uintptr +} diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_darwin.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_darwin.go new file mode 100644 index 00000000000..af148333f6d --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_darwin.go @@ -0,0 +1,22 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build !cgo + +package fakecgo + +type ( + pthread_mutex_t struct { + sig int64 + opaque [56]byte + } + pthread_cond_t struct { + sig int64 + opaque [40]byte + } +) + +var ( + PTHREAD_COND_INITIALIZER = pthread_cond_t{sig: 0x3CB0B1BB} + PTHREAD_MUTEX_INITIALIZER = pthread_mutex_t{sig: 0x32AAABA7} +) diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_freebsd.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_freebsd.go new file mode 100644 index 00000000000..ca1f722c939 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_freebsd.go @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build !cgo + +package fakecgo + +type ( + pthread_cond_t uintptr + pthread_mutex_t uintptr +) + +var ( + PTHREAD_COND_INITIALIZER = pthread_cond_t(0) + PTHREAD_MUTEX_INITIALIZER = pthread_mutex_t(0) +) diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_linux.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_linux.go new file mode 100644 index 00000000000..c4b6e9ea5a4 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/libcgo_linux.go @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build !cgo + +package fakecgo + +type ( + pthread_cond_t [48]byte + pthread_mutex_t [48]byte +) + +var ( + PTHREAD_COND_INITIALIZER = pthread_cond_t{} + PTHREAD_MUTEX_INITIALIZER = pthread_mutex_t{} +) diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/setenv.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/setenv.go new file mode 100644 index 00000000000..f30af0e1515 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/setenv.go @@ -0,0 +1,19 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build !cgo && (darwin || freebsd || linux) + +package fakecgo + +import _ "unsafe" // for go:linkname + +//go:linkname x_cgo_setenv_trampoline x_cgo_setenv_trampoline +//go:linkname _cgo_setenv runtime._cgo_setenv +var x_cgo_setenv_trampoline byte +var _cgo_setenv = &x_cgo_setenv_trampoline + +//go:linkname x_cgo_unsetenv_trampoline x_cgo_unsetenv_trampoline +//go:linkname _cgo_unsetenv runtime._cgo_unsetenv +var x_cgo_unsetenv_trampoline byte +var _cgo_unsetenv = &x_cgo_unsetenv_trampoline diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols.go new file mode 100644 index 00000000000..3d19fd822a7 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols.go @@ -0,0 +1,181 @@ +// Code generated by 'go generate' with gen.go. DO NOT EDIT. + +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build !cgo && (darwin || freebsd || linux) + +package fakecgo + +import ( + "syscall" + "unsafe" +) + +// setg_trampoline calls setg with the G provided +func setg_trampoline(setg uintptr, G uintptr) + +// call5 takes fn the C function and 5 arguments and calls the function with those arguments +func call5(fn, a1, a2, a3, a4, a5 uintptr) uintptr + +func malloc(size uintptr) unsafe.Pointer { + ret := call5(mallocABI0, uintptr(size), 0, 0, 0, 0) + // this indirection is to avoid go vet complaining about possible misuse of unsafe.Pointer + return *(*unsafe.Pointer)(unsafe.Pointer(&ret)) +} + +func free(ptr unsafe.Pointer) { + call5(freeABI0, uintptr(ptr), 0, 0, 0, 0) +} + +func setenv(name *byte, value *byte, overwrite int32) int32 { + return int32(call5(setenvABI0, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(value)), uintptr(overwrite), 0, 0)) +} + +func unsetenv(name *byte) int32 { + return int32(call5(unsetenvABI0, uintptr(unsafe.Pointer(name)), 0, 0, 0, 0)) +} + +func sigfillset(set *sigset_t) int32 { + return int32(call5(sigfillsetABI0, uintptr(unsafe.Pointer(set)), 0, 0, 0, 0)) +} + +func nanosleep(ts *syscall.Timespec, rem *syscall.Timespec) int32 { + return int32(call5(nanosleepABI0, uintptr(unsafe.Pointer(ts)), uintptr(unsafe.Pointer(rem)), 0, 0, 0)) +} + +func abort() { + call5(abortABI0, 0, 0, 0, 0, 0) +} + +func pthread_attr_init(attr *pthread_attr_t) int32 { + return int32(call5(pthread_attr_initABI0, uintptr(unsafe.Pointer(attr)), 0, 0, 0, 0)) +} + +func pthread_create(thread *pthread_t, attr *pthread_attr_t, start unsafe.Pointer, arg unsafe.Pointer) int32 { + return int32(call5(pthread_createABI0, uintptr(unsafe.Pointer(thread)), uintptr(unsafe.Pointer(attr)), uintptr(start), uintptr(arg), 0)) +} + +func pthread_detach(thread pthread_t) int32 { + return int32(call5(pthread_detachABI0, uintptr(thread), 0, 0, 0, 0)) +} + +func pthread_sigmask(how sighow, ign *sigset_t, oset *sigset_t) int32 { + return int32(call5(pthread_sigmaskABI0, uintptr(how), uintptr(unsafe.Pointer(ign)), uintptr(unsafe.Pointer(oset)), 0, 0)) +} + +func pthread_self() pthread_t { + return pthread_t(call5(pthread_selfABI0, 0, 0, 0, 0, 0)) +} + +func pthread_get_stacksize_np(thread pthread_t) size_t { + return size_t(call5(pthread_get_stacksize_npABI0, uintptr(thread), 0, 0, 0, 0)) +} + +func pthread_attr_getstacksize(attr *pthread_attr_t, stacksize *size_t) int32 { + return int32(call5(pthread_attr_getstacksizeABI0, uintptr(unsafe.Pointer(attr)), uintptr(unsafe.Pointer(stacksize)), 0, 0, 0)) +} + +func pthread_attr_setstacksize(attr *pthread_attr_t, 
size size_t) int32 { + return int32(call5(pthread_attr_setstacksizeABI0, uintptr(unsafe.Pointer(attr)), uintptr(size), 0, 0, 0)) +} + +func pthread_attr_destroy(attr *pthread_attr_t) int32 { + return int32(call5(pthread_attr_destroyABI0, uintptr(unsafe.Pointer(attr)), 0, 0, 0, 0)) +} + +func pthread_mutex_lock(mutex *pthread_mutex_t) int32 { + return int32(call5(pthread_mutex_lockABI0, uintptr(unsafe.Pointer(mutex)), 0, 0, 0, 0)) +} + +func pthread_mutex_unlock(mutex *pthread_mutex_t) int32 { + return int32(call5(pthread_mutex_unlockABI0, uintptr(unsafe.Pointer(mutex)), 0, 0, 0, 0)) +} + +func pthread_cond_broadcast(cond *pthread_cond_t) int32 { + return int32(call5(pthread_cond_broadcastABI0, uintptr(unsafe.Pointer(cond)), 0, 0, 0, 0)) +} + +func pthread_setspecific(key pthread_key_t, value unsafe.Pointer) int32 { + return int32(call5(pthread_setspecificABI0, uintptr(key), uintptr(value), 0, 0, 0)) +} + +//go:linkname _malloc _malloc +var _malloc uintptr +var mallocABI0 = uintptr(unsafe.Pointer(&_malloc)) + +//go:linkname _free _free +var _free uintptr +var freeABI0 = uintptr(unsafe.Pointer(&_free)) + +//go:linkname _setenv _setenv +var _setenv uintptr +var setenvABI0 = uintptr(unsafe.Pointer(&_setenv)) + +//go:linkname _unsetenv _unsetenv +var _unsetenv uintptr +var unsetenvABI0 = uintptr(unsafe.Pointer(&_unsetenv)) + +//go:linkname _sigfillset _sigfillset +var _sigfillset uintptr +var sigfillsetABI0 = uintptr(unsafe.Pointer(&_sigfillset)) + +//go:linkname _nanosleep _nanosleep +var _nanosleep uintptr +var nanosleepABI0 = uintptr(unsafe.Pointer(&_nanosleep)) + +//go:linkname _abort _abort +var _abort uintptr +var abortABI0 = uintptr(unsafe.Pointer(&_abort)) + +//go:linkname _pthread_attr_init _pthread_attr_init +var _pthread_attr_init uintptr +var pthread_attr_initABI0 = uintptr(unsafe.Pointer(&_pthread_attr_init)) + +//go:linkname _pthread_create _pthread_create +var _pthread_create uintptr +var pthread_createABI0 = uintptr(unsafe.Pointer(&_pthread_create)) + +//go:linkname _pthread_detach _pthread_detach +var _pthread_detach uintptr +var pthread_detachABI0 = uintptr(unsafe.Pointer(&_pthread_detach)) + +//go:linkname _pthread_sigmask _pthread_sigmask +var _pthread_sigmask uintptr +var pthread_sigmaskABI0 = uintptr(unsafe.Pointer(&_pthread_sigmask)) + +//go:linkname _pthread_self _pthread_self +var _pthread_self uintptr +var pthread_selfABI0 = uintptr(unsafe.Pointer(&_pthread_self)) + +//go:linkname _pthread_get_stacksize_np _pthread_get_stacksize_np +var _pthread_get_stacksize_np uintptr +var pthread_get_stacksize_npABI0 = uintptr(unsafe.Pointer(&_pthread_get_stacksize_np)) + +//go:linkname _pthread_attr_getstacksize _pthread_attr_getstacksize +var _pthread_attr_getstacksize uintptr +var pthread_attr_getstacksizeABI0 = uintptr(unsafe.Pointer(&_pthread_attr_getstacksize)) + +//go:linkname _pthread_attr_setstacksize _pthread_attr_setstacksize +var _pthread_attr_setstacksize uintptr +var pthread_attr_setstacksizeABI0 = uintptr(unsafe.Pointer(&_pthread_attr_setstacksize)) + +//go:linkname _pthread_attr_destroy _pthread_attr_destroy +var _pthread_attr_destroy uintptr +var pthread_attr_destroyABI0 = uintptr(unsafe.Pointer(&_pthread_attr_destroy)) + +//go:linkname _pthread_mutex_lock _pthread_mutex_lock +var _pthread_mutex_lock uintptr +var pthread_mutex_lockABI0 = uintptr(unsafe.Pointer(&_pthread_mutex_lock)) + +//go:linkname _pthread_mutex_unlock _pthread_mutex_unlock +var _pthread_mutex_unlock uintptr +var pthread_mutex_unlockABI0 = uintptr(unsafe.Pointer(&_pthread_mutex_unlock)) + 
+//go:linkname _pthread_cond_broadcast _pthread_cond_broadcast +var _pthread_cond_broadcast uintptr +var pthread_cond_broadcastABI0 = uintptr(unsafe.Pointer(&_pthread_cond_broadcast)) + +//go:linkname _pthread_setspecific _pthread_setspecific +var _pthread_setspecific uintptr +var pthread_setspecificABI0 = uintptr(unsafe.Pointer(&_pthread_setspecific)) diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_darwin.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_darwin.go new file mode 100644 index 00000000000..54aaa46285c --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_darwin.go @@ -0,0 +1,29 @@ +// Code generated by 'go generate' with gen.go. DO NOT EDIT. + +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build !cgo + +package fakecgo + +//go:cgo_import_dynamic purego_malloc malloc "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_free free "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_setenv setenv "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_unsetenv unsetenv "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_sigfillset sigfillset "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_nanosleep nanosleep "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_abort abort "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_pthread_attr_init pthread_attr_init "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_pthread_create pthread_create "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_pthread_detach pthread_detach "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_pthread_sigmask pthread_sigmask "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_pthread_self pthread_self "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_pthread_get_stacksize_np pthread_get_stacksize_np "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_pthread_attr_getstacksize pthread_attr_getstacksize "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_pthread_attr_setstacksize pthread_attr_setstacksize "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_pthread_attr_destroy pthread_attr_destroy "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_pthread_mutex_lock pthread_mutex_lock "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_pthread_mutex_unlock pthread_mutex_unlock "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_pthread_cond_broadcast pthread_cond_broadcast "/usr/lib/libSystem.B.dylib" +//go:cgo_import_dynamic purego_pthread_setspecific pthread_setspecific "/usr/lib/libSystem.B.dylib" diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_freebsd.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_freebsd.go new file mode 100644 index 00000000000..81538119799 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_freebsd.go @@ -0,0 +1,29 @@ +// Code generated by 'go generate' with gen.go. DO NOT EDIT. 
+ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build !cgo + +package fakecgo + +//go:cgo_import_dynamic purego_malloc malloc "libc.so.7" +//go:cgo_import_dynamic purego_free free "libc.so.7" +//go:cgo_import_dynamic purego_setenv setenv "libc.so.7" +//go:cgo_import_dynamic purego_unsetenv unsetenv "libc.so.7" +//go:cgo_import_dynamic purego_sigfillset sigfillset "libc.so.7" +//go:cgo_import_dynamic purego_nanosleep nanosleep "libc.so.7" +//go:cgo_import_dynamic purego_abort abort "libc.so.7" +//go:cgo_import_dynamic purego_pthread_attr_init pthread_attr_init "libpthread.so" +//go:cgo_import_dynamic purego_pthread_create pthread_create "libpthread.so" +//go:cgo_import_dynamic purego_pthread_detach pthread_detach "libpthread.so" +//go:cgo_import_dynamic purego_pthread_sigmask pthread_sigmask "libpthread.so" +//go:cgo_import_dynamic purego_pthread_self pthread_self "libpthread.so" +//go:cgo_import_dynamic purego_pthread_get_stacksize_np pthread_get_stacksize_np "libpthread.so" +//go:cgo_import_dynamic purego_pthread_attr_getstacksize pthread_attr_getstacksize "libpthread.so" +//go:cgo_import_dynamic purego_pthread_attr_setstacksize pthread_attr_setstacksize "libpthread.so" +//go:cgo_import_dynamic purego_pthread_attr_destroy pthread_attr_destroy "libpthread.so" +//go:cgo_import_dynamic purego_pthread_mutex_lock pthread_mutex_lock "libpthread.so" +//go:cgo_import_dynamic purego_pthread_mutex_unlock pthread_mutex_unlock "libpthread.so" +//go:cgo_import_dynamic purego_pthread_cond_broadcast pthread_cond_broadcast "libpthread.so" +//go:cgo_import_dynamic purego_pthread_setspecific pthread_setspecific "libpthread.so" diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_linux.go b/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_linux.go new file mode 100644 index 00000000000..180057d0156 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/symbols_linux.go @@ -0,0 +1,29 @@ +// Code generated by 'go generate' with gen.go. DO NOT EDIT. 
+ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build !cgo + +package fakecgo + +//go:cgo_import_dynamic purego_malloc malloc "libc.so.6" +//go:cgo_import_dynamic purego_free free "libc.so.6" +//go:cgo_import_dynamic purego_setenv setenv "libc.so.6" +//go:cgo_import_dynamic purego_unsetenv unsetenv "libc.so.6" +//go:cgo_import_dynamic purego_sigfillset sigfillset "libc.so.6" +//go:cgo_import_dynamic purego_nanosleep nanosleep "libc.so.6" +//go:cgo_import_dynamic purego_abort abort "libc.so.6" +//go:cgo_import_dynamic purego_pthread_attr_init pthread_attr_init "libpthread.so.0" +//go:cgo_import_dynamic purego_pthread_create pthread_create "libpthread.so.0" +//go:cgo_import_dynamic purego_pthread_detach pthread_detach "libpthread.so.0" +//go:cgo_import_dynamic purego_pthread_sigmask pthread_sigmask "libpthread.so.0" +//go:cgo_import_dynamic purego_pthread_self pthread_self "libpthread.so.0" +//go:cgo_import_dynamic purego_pthread_get_stacksize_np pthread_get_stacksize_np "libpthread.so.0" +//go:cgo_import_dynamic purego_pthread_attr_getstacksize pthread_attr_getstacksize "libpthread.so.0" +//go:cgo_import_dynamic purego_pthread_attr_setstacksize pthread_attr_setstacksize "libpthread.so.0" +//go:cgo_import_dynamic purego_pthread_attr_destroy pthread_attr_destroy "libpthread.so.0" +//go:cgo_import_dynamic purego_pthread_mutex_lock pthread_mutex_lock "libpthread.so.0" +//go:cgo_import_dynamic purego_pthread_mutex_unlock pthread_mutex_unlock "libpthread.so.0" +//go:cgo_import_dynamic purego_pthread_cond_broadcast pthread_cond_broadcast "libpthread.so.0" +//go:cgo_import_dynamic purego_pthread_setspecific pthread_setspecific "libpthread.so.0" diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_amd64.s b/vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_amd64.s new file mode 100644 index 00000000000..c9a3cc09eb3 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_amd64.s @@ -0,0 +1,104 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build !cgo && (darwin || linux || freebsd) + +/* +trampoline for emulating required C functions for cgo in go (see cgo.go) +(we convert cdecl calling convention to go and vice-versa) + +Since we're called from go and call into C we can cheat a bit with the calling conventions: + - in go all the registers are caller saved + - in C we have a couple of callee saved registers + +=> we can use BX, R12, R13, R14, R15 instead of the stack + +C Calling convention cdecl used here (we only need integer args): +1. arg: DI +2. arg: SI +3. arg: DX +4. arg: CX +5. arg: R8 +6. arg: R9 +We don't need floats with these functions -> AX=0 +return value will be in AX +*/ +#include "textflag.h" +#include "go_asm.h" + +// these trampolines map the gcc ABI to Go ABI and then calls into the Go equivalent functions. 
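+// (On amd64 the C arguments arrive in DI/SI per the SysV ABI and are moved
+// to AX/BX, the first two integer argument registers of Go's internal ABI;
+// the Go func value is loaded into DX, the closure-context register, and
+// its code pointer is then called via CX.)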
+ +TEXT x_cgo_init_trampoline(SB), NOSPLIT, $16 + MOVQ DI, AX + MOVQ SI, BX + MOVQ ·x_cgo_init_call(SB), DX + MOVQ (DX), CX + CALL CX + RET + +TEXT x_cgo_thread_start_trampoline(SB), NOSPLIT, $8 + MOVQ DI, AX + MOVQ ·x_cgo_thread_start_call(SB), DX + MOVQ (DX), CX + CALL CX + RET + +TEXT x_cgo_setenv_trampoline(SB), NOSPLIT, $8 + MOVQ DI, AX + MOVQ ·x_cgo_setenv_call(SB), DX + MOVQ (DX), CX + CALL CX + RET + +TEXT x_cgo_unsetenv_trampoline(SB), NOSPLIT, $8 + MOVQ DI, AX + MOVQ ·x_cgo_unsetenv_call(SB), DX + MOVQ (DX), CX + CALL CX + RET + +TEXT x_cgo_notify_runtime_init_done_trampoline(SB), NOSPLIT, $0 + CALL ·x_cgo_notify_runtime_init_done(SB) + RET + +TEXT x_cgo_bindm_trampoline(SB), NOSPLIT, $0 + CALL ·x_cgo_bindm(SB) + RET + +// func setg_trampoline(setg uintptr, g uintptr) +TEXT ·setg_trampoline(SB), NOSPLIT, $0-16 + MOVQ G+8(FP), DI + MOVQ setg+0(FP), BX + XORL AX, AX + CALL BX + RET + +TEXT threadentry_trampoline(SB), NOSPLIT, $16 + MOVQ DI, AX + MOVQ ·threadentry_call(SB), DX + MOVQ (DX), CX + CALL CX + RET + +TEXT ·call5(SB), NOSPLIT, $0-56 + MOVQ fn+0(FP), BX + MOVQ a1+8(FP), DI + MOVQ a2+16(FP), SI + MOVQ a3+24(FP), DX + MOVQ a4+32(FP), CX + MOVQ a5+40(FP), R8 + + XORL AX, AX // no floats + + PUSHQ BP // save BP + MOVQ SP, BP // save SP inside BP bc BP is callee-saved + SUBQ $16, SP // allocate space for alignment + ANDQ $-16, SP // align on 16 bytes for SSE + + CALL BX + + MOVQ BP, SP // get SP back + POPQ BP // restore BP + + MOVQ AX, ret+48(FP) + RET diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_arm64.s b/vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_arm64.s new file mode 100644 index 00000000000..9dbdbc0139d --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_arm64.s @@ -0,0 +1,72 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build !cgo && (darwin || freebsd || linux) + +#include "textflag.h" +#include "go_asm.h" + +// these trampolines map the gcc ABI to Go ABI and then calls into the Go equivalent functions. 
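+//
+// As on amd64, ·x_cgo_init_call and friends hold a Go *funcval: the closure
+// pointer goes into R26 (the arm64 closure-context register) and the code
+// pointer is fetched from it before the indirect CALL. The incoming AAPCS64
+// arguments in R0/R1 are spilled to the stack argument slots at 8(RSP) and
+// 16(RSP) for the callee.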
+ +TEXT x_cgo_init_trampoline(SB), NOSPLIT, $0-0 + MOVD R0, 8(RSP) + MOVD R1, 16(RSP) + MOVD ·x_cgo_init_call(SB), R26 + MOVD (R26), R2 + CALL (R2) + RET + +TEXT x_cgo_thread_start_trampoline(SB), NOSPLIT, $0-0 + MOVD R0, 8(RSP) + MOVD ·x_cgo_thread_start_call(SB), R26 + MOVD (R26), R2 + CALL (R2) + RET + +TEXT x_cgo_setenv_trampoline(SB), NOSPLIT, $0-0 + MOVD R0, 8(RSP) + MOVD ·x_cgo_setenv_call(SB), R26 + MOVD (R26), R2 + CALL (R2) + RET + +TEXT x_cgo_unsetenv_trampoline(SB), NOSPLIT, $0-0 + MOVD R0, 8(RSP) + MOVD ·x_cgo_unsetenv_call(SB), R26 + MOVD (R26), R2 + CALL (R2) + RET + +TEXT x_cgo_notify_runtime_init_done_trampoline(SB), NOSPLIT, $0-0 + CALL ·x_cgo_notify_runtime_init_done(SB) + RET + +TEXT x_cgo_bindm_trampoline(SB), NOSPLIT, $0 + CALL ·x_cgo_bindm(SB) + RET + +// func setg_trampoline(setg uintptr, g uintptr) +TEXT ·setg_trampoline(SB), NOSPLIT, $0-16 + MOVD G+8(FP), R0 + MOVD setg+0(FP), R1 + CALL R1 + RET + +TEXT threadentry_trampoline(SB), NOSPLIT, $0-0 + MOVD R0, 8(RSP) + MOVD ·threadentry_call(SB), R26 + MOVD (R26), R2 + CALL (R2) + MOVD $0, R0 // TODO: get the return value from threadentry + RET + +TEXT ·call5(SB), NOSPLIT, $0-0 + MOVD fn+0(FP), R6 + MOVD a1+8(FP), R0 + MOVD a2+16(FP), R1 + MOVD a3+24(FP), R2 + MOVD a4+32(FP), R3 + MOVD a5+40(FP), R4 + CALL R6 + MOVD R0, ret+48(FP) + RET diff --git a/vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_stubs.s b/vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_stubs.s new file mode 100644 index 00000000000..a65b2012c1b --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/fakecgo/trampolines_stubs.s @@ -0,0 +1,90 @@ +// Code generated by 'go generate' with gen.go. DO NOT EDIT. + +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build !cgo && (darwin || freebsd || linux) + +#include "textflag.h" + +// these stubs are here because it is not possible to go:linkname directly the C functions on darwin arm64 + +TEXT _malloc(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_malloc(SB) + RET + +TEXT _free(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_free(SB) + RET + +TEXT _setenv(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_setenv(SB) + RET + +TEXT _unsetenv(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_unsetenv(SB) + RET + +TEXT _sigfillset(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_sigfillset(SB) + RET + +TEXT _nanosleep(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_nanosleep(SB) + RET + +TEXT _abort(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_abort(SB) + RET + +TEXT _pthread_attr_init(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_pthread_attr_init(SB) + RET + +TEXT _pthread_create(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_pthread_create(SB) + RET + +TEXT _pthread_detach(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_pthread_detach(SB) + RET + +TEXT _pthread_sigmask(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_pthread_sigmask(SB) + RET + +TEXT _pthread_self(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_pthread_self(SB) + RET + +TEXT _pthread_get_stacksize_np(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_pthread_get_stacksize_np(SB) + RET + +TEXT _pthread_attr_getstacksize(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_pthread_attr_getstacksize(SB) + RET + +TEXT _pthread_attr_setstacksize(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_pthread_attr_setstacksize(SB) + RET + +TEXT _pthread_attr_destroy(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_pthread_attr_destroy(SB) + RET + +TEXT _pthread_mutex_lock(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_pthread_mutex_lock(SB) + RET + +TEXT _pthread_mutex_unlock(SB), NOSPLIT|NOFRAME, 
$0-0 + JMP purego_pthread_mutex_unlock(SB) + RET + +TEXT _pthread_cond_broadcast(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_pthread_cond_broadcast(SB) + RET + +TEXT _pthread_setspecific(SB), NOSPLIT|NOFRAME, $0-0 + JMP purego_pthread_setspecific(SB) + RET diff --git a/vendor/github.com/ebitengine/purego/internal/strings/strings.go b/vendor/github.com/ebitengine/purego/internal/strings/strings.go new file mode 100644 index 00000000000..5b0d2522554 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/internal/strings/strings.go @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +package strings + +import ( + "unsafe" +) + +// hasSuffix tests whether the string s ends with suffix. +func hasSuffix(s, suffix string) bool { + return len(s) >= len(suffix) && s[len(s)-len(suffix):] == suffix +} + +// CString converts a go string to *byte that can be passed to C code. +func CString(name string) *byte { + if hasSuffix(name, "\x00") { + return &(*(*[]byte)(unsafe.Pointer(&name)))[0] + } + b := make([]byte, len(name)+1) + copy(b, name) + return &b[0] +} + +// GoString copies a null-terminated char* to a Go string. +func GoString(c uintptr) string { + // We take the address and then dereference it to trick go vet from creating a possible misuse of unsafe.Pointer + ptr := *(*unsafe.Pointer)(unsafe.Pointer(&c)) + if ptr == nil { + return "" + } + var length int + for { + if *(*byte)(unsafe.Add(ptr, uintptr(length))) == '\x00' { + break + } + length++ + } + return string(unsafe.Slice((*byte)(ptr), length)) +} diff --git a/vendor/github.com/ebitengine/purego/is_ios.go b/vendor/github.com/ebitengine/purego/is_ios.go new file mode 100644 index 00000000000..ed31da97824 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/is_ios.go @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build !cgo + +package purego + +// if you are getting this error it means that you have +// CGO_ENABLED=0 while trying to build for ios. +// purego does not support this mode yet. +// the fix is to set CGO_ENABLED=1 which will require +// a C compiler. +var _ = _PUREGO_REQUIRES_CGO_ON_IOS diff --git a/vendor/github.com/ebitengine/purego/nocgo.go b/vendor/github.com/ebitengine/purego/nocgo.go new file mode 100644 index 00000000000..5b989ea814e --- /dev/null +++ b/vendor/github.com/ebitengine/purego/nocgo.go @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build !cgo && (darwin || freebsd || linux) + +package purego + +// if CGO_ENABLED=0 import fakecgo to setup the Cgo runtime correctly. +// This is required since some frameworks need TLS setup the C way which Go doesn't do. +// We currently don't support ios in fakecgo mode so force Cgo or fail +// +// The way that the Cgo runtime (runtime/cgo) works is by setting some variables found +// in runtime with non-null GCC compiled functions. 
The variables that are replaced are +// var ( +// iscgo bool // in runtime/cgo.go +// _cgo_init unsafe.Pointer // in runtime/cgo.go +// _cgo_thread_start unsafe.Pointer // in runtime/cgo.go +// _cgo_notify_runtime_init_done unsafe.Pointer // in runtime/cgo.go +// _cgo_setenv unsafe.Pointer // in runtime/env_posix.go +// _cgo_unsetenv unsafe.Pointer // in runtime/env_posix.go +// ) +// importing fakecgo will set these (using //go:linkname) with functions written +// entirely in Go (except for some assembly trampolines to change GCC ABI to Go ABI). +// Doing so makes it possible to build applications that call into C without CGO_ENABLED=1. +import _ "github.com/ebitengine/purego/internal/fakecgo" diff --git a/vendor/github.com/ebitengine/purego/struct_amd64.go b/vendor/github.com/ebitengine/purego/struct_amd64.go new file mode 100644 index 00000000000..f3514c984ef --- /dev/null +++ b/vendor/github.com/ebitengine/purego/struct_amd64.go @@ -0,0 +1,260 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2024 The Ebitengine Authors + +package purego + +import ( + "math" + "reflect" + "unsafe" +) + +func getStruct(outType reflect.Type, syscall syscall15Args) (v reflect.Value) { + outSize := outType.Size() + switch { + case outSize == 0: + return reflect.New(outType).Elem() + case outSize <= 8: + if isAllFloats(outType) { + // 2 float32s or 1 float64s are return in the float register + return reflect.NewAt(outType, unsafe.Pointer(&struct{ a uintptr }{syscall.f1})).Elem() + } + // up to 8 bytes is returned in RAX + return reflect.NewAt(outType, unsafe.Pointer(&struct{ a uintptr }{syscall.a1})).Elem() + case outSize <= 16: + r1, r2 := syscall.a1, syscall.a2 + if isAllFloats(outType) { + r1 = syscall.f1 + r2 = syscall.f2 + } else { + // check first 8 bytes if it's floats + hasFirstFloat := false + f1 := outType.Field(0).Type + if f1.Kind() == reflect.Float64 || f1.Kind() == reflect.Float32 && outType.Field(1).Type.Kind() == reflect.Float32 { + r1 = syscall.f1 + hasFirstFloat = true + } + + // find index of the field that starts the second 8 bytes + var i int + for i = 0; i < outType.NumField(); i++ { + if outType.Field(i).Offset == 8 { + break + } + } + + // check last 8 bytes if they are floats + f1 = outType.Field(i).Type + if f1.Kind() == reflect.Float64 || f1.Kind() == reflect.Float32 && i+1 == outType.NumField() { + r2 = syscall.f1 + } else if hasFirstFloat { + // if the first field was a float then that means the second integer field + // comes from the first integer register + r2 = syscall.a1 + } + } + return reflect.NewAt(outType, unsafe.Pointer(&struct{ a, b uintptr }{r1, r2})).Elem() + default: + // create struct from the Go pointer created above + // weird pointer dereference to circumvent go vet + return reflect.NewAt(outType, *(*unsafe.Pointer)(unsafe.Pointer(&syscall.a1))).Elem() + } +} + +func isAllFloats(ty reflect.Type) bool { + for i := 0; i < ty.NumField(); i++ { + f := ty.Field(i) + switch f.Type.Kind() { + case reflect.Float64, reflect.Float32: + default: + return false + } + } + return true +} + +// https://refspecs.linuxbase.org/elf/x86_64-abi-0.99.pdf +// https://gitlab.com/x86-psABIs/x86-64-ABI +// Class determines where the 8 byte value goes. 
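+// Classes of fields sharing an eightbyte are merged with OR; for example,
+// struct{ X int32; Y float32 } merges _INTEGER|_SSE = _INTEGER, so the pair
+// travels in a single integer register.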
+// Higher value classes win over lower value classes +const ( + _NO_CLASS = 0b0000 + _SSE = 0b0001 + _X87 = 0b0011 // long double not used in Go + _INTEGER = 0b0111 + _MEMORY = 0b1111 +) + +func addStruct(v reflect.Value, numInts, numFloats, numStack *int, addInt, addFloat, addStack func(uintptr), keepAlive []interface{}) []interface{} { + if v.Type().Size() == 0 { + return keepAlive + } + + // if greater than 64 bytes place on stack + if v.Type().Size() > 8*8 { + placeStack(v, addStack) + return keepAlive + } + var ( + savedNumFloats = *numFloats + savedNumInts = *numInts + savedNumStack = *numStack + ) + placeOnStack := postMerger(v.Type()) || !tryPlaceRegister(v, addFloat, addInt) + if placeOnStack { + // reset any values placed in registers + *numFloats = savedNumFloats + *numInts = savedNumInts + *numStack = savedNumStack + placeStack(v, addStack) + } + return keepAlive +} + +func postMerger(t reflect.Type) (passInMemory bool) { + // (c) If the size of the aggregate exceeds two eightbytes and the first eight- byte isn’t SSE or any other + // eightbyte isn’t SSEUP, the whole argument is passed in memory. + if t.Kind() != reflect.Struct { + return false + } + if t.Size() <= 2*8 { + return false + } + return true // Go does not have an SSE/SEEUP type so this is always true +} + +func tryPlaceRegister(v reflect.Value, addFloat func(uintptr), addInt func(uintptr)) (ok bool) { + ok = true + var val uint64 + var shift byte // # of bits to shift + var flushed bool + class := _NO_CLASS + flushIfNeeded := func() { + if flushed { + return + } + flushed = true + if class == _SSE { + addFloat(uintptr(val)) + } else { + addInt(uintptr(val)) + } + val = 0 + shift = 0 + class = _NO_CLASS + } + var place func(v reflect.Value) + place = func(v reflect.Value) { + var numFields int + if v.Kind() == reflect.Struct { + numFields = v.Type().NumField() + } else { + numFields = v.Type().Len() + } + + for i := 0; i < numFields; i++ { + flushed = false + var f reflect.Value + if v.Kind() == reflect.Struct { + f = v.Field(i) + } else { + f = v.Index(i) + } + switch f.Kind() { + case reflect.Struct: + place(f) + case reflect.Bool: + if f.Bool() { + val |= 1 + } + shift += 8 + class |= _INTEGER + case reflect.Pointer: + ok = false + return + case reflect.Int8: + val |= uint64(f.Int()&0xFF) << shift + shift += 8 + class |= _INTEGER + case reflect.Int16: + val |= uint64(f.Int()&0xFFFF) << shift + shift += 16 + class |= _INTEGER + case reflect.Int32: + val |= uint64(f.Int()&0xFFFF_FFFF) << shift + shift += 32 + class |= _INTEGER + case reflect.Int64, reflect.Int: + val = uint64(f.Int()) + shift = 64 + class = _INTEGER + case reflect.Uint8: + val |= f.Uint() << shift + shift += 8 + class |= _INTEGER + case reflect.Uint16: + val |= f.Uint() << shift + shift += 16 + class |= _INTEGER + case reflect.Uint32: + val |= f.Uint() << shift + shift += 32 + class |= _INTEGER + case reflect.Uint64, reflect.Uint: + val = f.Uint() + shift = 64 + class = _INTEGER + case reflect.Float32: + val |= uint64(math.Float32bits(float32(f.Float()))) << shift + shift += 32 + class |= _SSE + case reflect.Float64: + if v.Type().Size() > 16 { + ok = false + return + } + val = uint64(math.Float64bits(f.Float())) + shift = 64 + class = _SSE + case reflect.Array: + place(f) + default: + panic("purego: unsupported kind " + f.Kind().String()) + } + + if shift == 64 { + flushIfNeeded() + } else if shift > 64 { + // Should never happen, but may if we forget to reset shift after flush (or forget to flush), + // better fall apart here, than corrupt 
arguments. + panic("purego: tryPlaceRegisters shift > 64") + } + } + } + + place(v) + flushIfNeeded() + return ok +} + +func placeStack(v reflect.Value, addStack func(uintptr)) { + for i := 0; i < v.Type().NumField(); i++ { + f := v.Field(i) + switch f.Kind() { + case reflect.Pointer: + addStack(f.Pointer()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + addStack(uintptr(f.Int())) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + addStack(uintptr(f.Uint())) + case reflect.Float32: + addStack(uintptr(math.Float32bits(float32(f.Float())))) + case reflect.Float64: + addStack(uintptr(math.Float64bits(f.Float()))) + case reflect.Struct: + placeStack(f, addStack) + default: + panic("purego: unsupported kind " + f.Kind().String()) + } + } +} diff --git a/vendor/github.com/ebitengine/purego/struct_arm64.go b/vendor/github.com/ebitengine/purego/struct_arm64.go new file mode 100644 index 00000000000..11c36bd6e47 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/struct_arm64.go @@ -0,0 +1,274 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2024 The Ebitengine Authors + +package purego + +import ( + "math" + "reflect" + "unsafe" +) + +func getStruct(outType reflect.Type, syscall syscall15Args) (v reflect.Value) { + outSize := outType.Size() + switch { + case outSize == 0: + return reflect.New(outType).Elem() + case outSize <= 8: + r1 := syscall.a1 + if isAllFloats, numFields := isAllSameFloat(outType); isAllFloats { + r1 = syscall.f1 + if numFields == 2 { + r1 = syscall.f2<<32 | syscall.f1 + } + } + return reflect.NewAt(outType, unsafe.Pointer(&struct{ a uintptr }{r1})).Elem() + case outSize <= 16: + r1, r2 := syscall.a1, syscall.a2 + if isAllFloats, numFields := isAllSameFloat(outType); isAllFloats { + switch numFields { + case 4: + r1 = syscall.f2<<32 | syscall.f1 + r2 = syscall.f4<<32 | syscall.f3 + case 3: + r1 = syscall.f2<<32 | syscall.f1 + r2 = syscall.f3 + case 2: + r1 = syscall.f1 + r2 = syscall.f2 + default: + panic("unreachable") + } + } + return reflect.NewAt(outType, unsafe.Pointer(&struct{ a, b uintptr }{r1, r2})).Elem() + default: + if isAllFloats, numFields := isAllSameFloat(outType); isAllFloats && numFields <= 4 { + switch numFields { + case 4: + return reflect.NewAt(outType, unsafe.Pointer(&struct{ a, b, c, d uintptr }{syscall.f1, syscall.f2, syscall.f3, syscall.f4})).Elem() + case 3: + return reflect.NewAt(outType, unsafe.Pointer(&struct{ a, b, c uintptr }{syscall.f1, syscall.f2, syscall.f3})).Elem() + default: + panic("unreachable") + } + } + // create struct from the Go pointer created in arm64_r8 + // weird pointer dereference to circumvent go vet + return reflect.NewAt(outType, *(*unsafe.Pointer)(unsafe.Pointer(&syscall.arm64_r8))).Elem() + } +} + +// https://github.com/ARM-software/abi-aa/blob/main/sysvabi64/sysvabi64.rst +const ( + _NO_CLASS = 0b00 + _FLOAT = 0b01 + _INT = 0b11 +) + +func addStruct(v reflect.Value, numInts, numFloats, numStack *int, addInt, addFloat, addStack func(uintptr), keepAlive []interface{}) []interface{} { + if v.Type().Size() == 0 { + return keepAlive + } + + if hva, hfa, size := isHVA(v.Type()), isHFA(v.Type()), v.Type().Size(); hva || hfa || size <= 16 { + // if this doesn't fit entirely in registers then + // each element goes onto the stack + if hfa && *numFloats+v.NumField() > numOfFloats { + *numFloats = numOfFloats + } else if hva && *numInts+v.NumField() > numOfIntegerRegisters() { + *numInts = numOfIntegerRegisters() + } + + placeRegisters(v, 
addFloat, addInt) + } else { + keepAlive = placeStack(v, keepAlive, addInt) + } + return keepAlive // the struct was allocated so don't panic +} + +func placeRegisters(v reflect.Value, addFloat func(uintptr), addInt func(uintptr)) { + var val uint64 + var shift byte + var flushed bool + class := _NO_CLASS + var place func(v reflect.Value) + place = func(v reflect.Value) { + var numFields int + if v.Kind() == reflect.Struct { + numFields = v.Type().NumField() + } else { + numFields = v.Type().Len() + } + for k := 0; k < numFields; k++ { + flushed = false + var f reflect.Value + if v.Kind() == reflect.Struct { + f = v.Field(k) + } else { + f = v.Index(k) + } + if shift >= 64 { + shift = 0 + flushed = true + if class == _FLOAT { + addFloat(uintptr(val)) + } else { + addInt(uintptr(val)) + } + } + switch f.Type().Kind() { + case reflect.Struct: + place(f) + case reflect.Bool: + if f.Bool() { + val |= 1 + } + shift += 8 + class |= _INT + case reflect.Uint8: + val |= f.Uint() << shift + shift += 8 + class |= _INT + case reflect.Uint16: + val |= f.Uint() << shift + shift += 16 + class |= _INT + case reflect.Uint32: + val |= f.Uint() << shift + shift += 32 + class |= _INT + case reflect.Uint64: + addInt(uintptr(f.Uint())) + shift = 0 + flushed = true + case reflect.Int8: + val |= uint64(f.Int()&0xFF) << shift + shift += 8 + class |= _INT + case reflect.Int16: + val |= uint64(f.Int()&0xFFFF) << shift + shift += 16 + class |= _INT + case reflect.Int32: + val |= uint64(f.Int()&0xFFFF_FFFF) << shift + shift += 32 + class |= _INT + case reflect.Int64: + addInt(uintptr(f.Int())) + shift = 0 + flushed = true + case reflect.Float32: + if class == _FLOAT { + addFloat(uintptr(val)) + val = 0 + shift = 0 + } + val |= uint64(math.Float32bits(float32(f.Float()))) << shift + shift += 32 + class |= _FLOAT + case reflect.Float64: + addFloat(uintptr(math.Float64bits(float64(f.Float())))) + shift = 0 + flushed = true + case reflect.Array: + place(f) + default: + panic("purego: unsupported kind " + f.Kind().String()) + } + } + } + place(v) + if !flushed { + if class == _FLOAT { + addFloat(uintptr(val)) + } else { + addInt(uintptr(val)) + } + } +} + +func placeStack(v reflect.Value, keepAlive []interface{}, addInt func(uintptr)) []interface{} { + // Struct is too big to be placed in registers. + // Copy to heap and place the pointer in register + ptrStruct := reflect.New(v.Type()) + ptrStruct.Elem().Set(v) + ptr := ptrStruct.Elem().Addr().UnsafePointer() + keepAlive = append(keepAlive, ptr) + addInt(uintptr(ptr)) + return keepAlive +} + +// isHFA reports a Homogeneous Floating-point Aggregate (HFA) which is a Fundamental Data Type that is a +// Floating-Point type and at most four uniquely addressable members (5.9.5.1 in [Arm64 Calling Convention]). +// This type of struct will be placed more compactly than the individual fields. 
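+// For instance, struct{ X, Y, Z float64 } is an HFA: no more than four
+// fields, all of the same floating-point kind.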
+// +// [Arm64 Calling Convention]: https://github.com/ARM-software/abi-aa/blob/main/sysvabi64/sysvabi64.rst +func isHFA(t reflect.Type) bool { + // round up struct size to nearest 8 see section B.4 + structSize := roundUpTo8(t.Size()) + if structSize == 0 || t.NumField() > 4 { + return false + } + first := t.Field(0) + switch first.Type.Kind() { + case reflect.Float32, reflect.Float64: + firstKind := first.Type.Kind() + for i := 0; i < t.NumField(); i++ { + if t.Field(i).Type.Kind() != firstKind { + return false + } + } + return true + case reflect.Array: + switch first.Type.Elem().Kind() { + case reflect.Float32, reflect.Float64: + return true + default: + return false + } + case reflect.Struct: + for i := 0; i < first.Type.NumField(); i++ { + if !isHFA(first.Type) { + return false + } + } + return true + default: + return false + } +} + +// isHVA reports a Homogeneous Aggregate with a Fundamental Data Type that is a Short-Vector type +// and at most four uniquely addressable members (5.9.5.2 in [Arm64 Calling Convention]). +// A short vector is a machine type that is composed of repeated instances of one fundamental integral or +// floating-point type. It may be 8 or 16 bytes in total size (5.4 in [Arm64 Calling Convention]). +// This type of struct will be placed more compactly than the individual fields. +// +// [Arm64 Calling Convention]: https://github.com/ARM-software/abi-aa/blob/main/sysvabi64/sysvabi64.rst +func isHVA(t reflect.Type) bool { + // round up struct size to nearest 8 see section B.4 + structSize := roundUpTo8(t.Size()) + if structSize == 0 || (structSize != 8 && structSize != 16) { + return false + } + first := t.Field(0) + switch first.Type.Kind() { + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Int8, reflect.Int16, reflect.Int32: + firstKind := first.Type.Kind() + for i := 0; i < t.NumField(); i++ { + if t.Field(i).Type.Kind() != firstKind { + return false + } + } + return true + case reflect.Array: + switch first.Type.Elem().Kind() { + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Int8, reflect.Int16, reflect.Int32: + return true + default: + return false + } + default: + return false + } +} diff --git a/vendor/github.com/ebitengine/purego/struct_other.go b/vendor/github.com/ebitengine/purego/struct_other.go new file mode 100644 index 00000000000..9d42adac898 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/struct_other.go @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2024 The Ebitengine Authors + +//go:build !amd64 && !arm64 + +package purego + +import "reflect" + +func addStruct(v reflect.Value, numInts, numFloats, numStack *int, addInt, addFloat, addStack func(uintptr), keepAlive []interface{}) []interface{} { + panic("purego: struct arguments are not supported") +} + +func getStruct(outType reflect.Type, syscall syscall15Args) (v reflect.Value) { + panic("purego: struct returns are not supported") +} diff --git a/vendor/github.com/ebitengine/purego/sys_amd64.s b/vendor/github.com/ebitengine/purego/sys_amd64.s new file mode 100644 index 00000000000..cabde1a584e --- /dev/null +++ b/vendor/github.com/ebitengine/purego/sys_amd64.s @@ -0,0 +1,164 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build darwin || freebsd || linux + +#include "textflag.h" +#include "abi_amd64.h" +#include "go_asm.h" +#include "funcdata.h" + +#define STACK_SIZE 80 +#define PTR_ADDRESS (STACK_SIZE - 8) + +// syscall15X calls a function in libc on behalf of 
the syscall package. +// syscall15X takes a pointer to a struct like: +// struct { +// fn uintptr +// a1 uintptr +// a2 uintptr +// a3 uintptr +// a4 uintptr +// a5 uintptr +// a6 uintptr +// a7 uintptr +// a8 uintptr +// a9 uintptr +// a10 uintptr +// a11 uintptr +// a12 uintptr +// a13 uintptr +// a14 uintptr +// a15 uintptr +// r1 uintptr +// r2 uintptr +// err uintptr +// } +// syscall15X must be called on the g0 stack with the +// C calling convention (use libcCall). +GLOBL ·syscall15XABI0(SB), NOPTR|RODATA, $8 +DATA ·syscall15XABI0(SB)/8, $syscall15X(SB) +TEXT syscall15X(SB), NOSPLIT|NOFRAME, $0 + PUSHQ BP + MOVQ SP, BP + SUBQ $STACK_SIZE, SP + MOVQ DI, PTR_ADDRESS(BP) // save the pointer + MOVQ DI, R11 + + MOVQ syscall15Args_f1(R11), X0 // f1 + MOVQ syscall15Args_f2(R11), X1 // f2 + MOVQ syscall15Args_f3(R11), X2 // f3 + MOVQ syscall15Args_f4(R11), X3 // f4 + MOVQ syscall15Args_f5(R11), X4 // f5 + MOVQ syscall15Args_f6(R11), X5 // f6 + MOVQ syscall15Args_f7(R11), X6 // f7 + MOVQ syscall15Args_f8(R11), X7 // f8 + + MOVQ syscall15Args_a1(R11), DI // a1 + MOVQ syscall15Args_a2(R11), SI // a2 + MOVQ syscall15Args_a3(R11), DX // a3 + MOVQ syscall15Args_a4(R11), CX // a4 + MOVQ syscall15Args_a5(R11), R8 // a5 + MOVQ syscall15Args_a6(R11), R9 // a6 + + // push the remaining paramters onto the stack + MOVQ syscall15Args_a7(R11), R12 + MOVQ R12, 0(SP) // push a7 + MOVQ syscall15Args_a8(R11), R12 + MOVQ R12, 8(SP) // push a8 + MOVQ syscall15Args_a9(R11), R12 + MOVQ R12, 16(SP) // push a9 + MOVQ syscall15Args_a10(R11), R12 + MOVQ R12, 24(SP) // push a10 + MOVQ syscall15Args_a11(R11), R12 + MOVQ R12, 32(SP) // push a11 + MOVQ syscall15Args_a12(R11), R12 + MOVQ R12, 40(SP) // push a12 + MOVQ syscall15Args_a13(R11), R12 + MOVQ R12, 48(SP) // push a13 + MOVQ syscall15Args_a14(R11), R12 + MOVQ R12, 56(SP) // push a14 + MOVQ syscall15Args_a15(R11), R12 + MOVQ R12, 64(SP) // push a15 + XORL AX, AX // vararg: say "no float args" + + MOVQ syscall15Args_fn(R11), R10 // fn + CALL R10 + + MOVQ PTR_ADDRESS(BP), DI // get the pointer back + MOVQ AX, syscall15Args_a1(DI) // r1 + MOVQ DX, syscall15Args_a2(DI) // r3 + MOVQ X0, syscall15Args_f1(DI) // f1 + MOVQ X1, syscall15Args_f2(DI) // f2 + + XORL AX, AX // no error (it's ignored anyway) + ADDQ $STACK_SIZE, SP + MOVQ BP, SP + POPQ BP + RET + +TEXT callbackasm1(SB), NOSPLIT|NOFRAME, $0 + MOVQ 0(SP), AX // save the return address to calculate the cb index + MOVQ 8(SP), R10 // get the return SP so that we can align register args with stack args + ADDQ $8, SP // remove return address from stack, we are not returning to callbackasm, but to its caller. + + // make space for first six int and 8 float arguments below the frame + ADJSP $14*8, SP + MOVSD X0, (1*8)(SP) + MOVSD X1, (2*8)(SP) + MOVSD X2, (3*8)(SP) + MOVSD X3, (4*8)(SP) + MOVSD X4, (5*8)(SP) + MOVSD X5, (6*8)(SP) + MOVSD X6, (7*8)(SP) + MOVSD X7, (8*8)(SP) + MOVQ DI, (9*8)(SP) + MOVQ SI, (10*8)(SP) + MOVQ DX, (11*8)(SP) + MOVQ CX, (12*8)(SP) + MOVQ R8, (13*8)(SP) + MOVQ R9, (14*8)(SP) + LEAQ 8(SP), R8 // R8 = address of args vector + + PUSHQ R10 // push the stack pointer below registers + + // Switch from the host ABI to the Go ABI. 
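+	// PUSH_REGS_HOST_TO_ABI0 (from abi_amd64.h, included above) saves the
+	// registers that are callee-saved in the host C ABI but scratch in Go's
+	// ABI0, so the Go code reached via cgocallback may clobber them freely.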
+ PUSH_REGS_HOST_TO_ABI0() + + // determine index into runtime·cbs table + MOVQ $callbackasm(SB), DX + SUBQ DX, AX + MOVQ $0, DX + MOVQ $5, CX // divide by 5 because each call instruction in ·callbacks is 5 bytes long + DIVL CX + SUBQ $1, AX // subtract 1 because return PC is to the next slot + + // Create a struct callbackArgs on our stack to be passed as + // the "frame" to cgocallback and on to callbackWrap. + // $24 to make enough room for the arguments to runtime.cgocallback + SUBQ $(24+callbackArgs__size), SP + MOVQ AX, (24+callbackArgs_index)(SP) // callback index + MOVQ R8, (24+callbackArgs_args)(SP) // address of args vector + MOVQ $0, (24+callbackArgs_result)(SP) // result + LEAQ 24(SP), AX // take the address of callbackArgs + + // Call cgocallback, which will call callbackWrap(frame). + MOVQ ·callbackWrap_call(SB), DI // Get the ABIInternal function pointer + MOVQ (DI), DI // without by using a closure. + MOVQ AX, SI // frame (address of callbackArgs) + MOVQ $0, CX // context + + CALL crosscall2(SB) // runtime.cgocallback(fn, frame, ctxt uintptr) + + // Get callback result. + MOVQ (24+callbackArgs_result)(SP), AX + ADDQ $(24+callbackArgs__size), SP // remove callbackArgs struct + + POP_REGS_HOST_TO_ABI0() + + POPQ R10 // get the SP back + ADJSP $-14*8, SP // remove arguments + + MOVQ R10, 0(SP) + + RET diff --git a/vendor/github.com/ebitengine/purego/sys_arm64.s b/vendor/github.com/ebitengine/purego/sys_arm64.s new file mode 100644 index 00000000000..a68fdb99ba7 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/sys_arm64.s @@ -0,0 +1,92 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build darwin || freebsd || linux || windows + +#include "textflag.h" +#include "go_asm.h" +#include "funcdata.h" + +#define STACK_SIZE 64 +#define PTR_ADDRESS (STACK_SIZE - 8) + +// syscall15X calls a function in libc on behalf of the syscall package. +// syscall15X takes a pointer to a struct like: +// struct { +// fn uintptr +// a1 uintptr +// a2 uintptr +// a3 uintptr +// a4 uintptr +// a5 uintptr +// a6 uintptr +// a7 uintptr +// a8 uintptr +// a9 uintptr +// a10 uintptr +// a11 uintptr +// a12 uintptr +// a13 uintptr +// a14 uintptr +// a15 uintptr +// r1 uintptr +// r2 uintptr +// err uintptr +// } +// syscall15X must be called on the g0 stack with the +// C calling convention (use libcCall). 
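+// The GLOBL/DATA pair below publishes the address of syscall15X through the
+// Go-visible variable ·syscall15XABI0, which syscall_syscall15X passes to
+// runtime_cgocall so the call runs on the g0 stack.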
+GLOBL ·syscall15XABI0(SB), NOPTR|RODATA, $8 +DATA ·syscall15XABI0(SB)/8, $syscall15X(SB) +TEXT syscall15X(SB), NOSPLIT, $0 + SUB $STACK_SIZE, RSP // push structure pointer + MOVD R0, PTR_ADDRESS(RSP) + MOVD R0, R9 + + FMOVD syscall15Args_f1(R9), F0 // f1 + FMOVD syscall15Args_f2(R9), F1 // f2 + FMOVD syscall15Args_f3(R9), F2 // f3 + FMOVD syscall15Args_f4(R9), F3 // f4 + FMOVD syscall15Args_f5(R9), F4 // f5 + FMOVD syscall15Args_f6(R9), F5 // f6 + FMOVD syscall15Args_f7(R9), F6 // f7 + FMOVD syscall15Args_f8(R9), F7 // f8 + + MOVD syscall15Args_a1(R9), R0 // a1 + MOVD syscall15Args_a2(R9), R1 // a2 + MOVD syscall15Args_a3(R9), R2 // a3 + MOVD syscall15Args_a4(R9), R3 // a4 + MOVD syscall15Args_a5(R9), R4 // a5 + MOVD syscall15Args_a6(R9), R5 // a6 + MOVD syscall15Args_a7(R9), R6 // a7 + MOVD syscall15Args_a8(R9), R7 // a8 + MOVD syscall15Args_arm64_r8(R9), R8 // r8 + + MOVD syscall15Args_a9(R9), R10 + MOVD R10, 0(RSP) // push a9 onto stack + MOVD syscall15Args_a10(R9), R10 + MOVD R10, 8(RSP) // push a10 onto stack + MOVD syscall15Args_a11(R9), R10 + MOVD R10, 16(RSP) // push a11 onto stack + MOVD syscall15Args_a12(R9), R10 + MOVD R10, 24(RSP) // push a12 onto stack + MOVD syscall15Args_a13(R9), R10 + MOVD R10, 32(RSP) // push a13 onto stack + MOVD syscall15Args_a14(R9), R10 + MOVD R10, 40(RSP) // push a14 onto stack + MOVD syscall15Args_a15(R9), R10 + MOVD R10, 48(RSP) // push a15 onto stack + + MOVD syscall15Args_fn(R9), R10 // fn + BL (R10) + + MOVD PTR_ADDRESS(RSP), R2 // pop structure pointer + ADD $STACK_SIZE, RSP + + MOVD R0, syscall15Args_a1(R2) // save r1 + MOVD R1, syscall15Args_a2(R2) // save r3 + FMOVD F0, syscall15Args_f1(R2) // save f0 + FMOVD F1, syscall15Args_f2(R2) // save f1 + FMOVD F2, syscall15Args_f3(R2) // save f2 + FMOVD F3, syscall15Args_f4(R2) // save f3 + + RET diff --git a/vendor/github.com/ebitengine/purego/sys_unix_arm64.s b/vendor/github.com/ebitengine/purego/sys_unix_arm64.s new file mode 100644 index 00000000000..6da06b4d188 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/sys_unix_arm64.s @@ -0,0 +1,70 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2023 The Ebitengine Authors + +//go:build darwin || freebsd || linux + +#include "textflag.h" +#include "go_asm.h" +#include "funcdata.h" +#include "abi_arm64.h" + +TEXT callbackasm1(SB), NOSPLIT|NOFRAME, $0 + NO_LOCAL_POINTERS + + // On entry, the trampoline in zcallback_darwin_arm64.s left + // the callback index in R12 (which is volatile in the C ABI). + + // Save callback register arguments R0-R7 and F0-F7. + // We do this at the top of the frame so they're contiguous with stack arguments. + SUB $(16*8), RSP, R14 + FSTPD (F0, F1), (0*8)(R14) + FSTPD (F2, F3), (2*8)(R14) + FSTPD (F4, F5), (4*8)(R14) + FSTPD (F6, F7), (6*8)(R14) + STP (R0, R1), (8*8)(R14) + STP (R2, R3), (10*8)(R14) + STP (R4, R5), (12*8)(R14) + STP (R6, R7), (14*8)(R14) + + // Adjust SP by frame size. + SUB $(26*8), RSP + + // It is important to save R27 because the go assembler + // uses it for move instructions for a variable. + // This line: + // MOVD ·callbackWrap_call(SB), R0 + // Creates the instructions: + // ADRP 14335(PC), R27 + // MOVD 388(27), R0 + // R27 is a callee saved register so we are responsible + // for ensuring its value doesn't change. So save it and + // restore it at the end of this function. + // R30 is the link register. crosscall2 doesn't save it + // so it's saved here. + STP (R27, R30), 0(RSP) + + // Create a struct callbackArgs on our stack. 
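+	// The callbackArgs_index/args/result offsets and callbackArgs__size used
+	// below are emitted into go_asm.h from the Go struct definition, so this
+	// layout tracks the Go side automatically.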
+ MOVD $(callbackArgs__size)(RSP), R13 + MOVD R12, callbackArgs_index(R13) // callback index + MOVD R14, callbackArgs_args(R13) // address of args vector + MOVD ZR, callbackArgs_result(R13) // result + + // Move parameters into registers + // Get the ABIInternal function pointer + // without by using a closure. + MOVD ·callbackWrap_call(SB), R0 + MOVD (R0), R0 // fn unsafe.Pointer + MOVD R13, R1 // frame (&callbackArgs{...}) + MOVD $0, R3 // ctxt uintptr + + BL crosscall2(SB) + + // Get callback result. + MOVD $(callbackArgs__size)(RSP), R13 + MOVD callbackArgs_result(R13), R0 + + // Restore LR and R27 + LDP 0(RSP), (R27, R30) + ADD $(26*8), RSP + + RET diff --git a/vendor/github.com/ebitengine/purego/syscall.go b/vendor/github.com/ebitengine/purego/syscall.go new file mode 100644 index 00000000000..c30688dda13 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/syscall.go @@ -0,0 +1,53 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build darwin || freebsd || linux || windows + +package purego + +// CDecl marks a function as being called using the __cdecl calling convention as defined in +// the [MSDocs] when passed to NewCallback. It must be the first argument to the function. +// This is only useful on 386 Windows, but it is safe to use on other platforms. +// +// [MSDocs]: https://learn.microsoft.com/en-us/cpp/cpp/cdecl?view=msvc-170 +type CDecl struct{} + +const ( + maxArgs = 15 + numOfFloats = 8 // arm64 and amd64 both have 8 float registers +) + +type syscall15Args struct { + fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15 uintptr + f1, f2, f3, f4, f5, f6, f7, f8 uintptr + arm64_r8 uintptr +} + +// SyscallN takes fn, a C function pointer and a list of arguments as uintptr. +// There is an internal maximum number of arguments that SyscallN can take. It panics +// when the maximum is exceeded. It returns the result and the libc error code if there is one. +// +// NOTE: SyscallN does not properly call functions that have both integer and float parameters. +// See discussion comment https://github.com/ebiten/purego/pull/1#issuecomment-1128057607 +// for an explanation of why that is. +// +// On amd64, if there are more than 8 floats the 9th and so on will be placed incorrectly on the +// stack. +// +// The pragma go:nosplit is not needed at this function declaration because it uses go:uintptrescapes +// which forces all the objects that the uintptrs point to onto the heap where a stack split won't affect +// their memory location. 
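+//
+// An illustrative call (the library path and symbol are assumptions for the
+// example; Dlopen and Dlsym are this package's loaders):
+//
+//	lib, _ := Dlopen("libc.so.6", RTLD_NOW|RTLD_GLOBAL)
+//	puts, _ := Dlsym(lib, "puts")
+//	msg := append([]byte("hello"), 0)
+//	SyscallN(puts, uintptr(unsafe.Pointer(&msg[0])))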
+// +//go:uintptrescapes +func SyscallN(fn uintptr, args ...uintptr) (r1, r2, err uintptr) { + if fn == 0 { + panic("purego: fn is nil") + } + if len(args) > maxArgs { + panic("purego: too many arguments to SyscallN") + } + // add padding so there is no out-of-bounds slicing + var tmp [maxArgs]uintptr + copy(tmp[:], args) + return syscall_syscall15X(fn, tmp[0], tmp[1], tmp[2], tmp[3], tmp[4], tmp[5], tmp[6], tmp[7], tmp[8], tmp[9], tmp[10], tmp[11], tmp[12], tmp[13], tmp[14]) +} diff --git a/vendor/github.com/ebitengine/purego/syscall_cgo_linux.go b/vendor/github.com/ebitengine/purego/syscall_cgo_linux.go new file mode 100644 index 00000000000..36ee14e3b73 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/syscall_cgo_linux.go @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build cgo && !(amd64 || arm64) + +package purego + +import ( + "github.com/ebitengine/purego/internal/cgo" +) + +var syscall15XABI0 = uintptr(cgo.Syscall15XABI0) + +//go:nosplit +func syscall_syscall15X(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15 uintptr) (r1, r2, err uintptr) { + return cgo.Syscall15X(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15) +} + +func NewCallback(_ interface{}) uintptr { + panic("purego: NewCallback on Linux is only supported on amd64/arm64") +} diff --git a/vendor/github.com/ebitengine/purego/syscall_sysv.go b/vendor/github.com/ebitengine/purego/syscall_sysv.go new file mode 100644 index 00000000000..cce171c8f60 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/syscall_sysv.go @@ -0,0 +1,223 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +//go:build darwin || freebsd || (linux && (amd64 || arm64)) + +package purego + +import ( + "reflect" + "runtime" + "sync" + "unsafe" +) + +var syscall15XABI0 uintptr + +//go:nosplit +func syscall_syscall15X(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15 uintptr) (r1, r2, err uintptr) { + args := syscall15Args{ + fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, + a1, a2, a3, a4, a5, a6, a7, a8, + 0, + } + runtime_cgocall(syscall15XABI0, unsafe.Pointer(&args)) + return args.a1, args.a2, 0 +} + +// NewCallback converts a Go function to a function pointer conforming to the C calling convention. +// This is useful when interoperating with C code requiring callbacks. The argument is expected to be a +// function with zero or one uintptr-sized result. The function must not have arguments with size larger than the size +// of uintptr. Only a limited number of callbacks may be created in a single Go process, and any memory allocated +// for these callbacks is never released. At least 2000 callbacks can always be created. Although this function +// provides similar functionality to windows.NewCallback it is distinct. 
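+//
+// An illustrative callback (the C-side comparator contract of qsort is an
+// assumption for the example):
+//
+//	cmp := NewCallback(func(a, b unsafe.Pointer) int {
+//	    return int(*(*int32)(a) - *(*int32)(b))
+//	})
+//	// cmp can now be handed to C code expecting int (*)(const void *, const void *).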
+func NewCallback(fn interface{}) uintptr { + ty := reflect.TypeOf(fn) + for i := 0; i < ty.NumIn(); i++ { + in := ty.In(i) + if !in.AssignableTo(reflect.TypeOf(CDecl{})) { + continue + } + if i != 0 { + panic("purego: CDecl must be the first argument") + } + } + return compileCallback(fn) +} + +// maxCb is the maximum number of callbacks +// only increase this if you have added more to the callbackasm function +const maxCB = 2000 + +var cbs struct { + lock sync.Mutex + numFn int // the number of functions currently in cbs.funcs + funcs [maxCB]reflect.Value // the saved callbacks +} + +type callbackArgs struct { + index uintptr + // args points to the argument block. + // + // The structure of the arguments goes + // float registers followed by the + // integer registers followed by the stack. + // + // This variable is treated as a continuous + // block of memory containing all of the arguments + // for this callback. + args unsafe.Pointer + // Below are out-args from callbackWrap + result uintptr +} + +func compileCallback(fn interface{}) uintptr { + val := reflect.ValueOf(fn) + if val.Kind() != reflect.Func { + panic("purego: the type must be a function but was not") + } + if val.IsNil() { + panic("purego: function must not be nil") + } + ty := val.Type() + for i := 0; i < ty.NumIn(); i++ { + in := ty.In(i) + switch in.Kind() { + case reflect.Struct: + if i == 0 && in.AssignableTo(reflect.TypeOf(CDecl{})) { + continue + } + fallthrough + case reflect.Interface, reflect.Func, reflect.Slice, + reflect.Chan, reflect.Complex64, reflect.Complex128, + reflect.String, reflect.Map, reflect.Invalid: + panic("purego: unsupported argument type: " + in.Kind().String()) + } + } +output: + switch { + case ty.NumOut() == 1: + switch ty.Out(0).Kind() { + case reflect.Pointer, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, + reflect.Bool, reflect.UnsafePointer: + break output + } + panic("purego: unsupported return type: " + ty.String()) + case ty.NumOut() > 1: + panic("purego: callbacks can only have one return") + } + cbs.lock.Lock() + defer cbs.lock.Unlock() + if cbs.numFn >= maxCB { + panic("purego: the maximum number of callbacks has been reached") + } + cbs.funcs[cbs.numFn] = val + cbs.numFn++ + return callbackasmAddr(cbs.numFn - 1) +} + +const ptrSize = unsafe.Sizeof((*int)(nil)) + +const callbackMaxFrame = 64 * ptrSize + +// callbackasm is implemented in zcallback_GOOS_GOARCH.s +// +//go:linkname __callbackasm callbackasm +var __callbackasm byte +var callbackasmABI0 = uintptr(unsafe.Pointer(&__callbackasm)) + +// callbackWrap_call allows the calling of the ABIInternal wrapper +// which is required for runtime.cgocallback without the +// tag which is only allowed in the runtime. +// This closure is used inside sys_darwin_GOARCH.s +var callbackWrap_call = callbackWrap + +// callbackWrap is called by assembly code which determines which Go function to call. +// This function takes the arguments and passes them to the Go function and returns the result. 
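+//
+// For example, on arm64 (8 float and 8 integer registers) the frame is laid
+// out as [f0..f7][r0..r7][stack...], so for a callback func(a, b uintptr,
+// x float64), x is read from frame[0] while a and b come from frame[8] and
+// frame[9].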
+func callbackWrap(a *callbackArgs) { + cbs.lock.Lock() + fn := cbs.funcs[a.index] + cbs.lock.Unlock() + fnType := fn.Type() + args := make([]reflect.Value, fnType.NumIn()) + frame := (*[callbackMaxFrame]uintptr)(a.args) + var floatsN int // floatsN represents the number of float arguments processed + var intsN int // intsN represents the number of integer arguments processed + // stack points to the index into frame of the current stack element. + // The stack begins after the float and integer registers. + stack := numOfIntegerRegisters() + numOfFloats + for i := range args { + var pos int + switch fnType.In(i).Kind() { + case reflect.Float32, reflect.Float64: + if floatsN >= numOfFloats { + pos = stack + stack++ + } else { + pos = floatsN + } + floatsN++ + case reflect.Struct: + // This is the CDecl field + args[i] = reflect.Zero(fnType.In(i)) + continue + default: + + if intsN >= numOfIntegerRegisters() { + pos = stack + stack++ + } else { + // the integers begin after the floats in frame + pos = intsN + numOfFloats + } + intsN++ + } + args[i] = reflect.NewAt(fnType.In(i), unsafe.Pointer(&frame[pos])).Elem() + } + ret := fn.Call(args) + if len(ret) > 0 { + switch k := ret[0].Kind(); k { + case reflect.Uint, reflect.Uint64, reflect.Uint32, reflect.Uint16, reflect.Uint8, reflect.Uintptr: + a.result = uintptr(ret[0].Uint()) + case reflect.Int, reflect.Int64, reflect.Int32, reflect.Int16, reflect.Int8: + a.result = uintptr(ret[0].Int()) + case reflect.Bool: + if ret[0].Bool() { + a.result = 1 + } else { + a.result = 0 + } + case reflect.Pointer: + a.result = ret[0].Pointer() + case reflect.UnsafePointer: + a.result = ret[0].Pointer() + default: + panic("purego: unsupported kind: " + k.String()) + } + } +} + +// callbackasmAddr returns address of runtime.callbackasm +// function adjusted by i. +// On x86 and amd64, runtime.callbackasm is a series of CALL instructions, +// and we want callback to arrive at +// correspondent call instruction instead of start of +// runtime.callbackasm. +// On ARM, runtime.callbackasm is a series of mov and branch instructions. +// R12 is loaded with the callback index. Each entry is two instructions, +// hence 8 bytes. +func callbackasmAddr(i int) uintptr { + var entrySize int + switch runtime.GOARCH { + default: + panic("purego: unsupported architecture") + case "386", "amd64": + entrySize = 5 + case "arm", "arm64": + // On ARM and ARM64, each entry is a MOV instruction + // followed by a branch instruction + entrySize = 8 + } + return callbackasmABI0 + uintptr(i*entrySize) +} diff --git a/vendor/github.com/ebitengine/purego/syscall_windows.go b/vendor/github.com/ebitengine/purego/syscall_windows.go new file mode 100644 index 00000000000..5fbfcabfdc9 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/syscall_windows.go @@ -0,0 +1,46 @@ +// SPDX-License-Identifier: Apache-2.0 +// SPDX-FileCopyrightText: 2022 The Ebitengine Authors + +package purego + +import ( + "reflect" + "syscall" +) + +var syscall15XABI0 uintptr + +func syscall_syscall15X(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15 uintptr) (r1, r2, err uintptr) { + r1, r2, errno := syscall.Syscall15(fn, 15, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15) + return r1, r2, uintptr(errno) +} + +// NewCallback converts a Go function to a function pointer conforming to the stdcall calling convention. +// This is useful when interoperating with Windows code requiring callbacks. The argument is expected to be a +// function with one uintptr-sized result. 
The function must not have arguments with size larger than the +// size of uintptr. Only a limited number of callbacks may be created in a single Go process, and any memory +// allocated for these callbacks is never released. Between NewCallback and NewCallbackCDecl, at least 1024 +// callbacks can always be created. Although this function is similiar to the darwin version it may act +// differently. +func NewCallback(fn interface{}) uintptr { + isCDecl := false + ty := reflect.TypeOf(fn) + for i := 0; i < ty.NumIn(); i++ { + in := ty.In(i) + if !in.AssignableTo(reflect.TypeOf(CDecl{})) { + continue + } + if i != 0 { + panic("purego: CDecl must be the first argument") + } + isCDecl = true + } + if isCDecl { + return syscall.NewCallbackCDecl(fn) + } + return syscall.NewCallback(fn) +} + +func loadSymbol(handle uintptr, name string) (uintptr, error) { + return syscall.GetProcAddress(syscall.Handle(handle), name) +} diff --git a/vendor/github.com/ebitengine/purego/zcallback_amd64.s b/vendor/github.com/ebitengine/purego/zcallback_amd64.s new file mode 100644 index 00000000000..6a778bfcad1 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/zcallback_amd64.s @@ -0,0 +1,2014 @@ +// Code generated by wincallback.go using 'go generate'. DO NOT EDIT. + +//go:build darwin || freebsd || linux + +// runtime·callbackasm is called by external code to +// execute Go implemented callback function. It is not +// called from the start, instead runtime·compilecallback +// always returns address into runtime·callbackasm offset +// appropriately so different callbacks start with different +// CALL instruction in runtime·callbackasm. This determines +// which Go callback function is executed later on. +#include "textflag.h" + +TEXT callbackasm(SB), NOSPLIT|NOFRAME, $0 + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL 
callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL 
callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL 
callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL 
callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL 
callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL 
callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL 
callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL 
callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL 
callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL 
callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL 
callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL 
callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL 
callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) + CALL 
callbackasm1(SB) + CALL callbackasm1(SB) + CALL callbackasm1(SB) diff --git a/vendor/github.com/ebitengine/purego/zcallback_arm64.s b/vendor/github.com/ebitengine/purego/zcallback_arm64.s new file mode 100644 index 00000000000..c079b8038e3 --- /dev/null +++ b/vendor/github.com/ebitengine/purego/zcallback_arm64.s @@ -0,0 +1,4014 @@ +// Code generated by wincallback.go using 'go generate'. DO NOT EDIT. + +//go:build darwin || freebsd || linux + +// External code calls into callbackasm at an offset corresponding +// to the callback index. Callbackasm is a table of MOV and B instructions. +// The MOV instruction loads R12 with the callback index, and the +// B instruction branches to callbackasm1. +// callbackasm1 takes the callback index from R12 and +// indexes into an array that stores information about each callback. +// It then calls the Go implementation for that callback. +#include "textflag.h" + +TEXT callbackasm(SB), NOSPLIT|NOFRAME, $0 + MOVD $0, R12 + B callbackasm1(SB) + MOVD $1, R12 + B callbackasm1(SB) + MOVD $2, R12 + B callbackasm1(SB) + MOVD $3, R12 + B callbackasm1(SB) + MOVD $4, R12 + B callbackasm1(SB) + MOVD $5, R12 + B callbackasm1(SB) + MOVD $6, R12 + B callbackasm1(SB) + MOVD $7, R12 + B callbackasm1(SB) + MOVD $8, R12 + B callbackasm1(SB) + MOVD $9, R12 + B callbackasm1(SB) + MOVD $10, R12 + B callbackasm1(SB) + MOVD $11, R12 + B callbackasm1(SB) + MOVD $12, R12 + B callbackasm1(SB) + MOVD $13, R12 + B callbackasm1(SB) + MOVD $14, R12 + B callbackasm1(SB) + MOVD $15, R12 + B callbackasm1(SB) + MOVD $16, R12 + B callbackasm1(SB) + MOVD $17, R12 + B callbackasm1(SB) + MOVD $18, R12 + B callbackasm1(SB) + MOVD $19, R12 + B callbackasm1(SB) + MOVD $20, R12 + B callbackasm1(SB) + MOVD $21, R12 + B callbackasm1(SB) + MOVD $22, R12 + B callbackasm1(SB) + MOVD $23, R12 + B callbackasm1(SB) + MOVD $24, R12 + B callbackasm1(SB) + MOVD $25, R12 + B callbackasm1(SB) + MOVD $26, R12 + B callbackasm1(SB) + MOVD $27, R12 + B callbackasm1(SB) + MOVD $28, R12 + B callbackasm1(SB) + MOVD $29, R12 + B callbackasm1(SB) + MOVD $30, R12 + B callbackasm1(SB) + MOVD $31, R12 + B callbackasm1(SB) + MOVD $32, R12 + B callbackasm1(SB) + MOVD $33, R12 + B callbackasm1(SB) + MOVD $34, R12 + B callbackasm1(SB) + MOVD $35, R12 + B callbackasm1(SB) + MOVD $36, R12 + B callbackasm1(SB) + MOVD $37, R12 + B callbackasm1(SB) + MOVD $38, R12 + B callbackasm1(SB) + MOVD $39, R12 + B callbackasm1(SB) + MOVD $40, R12 + B callbackasm1(SB) + MOVD $41, R12 + B callbackasm1(SB) + MOVD $42, R12 + B callbackasm1(SB) + MOVD $43, R12 + B callbackasm1(SB) + MOVD $44, R12 + B callbackasm1(SB) + MOVD $45, R12 + B callbackasm1(SB) + MOVD $46, R12 + B callbackasm1(SB) + MOVD $47, R12 + B callbackasm1(SB) + MOVD $48, R12 + B callbackasm1(SB) + MOVD $49, R12 + B callbackasm1(SB) + MOVD $50, R12 + B callbackasm1(SB) + MOVD $51, R12 + B callbackasm1(SB) + MOVD $52, R12 + B callbackasm1(SB) + MOVD $53, R12 + B callbackasm1(SB) + MOVD $54, R12 + B callbackasm1(SB) + MOVD $55, R12 + B callbackasm1(SB) + MOVD $56, R12 + B callbackasm1(SB) + MOVD $57, R12 + B callbackasm1(SB) + MOVD $58, R12 + B callbackasm1(SB) + MOVD $59, R12 + B callbackasm1(SB) + MOVD $60, R12 + B callbackasm1(SB) + MOVD $61, R12 + B callbackasm1(SB) + MOVD $62, R12 + B callbackasm1(SB) + MOVD $63, R12 + B callbackasm1(SB) + MOVD $64, R12 + B callbackasm1(SB) + MOVD $65, R12 + B callbackasm1(SB) + MOVD $66, R12 + B callbackasm1(SB) + MOVD $67, R12 + B callbackasm1(SB) + MOVD $68, R12 + B callbackasm1(SB) + MOVD $69, R12 + B callbackasm1(SB) + MOVD $70, R12 + 
B callbackasm1(SB) + MOVD $71, R12 + B callbackasm1(SB) + MOVD $72, R12 + B callbackasm1(SB) + MOVD $73, R12 + B callbackasm1(SB) + MOVD $74, R12 + B callbackasm1(SB) + MOVD $75, R12 + B callbackasm1(SB) + MOVD $76, R12 + B callbackasm1(SB) + MOVD $77, R12 + B callbackasm1(SB) + MOVD $78, R12 + B callbackasm1(SB) + MOVD $79, R12 + B callbackasm1(SB) + MOVD $80, R12 + B callbackasm1(SB) + MOVD $81, R12 + B callbackasm1(SB) + MOVD $82, R12 + B callbackasm1(SB) + MOVD $83, R12 + B callbackasm1(SB) + MOVD $84, R12 + B callbackasm1(SB) + MOVD $85, R12 + B callbackasm1(SB) + MOVD $86, R12 + B callbackasm1(SB) + MOVD $87, R12 + B callbackasm1(SB) + MOVD $88, R12 + B callbackasm1(SB) + MOVD $89, R12 + B callbackasm1(SB) + MOVD $90, R12 + B callbackasm1(SB) + MOVD $91, R12 + B callbackasm1(SB) + MOVD $92, R12 + B callbackasm1(SB) + MOVD $93, R12 + B callbackasm1(SB) + MOVD $94, R12 + B callbackasm1(SB) + MOVD $95, R12 + B callbackasm1(SB) + MOVD $96, R12 + B callbackasm1(SB) + MOVD $97, R12 + B callbackasm1(SB) + MOVD $98, R12 + B callbackasm1(SB) + MOVD $99, R12 + B callbackasm1(SB) + MOVD $100, R12 + B callbackasm1(SB) + MOVD $101, R12 + B callbackasm1(SB) + MOVD $102, R12 + B callbackasm1(SB) + MOVD $103, R12 + B callbackasm1(SB) + MOVD $104, R12 + B callbackasm1(SB) + MOVD $105, R12 + B callbackasm1(SB) + MOVD $106, R12 + B callbackasm1(SB) + MOVD $107, R12 + B callbackasm1(SB) + MOVD $108, R12 + B callbackasm1(SB) + MOVD $109, R12 + B callbackasm1(SB) + MOVD $110, R12 + B callbackasm1(SB) + MOVD $111, R12 + B callbackasm1(SB) + MOVD $112, R12 + B callbackasm1(SB) + MOVD $113, R12 + B callbackasm1(SB) + MOVD $114, R12 + B callbackasm1(SB) + MOVD $115, R12 + B callbackasm1(SB) + MOVD $116, R12 + B callbackasm1(SB) + MOVD $117, R12 + B callbackasm1(SB) + MOVD $118, R12 + B callbackasm1(SB) + MOVD $119, R12 + B callbackasm1(SB) + MOVD $120, R12 + B callbackasm1(SB) + MOVD $121, R12 + B callbackasm1(SB) + MOVD $122, R12 + B callbackasm1(SB) + MOVD $123, R12 + B callbackasm1(SB) + MOVD $124, R12 + B callbackasm1(SB) + MOVD $125, R12 + B callbackasm1(SB) + MOVD $126, R12 + B callbackasm1(SB) + MOVD $127, R12 + B callbackasm1(SB) + MOVD $128, R12 + B callbackasm1(SB) + MOVD $129, R12 + B callbackasm1(SB) + MOVD $130, R12 + B callbackasm1(SB) + MOVD $131, R12 + B callbackasm1(SB) + MOVD $132, R12 + B callbackasm1(SB) + MOVD $133, R12 + B callbackasm1(SB) + MOVD $134, R12 + B callbackasm1(SB) + MOVD $135, R12 + B callbackasm1(SB) + MOVD $136, R12 + B callbackasm1(SB) + MOVD $137, R12 + B callbackasm1(SB) + MOVD $138, R12 + B callbackasm1(SB) + MOVD $139, R12 + B callbackasm1(SB) + MOVD $140, R12 + B callbackasm1(SB) + MOVD $141, R12 + B callbackasm1(SB) + MOVD $142, R12 + B callbackasm1(SB) + MOVD $143, R12 + B callbackasm1(SB) + MOVD $144, R12 + B callbackasm1(SB) + MOVD $145, R12 + B callbackasm1(SB) + MOVD $146, R12 + B callbackasm1(SB) + MOVD $147, R12 + B callbackasm1(SB) + MOVD $148, R12 + B callbackasm1(SB) + MOVD $149, R12 + B callbackasm1(SB) + MOVD $150, R12 + B callbackasm1(SB) + MOVD $151, R12 + B callbackasm1(SB) + MOVD $152, R12 + B callbackasm1(SB) + MOVD $153, R12 + B callbackasm1(SB) + MOVD $154, R12 + B callbackasm1(SB) + MOVD $155, R12 + B callbackasm1(SB) + MOVD $156, R12 + B callbackasm1(SB) + MOVD $157, R12 + B callbackasm1(SB) + MOVD $158, R12 + B callbackasm1(SB) + MOVD $159, R12 + B callbackasm1(SB) + MOVD $160, R12 + B callbackasm1(SB) + MOVD $161, R12 + B callbackasm1(SB) + MOVD $162, R12 + B callbackasm1(SB) + MOVD $163, R12 + B callbackasm1(SB) + MOVD $164, R12 + B 
callbackasm1(SB) + MOVD $165, R12 + B callbackasm1(SB) + MOVD $166, R12 + B callbackasm1(SB) + MOVD $167, R12 + B callbackasm1(SB) + MOVD $168, R12 + B callbackasm1(SB) + MOVD $169, R12 + B callbackasm1(SB) + MOVD $170, R12 + B callbackasm1(SB) + MOVD $171, R12 + B callbackasm1(SB) + MOVD $172, R12 + B callbackasm1(SB) + MOVD $173, R12 + B callbackasm1(SB) + MOVD $174, R12 + B callbackasm1(SB) + MOVD $175, R12 + B callbackasm1(SB) + MOVD $176, R12 + B callbackasm1(SB) + MOVD $177, R12 + B callbackasm1(SB) + MOVD $178, R12 + B callbackasm1(SB) + MOVD $179, R12 + B callbackasm1(SB) + MOVD $180, R12 + B callbackasm1(SB) + MOVD $181, R12 + B callbackasm1(SB) + MOVD $182, R12 + B callbackasm1(SB) + MOVD $183, R12 + B callbackasm1(SB) + MOVD $184, R12 + B callbackasm1(SB) + MOVD $185, R12 + B callbackasm1(SB) + MOVD $186, R12 + B callbackasm1(SB) + MOVD $187, R12 + B callbackasm1(SB) + MOVD $188, R12 + B callbackasm1(SB) + MOVD $189, R12 + B callbackasm1(SB) + MOVD $190, R12 + B callbackasm1(SB) + MOVD $191, R12 + B callbackasm1(SB) + MOVD $192, R12 + B callbackasm1(SB) + MOVD $193, R12 + B callbackasm1(SB) + MOVD $194, R12 + B callbackasm1(SB) + MOVD $195, R12 + B callbackasm1(SB) + MOVD $196, R12 + B callbackasm1(SB) + MOVD $197, R12 + B callbackasm1(SB) + MOVD $198, R12 + B callbackasm1(SB) + MOVD $199, R12 + B callbackasm1(SB) + MOVD $200, R12 + B callbackasm1(SB) + MOVD $201, R12 + B callbackasm1(SB) + MOVD $202, R12 + B callbackasm1(SB) + MOVD $203, R12 + B callbackasm1(SB) + MOVD $204, R12 + B callbackasm1(SB) + MOVD $205, R12 + B callbackasm1(SB) + MOVD $206, R12 + B callbackasm1(SB) + MOVD $207, R12 + B callbackasm1(SB) + MOVD $208, R12 + B callbackasm1(SB) + MOVD $209, R12 + B callbackasm1(SB) + MOVD $210, R12 + B callbackasm1(SB) + MOVD $211, R12 + B callbackasm1(SB) + MOVD $212, R12 + B callbackasm1(SB) + MOVD $213, R12 + B callbackasm1(SB) + MOVD $214, R12 + B callbackasm1(SB) + MOVD $215, R12 + B callbackasm1(SB) + MOVD $216, R12 + B callbackasm1(SB) + MOVD $217, R12 + B callbackasm1(SB) + MOVD $218, R12 + B callbackasm1(SB) + MOVD $219, R12 + B callbackasm1(SB) + MOVD $220, R12 + B callbackasm1(SB) + MOVD $221, R12 + B callbackasm1(SB) + MOVD $222, R12 + B callbackasm1(SB) + MOVD $223, R12 + B callbackasm1(SB) + MOVD $224, R12 + B callbackasm1(SB) + MOVD $225, R12 + B callbackasm1(SB) + MOVD $226, R12 + B callbackasm1(SB) + MOVD $227, R12 + B callbackasm1(SB) + MOVD $228, R12 + B callbackasm1(SB) + MOVD $229, R12 + B callbackasm1(SB) + MOVD $230, R12 + B callbackasm1(SB) + MOVD $231, R12 + B callbackasm1(SB) + MOVD $232, R12 + B callbackasm1(SB) + MOVD $233, R12 + B callbackasm1(SB) + MOVD $234, R12 + B callbackasm1(SB) + MOVD $235, R12 + B callbackasm1(SB) + MOVD $236, R12 + B callbackasm1(SB) + MOVD $237, R12 + B callbackasm1(SB) + MOVD $238, R12 + B callbackasm1(SB) + MOVD $239, R12 + B callbackasm1(SB) + MOVD $240, R12 + B callbackasm1(SB) + MOVD $241, R12 + B callbackasm1(SB) + MOVD $242, R12 + B callbackasm1(SB) + MOVD $243, R12 + B callbackasm1(SB) + MOVD $244, R12 + B callbackasm1(SB) + MOVD $245, R12 + B callbackasm1(SB) + MOVD $246, R12 + B callbackasm1(SB) + MOVD $247, R12 + B callbackasm1(SB) + MOVD $248, R12 + B callbackasm1(SB) + MOVD $249, R12 + B callbackasm1(SB) + MOVD $250, R12 + B callbackasm1(SB) + MOVD $251, R12 + B callbackasm1(SB) + MOVD $252, R12 + B callbackasm1(SB) + MOVD $253, R12 + B callbackasm1(SB) + MOVD $254, R12 + B callbackasm1(SB) + MOVD $255, R12 + B callbackasm1(SB) + MOVD $256, R12 + B callbackasm1(SB) + MOVD $257, R12 + B callbackasm1(SB) + 
MOVD $258, R12 + B callbackasm1(SB) + MOVD $259, R12 + B callbackasm1(SB) + MOVD $260, R12 + B callbackasm1(SB) + MOVD $261, R12 + B callbackasm1(SB) + MOVD $262, R12 + B callbackasm1(SB) + MOVD $263, R12 + B callbackasm1(SB) + MOVD $264, R12 + B callbackasm1(SB) + MOVD $265, R12 + B callbackasm1(SB) + MOVD $266, R12 + B callbackasm1(SB) + MOVD $267, R12 + B callbackasm1(SB) + MOVD $268, R12 + B callbackasm1(SB) + MOVD $269, R12 + B callbackasm1(SB) + MOVD $270, R12 + B callbackasm1(SB) + MOVD $271, R12 + B callbackasm1(SB) + MOVD $272, R12 + B callbackasm1(SB) + MOVD $273, R12 + B callbackasm1(SB) + MOVD $274, R12 + B callbackasm1(SB) + MOVD $275, R12 + B callbackasm1(SB) + MOVD $276, R12 + B callbackasm1(SB) + MOVD $277, R12 + B callbackasm1(SB) + MOVD $278, R12 + B callbackasm1(SB) + MOVD $279, R12 + B callbackasm1(SB) + MOVD $280, R12 + B callbackasm1(SB) + MOVD $281, R12 + B callbackasm1(SB) + MOVD $282, R12 + B callbackasm1(SB) + MOVD $283, R12 + B callbackasm1(SB) + MOVD $284, R12 + B callbackasm1(SB) + MOVD $285, R12 + B callbackasm1(SB) + MOVD $286, R12 + B callbackasm1(SB) + MOVD $287, R12 + B callbackasm1(SB) + MOVD $288, R12 + B callbackasm1(SB) + MOVD $289, R12 + B callbackasm1(SB) + MOVD $290, R12 + B callbackasm1(SB) + MOVD $291, R12 + B callbackasm1(SB) + MOVD $292, R12 + B callbackasm1(SB) + MOVD $293, R12 + B callbackasm1(SB) + MOVD $294, R12 + B callbackasm1(SB) + MOVD $295, R12 + B callbackasm1(SB) + MOVD $296, R12 + B callbackasm1(SB) + MOVD $297, R12 + B callbackasm1(SB) + MOVD $298, R12 + B callbackasm1(SB) + MOVD $299, R12 + B callbackasm1(SB) + MOVD $300, R12 + B callbackasm1(SB) + MOVD $301, R12 + B callbackasm1(SB) + MOVD $302, R12 + B callbackasm1(SB) + MOVD $303, R12 + B callbackasm1(SB) + MOVD $304, R12 + B callbackasm1(SB) + MOVD $305, R12 + B callbackasm1(SB) + MOVD $306, R12 + B callbackasm1(SB) + MOVD $307, R12 + B callbackasm1(SB) + MOVD $308, R12 + B callbackasm1(SB) + MOVD $309, R12 + B callbackasm1(SB) + MOVD $310, R12 + B callbackasm1(SB) + MOVD $311, R12 + B callbackasm1(SB) + MOVD $312, R12 + B callbackasm1(SB) + MOVD $313, R12 + B callbackasm1(SB) + MOVD $314, R12 + B callbackasm1(SB) + MOVD $315, R12 + B callbackasm1(SB) + MOVD $316, R12 + B callbackasm1(SB) + MOVD $317, R12 + B callbackasm1(SB) + MOVD $318, R12 + B callbackasm1(SB) + MOVD $319, R12 + B callbackasm1(SB) + MOVD $320, R12 + B callbackasm1(SB) + MOVD $321, R12 + B callbackasm1(SB) + MOVD $322, R12 + B callbackasm1(SB) + MOVD $323, R12 + B callbackasm1(SB) + MOVD $324, R12 + B callbackasm1(SB) + MOVD $325, R12 + B callbackasm1(SB) + MOVD $326, R12 + B callbackasm1(SB) + MOVD $327, R12 + B callbackasm1(SB) + MOVD $328, R12 + B callbackasm1(SB) + MOVD $329, R12 + B callbackasm1(SB) + MOVD $330, R12 + B callbackasm1(SB) + MOVD $331, R12 + B callbackasm1(SB) + MOVD $332, R12 + B callbackasm1(SB) + MOVD $333, R12 + B callbackasm1(SB) + MOVD $334, R12 + B callbackasm1(SB) + MOVD $335, R12 + B callbackasm1(SB) + MOVD $336, R12 + B callbackasm1(SB) + MOVD $337, R12 + B callbackasm1(SB) + MOVD $338, R12 + B callbackasm1(SB) + MOVD $339, R12 + B callbackasm1(SB) + MOVD $340, R12 + B callbackasm1(SB) + MOVD $341, R12 + B callbackasm1(SB) + MOVD $342, R12 + B callbackasm1(SB) + MOVD $343, R12 + B callbackasm1(SB) + MOVD $344, R12 + B callbackasm1(SB) + MOVD $345, R12 + B callbackasm1(SB) + MOVD $346, R12 + B callbackasm1(SB) + MOVD $347, R12 + B callbackasm1(SB) + MOVD $348, R12 + B callbackasm1(SB) + MOVD $349, R12 + B callbackasm1(SB) + MOVD $350, R12 + B callbackasm1(SB) + MOVD $351, R12 + B 
callbackasm1(SB) + MOVD $352, R12 + B callbackasm1(SB) + MOVD $353, R12 + B callbackasm1(SB) + MOVD $354, R12 + B callbackasm1(SB) + MOVD $355, R12 + B callbackasm1(SB) + MOVD $356, R12 + B callbackasm1(SB) + MOVD $357, R12 + B callbackasm1(SB) + MOVD $358, R12 + B callbackasm1(SB) + MOVD $359, R12 + B callbackasm1(SB) + MOVD $360, R12 + B callbackasm1(SB) + MOVD $361, R12 + B callbackasm1(SB) + MOVD $362, R12 + B callbackasm1(SB) + MOVD $363, R12 + B callbackasm1(SB) + MOVD $364, R12 + B callbackasm1(SB) + MOVD $365, R12 + B callbackasm1(SB) + MOVD $366, R12 + B callbackasm1(SB) + MOVD $367, R12 + B callbackasm1(SB) + MOVD $368, R12 + B callbackasm1(SB) + MOVD $369, R12 + B callbackasm1(SB) + MOVD $370, R12 + B callbackasm1(SB) + MOVD $371, R12 + B callbackasm1(SB) + MOVD $372, R12 + B callbackasm1(SB) + MOVD $373, R12 + B callbackasm1(SB) + MOVD $374, R12 + B callbackasm1(SB) + MOVD $375, R12 + B callbackasm1(SB) + MOVD $376, R12 + B callbackasm1(SB) + MOVD $377, R12 + B callbackasm1(SB) + MOVD $378, R12 + B callbackasm1(SB) + MOVD $379, R12 + B callbackasm1(SB) + MOVD $380, R12 + B callbackasm1(SB) + MOVD $381, R12 + B callbackasm1(SB) + MOVD $382, R12 + B callbackasm1(SB) + MOVD $383, R12 + B callbackasm1(SB) + MOVD $384, R12 + B callbackasm1(SB) + MOVD $385, R12 + B callbackasm1(SB) + MOVD $386, R12 + B callbackasm1(SB) + MOVD $387, R12 + B callbackasm1(SB) + MOVD $388, R12 + B callbackasm1(SB) + MOVD $389, R12 + B callbackasm1(SB) + MOVD $390, R12 + B callbackasm1(SB) + MOVD $391, R12 + B callbackasm1(SB) + MOVD $392, R12 + B callbackasm1(SB) + MOVD $393, R12 + B callbackasm1(SB) + MOVD $394, R12 + B callbackasm1(SB) + MOVD $395, R12 + B callbackasm1(SB) + MOVD $396, R12 + B callbackasm1(SB) + MOVD $397, R12 + B callbackasm1(SB) + MOVD $398, R12 + B callbackasm1(SB) + MOVD $399, R12 + B callbackasm1(SB) + MOVD $400, R12 + B callbackasm1(SB) + MOVD $401, R12 + B callbackasm1(SB) + MOVD $402, R12 + B callbackasm1(SB) + MOVD $403, R12 + B callbackasm1(SB) + MOVD $404, R12 + B callbackasm1(SB) + MOVD $405, R12 + B callbackasm1(SB) + MOVD $406, R12 + B callbackasm1(SB) + MOVD $407, R12 + B callbackasm1(SB) + MOVD $408, R12 + B callbackasm1(SB) + MOVD $409, R12 + B callbackasm1(SB) + MOVD $410, R12 + B callbackasm1(SB) + MOVD $411, R12 + B callbackasm1(SB) + MOVD $412, R12 + B callbackasm1(SB) + MOVD $413, R12 + B callbackasm1(SB) + MOVD $414, R12 + B callbackasm1(SB) + MOVD $415, R12 + B callbackasm1(SB) + MOVD $416, R12 + B callbackasm1(SB) + MOVD $417, R12 + B callbackasm1(SB) + MOVD $418, R12 + B callbackasm1(SB) + MOVD $419, R12 + B callbackasm1(SB) + MOVD $420, R12 + B callbackasm1(SB) + MOVD $421, R12 + B callbackasm1(SB) + MOVD $422, R12 + B callbackasm1(SB) + MOVD $423, R12 + B callbackasm1(SB) + MOVD $424, R12 + B callbackasm1(SB) + MOVD $425, R12 + B callbackasm1(SB) + MOVD $426, R12 + B callbackasm1(SB) + MOVD $427, R12 + B callbackasm1(SB) + MOVD $428, R12 + B callbackasm1(SB) + MOVD $429, R12 + B callbackasm1(SB) + MOVD $430, R12 + B callbackasm1(SB) + MOVD $431, R12 + B callbackasm1(SB) + MOVD $432, R12 + B callbackasm1(SB) + MOVD $433, R12 + B callbackasm1(SB) + MOVD $434, R12 + B callbackasm1(SB) + MOVD $435, R12 + B callbackasm1(SB) + MOVD $436, R12 + B callbackasm1(SB) + MOVD $437, R12 + B callbackasm1(SB) + MOVD $438, R12 + B callbackasm1(SB) + MOVD $439, R12 + B callbackasm1(SB) + MOVD $440, R12 + B callbackasm1(SB) + MOVD $441, R12 + B callbackasm1(SB) + MOVD $442, R12 + B callbackasm1(SB) + MOVD $443, R12 + B callbackasm1(SB) + MOVD $444, R12 + B callbackasm1(SB) + 
MOVD $445, R12 + B callbackasm1(SB) + MOVD $446, R12 + B callbackasm1(SB) + MOVD $447, R12 + B callbackasm1(SB) + MOVD $448, R12 + B callbackasm1(SB) + MOVD $449, R12 + B callbackasm1(SB) + MOVD $450, R12 + B callbackasm1(SB) + MOVD $451, R12 + B callbackasm1(SB) + MOVD $452, R12 + B callbackasm1(SB) + MOVD $453, R12 + B callbackasm1(SB) + MOVD $454, R12 + B callbackasm1(SB) + MOVD $455, R12 + B callbackasm1(SB) + MOVD $456, R12 + B callbackasm1(SB) + MOVD $457, R12 + B callbackasm1(SB) + MOVD $458, R12 + B callbackasm1(SB) + MOVD $459, R12 + B callbackasm1(SB) + MOVD $460, R12 + B callbackasm1(SB) + MOVD $461, R12 + B callbackasm1(SB) + MOVD $462, R12 + B callbackasm1(SB) + MOVD $463, R12 + B callbackasm1(SB) + MOVD $464, R12 + B callbackasm1(SB) + MOVD $465, R12 + B callbackasm1(SB) + MOVD $466, R12 + B callbackasm1(SB) + MOVD $467, R12 + B callbackasm1(SB) + MOVD $468, R12 + B callbackasm1(SB) + MOVD $469, R12 + B callbackasm1(SB) + MOVD $470, R12 + B callbackasm1(SB) + MOVD $471, R12 + B callbackasm1(SB) + MOVD $472, R12 + B callbackasm1(SB) + MOVD $473, R12 + B callbackasm1(SB) + MOVD $474, R12 + B callbackasm1(SB) + MOVD $475, R12 + B callbackasm1(SB) + MOVD $476, R12 + B callbackasm1(SB) + MOVD $477, R12 + B callbackasm1(SB) + MOVD $478, R12 + B callbackasm1(SB) + MOVD $479, R12 + B callbackasm1(SB) + MOVD $480, R12 + B callbackasm1(SB) + MOVD $481, R12 + B callbackasm1(SB) + MOVD $482, R12 + B callbackasm1(SB) + MOVD $483, R12 + B callbackasm1(SB) + MOVD $484, R12 + B callbackasm1(SB) + MOVD $485, R12 + B callbackasm1(SB) + MOVD $486, R12 + B callbackasm1(SB) + MOVD $487, R12 + B callbackasm1(SB) + MOVD $488, R12 + B callbackasm1(SB) + MOVD $489, R12 + B callbackasm1(SB) + MOVD $490, R12 + B callbackasm1(SB) + MOVD $491, R12 + B callbackasm1(SB) + MOVD $492, R12 + B callbackasm1(SB) + MOVD $493, R12 + B callbackasm1(SB) + MOVD $494, R12 + B callbackasm1(SB) + MOVD $495, R12 + B callbackasm1(SB) + MOVD $496, R12 + B callbackasm1(SB) + MOVD $497, R12 + B callbackasm1(SB) + MOVD $498, R12 + B callbackasm1(SB) + MOVD $499, R12 + B callbackasm1(SB) + MOVD $500, R12 + B callbackasm1(SB) + MOVD $501, R12 + B callbackasm1(SB) + MOVD $502, R12 + B callbackasm1(SB) + MOVD $503, R12 + B callbackasm1(SB) + MOVD $504, R12 + B callbackasm1(SB) + MOVD $505, R12 + B callbackasm1(SB) + MOVD $506, R12 + B callbackasm1(SB) + MOVD $507, R12 + B callbackasm1(SB) + MOVD $508, R12 + B callbackasm1(SB) + MOVD $509, R12 + B callbackasm1(SB) + MOVD $510, R12 + B callbackasm1(SB) + MOVD $511, R12 + B callbackasm1(SB) + MOVD $512, R12 + B callbackasm1(SB) + MOVD $513, R12 + B callbackasm1(SB) + MOVD $514, R12 + B callbackasm1(SB) + MOVD $515, R12 + B callbackasm1(SB) + MOVD $516, R12 + B callbackasm1(SB) + MOVD $517, R12 + B callbackasm1(SB) + MOVD $518, R12 + B callbackasm1(SB) + MOVD $519, R12 + B callbackasm1(SB) + MOVD $520, R12 + B callbackasm1(SB) + MOVD $521, R12 + B callbackasm1(SB) + MOVD $522, R12 + B callbackasm1(SB) + MOVD $523, R12 + B callbackasm1(SB) + MOVD $524, R12 + B callbackasm1(SB) + MOVD $525, R12 + B callbackasm1(SB) + MOVD $526, R12 + B callbackasm1(SB) + MOVD $527, R12 + B callbackasm1(SB) + MOVD $528, R12 + B callbackasm1(SB) + MOVD $529, R12 + B callbackasm1(SB) + MOVD $530, R12 + B callbackasm1(SB) + MOVD $531, R12 + B callbackasm1(SB) + MOVD $532, R12 + B callbackasm1(SB) + MOVD $533, R12 + B callbackasm1(SB) + MOVD $534, R12 + B callbackasm1(SB) + MOVD $535, R12 + B callbackasm1(SB) + MOVD $536, R12 + B callbackasm1(SB) + MOVD $537, R12 + B callbackasm1(SB) + MOVD $538, R12 + B 
callbackasm1(SB) + MOVD $539, R12 + B callbackasm1(SB) + MOVD $540, R12 + B callbackasm1(SB) + MOVD $541, R12 + B callbackasm1(SB) + MOVD $542, R12 + B callbackasm1(SB) + MOVD $543, R12 + B callbackasm1(SB) + MOVD $544, R12 + B callbackasm1(SB) + MOVD $545, R12 + B callbackasm1(SB) + MOVD $546, R12 + B callbackasm1(SB) + MOVD $547, R12 + B callbackasm1(SB) + MOVD $548, R12 + B callbackasm1(SB) + MOVD $549, R12 + B callbackasm1(SB) + MOVD $550, R12 + B callbackasm1(SB) + MOVD $551, R12 + B callbackasm1(SB) + MOVD $552, R12 + B callbackasm1(SB) + MOVD $553, R12 + B callbackasm1(SB) + MOVD $554, R12 + B callbackasm1(SB) + MOVD $555, R12 + B callbackasm1(SB) + MOVD $556, R12 + B callbackasm1(SB) + MOVD $557, R12 + B callbackasm1(SB) + MOVD $558, R12 + B callbackasm1(SB) + MOVD $559, R12 + B callbackasm1(SB) + MOVD $560, R12 + B callbackasm1(SB) + MOVD $561, R12 + B callbackasm1(SB) + MOVD $562, R12 + B callbackasm1(SB) + MOVD $563, R12 + B callbackasm1(SB) + MOVD $564, R12 + B callbackasm1(SB) + MOVD $565, R12 + B callbackasm1(SB) + MOVD $566, R12 + B callbackasm1(SB) + MOVD $567, R12 + B callbackasm1(SB) + MOVD $568, R12 + B callbackasm1(SB) + MOVD $569, R12 + B callbackasm1(SB) + MOVD $570, R12 + B callbackasm1(SB) + MOVD $571, R12 + B callbackasm1(SB) + MOVD $572, R12 + B callbackasm1(SB) + MOVD $573, R12 + B callbackasm1(SB) + MOVD $574, R12 + B callbackasm1(SB) + MOVD $575, R12 + B callbackasm1(SB) + MOVD $576, R12 + B callbackasm1(SB) + MOVD $577, R12 + B callbackasm1(SB) + MOVD $578, R12 + B callbackasm1(SB) + MOVD $579, R12 + B callbackasm1(SB) + MOVD $580, R12 + B callbackasm1(SB) + MOVD $581, R12 + B callbackasm1(SB) + MOVD $582, R12 + B callbackasm1(SB) + MOVD $583, R12 + B callbackasm1(SB) + MOVD $584, R12 + B callbackasm1(SB) + MOVD $585, R12 + B callbackasm1(SB) + MOVD $586, R12 + B callbackasm1(SB) + MOVD $587, R12 + B callbackasm1(SB) + MOVD $588, R12 + B callbackasm1(SB) + MOVD $589, R12 + B callbackasm1(SB) + MOVD $590, R12 + B callbackasm1(SB) + MOVD $591, R12 + B callbackasm1(SB) + MOVD $592, R12 + B callbackasm1(SB) + MOVD $593, R12 + B callbackasm1(SB) + MOVD $594, R12 + B callbackasm1(SB) + MOVD $595, R12 + B callbackasm1(SB) + MOVD $596, R12 + B callbackasm1(SB) + MOVD $597, R12 + B callbackasm1(SB) + MOVD $598, R12 + B callbackasm1(SB) + MOVD $599, R12 + B callbackasm1(SB) + MOVD $600, R12 + B callbackasm1(SB) + MOVD $601, R12 + B callbackasm1(SB) + MOVD $602, R12 + B callbackasm1(SB) + MOVD $603, R12 + B callbackasm1(SB) + MOVD $604, R12 + B callbackasm1(SB) + MOVD $605, R12 + B callbackasm1(SB) + MOVD $606, R12 + B callbackasm1(SB) + MOVD $607, R12 + B callbackasm1(SB) + MOVD $608, R12 + B callbackasm1(SB) + MOVD $609, R12 + B callbackasm1(SB) + MOVD $610, R12 + B callbackasm1(SB) + MOVD $611, R12 + B callbackasm1(SB) + MOVD $612, R12 + B callbackasm1(SB) + MOVD $613, R12 + B callbackasm1(SB) + MOVD $614, R12 + B callbackasm1(SB) + MOVD $615, R12 + B callbackasm1(SB) + MOVD $616, R12 + B callbackasm1(SB) + MOVD $617, R12 + B callbackasm1(SB) + MOVD $618, R12 + B callbackasm1(SB) + MOVD $619, R12 + B callbackasm1(SB) + MOVD $620, R12 + B callbackasm1(SB) + MOVD $621, R12 + B callbackasm1(SB) + MOVD $622, R12 + B callbackasm1(SB) + MOVD $623, R12 + B callbackasm1(SB) + MOVD $624, R12 + B callbackasm1(SB) + MOVD $625, R12 + B callbackasm1(SB) + MOVD $626, R12 + B callbackasm1(SB) + MOVD $627, R12 + B callbackasm1(SB) + MOVD $628, R12 + B callbackasm1(SB) + MOVD $629, R12 + B callbackasm1(SB) + MOVD $630, R12 + B callbackasm1(SB) + MOVD $631, R12 + B callbackasm1(SB) + 
MOVD $632, R12 + B callbackasm1(SB) + MOVD $633, R12 + B callbackasm1(SB) + MOVD $634, R12 + B callbackasm1(SB) + MOVD $635, R12 + B callbackasm1(SB) + MOVD $636, R12 + B callbackasm1(SB) + MOVD $637, R12 + B callbackasm1(SB) + MOVD $638, R12 + B callbackasm1(SB) + MOVD $639, R12 + B callbackasm1(SB) + MOVD $640, R12 + B callbackasm1(SB) + MOVD $641, R12 + B callbackasm1(SB) + MOVD $642, R12 + B callbackasm1(SB) + MOVD $643, R12 + B callbackasm1(SB) + MOVD $644, R12 + B callbackasm1(SB) + MOVD $645, R12 + B callbackasm1(SB) + MOVD $646, R12 + B callbackasm1(SB) + MOVD $647, R12 + B callbackasm1(SB) + MOVD $648, R12 + B callbackasm1(SB) + MOVD $649, R12 + B callbackasm1(SB) + MOVD $650, R12 + B callbackasm1(SB) + MOVD $651, R12 + B callbackasm1(SB) + MOVD $652, R12 + B callbackasm1(SB) + MOVD $653, R12 + B callbackasm1(SB) + MOVD $654, R12 + B callbackasm1(SB) + MOVD $655, R12 + B callbackasm1(SB) + MOVD $656, R12 + B callbackasm1(SB) + MOVD $657, R12 + B callbackasm1(SB) + MOVD $658, R12 + B callbackasm1(SB) + MOVD $659, R12 + B callbackasm1(SB) + MOVD $660, R12 + B callbackasm1(SB) + MOVD $661, R12 + B callbackasm1(SB) + MOVD $662, R12 + B callbackasm1(SB) + MOVD $663, R12 + B callbackasm1(SB) + MOVD $664, R12 + B callbackasm1(SB) + MOVD $665, R12 + B callbackasm1(SB) + MOVD $666, R12 + B callbackasm1(SB) + MOVD $667, R12 + B callbackasm1(SB) + MOVD $668, R12 + B callbackasm1(SB) + MOVD $669, R12 + B callbackasm1(SB) + MOVD $670, R12 + B callbackasm1(SB) + MOVD $671, R12 + B callbackasm1(SB) + MOVD $672, R12 + B callbackasm1(SB) + MOVD $673, R12 + B callbackasm1(SB) + MOVD $674, R12 + B callbackasm1(SB) + MOVD $675, R12 + B callbackasm1(SB) + MOVD $676, R12 + B callbackasm1(SB) + MOVD $677, R12 + B callbackasm1(SB) + MOVD $678, R12 + B callbackasm1(SB) + MOVD $679, R12 + B callbackasm1(SB) + MOVD $680, R12 + B callbackasm1(SB) + MOVD $681, R12 + B callbackasm1(SB) + MOVD $682, R12 + B callbackasm1(SB) + MOVD $683, R12 + B callbackasm1(SB) + MOVD $684, R12 + B callbackasm1(SB) + MOVD $685, R12 + B callbackasm1(SB) + MOVD $686, R12 + B callbackasm1(SB) + MOVD $687, R12 + B callbackasm1(SB) + MOVD $688, R12 + B callbackasm1(SB) + MOVD $689, R12 + B callbackasm1(SB) + MOVD $690, R12 + B callbackasm1(SB) + MOVD $691, R12 + B callbackasm1(SB) + MOVD $692, R12 + B callbackasm1(SB) + MOVD $693, R12 + B callbackasm1(SB) + MOVD $694, R12 + B callbackasm1(SB) + MOVD $695, R12 + B callbackasm1(SB) + MOVD $696, R12 + B callbackasm1(SB) + MOVD $697, R12 + B callbackasm1(SB) + MOVD $698, R12 + B callbackasm1(SB) + MOVD $699, R12 + B callbackasm1(SB) + MOVD $700, R12 + B callbackasm1(SB) + MOVD $701, R12 + B callbackasm1(SB) + MOVD $702, R12 + B callbackasm1(SB) + MOVD $703, R12 + B callbackasm1(SB) + MOVD $704, R12 + B callbackasm1(SB) + MOVD $705, R12 + B callbackasm1(SB) + MOVD $706, R12 + B callbackasm1(SB) + MOVD $707, R12 + B callbackasm1(SB) + MOVD $708, R12 + B callbackasm1(SB) + MOVD $709, R12 + B callbackasm1(SB) + MOVD $710, R12 + B callbackasm1(SB) + MOVD $711, R12 + B callbackasm1(SB) + MOVD $712, R12 + B callbackasm1(SB) + MOVD $713, R12 + B callbackasm1(SB) + MOVD $714, R12 + B callbackasm1(SB) + MOVD $715, R12 + B callbackasm1(SB) + MOVD $716, R12 + B callbackasm1(SB) + MOVD $717, R12 + B callbackasm1(SB) + MOVD $718, R12 + B callbackasm1(SB) + MOVD $719, R12 + B callbackasm1(SB) + MOVD $720, R12 + B callbackasm1(SB) + MOVD $721, R12 + B callbackasm1(SB) + MOVD $722, R12 + B callbackasm1(SB) + MOVD $723, R12 + B callbackasm1(SB) + MOVD $724, R12 + B callbackasm1(SB) + MOVD $725, R12 + B 
callbackasm1(SB) + MOVD $726, R12 + B callbackasm1(SB) + MOVD $727, R12 + B callbackasm1(SB) + MOVD $728, R12 + B callbackasm1(SB) + MOVD $729, R12 + B callbackasm1(SB) + MOVD $730, R12 + B callbackasm1(SB) + MOVD $731, R12 + B callbackasm1(SB) + MOVD $732, R12 + B callbackasm1(SB) + MOVD $733, R12 + B callbackasm1(SB) + MOVD $734, R12 + B callbackasm1(SB) + MOVD $735, R12 + B callbackasm1(SB) + MOVD $736, R12 + B callbackasm1(SB) + MOVD $737, R12 + B callbackasm1(SB) + MOVD $738, R12 + B callbackasm1(SB) + MOVD $739, R12 + B callbackasm1(SB) + MOVD $740, R12 + B callbackasm1(SB) + MOVD $741, R12 + B callbackasm1(SB) + MOVD $742, R12 + B callbackasm1(SB) + MOVD $743, R12 + B callbackasm1(SB) + MOVD $744, R12 + B callbackasm1(SB) + MOVD $745, R12 + B callbackasm1(SB) + MOVD $746, R12 + B callbackasm1(SB) + MOVD $747, R12 + B callbackasm1(SB) + MOVD $748, R12 + B callbackasm1(SB) + MOVD $749, R12 + B callbackasm1(SB) + MOVD $750, R12 + B callbackasm1(SB) + MOVD $751, R12 + B callbackasm1(SB) + MOVD $752, R12 + B callbackasm1(SB) + MOVD $753, R12 + B callbackasm1(SB) + MOVD $754, R12 + B callbackasm1(SB) + MOVD $755, R12 + B callbackasm1(SB) + MOVD $756, R12 + B callbackasm1(SB) + MOVD $757, R12 + B callbackasm1(SB) + MOVD $758, R12 + B callbackasm1(SB) + MOVD $759, R12 + B callbackasm1(SB) + MOVD $760, R12 + B callbackasm1(SB) + MOVD $761, R12 + B callbackasm1(SB) + MOVD $762, R12 + B callbackasm1(SB) + MOVD $763, R12 + B callbackasm1(SB) + MOVD $764, R12 + B callbackasm1(SB) + MOVD $765, R12 + B callbackasm1(SB) + MOVD $766, R12 + B callbackasm1(SB) + MOVD $767, R12 + B callbackasm1(SB) + MOVD $768, R12 + B callbackasm1(SB) + MOVD $769, R12 + B callbackasm1(SB) + MOVD $770, R12 + B callbackasm1(SB) + MOVD $771, R12 + B callbackasm1(SB) + MOVD $772, R12 + B callbackasm1(SB) + MOVD $773, R12 + B callbackasm1(SB) + MOVD $774, R12 + B callbackasm1(SB) + MOVD $775, R12 + B callbackasm1(SB) + MOVD $776, R12 + B callbackasm1(SB) + MOVD $777, R12 + B callbackasm1(SB) + MOVD $778, R12 + B callbackasm1(SB) + MOVD $779, R12 + B callbackasm1(SB) + MOVD $780, R12 + B callbackasm1(SB) + MOVD $781, R12 + B callbackasm1(SB) + MOVD $782, R12 + B callbackasm1(SB) + MOVD $783, R12 + B callbackasm1(SB) + MOVD $784, R12 + B callbackasm1(SB) + MOVD $785, R12 + B callbackasm1(SB) + MOVD $786, R12 + B callbackasm1(SB) + MOVD $787, R12 + B callbackasm1(SB) + MOVD $788, R12 + B callbackasm1(SB) + MOVD $789, R12 + B callbackasm1(SB) + MOVD $790, R12 + B callbackasm1(SB) + MOVD $791, R12 + B callbackasm1(SB) + MOVD $792, R12 + B callbackasm1(SB) + MOVD $793, R12 + B callbackasm1(SB) + MOVD $794, R12 + B callbackasm1(SB) + MOVD $795, R12 + B callbackasm1(SB) + MOVD $796, R12 + B callbackasm1(SB) + MOVD $797, R12 + B callbackasm1(SB) + MOVD $798, R12 + B callbackasm1(SB) + MOVD $799, R12 + B callbackasm1(SB) + MOVD $800, R12 + B callbackasm1(SB) + MOVD $801, R12 + B callbackasm1(SB) + MOVD $802, R12 + B callbackasm1(SB) + MOVD $803, R12 + B callbackasm1(SB) + MOVD $804, R12 + B callbackasm1(SB) + MOVD $805, R12 + B callbackasm1(SB) + MOVD $806, R12 + B callbackasm1(SB) + MOVD $807, R12 + B callbackasm1(SB) + MOVD $808, R12 + B callbackasm1(SB) + MOVD $809, R12 + B callbackasm1(SB) + MOVD $810, R12 + B callbackasm1(SB) + MOVD $811, R12 + B callbackasm1(SB) + MOVD $812, R12 + B callbackasm1(SB) + MOVD $813, R12 + B callbackasm1(SB) + MOVD $814, R12 + B callbackasm1(SB) + MOVD $815, R12 + B callbackasm1(SB) + MOVD $816, R12 + B callbackasm1(SB) + MOVD $817, R12 + B callbackasm1(SB) + MOVD $818, R12 + B callbackasm1(SB) + 
MOVD $819, R12 + B callbackasm1(SB) + MOVD $820, R12 + B callbackasm1(SB) + MOVD $821, R12 + B callbackasm1(SB) + MOVD $822, R12 + B callbackasm1(SB) + MOVD $823, R12 + B callbackasm1(SB) + MOVD $824, R12 + B callbackasm1(SB) + MOVD $825, R12 + B callbackasm1(SB) + MOVD $826, R12 + B callbackasm1(SB) + MOVD $827, R12 + B callbackasm1(SB) + MOVD $828, R12 + B callbackasm1(SB) + MOVD $829, R12 + B callbackasm1(SB) + MOVD $830, R12 + B callbackasm1(SB) + MOVD $831, R12 + B callbackasm1(SB) + MOVD $832, R12 + B callbackasm1(SB) + MOVD $833, R12 + B callbackasm1(SB) + MOVD $834, R12 + B callbackasm1(SB) + MOVD $835, R12 + B callbackasm1(SB) + MOVD $836, R12 + B callbackasm1(SB) + MOVD $837, R12 + B callbackasm1(SB) + MOVD $838, R12 + B callbackasm1(SB) + MOVD $839, R12 + B callbackasm1(SB) + MOVD $840, R12 + B callbackasm1(SB) + MOVD $841, R12 + B callbackasm1(SB) + MOVD $842, R12 + B callbackasm1(SB) + MOVD $843, R12 + B callbackasm1(SB) + MOVD $844, R12 + B callbackasm1(SB) + MOVD $845, R12 + B callbackasm1(SB) + MOVD $846, R12 + B callbackasm1(SB) + MOVD $847, R12 + B callbackasm1(SB) + MOVD $848, R12 + B callbackasm1(SB) + MOVD $849, R12 + B callbackasm1(SB) + MOVD $850, R12 + B callbackasm1(SB) + MOVD $851, R12 + B callbackasm1(SB) + MOVD $852, R12 + B callbackasm1(SB) + MOVD $853, R12 + B callbackasm1(SB) + MOVD $854, R12 + B callbackasm1(SB) + MOVD $855, R12 + B callbackasm1(SB) + MOVD $856, R12 + B callbackasm1(SB) + MOVD $857, R12 + B callbackasm1(SB) + MOVD $858, R12 + B callbackasm1(SB) + MOVD $859, R12 + B callbackasm1(SB) + MOVD $860, R12 + B callbackasm1(SB) + MOVD $861, R12 + B callbackasm1(SB) + MOVD $862, R12 + B callbackasm1(SB) + MOVD $863, R12 + B callbackasm1(SB) + MOVD $864, R12 + B callbackasm1(SB) + MOVD $865, R12 + B callbackasm1(SB) + MOVD $866, R12 + B callbackasm1(SB) + MOVD $867, R12 + B callbackasm1(SB) + MOVD $868, R12 + B callbackasm1(SB) + MOVD $869, R12 + B callbackasm1(SB) + MOVD $870, R12 + B callbackasm1(SB) + MOVD $871, R12 + B callbackasm1(SB) + MOVD $872, R12 + B callbackasm1(SB) + MOVD $873, R12 + B callbackasm1(SB) + MOVD $874, R12 + B callbackasm1(SB) + MOVD $875, R12 + B callbackasm1(SB) + MOVD $876, R12 + B callbackasm1(SB) + MOVD $877, R12 + B callbackasm1(SB) + MOVD $878, R12 + B callbackasm1(SB) + MOVD $879, R12 + B callbackasm1(SB) + MOVD $880, R12 + B callbackasm1(SB) + MOVD $881, R12 + B callbackasm1(SB) + MOVD $882, R12 + B callbackasm1(SB) + MOVD $883, R12 + B callbackasm1(SB) + MOVD $884, R12 + B callbackasm1(SB) + MOVD $885, R12 + B callbackasm1(SB) + MOVD $886, R12 + B callbackasm1(SB) + MOVD $887, R12 + B callbackasm1(SB) + MOVD $888, R12 + B callbackasm1(SB) + MOVD $889, R12 + B callbackasm1(SB) + MOVD $890, R12 + B callbackasm1(SB) + MOVD $891, R12 + B callbackasm1(SB) + MOVD $892, R12 + B callbackasm1(SB) + MOVD $893, R12 + B callbackasm1(SB) + MOVD $894, R12 + B callbackasm1(SB) + MOVD $895, R12 + B callbackasm1(SB) + MOVD $896, R12 + B callbackasm1(SB) + MOVD $897, R12 + B callbackasm1(SB) + MOVD $898, R12 + B callbackasm1(SB) + MOVD $899, R12 + B callbackasm1(SB) + MOVD $900, R12 + B callbackasm1(SB) + MOVD $901, R12 + B callbackasm1(SB) + MOVD $902, R12 + B callbackasm1(SB) + MOVD $903, R12 + B callbackasm1(SB) + MOVD $904, R12 + B callbackasm1(SB) + MOVD $905, R12 + B callbackasm1(SB) + MOVD $906, R12 + B callbackasm1(SB) + MOVD $907, R12 + B callbackasm1(SB) + MOVD $908, R12 + B callbackasm1(SB) + MOVD $909, R12 + B callbackasm1(SB) + MOVD $910, R12 + B callbackasm1(SB) + MOVD $911, R12 + B callbackasm1(SB) + MOVD $912, R12 + B 
callbackasm1(SB) + MOVD $913, R12 + B callbackasm1(SB) + MOVD $914, R12 + B callbackasm1(SB) + MOVD $915, R12 + B callbackasm1(SB) + MOVD $916, R12 + B callbackasm1(SB) + MOVD $917, R12 + B callbackasm1(SB) + MOVD $918, R12 + B callbackasm1(SB) + MOVD $919, R12 + B callbackasm1(SB) + MOVD $920, R12 + B callbackasm1(SB) + MOVD $921, R12 + B callbackasm1(SB) + MOVD $922, R12 + B callbackasm1(SB) + MOVD $923, R12 + B callbackasm1(SB) + MOVD $924, R12 + B callbackasm1(SB) + MOVD $925, R12 + B callbackasm1(SB) + MOVD $926, R12 + B callbackasm1(SB) + MOVD $927, R12 + B callbackasm1(SB) + MOVD $928, R12 + B callbackasm1(SB) + MOVD $929, R12 + B callbackasm1(SB) + MOVD $930, R12 + B callbackasm1(SB) + MOVD $931, R12 + B callbackasm1(SB) + MOVD $932, R12 + B callbackasm1(SB) + MOVD $933, R12 + B callbackasm1(SB) + MOVD $934, R12 + B callbackasm1(SB) + MOVD $935, R12 + B callbackasm1(SB) + MOVD $936, R12 + B callbackasm1(SB) + MOVD $937, R12 + B callbackasm1(SB) + MOVD $938, R12 + B callbackasm1(SB) + MOVD $939, R12 + B callbackasm1(SB) + MOVD $940, R12 + B callbackasm1(SB) + MOVD $941, R12 + B callbackasm1(SB) + MOVD $942, R12 + B callbackasm1(SB) + MOVD $943, R12 + B callbackasm1(SB) + MOVD $944, R12 + B callbackasm1(SB) + MOVD $945, R12 + B callbackasm1(SB) + MOVD $946, R12 + B callbackasm1(SB) + MOVD $947, R12 + B callbackasm1(SB) + MOVD $948, R12 + B callbackasm1(SB) + MOVD $949, R12 + B callbackasm1(SB) + MOVD $950, R12 + B callbackasm1(SB) + MOVD $951, R12 + B callbackasm1(SB) + MOVD $952, R12 + B callbackasm1(SB) + MOVD $953, R12 + B callbackasm1(SB) + MOVD $954, R12 + B callbackasm1(SB) + MOVD $955, R12 + B callbackasm1(SB) + MOVD $956, R12 + B callbackasm1(SB) + MOVD $957, R12 + B callbackasm1(SB) + MOVD $958, R12 + B callbackasm1(SB) + MOVD $959, R12 + B callbackasm1(SB) + MOVD $960, R12 + B callbackasm1(SB) + MOVD $961, R12 + B callbackasm1(SB) + MOVD $962, R12 + B callbackasm1(SB) + MOVD $963, R12 + B callbackasm1(SB) + MOVD $964, R12 + B callbackasm1(SB) + MOVD $965, R12 + B callbackasm1(SB) + MOVD $966, R12 + B callbackasm1(SB) + MOVD $967, R12 + B callbackasm1(SB) + MOVD $968, R12 + B callbackasm1(SB) + MOVD $969, R12 + B callbackasm1(SB) + MOVD $970, R12 + B callbackasm1(SB) + MOVD $971, R12 + B callbackasm1(SB) + MOVD $972, R12 + B callbackasm1(SB) + MOVD $973, R12 + B callbackasm1(SB) + MOVD $974, R12 + B callbackasm1(SB) + MOVD $975, R12 + B callbackasm1(SB) + MOVD $976, R12 + B callbackasm1(SB) + MOVD $977, R12 + B callbackasm1(SB) + MOVD $978, R12 + B callbackasm1(SB) + MOVD $979, R12 + B callbackasm1(SB) + MOVD $980, R12 + B callbackasm1(SB) + MOVD $981, R12 + B callbackasm1(SB) + MOVD $982, R12 + B callbackasm1(SB) + MOVD $983, R12 + B callbackasm1(SB) + MOVD $984, R12 + B callbackasm1(SB) + MOVD $985, R12 + B callbackasm1(SB) + MOVD $986, R12 + B callbackasm1(SB) + MOVD $987, R12 + B callbackasm1(SB) + MOVD $988, R12 + B callbackasm1(SB) + MOVD $989, R12 + B callbackasm1(SB) + MOVD $990, R12 + B callbackasm1(SB) + MOVD $991, R12 + B callbackasm1(SB) + MOVD $992, R12 + B callbackasm1(SB) + MOVD $993, R12 + B callbackasm1(SB) + MOVD $994, R12 + B callbackasm1(SB) + MOVD $995, R12 + B callbackasm1(SB) + MOVD $996, R12 + B callbackasm1(SB) + MOVD $997, R12 + B callbackasm1(SB) + MOVD $998, R12 + B callbackasm1(SB) + MOVD $999, R12 + B callbackasm1(SB) + MOVD $1000, R12 + B callbackasm1(SB) + MOVD $1001, R12 + B callbackasm1(SB) + MOVD $1002, R12 + B callbackasm1(SB) + MOVD $1003, R12 + B callbackasm1(SB) + MOVD $1004, R12 + B callbackasm1(SB) + MOVD $1005, R12 + B 
callbackasm1(SB) + MOVD $1006, R12 + B callbackasm1(SB) + MOVD $1007, R12 + B callbackasm1(SB) + MOVD $1008, R12 + B callbackasm1(SB) + MOVD $1009, R12 + B callbackasm1(SB) + MOVD $1010, R12 + B callbackasm1(SB) + MOVD $1011, R12 + B callbackasm1(SB) + MOVD $1012, R12 + B callbackasm1(SB) + MOVD $1013, R12 + B callbackasm1(SB) + MOVD $1014, R12 + B callbackasm1(SB) + MOVD $1015, R12 + B callbackasm1(SB) + MOVD $1016, R12 + B callbackasm1(SB) + MOVD $1017, R12 + B callbackasm1(SB) + MOVD $1018, R12 + B callbackasm1(SB) + MOVD $1019, R12 + B callbackasm1(SB) + MOVD $1020, R12 + B callbackasm1(SB) + MOVD $1021, R12 + B callbackasm1(SB) + MOVD $1022, R12 + B callbackasm1(SB) + MOVD $1023, R12 + B callbackasm1(SB) + MOVD $1024, R12 + B callbackasm1(SB) + MOVD $1025, R12 + B callbackasm1(SB) + MOVD $1026, R12 + B callbackasm1(SB) + MOVD $1027, R12 + B callbackasm1(SB) + MOVD $1028, R12 + B callbackasm1(SB) + MOVD $1029, R12 + B callbackasm1(SB) + MOVD $1030, R12 + B callbackasm1(SB) + MOVD $1031, R12 + B callbackasm1(SB) + MOVD $1032, R12 + B callbackasm1(SB) + MOVD $1033, R12 + B callbackasm1(SB) + MOVD $1034, R12 + B callbackasm1(SB) + MOVD $1035, R12 + B callbackasm1(SB) + MOVD $1036, R12 + B callbackasm1(SB) + MOVD $1037, R12 + B callbackasm1(SB) + MOVD $1038, R12 + B callbackasm1(SB) + MOVD $1039, R12 + B callbackasm1(SB) + MOVD $1040, R12 + B callbackasm1(SB) + MOVD $1041, R12 + B callbackasm1(SB) + MOVD $1042, R12 + B callbackasm1(SB) + MOVD $1043, R12 + B callbackasm1(SB) + MOVD $1044, R12 + B callbackasm1(SB) + MOVD $1045, R12 + B callbackasm1(SB) + MOVD $1046, R12 + B callbackasm1(SB) + MOVD $1047, R12 + B callbackasm1(SB) + MOVD $1048, R12 + B callbackasm1(SB) + MOVD $1049, R12 + B callbackasm1(SB) + MOVD $1050, R12 + B callbackasm1(SB) + MOVD $1051, R12 + B callbackasm1(SB) + MOVD $1052, R12 + B callbackasm1(SB) + MOVD $1053, R12 + B callbackasm1(SB) + MOVD $1054, R12 + B callbackasm1(SB) + MOVD $1055, R12 + B callbackasm1(SB) + MOVD $1056, R12 + B callbackasm1(SB) + MOVD $1057, R12 + B callbackasm1(SB) + MOVD $1058, R12 + B callbackasm1(SB) + MOVD $1059, R12 + B callbackasm1(SB) + MOVD $1060, R12 + B callbackasm1(SB) + MOVD $1061, R12 + B callbackasm1(SB) + MOVD $1062, R12 + B callbackasm1(SB) + MOVD $1063, R12 + B callbackasm1(SB) + MOVD $1064, R12 + B callbackasm1(SB) + MOVD $1065, R12 + B callbackasm1(SB) + MOVD $1066, R12 + B callbackasm1(SB) + MOVD $1067, R12 + B callbackasm1(SB) + MOVD $1068, R12 + B callbackasm1(SB) + MOVD $1069, R12 + B callbackasm1(SB) + MOVD $1070, R12 + B callbackasm1(SB) + MOVD $1071, R12 + B callbackasm1(SB) + MOVD $1072, R12 + B callbackasm1(SB) + MOVD $1073, R12 + B callbackasm1(SB) + MOVD $1074, R12 + B callbackasm1(SB) + MOVD $1075, R12 + B callbackasm1(SB) + MOVD $1076, R12 + B callbackasm1(SB) + MOVD $1077, R12 + B callbackasm1(SB) + MOVD $1078, R12 + B callbackasm1(SB) + MOVD $1079, R12 + B callbackasm1(SB) + MOVD $1080, R12 + B callbackasm1(SB) + MOVD $1081, R12 + B callbackasm1(SB) + MOVD $1082, R12 + B callbackasm1(SB) + MOVD $1083, R12 + B callbackasm1(SB) + MOVD $1084, R12 + B callbackasm1(SB) + MOVD $1085, R12 + B callbackasm1(SB) + MOVD $1086, R12 + B callbackasm1(SB) + MOVD $1087, R12 + B callbackasm1(SB) + MOVD $1088, R12 + B callbackasm1(SB) + MOVD $1089, R12 + B callbackasm1(SB) + MOVD $1090, R12 + B callbackasm1(SB) + MOVD $1091, R12 + B callbackasm1(SB) + MOVD $1092, R12 + B callbackasm1(SB) + MOVD $1093, R12 + B callbackasm1(SB) + MOVD $1094, R12 + B callbackasm1(SB) + MOVD $1095, R12 + B callbackasm1(SB) + MOVD $1096, R12 + B 
callbackasm1(SB) + MOVD $1097, R12 + B callbackasm1(SB) + MOVD $1098, R12 + B callbackasm1(SB) + MOVD $1099, R12 + B callbackasm1(SB) + MOVD $1100, R12 + B callbackasm1(SB) + MOVD $1101, R12 + B callbackasm1(SB) + MOVD $1102, R12 + B callbackasm1(SB) + MOVD $1103, R12 + B callbackasm1(SB) + MOVD $1104, R12 + B callbackasm1(SB) + MOVD $1105, R12 + B callbackasm1(SB) + MOVD $1106, R12 + B callbackasm1(SB) + MOVD $1107, R12 + B callbackasm1(SB) + MOVD $1108, R12 + B callbackasm1(SB) + MOVD $1109, R12 + B callbackasm1(SB) + MOVD $1110, R12 + B callbackasm1(SB) + MOVD $1111, R12 + B callbackasm1(SB) + MOVD $1112, R12 + B callbackasm1(SB) + MOVD $1113, R12 + B callbackasm1(SB) + MOVD $1114, R12 + B callbackasm1(SB) + MOVD $1115, R12 + B callbackasm1(SB) + MOVD $1116, R12 + B callbackasm1(SB) + MOVD $1117, R12 + B callbackasm1(SB) + MOVD $1118, R12 + B callbackasm1(SB) + MOVD $1119, R12 + B callbackasm1(SB) + MOVD $1120, R12 + B callbackasm1(SB) + MOVD $1121, R12 + B callbackasm1(SB) + MOVD $1122, R12 + B callbackasm1(SB) + MOVD $1123, R12 + B callbackasm1(SB) + MOVD $1124, R12 + B callbackasm1(SB) + MOVD $1125, R12 + B callbackasm1(SB) + MOVD $1126, R12 + B callbackasm1(SB) + MOVD $1127, R12 + B callbackasm1(SB) + MOVD $1128, R12 + B callbackasm1(SB) + MOVD $1129, R12 + B callbackasm1(SB) + MOVD $1130, R12 + B callbackasm1(SB) + MOVD $1131, R12 + B callbackasm1(SB) + MOVD $1132, R12 + B callbackasm1(SB) + MOVD $1133, R12 + B callbackasm1(SB) + MOVD $1134, R12 + B callbackasm1(SB) + MOVD $1135, R12 + B callbackasm1(SB) + MOVD $1136, R12 + B callbackasm1(SB) + MOVD $1137, R12 + B callbackasm1(SB) + MOVD $1138, R12 + B callbackasm1(SB) + MOVD $1139, R12 + B callbackasm1(SB) + MOVD $1140, R12 + B callbackasm1(SB) + MOVD $1141, R12 + B callbackasm1(SB) + MOVD $1142, R12 + B callbackasm1(SB) + MOVD $1143, R12 + B callbackasm1(SB) + MOVD $1144, R12 + B callbackasm1(SB) + MOVD $1145, R12 + B callbackasm1(SB) + MOVD $1146, R12 + B callbackasm1(SB) + MOVD $1147, R12 + B callbackasm1(SB) + MOVD $1148, R12 + B callbackasm1(SB) + MOVD $1149, R12 + B callbackasm1(SB) + MOVD $1150, R12 + B callbackasm1(SB) + MOVD $1151, R12 + B callbackasm1(SB) + MOVD $1152, R12 + B callbackasm1(SB) + MOVD $1153, R12 + B callbackasm1(SB) + MOVD $1154, R12 + B callbackasm1(SB) + MOVD $1155, R12 + B callbackasm1(SB) + MOVD $1156, R12 + B callbackasm1(SB) + MOVD $1157, R12 + B callbackasm1(SB) + MOVD $1158, R12 + B callbackasm1(SB) + MOVD $1159, R12 + B callbackasm1(SB) + MOVD $1160, R12 + B callbackasm1(SB) + MOVD $1161, R12 + B callbackasm1(SB) + MOVD $1162, R12 + B callbackasm1(SB) + MOVD $1163, R12 + B callbackasm1(SB) + MOVD $1164, R12 + B callbackasm1(SB) + MOVD $1165, R12 + B callbackasm1(SB) + MOVD $1166, R12 + B callbackasm1(SB) + MOVD $1167, R12 + B callbackasm1(SB) + MOVD $1168, R12 + B callbackasm1(SB) + MOVD $1169, R12 + B callbackasm1(SB) + MOVD $1170, R12 + B callbackasm1(SB) + MOVD $1171, R12 + B callbackasm1(SB) + MOVD $1172, R12 + B callbackasm1(SB) + MOVD $1173, R12 + B callbackasm1(SB) + MOVD $1174, R12 + B callbackasm1(SB) + MOVD $1175, R12 + B callbackasm1(SB) + MOVD $1176, R12 + B callbackasm1(SB) + MOVD $1177, R12 + B callbackasm1(SB) + MOVD $1178, R12 + B callbackasm1(SB) + MOVD $1179, R12 + B callbackasm1(SB) + MOVD $1180, R12 + B callbackasm1(SB) + MOVD $1181, R12 + B callbackasm1(SB) + MOVD $1182, R12 + B callbackasm1(SB) + MOVD $1183, R12 + B callbackasm1(SB) + MOVD $1184, R12 + B callbackasm1(SB) + MOVD $1185, R12 + B callbackasm1(SB) + MOVD $1186, R12 + B callbackasm1(SB) + MOVD $1187, R12 + B 
callbackasm1(SB) + MOVD $1188, R12 + B callbackasm1(SB) + MOVD $1189, R12 + B callbackasm1(SB) + MOVD $1190, R12 + B callbackasm1(SB) + MOVD $1191, R12 + B callbackasm1(SB) + MOVD $1192, R12 + B callbackasm1(SB) + MOVD $1193, R12 + B callbackasm1(SB) + MOVD $1194, R12 + B callbackasm1(SB) + MOVD $1195, R12 + B callbackasm1(SB) + MOVD $1196, R12 + B callbackasm1(SB) + MOVD $1197, R12 + B callbackasm1(SB) + MOVD $1198, R12 + B callbackasm1(SB) + MOVD $1199, R12 + B callbackasm1(SB) + MOVD $1200, R12 + B callbackasm1(SB) + MOVD $1201, R12 + B callbackasm1(SB) + MOVD $1202, R12 + B callbackasm1(SB) + MOVD $1203, R12 + B callbackasm1(SB) + MOVD $1204, R12 + B callbackasm1(SB) + MOVD $1205, R12 + B callbackasm1(SB) + MOVD $1206, R12 + B callbackasm1(SB) + MOVD $1207, R12 + B callbackasm1(SB) + MOVD $1208, R12 + B callbackasm1(SB) + MOVD $1209, R12 + B callbackasm1(SB) + MOVD $1210, R12 + B callbackasm1(SB) + MOVD $1211, R12 + B callbackasm1(SB) + MOVD $1212, R12 + B callbackasm1(SB) + MOVD $1213, R12 + B callbackasm1(SB) + MOVD $1214, R12 + B callbackasm1(SB) + MOVD $1215, R12 + B callbackasm1(SB) + MOVD $1216, R12 + B callbackasm1(SB) + MOVD $1217, R12 + B callbackasm1(SB) + MOVD $1218, R12 + B callbackasm1(SB) + MOVD $1219, R12 + B callbackasm1(SB) + MOVD $1220, R12 + B callbackasm1(SB) + MOVD $1221, R12 + B callbackasm1(SB) + MOVD $1222, R12 + B callbackasm1(SB) + MOVD $1223, R12 + B callbackasm1(SB) + MOVD $1224, R12 + B callbackasm1(SB) + MOVD $1225, R12 + B callbackasm1(SB) + MOVD $1226, R12 + B callbackasm1(SB) + MOVD $1227, R12 + B callbackasm1(SB) + MOVD $1228, R12 + B callbackasm1(SB) + MOVD $1229, R12 + B callbackasm1(SB) + MOVD $1230, R12 + B callbackasm1(SB) + MOVD $1231, R12 + B callbackasm1(SB) + MOVD $1232, R12 + B callbackasm1(SB) + MOVD $1233, R12 + B callbackasm1(SB) + MOVD $1234, R12 + B callbackasm1(SB) + MOVD $1235, R12 + B callbackasm1(SB) + MOVD $1236, R12 + B callbackasm1(SB) + MOVD $1237, R12 + B callbackasm1(SB) + MOVD $1238, R12 + B callbackasm1(SB) + MOVD $1239, R12 + B callbackasm1(SB) + MOVD $1240, R12 + B callbackasm1(SB) + MOVD $1241, R12 + B callbackasm1(SB) + MOVD $1242, R12 + B callbackasm1(SB) + MOVD $1243, R12 + B callbackasm1(SB) + MOVD $1244, R12 + B callbackasm1(SB) + MOVD $1245, R12 + B callbackasm1(SB) + MOVD $1246, R12 + B callbackasm1(SB) + MOVD $1247, R12 + B callbackasm1(SB) + MOVD $1248, R12 + B callbackasm1(SB) + MOVD $1249, R12 + B callbackasm1(SB) + MOVD $1250, R12 + B callbackasm1(SB) + MOVD $1251, R12 + B callbackasm1(SB) + MOVD $1252, R12 + B callbackasm1(SB) + MOVD $1253, R12 + B callbackasm1(SB) + MOVD $1254, R12 + B callbackasm1(SB) + MOVD $1255, R12 + B callbackasm1(SB) + MOVD $1256, R12 + B callbackasm1(SB) + MOVD $1257, R12 + B callbackasm1(SB) + MOVD $1258, R12 + B callbackasm1(SB) + MOVD $1259, R12 + B callbackasm1(SB) + MOVD $1260, R12 + B callbackasm1(SB) + MOVD $1261, R12 + B callbackasm1(SB) + MOVD $1262, R12 + B callbackasm1(SB) + MOVD $1263, R12 + B callbackasm1(SB) + MOVD $1264, R12 + B callbackasm1(SB) + MOVD $1265, R12 + B callbackasm1(SB) + MOVD $1266, R12 + B callbackasm1(SB) + MOVD $1267, R12 + B callbackasm1(SB) + MOVD $1268, R12 + B callbackasm1(SB) + MOVD $1269, R12 + B callbackasm1(SB) + MOVD $1270, R12 + B callbackasm1(SB) + MOVD $1271, R12 + B callbackasm1(SB) + MOVD $1272, R12 + B callbackasm1(SB) + MOVD $1273, R12 + B callbackasm1(SB) + MOVD $1274, R12 + B callbackasm1(SB) + MOVD $1275, R12 + B callbackasm1(SB) + MOVD $1276, R12 + B callbackasm1(SB) + MOVD $1277, R12 + B callbackasm1(SB) + MOVD $1278, R12 + B 
callbackasm1(SB) + MOVD $1279, R12 + B callbackasm1(SB) + MOVD $1280, R12 + B callbackasm1(SB) + MOVD $1281, R12 + B callbackasm1(SB) + MOVD $1282, R12 + B callbackasm1(SB) + MOVD $1283, R12 + B callbackasm1(SB) + MOVD $1284, R12 + B callbackasm1(SB) + MOVD $1285, R12 + B callbackasm1(SB) + MOVD $1286, R12 + B callbackasm1(SB) + MOVD $1287, R12 + B callbackasm1(SB) + MOVD $1288, R12 + B callbackasm1(SB) + MOVD $1289, R12 + B callbackasm1(SB) + MOVD $1290, R12 + B callbackasm1(SB) + MOVD $1291, R12 + B callbackasm1(SB) + MOVD $1292, R12 + B callbackasm1(SB) + MOVD $1293, R12 + B callbackasm1(SB) + MOVD $1294, R12 + B callbackasm1(SB) + MOVD $1295, R12 + B callbackasm1(SB) + MOVD $1296, R12 + B callbackasm1(SB) + MOVD $1297, R12 + B callbackasm1(SB) + MOVD $1298, R12 + B callbackasm1(SB) + MOVD $1299, R12 + B callbackasm1(SB) + MOVD $1300, R12 + B callbackasm1(SB) + MOVD $1301, R12 + B callbackasm1(SB) + MOVD $1302, R12 + B callbackasm1(SB) + MOVD $1303, R12 + B callbackasm1(SB) + MOVD $1304, R12 + B callbackasm1(SB) + MOVD $1305, R12 + B callbackasm1(SB) + MOVD $1306, R12 + B callbackasm1(SB) + MOVD $1307, R12 + B callbackasm1(SB) + MOVD $1308, R12 + B callbackasm1(SB) + MOVD $1309, R12 + B callbackasm1(SB) + MOVD $1310, R12 + B callbackasm1(SB) + MOVD $1311, R12 + B callbackasm1(SB) + MOVD $1312, R12 + B callbackasm1(SB) + MOVD $1313, R12 + B callbackasm1(SB) + MOVD $1314, R12 + B callbackasm1(SB) + MOVD $1315, R12 + B callbackasm1(SB) + MOVD $1316, R12 + B callbackasm1(SB) + MOVD $1317, R12 + B callbackasm1(SB) + MOVD $1318, R12 + B callbackasm1(SB) + MOVD $1319, R12 + B callbackasm1(SB) + MOVD $1320, R12 + B callbackasm1(SB) + MOVD $1321, R12 + B callbackasm1(SB) + MOVD $1322, R12 + B callbackasm1(SB) + MOVD $1323, R12 + B callbackasm1(SB) + MOVD $1324, R12 + B callbackasm1(SB) + MOVD $1325, R12 + B callbackasm1(SB) + MOVD $1326, R12 + B callbackasm1(SB) + MOVD $1327, R12 + B callbackasm1(SB) + MOVD $1328, R12 + B callbackasm1(SB) + MOVD $1329, R12 + B callbackasm1(SB) + MOVD $1330, R12 + B callbackasm1(SB) + MOVD $1331, R12 + B callbackasm1(SB) + MOVD $1332, R12 + B callbackasm1(SB) + MOVD $1333, R12 + B callbackasm1(SB) + MOVD $1334, R12 + B callbackasm1(SB) + MOVD $1335, R12 + B callbackasm1(SB) + MOVD $1336, R12 + B callbackasm1(SB) + MOVD $1337, R12 + B callbackasm1(SB) + MOVD $1338, R12 + B callbackasm1(SB) + MOVD $1339, R12 + B callbackasm1(SB) + MOVD $1340, R12 + B callbackasm1(SB) + MOVD $1341, R12 + B callbackasm1(SB) + MOVD $1342, R12 + B callbackasm1(SB) + MOVD $1343, R12 + B callbackasm1(SB) + MOVD $1344, R12 + B callbackasm1(SB) + MOVD $1345, R12 + B callbackasm1(SB) + MOVD $1346, R12 + B callbackasm1(SB) + MOVD $1347, R12 + B callbackasm1(SB) + MOVD $1348, R12 + B callbackasm1(SB) + MOVD $1349, R12 + B callbackasm1(SB) + MOVD $1350, R12 + B callbackasm1(SB) + MOVD $1351, R12 + B callbackasm1(SB) + MOVD $1352, R12 + B callbackasm1(SB) + MOVD $1353, R12 + B callbackasm1(SB) + MOVD $1354, R12 + B callbackasm1(SB) + MOVD $1355, R12 + B callbackasm1(SB) + MOVD $1356, R12 + B callbackasm1(SB) + MOVD $1357, R12 + B callbackasm1(SB) + MOVD $1358, R12 + B callbackasm1(SB) + MOVD $1359, R12 + B callbackasm1(SB) + MOVD $1360, R12 + B callbackasm1(SB) + MOVD $1361, R12 + B callbackasm1(SB) + MOVD $1362, R12 + B callbackasm1(SB) + MOVD $1363, R12 + B callbackasm1(SB) + MOVD $1364, R12 + B callbackasm1(SB) + MOVD $1365, R12 + B callbackasm1(SB) + MOVD $1366, R12 + B callbackasm1(SB) + MOVD $1367, R12 + B callbackasm1(SB) + MOVD $1368, R12 + B callbackasm1(SB) + MOVD $1369, R12 + B 
callbackasm1(SB) + MOVD $1370, R12 + B callbackasm1(SB) + MOVD $1371, R12 + B callbackasm1(SB) + MOVD $1372, R12 + B callbackasm1(SB) + MOVD $1373, R12 + B callbackasm1(SB) + MOVD $1374, R12 + B callbackasm1(SB) + MOVD $1375, R12 + B callbackasm1(SB) + MOVD $1376, R12 + B callbackasm1(SB) + MOVD $1377, R12 + B callbackasm1(SB) + MOVD $1378, R12 + B callbackasm1(SB) + MOVD $1379, R12 + B callbackasm1(SB) + MOVD $1380, R12 + B callbackasm1(SB) + MOVD $1381, R12 + B callbackasm1(SB) + MOVD $1382, R12 + B callbackasm1(SB) + MOVD $1383, R12 + B callbackasm1(SB) + MOVD $1384, R12 + B callbackasm1(SB) + MOVD $1385, R12 + B callbackasm1(SB) + MOVD $1386, R12 + B callbackasm1(SB) + MOVD $1387, R12 + B callbackasm1(SB) + MOVD $1388, R12 + B callbackasm1(SB) + MOVD $1389, R12 + B callbackasm1(SB) + MOVD $1390, R12 + B callbackasm1(SB) + MOVD $1391, R12 + B callbackasm1(SB) + MOVD $1392, R12 + B callbackasm1(SB) + MOVD $1393, R12 + B callbackasm1(SB) + MOVD $1394, R12 + B callbackasm1(SB) + MOVD $1395, R12 + B callbackasm1(SB) + MOVD $1396, R12 + B callbackasm1(SB) + MOVD $1397, R12 + B callbackasm1(SB) + MOVD $1398, R12 + B callbackasm1(SB) + MOVD $1399, R12 + B callbackasm1(SB) + MOVD $1400, R12 + B callbackasm1(SB) + MOVD $1401, R12 + B callbackasm1(SB) + MOVD $1402, R12 + B callbackasm1(SB) + MOVD $1403, R12 + B callbackasm1(SB) + MOVD $1404, R12 + B callbackasm1(SB) + MOVD $1405, R12 + B callbackasm1(SB) + MOVD $1406, R12 + B callbackasm1(SB) + MOVD $1407, R12 + B callbackasm1(SB) + MOVD $1408, R12 + B callbackasm1(SB) + MOVD $1409, R12 + B callbackasm1(SB) + MOVD $1410, R12 + B callbackasm1(SB) + MOVD $1411, R12 + B callbackasm1(SB) + MOVD $1412, R12 + B callbackasm1(SB) + MOVD $1413, R12 + B callbackasm1(SB) + MOVD $1414, R12 + B callbackasm1(SB) + MOVD $1415, R12 + B callbackasm1(SB) + MOVD $1416, R12 + B callbackasm1(SB) + MOVD $1417, R12 + B callbackasm1(SB) + MOVD $1418, R12 + B callbackasm1(SB) + MOVD $1419, R12 + B callbackasm1(SB) + MOVD $1420, R12 + B callbackasm1(SB) + MOVD $1421, R12 + B callbackasm1(SB) + MOVD $1422, R12 + B callbackasm1(SB) + MOVD $1423, R12 + B callbackasm1(SB) + MOVD $1424, R12 + B callbackasm1(SB) + MOVD $1425, R12 + B callbackasm1(SB) + MOVD $1426, R12 + B callbackasm1(SB) + MOVD $1427, R12 + B callbackasm1(SB) + MOVD $1428, R12 + B callbackasm1(SB) + MOVD $1429, R12 + B callbackasm1(SB) + MOVD $1430, R12 + B callbackasm1(SB) + MOVD $1431, R12 + B callbackasm1(SB) + MOVD $1432, R12 + B callbackasm1(SB) + MOVD $1433, R12 + B callbackasm1(SB) + MOVD $1434, R12 + B callbackasm1(SB) + MOVD $1435, R12 + B callbackasm1(SB) + MOVD $1436, R12 + B callbackasm1(SB) + MOVD $1437, R12 + B callbackasm1(SB) + MOVD $1438, R12 + B callbackasm1(SB) + MOVD $1439, R12 + B callbackasm1(SB) + MOVD $1440, R12 + B callbackasm1(SB) + MOVD $1441, R12 + B callbackasm1(SB) + MOVD $1442, R12 + B callbackasm1(SB) + MOVD $1443, R12 + B callbackasm1(SB) + MOVD $1444, R12 + B callbackasm1(SB) + MOVD $1445, R12 + B callbackasm1(SB) + MOVD $1446, R12 + B callbackasm1(SB) + MOVD $1447, R12 + B callbackasm1(SB) + MOVD $1448, R12 + B callbackasm1(SB) + MOVD $1449, R12 + B callbackasm1(SB) + MOVD $1450, R12 + B callbackasm1(SB) + MOVD $1451, R12 + B callbackasm1(SB) + MOVD $1452, R12 + B callbackasm1(SB) + MOVD $1453, R12 + B callbackasm1(SB) + MOVD $1454, R12 + B callbackasm1(SB) + MOVD $1455, R12 + B callbackasm1(SB) + MOVD $1456, R12 + B callbackasm1(SB) + MOVD $1457, R12 + B callbackasm1(SB) + MOVD $1458, R12 + B callbackasm1(SB) + MOVD $1459, R12 + B callbackasm1(SB) + MOVD $1460, R12 + B 
callbackasm1(SB) + MOVD $1461, R12 + B callbackasm1(SB) + MOVD $1462, R12 + B callbackasm1(SB) + MOVD $1463, R12 + B callbackasm1(SB) + MOVD $1464, R12 + B callbackasm1(SB) + MOVD $1465, R12 + B callbackasm1(SB) + MOVD $1466, R12 + B callbackasm1(SB) + MOVD $1467, R12 + B callbackasm1(SB) + MOVD $1468, R12 + B callbackasm1(SB) + MOVD $1469, R12 + B callbackasm1(SB) + MOVD $1470, R12 + B callbackasm1(SB) + MOVD $1471, R12 + B callbackasm1(SB) + MOVD $1472, R12 + B callbackasm1(SB) + MOVD $1473, R12 + B callbackasm1(SB) + MOVD $1474, R12 + B callbackasm1(SB) + MOVD $1475, R12 + B callbackasm1(SB) + MOVD $1476, R12 + B callbackasm1(SB) + MOVD $1477, R12 + B callbackasm1(SB) + MOVD $1478, R12 + B callbackasm1(SB) + MOVD $1479, R12 + B callbackasm1(SB) + MOVD $1480, R12 + B callbackasm1(SB) + MOVD $1481, R12 + B callbackasm1(SB) + MOVD $1482, R12 + B callbackasm1(SB) + MOVD $1483, R12 + B callbackasm1(SB) + MOVD $1484, R12 + B callbackasm1(SB) + MOVD $1485, R12 + B callbackasm1(SB) + MOVD $1486, R12 + B callbackasm1(SB) + MOVD $1487, R12 + B callbackasm1(SB) + MOVD $1488, R12 + B callbackasm1(SB) + MOVD $1489, R12 + B callbackasm1(SB) + MOVD $1490, R12 + B callbackasm1(SB) + MOVD $1491, R12 + B callbackasm1(SB) + MOVD $1492, R12 + B callbackasm1(SB) + MOVD $1493, R12 + B callbackasm1(SB) + MOVD $1494, R12 + B callbackasm1(SB) + MOVD $1495, R12 + B callbackasm1(SB) + MOVD $1496, R12 + B callbackasm1(SB) + MOVD $1497, R12 + B callbackasm1(SB) + MOVD $1498, R12 + B callbackasm1(SB) + MOVD $1499, R12 + B callbackasm1(SB) + MOVD $1500, R12 + B callbackasm1(SB) + MOVD $1501, R12 + B callbackasm1(SB) + MOVD $1502, R12 + B callbackasm1(SB) + MOVD $1503, R12 + B callbackasm1(SB) + MOVD $1504, R12 + B callbackasm1(SB) + MOVD $1505, R12 + B callbackasm1(SB) + MOVD $1506, R12 + B callbackasm1(SB) + MOVD $1507, R12 + B callbackasm1(SB) + MOVD $1508, R12 + B callbackasm1(SB) + MOVD $1509, R12 + B callbackasm1(SB) + MOVD $1510, R12 + B callbackasm1(SB) + MOVD $1511, R12 + B callbackasm1(SB) + MOVD $1512, R12 + B callbackasm1(SB) + MOVD $1513, R12 + B callbackasm1(SB) + MOVD $1514, R12 + B callbackasm1(SB) + MOVD $1515, R12 + B callbackasm1(SB) + MOVD $1516, R12 + B callbackasm1(SB) + MOVD $1517, R12 + B callbackasm1(SB) + MOVD $1518, R12 + B callbackasm1(SB) + MOVD $1519, R12 + B callbackasm1(SB) + MOVD $1520, R12 + B callbackasm1(SB) + MOVD $1521, R12 + B callbackasm1(SB) + MOVD $1522, R12 + B callbackasm1(SB) + MOVD $1523, R12 + B callbackasm1(SB) + MOVD $1524, R12 + B callbackasm1(SB) + MOVD $1525, R12 + B callbackasm1(SB) + MOVD $1526, R12 + B callbackasm1(SB) + MOVD $1527, R12 + B callbackasm1(SB) + MOVD $1528, R12 + B callbackasm1(SB) + MOVD $1529, R12 + B callbackasm1(SB) + MOVD $1530, R12 + B callbackasm1(SB) + MOVD $1531, R12 + B callbackasm1(SB) + MOVD $1532, R12 + B callbackasm1(SB) + MOVD $1533, R12 + B callbackasm1(SB) + MOVD $1534, R12 + B callbackasm1(SB) + MOVD $1535, R12 + B callbackasm1(SB) + MOVD $1536, R12 + B callbackasm1(SB) + MOVD $1537, R12 + B callbackasm1(SB) + MOVD $1538, R12 + B callbackasm1(SB) + MOVD $1539, R12 + B callbackasm1(SB) + MOVD $1540, R12 + B callbackasm1(SB) + MOVD $1541, R12 + B callbackasm1(SB) + MOVD $1542, R12 + B callbackasm1(SB) + MOVD $1543, R12 + B callbackasm1(SB) + MOVD $1544, R12 + B callbackasm1(SB) + MOVD $1545, R12 + B callbackasm1(SB) + MOVD $1546, R12 + B callbackasm1(SB) + MOVD $1547, R12 + B callbackasm1(SB) + MOVD $1548, R12 + B callbackasm1(SB) + MOVD $1549, R12 + B callbackasm1(SB) + MOVD $1550, R12 + B callbackasm1(SB) + MOVD $1551, R12 + B 
callbackasm1(SB) + MOVD $1552, R12 + B callbackasm1(SB) + MOVD $1553, R12 + B callbackasm1(SB) + MOVD $1554, R12 + B callbackasm1(SB) + MOVD $1555, R12 + B callbackasm1(SB) + MOVD $1556, R12 + B callbackasm1(SB) + MOVD $1557, R12 + B callbackasm1(SB) + MOVD $1558, R12 + B callbackasm1(SB) + MOVD $1559, R12 + B callbackasm1(SB) + MOVD $1560, R12 + B callbackasm1(SB) + MOVD $1561, R12 + B callbackasm1(SB) + MOVD $1562, R12 + B callbackasm1(SB) + MOVD $1563, R12 + B callbackasm1(SB) + MOVD $1564, R12 + B callbackasm1(SB) + MOVD $1565, R12 + B callbackasm1(SB) + MOVD $1566, R12 + B callbackasm1(SB) + MOVD $1567, R12 + B callbackasm1(SB) + MOVD $1568, R12 + B callbackasm1(SB) + MOVD $1569, R12 + B callbackasm1(SB) + MOVD $1570, R12 + B callbackasm1(SB) + MOVD $1571, R12 + B callbackasm1(SB) + MOVD $1572, R12 + B callbackasm1(SB) + MOVD $1573, R12 + B callbackasm1(SB) + MOVD $1574, R12 + B callbackasm1(SB) + MOVD $1575, R12 + B callbackasm1(SB) + MOVD $1576, R12 + B callbackasm1(SB) + MOVD $1577, R12 + B callbackasm1(SB) + MOVD $1578, R12 + B callbackasm1(SB) + MOVD $1579, R12 + B callbackasm1(SB) + MOVD $1580, R12 + B callbackasm1(SB) + MOVD $1581, R12 + B callbackasm1(SB) + MOVD $1582, R12 + B callbackasm1(SB) + MOVD $1583, R12 + B callbackasm1(SB) + MOVD $1584, R12 + B callbackasm1(SB) + MOVD $1585, R12 + B callbackasm1(SB) + MOVD $1586, R12 + B callbackasm1(SB) + MOVD $1587, R12 + B callbackasm1(SB) + MOVD $1588, R12 + B callbackasm1(SB) + MOVD $1589, R12 + B callbackasm1(SB) + MOVD $1590, R12 + B callbackasm1(SB) + MOVD $1591, R12 + B callbackasm1(SB) + MOVD $1592, R12 + B callbackasm1(SB) + MOVD $1593, R12 + B callbackasm1(SB) + MOVD $1594, R12 + B callbackasm1(SB) + MOVD $1595, R12 + B callbackasm1(SB) + MOVD $1596, R12 + B callbackasm1(SB) + MOVD $1597, R12 + B callbackasm1(SB) + MOVD $1598, R12 + B callbackasm1(SB) + MOVD $1599, R12 + B callbackasm1(SB) + MOVD $1600, R12 + B callbackasm1(SB) + MOVD $1601, R12 + B callbackasm1(SB) + MOVD $1602, R12 + B callbackasm1(SB) + MOVD $1603, R12 + B callbackasm1(SB) + MOVD $1604, R12 + B callbackasm1(SB) + MOVD $1605, R12 + B callbackasm1(SB) + MOVD $1606, R12 + B callbackasm1(SB) + MOVD $1607, R12 + B callbackasm1(SB) + MOVD $1608, R12 + B callbackasm1(SB) + MOVD $1609, R12 + B callbackasm1(SB) + MOVD $1610, R12 + B callbackasm1(SB) + MOVD $1611, R12 + B callbackasm1(SB) + MOVD $1612, R12 + B callbackasm1(SB) + MOVD $1613, R12 + B callbackasm1(SB) + MOVD $1614, R12 + B callbackasm1(SB) + MOVD $1615, R12 + B callbackasm1(SB) + MOVD $1616, R12 + B callbackasm1(SB) + MOVD $1617, R12 + B callbackasm1(SB) + MOVD $1618, R12 + B callbackasm1(SB) + MOVD $1619, R12 + B callbackasm1(SB) + MOVD $1620, R12 + B callbackasm1(SB) + MOVD $1621, R12 + B callbackasm1(SB) + MOVD $1622, R12 + B callbackasm1(SB) + MOVD $1623, R12 + B callbackasm1(SB) + MOVD $1624, R12 + B callbackasm1(SB) + MOVD $1625, R12 + B callbackasm1(SB) + MOVD $1626, R12 + B callbackasm1(SB) + MOVD $1627, R12 + B callbackasm1(SB) + MOVD $1628, R12 + B callbackasm1(SB) + MOVD $1629, R12 + B callbackasm1(SB) + MOVD $1630, R12 + B callbackasm1(SB) + MOVD $1631, R12 + B callbackasm1(SB) + MOVD $1632, R12 + B callbackasm1(SB) + MOVD $1633, R12 + B callbackasm1(SB) + MOVD $1634, R12 + B callbackasm1(SB) + MOVD $1635, R12 + B callbackasm1(SB) + MOVD $1636, R12 + B callbackasm1(SB) + MOVD $1637, R12 + B callbackasm1(SB) + MOVD $1638, R12 + B callbackasm1(SB) + MOVD $1639, R12 + B callbackasm1(SB) + MOVD $1640, R12 + B callbackasm1(SB) + MOVD $1641, R12 + B callbackasm1(SB) + MOVD $1642, R12 + B 
callbackasm1(SB) + MOVD $1643, R12 + B callbackasm1(SB) + MOVD $1644, R12 + B callbackasm1(SB) + MOVD $1645, R12 + B callbackasm1(SB) + MOVD $1646, R12 + B callbackasm1(SB) + MOVD $1647, R12 + B callbackasm1(SB) + MOVD $1648, R12 + B callbackasm1(SB) + MOVD $1649, R12 + B callbackasm1(SB) + MOVD $1650, R12 + B callbackasm1(SB) + MOVD $1651, R12 + B callbackasm1(SB) + MOVD $1652, R12 + B callbackasm1(SB) + MOVD $1653, R12 + B callbackasm1(SB) + MOVD $1654, R12 + B callbackasm1(SB) + MOVD $1655, R12 + B callbackasm1(SB) + MOVD $1656, R12 + B callbackasm1(SB) + MOVD $1657, R12 + B callbackasm1(SB) + MOVD $1658, R12 + B callbackasm1(SB) + MOVD $1659, R12 + B callbackasm1(SB) + MOVD $1660, R12 + B callbackasm1(SB) + MOVD $1661, R12 + B callbackasm1(SB) + MOVD $1662, R12 + B callbackasm1(SB) + MOVD $1663, R12 + B callbackasm1(SB) + MOVD $1664, R12 + B callbackasm1(SB) + MOVD $1665, R12 + B callbackasm1(SB) + MOVD $1666, R12 + B callbackasm1(SB) + MOVD $1667, R12 + B callbackasm1(SB) + MOVD $1668, R12 + B callbackasm1(SB) + MOVD $1669, R12 + B callbackasm1(SB) + MOVD $1670, R12 + B callbackasm1(SB) + MOVD $1671, R12 + B callbackasm1(SB) + MOVD $1672, R12 + B callbackasm1(SB) + MOVD $1673, R12 + B callbackasm1(SB) + MOVD $1674, R12 + B callbackasm1(SB) + MOVD $1675, R12 + B callbackasm1(SB) + MOVD $1676, R12 + B callbackasm1(SB) + MOVD $1677, R12 + B callbackasm1(SB) + MOVD $1678, R12 + B callbackasm1(SB) + MOVD $1679, R12 + B callbackasm1(SB) + MOVD $1680, R12 + B callbackasm1(SB) + MOVD $1681, R12 + B callbackasm1(SB) + MOVD $1682, R12 + B callbackasm1(SB) + MOVD $1683, R12 + B callbackasm1(SB) + MOVD $1684, R12 + B callbackasm1(SB) + MOVD $1685, R12 + B callbackasm1(SB) + MOVD $1686, R12 + B callbackasm1(SB) + MOVD $1687, R12 + B callbackasm1(SB) + MOVD $1688, R12 + B callbackasm1(SB) + MOVD $1689, R12 + B callbackasm1(SB) + MOVD $1690, R12 + B callbackasm1(SB) + MOVD $1691, R12 + B callbackasm1(SB) + MOVD $1692, R12 + B callbackasm1(SB) + MOVD $1693, R12 + B callbackasm1(SB) + MOVD $1694, R12 + B callbackasm1(SB) + MOVD $1695, R12 + B callbackasm1(SB) + MOVD $1696, R12 + B callbackasm1(SB) + MOVD $1697, R12 + B callbackasm1(SB) + MOVD $1698, R12 + B callbackasm1(SB) + MOVD $1699, R12 + B callbackasm1(SB) + MOVD $1700, R12 + B callbackasm1(SB) + MOVD $1701, R12 + B callbackasm1(SB) + MOVD $1702, R12 + B callbackasm1(SB) + MOVD $1703, R12 + B callbackasm1(SB) + MOVD $1704, R12 + B callbackasm1(SB) + MOVD $1705, R12 + B callbackasm1(SB) + MOVD $1706, R12 + B callbackasm1(SB) + MOVD $1707, R12 + B callbackasm1(SB) + MOVD $1708, R12 + B callbackasm1(SB) + MOVD $1709, R12 + B callbackasm1(SB) + MOVD $1710, R12 + B callbackasm1(SB) + MOVD $1711, R12 + B callbackasm1(SB) + MOVD $1712, R12 + B callbackasm1(SB) + MOVD $1713, R12 + B callbackasm1(SB) + MOVD $1714, R12 + B callbackasm1(SB) + MOVD $1715, R12 + B callbackasm1(SB) + MOVD $1716, R12 + B callbackasm1(SB) + MOVD $1717, R12 + B callbackasm1(SB) + MOVD $1718, R12 + B callbackasm1(SB) + MOVD $1719, R12 + B callbackasm1(SB) + MOVD $1720, R12 + B callbackasm1(SB) + MOVD $1721, R12 + B callbackasm1(SB) + MOVD $1722, R12 + B callbackasm1(SB) + MOVD $1723, R12 + B callbackasm1(SB) + MOVD $1724, R12 + B callbackasm1(SB) + MOVD $1725, R12 + B callbackasm1(SB) + MOVD $1726, R12 + B callbackasm1(SB) + MOVD $1727, R12 + B callbackasm1(SB) + MOVD $1728, R12 + B callbackasm1(SB) + MOVD $1729, R12 + B callbackasm1(SB) + MOVD $1730, R12 + B callbackasm1(SB) + MOVD $1731, R12 + B callbackasm1(SB) + MOVD $1732, R12 + B callbackasm1(SB) + MOVD $1733, R12 + B 
callbackasm1(SB) + MOVD $1734, R12 + B callbackasm1(SB) + MOVD $1735, R12 + B callbackasm1(SB) + MOVD $1736, R12 + B callbackasm1(SB) + MOVD $1737, R12 + B callbackasm1(SB) + MOVD $1738, R12 + B callbackasm1(SB) + MOVD $1739, R12 + B callbackasm1(SB) + MOVD $1740, R12 + B callbackasm1(SB) + MOVD $1741, R12 + B callbackasm1(SB) + MOVD $1742, R12 + B callbackasm1(SB) + MOVD $1743, R12 + B callbackasm1(SB) + MOVD $1744, R12 + B callbackasm1(SB) + MOVD $1745, R12 + B callbackasm1(SB) + MOVD $1746, R12 + B callbackasm1(SB) + MOVD $1747, R12 + B callbackasm1(SB) + MOVD $1748, R12 + B callbackasm1(SB) + MOVD $1749, R12 + B callbackasm1(SB) + MOVD $1750, R12 + B callbackasm1(SB) + MOVD $1751, R12 + B callbackasm1(SB) + MOVD $1752, R12 + B callbackasm1(SB) + MOVD $1753, R12 + B callbackasm1(SB) + MOVD $1754, R12 + B callbackasm1(SB) + MOVD $1755, R12 + B callbackasm1(SB) + MOVD $1756, R12 + B callbackasm1(SB) + MOVD $1757, R12 + B callbackasm1(SB) + MOVD $1758, R12 + B callbackasm1(SB) + MOVD $1759, R12 + B callbackasm1(SB) + MOVD $1760, R12 + B callbackasm1(SB) + MOVD $1761, R12 + B callbackasm1(SB) + MOVD $1762, R12 + B callbackasm1(SB) + MOVD $1763, R12 + B callbackasm1(SB) + MOVD $1764, R12 + B callbackasm1(SB) + MOVD $1765, R12 + B callbackasm1(SB) + MOVD $1766, R12 + B callbackasm1(SB) + MOVD $1767, R12 + B callbackasm1(SB) + MOVD $1768, R12 + B callbackasm1(SB) + MOVD $1769, R12 + B callbackasm1(SB) + MOVD $1770, R12 + B callbackasm1(SB) + MOVD $1771, R12 + B callbackasm1(SB) + MOVD $1772, R12 + B callbackasm1(SB) + MOVD $1773, R12 + B callbackasm1(SB) + MOVD $1774, R12 + B callbackasm1(SB) + MOVD $1775, R12 + B callbackasm1(SB) + MOVD $1776, R12 + B callbackasm1(SB) + MOVD $1777, R12 + B callbackasm1(SB) + MOVD $1778, R12 + B callbackasm1(SB) + MOVD $1779, R12 + B callbackasm1(SB) + MOVD $1780, R12 + B callbackasm1(SB) + MOVD $1781, R12 + B callbackasm1(SB) + MOVD $1782, R12 + B callbackasm1(SB) + MOVD $1783, R12 + B callbackasm1(SB) + MOVD $1784, R12 + B callbackasm1(SB) + MOVD $1785, R12 + B callbackasm1(SB) + MOVD $1786, R12 + B callbackasm1(SB) + MOVD $1787, R12 + B callbackasm1(SB) + MOVD $1788, R12 + B callbackasm1(SB) + MOVD $1789, R12 + B callbackasm1(SB) + MOVD $1790, R12 + B callbackasm1(SB) + MOVD $1791, R12 + B callbackasm1(SB) + MOVD $1792, R12 + B callbackasm1(SB) + MOVD $1793, R12 + B callbackasm1(SB) + MOVD $1794, R12 + B callbackasm1(SB) + MOVD $1795, R12 + B callbackasm1(SB) + MOVD $1796, R12 + B callbackasm1(SB) + MOVD $1797, R12 + B callbackasm1(SB) + MOVD $1798, R12 + B callbackasm1(SB) + MOVD $1799, R12 + B callbackasm1(SB) + MOVD $1800, R12 + B callbackasm1(SB) + MOVD $1801, R12 + B callbackasm1(SB) + MOVD $1802, R12 + B callbackasm1(SB) + MOVD $1803, R12 + B callbackasm1(SB) + MOVD $1804, R12 + B callbackasm1(SB) + MOVD $1805, R12 + B callbackasm1(SB) + MOVD $1806, R12 + B callbackasm1(SB) + MOVD $1807, R12 + B callbackasm1(SB) + MOVD $1808, R12 + B callbackasm1(SB) + MOVD $1809, R12 + B callbackasm1(SB) + MOVD $1810, R12 + B callbackasm1(SB) + MOVD $1811, R12 + B callbackasm1(SB) + MOVD $1812, R12 + B callbackasm1(SB) + MOVD $1813, R12 + B callbackasm1(SB) + MOVD $1814, R12 + B callbackasm1(SB) + MOVD $1815, R12 + B callbackasm1(SB) + MOVD $1816, R12 + B callbackasm1(SB) + MOVD $1817, R12 + B callbackasm1(SB) + MOVD $1818, R12 + B callbackasm1(SB) + MOVD $1819, R12 + B callbackasm1(SB) + MOVD $1820, R12 + B callbackasm1(SB) + MOVD $1821, R12 + B callbackasm1(SB) + MOVD $1822, R12 + B callbackasm1(SB) + MOVD $1823, R12 + B callbackasm1(SB) + MOVD $1824, R12 + B 
callbackasm1(SB) + MOVD $1825, R12 + B callbackasm1(SB) + MOVD $1826, R12 + B callbackasm1(SB) + MOVD $1827, R12 + B callbackasm1(SB) + MOVD $1828, R12 + B callbackasm1(SB) + MOVD $1829, R12 + B callbackasm1(SB) + MOVD $1830, R12 + B callbackasm1(SB) + MOVD $1831, R12 + B callbackasm1(SB) + MOVD $1832, R12 + B callbackasm1(SB) + MOVD $1833, R12 + B callbackasm1(SB) + MOVD $1834, R12 + B callbackasm1(SB) + MOVD $1835, R12 + B callbackasm1(SB) + MOVD $1836, R12 + B callbackasm1(SB) + MOVD $1837, R12 + B callbackasm1(SB) + MOVD $1838, R12 + B callbackasm1(SB) + MOVD $1839, R12 + B callbackasm1(SB) + MOVD $1840, R12 + B callbackasm1(SB) + MOVD $1841, R12 + B callbackasm1(SB) + MOVD $1842, R12 + B callbackasm1(SB) + MOVD $1843, R12 + B callbackasm1(SB) + MOVD $1844, R12 + B callbackasm1(SB) + MOVD $1845, R12 + B callbackasm1(SB) + MOVD $1846, R12 + B callbackasm1(SB) + MOVD $1847, R12 + B callbackasm1(SB) + MOVD $1848, R12 + B callbackasm1(SB) + MOVD $1849, R12 + B callbackasm1(SB) + MOVD $1850, R12 + B callbackasm1(SB) + MOVD $1851, R12 + B callbackasm1(SB) + MOVD $1852, R12 + B callbackasm1(SB) + MOVD $1853, R12 + B callbackasm1(SB) + MOVD $1854, R12 + B callbackasm1(SB) + MOVD $1855, R12 + B callbackasm1(SB) + MOVD $1856, R12 + B callbackasm1(SB) + MOVD $1857, R12 + B callbackasm1(SB) + MOVD $1858, R12 + B callbackasm1(SB) + MOVD $1859, R12 + B callbackasm1(SB) + MOVD $1860, R12 + B callbackasm1(SB) + MOVD $1861, R12 + B callbackasm1(SB) + MOVD $1862, R12 + B callbackasm1(SB) + MOVD $1863, R12 + B callbackasm1(SB) + MOVD $1864, R12 + B callbackasm1(SB) + MOVD $1865, R12 + B callbackasm1(SB) + MOVD $1866, R12 + B callbackasm1(SB) + MOVD $1867, R12 + B callbackasm1(SB) + MOVD $1868, R12 + B callbackasm1(SB) + MOVD $1869, R12 + B callbackasm1(SB) + MOVD $1870, R12 + B callbackasm1(SB) + MOVD $1871, R12 + B callbackasm1(SB) + MOVD $1872, R12 + B callbackasm1(SB) + MOVD $1873, R12 + B callbackasm1(SB) + MOVD $1874, R12 + B callbackasm1(SB) + MOVD $1875, R12 + B callbackasm1(SB) + MOVD $1876, R12 + B callbackasm1(SB) + MOVD $1877, R12 + B callbackasm1(SB) + MOVD $1878, R12 + B callbackasm1(SB) + MOVD $1879, R12 + B callbackasm1(SB) + MOVD $1880, R12 + B callbackasm1(SB) + MOVD $1881, R12 + B callbackasm1(SB) + MOVD $1882, R12 + B callbackasm1(SB) + MOVD $1883, R12 + B callbackasm1(SB) + MOVD $1884, R12 + B callbackasm1(SB) + MOVD $1885, R12 + B callbackasm1(SB) + MOVD $1886, R12 + B callbackasm1(SB) + MOVD $1887, R12 + B callbackasm1(SB) + MOVD $1888, R12 + B callbackasm1(SB) + MOVD $1889, R12 + B callbackasm1(SB) + MOVD $1890, R12 + B callbackasm1(SB) + MOVD $1891, R12 + B callbackasm1(SB) + MOVD $1892, R12 + B callbackasm1(SB) + MOVD $1893, R12 + B callbackasm1(SB) + MOVD $1894, R12 + B callbackasm1(SB) + MOVD $1895, R12 + B callbackasm1(SB) + MOVD $1896, R12 + B callbackasm1(SB) + MOVD $1897, R12 + B callbackasm1(SB) + MOVD $1898, R12 + B callbackasm1(SB) + MOVD $1899, R12 + B callbackasm1(SB) + MOVD $1900, R12 + B callbackasm1(SB) + MOVD $1901, R12 + B callbackasm1(SB) + MOVD $1902, R12 + B callbackasm1(SB) + MOVD $1903, R12 + B callbackasm1(SB) + MOVD $1904, R12 + B callbackasm1(SB) + MOVD $1905, R12 + B callbackasm1(SB) + MOVD $1906, R12 + B callbackasm1(SB) + MOVD $1907, R12 + B callbackasm1(SB) + MOVD $1908, R12 + B callbackasm1(SB) + MOVD $1909, R12 + B callbackasm1(SB) + MOVD $1910, R12 + B callbackasm1(SB) + MOVD $1911, R12 + B callbackasm1(SB) + MOVD $1912, R12 + B callbackasm1(SB) + MOVD $1913, R12 + B callbackasm1(SB) + MOVD $1914, R12 + B callbackasm1(SB) + MOVD $1915, R12 + B 
callbackasm1(SB) + MOVD $1916, R12 + B callbackasm1(SB) + MOVD $1917, R12 + B callbackasm1(SB) + MOVD $1918, R12 + B callbackasm1(SB) + MOVD $1919, R12 + B callbackasm1(SB) + MOVD $1920, R12 + B callbackasm1(SB) + MOVD $1921, R12 + B callbackasm1(SB) + MOVD $1922, R12 + B callbackasm1(SB) + MOVD $1923, R12 + B callbackasm1(SB) + MOVD $1924, R12 + B callbackasm1(SB) + MOVD $1925, R12 + B callbackasm1(SB) + MOVD $1926, R12 + B callbackasm1(SB) + MOVD $1927, R12 + B callbackasm1(SB) + MOVD $1928, R12 + B callbackasm1(SB) + MOVD $1929, R12 + B callbackasm1(SB) + MOVD $1930, R12 + B callbackasm1(SB) + MOVD $1931, R12 + B callbackasm1(SB) + MOVD $1932, R12 + B callbackasm1(SB) + MOVD $1933, R12 + B callbackasm1(SB) + MOVD $1934, R12 + B callbackasm1(SB) + MOVD $1935, R12 + B callbackasm1(SB) + MOVD $1936, R12 + B callbackasm1(SB) + MOVD $1937, R12 + B callbackasm1(SB) + MOVD $1938, R12 + B callbackasm1(SB) + MOVD $1939, R12 + B callbackasm1(SB) + MOVD $1940, R12 + B callbackasm1(SB) + MOVD $1941, R12 + B callbackasm1(SB) + MOVD $1942, R12 + B callbackasm1(SB) + MOVD $1943, R12 + B callbackasm1(SB) + MOVD $1944, R12 + B callbackasm1(SB) + MOVD $1945, R12 + B callbackasm1(SB) + MOVD $1946, R12 + B callbackasm1(SB) + MOVD $1947, R12 + B callbackasm1(SB) + MOVD $1948, R12 + B callbackasm1(SB) + MOVD $1949, R12 + B callbackasm1(SB) + MOVD $1950, R12 + B callbackasm1(SB) + MOVD $1951, R12 + B callbackasm1(SB) + MOVD $1952, R12 + B callbackasm1(SB) + MOVD $1953, R12 + B callbackasm1(SB) + MOVD $1954, R12 + B callbackasm1(SB) + MOVD $1955, R12 + B callbackasm1(SB) + MOVD $1956, R12 + B callbackasm1(SB) + MOVD $1957, R12 + B callbackasm1(SB) + MOVD $1958, R12 + B callbackasm1(SB) + MOVD $1959, R12 + B callbackasm1(SB) + MOVD $1960, R12 + B callbackasm1(SB) + MOVD $1961, R12 + B callbackasm1(SB) + MOVD $1962, R12 + B callbackasm1(SB) + MOVD $1963, R12 + B callbackasm1(SB) + MOVD $1964, R12 + B callbackasm1(SB) + MOVD $1965, R12 + B callbackasm1(SB) + MOVD $1966, R12 + B callbackasm1(SB) + MOVD $1967, R12 + B callbackasm1(SB) + MOVD $1968, R12 + B callbackasm1(SB) + MOVD $1969, R12 + B callbackasm1(SB) + MOVD $1970, R12 + B callbackasm1(SB) + MOVD $1971, R12 + B callbackasm1(SB) + MOVD $1972, R12 + B callbackasm1(SB) + MOVD $1973, R12 + B callbackasm1(SB) + MOVD $1974, R12 + B callbackasm1(SB) + MOVD $1975, R12 + B callbackasm1(SB) + MOVD $1976, R12 + B callbackasm1(SB) + MOVD $1977, R12 + B callbackasm1(SB) + MOVD $1978, R12 + B callbackasm1(SB) + MOVD $1979, R12 + B callbackasm1(SB) + MOVD $1980, R12 + B callbackasm1(SB) + MOVD $1981, R12 + B callbackasm1(SB) + MOVD $1982, R12 + B callbackasm1(SB) + MOVD $1983, R12 + B callbackasm1(SB) + MOVD $1984, R12 + B callbackasm1(SB) + MOVD $1985, R12 + B callbackasm1(SB) + MOVD $1986, R12 + B callbackasm1(SB) + MOVD $1987, R12 + B callbackasm1(SB) + MOVD $1988, R12 + B callbackasm1(SB) + MOVD $1989, R12 + B callbackasm1(SB) + MOVD $1990, R12 + B callbackasm1(SB) + MOVD $1991, R12 + B callbackasm1(SB) + MOVD $1992, R12 + B callbackasm1(SB) + MOVD $1993, R12 + B callbackasm1(SB) + MOVD $1994, R12 + B callbackasm1(SB) + MOVD $1995, R12 + B callbackasm1(SB) + MOVD $1996, R12 + B callbackasm1(SB) + MOVD $1997, R12 + B callbackasm1(SB) + MOVD $1998, R12 + B callbackasm1(SB) + MOVD $1999, R12 + B callbackasm1(SB) diff --git a/vendor/github.com/elastic/go-grok/.gitignore b/vendor/github.com/elastic/go-grok/.gitignore new file mode 100644 index 00000000000..3b735ec4a8c --- /dev/null +++ b/vendor/github.com/elastic/go-grok/.gitignore @@ -0,0 +1,21 @@ +# If you prefer the 
allow list template instead of the deny list, see community template: +# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore +# +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ + +# Go workspace file +go.work diff --git a/vendor/github.com/elastic/go-grok/.go-version b/vendor/github.com/elastic/go-grok/.go-version new file mode 100644 index 00000000000..054c858fbf3 --- /dev/null +++ b/vendor/github.com/elastic/go-grok/.go-version @@ -0,0 +1 @@ +1.22.5 \ No newline at end of file diff --git a/vendor/github.com/oklog/run/LICENSE b/vendor/github.com/elastic/go-grok/LICENSE similarity index 100% rename from vendor/github.com/oklog/run/LICENSE rename to vendor/github.com/elastic/go-grok/LICENSE diff --git a/vendor/github.com/elastic/go-grok/NOTICE.txt b/vendor/github.com/elastic/go-grok/NOTICE.txt new file mode 100644 index 00000000000..ecf8e3c9234 --- /dev/null +++ b/vendor/github.com/elastic/go-grok/NOTICE.txt @@ -0,0 +1,2024 @@ +Go-Grok +Copyright 2022-2024 Elasticsearch BV + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +================================================================================ +Third party libraries used by the Go-Grok Libraries: +================================================================================ + + +-------------------------------------------------------------------------------- +Dependency : github.com/elastic/go-licenser +Version: v0.4.1 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/elastic/go-licenser@v0.4.1/LICENSE: + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2018 Elasticsearch B.V. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +-------------------------------------------------------------------------------- +Dependency : github.com/magefile/mage +Version: v1.15.0 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/magefile/mage@v1.15.0/LICENSE: + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2017 the Mage authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ + +-------------------------------------------------------------------------------- +Dependency : github.com/stretchr/testify +Version: v1.9.0 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/stretchr/testify@v1.9.0/LICENSE: + +MIT License + +Copyright (c) 2012-2020 Mat Ryer, Tyler Bunnell and contributors. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +-------------------------------------------------------------------------------- +Dependency : go.elastic.co/go-licence-detector +Version: v0.6.0 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/go.elastic.co/go-licence-detector@v0.6.0/LICENSE: + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +-------------------------------------------------------------------------------- +Dependency : golang.org/x/tools +Version: v0.13.0 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/golang.org/x/tools@v0.13.0/LICENSE: + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. 
nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + + +================================================================================ +Indirect dependencies + + +-------------------------------------------------------------------------------- +Dependency : github.com/cyphar/filepath-securejoin +Version: v0.2.4 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/cyphar/filepath-securejoin@v0.2.4/LICENSE: + +Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved. +Copyright (C) 2017 SUSE LLC. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + +-------------------------------------------------------------------------------- +Dependency : github.com/davecgh/go-spew +Version: v1.1.1 +Licence type (autodetected): ISC +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/davecgh/go-spew@v1.1.1/LICENSE: + +ISC License + +Copyright (c) 2012-2016 Dave Collins + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/gobuffalo/here +Version: v0.6.0 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/gobuffalo/here@v0.6.0/LICENSE: + +The MIT License (MIT) + +Copyright (c) 2019 Mark Bates + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/google/go-cmp +Version: v0.2.0 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/google/go-cmp@v0.2.0/LICENSE: + +Copyright (c) 2017 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. 
nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/google/licenseclassifier +Version: v0.0.0-20200402202327-879cb1424de0 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/google/licenseclassifier@v0.0.0-20200402202327-879cb1424de0/LICENSE: + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +-------------------------------------------------------------------------------- +Dependency : github.com/karrick/godirwalk +Version: v1.15.6 +Licence type (autodetected): BSD-2-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/karrick/godirwalk@v1.15.6/LICENSE: + +BSD 2-Clause License + +Copyright (c) 2017, Karrick McDermott +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/kr/pretty +Version: v0.1.0 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/kr/pretty@v0.1.0/License: + +The MIT License (MIT) + +Copyright 2012 Keith Rarick + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/kr/pty +Version: v1.1.1 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/kr/pty@v1.1.1/License: + +Copyright (c) 2011 Keith Rarick + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, +sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall +be included in all copies or substantial portions of the +Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY +KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE +WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS +OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR +OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/kr/text +Version: v0.1.0 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/kr/text@v0.1.0/License: + +Copyright 2012 Keith Rarick + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/markbates/pkger +Version: v0.17.0 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/markbates/pkger@v0.17.0/LICENSE: + +The MIT License (MIT) + +Copyright (c) 2019 Mark Bates + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/pkg/errors +Version: v0.8.1 +Licence type (autodetected): BSD-2-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/pkg/errors@v0.8.1/LICENSE: + +Copyright (c) 2015, Dave Cheney +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/pmezard/go-difflib +Version: v1.0.0 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/pmezard/go-difflib@v1.0.0/LICENSE: + +Copyright (c) 2013, Patrick Mezard +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + The names of its contributors may not be used to endorse or promote +products derived from this software without specific prior written +permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/sergi/go-diff +Version: v1.1.0 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/sergi/go-diff@v1.1.0/LICENSE: + +Copyright (c) 2012-2016 The go-diff Authors. All rights reserved. 
+ +Permission is hereby granted, free of charge, to any person obtaining a +copy of this software and associated documentation files (the "Software"), +to deal in the Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, sublicense, +and/or sell copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. + + + +-------------------------------------------------------------------------------- +Dependency : github.com/stretchr/objx +Version: v0.5.2 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/stretchr/objx@v0.5.2/LICENSE: + +The MIT License + +Copyright (c) 2014 Stretchr, Inc. +Copyright (c) 2017-2018 objx contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/yuin/goldmark +Version: v1.4.13 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/yuin/goldmark@v1.4.13/LICENSE: + +MIT License + +Copyright (c) 2019 Yusuke Inuzuka + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +-------------------------------------------------------------------------------- +Dependency : golang.org/x/crypto +Version: v0.0.0-20191011191535-87dc89f01550 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/golang.org/x/crypto@v0.0.0-20191011191535-87dc89f01550/LICENSE: + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : golang.org/x/lint +Version: v0.0.0-20210508222113-6edffad5e616 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/golang.org/x/lint@v0.0.0-20210508222113-6edffad5e616/LICENSE: + +Copyright (c) 2013 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : golang.org/x/mod +Version: v0.17.0 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/golang.org/x/mod@v0.17.0/LICENSE: + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : golang.org/x/net +Version: v0.15.0 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/golang.org/x/net@v0.15.0/LICENSE: + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. 
nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : golang.org/x/sync +Version: v0.7.0 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/golang.org/x/sync@v0.7.0/LICENSE: + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : golang.org/x/sys +Version: v0.12.0 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/golang.org/x/sys@v0.12.0/LICENSE: + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : golang.org/x/term +Version: v0.0.0-20201126162022-7de9c90e9dd1 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/golang.org/x/term@v0.0.0-20201126162022-7de9c90e9dd1/LICENSE: + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : golang.org/x/text +Version: v0.3.6 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/golang.org/x/text@v0.3.6/LICENSE: + +Copyright (c) 2009 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : golang.org/x/xerrors +Version: v0.0.0-20200804184101-5ec99f83aff1 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/golang.org/x/xerrors@v0.0.0-20200804184101-5ec99f83aff1/LICENSE: + +Copyright (c) 2019 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + +-------------------------------------------------------------------------------- +Dependency : gopkg.in/check.v1 +Version: v1.0.0-20190902080502-41f04d3bba15 +Licence type (autodetected): BSD-2-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/gopkg.in/check.v1@v1.0.0-20190902080502-41f04d3bba15/LICENSE: + +Gocheck - A rich testing framework for Go + +Copyright (c) 2010-2013 Gustavo Niemeyer + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : gopkg.in/yaml.v2 +Version: v2.2.7 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/gopkg.in/yaml.v2@v2.2.7/LICENSE: + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +-------------------------------------------------------------------------------- +Dependency : gopkg.in/yaml.v3 +Version: v3.0.1 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/gopkg.in/yaml.v3@v3.0.1/LICENSE: + + +This project is covered by two different licenses: MIT and Apache. 
+
+#### MIT License ####
+
+The following files were ported to Go from C files of libyaml, and thus
+are still covered by their original MIT license, with the additional
+copyright staring in 2011 when the project was ported over:
+
+    apic.go emitterc.go parserc.go readerc.go scannerc.go
+    writerc.go yamlh.go yamlprivateh.go
+
+Copyright (c) 2006-2010 Kirill Simonov
+Copyright (c) 2006-2011 Kirill Simonov
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+### Apache License ###
+
+All the remaining project files are covered by the Apache license:
+
+Copyright (c) 2011-2019 Canonical Ltd
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+
diff --git a/vendor/github.com/elastic/go-grok/README.md b/vendor/github.com/elastic/go-grok/README.md
new file mode 100644
index 00000000000..9f0bade8ba7
--- /dev/null
+++ b/vendor/github.com/elastic/go-grok/README.md
@@ -0,0 +1,223 @@
+# Grok
+
+Grok is a grok parsing library based on `re2` regular expressions.
+
+## Usage
+
+#### Basic usage:
+```go
+g := grok.New()
+
+// use custom patterns
+patternDefinitions := map[string]string{
+	// patterns can be nested
+	"NGINX_HOST": `(?:%{IP:destination.ip}|%{NGINX_NOTSEPARATOR:destination.domain})(:%{NUMBER:destination.port})?`,
+	// NGINX_NOTSEPARATOR is used in NGINX_HOST. IP and NUMBER are part of the default pattern set
+	"NGINX_NOTSEPARATOR": `"[^\t ,:]+"`,
+}
+g.AddPatterns(patternDefinitions)
+
+// compile grok before use; this generates the underlying regex from the
+// pattern and subpatterns provided.
+// this needs to be performed just once.
+err := g.Compile("%{NGINX_HOST}", true)
+
+res, err := g.ParseString("127.0.0.1:1234")
+```
+
+results in:
+```go
+map[string]string {
+	"destination.ip": "127.0.0.1",
+	"destination.port": "1234",
+}
+```
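+
+For reference, here is the basic example as a complete, runnable program (a
+sketch: it assumes the vendored import path `github.com/elastic/go-grok` and
+adds the error handling the snippets above and below elide):
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+
+	grok "github.com/elastic/go-grok"
+)
+
+func main() {
+	g := grok.New()
+	g.AddPatterns(map[string]string{
+		"NGINX_HOST":         `(?:%{IP:destination.ip}|%{NGINX_NOTSEPARATOR:destination.domain})(:%{NUMBER:destination.port})?`,
+		"NGINX_NOTSEPARATOR": `"[^\t ,:]+"`,
+	})
+
+	// Compile once; the second argument (true) keeps named captures only.
+	if err := g.Compile("%{NGINX_HOST}", true); err != nil {
+		log.Fatalf("compile: %v", err)
+	}
+
+	res, err := g.ParseString("127.0.0.1:1234")
+	if err != nil {
+		log.Fatalf("parse: %v", err)
+	}
+	fmt.Println(res) // map[destination.ip:127.0.0.1 destination.port:1234]
+}
+```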
+
+
+#### Unnamed usage:
+
+In this case we changed
+`err := g.Compile("%{NGINX_HOST}", true)` to
+`err := g.Compile("%{NGINX_HOST}", false)`,
+allowing unnamed matches to be returned as well. For an unnamed match, the
+pattern definition name is used as the key.
+
+```go
+g := grok.New()
+
+// use custom patterns
+patternDefinitions := map[string]string{
+	// patterns can be nested
+	"NGINX_HOST": `(?:%{IP:destination.ip}|%{NGINX_NOTSEPARATOR:destination.domain})(:%{NUMBER:destination.port})?`,
+	// NGINX_NOTSEPARATOR is used in NGINX_HOST. IP and NUMBER are part of the default pattern set
+	"NGINX_NOTSEPARATOR": `"[^\t ,:]+"`,
+}
+g.AddPatterns(patternDefinitions)
+
+// compile grok before use; this generates the underlying regex from the
+// pattern and subpatterns provided
+err := g.Compile("%{NGINX_HOST}", false)
+
+res, err := g.ParseString("127.0.0.1:1234")
+```
+
+results in:
+```go
+map[string]string {
+	"NGINX_HOST": "127.0.0.1:1234",
+	"destination.ip": "127.0.0.1",
+	"IPV4": "127.0.0.1",
+	"destination.port": "1234",
+	"BASE10NUM": "1234",
+}
+```
+
+
+#### Typed arguments usage:
+
+In this case we're marking `destination.port` as `int` using the definition
+`%{NUMBER:destination.port:int}`.
+
+```go
+g := grok.New()
+
+// use custom patterns
+patternDefinitions := map[string]string{
+	"NGINX_HOST": `(?:%{IP:destination.ip}|%{NGINX_NOTSEPARATOR:destination.domain})(:%{NUMBER:destination.port:int})?`,
+	"NGINX_NOTSEPARATOR": `"[^\t ,:]+"`,
+}
+g.AddPatterns(patternDefinitions)
+
+// compile grok before use; this generates the underlying regex from the
+// pattern and subpatterns provided
+err := g.Compile("%{NGINX_HOST}", true)
+
+res, err := g.ParseTypedString("127.0.0.1:1234")
+```
+
+Note that the result type changed from `map[string]string` to
+`map[string]interface{}`, and `destination.port` is now a number:
+```go
+map[string]interface {} {
+	"destination.ip": "127.0.0.1",
+	"destination.port": 1234,
+}
+```
+
+## Benchmarks
+
+Compared to [github.com/vjeantet/grok](https://github.com/vjeantet/grok) and
+[github.com/trivago/grok](https://github.com/trivago/grok), a more optimized
+version based on the former:
+
+```
+BenchmarkParseString-10                 15466    76811 ns/op    4578 B/op    5 allocs/op
+BenchmarkParseStringRegexp-10           15351    77109 ns/op    3840 B/op    3 allocs/op
+BenchmarkParseStringTrivago-10          15868    76416 ns/op    4593 B/op    5 allocs/op
+BenchmarkParseStringVjeanet-10          15548    77111 ns/op    5897 B/op    6 allocs/op
+
+BenchmarkNestedParseString-10           42201    28908 ns/op    3463 B/op    4 allocs/op
+BenchmarkNestedParseStringTrivago-10    41937    28836 ns/op    3449 B/op    4 allocs/op
+BenchmarkNestedParseStringVjeanet-10    41080    29174 ns/op    4045 B/op    5 allocs/op
+
+BenchmarkTypedParseString-10            39934    29707 ns/op    3851 B/op    9 allocs/op
+BenchmarkTypedParseStringTrivago-10     40146    29238 ns/op    3475 B/op    6 allocs/op
+BenchmarkTypedParseStringVjeanet-10     39931    30616 ns/op    4196 B/op   14 allocs/op
+```
+
+
+## Default set of patterns
+
+This library comes with a default set of patterns defined in the
+`patterns/default.go` file. You can include more predefined patterns from
+`patterns/*.go` like so:
+
+```go
+g := grok.New()
+g.AddPatterns(patterns.Rails) // to include a whole set
+g.AddPattern(patterns.Ruby["RUBY_LOGLEVEL"]) // to include a specific one
+```
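+
+A fuller sketch of including a predefined set (the
+`github.com/elastic/go-grok/patterns` import path is an assumption based on
+the `patterns/*.go` layout described above):
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+
+	grok "github.com/elastic/go-grok"
+	"github.com/elastic/go-grok/patterns" // assumed import path for patterns/*.go
+)
+
+func main() {
+	g := grok.New()
+	g.AddPatterns(patterns.Rails) // include a whole predefined set, as above
+
+	// The expression below only uses default-set names (IP, WORD); the Rails
+	// include just illustrates the wiring.
+	if err := g.Compile("%{IP:client} %{WORD:method}", true); err != nil {
+		log.Fatalf("compile: %v", err)
+	}
+
+	res, err := g.ParseString("127.0.0.1 GET")
+	if err != nil {
+		log.Fatalf("parse: %v", err)
+	}
+	fmt.Println(res) // map[client:127.0.0.1 method:GET]
+}
+```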
+
+The default set consists of:
+
+| Name | Example |
+|-----|-----|
+| WORD | "hello", "world123", "test_data" |
+| NOTSPACE | "example", "text-with-dashes", "12345" |
+| SPACE | " ", "\t", " " |
+| INT | "123", "-456", "+789" |
+| NUMBER | "123", "456.789", "-0.123" |
+| BOOL | "true", "false" |
+| BASE10NUM | "123", "-123.456", "0.789" |
+| BASE16NUM | "1a2b", "0x1A2B", "-0x1a2b3c" |
+| BASE16FLOAT | "0x1.a2b3", "-0x1A2B3C.D" |
+| POSINT | "123", "456", "789" |
+| NONNEGINT | "0", "123", "456" |
+| GREEDYDATA | "anything goes", "literally anything", "123 #@!" |
+| QUOTEDSTRING | "\"This is a quote\"", "'single quoted'" |
+| UUID | "123e4567-e89b-12d3-a456-426614174000" |
+| URN | "urn:isbn:0451450523", "urn:ietf:rfc:2648" |
+
+#### Network patterns
+
+| Name | Example |
+|-----|-----|
+| IP | "192.168.1.1", "2001:0db8:85a3:0000:0000:8a2e:0370:7334" |
+| IPV6 | "2001:0db8:85a3:0000:0000:8a2e:0370:7334", "::1", "fe80::1ff:fe23:4567:890a" |
+| IPV4 | "192.168.1.1", "10.0.0.1", "172.16.254.1" |
+| IPORHOST | "example.com", "192.168.1.1", "fe80::1ff:fe23:4567:890a" |
+| HOSTNAME | "example.com", "sub.domain.co.uk", "localhost" |
+| EMAILLOCALPART | "john.doe", "alice123", "bob-smith" |
+| EMAILADDRESS | "john.doe@example.com", "alice123@domain.co.uk" |
+| USERNAME | "user1", "john.doe", "alice_123" |
+| USER | "user1", "john.doe", "alice_123" |
+| MAC | "00:1A:2B:3C:4D:5E", "001A.2B3C.4D5E" |
+| CISCOMAC | "001A.2B3C.4D5E", "001B.2C3D.4E5F", "001C.2D3E.4F5A" |
+| WINDOWSMAC | "00-1A-2B-3C-4D-5E", "00-1B-2C-3D-4E-5F" |
+| COMMONMAC | "00:1A:2B:3C:4D:5E", "00:1B:2C:3D:4E:5F" |
+| HOSTPORT | "example.com:80", "192.168.1.1:8080" |
+
+#### Paths patterns
+
+| Name | Example |
+|-----|-----|
+| UNIXPATH | "/home/user", "/var/log/syslog", "/tmp/abc_123" |
+| TTY | "/dev/pts/1", "/dev/tty0", "/dev/ttyS0" |
+| WINPATH | "C:\\Program Files\\App", "D:\\Work\\project\\file.txt" |
+| URIPROTO | "http", "https", "ftp" |
+| URIHOST | "example.com", "192.168.1.1:8080" |
+| URIPATH | "/path/to/resource", "/another/path", "/root" |
+| URIQUERY | "key=value", "search=query&active=true" |
+| URIPARAM | "?key=value", "?search=query&active=true" |
+| URIPATHPARAM | "/path?query=1", "/folder/path?valid=true" |
+| PATH | "/home/user/documents", "C:\\Windows\\system32", "/var/log/syslog" |
+
+#### Datetime patterns
+
+| Name | Example |
+|-----|-----|
+| MONTH | "January", "Feb", "March", "Apr", "May", "Jun", "Jul", "August", "September", "October", "Nov", "December" |
+| MONTHNUM | "01", "02", "03", ... "11", "12" |
+| DAY | "Monday", "Tuesday", ... "Sunday" |
"Sunday" | +| YEAR |"1999", "2000", "2021" | +| HOUR |"00", "12", "23" | +| MINUTE | "00", "30", "59" | +| SECOND | "00", "30", "60" | +| TIME |"14:30", "23:59:59", "12:00:00", "12:00:60" | +| DATE_US |"04/21/2022", "12-25-2020", "07/04/1999" | +| DATE_EU |"21.04.2022", "25/12/2020", "04-07-1999" | +| ISO8601_TIMEZONE |"Z", "+02:00", "-05:00" | +| ISO8601_SECOND | "59", "30", "60.123" | +| TIMESTAMP_ISO8601 | "2022-04-21T14:30:00Z", "2020-12-25T23:59:59+02:00", "1999-07-04T12:00:00-05:00" | +| DATE |"04/21/2022", "21.04.2022", "12-25-2020" | +| DATESTAMP | "04/21/2022 14:30", "21.04.2022 23:59", "12-25-2020 12:00" | +| TZ | "EST", "CET", "PDT" | +| DATESTAMP_RFC822 |"Wed Jan 12 2024 14:33 EST" | +| DATESTAMP_RFC2822 | "Tue, 12 Jan 2022 14:30 +0200", "Fri, 25 Dec 2020 23:59 -0500", "Sun, 04 Jul 1999 12:00 Z" | +| DATESTAMP_OTHER | "Tue Jan 12 14:30 EST 2022", "Fri Dec 25 23:59 CET 2020", "Sun Jul 04 12:00 PDT 1999" | +| DATESTAMP_EVENTLOG | "20220421143000", "20201225235959", "19990704120000" | + +#### Syslog patterns + +| Name | Example | +|-----|-----| +| SYSLOGTIMESTAMP | "Jan 1 00:00:00", "Mar 15 12:34:56", "Dec 31 23:59:59" | +| PROG |"sshd", "kernel", "cron" | +| SYSLOGPROG |"sshd[1234]", "kernel", "cron[5678]" | +| SYSLOGHOST |"example.com", "192.168.1.1", "localhost" | +| SYSLOGFACILITY | "<1.2>", "<12345.13456>" | +| HTTPDATE | "25/Dec/2024:14:33 4" | \ No newline at end of file diff --git a/vendor/github.com/elastic/go-grok/catalog-info.yaml b/vendor/github.com/elastic/go-grok/catalog-info.yaml new file mode 100644 index 00000000000..30d839fa811 --- /dev/null +++ b/vendor/github.com/elastic/go-grok/catalog-info.yaml @@ -0,0 +1,58 @@ +# Declare a Backstage Component that represents your application. +--- +# yaml-language-server: $schema=https://json.schemastore.org/catalog-info.json +apiVersion: backstage.io/v1alpha1 +kind: Component +metadata: + name: go-grok + description: Libraries used by Elastic Otel processors + +spec: + type: library + owner: group:ingest-fp + system: platform-ingest + lifecycle: production +--- +# yaml-language-server: $schema=https://gist.githubusercontent.com/elasticmachine/988b80dae436cafea07d9a4a460a011d/raw/e57ee3bed7a6f73077a3f55a38e76e40ec87a7cf/rre.schema.json +apiVersion: backstage.io/v1alpha1 +kind: Resource +metadata: + name: buildkite-pipeline-go-grok + description: Buildkite Pipeline for go-grok + links: + - title: Pipeline + url: https://buildkite.com/elastic/go-grok + +spec: + type: buildkite-pipeline + owner: group:ingest-fp + system: buildkite + implementation: + apiVersion: buildkite.elastic.dev/v1 + kind: Pipeline + metadata: + name: go-grok + description: Buildkite pipeline for the go-grok library + spec: + branch_configuration: "main" + repository: elastic/go-grok + pipeline_file: ".buildkite/pipeline.yml" + maximum_timeout_in_minutes: 60 + provider_settings: + build_pull_request_forks: false + build_pull_requests: true # requires filter_enabled and filter_condition settings as below when used with buildkite-pr-bot + build_tags: true + filter_enabled: true + filter_condition: >- + build.pull_request.id == null || (build.creator.name == 'elasticmachine' && build.pull_request.id != null) + cancel_intermediate_builds: true + cancel_intermediate_builds_branch_filter: '!main' + skip_intermediate_builds: true + skip_intermediate_builds_branch_filter: '!main' + teams: + elastic-agent-control-plane: + access_level: MANAGE_BUILD_AND_READ + ingest-fp: + access_level: MANAGE_BUILD_AND_READ + everyone: + access_level: READ_ONLY diff --git 
diff --git a/vendor/github.com/elastic/go-grok/dev-tools/mage/benchmark.go b/vendor/github.com/elastic/go-grok/dev-tools/mage/benchmark.go
new file mode 100644
index 00000000000..def7290ae26
--- /dev/null
+++ b/vendor/github.com/elastic/go-grok/dev-tools/mage/benchmark.go
@@ -0,0 +1,163 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package mage
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"log"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strconv"
+	"strings"
+
+	"github.com/magefile/mage/mg"
+	"github.com/magefile/mage/sh"
+
+	"github.com/elastic/go-grok/dev-tools/mage/gotool"
+)
+
+const (
+	goBenchstat = "golang.org/x/perf/cmd/benchstat@v0.0.0-20230227161431-f7320a6d63e8"
+)
+
+var (
+	benchmarkCount = 8
+)
+
+// Benchmark is the mage namespace grouping all benchmark-related targets.
+type Benchmark mg.Namespace
+
+// Deps installs the tools required for reading benchmark results.
+func (Benchmark) Deps() error {
+	err := gotool.Install(gotool.Install.Package(goBenchstat))
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// Run executes the Go benchmark tests for this repository. By defining the
+// variable OUTPUT you write the results into a file. Optionally you can set
+// BENCH_COUNT to the number of benchmark iterations to execute; the default
+// is 8.
+func (Benchmark) Run() error {
+	mg.Deps(Benchmark.Deps)
+	log.Println(">> go Test: Benchmark")
+	outputFile := os.Getenv("OUTPUT")
+	benchmarkCountOverride := os.Getenv("BENCH_COUNT")
+	if benchmarkCountOverride != "" {
+		var overrideErr error
+		benchmarkCount, overrideErr = strconv.Atoi(benchmarkCountOverride)
+		if overrideErr != nil {
+			return fmt.Errorf("failed to parse BENCH_COUNT, verify that you set the right value: %w", overrideErr)
+		}
+	}
+	projectPackages, er := gotool.ListProjectPackages()
+	if er != nil {
+		return fmt.Errorf("failed to list package dependencies: %w", er)
+	}
+	cmdArg := fmt.Sprintf("test -count=%d -bench=Bench -run=Bench", benchmarkCount)
+	cmdArgs := strings.Split(cmdArg, " ")
+	for _, pkg := range projectPackages {
+		cmdArgs = append(cmdArgs, filepath.Join(pkg, "/..."))
+	}
+	_, err := runCommand(nil, "go", outputFile, cmdArgs...)
+
+	var goTestErr *exec.ExitError
+	switch {
+	case err == nil:
+		return nil
+	case errors.As(err, &goTestErr):
+		return fmt.Errorf("failed to execute go test -bench command: %w", err)
+	default:
+		return fmt.Errorf("failed to execute go test -bench command: %w", err)
+	}
+}
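+
+// Example invocations for the Run and Diff targets (illustrative, assuming
+// mage's usual namespace:target naming for the Benchmark namespace):
+//
+//	OUTPUT=bench-main.out BENCH_COUNT=10 mage benchmark:run
+//	BASE=bench-main.out NEXT=bench-pr.out mage benchmark:diff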
+// Diff parses one or more benchmark outputs. The required environment
+// variables are BASE for the baseline results and NEXT for the results to
+// compare against the baseline. Optionally you can define OUTPUT to write
+// the results into a file.
+func (Benchmark) Diff() error {
+	mg.Deps(Benchmark.Deps)
+	log.Println(">> running: benchstat")
+	outputFile := os.Getenv("OUTPUT")
+	baseFile := os.Getenv("BASE")
+	nextFile := os.Getenv("NEXT")
+	var args []string
+	if baseFile == "" {
+		return fmt.Errorf("missing required parameter BASE; please set it to a filepath of the benchmark results")
+	}
+	args = append(args, baseFile)
+	if nextFile == "" {
+		log.Printf("Missing NEXT parameter, we are not going to compare results")
+	} else {
+		args = append(args, nextFile)
+	}
+
+	_, err := runCommand(nil, "benchstat", outputFile, args...)
+
+	var goTestErr *exec.ExitError
+	switch {
+	case err == nil:
+		return nil
+	case errors.As(err, &goTestErr):
+		return fmt.Errorf("failed to execute benchstat command: %w", err)
+	default:
+		return fmt.Errorf("failed to execute benchstat command: %w", err)
+	}
+}
+
+// runCommand executes the command represented by cmd. When outputFile is
+// defined it writes the command's stderr and stdout to that file; otherwise
+// it streams them to the console's stderr and stdout. It returns true and a
+// nil error on success.
+func runCommand(env map[string]string, cmd string, outputFile string, args ...string) (bool, error) {
+	var stdOut io.Writer
+	var stdErr io.Writer
+	if outputFile != "" {
+		fileOutput, err := os.Create(createDir(outputFile))
+		if err != nil {
+			return false, fmt.Errorf("failed to create %s output file: %w", cmd, err)
+		}
+		defer func(fileOutput *os.File) {
+			err := fileOutput.Close()
+			if err != nil {
+				log.Fatalf("Failed to close file %s", err)
+			}
+		}(fileOutput)
+		stdOut = io.MultiWriter(os.Stdout, fileOutput)
+		stdErr = io.MultiWriter(os.Stderr, fileOutput)
+	} else {
+		stdOut = os.Stdout
+		stdErr = os.Stderr
+	}
+	return sh.Exec(env, stdOut, stdErr, cmd, args...)
+}
+
+// createDir creates the parent directory for the given file.
+func createDir(file string) string {
+	// Create the output directory.
+	if dir := filepath.Dir(file); dir != "." {
+		if err := os.MkdirAll(dir, 0755); err != nil {
+			log.Fatalf("Failed to create parent dir for %s", file)
+		}
+	}
+	return file
+}
diff --git a/vendor/github.com/elastic/go-grok/dev-tools/mage/check.go b/vendor/github.com/elastic/go-grok/dev-tools/mage/check.go
new file mode 100644
index 00000000000..50e843ea2f8
--- /dev/null
+++ b/vendor/github.com/elastic/go-grok/dev-tools/mage/check.go
@@ -0,0 +1,48 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package mage
+
+import (
+	"fmt"
+
+	"github.com/magefile/mage/sh"
+)
+
+// CheckNoChanges runs go mod tidy and verifies that the git working tree is
+// left unchanged.
+func CheckNoChanges() error {
+	fmt.Println(">> fmt - go mod tidy")
+	err := sh.RunV("go", "mod", "tidy", "-v")
+	if err != nil {
+		return fmt.Errorf("failed running go mod tidy, please fix the issues reported: %w", err)
+	}
+	fmt.Println(">> fmt - git diff")
+	err = sh.RunV("git", "diff")
+	if err != nil {
+		return fmt.Errorf("failed running git diff, please fix the issues reported: %w", err)
+	}
+	fmt.Println(">> fmt - git update-index")
+	err = sh.RunV("git", "update-index", "--refresh")
+	if err != nil {
+		return fmt.Errorf("failed running git update-index --refresh, please fix the issues reported: %w", err)
+	}
+	fmt.Println(">> fmt - git diff-index")
+	err = sh.RunV("git", "diff-index", "--exit-code", "HEAD", "--")
+	if err != nil {
+		return fmt.Errorf("failed running git diff-index, please fix the issues reported: %w", err)
+	}
+	return nil
+}
diff --git a/vendor/github.com/elastic/go-grok/dev-tools/mage/deps.go b/vendor/github.com/elastic/go-grok/dev-tools/mage/deps.go
new file mode 100644
index 00000000000..4f4e179182b
--- /dev/null
+++ b/vendor/github.com/elastic/go-grok/dev-tools/mage/deps.go
@@ -0,0 +1,43 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package mage
+
+import (
+	"fmt"
+
+	"github.com/magefile/mage/mg"
+
+	"github.com/elastic/go-grok/dev-tools/mage/gotool"
+)
+
+// Deps contains targets related to checking dependencies.
+type Deps mg.Namespace
+
+// CheckModuleTidy checks if `go mod tidy` was run before the last commit.
+func (Deps) CheckModuleTidy() error {
+	err := gotool.Mod.Tidy()
+	if err != nil {
+		return err
+	}
+	err = assertUnchanged("go.mod")
+	if err != nil {
+		return fmt.Errorf("`go mod tidy` was not called before the last commit: %w", err)
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/elastic/go-grok/dev-tools/mage/fmt.go b/vendor/github.com/elastic/go-grok/dev-tools/mage/fmt.go
new file mode 100644
index 00000000000..0237a86a283
--- /dev/null
+++ b/vendor/github.com/elastic/go-grok/dev-tools/mage/fmt.go
@@ -0,0 +1,107 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package mage
+
+import (
+	"fmt"
+	"io/fs"
+	"os"
+	"path/filepath"
+
+	"github.com/magefile/mage/mg"
+	"github.com/magefile/mage/sh"
+
+	"github.com/elastic/go-grok/dev-tools/mage/gotool"
+)
+
+const (
+	// GoImportsImportPath controls the import path used to install goimports.
+	GoImportsImportPath = "golang.org/x/tools/cmd/goimports"
+
+	// GoImportsLocalPrefix is a string prefix matching imports that should be
+	// grouped after third-party packages.
+	GoImportsLocalPrefix = "github.com/elastic"
+)
+
+// GoImports contains targets related to formatting the Go code
+type GoImports mg.Namespace
+
+// Run executes goimports against all .go files in and below the CWD.
+func (GoImports) Run() error {
+	mg.Deps(GoImports.Install)
+	goFiles, err := FindFilesRecursive(func(path string, _ os.FileInfo) bool {
+		return filepath.Ext(path) == ".go"
+	})
+	if err != nil {
+		return err
+	}
+	if len(goFiles) == 0 {
+		return nil
+	}
+
+	fmt.Println(">> fmt - goimports: Formatting Go code") //nolint:forbidigo // it's a mage target
+	args := append(
+		[]string{"-local", GoImportsLocalPrefix, "-l", "-w"},
+		goFiles...,
+	)
+
+	return sh.RunV("goimports", args...)
+}
+
+// Install installs goimports from GoImportsImportPath.
+func (GoImports) Install() error {
+	err := gotool.Install(gotool.Install.Package(GoImportsImportPath))
+	if err != nil {
+		return fmt.Errorf("cannot install GoImports: %w", err)
+	}
+
+	return nil
+}
+
+// FindFilesRecursive recursively traverses from the CWD and invokes the given
+// match function on each regular file to determine if the given path should be
+// returned as a match. It ignores files in .git directories.
+func FindFilesRecursive(match func(path string, info os.FileInfo) bool) ([]string, error) {
+	var matches []string
+	walkDir := func(path string, d fs.DirEntry, err error) error {
+		if err != nil {
+			return err
+		}
+
+		// Don't look for files in git directories
+		if d.IsDir() && filepath.Base(path) == ".git" {
+			return filepath.SkipDir
+		}
+
+		info, err := d.Info()
+		if err != nil {
+			return fmt.Errorf("cannot get FileInfo: %w", err)
+		}
+		if !info.Mode().IsRegular() {
+			// skip non-regular files (directories, symlinks, ...)
+			return nil
+		}
+
+		if match(filepath.ToSlash(path), info) {
+			matches = append(matches, path)
+		}
+		return nil
+	}
+
+	err := filepath.WalkDir(".", fs.WalkDirFunc(walkDir))
+	return matches, err
+}
diff --git a/vendor/github.com/elastic/go-grok/dev-tools/mage/gotool/get.go b/vendor/github.com/elastic/go-grok/dev-tools/mage/gotool/get.go
new file mode 100644
index 00000000000..9770cc1a1b5
--- /dev/null
+++ b/vendor/github.com/elastic/go-grok/dev-tools/mage/gotool/get.go
@@ -0,0 +1,43 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
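FindFilesRecursive above is the generic walker the formatting targets rely on; a self-contained sketch (hypothetical main package, not part of the vendored tree) of the same filepath.WalkDir approach for collecting .go files while skipping .git:

package main

import (
	"fmt"
	"io/fs"
	"path/filepath"
)

func main() {
	var goFiles []string
	err := filepath.WalkDir(".", func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		// Skip .git directories entirely, as FindFilesRecursive does.
		if d.IsDir() && filepath.Base(path) == ".git" {
			return filepath.SkipDir
		}
		if !d.IsDir() && filepath.Ext(path) == ".go" {
			goFiles = append(goFiles, filepath.ToSlash(path))
		}
		return nil
	})
	if err != nil {
		fmt.Println("walk failed:", err)
		return
	}
	fmt.Printf("found %d Go files\n", len(goFiles))
}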
+ +package gotool + +type goGet func(opts ...ArgOpt) error +type goDownload func(opts ...ArgOpt) error + +// Get runs `go get` and provides optionals for adding command line arguments. +var Get goGet = runGoGet + +func runGoGet(opts ...ArgOpt) error { + args := buildArgs(opts) + return runVGo("get", args) +} + +func (goGet) Download() ArgOpt { return flagBoolIf("-d", true) } +func (goGet) Update() ArgOpt { return flagBoolIf("-u", true) } +func (goGet) Package(pkg string) ArgOpt { return posArg(pkg) } + +// Download runs `go download` and provides optionals for adding command line arguments. +var Download goDownload = runGoDownload + +func runGoDownload(opts ...ArgOpt) error { + args := buildArgs(opts) + return runVGo("download", args) +} + +func (goDownload) All() ArgOpt { return posArg("all") } diff --git a/vendor/github.com/elastic/go-grok/dev-tools/mage/gotool/go.go b/vendor/github.com/elastic/go-grok/dev-tools/mage/gotool/go.go new file mode 100644 index 00000000000..b7a833cfe4f --- /dev/null +++ b/vendor/github.com/elastic/go-grok/dev-tools/mage/gotool/go.go @@ -0,0 +1,343 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package gotool + +import ( + "fmt" + "os" + "strings" + + "github.com/magefile/mage/mg" + "github.com/magefile/mage/sh" +) + +// Args holds parameters, environment variables and flag information used to +// pass to the go tool. +type Args struct { + extra map[string]string // extra flags one can pass to the command + env map[string]string + flags map[string][]string + pos []string +} + +// ArgOpt is a functional option adding info to Args once executed. +type ArgOpt func(args *Args) + +type goInstall func(opts ...ArgOpt) error + +// Install runs `go install` and provides optionals for adding command line arguments. +var Install goInstall = runGoInstall + +func runGoInstall(opts ...ArgOpt) error { + args := buildArgs(opts) + return runVGo("install", args) +} + +func (goInstall) Package(pkg string) ArgOpt { return posArg(pkg) } +func (goInstall) Vendored() ArgOpt { return flagArg("-mod", "vendor") } + +type goTest func(opts ...ArgOpt) error + +// Test runs `go test` and provides optionals for adding command line arguments. +var Test goTest = runGoTest + +// GetModuleName returns the name of the module. +func GetModuleName() (string, error) { + lines, err := getLines(callGo(nil, "list", "-m")) + if err != nil { + return "", err + } + if len(lines) != 1 { + return "", fmt.Errorf("unexpected number of lines") + } + return lines[0], nil +} + +// ListProjectPackages lists all packages in the current project +func ListProjectPackages() ([]string, error) { + return ListPackages("./...") +} + +// ListPackages calls `go list` for every package spec given. 
+func ListPackages(pkgs ...string) ([]string, error) {
+	return getLines(callGo(nil, "list", pkgs...))
+}
+
+// ListDeps calls `go list -deps` for the given package spec.
+func ListDeps(pkg string) ([]string, error) {
+	const tmpl = `{{if not .Standard}}{{.ImportPath}}{{end}}`
+
+	return getLines(callGo(nil, "list", "-deps", "-f", tmpl, pkg))
+}
+
+// ListDepsForNotice returns the full module list as JSON, as consumed by the
+// notice generator.
+func ListDepsForNotice() (string, error) {
+	return callGo(nil, "list", "-json", "-m", "all")
+}
+
+// ListDepsLocation calls `go list -deps` for the given package spec and maps
+// each non-standard dependency's import path to its directory.
+func ListDepsLocation(pkg string) (map[string]string, error) {
+	const tmpl = `{{if not .Standard}}{{.ImportPath}};{{.Dir}}{{end}}`
+
+	lines, err := getLines(callGo(nil, "list", "-deps", "-f", tmpl, pkg))
+	if err != nil {
+		return nil, err
+	}
+	deps := make(map[string]string, len(lines))
+	for _, l := range lines {
+		parts := strings.Split(l, ";")
+		if len(parts) != 2 {
+			return nil, fmt.Errorf("invalid number of parts in line %q", l)
+		}
+		deps[parts[0]] = parts[1]
+	}
+	return deps, nil
+}
+
+// ListTestFiles lists all go and cgo test files available in a package.
+func ListTestFiles(pkg string) ([]string, error) {
+	const tmpl = `{{ range .TestGoFiles }}{{ printf "%s\n" . }}{{ end }}` +
+		`{{ range .XTestGoFiles }}{{ printf "%s\n" . }}{{ end }}`
+
+	return getLines(callGo(nil, "list", "-f", tmpl, pkg))
+}
+
+// ListModuleCacheDir returns the module cache directory containing
+// the specified module. If the module does not exist in the cache,
+// an error will be returned.
+func ListModuleCacheDir(pkg string) (string, error) {
+	return listModuleDir(pkg, false)
+}
+
+// ListModuleVendorDir returns the vendor directory containing the
+// specified module. If the module has not been vendored, an error
+// will be returned.
+func ListModuleVendorDir(pkg string) (string, error) {
+	return listModuleDir(pkg, true)
+}
+
+func listModuleDir(pkg string, vendor bool) (string, error) {
+	env := map[string]string{
+		// Make sure GOFLAGS does not influence behaviour.
+		"GOFLAGS": "",
+	}
+	args := []string{"-m", "-f", "{{.Dir}}"}
+	if vendor {
+		args = append(args, "-mod=vendor")
+	}
+	args = append(args, pkg)
+	lines, err := getLines(callGo(env, "list", args...))
+	if err != nil {
+		return "", err
+	}
+	if n := len(lines); n != 1 {
+		return "", fmt.Errorf("expected 1 line, got %d while looking for %s", n, pkg)
+	}
+	return lines[0], nil
+}
+
+// HasTests returns true if the given package contains test files.
+func HasTests(pkg string) (bool, error) { + files, err := ListTestFiles(pkg) + if err != nil { + return false, err + } + return len(files) > 0, nil +} + +func (goTest) WithCoverage(to string) ArgOpt { + return combine(flagArg("-cover", ""), flagArgIf("-test.coverprofile", to)) +} +func (goTest) Short(b bool) ArgOpt { return flagBoolIf("-test.short", b) } +func (goTest) Use(bin string) ArgOpt { return extraArgIf("use", bin) } +func (goTest) OS(os string) ArgOpt { return envArgIf("GOOS", os) } +func (goTest) ARCH(arch string) ArgOpt { return envArgIf("GOARCH", arch) } +func (goTest) Create() ArgOpt { return flagArg("-c", "") } +func (goTest) Out(path string) ArgOpt { return flagArg("-o", path) } +func (goTest) Package(path string) ArgOpt { return posArg(path) } +func (goTest) Verbose() ArgOpt { return flagArg("-test.v", "") } +func runGoTest(opts ...ArgOpt) error { + args := buildArgs(opts) + if bin := args.Val("use"); bin != "" { + flags := map[string][]string{} + for k, v := range args.flags { + if strings.HasPrefix(k, "-test.") { + flags[k] = v + } + } + + useArgs := &Args{} + *useArgs = *args + useArgs.flags = flags + + _, err := sh.Exec(useArgs.env, os.Stdout, os.Stderr, bin, useArgs.build()...) + return err + } + + return runVGo("test", args) +} + +func getLines(out string, err error) ([]string, error) { + if err != nil { + return nil, err + } + + lines := strings.Split(out, "\n") + res := lines[:0] + for _, line := range lines { + line = strings.TrimSpace(line) + if len(line) > 0 { + res = append(res, line) + } + } + + return res, nil +} + +func callGo(env map[string]string, cmd string, opts ...string) (string, error) { //nolint:unparam // not always receives list + args := []string{cmd} + args = append(args, opts...) + return sh.OutputWith(env, mg.GoCmd(), args...) +} + +func runVGo(cmd string, args *Args) error { + return execGoWith(func(env map[string]string, cmd string, args ...string) error { + _, err := sh.Exec(env, os.Stdout, os.Stderr, cmd, args...) + return err + }, cmd, args) +} + +func execGoWith( + fn func(map[string]string, string, ...string) error, + cmd string, args *Args, +) error { + cliArgs := []string{cmd} + cliArgs = append(cliArgs, args.build()...) + return fn(args.env, mg.GoCmd(), cliArgs...) +} + +func posArg(value string) ArgOpt { + return func(a *Args) { a.Add(value) } +} + +func extraArg(k, v string) ArgOpt { + return func(a *Args) { a.Extra(k, v) } +} + +func extraArgIf(k, v string) ArgOpt { + if v == "" { + return nil + } + return extraArg(k, v) +} + +func envArg(k, v string) ArgOpt { + return func(a *Args) { a.Env(k, v) } +} + +func envArgIf(k, v string) ArgOpt { + if v == "" { + return nil + } + return envArg(k, v) +} + +func flagArg(flag, value string) ArgOpt { + return func(a *Args) { a.Flag(flag, value) } +} + +func flagArgIf(flag, value string) ArgOpt { + if value == "" { + return nil + } + return flagArg(flag, value) +} + +func flagBoolIf(flag string, b bool) ArgOpt { + if b { + return flagArg(flag, "") + } + return nil +} + +func combine(opts ...ArgOpt) ArgOpt { + return func(a *Args) { + for _, opt := range opts { + if opt != nil { + opt(a) + } + } + } +} + +func buildArgs(opts []ArgOpt) *Args { + a := &Args{} + combine(opts...)(a) + return a +} + +// Extra sets a special k/v pair to be interpreted by the execution function. +func (a *Args) Extra(k, v string) { + if a.extra == nil { + a.extra = map[string]string{} + } + a.extra[k] = v +} + +// Val returns a special functions value for a given key. 
+func (a *Args) Val(k string) string {
+	if a.extra == nil {
+		return ""
+	}
+	return a.extra[k]
+}
+
+// Env sets an environment variable to be passed to the child process on exec.
+func (a *Args) Env(k, v string) {
+	if a.env == nil {
+		a.env = map[string]string{}
+	}
+	a.env[k] = v
+}
+
+// Flag adds a flag to be passed to the child process on exec.
+func (a *Args) Flag(flag, value string) {
+	if a.flags == nil {
+		a.flags = map[string][]string{}
+	}
+	a.flags[flag] = append(a.flags[flag], value)
+}
+
+// Add adds a positional argument to be passed to the child process on exec.
+func (a *Args) Add(p string) {
+	a.pos = append(a.pos, p)
+}
+
+func (a *Args) build() []string {
+	args := make([]string, 0, 2*len(a.flags)+len(a.pos))
+	for k, values := range a.flags {
+		for _, v := range values {
+			args = append(args, k)
+			if v != "" {
+				args = append(args, v)
+			}
+		}
+	}
+
+	args = append(args, a.pos...)
+	return args
+}
diff --git a/vendor/github.com/elastic/go-grok/dev-tools/mage/gotool/licenser.go b/vendor/github.com/elastic/go-grok/dev-tools/mage/gotool/licenser.go
new file mode 100644
index 00000000000..67978ab8218
--- /dev/null
+++ b/vendor/github.com/elastic/go-grok/dev-tools/mage/gotool/licenser.go
@@ -0,0 +1,35 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package gotool
+
+import "github.com/magefile/mage/sh"
+
+type goLicenser func(opts ...ArgOpt) error
+
+// Licenser runs `go-licenser` and provides optionals for adding command line arguments.
+var Licenser goLicenser = runGoLicenser
+
+func runGoLicenser(opts ...ArgOpt) error {
+	args := buildArgs(opts).build()
+	return sh.RunV("go-licenser", args...)
+}
+
+func (goLicenser) Check() ArgOpt { return flagBoolIf("-d", true) }
+func (goLicenser) License(license string) ArgOpt { return flagArgIf("-license", license) }
+func (goLicenser) Exclude(path string) ArgOpt { return flagArgIf("-exclude", path) }
+func (goLicenser) Path(path string) ArgOpt { return posArg(path) }
diff --git a/vendor/github.com/elastic/go-grok/dev-tools/mage/gotool/modules.go b/vendor/github.com/elastic/go-grok/dev-tools/mage/gotool/modules.go
new file mode 100644
index 00000000000..d7a880756b7
--- /dev/null
+++ b/vendor/github.com/elastic/go-grok/dev-tools/mage/gotool/modules.go
@@ -0,0 +1,64 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package gotool
+
+// Mod is the `go mod` command.
+var Mod = goMod{
+	Download: modCommand{"download"}.run,
+	Init:     modCommand{"init"}.run,
+	Tidy:     modCommand{"tidy"}.run,
+	Verify:   modCommand{"verify"}.run,
+	Vendor:   modCommand{"vendor"}.run,
+}
+
+type modCommand struct {
+	method string
+}
+
+func (cmd modCommand) run(opts ...ArgOpt) error {
+	o := make([]ArgOpt, len(opts)+1)
+	o[0] = posArg(cmd.method)
+	for i, opt := range opts {
+		o[i+1] = opt
+	}
+	args := buildArgs(o)
+	return runVGo("mod", args)
+}
+
+type goMod struct {
+	Download modDownload
+	Init     modInit
+	Tidy     modTidy
+	Verify   modVerify
+	Vendor   modVendor
+}
+
+// modDownload downloads modules to the local cache.
+type modDownload func(opts ...ArgOpt) error
+
+// modInit initializes a new go module in the current folder.
+type modInit func(opts ...ArgOpt) error
+
+// modTidy cleans up the go.mod file, adding missing and removing unused modules.
+type modTidy func(opts ...ArgOpt) error
+
+// modVerify checks that deps have the expected content.
+type modVerify func(opts ...ArgOpt) error
+
+// modVendor downloads and copies dependencies under the vendor folder.
+type modVendor func(opts ...ArgOpt) error
diff --git a/vendor/github.com/elastic/go-grok/dev-tools/mage/gotool/noticer.go b/vendor/github.com/elastic/go-grok/dev-tools/mage/gotool/noticer.go
new file mode 100644
index 00000000000..26669c3f74f
--- /dev/null
+++ b/vendor/github.com/elastic/go-grok/dev-tools/mage/gotool/noticer.go
@@ -0,0 +1,38 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package gotool
+
+import "github.com/magefile/mage/sh"
+
+type goNoticeGenerator func(opts ...ArgOpt) error
+
+// NoticeGenerator runs `go-licence-detector` and provides optionals for adding command line arguments.
+var NoticeGenerator goNoticeGenerator = runGoNoticeGenerator
+
+func runGoNoticeGenerator(opts ...ArgOpt) error {
+	args := buildArgs(opts).build()
+	return sh.RunV("go-licence-detector", args...)
+}
+
+func (goNoticeGenerator) Dependencies(path string) ArgOpt { return flagArg("-in", path) }
+func (goNoticeGenerator) IncludeIndirect() ArgOpt { return flagBoolIf("-includeIndirect", true) }
+func (goNoticeGenerator) Rules(path string) ArgOpt { return flagArg("-rules", path) }
+func (goNoticeGenerator) Overrides(path string) ArgOpt { return flagArg("-overrides", path) }
+func (goNoticeGenerator) NoticeTemplate(path string) ArgOpt { return flagArg("-noticeTemplate", path) }
+func (goNoticeGenerator) NoticeOutput(path string) ArgOpt { return flagArg("-noticeOut", path) }
+func (goNoticeGenerator) DepsOutput(path string) ArgOpt { return flagArg("-depsOut", path) }
diff --git a/vendor/github.com/elastic/go-grok/dev-tools/mage/install.go b/vendor/github.com/elastic/go-grok/dev-tools/mage/install.go
new file mode 100644
index 00000000000..2a4002011f8
--- /dev/null
+++ b/vendor/github.com/elastic/go-grok/dev-tools/mage/install.go
@@ -0,0 +1,44 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package mage
+
+import (
+	"github.com/elastic/go-grok/dev-tools/mage/gotool"
+)
+
+var (
+	// GoLicenserImportPath controls the import path used to install go-licenser.
+	GoLicenserImportPath = "github.com/elastic/go-licenser"
+
+	// GoNoticeGeneratorImportPath controls the import path used to install go-licence-detector.
+	GoNoticeGeneratorImportPath = "go.elastic.co/go-licence-detector"
+)
+
+// InstallGoLicenser installs go-licenser
+func InstallGoLicenser() error {
+	return gotool.Install(
+		gotool.Install.Package(GoLicenserImportPath),
+	)
+}
+
+// InstallGoNoticeGen installs go-licence-detector
+func InstallGoNoticeGen() error {
+	return gotool.Install(
+		gotool.Install.Package(GoNoticeGeneratorImportPath),
+	)
+}
diff --git a/vendor/github.com/elastic/go-grok/dev-tools/mage/linter.go b/vendor/github.com/elastic/go-grok/dev-tools/mage/linter.go
new file mode 100644
index 00000000000..e25e0d9b870
--- /dev/null
+++ b/vendor/github.com/elastic/go-grok/dev-tools/mage/linter.go
@@ -0,0 +1,175 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.
See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package mage
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"log"
+	"net/http"
+	"os"
+	"path/filepath"
+
+	"github.com/magefile/mage/mg"
+	"github.com/magefile/mage/sh"
+)
+
+const (
+	linterVersion    = "v1.55.2"
+	linterInstallURL = "https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh"
+)
+
+var (
+	linterConfigFilename = filepath.Join(".", ".golangci.yml")
+	linterInstallDir     = filepath.Join(".", "build")
+	linterInstallFile    = filepath.Join(linterInstallDir, "install-golang-ci.sh")
+	linterBinaryFile     = filepath.Join(linterInstallDir, linterVersion, "golangci-lint")
+)
+
+// Linter contains targets related to linting the Go code
+type Linter mg.Namespace
+
+// CheckConfig makes sure that the `.golangci.yml` does not have uncommitted changes
+func (Linter) CheckConfig() error {
+	err := assertUnchanged(linterConfigFilename)
+	if err != nil {
+		return fmt.Errorf("linter configuration has uncommitted changes: %w", err)
+	}
+	return nil
+}
+
+// Install installs golangci-lint (https://golangci-lint.run) to `./build`
+// using the official installation script downloaded from GitHub.
+// If the linter binary already exists, it does nothing.
+func (Linter) Install() error {
+	return install(false)
+}
+
+// ForceInstall force installs the linter regardless of whether it already exists.
+func (Linter) ForceInstall() error {
+	return install(true)
+}
+
+func install(force bool) error {
+	dirPath := filepath.Dir(linterBinaryFile)
+	err := os.MkdirAll(dirPath, 0700)
+	if err != nil {
+		return fmt.Errorf("failed to create path %q: %w", dirPath, err)
+	}
+
+	_, err = os.Stat(linterBinaryFile)
+	if !force && err == nil {
+		log.Println("The linter is already installed, skipping...")
+		return nil
+	}
+	if err != nil && !errors.Is(err, os.ErrNotExist) {
+		return fmt.Errorf("failed to check if file %q exists: %w", linterBinaryFile, err)
+	}
+
+	log.Println("Preparing the installation script file...")
+
+	installScript, err := os.OpenFile(linterInstallFile, os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0700)
+	if err != nil {
+		return fmt.Errorf("failed to create file %q: %w", linterInstallFile, err)
+	}
+	defer installScript.Close()
+
+	log.Println("Downloading the linter installation script...")
+	//nolint:noctx // valid use since there is no context
+	resp, err := http.Get(linterInstallURL)
+	if err != nil {
+		return fmt.Errorf("cannot download the linter installation script from %q: %w", linterInstallURL, err)
+	}
+	defer resp.Body.Close()
+
+	lr := io.LimitReader(resp.Body, 1024*100) // not more than 100 KB, just to be safe
+	_, err = io.Copy(installScript, lr)
+	if err != nil {
+		return fmt.Errorf("failed to finish downloading the linter installation script: %w", err)
+	}
+
+	err = installScript.Close() // otherwise we cannot run the script
+	if err != nil {
+		return fmt.Errorf("failed to close file %q: %w", linterInstallFile, err)
+	}
+
+	binaryDir := filepath.Dir(linterBinaryFile)
+	err = os.MkdirAll(binaryDir, 0700)
+	if err != nil {
+		return fmt.Errorf("cannot create path %q: %w", binaryDir, err)
+	}
+
+	// there must be no space after `-b`, otherwise the script does not work correctly ¯\_(ツ)_/¯
+	return sh.Run(linterInstallFile, "-b"+binaryDir, linterVersion)
+}
+
+// All runs the linter against the entire codebase
+func (l Linter) All() error {
+	mg.Deps(l.Install, l.CheckConfig)
+	return runLinter()
+}
+
+// Version prints the version of the linter in use.
+func (l Linter) Version() error { + mg.Deps(l.Install) + return runLinter("--version") +} + +// LastChange runs the linter against all files changed since the fork point from `main`. +// If the current branch is `main` then runs against the files changed in the last commit. +func (l Linter) LastChange() error { + mg.Deps(l.Install, l.CheckConfig) + + // get current branch name + branch, err := sh.Output("git", "rev-parse", "--abbrev-ref", "HEAD") + if err != nil { + return fmt.Errorf("failed to get the current branch: %w", err) + } + + // the linter is supposed to support linting changed diffs only but, + // for some reason, it simply does not work - does not output any + // results without linting the whole files, so we have to use `--whole-files` + // which can lead to some frustration from developers who would like to + // fix a single line in an existing codebase and the linter would force them + // into fixing all linting issues in the whole file instead + + if branch == "main" { + // files changed in the last commit + return runLinter("--new-from-rev=HEAD~", "--whole-files") + } + + return runLinter("--new-from-rev=origin/main", "--whole-files") +} + +// runLinter runs the linter passing the `mage -v` (verbose mode) and given arguments. +// Also redirects linter's output to the `stderr` instead of discarding it. +func runLinter(runFlags ...string) error { + var args []string + + if mg.Verbose() { + args = append(args, "-v") + } + + args = append(args, "run") + args = append(args, runFlags...) + args = append(args, "-c", linterConfigFilename) + args = append(args, "./...") + + return runWithStdErr(linterBinaryFile, args...) +} diff --git a/vendor/github.com/elastic/go-grok/dev-tools/mage/mage.go b/vendor/github.com/elastic/go-grok/dev-tools/mage/mage.go new file mode 100644 index 00000000000..05841a0274e --- /dev/null +++ b/vendor/github.com/elastic/go-grok/dev-tools/mage/mage.go @@ -0,0 +1,40 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package mage + +import ( + "fmt" + "os" + + "github.com/magefile/mage/sh" +) + +func assertUnchanged(path string) error { + err := sh.Run("git", "diff", "--exit-code", path) + if err != nil { + return fmt.Errorf("failed to assert the unchanged file %q: %w", path, err) + } + + return nil +} + +// runWithStdErr runs a command redirecting its stderr to the console instead of discarding it +func runWithStdErr(command string, args ...string) error { + _, err := sh.Exec(nil, os.Stdout, os.Stderr, command, args...) 
+	return err
+}
diff --git a/vendor/github.com/elastic/go-grok/dev-tools/mage/notice.go b/vendor/github.com/elastic/go-grok/dev-tools/mage/notice.go
new file mode 100644
index 00000000000..4dc48c4763e
--- /dev/null
+++ b/vendor/github.com/elastic/go-grok/dev-tools/mage/notice.go
@@ -0,0 +1,57 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package mage
+
+import (
+	"fmt"
+	"os"
+
+	"github.com/magefile/mage/mg"
+
+	"github.com/elastic/go-grok/dev-tools/mage/gotool"
+)
+
+// GenerateNotice regenerates NOTICE.txt from the module's dependency list.
+func GenerateNotice(overrides, rules, noticeTemplate string) error {
+	mg.Deps(InstallGoNoticeGen, Deps.CheckModuleTidy)
+
+	err := gotool.Mod.Download(gotool.Download.All())
+	if err != nil {
+		return fmt.Errorf("error while downloading dependencies: %w", err)
+	}
+
+	// Ensure the go.mod file is left unchanged after go mod download all runs.
+	// go mod download will modify go.sum in a way that conflicts with go mod tidy.
+	// https://github.com/golang/go/issues/43994#issuecomment-770053099
+	defer gotool.Mod.Tidy() //nolint:errcheck // No value in handling this error.
+
+	out, err := gotool.ListDepsForNotice()
+	if err != nil {
+		return fmt.Errorf("error while listing dependencies: %w", err)
+	}
+	depsFile, err := os.CreateTemp("", "depsout")
+	if err != nil {
+		return fmt.Errorf("error while creating temp file: %w", err)
+	}
+	defer os.Remove(depsFile.Name())
+	if _, err := depsFile.Write([]byte(out)); err != nil {
+		return fmt.Errorf("error while writing dependency list: %w", err)
+	}
+	if err := depsFile.Close(); err != nil {
+		return fmt.Errorf("error while closing temp file: %w", err)
+	}
+
+	generator := gotool.NoticeGenerator
+	return generator(
+		generator.Dependencies(depsFile.Name()),
+		generator.IncludeIndirect(),
+		generator.Overrides(overrides),
+		generator.Rules(rules),
+		generator.NoticeTemplate(noticeTemplate),
+		generator.NoticeOutput("NOTICE.txt"),
+	)
+}
diff --git a/vendor/github.com/elastic/go-grok/grok.go b/vendor/github.com/elastic/go-grok/grok.go
new file mode 100644
index 00000000000..8fe018cb18e
--- /dev/null
+++ b/vendor/github.com/elastic/go-grok/grok.go
@@ -0,0 +1,377 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
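GenerateNotice above drives go-licence-detector entirely through the gotool functional options; a reduced, self-contained sketch of that idiom (all names here are illustrative stand-ins, not the vendored API):

package main

import "fmt"

// args mirrors the flag-collecting core of gotool.Args.
type args struct{ flags []string }

// opt is the functional-option type, analogous to gotool.ArgOpt.
type opt func(*args)

func flag(name, value string) opt {
	return func(a *args) { a.flags = append(a.flags, name, value) }
}

func build(opts ...opt) []string {
	a := &args{}
	for _, o := range opts {
		if o != nil { // nil options are no-ops, as in gotool's combine helper
			o(a)
		}
	}
	return a.flags
}

func main() {
	// Equivalent in spirit to generator(generator.Rules(...), generator.NoticeOutput(...)).
	cli := build(flag("-rules", "rules.json"), flag("-noticeOut", "NOTICE.txt"))
	fmt.Println(cli) // [-rules rules.json -noticeOut NOTICE.txt]
}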
+
+package grok
+
+import (
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+
+	"github.com/elastic/go-grok/patterns"
+)
+
+const dotSep = "___"
+
+var (
+	ErrParseFailure    = fmt.Errorf("parsing failed")
+	ErrTypeNotProvided = fmt.Errorf("type not specified")
+	ErrUnsupportedName = fmt.Errorf("name contains unsupported character ':'")
+
+	// a grok reference can be specified in either of these forms:
+	// %{SYNTAX} - e.g. %{NUMBER}
+	// %{SYNTAX:ID} - e.g. %{NUMBER:MY_AGE}
+	// %{SYNTAX:ID:TYPE} - e.g. %{NUMBER:MY_AGE:INT}
+	// supported types are int, long, double, float and boolean;
+	// in this Go implementation int and long both result in int, while
+	// double and float both result in float
+	reusePattern = regexp.MustCompile(`%{(\w+(?::[\w+.]+(?::\w+)?)?)}`)
+)
+
+type Grok struct {
+	patternDefinitions    map[string]string
+	re                    *regexp.Regexp
+	typeHints             map[string]string
+	lookupDefaultPatterns bool
+}
+
+func New() *Grok {
+	return &Grok{
+		patternDefinitions:    make(map[string]string),
+		lookupDefaultPatterns: true,
+	}
+}
+
+func NewWithoutDefaultPatterns() *Grok {
+	return &Grok{
+		patternDefinitions: make(map[string]string),
+	}
+}
+
+func NewWithPatterns(patterns ...map[string]string) (*Grok, error) {
+	g := &Grok{
+		patternDefinitions:    make(map[string]string),
+		lookupDefaultPatterns: true,
+	}
+
+	for _, p := range patterns {
+		if err := g.AddPatterns(p); err != nil {
+			return nil, err
+		}
+	}
+
+	return g, nil
+}
+
+// NewComplete creates a grok parser with the full set of bundled patterns
+func NewComplete(additionalPatterns ...map[string]string) (*Grok, error) {
+	g, err := NewWithPatterns(
+		patterns.AWS,
+		patterns.Bind9,
+		patterns.Bro,
+		patterns.Exim,
+		patterns.HAProxy,
+		patterns.Httpd,
+		patterns.Firewalls,
+		patterns.Java,
+		patterns.Junos,
+		patterns.Maven,
+		patterns.MCollective,
+		patterns.MongoDB,
+		patterns.PostgreSQL,
+		patterns.Rails,
+		patterns.Redis,
+		patterns.Ruby,
+		patterns.Squid,
+		patterns.Syslog,
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, p := range additionalPatterns {
+		if err := g.AddPatterns(p); err != nil {
+			return nil, err
+		}
+	}
+
+	return g, nil
+}
+
+func (grok *Grok) AddPattern(name, patternDefinition string) error {
+	if strings.ContainsRune(name, ':') {
+		return ErrUnsupportedName
+	}
+
+	// overwrite existing if present
+	grok.patternDefinitions[name] = patternDefinition
+	return nil
+}
+
+func (grok *Grok) AddPatterns(patternDefinitions map[string]string) error {
+	// overwrite existing if present
+	for name, patternDefinition := range patternDefinitions {
+		if strings.ContainsRune(name, ':') {
+			return ErrUnsupportedName
+		}
+
+		grok.patternDefinitions[name] = patternDefinition
+	}
+	return nil
+}
+
+func (grok *Grok) HasCaptureGroups() bool {
+	if grok == nil || grok.re == nil {
+		return false
+	}
+
+	for _, groupName := range grok.re.SubexpNames() {
+		if groupName != "" {
+			return true
+		}
+	}
+
+	return false
+}
+
+func (grok *Grok) Compile(pattern string, namedCapturesOnly bool) error {
+	return grok.compile(pattern, namedCapturesOnly)
+}
+
+func (grok *Grok) Match(text []byte) bool {
+	return grok.re.Match(text)
+}
+
+func (grok *Grok) MatchString(text string) bool {
+	return grok.re.MatchString(text)
+}
+
+// ParseString parses text given as a string and returns a map[string]string
+// of captures, with values not converted according to type hints.
+// When the expression does not match, an empty map is returned.
+func (grok *Grok) ParseString(text string) (map[string]string, error) {
+	return grok.captureString(text)
+}
+
+// Parse parses text given as []byte and returns a map[string][]byte of
+// captures, with values not converted according to type hints.
+// When the expression does not match, an empty map is returned.
+func (grok *Grok) Parse(text []byte) (map[string][]byte, error) {
+	return grok.captureBytes(text)
+}
+
+// ParseTyped parses text and returns a map[string]interface{} with values
+// typed according to the type hints collected at compile time. Values
+// without a hint are returned as strings; an unsupported hint yields an
+// error wrapping ErrTypeNotProvided.
+// When the expression does not match, an empty map is returned.
+func (grok *Grok) ParseTyped(text []byte) (map[string]interface{}, error) {
+	captures, err := grok.captureTyped(text)
+	if err != nil {
+		return nil, err
+	}
+
+	typed := make(map[string]interface{})
+	for k, v := range captures {
+		typed[k] = v
+	}
+
+	return typed, nil
+}
+
+// ParseTypedString parses text given as a string, with the same semantics
+// as ParseTyped.
+func (grok *Grok) ParseTypedString(text string) (map[string]interface{}, error) {
+	return grok.ParseTyped([]byte(text))
+}
+
+func (grok *Grok) compile(pattern string, namedCapturesOnly bool) error {
+	// get expanded pattern
+	expandedExpression, hints, err := grok.expand(pattern, namedCapturesOnly)
+	if err != nil {
+		return err
+	}
+
+	compiledExpression, err := regexp.Compile(expandedExpression)
+	if err != nil {
+		return err
+	}
+
+	grok.re = compiledExpression
+	grok.typeHints = hints
+
+	return nil
+}
+
+func (grok *Grok) captureString(text string) (map[string]string, error) {
+	return captureTypeFn(grok.re, text,
+		func(v, _ string) (string, error) {
+			return v, nil
+		},
+	)
+}
+
+func (grok *Grok) captureBytes(text []byte) (map[string][]byte, error) {
+	return captureTypeFn(grok.re, string(text),
+		func(v, _ string) ([]byte, error) {
+			return []byte(v), nil
+		},
+	)
+}
+
+func (grok *Grok) captureTyped(text []byte) (map[string]interface{}, error) {
+	return captureTypeFn(grok.re, string(text), grok.convertMatch)
+}
+
+func captureTypeFn[K any](re *regexp.Regexp, text string, conversionFn func(v, key string) (K, error)) (map[string]K, error) {
+	captures := make(map[string]K)
+
+	matches := re.FindStringSubmatch(text)
+	if len(matches) == 0 {
+		return captures, nil
+	}
+
+	names := re.SubexpNames()
+	if len(names) == 0 {
+		return captures, nil
+	}
+
+	for i, name := range names {
+		if len(name) == 0 {
+			continue
+		}
+
+		match := matches[i]
+		if len(match) == 0 {
+			continue
+		}
+
+		if conversionFn != nil {
+			v, err := conversionFn(match, name)
+			if err != nil {
+				return nil, err
+			}
+			captures[strings.ReplaceAll(name, dotSep, ".")] = v
+		}
+	}
+
+	return captures, nil
+}
+
+func (grok *Grok) convertMatch(match, name string) (interface{}, error) {
+	hint, found := grok.typeHints[name]
+	if !found {
+		return match, nil
+	}
+
+	switch hint {
+	case "string":
+		return match, nil
+
+	case "double", "float":
+		return strconv.ParseFloat(match, 64)
+
+	case "int", "long":
+		return strconv.Atoi(match)
+
+	case "bool", "boolean":
+		return strconv.ParseBool(match)
+
+	default:
+		return nil, fmt.Errorf("invalid type for %v: %w", name, ErrTypeNotProvided)
+	}
+}
+
+// expand
processes a pattern and returns the expanded regular expression, the
+// collected type hints, and an error.
+func (grok *Grok) expand(pattern string, namedCapturesOnly bool) (string, map[string]string, error) {
+	hints := make(map[string]string)
+	expandedPattern := pattern
+
+	// The recursion break guards against cyclic references in pattern
+	// definitions. As this runs only once, at compile time, a cleverer
+	// approach (e.g. detecting cycles in the reference graph) is TBD.
+	for recursionBreak := 1000; recursionBreak > 0; recursionBreak-- {
+		subMatches := reusePattern.FindAllStringSubmatch(expandedPattern, -1)
+		if len(subMatches) == 0 {
+			// nothing to expand anymore
+			break
+		}
+
+		for _, nameSubmatch := range subMatches {
+			// grok references can be specified in either of these forms:
+			// %{SYNTAX} - e.g. %{NUMBER}
+			// %{SYNTAX:ID} - e.g. %{NUMBER:MY_AGE}
+			// %{SYNTAX:ID:TYPE} - e.g. %{NUMBER:MY_AGE:INT}
+
+			// nameSubmatch looks like ["%{NAME:ID:TYPE}" "NAME:ID:TYPE"];
+			// we need only the inner part
+			nameParts := strings.Split(nameSubmatch[1], ":")
+
+			grokId := nameParts[0]
+			var targetId string
+			if len(nameParts) > 1 {
+				targetId = strings.ReplaceAll(nameParts[1], ".", dotSep)
+			} else {
+				targetId = nameParts[0]
+			}
+			// collect type hints for used patterns
+			if len(nameParts) == 3 {
+				hints[targetId] = nameParts[2]
+			}
+
+			knownPattern, found := grok.lookupPattern(grokId)
+			if !found {
+				return "", nil, fmt.Errorf("pattern definition %q unknown: %w", grokId, ErrParseFailure)
+			}
+
+			var replacementPattern string
+			if namedCapturesOnly && len(nameParts) == 1 {
+				// this form carries no name (plain %{PATTERN}), so we don't need to capture
+				replacementPattern = "(" + knownPattern + ")"
+			} else {
+				replacementPattern = "(?P<" + targetId + ">" + knownPattern + ")"
+			}
+
+			// expand pattern with definition
+			expandedPattern = strings.ReplaceAll(expandedPattern, nameSubmatch[0], replacementPattern)
+		}
+	}
+
+	return expandedPattern, hints, nil
+}
+
+func (grok *Grok) lookupPattern(grokId string) (string, bool) {
+	if knownPattern, found := grok.patternDefinitions[grokId]; found {
+		return knownPattern, found
+	}
+
+	if grok.lookupDefaultPatterns {
+		if knownPattern, found := patterns.Default[grokId]; found {
+			return knownPattern, found
+		}
+	}
+
+	return "", false
+}
diff --git a/vendor/github.com/elastic/go-grok/magefile.go b/vendor/github.com/elastic/go-grok/magefile.go
new file mode 100644
index 00000000000..44959377f54
--- /dev/null
+++ b/vendor/github.com/elastic/go-grok/magefile.go
@@ -0,0 +1,90 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
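A minimal end-to-end sketch of the Grok API defined above (the log line is invented for illustration; error handling shortened to panics):

package main

import (
	"fmt"

	grok "github.com/elastic/go-grok"
)

func main() {
	g := grok.New() // default pattern table enabled

	// %{SYNTAX:ID:TYPE} — the :int hint makes ParseTypedString return an int.
	err := g.Compile(`%{IP:client.address} %{INT:http.response.status_code:int}`, true)
	if err != nil {
		panic(err)
	}

	fields, err := g.ParseTypedString("203.0.113.7 404")
	if err != nil {
		panic(err)
	}
	fmt.Println(fields["client.address"], fields["http.response.status_code"])
	// Output: 203.0.113.7 404
}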
+ +//go:build mage +// +build mage + +package main + +import ( + "fmt" + "path/filepath" + + "github.com/magefile/mage/mg" + + // mage:import + "github.com/elastic/go-grok/dev-tools/mage" + + devtools "github.com/elastic/go-grok/dev-tools/mage" + "github.com/elastic/go-grok/dev-tools/mage/gotool" +) + +// Aliases are shortcuts to long target names. +// nolint: deadcode // it's used by `mage`. +var Aliases = map[string]interface{}{ + "llc": mage.Linter.LastChange, + "lint": mage.Linter.All, +} + +// Check runs all the checks +// nolint: deadcode,unparam // it's used as a `mage` target and requires returning an error +func Check() error { + mg.Deps(devtools.InstallGoLicenser) + mg.Deps(devtools.Deps.CheckModuleTidy, CheckLicenseHeaders) + mg.Deps(devtools.CheckNoChanges) + return nil +} + +// Fmt formats code and adds license headers. +func Fmt() { + mg.Deps(devtools.GoImports.Run) + mg.Deps(AddLicenseHeaders) +} + +// AddLicenseHeaders adds ASL2 headers to .go files +func AddLicenseHeaders() error { + fmt.Println(">> fmt - go-licenser: Adding missing headers") + + mg.Deps(devtools.InstallGoLicenser) + + licenser := gotool.Licenser + + return licenser( + licenser.License("ASL2"), + ) +} + +// CheckLicenseHeaders checks ASL2 headers in .go files +func CheckLicenseHeaders() error { + mg.Deps(devtools.InstallGoLicenser) + + licenser := gotool.Licenser + + return licenser( + licenser.Check(), + licenser.License("ASL2"), + ) +} + +// Notice generates a NOTICE.txt file for the module. +func Notice() error { + return devtools.GenerateNotice( + filepath.Join("dev-tools", "templates", "notice", "overrides.json"), + filepath.Join("dev-tools", "templates", "notice", "rules.json"), + filepath.Join("dev-tools", "templates", "notice", "NOTICE.txt.tmpl"), + ) +} diff --git a/vendor/github.com/elastic/go-grok/patterns/aws.go b/vendor/github.com/elastic/go-grok/patterns/aws.go new file mode 100644 index 00000000000..3377bd3029d --- /dev/null +++ b/vendor/github.com/elastic/go-grok/patterns/aws.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
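The AWS patterns below compose simpler definitions into full log formats; a small sketch (the request line is invented for illustration) of matching S3_REQUEST_LINE through NewComplete, which registers this table:

package main

import (
	"fmt"

	grok "github.com/elastic/go-grok"
)

func main() {
	g, err := grok.NewComplete() // loads patterns.AWS among others
	if err != nil {
		panic(err)
	}
	if err := g.Compile(`%{S3_REQUEST_LINE}`, true); err != nil {
		panic(err)
	}

	// The nested %{WORD:...}, %{NOTSPACE:...} and %{NUMBER:...} references
	// inside S3_REQUEST_LINE provide the named captures.
	fields, _ := g.ParseString("GET /images/logo.png HTTP/1.1")
	fmt.Println(fields["http.request.method"], fields["url.original"], fields["http.version"])
	// Output: GET /images/logo.png 1.1
}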
+ +package patterns + +var AWS map[string]string = map[string]string{ + "S3_REQUEST_LINE": `(?:%{WORD:http.request.method} %{NOTSPACE:url.original}(?: HTTP/%{NUMBER:http.version})?)`, + "S3_ACCESS_LOG": `%{WORD:aws.s3access.bucket_owner} %{NOTSPACE:aws.s3access.bucket} \[%{HTTPDATE:timestamp}\] (?:-|%{IP:client.address}) (?:-|%{NOTSPACE:client.user.id}) %{NOTSPACE:aws.s3access.request_id} %{NOTSPACE:aws.s3access.operation} (?:-|%{NOTSPACE:aws.s3access.key}) (?:-|"%{S3_REQUEST_LINE:aws.s3access.request_uri}") (?:-|%{INT:http.response.status_code:int}) (?:-|%{NOTSPACE:aws.s3access.error_code}) (?:-|%{INT:aws.s3access.bytes_sent:long}) (?:-|%{INT:aws.s3access.object_size:long}) (?:-|%{INT:aws.s3access.total_time:int}) (?:-|%{INT:aws.s3access.turn_around_time:int}) "(?:-|%{DATA:http.request.referrer})" "(?:-|%{DATA:user_agent.original})" (?:-|%{NOTSPACE:aws.s3access.version_id})(?: (?:-|%{NOTSPACE:aws.s3access.host_id}) (?:-|%{NOTSPACE:aws.s3access.signature_version}) (?:-|%{NOTSPACE:tls.cipher}) (?:-|%{NOTSPACE:aws.s3access.authentication_type}) (?:-|%{NOTSPACE:aws.s3access.host_header}) (?:-|%{NOTSPACE:aws.s3access.tls_version}))?`, + + "ELB_URIHOST": `%{IPORHOST:url.domain}(?::%{POSINT:url.port:int})?`, + "ELB_URIPATHQUERY": `%{URIPATH:url.path}(?:\?%{URIQUERY:url.query})?`, + "ELB_URIPATHPARAM": `%{ELB_URIPATHQUERY}`, + "ELB_URI": `%{URIPROTO:url.scheme}://(?:%{USER:url.username}(?::[^@]*)?@)?(?:%{ELB_URIHOST})?(?:%{ELB_URIPATHQUERY})?`, + "ELB_REQUEST_LINE": `(?:%{WORD:http.request.method} %{ELB_URI:url.original}(?: HTTP/%{NUMBER:http.version})?)`, + "ELB_V1_HTTP_LOG": `%{TIMESTAMP_ISO8601:timestamp} %{NOTSPACE:aws.elb.name} %{IP:source.address}:%{INT:source.port:int} (?:-|(?:%{IP:aws.elb.backend.ip}:%{INT:aws.elb.backend.port:int})) (?:-1|%{NUMBER:aws.elb.request_processing_time.sec:float}) (?:-1|%{NUMBER:aws.elb.backend_processing_time.sec:float}) (?:-1|%{NUMBER:aws.elb.response_processing_time.sec:float}) %{INT:http.response.status_code:int} (?:-|%{INT:aws.elb.backend.http.response.status_code:int}) %{INT:http.request.body.size:long} %{INT:http.response.body.size:long} "%{ELB_REQUEST_LINE}"(?: "(?:-|%{DATA:user_agent.original})" (?:-|%{NOTSPACE:tls.cipher}) (?:-|%{NOTSPACE:aws.elb.ssl_protocol}))?`, + "ELB_ACCESS_LOG": `%{ELB_V1_HTTP_LOG}`, + + "CLOUDFRONT_ACCESS_LOG": 
`(?%{YEAR}[-]%{MONTHNUM}[-]%{MONTHDAY}\t%{TIME})\t%{WORD:aws.cloudfront.x_edge_location}\t(?:-|%{INT:destination.bytes:long})\t%{IPORHOST:source.address}\t%{WORD:http.request.method}\t%{HOSTNAME:url.domain}\t%{NOTSPACE:url.path}\t(?:(?:000)|%{INT:http.response.status_code:int})\t(?:-|%{DATA:http.request.referrer})\t%{DATA:user_agent.original}\t(?:-|%{DATA:url.query})\t(?:-|%{DATA:aws.cloudfront.http.request.cookie})\t%{WORD:aws.cloudfront.x_edge_result_type}\t%{NOTSPACE:aws.cloudfront.x_edge_request_id}\t%{HOSTNAME:aws.cloudfront.http.request.host}\t%{URIPROTO:network.protocol.name}\t(?:-|%{INT:source.bytes:long})\t%{NUMBER:aws.cloudfront.time_taken:float}\t(?:-|%{IP:network.forwarded_ip})\t(?:-|%{DATA:aws.cloudfront.ssl_protocol})\t(?:-|%{NOTSPACE:tls.cipher})\t%{WORD:aws.cloudfront.x_edge_response_result_type}(?:\t(?:-|HTTP/%{NUMBER:http.version})\t(?:-|%{DATA:aws.cloudfront.fle_status})\t(?:-|%{DATA:aws.cloudfront.fle_encrypted_fields})\t%{INT:source.port:int}\t%{NUMBER:aws.cloudfront.time_to_first_byte:float}\t(?:-|%{DATA:aws.cloudfront.x_edge_detailed_result_type})\t(?:-|%{NOTSPACE:http.request.mime_type})\t(?:-|%{INT:aws.cloudfront.http.request.size:long})\t(?:-|%{INT:aws.cloudfront.http.request.range.start:long})\t(?:-|%{INT:aws.cloudfront.http.request.range.end:long}))?`, +} diff --git a/vendor/github.com/elastic/go-grok/patterns/bind9.go b/vendor/github.com/elastic/go-grok/patterns/bind9.go new file mode 100644 index 00000000000..1639fa4f93c --- /dev/null +++ b/vendor/github.com/elastic/go-grok/patterns/bind9.go @@ -0,0 +1,27 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package patterns + +var Bind9 map[string]string = map[string]string{ + "BIND9_TIMESTAMP": `%{MONTHDAY}[-]%{MONTH}[-]%{YEAR} %{TIME}`, + "BIND9_DNSTYPE": `(?:A|AAAA|CAA|CDNSKEY|CDS|CERT|CNAME|CSYNC|DLV|DNAME|DNSKEY|DS|HINFO|LOC|MX|NAPTR|NS|NSEC|NSEC3|OPENPGPKEY|PTR|RRSIG|RP|SIG|SMIMEA|SOA|SRV|TSIG|TXT|URI|IN)`, + "BIND9_CATEGORY": `(?:queries)`, + "BIND9_QUERYLOGBASE": `client(:? @0x(?:[0-9A-Fa-f]+))? %{IP:client.address}#%{POSINT:client.port:int} \(%{GREEDYDATA:bind.log.question.name}\): query: %{GREEDYDATA:dns.question.name} (?P(?:IN)) %{BIND9_DNSTYPE:dns.question.type}(:? %{DATA:bind.log.question.flags})? \(%{IP:server.address}\)`, + "BIND9_QUERYLOG": `%{BIND9_TIMESTAMP:timestamp} %{BIND9_CATEGORY:bing.log.category}: %{LOGLEVEL:log.level}: %{BIND9_QUERYLOGBASE}`, + "BIND9": `%{BIND9_QUERYLOG}`, +} diff --git a/vendor/github.com/elastic/go-grok/patterns/bro.go b/vendor/github.com/elastic/go-grok/patterns/bro.go new file mode 100644 index 00000000000..6e1ba00d93a --- /dev/null +++ b/vendor/github.com/elastic/go-grok/patterns/bro.go @@ -0,0 +1,27 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package patterns + +var Bro map[string]string = map[string]string{ + "BRO_BOOL": `[TF]`, + "BRO_DATA": `[^\t]+`, + "BRO_HTTP": `%{NUMBER:timestamp}\t%{NOTSPACE:zeek.session_id}\t%{IP:source.address}\t%{INT:source.port:int}\t%{IP:destination.address}\t%{INT:destination.port:int}\t%{INT:zeek.http.trans_depth:int}\t(?:-|%{WORD:http.request.method})\t(?:-|%{BRO_DATA:url.domain})\t(?:-|%{BRO_DATA:url.original})\t(?:-|%{BRO_DATA:http.request.referrer})\t(?:-|%{BRO_DATA:user_agent.original})\t(?:-|%{NUMBER:http.request.body.size:long})\t(?:-|%{NUMBER:http.response.body.size:long})\t(?:-|%{POSINT:http.response.status_code:int})\t(?:-|%{DATA:zeek.http.status_msg})\t(?:-|%{POSINT:zeek.http.info_code:int})\t(?:-|%{DATA:zeek.http.info_msg})\t(?:-|%{BRO_DATA:zeek.http.filename})\t(?:\(empty\)|%{BRO_DATA:zeek.http.tags})\t(?:-|%{BRO_DATA:url.username})\t(?:-|%{BRO_DATA:url.password})\t(?:-|%{BRO_DATA:zeek.http.proxied})\t(?:-|%{BRO_DATA:zeek.http.orig_fuids})\t(?:-|%{BRO_DATA:http.request.mime_type})\t(?:-|%{BRO_DATA:zeek.http.resp_fuids})\t(?:-|%{BRO_DATA:http.response.mime_type})`, + "BRO_DNS": `%{NUMBER:timestamp}\t%{NOTSPACE:zeek.session_id}\t%{IP:source.address}\t%{INT:source.port:int}\t%{IP:destination.address}\t%{INT:destination.port:int}\t%{WORD:network.transport}\t(?:-|%{INT:dns.id:int})\t(?:-|%{BRO_DATA:dns.question.name})\t(?:-|%{INT:zeek.dns.qclass:int})\t(?:-|%{BRO_DATA:zeek.dns.qclass_name})\t(?:-|%{INT:zeek.dns.qtype:int})\t(?:-|%{BRO_DATA:dns.question.type})\t(?:-|%{INT:zeek.dns.rcode:int})\t(?:-|%{BRO_DATA:dns.response_code})\t(?:-|%{BRO_BOOL:zeek.dns.AA})\t(?:-|%{BRO_BOOL:zeek.dns.TC})\t(?:-|%{BRO_BOOL:zeek.dns.RD})\t(?:-|%{BRO_BOOL:zeek.dns.RA})\t(?:-|%{NONNEGINT:zeek.dns.Z:int})\t(?:-|%{BRO_DATA:zeek.dns.answers})\t(?:-|%{DATA:zeek.dns.TTLs})\t(?:-|%{BRO_BOOL:zeek.dns.rejected})`, + "BRO_CONN": `%{NUMBER:timestamp}\t%{NOTSPACE:zeek.session_id}\t%{IP:source.address}\t%{INT:source.port:int}\t%{IP:destination.address}\t%{INT:destination.port:int}\t%{WORD:network.transport}\t(?:-|%{BRO_DATA:network.protocol.name})\t(?:-|%{NUMBER:zeek.connection.duration:float})\t(?:-|%{INT:zeek.connection.orig_bytes:long})\t(?:-|%{INT:zeek.connection.resp_bytes:long})\t(?:-|%{BRO_DATA:zeek.connection.state})\t(?:-|%{BRO_BOOL:zeek.connection.local_orig})\t(?:(?:-|%{BRO_BOOL:zeek.connection.local_resp})\t)?(?:-|%{INT:zeek.connection.missed_bytes:long})\t(?:-|%{BRO_DATA:zeek.connection.history})\t(?:-|%{INT:source.packets:long})\t(?:-|%{INT:source.bytes:long})\t(?:-|%{INT:destination.packets:long})\t(?:-|%{INT:destination.bytes:long})\t(?:\(empty\)|%{BRO_DATA:zeek.connection.tunnel_parents})`, + "BRO_FILES": 
`%{NUMBER:timestamp}\t%{NOTSPACE:zeek.files.fuid}\t(?:-|%{IP:server.address})\t(?:-|%{IP:client.address})\t(?:-|%{BRO_DATA:zeek.files.session_ids})\t(?:-|%{BRO_DATA:zeek.files.source})\t(?:-|%{INT:zeek.files.depth:int})\t(?:-|%{BRO_DATA:zeek.files.analyzers})\t(?:-|%{BRO_DATA:file.mime_type})\t(?:-|%{BRO_DATA:file.name})\t(?:-|%{NUMBER:zeek.files.duration:float})\t(?:-|%{BRO_DATA:zeek.files.local_orig})\t(?:-|%{BRO_BOOL:zeek.files.is_orig})\t(?:-|%{INT:zeek.files.seen_bytes:long})\t(?:-|%{INT:file.size:long})\t(?:-|%{INT:zeek.files.missing_bytes:long})\t(?:-|%{INT:zeek.files.overflow_bytes:long})\t(?:-|%{BRO_BOOL:zeek.files.timedout})\t(?:-|%{BRO_DATA:zeek.files.parent_fuid})\t(?:-|%{BRO_DATA:file.hash.md5})\t(?:-|%{BRO_DATA:file.hash.sha1})\t(?:-|%{BRO_DATA:file.hash.sha256})\t(?:-|%{BRO_DATA:zeek.files.extracted})`, +} diff --git a/vendor/github.com/elastic/go-grok/patterns/default.go b/vendor/github.com/elastic/go-grok/patterns/default.go new file mode 100644 index 00000000000..350bcec3ff3 --- /dev/null +++ b/vendor/github.com/elastic/go-grok/patterns/default.go @@ -0,0 +1,121 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
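The Default table below is consulted whenever a referenced pattern is not among the user-supplied definitions; a small sketch (the TICKET pattern and the log line are invented for illustration) of layering a custom pattern on top of it via NewWithPatterns:

package main

import (
	"fmt"

	grok "github.com/elastic/go-grok"
)

func main() {
	// LOGLEVEL and GREEDYDATA resolve from the Default table; TICKET is ours.
	g, err := grok.NewWithPatterns(map[string]string{
		"TICKET": `[A-Z]{2,5}-[0-9]+`,
	})
	if err != nil {
		panic(err)
	}
	if err := g.Compile(`%{LOGLEVEL:log.level} %{TICKET:ticket} %{GREEDYDATA:message}`, true); err != nil {
		panic(err)
	}

	fields, _ := g.ParseString("ERROR GROK-42 pattern table failed to load")
	fmt.Println(fields["log.level"], fields["ticket"], fields["message"])
	// Output: ERROR GROK-42 pattern table failed to load
}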
+ +package patterns + +var Default map[string]string = map[string]string{ + "WORD": `\b\w+\b`, + "NOTSPACE": `\S+`, + "SPACE": `\s*`, + "DATA": `.*?`, + + // types + "INT": `(?:[+-]?(?:[0-9]+))`, + "NUMBER": `(?:%{BASE10NUM})`, + "BOOL": "true|false", + + "BASE10NUM": `([+-]?(?:[0-9]+(?:\.[0-9]+)?)|\.[0-9]+)`, + "BASE16NUM": `[+-]?(?:0x)?[0-9A-Fa-f]+`, // Adjusted, removed lookbehind + "BASE16FLOAT": `[+-]?(?:0x)?[0-9A-Fa-f]+(?:\.[0-9A-Fa-f]*)?`, // Adjusted, removed lookbehind and word boundaries + "POSINT": `\b[1-9][0-9]*\b`, + "NONNEGINT": `\b[0-9]+\b`, + "GREEDYDATA": `.*`, + "QUOTEDSTRING": `"([^"\\]*(\\.[^"\\]*)*)"|\'([^\'\\]*(\\.[^\'\\]*)*)\'`, + "UUID": `[A-Fa-f0-9]{8}-(?:[A-Fa-f0-9]{4}-){3}[A-Fa-f0-9]{12}`, + "URN": `urn:[0-9A-Za-z][0-9A-Za-z-]{0,31}:[0-9A-Za-z()+,.:=@;$_!*'/?#-]+`, + + // network + "IP": `(?:%{IPV6}|%{IPV4})`, + "IPV6": `((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:)))(%.+)?`, + "IPV4": `(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)`, + + "IPORHOST": `(?:%{IP}|%{HOSTNAME})`, + "HOSTNAME": `\b(?:[0-9A-Za-z][0-9A-Za-z-]{0,62})(?:\.(?:[0-9A-Za-z][0-9A-Za-z-]{0,62}))*(\.?|\b)`, + "EMAILLOCALPART": `[a-zA-Z][a-zA-Z0-9_.+-=:]+`, + "EMAILADDRESS": `%{EMAILLOCALPART}@%{HOSTNAME}`, + "USERNAME": `[a-zA-Z0-9._-]+`, + "USER": `%{USERNAME}`, + + "MAC": `(?:%{CISCOMAC}|%{WINDOWSMAC}|%{COMMONMAC})`, + "CISCOMAC": `(?:(?:[A-Fa-f0-9]{4}\.){2}[A-Fa-f0-9]{4})`, + "WINDOWSMAC": `(?:(?:[A-Fa-f0-9]{2}-){5}[A-Fa-f0-9]{2})`, + "COMMONMAC": `(?:(?:[A-Fa-f0-9]{2}:){5}[A-Fa-f0-9]{2})`, + "HOSTPORT": `%{IPORHOST}:%{POSINT}`, + + // paths + "UNIXPATH": `(/[\w_%!$@:.,+~-]+)+`, + "TTY": `/dev/(pts|tty([pq])?)(\w+)?/?(?:[0-9]+)`, + "WINPATH": `[A-Za-z]+:(\\[^\\?*]+)+`, + "URIPROTO": `[A-Za-z][A-Za-z0-9+\.-]+`, + "URIHOST": `%{IPORHOST}(?::%{POSINT})?`, + "URIPATH": `(/[A-Za-z0-9$.+!*'(){},~:;=@#%&_\-]+)+`, + "URIQUERY": `[A-Za-z0-9$.+!*'|(){},~@#%&/=:;_?\-\[\]<>]*`, + "URIPARAM": `\?%{URIQUERY}`, + "URIPATHPARAM": `%{URIPATH}(?:\?%{URIQUERY})?`, + "URI": `%{URIPROTO}://(?:%{USER}(?::[^@]*)?@)?%{URIHOST}(?:%{URIPATH}(?:\?%{URIQUERY})?)?`, + "PATH": `(?:%{UNIXPATH}|%{WINPATH})`, + + // dates + "MONTH": `\b(?:Jan(?:uary)?|Feb(?:ruary)?|Mar(?:ch)?|Apr(?:il)?|May|Jun(?:e)?|Jul(?:y)?|Aug(?:ust)?|Sep(?:tember)?|Oct(?:ober)?|Nov(?:ember)?|Dec(?:ember)?)\b`, + + // Months: January, Feb, 3, 03, 12, December "MONTH": 
`\b(?:[Jj]an(?:uary|uar)?|[Ff]eb(?:ruary|ruar)?|[Mm](?:a|ä)?r(?:ch|z)?|[Aa]pr(?:il)?|[Mm]a(?:y|i)?|[Jj]un(?:e|i)?|[Jj]ul(?:y|i)?|[Aa]ug(?:ust)?|[Ss]ep(?:tember)?|[Oo](?:c|k)?t(?:ober)?|[Nn]ov(?:ember)?|[Dd]e(?:c|z)(?:ember)?)\b`, + "MONTHNUM": `(?:0[1-9]|1[0-2])`, + "MONTHDAY": `(?:(?:0[1-9])|(?:[12][0-9])|(?:3[01])|[1-9])`, + + // Days Monday, Tue, Thu, etc + "DAY": `\b(?:Mon(?:day)?|Tue(?:sday)?|Wed(?:nesday)?|Thu(?:rsday)?|Fri(?:day)?|Sat(?:urday)?|Sun(?:day)?)\b`, + + // Years? + "YEAR": `(\d\d){1,2}`, + "HOUR": `(?:2[0123]|[01]?[0-9])`, + "MINUTE": `(?:[0-5][0-9])`, + + // '60' is a leap second in most time standards and thus is valid. + "SECOND": `(?:(?:[0-5][0-9]|60)(?:[:.,][0-9]+)?)`, + "TIME": `%{HOUR}:%{MINUTE}(?::%{SECOND})?`, + + // datestamp is YYYY/MM/DD-HH:MM:SS.UUUU (or something like it) + "DATE_US": `%{MONTHNUM}[/-]%{MONTHDAY}[/-]%{YEAR}`, + "DATE_EU": `%{MONTHDAY}[./-]%{MONTHNUM}[./-]%{YEAR}`, + "ISO8601_TIMEZONE": `(?:Z|[+-]%{HOUR}(?::?%{MINUTE}))`, + "ISO8601_SECOND": `%{SECOND}`, + "TIMESTAMP_ISO8601": `%{YEAR}-%{MONTHNUM}-%{MONTHDAY}[T ]%{HOUR}:?%{MINUTE}(?::?%{SECOND})?%{ISO8601_TIMEZONE}?`, + "DATE": `%{DATE_US}|%{DATE_EU}`, + "DATESTAMP": `%{DATE}[- ]%{TIME}`, + "TZ": `(?:[PMACE][SED]T|UTC)`, + "DATESTAMP_RFC822": `%{DAY} %{MONTH} %{MONTHDAY} %{YEAR} %{TIME} %{TZ}`, + "DATESTAMP_RFC2822": `%{DAY}, %{MONTHDAY} %{MONTH} %{YEAR} %{TIME} %{ISO8601_TIMEZONE}`, + "DATESTAMP_OTHER": `%{DAY} %{MONTH} %{MONTHDAY} %{TIME} %{TZ} %{YEAR}`, + "DATESTAMP_EVENTLOG": `%{YEAR}%{MONTHNUM}%{MONTHDAY}%{HOUR}%{MINUTE}%{SECOND}`, + + // Syslog Dates: Month Day HH:MM:SS "MONTH": `\b(?:Jan(?:uary|uar)?|Feb(?:ruary|ruar)?|Mar(?:ch|z)?|Apr(?:il)?|May|i|Jun(?:e|i)?|Jul(?:y|i)?|Aug(?:ust)?|Sep(?:tember)?|Oct(?:ober)?|Nov(?:ember)?|Dec(?:ember)?)\b`, + "SYSLOGTIMESTAMP": `%{MONTH} +%{MONTHDAY} %{TIME}`, + "PROG": `[!-Z\\^-~]+`, // Simplified range based on ASCII + "SYSLOGPROG": `%{PROG}(?:\[\d+\])?`, // Simplified, as type hints and named groups aren't supported in Go `regexp` + "SYSLOGHOST": `%{IPORHOST}`, + "SYSLOGFACILITY": `<%{NONNEGINT}.%{NONNEGINT}>`, // Simplified to remove type hints + "HTTPDATE": `%{MONTHDAY}/%{MONTH}/%{YEAR}:%{TIME} %{INT}`, + + // Shortcuts + "QS": `%{QUOTEDSTRING}`, + + // Log formats + "SYSLOGBASE": `%{SYSLOGTIMESTAMP:timestamp} (?:%{SYSLOGFACILITY} )?%{SYSLOGHOST:host.name} %{SYSLOGPROG}:`, + + // Log Levels + "LOGLEVEL": `(?i)(alert|trace|debug|notice|info(?:rmation)?|warn(?:ing)?|err(?:or)?|crit(?:ical)?|fatal|severe|emerg(?:ency)?)`, +} diff --git a/vendor/github.com/elastic/go-grok/patterns/exim.go b/vendor/github.com/elastic/go-grok/patterns/exim.go new file mode 100644 index 00000000000..8873c4c1d00 --- /dev/null +++ b/vendor/github.com/elastic/go-grok/patterns/exim.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +package patterns + +var Exim map[string]string = map[string]string{ + "EXIM_MSGID": `[0-9A-Za-z]{6}-[0-9A-Za-z]{6}-[0-9A-Za-z]{2}`, + "EXIM_FLAGS": `(?:<=|=>|->|\*>|\*\*|==|<>|>>)`, + "EXIM_DATE": `(:?%{YEAR}-%{MONTHNUM}-%{MONTHDAY} %{TIME})`, + "EXIM_PID": `\[%{POSINT:process.pid:int}\]`, + "EXIM_QT": `((\d+y)?(\d+w)?(\d+d)?(\d+h)?(\d+m)?(\d+s)?)`, + "EXIM_EXCLUDE_TERMS": `(Message is frozen|(Start|End) queue run| Warning: | retry time not reached | no (IP address|host name) found for (IP address|host) | unexpected disconnection while reading SMTP command | no immediate delivery: |another process is handling this message)`, + "EXIM_REMOTE_HOST": `(H=(\(%{NOTSPACE:source.host.name}\) )?(\(%{NOTSPACE:exim.log.remote_address}\) )?\[%{IP:source.address}\](?::%{POSINT:source.port:int})?)`, + "EXIM_INTERFACE": `(I=\[%{IP:destination.address}\](?::%{NUMBER:destination.port:int}))`, + "EXIM_PROTOCOL": `(P=%{NOTSPACE:network.protocol.name})`, + "EXIM_MSG_SIZE": `(S=%{NUMBER:exim.log.message.body.size:int})`, + "EXIM_HEADER_ID": `(id=%{NOTSPACE:exim.log.header_id})`, + "EXIM_QUOTED_CONTENT": `(?:\\.|[^\\"])*`, + "EXIM_SUBJECT": `(T="%{EXIM_QUOTED_CONTENT:exim.log.message.subject}")`, + "EXIM_UNKNOWN_FIELD": `(?:[A-Za-z0-9]{1,4}=(?:%{QUOTEDSTRING}|%{NOTSPACE}))`, + "EXIM_NAMED_FIELDS": `(?: (?:%{EXIM_REMOTE_HOST}|%{EXIM_INTERFACE}|%{EXIM_PROTOCOL}|%{EXIM_MSG_SIZE}|%{EXIM_HEADER_ID}|%{EXIM_SUBJECT}|%{EXIM_UNKNOWN_FIELD}))*`, + "EXIM_MESSAGE_ARRIVAL": `%{EXIM_DATE:timestamp} (?:%{EXIM_PID} )?%{EXIM_MSGID:exim.log.message.id} (?P<exim.log.flags>\<\=) ((?P<exim.log.status>[a-z:]) )?%{EMAILADDRESS:exim.log.sender.email}%{EXIM_NAMED_FIELDS}(?:(?: from \?)? for %{EMAILADDRESS:exim.log.recipient.email})?`, + "EXIM": `%{EXIM_MESSAGE_ARRIVAL}`, +} diff --git a/vendor/github.com/elastic/go-grok/patterns/firewalls.go b/vendor/github.com/elastic/go-grok/patterns/firewalls.go new file mode 100644 index 00000000000..78244b72e95 --- /dev/null +++ b/vendor/github.com/elastic/go-grok/patterns/firewalls.go @@ -0,0 +1,95 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License.
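The default.go hunk above flags BASE16NUM and BASE16FLOAT as "Adjusted, removed lookbehind". The adjustment is forced by the host language: Go's RE2-based regexp package rejects lookaround at compile time, so the ported patterns accept a slight over-match instead. A standalone sketch of that trade-off (illustrative only, not part of the vendored code):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Lookbehind, as used by the original Logstash BASE16NUM, does not
	// compile under Go's RE2 engine.
	if _, err := regexp.Compile(`(?<![0-9A-Fa-f])0x[0-9A-Fa-f]+`); err != nil {
		fmt.Println("rejected:", err)
	}

	// The adjusted pattern compiles, at the cost of matching hex-looking
	// prefixes of ordinary words: the leftmost match here is "add".
	base16num := regexp.MustCompile(`[+-]?(?:0x)?[0-9A-Fa-f]+`)
	fmt.Println(base16num.FindString("addr=0xDEADBEEF"))
}
```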
+ +package patterns + +var Firewalls map[string]string = map[string]string{ + + // NetScreen firewall logs + "NETSCREENSESSIONLOG": `%{SYSLOGTIMESTAMP:timestamp} %{IPORHOST:observer.hostname} %{NOTSPACE:observer.name}\: (?P<observer.product>NetScreen) device_id=%{WORD:netscreen.device_id} .*?(system-(\w+)-(%{NONNEGINT:event.code})\((%{WORD:netscreen.session.type})\))?\: start_time="%{DATA:netscreen.session.start_time}" duration=%{INT:netscreen.session.duration:int} policy_id=%{INT:netscreen.policy_id} service=%{DATA:netscreen.service} proto=%{INT:netscreen.protocol_number:int} src zone=%{WORD:observer.ingress.zone} dst zone=%{WORD:observer.egress.zone} action=%{WORD:event.action} sent=%{INT:source.bytes:long} rcvd=%{INT:destination.bytes:long} src=%{IPORHOST:source.address} dst=%{IPORHOST:destination.address}(?: src_port=%{INT:source.port:int} dst_port=%{INT:destination.port:int})?(?: src-xlated ip=%{IP:source.nat.ip} port=%{INT:source.nat.port:int} dst-xlated ip=%{IP:destination.nat.ip} port=%{INT:destination.nat.port:int})?(?: session_id=%{INT:netscreen.session.id} reason=%{GREEDYDATA:netscreen.session.reason})?`, + + // == Cisco ASA == + "CISCO_TAGGED_SYSLOG": `^<%{POSINT:log.syslog.priority:int}>%{CISCOTIMESTAMP:timestamp}( %{SYSLOGHOST:host.name})? ?: %%{CISCOTAG:cisco.asa.tag}:`, + "CISCOTIMESTAMP": `%{MONTH} +%{MONTHDAY}(?: %{YEAR})? %{TIME}`, + "CISCOTAG": `[A-Z0-9]+-%{INT}-(?:[A-Z0-9_]+)`, + + // Common Particles + "CISCO_ACTION": `Built|Teardown|Deny|Denied|denied by ACL|requested|permitted|denied|discarded|est-allowed|Dropping|created|deleted`, + "CISCO_REASON": `Duplicate TCP SYN|Failed to locate egress interface|Invalid transport field|No matching connection|DNS Response|DNS Query|(?:%{WORD}\s*)*`, + "CISCO_DIRECTION": `Inbound|inbound|Outbound|outbound`, + "CISCO_INTERVAL": `first hit|%{INT}-second interval`, + "CISCO_XLATE_TYPE": `static|dynamic`, + + // helpers + "CISCO_HITCOUNT_INTERVAL": `hit-cnt %{INT:cisco.asa.hit_count:int} (?:first hit|%{INT:cisco.asa.interval:int}-second interval)`, + "CISCO_SRC_IP_USER": `%{NOTSPACE:observer.ingress.interface.name}:%{IP:source.address}(?:\(%{DATA:source.user.name}\))?`, + "CISCO_DST_IP_USER": `%{NOTSPACE:observer.egress.interface.name}:%{IP:destination.address}(?:\(%{DATA:destination.user.name}\))?`, + "CISCO_SRC_HOST_PORT_USER": `%{NOTSPACE:observer.ingress.interface.name}:(?:(?:%{IP:source.address})|(?:%{HOSTNAME:source.address}))(?:/%{INT:source.port:int})?(?:\(%{DATA:source.user.name}\))?`, + "CISCO_DST_HOST_PORT_USER": `%{NOTSPACE:observer.egress.interface.name}:(?:(?:%{IP:destination.address})|(?:%{HOSTNAME:destination.address}))(?:/%{INT:destination.port:int})?(?:\(%{DATA:destination.user.name}\))?`, + "CISCOFW104001": `\((?:Primary|Secondary)\) Switching to ACTIVE - %{GREEDYDATA:event.reason}`, + "CISCOFW104002": `\((?:Primary|Secondary)\) Switching to STANDBY - %{GREEDYDATA:event.reason}`, + "CISCOFW104003": `\((?:Primary|Secondary)\) Switching to FAILED\.`, + "CISCOFW104004": `\((?:Primary|Secondary)\) Switching to OK\.`, + "CISCOFW105003": `\((?:Primary|Secondary)\) Monitoring on [Ii]nterface %{NOTSPACE:network.interface.name} waiting`, + "CISCOFW105004": `\((?:Primary|Secondary)\) Monitoring on [Ii]nterface %{NOTSPACE:network.interface.name} normal`, + "CISCOFW105005": `\((?:Primary|Secondary)\) Lost Failover communications with mate on [Ii]nterface %{NOTSPACE:network.interface.name}`, + "CISCOFW105008": `\((?:Primary|Secondary)\) Testing [Ii]nterface %{NOTSPACE:network.interface.name}`, + "CISCOFW105009": `\((?:Primary|Secondary)\) 
Testing on [Ii]nterface %{NOTSPACE:network.interface.name} (?:Passed|Failed)`, + "CISCOFW106001": `%{CISCO_DIRECTION:cisco.asa.network.direction} %{WORD:cisco.asa.network.transport} connection %{CISCO_ACTION:cisco.asa.outcome} from %{IP:source.address}/%{INT:source.port:int} to %{IP:destination.address}/%{INT:destination.port:int} flags %{DATA:cisco.asa.tcp_flags} on interface %{NOTSPACE:observer.egress.interface.name}`, + "CISCOFW106006_106007_106010": `%{CISCO_ACTION:cisco.asa.outcome} %{CISCO_DIRECTION:cisco.asa.network.direction} %{WORD:cisco.asa.network.transport} (?:from|src) %{IP:source.address}/%{INT:source.port:int}(?:\(%{DATA:source.user.name}\))? (?:to|dst) %{IP:destination.address}/%{INT:destination.port:int}(?:\(%{DATA:destination.user.name}\))? (?:(?:on interface %{NOTSPACE:observer.egress.interface.name})|(?:due to %{CISCO_REASON:event.reason}))`, + "CISCOFW106014": `%{CISCO_ACTION:cisco.asa.outcome} %{CISCO_DIRECTION:cisco.asa.network.direction} %{WORD:cisco.asa.network.transport} src %{CISCO_SRC_IP_USER} dst %{CISCO_DST_IP_USER}\s?\(type %{INT:cisco.asa.icmp_type:int}, code %{INT:cisco.asa.icmp_code:int}\)`, + "CISCOFW106015": `%{CISCO_ACTION:cisco.asa.outcome} %{WORD:cisco.asa.network.transport} \(%{DATA:cisco.asa.rule_name}\) from %{IP:source.address}/%{INT:source.port:int} to %{IP:destination.address}/%{INT:destination.port:int} flags %{DATA:cisco.asa.tcp_flags} on interface %{NOTSPACE:observer.egress.interface.name}`, + "CISCOFW106021": `%{CISCO_ACTION:cisco.asa.outcome} %{WORD:cisco.asa.network.transport} reverse path check from %{IP:source.address} to %{IP:destination.address} on interface %{NOTSPACE:observer.egress.interface.name}`, + "CISCOFW106023": `%{CISCO_ACTION:action}( protocol)? %{WORD:network.protocol.name} src %{DATA:source.interface}:%{DATA:source.address}(/%{INT:source.port})?(\(%{DATA:source.fwuser}\))? dst %{DATA:destination.interface}:%{DATA:destination.address}(/%{INT:destination.port})?(\(%{DATA:destination.fwuser}\))?( \(type %{INT:icmp_type}, code %{INT:icmp_code}\))? by access-group "?%{DATA:policy_id}"? \[%{DATA:hashcode1}, %{DATA:hashcode2}\]`, + "CISCOFW106100_2_3": `access-list %{NOTSPACE:cisco.asa.rule_name} %{CISCO_ACTION:cisco.asa.outcome} %{WORD:cisco.asa.network.transport} for user '%{DATA:user.name}' %{DATA:observer.ingress.interface.name}\/%{IP:source.address}\(%{INT:source.port:int}\) -> %{DATA:observer.egress.interface.name}\/%{IP:destination.address}\(%{INT:destination.port:int}\) %{CISCO_HITCOUNT_INTERVAL} \[%{DATA:metadata.cisco.asa.hashcode1}\, %{DATA:metadata.cisco.asa.hashcode2}\]`, + + "CISCOFW106100": `access-list %{NOTSPACE:cisco.asa.rule_name} %{CISCO_ACTION:cisco.asa.outcome} %{WORD:cisco.asa.network.transport} %{DATA:observer.ingress.interface.name}/%{IP:source.address}\(%{INT:source.port:int}\)(?:\(%{DATA:source.user.name}\))? -> %{DATA:observer.egress.interface.name}/%{IP:destination.address}\(%{INT:destination.port:int}\)(?:\(%{DATA:source.user.name}\))? hit-cnt %{INT:cisco.asa.hit_count:int} %{CISCO_INTERVAL} \[%{DATA:metadata.cisco.asa.hashcode1}\, %{DATA:metadata.cisco.asa.hashcode2}\]`, + "CISCOFW304001": `%{IP:source.address}(?:\(%{DATA:source.user.name}\))? 
Accessed URL %{IP:destination.address}:%{GREEDYDATA:url.original}`, + "CISCOFW110002": `%{CISCO_REASON:event.reason} for %{WORD:cisco.asa.network.transport} from %{DATA:observer.ingress.interface.name}:%{IP:source.address}/%{INT:source.port:int} to %{IP:destination.address}/%{INT:destination.port:int}`, + "CISCOFW302010": `%{INT:cisco.asa.connections.in_use:int} in use, %{INT:cisco.asa.connections.most_used:int} most used`, + "CISCOFW302013_302014_302015_302016": `%{CISCO_ACTION:cisco.asa.outcome}(?: %{CISCO_DIRECTION:cisco.asa.network.direction})? %{WORD:cisco.asa.network.transport} connection %{INT:cisco.asa.connection_id} for %{NOTSPACE:observer.ingress.interface.name}:%{IP:source.address}/%{INT:source.port:int}(?: \(%{IP:source.nat.ip}/%{INT:source.nat.port:int}\))?(?:\(%{DATA:source.user.name?}\))? to %{NOTSPACE:observer.egress.interface.name}:%{IP:destination.address}/%{INT:destination.port:int}( \(%{IP:destination.nat.ip}/%{INT:destination.nat.port:int}\))?(?:\(%{DATA:destination.user.name}\))?( duration %{TIME:cisco.asa.duration} bytes %{INT:network.bytes:long})?(?: %{CISCO_REASON:event.reason})?(?: \(%{DATA:user.name}\))?`, + "CISCOFW302020_302021": `%{CISCO_ACTION:cisco.asa.outcome}(?: %{CISCO_DIRECTION:cisco.asa.network.direction})? %{WORD:cisco.asa.network.transport} connection for faddr %{IP:destination.address}/%{INT:cisco.asa.icmp_seq:int}(?:\(%{DATA:destination.user.name}\))? gaddr %{IP:source.nat.ip}/%{INT:cisco.asa.icmp_type:int} laddr %{IP:source.address}/%{INT}(?: \(%{DATA:source.user.name}\))?`, + "CISCOFW305011": `%{CISCO_ACTION:cisco.asa.outcome} %{CISCO_XLATE_TYPE} %{WORD:cisco.asa.network.transport} translation from %{DATA:observer.ingress.interface.name}:%{IP:source.address}(/%{INT:source.port:int})?(?:\(%{DATA:source.user.name}\))? to %{DATA:observer.egress.interface.name}:%{IP:destination.address}/%{INT:destination.port:int}`, + "CISCOFW313001_313004_313008": `%{CISCO_ACTION:cisco.asa.outcome} %{WORD:cisco.asa.network.transport} type=%{INT:cisco.asa.icmp_type:int}, code=%{INT:cisco.asa.icmp_code:int} from %{IP:source.address} on interface %{NOTSPACE:observer.egress.interface.name}(?: to %{IP:destination.address})?`, + "CISCOFW313005": `%{CISCO_REASON:event.reason} for %{WORD:cisco.asa.network.transport} error message: %{WORD} src %{CISCO_SRC_IP_USER} dst %{CISCO_DST_IP_USER} \(type %{INT:cisco.asa.icmp_type:int}, code %{INT:cisco.asa.icmp_code:int}\) on %{NOTSPACE} interface\.\s+Original IP payload: %{WORD:cisco.asa.original_ip_payload.network.transport} src %{IP:cisco.asa.original_ip_payload.source.address}/%{INT:cisco.asa.original_ip_payload.source.port:int}(?:\(%{DATA:cisco.asa.original_ip_payload.source.user.name}\))? 
dst %{IP:cisco.asa.original_ip_payload.destination.address}/%{INT:cisco.asa.original_ip_payload.destination.port:int}(?:\(%{DATA:cisco.asa.original_ip_payload.destination.user.name}\))?`, + "CISCOFW321001": `Resource '%{DATA:cisco.asa.resource.name}' limit of %{POSINT:cisco.asa.resource.limit:int} reached for system`, + "CISCOFW402117": `%{WORD:cisco.asa.network.type}: Received a non-IPSec packet \(protocol=\s?%{WORD:cisco.asa.network.transport}\) from %{IP:source.address} to %{IP:destination.address}\.?`, + "CISCOFW402119": `%{WORD:cisco.asa.network.type}: Received an %{WORD:cisco.asa.ipsec.protocol} packet \(SPI=\s?%{DATA:cisco.asa.ipsec.spi}, sequence number=\s?%{DATA:cisco.asa.ipsec.seq_num}\) from %{IP:source.address} \(user=\s?%{DATA:source.user.name}\) to %{IP:destination.address} that failed anti-replay checking\.?`, + "CISCOFW419001": `%{CISCO_ACTION:cisco.asa.outcome} %{WORD:cisco.asa.network.transport} packet from %{NOTSPACE:observer.ingress.interface.name}:%{IP:source.address}/%{INT:source.port:int} to %{NOTSPACE:observer.egress.interface.name}:%{IP:destination.address}/%{INT:destination.port:int}, reason: %{GREEDYDATA:event.reason}`, + "CISCOFW419002": `%{CISCO_REASON:event.reason} from %{DATA:observer.ingress.interface.name}:%{IP:source.address}/%{INT:source.port:int} to %{DATA:observer.egress.interface.name}:%{IP:destination.address}/%{INT:destination.port:int} with different initial sequence number`, + "CISCOFW500004": `%{CISCO_REASON:event.reason} for protocol=%{WORD:cisco.asa.network.transport}, from %{IP:source.address}/%{INT:source.port:int} to %{IP:destination.address}/%{INT:destination.port:int}`, + "CISCOFW602303_602304": `%{WORD:cisco.asa.network.type}: An %{CISCO_DIRECTION:cisco.asa.network.direction} %{DATA:cisco.asa.ipsec.tunnel_type} SA \(SPI=%{DATA:cisco.asa.ipsec.spi}\) between %{IP:source.address} and %{IP:destination.address} \(user=%{DATA:source.user.name}\) has been %{CISCO_ACTION:cisco.asa.outcome}`, + "CISCOFW710001_710002_710003_710005_710006": `%{WORD:cisco.asa.network.transport} (?:request|access) %{CISCO_ACTION:cisco.asa.outcome} from %{IP:source.address}/%{INT:source.port:int} to %{DATA:observer.egress.interface.name}:%{IP:destination.address}/%{INT:destination.port:int}`, + "CISCOFW713172": `Group = %{DATA:cisco.asa.source.group}, IP = %{IP:source.address}, Automatic NAT Detection Status:\s+Remote end\s*%{DATA:metadata.cisco.asa.remote_nat}\s*behind a NAT device\s+This\s+end\s*%{DATA:metadata.cisco.asa.local_nat}\s*behind a NAT device`, + "CISCOFW733100": `\[\s*%{DATA:cisco.asa.burst.object}\s*\] drop %{DATA:cisco.asa.burst.id} exceeded. 
Current burst rate is %{INT:cisco.asa.burst.current_rate:int} per second, max configured rate is %{INT:cisco.asa.burst.configured_rate:int}; Current average rate is %{INT:cisco.asa.burst.avg_rate:int} per second, max configured rate is %{INT:cisco.asa.burst.configured_avg_rate:int}; Cumulative total count is %{INT:cisco.asa.burst.cumulative_count:int}`, + + "IPTABLES_TCP_FLAGS": `(CWR |ECE |URG |ACK |PSH |RST |SYN |FIN )*`, + "IPTABLES_TCP_PART": `(?:SEQ=%{INT:iptables.tcp.seq:int}\s+)?(?:ACK=%{INT:iptables.tcp.ack:int}\s+)?WINDOW=%{INT:iptables.tcp.window:int}\s+RES=0x%{BASE16NUM:iptables.tcp_reserved_bits}\s+%{IPTABLES_TCP_FLAGS:iptables.tcp.flags}`, + + "IPTABLES4_FRAG": `((\s)?(CE|DF|MF))*`, + "IPTABLES4_PART": `SRC=%{IPV4:source.address}\s+DST=%{IPV4:destination.address}\s+LEN=(?:%{INT:iptables.length:int})?\s+TOS=(?:0|0x%{BASE16NUM:iptables.tos})?\s+PREC=(?:0x%{BASE16NUM:iptables.precedence_bits})?\s+TTL=(?:%{INT:iptables.ttl:int})?\s+ID=(?:%{INT:iptables.id})?\s+(?:%{IPTABLES4_FRAG:iptables.fragment_flags})?(?:\s+FRAG: %{INT:iptables.fragment_offset:int})?`, + "IPTABLES6_PART": `SRC=%{IPV6:source.address}\s+DST=%{IPV6:destination.address}\s+LEN=(?:%{INT:iptables.length:int})?\s+TC=(?:0|0x%{BASE16NUM:iptables.tos})?\s+HOPLIMIT=(?:%{INT:iptables.ttl:int})?\s+FLOWLBL=(?:%{INT:iptables.flow_label})?`, + + "IPTABLES": `IN=(?:%{NOTSPACE:observer.ingress.interface.name})?\s+OUT=(?:%{NOTSPACE:observer.egress.interface.name})?\s+(?:MAC=(?:%{COMMONMAC:destination.mac})?(?::%{COMMONMAC:source.mac})?(?::[A-Fa-f0-9]{2}:[A-Fa-f0-9]{2})?\s+)?(:?%{IPTABLES4_PART}|%{IPTABLES6_PART}).*?PROTO=(?:%{WORD:network.transport})?\s+SPT=(?:%{INT:source.port:int})?\s+DPT=(?:%{INT:destination.port:int})?\s+(?:%{IPTABLES_TCP_PART})?`, + + // Shorewall firewall logs + "SHOREWALL": `(?:%{SYSLOGTIMESTAMP:timestamp}) (?:%{WORD:observer.hostname}) .*Shorewall:(?:%{WORD:shorewall.firewall.type})?:(?:%{WORD:shorewall.firewall.action})?.*%{IPTABLES}`, + + // == SuSE Firewall 2 == + "SFW2_LOG_PREFIX": `SFW2\-INext\-%{NOTSPACE:suse.firewall.action}`, + "SFW2": `((?:%{SYSLOGTIMESTAMP:timestamp})|(?:%{TIMESTAMP_ISO8601:timestamp}))\s*%{HOSTNAME:observer.hostname}.*?%{SFW2_LOG_PREFIX:suse.firewall.log_prefix}\s*%{IPTABLES}`, +} diff --git a/vendor/github.com/elastic/go-grok/patterns/haproxy.go b/vendor/github.com/elastic/go-grok/patterns/haproxy.go new file mode 100644 index 00000000000..04a60da566c --- /dev/null +++ b/vendor/github.com/elastic/go-grok/patterns/haproxy.go @@ -0,0 +1,30 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License.
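Entries such as IPTABLES and SHOREWALL above, like the HAProxy patterns that follow, are not plain regular expressions: they refer to other entries by name, and the `%{NAME:field}` references only become a compilable regex after recursive substitution against the map. The sketch below illustrates that expansion step using only the standard library; the `expand` helper is hypothetical and is not go-grok's actual implementation (which, among other things, preserves the dotted field names):

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// ref matches %{NAME}, %{NAME:alias}, or %{NAME:alias:type} references.
var ref = regexp.MustCompile(`%\{(\w+)(?::([\w.]+))?(?::(int|long|float))?}`)

// expand substitutes %{...} references from defs until none remain. Aliased
// references become named capture groups; dots are rewritten to underscores
// because Go's regexp only accepts word characters in group names.
func expand(pattern string, defs map[string]string) string {
	for ref.MatchString(pattern) {
		pattern = ref.ReplaceAllStringFunc(pattern, func(m string) string {
			parts := ref.FindStringSubmatch(m)
			body := defs[parts[1]]
			if parts[2] == "" {
				return "(?:" + body + ")"
			}
			return "(?P<" + strings.ReplaceAll(parts[2], ".", "_") + ">" + body + ")"
		})
	}
	return pattern
}

func main() {
	defs := map[string]string{
		"INT":  `(?:[+-]?(?:[0-9]+))`,
		"IPV4": `(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)`,
	}
	re := regexp.MustCompile(expand(`%{IPV4:source.address}:%{INT:source.port:int}`, defs))
	fmt.Println(re.FindStringSubmatch("10.1.2.3:8080")) // [10.1.2.3:8080 10.1.2.3 8080]
}
```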
+ +package patterns + +var HAProxy map[string]string = map[string]string{ + "HAPROXYTIME": `\b%{HOUR}:%{MINUTE}(:%{SECOND})?\b`, + "HAPROXYDATE": `%{MONTHDAY}/%{MONTH}/%{YEAR}:%{HAPROXYTIME}.%{INT}`, + "HAPROXYCAPTUREDREQUESTHEADERS": `(?:-|%{DATA:haproxy.http.request.captured_headers})`, + "HAPROXYCAPTUREDRESPONSEHEADERS": `(?:-|%{DATA:haproxy.http.response.captured_headers})`, + "HAPROXYURI": `(?:%{URIPROTO:url.scheme}://)?(?:%{USER:url.username}(?::[^@]*)?@)?(?:%{IPORHOST:url.domain}(?::%{POSINT:url.port:int})?)?(?:%{URIPATH:url.path}(?:\?%{URIQUERY:url.query})?)?`, + "HAPROXYHTTPREQUESTLINE": `(?:|(?:%{WORD:http.request.method} %{HAPROXYURI:url.original}(?: HTTP/%{NUMBER:http.version})?))`, + "HAPROXYHTTPBASE": `%{IP:source.address}:%{INT:source.port:int} \[%{HAPROXYDATE:haproxy.request_date}\] %{NOTSPACE:haproxy.frontend_name} %{NOTSPACE:haproxy.backend_name}/(?:|%{NOTSPACE:haproxy.server_name}) (?:-1|%{INT:haproxy.http.request.time_wait_ms:int})/(?:-1|%{INT:haproxy.total_waiting_time_ms:int})/(?:-1|%{INT:haproxy.connection_wait_time_ms:int})/(?:-1|%{INT:haproxy.http.request.time_wait_without_data_ms:int})/%{NOTSPACE:haproxy.total_time_ms} %{INT:http.response.status_code:int} %{INT:source.bytes:long} (?:-|%{DATA:haproxy.http.request.captured_cookie}) (?:-|%{DATA:haproxy.http.response.captured_cookie}) %{NOTSPACE:haproxy.termination_state} %{INT:haproxy.connections.active:int}/%{INT:haproxy.connections.frontend:int}/%{INT:haproxy.connections.backend:int}/%{INT:haproxy.connections.server:int}/%{INT:haproxy.connections.retries:int} %{INT:haproxy.server_queue:int}/%{INT:haproxy.backend_queue:int}(?: \{%{HAPROXYCAPTUREDREQUESTHEADERS}\}(?: \{%{HAPROXYCAPTUREDRESPONSEHEADERS}\})?)?(?: "%{HAPROXYHTTPREQUESTLINE}"?)?`, + "HAPROXYHTTP": `(?:%{SYSLOGTIMESTAMP:timestamp}|%{TIMESTAMP_ISO8601:timestamp}) %{IPORHOST:host.name} %{SYSLOGPROG}: %{HAPROXYHTTPBASE}`, + "HAPROXYTCP": `(?:%{SYSLOGTIMESTAMP:timestamp}|%{TIMESTAMP_ISO8601:timestamp}) %{IPORHOST:host.name} %{SYSLOGPROG}: %{IP:source.address}:%{INT:source.port:int} \[%{HAPROXYDATE:haproxy.request_date}\] %{NOTSPACE:haproxy.frontend_name} %{NOTSPACE:haproxy.backend_name}/(?:|%{NOTSPACE:haproxy.server_name}) (?:-1|%{INT:haproxy.total_waiting_time_ms:int})/(?:-1|%{INT:haproxy.connection_wait_time_ms:int})/%{NOTSPACE:haproxy.total_time_ms} %{INT:source.bytes:long} %{NOTSPACE:haproxy.termination_state} %{INT:haproxy.connections.active:int}/%{INT:haproxy.connections.frontend:int}/%{INT:haproxy.connections.backend:int}/%{INT:haproxy.connections.server:int}/%{INT:haproxy.connections.retries:int} %{INT:haproxy.server_queue:int}/%{INT:haproxy.backend_queue:int}`, +} diff --git a/vendor/github.com/elastic/go-grok/patterns/httpd.go b/vendor/github.com/elastic/go-grok/patterns/httpd.go new file mode 100644 index 00000000000..eff84457ab3 --- /dev/null +++ b/vendor/github.com/elastic/go-grok/patterns/httpd.go @@ -0,0 +1,33 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package patterns + +var Httpd map[string]string = map[string]string{ + "HTTPDUSER": `%{EMAILADDRESS}|%{USER}`, + "HTTPDERROR_DATE": `%{DAY} %{MONTH} %{MONTHDAY} %{TIME} %{YEAR}`, + + "HTTPD_COMMONLOG": `%{IPORHOST:source.address} (?:-|%{HTTPDUSER:apache.access.user.identity}) (?:-|%{HTTPDUSER:user.name}) \[%{HTTPDATE:timestamp}\] "(?:%{WORD:http.request.method} %{NOTSPACE:url.original}(?: HTTP/%{NUMBER:http.version})?|%{DATA})" (?:-|%{INT:http.response.status_code:int}) (?:-|%{INT:http.response.body.size:long})`, + "HTTPD_COMBINEDLOG": `%{HTTPD_COMMONLOG} "(?:-|%{DATA:http.request.referrer})" "(?:-|%{DATA:user_agent.original})"`, + + "HTTPD20_ERRORLOG": `\[%{HTTPDERROR_DATE:timestamp}\] \[%{LOGLEVEL:log.level}\] (?:\[client %{IPORHOST:source.address}\] )?%{GREEDYDATA:message}`, + "HTTPD24_ERRORLOG": `\[%{HTTPDERROR_DATE:timestamp}\] \[(?:%{WORD:apache.error.module})?:%{LOGLEVEL:log.level}\] \[pid %{POSINT:process.pid:long}(:tid %{INT:process.thread.id:int})?\](?: \(%{POSINT:apache.error.proxy.error.code}\)?%{DATA:apache.error.proxy.error.message}:)?(?: \[client %{IPORHOST:source.address}(?::%{POSINT:source.port:int})?\])?(?: %{DATA:error.code}:)? %{GREEDYDATA:message}`, + "HTTPD_ERRORLOG": `%{HTTPD20_ERRORLOG}|%{HTTPD24_ERRORLOG}`, + + "COMMONAPACHELOG": `%{HTTPD_COMMONLOG}`, + "COMBINEDAPACHELOG": `%{HTTPD_COMBINEDLOG}`, +} diff --git a/vendor/github.com/elastic/go-grok/patterns/java.go b/vendor/github.com/elastic/go-grok/patterns/java.go new file mode 100644 index 00000000000..12e5f2f0056 --- /dev/null +++ b/vendor/github.com/elastic/go-grok/patterns/java.go @@ -0,0 +1,46 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package patterns + +var Java map[string]string = map[string]string{ + "JAVACLASS": `(?:[a-zA-Z$_][a-zA-Z$_0-9]*\.)*[a-zA-Z$_][a-zA-Z$_0-9]*`, + "JAVAFILE": `(?:[a-zA-Z$_0-9. 
-]+)`, + "JAVAMETHOD": `(?:(<(?:cl)?init>)|[a-zA-Z$_][a-zA-Z$_0-9]*)`, + "JAVASTACKTRACEPART": `%{SPACE}at %{JAVACLASS:java.log.origin.class.name}\.%{JAVAMETHOD:log.origin.function}\(%{JAVAFILE:log.origin.file.name}(?::%{INT:log.origin.file.line:int})?\)`, + "JAVATHREAD": `(?:[A-Z]{2}-Processor[\d]+)`, + "JAVALOGMESSAGE": `(?:.*)`, + + "CATALINA7_DATESTAMP": `%{MONTH} %{MONTHDAY}, %{YEAR} %{HOUR}:%{MINUTE}:%{SECOND} (?:AM|PM)`, + "CATALINA7_LOG": `%{CATALINA7_DATESTAMP:timestamp} %{JAVACLASS:java.log.origin.class.name}(?: %{JAVAMETHOD:log.origin.function})?\s*(?:%{LOGLEVEL:log.level}:)? %{JAVALOGMESSAGE:message}`, + + "CATALINA8_DATESTAMP": `%{MONTHDAY}-%{MONTH}-%{YEAR} %{HOUR}:%{MINUTE}:%{SECOND}`, + "CATALINA8_LOG": `%{CATALINA8_DATESTAMP:timestamp} %{LOGLEVEL:log.level} \[%{DATA:java.log.origin.thread.name}\] %{JAVACLASS:java.log.origin.class.name}\.(?:%{JAVAMETHOD:log.origin.function})? %{JAVALOGMESSAGE:message}`, + + "CATALINA_DATESTAMP": `(?:%{CATALINA8_DATESTAMP})|(?:%{CATALINA7_DATESTAMP})`, + "CATALINALOG": `(?:%{CATALINA8_LOG})|(?:%{CATALINA7_LOG})`, + + "TOMCAT7_LOG": `%{CATALINA7_LOG}`, + "TOMCAT8_LOG": `%{CATALINA8_LOG}`, + + "TOMCATLEGACY_DATESTAMP": `%{YEAR}-%{MONTHNUM}-%{MONTHDAY} %{HOUR}:%{MINUTE}:%{SECOND}(?: %{ISO8601_TIMEZONE})?`, + "TOMCATLEGACY_LOG": `%{TOMCATLEGACY_DATESTAMP:timestamp} \| %{LOGLEVEL:log.level} \| %{JAVACLASS:java.log.origin.class.name} - %{JAVALOGMESSAGE:message}`, + + "TOMCAT_DATESTAMP": `(?:%{CATALINA8_DATESTAMP})|(?:%{CATALINA7_DATESTAMP})|(?:%{TOMCATLEGACY_DATESTAMP})`, + + "TOMCATLOG": `(?:%{TOMCAT8_LOG})|(?:%{TOMCAT7_LOG})|(?:%{TOMCATLEGACY_LOG})`, +} diff --git a/vendor/github.com/elastic/go-grok/patterns/junos.go b/vendor/github.com/elastic/go-grok/patterns/junos.go new file mode 100644 index 00000000000..c244e156803 --- /dev/null +++ b/vendor/github.com/elastic/go-grok/patterns/junos.go @@ -0,0 +1,27 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
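The `:int`, `:long`, and `:float` suffixes used throughout these maps (the Junos patterns below use them for ports and byte counts) are capture metadata, not regex syntax: the library matches a string first and coerces the captured value afterwards. A minimal sketch of that post-match step, with a hypothetical `coerce` helper that is not go-grok's API:

```go
package main

import (
	"fmt"
	"strconv"
)

// coerce applies a grok type hint to a captured string; the hint names
// mirror the ones used in the vendored pattern maps.
func coerce(value, hint string) (any, error) {
	switch hint {
	case "int":
		return strconv.Atoi(value)
	case "long":
		return strconv.ParseInt(value, 10, 64)
	case "float":
		return strconv.ParseFloat(value, 64)
	default: // no hint: keep the raw string
		return value, nil
	}
}

func main() {
	port, _ := coerce("8080", "int")
	size, _ := coerce("1048576", "long")
	elapsed, _ := coerce("0.25", "float")
	fmt.Println(port, size, elapsed) // 8080 1048576 0.25
}
```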
+ +package patterns + +var Junos map[string]string = map[string]string{ + "RT_FLOW_TAG": `(?:RT_FLOW_SESSION_CREATE|RT_FLOW_SESSION_CLOSE|RT_FLOW_SESSION_DENY)`, + "RT_FLOW_EVENT": `%{RT_FLOW_TAG}`, + + "RT_FLOW1": `%{RT_FLOW_TAG:juniper.srx.tag}: %{GREEDYDATA:juniper.srx.reason}: %{IP:source.address}/%{INT:source.port:int}->%{IP:destination.address}/%{INT:destination.port:int} %{DATA:juniper.srx.service_name} %{IP:source.nat.ip}/%{INT:source.nat.port:int}->%{IP:destination.nat.ip}/%{INT:destination.nat.port:int} (?:(?:None)|(?:%{DATA:juniper.srx.src_nat_rule_name})) (?:(?:None)|(?:%{DATA:juniper.srx.dst_nat_rule_name})) %{INT:network.iana_number} %{DATA:rule.name} %{DATA:observer.ingress.zone} %{DATA:observer.egress.zone} %{INT:juniper.srx.session_id} \d+\(%{INT:source.bytes:long}\) \d+\(%{INT:destination.bytes:long}\) %{INT:juniper.srx.elapsed_time:int} .*`, + "RT_FLOW2": `%{RT_FLOW_TAG:juniper.srx.tag}: session created %{IP:source.address}/%{INT:source.port:int}->%{IP:destination.address}/%{INT:destination.port:int} %{DATA:juniper.srx.service_name} %{IP:source.nat.ip}/%{INT:source.nat.port:int}->%{IP:destination.nat.ip}/%{INT:destination.nat.port:int} (?:(?:None)|(?:%{DATA:juniper.srx.src_nat_rule_name})) (?:(?:None)|(?:%{DATA:juniper.srx.dst_nat_rule_name})) %{INT:network.iana_number} %{DATA:rule.name} %{DATA:observer.ingress.zone} %{DATA:observer.egress.zone} %{INT:juniper.srx.session_id} .*`, + "RT_FLOW3": `%{RT_FLOW_TAG:juniper.srx.tag}: session denied %{IP:source.address}/%{INT:source.port:int}->%{IP:destination.address}/%{INT:destination.port:int} %{DATA:juniper.srx.service_name} %{INT:network.iana_number}\(\d\) %{DATA:rule.name} %{DATA:observer.ingress.zone} %{DATA:observer.egress.zone} (.*)?`, +} diff --git a/vendor/github.com/elastic/go-grok/patterns/maven.go b/vendor/github.com/elastic/go-grok/patterns/maven.go new file mode 100644 index 00000000000..dab64d28ddf --- /dev/null +++ b/vendor/github.com/elastic/go-grok/patterns/maven.go @@ -0,0 +1,22 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package patterns + +var Maven map[string]string = map[string]string{ + "MAVEN_VERSION": `(?:(\d+)\.)?(?:(\d+)\.)?(\*|\d+)(?:[.-](RELEASE|SNAPSHOT))?`, +} diff --git a/vendor/github.com/elastic/go-grok/patterns/mcollective.go b/vendor/github.com/elastic/go-grok/patterns/mcollective.go new file mode 100644 index 00000000000..d1a9600b5bd --- /dev/null +++ b/vendor/github.com/elastic/go-grok/patterns/mcollective.go @@ -0,0 +1,23 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package patterns + +var MCollective map[string]string = map[string]string{ + "MCOLLECTIVE": `., \[%{TIMESTAMP_ISO8601:timestamp} #%{POSINT:process.pid:int}\]%{SPACE}%{LOGLEVEL:log.level}`, + "MCOLLECTIVEAUDIT": `%{TIMESTAMP_ISO8601:timestamp}:`, +} diff --git a/vendor/github.com/elastic/go-grok/patterns/mongodb.go b/vendor/github.com/elastic/go-grok/patterns/mongodb.go new file mode 100644 index 00000000000..a23d6e1ea50 --- /dev/null +++ b/vendor/github.com/elastic/go-grok/patterns/mongodb.go @@ -0,0 +1,29 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package patterns + +var MongoDB map[string]string = map[string]string{ + "MONGO_LOG": `%{SYSLOGTIMESTAMP:timestamp} \[%{WORD:db.mongodb.component}\] %{GREEDYDATA:message}`, + "MONGO_QUERY_CONTENT": `(.*?)`, + "MONGO_QUERY": `\{ %{MONGO_QUERY_CONTENT:MONGO_QUERY} \} ntoreturn:`, + "MONGO_SLOWQUERY": `%{WORD:db.mongodb.profile.op} %{MONGO_WORDDASH:db.mongodb.database}\.%{MONGO_WORDDASH:db.mongodb.collection} %{WORD}: \{ %{MONGO_QUERY_CONTENT:db.mongodb.query.original} \} ntoreturn:%{NONNEGINT:db.mongodb.profile.ntoreturn:int} ntoskip:%{NONNEGINT:db.mongodb.profile.ntoskip:int} nscanned:%{NONNEGINT:db.mongodb.profile.nscanned:int}.*? nreturned:%{NONNEGINT:db.mongodb.profile.nreturned:int}.*? %{INT:db.mongodb.profile.duration:int}ms`, + "MONGO_WORDDASH": `\b[\w-]+\b`, + "MONGO3_SEVERITY": `\w`, + "MONGO3_COMPONENT": `%{WORD}`, + "MONGO3_LOG": `%{TIMESTAMP_ISO8601:timestamp} %{MONGO3_SEVERITY:log.level} (?:-|%{MONGO3_COMPONENT:db.mongodb.component})%{SPACE}(?:\[%{DATA:db.mongodb.context}\])? %{GREEDYDATA:message}`, +} diff --git a/vendor/github.com/elastic/go-grok/patterns/postgresql.go b/vendor/github.com/elastic/go-grok/patterns/postgresql.go new file mode 100644 index 00000000000..5efa6c19151 --- /dev/null +++ b/vendor/github.com/elastic/go-grok/patterns/postgresql.go @@ -0,0 +1,22 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package patterns + +var PostgreSQL map[string]string = map[string]string{ + "POSTGRESQL": "%{DATESTAMP:timestamp} %{TZ:event.timezone} %{DATA:user.name} %{GREEDYDATA:postgresql.log.connection_id} %{POSINT:process.pid:int}", +} diff --git a/vendor/github.com/elastic/go-grok/patterns/rails.go b/vendor/github.com/elastic/go-grok/patterns/rails.go new file mode 100644 index 00000000000..4ddd67972e4 --- /dev/null +++ b/vendor/github.com/elastic/go-grok/patterns/rails.go @@ -0,0 +1,30 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package patterns + +var Rails map[string]string = map[string]string{ + "RUUID": `\S{32}`, + "RCONTROLLER": `(?P<rails.controller.class>[^#]+)#(?P<rails.controller.action>\w+)`, + + "RAILS3HEAD": `(?m)Started %{WORD:http.request.method} "%{URIPATHPARAM:url.original}" for %{IPORHOST:source.address} at (?<timestamp>%{YEAR}-%{MONTHNUM}-%{MONTHDAY} %{HOUR}:%{MINUTE}:%{SECOND} %{ISO8601_TIMEZONE})`, + "RPROCESSING": `\W*Processing by %{RCONTROLLER} as (?P<rails.request.format>\S+)(?:\W*Parameters: {%{DATA:rails.request.params}}\W*)?`, + "RAILS3FOOT": `Completed %{POSINT:http.response.status_code:int}%{DATA} in %{NUMBER:rails.request.duration.total:float}ms %{RAILS3PROFILE}%{GREEDYDATA}`, + "RAILS3PROFILE": `(?:\(Views: %{NUMBER:rails.request.duration.view:float}ms \| ActiveRecord: %{NUMBER:rails.request.duration.active_record:float}ms|\(ActiveRecord: %{NUMBER:rails.request.duration.active_record:float}ms)?`, + + "RAILS3": `%{RAILS3HEAD}(?:%{RPROCESSING})?(?P<rails.request.explain.original>(?:%{DATA}\n)*)(?:%{RAILS3FOOT})?`, +} diff --git a/vendor/github.com/elastic/go-grok/patterns/redis.go b/vendor/github.com/elastic/go-grok/patterns/redis.go new file mode 100644 index 00000000000..5e4b119142f --- /dev/null +++ b/vendor/github.com/elastic/go-grok/patterns/redis.go @@ -0,0 +1,24 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package patterns + +var Redis map[string]string = map[string]string{ + "REDISTIMESTAMP": `%{MONTHDAY} %{MONTH} %{TIME}`, + "REDISLOG": `\[%{POSINT:process.pid:int}\] %{REDISTIMESTAMP:timestamp} \*`, + "REDISMONLOG": `%{NUMBER:timestamp} \[%{INT:redis.database.id} %{IP:client.address}:%{POSINT:client.port:int}\] "%{WORD:redis.command.name}"\s?%{GREEDYDATA:redis.command.args}`, +} diff --git a/vendor/github.com/elastic/go-grok/patterns/ruby.go b/vendor/github.com/elastic/go-grok/patterns/ruby.go new file mode 100644 index 00000000000..4d27bd8a4f5 --- /dev/null +++ b/vendor/github.com/elastic/go-grok/patterns/ruby.go @@ -0,0 +1,23 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package patterns + +var Ruby map[string]string = map[string]string{ + "RUBY_LOGLEVEL": `(?:DEBUG|FATAL|ERROR|WARN|INFO)`, + "RUBY_LOGGER": `[DFEWI], \[%{TIMESTAMP_ISO8601:timestamp} #%{POSINT:process.pid:int}\] *%{RUBY_LOGLEVEL:log.level} -- +%{DATA:process.command}: %{GREEDYDATA:message}`, +} diff --git a/vendor/github.com/elastic/go-grok/patterns/squid.go b/vendor/github.com/elastic/go-grok/patterns/squid.go new file mode 100644 index 00000000000..d1d50657896 --- /dev/null +++ b/vendor/github.com/elastic/go-grok/patterns/squid.go @@ -0,0 +1,23 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package patterns + +var Squid map[string]string = map[string]string{ + "SQUID3_STATUS": `(?:%{POSINT:http.response.status_code:int}|0|000)`, + "SQUID3": `%{NUMBER:timestamp}\s+%{NUMBER:squid.request.duration:int}\s%{IP:source.address}\s%{WORD:event.action}/%{SQUID3_STATUS}\s%{INT:http.response.bytes:long}\s%{WORD:http.request.method}\s%{NOTSPACE:url.original}\s(?:-|%{NOTSPACE:user.name})\s%{WORD:squid.hierarchy_code}/(?:-|%{IPORHOST:destination.address})\s(?:-|%{NOTSPACE:http.response.mime_type})`, +} diff --git a/vendor/github.com/elastic/go-grok/patterns/syslog.go b/vendor/github.com/elastic/go-grok/patterns/syslog.go new file mode 100644 index 00000000000..ba84266a7c7 --- /dev/null +++ b/vendor/github.com/elastic/go-grok/patterns/syslog.go @@ -0,0 +1,36 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package patterns + +var Syslog map[string]string = map[string]string{ + "SYSLOG5424PRINTASCII": `[!-~]+`, + + "SYSLOGBASE2": `(?:%{SYSLOGTIMESTAMP:timestamp}|%{TIMESTAMP_ISO8601:timestamp})(?: %{SYSLOGFACILITY})?(?: %{SYSLOGHOST:host.name})?(?: %{SYSLOGPROG}:)?`, + "SYSLOGPAMSESSION": `%{SYSLOGBASE} (%{GREEDYDATA:message})%{WORD:system.auth.pam.module}\(%{DATA:system.auth.pam.origin}\): session %{WORD:system.auth.pam.session_state} for user %{USERNAME:user.name}(?: by %{GREEDYDATA})?`, + + "CRON_ACTION": `[A-Z ]+`, + "CRONLOG": `%{SYSLOGBASE} \(%{USER:user.name}\) %{CRON_ACTION:system.cron.action} \(%{DATA:message}\)`, + + "SYSLOGLINE": `%{SYSLOGBASE2} %{GREEDYDATA:message}`, + + "SYSLOG5424PRI": `<%{NONNEGINT:log.syslog.priority:int}>`, + "SYSLOG5424SD": `\[%{DATA}\]+`, + "SYSLOG5424BASE": `%{SYSLOG5424PRI}%{NONNEGINT:system.syslog.version} +(?:-|%{TIMESTAMP_ISO8601:timestamp}) +(?:-|%{IPORHOST:host.name}) +(?:-|%{SYSLOG5424PRINTASCII:process.command}) +(?:-|%{POSINT:process.pid:int}) +(?:-|%{SYSLOG5424PRINTASCII:event.code}) +(?:-|%{SYSLOG5424SD:system.syslog.structured_data})?`, + + "SYSLOG5424LINE": `%{SYSLOG5424BASE} +%{GREEDYDATA:message}`, +} diff --git a/vendor/github.com/elastic/lunes/.editorconfig b/vendor/github.com/elastic/lunes/.editorconfig new file mode 100644 index 00000000000..0f7c6831979 --- /dev/null +++ b/vendor/github.com/elastic/lunes/.editorconfig @@ -0,0 +1,8 @@ +# See: http://editorconfig.org +root = true + +[*] +charset = utf-8 +end_of_line = lf +insert_final_newline = true +trim_trailing_whitespace = true \ No newline at end of file diff --git a/vendor/github.com/elastic/lunes/.gitignore b/vendor/github.com/elastic/lunes/.gitignore new file mode 100644 index 00000000000..91d5320da4f --- /dev/null +++ b/vendor/github.com/elastic/lunes/.gitignore @@ -0,0 +1,25 @@ +# If you prefer the allow list template instead of the deny list, see community template: +# 
https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore +# +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ + +# Go workspace file +go.work + +# Idea +.idea +build diff --git a/vendor/github.com/elastic/lunes/.go-version b/vendor/github.com/elastic/lunes/.go-version new file mode 100644 index 00000000000..da9594fd66f --- /dev/null +++ b/vendor/github.com/elastic/lunes/.go-version @@ -0,0 +1 @@ +1.22.5 diff --git a/vendor/github.com/elastic/lunes/.golangci.yml b/vendor/github.com/elastic/lunes/.golangci.yml new file mode 100644 index 00000000000..8c2b5e60d07 --- /dev/null +++ b/vendor/github.com/elastic/lunes/.golangci.yml @@ -0,0 +1,114 @@ +# options for analysis running +run: + # timeout for analysis, e.g. 30s, 5m, default is 1m + timeout: 1m + +issues: + # Maximum count of issues with the same text. + # Set to 0 to disable. + # Default: 3 + max-same-issues: 0 + # Maximum issues count per one linter. + # Set to 0 to disable. + # Default: 50 + max-issues-per-linter: 0 + +output: + sort-results: true + +# Find the whole list here https://golangci-lint.run/usage/linters/ +linters: + disable-all: true + enable: + - errcheck # checking for unchecked errors in go programs + - errorlint # errorlint is a linter for that can be used to find code that will cause problems with the error wrapping scheme introduced in Go 1.13. + - forbidigo # forbids identifiers matched by reg exps + - gomoddirectives # manage the use of 'replace', 'retract', and 'excludes' directives in go.mod. + - gosimple # linter for Go source code that specializes in simplifying a code + - misspell # finds commonly misspelled English words in comments + - nakedret # finds naked returns in functions greater than a specified function length + - nolintlint # reports ill-formed or insufficient nolint directives + - staticcheck # Staticcheck is a go vet on steroids, applying a ton of static analysis checks + - stylecheck # a replacement for golint + - unused # checks Go code for unused constants, variables, functions and types + - govet # Vet examines Go source code and reports suspicious constructs, such as Printf calls whose arguments do not align with the format string + - ineffassign # detects when assignments to existing variables are not used + - typecheck # Like the front-end of a Go compiler, parses and type-checks Go code + - unused # Finds unused global variables and constants + - asciicheck # simple linter to check that your code does not contain non-ASCII identifiers + - bodyclose # checks whether HTTP response body is closed successfully + - durationcheck # check for two durations multiplied together + - exportloopref # checks for pointers to enclosing loop variables + - goimports # Goimports does everything that gofmt does. Additionally, it checks unused imports + - gosec # inspects source code for security problems + - importas # enforces consistent import aliases + - nilerr # finds the code that returns nil even if it checks that the error is not nil. + - noctx # noctx finds sending http request without context.Context + - unconvert # Remove unnecessary type conversions + - wastedassign # wastedassign finds wasted assignment statements. 
+ - gomodguard # check for blocked dependencies + +# all available settings of specific linters +linters-settings: + errcheck: + # report about not checking of errors in type assertions: `a := b.(MyStruct)`; + check-type-assertions: true + # report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`. + check-blank: false + # List of functions to exclude from checking, where each entry is a single function to exclude. + # See https://github.com/kisielk/errcheck#excluding-functions for details. + exclude-functions: + - (mapstr.M).Delete # Only returns ErrKeyNotFound, can safely be ignored. + - (mapstr.M).Put # Can only fail on type conversions, usually safe to ignore. + + errorlint: + # Check whether fmt.Errorf uses the %w verb for formatting errors. See the readme for caveats + errorf: true + # Check for plain type assertions and type switches + asserts: true + # Check for plain error comparisons + comparison: true + + forbidigo: + # Forbid the following identifiers + forbid: + - fmt.Print.* # too much log noise + # Exclude godoc examples from forbidigo checks. Default is true. + exclude-godoc-examples: true + + nakedret: + # make an issue if func has more lines of code than this setting and it has naked returns; default is 30 + max-func-lines: 0 + + nolintlint: + # Enable to ensure that nolint directives are all used. Default is true. + allow-unused: false + # Exclude following linters from requiring an explanation. Default is []. + allow-no-explanation: [] + # Enable to require an explanation of nonzero length after each nolint directive. Default is false. + require-explanation: true + # Enable to require nolint directives to mention the specific linter being suppressed. Default is false. + require-specific: true + + gomodguard: + blocked: + # List of blocked modules. + modules: + # Blocked module. + - github.com/pkg/errors: + # Recommended modules that should be used instead. (Optional) + recommendations: + - errors + - fmt + reason: "This package is deprecated, use fmt.Errorf with %%w instead" + - github.com/elastic/beats/v7: + reason: "There must be no Beats dependency" + + staticcheck: + # https://staticcheck.io/docs/options#checks + checks: ["all"] + + stylecheck: + # https://staticcheck.io/docs/options#checks + checks: ["all"] + diff --git a/vendor/github.com/elastic/lunes/CHANGELOG.md b/vendor/github.com/elastic/lunes/CHANGELOG.md new file mode 100644 index 00000000000..e69de29bb2d diff --git a/vendor/github.com/elastic/lunes/LICENSE b/vendor/github.com/elastic/lunes/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/vendor/github.com/elastic/lunes/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/elastic/lunes/NOTICE.txt b/vendor/github.com/elastic/lunes/NOTICE.txt new file mode 100644 index 00000000000..2a1c861765d --- /dev/null +++ b/vendor/github.com/elastic/lunes/NOTICE.txt @@ -0,0 +1,1991 @@ +Lunes +Copyright 2022-2024 Elasticsearch BV + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +================================================================================ +Third party libraries used by the Lunes Libraries: +================================================================================ + + +-------------------------------------------------------------------------------- +Dependency : github.com/elastic/go-licenser +Version: v0.4.2 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/elastic/go-licenser@v0.4.2/LICENSE: + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2018 Elasticsearch B.V. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +-------------------------------------------------------------------------------- +Dependency : github.com/magefile/mage +Version: v1.15.0 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/magefile/mage@v1.15.0/LICENSE: + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2017 the Mage authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +-------------------------------------------------------------------------------- +Dependency : go.elastic.co/go-licence-detector +Version: v0.6.1 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/go.elastic.co/go-licence-detector@v0.6.1/LICENSE: + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ + +-------------------------------------------------------------------------------- +Dependency : golang.org/x/tools +Version: v0.23.0 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/golang.org/x/tools@v0.23.0/LICENSE: + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + + +================================================================================ +Indirect dependencies + + +-------------------------------------------------------------------------------- +Dependency : github.com/cyphar/filepath-securejoin +Version: v0.2.5 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/cyphar/filepath-securejoin@v0.2.5/LICENSE: + +Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved. +Copyright (C) 2017 SUSE LLC. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/davecgh/go-spew +Version: v1.1.1 +Licence type (autodetected): ISC +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/davecgh/go-spew@v1.1.1/LICENSE: + +ISC License + +Copyright (c) 2012-2016 Dave Collins + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/gobuffalo/here +Version: v0.6.0 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/gobuffalo/here@v0.6.0/LICENSE: + +The MIT License (MIT) + +Copyright (c) 2019 Mark Bates + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/google/go-cmp +Version: v0.6.0 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/google/go-cmp@v0.6.0/LICENSE: + +Copyright (c) 2017 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/google/licenseclassifier +Version: v0.0.0-20200402202327-879cb1424de0 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/google/licenseclassifier@v0.0.0-20200402202327-879cb1424de0/LICENSE: + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +-------------------------------------------------------------------------------- +Dependency : github.com/karrick/godirwalk +Version: v1.15.6 +Licence type (autodetected): BSD-2-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/karrick/godirwalk@v1.15.6/LICENSE: + +BSD 2-Clause License + +Copyright (c) 2017, Karrick McDermott +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/kr/pretty +Version: v0.1.0 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/kr/pretty@v0.1.0/License: + +The MIT License (MIT) + +Copyright 2012 Keith Rarick + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/kr/pty +Version: v1.1.1 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/kr/pty@v1.1.1/License: + +Copyright (c) 2011 Keith Rarick + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, +sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall +be included in all copies or substantial portions of the +Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY +KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE +WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS +OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR +OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/kr/text +Version: v0.1.0 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/kr/text@v0.1.0/License: + +Copyright 2012 Keith Rarick + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/markbates/pkger +Version: v0.17.0 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/markbates/pkger@v0.17.0/LICENSE: + +The MIT License (MIT) + +Copyright (c) 2019 Mark Bates + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/pmezard/go-difflib +Version: v1.0.0 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/pmezard/go-difflib@v1.0.0/LICENSE: + +Copyright (c) 2013, Patrick Mezard +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + The names of its contributors may not be used to endorse or promote +products derived from this software without specific prior written +permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/sergi/go-diff +Version: v1.1.0 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/sergi/go-diff@v1.1.0/LICENSE: + +Copyright (c) 2012-2016 The go-diff Authors. All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining a +copy of this software and associated documentation files (the "Software"), +to deal in the Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, sublicense, +and/or sell copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. + + + +-------------------------------------------------------------------------------- +Dependency : github.com/stretchr/objx +Version: v0.1.0 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/stretchr/objx@v0.1.0/LICENSE: + +The MIT License + +Copyright (c) 2014 Stretchr, Inc. 
+Copyright (c) 2017-2018 objx contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/stretchr/testify +Version: v1.6.1 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/stretchr/testify@v1.6.1/LICENSE: + +MIT License + +Copyright (c) 2012-2020 Mat Ryer, Tyler Bunnell and contributors. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/yuin/goldmark +Version: v1.4.13 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/yuin/goldmark@v1.4.13/LICENSE: + +MIT License + +Copyright (c) 2019 Yusuke Inuzuka + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +-------------------------------------------------------------------------------- +Dependency : golang.org/x/crypto +Version: v0.13.0 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/golang.org/x/crypto@v0.13.0/LICENSE: + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : golang.org/x/mod +Version: v0.19.0 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/golang.org/x/mod@v0.19.0/LICENSE: + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : golang.org/x/net +Version: v0.27.0 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/golang.org/x/net@v0.27.0/LICENSE: + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : golang.org/x/sync +Version: v0.7.0 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/golang.org/x/sync@v0.7.0/LICENSE: + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. 
nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : golang.org/x/sys +Version: v0.22.0 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/golang.org/x/sys@v0.22.0/LICENSE: + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : golang.org/x/telemetry +Version: v0.0.0-20240521205824-bda55230c457 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/golang.org/x/telemetry@v0.0.0-20240521205824-bda55230c457/LICENSE: + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : golang.org/x/term +Version: v0.12.0 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/golang.org/x/term@v0.12.0/LICENSE: + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : golang.org/x/text +Version: v0.13.0 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/golang.org/x/text@v0.13.0/LICENSE: + +Copyright (c) 2009 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : golang.org/x/xerrors +Version: v0.0.0-20190717185122-a985d3407aa7 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/golang.org/x/xerrors@v0.0.0-20190717185122-a985d3407aa7/LICENSE: + +Copyright (c) 2019 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + +-------------------------------------------------------------------------------- +Dependency : gopkg.in/check.v1 +Version: v1.0.0-20190902080502-41f04d3bba15 +Licence type (autodetected): BSD-2-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/gopkg.in/check.v1@v1.0.0-20190902080502-41f04d3bba15/LICENSE: + +Gocheck - A rich testing framework for Go + +Copyright (c) 2010-2013 Gustavo Niemeyer + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : gopkg.in/yaml.v2 +Version: v2.2.7 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/gopkg.in/yaml.v2@v2.2.7/LICENSE: + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +-------------------------------------------------------------------------------- +Dependency : gopkg.in/yaml.v3 +Version: v3.0.0-20200313102051-9f266ea9e77c +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/gopkg.in/yaml.v3@v3.0.0-20200313102051-9f266ea9e77c/LICENSE: + + +This project is covered by two different licenses: MIT and Apache. 
+
+#### MIT License ####
+
+The following files were ported to Go from C files of libyaml, and thus
+are still covered by their original MIT license, with the additional
+copyright staring in 2011 when the project was ported over:
+
+    apic.go emitterc.go parserc.go readerc.go scannerc.go
+    writerc.go yamlh.go yamlprivateh.go
+
+Copyright (c) 2006-2010 Kirill Simonov
+Copyright (c) 2006-2011 Kirill Simonov
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+### Apache License ###
+
+All the remaining project files are covered by the Apache license:
+
+Copyright (c) 2011-2019 Canonical Ltd
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+
diff --git a/vendor/github.com/elastic/lunes/README.md b/vendor/github.com/elastic/lunes/README.md
new file mode 100644
index 00000000000..fce081713ab
--- /dev/null
+++ b/vendor/github.com/elastic/lunes/README.md
@@ -0,0 +1,144 @@
+# Lunes
+
+---
+
+**Lunes** is a [Go](http://golang.org) library for parsing localized time strings into `time.Time`.
+
+There's no intention to replace the standard `time` package parsing functions; instead, it acts as a wrapper,
+translating the provided value to English before invoking `time.Parse` and `time.ParseInLocation`.
+
+It currently supports almost all [CLDR](https://cldr.unicode.org/) core locales (900+, including drafts),
+and is limited to the **gregorian** calendars.
+
+Once the official Go i18n features for time parsing are ready, it should be replaced.
+
+## Usage
+
+#### Parse
+
+```go
+// it's like time.Parse, but with an additional locale parameter to perform the value translation.
+// the language argument must be a well-formed BCP 47 language tag, e.g. ("en", "en-US") and
+// a known locale. If no data is found for the language, it returns ErrUnsupportedLocale.
+// If the given locale does not support any layout element specified on the layout argument,
+// it results in an ErrUnsupportedLayoutElem error. On the other hand, if the value does not
+// match the layout, an ErrLayoutMismatch is returned.
+t, err := lunes.Parse("Monday Jan _2 2006 15:04:05", "lunes oct 27 1988 11:53:29", lunes.LocaleEsES)
+
+// parse in specific time zones.
+t, err := lunes.ParseInLocation("Monday Jan _2 2006 15:04:05", "lunes oct 27 1988 11:53:29", time.UTC, lunes.LocaleEsES)
+```
+
+```go
+// creates a new generic locale for the given BCP 47 language tag, using the default CLDR
+// gregorian calendar data of the specified language. If the locale is unknown and/or no
+// default data is found, it returns ErrUnsupportedLocale.
+locale, err := lunes.NewDefaultLocale(lunes.LocaleEsES)
+
+// ParseWithLocale has better performance for multiple parse operations, as it does not
+// need to look up the locale data in each iteration.
+for _, val := range valuesToParse {
+	t, err := lunes.ParseWithLocale("Monday Jan _2 2006 15:04:05", val, locale)
+}
+```
+
+#### Translate
+
+```go
+// translates the value, without parsing it to time.Time. The language argument must be a
+// well-formed BCP 47 language tag, e.g. ("en", "en-US") and a known locale. If no data is
+// found for the language, it returns ErrUnsupportedLocale.
+// If the given locale does not support any layout element specified on the layout argument,
+// it results in an ErrUnsupportedLayoutElem error. On the other hand, if the value does not
+// match the layout, an ErrLayoutMismatch is returned.
+// For the following example, it results in: Friday Jan 27 11:53:29.
+str, err := lunes.Translate("Monday Jan _2 15:04:05", "viernes ene 27 11:53:29", lunes.LocaleEsES)
+
+// the translated value is meant to be used with the time package functions
+t, err := time.Parse("Monday Jan _2 15:04:05", str)
+```
+
+#### Custom Locales
+
+A `lunes.Locale` provides a collection of time layout values in a specific language.
+It is used to map the time layout elements of a foreign language to English.
+In order to use custom locales, the following methods must be implemented:
+
+```go
+// Language represents a BCP 47 tag, specifying this locale's language.
+Language() string
+
+// LongDayNames returns the long day names translations for the week days.
+// It must be sorted, starting from Sunday to Saturday, and contain all 7 elements,
+// even if one or more days are empty. If this locale does not support this format,
+// it should return an empty slice.
+LongDayNames() []string
+
+// ShortDayNames returns the short day names translations for the week days.
+// It must be sorted, starting from Sunday to Saturday, and contain all 7 elements,
+// even if one or more days are empty. If this locale does not support this format,
+// it should return an empty slice.
+ShortDayNames() []string
+
+// LongMonthNames returns the long month names translations.
+// It must be sorted, starting from January to December, and contain all 12 elements,
+// even if one or more months are empty. If this locale does not support this format,
+// it should return an empty slice.
+LongMonthNames() []string
+
+// ShortMonthNames returns the short month names translations.
+// It must be sorted, starting from January to December, and contain all 12 elements,
+// even if one or more months are empty. If this locale does not support this format,
+// it should return an empty slice.
+ShortMonthNames() []string
+
+// DayPeriods returns the periods of day translations for the AM and PM abbreviations.
+// It must be sorted, starting from AM to PM, and contain both elements, even if one
+
+Custom locales can be used with the `lunes.ParseWithLocale`, `lunes.ParseInLocationWithLocale`, and `lunes.TranslateWithLocale`
+functions:
+
+```go
+locale := &CustomLocale{}
+
+// It's like Parse, but instead of receiving a BCP 47 language tag argument, it receives a lunes.Locale.
+t, err := lunes.ParseWithLocale("Monday Jan _2 2006 15:04:05", "lunes oct 27 1988 11:53:29", locale)
+
+// It's like ParseInLocation, but instead of receiving a BCP 47 language tag argument, it receives a lunes.Locale.
+t, err := lunes.ParseInLocationWithLocale("Monday Jan _2 2006 15:04:05", "lunes oct 27 1988 11:53:29", time.UTC, locale)
+
+// It's like Translate, but instead of receiving a BCP 47 language tag argument, it receives a lunes.Locale.
+str, err := lunes.TranslateWithLocale("Monday Jan _2 2006 15:04:05", "lunes oct 27 1988 11:53:29", locale)
+```
+
+## Benchmarks
+
+Compared to [github.com/goodsign/monday](https://github.com/goodsign/monday):
+
+```
+BenchmarkTranslate-10                    3850832    303.2 ns/op    220 B/op     5 allocs/op
+BenchmarkTranslateWithLocale-10          5149981    235.1 ns/op     76 B/op     4 allocs/op
+BenchmarkParse-10                        2811612    428.1 ns/op    220 B/op     5 allocs/op
+BenchmarkParseInLocation-10              2792997    439.2 ns/op    220 B/op     5 allocs/op
+BenchmarkParseWithLocale-10              3268903    362.7 ns/op     76 B/op     4 allocs/op
+BenchmarkParseInLocationWithLocale-10    2974732    390.2 ns/op     76 B/op     4 allocs/op
+BenchmarkParseMonday-10                   213014     5584 ns/op   3754 B/op   117 allocs/op
+BenchmarkParseInLocationMonday-10         211826     5593 ns/op   3754 B/op   117 allocs/op
+```
+
+### Usage notes
+
+- It currently supports the following time layout replacements:
+  - Short day names (`Mon`)
+  - Long day names (`Monday`)
+  - Short month names (`Jan`)
+  - Long month names (`January`)
+  - Day periods (`PM`)
+- Translations are auto-generated and might be inconsistent depending on the CLDR locale [stage](https://cldr.unicode.org/index/process).
+- A few locales do not support (or are missing) translations for specific layout elements (short/long day/month names or day periods); in that case,
+  an ErrUnsupportedLayoutElem error is reported.
+
diff --git a/vendor/github.com/elastic/lunes/catalog-info.yaml b/vendor/github.com/elastic/lunes/catalog-info.yaml
new file mode 100644
index 00000000000..5fbdfb98a76
--- /dev/null
+++ b/vendor/github.com/elastic/lunes/catalog-info.yaml
@@ -0,0 +1,57 @@
+# Declare a Backstage Component that represents your application.
+--- +# yaml-language-server: $schema=https://json.schemastore.org/catalog-info.json +apiVersion: backstage.io/v1alpha1 +kind: Component +metadata: + name: lunes + description: Lunes is a Go library for parsing localized time strings into time.Time + +spec: + type: library + owner: group:logstash + system: platform-ingest + lifecycle: production + +--- +# yaml-language-server: $schema=https://gist.githubusercontent.com/elasticmachine/988b80dae436cafea07d9a4a460a011d/raw/e57ee3bed7a6f73077a3f55a38e76e40ec87a7cf/rre.schema.json +apiVersion: backstage.io/v1alpha1 +kind: Resource +metadata: + name: buildkite-pipeline-lunes + description: Buildkite Pipeline for lunes + links: + - title: Pipeline + url: https://buildkite.com/elastic/lunes + +spec: + type: buildkite-pipeline + owner: group:logstash + system: buildkite + implementation: + apiVersion: buildkite.elastic.dev/v1 + kind: Pipeline + metadata: + name: lunes + description: Buildkite pipeline for the lunes library + spec: + branch_configuration: "main" + repository: elastic/lunes + pipeline_file: ".buildkite/pipeline.yml" + maximum_timeout_in_minutes: 60 + provider_settings: + build_pull_request_forks: false + build_pull_requests: true # requires filter_enabled and filter_condition settings as below when used with buildkite-pr-bot + build_tags: true + filter_enabled: true + filter_condition: >- + build.pull_request.id == null || (build.creator.name == 'elasticmachine' && build.pull_request.id != null) + cancel_intermediate_builds: true + cancel_intermediate_builds_branch_filter: '!main' + skip_intermediate_builds: true + skip_intermediate_builds_branch_filter: '!main' + teams: + logstash: + access_level: MANAGE_BUILD_AND_READ + everyone: + access_level: READ_ONLY diff --git a/vendor/github.com/elastic/lunes/dev-tools/mage/check.go b/vendor/github.com/elastic/lunes/dev-tools/mage/check.go new file mode 100644 index 00000000000..75d856ccab8 --- /dev/null +++ b/vendor/github.com/elastic/lunes/dev-tools/mage/check.go @@ -0,0 +1,49 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+
+package mage
+
+import (
+	"fmt"
+
+	"github.com/magefile/mage/sh"
+)
+
+//nolint:forbidigo // allow print
+func CheckNoChanges() error {
+	fmt.Println(">> fmt - go mod tidy")
+	err := sh.RunV("go", "mod", "tidy", "-v")
+	if err != nil {
+		return fmt.Errorf("failed running go mod tidy, please fix the issues reported: %w", err)
+	}
+	fmt.Println(">> fmt - git diff")
+	err = sh.RunV("git", "diff")
+	if err != nil {
+		return fmt.Errorf("failed running git diff, please fix the issues reported: %w", err)
+	}
+	fmt.Println(">> fmt - git update-index")
+	err = sh.RunV("git", "update-index", "--refresh")
+	if err != nil {
+		return fmt.Errorf("failed running git update-index --refresh, please fix the issues reported: %w", err)
+	}
+	fmt.Println(">> fmt - git diff-index")
+	err = sh.RunV("git", "diff-index", "--exit-code", "HEAD", "--")
+	if err != nil {
+		return fmt.Errorf("failed running git diff-index, please fix the issues reported: %w", err)
+	}
+	return nil
+}
diff --git a/vendor/github.com/elastic/lunes/dev-tools/mage/deps.go b/vendor/github.com/elastic/lunes/dev-tools/mage/deps.go
new file mode 100644
index 00000000000..16822bf434f
--- /dev/null
+++ b/vendor/github.com/elastic/lunes/dev-tools/mage/deps.go
@@ -0,0 +1,43 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package mage
+
+import (
+	"fmt"
+
+	"github.com/magefile/mage/mg"
+
+	"github.com/elastic/lunes/dev-tools/mage/gotool"
+)
+
+// Deps contains targets related to checking dependencies.
+type Deps mg.Namespace
+
+// CheckModuleTidy checks if `go mod tidy` was run before the last commit.
+func (Deps) CheckModuleTidy() error {
+	err := gotool.Mod.Tidy()
+	if err != nil {
+		return err
+	}
+	err = assertUnchanged("go.mod")
+	if err != nil {
+		return fmt.Errorf("`go mod tidy` was not called before the last commit: %w", err)
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/elastic/lunes/dev-tools/mage/fmt.go b/vendor/github.com/elastic/lunes/dev-tools/mage/fmt.go
new file mode 100644
index 00000000000..210b2e98cfe
--- /dev/null
+++ b/vendor/github.com/elastic/lunes/dev-tools/mage/fmt.go
@@ -0,0 +1,107 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package mage
+
+import (
+	"fmt"
+	"io/fs"
+	"os"
+	"path/filepath"
+
+	"github.com/magefile/mage/mg"
+	"github.com/magefile/mage/sh"
+
+	"github.com/elastic/lunes/dev-tools/mage/gotool"
+)
+
+const (
+	// GoImportsImportPath controls the import path used to install goimports.
+	GoImportsImportPath = "golang.org/x/tools/cmd/goimports"
+
+	// GoImportsLocalPrefix is a string prefix matching imports that should be
+	// grouped after third-party packages.
+	GoImportsLocalPrefix = "github.com/elastic"
+)
+
+// GoImports contains targets related to formatting the Go code with goimports.
+type GoImports mg.Namespace
+
+// Run executes goimports against all .go files in and below the CWD.
+func (GoImports) Run() error {
+	mg.Deps(GoImports.Install)
+	goFiles, err := FindFilesRecursive(func(path string, _ os.FileInfo) bool {
+		return filepath.Ext(path) == ".go"
+	})
+	if err != nil {
+		return err
+	}
+	if len(goFiles) == 0 {
+		return nil
+	}
+
+	fmt.Println(">> fmt - goimports: Formatting Go code") //nolint:forbidigo // it's a mage target
+	args := append(
+		[]string{"-local", GoImportsLocalPrefix, "-l", "-w"},
+		goFiles...,
+	)
+
+	return sh.RunV("goimports", args...)
+}
+
+// Install installs goimports from GoImportsImportPath.
+func (GoImports) Install() error {
+	err := gotool.Install(gotool.Install.Package(GoImportsImportPath))
+	if err != nil {
+		return fmt.Errorf("cannot install GoImports: %w", err)
+	}
+
+	return nil
+}
+
+// FindFilesRecursive recursively traverses from the CWD and invokes the given
+// match function on each regular file to determine if the given path should be
+// returned as a match. It ignores files in .git directories.
+func FindFilesRecursive(match func(path string, info os.FileInfo) bool) ([]string, error) {
+	var matches []string
+	walkDir := func(path string, d fs.DirEntry, err error) error {
+		if err != nil {
+			return err
+		}
+
+		// Don't look for files in git directories
+		if d.IsDir() && filepath.Base(path) == ".git" {
+			return filepath.SkipDir
+		}
+
+		info, err := d.Info()
+		if err != nil {
+			return fmt.Errorf("cannot get FileInfo: %w", err)
+		}
+		if !info.Mode().IsRegular() {
+			// continue
+			return nil
+		}
+
+		if match(filepath.ToSlash(path), info) {
+			matches = append(matches, path)
+		}
+		return nil
+	}
+
+	err := filepath.WalkDir(".", fs.WalkDirFunc(walkDir))
+	return matches, err
+}
diff --git a/vendor/github.com/elastic/lunes/dev-tools/mage/gotool/get.go b/vendor/github.com/elastic/lunes/dev-tools/mage/gotool/get.go
new file mode 100644
index 00000000000..9770cc1a1b5
--- /dev/null
+++ b/vendor/github.com/elastic/lunes/dev-tools/mage/gotool/get.go
@@ -0,0 +1,43 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+ +package gotool + +type goGet func(opts ...ArgOpt) error +type goDownload func(opts ...ArgOpt) error + +// Get runs `go get` and provides optionals for adding command line arguments. +var Get goGet = runGoGet + +func runGoGet(opts ...ArgOpt) error { + args := buildArgs(opts) + return runVGo("get", args) +} + +func (goGet) Download() ArgOpt { return flagBoolIf("-d", true) } +func (goGet) Update() ArgOpt { return flagBoolIf("-u", true) } +func (goGet) Package(pkg string) ArgOpt { return posArg(pkg) } + +// Download runs `go download` and provides optionals for adding command line arguments. +var Download goDownload = runGoDownload + +func runGoDownload(opts ...ArgOpt) error { + args := buildArgs(opts) + return runVGo("download", args) +} + +func (goDownload) All() ArgOpt { return posArg("all") } diff --git a/vendor/github.com/elastic/lunes/dev-tools/mage/gotool/go.go b/vendor/github.com/elastic/lunes/dev-tools/mage/gotool/go.go new file mode 100644 index 00000000000..b7a833cfe4f --- /dev/null +++ b/vendor/github.com/elastic/lunes/dev-tools/mage/gotool/go.go @@ -0,0 +1,343 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package gotool + +import ( + "fmt" + "os" + "strings" + + "github.com/magefile/mage/mg" + "github.com/magefile/mage/sh" +) + +// Args holds parameters, environment variables and flag information used to +// pass to the go tool. +type Args struct { + extra map[string]string // extra flags one can pass to the command + env map[string]string + flags map[string][]string + pos []string +} + +// ArgOpt is a functional option adding info to Args once executed. +type ArgOpt func(args *Args) + +type goInstall func(opts ...ArgOpt) error + +// Install runs `go install` and provides optionals for adding command line arguments. +var Install goInstall = runGoInstall + +func runGoInstall(opts ...ArgOpt) error { + args := buildArgs(opts) + return runVGo("install", args) +} + +func (goInstall) Package(pkg string) ArgOpt { return posArg(pkg) } +func (goInstall) Vendored() ArgOpt { return flagArg("-mod", "vendor") } + +type goTest func(opts ...ArgOpt) error + +// Test runs `go test` and provides optionals for adding command line arguments. +var Test goTest = runGoTest + +// GetModuleName returns the name of the module. +func GetModuleName() (string, error) { + lines, err := getLines(callGo(nil, "list", "-m")) + if err != nil { + return "", err + } + if len(lines) != 1 { + return "", fmt.Errorf("unexpected number of lines") + } + return lines[0], nil +} + +// ListProjectPackages lists all packages in the current project +func ListProjectPackages() ([]string, error) { + return ListPackages("./...") +} + +// ListPackages calls `go list` for every package spec given. 
+func ListPackages(pkgs ...string) ([]string, error) {
+	return getLines(callGo(nil, "list", pkgs...))
+}
+
+// ListDeps calls `go list -deps` for every package spec given.
+func ListDeps(pkg string) ([]string, error) {
+	const tmpl = `{{if not .Standard}}{{.ImportPath}}{{end}}`
+
+	return getLines(callGo(nil, "list", "-deps", "-f", tmpl, pkg))
+}
+
+// ListDepsForNotice lists all module dependencies in the JSON format
+// consumed by the notice generator.
+func ListDepsForNotice() (string, error) {
+	return callGo(nil, "list", "-json", "-m", "all")
+}
+
+// ListDepsLocation calls `go list -deps` for every package spec given and
+// returns a map of import path to source directory.
+func ListDepsLocation(pkg string) (map[string]string, error) {
+	const tmpl = `{{if not .Standard}}{{.ImportPath}};{{.Dir}}{{end}}`
+
+	lines, err := getLines(callGo(nil, "list", "-deps", "-f", tmpl, pkg))
+	if err != nil {
+		return nil, err
+	}
+	deps := make(map[string]string, len(lines))
+	for _, l := range lines {
+		parts := strings.Split(l, ";")
+		if len(parts) != 2 {
+			return nil, fmt.Errorf("invalid number of parts")
+		}
+		deps[parts[0]] = parts[1]
+	}
+	return deps, nil
+}
+
+// ListTestFiles lists all go and cgo test files available in a package.
+func ListTestFiles(pkg string) ([]string, error) {
+	const tmpl = `{{ range .TestGoFiles }}{{ printf "%s\n" . }}{{ end }}` +
+		`{{ range .XTestGoFiles }}{{ printf "%s\n" . }}{{ end }}`
+
+	return getLines(callGo(nil, "list", "-f", tmpl, pkg))
+}
+
+// ListModuleCacheDir returns the module cache directory containing
+// the specified module. If the module does not exist in the cache,
+// an error will be returned.
+func ListModuleCacheDir(pkg string) (string, error) {
+	return listModuleDir(pkg, false)
+}
+
+// ListModuleVendorDir returns the vendor directory containing the
+// specified module. If the module has not been vendored, an error
+// will be returned.
+func ListModuleVendorDir(pkg string) (string, error) {
+	return listModuleDir(pkg, true)
+}
+
+func listModuleDir(pkg string, vendor bool) (string, error) {
+	env := map[string]string{
+		// Make sure GOFLAGS does not influence behaviour.
+		"GOFLAGS": "",
+	}
+	args := []string{"-m", "-f", "{{.Dir}}"}
+	if vendor {
+		args = append(args, "-mod=vendor")
+	}
+	args = append(args, pkg)
+	lines, err := getLines(callGo(env, "list", args...))
+	if err != nil {
+		return "", err
+	}
+	if n := len(lines); n != 1 {
+		return "", fmt.Errorf("expected 1 line, got %d while looking for %s", n, pkg)
+	}
+	return lines[0], nil
+}
+
+// HasTests returns true if the given package contains test files.
+func HasTests(pkg string) (bool, error) { + files, err := ListTestFiles(pkg) + if err != nil { + return false, err + } + return len(files) > 0, nil +} + +func (goTest) WithCoverage(to string) ArgOpt { + return combine(flagArg("-cover", ""), flagArgIf("-test.coverprofile", to)) +} +func (goTest) Short(b bool) ArgOpt { return flagBoolIf("-test.short", b) } +func (goTest) Use(bin string) ArgOpt { return extraArgIf("use", bin) } +func (goTest) OS(os string) ArgOpt { return envArgIf("GOOS", os) } +func (goTest) ARCH(arch string) ArgOpt { return envArgIf("GOARCH", arch) } +func (goTest) Create() ArgOpt { return flagArg("-c", "") } +func (goTest) Out(path string) ArgOpt { return flagArg("-o", path) } +func (goTest) Package(path string) ArgOpt { return posArg(path) } +func (goTest) Verbose() ArgOpt { return flagArg("-test.v", "") } +func runGoTest(opts ...ArgOpt) error { + args := buildArgs(opts) + if bin := args.Val("use"); bin != "" { + flags := map[string][]string{} + for k, v := range args.flags { + if strings.HasPrefix(k, "-test.") { + flags[k] = v + } + } + + useArgs := &Args{} + *useArgs = *args + useArgs.flags = flags + + _, err := sh.Exec(useArgs.env, os.Stdout, os.Stderr, bin, useArgs.build()...) + return err + } + + return runVGo("test", args) +} + +func getLines(out string, err error) ([]string, error) { + if err != nil { + return nil, err + } + + lines := strings.Split(out, "\n") + res := lines[:0] + for _, line := range lines { + line = strings.TrimSpace(line) + if len(line) > 0 { + res = append(res, line) + } + } + + return res, nil +} + +func callGo(env map[string]string, cmd string, opts ...string) (string, error) { //nolint:unparam // not always receives list + args := []string{cmd} + args = append(args, opts...) + return sh.OutputWith(env, mg.GoCmd(), args...) +} + +func runVGo(cmd string, args *Args) error { + return execGoWith(func(env map[string]string, cmd string, args ...string) error { + _, err := sh.Exec(env, os.Stdout, os.Stderr, cmd, args...) + return err + }, cmd, args) +} + +func execGoWith( + fn func(map[string]string, string, ...string) error, + cmd string, args *Args, +) error { + cliArgs := []string{cmd} + cliArgs = append(cliArgs, args.build()...) + return fn(args.env, mg.GoCmd(), cliArgs...) +} + +func posArg(value string) ArgOpt { + return func(a *Args) { a.Add(value) } +} + +func extraArg(k, v string) ArgOpt { + return func(a *Args) { a.Extra(k, v) } +} + +func extraArgIf(k, v string) ArgOpt { + if v == "" { + return nil + } + return extraArg(k, v) +} + +func envArg(k, v string) ArgOpt { + return func(a *Args) { a.Env(k, v) } +} + +func envArgIf(k, v string) ArgOpt { + if v == "" { + return nil + } + return envArg(k, v) +} + +func flagArg(flag, value string) ArgOpt { + return func(a *Args) { a.Flag(flag, value) } +} + +func flagArgIf(flag, value string) ArgOpt { + if value == "" { + return nil + } + return flagArg(flag, value) +} + +func flagBoolIf(flag string, b bool) ArgOpt { + if b { + return flagArg(flag, "") + } + return nil +} + +func combine(opts ...ArgOpt) ArgOpt { + return func(a *Args) { + for _, opt := range opts { + if opt != nil { + opt(a) + } + } + } +} + +func buildArgs(opts []ArgOpt) *Args { + a := &Args{} + combine(opts...)(a) + return a +} + +// Extra sets a special k/v pair to be interpreted by the execution function. +func (a *Args) Extra(k, v string) { + if a.extra == nil { + a.extra = map[string]string{} + } + a.extra[k] = v +} + +// Val returns a special functions value for a given key. 
+func (a *Args) Val(k string) string {
+	if a.extra == nil {
+		return ""
+	}
+	return a.extra[k]
+}
+
+// Env sets an environment variable to be passed to the child process on exec.
+func (a *Args) Env(k, v string) {
+	if a.env == nil {
+		a.env = map[string]string{}
+	}
+	a.env[k] = v
+}
+
+// Flag adds a flag to be passed to the child process on exec.
+func (a *Args) Flag(flag, value string) {
+	if a.flags == nil {
+		a.flags = map[string][]string{}
+	}
+	a.flags[flag] = append(a.flags[flag], value)
+}
+
+// Add adds a positional argument to be passed to the child process on exec.
+func (a *Args) Add(p string) {
+	a.pos = append(a.pos, p)
+}
+
+func (a *Args) build() []string {
+	args := make([]string, 0, 2*len(a.flags)+len(a.pos))
+	for k, values := range a.flags {
+		for _, v := range values {
+			args = append(args, k)
+			if v != "" {
+				args = append(args, v)
+			}
+		}
+	}
+
+	args = append(args, a.pos...)
+	return args
+}
diff --git a/vendor/github.com/elastic/lunes/dev-tools/mage/gotool/licenser.go b/vendor/github.com/elastic/lunes/dev-tools/mage/gotool/licenser.go
new file mode 100644
index 00000000000..67978ab8218
--- /dev/null
+++ b/vendor/github.com/elastic/lunes/dev-tools/mage/gotool/licenser.go
@@ -0,0 +1,35 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package gotool
+
+import "github.com/magefile/mage/sh"
+
+type goLicenser func(opts ...ArgOpt) error
+
+// Licenser runs `go-licenser` and provides optionals for adding command line arguments.
+var Licenser goLicenser = runGoLicenser
+
+func runGoLicenser(opts ...ArgOpt) error {
+	args := buildArgs(opts).build()
+	return sh.RunV("go-licenser", args...)
+}
+
+func (goLicenser) Check() ArgOpt { return flagBoolIf("-d", true) }
+func (goLicenser) License(license string) ArgOpt { return flagArgIf("-license", license) }
+func (goLicenser) Exclude(path string) ArgOpt { return flagArgIf("-exclude", path) }
+func (goLicenser) Path(path string) ArgOpt { return posArg(path) }
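+
+// As an illustrative sketch (mirroring how this module's magefile.go composes
+// these options), a license header check can be invoked as:
+//
+//	licenser := gotool.Licenser
+//	err := licenser(licenser.Check(), licenser.License("ASL2"))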
diff --git a/vendor/github.com/elastic/lunes/dev-tools/mage/gotool/modules.go b/vendor/github.com/elastic/lunes/dev-tools/mage/gotool/modules.go
new file mode 100644
index 00000000000..d7a880756b7
--- /dev/null
+++ b/vendor/github.com/elastic/lunes/dev-tools/mage/gotool/modules.go
@@ -0,0 +1,64 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package gotool
+
+// Mod is the command go mod.
+var Mod = goMod{
+	Download: modCommand{"download"}.run,
+	Init:     modCommand{"init"}.run,
+	Tidy:     modCommand{"tidy"}.run,
+	Verify:   modCommand{"verify"}.run,
+	Vendor:   modCommand{"vendor"}.run,
+}
+
+type modCommand struct {
+	method string
+}
+
+func (cmd modCommand) run(opts ...ArgOpt) error {
+	o := make([]ArgOpt, len(opts)+1)
+	o[0] = posArg(cmd.method)
+	for i, opt := range opts {
+		o[i+1] = opt
+	}
+	args := buildArgs(o)
+	return runVGo("mod", args)
+}
+
+type goMod struct {
+	Download modDownload
+	Init     modInit
+	Tidy     modTidy
+	Verify   modVerify
+	Vendor   modVendor
+}
+
+// modDownload downloads the modules needed to build and test packages.
+type modDownload func(opts ...ArgOpt) error
+
+// modInit initializes a new go module in the current folder.
+type modInit func(opts ...ArgOpt) error
+
+// modTidy cleans up the go.mod file.
+type modTidy func(opts ...ArgOpt) error
+
+// modVerify checks that deps have the expected content.
+type modVerify func(opts ...ArgOpt) error
+
+// modVendor downloads and copies dependencies under the folder vendor.
+type modVendor func(opts ...ArgOpt) error
diff --git a/vendor/github.com/elastic/lunes/dev-tools/mage/gotool/noticer.go b/vendor/github.com/elastic/lunes/dev-tools/mage/gotool/noticer.go
new file mode 100644
index 00000000000..26669c3f74f
--- /dev/null
+++ b/vendor/github.com/elastic/lunes/dev-tools/mage/gotool/noticer.go
@@ -0,0 +1,38 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package gotool
+
+import "github.com/magefile/mage/sh"
+
+type goNoticeGenerator func(opts ...ArgOpt) error
+
+// NoticeGenerator runs `go-licence-detector` and provides optionals for adding command line arguments.
+var NoticeGenerator goNoticeGenerator = runGoNoticeGenerator
+
+func runGoNoticeGenerator(opts ...ArgOpt) error {
+	args := buildArgs(opts).build()
+	return sh.RunV("go-licence-detector", args...)
+}
+
+func (goNoticeGenerator) Dependencies(path string) ArgOpt { return flagArg("-in", path) }
+func (goNoticeGenerator) IncludeIndirect() ArgOpt { return flagBoolIf("-includeIndirect", true) }
+func (goNoticeGenerator) Rules(path string) ArgOpt { return flagArg("-rules", path) }
+func (goNoticeGenerator) Overrides(path string) ArgOpt { return flagArg("-overrides", path) }
+func (goNoticeGenerator) NoticeTemplate(path string) ArgOpt { return flagArg("-noticeTemplate", path) }
+func (goNoticeGenerator) NoticeOutput(path string) ArgOpt { return flagArg("-noticeOut", path) }
+func (goNoticeGenerator) DepsOutput(path string) ArgOpt { return flagArg("-depsOut", path) }
diff --git a/vendor/github.com/elastic/lunes/dev-tools/mage/install.go b/vendor/github.com/elastic/lunes/dev-tools/mage/install.go
new file mode 100644
index 00000000000..0e30803f093
--- /dev/null
+++ b/vendor/github.com/elastic/lunes/dev-tools/mage/install.go
@@ -0,0 +1,44 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package mage
+
+import (
+	"github.com/elastic/lunes/dev-tools/mage/gotool"
+)
+
+var (
+	// GoLicenserImportPath controls the import path used to install go-licenser.
+	GoLicenserImportPath = "github.com/elastic/go-licenser"
+
+	// GoNoticeGeneratorImportPath controls the import path used to install go-licence-detector.
+	GoNoticeGeneratorImportPath = "go.elastic.co/go-licence-detector"
+)
+
+// InstallGoLicenser target installs go-licenser
+func InstallGoLicenser() error {
+	return gotool.Install(
+		gotool.Install.Package(GoLicenserImportPath),
+	)
+}
+
+// InstallGoNoticeGen target installs go-licence-detector
+func InstallGoNoticeGen() error {
+	return gotool.Install(
+		gotool.Install.Package(GoNoticeGeneratorImportPath),
+	)
+}
diff --git a/vendor/github.com/elastic/lunes/dev-tools/mage/linter.go b/vendor/github.com/elastic/lunes/dev-tools/mage/linter.go
new file mode 100644
index 00000000000..d8bed5a245e
--- /dev/null
+++ b/vendor/github.com/elastic/lunes/dev-tools/mage/linter.go
@@ -0,0 +1,175 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package mage
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"log"
+	"net/http"
+	"os"
+	"path/filepath"
+
+	"github.com/magefile/mage/mg"
+	"github.com/magefile/mage/sh"
+)
+
+const (
+	linterVersion    = "v1.55.2"
+	linterInstallURL = "https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh"
+)
+
+var (
+	linterConfigFilename = filepath.Join(".", ".golangci.yml")
+	linterInstallDir     = filepath.Join(".", "build")
+	linterInstallFile    = filepath.Join(linterInstallDir, "install-golang-ci.sh")
+	linterBinaryFile     = filepath.Join(linterInstallDir, linterVersion, "golangci-lint")
+)
+
+// Linter contains targets related to linting the Go code.
+type Linter mg.Namespace
+
+// CheckConfig makes sure that the `.golangci.yml` does not have uncommitted changes.
+func (Linter) CheckConfig() error {
+	err := assertUnchanged(linterConfigFilename)
+	if err != nil {
+		return fmt.Errorf("linter configuration has uncommitted changes: %w", err)
+	}
+	return nil
+}
+
+// Install installs golangci-lint (https://golangci-lint.run) to `./build`
+// using the official installation script downloaded from GitHub.
+// If the linter binary already exists, it does nothing.
+func (Linter) Install() error {
+	return install(false)
+}
+
+// ForceInstall force installs the linter regardless of whether it exists or not.
+func (Linter) ForceInstall() error {
+	return install(true)
+}
+
+func install(force bool) error {
+	dirPath := filepath.Dir(linterBinaryFile)
+	err := os.MkdirAll(dirPath, 0700)
+	if err != nil {
+		return fmt.Errorf("failed to create path %q: %w", dirPath, err)
+	}
+
+	_, err = os.Stat(linterBinaryFile)
+	if !force && err == nil {
+		log.Println("The linter has been already installed, skipping...")
+		return nil
+	}
+	if err != nil && !errors.Is(err, os.ErrNotExist) {
+		return fmt.Errorf("failed to check if file %q exists: %w", linterBinaryFile, err)
+	}
+
+	log.Println("Preparing the installation script file...")
+
+	installScript, err := os.OpenFile(linterInstallFile, os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0700)
+	if err != nil {
+		return fmt.Errorf("failed to create file %q: %w", linterInstallFile, err)
+	}
+	defer installScript.Close()
+
+	log.Println("Downloading the linter installation script...")
+	//nolint:noctx // valid use since there is no context
+	resp, err := http.Get(linterInstallURL)
+	if err != nil {
+		return fmt.Errorf("cannot download the linter installation script from %q: %w", linterInstallURL, err)
+	}
+	defer resp.Body.Close()
+
+	lr := io.LimitReader(resp.Body, 1024*100) // not more than 100 KB, just to be safe
+	_, err = io.Copy(installScript, lr)
+	if err != nil {
+		return fmt.Errorf("failed to finish downloading the linter installation script: %w", err)
+	}
+
+	err = installScript.Close() // otherwise we cannot run the script
+	if err != nil {
+		return fmt.Errorf("failed to close file %q: %w", linterInstallFile, err)
+	}
+
+	binaryDir := filepath.Dir(linterBinaryFile)
+	err = os.MkdirAll(binaryDir, 0700)
+	if err != nil {
+		return fmt.Errorf("cannot create path %q: %w", binaryDir, err)
+	}
+
+	// there must be no space after `-b`, otherwise the script does not work correctly ¯\_(ツ)_/¯
+	return sh.Run(linterInstallFile, "-b"+binaryDir, linterVersion)
+}
+
+// All runs the linter against the entire codebase.
+func (l Linter) All() error {
+	mg.Deps(l.Install, l.CheckConfig)
+	return runLinter()
+}
+
+// Version prints the version of the linter in use.
+func (l Linter) Version() error {
+	mg.Deps(l.Install)
+	return runLinter("--version")
+}
+
+// LastChange runs the linter against all files changed since the fork point from `main`.
+// If the current branch is `main`, it runs against the files changed in the last commit.
+func (l Linter) LastChange() error {
+	mg.Deps(l.Install, l.CheckConfig)
+
+	// get current branch name
+	branch, err := sh.Output("git", "rev-parse", "--abbrev-ref", "HEAD")
+	if err != nil {
+		return fmt.Errorf("failed to get the current branch: %w", err)
+	}
+
+	// The linter is supposed to support linting changed diffs only but, for some
+	// reason, it does not output any results without linting the whole files, so
+	// we have to use `--whole-files`. This can be frustrating for developers who
+	// want to fix a single line in an existing codebase, since the linter forces
+	// them to fix all linting issues in the whole file instead.
+
+	if branch == "main" {
+		// files changed in the last commit
+		return runLinter("--new-from-rev=HEAD~", "--whole-files")
+	}
+
+	return runLinter("--new-from-rev=origin/main", "--whole-files")
+}
+
+// runLinter runs the linter, passing `-v` when mage runs in verbose mode, plus
+// the given arguments. It also redirects the linter's output to stderr instead
+// of discarding it.
+func runLinter(runFlags ...string) error {
+	var args []string
+
+	if mg.Verbose() {
+		args = append(args, "-v")
+	}
+
+	args = append(args, "run")
+	args = append(args, runFlags...)
+	args = append(args, "-c", linterConfigFilename)
+	args = append(args, "./...")
+
+	return runWithStdErr(linterBinaryFile, args...)
+}
diff --git a/vendor/github.com/elastic/lunes/dev-tools/mage/mage.go b/vendor/github.com/elastic/lunes/dev-tools/mage/mage.go
new file mode 100644
index 00000000000..05841a0274e
--- /dev/null
+++ b/vendor/github.com/elastic/lunes/dev-tools/mage/mage.go
@@ -0,0 +1,40 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package mage
+
+import (
+	"fmt"
+	"os"
+
+	"github.com/magefile/mage/sh"
+)
+
+func assertUnchanged(path string) error {
+	err := sh.Run("git", "diff", "--exit-code", path)
+	if err != nil {
+		return fmt.Errorf("failed to assert the unchanged file %q: %w", path, err)
+	}
+
+	return nil
+}
+
+// runWithStdErr runs a command redirecting its stderr to the console instead of discarding it.
+func runWithStdErr(command string, args ...string) error {
+	_, err := sh.Exec(nil, os.Stdout, os.Stderr, command, args...)
+	return err
+}
diff --git a/vendor/github.com/elastic/lunes/dev-tools/mage/notice.go b/vendor/github.com/elastic/lunes/dev-tools/mage/notice.go
new file mode 100644
index 00000000000..19dad5bbefe
--- /dev/null
+++ b/vendor/github.com/elastic/lunes/dev-tools/mage/notice.go
@@ -0,0 +1,57 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package mage
+
+import (
+	"fmt"
+	"os"
+
+	"github.com/magefile/mage/mg"
+
+	"github.com/elastic/lunes/dev-tools/mage/gotool"
+)
+
+// GenerateNotice generates a NOTICE file from the module dependencies using
+// go-licence-detector, applying the given overrides, rules, and template.
+func GenerateNotice(overrides, rules, noticeTemplate string) error {
+	mg.Deps(InstallGoNoticeGen, Deps.CheckModuleTidy)
+
+	err := gotool.Mod.Download(gotool.Download.All())
+	if err != nil {
+		return fmt.Errorf("error while downloading dependencies: %w", err)
+	}
+
+	// Ensure the go.mod file is left unchanged after go mod download all runs.
+	// go mod download will modify go.sum in a way that conflicts with go mod tidy.
+	// https://github.com/golang/go/issues/43994#issuecomment-770053099
+	defer gotool.Mod.Tidy() //nolint:errcheck // No value in handling this error.
+
+	out, err := gotool.ListDepsForNotice()
+	if err != nil {
+		return fmt.Errorf("error listing dependencies for the notice file: %w", err)
+	}
+
+	depsFile, err := os.CreateTemp("", "depsout")
+	if err != nil {
+		return fmt.Errorf("error creating the dependencies temp file: %w", err)
+	}
+	defer os.Remove(depsFile.Name())
+	if _, err := depsFile.WriteString(out); err != nil {
+		return fmt.Errorf("error writing the dependencies temp file: %w", err)
+	}
+	if err := depsFile.Close(); err != nil {
+		return fmt.Errorf("error closing the dependencies temp file: %w", err)
+	}
+
+	generator := gotool.NoticeGenerator
+	return generator(
+		generator.Dependencies(depsFile.Name()),
+		generator.IncludeIndirect(),
+		generator.Overrides(overrides),
+		generator.Rules(rules),
+		generator.NoticeTemplate(noticeTemplate),
+		generator.NoticeOutput("NOTICE.txt"),
+	)
+}
diff --git a/vendor/github.com/elastic/lunes/locale.go b/vendor/github.com/elastic/lunes/locale.go
new file mode 100644
index 00000000000..8b6e1e9d367
--- /dev/null
+++ b/vendor/github.com/elastic/lunes/locale.go
@@ -0,0 +1,113 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+//go:generate go run generator.go
+
+package lunes
+
+import (
+	"fmt"
+)
+
+// A Locale provides a collection of time layout values in a specific language.
+// It provides a mapping between the time layout elements in a foreign language and English.
+type Locale interface {
+	// Language represents a BCP 47 tag, specifying this locale language.
+	Language() string
+
+	// LongDayNames returns the long day name translations for the week days.
+	// It must be sorted, starting from Sunday to Saturday, and contain all 7 elements,
+	// even if one or more days are empty. If this locale does not support this format,
+	// it should return an empty slice.
+	LongDayNames() []string
+
+	// ShortDayNames returns the short day name translations for the week days.
+	// It must be sorted, starting from Sunday to Saturday, and contain all 7 elements,
+	// even if one or more days are empty. If this locale does not support this format,
+	// it should return an empty slice.
+	ShortDayNames() []string
+
+	// LongMonthNames returns the long month name translations.
+	// It must be sorted, starting from January to December, and contain all 12 elements,
+	// even if one or more months are empty. If this locale does not support this format,
+	// it should return an empty slice.
+	LongMonthNames() []string
+
+	// ShortMonthNames returns the short month name translations.
+	// It must be sorted, starting from January to December, and contain all 12 elements,
+	// even if one or more months are empty. If this locale does not support this format,
+	// it should return an empty slice.
+	ShortMonthNames() []string
+
+	// DayPeriods returns the day period translations for the AM and PM abbreviations.
+	// It must be sorted, starting from AM to PM, and contain both elements, even if one
+	// of them is empty. If this locale does not support this format, it should return an
+	// empty slice.
+	DayPeriods() []string
+}
+
+type genericLocale struct {
+	lang  string
+	table [5][]string
+}
+
+func (g *genericLocale) LongDayNames() []string {
+	return g.table[longDayNamesField]
+}
+
+func (g *genericLocale) ShortDayNames() []string {
+	return g.table[shortDayNamesField]
+}
+
+func (g *genericLocale) LongMonthNames() []string {
+	return g.table[longMonthNamesField]
+}
+
+func (g *genericLocale) ShortMonthNames() []string {
+	return g.table[shortMonthNamesField]
+}
+
+func (g *genericLocale) DayPeriods() []string {
+	return g.table[dayPeriodsField]
+}
+
+func (g *genericLocale) Language() string {
+	return g.lang
+}
+
+// ErrUnsupportedLocale indicates that a provided language.Tag is not supported by the
+// default CLDR generic locales.
+type ErrUnsupportedLocale struct {
+	lang string
+}
+
+func (e *ErrUnsupportedLocale) Error() string {
+	return fmt.Sprintf("locale %s not supported", e.lang)
+}
+
+// NewDefaultLocale creates a new generic locale for the given BCP 47 language tag, using
+// the default CLDR gregorian calendars data of the specified language.
+// If the language is unknown and no default data is found, it returns ErrUnsupportedLocale.
+func NewDefaultLocale(lang string) (Locale, error) {
+	table, ok := tables[lang]
+	if !ok {
+		return nil, &ErrUnsupportedLocale{lang}
+	}
+
+	locale := genericLocale{lang: lang, table: table}
+	return &locale, nil
+}
diff --git a/vendor/github.com/elastic/lunes/lunes.go b/vendor/github.com/elastic/lunes/lunes.go
new file mode 100644
index 00000000000..8074b2a3d86
--- /dev/null
+++ b/vendor/github.com/elastic/lunes/lunes.go
@@ -0,0 +1,482 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package lunes
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+	"time"
+	"unicode"
+)
+
+var longDayNamesStd = []string{
+	"Sunday",
+	"Monday",
+	"Tuesday",
+	"Wednesday",
+	"Thursday",
+	"Friday",
+	"Saturday",
+}
+
+var shortDayNamesStd = []string{
+	"Sun",
+	"Mon",
+	"Tue",
+	"Wed",
+	"Thu",
+	"Fri",
+	"Sat",
+}
+
+var shortMonthNamesStd = []string{
+	"Jan",
+	"Feb",
+	"Mar",
+	"Apr",
+	"May",
+	"Jun",
+	"Jul",
+	"Aug",
+	"Sep",
+	"Oct",
+	"Nov",
+	"Dec",
+}
+
+var longMonthNamesStd = []string{
+	"January",
+	"February",
+	"March",
+	"April",
+	"May",
+	"June",
+	"July",
+	"August",
+	"September",
+	"October",
+	"November",
+	"December",
+}
+
+var dayPeriodsStdUpper = []string{
+	"AM",
+	"PM",
+}
+
+var dayPeriodsStdLower = []string{
+	"am",
+	"pm",
+}
+
+// Parse parses a formatted string in a foreign language and returns the [time.Time] value
+// it represents. See the documentation for the constant called [time.Layout] to see how to
+// represent the format.
+//
+// After translating the foreign language value to English, it gets the time value by
+// calling the Go standard [time.Parse] function.
+//
+// The language argument must be a well-formed BCP 47 language tag, e.g. ("en", "en-US") and
+// a known locale. If no data is found for the language, it returns ErrUnsupportedLocale.
+// If the given language does not support a [time.Layout] element specified in the layout
+// argument, it results in an ErrUnsupportedLayoutElem error. On the other hand, if the value
+// does not match the layout, an ErrLayoutMismatch is returned. See the documentation for
+// [time.Parse] for other possible errors it might return.
+//
+// To execute several parses for the same locale, use [ParseWithLocale] as it performs better.
+func Parse(layout string, value string, lang string) (time.Time, error) {
+	locale, err := NewDefaultLocale(lang)
+	if err != nil {
+		return time.Time{}, err
+	}
+
+	return ParseWithLocale(layout, value, locale)
+}
+
+// ParseWithLocale is like Parse, but instead of receiving a BCP 47 language tag argument,
+// it receives a built [lunes.Locale], avoiding looking up existing data in each operation
+// and allowing extensibility.
+func ParseWithLocale(layout string, value string, locale Locale) (time.Time, error) {
+	pv, err := TranslateWithLocale(layout, value, locale)
+	if err != nil {
+		return time.Time{}, err
+	}
+
+	return time.Parse(layout, pv)
+}
+
+// ParseInLocation is like Parse, but it interprets the time as in the given location.
+// In addition to the [Parse] errors, it might return any of the [time.ParseInLocation] errors.
+// To execute several parses for the same locale, use [ParseInLocationWithLocale] as it performs better.
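+//
+// For example (an illustrative sketch; LocaleEsES is one of the generated
+// locale constants in this package):
+//
+//	t, err := lunes.ParseInLocation("Monday Jan _2 2006 15:04:05",
+//		"lunes oct 27 1988 11:53:29", lunes.LocaleEsES, time.UTC)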
+func ParseInLocation(layout string, value string, lang string, location *time.Location) (time.Time, error) {
+	locale, err := NewDefaultLocale(lang)
+	if err != nil {
+		return time.Time{}, err
+	}
+
+	return ParseInLocationWithLocale(layout, value, location, locale)
+}
+
+// ParseInLocationWithLocale is like ParseInLocation, but instead of receiving a BCP 47
+// language tag argument, it receives a built [lunes.Locale], avoiding looking up existing
+// data in each operation and allowing extensibility.
+func ParseInLocationWithLocale(layout string, value string, location *time.Location, locale Locale) (time.Time, error) {
+	pv, err := TranslateWithLocale(layout, value, locale)
+	if err != nil {
+		return time.Time{}, err
+	}
+
+	return time.ParseInLocation(layout, pv, location)
+}
+
+// Translate parses a localized textual time value from the provided locale to English.
+// It replaces short and long week day names, month names, and day periods by their
+// equivalents. The first argument must be a native Go time layout. The second argument
+// must be parseable using the format string (layout) provided as the first argument,
+// but in the foreign language. The language argument must be a well-formed BCP 47
+// language tag, e.g. ("en", "en-US") and a known locale. If no data is found for the
+// language, it returns ErrUnsupportedLocale.
+//
+// If the given locale does not support a layout element specified in the layout argument,
+// it results in an ErrUnsupportedLayoutElem error. On the other hand, if the value does
+// not match the layout, an ErrLayoutMismatch is returned.
+//
+// This function is meant to return a value that can be used with the Go standard
+// [time.Parse] or [time.ParseInLocation] methods. Although it maintains the value's empty
+// spaces that are not present in the layout string, it might drop them in the future,
+// as they are ignored by both standard time parsing functions.
+func Translate(layout string, value string, lang string) (string, error) {
+	locale, err := NewDefaultLocale(lang)
+	if err != nil {
+		return value, err
+	}
+
+	return TranslateWithLocale(layout, value, locale)
+}
+
+// TranslateWithLocale is like Translate, but instead of receiving a BCP 47 language tag
+// argument, it receives a built [lunes.Locale], avoiding looking up existing data in each
+// operation and allowing extensibility.
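+//
+// A short illustration (the expected output matches the Translate example in
+// the README):
+//
+//	locale, err := lunes.NewDefaultLocale(lunes.LocaleEsES)
+//	// handle err ...
+//	str, err := lunes.TranslateWithLocale("Monday Jan _2 15:04:05",
+//		"viernes ene 27 11:53:29", locale)
+//	// str is now "Friday Jan 27 11:53:29", ready for time.Parse.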
+func TranslateWithLocale(layout string, value string, locale Locale) (string, error) { + var err error + var sb strings.Builder + var layoutOffset, valueOffset int + + sb.Grow(len(layout) + 32) + + for layoutOffset < len(layout) { + written := false + var lookupTab, stdTab []string + + switch c := int(layout[layoutOffset]); c { + case 'J': // January, Jan + if len(layout) >= layoutOffset+3 && layout[layoutOffset:layoutOffset+3] == "Jan" { + layoutElem := "" + if len(layout) >= layoutOffset+7 && layout[layoutOffset:layoutOffset+7] == "January" { + layoutElem = "January" + lookupTab = locale.LongMonthNames() + stdTab = longMonthNamesStd + } else if !startsWithLowerCase(layout[layoutOffset+3:]) { + layoutElem = "Jan" + lookupTab = locale.ShortMonthNames() + stdTab = shortMonthNamesStd + } + + if layoutElem == "" { + break + } + + if len(lookupTab) == 0 { + return "", newUnsupportedLayoutElemError(layoutElem, locale) + } + + layoutOffset += len(layoutElem) + valueOffset, err = writeLayoutValue(layoutElem, lookupTab, stdTab, valueOffset, value, &sb) + if err != nil { + return "", err + } + + written = true + } + case 'M': // Monday, Mon + if len(layout) >= layoutOffset+3 && layout[layoutOffset:layoutOffset+3] == "Mon" { + layoutElem := "" + if len(layout) >= layoutOffset+6 && layout[layoutOffset:layoutOffset+6] == "Monday" { + layoutElem = "Monday" + lookupTab = locale.LongDayNames() + stdTab = longDayNamesStd + } else if !startsWithLowerCase(layout[layoutOffset+3:]) { + layoutElem = "Mon" + lookupTab = locale.ShortDayNames() + stdTab = shortDayNamesStd + } + + if layoutElem == "" { + break + } + + if len(lookupTab) == 0 { + return "", newUnsupportedLayoutElemError(layoutElem, locale) + } + + layoutOffset += len(layoutElem) + valueOffset, err = writeLayoutValue(layoutElem, lookupTab, stdTab, valueOffset, value, &sb) + if err != nil { + return "", err + } + written = true + } + case 'P', 'p': // PM, pm + if len(layout) >= layoutOffset+2 && unicode.ToUpper(rune(layout[layoutOffset+1])) == 'M' { + var layoutElem string + // day-periods case matters for the time package parsing functions + if c == 'p' { + layoutElem = "pm" + stdTab = dayPeriodsStdLower + } else { + layoutElem = "PM" + stdTab = dayPeriodsStdUpper + } + + lookupTab = locale.DayPeriods() + if len(lookupTab) == 0 { + return "", newUnsupportedLayoutElemError(layoutElem, locale) + } + + layoutOffset += 2 + valueOffset, err = writeLayoutValue(layoutElem, lookupTab, stdTab, valueOffset, value, &sb) + if err != nil { + return "", err + } + written = true + } + case '_': // _2, _2006, __2 + // Although no translations happens here, it is still necessary to calculate the + // variable size of `_` values, so the layoutOffset stays synchronized with + // its layout element counterpart. 
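+			// For instance (illustrative): with the layout "Jan _2" and the value
+			// "ene  2", the `_` consumes the variable padding before the day number,
+			// keeping valueOffset aligned with layoutOffset; `__2` and the literal
+			// `_` preceding "2006" are measured the same way.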
+ if len(layout) >= layoutOffset+2 && layout[layoutOffset+1] == '2' { + var layoutElemSize int + // _2006 is really a literal _, followed by the long year placeholder + if len(layout) >= layoutOffset+5 && layout[layoutOffset+1:layoutOffset+5] == "2006" { + if len(value) >= valueOffset+5 { + layoutElemSize = 5 // _2006 + } + } else { + if len(value) >= valueOffset+2 { + layoutElemSize = 2 // _2 + } + } + + if layoutElemSize > 0 { + layoutOffset += layoutElemSize + valueOffset, err = writeNextNonSpaceValue(value, valueOffset, layoutElemSize, &sb) + if err != nil { + return "", err + } + written = true + } + } + + if len(layout) >= layoutOffset+3 && layout[layoutOffset+1] == '_' && layout[layoutOffset+2] == '2' { + if len(value) >= valueOffset+3 { + layoutOffset += 3 + valueOffset, err = writeNextNonSpaceValue(value, valueOffset, 3, &sb) + if err != nil { + return "", err + } + written = true + } + } + } + + if !written { + var writtenSize int + if len(value) > valueOffset { + writtenSize, err = sb.WriteRune(rune(value[valueOffset])) + if err != nil { + return "", err + } + } + + layoutOffset++ + valueOffset += writtenSize + } + } + + if len(value) >= valueOffset { + sb.WriteString(value[valueOffset:]) + } + + return sb.String(), nil +} + +func writeNextNonSpaceValue(value string, offset int, max int, sb *strings.Builder) (int, error) { + nextValOffset, skippedSpaces, val, err := nextNonSpaceValue(value, offset, max) + if err != nil { + return offset, err + } + + if skippedSpaces > 0 { + val = strings.Repeat(" ", skippedSpaces) + val + } + + _, err = sb.WriteString(val) + if err != nil { + return offset, err + } + + return nextValOffset, nil +} + +func writeLayoutValue(layoutElem string, lookupTab, stdTab []string, valueOffset int, value string, sb *strings.Builder) (int, error) { + newOffset, skippedSpaces, foundStdValue, val := lookup(lookupTab, valueOffset, value, stdTab) + if foundStdValue == "" { + return valueOffset, newLayoutMismatchError(layoutElem, value) + } + + if skippedSpaces > 0 { + foundStdValue = strings.Repeat(" ", skippedSpaces) + foundStdValue + } + + _, err := sb.WriteString(foundStdValue) + if err != nil { + return valueOffset, err + } + + newOffset += len(val) + return newOffset, nil +} + +func nextNonSpaceValue(value string, offset int, max int) (newOffset, skippedSpaces int, val string, err error) { + newOffset = offset + for newOffset < len(value) && unicode.IsSpace(rune(value[newOffset])) { + newOffset++ + } + + skippedSpaces = newOffset - offset + if newOffset > len(value) { + return offset, skippedSpaces, "", errors.New("next non-space value not found") + } + + for newOffset < len(value) { + if !unicode.IsSpace(rune(value[newOffset])) { + val += string(value[newOffset]) + newOffset++ + } else { + return newOffset, skippedSpaces, val, nil + } + + if len(val) == max { + return newOffset, skippedSpaces, val, nil + } + } + + return newOffset, skippedSpaces, val, nil +} + +func lookup(lookupTab []string, offset int, val string, stdTab []string) (newOffset, skippedSpaces int, stdValue string, value string) { + newOffset = offset + for newOffset < len(val) && unicode.IsSpace(rune(val[newOffset])) { + newOffset++ + } + + skippedSpaces = newOffset - offset + if newOffset > len(val) { + return offset, skippedSpaces, "", val + } + + for i, v := range lookupTab { + // Already matched a more specific/longer value + if stdValue != "" && len(v) <= len(value) { + continue + } + + end := newOffset + len(v) + if end > len(val) { + continue + } + + candidate := val[newOffset:end] + 
if len(candidate) == len(v) && strings.EqualFold(candidate, v) {
+			stdValue = stdTab[i]
+			value = candidate
+		}
+	}
+
+	return newOffset, skippedSpaces, stdValue, value
+}
+
+func startsWithLowerCase(value string) bool {
+	if len(value) == 0 {
+		return false
+	}
+	c := value[0]
+	return 'a' <= c && c <= 'z'
+}
+
+// ErrLayoutMismatch indicates that a provided value does not match its layout counterpart.
+type ErrLayoutMismatch struct {
+	Value      string
+	LayoutElem string
+}
+
+func (l *ErrLayoutMismatch) Error() string {
+	return fmt.Sprintf(`value "%s" does not match the layout element "%s"`, l.Value, l.LayoutElem)
+}
+
+func (l *ErrLayoutMismatch) Is(err error) bool {
+	var target *ErrLayoutMismatch
+	if ok := errors.As(err, &target); ok {
+		return l.Value == target.Value && l.LayoutElem == target.LayoutElem
+	}
+	return false
+}
+
+func newLayoutMismatchError(elem, value string) error {
+	return &ErrLayoutMismatch{
+		LayoutElem: elem,
+		Value:      value,
+	}
+}
+
+// ErrUnsupportedLayoutElem indicates that a provided layout element is not supported by
+// the given locale/language.
+type ErrUnsupportedLayoutElem struct {
+	LayoutElem string
+	Language   string
+}
+
+func (u *ErrUnsupportedLayoutElem) Error() string {
+	return fmt.Sprintf(`layout element "%s" is not supported by the language "%s"`, u.LayoutElem, u.Language)
+}
+
+func (u *ErrUnsupportedLayoutElem) Is(err error) bool {
+	var target *ErrUnsupportedLayoutElem
+	if ok := errors.As(err, &target); ok {
+		return u.Language == target.Language && u.LayoutElem == target.LayoutElem
+	}
+	return false
+}
+
+func newUnsupportedLayoutElemError(elem string, locale Locale) error {
+	return &ErrUnsupportedLayoutElem{
+		LayoutElem: elem,
+		Language:   locale.Language(),
+	}
+}
diff --git a/vendor/github.com/elastic/lunes/magefile.go b/vendor/github.com/elastic/lunes/magefile.go
new file mode 100644
index 00000000000..9a898fe3311
--- /dev/null
+++ b/vendor/github.com/elastic/lunes/magefile.go
@@ -0,0 +1,107 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+//go:build mage
+// +build mage
+
+package main
+
+import (
+	"fmt"
+	"path/filepath"
+
+	"github.com/magefile/mage/mg" // mg contains helpful utility functions, like Deps
+	"github.com/magefile/mage/sh"
+
+	// mage:import
+	"github.com/elastic/lunes/dev-tools/mage"
+
+	devtools "github.com/elastic/lunes/dev-tools/mage"
+	"github.com/elastic/lunes/dev-tools/mage/gotool"
+)
+
+// Aliases are shortcuts to long target names.
+// nolint: deadcode // it's used by `mage`.
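+// For example, `mage llc` resolves to mage.Linter.LastChange and `mage lint`
+// to mage.Linter.All.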
+var Aliases = map[string]interface{}{
+	"llc":  mage.Linter.LastChange,
+	"lint": mage.Linter.All,
+}
+
+// Check runs all the checks.
+// nolint: deadcode,unparam // it's used as a `mage` target and requires returning an error
+func Check() error {
+	mg.Deps(devtools.InstallGoLicenser)
+	mg.Deps(devtools.Deps.CheckModuleTidy, CheckLicenseHeaders)
+	mg.Deps(devtools.CheckNoChanges)
+	return nil
+}
+
+// Fmt formats code and adds license headers.
+func Fmt() {
+	mg.Deps(devtools.GoImports.Run)
+	mg.Deps(AddLicenseHeaders)
+}
+
+// AddLicenseHeaders adds ASL2 headers to .go files.
+func AddLicenseHeaders() error {
+	fmt.Println(">> fmt - go-licenser: Adding missing headers")
+
+	mg.Deps(devtools.InstallGoLicenser)
+
+	licenser := gotool.Licenser
+
+	return licenser(
+		licenser.License("ASL2"),
+	)
+}
+
+// CheckLicenseHeaders checks ASL2 headers in .go files.
+func CheckLicenseHeaders() error {
+	mg.Deps(devtools.InstallGoLicenser)
+
+	licenser := gotool.Licenser
+
+	return licenser(
+		licenser.Check(),
+		licenser.License("ASL2"),
+	)
+}
+
+// Notice generates a NOTICE.txt file for the module.
+func Notice() error {
+	return devtools.GenerateNotice(
+		filepath.Join("dev-tools", "templates", "notice", "overrides.json"),
+		filepath.Join("dev-tools", "templates", "notice", "rules.json"),
+		filepath.Join("dev-tools", "templates", "notice", "NOTICE.txt.tmpl"),
+	)
+}
+
+// GenerateTables generates the tables.go file.
+func GenerateTables() error {
+	if err := sh.Run("go", "generate"); err != nil {
+		return err
+	}
+
+	mg.Deps(devtools.InstallGoLicenser)
+
+	licenser := gotool.Licenser
+
+	return licenser(
+		licenser.License("ASL2"),
+		licenser.Path("tables.go"),
+	)
+}
diff --git a/vendor/github.com/elastic/lunes/tables.go b/vendor/github.com/elastic/lunes/tables.go
new file mode 100644
index 00000000000..b7be6eb0008
--- /dev/null
+++ b/vendor/github.com/elastic/lunes/tables.go
@@ -0,0 +1,9616 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Code generated by running "go generate" in github.com/elastic/lunes. DO NOT EDIT.
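+
+// Each locale table below holds five rows of CLDR data, in order: abbreviated
+// day names, full day names, abbreviated month names, full month names, and
+// day periods (AM/PM); an empty row means the locale defines no such form.
+// These rows back the Locale lookups that TranslateWithLocale uses to rewrite
+// a localized value into its English equivalent. A minimal sketch, with
+// esLocale standing in for a Spanish Locale built from these tables:
+//
+//	out, _ := TranslateWithLocale("Monday Jan _2", "lunes ene  5", esLocale)
+//	// out == "Monday Jan  5"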
+ +package lunes + +var CLDRVersion = 45 + +var localeTableAa = [5][]string{ + {"Aca", "Etl", "Tal", "Arb", "Kam", "Gum", "Sab"}, + {"Acaada", "Etleeni", "Talaata", "Arbaqa", "Kamiisi", "Gumqata", "Sabti"}, + {"Qun", "Nah", "Cig", "Agd", "Cax", "Qas", "Qad", "Leq", "Way", "Dit", "Xim", "Kax"}, + {"Qunxa Garablu", "Kudo", "Ciggilta Kudo", "Agda Baxis", "Caxah Alsa", "Qasa Dirri", "Qado Dirri", "Liiqen", "Waysu", "Diteli", "Ximoli", "Kaxxa Garablu"}, + {"saaku", "carra"}, +} + +var localeTableAaDJ = [5][]string{ + {"Aca", "Etl", "Tal", "Arb", "Kam", "Gum", "Sab"}, + {"Acaada", "Etleeni", "Talaata", "Arbaqa", "Kamiisi", "Gumqata", "Sabti"}, + {"Qun", "Nah", "Cig", "Agd", "Cax", "Qas", "Qad", "Leq", "Way", "Dit", "Xim", "Kax"}, + {"Qunxa Garablu", "Kudo", "Ciggilta Kudo", "Agda Baxis", "Caxah Alsa", "Qasa Dirri", "Qado Dirri", "Leqeeni", "Waysu", "Diteli", "Ximoli", "Kaxxa Garablu"}, + {"saaku", "carra"}, +} + +var localeTableAaER = [5][]string{ + {"Aca", "Etl", "Tal", "Arb", "Kam", "Gum", "Sab"}, + {"Acaada", "Etleeni", "Talaata", "Arbaqa", "Kamiisi", "Gumqata", "Sabti"}, + {"Qun", "Nah", "Cig", "Agd", "Cax", "Qas", "Qad", "Leq", "Way", "Dit", "Xim", "Kax"}, + {"Qunxa Garablu", "Kudo", "Ciggilta Kudo", "Agda Baxis", "Caxah Alsa", "Qasa Dirri", "Qado Dirri", "Liiqen", "Waysu", "Diteli", "Ximoli", "Kaxxa Garablu"}, + {"saaku", "carra"}, +} + +var localeTableAaET = [5][]string{ + {"Aca", "Etl", "Tal", "Arb", "Kam", "Gum", "Sab"}, + {"Acaada", "Etleeni", "Talaata", "Arbaqa", "Kamiisi", "Gumqata", "Sabti"}, + {"Qun", "Nah", "Cig", "Agd", "Cax", "Qas", "Qad", "Leq", "Way", "Dit", "Xim", "Kax"}, + {"Qunxa Garablu", "Kudo", "Ciggilta Kudo", "Agda Baxis", "Caxah Alsa", "Qasa Dirri", "Qado Dirri", "Liiqen", "Waysu", "Diteli", "Ximoli", "Kaxxa Garablu"}, + {"saaku", "carra"}, +} + +var localeTableAb = [5][]string{ + {"Ам", "Ашә", "Аҩ", "Ах", "Аԥ", "Ахә", "Ас"}, + {"Амҽыша", "Ашәахьа", "Аҩаша", "Ахаша", "Аԥшьаша", "Ахәаша", "Асабша"}, + {"Ажь", "Жəаб", "Хəажә", "Мш", "Лаҵ", "Рашә", "Ԥхынгә", "Нанҳә", "Цəыб", "Жьҭ", "Абҵ", "Ԥхынҷ"}, + {"Ажьырныҳəа", "Жəабран", "Хəажəкыра", "Мшаԥы", "Лаҵара", "Рашəара", "Ԥхынгəы", "Нанҳəа", "Цəыббра", "Жьҭаара", "Абҵара", "Ԥхынҷкәын"}, + {}, +} + +var localeTableAbGE = [5][]string{ + {"Ам", "Ашә", "Аҩ", "Ах", "Аԥ", "Ахә", "Ас"}, + {"Амҽыша", "Ашәахьа", "Аҩаша", "Ахаша", "Аԥшьаша", "Ахәаша", "Асабша"}, + {"Ажь", "Жəаб", "Хəажә", "Мш", "Лаҵ", "Рашә", "Ԥхынгә", "Нанҳә", "Цəыб", "Жьҭ", "Абҵ", "Ԥхынҷ"}, + {"Ажьырныҳəа", "Жəабран", "Хəажəкыра", "Мшаԥы", "Лаҵара", "Рашəара", "Ԥхынгəы", "Нанҳəа", "Цəыббра", "Жьҭаара", "Абҵара", "Ԥхынҷкәын"}, + {}, +} + +var localeTableAf = [5][]string{ + {"So.", "Ma.", "Di.", "Wo.", "Do.", "Vr.", "Sa."}, + {"Sondag", "Maandag", "Dinsdag", "Woensdag", "Donderdag", "Vrydag", "Saterdag"}, + {"Jan.", "Feb.", "Mrt.", "Apr.", "Mei", "Jun.", "Jul.", "Aug.", "Sep.", "Okt.", "Nov.", "Des."}, + {"Januarie", "Februarie", "Maart", "April", "Mei", "Junie", "Julie", "Augustus", "September", "Oktober", "November", "Desember"}, + {"vm.", "nm."}, +} + +var localeTableAfNA = [5][]string{ + {"So.", "Ma.", "Di.", "Wo.", "Do.", "Vr.", "Sa."}, + {"Sondag", "Maandag", "Dinsdag", "Woensdag", "Donderdag", "Vrydag", "Saterdag"}, + {"Jan.", "Feb.", "Mrt.", "Apr.", "Mei", "Jun.", "Jul.", "Aug.", "Sep.", "Okt.", "Nov.", "Des."}, + {"Januarie", "Februarie", "Maart", "April", "Mei", "Junie", "Julie", "Augustus", "September", "Oktober", "November", "Desember"}, + {"vm.", "nm."}, +} + +var localeTableAfZA = [5][]string{ + {"So.", "Ma.", "Di.", "Wo.", "Do.", "Vr.", "Sa."}, + 
{"Sondag", "Maandag", "Dinsdag", "Woensdag", "Donderdag", "Vrydag", "Saterdag"}, + {"Jan.", "Feb.", "Mrt.", "Apr.", "Mei", "Jun.", "Jul.", "Aug.", "Sep.", "Okt.", "Nov.", "Des."}, + {"Januarie", "Februarie", "Maart", "April", "Mei", "Junie", "Julie", "Augustus", "September", "Oktober", "November", "Desember"}, + {"vm.", "nm."}, +} + +var localeTableAgq = [5][]string{ + {"nts", "kpa", "ghɔ", "tɔm", "ume", "ghɨ", "dzk"}, + {"tsuʔntsɨ", "tsuʔukpà", "tsuʔughɔe", "tsuʔutɔ̀mlò", "tsuʔumè", "tsuʔughɨ̂m", "tsuʔndzɨkɔʔɔ"}, + {"nùm", "kɨz", "tɨd", "taa", "see", "nzu", "dum", "fɔe", "dzu", "lɔm", "kaa", "fwo"}, + {"ndzɔ̀ŋɔ̀nùm", "ndzɔ̀ŋɔ̀kƗ̀zùʔ", "ndzɔ̀ŋɔ̀tƗ̀dʉ̀ghà", "ndzɔ̀ŋɔ̀tǎafʉ̄ghā", "ndzɔ̀ŋèsèe", "ndzɔ̀ŋɔ̀nzùghò", "ndzɔ̀ŋɔ̀dùmlo", "ndzɔ̀ŋɔ̀kwîfɔ̀e", "ndzɔ̀ŋɔ̀tƗ̀fʉ̀ghàdzughù", "ndzɔ̀ŋɔ̀ghǔuwelɔ̀m", "ndzɔ̀ŋɔ̀chwaʔàkaa wo", "ndzɔ̀ŋèfwòo"}, + {"a.g", "a.k"}, +} + +var localeTableAgqCM = [5][]string{ + {"nts", "kpa", "ghɔ", "tɔm", "ume", "ghɨ", "dzk"}, + {"tsuʔntsɨ", "tsuʔukpà", "tsuʔughɔe", "tsuʔutɔ̀mlò", "tsuʔumè", "tsuʔughɨ̂m", "tsuʔndzɨkɔʔɔ"}, + {"nùm", "kɨz", "tɨd", "taa", "see", "nzu", "dum", "fɔe", "dzu", "lɔm", "kaa", "fwo"}, + {"ndzɔ̀ŋɔ̀nùm", "ndzɔ̀ŋɔ̀kƗ̀zùʔ", "ndzɔ̀ŋɔ̀tƗ̀dʉ̀ghà", "ndzɔ̀ŋɔ̀tǎafʉ̄ghā", "ndzɔ̀ŋèsèe", "ndzɔ̀ŋɔ̀nzùghò", "ndzɔ̀ŋɔ̀dùmlo", "ndzɔ̀ŋɔ̀kwîfɔ̀e", "ndzɔ̀ŋɔ̀tƗ̀fʉ̀ghàdzughù", "ndzɔ̀ŋɔ̀ghǔuwelɔ̀m", "ndzɔ̀ŋɔ̀chwaʔàkaa wo", "ndzɔ̀ŋèfwòo"}, + {"a.g", "a.k"}, +} + +var localeTableAk = [5][]string{ + {"Kwe", "Dwo", "Ben", "Wuk", "Yaw", "Fia", "Mem"}, + {"Kwesida", "Dwowda", "Benada", "Wukuda", "Yawda", "Fida", "Memeneda"}, + {"S-Ɔ", "K-Ɔ", "E-Ɔ", "E-O", "E-K", "O-A", "A-K", "D-Ɔ", "F-Ɛ", "Ɔ-A", "Ɔ-O", "M-Ɔ"}, + {"Sanda-Ɔpɛpɔn", "Kwakwar-Ɔgyefuo", "Ebɔw-Ɔbenem", "Ebɔbira-Oforisuo", "Esusow Aketseaba-Kɔtɔnimba", "Obirade-Ayɛwohomumu", "Ayɛwoho-Kitawonsa", "Difuu-Ɔsandaa", "Fankwa-Ɛbɔ", "Ɔbɛsɛ-Ahinime", "Ɔberɛfɛw-Obubuo", "Mumu-Ɔpɛnimba"}, + {"AN", "EW"}, +} + +var localeTableAkGH = [5][]string{ + {"Kwe", "Dwo", "Ben", "Wuk", "Yaw", "Fia", "Mem"}, + {"Kwesida", "Dwowda", "Benada", "Wukuda", "Yawda", "Fida", "Memeneda"}, + {"S-Ɔ", "K-Ɔ", "E-Ɔ", "E-O", "E-K", "O-A", "A-K", "D-Ɔ", "F-Ɛ", "Ɔ-A", "Ɔ-O", "M-Ɔ"}, + {"Sanda-Ɔpɛpɔn", "Kwakwar-Ɔgyefuo", "Ebɔw-Ɔbenem", "Ebɔbira-Oforisuo", "Esusow Aketseaba-Kɔtɔnimba", "Obirade-Ayɛwohomumu", "Ayɛwoho-Kitawonsa", "Difuu-Ɔsandaa", "Fankwa-Ɛbɔ", "Ɔbɛsɛ-Ahinime", "Ɔberɛfɛw-Obubuo", "Mumu-Ɔpɛnimba"}, + {"AN", "EW"}, +} + +var localeTableAm = [5][]string{ + {"እሑድ", "ሰኞ", "ማክሰ", "ረቡዕ", "ሐሙስ", "ዓርብ", "ቅዳሜ"}, + {"እሑድ", "ሰኞ", "ማክሰኞ", "ረቡዕ", "ሐሙስ", "ዓርብ", "ቅዳሜ"}, + {"ጃን", "ፌብ", "ማርች", "ኤፕሪ", "ሜይ", "ጁን", "ጁላይ", "ኦገስ", "ሴፕቴ", "ኦክቶ", "ኖቬም", "ዲሴም"}, + {"ጃንዋሪ", "ፌብሩዋሪ", "ማርች", "ኤፕሪል", "ሜይ", "ጁን", "ጁላይ", "ኦገስት", "ሴፕቴምበር", "ኦክቶበር", "ኖቬምበር", "ዲሴምበር"}, + {"ጥዋት", "ከሰዓት"}, +} + +var localeTableAmET = [5][]string{ + {"እሑድ", "ሰኞ", "ማክሰ", "ረቡዕ", "ሐሙስ", "ዓርብ", "ቅዳሜ"}, + {"እሑድ", "ሰኞ", "ማክሰኞ", "ረቡዕ", "ሐሙስ", "ዓርብ", "ቅዳሜ"}, + {"ጃን", "ፌብ", "ማርች", "ኤፕሪ", "ሜይ", "ጁን", "ጁላይ", "ኦገስ", "ሴፕቴ", "ኦክቶ", "ኖቬም", "ዲሴም"}, + {"ጃንዋሪ", "ፌብሩዋሪ", "ማርች", "ኤፕሪል", "ሜይ", "ጁን", "ጁላይ", "ኦገስት", "ሴፕቴምበር", "ኦክቶበር", "ኖቬምበር", "ዲሴምበር"}, + {"ጥዋት", "ከሰዓት"}, +} + +var localeTableAn = [5][]string{ + {"dom", "lun", "mar", "mie", "chu", "vie", "sab"}, + {"dominche", "luns", "martz", "miercres", "chueves", "viernes", "sabado"}, + {"chi.", "feb.", "mar.", "abr.", "may.", "chn.", "chl.", "ago.", "set.", "oct.", "nov.", "avi."}, + {"de chinero", "de febrero", "de marzo", "d’abril", "de mayo", "de chunyo", "de chuliol", "d’agosto", "de setiembre", "d’octubre", "de noviembre", "d’aviento"}, + {"a.m.", "p.m."}, +} + 
+var localeTableAnES = [5][]string{ + {"dom", "lun", "mar", "mie", "chu", "vie", "sab"}, + {"dominche", "luns", "martz", "miercres", "chueves", "viernes", "sabado"}, + {"chi.", "feb.", "mar.", "abr.", "may.", "chn.", "chl.", "ago.", "set.", "oct.", "nov.", "avi."}, + {"de chinero", "de febrero", "de marzo", "d’abril", "de mayo", "de chunyo", "de chuliol", "d’agosto", "de setiembre", "d’octubre", "de noviembre", "d’aviento"}, + {"a.m.", "p.m."}, +} + +var localeTableApc = [5][]string{ + {}, + {"الأحد", "التنين", "التلاتا", "الأربعا", "الخميس", "الجمعة", "السبت"}, + {}, + {"كانون التاني", "شباط", "أذار", "نيسان", "أيار", "حزيران", "تموز", "آب", "أيلول", "تشرين الأول", "تشرين التاني", "كانون الأول"}, + {}, +} + +var localeTableApcSY = [5][]string{ + {}, + {"الأحد", "التنين", "التلاتا", "الأربعا", "الخميس", "الجمعة", "السبت"}, + {}, + {"كانون التاني", "شباط", "أذار", "نيسان", "أيار", "حزيران", "تموز", "آب", "أيلول", "تشرين الأول", "تشرين التاني", "كانون الأول"}, + {}, +} + +var localeTableAr = [5][]string{ + {}, + {"الأحد", "الاثنين", "الثلاثاء", "الأربعاء", "الخميس", "الجمعة", "السبت"}, + {}, + {"يناير", "فبراير", "مارس", "أبريل", "مايو", "يونيو", "يوليو", "أغسطس", "سبتمبر", "أكتوبر", "نوفمبر", "ديسمبر"}, + {"ص", "م"}, +} + +var localeTableAr001 = [5][]string{ + {}, + {"الأحد", "الاثنين", "الثلاثاء", "الأربعاء", "الخميس", "الجمعة", "السبت"}, + {}, + {"يناير", "فبراير", "مارس", "أبريل", "مايو", "يونيو", "يوليو", "أغسطس", "سبتمبر", "أكتوبر", "نوفمبر", "ديسمبر"}, + {"ص", "م"}, +} + +var localeTableArAE = [5][]string{ + {}, + {"الأحد", "الاثنين", "الثلاثاء", "الأربعاء", "الخميس", "الجمعة", "السبت"}, + {}, + {"يناير", "فبراير", "مارس", "أبريل", "مايو", "يونيو", "يوليو", "أغسطس", "سبتمبر", "أكتوبر", "نوفمبر", "ديسمبر"}, + {"ص", "م"}, +} + +var localeTableArBH = [5][]string{ + {}, + {"الأحد", "الاثنين", "الثلاثاء", "الأربعاء", "الخميس", "الجمعة", "السبت"}, + {}, + {"يناير", "فبراير", "مارس", "أبريل", "مايو", "يونيو", "يوليو", "أغسطس", "سبتمبر", "أكتوبر", "نوفمبر", "ديسمبر"}, + {"ص", "م"}, +} + +var localeTableArDJ = [5][]string{ + {}, + {"الأحد", "الاثنين", "الثلاثاء", "الأربعاء", "الخميس", "الجمعة", "السبت"}, + {}, + {"يناير", "فبراير", "مارس", "أبريل", "مايو", "يونيو", "يوليو", "أغسطس", "سبتمبر", "أكتوبر", "نوفمبر", "ديسمبر"}, + {"ص", "م"}, +} + +var localeTableArDZ = [5][]string{ + {}, + {"الأحد", "الاثنين", "الثلاثاء", "الأربعاء", "الخميس", "الجمعة", "السبت"}, + {}, + {"جانفي", "فيفري", "مارس", "أفريل", "ماي", "جوان", "جويلية", "أوت", "سبتمبر", "أكتوبر", "نوفمبر", "ديسمبر"}, + {"ص", "م"}, +} + +var localeTableArEG = [5][]string{ + {}, + {"الأحد", "الاثنين", "الثلاثاء", "الأربعاء", "الخميس", "الجمعة", "السبت"}, + {}, + {"يناير", "فبراير", "مارس", "أبريل", "مايو", "يونيو", "يوليو", "أغسطس", "سبتمبر", "أكتوبر", "نوفمبر", "ديسمبر"}, + {"ص", "م"}, +} + +var localeTableArEH = [5][]string{ + {}, + {"الأحد", "الاثنين", "الثلاثاء", "الأربعاء", "الخميس", "الجمعة", "السبت"}, + {}, + {"يناير", "فبراير", "مارس", "أبريل", "مايو", "يونيو", "يوليو", "أغسطس", "سبتمبر", "أكتوبر", "نوفمبر", "ديسمبر"}, + {"ص", "م"}, +} + +var localeTableArER = [5][]string{ + {}, + {"الأحد", "الاثنين", "الثلاثاء", "الأربعاء", "الخميس", "الجمعة", "السبت"}, + {}, + {"يناير", "فبراير", "مارس", "أبريل", "مايو", "يونيو", "يوليو", "أغسطس", "سبتمبر", "أكتوبر", "نوفمبر", "ديسمبر"}, + {"ص", "م"}, +} + +var localeTableArIL = [5][]string{ + {}, + {"الأحد", "الاثنين", "الثلاثاء", "الأربعاء", "الخميس", "الجمعة", "السبت"}, + {}, + {"يناير", "فبراير", "مارس", "أبريل", "مايو", "يونيو", "يوليو", "أغسطس", "سبتمبر", "أكتوبر", "نوفمبر", "ديسمبر"}, + 
{"ص", "م"}, +} + +var localeTableArIQ = [5][]string{ + {}, + {"الأحد", "الاثنين", "الثلاثاء", "الأربعاء", "الخميس", "الجمعة", "السبت"}, + {"كانون الثاني", "شباط", "آذار", "نيسان", "أيار", "حزيران", "تموز", "آب", "أيلول", "تشرين الأول", "تشرين الثاني", "كانون الأول"}, + {"كانون الثاني", "شباط", "آذار", "نيسان", "أيار", "حزيران", "تموز", "آب", "أيلول", "تشرين الأول", "تشرين الثاني", "كانون الأول"}, + {"ص", "م"}, +} + +var localeTableArJO = [5][]string{ + {}, + {"الأحد", "الاثنين", "الثلاثاء", "الأربعاء", "الخميس", "الجمعة", "السبت"}, + {}, + {"كانون الثاني", "شباط", "آذار", "نيسان", "أيار", "حزيران", "تموز", "آب", "أيلول", "تشرين الأول", "تشرين الثاني", "كانون الأول"}, + {"ص", "م"}, +} + +var localeTableArKM = [5][]string{ + {}, + {"الأحد", "الاثنين", "الثلاثاء", "الأربعاء", "الخميس", "الجمعة", "السبت"}, + {}, + {"يناير", "فبراير", "مارس", "أبريل", "مايو", "يونيو", "يوليو", "أغسطس", "سبتمبر", "أكتوبر", "نوفمبر", "ديسمبر"}, + {"ص", "م"}, +} + +var localeTableArKW = [5][]string{ + {}, + {"الأحد", "الاثنين", "الثلاثاء", "الأربعاء", "الخميس", "الجمعة", "السبت"}, + {}, + {"يناير", "فبراير", "مارس", "أبريل", "مايو", "يونيو", "يوليو", "أغسطس", "سبتمبر", "أكتوبر", "نوفمبر", "ديسمبر"}, + {"ص", "م"}, +} + +var localeTableArLB = [5][]string{ + {}, + {"الأحد", "الاثنين", "الثلاثاء", "الأربعاء", "الخميس", "الجمعة", "السبت"}, + {}, + {"كانون الثاني", "شباط", "آذار", "نيسان", "أيار", "حزيران", "تموز", "آب", "أيلول", "تشرين الأول", "تشرين الثاني", "كانون الأول"}, + {"ص", "م"}, +} + +var localeTableArLY = [5][]string{ + {}, + {"الأحد", "الاثنين", "الثلاثاء", "الأربعاء", "الخميس", "الجمعة", "السبت"}, + {}, + {"يناير", "فبراير", "مارس", "أبريل", "مايو", "يونيو", "يوليو", "أغسطس", "سبتمبر", "أكتوبر", "نوفمبر", "ديسمبر"}, + {"ص", "م"}, +} + +var localeTableArMA = [5][]string{ + {}, + {"الأحد", "الاثنين", "الثلاثاء", "الأربعاء", "الخميس", "الجمعة", "السبت"}, + {}, + {"يناير", "فبراير", "مارس", "أبريل", "ماي", "يونيو", "يوليوز", "غشت", "شتنبر", "أكتوبر", "نونبر", "دجنبر"}, + {"ص", "م"}, +} + +var localeTableArMR = [5][]string{ + {}, + {"الأحد", "الاثنين", "الثلاثاء", "الأربعاء", "الخميس", "الجمعة", "السبت"}, + {}, + {"يناير", "فبراير", "مارس", "إبريل", "مايو", "يونيو", "يوليو", "أغشت", "شتمبر", "أكتوبر", "نوفمبر", "دجمبر"}, + {"ص", "م"}, +} + +var localeTableArOM = [5][]string{ + {}, + {"الأحد", "الاثنين", "الثلاثاء", "الأربعاء", "الخميس", "الجمعة", "السبت"}, + {}, + {"يناير", "فبراير", "مارس", "أبريل", "مايو", "يونيو", "يوليو", "أغسطس", "سبتمبر", "أكتوبر", "نوفمبر", "ديسمبر"}, + {"ص", "م"}, +} + +var localeTableArPS = [5][]string{ + {}, + {"الأحد", "الاثنين", "الثلاثاء", "الأربعاء", "الخميس", "الجمعة", "السبت"}, + {}, + {"كانون الثاني", "شباط", "آذار", "نيسان", "أيار", "حزيران", "تموز", "آب", "أيلول", "تشرين الأول", "تشرين الثاني", "كانون الأول"}, + {"ص", "م"}, +} + +var localeTableArQA = [5][]string{ + {}, + {"الأحد", "الاثنين", "الثلاثاء", "الأربعاء", "الخميس", "الجمعة", "السبت"}, + {}, + {"يناير", "فبراير", "مارس", "أبريل", "مايو", "يونيو", "يوليو", "أغسطس", "سبتمبر", "أكتوبر", "نوفمبر", "ديسمبر"}, + {"ص", "م"}, +} + +var localeTableArSA = [5][]string{ + {}, + {"الأحد", "الاثنين", "الثلاثاء", "الأربعاء", "الخميس", "الجمعة", "السبت"}, + {}, + {"يناير", "فبراير", "مارس", "أبريل", "مايو", "يونيو", "يوليو", "أغسطس", "سبتمبر", "أكتوبر", "نوفمبر", "ديسمبر"}, + {"ص", "م"}, +} + +var localeTableArSD = [5][]string{ + {}, + {"الأحد", "الاثنين", "الثلاثاء", "الأربعاء", "الخميس", "الجمعة", "السبت"}, + {}, + {"يناير", "فبراير", "مارس", "أبريل", "مايو", "يونيو", "يوليو", "أغسطس", "سبتمبر", "أكتوبر", "نوفمبر", "ديسمبر"}, + 
{"ص", "م"}, +} + +var localeTableArSO = [5][]string{ + {}, + {"الأحد", "الاثنين", "الثلاثاء", "الأربعاء", "الخميس", "الجمعة", "السبت"}, + {}, + {"يناير", "فبراير", "مارس", "أبريل", "مايو", "يونيو", "يوليو", "أغسطس", "سبتمبر", "أكتوبر", "نوفمبر", "ديسمبر"}, + {"ص", "م"}, +} + +var localeTableArSS = [5][]string{ + {}, + {"الأحد", "الاثنين", "الثلاثاء", "الأربعاء", "الخميس", "الجمعة", "السبت"}, + {}, + {"يناير", "فبراير", "مارس", "أبريل", "مايو", "يونيو", "يوليو", "أغسطس", "سبتمبر", "أكتوبر", "نوفمبر", "ديسمبر"}, + {"ص", "م"}, +} + +var localeTableArSY = [5][]string{ + {}, + {"الأحد", "الاثنين", "الثلاثاء", "الأربعاء", "الخميس", "الجمعة", "السبت"}, + {}, + {"كانون الثاني", "شباط", "آذار", "نيسان", "أيار", "حزيران", "تموز", "آب", "أيلول", "تشرين الأول", "تشرين الثاني", "كانون الأول"}, + {"ص", "م"}, +} + +var localeTableArTD = [5][]string{ + {}, + {"الأحد", "الاثنين", "الثلاثاء", "الأربعاء", "الخميس", "الجمعة", "السبت"}, + {}, + {"يناير", "فبراير", "مارس", "أبريل", "مايو", "يونيو", "يوليو", "أغسطس", "سبتمبر", "أكتوبر", "نوفمبر", "ديسمبر"}, + {"ص", "م"}, +} + +var localeTableArTN = [5][]string{ + {}, + {"الأحد", "الاثنين", "الثلاثاء", "الأربعاء", "الخميس", "الجمعة", "السبت"}, + {}, + {"جانفي", "فيفري", "مارس", "أفريل", "ماي", "جوان", "جويلية", "أوت", "سبتمبر", "أكتوبر", "نوفمبر", "ديسمبر"}, + {"ص", "م"}, +} + +var localeTableArYE = [5][]string{ + {}, + {"الأحد", "الاثنين", "الثلاثاء", "الأربعاء", "الخميس", "الجمعة", "السبت"}, + {}, + {"يناير", "فبراير", "مارس", "أبريل", "مايو", "يونيو", "يوليو", "أغسطس", "سبتمبر", "أكتوبر", "نوفمبر", "ديسمبر"}, + {"ص", "م"}, +} + +var localeTableAs = [5][]string{ + {"দেও", "সোম", "মঙ্গল", "বুধ", "বৃহ", "শুক্ৰ", "শনি"}, + {"দেওবাৰ", "সোমবাৰ", "মঙ্গলবাৰ", "বুধবাৰ", "বৃহস্পতিবাৰ", "শুক্ৰবাৰ", "শনিবাৰ"}, + {"জানু", "ফেব্ৰু", "মাৰ্চ", "এপ্ৰিল", "মে’", "জুন", "জুলাই", "আগ", "ছেপ্তে", "অক্টো", "নৱে", "ডিচে"}, + {"জানুৱাৰী", "ফেব্ৰুৱাৰী", "মাৰ্চ", "এপ্ৰিল", "মে’", "জুন", "জুলাই", "আগষ্ট", "ছেপ্তেম্বৰ", "অক্টোবৰ", "নৱেম্বৰ", "ডিচেম্বৰ"}, + {"পূৰ্বাহ্ন", "অপৰাহ্ন"}, +} + +var localeTableAsIN = [5][]string{ + {"দেও", "সোম", "মঙ্গল", "বুধ", "বৃহ", "শুক্ৰ", "শনি"}, + {"দেওবাৰ", "সোমবাৰ", "মঙ্গলবাৰ", "বুধবাৰ", "বৃহস্পতিবাৰ", "শুক্ৰবাৰ", "শনিবাৰ"}, + {"জানু", "ফেব্ৰু", "মাৰ্চ", "এপ্ৰিল", "মে’", "জুন", "জুলাই", "আগ", "ছেপ্তে", "অক্টো", "নৱে", "ডিচে"}, + {"জানুৱাৰী", "ফেব্ৰুৱাৰী", "মাৰ্চ", "এপ্ৰিল", "মে’", "জুন", "জুলাই", "আগষ্ট", "ছেপ্তেম্বৰ", "অক্টোবৰ", "নৱেম্বৰ", "ডিচেম্বৰ"}, + {"পূৰ্বাহ্ন", "অপৰাহ্ন"}, +} + +var localeTableAsa = [5][]string{ + {"Jpi", "Jtt", "Jnn", "Jtn", "Alh", "Ijm", "Jmo"}, + {"Jumapili", "Jumatatu", "Jumanne", "Jumatano", "Alhamisi", "Ijumaa", "Jumamosi"}, + {"Jan", "Feb", "Mac", "Apr", "Mei", "Jun", "Jul", "Ago", "Sep", "Okt", "Nov", "Dec"}, + {"Januari", "Februari", "Machi", "Aprili", "Mei", "Juni", "Julai", "Agosti", "Septemba", "Oktoba", "Novemba", "Desemba"}, + {"icheheavo", "ichamthi"}, +} + +var localeTableAsaTZ = [5][]string{ + {"Jpi", "Jtt", "Jnn", "Jtn", "Alh", "Ijm", "Jmo"}, + {"Jumapili", "Jumatatu", "Jumanne", "Jumatano", "Alhamisi", "Ijumaa", "Jumamosi"}, + {"Jan", "Feb", "Mac", "Apr", "Mei", "Jun", "Jul", "Ago", "Sep", "Okt", "Nov", "Dec"}, + {"Januari", "Februari", "Machi", "Aprili", "Mei", "Juni", "Julai", "Agosti", "Septemba", "Oktoba", "Novemba", "Desemba"}, + {"icheheavo", "ichamthi"}, +} + +var localeTableAst = [5][]string{ + {"dom", "llu", "mar", "mié", "xue", "vie", "sáb"}, + {"domingu", "llunes", "martes", "miércoles", "xueves", "vienres", "sábadu"}, + {"xin", "feb", "mar", "abr", "may", "xun", "xnt", "ago", "set", "och", "pay", 
"avi"}, + {"de xineru", "de febreru", "de marzu", "d’abril", "de mayu", "de xunu", "de xunetu", "d’agostu", "de setiembre", "d’ochobre", "de payares", "d’avientu"}, + {"a", "p"}, +} + +var localeTableAstES = [5][]string{ + {"dom", "llu", "mar", "mié", "xue", "vie", "sáb"}, + {"domingu", "llunes", "martes", "miércoles", "xueves", "vienres", "sábadu"}, + {"xin", "feb", "mar", "abr", "may", "xun", "xnt", "ago", "set", "och", "pay", "avi"}, + {"de xineru", "de febreru", "de marzu", "d’abril", "de mayu", "de xunu", "de xunetu", "d’agostu", "de setiembre", "d’ochobre", "de payares", "d’avientu"}, + {"a", "p"}, +} + +var localeTableAz = [5][]string{ + {"B.", "B.e.", "Ç.a.", "Ç.", "C.a.", "C.", "Ş."}, + {"bazar", "bazar ertəsi", "çərşənbə axşamı", "çərşənbə", "cümə axşamı", "cümə", "şənbə"}, + {"yan", "fev", "mar", "apr", "may", "iyn", "iyl", "avq", "sen", "okt", "noy", "dek"}, + {"yanvar", "fevral", "mart", "aprel", "may", "iyun", "iyul", "avqust", "sentyabr", "oktyabr", "noyabr", "dekabr"}, + {"a", "p"}, +} + +var localeTableAzCyrl = [5][]string{ + {"Б.", "Б.Е.", "Ч.А.", "Ч.", "Ҹ.А.", "Ҹ.", "Ш."}, + {"базар", "базар ертәси", "чәршәнбә ахшамы", "чәршәнбә", "ҹүмә ахшамы", "ҹүмә", "шәнбә"}, + {"јан", "фев", "мар", "апр", "май", "ијн", "ијл", "авг", "сен", "окт", "ној", "дек"}, + {"јанвар", "феврал", "март", "апрел", "май", "ијун", "ијул", "август", "сентјабр", "октјабр", "нојабр", "декабр"}, + {"АМ", "ПМ"}, +} + +var localeTableAzCyrlAZ = [5][]string{ + {"Б.", "Б.Е.", "Ч.А.", "Ч.", "Ҹ.А.", "Ҹ.", "Ш."}, + {"базар", "базар ертәси", "чәршәнбә ахшамы", "чәршәнбә", "ҹүмә ахшамы", "ҹүмә", "шәнбә"}, + {"јан", "фев", "мар", "апр", "май", "ијн", "ијл", "авг", "сен", "окт", "ној", "дек"}, + {"јанвар", "феврал", "март", "апрел", "май", "ијун", "ијул", "август", "сентјабр", "октјабр", "нојабр", "декабр"}, + {"АМ", "ПМ"}, +} + +var localeTableAzLatn = [5][]string{ + {"B.", "B.e.", "Ç.a.", "Ç.", "C.a.", "C.", "Ş."}, + {"bazar", "bazar ertəsi", "çərşənbə axşamı", "çərşənbə", "cümə axşamı", "cümə", "şənbə"}, + {"yan", "fev", "mar", "apr", "may", "iyn", "iyl", "avq", "sen", "okt", "noy", "dek"}, + {"yanvar", "fevral", "mart", "aprel", "may", "iyun", "iyul", "avqust", "sentyabr", "oktyabr", "noyabr", "dekabr"}, + {"a", "p"}, +} + +var localeTableAzLatnAZ = [5][]string{ + {"B.", "B.e.", "Ç.a.", "Ç.", "C.a.", "C.", "Ş."}, + {"bazar", "bazar ertəsi", "çərşənbə axşamı", "çərşənbə", "cümə axşamı", "cümə", "şənbə"}, + {"yan", "fev", "mar", "apr", "may", "iyn", "iyl", "avq", "sen", "okt", "noy", "dek"}, + {"yanvar", "fevral", "mart", "aprel", "may", "iyun", "iyul", "avqust", "sentyabr", "oktyabr", "noyabr", "dekabr"}, + {"a", "p"}, +} + +var localeTableBal = [5][]string{ + {"یک", "دو", "سئے", "چار", "پنچ", "جمه", "شم"}, + {"یکشمبه", "دوشمبه", "سئیشمبه", "چارشمبه", "پنچشمبه", "جمه", "شمبه"}, + {"جن", "پر", "مار", "اپر", "مئیی", "جون", "جۆل", "اگست", "ستم", "اکت", "نئوم", "دسم"}, + {"جنوری", "پروری", "مارچ", "اپرێل", "مئیی", "جون", "جۆلایی", "اگست", "ستمبر", "اکتوبر", "نئومبر", "دسمبر"}, + {}, +} + +var localeTableBalArab = [5][]string{ + {"یک", "دو", "سئے", "چار", "پنچ", "جمه", "شم"}, + {"یکشمبه", "دوشمبه", "سئیشمبه", "چارشمبه", "پنچشمبه", "جمه", "شمبه"}, + {"جن", "پر", "مار", "اپر", "مئیی", "جون", "جۆل", "اگست", "ستم", "اکت", "نئوم", "دسم"}, + {"جنوری", "پروری", "مارچ", "اپرێل", "مئیی", "جون", "جۆلایی", "اگست", "ستمبر", "اکتوبر", "نئومبر", "دسمبر"}, + {}, +} + +var localeTableBalArabPK = [5][]string{ + {"یک", "دو", "سئے", "چار", "پنچ", "جمه", "شم"}, + {"یکشمبه", "دوشمبه", "سئیشمبه", "چارشمبه", "پنچشمبه", "جمه", "شمبه"}, + 
{"جن", "پر", "مار", "اپر", "مئیی", "جون", "جۆل", "اگست", "ستم", "اکت", "نئوم", "دسم"}, + {"جنوری", "پروری", "مارچ", "اپرێل", "مئیی", "جون", "جۆلایی", "اگست", "ستمبر", "اکتوبر", "نئومبر", "دسمبر"}, + {}, +} + +var localeTableBalLatn = [5][]string{ + {"Yak", "Do", "Say", "Chá", "Pan", "Jom", "Sha"}, + {"Yakshambeh", "Doshambeh", "Sayshambeh", "Chárshambeh", "Panchshambeh", "Jomah", "Shambeh"}, + {"Jan", "Par", "Már", "Apr", "Mai", "Jun", "Jól", "Aga", "Sat", "Akt", "Naw", "Das"}, + {"Janwari", "Parwari", "Márch", "Aprél", "Mai", "Jun", "Jólái", "Agast", "Satambar", "Aktubar", "Nawambar", "Dasambar"}, + {}, +} + +var localeTableBalLatnPK = [5][]string{ + {"Yak", "Do", "Say", "Chá", "Pan", "Jom", "Sha"}, + {"Yakshambeh", "Doshambeh", "Sayshambeh", "Chárshambeh", "Panchshambeh", "Jomah", "Shambeh"}, + {"Jan", "Par", "Már", "Apr", "Mai", "Jun", "Jól", "Aga", "Sat", "Akt", "Naw", "Das"}, + {"Janwari", "Parwari", "Márch", "Aprél", "Mai", "Jun", "Jólái", "Agast", "Satambar", "Aktubar", "Nawambar", "Dasambar"}, + {}, +} + +var localeTableBas = [5][]string{ + {"nɔy", "nja", "uum", "ŋge", "mbɔ", "kɔɔ", "jon"}, + {"ŋgwà nɔ̂y", "ŋgwà njaŋgumba", "ŋgwà ûm", "ŋgwà ŋgê", "ŋgwà mbɔk", "ŋgwà kɔɔ", "ŋgwà jôn"}, + {"kɔn", "mac", "mat", "mto", "mpu", "hil", "nje", "hik", "dip", "bio", "may", "liɓ"}, + {"Kɔndɔŋ", "Màcɛ̂l", "Màtùmb", "Màtop", "M̀puyɛ", "Hìlòndɛ̀", "Njèbà", "Hìkaŋ", "Dìpɔ̀s", "Bìòôm", "Màyɛsèp", "Lìbuy li ńyèe"}, + {"Ibikɛ̂glà", "Iɓugajɔp"}, +} + +var localeTableBasCM = [5][]string{ + {"nɔy", "nja", "uum", "ŋge", "mbɔ", "kɔɔ", "jon"}, + {"ŋgwà nɔ̂y", "ŋgwà njaŋgumba", "ŋgwà ûm", "ŋgwà ŋgê", "ŋgwà mbɔk", "ŋgwà kɔɔ", "ŋgwà jôn"}, + {"kɔn", "mac", "mat", "mto", "mpu", "hil", "nje", "hik", "dip", "bio", "may", "liɓ"}, + {"Kɔndɔŋ", "Màcɛ̂l", "Màtùmb", "Màtop", "M̀puyɛ", "Hìlòndɛ̀", "Njèbà", "Hìkaŋ", "Dìpɔ̀s", "Bìòôm", "Màyɛsèp", "Lìbuy li ńyèe"}, + {"Ibikɛ̂glà", "Iɓugajɔp"}, +} + +var localeTableBe = [5][]string{ + {"нд", "пн", "аў", "ср", "чц", "пт", "сб"}, + {"нядзеля", "панядзелак", "аўторак", "серада", "чацвер", "пятніца", "субота"}, + {"сту", "лют", "сак", "кра", "мая", "чэр", "ліп", "жні", "вер", "кас", "ліс", "сне"}, + {"студзеня", "лютага", "сакавіка", "красавіка", "мая", "чэрвеня", "ліпеня", "жніўня", "верасня", "кастрычніка", "лістапада", "снежня"}, + {"am", "pm"}, +} + +var localeTableBeBY = [5][]string{ + {"нд", "пн", "аў", "ср", "чц", "пт", "сб"}, + {"нядзеля", "панядзелак", "аўторак", "серада", "чацвер", "пятніца", "субота"}, + {"сту", "лют", "сак", "кра", "мая", "чэр", "ліп", "жні", "вер", "кас", "ліс", "сне"}, + {"студзеня", "лютага", "сакавіка", "красавіка", "мая", "чэрвеня", "ліпеня", "жніўня", "верасня", "кастрычніка", "лістапада", "снежня"}, + {"am", "pm"}, +} + +var localeTableBetarask = [5][]string{ + {"няд", "пан", "аўт", "сер", "чац", "пят", "суб"}, + {"нядзеля", "панядзелак", "аўторак", "серада", "чацьвер", "пятніца", "субота"}, + {"сту", "лют", "сак", "кра", "тра", "чэр", "ліп", "жні", "вер", "кас", "ліс", "сьн"}, + {"студзеня", "лютага", "сакавіка", "красавіка", "траўня", "чэрвеня", "ліпеня", "жніўня", "верасьня", "кастрычніка", "лістапада", "сьнежня"}, + {"апоўначы", "апоўдні"}, +} + +var localeTableBem = [5][]string{ + {}, + {"Pa Mulungu", "Palichimo", "Palichibuli", "Palichitatu", "Palichine", "Palichisano", "Pachibelushi"}, + {"Jan", "Feb", "Mac", "Epr", "Mei", "Jun", "Jul", "Oga", "Sep", "Okt", "Nov", "Dis"}, + {"Januari", "Februari", "Machi", "Epreo", "Mei", "Juni", "Julai", "Ogasti", "Septemba", "Oktoba", "Novemba", "Disemba"}, + {"uluchelo", "akasuba"}, +} + +var 
localeTableBemZM = [5][]string{ + {}, + {"Pa Mulungu", "Palichimo", "Palichibuli", "Palichitatu", "Palichine", "Palichisano", "Pachibelushi"}, + {"Jan", "Feb", "Mac", "Epr", "Mei", "Jun", "Jul", "Oga", "Sep", "Okt", "Nov", "Dis"}, + {"Januari", "Februari", "Machi", "Epreo", "Mei", "Juni", "Julai", "Ogasti", "Septemba", "Oktoba", "Novemba", "Disemba"}, + {"uluchelo", "akasuba"}, +} + +var localeTableBew = [5][]string{ + {"Min", "Sen", "Sel", "Reb", "Kem", "Jum", "Sap"}, + {"Minggu", "Senèn", "Selasa", "Rebo", "Kemis", "Juma’at", "Saptu"}, + {"Jan", "Pèb", "Mar", "Apr", "Méi", "Jun", "Jul", "Ags", "Sèp", "Okt", "Nop", "Dés"}, + {"Januari", "Pèbruari", "Maret", "April", "Méi", "Juni", "Juli", "Agustus", "Sèptèmber", "Oktober", "Nopèmber", "Désèmber"}, + {"pg/sg", "sr/mlm"}, +} + +var localeTableBewID = [5][]string{ + {"Min", "Sen", "Sel", "Reb", "Kem", "Jum", "Sap"}, + {"Minggu", "Senèn", "Selasa", "Rebo", "Kemis", "Juma’at", "Saptu"}, + {"Jan", "Pèb", "Mar", "Apr", "Méi", "Jun", "Jul", "Ags", "Sèp", "Okt", "Nop", "Dés"}, + {"Januari", "Pèbruari", "Maret", "April", "Méi", "Juni", "Juli", "Agustus", "Sèptèmber", "Oktober", "Nopèmber", "Désèmber"}, + {"pg/sg", "sr/mlm"}, +} + +var localeTableBez = [5][]string{ + {"Mul", "Vil", "Hiv", "Hid", "Hit", "Hih", "Lem"}, + {"pa mulungu", "pa shahuviluha", "pa hivili", "pa hidatu", "pa hitayi", "pa hihanu", "pa shahulembela"}, + {"Hut", "Vil", "Dat", "Tai", "Han", "Sit", "Sab", "Nan", "Tis", "Kum", "Kmj", "Kmb"}, + {"pa mwedzi gwa hutala", "pa mwedzi gwa wuvili", "pa mwedzi gwa wudatu", "pa mwedzi gwa wutai", "pa mwedzi gwa wuhanu", "pa mwedzi gwa sita", "pa mwedzi gwa saba", "pa mwedzi gwa nane", "pa mwedzi gwa tisa", "pa mwedzi gwa kumi", "pa mwedzi gwa kumi na moja", "pa mwedzi gwa kumi na mbili"}, + {"pamilau", "pamunyi"}, +} + +var localeTableBezTZ = [5][]string{ + {"Mul", "Vil", "Hiv", "Hid", "Hit", "Hih", "Lem"}, + {"pa mulungu", "pa shahuviluha", "pa hivili", "pa hidatu", "pa hitayi", "pa hihanu", "pa shahulembela"}, + {"Hut", "Vil", "Dat", "Tai", "Han", "Sit", "Sab", "Nan", "Tis", "Kum", "Kmj", "Kmb"}, + {"pa mwedzi gwa hutala", "pa mwedzi gwa wuvili", "pa mwedzi gwa wudatu", "pa mwedzi gwa wutai", "pa mwedzi gwa wuhanu", "pa mwedzi gwa sita", "pa mwedzi gwa saba", "pa mwedzi gwa nane", "pa mwedzi gwa tisa", "pa mwedzi gwa kumi", "pa mwedzi gwa kumi na moja", "pa mwedzi gwa kumi na mbili"}, + {"pamilau", "pamunyi"}, +} + +var localeTableBg = [5][]string{ + {"нд", "пн", "вт", "ср", "чт", "пт", "сб"}, + {"неделя", "понеделник", "вторник", "сряда", "четвъртък", "петък", "събота"}, + {"яну", "фев", "март", "апр", "май", "юни", "юли", "авг", "сеп", "окт", "ное", "дек"}, + {"януари", "февруари", "март", "април", "май", "юни", "юли", "август", "септември", "октомври", "ноември", "декември"}, + {"am", "pm"}, +} + +var localeTableBgBG = [5][]string{ + {"нд", "пн", "вт", "ср", "чт", "пт", "сб"}, + {"неделя", "понеделник", "вторник", "сряда", "четвъртък", "петък", "събота"}, + {"яну", "фев", "март", "апр", "май", "юни", "юли", "авг", "сеп", "окт", "ное", "дек"}, + {"януари", "февруари", "март", "април", "май", "юни", "юли", "август", "септември", "октомври", "ноември", "декември"}, + {"am", "pm"}, +} + +var localeTableBgc = [5][]string{ + {}, + {"ऐतवार", "सोमवार", "मंगलवार", "बुधवार", "बृहस्पतवार", "शुक्रवार", "शनिवार"}, + {}, + {"जनवरी", "फरवरी", "मार्च", "अप्रैल", "मई", "जून", "जुलाई", "अगस्त", "सितम्बर", "अक्टूबर", "नवम्बर", "दिसंबर"}, + {}, +} + +var localeTableBgcIN = [5][]string{ + {}, + {"ऐतवार", "सोमवार", "मंगलवार", "बुधवार", "बृहस्पतवार", 
"शुक्रवार", "शनिवार"}, + {}, + {"जनवरी", "फरवरी", "मार्च", "अप्रैल", "मई", "जून", "जुलाई", "अगस्त", "सितम्बर", "अक्टूबर", "नवम्बर", "दिसंबर"}, + {}, +} + +var localeTableBho = [5][]string{ + {}, + {"रबीबार", "सोमबार", "मंगलबार", "बुधबार", "बृहस्पतिबार", "शुक्रबार", "सनीचर"}, + {}, + {"जनवरी", "फरवरी", "मार्च", "अप्रैल", "मई", "जून", "जुलाई", "अगस्त", "सितम्बर", "अक्टूबर", "नवंबर", "दिसंबर"}, + {}, +} + +var localeTableBhoIN = [5][]string{ + {}, + {"रबीबार", "सोमबार", "मंगलबार", "बुधबार", "बृहस्पतिबार", "शुक्रबार", "सनीचर"}, + {}, + {"जनवरी", "फरवरी", "मार्च", "अप्रैल", "मई", "जून", "जुलाई", "अगस्त", "सितम्बर", "अक्टूबर", "नवंबर", "दिसंबर"}, + {}, +} + +var localeTableBlo = [5][]string{ + {"alah", "aɖɩt", "atal", "alar", "alam", "arɩs", "asib"}, + {"alahaɖɩ", "aɖɩtɛnɛɛ", "atalaata", "alaarba", "alaamɩshɩ", "arɩsǝma", "asiibi"}, + {"kaw", "kpa", "ci", "ɖʊ", "ɖu5", "ɖu6", "la", "kǝu", "fʊm", "cim", "pom", "bʊn"}, + {"ɩjikawǝrka kaŋɔrɔ", "ɩjikpaka kaŋɔrɔ", "arɛ́cika kaŋɔrɔ", "njɩbɔ nɖʊka kaŋɔrɔ", "acafʊnɖuka kaŋɔrɔ", "anɔɔɖuka kaŋɔrɔ", "alàlaka kaŋɔrɔ", "ɩjikǝuka kaŋɔrɔ", "abofʊmka kaŋɔrɔ", "ɩjicimka kaŋɔrɔ", "acapomka kaŋɔrɔ", "anɔɔbʊnka kaŋɔrɔ"}, + {"1ka", "2ja"}, +} + +var localeTableBloBJ = [5][]string{ + {"alah", "aɖɩt", "atal", "alar", "alam", "arɩs", "asib"}, + {"alahaɖɩ", "aɖɩtɛnɛɛ", "atalaata", "alaarba", "alaamɩshɩ", "arɩsǝma", "asiibi"}, + {"kaw", "kpa", "ci", "ɖʊ", "ɖu5", "ɖu6", "la", "kǝu", "fʊm", "cim", "pom", "bʊn"}, + {"ɩjikawǝrka kaŋɔrɔ", "ɩjikpaka kaŋɔrɔ", "arɛ́cika kaŋɔrɔ", "njɩbɔ nɖʊka kaŋɔrɔ", "acafʊnɖuka kaŋɔrɔ", "anɔɔɖuka kaŋɔrɔ", "alàlaka kaŋɔrɔ", "ɩjikǝuka kaŋɔrɔ", "abofʊmka kaŋɔrɔ", "ɩjicimka kaŋɔrɔ", "acapomka kaŋɔrɔ", "anɔɔbʊnka kaŋɔrɔ"}, + {"1ka", "2ja"}, +} + +var localeTableBm = [5][]string{ + {"kar", "ntɛ", "tar", "ara", "ala", "jum", "sib"}, + {"kari", "ntɛnɛ", "tarata", "araba", "alamisa", "juma", "sibiri"}, + {"zan", "feb", "mar", "awi", "mɛ", "zuw", "zul", "uti", "sɛt", "ɔku", "now", "des"}, + {"zanwuye", "feburuye", "marisi", "awirili", "mɛ", "zuwɛn", "zuluye", "uti", "sɛtanburu", "ɔkutɔburu", "nowanburu", "desanburu"}, + {}, +} + +var localeTableBmML = [5][]string{ + {"kar", "ntɛ", "tar", "ara", "ala", "jum", "sib"}, + {"kari", "ntɛnɛ", "tarata", "araba", "alamisa", "juma", "sibiri"}, + {"zan", "feb", "mar", "awi", "mɛ", "zuw", "zul", "uti", "sɛt", "ɔku", "now", "des"}, + {"zanwuye", "feburuye", "marisi", "awirili", "mɛ", "zuwɛn", "zuluye", "uti", "sɛtanburu", "ɔkutɔburu", "nowanburu", "desanburu"}, + {}, +} + +var localeTableBn = [5][]string{ + {"রবি", "সোম", "মঙ্গল", "বুধ", "বৃহস্পতি", "শুক্র", "শনি"}, + {"রবিবার", "সোমবার", "মঙ্গলবার", "বুধবার", "বৃহস্পতিবার", "শুক্রবার", "শনিবার"}, + {"জানু", "ফেব", "মার্চ", "এপ্রি", "মে", "জুন", "জুল", "আগ", "সেপ", "অক্টো", "নভে", "ডিসে"}, + {"জানুয়ারী", "ফেব্রুয়ারী", "মার্চ", "এপ্রিল", "মে", "জুন", "জুলাই", "আগস্ট", "সেপ্টেম্বর", "অক্টোবর", "নভেম্বর", "ডিসেম্বর"}, + {}, +} + +var localeTableBnBD = [5][]string{ + {"রবি", "সোম", "মঙ্গল", "বুধ", "বৃহস্পতি", "শুক্র", "শনি"}, + {"রবিবার", "সোমবার", "মঙ্গলবার", "বুধবার", "বৃহস্পতিবার", "শুক্রবার", "শনিবার"}, + {"জানু", "ফেব", "মার্চ", "এপ্রি", "মে", "জুন", "জুল", "আগ", "সেপ", "অক্টো", "নভে", "ডিসে"}, + {"জানুয়ারী", "ফেব্রুয়ারী", "মার্চ", "এপ্রিল", "মে", "জুন", "জুলাই", "আগস্ট", "সেপ্টেম্বর", "অক্টোবর", "নভেম্বর", "ডিসেম্বর"}, + {}, +} + +var localeTableBnIN = [5][]string{ + {"রবি", "সোম", "মঙ্গল", "বুধ", "বৃহস্পতি", "শুক্র", "শনি"}, + {"রবিবার", "সোমবার", "মঙ্গলবার", "বুধবার", "বৃহস্পতিবার", "শুক্রবার", "শনিবার"}, + {"জানু", "ফেব", "মার্চ", "এপ্রি", "মে", "জুন", "জুল", 
"আগ", "সেপ্টেঃ", "অক্টোঃ", "নভেঃ", "ডিসেঃ"}, + {"জানুয়ারী", "ফেব্রুয়ারী", "মার্চ", "এপ্রিল", "মে", "জুন", "জুলাই", "আগস্ট", "সেপ্টেম্বর", "অক্টোবর", "নভেম্বর", "ডিসেম্বর"}, + {}, +} + +var localeTableBo = [5][]string{ + {"ཉི་མ་", "ཟླ་བ་", "མིག་དམར་", "ལྷག་པ་", "ཕུར་བུ་", "པ་སངས་", "སྤེན་པ་"}, + {"གཟའ་ཉི་མ་", "གཟའ་ཟླ་བ་", "གཟའ་མིག་དམར་", "གཟའ་ལྷག་པ་", "གཟའ་ཕུར་བུ་", "གཟའ་པ་སངས་", "གཟའ་སྤེན་པ་"}, + {"ཟླ་༡", "ཟླ་༢", "ཟླ་༣", "ཟླ་༤", "ཟླ་༥", "ཟླ་༦", "ཟླ་༧", "ཟླ་༨", "ཟླ་༩", "ཟླ་༡༠", "ཟླ་༡༡", "ཟླ་༡༢"}, + {"ཟླ་བ་དང་པོ", "ཟླ་བ་གཉིས་པ", "ཟླ་བ་གསུམ་པ", "ཟླ་བ་བཞི་པ", "ཟླ་བ་ལྔ་པ", "ཟླ་བ་དྲུག་པ", "ཟླ་བ་བདུན་པ", "ཟླ་བ་བརྒྱད་པ", "ཟླ་བ་དགུ་པ", "ཟླ་བ་བཅུ་པ", "ཟླ་བ་བཅུ་གཅིག་པ", "ཟླ་བ་བཅུ་གཉིས་པ"}, + {"སྔ་དྲོ་", "ཕྱི་དྲོ་"}, +} + +var localeTableBoCN = [5][]string{ + {"ཉི་མ་", "ཟླ་བ་", "མིག་དམར་", "ལྷག་པ་", "ཕུར་བུ་", "པ་སངས་", "སྤེན་པ་"}, + {"གཟའ་ཉི་མ་", "གཟའ་ཟླ་བ་", "གཟའ་མིག་དམར་", "གཟའ་ལྷག་པ་", "གཟའ་ཕུར་བུ་", "གཟའ་པ་སངས་", "གཟའ་སྤེན་པ་"}, + {"ཟླ་༡", "ཟླ་༢", "ཟླ་༣", "ཟླ་༤", "ཟླ་༥", "ཟླ་༦", "ཟླ་༧", "ཟླ་༨", "ཟླ་༩", "ཟླ་༡༠", "ཟླ་༡༡", "ཟླ་༡༢"}, + {"ཟླ་བ་དང་པོ", "ཟླ་བ་གཉིས་པ", "ཟླ་བ་གསུམ་པ", "ཟླ་བ་བཞི་པ", "ཟླ་བ་ལྔ་པ", "ཟླ་བ་དྲུག་པ", "ཟླ་བ་བདུན་པ", "ཟླ་བ་བརྒྱད་པ", "ཟླ་བ་དགུ་པ", "ཟླ་བ་བཅུ་པ", "ཟླ་བ་བཅུ་གཅིག་པ", "ཟླ་བ་བཅུ་གཉིས་པ"}, + {"སྔ་དྲོ་", "ཕྱི་དྲོ་"}, +} + +var localeTableBoIN = [5][]string{ + {"ཉི་མ་", "ཟླ་བ་", "མིག་དམར་", "ལྷག་པ་", "ཕུར་བུ་", "པ་སངས་", "སྤེན་པ་"}, + {"གཟའ་ཉི་མ་", "གཟའ་ཟླ་བ་", "གཟའ་མིག་དམར་", "གཟའ་ལྷག་པ་", "གཟའ་ཕུར་བུ་", "གཟའ་པ་སངས་", "གཟའ་སྤེན་པ་"}, + {"ཟླ་༡", "ཟླ་༢", "ཟླ་༣", "ཟླ་༤", "ཟླ་༥", "ཟླ་༦", "ཟླ་༧", "ཟླ་༨", "ཟླ་༩", "ཟླ་༡༠", "ཟླ་༡༡", "ཟླ་༡༢"}, + {"ཟླ་བ་དང་པོ", "ཟླ་བ་གཉིས་པ", "ཟླ་བ་གསུམ་པ", "ཟླ་བ་བཞི་པ", "ཟླ་བ་ལྔ་པ", "ཟླ་བ་དྲུག་པ", "ཟླ་བ་བདུན་པ", "ཟླ་བ་བརྒྱད་པ", "ཟླ་བ་དགུ་པ", "ཟླ་བ་བཅུ་པ", "ཟླ་བ་བཅུ་གཅིག་པ", "ཟླ་བ་བཅུ་གཉིས་པ"}, + {"སྔ་དྲོ་", "ཕྱི་དྲོ་"}, +} + +var localeTableBr = [5][]string{ + {"Sul", "Lun", "Meu.", "Mer.", "Yaou", "Gwe.", "Sad."}, + {"Sul", "Lun", "Meurzh", "Mercʼher", "Yaou", "Gwener", "Sadorn"}, + {"Gen.", "Cʼhwe.", "Meur.", "Ebr.", "Mae", "Mezh.", "Goue.", "Eost", "Gwen.", "Here", "Du", "Kzu."}, + {"Genver", "Cʼhwevrer", "Meurzh", "Ebrel", "Mae", "Mezheven", "Gouere", "Eost", "Gwengolo", "Here", "Du", "Kerzu"}, + {"A.M.", "G.M."}, +} + +var localeTableBrFR = [5][]string{ + {"Sul", "Lun", "Meu.", "Mer.", "Yaou", "Gwe.", "Sad."}, + {"Sul", "Lun", "Meurzh", "Mercʼher", "Yaou", "Gwener", "Sadorn"}, + {"Gen.", "Cʼhwe.", "Meur.", "Ebr.", "Mae", "Mezh.", "Goue.", "Eost", "Gwen.", "Here", "Du", "Kzu."}, + {"Genver", "Cʼhwevrer", "Meurzh", "Ebrel", "Mae", "Mezheven", "Gouere", "Eost", "Gwengolo", "Here", "Du", "Kerzu"}, + {"A.M.", "G.M."}, +} + +var localeTableBrx = [5][]string{ + {"रबि", "सम", "मंगल", "बुध", "बिस्थि", "सुखुर", "सनि"}, + {"रबिबार", "समबार", "मंगलबार", "बुधबार", "बिस्थिबार", "सुखुरबार", "सनिबार"}, + {"जान", "फेब", "मार्च", "एप्रि", "मे", "जुन", "जुल", "आग", "सेप", "अक्ट’", "नवे", "डिसे"}, + {"जानुवारी", "फेब्रूवारी", "मार्च", "एप्रिल", "मे", "जुन", "जुलाई", "आगष्ट", "सेप्थेम्बर", "अक्ट’बर", "नवेम्बर", "डिसेम्बर"}, + {"फुं", "बेलासे"}, +} + +var localeTableBrxIN = [5][]string{ + {"रबि", "सम", "मंगल", "बुध", "बिस्थि", "सुखुर", "सनि"}, + {"रबिबार", "समबार", "मंगलबार", "बुधबार", "बिस्थिबार", "सुखुरबार", "सनिबार"}, + {"जान", "फेब", "मार्च", "एप्रि", "मे", "जुन", "जुल", "आग", "सेप", "अक्ट’", "नवे", "डिसे"}, + {"जानुवारी", "फेब्रूवारी", "मार्च", "एप्रिल", "मे", "जुन", "जुलाई", "आगष्ट", "सेप्थेम्बर", "अक्ट’बर", "नवेम्बर", "डिसेम्बर"}, + {"फुं", "बेलासे"}, +} + +var localeTableBs = [5][]string{ + {"ned", "pon", "uto", "sri", "čet", "pet", "sub"}, + {"nedjelja", 
"ponedjeljak", "utorak", "srijeda", "četvrtak", "petak", "subota"}, + {"jan", "feb", "mar", "apr", "maj", "jun", "jul", "aug", "sep", "okt", "nov", "dec"}, + {"januar", "februar", "mart", "april", "maj", "juni", "juli", "august", "septembar", "oktobar", "novembar", "decembar"}, + {"prijepodne", "popodne"}, +} + +var localeTableBsCyrl = [5][]string{ + {"нед", "пон", "уто", "сри", "чет", "пет", "суб"}, + {"недјеља", "понедјељак", "уторак", "сриједа", "четвртак", "петак", "субота"}, + {"јан", "феб", "мар", "апр", "мај", "јун", "јул", "ауг", "сеп", "окт", "нов", "дец"}, + {"јануар", "фебруар", "март", "април", "мај", "јуни", "јули", "аугуст", "септембар", "октобар", "новембар", "децембар"}, + {"преподне", "поподне"}, +} + +var localeTableBsCyrlBA = [5][]string{ + {"нед", "пон", "уто", "сри", "чет", "пет", "суб"}, + {"недјеља", "понедјељак", "уторак", "сриједа", "четвртак", "петак", "субота"}, + {"јан", "феб", "мар", "апр", "мај", "јун", "јул", "ауг", "сеп", "окт", "нов", "дец"}, + {"јануар", "фебруар", "март", "април", "мај", "јуни", "јули", "аугуст", "септембар", "октобар", "новембар", "децембар"}, + {"преподне", "поподне"}, +} + +var localeTableBsLatn = [5][]string{ + {"ned", "pon", "uto", "sri", "čet", "pet", "sub"}, + {"nedjelja", "ponedjeljak", "utorak", "srijeda", "četvrtak", "petak", "subota"}, + {"jan", "feb", "mar", "apr", "maj", "jun", "jul", "aug", "sep", "okt", "nov", "dec"}, + {"januar", "februar", "mart", "april", "maj", "juni", "juli", "august", "septembar", "oktobar", "novembar", "decembar"}, + {"prijepodne", "popodne"}, +} + +var localeTableBsLatnBA = [5][]string{ + {"ned", "pon", "uto", "sri", "čet", "pet", "sub"}, + {"nedjelja", "ponedjeljak", "utorak", "srijeda", "četvrtak", "petak", "subota"}, + {"jan", "feb", "mar", "apr", "maj", "jun", "jul", "aug", "sep", "okt", "nov", "dec"}, + {"januar", "februar", "mart", "april", "maj", "juni", "juli", "august", "septembar", "oktobar", "novembar", "decembar"}, + {"prijepodne", "popodne"}, +} + +var localeTableByn = [5][]string{ + {"ሰ/ቅ", "ሰኑ", "ሰሊጝ", "ለጓ", "ኣምድ", "ኣርብ", "ሰ/ሽ"}, + {"ሰንበር ቅዳዅ", "ሰኑ", "ሰሊጝ", "ለጓ ወሪ ለብዋ", "ኣምድ", "ኣርብ", "ሰንበር ሽጓዅ"}, + {"ልደት", "ካብኽ", "ክብላ", "ፋጅኺ", "ክቢቅ", "ም/ት", "ኰር", "ማርያ", "ያኸኒ", "መተሉ", "ም/ም", "ተሕሳ"}, + {"ልደትሪ", "ካብኽብቲ", "ክብላ", "ፋጅኺሪ", "ክቢቅሪ", "ምኪኤል ትጟኒሪ", "ኰርኩ", "ማርያም ትሪ", "ያኸኒ መሳቅለሪ", "መተሉ", "ምኪኤል መሽወሪ", "ተሕሳስሪ"}, + {"ፋዱስጃብ", "ፋዱስደምቢ"}, +} + +var localeTableBynER = [5][]string{ + {"ሰ/ቅ", "ሰኑ", "ሰሊጝ", "ለጓ", "ኣምድ", "ኣርብ", "ሰ/ሽ"}, + {"ሰንበር ቅዳዅ", "ሰኑ", "ሰሊጝ", "ለጓ ወሪ ለብዋ", "ኣምድ", "ኣርብ", "ሰንበር ሽጓዅ"}, + {"ልደት", "ካብኽ", "ክብላ", "ፋጅኺ", "ክቢቅ", "ም/ት", "ኰር", "ማርያ", "ያኸኒ", "መተሉ", "ም/ም", "ተሕሳ"}, + {"ልደትሪ", "ካብኽብቲ", "ክብላ", "ፋጅኺሪ", "ክቢቅሪ", "ምኪኤል ትጟኒሪ", "ኰርኩ", "ማርያም ትሪ", "ያኸኒ መሳቅለሪ", "መተሉ", "ምኪኤል መሽወሪ", "ተሕሳስሪ"}, + {"ፋዱስጃብ", "ፋዱስደምቢ"}, +} + +var localeTableCa = [5][]string{ + {"dg.", "dl.", "dt.", "dc.", "dj.", "dv.", "ds."}, + {"diumenge", "dilluns", "dimarts", "dimecres", "dijous", "divendres", "dissabte"}, + {"de gen.", "de febr.", "de març", "d’abr.", "de maig", "de juny", "de jul.", "d’ag.", "de set.", "d’oct.", "de nov.", "de des."}, + {"de gener", "de febrer", "de març", "d’abril", "de maig", "de juny", "de juliol", "d’agost", "de setembre", "d’octubre", "de novembre", "de desembre"}, + {"a.m.", "p.m."}, +} + +var localeTableCaAD = [5][]string{ + {"dg.", "dl.", "dt.", "dc.", "dj.", "dv.", "ds."}, + {"diumenge", "dilluns", "dimarts", "dimecres", "dijous", "divendres", "dissabte"}, + {"de gen.", "de febr.", "de març", "d’abr.", "de maig", "de juny", "de jul.", "d’ag.", "de set.", "d’oct.", "de nov.", "de des."}, + 
{"de gener", "de febrer", "de març", "d’abril", "de maig", "de juny", "de juliol", "d’agost", "de setembre", "d’octubre", "de novembre", "de desembre"}, + {"a.m.", "p.m."}, +} + +var localeTableCaES = [5][]string{ + {"dg.", "dl.", "dt.", "dc.", "dj.", "dv.", "ds."}, + {"diumenge", "dilluns", "dimarts", "dimecres", "dijous", "divendres", "dissabte"}, + {"de gen.", "de febr.", "de març", "d’abr.", "de maig", "de juny", "de jul.", "d’ag.", "de set.", "d’oct.", "de nov.", "de des."}, + {"de gener", "de febrer", "de març", "d’abril", "de maig", "de juny", "de juliol", "d’agost", "de setembre", "d’octubre", "de novembre", "de desembre"}, + {"a.m.", "p.m."}, +} + +var localeTableCaESvalencia = [5][]string{ + {"dg.", "dl.", "dt.", "dc.", "dj.", "dv.", "ds."}, + {"diumenge", "dilluns", "dimarts", "dimecres", "dijous", "divendres", "dissabte"}, + {"de gen.", "de febr.", "de març", "d’abr.", "de maig", "de juny", "de jul.", "d’ag.", "de set.", "d’oct.", "de nov.", "de des."}, + {"de gener", "de febrer", "de març", "d’abril", "de maig", "de juny", "de juliol", "d’agost", "de setembre", "d’octubre", "de novembre", "de desembre"}, + {"a.m.", "p.m."}, +} + +var localeTableCaFR = [5][]string{ + {"dg.", "dl.", "dt.", "dc.", "dj.", "dv.", "ds."}, + {"diumenge", "dilluns", "dimarts", "dimecres", "dijous", "divendres", "dissabte"}, + {"de gen.", "de febr.", "de març", "d’abr.", "de maig", "de juny", "de jul.", "d’ag.", "de set.", "d’oct.", "de nov.", "de des."}, + {"de gener", "de febrer", "de març", "d’abril", "de maig", "de juny", "de juliol", "d’agost", "de setembre", "d’octubre", "de novembre", "de desembre"}, + {"a.m.", "p.m."}, +} + +var localeTableCaIT = [5][]string{ + {"dg.", "dl.", "dt.", "dc.", "dj.", "dv.", "ds."}, + {"diumenge", "dilluns", "dimarts", "dimecres", "dijous", "divendres", "dissabte"}, + {"de gen.", "de febr.", "de març", "d’abr.", "de maig", "de juny", "de jul.", "d’ag.", "de set.", "d’oct.", "de nov.", "de des."}, + {"de gener", "de febrer", "de març", "d’abril", "de maig", "de juny", "de juliol", "d’agost", "de setembre", "d’octubre", "de novembre", "de desembre"}, + {"a.m.", "p.m."}, +} + +var localeTableCad = [5][]string{ + {}, + {"Inikuˀ", "Wísts’i hayashuh", "Bít hayashuh", "Dahó hayashuh", "Hiwí hayashuh", "Dissik’an hayashuh", "Inikuˀtiˀtiˀ"}, + {}, + {"Cháykáhday Haˀimay", "Tsahkápbiˀ", "Wánit", "Háshnihtiˀtiˀ", "Háshnih Haˀimay", "Háshnihtsiˀ", "Násˀahˀatsus", "Dahósikah nish", "Híisikah nish", "Nípbaatiˀtiˀ", "Nípbaa Haˀimay", "Cháykáhdaytiˀtiˀ"}, + {}, +} + +var localeTableCadUS = [5][]string{ + {}, + {"Inikuˀ", "Wísts’i hayashuh", "Bít hayashuh", "Dahó hayashuh", "Hiwí hayashuh", "Dissik’an hayashuh", "Inikuˀtiˀtiˀ"}, + {}, + {"Cháykáhday Haˀimay", "Tsahkápbiˀ", "Wánit", "Háshnihtiˀtiˀ", "Háshnih Haˀimay", "Háshnihtsiˀ", "Násˀahˀatsus", "Dahósikah nish", "Híisikah nish", "Nípbaatiˀtiˀ", "Nípbaa Haˀimay", "Cháykáhdaytiˀtiˀ"}, + {}, +} + +var localeTableCch = [5][]string{ + {"Yok", "Tung", "Gitung", "Tsan", "Nas", "Nat", "Chir"}, + {"Wai Yoka Bawai", "Wai Tunga", "Toki Gitung", "Tsam Kasuwa", "Wai Na Nas", "Wai Na Tiyon", "Wai Na Chirim"}, + {"Dyon", "Baa", "Atat", "Anas", "Atyo", "Achi", "Atar", "Awur", "Shad", "Shak", "Naba", "Nata"}, + {"Pen Dyon", "Pen Baʼa", "Pen Atat", "Pen Anas", "Pen Atyon", "Pen Achirim", "Pen Atariba", "Pen Awurr", "Pen Shadon", "Pen Shakur", "Pen Kur Naba", "Pen Kur Natat"}, + {}, +} + +var localeTableCchNG = [5][]string{ + {"Yok", "Tung", "Gitung", "Tsan", "Nas", "Nat", "Chir"}, + {"Wai Yoka Bawai", "Wai Tunga", "Toki Gitung", "Tsam Kasuwa", 
"Wai Na Nas", "Wai Na Tiyon", "Wai Na Chirim"}, + {"Dyon", "Baa", "Atat", "Anas", "Atyo", "Achi", "Atar", "Awur", "Shad", "Shak", "Naba", "Nata"}, + {"Pen Dyon", "Pen Baʼa", "Pen Atat", "Pen Anas", "Pen Atyon", "Pen Achirim", "Pen Atariba", "Pen Awurr", "Pen Shadon", "Pen Shakur", "Pen Kur Naba", "Pen Kur Natat"}, + {}, +} + +var localeTableCcp = [5][]string{ + {"𑄢𑄧𑄝𑄨", "𑄥𑄧𑄟𑄴", "𑄟𑄧𑄁𑄉𑄧𑄣𑄴", "𑄝𑄪𑄖𑄴", "𑄝𑄳𑄢𑄨𑄥𑄪𑄛𑄴", "𑄥𑄪𑄇𑄴𑄇𑄮𑄢𑄴", "𑄥𑄧𑄚𑄨"}, + {"𑄢𑄧𑄝𑄨𑄝𑄢𑄴", "𑄥𑄧𑄟𑄴𑄝𑄢𑄴", "𑄟𑄧𑄁𑄉𑄧𑄣𑄴𑄝𑄢𑄴", "𑄝𑄪𑄖𑄴𑄝𑄢𑄴", "𑄝𑄳𑄢𑄨𑄥𑄪𑄛𑄴𑄝𑄢𑄴", "𑄥𑄪𑄇𑄴𑄇𑄮𑄢𑄴𑄝𑄢𑄴", "𑄥𑄧𑄚𑄨𑄝𑄢𑄴"}, + {"𑄎𑄚𑄪", "𑄜𑄬𑄛𑄴", "𑄟𑄢𑄴𑄌𑄧", "𑄃𑄬𑄛𑄳𑄢𑄨𑄣𑄴", "𑄟𑄬", "𑄎𑄪𑄚𑄴", "𑄎𑄪𑄣𑄭", "𑄃𑄉𑄧𑄌𑄴𑄑𑄴", "𑄥𑄬𑄛𑄴𑄑𑄬𑄟𑄴𑄝𑄧𑄢𑄴", "𑄃𑄧𑄇𑄴𑄑𑄮𑄝𑄧𑄢𑄴", "𑄚𑄧𑄞𑄬𑄟𑄴𑄝𑄧𑄢𑄴", "𑄓𑄨𑄥𑄬𑄟𑄴𑄝𑄢𑄴"}, + {"𑄎𑄚𑄪𑄠𑄢𑄨", "𑄜𑄬𑄛𑄴𑄝𑄳𑄢𑄪𑄠𑄢𑄨", "𑄟𑄢𑄴𑄌𑄧", "𑄃𑄬𑄛𑄳𑄢𑄨𑄣𑄴", "𑄟𑄬", "𑄎𑄪𑄚𑄴", "𑄎𑄪𑄣𑄭", "𑄃𑄉𑄧𑄌𑄴𑄑𑄴", "𑄥𑄬𑄛𑄴𑄑𑄬𑄟𑄴𑄝𑄧𑄢𑄴", "𑄃𑄧𑄇𑄴𑄑𑄬𑄝𑄧𑄢𑄴", "𑄚𑄧𑄞𑄬𑄟𑄴𑄝𑄧𑄢𑄴", "𑄓𑄨𑄥𑄬𑄟𑄴𑄝𑄧𑄢𑄴"}, + {}, +} + +var localeTableCcpBD = [5][]string{ + {"𑄢𑄧𑄝𑄨", "𑄥𑄧𑄟𑄴", "𑄟𑄧𑄁𑄉𑄧𑄣𑄴", "𑄝𑄪𑄖𑄴", "𑄝𑄳𑄢𑄨𑄥𑄪𑄛𑄴", "𑄥𑄪𑄇𑄴𑄇𑄮𑄢𑄴", "𑄥𑄧𑄚𑄨"}, + {"𑄢𑄧𑄝𑄨𑄝𑄢𑄴", "𑄥𑄧𑄟𑄴𑄝𑄢𑄴", "𑄟𑄧𑄁𑄉𑄧𑄣𑄴𑄝𑄢𑄴", "𑄝𑄪𑄖𑄴𑄝𑄢𑄴", "𑄝𑄳𑄢𑄨𑄥𑄪𑄛𑄴𑄝𑄢𑄴", "𑄥𑄪𑄇𑄴𑄇𑄮𑄢𑄴𑄝𑄢𑄴", "𑄥𑄧𑄚𑄨𑄝𑄢𑄴"}, + {"𑄎𑄚𑄪", "𑄜𑄬𑄛𑄴", "𑄟𑄢𑄴𑄌𑄧", "𑄃𑄬𑄛𑄳𑄢𑄨𑄣𑄴", "𑄟𑄬", "𑄎𑄪𑄚𑄴", "𑄎𑄪𑄣𑄭", "𑄃𑄉𑄧𑄌𑄴𑄑𑄴", "𑄥𑄬𑄛𑄴𑄑𑄬𑄟𑄴𑄝𑄧𑄢𑄴", "𑄃𑄧𑄇𑄴𑄑𑄮𑄝𑄧𑄢𑄴", "𑄚𑄧𑄞𑄬𑄟𑄴𑄝𑄧𑄢𑄴", "𑄓𑄨𑄥𑄬𑄟𑄴𑄝𑄢𑄴"}, + {"𑄎𑄚𑄪𑄠𑄢𑄨", "𑄜𑄬𑄛𑄴𑄝𑄳𑄢𑄪𑄠𑄢𑄨", "𑄟𑄢𑄴𑄌𑄧", "𑄃𑄬𑄛𑄳𑄢𑄨𑄣𑄴", "𑄟𑄬", "𑄎𑄪𑄚𑄴", "𑄎𑄪𑄣𑄭", "𑄃𑄉𑄧𑄌𑄴𑄑𑄴", "𑄥𑄬𑄛𑄴𑄑𑄬𑄟𑄴𑄝𑄧𑄢𑄴", "𑄃𑄧𑄇𑄴𑄑𑄬𑄝𑄧𑄢𑄴", "𑄚𑄧𑄞𑄬𑄟𑄴𑄝𑄧𑄢𑄴", "𑄓𑄨𑄥𑄬𑄟𑄴𑄝𑄧𑄢𑄴"}, + {}, +} + +var localeTableCcpIN = [5][]string{ + {"𑄢𑄧𑄝𑄨", "𑄥𑄧𑄟𑄴", "𑄟𑄧𑄁𑄉𑄧𑄣𑄴", "𑄝𑄪𑄖𑄴", "𑄝𑄳𑄢𑄨𑄥𑄪𑄛𑄴", "𑄥𑄪𑄇𑄴𑄇𑄮𑄢𑄴", "𑄥𑄧𑄚𑄨"}, + {"𑄢𑄧𑄝𑄨𑄝𑄢𑄴", "𑄥𑄧𑄟𑄴𑄝𑄢𑄴", "𑄟𑄧𑄁𑄉𑄧𑄣𑄴𑄝𑄢𑄴", "𑄝𑄪𑄖𑄴𑄝𑄢𑄴", "𑄝𑄳𑄢𑄨𑄥𑄪𑄛𑄴𑄝𑄢𑄴", "𑄥𑄪𑄇𑄴𑄇𑄮𑄢𑄴𑄝𑄢𑄴", "𑄥𑄧𑄚𑄨𑄝𑄢𑄴"}, + {"𑄎𑄚𑄪", "𑄜𑄬𑄛𑄴", "𑄟𑄢𑄴𑄌𑄧", "𑄃𑄬𑄛𑄳𑄢𑄨𑄣𑄴", "𑄟𑄬", "𑄎𑄪𑄚𑄴", "𑄎𑄪𑄣𑄭", "𑄃𑄉𑄧𑄌𑄴𑄑𑄴", "𑄥𑄬𑄛𑄴𑄑𑄬𑄟𑄴𑄝𑄧𑄢𑄴", "𑄃𑄧𑄇𑄴𑄑𑄮𑄝𑄧𑄢𑄴", "𑄚𑄧𑄞𑄬𑄟𑄴𑄝𑄧𑄢𑄴", "𑄓𑄨𑄥𑄬𑄟𑄴𑄝𑄢𑄴"}, + {"𑄎𑄚𑄪𑄠𑄢𑄨", "𑄜𑄬𑄛𑄴𑄝𑄳𑄢𑄪𑄠𑄢𑄨", "𑄟𑄢𑄴𑄌𑄧", "𑄃𑄬𑄛𑄳𑄢𑄨𑄣𑄴", "𑄟𑄬", "𑄎𑄪𑄚𑄴", "𑄎𑄪𑄣𑄭", "𑄃𑄉𑄧𑄌𑄴𑄑𑄴", "𑄥𑄬𑄛𑄴𑄑𑄬𑄟𑄴𑄝𑄧𑄢𑄴", "𑄃𑄧𑄇𑄴𑄑𑄬𑄝𑄧𑄢𑄴", "𑄚𑄧𑄞𑄬𑄟𑄴𑄝𑄧𑄢𑄴", "𑄓𑄨𑄥𑄬𑄟𑄴𑄝𑄧𑄢𑄴"}, + {}, +} + +var localeTableCe = [5][]string{ + {"кӀи", "ор", "ши", "кха", "еа", "пӀе", "шуо"}, + {"кӀира", "оршот", "шинара", "кхаара", "еара", "пӀераска", "шуот"}, + {"янв", "фев", "мар", "апр", "май", "июн", "июл", "авг", "сен", "окт", "ноя", "дек"}, + {"январь", "февраль", "март", "апрель", "май", "июнь", "июль", "август", "сентябрь", "октябрь", "ноябрь", "декабрь"}, + {}, +} + +var localeTableCeRU = [5][]string{ + {"кӀи", "ор", "ши", "кха", "еа", "пӀе", "шуо"}, + {"кӀира", "оршот", "шинара", "кхаара", "еара", "пӀераска", "шуот"}, + {"янв", "фев", "мар", "апр", "май", "июн", "июл", "авг", "сен", "окт", "ноя", "дек"}, + {"январь", "февраль", "март", "апрель", "май", "июнь", "июль", "август", "сентябрь", "октябрь", "ноябрь", "декабрь"}, + {}, +} + +var localeTableCeb = [5][]string{ + {"Dom", "Lun", "Mar", "Miy", "Huw", "Biy", "Sab"}, + {"Domingo", "Lunes", "Martes", "Miyerkules", "Huwebes", "Biyernes", "Sabado"}, + {"Ene", "Peb", "Mar", "Abr", "May", "Hun", "Hul", "Ago", "Sep", "Okt", "Nob", "Dis"}, + {"Enero", "Pebrero", "Marso", "Abril", "Mayo", "Hunyo", "Hulyo", "Agosto", "Septiyembre", "Oktubre", "Nobyembre", "Disyembre"}, + {"a", "p"}, +} + +var localeTableCebPH = [5][]string{ + {"Dom", "Lun", "Mar", "Miy", "Huw", "Biy", "Sab"}, + {"Domingo", "Lunes", "Martes", "Miyerkules", "Huwebes", "Biyernes", "Sabado"}, + {"Ene", "Peb", "Mar", "Abr", "May", "Hun", "Hul", "Ago", "Sep", "Okt", "Nob", "Dis"}, + {"Enero", "Pebrero", "Marso", "Abril", "Mayo", "Hunyo", "Hulyo", "Agosto", "Septiyembre", "Oktubre", "Nobyembre", "Disyembre"}, + {"a", "p"}, +} + +var localeTableCgg = [5][]string{ + {"SAN", "ORK", "OKB", "OKS", "OKN", "OKT", "OMK"}, + {"Sande", "Orwokubanza", "Orwakabiri", "Orwakashatu", "Orwakana", "Orwakataano", "Orwamukaaga"}, + {"KBZ", "KBR", "KST", "KKN", 
"KTN", "KMK", "KMS", "KMN", "KMW", "KKM", "KNK", "KNB"}, + {"Okwokubanza", "Okwakabiri", "Okwakashatu", "Okwakana", "Okwakataana", "Okwamukaaga", "Okwamushanju", "Okwamunaana", "Okwamwenda", "Okwaikumi", "Okwaikumi na kumwe", "Okwaikumi na ibiri"}, + {}, +} + +var localeTableCggUG = [5][]string{ + {"SAN", "ORK", "OKB", "OKS", "OKN", "OKT", "OMK"}, + {"Sande", "Orwokubanza", "Orwakabiri", "Orwakashatu", "Orwakana", "Orwakataano", "Orwamukaaga"}, + {"KBZ", "KBR", "KST", "KKN", "KTN", "KMK", "KMS", "KMN", "KMW", "KKM", "KNK", "KNB"}, + {"Okwokubanza", "Okwakabiri", "Okwakashatu", "Okwakana", "Okwakataana", "Okwamukaaga", "Okwamushanju", "Okwamunaana", "Okwamwenda", "Okwaikumi", "Okwaikumi na kumwe", "Okwaikumi na ibiri"}, + {}, +} + +var localeTableChr = [5][]string{ + {"ᏆᏍᎬ", "ᏉᏅᎯ", "ᏔᎵᏁ", "ᏦᎢᏁ", "ᏅᎩᏁ", "ᏧᎾᎩ", "ᏈᏕᎾ"}, + {"ᎤᎾᏙᏓᏆᏍᎬ", "ᎤᎾᏙᏓᏉᏅᎯ", "ᏔᎵᏁᎢᎦ", "ᏦᎢᏁᎢᎦ", "ᏅᎩᏁᎢᎦ", "ᏧᎾᎩᎶᏍᏗ", "ᎤᎾᏙᏓᏈᏕᎾ"}, + {"ᎤᏃ", "ᎧᎦ", "ᎠᏅ", "ᎧᏬ", "ᎠᏂ", "ᏕᎭ", "ᎫᏰ", "ᎦᎶ", "ᏚᎵ", "ᏚᏂ", "ᏅᏓ", "ᎥᏍ"}, + {"ᎤᏃᎸᏔᏅ", "ᎧᎦᎵ", "ᎠᏅᏱ", "ᎧᏬᏂ", "ᎠᏂᏍᎬᏘ", "ᏕᎭᎷᏱ", "ᎫᏰᏉᏂ", "ᎦᎶᏂ", "ᏚᎵᏍᏗ", "ᏚᏂᏅᏗ", "ᏅᏓᏕᏆ", "ᎥᏍᎩᏱ"}, + {"ᏌᎾᎴ", "ᏒᎯᏱᎢ"}, +} + +var localeTableChrUS = [5][]string{ + {"ᏆᏍᎬ", "ᏉᏅᎯ", "ᏔᎵᏁ", "ᏦᎢᏁ", "ᏅᎩᏁ", "ᏧᎾᎩ", "ᏈᏕᎾ"}, + {"ᎤᎾᏙᏓᏆᏍᎬ", "ᎤᎾᏙᏓᏉᏅᎯ", "ᏔᎵᏁᎢᎦ", "ᏦᎢᏁᎢᎦ", "ᏅᎩᏁᎢᎦ", "ᏧᎾᎩᎶᏍᏗ", "ᎤᎾᏙᏓᏈᏕᎾ"}, + {"ᎤᏃ", "ᎧᎦ", "ᎠᏅ", "ᎧᏬ", "ᎠᏂ", "ᏕᎭ", "ᎫᏰ", "ᎦᎶ", "ᏚᎵ", "ᏚᏂ", "ᏅᏓ", "ᎥᏍ"}, + {"ᎤᏃᎸᏔᏅ", "ᎧᎦᎵ", "ᎠᏅᏱ", "ᎧᏬᏂ", "ᎠᏂᏍᎬᏘ", "ᏕᎭᎷᏱ", "ᎫᏰᏉᏂ", "ᎦᎶᏂ", "ᏚᎵᏍᏗ", "ᏚᏂᏅᏗ", "ᏅᏓᏕᏆ", "ᎥᏍᎩᏱ"}, + {"ᏌᎾᎴ", "ᏒᎯᏱᎢ"}, +} + +var localeTableCic = [5][]string{ + {}, + {"Nittak Holloʼ", "Mantiʼ", "Chostiʼ", "Winstiʼ", "Soistiʼ", "Nannalhchifaʼ Nittak", "Nittak Holloʼ Nakfish"}, + {}, + {"Hashiʼ Ammoʼnaʼ", "Hashiʼ Atokloʼ", "Hashiʼ Atochchíʼnaʼ", "Iiplal", "Mih", "Choon", "Choola", "Akaas", "Siptimpaʼ", "Aaktopaʼ", "Nofimpaʼ", "Tiisimpaʼ"}, + {}, +} + +var localeTableCicUS = [5][]string{ + {}, + {"Nittak Holloʼ", "Mantiʼ", "Chostiʼ", "Winstiʼ", "Soistiʼ", "Nannalhchifaʼ Nittak", "Nittak Holloʼ Nakfish"}, + {}, + {"Hashiʼ Ammoʼnaʼ", "Hashiʼ Atokloʼ", "Hashiʼ Atochchíʼnaʼ", "Iiplal", "Mih", "Choon", "Choola", "Akaas", "Siptimpaʼ", "Aaktopaʼ", "Nofimpaʼ", "Tiisimpaʼ"}, + {}, +} + +var localeTableCkb = [5][]string{ + {}, + {"یەکشەممە", "دووشەممە", "سێشەممە", "چوارشەممە", "پێنجشەممە", "ھەینی", "شەممە"}, + {}, + {"کانوونی دووەم", "شوبات", "ئازار", "نیسان", "ئایار", "حوزەیران", "تەمووز", "ئاب", "ئەیلوول", "تشرینی یەکەم", "تشرینی دووەم", "کانونی یەکەم"}, + {"ب.ن", "د.ن"}, +} + +var localeTableCkbIQ = [5][]string{ + {}, + {"یەکشەممە", "دووشەممە", "سێشەممە", "چوارشەممە", "پێنجشەممە", "ھەینی", "شەممە"}, + {}, + {"کانوونی دووەم", "شوبات", "ئازار", "نیسان", "ئایار", "حوزەیران", "تەمووز", "ئاب", "ئەیلوول", "تشرینی یەکەم", "تشرینی دووەم", "کانونی یەکەم"}, + {"ب.ن", "د.ن"}, +} + +var localeTableCkbIR = [5][]string{ + {}, + {"یەکشەممە", "دووشەممە", "سێشەممە", "چوارشەممە", "پێنجشەممە", "ھەینی", "شەممە"}, + {}, + {"کانوونی دووەم", "شوبات", "ئازار", "نیسان", "ئایار", "حوزەیران", "تەمووز", "ئاب", "ئەیلوول", "تشرینی یەکەم", "تشرینی دووەم", "کانونی یەکەم"}, + {"ب.ن", "د.ن"}, +} + +var localeTableCo = [5][]string{ + {"dum.", "lun.", "mar.", "mer.", "ghj.", "ven.", "sab."}, + {"dumenica", "luni", "marti", "mercuri", "ghjovi", "venneri", "sabbatu"}, + {"ghj.", "fer.", "mar.", "apr.", "mag.", "ghju.", "lug.", "aos.", "sit.", "ott.", "nuv.", "dic."}, + {"di ghjennaghju", "di ferraghju", "di marzu", "d’aprile", "di maghju", "di ghjugnu", "di lugliu", "d’aostu", "di sittembre", "d’ottobre", "di nuvembre", "di dicembre"}, + {}, +} + +var localeTableCoFR = [5][]string{ + {"dum.", "lun.", "mar.", "mer.", "ghj.", 
"ven.", "sab."}, + {"dumenica", "luni", "marti", "mercuri", "ghjovi", "venneri", "sabbatu"}, + {"ghj.", "fer.", "mar.", "apr.", "mag.", "ghju.", "lug.", "aos.", "sit.", "ott.", "nuv.", "dic."}, + {"di ghjennaghju", "di ferraghju", "di marzu", "d’aprile", "di maghju", "di ghjugnu", "di lugliu", "d’aostu", "di sittembre", "d’ottobre", "di nuvembre", "di dicembre"}, + {}, +} + +var localeTableCs = [5][]string{ + {"ne", "po", "út", "st", "čt", "pá", "so"}, + {"neděle", "pondělí", "úterý", "středa", "čtvrtek", "pátek", "sobota"}, + {"led", "úno", "bře", "dub", "kvě", "čvn", "čvc", "srp", "zář", "říj", "lis", "pro"}, + {"ledna", "února", "března", "dubna", "května", "června", "července", "srpna", "září", "října", "listopadu", "prosince"}, + {"dop.", "odp."}, +} + +var localeTableCsCZ = [5][]string{ + {"ne", "po", "út", "st", "čt", "pá", "so"}, + {"neděle", "pondělí", "úterý", "středa", "čtvrtek", "pátek", "sobota"}, + {"led", "úno", "bře", "dub", "kvě", "čvn", "čvc", "srp", "zář", "říj", "lis", "pro"}, + {"ledna", "února", "března", "dubna", "května", "června", "července", "srpna", "září", "října", "listopadu", "prosince"}, + {"dop.", "odp."}, +} + +var localeTableCsw = [5][]string{ + {}, + {"ᐊᔭᒥᐦᐁᐃ ᑭᓯᑲᐤ", "ᐯᔭᐠᑭᓯᑲᐤ", "ᓂᓱᑭᓯᑲᐤ", "ᐊᐱᐦᑕᐘᐣ", "ᓀᐓᑭᓯᑲᐤ", "ᓂᔭᓇᓄᑭᓯᑲᐤ", "ᒪᑎᓄᐏᑭᓯᑲᐤ"}, + {}, + {"ᑭᓴᐱᓯᒼ", "ᒥᑭᓯᐏᐱᓯᒼ", "ᓂᐢᑭᐱᓯᒼ", "ᐊᓂᑭᐱᓯᒼ", "ᓴᑭᐸᑲᐏᐱᓯᒼ", "ᐸᐢᑲᐍᐦᐅᐱᓯᒼ", "ᐸᐢᑯᐏᐱᓯᒼ", "ᐅᐸᐦᐅᐏᐱᓯᒼ", "ᑕᐦᑿᑭᐱᓯᒼ", "ᐱᒪᐦᐊᒧᐏᐱᓯᒼ", "ᐊᑿᑎᓄᐏᐱᓯᒼ", "ᐸᐘᐢᒐᑲᓂᓹᐱᓯᒼ"}, + {}, +} + +var localeTableCswCA = [5][]string{ + {}, + {"ᐊᔭᒥᐦᐁᐃ ᑭᓯᑲᐤ", "ᐯᔭᐠᑭᓯᑲᐤ", "ᓂᓱᑭᓯᑲᐤ", "ᐊᐱᐦᑕᐘᐣ", "ᓀᐓᑭᓯᑲᐤ", "ᓂᔭᓇᓄᑭᓯᑲᐤ", "ᒪᑎᓄᐏᑭᓯᑲᐤ"}, + {}, + {"ᑭᓴᐱᓯᒼ", "ᒥᑭᓯᐏᐱᓯᒼ", "ᓂᐢᑭᐱᓯᒼ", "ᐊᓂᑭᐱᓯᒼ", "ᓴᑭᐸᑲᐏᐱᓯᒼ", "ᐸᐢᑲᐍᐦᐅᐱᓯᒼ", "ᐸᐢᑯᐏᐱᓯᒼ", "ᐅᐸᐦᐅᐏᐱᓯᒼ", "ᑕᐦᑿᑭᐱᓯᒼ", "ᐱᒪᐦᐊᒧᐏᐱᓯᒼ", "ᐊᑿᑎᓄᐏᐱᓯᒼ", "ᐸᐘᐢᒐᑲᓂᓹᐱᓯᒼ"}, + {}, +} + +var localeTableCu = [5][]string{ + {"ндⷧ҇ѧ", "пнⷣе", "втоⷬ҇", "срⷣе", "чеⷦ҇", "пѧⷦ҇", "сꙋⷠ҇"}, + {"недѣ́лѧ", "понедѣ́льникъ", "вто́рникъ", "среда̀", "четверто́къ", "пѧто́къ", "сꙋббѡ́та"}, + {"і҆аⷩ҇", "феⷡ҇", "маⷬ҇", "а҆пⷬ҇", "маꙵ", "і҆ꙋⷩ҇", "і҆ꙋⷧ҇", "а҆́ѵⷢ҇", "сеⷫ҇", "ѻ҆кⷮ", "ноеⷨ", "деⷦ҇"}, + {"і҆аннꙋа́рїа", "феврꙋа́рїа", "ма́рта", "а҆прі́ллїа", "ма́їа", "і҆ꙋ́нїа", "і҆ꙋ́лїа", "а҆́ѵгꙋста", "септе́мврїа", "ѻ҆ктѡ́врїа", "ное́мврїа", "деке́мврїа"}, + {"ДП", "ПП"}, +} + +var localeTableCuRU = [5][]string{ + {"ндⷧ҇ѧ", "пнⷣе", "втоⷬ҇", "срⷣе", "чеⷦ҇", "пѧⷦ҇", "сꙋⷠ҇"}, + {"недѣ́лѧ", "понедѣ́льникъ", "вто́рникъ", "среда̀", "четверто́къ", "пѧто́къ", "сꙋббѡ́та"}, + {"і҆аⷩ҇", "феⷡ҇", "маⷬ҇", "а҆пⷬ҇", "маꙵ", "і҆ꙋⷩ҇", "і҆ꙋⷧ҇", "а҆́ѵⷢ҇", "сеⷫ҇", "ѻ҆кⷮ", "ноеⷨ", "деⷦ҇"}, + {"і҆аннꙋа́рїа", "феврꙋа́рїа", "ма́рта", "а҆прі́ллїа", "ма́їа", "і҆ꙋ́нїа", "і҆ꙋ́лїа", "а҆́ѵгꙋста", "септе́мврїа", "ѻ҆ктѡ́врїа", "ное́мврїа", "деке́мврїа"}, + {"ДП", "ПП"}, +} + +var localeTableCv = [5][]string{ + {"выр.", "тун.", "ытл.", "юн.", "кӗҫ.", "эр.", "шӑм."}, + {"вырсарникун", "тунтикун", "ытларикун", "юнкун", "кӗҫнерникун", "эрнекун", "шӑматкун"}, + {"кӑр.", "нар.", "пуш", "ака", "ҫу", "ҫӗр.", "утӑ", "ҫур.", "авӑн", "юпа", "чӳк", "раш."}, + {"кӑрлач", "нарӑс", "пуш", "ака", "ҫу", "ҫӗртме", "утӑ", "ҫурла", "авӑн", "юпа", "чӳк", "раштав"}, + {}, +} + +var localeTableCvRU = [5][]string{ + {"выр.", "тун.", "ытл.", "юн.", "кӗҫ.", "эр.", "шӑм."}, + {"вырсарникун", "тунтикун", "ытларикун", "юнкун", "кӗҫнерникун", "эрнекун", "шӑматкун"}, + {"кӑр.", "нар.", "пуш", "ака", "ҫу", "ҫӗр.", "утӑ", "ҫур.", "авӑн", "юпа", "чӳк", "раш."}, + {"кӑрлач", "нарӑс", "пуш", "ака", "ҫу", "ҫӗртме", "утӑ", "ҫурла", "авӑн", "юпа", "чӳк", "раштав"}, + {}, +} + +var localeTableCy = [5][]string{ + {"Sul", "Llun", "Maw", "Mer", "Iau", "Gwen", "Sad"}, + {"Dydd 
Sul", "Dydd Llun", "Dydd Mawrth", "Dydd Mercher", "Dydd Iau", "Dydd Gwener", "Dydd Sadwrn"}, + {"Ion", "Chwef", "Maw", "Ebr", "Mai", "Meh", "Gorff", "Awst", "Medi", "Hyd", "Tach", "Rhag"}, + {"Ionawr", "Chwefror", "Mawrth", "Ebrill", "Mai", "Mehefin", "Gorffennaf", "Awst", "Medi", "Hydref", "Tachwedd", "Rhagfyr"}, + {"b", "h"}, +} + +var localeTableCyGB = [5][]string{ + {"Sul", "Llun", "Maw", "Mer", "Iau", "Gwen", "Sad"}, + {"Dydd Sul", "Dydd Llun", "Dydd Mawrth", "Dydd Mercher", "Dydd Iau", "Dydd Gwener", "Dydd Sadwrn"}, + {"Ion", "Chwef", "Maw", "Ebr", "Mai", "Meh", "Gorff", "Awst", "Medi", "Hyd", "Tach", "Rhag"}, + {"Ionawr", "Chwefror", "Mawrth", "Ebrill", "Mai", "Mehefin", "Gorffennaf", "Awst", "Medi", "Hydref", "Tachwedd", "Rhagfyr"}, + {"b", "h"}, +} + +var localeTableDa = [5][]string{ + {"søn.", "man.", "tirs.", "ons.", "tors.", "fre.", "lør."}, + {"søndag", "mandag", "tirsdag", "onsdag", "torsdag", "fredag", "lørdag"}, + {"jan.", "feb.", "mar.", "apr.", "maj", "jun.", "jul.", "aug.", "sep.", "okt.", "nov.", "dec."}, + {"januar", "februar", "marts", "april", "maj", "juni", "juli", "august", "september", "oktober", "november", "december"}, + {"a", "p"}, +} + +var localeTableDaDK = [5][]string{ + {"søn.", "man.", "tirs.", "ons.", "tors.", "fre.", "lør."}, + {"søndag", "mandag", "tirsdag", "onsdag", "torsdag", "fredag", "lørdag"}, + {"jan.", "feb.", "mar.", "apr.", "maj", "jun.", "jul.", "aug.", "sep.", "okt.", "nov.", "dec."}, + {"januar", "februar", "marts", "april", "maj", "juni", "juli", "august", "september", "oktober", "november", "december"}, + {"a", "p"}, +} + +var localeTableDaGL = [5][]string{ + {"søn.", "man.", "tirs.", "ons.", "tors.", "fre.", "lør."}, + {"søndag", "mandag", "tirsdag", "onsdag", "torsdag", "fredag", "lørdag"}, + {"jan.", "feb.", "mar.", "apr.", "maj", "jun.", "jul.", "aug.", "sep.", "okt.", "nov.", "dec."}, + {"januar", "februar", "marts", "april", "maj", "juni", "juli", "august", "september", "oktober", "november", "december"}, + {"a", "p"}, +} + +var localeTableDav = [5][]string{ + {"Jum", "Jim", "Kaw", "Kad", "Kan", "Kas", "Ngu"}, + {"Ituku ja jumwa", "Kuramuka jimweri", "Kuramuka kawi", "Kuramuka kadadu", "Kuramuka kana", "Kuramuka kasanu", "Kifula nguwo"}, + {"Imb", "Kaw", "Kad", "Kan", "Kas", "Kar", "Mfu", "Wun", "Ike", "Iku", "Imw", "Iwi"}, + {"Mori ghwa imbiri", "Mori ghwa kawi", "Mori ghwa kadadu", "Mori ghwa kana", "Mori ghwa kasanu", "Mori ghwa karandadu", "Mori ghwa mfungade", "Mori ghwa wunyanya", "Mori ghwa ikenda", "Mori ghwa ikumi", "Mori ghwa ikumi na imweri", "Mori ghwa ikumi na iwi"}, + {"LumalwaK", "lumalwap"}, +} + +var localeTableDavKE = [5][]string{ + {"Jum", "Jim", "Kaw", "Kad", "Kan", "Kas", "Ngu"}, + {"Ituku ja jumwa", "Kuramuka jimweri", "Kuramuka kawi", "Kuramuka kadadu", "Kuramuka kana", "Kuramuka kasanu", "Kifula nguwo"}, + {"Imb", "Kaw", "Kad", "Kan", "Kas", "Kar", "Mfu", "Wun", "Ike", "Iku", "Imw", "Iwi"}, + {"Mori ghwa imbiri", "Mori ghwa kawi", "Mori ghwa kadadu", "Mori ghwa kana", "Mori ghwa kasanu", "Mori ghwa karandadu", "Mori ghwa mfungade", "Mori ghwa wunyanya", "Mori ghwa ikenda", "Mori ghwa ikumi", "Mori ghwa ikumi na imweri", "Mori ghwa ikumi na iwi"}, + {"LumalwaK", "lumalwap"}, +} + +var localeTableDe = [5][]string{ + {"So.", "Mo.", "Di.", "Mi.", "Do.", "Fr.", "Sa."}, + {"Sonntag", "Montag", "Dienstag", "Mittwoch", "Donnerstag", "Freitag", "Samstag"}, + {"Jan.", "Feb.", "März", "Apr.", "Mai", "Juni", "Juli", "Aug.", "Sept.", "Okt.", "Nov.", "Dez."}, + {"Januar", "Februar", "März", "April", "Mai", "Juni", "Juli", 
"August", "September", "Oktober", "November", "Dezember"}, + {}, +} + +var localeTableDeAT = [5][]string{ + {"So.", "Mo.", "Di.", "Mi.", "Do.", "Fr.", "Sa."}, + {"Sonntag", "Montag", "Dienstag", "Mittwoch", "Donnerstag", "Freitag", "Samstag"}, + {"Jän.", "Feb.", "März", "Apr.", "Mai", "Juni", "Juli", "Aug.", "Sep.", "Okt.", "Nov.", "Dez."}, + {"Jänner", "Februar", "März", "April", "Mai", "Juni", "Juli", "August", "September", "Oktober", "November", "Dezember"}, + {}, +} + +var localeTableDeBE = [5][]string{ + {"So.", "Mo.", "Di.", "Mi.", "Do.", "Fr.", "Sa."}, + {"Sonntag", "Montag", "Dienstag", "Mittwoch", "Donnerstag", "Freitag", "Samstag"}, + {"Jan.", "Feb.", "März", "Apr.", "Mai", "Juni", "Juli", "Aug.", "Sept.", "Okt.", "Nov.", "Dez."}, + {"Januar", "Februar", "März", "April", "Mai", "Juni", "Juli", "August", "September", "Oktober", "November", "Dezember"}, + {}, +} + +var localeTableDeCH = [5][]string{ + {"So.", "Mo.", "Di.", "Mi.", "Do.", "Fr.", "Sa."}, + {"Sonntag", "Montag", "Dienstag", "Mittwoch", "Donnerstag", "Freitag", "Samstag"}, + {"Jan.", "Feb.", "März", "Apr.", "Mai", "Juni", "Juli", "Aug.", "Sept.", "Okt.", "Nov.", "Dez."}, + {"Januar", "Februar", "März", "April", "Mai", "Juni", "Juli", "August", "September", "Oktober", "November", "Dezember"}, + {}, +} + +var localeTableDeDE = [5][]string{ + {"So.", "Mo.", "Di.", "Mi.", "Do.", "Fr.", "Sa."}, + {"Sonntag", "Montag", "Dienstag", "Mittwoch", "Donnerstag", "Freitag", "Samstag"}, + {"Jan.", "Feb.", "März", "Apr.", "Mai", "Juni", "Juli", "Aug.", "Sept.", "Okt.", "Nov.", "Dez."}, + {"Januar", "Februar", "März", "April", "Mai", "Juni", "Juli", "August", "September", "Oktober", "November", "Dezember"}, + {}, +} + +var localeTableDeIT = [5][]string{ + {"So.", "Mo.", "Di.", "Mi.", "Do.", "Fr.", "Sa."}, + {"Sonntag", "Montag", "Dienstag", "Mittwoch", "Donnerstag", "Freitag", "Samstag"}, + {"Jän.", "Feb.", "März", "Apr.", "Mai", "Juni", "Juli", "Aug.", "Sep.", "Okt.", "Nov.", "Dez."}, + {"Jänner", "Februar", "März", "April", "Mai", "Juni", "Juli", "August", "September", "Oktober", "November", "Dezember"}, + {}, +} + +var localeTableDeLI = [5][]string{ + {"So.", "Mo.", "Di.", "Mi.", "Do.", "Fr.", "Sa."}, + {"Sonntag", "Montag", "Dienstag", "Mittwoch", "Donnerstag", "Freitag", "Samstag"}, + {"Jan.", "Feb.", "März", "Apr.", "Mai", "Juni", "Juli", "Aug.", "Sept.", "Okt.", "Nov.", "Dez."}, + {"Januar", "Februar", "März", "April", "Mai", "Juni", "Juli", "August", "September", "Oktober", "November", "Dezember"}, + {}, +} + +var localeTableDeLU = [5][]string{ + {"So.", "Mo.", "Di.", "Mi.", "Do.", "Fr.", "Sa."}, + {"Sonntag", "Montag", "Dienstag", "Mittwoch", "Donnerstag", "Freitag", "Samstag"}, + {"Jan.", "Feb.", "März", "Apr.", "Mai", "Juni", "Juli", "Aug.", "Sept.", "Okt.", "Nov.", "Dez."}, + {"Januar", "Februar", "März", "April", "Mai", "Juni", "Juli", "August", "September", "Oktober", "November", "Dezember"}, + {"vorm.", "nachm."}, +} + +var localeTableDje = [5][]string{ + {"Alh", "Ati", "Ata", "Ala", "Alm", "Alz", "Asi"}, + {"Alhadi", "Atinni", "Atalaata", "Alarba", "Alhamisi", "Alzuma", "Asibti"}, + {"Žan", "Fee", "Mar", "Awi", "Me", "Žuw", "Žuy", "Ut", "Sek", "Okt", "Noo", "Dee"}, + {"Žanwiye", "Feewiriye", "Marsi", "Awiril", "Me", "Žuweŋ", "Žuyye", "Ut", "Sektanbur", "Oktoobur", "Noowanbur", "Deesanbur"}, + {"Subbaahi", "Zaarikayb"}, +} + +var localeTableDjeNE = [5][]string{ + {"Alh", "Ati", "Ata", "Ala", "Alm", "Alz", "Asi"}, + {"Alhadi", "Atinni", "Atalaata", "Alarba", "Alhamisi", "Alzuma", "Asibti"}, + {"Žan", "Fee", "Mar", "Awi", 
"Me", "Žuw", "Žuy", "Ut", "Sek", "Okt", "Noo", "Dee"}, + {"Žanwiye", "Feewiriye", "Marsi", "Awiril", "Me", "Žuweŋ", "Žuyye", "Ut", "Sektanbur", "Oktoobur", "Noowanbur", "Deesanbur"}, + {"Subbaahi", "Zaarikayb"}, +} + +var localeTableDoi = [5][]string{ + {"ऐत", "सोम", "मंगल", "बुध", "बीर", "शुक्र", "शनि"}, + {"ऐतबार", "सोमबार", "मंगलबार", "बुधबार", "बीरबार", "शुक्रबार", "शनिबार"}, + {"जन.", "फर.", "मार्च", "अप्रैल", "मेई", "जून", "जुलाई", "अग.", "सित.", "अक्तू.", "नव.", "दिस."}, + {"जनवरी", "फरवरी", "मार्च", "अप्रैल", "मेई", "जून", "जुलाई", "अगस्त", "सितंबर", "अक्तूबर", "नवंबर", "दिसंबर"}, + {"सवेर", "स’ञ"}, +} + +var localeTableDoiIN = [5][]string{ + {"ऐत", "सोम", "मंगल", "बुध", "बीर", "शुक्र", "शनि"}, + {"ऐतबार", "सोमबार", "मंगलबार", "बुधबार", "बीरबार", "शुक्रबार", "शनिबार"}, + {"जन.", "फर.", "मार्च", "अप्रैल", "मेई", "जून", "जुलाई", "अग.", "सित.", "अक्तू.", "नव.", "दिस."}, + {"जनवरी", "फरवरी", "मार्च", "अप्रैल", "मेई", "जून", "जुलाई", "अगस्त", "सितंबर", "अक्तूबर", "नवंबर", "दिसंबर"}, + {"सवेर", "स’ञ"}, +} + +var localeTableDsb = [5][]string{ + {"nje", "pón", "wał", "srj", "stw", "pět", "sob"}, + {"njeźela", "pónjeźele", "wałtora", "srjoda", "stwórtk", "pětk", "sobota"}, + {"jan.", "feb.", "měr.", "apr.", "maj.", "jun.", "jul.", "awg.", "sep.", "okt.", "now.", "dec."}, + {"januara", "februara", "měrca", "apryla", "maja", "junija", "julija", "awgusta", "septembra", "oktobra", "nowembra", "decembra"}, + {"dop.", "wótp."}, +} + +var localeTableDsbDE = [5][]string{ + {"nje", "pón", "wał", "srj", "stw", "pět", "sob"}, + {"njeźela", "pónjeźele", "wałtora", "srjoda", "stwórtk", "pětk", "sobota"}, + {"jan.", "feb.", "měr.", "apr.", "maj.", "jun.", "jul.", "awg.", "sep.", "okt.", "now.", "dec."}, + {"januara", "februara", "měrca", "apryla", "maja", "junija", "julija", "awgusta", "septembra", "oktobra", "nowembra", "decembra"}, + {"dop.", "wótp."}, +} + +var localeTableDua = [5][]string{ + {"ét", "mɔ́s", "kwa", "muk", "ŋgi", "ɗón", "esa"}, + {"éti", "mɔ́sú", "kwasú", "mukɔ́sú", "ŋgisú", "ɗónɛsú", "esaɓasú"}, + {"di", "ŋgɔn", "sɔŋ", "diɓ", "emi", "esɔ", "mad", "diŋ", "nyɛt", "may", "tin", "elá"}, + {"dimɔ́di", "ŋgɔndɛ", "sɔŋɛ", "diɓáɓá", "emiasele", "esɔpɛsɔpɛ", "madiɓɛ́díɓɛ́", "diŋgindi", "nyɛtɛki", "mayésɛ́", "tiníní", "eláŋgɛ́"}, + {"idiɓa", "ebyámu"}, +} + +var localeTableDuaCM = [5][]string{ + {"ét", "mɔ́s", "kwa", "muk", "ŋgi", "ɗón", "esa"}, + {"éti", "mɔ́sú", "kwasú", "mukɔ́sú", "ŋgisú", "ɗónɛsú", "esaɓasú"}, + {"di", "ŋgɔn", "sɔŋ", "diɓ", "emi", "esɔ", "mad", "diŋ", "nyɛt", "may", "tin", "elá"}, + {"dimɔ́di", "ŋgɔndɛ", "sɔŋɛ", "diɓáɓá", "emiasele", "esɔpɛsɔpɛ", "madiɓɛ́díɓɛ́", "diŋgindi", "nyɛtɛki", "mayésɛ́", "tiníní", "eláŋgɛ́"}, + {"idiɓa", "ebyámu"}, +} + +var localeTableDyo = [5][]string{ + {"Dim", "Ten", "Tal", "Ala", "Ara", "Arj", "Sib"}, + {"Dimas", "Teneŋ", "Talata", "Alarbay", "Aramisay", "Arjuma", "Sibiti"}, + {"Sa", "Fe", "Ma", "Ab", "Me", "Su", "Sú", "Ut", "Se", "Ok", "No", "De"}, + {"Sanvie", "Fébirie", "Mars", "Aburil", "Mee", "Sueŋ", "Súuyee", "Ut", "Settembar", "Oktobar", "Novembar", "Disambar"}, + {}, +} + +var localeTableDyoSN = [5][]string{ + {"Dim", "Ten", "Tal", "Ala", "Ara", "Arj", "Sib"}, + {"Dimas", "Teneŋ", "Talata", "Alarbay", "Aramisay", "Arjuma", "Sibiti"}, + {"Sa", "Fe", "Ma", "Ab", "Me", "Su", "Sú", "Ut", "Se", "Ok", "No", "De"}, + {"Sanvie", "Fébirie", "Mars", "Aburil", "Mee", "Sueŋ", "Súuyee", "Ut", "Settembar", "Oktobar", "Novembar", "Disambar"}, + {}, +} + +var localeTableDz = [5][]string{ + {"ཟླ་", "མིར་", "ལྷག་", "ཕུར་", "སངས་", "སྤེན་", "ཉི་"}, + 
{"གཟའ་ཟླ་བ་", "གཟའ་མིག་དམར་", "གཟའ་ལྷག་པ་", "གཟའ་ཕུར་བུ་", "གཟའ་པ་སངས་", "གཟའ་སྤེན་པ་", "གཟའ་ཉི་མ་"}, + {"༡", "༢", "༣", "༤", "༥", "༦", "༧", "༨", "༩", "༡༠", "༡༡", "12"}, + {"ཟླ་དངཔ་", "ཟླ་གཉིས་པ་", "ཟླ་གསུམ་པ་", "ཟླ་བཞི་པ་", "ཟླ་ལྔ་པ་", "ཟླ་དྲུག་པ", "ཟླ་བདུན་པ་", "ཟླ་བརྒྱད་པ་", "ཟླ་དགུ་པ་", "ཟླ་བཅུ་པ་", "ཟླ་བཅུ་གཅིག་པ་", "ཟླ་བཅུ་གཉིས་པ་"}, + {"སྔ་ཆ་", "ཕྱི་ཆ་"}, +} + +var localeTableDzBT = [5][]string{ + {"ཟླ་", "མིར་", "ལྷག་", "ཕུར་", "སངས་", "སྤེན་", "ཉི་"}, + {"གཟའ་ཟླ་བ་", "གཟའ་མིག་དམར་", "གཟའ་ལྷག་པ་", "གཟའ་ཕུར་བུ་", "གཟའ་པ་སངས་", "གཟའ་སྤེན་པ་", "གཟའ་ཉི་མ་"}, + {"༡", "༢", "༣", "༤", "༥", "༦", "༧", "༨", "༩", "༡༠", "༡༡", "12"}, + {"ཟླ་དངཔ་", "ཟླ་གཉིས་པ་", "ཟླ་གསུམ་པ་", "ཟླ་བཞི་པ་", "ཟླ་ལྔ་པ་", "ཟླ་དྲུག་པ", "ཟླ་བདུན་པ་", "ཟླ་བརྒྱད་པ་", "ཟླ་དགུ་པ་", "ཟླ་བཅུ་པ་", "ཟླ་བཅུ་གཅིག་པ་", "ཟླ་བཅུ་གཉིས་པ་"}, + {"སྔ་ཆ་", "ཕྱི་ཆ་"}, +} + +var localeTableEbu = [5][]string{ + {"Kma", "Tat", "Ine", "Tan", "Arm", "Maa", "NMM"}, + {"Kiumia", "Njumatatu", "Njumaine", "Njumatano", "Aramithi", "Njumaa", "NJumamothii"}, + {"Mbe", "Kai", "Kat", "Kan", "Gat", "Gan", "Mug", "Knn", "Ken", "Iku", "Imw", "Igi"}, + {"Mweri wa mbere", "Mweri wa kaĩri", "Mweri wa kathatũ", "Mweri wa kana", "Mweri wa gatano", "Mweri wa gatantatũ", "Mweri wa mũgwanja", "Mweri wa kanana", "Mweri wa kenda", "Mweri wa ikũmi", "Mweri wa ikũmi na ũmwe", "Mweri wa ikũmi na Kaĩrĩ"}, + {"KI", "UT"}, +} + +var localeTableEbuKE = [5][]string{ + {"Kma", "Tat", "Ine", "Tan", "Arm", "Maa", "NMM"}, + {"Kiumia", "Njumatatu", "Njumaine", "Njumatano", "Aramithi", "Njumaa", "NJumamothii"}, + {"Mbe", "Kai", "Kat", "Kan", "Gat", "Gan", "Mug", "Knn", "Ken", "Iku", "Imw", "Igi"}, + {"Mweri wa mbere", "Mweri wa kaĩri", "Mweri wa kathatũ", "Mweri wa kana", "Mweri wa gatano", "Mweri wa gatantatũ", "Mweri wa mũgwanja", "Mweri wa kanana", "Mweri wa kenda", "Mweri wa ikũmi", "Mweri wa ikũmi na ũmwe", "Mweri wa ikũmi na Kaĩrĩ"}, + {"KI", "UT"}, +} + +var localeTableEe = [5][]string{ + {"kɔs", "dzo", "bla", "kuɖ", "yaw", "fiɖ", "mem"}, + {"kɔsiɖa", "dzoɖa", "blaɖa", "kuɖa", "yawoɖa", "fiɖa", "memleɖa"}, + {"dzv", "dzd", "ted", "afɔ", "dam", "mas", "sia", "dea", "any", "kel", "ade", "dzm"}, + {"dzove", "dzodze", "tedoxe", "afɔfĩe", "dama", "masa", "siamlɔm", "deasiamime", "anyɔnyɔ", "kele", "adeɛmekpɔxe", "dzome"}, + {"ŋdi", "ɣetrɔ"}, +} + +var localeTableEeGH = [5][]string{ + {"kɔs", "dzo", "bla", "kuɖ", "yaw", "fiɖ", "mem"}, + {"kɔsiɖa", "dzoɖa", "blaɖa", "kuɖa", "yawoɖa", "fiɖa", "memleɖa"}, + {"dzv", "dzd", "ted", "afɔ", "dam", "mas", "sia", "dea", "any", "kel", "ade", "dzm"}, + {"dzove", "dzodze", "tedoxe", "afɔfĩe", "dama", "masa", "siamlɔm", "deasiamime", "anyɔnyɔ", "kele", "adeɛmekpɔxe", "dzome"}, + {"ŋdi", "ɣetrɔ"}, +} + +var localeTableEeTG = [5][]string{ + {"kɔs", "dzo", "bla", "kuɖ", "yaw", "fiɖ", "mem"}, + {"kɔsiɖa", "dzoɖa", "blaɖa", "kuɖa", "yawoɖa", "fiɖa", "memleɖa"}, + {"dzv", "dzd", "ted", "afɔ", "dam", "mas", "sia", "dea", "any", "kel", "ade", "dzm"}, + {"dzove", "dzodze", "tedoxe", "afɔfĩe", "dama", "masa", "siamlɔm", "deasiamime", "anyɔnyɔ", "kele", "adeɛmekpɔxe", "dzome"}, + {"ŋdi", "ɣetrɔ"}, +} + +var localeTableEl = [5][]string{ + {"Κυρ", "Δευ", "Τρί", "Τετ", "Πέμ", "Παρ", "Σάβ"}, + {"Κυριακή", "Δευτέρα", "Τρίτη", "Τετάρτη", "Πέμπτη", "Παρασκευή", "Σάββατο"}, + {"Ιαν", "Φεβ", "Μαρ", "Απρ", "Μαΐ", "Ιουν", "Ιουλ", "Αυγ", "Σεπ", "Οκτ", "Νοε", "Δεκ"}, + {"Ιανουαρίου", "Φεβρουαρίου", "Μαρτίου", "Απριλίου", "Μαΐου", "Ιουνίου", "Ιουλίου", "Αυγούστου", "Σεπτεμβρίου", "Οκτωβρίου", "Νοεμβρίου", "Δεκεμβρίου"}, + {"π.μ.", "μ.μ."}, +} + +var localeTableElCY = 
[5][]string{ + {"Κυρ", "Δευ", "Τρί", "Τετ", "Πέμ", "Παρ", "Σάβ"}, + {"Κυριακή", "Δευτέρα", "Τρίτη", "Τετάρτη", "Πέμπτη", "Παρασκευή", "Σάββατο"}, + {"Ιαν", "Φεβ", "Μαρ", "Απρ", "Μαΐ", "Ιουν", "Ιουλ", "Αυγ", "Σεπ", "Οκτ", "Νοε", "Δεκ"}, + {"Ιανουαρίου", "Φεβρουαρίου", "Μαρτίου", "Απριλίου", "Μαΐου", "Ιουνίου", "Ιουλίου", "Αυγούστου", "Σεπτεμβρίου", "Οκτωβρίου", "Νοεμβρίου", "Δεκεμβρίου"}, + {"π.μ.", "μ.μ."}, +} + +var localeTableElGR = [5][]string{ + {"Κυρ", "Δευ", "Τρί", "Τετ", "Πέμ", "Παρ", "Σάβ"}, + {"Κυριακή", "Δευτέρα", "Τρίτη", "Τετάρτη", "Πέμπτη", "Παρασκευή", "Σάββατο"}, + {"Ιαν", "Φεβ", "Μαρ", "Απρ", "Μαΐ", "Ιουν", "Ιουλ", "Αυγ", "Σεπ", "Οκτ", "Νοε", "Δεκ"}, + {"Ιανουαρίου", "Φεβρουαρίου", "Μαρτίου", "Απριλίου", "Μαΐου", "Ιουνίου", "Ιουλίου", "Αυγούστου", "Σεπτεμβρίου", "Οκτωβρίου", "Νοεμβρίου", "Δεκεμβρίου"}, + {"π.μ.", "μ.μ."}, +} + +var localeTableElpolyton = [5][]string{ + {"Κυρ", "Δευ", "Τρί", "Τετ", "Πέμ", "Παρ", "Σάβ"}, + {"Κυριακή", "Δευτέρα", "Τρίτη", "Τετάρτη", "Πέμπτη", "Παρασκευή", "Σάββατο"}, + {"Ιαν", "Φεβ", "Μαρ", "Απρ", "Μαΐ", "Ιουν", "Ιουλ", "Αὐγ", "Σεπ", "Ὀκτ", "Νοε", "Δεκ"}, + {"Ιανουαρίου", "Φεβρουαρίου", "Μαρτίου", "Απριλίου", "Μαΐου", "Ιουνίου", "Ιουλίου", "Αὐγούστου", "Σεπτεμβρίου", "Ὀκτωβρίου", "Νοεμβρίου", "Δεκεμβρίου"}, + {"π.μ.", "μ.μ."}, +} + +var localeTableEn = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"AM", "PM"}, +} + +var localeTableEn001 = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEn150 = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnAE = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"AM", "PM"}, +} + +var localeTableEnAG = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnAI = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", 
"Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnAS = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"AM", "PM"}, +} + +var localeTableEnAT = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnAU = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "June", "July", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnBB = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnBE = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnBI = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"AM", "PM"}, +} + +var localeTableEnBM = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnBS = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnBW = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", 
"Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnBZ = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnCA = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"a.m.", "p.m."}, +} + +var localeTableEnCC = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnCH = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnCK = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnCM = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnCX = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnCY = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + 
+var localeTableEnDE = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnDG = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnDK = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnDM = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnDsrt = [5][]string{ + {"𐐝𐐲𐑌", "𐐣𐐲𐑌", "𐐓𐐭𐑆", "𐐎𐐯𐑌", "𐐛𐐲𐑉", "𐐙𐑉𐐴", "𐐝𐐰𐐻"}, + {"𐐝𐐲𐑌𐐼𐐩", "𐐣𐐲𐑌𐐼𐐩", "𐐓𐐭𐑆𐐼𐐩", "𐐎𐐯𐑌𐑆𐐼𐐩", "𐐛𐐲𐑉𐑆𐐼𐐩", "𐐙𐑉𐐴𐐼𐐩", "𐐝𐐰𐐻𐐲𐑉𐐼𐐩"}, + {"𐐖𐐰𐑌", "𐐙𐐯𐐺", "𐐣𐐪𐑉", "𐐁𐐹𐑉", "𐐣𐐩", "𐐖𐐭𐑌", "𐐖𐐭𐑊", "𐐂𐑀", "𐐝𐐯𐐹", "𐐉𐐿𐐻", "𐐤𐐬𐑂", "𐐔𐐨𐑅"}, + {"𐐖𐐰𐑌𐐷𐐭𐐯𐑉𐐨", "𐐙𐐯𐐺𐑉𐐭𐐯𐑉𐐨", "𐐣𐐪𐑉𐐽", "𐐁𐐹𐑉𐐮𐑊", "𐐣𐐩", "𐐖𐐭𐑌", "𐐖𐐭𐑊𐐴", "𐐂𐑀𐐲𐑅𐐻", "𐐝𐐯𐐹𐐻𐐯𐑋𐐺𐐲𐑉", "𐐉𐐿𐐻𐐬𐐺𐐲𐑉", "𐐤𐐬𐑂𐐯𐑋𐐺𐐲𐑉", "𐐔𐐨𐑅𐐯𐑋𐐺𐐲𐑉"}, + {"𐐈𐐣", "𐐑𐐣"}, +} + +var localeTableEnDsrtUS = [5][]string{ + {"𐐝𐐲𐑌", "𐐣𐐲𐑌", "𐐓𐐭𐑆", "𐐎𐐯𐑌", "𐐛𐐲𐑉", "𐐙𐑉𐐴", "𐐝𐐰𐐻"}, + {"𐐝𐐲𐑌𐐼𐐩", "𐐣𐐲𐑌𐐼𐐩", "𐐓𐐭𐑆𐐼𐐩", "𐐎𐐯𐑌𐑆𐐼𐐩", "𐐛𐐲𐑉𐑆𐐼𐐩", "𐐙𐑉𐐴𐐼𐐩", "𐐝𐐰𐐻𐐲𐑉𐐼𐐩"}, + {"𐐖𐐰𐑌", "𐐙𐐯𐐺", "𐐣𐐪𐑉", "𐐁𐐹𐑉", "𐐣𐐩", "𐐖𐐭𐑌", "𐐖𐐭𐑊", "𐐂𐑀", "𐐝𐐯𐐹", "𐐉𐐿𐐻", "𐐤𐐬𐑂", "𐐔𐐨𐑅"}, + {"𐐖𐐰𐑌𐐷𐐭𐐯𐑉𐐨", "𐐙𐐯𐐺𐑉𐐭𐐯𐑉𐐨", "𐐣𐐪𐑉𐐽", "𐐁𐐹𐑉𐐮𐑊", "𐐣𐐩", "𐐖𐐭𐑌", "𐐖𐐭𐑊𐐴", "𐐂𐑀𐐲𐑅𐐻", "𐐝𐐯𐐹𐐻𐐯𐑋𐐺𐐲𐑉", "𐐉𐐿𐐻𐐬𐐺𐐲𐑉", "𐐤𐐬𐑂𐐯𐑋𐐺𐐲𐑉", "𐐔𐐨𐑅𐐯𐑋𐐺𐐲𐑉"}, + {"𐐈𐐣", "𐐑𐐣"}, +} + +var localeTableEnER = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnFI = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnFJ = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", 
"August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnFK = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnFM = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnGB = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnGD = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnGG = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnGH = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnGI = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnGM = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnGU = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", 
"Jul", "Aug", "Sep", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"AM", "PM"}, +} + +var localeTableEnGY = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnHK = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnID = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"AM", "PM"}, +} + +var localeTableEnIE = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnIL = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnIM = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnIN = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnIO = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnJE = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", 
"Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnJM = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnKE = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnKI = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnKN = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnKY = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnLC = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnLR = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnLS = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", 
"pm"}, +} + +var localeTableEnMG = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnMH = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"AM", "PM"}, +} + +var localeTableEnMO = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnMP = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"AM", "PM"}, +} + +var localeTableEnMS = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnMT = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnMU = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnMV = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"AM", "PM"}, +} + +var localeTableEnMW = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", 
"March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnMY = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnNA = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnNF = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnNG = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnNL = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnNR = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnNU = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnNZ = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnPG = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + 
{"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnPH = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnPK = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnPN = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnPR = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"AM", "PM"}, +} + +var localeTableEnPW = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnRW = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnSB = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnSC = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnSD = [5][]string{ + {"Sun", "Mon", "Tue", 
"Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnSE = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnSG = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnSH = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnSI = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnSL = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnSS = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnSX = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnSZ = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", 
"October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnShaw = [5][]string{ + {"·𐑕𐑭", "·𐑥𐑭", "·𐑑𐑵", "·𐑢𐑧", "·𐑔𐑻", "·𐑓𐑮", "·𐑕𐑨"}, + {"·𐑕𐑭𐑙𐑛𐑱", "·𐑥𐑭𐑙𐑛𐑱", "·𐑑𐑵𐑟𐑛𐑱", "·𐑢𐑧𐑙𐑟𐑛𐑱", "·𐑔𐑻𐑟𐑛𐑱", "·𐑓𐑮𐑲𐑛𐑱", "·𐑕𐑨𐑛𐑻𐑛𐑱"}, + {"·𐑡𐑨", "·𐑓𐑧", "·𐑥𐑸", "·𐑱𐑐", "·𐑥𐑱", "·𐑡𐑵", "·𐑡𐑫", "·𐑪𐑜", "·𐑕𐑧", "·𐑷𐑒", "·𐑯𐑴", "·𐑛𐑭"}, + {"·𐑡𐑨𐑙𐑘𐑭𐑢𐑺𐑰", "·𐑓𐑧𐑚𐑘𐑵𐑢𐑺𐑰", "·𐑥𐑸𐑗", "·𐑱𐑐𐑮𐑭𐑤", "·𐑥𐑱", "·𐑡𐑵𐑯", "·𐑡𐑫𐑤𐑲", "·𐑪𐑜𐑭𐑕𐑑", "·𐑕𐑧𐑐𐑑𐑧𐑥𐑚𐑸", "·𐑷𐑒𐑑𐑴𐑚𐑸", "·𐑯𐑴𐑝𐑧𐑥𐑚𐑸", "·𐑛𐑭𐑕𐑧𐑥𐑚𐑸"}, + {"𐑨𐑥", "𐑐𐑥"}, +} + +var localeTableEnShawGB = [5][]string{ + {"·𐑕𐑭", "·𐑥𐑭", "·𐑑𐑵", "·𐑢𐑧", "·𐑔𐑻", "·𐑓𐑮", "·𐑕𐑨"}, + {"·𐑕𐑭𐑙𐑛𐑱", "·𐑥𐑭𐑙𐑛𐑱", "·𐑑𐑵𐑟𐑛𐑱", "·𐑢𐑧𐑙𐑟𐑛𐑱", "·𐑔𐑻𐑟𐑛𐑱", "·𐑓𐑮𐑲𐑛𐑱", "·𐑕𐑨𐑛𐑻𐑛𐑱"}, + {"·𐑡𐑨", "·𐑓𐑧", "·𐑥𐑸", "·𐑱𐑐", "·𐑥𐑱", "·𐑡𐑵", "·𐑡𐑫", "·𐑪𐑜", "·𐑕𐑧", "·𐑷𐑒", "·𐑯𐑴", "·𐑛𐑭"}, + {"·𐑡𐑨𐑙𐑘𐑭𐑢𐑺𐑰", "·𐑓𐑧𐑚𐑘𐑵𐑢𐑺𐑰", "·𐑥𐑸𐑗", "·𐑱𐑐𐑮𐑭𐑤", "·𐑥𐑱", "·𐑡𐑵𐑯", "·𐑡𐑫𐑤𐑲", "·𐑪𐑜𐑭𐑕𐑑", "·𐑕𐑧𐑐𐑑𐑧𐑥𐑚𐑸", "·𐑷𐑒𐑑𐑴𐑚𐑸", "·𐑯𐑴𐑝𐑧𐑥𐑚𐑸", "·𐑛𐑭𐑕𐑧𐑥𐑚𐑸"}, + {"𐑨𐑥", "𐑐𐑥"}, +} + +var localeTableEnTC = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnTK = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnTO = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnTT = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnTV = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnTZ = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnUG = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", 
"Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnUM = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"AM", "PM"}, +} + +var localeTableEnUS = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"AM", "PM"}, +} + +var localeTableEnUSuvaposix = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"AM", "PM"}, +} + +var localeTableEnVC = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnVG = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnVI = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"AM", "PM"}, +} + +var localeTableEnVU = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnWS = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnZA = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", 
"Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnZM = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEnZW = [5][]string{ + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sept", "Oct", "Nov", "Dec"}, + {"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"}, + {"am", "pm"}, +} + +var localeTableEo = [5][]string{ + {"di", "lu", "ma", "me", "ĵa", "ve", "sa"}, + {"dimanĉo", "lundo", "mardo", "merkredo", "ĵaŭdo", "vendredo", "sabato"}, + {"Jan", "Feb", "Mar", "Apr", "Maj", "Jun", "Jul", "Aŭg", "Sep", "Okt", "Nov", "Dec"}, + {"Januaro", "Februaro", "Marto", "Aprilo", "Majo", "Junio", "Julio", "Aŭgusto", "Septembro", "Oktobro", "Novembro", "Decembro"}, + {"atm", "ptm"}, +} + +var localeTableEo001 = [5][]string{ + {"di", "lu", "ma", "me", "ĵa", "ve", "sa"}, + {"dimanĉo", "lundo", "mardo", "merkredo", "ĵaŭdo", "vendredo", "sabato"}, + {"Jan", "Feb", "Mar", "Apr", "Maj", "Jun", "Jul", "Aŭg", "Sep", "Okt", "Nov", "Dec"}, + {"Januaro", "Februaro", "Marto", "Aprilo", "Majo", "Junio", "Julio", "Aŭgusto", "Septembro", "Oktobro", "Novembro", "Decembro"}, + {"atm", "ptm"}, +} + +var localeTableEs = [5][]string{ + {"dom", "lun", "mar", "mié", "jue", "vie", "sáb"}, + {"domingo", "lunes", "martes", "miércoles", "jueves", "viernes", "sábado"}, + {"ene", "feb", "mar", "abr", "may", "jun", "jul", "ago", "sept", "oct", "nov", "dic"}, + {"enero", "febrero", "marzo", "abril", "mayo", "junio", "julio", "agosto", "septiembre", "octubre", "noviembre", "diciembre"}, + {"a.m.", "p.m."}, +} + +var localeTableEs419 = [5][]string{ + {"dom", "lun", "mar", "mié", "jue", "vie", "sáb"}, + {"domingo", "lunes", "martes", "miércoles", "jueves", "viernes", "sábado"}, + {"ene", "feb", "mar", "abr", "may", "jun", "jul", "ago", "sept", "oct", "nov", "dic"}, + {"enero", "febrero", "marzo", "abril", "mayo", "junio", "julio", "agosto", "septiembre", "octubre", "noviembre", "diciembre"}, + {"a.m.", "p.m."}, +} + +var localeTableEsAR = [5][]string{ + {"dom", "lun", "mar", "mié", "jue", "vie", "sáb"}, + {"domingo", "lunes", "martes", "miércoles", "jueves", "viernes", "sábado"}, + {"ene", "feb", "mar", "abr", "may", "jun", "jul", "ago", "sept", "oct", "nov", "dic"}, + {"enero", "febrero", "marzo", "abril", "mayo", "junio", "julio", "agosto", "septiembre", "octubre", "noviembre", "diciembre"}, + {"a.m.", "p.m."}, +} + +var localeTableEsBO = [5][]string{ + {"dom", "lun", "mar", "mié", "jue", "vie", "sáb"}, + {"domingo", "lunes", "martes", "miércoles", "jueves", "viernes", "sábado"}, + {"ene", "feb", "mar", "abr", "may", "jun", "jul", "ago", "sept", "oct", "nov", "dic"}, + {"enero", "febrero", "marzo", "abril", "mayo", "junio", "julio", "agosto", "septiembre", "octubre", "noviembre", "diciembre"}, + {"a.m.", "p.m."}, +} + +var 
localeTableEsBR = [5][]string{ + {"dom", "lun", "mar", "mié", "jue", "vie", "sáb"}, + {"domingo", "lunes", "martes", "miércoles", "jueves", "viernes", "sábado"}, + {"ene", "feb", "mar", "abr", "may", "jun", "jul", "ago", "sept", "oct", "nov", "dic"}, + {"enero", "febrero", "marzo", "abril", "mayo", "junio", "julio", "agosto", "septiembre", "octubre", "noviembre", "diciembre"}, + {"a.m.", "p.m."}, +} + +var localeTableEsBZ = [5][]string{ + {"dom", "lun", "mar", "mié", "jue", "vie", "sáb"}, + {"domingo", "lunes", "martes", "miércoles", "jueves", "viernes", "sábado"}, + {"ene", "feb", "mar", "abr", "may", "jun", "jul", "ago", "sept", "oct", "nov", "dic"}, + {"enero", "febrero", "marzo", "abril", "mayo", "junio", "julio", "agosto", "septiembre", "octubre", "noviembre", "diciembre"}, + {"a.m.", "p.m."}, +} + +var localeTableEsCL = [5][]string{ + {"dom", "lun", "mar", "mié", "jue", "vie", "sáb"}, + {"domingo", "lunes", "martes", "miércoles", "jueves", "viernes", "sábado"}, + {"ene", "feb", "mar", "abr", "may", "jun", "jul", "ago", "sept", "oct", "nov", "dic"}, + {"enero", "febrero", "marzo", "abril", "mayo", "junio", "julio", "agosto", "septiembre", "octubre", "noviembre", "diciembre"}, + {"a.m.", "p.m."}, +} + +var localeTableEsCO = [5][]string{ + {"dom", "lun", "mar", "mié", "jue", "vie", "sáb"}, + {"domingo", "lunes", "martes", "miércoles", "jueves", "viernes", "sábado"}, + {"ene", "feb", "mar", "abr", "may", "jun", "jul", "ago", "sept", "oct", "nov", "dic"}, + {"enero", "febrero", "marzo", "abril", "mayo", "junio", "julio", "agosto", "septiembre", "octubre", "noviembre", "diciembre"}, + {"a.m.", "p.m."}, +} + +var localeTableEsCR = [5][]string{ + {"dom", "lun", "mar", "mié", "jue", "vie", "sáb"}, + {"domingo", "lunes", "martes", "miércoles", "jueves", "viernes", "sábado"}, + {"ene", "feb", "mar", "abr", "may", "jun", "jul", "ago", "sept", "oct", "nov", "dic"}, + {"enero", "febrero", "marzo", "abril", "mayo", "junio", "julio", "agosto", "septiembre", "octubre", "noviembre", "diciembre"}, + {"a.m.", "p.m."}, +} + +var localeTableEsCU = [5][]string{ + {"dom", "lun", "mar", "mié", "jue", "vie", "sáb"}, + {"domingo", "lunes", "martes", "miércoles", "jueves", "viernes", "sábado"}, + {"ene", "feb", "mar", "abr", "may", "jun", "jul", "ago", "sept", "oct", "nov", "dic"}, + {"enero", "febrero", "marzo", "abril", "mayo", "junio", "julio", "agosto", "septiembre", "octubre", "noviembre", "diciembre"}, + {"a.m.", "p.m."}, +} + +var localeTableEsDO = [5][]string{ + {"dom", "lun", "mar", "mié", "jue", "vie", "sáb"}, + {"domingo", "lunes", "martes", "miércoles", "jueves", "viernes", "sábado"}, + {"ene", "feb", "mar", "abr", "may", "jun", "jul", "ago", "sept", "oct", "nov", "dic"}, + {"enero", "febrero", "marzo", "abril", "mayo", "junio", "julio", "agosto", "septiembre", "octubre", "noviembre", "diciembre"}, + {"a.m.", "p.m."}, +} + +var localeTableEsEA = [5][]string{ + {"dom", "lun", "mar", "mié", "jue", "vie", "sáb"}, + {"domingo", "lunes", "martes", "miércoles", "jueves", "viernes", "sábado"}, + {"ene", "feb", "mar", "abr", "may", "jun", "jul", "ago", "sept", "oct", "nov", "dic"}, + {"enero", "febrero", "marzo", "abril", "mayo", "junio", "julio", "agosto", "septiembre", "octubre", "noviembre", "diciembre"}, + {"a.m.", "p.m."}, +} + +var localeTableEsEC = [5][]string{ + {"dom", "lun", "mar", "mié", "jue", "vie", "sáb"}, + {"domingo", "lunes", "martes", "miércoles", "jueves", "viernes", "sábado"}, + {"ene", "feb", "mar", "abr", "may", "jun", "jul", "ago", "sept", "oct", "nov", "dic"}, + {"enero", "febrero", 
"marzo", "abril", "mayo", "junio", "julio", "agosto", "septiembre", "octubre", "noviembre", "diciembre"}, + {"a.m.", "p.m."}, +} + +var localeTableEsES = [5][]string{ + {"dom", "lun", "mar", "mié", "jue", "vie", "sáb"}, + {"domingo", "lunes", "martes", "miércoles", "jueves", "viernes", "sábado"}, + {"ene", "feb", "mar", "abr", "may", "jun", "jul", "ago", "sept", "oct", "nov", "dic"}, + {"enero", "febrero", "marzo", "abril", "mayo", "junio", "julio", "agosto", "septiembre", "octubre", "noviembre", "diciembre"}, + {"a.m.", "p.m."}, +} + +var localeTableEsGQ = [5][]string{ + {"dom", "lun", "mar", "mié", "jue", "vie", "sáb"}, + {"domingo", "lunes", "martes", "miércoles", "jueves", "viernes", "sábado"}, + {"ene", "feb", "mar", "abr", "may", "jun", "jul", "ago", "sept", "oct", "nov", "dic"}, + {"enero", "febrero", "marzo", "abril", "mayo", "junio", "julio", "agosto", "septiembre", "octubre", "noviembre", "diciembre"}, + {"a.m.", "p.m."}, +} + +var localeTableEsGT = [5][]string{ + {"dom", "lun", "mar", "mié", "jue", "vie", "sáb"}, + {"domingo", "lunes", "martes", "miércoles", "jueves", "viernes", "sábado"}, + {"ene", "feb", "mar", "abr", "may", "jun", "jul", "ago", "sept", "oct", "nov", "dic"}, + {"enero", "febrero", "marzo", "abril", "mayo", "junio", "julio", "agosto", "septiembre", "octubre", "noviembre", "diciembre"}, + {"a.m.", "p.m."}, +} + +var localeTableEsHN = [5][]string{ + {"dom", "lun", "mar", "mié", "jue", "vie", "sáb"}, + {"domingo", "lunes", "martes", "miércoles", "jueves", "viernes", "sábado"}, + {"ene", "feb", "mar", "abr", "may", "jun", "jul", "ago", "sept", "oct", "nov", "dic"}, + {"enero", "febrero", "marzo", "abril", "mayo", "junio", "julio", "agosto", "septiembre", "octubre", "noviembre", "diciembre"}, + {"a.m.", "p.m."}, +} + +var localeTableEsIC = [5][]string{ + {"dom", "lun", "mar", "mié", "jue", "vie", "sáb"}, + {"domingo", "lunes", "martes", "miércoles", "jueves", "viernes", "sábado"}, + {"ene", "feb", "mar", "abr", "may", "jun", "jul", "ago", "sept", "oct", "nov", "dic"}, + {"enero", "febrero", "marzo", "abril", "mayo", "junio", "julio", "agosto", "septiembre", "octubre", "noviembre", "diciembre"}, + {"a.m.", "p.m."}, +} + +var localeTableEsMX = [5][]string{ + {"dom", "lun", "mar", "mié", "jue", "vie", "sáb"}, + {"domingo", "lunes", "martes", "miércoles", "jueves", "viernes", "sábado"}, + {"ene", "feb", "mar", "abr", "may", "jun", "jul", "ago", "sep", "oct", "nov", "dic"}, + {"enero", "febrero", "marzo", "abril", "mayo", "junio", "julio", "agosto", "septiembre", "octubre", "noviembre", "diciembre"}, + {"a.m.", "p.m."}, +} + +var localeTableEsNI = [5][]string{ + {"dom", "lun", "mar", "mié", "jue", "vie", "sáb"}, + {"domingo", "lunes", "martes", "miércoles", "jueves", "viernes", "sábado"}, + {"ene", "feb", "mar", "abr", "may", "jun", "jul", "ago", "sept", "oct", "nov", "dic"}, + {"enero", "febrero", "marzo", "abril", "mayo", "junio", "julio", "agosto", "septiembre", "octubre", "noviembre", "diciembre"}, + {"a.m.", "p.m."}, +} + +var localeTableEsPA = [5][]string{ + {"dom", "lun", "mar", "mié", "jue", "vie", "sáb"}, + {"domingo", "lunes", "martes", "miércoles", "jueves", "viernes", "sábado"}, + {"ene", "feb", "mar", "abr", "may", "jun", "jul", "ago", "sept", "oct", "nov", "dic"}, + {"enero", "febrero", "marzo", "abril", "mayo", "junio", "julio", "agosto", "septiembre", "octubre", "noviembre", "diciembre"}, + {"a.m.", "p.m."}, +} + +var localeTableEsPE = [5][]string{ + {"dom", "lun", "mar", "mié", "jue", "vie", "sáb"}, + {"domingo", "lunes", "martes", "miércoles", "jueves", 
"viernes", "sábado"}, + {"ene.", "feb.", "mar.", "abr.", "may.", "jun.", "jul.", "ago.", "set.", "oct.", "nov.", "dic."}, + {"enero", "febrero", "marzo", "abril", "mayo", "junio", "julio", "agosto", "setiembre", "octubre", "noviembre", "diciembre"}, + {"a.m.", "p.m."}, +} + +var localeTableEsPH = [5][]string{ + {"dom", "lun", "mar", "mié", "jue", "vie", "sáb"}, + {"domingo", "lunes", "martes", "miércoles", "jueves", "viernes", "sábado"}, + {"ene", "feb", "mar", "abr", "may", "jun", "jul", "ago", "sept", "oct", "nov", "dic"}, + {"enero", "febrero", "marzo", "abril", "mayo", "junio", "julio", "agosto", "septiembre", "octubre", "noviembre", "diciembre"}, + {"a.m.", "p.m."}, +} + +var localeTableEsPR = [5][]string{ + {"dom", "lun", "mar", "mié", "jue", "vie", "sáb"}, + {"domingo", "lunes", "martes", "miércoles", "jueves", "viernes", "sábado"}, + {"ene", "feb", "mar", "abr", "may", "jun", "jul", "ago", "sept", "oct", "nov", "dic"}, + {"enero", "febrero", "marzo", "abril", "mayo", "junio", "julio", "agosto", "septiembre", "octubre", "noviembre", "diciembre"}, + {"a.m.", "p.m."}, +} + +var localeTableEsPY = [5][]string{ + {"dom", "lun", "mar", "mié", "jue", "vie", "sáb"}, + {"domingo", "lunes", "martes", "miércoles", "jueves", "viernes", "sábado"}, + {"ene.", "feb.", "mar.", "abr.", "may.", "jun.", "jul.", "ago.", "sept.", "oct.", "nov.", "dic."}, + {"enero", "febrero", "marzo", "abril", "mayo", "junio", "julio", "agosto", "septiembre", "octubre", "noviembre", "diciembre"}, + {"a.m.", "p.m."}, +} + +var localeTableEsSV = [5][]string{ + {"dom", "lun", "mar", "mié", "jue", "vie", "sáb"}, + {"domingo", "lunes", "martes", "miércoles", "jueves", "viernes", "sábado"}, + {"ene", "feb", "mar", "abr", "may", "jun", "jul", "ago", "sept", "oct", "nov", "dic"}, + {"enero", "febrero", "marzo", "abril", "mayo", "junio", "julio", "agosto", "septiembre", "octubre", "noviembre", "diciembre"}, + {"a.m.", "p.m."}, +} + +var localeTableEsUS = [5][]string{ + {"dom", "lun", "mar", "mié", "jue", "vie", "sáb"}, + {"domingo", "lunes", "martes", "miércoles", "jueves", "viernes", "sábado"}, + {"ene", "feb", "mar", "abr", "may", "jun", "jul", "ago", "sept", "oct", "nov", "dic"}, + {"enero", "febrero", "marzo", "abril", "mayo", "junio", "julio", "agosto", "septiembre", "octubre", "noviembre", "diciembre"}, + {"a.m.", "p.m."}, +} + +var localeTableEsUY = [5][]string{ + {"dom", "lun", "mar", "mié", "jue", "vie", "sáb"}, + {"domingo", "lunes", "martes", "miércoles", "jueves", "viernes", "sábado"}, + {"ene.", "feb.", "mar.", "abr.", "may.", "jun.", "jul.", "ago.", "set.", "oct.", "nov.", "dic."}, + {"enero", "febrero", "marzo", "abril", "mayo", "junio", "julio", "agosto", "setiembre", "octubre", "noviembre", "diciembre"}, + {"a.m.", "p.m."}, +} + +var localeTableEsVE = [5][]string{ + {"dom", "lun", "mar", "mié", "jue", "vie", "sáb"}, + {"domingo", "lunes", "martes", "miércoles", "jueves", "viernes", "sábado"}, + {"ene.", "feb.", "mar.", "abr.", "may.", "jun.", "jul.", "ago.", "sept.", "oct.", "nov.", "dic."}, + {"enero", "febrero", "marzo", "abril", "mayo", "junio", "julio", "agosto", "septiembre", "octubre", "noviembre", "diciembre"}, + {"a.m.", "p.m."}, +} + +var localeTableEt = [5][]string{ + {"P", "E", "T", "K", "N", "R", "L"}, + {"pühapäev", "esmaspäev", "teisipäev", "kolmapäev", "neljapäev", "reede", "laupäev"}, + {"jaan", "veebr", "märts", "apr", "mai", "juuni", "juuli", "aug", "sept", "okt", "nov", "dets"}, + {"jaanuar", "veebruar", "märts", "aprill", "mai", "juuni", "juuli", "august", "september", "oktoober", 
"november", "detsember"}, + {}, +} + +var localeTableEtEE = [5][]string{ + {"P", "E", "T", "K", "N", "R", "L"}, + {"pühapäev", "esmaspäev", "teisipäev", "kolmapäev", "neljapäev", "reede", "laupäev"}, + {"jaan", "veebr", "märts", "apr", "mai", "juuni", "juuli", "aug", "sept", "okt", "nov", "dets"}, + {"jaanuar", "veebruar", "märts", "aprill", "mai", "juuni", "juuli", "august", "september", "oktoober", "november", "detsember"}, + {}, +} + +var localeTableEu = [5][]string{ + {"ig.", "al.", "ar.", "az.", "og.", "or.", "lr."}, + {"igandea", "astelehena", "asteartea", "asteazkena", "osteguna", "ostirala", "larunbata"}, + {"urt.", "ots.", "mar.", "api.", "mai.", "eka.", "uzt.", "abu.", "ira.", "urr.", "aza.", "abe."}, + {"urtarrila", "otsaila", "martxoa", "apirila", "maiatza", "ekaina", "uztaila", "abuztua", "iraila", "urria", "azaroa", "abendua"}, + {"g", "a"}, +} + +var localeTableEuES = [5][]string{ + {"ig.", "al.", "ar.", "az.", "og.", "or.", "lr."}, + {"igandea", "astelehena", "asteartea", "asteazkena", "osteguna", "ostirala", "larunbata"}, + {"urt.", "ots.", "mar.", "api.", "mai.", "eka.", "uzt.", "abu.", "ira.", "urr.", "aza.", "abe."}, + {"urtarrila", "otsaila", "martxoa", "apirila", "maiatza", "ekaina", "uztaila", "abuztua", "iraila", "urria", "azaroa", "abendua"}, + {"g", "a"}, +} + +var localeTableEwo = [5][]string{ + {"sɔ́n", "mɔ́n", "smb", "sml", "smn", "fúl", "sér"}, + {"sɔ́ndɔ", "mɔ́ndi", "sɔ́ndɔ məlú mə́bɛ̌", "sɔ́ndɔ məlú mə́lɛ́", "sɔ́ndɔ məlú mə́nyi", "fúladé", "séradé"}, + {"ngo", "ngb", "ngl", "ngn", "ngt", "ngs", "ngz", "ngm", "nge", "nga", "ngad", "ngab"}, + {"ngɔn osú", "ngɔn bɛ̌", "ngɔn lála", "ngɔn nyina", "ngɔn tána", "ngɔn saməna", "ngɔn zamgbála", "ngɔn mwom", "ngɔn ebulú", "ngɔn awóm", "ngɔn awóm ai dziá", "ngɔn awóm ai bɛ̌"}, + {"kíkíríg", "ngəgógəle"}, +} + +var localeTableEwoCM = [5][]string{ + {"sɔ́n", "mɔ́n", "smb", "sml", "smn", "fúl", "sér"}, + {"sɔ́ndɔ", "mɔ́ndi", "sɔ́ndɔ məlú mə́bɛ̌", "sɔ́ndɔ məlú mə́lɛ́", "sɔ́ndɔ məlú mə́nyi", "fúladé", "séradé"}, + {"ngo", "ngb", "ngl", "ngn", "ngt", "ngs", "ngz", "ngm", "nge", "nga", "ngad", "ngab"}, + {"ngɔn osú", "ngɔn bɛ̌", "ngɔn lála", "ngɔn nyina", "ngɔn tána", "ngɔn saməna", "ngɔn zamgbála", "ngɔn mwom", "ngɔn ebulú", "ngɔn awóm", "ngɔn awóm ai dziá", "ngɔn awóm ai bɛ̌"}, + {"kíkíríg", "ngəgógəle"}, +} + +var localeTableFa = [5][]string{ + {}, + {"یکشنبه", "دوشنبه", "سه‌شنبه", "چهارشنبه", "پنجشنبه", "جمعه", "شنبه"}, + {"ژانویه", "فوریه", "مارس", "آوریل", "مه", "ژوئن", "ژوئیه", "اوت", "سپتامبر", "اکتبر", "نوامبر", "دسامبر"}, + {"ژانویهٔ", "فوریهٔ", "مارس", "آوریل", "مهٔ", "ژوئن", "ژوئیهٔ", "اوت", "سپتامبر", "اکتبر", "نوامبر", "دسامبر"}, + {"ق.ظ.", "ب.ظ."}, +} + +var localeTableFaAF = [5][]string{ + {}, + {"یکشنبه", "دوشنبه", "سه‌شنبه", "چهارشنبه", "پنجشنبه", "جمعه", "شنبه"}, + {"جنو", "فبروری", "مارچ", "اپریل", "می", "جون", "جول", "اگست", "سپتمبر", "اکتوبر", "نومبر", "دسم"}, + {"جنوری", "فبروری", "مارچ", "اپریل", "می", "جون", "جولای", "اگست", "سپتمبر", "اکتوبر", "نومبر", "دسمبر"}, + {"ق.ظ.", "ب.ظ."}, +} + +var localeTableFaIR = [5][]string{ + {}, + {"یکشنبه", "دوشنبه", "سه‌شنبه", "چهارشنبه", "پنجشنبه", "جمعه", "شنبه"}, + {"ژانویه", "فوریه", "مارس", "آوریل", "مه", "ژوئن", "ژوئیه", "اوت", "سپتامبر", "اکتبر", "نوامبر", "دسامبر"}, + {"ژانویهٔ", "فوریهٔ", "مارس", "آوریل", "مهٔ", "ژوئن", "ژوئیهٔ", "اوت", "سپتامبر", "اکتبر", "نوامبر", "دسامبر"}, + {"ق.ظ.", "ب.ظ."}, +} + +var localeTableFf = [5][]string{ + {"dew", "aaɓ", "maw", "nje", "naa", "mwd", "hbi"}, + {"dewo", "aaɓnde", "mawbaare", "njeslaare", "naasaande", "mawnde", 
"hoore-biir"}, + {"sii", "col", "mbo", "see", "duu", "kor", "mor", "juk", "slt", "yar", "jol", "bow"}, + {"siilo", "colte", "mbooy", "seeɗto", "duujal", "korse", "morso", "juko", "siilto", "yarkomaa", "jolal", "bowte"}, + {"subaka", "kikiiɗe"}, +} + +var localeTableFfAdlm = [5][]string{ + {"𞤈𞤫𞤬", "𞤀𞥄𞤩𞤵", "𞤃𞤢𞤦", "𞤔𞤫𞤧", "𞤐𞤢𞥄𞤧", "𞤃𞤢𞤣", "𞤖𞤮𞤪"}, + {"𞤈𞤫𞤬𞤦𞤭𞤪𞥆𞤫", "𞤀𞥄𞤩𞤵𞤲𞥋𞤣𞤫", "𞤃𞤢𞤱𞤦𞤢𞥄𞤪𞤫", "𞤐𞤶𞤫𞤧𞤤𞤢𞥄𞤪𞤫", "𞤐𞤢𞥄𞤧𞤢𞥄𞤲𞤣𞤫", "𞤃𞤢𞤱𞤲𞤣𞤫", "𞤖𞤮𞤪𞤦𞤭𞤪𞥆𞤫"}, + {}, + {"𞤅𞤭𞥅𞤤𞤮", "𞤕𞤮𞤤𞤼𞤮", "𞤐𞤦𞤮𞥅𞤴𞤮", "𞤅𞤫𞥅𞤼𞤮", "𞤁𞤵𞥅𞤶𞤮", "𞤑𞤮𞤪𞤧𞤮", "𞤃𞤮𞤪𞤧𞤮", "𞤔𞤵𞤳𞤮", "𞤅𞤭𞤤𞤼𞤮", "𞤒𞤢𞤪𞤳𞤮", "𞤔𞤮𞤤𞤮", "𞤄𞤮𞤱𞤼𞤮"}, + {"𞤀𞤎", "𞤇𞤎"}, +} + +var localeTableFfAdlmBF = [5][]string{ + {"𞤈𞤫𞤬", "𞤀𞥄𞤩𞤵", "𞤃𞤢𞤦", "𞤔𞤫𞤧", "𞤐𞤢𞥄𞤧", "𞤃𞤢𞤣", "𞤖𞤮𞤪"}, + {"𞤈𞤫𞤬𞤦𞤭𞤪𞥆𞤫", "𞤀𞥄𞤩𞤵𞤲𞥋𞤣𞤫", "𞤃𞤢𞤱𞤦𞤢𞥄𞤪𞤫", "𞤐𞤶𞤫𞤧𞤤𞤢𞥄𞤪𞤫", "𞤐𞤢𞥄𞤧𞤢𞥄𞤲𞤣𞤫", "𞤃𞤢𞤱𞤲𞤣𞤫", "𞤖𞤮𞤪𞤦𞤭𞤪𞥆𞤫"}, + {}, + {"𞤅𞤭𞥅𞤤𞤮", "𞤕𞤮𞤤𞤼𞤮", "𞤐𞤦𞤮𞥅𞤴𞤮", "𞤅𞤫𞥅𞤼𞤮", "𞤁𞤵𞥅𞤶𞤮", "𞤑𞤮𞤪𞤧𞤮", "𞤃𞤮𞤪𞤧𞤮", "𞤔𞤵𞤳𞤮", "𞤅𞤭𞤤𞤼𞤮", "𞤒𞤢𞤪𞤳𞤮", "𞤔𞤮𞤤𞤮", "𞤄𞤮𞤱𞤼𞤮"}, + {"𞤀𞤎", "𞤇𞤎"}, +} + +var localeTableFfAdlmCM = [5][]string{ + {"𞤈𞤫𞤬", "𞤀𞥄𞤩𞤵", "𞤃𞤢𞤦", "𞤔𞤫𞤧", "𞤐𞤢𞥄𞤧", "𞤃𞤢𞤣", "𞤖𞤮𞤪"}, + {"𞤈𞤫𞤬𞤦𞤭𞤪𞥆𞤫", "𞤀𞥄𞤩𞤵𞤲𞥋𞤣𞤫", "𞤃𞤢𞤱𞤦𞤢𞥄𞤪𞤫", "𞤐𞤶𞤫𞤧𞤤𞤢𞥄𞤪𞤫", "𞤐𞤢𞥄𞤧𞤢𞥄𞤲𞤣𞤫", "𞤃𞤢𞤱𞤲𞤣𞤫", "𞤖𞤮𞤪𞤦𞤭𞤪𞥆𞤫"}, + {}, + {"𞤅𞤭𞥅𞤤𞤮", "𞤕𞤮𞤤𞤼𞤮", "𞤐𞤦𞤮𞥅𞤴𞤮", "𞤅𞤫𞥅𞤼𞤮", "𞤁𞤵𞥅𞤶𞤮", "𞤑𞤮𞤪𞤧𞤮", "𞤃𞤮𞤪𞤧𞤮", "𞤔𞤵𞤳𞤮", "𞤅𞤭𞤤𞤼𞤮", "𞤒𞤢𞤪𞤳𞤮", "𞤔𞤮𞤤𞤮", "𞤄𞤮𞤱𞤼𞤮"}, + {"𞤀𞤎", "𞤇𞤎"}, +} + +var localeTableFfAdlmGH = [5][]string{ + {"𞤈𞤫𞤬", "𞤀𞥄𞤩𞤵", "𞤃𞤢𞤦", "𞤔𞤫𞤧", "𞤐𞤢𞥄𞤧", "𞤃𞤢𞤣", "𞤖𞤮𞤪"}, + {"𞤈𞤫𞤬𞤦𞤭𞤪𞥆𞤫", "𞤀𞥄𞤩𞤵𞤲𞥋𞤣𞤫", "𞤃𞤢𞤱𞤦𞤢𞥄𞤪𞤫", "𞤐𞤶𞤫𞤧𞤤𞤢𞥄𞤪𞤫", "𞤐𞤢𞥄𞤧𞤢𞥄𞤲𞤣𞤫", "𞤃𞤢𞤱𞤲𞤣𞤫", "𞤖𞤮𞤪𞤦𞤭𞤪𞥆𞤫"}, + {}, + {"𞤅𞤭𞥅𞤤𞤮", "𞤕𞤮𞤤𞤼𞤮", "𞤐𞤦𞤮𞥅𞤴𞤮", "𞤅𞤫𞥅𞤼𞤮", "𞤁𞤵𞥅𞤶𞤮", "𞤑𞤮𞤪𞤧𞤮", "𞤃𞤮𞤪𞤧𞤮", "𞤔𞤵𞤳𞤮", "𞤅𞤭𞤤𞤼𞤮", "𞤒𞤢𞤪𞤳𞤮", "𞤔𞤮𞤤𞤮", "𞤄𞤮𞤱𞤼𞤮"}, + {"𞤀𞤎", "𞤇𞤎"}, +} + +var localeTableFfAdlmGM = [5][]string{ + {"𞤈𞤫𞤬", "𞤀𞥄𞤩𞤵", "𞤃𞤢𞤦", "𞤔𞤫𞤧", "𞤐𞤢𞥄𞤧", "𞤃𞤢𞤣", "𞤖𞤮𞤪"}, + {"𞤈𞤫𞤬𞤦𞤭𞤪𞥆𞤫", "𞤀𞥄𞤩𞤵𞤲𞥋𞤣𞤫", "𞤃𞤢𞤱𞤦𞤢𞥄𞤪𞤫", "𞤐𞤶𞤫𞤧𞤤𞤢𞥄𞤪𞤫", "𞤐𞤢𞥄𞤧𞤢𞥄𞤲𞤣𞤫", "𞤃𞤢𞤱𞤲𞤣𞤫", "𞤖𞤮𞤪𞤦𞤭𞤪𞥆𞤫"}, + {}, + {"𞤅𞤭𞥅𞤤𞤮", "𞤕𞤮𞤤𞤼𞤮", "𞤐𞤦𞤮𞥅𞤴𞤮", "𞤅𞤫𞥅𞤼𞤮", "𞤁𞤵𞥅𞤶𞤮", "𞤑𞤮𞤪𞤧𞤮", "𞤃𞤮𞤪𞤧𞤮", "𞤔𞤵𞤳𞤮", "𞤅𞤭𞤤𞤼𞤮", "𞤒𞤢𞤪𞤳𞤮", "𞤔𞤮𞤤𞤮", "𞤄𞤮𞤱𞤼𞤮"}, + {"𞤀𞤎", "𞤇𞤎"}, +} + +var localeTableFfAdlmGN = [5][]string{ + {"𞤈𞤫𞤬", "𞤀𞥄𞤩𞤵", "𞤃𞤢𞤦", "𞤔𞤫𞤧", "𞤐𞤢𞥄𞤧", "𞤃𞤢𞤣", "𞤖𞤮𞤪"}, + {"𞤈𞤫𞤬𞤦𞤭𞤪𞥆𞤫", "𞤀𞥄𞤩𞤵𞤲𞥋𞤣𞤫", "𞤃𞤢𞤱𞤦𞤢𞥄𞤪𞤫", "𞤐𞤶𞤫𞤧𞤤𞤢𞥄𞤪𞤫", "𞤐𞤢𞥄𞤧𞤢𞥄𞤲𞤣𞤫", "𞤃𞤢𞤱𞤲𞤣𞤫", "𞤖𞤮𞤪𞤦𞤭𞤪𞥆𞤫"}, + {}, + {"𞤅𞤭𞥅𞤤𞤮", "𞤕𞤮𞤤𞤼𞤮", "𞤐𞤦𞤮𞥅𞤴𞤮", "𞤅𞤫𞥅𞤼𞤮", "𞤁𞤵𞥅𞤶𞤮", "𞤑𞤮𞤪𞤧𞤮", "𞤃𞤮𞤪𞤧𞤮", "𞤔𞤵𞤳𞤮", "𞤅𞤭𞤤𞤼𞤮", "𞤒𞤢𞤪𞤳𞤮", "𞤔𞤮𞤤𞤮", "𞤄𞤮𞤱𞤼𞤮"}, + {"𞤀𞤎", "𞤇𞤎"}, +} + +var localeTableFfAdlmGW = [5][]string{ + {"𞤈𞤫𞤬", "𞤀𞥄𞤩𞤵", "𞤃𞤢𞤦", "𞤔𞤫𞤧", "𞤐𞤢𞥄𞤧", "𞤃𞤢𞤣", "𞤖𞤮𞤪"}, + {"𞤈𞤫𞤬𞤦𞤭𞤪𞥆𞤫", "𞤀𞥄𞤩𞤵𞤲𞥋𞤣𞤫", "𞤃𞤢𞤱𞤦𞤢𞥄𞤪𞤫", "𞤐𞤶𞤫𞤧𞤤𞤢𞥄𞤪𞤫", "𞤐𞤢𞥄𞤧𞤢𞥄𞤲𞤣𞤫", "𞤃𞤢𞤱𞤲𞤣𞤫", "𞤖𞤮𞤪𞤦𞤭𞤪𞥆𞤫"}, + {}, + {"𞤅𞤭𞥅𞤤𞤮", "𞤕𞤮𞤤𞤼𞤮", "𞤐𞤦𞤮𞥅𞤴𞤮", "𞤅𞤫𞥅𞤼𞤮", "𞤁𞤵𞥅𞤶𞤮", "𞤑𞤮𞤪𞤧𞤮", "𞤃𞤮𞤪𞤧𞤮", "𞤔𞤵𞤳𞤮", "𞤅𞤭𞤤𞤼𞤮", "𞤒𞤢𞤪𞤳𞤮", "𞤔𞤮𞤤𞤮", "𞤄𞤮𞤱𞤼𞤮"}, + {"𞤀𞤎", "𞤇𞤎"}, +} + +var localeTableFfAdlmLR = [5][]string{ + {"𞤈𞤫𞤬", "𞤀𞥄𞤩𞤵", "𞤃𞤢𞤦", "𞤔𞤫𞤧", "𞤐𞤢𞥄𞤧", "𞤃𞤢𞤣", "𞤖𞤮𞤪"}, + {"𞤈𞤫𞤬𞤦𞤭𞤪𞥆𞤫", "𞤀𞥄𞤩𞤵𞤲𞥋𞤣𞤫", "𞤃𞤢𞤱𞤦𞤢𞥄𞤪𞤫", "𞤐𞤶𞤫𞤧𞤤𞤢𞥄𞤪𞤫", "𞤐𞤢𞥄𞤧𞤢𞥄𞤲𞤣𞤫", "𞤃𞤢𞤱𞤲𞤣𞤫", "𞤖𞤮𞤪𞤦𞤭𞤪𞥆𞤫"}, + {}, + {"𞤅𞤭𞥅𞤤𞤮", "𞤕𞤮𞤤𞤼𞤮", "𞤐𞤦𞤮𞥅𞤴𞤮", "𞤅𞤫𞥅𞤼𞤮", "𞤁𞤵𞥅𞤶𞤮", "𞤑𞤮𞤪𞤧𞤮", "𞤃𞤮𞤪𞤧𞤮", "𞤔𞤵𞤳𞤮", "𞤅𞤭𞤤𞤼𞤮", "𞤒𞤢𞤪𞤳𞤮", "𞤔𞤮𞤤𞤮", "𞤄𞤮𞤱𞤼𞤮"}, + {"𞤀𞤎", "𞤇𞤎"}, +} + +var localeTableFfAdlmMR = [5][]string{ + {"𞤈𞤫𞤬", "𞤀𞥄𞤩𞤵", "𞤃𞤢𞤦", "𞤔𞤫𞤧", "𞤐𞤢𞥄𞤧", "𞤃𞤢𞤣", "𞤖𞤮𞤪"}, + {"𞤈𞤫𞤬𞤦𞤭𞤪𞥆𞤫", "𞤀𞥄𞤩𞤵𞤲𞥋𞤣𞤫", "𞤃𞤢𞤱𞤦𞤢𞥄𞤪𞤫", "𞤐𞤶𞤫𞤧𞤤𞤢𞥄𞤪𞤫", "𞤐𞤢𞥄𞤧𞤢𞥄𞤲𞤣𞤫", "𞤃𞤢𞤱𞤲𞤣𞤫", "𞤖𞤮𞤪𞤦𞤭𞤪𞥆𞤫"}, + {}, + {"𞤅𞤭𞥅𞤤𞤮", "𞤕𞤮𞤤𞤼𞤮", "𞤐𞤦𞤮𞥅𞤴𞤮", "𞤅𞤫𞥅𞤼𞤮", "𞤁𞤵𞥅𞤶𞤮", "𞤑𞤮𞤪𞤧𞤮", "𞤃𞤮𞤪𞤧𞤮", "𞤔𞤵𞤳𞤮", "𞤅𞤭𞤤𞤼𞤮", "𞤒𞤢𞤪𞤳𞤮", "𞤔𞤮𞤤𞤮", "𞤄𞤮𞤱𞤼𞤮"}, + {"𞤀𞤎", "𞤇𞤎"}, +} + +var localeTableFfAdlmNE = [5][]string{ + {"𞤈𞤫𞤬", "𞤀𞥄𞤩𞤵", "𞤃𞤢𞤦", "𞤔𞤫𞤧", "𞤐𞤢𞥄𞤧", "𞤃𞤢𞤣", "𞤖𞤮𞤪"}, + {"𞤈𞤫𞤬𞤦𞤭𞤪𞥆𞤫", "𞤀𞥄𞤩𞤵𞤲𞥋𞤣𞤫", "𞤃𞤢𞤱𞤦𞤢𞥄𞤪𞤫", "𞤐𞤶𞤫𞤧𞤤𞤢𞥄𞤪𞤫", "𞤐𞤢𞥄𞤧𞤢𞥄𞤲𞤣𞤫", "𞤃𞤢𞤱𞤲𞤣𞤫", "𞤖𞤮𞤪𞤦𞤭𞤪𞥆𞤫"}, + {}, + {"𞤅𞤭𞥅𞤤𞤮", "𞤕𞤮𞤤𞤼𞤮", "𞤐𞤦𞤮𞥅𞤴𞤮", "𞤅𞤫𞥅𞤼𞤮", "𞤁𞤵𞥅𞤶𞤮", "𞤑𞤮𞤪𞤧𞤮", "𞤃𞤮𞤪𞤧𞤮", "𞤔𞤵𞤳𞤮", "𞤅𞤭𞤤𞤼𞤮", "𞤒𞤢𞤪𞤳𞤮", "𞤔𞤮𞤤𞤮", "𞤄𞤮𞤱𞤼𞤮"}, + {"𞤀𞤎", "𞤇𞤎"}, +} + +var localeTableFfAdlmNG = [5][]string{ + {"𞤈𞤫𞤬", "𞤀𞥄𞤩𞤵", "𞤃𞤢𞤦", "𞤔𞤫𞤧", "𞤐𞤢𞥄𞤧", "𞤃𞤢𞤣", "𞤖𞤮𞤪"}, + 
{"𞤈𞤫𞤬𞤦𞤭𞤪𞥆𞤫", "𞤀𞥄𞤩𞤵𞤲𞥋𞤣𞤫", "𞤃𞤢𞤱𞤦𞤢𞥄𞤪𞤫", "𞤐𞤶𞤫𞤧𞤤𞤢𞥄𞤪𞤫", "𞤐𞤢𞥄𞤧𞤢𞥄𞤲𞤣𞤫", "𞤃𞤢𞤱𞤲𞤣𞤫", "𞤖𞤮𞤪𞤦𞤭𞤪𞥆𞤫"}, + {}, + {"𞤅𞤭𞥅𞤤𞤮", "𞤕𞤮𞤤𞤼𞤮", "𞤐𞤦𞤮𞥅𞤴𞤮", "𞤅𞤫𞥅𞤼𞤮", "𞤁𞤵𞥅𞤶𞤮", "𞤑𞤮𞤪𞤧𞤮", "𞤃𞤮𞤪𞤧𞤮", "𞤔𞤵𞤳𞤮", "𞤅𞤭𞤤𞤼𞤮", "𞤒𞤢𞤪𞤳𞤮", "𞤔𞤮𞤤𞤮", "𞤄𞤮𞤱𞤼𞤮"}, + {"𞤀𞤎", "𞤇𞤎"}, +} + +var localeTableFfAdlmSL = [5][]string{ + {"𞤈𞤫𞤬", "𞤀𞥄𞤩𞤵", "𞤃𞤢𞤦", "𞤔𞤫𞤧", "𞤐𞤢𞥄𞤧", "𞤃𞤢𞤣", "𞤖𞤮𞤪"}, + {"𞤈𞤫𞤬𞤦𞤭𞤪𞥆𞤫", "𞤀𞥄𞤩𞤵𞤲𞥋𞤣𞤫", "𞤃𞤢𞤱𞤦𞤢𞥄𞤪𞤫", "𞤐𞤶𞤫𞤧𞤤𞤢𞥄𞤪𞤫", "𞤐𞤢𞥄𞤧𞤢𞥄𞤲𞤣𞤫", "𞤃𞤢𞤱𞤲𞤣𞤫", "𞤖𞤮𞤪𞤦𞤭𞤪𞥆𞤫"}, + {}, + {"𞤅𞤭𞥅𞤤𞤮", "𞤕𞤮𞤤𞤼𞤮", "𞤐𞤦𞤮𞥅𞤴𞤮", "𞤅𞤫𞥅𞤼𞤮", "𞤁𞤵𞥅𞤶𞤮", "𞤑𞤮𞤪𞤧𞤮", "𞤃𞤮𞤪𞤧𞤮", "𞤔𞤵𞤳𞤮", "𞤅𞤭𞤤𞤼𞤮", "𞤒𞤢𞤪𞤳𞤮", "𞤔𞤮𞤤𞤮", "𞤄𞤮𞤱𞤼𞤮"}, + {"𞤀𞤎", "𞤇𞤎"}, +} + +var localeTableFfAdlmSN = [5][]string{ + {"𞤈𞤫𞤬", "𞤀𞥄𞤩𞤵", "𞤃𞤢𞤦", "𞤔𞤫𞤧", "𞤐𞤢𞥄𞤧", "𞤃𞤢𞤣", "𞤖𞤮𞤪"}, + {"𞤈𞤫𞤬𞤦𞤭𞤪𞥆𞤫", "𞤀𞥄𞤩𞤵𞤲𞥋𞤣𞤫", "𞤃𞤢𞤱𞤦𞤢𞥄𞤪𞤫", "𞤐𞤶𞤫𞤧𞤤𞤢𞥄𞤪𞤫", "𞤐𞤢𞥄𞤧𞤢𞥄𞤲𞤣𞤫", "𞤃𞤢𞤱𞤲𞤣𞤫", "𞤖𞤮𞤪𞤦𞤭𞤪𞥆𞤫"}, + {}, + {"𞤅𞤭𞥅𞤤𞤮", "𞤕𞤮𞤤𞤼𞤮", "𞤐𞤦𞤮𞥅𞤴𞤮", "𞤅𞤫𞥅𞤼𞤮", "𞤁𞤵𞥅𞤶𞤮", "𞤑𞤮𞤪𞤧𞤮", "𞤃𞤮𞤪𞤧𞤮", "𞤔𞤵𞤳𞤮", "𞤅𞤭𞤤𞤼𞤮", "𞤒𞤢𞤪𞤳𞤮", "𞤔𞤮𞤤𞤮", "𞤄𞤮𞤱𞤼𞤮"}, + {"𞤀𞤎", "𞤇𞤎"}, +} + +var localeTableFfLatn = [5][]string{ + {"dew", "aaɓ", "maw", "nje", "naa", "mwd", "hbi"}, + {"dewo", "aaɓnde", "mawbaare", "njeslaare", "naasaande", "mawnde", "hoore-biir"}, + {"sii", "col", "mbo", "see", "duu", "kor", "mor", "juk", "slt", "yar", "jol", "bow"}, + {"siilo", "colte", "mbooy", "seeɗto", "duujal", "korse", "morso", "juko", "siilto", "yarkomaa", "jolal", "bowte"}, + {"subaka", "kikiiɗe"}, +} + +var localeTableFfLatnBF = [5][]string{ + {"dew", "aaɓ", "maw", "nje", "naa", "mwd", "hbi"}, + {"dewo", "aaɓnde", "mawbaare", "njeslaare", "naasaande", "mawnde", "hoore-biir"}, + {"sii", "col", "mbo", "see", "duu", "kor", "mor", "juk", "slt", "yar", "jol", "bow"}, + {"siilo", "colte", "mbooy", "seeɗto", "duujal", "korse", "morso", "juko", "siilto", "yarkomaa", "jolal", "bowte"}, + {"subaka", "kikiiɗe"}, +} + +var localeTableFfLatnCM = [5][]string{ + {"dew", "aaɓ", "maw", "nje", "naa", "mwd", "hbi"}, + {"dewo", "aaɓnde", "mawbaare", "njeslaare", "naasaande", "mawnde", "hoore-biir"}, + {"sii", "col", "mbo", "see", "duu", "kor", "mor", "juk", "slt", "yar", "jol", "bow"}, + {"siilo", "colte", "mbooy", "seeɗto", "duujal", "korse", "morso", "juko", "siilto", "yarkomaa", "jolal", "bowte"}, + {"subaka", "kikiiɗe"}, +} + +var localeTableFfLatnGH = [5][]string{ + {"dew", "aaɓ", "maw", "nje", "naa", "mwd", "hbi"}, + {"dewo", "aaɓnde", "mawbaare", "njeslaare", "naasaande", "mawnde", "hoore-biir"}, + {"sii", "col", "mbo", "see", "duu", "kor", "mor", "juk", "slt", "yar", "jol", "bow"}, + {"siilo", "colte", "mbooy", "seeɗto", "duujal", "korse", "morso", "juko", "siilto", "yarkomaa", "jolal", "bowte"}, + {"subaka", "kikiiɗe"}, +} + +var localeTableFfLatnGM = [5][]string{ + {"dew", "aaɓ", "maw", "nje", "naa", "mwd", "hbi"}, + {"dewo", "aaɓnde", "mawbaare", "njeslaare", "naasaande", "mawnde", "hoore-biir"}, + {"sii", "col", "mbo", "see", "duu", "kor", "mor", "juk", "slt", "yar", "jol", "bow"}, + {"siilo", "colte", "mbooy", "seeɗto", "duujal", "korse", "morso", "juko", "siilto", "yarkomaa", "jolal", "bowte"}, + {"subaka", "kikiiɗe"}, +} + +var localeTableFfLatnGN = [5][]string{ + {"dew", "aaɓ", "maw", "nje", "naa", "mwd", "hbi"}, + {"dewo", "aaɓnde", "mawbaare", "njeslaare", "naasaande", "mawnde", "hoore-biir"}, + {"sii", "col", "mbo", "see", "duu", "kor", "mor", "juk", "slt", "yar", "jol", "bow"}, + {"siilo", "colte", "mbooy", "seeɗto", "duujal", "korse", "morso", "juko", "siilto", "yarkomaa", "jolal", "bowte"}, + {"subaka", "kikiiɗe"}, +} + +var localeTableFfLatnGW = [5][]string{ + {"dew", "aaɓ", "maw", "nje", "naa", "mwd", "hbi"}, + {"dewo", "aaɓnde", "mawbaare", "njeslaare", "naasaande", "mawnde", "hoore-biir"}, + {"sii", "col", "mbo", "see", "duu", 
"kor", "mor", "juk", "slt", "yar", "jol", "bow"}, + {"siilo", "colte", "mbooy", "seeɗto", "duujal", "korse", "morso", "juko", "siilto", "yarkomaa", "jolal", "bowte"}, + {"subaka", "kikiiɗe"}, +} + +var localeTableFfLatnLR = [5][]string{ + {"dew", "aaɓ", "maw", "nje", "naa", "mwd", "hbi"}, + {"dewo", "aaɓnde", "mawbaare", "njeslaare", "naasaande", "mawnde", "hoore-biir"}, + {"sii", "col", "mbo", "see", "duu", "kor", "mor", "juk", "slt", "yar", "jol", "bow"}, + {"siilo", "colte", "mbooy", "seeɗto", "duujal", "korse", "morso", "juko", "siilto", "yarkomaa", "jolal", "bowte"}, + {"subaka", "kikiiɗe"}, +} + +var localeTableFfLatnMR = [5][]string{ + {"dew", "aaɓ", "maw", "nje", "naa", "mwd", "hbi"}, + {"dewo", "aaɓnde", "mawbaare", "njeslaare", "naasaande", "mawnde", "hoore-biir"}, + {"sii", "col", "mbo", "see", "duu", "kor", "mor", "juk", "slt", "yar", "jol", "bow"}, + {"siilo", "colte", "mbooy", "seeɗto", "duujal", "korse", "morso", "juko", "siilto", "yarkomaa", "jolal", "bowte"}, + {"subaka", "kikiiɗe"}, +} + +var localeTableFfLatnNE = [5][]string{ + {"dew", "aaɓ", "maw", "nje", "naa", "mwd", "hbi"}, + {"dewo", "aaɓnde", "mawbaare", "njeslaare", "naasaande", "mawnde", "hoore-biir"}, + {"sii", "col", "mbo", "see", "duu", "kor", "mor", "juk", "slt", "yar", "jol", "bow"}, + {"siilo", "colte", "mbooy", "seeɗto", "duujal", "korse", "morso", "juko", "siilto", "yarkomaa", "jolal", "bowte"}, + {"subaka", "kikiiɗe"}, +} + +var localeTableFfLatnNG = [5][]string{ + {"dew", "aaɓ", "maw", "nje", "naa", "mwd", "hbi"}, + {"dewo", "aaɓnde", "mawbaare", "njeslaare", "naasaande", "mawnde", "hoore-biir"}, + {"sii", "col", "mbo", "see", "duu", "kor", "mor", "juk", "slt", "yar", "jol", "bow"}, + {"siilo", "colte", "mbooy", "seeɗto", "duujal", "korse", "morso", "juko", "siilto", "yarkomaa", "jolal", "bowte"}, + {"subaka", "kikiiɗe"}, +} + +var localeTableFfLatnSL = [5][]string{ + {"dew", "aaɓ", "maw", "nje", "naa", "mwd", "hbi"}, + {"dewo", "aaɓnde", "mawbaare", "njeslaare", "naasaande", "mawnde", "hoore-biir"}, + {"sii", "col", "mbo", "see", "duu", "kor", "mor", "juk", "slt", "yar", "jol", "bow"}, + {"siilo", "colte", "mbooy", "seeɗto", "duujal", "korse", "morso", "juko", "siilto", "yarkomaa", "jolal", "bowte"}, + {"subaka", "kikiiɗe"}, +} + +var localeTableFfLatnSN = [5][]string{ + {"dew", "aaɓ", "maw", "nje", "naa", "mwd", "hbi"}, + {"dewo", "aaɓnde", "mawbaare", "njeslaare", "naasaande", "mawnde", "hoore-biir"}, + {"sii", "col", "mbo", "see", "duu", "kor", "mor", "juk", "slt", "yar", "jol", "bow"}, + {"siilo", "colte", "mbooy", "seeɗto", "duujal", "korse", "morso", "juko", "siilto", "yarkomaa", "jolal", "bowte"}, + {"subaka", "kikiiɗe"}, +} + +var localeTableFi = [5][]string{ + {"su", "ma", "ti", "ke", "to", "pe", "la"}, + {"sunnuntaina", "maanantaina", "tiistaina", "keskiviikkona", "torstaina", "perjantaina", "lauantaina"}, + {"tammik.", "helmik.", "maalisk.", "huhtik.", "toukok.", "kesäk.", "heinäk.", "elok.", "syysk.", "lokak.", "marrask.", "jouluk."}, + {"tammikuuta", "helmikuuta", "maaliskuuta", "huhtikuuta", "toukokuuta", "kesäkuuta", "heinäkuuta", "elokuuta", "syyskuuta", "lokakuuta", "marraskuuta", "joulukuuta"}, + {"ap.", "ip."}, +} + +var localeTableFiFI = [5][]string{ + {"su", "ma", "ti", "ke", "to", "pe", "la"}, + {"sunnuntaina", "maanantaina", "tiistaina", "keskiviikkona", "torstaina", "perjantaina", "lauantaina"}, + {"tammik.", "helmik.", "maalisk.", "huhtik.", "toukok.", "kesäk.", "heinäk.", "elok.", "syysk.", "lokak.", "marrask.", "jouluk."}, + {"tammikuuta", "helmikuuta", "maaliskuuta", 
"huhtikuuta", "toukokuuta", "kesäkuuta", "heinäkuuta", "elokuuta", "syyskuuta", "lokakuuta", "marraskuuta", "joulukuuta"}, + {"ap.", "ip."}, +} + +var localeTableFil = [5][]string{ + {"Lin", "Lun", "Mar", "Miy", "Huw", "Biy", "Sab"}, + {"Linggo", "Lunes", "Martes", "Miyerkules", "Huwebes", "Biyernes", "Sabado"}, + {"Ene", "Peb", "Mar", "Abr", "May", "Hun", "Hul", "Ago", "Set", "Okt", "Nob", "Dis"}, + {"Enero", "Pebrero", "Marso", "Abril", "Mayo", "Hunyo", "Hulyo", "Agosto", "Setyembre", "Oktubre", "Nobyembre", "Disyembre"}, + {"am", "pm"}, +} + +var localeTableFilPH = [5][]string{ + {"Lin", "Lun", "Mar", "Miy", "Huw", "Biy", "Sab"}, + {"Linggo", "Lunes", "Martes", "Miyerkules", "Huwebes", "Biyernes", "Sabado"}, + {"Ene", "Peb", "Mar", "Abr", "May", "Hun", "Hul", "Ago", "Set", "Okt", "Nob", "Dis"}, + {"Enero", "Pebrero", "Marso", "Abril", "Mayo", "Hunyo", "Hulyo", "Agosto", "Setyembre", "Oktubre", "Nobyembre", "Disyembre"}, + {"am", "pm"}, +} + +var localeTableFo = [5][]string{ + {"sun.", "mán.", "týs.", "mik.", "hós.", "frí.", "ley."}, + {"sunnudagur", "mánadagur", "týsdagur", "mikudagur", "hósdagur", "fríggjadagur", "leygardagur"}, + {"jan.", "feb.", "mar.", "apr.", "mai", "jun.", "jul.", "aug.", "sep.", "okt.", "nov.", "des."}, + {"januar", "februar", "mars", "apríl", "mai", "juni", "juli", "august", "september", "oktober", "november", "desember"}, + {}, +} + +var localeTableFoDK = [5][]string{ + {"sun.", "mán.", "týs.", "mik.", "hós.", "frí.", "ley."}, + {"sunnudagur", "mánadagur", "týsdagur", "mikudagur", "hósdagur", "fríggjadagur", "leygardagur"}, + {"jan.", "feb.", "mar.", "apr.", "mai", "jun.", "jul.", "aug.", "sep.", "okt.", "nov.", "des."}, + {"januar", "februar", "mars", "apríl", "mai", "juni", "juli", "august", "september", "oktober", "november", "desember"}, + {}, +} + +var localeTableFoFO = [5][]string{ + {"sun.", "mán.", "týs.", "mik.", "hós.", "frí.", "ley."}, + {"sunnudagur", "mánadagur", "týsdagur", "mikudagur", "hósdagur", "fríggjadagur", "leygardagur"}, + {"jan.", "feb.", "mar.", "apr.", "mai", "jun.", "jul.", "aug.", "sep.", "okt.", "nov.", "des."}, + {"januar", "februar", "mars", "apríl", "mai", "juni", "juli", "august", "september", "oktober", "november", "desember"}, + {}, +} + +var localeTableFr = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrBE = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrBF = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrBI = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", 
"ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrBJ = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrBL = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrCA = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juill.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {"a.m.", "p.m."}, +} + +var localeTableFrCD = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrCF = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrCG = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrCH = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrCI = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", 
"mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrCM = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {"mat.", "soir"}, +} + +var localeTableFrDJ = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrDZ = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrFR = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrGA = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrGF = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrGN = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrGP = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrGQ = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", 
"vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrHT = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrKM = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrLU = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrMA = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"jan.", "fév.", "mar.", "avr.", "mai", "jui.", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrMC = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrMF = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrMG = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrML = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} 
+ +var localeTableFrMQ = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrMR = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrMU = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrNC = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrNE = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrPF = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrPM = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrRE = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrRW = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", 
"oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrSC = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrSN = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrSY = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrTD = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrTG = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrTN = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrVU = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrWF = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + {"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrYT = [5][]string{ + {"dim.", "lun.", "mar.", "mer.", "jeu.", "ven.", "sam."}, + 
{"dimanche", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi"}, + {"janv.", "févr.", "mars", "avr.", "mai", "juin", "juil.", "août", "sept.", "oct.", "nov.", "déc."}, + {"janvier", "février", "mars", "avril", "mai", "juin", "juillet", "août", "septembre", "octobre", "novembre", "décembre"}, + {}, +} + +var localeTableFrr = [5][]string{ + {"Sön", "Mun", "Tei", "Wed", "Tür", "Fre", "San"}, + {"Söndai", "Mundai", "Teisdai", "Weedensdai", "Tüürsdai", "Freidai", "Saninj"}, + {"Jan", "Feb", "Mar", "Apr", "Mei", "Jün", "Jül", "Aug", "Sep", "Okt", "Nof", "Det"}, + {"Janewoore", "Febrewoore", "Maarts", "April", "Mei", "Jüüne", "Jüüle", "August", "September", "Oktuuber", "Nofember", "Detsember"}, + {"i/m", "e/m"}, +} + +var localeTableFrrDE = [5][]string{ + {"Sön", "Mun", "Tei", "Wed", "Tür", "Fre", "San"}, + {"Söndai", "Mundai", "Teisdai", "Weedensdai", "Tüürsdai", "Freidai", "Saninj"}, + {"Jan", "Feb", "Mar", "Apr", "Mei", "Jün", "Jül", "Aug", "Sep", "Okt", "Nof", "Det"}, + {"Janewoore", "Febrewoore", "Maarts", "April", "Mei", "Jüüne", "Jüüle", "August", "September", "Oktuuber", "Nofember", "Detsember"}, + {"i/m", "e/m"}, +} + +var localeTableFur = [5][]string{ + {"dom", "lun", "mar", "mie", "joi", "vin", "sab"}, + {"domenie", "lunis", "martars", "miercus", "joibe", "vinars", "sabide"}, + {"Zen", "Fev", "Mar", "Avr", "Mai", "Jug", "Lui", "Avo", "Set", "Otu", "Nov", "Dic"}, + {"Zenâr", "Fevrâr", "Març", "Avrîl", "Mai", "Jugn", "Lui", "Avost", "Setembar", "Otubar", "Novembar", "Dicembar"}, + {"a.", "p."}, +} + +var localeTableFurIT = [5][]string{ + {"dom", "lun", "mar", "mie", "joi", "vin", "sab"}, + {"domenie", "lunis", "martars", "miercus", "joibe", "vinars", "sabide"}, + {"Zen", "Fev", "Mar", "Avr", "Mai", "Jug", "Lui", "Avo", "Set", "Otu", "Nov", "Dic"}, + {"Zenâr", "Fevrâr", "Març", "Avrîl", "Mai", "Jugn", "Lui", "Avost", "Setembar", "Otubar", "Novembar", "Dicembar"}, + {"a.", "p."}, +} + +var localeTableFy = [5][]string{ + {"si", "mo", "ti", "wo", "to", "fr", "so"}, + {"snein", "moandei", "tiisdei", "woansdei", "tongersdei", "freed", "sneon"}, + {"Jan", "Feb", "Mrt", "Apr", "Mai", "Jun", "Jul", "Aug", "Sep", "Okt", "Nov", "Des"}, + {"Jannewaris", "Febrewaris", "Maart", "April", "Maaie", "Juny", "July", "Augustus", "Septimber", "Oktober", "Novimber", "Desimber"}, + {}, +} + +var localeTableFyNL = [5][]string{ + {"si", "mo", "ti", "wo", "to", "fr", "so"}, + {"snein", "moandei", "tiisdei", "woansdei", "tongersdei", "freed", "sneon"}, + {"Jan", "Feb", "Mrt", "Apr", "Mai", "Jun", "Jul", "Aug", "Sep", "Okt", "Nov", "Des"}, + {"Jannewaris", "Febrewaris", "Maart", "April", "Maaie", "Juny", "July", "Augustus", "Septimber", "Oktober", "Novimber", "Desimber"}, + {}, +} + +var localeTableGa = [5][]string{ + {"Domh", "Luan", "Máirt", "Céad", "Déar", "Aoine", "Sath"}, + {"Dé Domhnaigh", "Dé Luain", "Dé Máirt", "Dé Céadaoin", "Déardaoin", "Dé hAoine", "Dé Sathairn"}, + {"Ean", "Feabh", "Márta", "Aib", "Beal", "Meith", "Iúil", "Lún", "MFómh", "DFómh", "Samh", "Noll"}, + {"Eanáir", "Feabhra", "Márta", "Aibreán", "Bealtaine", "Meitheamh", "Iúil", "Lúnasa", "Meán Fómhair", "Deireadh Fómhair", "Samhain", "Nollaig"}, + {"r.n.", "i.n."}, +} + +var localeTableGaGB = [5][]string{ + {"Domh", "Luan", "Máirt", "Céad", "Déar", "Aoine", "Sath"}, + {"Dé Domhnaigh", "Dé Luain", "Dé Máirt", "Dé Céadaoin", "Déardaoin", "Dé hAoine", "Dé Sathairn"}, + {"Ean", "Feabh", "Márta", "Aib", "Beal", "Meith", "Iúil", "Lún", "MFómh", "DFómh", "Samh", "Noll"}, + {"Eanáir", "Feabhra", "Márta", "Aibreán", "Bealtaine", 
"Meitheamh", "Iúil", "Lúnasa", "Meán Fómhair", "Deireadh Fómhair", "Samhain", "Nollaig"}, + {"r.n.", "i.n."}, +} + +var localeTableGaIE = [5][]string{ + {"Domh", "Luan", "Máirt", "Céad", "Déar", "Aoine", "Sath"}, + {"Dé Domhnaigh", "Dé Luain", "Dé Máirt", "Dé Céadaoin", "Déardaoin", "Dé hAoine", "Dé Sathairn"}, + {"Ean", "Feabh", "Márta", "Aib", "Beal", "Meith", "Iúil", "Lún", "MFómh", "DFómh", "Samh", "Noll"}, + {"Eanáir", "Feabhra", "Márta", "Aibreán", "Bealtaine", "Meitheamh", "Iúil", "Lúnasa", "Meán Fómhair", "Deireadh Fómhair", "Samhain", "Nollaig"}, + {"r.n.", "i.n."}, +} + +var localeTableGaa = [5][]string{ + {"Hɔg", "Ju", "Juf", "Shɔ", "Soo", "Soh", "Hɔɔ"}, + {"Hɔgbaa", "Ju", "Jufɔ", "Shɔ", "Soo", "Sohaa", "Hɔɔ"}, + {"Aha", "Ofl", "Ots", "Abe", "Agb", "Otu", "Maa", "Man", "Gbo", "Ant", "Ale", "Afu"}, + {"Aharabata", "Oflɔ", "Otsokrikri", "Abeibe", "Agbiɛnaa", "Otukwajaŋ", "Maawɛ", "Manyawale", "Gbo", "Antɔŋ", "Alemle", "Afuabe"}, + {"LB", "SN"}, +} + +var localeTableGaaGH = [5][]string{ + {"Hɔg", "Ju", "Juf", "Shɔ", "Soo", "Soh", "Hɔɔ"}, + {"Hɔgbaa", "Ju", "Jufɔ", "Shɔ", "Soo", "Sohaa", "Hɔɔ"}, + {"Aha", "Ofl", "Ots", "Abe", "Agb", "Otu", "Maa", "Man", "Gbo", "Ant", "Ale", "Afu"}, + {"Aharabata", "Oflɔ", "Otsokrikri", "Abeibe", "Agbiɛnaa", "Otukwajaŋ", "Maawɛ", "Manyawale", "Gbo", "Antɔŋ", "Alemle", "Afuabe"}, + {"LB", "SN"}, +} + +var localeTableGd = [5][]string{ + {"DiD", "DiL", "DiM", "DiC", "Dia", "Dih", "DiS"}, + {"DiDòmhnaich", "DiLuain", "DiMàirt", "DiCiadain", "DiarDaoin", "DihAoine", "DiSathairne"}, + {"Faoi", "Gearr", "Màrt", "Gibl", "Cèit", "Ògmh", "Iuch", "Lùna", "Sult", "Dàmh", "Samh", "Dùbh"}, + {"dhen Fhaoilleach", "dhen Ghearran", "dhen Mhàrt", "dhen Ghiblean", "dhen Chèitean", "dhen Ògmhios", "dhen Iuchar", "dhen Lùnastal", "dhen t-Sultain", "dhen Dàmhair", "dhen t-Samhain", "dhen Dùbhlachd"}, + {"m", "f"}, +} + +var localeTableGdGB = [5][]string{ + {"DiD", "DiL", "DiM", "DiC", "Dia", "Dih", "DiS"}, + {"DiDòmhnaich", "DiLuain", "DiMàirt", "DiCiadain", "DiarDaoin", "DihAoine", "DiSathairne"}, + {"Faoi", "Gearr", "Màrt", "Gibl", "Cèit", "Ògmh", "Iuch", "Lùna", "Sult", "Dàmh", "Samh", "Dùbh"}, + {"dhen Fhaoilleach", "dhen Ghearran", "dhen Mhàrt", "dhen Ghiblean", "dhen Chèitean", "dhen Ògmhios", "dhen Iuchar", "dhen Lùnastal", "dhen t-Sultain", "dhen Dàmhair", "dhen t-Samhain", "dhen Dùbhlachd"}, + {"m", "f"}, +} + +var localeTableGez = [5][]string{ + {}, + {"እኁድ", "ሰኑይ", "ሠሉስ", "ራብዕ", "ሐሙስ", "ዓርበ", "ቀዳሚት"}, + {}, + {"ጠሐረ", "ከተተ", "መገበ", "አኀዘ", "ግንባት", "ሠንየ", "ሐመለ", "ነሐሰ", "ከረመ", "ጠቀመ", "ኀደረ", "ኀሠሠ"}, + {"ጽባሕ", "ምሴት"}, +} + +var localeTableGezER = [5][]string{ + {}, + {"እኁድ", "ሰኑይ", "ሠሉስ", "ራብዕ", "ሐሙስ", "ዓርበ", "ቀዳሚት"}, + {}, + {"ጠሐረ", "ከተተ", "መገበ", "አኀዘ", "ግንባት", "ሠንየ", "ሐመለ", "ነሐሰ", "ከረመ", "ጠቀመ", "ኀደረ", "ኀሠሠ"}, + {"ጽባሕ", "ምሴት"}, +} + +var localeTableGezET = [5][]string{ + {}, + {"እኁድ", "ሰኑይ", "ሠሉስ", "ራብዕ", "ሐሙስ", "ዓርበ", "ቀዳሚት"}, + {}, + {"ጠሐረ", "ከተተ", "መገበ", "አኀዘ", "ግንባት", "ሠንየ", "ሐመለ", "ነሐሰ", "ከረመ", "ጠቀመ", "ኀደረ", "ኀሠሠ"}, + {"ጽባሕ", "ምሴት"}, +} + +var localeTableGl = [5][]string{ + {"dom.", "luns", "mar.", "mér.", "xov.", "ven.", "sáb."}, + {"domingo", "luns", "martes", "mércores", "xoves", "venres", "sábado"}, + {"xan.", "feb.", "mar.", "abr.", "maio", "xuño", "xul.", "ago.", "set.", "out.", "nov.", "dec."}, + {"xaneiro", "febreiro", "marzo", "abril", "maio", "xuño", "xullo", "agosto", "setembro", "outubro", "novembro", "decembro"}, + {"a.m.", "p.m."}, +} + +var localeTableGlES = [5][]string{ + {"dom.", "luns", "mar.", "mér.", "xov.", "ven.", "sáb."}, + {"domingo", 
"luns", "martes", "mércores", "xoves", "venres", "sábado"}, + {"xan.", "feb.", "mar.", "abr.", "maio", "xuño", "xul.", "ago.", "set.", "out.", "nov.", "dec."}, + {"xaneiro", "febreiro", "marzo", "abril", "maio", "xuño", "xullo", "agosto", "setembro", "outubro", "novembro", "decembro"}, + {"a.m.", "p.m."}, +} + +var localeTableGn = [5][]string{ + {}, + {"Arateĩ", "Arakõi", "Araapy", "Ararundy", "Arapo", "Arapoteĩ", "Arapokõi"}, + {}, + {"Jasyteĩ", "Jasykõi", "Jasyapy", "Jasyrundy", "Jasypo", "Jasypoteĩ", "Jasypokõi", "Jasypoapy", "Jasyporundy", "Jasypa", "Jasypateĩ", "Jasypakõi"}, + {}, +} + +var localeTableGnPY = [5][]string{ + {}, + {"Arateĩ", "Arakõi", "Araapy", "Ararundy", "Arapo", "Arapoteĩ", "Arapokõi"}, + {}, + {"Jasyteĩ", "Jasykõi", "Jasyapy", "Jasyrundy", "Jasypo", "Jasypoteĩ", "Jasypokõi", "Jasypoapy", "Jasyporundy", "Jasypa", "Jasypateĩ", "Jasypakõi"}, + {}, +} + +var localeTableGsw = [5][]string{ + {"Su.", "Mä.", "Zi.", "Mi.", "Du.", "Fr.", "Sa."}, + {"Sunntig", "Määntig", "Ziischtig", "Mittwuch", "Dunschtig", "Friitig", "Samschtig"}, + {"Jan", "Feb", "Mär", "Apr", "Mai", "Jun", "Jul", "Aug", "Sep", "Okt", "Nov", "Dez"}, + {"Januar", "Februar", "März", "April", "Mai", "Juni", "Juli", "Auguscht", "Septämber", "Oktoober", "Novämber", "Dezämber"}, + {"vorm.", "nam."}, +} + +var localeTableGswCH = [5][]string{ + {"Su.", "Mä.", "Zi.", "Mi.", "Du.", "Fr.", "Sa."}, + {"Sunntig", "Määntig", "Ziischtig", "Mittwuch", "Dunschtig", "Friitig", "Samschtig"}, + {"Jan", "Feb", "Mär", "Apr", "Mai", "Jun", "Jul", "Aug", "Sep", "Okt", "Nov", "Dez"}, + {"Januar", "Februar", "März", "April", "Mai", "Juni", "Juli", "Auguscht", "Septämber", "Oktoober", "Novämber", "Dezämber"}, + {"vorm.", "nam."}, +} + +var localeTableGswFR = [5][]string{ + {"Su.", "Mä.", "Zi.", "Mi.", "Du.", "Fr.", "Sa."}, + {"Sunntig", "Määntig", "Ziischtig", "Mittwuch", "Dunschtig", "Friitig", "Samschtig"}, + {"Jan", "Feb", "Mär", "Apr", "Mai", "Jun", "Jul", "Aug", "Sep", "Okt", "Nov", "Dez"}, + {"Januar", "Februar", "März", "April", "Mai", "Juni", "Juli", "Auguscht", "Septämber", "Oktoober", "Novämber", "Dezämber"}, + {"vorm.", "nam."}, +} + +var localeTableGswLI = [5][]string{ + {"Su.", "Mä.", "Zi.", "Mi.", "Du.", "Fr.", "Sa."}, + {"Sunntig", "Määntig", "Ziischtig", "Mittwuch", "Dunschtig", "Friitig", "Samschtig"}, + {"Jan", "Feb", "Mär", "Apr", "Mai", "Jun", "Jul", "Aug", "Sep", "Okt", "Nov", "Dez"}, + {"Januar", "Februar", "März", "April", "Mai", "Juni", "Juli", "Auguscht", "Septämber", "Oktoober", "Novämber", "Dezämber"}, + {"vorm.", "nam."}, +} + +var localeTableGu = [5][]string{ + {"રવિ", "સોમ", "મંગળ", "બુધ", "ગુરુ", "શુક્ર", "શનિ"}, + {"રવિવાર", "સોમવાર", "મંગળવાર", "બુધવાર", "ગુરુવાર", "શુક્રવાર", "શનિવાર"}, + {"જાન્યુ", "ફેબ્રુ", "માર્ચ", "એપ્રિલ", "મે", "જૂન", "જુલાઈ", "ઑગસ્ટ", "સપ્ટે", "ઑક્ટો", "નવે", "ડિસે"}, + {"જાન્યુઆરી", "ફેબ્રુઆરી", "માર્ચ", "એપ્રિલ", "મે", "જૂન", "જુલાઈ", "ઑગસ્ટ", "સપ્ટેમ્બર", "ઑક્ટોબર", "નવેમ્બર", "ડિસેમ્બર"}, + {}, +} + +var localeTableGuIN = [5][]string{ + {"રવિ", "સોમ", "મંગળ", "બુધ", "ગુરુ", "શુક્ર", "શનિ"}, + {"રવિવાર", "સોમવાર", "મંગળવાર", "બુધવાર", "ગુરુવાર", "શુક્રવાર", "શનિવાર"}, + {"જાન્યુ", "ફેબ્રુ", "માર્ચ", "એપ્રિલ", "મે", "જૂન", "જુલાઈ", "ઑગસ્ટ", "સપ્ટે", "ઑક્ટો", "નવે", "ડિસે"}, + {"જાન્યુઆરી", "ફેબ્રુઆરી", "માર્ચ", "એપ્રિલ", "મે", "જૂન", "જુલાઈ", "ઑગસ્ટ", "સપ્ટેમ્બર", "ઑક્ટોબર", "નવેમ્બર", "ડિસેમ્બર"}, + {}, +} + +var localeTableGuz = [5][]string{ + {"Cpr", "Ctt", "Cmn", "Cmt", "Ars", "Icm", "Est"}, + {"Chumapiri", "Chumatato", "Chumaine", "Chumatano", "Aramisi", "Ichuma", 
"Esabato"}, + {"Can", "Feb", "Mac", "Apr", "Mei", "Jun", "Cul", "Agt", "Sep", "Okt", "Nob", "Dis"}, + {"Chanuari", "Feburari", "Machi", "Apiriri", "Mei", "Juni", "Chulai", "Agosti", "Septemba", "Okitoba", "Nobemba", "Disemba"}, + {"Ma", "Mo"}, +} + +var localeTableGuzKE = [5][]string{ + {"Cpr", "Ctt", "Cmn", "Cmt", "Ars", "Icm", "Est"}, + {"Chumapiri", "Chumatato", "Chumaine", "Chumatano", "Aramisi", "Ichuma", "Esabato"}, + {"Can", "Feb", "Mac", "Apr", "Mei", "Jun", "Cul", "Agt", "Sep", "Okt", "Nob", "Dis"}, + {"Chanuari", "Feburari", "Machi", "Apiriri", "Mei", "Juni", "Chulai", "Agosti", "Septemba", "Okitoba", "Nobemba", "Disemba"}, + {"Ma", "Mo"}, +} + +var localeTableGv = [5][]string{ + {"Jed", "Jel", "Jem", "Jerc", "Jerd", "Jeh", "Jes"}, + {"Jedoonee", "Jelhein", "Jemayrt", "Jercean", "Jerdein", "Jeheiney", "Jesarn"}, + {"J-guer", "T-arree", "Mayrnt", "Avrril", "Boaldyn", "M-souree", "J-souree", "Luanistyn", "M-fouyir", "J-fouyir", "M-Houney", "M-Nollick"}, + {"Jerrey-geuree", "Toshiaght-arree", "Mayrnt", "Averil", "Boaldyn", "Mean-souree", "Jerrey-souree", "Luanistyn", "Mean-fouyir", "Jerrey-fouyir", "Mee Houney", "Mee ny Nollick"}, + {"a.m.", "p.m."}, +} + +var localeTableGvIM = [5][]string{ + {"Jed", "Jel", "Jem", "Jerc", "Jerd", "Jeh", "Jes"}, + {"Jedoonee", "Jelhein", "Jemayrt", "Jercean", "Jerdein", "Jeheiney", "Jesarn"}, + {"J-guer", "T-arree", "Mayrnt", "Avrril", "Boaldyn", "M-souree", "J-souree", "Luanistyn", "M-fouyir", "J-fouyir", "M-Houney", "M-Nollick"}, + {"Jerrey-geuree", "Toshiaght-arree", "Mayrnt", "Averil", "Boaldyn", "Mean-souree", "Jerrey-souree", "Luanistyn", "Mean-fouyir", "Jerrey-fouyir", "Mee Houney", "Mee ny Nollick"}, + {"a.m.", "p.m."}, +} + +var localeTableHa = [5][]string{ + {"Lah", "Lit", "Tal", "Lar", "Alh", "Jum", "Asa"}, + {"Lahadi", "Litinin", "Talata", "Laraba", "Alhamis", "Jummaʼa", "Asabar"}, + {"Jan", "Fab", "Mar", "Afi", "May", "Yun", "Yul", "Agu", "Sat", "Okt", "Nuw", "Dis"}, + {"Janairu", "Faburairu", "Maris", "Afirilu", "Mayu", "Yuni", "Yuli", "Agusta", "Satumba", "Oktoba", "Nuwamba", "Disamba"}, + {"SF", "YM"}, +} + +var localeTableHaArab = [5][]string{ + {"لَح", "لِت", "تَل", "لَر", "أَلْح", "جُم", "أَسَ"}, + {"لَحَدِ", "لِتِنِنْ", "تَلَتَ", "لَرَبَ", "أَلْحَمِسْ", "جُمَعَ", "أَسَبَرْ"}, + {"جَن", "ڢَب", "مَر", "أَڢْر", "مَي", "يُون", "يُول", "أَغُ", "سَت", "أُكْت", "نُو", "دِس"}, + {"جَنَيْرُ", "ڢَبْرَيْرُ", "مَرِسْ", "أَڢْرِلُ", "مَيُ", "يُونِ", "يُولِ", "أَغُسْتَ", "سَتُمْبَ", "أُكْتوُبَ", "نُوَمْبَ", "دِسَمْبَ"}, + {"A.M.", "P.M."}, +} + +var localeTableHaArabNG = [5][]string{ + {"لَح", "لِت", "تَل", "لَر", "أَلْح", "جُم", "أَسَ"}, + {"لَحَدِ", "لِتِنِنْ", "تَلَتَ", "لَرَبَ", "أَلْحَمِسْ", "جُمَعَ", "أَسَبَرْ"}, + {"جَن", "ڢَب", "مَر", "أَڢْر", "مَي", "يُون", "يُول", "أَغُ", "سَت", "أُكْت", "نُو", "دِس"}, + {"جَنَيْرُ", "ڢَبْرَيْرُ", "مَرِسْ", "أَڢْرِلُ", "مَيُ", "يُونِ", "يُولِ", "أَغُسْتَ", "سَتُمْبَ", "أُكْتوُبَ", "نُوَمْبَ", "دِسَمْبَ"}, + {"A.M.", "P.M."}, +} + +var localeTableHaArabSD = [5][]string{ + {"لَح", "لِت", "تَل", "لَر", "أَلْح", "جُم", "أَسَ"}, + {"لَحَدِ", "لِتِنِنْ", "تَلَتَ", "لَرَبَ", "أَلْحَمِسْ", "جُمَعَ", "أَسَبَرْ"}, + {"جَن", "ڢَب", "مَر", "أَڢْر", "مَي", "يُون", "يُول", "أَغُ", "سَت", "أُكْت", "نُو", "دِس"}, + {"جَنَيْرُ", "ڢَبْرَيْرُ", "مَرِسْ", "أَڢْرِلُ", "مَيُ", "يُونِ", "يُولِ", "أَغُسْتَ", "سَتُمْبَ", "أُكْتوُبَ", "نُوَمْبَ", "دِسَمْبَ"}, + {"A.M.", "P.M."}, +} + +var localeTableHaGH = [5][]string{ + {"Lah", "Lit", "Tal", "Lar", "Alh", "Jum", "Asa"}, + {"Lahadi", "Litinin", "Talata", "Laraba", "Alhamis", 
"Jummaʼa", "Asabar"}, + {"Jan", "Fab", "Mar", "Afi", "May", "Yun", "Yul", "Agu", "Sat", "Okt", "Nuw", "Dis"}, + {"Janairu", "Faburairu", "Maris", "Afirilu", "Mayu", "Yuni", "Yuli", "Agusta", "Satumba", "Oktoba", "Nuwamba", "Disamba"}, + {"SF", "YM"}, +} + +var localeTableHaNE = [5][]string{ + {"Lah", "Lit", "Tal", "Lar", "Alh", "Jum", "Asa"}, + {"Lahadi", "Litinin", "Talata", "Laraba", "Alhamis", "Jummaʼa", "Asabar"}, + {"Jan", "Fab", "Mar", "Afi", "May", "Yun", "Yul", "Agu", "Sat", "Okt", "Nuw", "Dis"}, + {"Janairu", "Faburairu", "Maris", "Afirilu", "Mayu", "Yuni", "Yuli", "Agusta", "Satumba", "Oktoba", "Nuwamba", "Disamba"}, + {"SF", "YM"}, +} + +var localeTableHaNG = [5][]string{ + {"Lah", "Lit", "Tal", "Lar", "Alh", "Jum", "Asa"}, + {"Lahadi", "Litinin", "Talata", "Laraba", "Alhamis", "Jummaʼa", "Asabar"}, + {"Jan", "Fab", "Mar", "Afi", "May", "Yun", "Yul", "Agu", "Sat", "Okt", "Nuw", "Dis"}, + {"Janairu", "Faburairu", "Maris", "Afirilu", "Mayu", "Yuni", "Yuli", "Agusta", "Satumba", "Oktoba", "Nuwamba", "Disamba"}, + {"SF", "YM"}, +} + +var localeTableHaw = [5][]string{ + {"LP", "P1", "P2", "P3", "P4", "P5", "P6"}, + {"Lāpule", "Poʻakahi", "Poʻalua", "Poʻakolu", "Poʻahā", "Poʻalima", "Poʻaono"}, + {"Ian.", "Pep.", "Mal.", "ʻAp.", "Mei", "Iun.", "Iul.", "ʻAu.", "Kep.", "ʻOk.", "Now.", "Kek."}, + {"Ianuali", "Pepeluali", "Malaki", "ʻApelila", "Mei", "Iune", "Iulai", "ʻAukake", "Kepakemapa", "ʻOkakopa", "Nowemapa", "Kekemapa"}, + {}, +} + +var localeTableHawUS = [5][]string{ + {"LP", "P1", "P2", "P3", "P4", "P5", "P6"}, + {"Lāpule", "Poʻakahi", "Poʻalua", "Poʻakolu", "Poʻahā", "Poʻalima", "Poʻaono"}, + {"Ian.", "Pep.", "Mal.", "ʻAp.", "Mei", "Iun.", "Iul.", "ʻAu.", "Kep.", "ʻOk.", "Now.", "Kek."}, + {"Ianuali", "Pepeluali", "Malaki", "ʻApelila", "Mei", "Iune", "Iulai", "ʻAukake", "Kepakemapa", "ʻOkakopa", "Nowemapa", "Kekemapa"}, + {}, +} + +var localeTableHe = [5][]string{ + {"יום א׳", "יום ב׳", "יום ג׳", "יום ד׳", "יום ה׳", "יום ו׳", "שבת"}, + {"יום ראשון", "יום שני", "יום שלישי", "יום רביעי", "יום חמישי", "יום שישי", "יום שבת"}, + {"ינו׳", "פבר׳", "מרץ", "אפר׳", "מאי", "יוני", "יולי", "אוג׳", "ספט׳", "אוק׳", "נוב׳", "דצמ׳"}, + {"ינואר", "פברואר", "מרץ", "אפריל", "מאי", "יוני", "יולי", "אוגוסט", "ספטמבר", "אוקטובר", "נובמבר", "דצמבר"}, + {"לפנה״צ", "אחה״צ"}, +} + +var localeTableHeIL = [5][]string{ + {"יום א׳", "יום ב׳", "יום ג׳", "יום ד׳", "יום ה׳", "יום ו׳", "שבת"}, + {"יום ראשון", "יום שני", "יום שלישי", "יום רביעי", "יום חמישי", "יום שישי", "יום שבת"}, + {"ינו׳", "פבר׳", "מרץ", "אפר׳", "מאי", "יוני", "יולי", "אוג׳", "ספט׳", "אוק׳", "נוב׳", "דצמ׳"}, + {"ינואר", "פברואר", "מרץ", "אפריל", "מאי", "יוני", "יולי", "אוגוסט", "ספטמבר", "אוקטובר", "נובמבר", "דצמבר"}, + {"לפנה״צ", "אחה״צ"}, +} + +var localeTableHi = [5][]string{ + {"रवि", "सोम", "मंगल", "बुध", "गुरु", "शुक्र", "शनि"}, + {"रविवार", "सोमवार", "मंगलवार", "बुधवार", "गुरुवार", "शुक्रवार", "शनिवार"}, + {"जन॰", "फ़र॰", "मार्च", "अप्रैल", "मई", "जून", "जुल॰", "अग॰", "सित॰", "अक्तू॰", "नव॰", "दिस॰"}, + {"जनवरी", "फ़रवरी", "मार्च", "अप्रैल", "मई", "जून", "जुलाई", "अगस्त", "सितंबर", "अक्तूबर", "नवंबर", "दिसंबर"}, + {"am", "pm"}, +} + +var localeTableHiIN = [5][]string{ + {"रवि", "सोम", "मंगल", "बुध", "गुरु", "शुक्र", "शनि"}, + {"रविवार", "सोमवार", "मंगलवार", "बुधवार", "गुरुवार", "शुक्रवार", "शनिवार"}, + {"जन॰", "फ़र॰", "मार्च", "अप्रैल", "मई", "जून", "जुल॰", "अग॰", "सित॰", "अक्तू॰", "नव॰", "दिस॰"}, + {"जनवरी", "फ़रवरी", "मार्च", "अप्रैल", "मई", "जून", "जुलाई", "अगस्त", "सितंबर", "अक्तूबर", "नवंबर", "दिसंबर"}, + {"am", "pm"}, +} + +var 
localeTableHiLatn = [5][]string{ + {"Ravi", "Som", "Mangal", "Budh", "Guru", "Shukra", "Shani"}, + {"Raviwaar", "Somwaar", "Mangalwaar", "Budhwaar", "Guruwaar", "Shukrawaar", "Shaniwaar"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"}, + {}, + {"AM", "PM"}, +} + +var localeTableHiLatnIN = [5][]string{ + {"Ravi", "Som", "Mangal", "Budh", "Guru", "Shukra", "Shani"}, + {"Raviwaar", "Somwaar", "Mangalwaar", "Budhwaar", "Guruwaar", "Shukrawaar", "Shaniwaar"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"}, + {}, + {"AM", "PM"}, +} + +var localeTableHnj = [5][]string{ + {}, + {}, + {}, + {"𞄆𞄬", "𞄛𞄨𞄱𞄄𞄤𞄲𞄨", "𞄒𞄫𞄰𞄒𞄪𞄱", "𞄤𞄨𞄱", "𞄀𞄪𞄴", "𞄛𞄤𞄱𞄞𞄤𞄦", "𞄔𞄩𞄴𞄆𞄨𞄰", "𞄕𞄩𞄲𞄔𞄄𞄰𞄤", "𞄛𞄤𞄱𞄒𞄤𞄰", "𞄪𞄱𞄀𞄤𞄴", "𞄚𞄦𞄲𞄤𞄚𞄄𞄰𞄫", "𞄒𞄩𞄱𞄔𞄬𞄴"}, + {}, +} + +var localeTableHr = [5][]string{ + {"ned", "pon", "uto", "sri", "čet", "pet", "sub"}, + {"nedjelja", "ponedjeljak", "utorak", "srijeda", "četvrtak", "petak", "subota"}, + {"sij", "velj", "ožu", "tra", "svi", "lip", "srp", "kol", "ruj", "lis", "stu", "pro"}, + {"siječnja", "veljače", "ožujka", "travnja", "svibnja", "lipnja", "srpnja", "kolovoza", "rujna", "listopada", "studenoga", "prosinca"}, + {}, +} + +var localeTableHrBA = [5][]string{ + {"ned", "pon", "uto", "sri", "čet", "pet", "sub"}, + {"nedjelja", "ponedjeljak", "utorak", "srijeda", "četvrtak", "petak", "subota"}, + {"sij", "velj", "ožu", "tra", "svi", "lip", "srp", "kol", "ruj", "lis", "stu", "pro"}, + {"siječnja", "veljače", "ožujka", "travnja", "svibnja", "lipnja", "srpnja", "kolovoza", "rujna", "listopada", "studenoga", "prosinca"}, + {}, +} + +var localeTableHrHR = [5][]string{ + {"ned", "pon", "uto", "sri", "čet", "pet", "sub"}, + {"nedjelja", "ponedjeljak", "utorak", "srijeda", "četvrtak", "petak", "subota"}, + {"sij", "velj", "ožu", "tra", "svi", "lip", "srp", "kol", "ruj", "lis", "stu", "pro"}, + {"siječnja", "veljače", "ožujka", "travnja", "svibnja", "lipnja", "srpnja", "kolovoza", "rujna", "listopada", "studenoga", "prosinca"}, + {}, +} + +var localeTableHsb = [5][]string{ + {"nje", "pón", "wut", "srj", "štw", "pja", "sob"}, + {"njedźela", "póndźela", "wutora", "srjeda", "štwórtk", "pjatk", "sobota"}, + {"jan.", "feb.", "měr.", "apr.", "mej.", "jun.", "jul.", "awg.", "sep.", "okt.", "now.", "dec."}, + {"januara", "februara", "měrca", "apryla", "meje", "junija", "julija", "awgusta", "septembra", "oktobra", "nowembra", "decembra"}, + {"dop.", "pop."}, +} + +var localeTableHsbDE = [5][]string{ + {"nje", "pón", "wut", "srj", "štw", "pja", "sob"}, + {"njedźela", "póndźela", "wutora", "srjeda", "štwórtk", "pjatk", "sobota"}, + {"jan.", "feb.", "měr.", "apr.", "mej.", "jun.", "jul.", "awg.", "sep.", "okt.", "now.", "dec."}, + {"januara", "februara", "měrca", "apryla", "meje", "junija", "julija", "awgusta", "septembra", "oktobra", "nowembra", "decembra"}, + {"dop.", "pop."}, +} + +var localeTableHu = [5][]string{ + {"V", "H", "K", "Sze", "Cs", "P", "Szo"}, + {"vasárnap", "hétfő", "kedd", "szerda", "csütörtök", "péntek", "szombat"}, + {"jan.", "febr.", "márc.", "ápr.", "máj.", "jún.", "júl.", "aug.", "szept.", "okt.", "nov.", "dec."}, + {"január", "február", "március", "április", "május", "június", "július", "augusztus", "szeptember", "október", "november", "december"}, + {"de.", "du."}, +} + +var localeTableHuHU = [5][]string{ + {"V", "H", "K", "Sze", "Cs", "P", "Szo"}, + {"vasárnap", "hétfő", "kedd", "szerda", "csütörtök", "péntek", "szombat"}, + {"jan.", "febr.", "márc.", "ápr.", "máj.", "jún.", "júl.", "aug.", "szept.", "okt.", "nov.", "dec."}, 
+ {"január", "február", "március", "április", "május", "június", "július", "augusztus", "szeptember", "október", "november", "december"}, + {"de.", "du."}, +} + +var localeTableHy = [5][]string{ + {"կիր", "երկ", "երք", "չրք", "հնգ", "ուր", "շբթ"}, + {"կիրակի", "երկուշաբթի", "երեքշաբթի", "չորեքշաբթի", "հինգշաբթի", "ուրբաթ", "շաբաթ"}, + {"հնվ", "փտվ", "մրտ", "ապր", "մյս", "հնս", "հլս", "օգս", "սեպ", "հոկ", "նոյ", "դեկ"}, + {"հունվարի", "փետրվարի", "մարտի", "ապրիլի", "մայիսի", "հունիսի", "հուլիսի", "օգոստոսի", "սեպտեմբերի", "հոկտեմբերի", "նոյեմբերի", "դեկտեմբերի"}, + {"ա", "հ"}, +} + +var localeTableHyAM = [5][]string{ + {"կիր", "երկ", "երք", "չրք", "հնգ", "ուր", "շբթ"}, + {"կիրակի", "երկուշաբթի", "երեքշաբթի", "չորեքշաբթի", "հինգշաբթի", "ուրբաթ", "շաբաթ"}, + {"հնվ", "փտվ", "մրտ", "ապր", "մյս", "հնս", "հլս", "օգս", "սեպ", "հոկ", "նոյ", "դեկ"}, + {"հունվարի", "փետրվարի", "մարտի", "ապրիլի", "մայիսի", "հունիսի", "հուլիսի", "օգոստոսի", "սեպտեմբերի", "հոկտեմբերի", "նոյեմբերի", "դեկտեմբերի"}, + {"ա", "հ"}, +} + +var localeTableIa = [5][]string{ + {"dom", "lun", "mar", "mer", "jov", "ven", "sab"}, + {"dominica", "lunedi", "martedi", "mercuridi", "jovedi", "venerdi", "sabbato"}, + {"jan", "feb", "mar", "apr", "mai", "jun", "jul", "aug", "sep", "oct", "nov", "dec"}, + {"januario", "februario", "martio", "april", "maio", "junio", "julio", "augusto", "septembre", "octobre", "novembre", "decembre"}, + {}, +} + +var localeTableIa001 = [5][]string{ + {"dom", "lun", "mar", "mer", "jov", "ven", "sab"}, + {"dominica", "lunedi", "martedi", "mercuridi", "jovedi", "venerdi", "sabbato"}, + {"jan", "feb", "mar", "apr", "mai", "jun", "jul", "aug", "sep", "oct", "nov", "dec"}, + {"januario", "februario", "martio", "april", "maio", "junio", "julio", "augusto", "septembre", "octobre", "novembre", "decembre"}, + {}, +} + +var localeTableId = [5][]string{ + {"Min", "Sen", "Sel", "Rab", "Kam", "Jum", "Sab"}, + {"Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"}, + {"Jan", "Feb", "Mar", "Apr", "Mei", "Jun", "Jul", "Agu", "Sep", "Okt", "Nov", "Des"}, + {"Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"}, + {}, +} + +var localeTableIdID = [5][]string{ + {"Min", "Sen", "Sel", "Rab", "Kam", "Jum", "Sab"}, + {"Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"}, + {"Jan", "Feb", "Mar", "Apr", "Mei", "Jun", "Jul", "Agu", "Sep", "Okt", "Nov", "Des"}, + {"Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"}, + {}, +} + +var localeTableIe = [5][]string{ + {"sol.", "lun.", "mar.", "mer.", "jov.", "ven.", "sat."}, + {"soledí", "lunedí", "mardí", "mercurdí", "jovedí", "venerdí", "saturdí"}, + {"jan.", "febr.", "mar.", "apr.", "may", "jun.", "julí", "aug.", "sept.", "oct.", "nov.", "dec."}, + {"januar", "februar", "marte", "april", "may", "junio", "julí", "august", "septembre", "octobre", "novembre", "decembre"}, + {"a.m.", "p.m."}, +} + +var localeTableIeEE = [5][]string{ + {"sol.", "lun.", "mar.", "mer.", "jov.", "ven.", "sat."}, + {"soledí", "lunedí", "mardí", "mercurdí", "jovedí", "venerdí", "saturdí"}, + {"jan.", "febr.", "mar.", "apr.", "may", "jun.", "julí", "aug.", "sept.", "oct.", "nov.", "dec."}, + {"januar", "februar", "marte", "april", "may", "junio", "julí", "august", "septembre", "octobre", "novembre", "decembre"}, + {"a.m.", "p.m."}, +} + +var localeTableIg = [5][]string{ + {"Sọn", "Mọn", "Tiu", "Wen", "Tọọ", "Fraị", "Sat"}, + {"Sọndee", "Mọnde", "Tiuzdee", 
"Wenezdee", "Tọọzdee", "Fraịdee", "Satọdee"}, + {"Jen", "Feb", "Maa", "Epr", "Mee", "Juu", "Jul", "Ọgọ", "Sep", "Ọkt", "Nov", "Dis"}, + {"Jenụwarị", "Febrụwarị", "Maachị", "Epreel", "Mee", "Juun", "Julaị", "Ọgọọst", "Septemba", "Ọktoba", "Novemba", "Disemba"}, + {"A.M.", "P.M."}, +} + +var localeTableIgNG = [5][]string{ + {"Sọn", "Mọn", "Tiu", "Wen", "Tọọ", "Fraị", "Sat"}, + {"Sọndee", "Mọnde", "Tiuzdee", "Wenezdee", "Tọọzdee", "Fraịdee", "Satọdee"}, + {"Jen", "Feb", "Maa", "Epr", "Mee", "Juu", "Jul", "Ọgọ", "Sep", "Ọkt", "Nov", "Dis"}, + {"Jenụwarị", "Febrụwarị", "Maachị", "Epreel", "Mee", "Juun", "Julaị", "Ọgọọst", "Septemba", "Ọktoba", "Novemba", "Disemba"}, + {"A.M.", "P.M."}, +} + +var localeTableIi = [5][]string{ + {"ꑭꆏ", "ꆏꋍ", "ꆏꑍ", "ꆏꌕ", "ꆏꇖ", "ꆏꉬ", "ꆏꃘ"}, + {"ꑭꆏꑍ", "ꆏꊂꋍ", "ꆏꊂꑍ", "ꆏꊂꌕ", "ꆏꊂꇖ", "ꆏꊂꉬ", "ꆏꊂꃘ"}, + {}, + {"ꋍꆪ", "ꑍꆪ", "ꌕꆪ", "ꇖꆪ", "ꉬꆪ", "ꃘꆪ", "ꏃꆪ", "ꉆꆪ", "ꈬꆪ", "ꊰꆪ", "ꊰꊪꆪ", "ꊰꑋꆪ"}, + {"ꎸꄑ", "ꁯꋒ"}, +} + +var localeTableIiCN = [5][]string{ + {"ꑭꆏ", "ꆏꋍ", "ꆏꑍ", "ꆏꌕ", "ꆏꇖ", "ꆏꉬ", "ꆏꃘ"}, + {"ꑭꆏꑍ", "ꆏꊂꋍ", "ꆏꊂꑍ", "ꆏꊂꌕ", "ꆏꊂꇖ", "ꆏꊂꉬ", "ꆏꊂꃘ"}, + {}, + {"ꋍꆪ", "ꑍꆪ", "ꌕꆪ", "ꇖꆪ", "ꉬꆪ", "ꃘꆪ", "ꏃꆪ", "ꉆꆪ", "ꈬꆪ", "ꊰꆪ", "ꊰꊪꆪ", "ꊰꑋꆪ"}, + {"ꎸꄑ", "ꁯꋒ"}, +} + +var localeTableIs = [5][]string{ + {"sun.", "mán.", "þri.", "mið.", "fim.", "fös.", "lau."}, + {"sunnudagur", "mánudagur", "þriðjudagur", "miðvikudagur", "fimmtudagur", "föstudagur", "laugardagur"}, + {"jan.", "feb.", "mar.", "apr.", "maí", "jún.", "júl.", "ágú.", "sep.", "okt.", "nóv.", "des."}, + {"janúar", "febrúar", "mars", "apríl", "maí", "júní", "júlí", "ágúst", "september", "október", "nóvember", "desember"}, + {"f.h.", "e.h."}, +} + +var localeTableIsIS = [5][]string{ + {"sun.", "mán.", "þri.", "mið.", "fim.", "fös.", "lau."}, + {"sunnudagur", "mánudagur", "þriðjudagur", "miðvikudagur", "fimmtudagur", "föstudagur", "laugardagur"}, + {"jan.", "feb.", "mar.", "apr.", "maí", "jún.", "júl.", "ágú.", "sep.", "okt.", "nóv.", "des."}, + {"janúar", "febrúar", "mars", "apríl", "maí", "júní", "júlí", "ágúst", "september", "október", "nóvember", "desember"}, + {"f.h.", "e.h."}, +} + +var localeTableIt = [5][]string{ + {"dom", "lun", "mar", "mer", "gio", "ven", "sab"}, + {"domenica", "lunedì", "martedì", "mercoledì", "giovedì", "venerdì", "sabato"}, + {"gen", "feb", "mar", "apr", "mag", "giu", "lug", "ago", "set", "ott", "nov", "dic"}, + {"gennaio", "febbraio", "marzo", "aprile", "maggio", "giugno", "luglio", "agosto", "settembre", "ottobre", "novembre", "dicembre"}, + {"m.", "p."}, +} + +var localeTableItCH = [5][]string{ + {"dom", "lun", "mar", "mer", "gio", "ven", "sab"}, + {"domenica", "lunedì", "martedì", "mercoledì", "giovedì", "venerdì", "sabato"}, + {"gen", "feb", "mar", "apr", "mag", "giu", "lug", "ago", "set", "ott", "nov", "dic"}, + {"gennaio", "febbraio", "marzo", "aprile", "maggio", "giugno", "luglio", "agosto", "settembre", "ottobre", "novembre", "dicembre"}, + {"m.", "p."}, +} + +var localeTableItIT = [5][]string{ + {"dom", "lun", "mar", "mer", "gio", "ven", "sab"}, + {"domenica", "lunedì", "martedì", "mercoledì", "giovedì", "venerdì", "sabato"}, + {"gen", "feb", "mar", "apr", "mag", "giu", "lug", "ago", "set", "ott", "nov", "dic"}, + {"gennaio", "febbraio", "marzo", "aprile", "maggio", "giugno", "luglio", "agosto", "settembre", "ottobre", "novembre", "dicembre"}, + {"m.", "p."}, +} + +var localeTableItSM = [5][]string{ + {"dom", "lun", "mar", "mer", "gio", "ven", "sab"}, + {"domenica", "lunedì", "martedì", "mercoledì", "giovedì", "venerdì", "sabato"}, + {"gen", "feb", "mar", "apr", "mag", "giu", "lug", "ago", "set", "ott", 
"nov", "dic"}, + {"gennaio", "febbraio", "marzo", "aprile", "maggio", "giugno", "luglio", "agosto", "settembre", "ottobre", "novembre", "dicembre"}, + {"m.", "p."}, +} + +var localeTableItVA = [5][]string{ + {"dom", "lun", "mar", "mer", "gio", "ven", "sab"}, + {"domenica", "lunedì", "martedì", "mercoledì", "giovedì", "venerdì", "sabato"}, + {"gen", "feb", "mar", "apr", "mag", "giu", "lug", "ago", "set", "ott", "nov", "dic"}, + {"gennaio", "febbraio", "marzo", "aprile", "maggio", "giugno", "luglio", "agosto", "settembre", "ottobre", "novembre", "dicembre"}, + {"m.", "p."}, +} + +var localeTableIu = [5][]string{ + {}, + {"ᓈᑦᑏᖑᔭᖅ", "ᓇᒡᒐᔾᔭᐅ", "ᓇᒡᒐᔾᔭᐅᓕᖅᑭ", "ᐱᖓᑦᓯᖅ", "ᓯᑕᒻᒥᖅ", "ᑕᓪᓕᒻᒥᐅᑦ", "ᓈᑦᓰᖑᔭᓛᕐᓂᐊᖅ"}, + {}, + {"ᔭᓐᓄᐊᓕ", "ᕕᕝᕗᐊᓕ", "ᒫᑦᓯ", "ᐊᐃᑉᐳᓗ", "ᒪᐃ", "ᔫᓂ", "ᔪᓚᐃ", "ᐊᐅᒡᒍᓯ", "ᓯᑎᐱᕆ", "ᐆᑦᑑᕝᕙ", "ᓄᕕᐱᕆ", "ᑎᓯᐱᕆ"}, + {"am", "pm"}, +} + +var localeTableIuCA = [5][]string{ + {}, + {"ᓈᑦᑏᖑᔭᖅ", "ᓇᒡᒐᔾᔭᐅ", "ᓇᒡᒐᔾᔭᐅᓕᖅᑭ", "ᐱᖓᑦᓯᖅ", "ᓯᑕᒻᒥᖅ", "ᑕᓪᓕᒻᒥᐅᑦ", "ᓈᑦᓰᖑᔭᓛᕐᓂᐊᖅ"}, + {}, + {"ᔭᓐᓄᐊᓕ", "ᕕᕝᕗᐊᓕ", "ᒫᑦᓯ", "ᐊᐃᑉᐳᓗ", "ᒪᐃ", "ᔫᓂ", "ᔪᓚᐃ", "ᐊᐅᒡᒍᓯ", "ᓯᑎᐱᕆ", "ᐆᑦᑑᕝᕙ", "ᓄᕕᐱᕆ", "ᑎᓯᐱᕆ"}, + {"am", "pm"}, +} + +var localeTableJa = [5][]string{ + {"日", "月", "火", "水", "木", "金", "土"}, + {"日曜日", "月曜日", "火曜日", "水曜日", "木曜日", "金曜日", "土曜日"}, + {}, + {"1月", "2月", "3月", "4月", "5月", "6月", "7月", "8月", "9月", "10月", "11月", "12月"}, + {"午前", "午後"}, +} + +var localeTableJaJP = [5][]string{ + {"日", "月", "火", "水", "木", "金", "土"}, + {"日曜日", "月曜日", "火曜日", "水曜日", "木曜日", "金曜日", "土曜日"}, + {}, + {"1月", "2月", "3月", "4月", "5月", "6月", "7月", "8月", "9月", "10月", "11月", "12月"}, + {"午前", "午後"}, +} + +var localeTableJgo = [5][]string{ + {}, + {"Sɔ́ndi", "Mɔ́ndi", "Ápta Mɔ́ndi", "Wɛ́nɛsɛdɛ", "Tɔ́sɛdɛ", "Fɛlâyɛdɛ", "Sásidɛ"}, + {}, + {"Nduŋmbi Saŋ", "Pɛsaŋ Pɛ́pá", "Pɛsaŋ Pɛ́tát", "Pɛsaŋ Pɛ́nɛ́kwa", "Pɛsaŋ Pataa", "Pɛsaŋ Pɛ́nɛ́ntúkú", "Pɛsaŋ Saambá", "Pɛsaŋ Pɛ́nɛ́fɔm", "Pɛsaŋ Pɛ́nɛ́pfúꞋú", "Pɛsaŋ Nɛgɛ́m", "Pɛsaŋ Ntsɔ̌pmɔ́", "Pɛsaŋ Ntsɔ̌ppá"}, + {"mbaꞌmbaꞌ", "ŋkambɔ́tnji"}, +} + +var localeTableJgoCM = [5][]string{ + {}, + {"Sɔ́ndi", "Mɔ́ndi", "Ápta Mɔ́ndi", "Wɛ́nɛsɛdɛ", "Tɔ́sɛdɛ", "Fɛlâyɛdɛ", "Sásidɛ"}, + {}, + {"Nduŋmbi Saŋ", "Pɛsaŋ Pɛ́pá", "Pɛsaŋ Pɛ́tát", "Pɛsaŋ Pɛ́nɛ́kwa", "Pɛsaŋ Pataa", "Pɛsaŋ Pɛ́nɛ́ntúkú", "Pɛsaŋ Saambá", "Pɛsaŋ Pɛ́nɛ́fɔm", "Pɛsaŋ Pɛ́nɛ́pfúꞋú", "Pɛsaŋ Nɛgɛ́m", "Pɛsaŋ Ntsɔ̌pmɔ́", "Pɛsaŋ Ntsɔ̌ppá"}, + {"mbaꞌmbaꞌ", "ŋkambɔ́tnji"}, +} + +var localeTableJmc = [5][]string{ + {"Jpi", "Jtt", "Jnn", "Jtn", "Alh", "Iju", "Jmo"}, + {"Jumapilyi", "Jumatatuu", "Jumanne", "Jumatanu", "Alhamisi", "Ijumaa", "Jumamosi"}, + {"Jan", "Feb", "Mac", "Apr", "Mei", "Jun", "Jul", "Ago", "Sep", "Okt", "Nov", "Des"}, + {"Januari", "Februari", "Machi", "Aprilyi", "Mei", "Junyi", "Julyai", "Agusti", "Septemba", "Oktoba", "Novemba", "Desemba"}, + {"utuko", "kyiukonyi"}, +} + +var localeTableJmcTZ = [5][]string{ + {"Jpi", "Jtt", "Jnn", "Jtn", "Alh", "Iju", "Jmo"}, + {"Jumapilyi", "Jumatatuu", "Jumanne", "Jumatanu", "Alhamisi", "Ijumaa", "Jumamosi"}, + {"Jan", "Feb", "Mac", "Apr", "Mei", "Jun", "Jul", "Ago", "Sep", "Okt", "Nov", "Des"}, + {"Januari", "Februari", "Machi", "Aprilyi", "Mei", "Junyi", "Julyai", "Agusti", "Septemba", "Oktoba", "Novemba", "Desemba"}, + {"utuko", "kyiukonyi"}, +} + +var localeTableJv = [5][]string{ + {"Ahad", "Sen", "Sel", "Rab", "Kam", "Jum", "Sab"}, + {"Ahad", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"}, + {"Jan", "Feb", "Mar", "Apr", "Mei", "Jun", "Jul", "Agt", "Sep", "Okt", "Nov", "Des"}, + {"Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"}, + {"Isuk", "Wengi"}, +} + +var 
localeTableJvID = [5][]string{ + {"Ahad", "Sen", "Sel", "Rab", "Kam", "Jum", "Sab"}, + {"Ahad", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"}, + {"Jan", "Feb", "Mar", "Apr", "Mei", "Jun", "Jul", "Agt", "Sep", "Okt", "Nov", "Des"}, + {"Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"}, + {"Isuk", "Wengi"}, +} + +var localeTableKa = [5][]string{ + {"კვი", "ორშ", "სამ", "ოთხ", "ხუთ", "პარ", "შაბ"}, + {"კვირა", "ორშაბათი", "სამშაბათი", "ოთხშაბათი", "ხუთშაბათი", "პარასკევი", "შაბათი"}, + {"იან", "თებ", "მარ", "აპრ", "მაი", "ივნ", "ივლ", "აგვ", "სექ", "ოქტ", "ნოე", "დეკ"}, + {"იანვარი", "თებერვალი", "მარტი", "აპრილი", "მაისი", "ივნისი", "ივლისი", "აგვისტო", "სექტემბერი", "ოქტომბერი", "ნოემბერი", "დეკემბერი"}, + {"a", "p"}, +} + +var localeTableKaGE = [5][]string{ + {"კვი", "ორშ", "სამ", "ოთხ", "ხუთ", "პარ", "შაბ"}, + {"კვირა", "ორშაბათი", "სამშაბათი", "ოთხშაბათი", "ხუთშაბათი", "პარასკევი", "შაბათი"}, + {"იან", "თებ", "მარ", "აპრ", "მაი", "ივნ", "ივლ", "აგვ", "სექ", "ოქტ", "ნოე", "დეკ"}, + {"იანვარი", "თებერვალი", "მარტი", "აპრილი", "მაისი", "ივნისი", "ივლისი", "აგვისტო", "სექტემბერი", "ოქტომბერი", "ნოემბერი", "დეკემბერი"}, + {"a", "p"}, +} + +var localeTableKab = [5][]string{ + {"Yan", "San", "Kraḍ", "Kuẓ", "Sam", "Sḍis", "Say"}, + {"Yanass", "Sanass", "Kraḍass", "Kuẓass", "Samass", "Sḍisass", "Sayass"}, + {"Yen", "Fur", "Meɣ", "Yeb", "May", "Yun", "Yul", "Ɣuc", "Cte", "Tub", "Nun", "Duǧ"}, + {"Yennayer", "Fuṛar", "Meɣres", "Yebrir", "Mayyu", "Yunyu", "Yulyu", "Ɣuct", "Ctembeṛ", "Tubeṛ", "Nunembeṛ", "Duǧembeṛ"}, + {"ntufat", "ntmeddit"}, +} + +var localeTableKabDZ = [5][]string{ + {"Yan", "San", "Kraḍ", "Kuẓ", "Sam", "Sḍis", "Say"}, + {"Yanass", "Sanass", "Kraḍass", "Kuẓass", "Samass", "Sḍisass", "Sayass"}, + {"Yen", "Fur", "Meɣ", "Yeb", "May", "Yun", "Yul", "Ɣuc", "Cte", "Tub", "Nun", "Duǧ"}, + {"Yennayer", "Fuṛar", "Meɣres", "Yebrir", "Mayyu", "Yunyu", "Yulyu", "Ɣuct", "Ctembeṛ", "Tubeṛ", "Nunembeṛ", "Duǧembeṛ"}, + {"ntufat", "ntmeddit"}, +} + +var localeTableKaj = [5][]string{ + {"Lad", "Lin", "Tal", "Lar", "Lam", "Jum", "Asa"}, + {"Ladi", "Lintani", "Talata", "Larba", "Lamit", "Juma", "Asabar"}, + {"A̱yr", "A̱hw", "A̱ta", "A̱na", "A̱pf", "A̱ki", "A̱ty", "A̱ni", "A̱ku", "Swa", "Sby", "Sbh"}, + {"Hywan A̱yrnig", "Hywan A̱hwa", "Hywan A̱tat", "Hywan A̱naai", "Hywan A̱pfwon", "Hywan A̱kitat", "Hywan A̱tyirin", "Hywan A̱ninai", "Hywan A̱kumviriyin", "Hywan Swak", "Hywan Swak B’a̱yrnig", "Hywan Swak B’a̱hwa"}, + {"A.M.", "P.M."}, +} + +var localeTableKajNG = [5][]string{ + {"Lad", "Lin", "Tal", "Lar", "Lam", "Jum", "Asa"}, + {"Ladi", "Lintani", "Talata", "Larba", "Lamit", "Juma", "Asabar"}, + {"A̱yr", "A̱hw", "A̱ta", "A̱na", "A̱pf", "A̱ki", "A̱ty", "A̱ni", "A̱ku", "Swa", "Sby", "Sbh"}, + {"Hywan A̱yrnig", "Hywan A̱hwa", "Hywan A̱tat", "Hywan A̱naai", "Hywan A̱pfwon", "Hywan A̱kitat", "Hywan A̱tyirin", "Hywan A̱ninai", "Hywan A̱kumviriyin", "Hywan Swak", "Hywan Swak B’a̱yrnig", "Hywan Swak B’a̱hwa"}, + {"A.M.", "P.M."}, +} + +var localeTableKam = [5][]string{ + {"Wky", "Wkw", "Wkl", "Wtũ", "Wkn", "Wtn", "Wth"}, + {"Wa kyumwa", "Wa kwambĩlĩlya", "Wa kelĩ", "Wa katatũ", "Wa kana", "Wa katano", "Wa thanthatũ"}, + {"Mbe", "Kel", "Ktũ", "Kan", "Ktn", "Tha", "Moo", "Nya", "Knd", "Ĩku", "Ĩkm", "Ĩkl"}, + {"Mwai wa mbee", "Mwai wa kelĩ", "Mwai wa katatũ", "Mwai wa kana", "Mwai wa katano", "Mwai wa thanthatũ", "Mwai wa muonza", "Mwai wa nyaanya", "Mwai wa kenda", "Mwai wa ĩkumi", "Mwai wa ĩkumi na ĩmwe", "Mwai wa ĩkumi na ilĩ"}, 
+ {"Ĩyakwakya", "Ĩyawĩoo"}, +} + +var localeTableKamKE = [5][]string{ + {"Wky", "Wkw", "Wkl", "Wtũ", "Wkn", "Wtn", "Wth"}, + {"Wa kyumwa", "Wa kwambĩlĩlya", "Wa kelĩ", "Wa katatũ", "Wa kana", "Wa katano", "Wa thanthatũ"}, + {"Mbe", "Kel", "Ktũ", "Kan", "Ktn", "Tha", "Moo", "Nya", "Knd", "Ĩku", "Ĩkm", "Ĩkl"}, + {"Mwai wa mbee", "Mwai wa kelĩ", "Mwai wa katatũ", "Mwai wa kana", "Mwai wa katano", "Mwai wa thanthatũ", "Mwai wa muonza", "Mwai wa nyaanya", "Mwai wa kenda", "Mwai wa ĩkumi", "Mwai wa ĩkumi na ĩmwe", "Mwai wa ĩkumi na ilĩ"}, + {"Ĩyakwakya", "Ĩyawĩoo"}, +} + +var localeTableKcg = [5][]string{ + {"Lad", "Tan", "Tal", "Lar", "Lam", "Jum", "Asa"}, + {"Ladi", "Tanii", "Talata", "Larba", "Lamit", "Juma", "Asabat"}, + {"Juw", "Swi", "Tsa", "Nya", "Tsw", "Ata", "Ana", "Ari", "Aku", "Swa", "Man", "Mas"}, + {"Zwat Juwung", "Zwat Swiyang", "Zwat Tsat", "Zwat Nyai", "Zwat Tswon", "Zwat Ataah", "Zwat Anatat", "Zwat Arinai", "Zwat Akubunyung", "Zwat Swag", "Zwat Mangjuwang", "Zwat Swag-Ma-Suyang"}, + {}, +} + +var localeTableKcgNG = [5][]string{ + {"Lad", "Tan", "Tal", "Lar", "Lam", "Jum", "Asa"}, + {"Ladi", "Tanii", "Talata", "Larba", "Lamit", "Juma", "Asabat"}, + {"Juw", "Swi", "Tsa", "Nya", "Tsw", "Ata", "Ana", "Ari", "Aku", "Swa", "Man", "Mas"}, + {"Zwat Juwung", "Zwat Swiyang", "Zwat Tsat", "Zwat Nyai", "Zwat Tswon", "Zwat Ataah", "Zwat Anatat", "Zwat Arinai", "Zwat Akubunyung", "Zwat Swag", "Zwat Mangjuwang", "Zwat Swag-Ma-Suyang"}, + {}, +} + +var localeTableKde = [5][]string{ + {"Ll2", "Ll3", "Ll4", "Ll5", "Ll6", "Ll7", "Ll1"}, + {"Liduva lyapili", "Liduva lyatatu", "Liduva lyanchechi", "Liduva lyannyano", "Liduva lyannyano na linji", "Liduva lyannyano na mavili", "Liduva litandi"}, + {"Jan", "Feb", "Mac", "Apr", "Mei", "Jun", "Jul", "Ago", "Sep", "Okt", "Nov", "Des"}, + {"Mwedi Ntandi", "Mwedi wa Pili", "Mwedi wa Tatu", "Mwedi wa Nchechi", "Mwedi wa Nnyano", "Mwedi wa Nnyano na Umo", "Mwedi wa Nnyano na Mivili", "Mwedi wa Nnyano na Mitatu", "Mwedi wa Nnyano na Nchechi", "Mwedi wa Nnyano na Nnyano", "Mwedi wa Nnyano na Nnyano na U", "Mwedi wa Nnyano na Nnyano na M"}, + {"Muhi", "Chilo"}, +} + +var localeTableKdeTZ = [5][]string{ + {"Ll2", "Ll3", "Ll4", "Ll5", "Ll6", "Ll7", "Ll1"}, + {"Liduva lyapili", "Liduva lyatatu", "Liduva lyanchechi", "Liduva lyannyano", "Liduva lyannyano na linji", "Liduva lyannyano na mavili", "Liduva litandi"}, + {"Jan", "Feb", "Mac", "Apr", "Mei", "Jun", "Jul", "Ago", "Sep", "Okt", "Nov", "Des"}, + {"Mwedi Ntandi", "Mwedi wa Pili", "Mwedi wa Tatu", "Mwedi wa Nchechi", "Mwedi wa Nnyano", "Mwedi wa Nnyano na Umo", "Mwedi wa Nnyano na Mivili", "Mwedi wa Nnyano na Mitatu", "Mwedi wa Nnyano na Nchechi", "Mwedi wa Nnyano na Nnyano", "Mwedi wa Nnyano na Nnyano na U", "Mwedi wa Nnyano na Nnyano na M"}, + {"Muhi", "Chilo"}, +} + +var localeTableKea = [5][]string{ + {"dum", "sig", "ter", "kua", "kin", "ses", "sab"}, + {"dumingu", "sigunda-fera", "tersa-fera", "kuarta-fera", "kinta-fera", "sesta-fera", "sábadu"}, + {"Jan", "Feb", "Mar", "Abr", "Mai", "Jun", "Jul", "Ago", "Set", "Otu", "Nuv", "Diz"}, + {"Janeru", "Febreru", "Marsu", "Abril", "Maiu", "Junhu", "Julhu", "Agostu", "Setenbru", "Otubru", "Nuvenbru", "Dizenbru"}, + {"am", "pm"}, +} + +var localeTableKeaCV = [5][]string{ + {"dum", "sig", "ter", "kua", "kin", "ses", "sab"}, + {"dumingu", "sigunda-fera", "tersa-fera", "kuarta-fera", "kinta-fera", "sesta-fera", "sábadu"}, + {"Jan", "Feb", "Mar", "Abr", "Mai", "Jun", "Jul", "Ago", "Set", "Otu", "Nuv", "Diz"}, + {"Janeru", "Febreru", "Marsu", "Abril", "Maiu", "Junhu", 
"Julhu", "Agostu", "Setenbru", "Otubru", "Nuvenbru", "Dizenbru"}, + {"am", "pm"}, +} + +var localeTableKgp = [5][]string{ + {"num.", "pir.", "rég.", "tẽg.", "vẽn.", "pén.", "sav."}, + {"numĩggu", "pir-kurã-há", "régre-kurã-há", "tẽgtũ-kurã-há", "vẽnhkãgra-kurã-há", "pénkar-kurã-há", "savnu"}, + {"1Ky.", "2Ky.", "3Ky.", "4Ky.", "5Ky.", "6Ky.", "7Ky.", "8Ky.", "9Ky.", "10Ky.", "11Ky.", "12Ky."}, + {"1-Kysã", "2-Kysã", "3-Kysã", "4-Kysã", "5-Kysã", "6-Kysã", "7-Kysã", "8-Kysã", "9-Kysã", "10-Kysã", "11-Kysã", "12-Kysã"}, + {}, +} + +var localeTableKgpBR = [5][]string{ + {"num.", "pir.", "rég.", "tẽg.", "vẽn.", "pén.", "sav."}, + {"numĩggu", "pir-kurã-há", "régre-kurã-há", "tẽgtũ-kurã-há", "vẽnhkãgra-kurã-há", "pénkar-kurã-há", "savnu"}, + {"1Ky.", "2Ky.", "3Ky.", "4Ky.", "5Ky.", "6Ky.", "7Ky.", "8Ky.", "9Ky.", "10Ky.", "11Ky.", "12Ky."}, + {"1-Kysã", "2-Kysã", "3-Kysã", "4-Kysã", "5-Kysã", "6-Kysã", "7-Kysã", "8-Kysã", "9-Kysã", "10-Kysã", "11-Kysã", "12-Kysã"}, + {}, +} + +var localeTableKhq = [5][]string{ + {"Alh", "Ati", "Ata", "Ala", "Alm", "Alj", "Ass"}, + {"Alhadi", "Atini", "Atalata", "Alarba", "Alhamiisa", "Aljuma", "Assabdu"}, + {"Žan", "Fee", "Mar", "Awi", "Me", "Žuw", "Žuy", "Ut", "Sek", "Okt", "Noo", "Dee"}, + {"Žanwiye", "Feewiriye", "Marsi", "Awiril", "Me", "Žuweŋ", "Žuyye", "Ut", "Sektanbur", "Oktoobur", "Noowanbur", "Deesanbur"}, + {"Adduha", "Aluula"}, +} + +var localeTableKhqML = [5][]string{ + {"Alh", "Ati", "Ata", "Ala", "Alm", "Alj", "Ass"}, + {"Alhadi", "Atini", "Atalata", "Alarba", "Alhamiisa", "Aljuma", "Assabdu"}, + {"Žan", "Fee", "Mar", "Awi", "Me", "Žuw", "Žuy", "Ut", "Sek", "Okt", "Noo", "Dee"}, + {"Žanwiye", "Feewiriye", "Marsi", "Awiril", "Me", "Žuweŋ", "Žuyye", "Ut", "Sektanbur", "Oktoobur", "Noowanbur", "Deesanbur"}, + {"Adduha", "Aluula"}, +} + +var localeTableKi = [5][]string{ + {"KMA", "NTT", "NMN", "NMT", "ART", "NMA", "NMM"}, + {"Kiumia", "Njumatatũ", "Njumaine", "Njumatana", "Aramithi", "Njumaa", "Njumamothi"}, + {"JEN", "WKR", "WGT", "WKN", "WTN", "WTD", "WMJ", "WNN", "WKD", "WIK", "WMW", "DIT"}, + {"Njenuarĩ", "Mwere wa kerĩ", "Mwere wa gatatũ", "Mwere wa kana", "Mwere wa gatano", "Mwere wa gatandatũ", "Mwere wa mũgwanja", "Mwere wa kanana", "Mwere wa kenda", "Mwere wa ikũmi", "Mwere wa ikũmi na ũmwe", "Ndithemba"}, + {"Kiroko", "Hwaĩ-inĩ"}, +} + +var localeTableKiKE = [5][]string{ + {"KMA", "NTT", "NMN", "NMT", "ART", "NMA", "NMM"}, + {"Kiumia", "Njumatatũ", "Njumaine", "Njumatana", "Aramithi", "Njumaa", "Njumamothi"}, + {"JEN", "WKR", "WGT", "WKN", "WTN", "WTD", "WMJ", "WNN", "WKD", "WIK", "WMW", "DIT"}, + {"Njenuarĩ", "Mwere wa kerĩ", "Mwere wa gatatũ", "Mwere wa kana", "Mwere wa gatano", "Mwere wa gatandatũ", "Mwere wa mũgwanja", "Mwere wa kanana", "Mwere wa kenda", "Mwere wa ikũmi", "Mwere wa ikũmi na ũmwe", "Ndithemba"}, + {"Kiroko", "Hwaĩ-inĩ"}, +} + +var localeTableKk = [5][]string{ + {"жс", "дс", "сс", "ср", "бс", "жм", "сб"}, + {"жексенбі", "дүйсенбі", "сейсенбі", "сәрсенбі", "бейсенбі", "жұма", "сенбі"}, + {"қаң.", "ақп.", "нау.", "сәу.", "мам.", "мау.", "шіл.", "там.", "қыр.", "қаз.", "қар.", "жел."}, + {"қаңтар", "ақпан", "наурыз", "сәуір", "мамыр", "маусым", "шілде", "тамыз", "қыркүйек", "қазан", "қараша", "желтоқсан"}, + {}, +} + +var localeTableKkKZ = [5][]string{ + {"жс", "дс", "сс", "ср", "бс", "жм", "сб"}, + {"жексенбі", "дүйсенбі", "сейсенбі", "сәрсенбі", "бейсенбі", "жұма", "сенбі"}, + {"қаң.", "ақп.", "нау.", "сәу.", "мам.", "мау.", "шіл.", "там.", "қыр.", "қаз.", "қар.", "жел."}, + {"қаңтар", "ақпан", "наурыз", "сәуір", "мамыр", 
"маусым", "шілде", "тамыз", "қыркүйек", "қазан", "қараша", "желтоқсан"}, + {}, +} + +var localeTableKkj = [5][]string{ + {}, + {"sɔndi", "lundi", "mardi", "mɛrkɛrɛdi", "yedi", "vaŋdɛrɛdi", "mɔnɔ sɔndi"}, + {}, + {"pamba", "wanja", "mbiyɔ mɛndoŋgɔ", "Nyɔlɔmbɔŋgɔ", "Mɔnɔ ŋgbanja", "Nyaŋgwɛ ŋgbanja", "kuŋgwɛ", "fɛ", "njapi", "nyukul", "M11", "ɓulɓusɛ"}, + {}, +} + +var localeTableKkjCM = [5][]string{ + {}, + {"sɔndi", "lundi", "mardi", "mɛrkɛrɛdi", "yedi", "vaŋdɛrɛdi", "mɔnɔ sɔndi"}, + {}, + {"pamba", "wanja", "mbiyɔ mɛndoŋgɔ", "Nyɔlɔmbɔŋgɔ", "Mɔnɔ ŋgbanja", "Nyaŋgwɛ ŋgbanja", "kuŋgwɛ", "fɛ", "njapi", "nyukul", "M11", "ɓulɓusɛ"}, + {}, +} + +var localeTableKl = [5][]string{ + {"sap", "ata", "mar", "pin", "sis", "tal", "arf"}, + {"sapaat", "ataasinngorneq", "marlunngorneq", "pingasunngorneq", "sisamanngorneq", "tallimanngorneq", "arfininngorneq"}, + {"jan", "febr", "mar", "apr", "maj", "jun", "jul", "aug", "sept", "okt", "nov", "dec"}, + {"januaarip", "februaarip", "marsip", "apriilip", "maajip", "juunip", "juulip", "aggustip", "septembarip", "oktobarip", "novembarip", "decembarip"}, + {"u.t.", "u.k."}, +} + +var localeTableKlGL = [5][]string{ + {"sap", "ata", "mar", "pin", "sis", "tal", "arf"}, + {"sapaat", "ataasinngorneq", "marlunngorneq", "pingasunngorneq", "sisamanngorneq", "tallimanngorneq", "arfininngorneq"}, + {"jan", "febr", "mar", "apr", "maj", "jun", "jul", "aug", "sept", "okt", "nov", "dec"}, + {"januaarip", "februaarip", "marsip", "apriilip", "maajip", "juunip", "juulip", "aggustip", "septembarip", "oktobarip", "novembarip", "decembarip"}, + {"u.t.", "u.k."}, +} + +var localeTableKln = [5][]string{ + {"Kts", "Kot", "Koo", "Kos", "Koa", "Kom", "Kol"}, + {"Kotisap", "Kotaai", "Koaeng’", "Kosomok", "Koang’wan", "Komuut", "Kolo"}, + {"Mul", "Ngat", "Taa", "Iwo", "Mam", "Paa", "Nge", "Roo", "Bur", "Epe", "Kpt", "Kpa"}, + {"Mulgul", "Ng’atyaato", "Kiptaamo", "Iwootkuut", "Mamuut", "Paagi", "Ng’eiyeet", "Rooptui", "Bureet", "Epeeso", "Kipsuunde ne taai", "Kipsuunde nebo aeng’"}, + {"krn", "koosk"}, +} + +var localeTableKlnKE = [5][]string{ + {"Kts", "Kot", "Koo", "Kos", "Koa", "Kom", "Kol"}, + {"Kotisap", "Kotaai", "Koaeng’", "Kosomok", "Koang’wan", "Komuut", "Kolo"}, + {"Mul", "Ngat", "Taa", "Iwo", "Mam", "Paa", "Nge", "Roo", "Bur", "Epe", "Kpt", "Kpa"}, + {"Mulgul", "Ng’atyaato", "Kiptaamo", "Iwootkuut", "Mamuut", "Paagi", "Ng’eiyeet", "Rooptui", "Bureet", "Epeeso", "Kipsuunde ne taai", "Kipsuunde nebo aeng’"}, + {"krn", "koosk"}, +} + +var localeTableKm = [5][]string{ + {"អាទិត្យ", "ចន្ទ", "អង្គារ", "ពុធ", "ព្រហ", "សុក្រ", "សៅរ៍"}, + {"អាទិត្យ", "ច័ន្ទ", "អង្គារ", "ពុធ", "ព្រហស្បតិ៍", "សុក្រ", "សៅរ៍"}, + {}, + {"មករា", "កុម្ភៈ", "មីនា", "មេសា", "ឧសភា", "មិថុនា", "កក្កដា", "សីហា", "កញ្ញា", "តុលា", "វិច្ឆិកា", "ធ្នូ"}, + {"a", "p"}, +} + +var localeTableKmKH = [5][]string{ + {"អាទិត្យ", "ចន្ទ", "អង្គារ", "ពុធ", "ព្រហ", "សុក្រ", "សៅរ៍"}, + {"អាទិត្យ", "ច័ន្ទ", "អង្គារ", "ពុធ", "ព្រហស្បតិ៍", "សុក្រ", "សៅរ៍"}, + {}, + {"មករា", "កុម្ភៈ", "មីនា", "មេសា", "ឧសភា", "មិថុនា", "កក្កដា", "សីហា", "កញ្ញា", "តុលា", "វិច្ឆិកា", "ធ្នូ"}, + {"a", "p"}, +} + +var localeTableKn = [5][]string{ + {"ಭಾನು", "ಸೋಮ", "ಮಂಗಳ", "ಬುಧ", "ಗುರು", "ಶುಕ್ರ", "ಶನಿ"}, + {"ಭಾನುವಾರ", "ಸೋಮವಾರ", "ಮಂಗಳವಾರ", "ಬುಧವಾರ", "ಗುರುವಾರ", "ಶುಕ್ರವಾರ", "ಶನಿವಾರ"}, + {"ಜನವರಿ", "ಫೆಬ್ರವರಿ", "ಮಾರ್ಚ್", "ಏಪ್ರಿ", "ಮೇ", "ಜೂನ್", "ಜುಲೈ", "ಆಗಸ್ಟ್", "ಸೆಪ್ಟೆಂ", "ಅಕ್ಟೋ", "ನವೆಂ", "ಡಿಸೆಂ"}, + {"ಜನವರಿ", "ಫೆಬ್ರವರಿ", "ಮಾರ್ಚ್", "ಏಪ್ರಿಲ್", "ಮೇ", "ಜೂನ್", "ಜುಲೈ", "ಆಗಸ್ಟ್", "ಸೆಪ್ಟೆಂಬರ್", "ಅಕ್ಟೋಬರ್", "ನವೆಂಬರ್", "ಡಿಸೆಂಬರ್"}, + {"ಪೂರ್ವಾಹ್ನ", "ಅಪರಾಹ್ನ"}, +} + +var 
localeTableKnIN = [5][]string{ + {"ಭಾನು", "ಸೋಮ", "ಮಂಗಳ", "ಬುಧ", "ಗುರು", "ಶುಕ್ರ", "ಶನಿ"}, + {"ಭಾನುವಾರ", "ಸೋಮವಾರ", "ಮಂಗಳವಾರ", "ಬುಧವಾರ", "ಗುರುವಾರ", "ಶುಕ್ರವಾರ", "ಶನಿವಾರ"}, + {"ಜನವರಿ", "ಫೆಬ್ರವರಿ", "ಮಾರ್ಚ್", "ಏಪ್ರಿ", "ಮೇ", "ಜೂನ್", "ಜುಲೈ", "ಆಗಸ್ಟ್", "ಸೆಪ್ಟೆಂ", "ಅಕ್ಟೋ", "ನವೆಂ", "ಡಿಸೆಂ"}, + {"ಜನವರಿ", "ಫೆಬ್ರವರಿ", "ಮಾರ್ಚ್", "ಏಪ್ರಿಲ್", "ಮೇ", "ಜೂನ್", "ಜುಲೈ", "ಆಗಸ್ಟ್", "ಸೆಪ್ಟೆಂಬರ್", "ಅಕ್ಟೋಬರ್", "ನವೆಂಬರ್", "ಡಿಸೆಂಬರ್"}, + {"ಪೂರ್ವಾಹ್ನ", "ಅಪರಾಹ್ನ"}, +} + +var localeTableKo = [5][]string{ + {"일", "월", "화", "수", "목", "금", "토"}, + {"일요일", "월요일", "화요일", "수요일", "목요일", "금요일", "토요일"}, + {}, + {"1월", "2월", "3월", "4월", "5월", "6월", "7월", "8월", "9월", "10월", "11월", "12월"}, + {}, +} + +var localeTableKoCN = [5][]string{ + {"일", "월", "화", "수", "목", "금", "토"}, + {"일요일", "월요일", "화요일", "수요일", "목요일", "금요일", "토요일"}, + {}, + {"1월", "2월", "3월", "4월", "5월", "6월", "7월", "8월", "9월", "10월", "11월", "12월"}, + {}, +} + +var localeTableKoKP = [5][]string{ + {"일", "월", "화", "수", "목", "금", "토"}, + {"일요일", "월요일", "화요일", "수요일", "목요일", "금요일", "토요일"}, + {}, + {"1월", "2월", "3월", "4월", "5월", "6월", "7월", "8월", "9월", "10월", "11월", "12월"}, + {}, +} + +var localeTableKoKR = [5][]string{ + {"일", "월", "화", "수", "목", "금", "토"}, + {"일요일", "월요일", "화요일", "수요일", "목요일", "금요일", "토요일"}, + {}, + {"1월", "2월", "3월", "4월", "5월", "6월", "7월", "8월", "9월", "10월", "11월", "12월"}, + {}, +} + +var localeTableKok = [5][]string{ + {}, + {"आयतार", "सोमार", "मंगळार", "बुधवार", "बिरेस्तार", "शुक्रार", "शेनवार"}, + {}, + {"जानेवारी", "फेब्रुवारी", "मार्च", "एप्रील", "मे", "जून", "जुलय", "ऑगस्ट", "सप्टेंबर", "ऑक्टोबर", "नोव्हेंबर", "डिसेंबर"}, + {"a", "p"}, +} + +var localeTableKokIN = [5][]string{ + {}, + {"आयतार", "सोमार", "मंगळार", "बुधवार", "बिरेस्तार", "शुक्रार", "शेनवार"}, + {}, + {"जानेवारी", "फेब्रुवारी", "मार्च", "एप्रील", "मे", "जून", "जुलय", "ऑगस्ट", "सप्टेंबर", "ऑक्टोबर", "नोव्हेंबर", "डिसेंबर"}, + {"a", "p"}, +} + +var localeTableKs = [5][]string{ + {"آتھوار", "ژٔندٕروار", "بۆموار", "بودوار", "برؠسوار", "جُمہ", "بٹوار"}, + {"اَتھوار", "ژٔندرٕروار", "بۆموار", "بودوار", "برؠسوار", "جُمہ", "بٹوار"}, + {"جنؤری", "فرؤری", "مارٕچ", "اپریل", "مئی", "جوٗن", "جُلَے", "اگست", "ستمبر", "اکتوٗبر", "نومبر", "دسمبر"}, + {"جنؤری", "فرؤری", "مارٕچ", "اپریل", "مئی", "جوٗن", "جُلَے", "اگست", "ستمبر", "اکتوٗبر", "نومبر", "دَسَمبَر"}, + {"a", "p"}, +} + +var localeTableKsArab = [5][]string{ + {"آتھوار", "ژٔندٕروار", "بۆموار", "بودوار", "برؠسوار", "جُمہ", "بٹوار"}, + {"اَتھوار", "ژٔندرٕروار", "بۆموار", "بودوار", "برؠسوار", "جُمہ", "بٹوار"}, + {"جنؤری", "فرؤری", "مارٕچ", "اپریل", "مئی", "جوٗن", "جُلَے", "اگست", "ستمبر", "اکتوٗبر", "نومبر", "دسمبر"}, + {"جنؤری", "فرؤری", "مارٕچ", "اپریل", "مئی", "جوٗن", "جُلَے", "اگست", "ستمبر", "اکتوٗبر", "نومبر", "دَسَمبَر"}, + {"a", "p"}, +} + +var localeTableKsArabIN = [5][]string{ + {"آتھوار", "ژٔندٕروار", "بۆموار", "بودوار", "برؠسوار", "جُمہ", "بٹوار"}, + {"اَتھوار", "ژٔندرٕروار", "بۆموار", "بودوار", "برؠسوار", "جُمہ", "بٹوار"}, + {"جنؤری", "فرؤری", "مارٕچ", "اپریل", "مئی", "جوٗن", "جُلَے", "اگست", "ستمبر", "اکتوٗبر", "نومبر", "دسمبر"}, + {"جنؤری", "فرؤری", "مارٕچ", "اپریل", "مئی", "جوٗن", "جُلَے", "اگست", "ستمبر", "اکتوٗبر", "نومبر", "دَسَمبَر"}, + {"a", "p"}, +} + +var localeTableKsDeva = [5][]string{ + {"आथवार", "चंदिरवार", "बुवार", "बोदवार", "ब्रेसवार", "जुमा", "बटवार"}, + {"आथवार", "च़ंदिरवार", "बोमवार", "बोदवार", "ब्रेसवार", "जुमा", "बटवार"}, + {"जनवरी", "फ़रवरी", "मार्च", "अप्रैल", "मे", "जून", "जुलाई", "अगस्त", "सतुंबर", "अक्तूबर", "नवूमबर", "दसूमबर"}, + {"जनवरी", "फ़रवरी", "मार्च", "अप्रैल", "मे", "जून", "जुलाई", "अगस्त", "सतमबर", 
"अक्तूबर", "नवमबर", "दसमबर"}, + {}, +} + +var localeTableKsDevaIN = [5][]string{ + {"आथवार", "चंदिरवार", "बुवार", "बोदवार", "ब्रेसवार", "जुमा", "बटवार"}, + {"आथवार", "च़ंदिरवार", "बोमवार", "बोदवार", "ब्रेसवार", "जुमा", "बटवार"}, + {"जनवरी", "फ़रवरी", "मार्च", "अप्रैल", "मे", "जून", "जुलाई", "अगस्त", "सतुंबर", "अक्तूबर", "नवूमबर", "दसूमबर"}, + {"जनवरी", "फ़रवरी", "मार्च", "अप्रैल", "मे", "जून", "जुलाई", "अगस्त", "सतमबर", "अक्तूबर", "नवमबर", "दसमबर"}, + {}, +} + +var localeTableKsb = [5][]string{ + {"Jpi", "Jtt", "Jmn", "Jtn", "Alh", "Iju", "Jmo"}, + {"Jumaapii", "Jumaatatu", "Jumaane", "Jumaatano", "Alhamisi", "Ijumaa", "Jumaamosi"}, + {"Jan", "Feb", "Mac", "Apr", "Mei", "Jun", "Jul", "Ago", "Sep", "Okt", "Nov", "Des"}, + {"Januali", "Febluali", "Machi", "Aplili", "Mei", "Juni", "Julai", "Agosti", "Septemba", "Oktoba", "Novemba", "Desemba"}, + {"makeo", "nyiaghuo"}, +} + +var localeTableKsbTZ = [5][]string{ + {"Jpi", "Jtt", "Jmn", "Jtn", "Alh", "Iju", "Jmo"}, + {"Jumaapii", "Jumaatatu", "Jumaane", "Jumaatano", "Alhamisi", "Ijumaa", "Jumaamosi"}, + {"Jan", "Feb", "Mac", "Apr", "Mei", "Jun", "Jul", "Ago", "Sep", "Okt", "Nov", "Des"}, + {"Januali", "Febluali", "Machi", "Aplili", "Mei", "Juni", "Julai", "Agosti", "Septemba", "Oktoba", "Novemba", "Desemba"}, + {"makeo", "nyiaghuo"}, +} + +var localeTableKsf = [5][]string{ + {"sɔ́n", "lǝn", "maa", "mɛk", "jǝǝ", "júm", "sam"}, + {"sɔ́ndǝ", "lǝndí", "maadí", "mɛkrɛdí", "jǝǝdí", "júmbá", "samdí"}, + {"ŋ1", "ŋ2", "ŋ3", "ŋ4", "ŋ5", "ŋ6", "ŋ7", "ŋ8", "ŋ9", "ŋ10", "ŋ11", "ŋ12"}, + {"ŋwíí a ntɔ́ntɔ", "ŋwíí akǝ bɛ́ɛ", "ŋwíí akǝ ráá", "ŋwíí akǝ nin", "ŋwíí akǝ táan", "ŋwíí akǝ táafɔk", "ŋwíí akǝ táabɛɛ", "ŋwíí akǝ táaraa", "ŋwíí akǝ táanin", "ŋwíí akǝ ntɛk", "ŋwíí akǝ ntɛk di bɔ́k", "ŋwíí akǝ ntɛk di bɛ́ɛ"}, + {"sárúwá", "cɛɛ́nko"}, +} + +var localeTableKsfCM = [5][]string{ + {"sɔ́n", "lǝn", "maa", "mɛk", "jǝǝ", "júm", "sam"}, + {"sɔ́ndǝ", "lǝndí", "maadí", "mɛkrɛdí", "jǝǝdí", "júmbá", "samdí"}, + {"ŋ1", "ŋ2", "ŋ3", "ŋ4", "ŋ5", "ŋ6", "ŋ7", "ŋ8", "ŋ9", "ŋ10", "ŋ11", "ŋ12"}, + {"ŋwíí a ntɔ́ntɔ", "ŋwíí akǝ bɛ́ɛ", "ŋwíí akǝ ráá", "ŋwíí akǝ nin", "ŋwíí akǝ táan", "ŋwíí akǝ táafɔk", "ŋwíí akǝ táabɛɛ", "ŋwíí akǝ táaraa", "ŋwíí akǝ táanin", "ŋwíí akǝ ntɛk", "ŋwíí akǝ ntɛk di bɔ́k", "ŋwíí akǝ ntɛk di bɛ́ɛ"}, + {"sárúwá", "cɛɛ́nko"}, +} + +var localeTableKsh = [5][]string{ + {"Su.", "Mo.", "Di.", "Me.", "Du.", "Fr.", "Sa."}, + {"Sunndaach", "Mohndaach", "Dinnsdaach", "Metwoch", "Dunnersdaach", "Friidaach", "Samsdaach"}, + {"Jan", "Fäb", "Mäz", "Apr", "Mai", "Jun", "Jul", "Ouj", "Säp", "Okt", "Nov", "Dez"}, + {"Jannewa", "Fäbrowa", "Määz", "Aprell", "Mai", "Juuni", "Juuli", "Oujoß", "Septämber", "Oktohber", "Novämber", "Dezämber"}, + {"v.M.", "n.M."}, +} + +var localeTableKshDE = [5][]string{ + {"Su.", "Mo.", "Di.", "Me.", "Du.", "Fr.", "Sa."}, + {"Sunndaach", "Mohndaach", "Dinnsdaach", "Metwoch", "Dunnersdaach", "Friidaach", "Samsdaach"}, + {"Jan", "Fäb", "Mäz", "Apr", "Mai", "Jun", "Jul", "Ouj", "Säp", "Okt", "Nov", "Dez"}, + {"Jannewa", "Fäbrowa", "Määz", "Aprell", "Mai", "Juuni", "Juuli", "Oujoß", "Septämber", "Oktohber", "Novämber", "Dezämber"}, + {"v.M.", "n.M."}, +} + +var localeTableKu = [5][]string{ + {"yşm", "dşm", "sşm", "çşm", "pşm", "înî", "şem"}, + {"yekşem", "duşem", "sêşem", "çarşem", "pêncşem", "înî", "şemî"}, + {"rbn", "sbt", "adr", "nsn", "gln", "hzr", "trm", "tbx", "îln", "cot", "mjd", "brf"}, + {"rêbendan", "sibat", "adar", "nîsan", "gulan", "hezîran", "tîrmeh", "tebax", "îlon", "cotmeh", "mijdar", "berfanbar"}, + {"BN", "PN"}, +} + +var 
localeTableKuTR = [5][]string{ + {"yşm", "dşm", "sşm", "çşm", "pşm", "înî", "şem"}, + {"yekşem", "duşem", "sêşem", "çarşem", "pêncşem", "înî", "şemî"}, + {"rbn", "sbt", "adr", "nsn", "gln", "hzr", "trm", "tbx", "îln", "cot", "mjd", "brf"}, + {"rêbendan", "sibat", "adar", "nîsan", "gulan", "hezîran", "tîrmeh", "tebax", "îlon", "cotmeh", "mijdar", "berfanbar"}, + {"BN", "PN"}, +} + +var localeTableKw = [5][]string{ + {"Sul", "Lun", "Mth", "Mhr", "Yow", "Gwe", "Sad"}, + {"dy Sul", "dy Lun", "dy Meurth", "dy Merher", "dy Yow", "dy Gwener", "dy Sadorn"}, + {"Gen", "Hwe", "Meu", "Ebr", "Me", "Met", "Gor", "Est", "Gwn", "Hed", "Du", "Kev"}, + {"mis Genver", "mis Hwevrer", "mis Meurth", "mis Ebrel", "mis Me", "mis Metheven", "mis Gortheren", "mis Est", "mis Gwynngala", "mis Hedra", "mis Du", "mis Kevardhu"}, + {"a.m.", "p.m."}, +} + +var localeTableKwGB = [5][]string{ + {"Sul", "Lun", "Mth", "Mhr", "Yow", "Gwe", "Sad"}, + {"dy Sul", "dy Lun", "dy Meurth", "dy Merher", "dy Yow", "dy Gwener", "dy Sadorn"}, + {"Gen", "Hwe", "Meu", "Ebr", "Me", "Met", "Gor", "Est", "Gwn", "Hed", "Du", "Kev"}, + {"mis Genver", "mis Hwevrer", "mis Meurth", "mis Ebrel", "mis Me", "mis Metheven", "mis Gortheren", "mis Est", "mis Gwynngala", "mis Hedra", "mis Du", "mis Kevardhu"}, + {"a.m.", "p.m."}, +} + +var localeTableKxv = [5][]string{ + {"aadi", "smba", "manga", "puda", "laki", "sukru", "sani"}, + {"aadi vara", "smbara", "mangaḍa", "pudara", "laki vara", "sukru vara", "sani vara"}, + {"pusu", "maha", "pagu", "hire", "bese", "jaṭṭa", "aasaḍi", "srabĩ", "bado", "dasara", "divi", "pande"}, + {"pusu lenju", "maha lenju", "pagu lenju", "hire lenju", "bese lenju", "jaṭṭa lenju", "aasaḍi lenju", "srabĩ lenju", "bado lenju", "dasara lenju", "divi lenju", "pande lenju"}, + {"am", "pm"}, +} + +var localeTableKxvDeva = [5][]string{ + {}, + {"आदि वारा", "साॅम्वारा", "मंगाड़ा", "पुद्दारा", "लाक्कि वारा", "सुकुरु वारा", "सान्नि वारा"}, + {}, + {"पुसु लेञ्जु", "माहाका लेञ्जु", "पागुणी लेञ्जु", "हिरे लेञ्जु", "बेसे लेञ्जु", "जाटा लेञ्जु", "आसाड़ी लेञ्जु", "स्राबाँ लेञ्जु", "बोदो लेञ्जु", "दसारा लेञ्जु", "दिवी लेञ्जु", "पान्डे लेञ्जु"}, + {}, +} + +var localeTableKxvDevaIN = [5][]string{ + {}, + {"आदि वारा", "साॅम्वारा", "मंगाड़ा", "पुद्दारा", "लाक्कि वारा", "सुकुरु वारा", "सान्नि वारा"}, + {}, + {"पुसु लेञ्जु", "माहाका लेञ्जु", "पागुणी लेञ्जु", "हिरे लेञ्जु", "बेसे लेञ्जु", "जाटा लेञ्जु", "आसाड़ी लेञ्जु", "स्राबाँ लेञ्जु", "बोदो लेञ्जु", "दसारा लेञ्जु", "दिवी लेञ्जु", "पान्डे लेञ्जु"}, + {}, +} + +var localeTableKxvOrya = [5][]string{ + {}, + {"ଆଦି ୱାରା", "ସମ୍ବାରା", "ମାଙ୍ଗାଡ଼ା", "ପୁଦାରା", "ଲାକି ୱାରା", "ସୁକ୍ରୁ ୱାରା", "ସାନି ୱାରା"}, + {}, + {"ପୁସୁ ଲେଞ୍ଜୁ", "ମାହାକା ଲେଞ୍ଜୁ", "ପାଗୁଣି ଲେଞ୍ଜୁ", "ହିରେ ଲେଞ୍ଜୁ", "ବେସେ ଲେଞ୍ଜୁ", "ଜାଟା ଲେଞ୍ଜୁ", "ଆସାଡ଼ି ଲେଞ୍ଜୁ", "ସ୍ରାବାଁ ଲେଞ୍ଜୁ", "ବଦ ଲେଞ୍ଜୁ", "ଦାସାରା ଲେଞ୍ଜୁ", "ଦିୱିଡ଼ି ଲେଞ୍ଜୁ", "ପାଣ୍ଡେ ଲେଞ୍ଜୁ"}, + {}, +} + +var localeTableKxvOryaIN = [5][]string{ + {}, + {"ଆଦି ୱାରା", "ସମ୍ବାରା", "ମାଙ୍ଗାଡ଼ା", "ପୁଦାରା", "ଲାକି ୱାରା", "ସୁକ୍ରୁ ୱାରା", "ସାନି ୱାରା"}, + {}, + {"ପୁସୁ ଲେଞ୍ଜୁ", "ମାହାକା ଲେଞ୍ଜୁ", "ପାଗୁଣି ଲେଞ୍ଜୁ", "ହିରେ ଲେଞ୍ଜୁ", "ବେସେ ଲେଞ୍ଜୁ", "ଜାଟା ଲେଞ୍ଜୁ", "ଆସାଡ଼ି ଲେଞ୍ଜୁ", "ସ୍ରାବାଁ ଲେଞ୍ଜୁ", "ବଦ ଲେଞ୍ଜୁ", "ଦାସାରା ଲେଞ୍ଜୁ", "ଦିୱିଡ଼ି ଲେଞ୍ଜୁ", "ପାଣ୍ଡେ ଲେଞ୍ଜୁ"}, + {}, +} + +var localeTableKxvTelu = [5][]string{ + {}, + {"వారమి", "నమారా", "మాంగాడా", "వుదారా", "లాకివరా", "నుక్ వరా", "సానివరా"}, + {}, + {"మాగ", "గుండు", "హిరెఇ", "బెసెకి", "లండి", "రాత", "బాన్దపాణా", "బార్సి", "అస్ర", "దివెడి", "పాండు", "పుసు"}, + {}, +} + +var localeTableKxvTeluIN = [5][]string{ + {}, + {"వారమి", "నమారా", "మాంగాడా", "వుదారా", "లాకివరా", 
"నుక్ వరా", "సానివరా"}, + {}, + {"మాగ", "గుండు", "హిరెఇ", "బెసెకి", "లండి", "రాత", "బాన్దపాణా", "బార్సి", "అస్ర", "దివెడి", "పాండు", "పుసు"}, + {}, +} + +var localeTableKy = [5][]string{ + {"жек.", "дүй.", "шейш.", "шарш.", "бейш.", "жума", "ишм."}, + {"жекшемби", "дүйшөмбү", "шейшемби", "шаршемби", "бейшемби", "жума", "ишемби"}, + {"янв.", "фев.", "мар.", "апр.", "май", "июн.", "июл.", "авг.", "сен.", "окт.", "ноя.", "дек."}, + {"январь", "февраль", "март", "апрель", "май", "июнь", "июль", "август", "сентябрь", "октябрь", "ноябрь", "декабрь"}, + {"тң", "тк"}, +} + +var localeTableKyKG = [5][]string{ + {"жек.", "дүй.", "шейш.", "шарш.", "бейш.", "жума", "ишм."}, + {"жекшемби", "дүйшөмбү", "шейшемби", "шаршемби", "бейшемби", "жума", "ишемби"}, + {"янв.", "фев.", "мар.", "апр.", "май", "июн.", "июл.", "авг.", "сен.", "окт.", "ноя.", "дек."}, + {"январь", "февраль", "март", "апрель", "май", "июнь", "июль", "август", "сентябрь", "октябрь", "ноябрь", "декабрь"}, + {"тң", "тк"}, +} + +var localeTableLa = [5][]string{ + {"Dom", "Lun", "Mar", "Mer", "Iov", "Ven", "Sab"}, + {"Dominica", "dies Lunae", "dies Martis", "dies Mercurii", "dies Iovis", "dies Veneris", "dies Sabbati"}, + {"Ian", "Feb", "Mar", "Apr", "Mai", "Iun", "Iul", "Aug", "Sep", "Oct", "Nov", "Dec"}, + {"Ianuarii", "Februarii", "Martii", "Aprilis", "Maii", "Iunii", "Iulii", "Augusti", "Septembris", "Octobris", "Novembris", "Decembris"}, + {"a.m.", "p.m."}, +} + +var localeTableLaVA = [5][]string{ + {"Dom", "Lun", "Mar", "Mer", "Iov", "Ven", "Sab"}, + {"Dominica", "dies Lunae", "dies Martis", "dies Mercurii", "dies Iovis", "dies Veneris", "dies Sabbati"}, + {"Ian", "Feb", "Mar", "Apr", "Mai", "Iun", "Iul", "Aug", "Sep", "Oct", "Nov", "Dec"}, + {"Ianuarii", "Februarii", "Martii", "Aprilis", "Maii", "Iunii", "Iulii", "Augusti", "Septembris", "Octobris", "Novembris", "Decembris"}, + {"a.m.", "p.m."}, +} + +var localeTableLag = [5][]string{ + {"Píili", "Táatu", "Íne", "Táano", "Alh", "Ijm", "Móosi"}, + {"Jumapíiri", "Jumatátu", "Jumaíne", "Jumatáano", "Alamíisi", "Ijumáa", "Jumamóosi"}, + {"Fúngatɨ", "Naanɨ", "Keenda", "Ikúmi", "Inyambala", "Idwaata", "Mʉʉnchɨ", "Vɨɨrɨ", "Saatʉ", "Inyi", "Saano", "Sasatʉ"}, + {"Kʉfúngatɨ", "Kʉnaanɨ", "Kʉkeenda", "Kwiikumi", "Kwiinyambála", "Kwiidwaata", "Kʉmʉʉnchɨ", "Kʉvɨɨrɨ", "Kʉsaatʉ", "Kwiinyi", "Kʉsaano", "Kʉsasatʉ"}, + {"TOO", "MUU"}, +} + +var localeTableLagTZ = [5][]string{ + {"Píili", "Táatu", "Íne", "Táano", "Alh", "Ijm", "Móosi"}, + {"Jumapíiri", "Jumatátu", "Jumaíne", "Jumatáano", "Alamíisi", "Ijumáa", "Jumamóosi"}, + {"Fúngatɨ", "Naanɨ", "Keenda", "Ikúmi", "Inyambala", "Idwaata", "Mʉʉnchɨ", "Vɨɨrɨ", "Saatʉ", "Inyi", "Saano", "Sasatʉ"}, + {"Kʉfúngatɨ", "Kʉnaanɨ", "Kʉkeenda", "Kwiikumi", "Kwiinyambála", "Kwiidwaata", "Kʉmʉʉnchɨ", "Kʉvɨɨrɨ", "Kʉsaatʉ", "Kwiinyi", "Kʉsaano", "Kʉsasatʉ"}, + {"TOO", "MUU"}, +} + +var localeTableLb = [5][]string{ + {"Son.", "Méi.", "Dën.", "Mët.", "Don.", "Fre.", "Sam."}, + {"Sonndeg", "Méindeg", "Dënschdeg", "Mëttwoch", "Donneschdeg", "Freideg", "Samschdeg"}, + {"Jan.", "Feb.", "Mäe.", "Abr.", "Mee", "Juni", "Juli", "Aug.", "Sep.", "Okt.", "Nov.", "Dez."}, + {"Januar", "Februar", "Mäerz", "Abrëll", "Mee", "Juni", "Juli", "August", "September", "Oktober", "November", "Dezember"}, + {"moies", "nomëttes"}, +} + +var localeTableLbLU = [5][]string{ + {"Son.", "Méi.", "Dën.", "Mët.", "Don.", "Fre.", "Sam."}, + {"Sonndeg", "Méindeg", "Dënschdeg", "Mëttwoch", "Donneschdeg", "Freideg", "Samschdeg"}, + {"Jan.", "Feb.", "Mäe.", "Abr.", "Mee", "Juni", "Juli", "Aug.", "Sep.", 
"Okt.", "Nov.", "Dez."}, + {"Januar", "Februar", "Mäerz", "Abrëll", "Mee", "Juni", "Juli", "August", "September", "Oktober", "November", "Dezember"}, + {"moies", "nomëttes"}, +} + +var localeTableLg = [5][]string{ + {"Sab", "Bal", "Lw2", "Lw3", "Lw4", "Lw5", "Lw6"}, + {"Sabbiiti", "Balaza", "Lwakubiri", "Lwakusatu", "Lwakuna", "Lwakutaano", "Lwamukaaga"}, + {"Jan", "Feb", "Mar", "Apu", "Maa", "Juu", "Jul", "Agu", "Seb", "Oki", "Nov", "Des"}, + {"Janwaliyo", "Febwaliyo", "Marisi", "Apuli", "Maayi", "Juuni", "Julaayi", "Agusito", "Sebuttemba", "Okitobba", "Novemba", "Desemba"}, + {}, +} + +var localeTableLgUG = [5][]string{ + {"Sab", "Bal", "Lw2", "Lw3", "Lw4", "Lw5", "Lw6"}, + {"Sabbiiti", "Balaza", "Lwakubiri", "Lwakusatu", "Lwakuna", "Lwakutaano", "Lwamukaaga"}, + {"Jan", "Feb", "Mar", "Apu", "Maa", "Juu", "Jul", "Agu", "Seb", "Oki", "Nov", "Des"}, + {"Janwaliyo", "Febwaliyo", "Marisi", "Apuli", "Maayi", "Juuni", "Julaayi", "Agusito", "Sebuttemba", "Okitobba", "Novemba", "Desemba"}, + {}, +} + +var localeTableLij = [5][]string{ + {"dom.", "lun.", "mät.", "mäc.", "zeu.", "ven.", "sab."}, + {"domenega", "lunesdì", "mätesdì", "mäcordì", "zeuggia", "venardì", "sabbo"}, + {"de zen.", "de fre.", "de mar.", "d’arv.", "de maz.", "de zug.", "de lug.", "d’ago.", "de set.", "d’ott.", "de nov.", "de dex."}, + {"de zenâ", "de frevâ", "de marso", "d’arvî", "de mazzo", "de zugno", "de luggio", "d’agosto", "de settembre", "d’ottobre", "de novembre", "de dexembre"}, + {"m.", "p."}, +} + +var localeTableLijIT = [5][]string{ + {"dom.", "lun.", "mät.", "mäc.", "zeu.", "ven.", "sab."}, + {"domenega", "lunesdì", "mätesdì", "mäcordì", "zeuggia", "venardì", "sabbo"}, + {"de zen.", "de fre.", "de mar.", "d’arv.", "de maz.", "de zug.", "de lug.", "d’ago.", "de set.", "d’ott.", "de nov.", "de dex."}, + {"de zenâ", "de frevâ", "de marso", "d’arvî", "de mazzo", "de zugno", "de luggio", "d’agosto", "de settembre", "d’ottobre", "de novembre", "de dexembre"}, + {"m.", "p."}, +} + +var localeTableLkt = [5][]string{ + {}, + {"Aŋpétuwakȟaŋ", "Aŋpétuwaŋži", "Aŋpétunuŋpa", "Aŋpétuyamni", "Aŋpétutopa", "Aŋpétuzaptaŋ", "Owáŋgyužažapi"}, + {}, + {"Wiótheȟika Wí", "Thiyóȟeyuŋka Wí", "Ištáwičhayazaŋ Wí", "Pȟežítȟo Wí", "Čhaŋwápetȟo Wí", "Wípazukȟa-wašté Wí", "Čhaŋpȟásapa Wí", "Wasútȟuŋ Wí", "Čhaŋwápeǧi Wí", "Čhaŋwápe-kasná Wí", "Waníyetu Wí", "Tȟahékapšuŋ Wí"}, + {}, +} + +var localeTableLktUS = [5][]string{ + {}, + {"Aŋpétuwakȟaŋ", "Aŋpétuwaŋži", "Aŋpétunuŋpa", "Aŋpétuyamni", "Aŋpétutopa", "Aŋpétuzaptaŋ", "Owáŋgyužažapi"}, + {}, + {"Wiótheȟika Wí", "Thiyóȟeyuŋka Wí", "Ištáwičhayazaŋ Wí", "Pȟežítȟo Wí", "Čhaŋwápetȟo Wí", "Wípazukȟa-wašté Wí", "Čhaŋpȟásapa Wí", "Wasútȟuŋ Wí", "Čhaŋwápeǧi Wí", "Čhaŋwápe-kasná Wí", "Waníyetu Wí", "Tȟahékapšuŋ Wí"}, + {}, +} + +var localeTableLmo = [5][]string{ + {}, + {"domenega", "lundì", "mardì", "mercoldì", "sgiovedì", "venerdì", "sabet"}, + {}, + {"sginer", "fevrer", "marz", "avril", "masg", "sgiugn", "luj", "avost", "setember", "otover", "november", "dicember"}, + {}, +} + +var localeTableLmoIT = [5][]string{ + {}, + {"domenega", "lundì", "mardì", "mercoldì", "sgiovedì", "venerdì", "sabet"}, + {}, + {"sginer", "fevrer", "marz", "avril", "masg", "sgiugn", "luj", "avost", "setember", "otover", "november", "dicember"}, + {}, +} + +var localeTableLn = [5][]string{ + {"eye", "ybo", "mbl", "mst", "min", "mtn", "mps"}, + {"eyenga", "mokɔlɔ mwa yambo", "mokɔlɔ mwa míbalé", "mokɔlɔ mwa mísáto", "mokɔlɔ ya mínéi", "mokɔlɔ ya mítáno", "mpɔ́sɔ"}, + {"yan", "fbl", "msi", "apl", "mai", "yun", "yul", "agt", 
"stb", "ɔtb", "nvb", "dsb"}, + {"sánzá ya yambo", "sánzá ya míbalé", "sánzá ya mísáto", "sánzá ya mínei", "sánzá ya mítáno", "sánzá ya motóbá", "sánzá ya nsambo", "sánzá ya mwambe", "sánzá ya libwa", "sánzá ya zómi", "sánzá ya zómi na mɔ̌kɔ́", "sánzá ya zómi na míbalé"}, + {"ntɔ́ngɔ́", "mpókwa"}, +} + +var localeTableLnAO = [5][]string{ + {"eye", "ybo", "mbl", "mst", "min", "mtn", "mps"}, + {"eyenga", "mokɔlɔ mwa yambo", "mokɔlɔ mwa míbalé", "mokɔlɔ mwa mísáto", "mokɔlɔ ya mínéi", "mokɔlɔ ya mítáno", "mpɔ́sɔ"}, + {"yan", "fbl", "msi", "apl", "mai", "yun", "yul", "agt", "stb", "ɔtb", "nvb", "dsb"}, + {"sánzá ya yambo", "sánzá ya míbalé", "sánzá ya mísáto", "sánzá ya mínei", "sánzá ya mítáno", "sánzá ya motóbá", "sánzá ya nsambo", "sánzá ya mwambe", "sánzá ya libwa", "sánzá ya zómi", "sánzá ya zómi na mɔ̌kɔ́", "sánzá ya zómi na míbalé"}, + {"ntɔ́ngɔ́", "mpókwa"}, +} + +var localeTableLnCD = [5][]string{ + {"eye", "ybo", "mbl", "mst", "min", "mtn", "mps"}, + {"eyenga", "mokɔlɔ mwa yambo", "mokɔlɔ mwa míbalé", "mokɔlɔ mwa mísáto", "mokɔlɔ ya mínéi", "mokɔlɔ ya mítáno", "mpɔ́sɔ"}, + {"yan", "fbl", "msi", "apl", "mai", "yun", "yul", "agt", "stb", "ɔtb", "nvb", "dsb"}, + {"sánzá ya yambo", "sánzá ya míbalé", "sánzá ya mísáto", "sánzá ya mínei", "sánzá ya mítáno", "sánzá ya motóbá", "sánzá ya nsambo", "sánzá ya mwambe", "sánzá ya libwa", "sánzá ya zómi", "sánzá ya zómi na mɔ̌kɔ́", "sánzá ya zómi na míbalé"}, + {"ntɔ́ngɔ́", "mpókwa"}, +} + +var localeTableLnCF = [5][]string{ + {"eye", "ybo", "mbl", "mst", "min", "mtn", "mps"}, + {"eyenga", "mokɔlɔ mwa yambo", "mokɔlɔ mwa míbalé", "mokɔlɔ mwa mísáto", "mokɔlɔ ya mínéi", "mokɔlɔ ya mítáno", "mpɔ́sɔ"}, + {"yan", "fbl", "msi", "apl", "mai", "yun", "yul", "agt", "stb", "ɔtb", "nvb", "dsb"}, + {"sánzá ya yambo", "sánzá ya míbalé", "sánzá ya mísáto", "sánzá ya mínei", "sánzá ya mítáno", "sánzá ya motóbá", "sánzá ya nsambo", "sánzá ya mwambe", "sánzá ya libwa", "sánzá ya zómi", "sánzá ya zómi na mɔ̌kɔ́", "sánzá ya zómi na míbalé"}, + {"ntɔ́ngɔ́", "mpókwa"}, +} + +var localeTableLnCG = [5][]string{ + {"eye", "ybo", "mbl", "mst", "min", "mtn", "mps"}, + {"eyenga", "mokɔlɔ mwa yambo", "mokɔlɔ mwa míbalé", "mokɔlɔ mwa mísáto", "mokɔlɔ ya mínéi", "mokɔlɔ ya mítáno", "mpɔ́sɔ"}, + {"yan", "fbl", "msi", "apl", "mai", "yun", "yul", "agt", "stb", "ɔtb", "nvb", "dsb"}, + {"sánzá ya yambo", "sánzá ya míbalé", "sánzá ya mísáto", "sánzá ya mínei", "sánzá ya mítáno", "sánzá ya motóbá", "sánzá ya nsambo", "sánzá ya mwambe", "sánzá ya libwa", "sánzá ya zómi", "sánzá ya zómi na mɔ̌kɔ́", "sánzá ya zómi na míbalé"}, + {"ntɔ́ngɔ́", "mpókwa"}, +} + +var localeTableLo = [5][]string{ + {"ອາທິດ", "ຈັນ", "ອັງຄານ", "ພຸດ", "ພະຫັດ", "ສຸກ", "ເສົາ"}, + {"ວັນອາທິດ", "ວັນຈັນ", "ວັນອັງຄານ", "ວັນພຸດ", "ວັນພະຫັດ", "ວັນສຸກ", "ວັນເສົາ"}, + {"ມ.ກ.", "ກ.ພ.", "ມ.ນ.", "ມ.ສ.", "ພ.ພ.", "ມິ.ຖ.", "ກ.ລ.", "ສ.ຫ.", "ກ.ຍ.", "ຕ.ລ.", "ພ.ຈ.", "ທ.ວ."}, + {"ມັງກອນ", "ກຸມພາ", "ມີນາ", "ເມສາ", "ພຶດສະພາ", "ມິຖຸນາ", "ກໍລະກົດ", "ສິງຫາ", "ກັນຍາ", "ຕຸລາ", "ພະຈິກ", "ທັນວາ"}, + {"ກ່ອນທ່ຽງ", "ຫຼັງທ່ຽງ"}, +} + +var localeTableLoLA = [5][]string{ + {"ອາທິດ", "ຈັນ", "ອັງຄານ", "ພຸດ", "ພະຫັດ", "ສຸກ", "ເສົາ"}, + {"ວັນອາທິດ", "ວັນຈັນ", "ວັນອັງຄານ", "ວັນພຸດ", "ວັນພະຫັດ", "ວັນສຸກ", "ວັນເສົາ"}, + {"ມ.ກ.", "ກ.ພ.", "ມ.ນ.", "ມ.ສ.", "ພ.ພ.", "ມິ.ຖ.", "ກ.ລ.", "ສ.ຫ.", "ກ.ຍ.", "ຕ.ລ.", "ພ.ຈ.", "ທ.ວ."}, + {"ມັງກອນ", "ກຸມພາ", "ມີນາ", "ເມສາ", "ພຶດສະພາ", "ມິຖຸນາ", "ກໍລະກົດ", "ສິງຫາ", "ກັນຍາ", "ຕຸລາ", "ພະຈິກ", "ທັນວາ"}, + {"ກ່ອນທ່ຽງ", "ຫຼັງທ່ຽງ"}, +} + +var localeTableLrc = [5][]string{ + {}, + {}, + {}, + {"جانڤیە", "فئڤریە", "مارس", "آڤریل", 
"مئی", "جوٙأن", "جوٙلا", "آگوست", "سئپتامر", "ئوکتوڤر", "نوڤامر", "دئسامر"}, + {}, +} + +var localeTableLrcIQ = [5][]string{ + {}, + {}, + {}, + {"جانڤیە", "فئڤریە", "مارس", "آڤریل", "مئی", "جوٙأن", "جوٙلا", "آگوست", "سئپتامر", "ئوکتوڤر", "نوڤامر", "دئسامر"}, + {}, +} + +var localeTableLrcIR = [5][]string{ + {}, + {}, + {}, + {"جانڤیە", "فئڤریە", "مارس", "آڤریل", "مئی", "جوٙأن", "جوٙلا", "آگوست", "سئپتامر", "ئوکتوڤر", "نوڤامر", "دئسامر"}, + {}, +} + +var localeTableLt = [5][]string{ + {"sk", "pr", "an", "tr", "kt", "pn", "št"}, + {"sekmadienis", "pirmadienis", "antradienis", "trečiadienis", "ketvirtadienis", "penktadienis", "šeštadienis"}, + {"saus.", "vas.", "kov.", "bal.", "geg.", "birž.", "liep.", "rugp.", "rugs.", "spal.", "lapkr.", "gruod."}, + {"sausio", "vasario", "kovo", "balandžio", "gegužės", "birželio", "liepos", "rugpjūčio", "rugsėjo", "spalio", "lapkričio", "gruodžio"}, + {"priešpiet", "popiet"}, +} + +var localeTableLtLT = [5][]string{ + {"sk", "pr", "an", "tr", "kt", "pn", "št"}, + {"sekmadienis", "pirmadienis", "antradienis", "trečiadienis", "ketvirtadienis", "penktadienis", "šeštadienis"}, + {"saus.", "vas.", "kov.", "bal.", "geg.", "birž.", "liep.", "rugp.", "rugs.", "spal.", "lapkr.", "gruod."}, + {"sausio", "vasario", "kovo", "balandžio", "gegužės", "birželio", "liepos", "rugpjūčio", "rugsėjo", "spalio", "lapkričio", "gruodžio"}, + {"priešpiet", "popiet"}, +} + +var localeTableLu = [5][]string{ + {"Lum", "Nko", "Ndy", "Ndg", "Njw", "Ngv", "Lub"}, + {"Lumingu", "Nkodya", "Ndàayà", "Ndangù", "Njòwa", "Ngòvya", "Lubingu"}, + {"Cio", "Lui", "Lus", "Muu", "Lum", "Luf", "Kab", "Lush", "Lut", "Lun", "Kas", "Cis"}, + {"Ciongo", "Lùishi", "Lusòlo", "Mùuyà", "Lumùngùlù", "Lufuimi", "Kabàlàshìpù", "Lùshìkà", "Lutongolo", "Lungùdi", "Kaswèkèsè", "Ciswà"}, + {"Dinda", "Dilolo"}, +} + +var localeTableLuCD = [5][]string{ + {"Lum", "Nko", "Ndy", "Ndg", "Njw", "Ngv", "Lub"}, + {"Lumingu", "Nkodya", "Ndàayà", "Ndangù", "Njòwa", "Ngòvya", "Lubingu"}, + {"Cio", "Lui", "Lus", "Muu", "Lum", "Luf", "Kab", "Lush", "Lut", "Lun", "Kas", "Cis"}, + {"Ciongo", "Lùishi", "Lusòlo", "Mùuyà", "Lumùngùlù", "Lufuimi", "Kabàlàshìpù", "Lùshìkà", "Lutongolo", "Lungùdi", "Kaswèkèsè", "Ciswà"}, + {"Dinda", "Dilolo"}, +} + +var localeTableLuo = [5][]string{ + {"JMP", "WUT", "TAR", "TAD", "TAN", "TAB", "NGS"}, + {"Jumapil", "Wuok Tich", "Tich Ariyo", "Tich Adek", "Tich Ang’wen", "Tich Abich", "Ngeso"}, + {"DAC", "DAR", "DAD", "DAN", "DAH", "DAU", "DAO", "DAB", "DOC", "DAP", "DGI", "DAG"}, + {"Dwe mar Achiel", "Dwe mar Ariyo", "Dwe mar Adek", "Dwe mar Ang’wen", "Dwe mar Abich", "Dwe mar Auchiel", "Dwe mar Abiriyo", "Dwe mar Aboro", "Dwe mar Ochiko", "Dwe mar Apar", "Dwe mar gi achiel", "Dwe mar Apar gi ariyo"}, + {"OD", "OT"}, +} + +var localeTableLuoKE = [5][]string{ + {"JMP", "WUT", "TAR", "TAD", "TAN", "TAB", "NGS"}, + {"Jumapil", "Wuok Tich", "Tich Ariyo", "Tich Adek", "Tich Ang’wen", "Tich Abich", "Ngeso"}, + {"DAC", "DAR", "DAD", "DAN", "DAH", "DAU", "DAO", "DAB", "DOC", "DAP", "DGI", "DAG"}, + {"Dwe mar Achiel", "Dwe mar Ariyo", "Dwe mar Adek", "Dwe mar Ang’wen", "Dwe mar Abich", "Dwe mar Auchiel", "Dwe mar Abiriyo", "Dwe mar Aboro", "Dwe mar Ochiko", "Dwe mar Apar", "Dwe mar gi achiel", "Dwe mar Apar gi ariyo"}, + {"OD", "OT"}, +} + +var localeTableLuy = [5][]string{ + {"J2", "J3", "J4", "J5", "Al", "Ij", "J1"}, + {"Jumapiri", "Jumatatu", "Jumanne", "Jumatano", "Murwa wa Kanne", "Murwa wa Katano", "Jumamosi"}, + {"Jan", "Feb", "Mar", "Apr", "Mei", "Jun", "Jul", "Ago", "Sep", "Okt", "Nov", "Des"}, + 
{"Januari", "Februari", "Machi", "Aprili", "Mei", "Juni", "Julai", "Agosti", "Septemba", "Oktoba", "Novemba", "Desemba"}, + {"a.m.", "p.m."}, +} + +var localeTableLuyKE = [5][]string{ + {"J2", "J3", "J4", "J5", "Al", "Ij", "J1"}, + {"Jumapiri", "Jumatatu", "Jumanne", "Jumatano", "Murwa wa Kanne", "Murwa wa Katano", "Jumamosi"}, + {"Jan", "Feb", "Mar", "Apr", "Mei", "Jun", "Jul", "Ago", "Sep", "Okt", "Nov", "Des"}, + {"Januari", "Februari", "Machi", "Aprili", "Mei", "Juni", "Julai", "Agosti", "Septemba", "Oktoba", "Novemba", "Desemba"}, + {"a.m.", "p.m."}, +} + +var localeTableLv = [5][]string{ + {"svētd.", "pirmd.", "otrd.", "trešd.", "ceturtd.", "piektd.", "sestd."}, + {"svētdiena", "pirmdiena", "otrdiena", "trešdiena", "ceturtdiena", "piektdiena", "sestdiena"}, + {"janv.", "febr.", "marts", "apr.", "maijs", "jūn.", "jūl.", "aug.", "sept.", "okt.", "nov.", "dec."}, + {"janvāris", "februāris", "marts", "aprīlis", "maijs", "jūnijs", "jūlijs", "augusts", "septembris", "oktobris", "novembris", "decembris"}, + {"priekšp.", "pēcp."}, +} + +var localeTableLvLV = [5][]string{ + {"svētd.", "pirmd.", "otrd.", "trešd.", "ceturtd.", "piektd.", "sestd."}, + {"svētdiena", "pirmdiena", "otrdiena", "trešdiena", "ceturtdiena", "piektdiena", "sestdiena"}, + {"janv.", "febr.", "marts", "apr.", "maijs", "jūn.", "jūl.", "aug.", "sept.", "okt.", "nov.", "dec."}, + {"janvāris", "februāris", "marts", "aprīlis", "maijs", "jūnijs", "jūlijs", "augusts", "septembris", "oktobris", "novembris", "decembris"}, + {"priekšp.", "pēcp."}, +} + +var localeTableMai = [5][]string{ + {"रवि", "सोम", "मंगल", "बुध", "गुरु", "शुक्र", "शनि"}, + {"रवि दिन", "सोम दिन", "मंगल दिन", "बुध दिन", "बृहस्पति दिन", "शुक्र दिन", "शनि दिन"}, + {"जन॰", "फ़र॰", "मार्च", "अप्रैल", "मई", "जून", "जुल॰", "अग॰", "सित॰", "अक्तू॰", "नव॰", "दिस॰"}, + {"जनवरी", "फरवरी", "मार्च", "अप्रैल", "मई", "जून", "जुलाई", "अगस्त", "सितंबर", "अक्तूबर", "नवंबर", "दिसंबर"}, + {"भोर", "सांझ"}, +} + +var localeTableMaiIN = [5][]string{ + {"रवि", "सोम", "मंगल", "बुध", "गुरु", "शुक्र", "शनि"}, + {"रवि दिन", "सोम दिन", "मंगल दिन", "बुध दिन", "बृहस्पति दिन", "शुक्र दिन", "शनि दिन"}, + {"जन॰", "फ़र॰", "मार्च", "अप्रैल", "मई", "जून", "जुल॰", "अग॰", "सित॰", "अक्तू॰", "नव॰", "दिस॰"}, + {"जनवरी", "फरवरी", "मार्च", "अप्रैल", "मई", "जून", "जुलाई", "अगस्त", "सितंबर", "अक्तूबर", "नवंबर", "दिसंबर"}, + {"भोर", "सांझ"}, +} + +var localeTableMas = [5][]string{ + {"Jpi", "Jtt", "Jnn", "Jtn", "Alh", "Iju", "Jmo"}, + {"Jumapílí", "Jumatátu", "Jumane", "Jumatánɔ", "Alaámisi", "Jumáa", "Jumamósi"}, + {"Dal", "Ará", "Ɔɛn", "Doy", "Lép", "Rok", "Sás", "Bɔ́r", "Kús", "Gís", "Shʉ́", "Ntʉ́"}, + {"Oladalʉ́", "Arát", "Ɔɛnɨ́ɔɨŋɔk", "Olodoyíóríê inkókúâ", "Oloilépūnyīē inkókúâ", "Kújúɔrɔk", "Mórusásin", "Ɔlɔ́ɨ́bɔ́rárɛ", "Kúshîn", "Olgísan", "Pʉshʉ́ka", "Ntʉ́ŋʉ́s"}, + {"Ɛnkakɛnyá", "Ɛndámâ"}, +} + +var localeTableMasKE = [5][]string{ + {"Jpi", "Jtt", "Jnn", "Jtn", "Alh", "Iju", "Jmo"}, + {"Jumapílí", "Jumatátu", "Jumane", "Jumatánɔ", "Alaámisi", "Jumáa", "Jumamósi"}, + {"Dal", "Ará", "Ɔɛn", "Doy", "Lép", "Rok", "Sás", "Bɔ́r", "Kús", "Gís", "Shʉ́", "Ntʉ́"}, + {"Oladalʉ́", "Arát", "Ɔɛnɨ́ɔɨŋɔk", "Olodoyíóríê inkókúâ", "Oloilépūnyīē inkókúâ", "Kújúɔrɔk", "Mórusásin", "Ɔlɔ́ɨ́bɔ́rárɛ", "Kúshîn", "Olgísan", "Pʉshʉ́ka", "Ntʉ́ŋʉ́s"}, + {"Ɛnkakɛnyá", "Ɛndámâ"}, +} + +var localeTableMasTZ = [5][]string{ + {"Jpi", "Jtt", "Jnn", "Jtn", "Alh", "Iju", "Jmo"}, + {"Jumapílí", "Jumatátu", "Jumane", "Jumatánɔ", "Alaámisi", "Jumáa", "Jumamósi"}, + {"Dal", "Ará", "Ɔɛn", "Doy", "Lép", "Rok", "Sás", "Bɔ́r", "Kús", "Gís", 
"Shʉ́", "Ntʉ́"}, + {"Oladalʉ́", "Arát", "Ɔɛnɨ́ɔɨŋɔk", "Olodoyíóríê inkókúâ", "Oloilépūnyīē inkókúâ", "Kújúɔrɔk", "Mórusásin", "Ɔlɔ́ɨ́bɔ́rárɛ", "Kúshîn", "Olgísan", "Pʉshʉ́ka", "Ntʉ́ŋʉ́s"}, + {"Ɛnkakɛnyá", "Ɛndámâ"}, +} + +var localeTableMer = [5][]string{ + {"KIU", "MRA", "WAI", "WET", "WEN", "WTN", "JUM"}, + {"Kiumia", "Muramuko", "Wairi", "Wethatu", "Wena", "Wetano", "Jumamosi"}, + {"JAN", "FEB", "MAC", "ĨPU", "MĨĨ", "NJU", "NJR", "AGA", "SPT", "OKT", "NOV", "DEC"}, + {"Januarĩ", "Feburuarĩ", "Machi", "Ĩpurũ", "Mĩĩ", "Njuni", "Njuraĩ", "Agasti", "Septemba", "Oktũba", "Novemba", "Dicemba"}, + {"RŨ", "ŨG"}, +} + +var localeTableMerKE = [5][]string{ + {"KIU", "MRA", "WAI", "WET", "WEN", "WTN", "JUM"}, + {"Kiumia", "Muramuko", "Wairi", "Wethatu", "Wena", "Wetano", "Jumamosi"}, + {"JAN", "FEB", "MAC", "ĨPU", "MĨĨ", "NJU", "NJR", "AGA", "SPT", "OKT", "NOV", "DEC"}, + {"Januarĩ", "Feburuarĩ", "Machi", "Ĩpurũ", "Mĩĩ", "Njuni", "Njuraĩ", "Agasti", "Septemba", "Oktũba", "Novemba", "Dicemba"}, + {"RŨ", "ŨG"}, +} + +var localeTableMfe = [5][]string{ + {"dim", "lin", "mar", "mer", "ze", "van", "sam"}, + {"dimans", "lindi", "mardi", "merkredi", "zedi", "vandredi", "samdi"}, + {"zan", "fev", "mar", "avr", "me", "zin", "zil", "out", "sep", "okt", "nov", "des"}, + {"zanvie", "fevriye", "mars", "avril", "me", "zin", "zilye", "out", "septam", "oktob", "novam", "desam"}, + {}, +} + +var localeTableMfeMU = [5][]string{ + {"dim", "lin", "mar", "mer", "ze", "van", "sam"}, + {"dimans", "lindi", "mardi", "merkredi", "zedi", "vandredi", "samdi"}, + {"zan", "fev", "mar", "avr", "me", "zin", "zil", "out", "sep", "okt", "nov", "des"}, + {"zanvie", "fevriye", "mars", "avril", "me", "zin", "zilye", "out", "septam", "oktob", "novam", "desam"}, + {}, +} + +var localeTableMg = [5][]string{ + {"Alah", "Alats", "Tal", "Alar", "Alak", "Zom", "Asab"}, + {"Alahady", "Alatsinainy", "Talata", "Alarobia", "Alakamisy", "Zoma", "Asabotsy"}, + {"Jan", "Feb", "Mar", "Apr", "Mey", "Jon", "Jol", "Aog", "Sep", "Okt", "Nov", "Des"}, + {"Janoary", "Febroary", "Martsa", "Aprily", "Mey", "Jona", "Jolay", "Aogositra", "Septambra", "Oktobra", "Novambra", "Desambra"}, + {}, +} + +var localeTableMgMG = [5][]string{ + {"Alah", "Alats", "Tal", "Alar", "Alak", "Zom", "Asab"}, + {"Alahady", "Alatsinainy", "Talata", "Alarobia", "Alakamisy", "Zoma", "Asabotsy"}, + {"Jan", "Feb", "Mar", "Apr", "Mey", "Jon", "Jol", "Aog", "Sep", "Okt", "Nov", "Des"}, + {"Janoary", "Febroary", "Martsa", "Aprily", "Mey", "Jona", "Jolay", "Aogositra", "Septambra", "Oktobra", "Novambra", "Desambra"}, + {}, +} + +var localeTableMgh = [5][]string{ + {"Sab", "Jtt", "Jnn", "Jtn", "Ara", "Iju", "Jmo"}, + {"Sabato", "Jumatatu", "Jumanne", "Jumatano", "Arahamisi", "Ijumaa", "Jumamosi"}, + {"Kwa", "Una", "Rar", "Che", "Tha", "Moc", "Sab", "Nan", "Tis", "Kum", "Moj", "Yel"}, + {"Mweri wo kwanza", "Mweri wo unayeli", "Mweri wo uneraru", "Mweri wo unecheshe", "Mweri wo unethanu", "Mweri wo thanu na mocha", "Mweri wo saba", "Mweri wo nane", "Mweri wo tisa", "Mweri wo kumi", "Mweri wo kumi na moja", "Mweri wo kumi na yel’li"}, + {"wichishu", "mchochil’l"}, +} + +var localeTableMghMZ = [5][]string{ + {"Sab", "Jtt", "Jnn", "Jtn", "Ara", "Iju", "Jmo"}, + {"Sabato", "Jumatatu", "Jumanne", "Jumatano", "Arahamisi", "Ijumaa", "Jumamosi"}, + {"Kwa", "Una", "Rar", "Che", "Tha", "Moc", "Sab", "Nan", "Tis", "Kum", "Moj", "Yel"}, + {"Mweri wo kwanza", "Mweri wo unayeli", "Mweri wo uneraru", "Mweri wo unecheshe", "Mweri wo unethanu", "Mweri wo thanu na mocha", "Mweri wo saba", "Mweri wo nane", 
"Mweri wo tisa", "Mweri wo kumi", "Mweri wo kumi na moja", "Mweri wo kumi na yel’li"}, + {"wichishu", "mchochil’l"}, +} + +var localeTableMgo = [5][]string{ + {}, + {"Aneg 1", "Aneg 2", "Aneg 3", "Aneg 4", "Aneg 5", "Aneg 6", "Aneg 7"}, + {"mbegtug", "imeg àbùbì", "imeg mbəŋchubi", "iməg ngwə̀t", "iməg fog", "iməg ichiibɔd", "iməg àdùmbə̀ŋ", "iməg ichika", "iməg kud", "iməg tèsiʼe", "iməg zò", "iməg krizmed"}, + {"iməg mbegtug", "imeg àbùbì", "imeg mbəŋchubi", "iməg ngwə̀t", "iməg fog", "iməg ichiibɔd", "iməg àdùmbə̀ŋ", "iməg ichika", "iməg kud", "iməg tèsiʼe", "iməg zò", "iməg krizmed"}, + {}, +} + +var localeTableMgoCM = [5][]string{ + {}, + {"Aneg 1", "Aneg 2", "Aneg 3", "Aneg 4", "Aneg 5", "Aneg 6", "Aneg 7"}, + {"mbegtug", "imeg àbùbì", "imeg mbəŋchubi", "iməg ngwə̀t", "iməg fog", "iməg ichiibɔd", "iməg àdùmbə̀ŋ", "iməg ichika", "iməg kud", "iməg tèsiʼe", "iməg zò", "iməg krizmed"}, + {"iməg mbegtug", "imeg àbùbì", "imeg mbəŋchubi", "iməg ngwə̀t", "iməg fog", "iməg ichiibɔd", "iməg àdùmbə̀ŋ", "iməg ichika", "iməg kud", "iməg tèsiʼe", "iməg zò", "iməg krizmed"}, + {}, +} + +var localeTableMi = [5][]string{ + {"Rāt", "Man", "Tūr", "Wen", "Tāi", "Par", "Rāh"}, + {"Rātapu", "Mane", "Tūrei", "Wenerei", "Tāite", "Paraire", "Rāhoroi"}, + {"Hān", "Pēp", "Māe", "Āpe", "Mei", "Hun", "Hūr", "Āku", "Hep", "Oke", "Noe", "Tīh"}, + {"Hānuere", "Pēpuere", "Māehe", "Āpereira", "Mei", "Hune", "Hūrae", "Ākuhata", "Hepetema", "Oketopa", "Noema", "Tīhema"}, + {}, +} + +var localeTableMiNZ = [5][]string{ + {"Rāt", "Man", "Tūr", "Wen", "Tāi", "Par", "Rāh"}, + {"Rātapu", "Mane", "Tūrei", "Wenerei", "Tāite", "Paraire", "Rāhoroi"}, + {"Hān", "Pēp", "Māe", "Āpe", "Mei", "Hun", "Hūr", "Āku", "Hep", "Oke", "Noe", "Tīh"}, + {"Hānuere", "Pēpuere", "Māehe", "Āpereira", "Mei", "Hune", "Hūrae", "Ākuhata", "Hepetema", "Oketopa", "Noema", "Tīhema"}, + {}, +} + +var localeTableMk = [5][]string{ + {"нед.", "пон.", "вто.", "сре.", "чет.", "пет.", "саб."}, + {"недела", "понеделник", "вторник", "среда", "четврток", "петок", "сабота"}, + {"јан.", "фев.", "мар.", "апр.", "мај", "јун.", "јул.", "авг.", "сеп.", "окт.", "ное.", "дек."}, + {"јануари", "февруари", "март", "април", "мај", "јуни", "јули", "август", "септември", "октомври", "ноември", "декември"}, + {"претпл.", "попл."}, +} + +var localeTableMkMK = [5][]string{ + {"нед.", "пон.", "вто.", "сре.", "чет.", "пет.", "саб."}, + {"недела", "понеделник", "вторник", "среда", "четврток", "петок", "сабота"}, + {"јан.", "фев.", "мар.", "апр.", "мај", "јун.", "јул.", "авг.", "сеп.", "окт.", "ное.", "дек."}, + {"јануари", "февруари", "март", "април", "мај", "јуни", "јули", "август", "септември", "октомври", "ноември", "декември"}, + {"претпл.", "попл."}, +} + +var localeTableMl = [5][]string{ + {"ഞായർ", "തിങ്കൾ", "ചൊവ്വ", "ബുധൻ", "വ്യാഴം", "വെള്ളി", "ശനി"}, + {"ഞായറാഴ്‌ച", "തിങ്കളാഴ്‌ച", "ചൊവ്വാഴ്ച", "ബുധനാഴ്‌ച", "വ്യാഴാഴ്‌ച", "വെള്ളിയാഴ്‌ച", "ശനിയാഴ്‌ച"}, + {"ജനു", "ഫെബ്രു", "മാർ", "ഏപ്രി", "മേയ്", "ജൂൺ", "ജൂലൈ", "ഓഗ", "സെപ്റ്റം", "ഒക്ടോ", "നവം", "ഡിസം"}, + {"ജനുവരി", "ഫെബ്രുവരി", "മാർച്ച്", "ഏപ്രിൽ", "മേയ്", "ജൂൺ", "ജൂലൈ", "ഓഗസ്റ്റ്", "സെപ്റ്റംബർ", "ഒക്‌ടോബർ", "നവംബർ", "ഡിസംബർ"}, + {}, +} + +var localeTableMlIN = [5][]string{ + {"ഞായർ", "തിങ്കൾ", "ചൊവ്വ", "ബുധൻ", "വ്യാഴം", "വെള്ളി", "ശനി"}, + {"ഞായറാഴ്‌ച", "തിങ്കളാഴ്‌ച", "ചൊവ്വാഴ്ച", "ബുധനാഴ്‌ച", "വ്യാഴാഴ്‌ച", "വെള്ളിയാഴ്‌ച", "ശനിയാഴ്‌ച"}, + {"ജനു", "ഫെബ്രു", "മാർ", "ഏപ്രി", "മേയ്", "ജൂൺ", "ജൂലൈ", "ഓഗ", "സെപ്റ്റം", "ഒക്ടോ", "നവം", "ഡിസം"}, + {"ജനുവരി", "ഫെബ്രുവരി", "മാർച്ച്", "ഏപ്രിൽ", "മേയ്", "ജൂൺ", "ജൂലൈ", "ഓഗസ്റ്റ്", 
"സെപ്റ്റംബർ", "ഒക്‌ടോബർ", "നവംബർ", "ഡിസംബർ"}, + {}, +} + +var localeTableMn = [5][]string{ + {"Ня", "Да", "Мя", "Лх", "Пү", "Ба", "Бя"}, + {"ням", "даваа", "мягмар", "лхагва", "пүрэв", "баасан", "бямба"}, + {"1-р сар", "2-р сар", "3-р сар", "4-р сар", "5-р сар", "6-р сар", "7-р сар", "8-р сар", "9-р сар", "10-р сар", "11-р сар", "12-р сар"}, + {"нэгдүгээр сар", "хоёрдугаар сар", "гуравдугаар сар", "дөрөвдүгээр сар", "тавдугаар сар", "зургаадугаар сар", "долоодугаар сар", "наймдугаар сар", "есдүгээр сар", "аравдугаар сар", "арван нэгдүгээр сар", "арван хоёрдугаар сар"}, + {"ү.ө.", "ү.х."}, +} + +var localeTableMnMN = [5][]string{ + {"Ня", "Да", "Мя", "Лх", "Пү", "Ба", "Бя"}, + {"ням", "даваа", "мягмар", "лхагва", "пүрэв", "баасан", "бямба"}, + {"1-р сар", "2-р сар", "3-р сар", "4-р сар", "5-р сар", "6-р сар", "7-р сар", "8-р сар", "9-р сар", "10-р сар", "11-р сар", "12-р сар"}, + {"нэгдүгээр сар", "хоёрдугаар сар", "гуравдугаар сар", "дөрөвдүгээр сар", "тавдугаар сар", "зургаадугаар сар", "долоодугаар сар", "наймдугаар сар", "есдүгээр сар", "аравдугаар сар", "арван нэгдүгээр сар", "арван хоёрдугаар сар"}, + {"ү.ө.", "ү.х."}, +} + +var localeTableMnMongMN = [5][]string{ + {"ᠨᠢ", "ᠲᠠ", "ᠮᠢᠭ", "ᡀᠠ", "ᠫᠥᠷ", "ᠪᠠ", "ᠪᠢᠮ"}, + {"ᠨᠢᠮ᠎ᠠ", "ᠳᠠᠸᠠ", "ᠮᠢᠠᠠᠮᠠᠷ", "ᡀᠠᠭᠪᠠ", "ᠫᠦᠷᠪᠦ", "ᠪᠠᠰᠠᠩ", "ᠪᠢᠮᠪᠠ"}, + {"1 ᠊ᠷ ᠰᠠᠷ᠎ᠠ", "2 ᠊ᠷ ᠰᠠᠷ᠎ᠠ", "3᠊ᠷ ᠰᠠᠷ᠎ᠠ", "4 ᠊ᠷ ᠰᠠᠷ᠎ᠠ", "5 ᠊ᠷ ᠰᠠᠷ᠎ᠠ", "6 ᠊ᠷ ᠰᠠᠷ᠎ᠠ", "7 ᠊ᠷ ᠰᠠᠷ᠎ᠠ", "8᠊ᠷ ᠰᠠᠷ᠎ᠠ", "9 ᠊ᠷ ᠰᠠᠷ᠎ᠠ", "10 ᠊ᠷ ᠰᠠᠷ᠎ᠠ", "11 ᠊ᠷ ᠰᠠᠷ᠎ᠠ", "12 ᠊ᠷ ᠰᠠᠷ᠎ᠠ"}, + {"ᠨᠢᠭᠡᠳᠥᠭᠡᠷ ᠰᠠᠷ᠎ᠠ", "ᠬᠣᠶᠠᠳᠣᠭᠠᠷ ᠰᠠᠷ ᠠ", "ᠭᠣᠷᠪᠡᠳᠣᠭᠠᠷ ᠰᠠᠷ ᠠ", "ᠳᠥᠷᠪᠡᠳᠥᠭᠡᠷ ᠰᠠᠷ᠎ᠠ", "ᠲᠠᠪᠣᠳᠣᠭᠠᠷ ᠰᠠᠷ ᠠ", "ᠵᠢᠷᠭᠣᠭᠠᠳᠣᠭᠠᠷ ᠰᠠᠷ᠎ᠠ", "ᠲᠣᠯᠣᠭᠠᠳᠣᠭᠠᠷ ᠰᠠᠷ᠎ᠠ", "ᠨᠠᠢᠮᠠᠳᠥᠭᠠᠷ ᠰᠠᠷ᠎ᠠ", "ᠶᠢᠰᠥᠳᠥᠭᠡᠷ ᠰᠠᠷ᠎ᠠ", "ᠠᠷᠪᠠᠳᠣᠭᠠᠷ ᠰᠠᠷ᠎ᠠ", "ᠠᠷᠪᠠᠨ ᠨᠢᠭᠡᠳᠥᠭᠡᠷ ᠰᠠᠷ᠎ᠠ", "ᠠᠷᠪᠠᠨ ᠬᠣᠶᠠᠳᠣᠭᠠᠷ ᠰᠠᠷ᠎ᠠ"}, + {}, +} + +var localeTableMni = [5][]string{ + {}, + {"নোংমাইজিং", "নিংথৌকাবা", "লৈবাকপোকপা", "য়ুমশকৈশা", "শগোলশেন", "ইরাই", "থাংজ"}, + {"জন", "ফেব্রুৱারি", "মার্চ", "এপ্রিল", "মে", "জুন", "জুলাই", "ওগ", "সেপ্টেম্বর", "ওক্টোবর", "নভেম্বর", "ডিসেম্বর"}, + {"জনুৱারী", "ফেব্রুৱারি", "মার্চ", "এপ্রিল", "মে", "জুন", "জুলাই", "‌ওগষ্ট", "সেপ্টেম্বর", "ওক্টোবর", "নভেম্বর", "ডিসেম্বর"}, + {"নুমাং", "PM"}, +} + +var localeTableMniBeng = [5][]string{ + {}, + {"নোংমাইজিং", "নিংথৌকাবা", "লৈবাকপোকপা", "য়ুমশকৈশা", "শগোলশেন", "ইরাই", "থাংজ"}, + {"জন", "ফেব্রুৱারি", "মার্চ", "এপ্রিল", "মে", "জুন", "জুলাই", "ওগ", "সেপ্টেম্বর", "ওক্টোবর", "নভেম্বর", "ডিসেম্বর"}, + {"জনুৱারী", "ফেব্রুৱারি", "মার্চ", "এপ্রিল", "মে", "জুন", "জুলাই", "‌ওগষ্ট", "সেপ্টেম্বর", "ওক্টোবর", "নভেম্বর", "ডিসেম্বর"}, + {"নুমাং", "PM"}, +} + +var localeTableMniBengIN = [5][]string{ + {}, + {"নোংমাইজিং", "নিংথৌকাবা", "লৈবাকপোকপা", "য়ুমশকৈশা", "শগোলশেন", "ইরাই", "থাংজ"}, + {"জন", "ফেব্রুৱারি", "মার্চ", "এপ্রিল", "মে", "জুন", "জুলাই", "ওগ", "সেপ্টেম্বর", "ওক্টোবর", "নভেম্বর", "ডিসেম্বর"}, + {"জনুৱারী", "ফেব্রুৱারি", "মার্চ", "এপ্রিল", "মে", "জুন", "জুলাই", "‌ওগষ্ট", "সেপ্টেম্বর", "ওক্টোবর", "নভেম্বর", "ডিসেম্বর"}, + {"নুমাং", "PM"}, +} + +var localeTableMr = [5][]string{ + {"रवि", "सोम", "मंगळ", "बुध", "गुरु", "शुक्र", "शनि"}, + {"रविवार", "सोमवार", "मंगळवार", "बुधवार", "गुरुवार", "शुक्रवार", "शनिवार"}, + {"जाने", "फेब्रु", "मार्च", "एप्रि", "मे", "जून", "जुलै", "ऑग", "सप्टें", "ऑक्टो", "नोव्हें", "डिसें"}, + {"जानेवारी", "फेब्रुवारी", "मार्च", "एप्रिल", "मे", "जून", "जुलै", "ऑगस्ट", "सप्टेंबर", "ऑक्टोबर", "नोव्हेंबर", "डिसेंबर"}, + {"a", "p"}, +} + +var localeTableMrIN = [5][]string{ + {"रवि", "सोम", "मंगळ", "बुध", "गुरु", "शुक्र", "शनि"}, + {"रविवार", "सोमवार", "मंगळवार", "बुधवार", "गुरुवार", "शुक्रवार", "शनिवार"}, + {"जाने", "फेब्रु", 
"मार्च", "एप्रि", "मे", "जून", "जुलै", "ऑग", "सप्टें", "ऑक्टो", "नोव्हें", "डिसें"}, + {"जानेवारी", "फेब्रुवारी", "मार्च", "एप्रिल", "मे", "जून", "जुलै", "ऑगस्ट", "सप्टेंबर", "ऑक्टोबर", "नोव्हेंबर", "डिसेंबर"}, + {"a", "p"}, +} + +var localeTableMs = [5][]string{ + {"Ahd", "Isn", "Sel", "Rab", "Kha", "Jum", "Sab"}, + {"Ahad", "Isnin", "Selasa", "Rabu", "Khamis", "Jumaat", "Sabtu"}, + {"Jan", "Feb", "Mac", "Apr", "Mei", "Jun", "Jul", "Ogo", "Sep", "Okt", "Nov", "Dis"}, + {"Januari", "Februari", "Mac", "April", "Mei", "Jun", "Julai", "Ogos", "September", "Oktober", "November", "Disember"}, + {"PG", "PTG"}, +} + +var localeTableMsArab = [5][]string{ + {}, + {"احد", "اثنين", "ثلاث", "رابو", "خميس", "جمعة", "سبتو"}, + {}, + {"جانواري", "فيبواري", "مچ", "اڤريل", "مي", "جون", "جولاي", "ݢوس", "سيڤتيمبر", "اوکتوبر", "نوۏيمبر", "ديسيمبر"}, + {}, +} + +var localeTableMsArabBN = [5][]string{ + {}, + {"احد", "اثنين", "ثلاث", "رابو", "خميس", "جمعة", "سبتو"}, + {}, + {"جانواري", "فيبواري", "مچ", "اڤريل", "مي", "جون", "جولاي", "ݢوس", "سيڤتيمبر", "اوکتوبر", "نوۏيمبر", "ديسيمبر"}, + {}, +} + +var localeTableMsArabMY = [5][]string{ + {}, + {"احد", "اثنين", "ثلاث", "رابو", "خميس", "جمعة", "سبتو"}, + {}, + {"جانواري", "فيبواري", "مچ", "اڤريل", "مي", "جون", "جولاي", "ݢوس", "سيڤتيمبر", "اوکتوبر", "نوۏيمبر", "ديسيمبر"}, + {}, +} + +var localeTableMsBN = [5][]string{ + {"Ahd", "Isn", "Sel", "Rab", "Kha", "Jum", "Sab"}, + {"Ahad", "Isnin", "Selasa", "Rabu", "Khamis", "Jumaat", "Sabtu"}, + {"Jan", "Feb", "Mac", "Apr", "Mei", "Jun", "Jul", "Ogo", "Sep", "Okt", "Nov", "Dis"}, + {"Januari", "Februari", "Mac", "April", "Mei", "Jun", "Julai", "Ogos", "September", "Oktober", "November", "Disember"}, + {"PG", "PTG"}, +} + +var localeTableMsID = [5][]string{ + {}, + {"احد", "اثنين", "ثلاث", "رابو", "خميس", "جمعة", "سبتو"}, + {}, + {"جانواري", "فيبواري", "مچ", "اڤريل", "مي", "جون", "جولاي", "ݢوس", "سيڤتيمبر", "اوکتوبر", "نوۏيمبر", "ديسيمبر"}, + {}, +} + +var localeTableMsMY = [5][]string{ + {"Ahd", "Isn", "Sel", "Rab", "Kha", "Jum", "Sab"}, + {"Ahad", "Isnin", "Selasa", "Rabu", "Khamis", "Jumaat", "Sabtu"}, + {"Jan", "Feb", "Mac", "Apr", "Mei", "Jun", "Jul", "Ogo", "Sep", "Okt", "Nov", "Dis"}, + {"Januari", "Februari", "Mac", "April", "Mei", "Jun", "Julai", "Ogos", "September", "Oktober", "November", "Disember"}, + {"PG", "PTG"}, +} + +var localeTableMsSG = [5][]string{ + {"Ahd", "Isn", "Sel", "Rab", "Kha", "Jum", "Sab"}, + {"Ahad", "Isnin", "Selasa", "Rabu", "Khamis", "Jumaat", "Sabtu"}, + {"Jan", "Feb", "Mac", "Apr", "Mei", "Jun", "Jul", "Ogo", "Sep", "Okt", "Nov", "Dis"}, + {"Januari", "Februari", "Mac", "April", "Mei", "Jun", "Julai", "Ogos", "September", "Oktober", "November", "Disember"}, + {"PG", "PTG"}, +} + +var localeTableMt = [5][]string{ + {"Ħad", "Tne", "Tli", "Erb", "Ħam", "Ġim", "Sib"}, + {"Il-Ħadd", "It-Tnejn", "It-Tlieta", "L-Erbgħa", "Il-Ħamis", "Il-Ġimgħa", "Is-Sibt"}, + {"Jan", "Fra", "Mar", "Apr", "Mej", "Ġun", "Lul", "Aww", "Set", "Ott", "Nov", "Diċ"}, + {"Jannar", "Frar", "Marzu", "April", "Mejju", "Ġunju", "Lulju", "Awwissu", "Settembru", "Ottubru", "Novembru", "Diċembru"}, + {"am", "pm"}, +} + +var localeTableMtMT = [5][]string{ + {"Ħad", "Tne", "Tli", "Erb", "Ħam", "Ġim", "Sib"}, + {"Il-Ħadd", "It-Tnejn", "It-Tlieta", "L-Erbgħa", "Il-Ħamis", "Il-Ġimgħa", "Is-Sibt"}, + {"Jan", "Fra", "Mar", "Apr", "Mej", "Ġun", "Lul", "Aww", "Set", "Ott", "Nov", "Diċ"}, + {"Jannar", "Frar", "Marzu", "April", "Mejju", "Ġunju", "Lulju", "Awwissu", "Settembru", "Ottubru", "Novembru", "Diċembru"}, + {"am", "pm"}, +} + 
+var localeTableMua = [5][]string{ + {"Cya", "Cla", "Czi", "Cko", "Cka", "Cga", "Cze"}, + {"Com’yakke", "Comlaaɗii", "Comzyiiɗii", "Comkolle", "Comkaldǝɓlii", "Comgaisuu", "Comzyeɓsuu"}, + {"FLO", "CLA", "CKI", "FMF", "MAD", "MBI", "MLI", "MAM", "FDE", "FMU", "FGW", "FYU"}, + {"Fĩi Loo", "Cokcwaklaŋne", "Cokcwaklii", "Fĩi Marfoo", "Madǝǝuutǝbijaŋ", "Mamǝŋgwãafahbii", "Mamǝŋgwãalii", "Madǝmbii", "Fĩi Dǝɓlii", "Fĩi Mundaŋ", "Fĩi Gwahlle", "Fĩi Yuru"}, + {"comme", "lilli"}, +} + +var localeTableMuaCM = [5][]string{ + {"Cya", "Cla", "Czi", "Cko", "Cka", "Cga", "Cze"}, + {"Com’yakke", "Comlaaɗii", "Comzyiiɗii", "Comkolle", "Comkaldǝɓlii", "Comgaisuu", "Comzyeɓsuu"}, + {"FLO", "CLA", "CKI", "FMF", "MAD", "MBI", "MLI", "MAM", "FDE", "FMU", "FGW", "FYU"}, + {"Fĩi Loo", "Cokcwaklaŋne", "Cokcwaklii", "Fĩi Marfoo", "Madǝǝuutǝbijaŋ", "Mamǝŋgwãafahbii", "Mamǝŋgwãalii", "Madǝmbii", "Fĩi Dǝɓlii", "Fĩi Mundaŋ", "Fĩi Gwahlle", "Fĩi Yuru"}, + {"comme", "lilli"}, +} + +var localeTableMus = [5][]string{ + {}, + {"Nettvʼcako", "Enhvteceskv", "Enhvteceskv Enhvyvtke", "Ennvrkvpv", "Ennvrkvpv Enhvyvtke", "Nak Okkoskv Nettv", "Nettv Cakʼcuse"}, + {}, + {"Rvfo Cuse", "Hotvle Hvse", "Tasahcuce", "Tasahce Rakko", "Ke Hvse", "Kvco Hvse", "Hiyuce", "Hiyo Rakko", "Otowoskuce", "Otowoskv Rakko", "Ehole", "Rvfo Rakko"}, + {}, +} + +var localeTableMusUS = [5][]string{ + {}, + {"Nettvʼcako", "Enhvteceskv", "Enhvteceskv Enhvyvtke", "Ennvrkvpv", "Ennvrkvpv Enhvyvtke", "Nak Okkoskv Nettv", "Nettv Cakʼcuse"}, + {}, + {"Rvfo Cuse", "Hotvle Hvse", "Tasahcuce", "Tasahce Rakko", "Ke Hvse", "Kvco Hvse", "Hiyuce", "Hiyo Rakko", "Otowoskuce", "Otowoskv Rakko", "Ehole", "Rvfo Rakko"}, + {}, +} + +var localeTableMy = [5][]string{ + {}, + {"တနင်္ဂနွေ", "တနင်္လာ", "အင်္ဂါ", "ဗုဒ္ဓဟူး", "ကြာသပတေး", "သောကြာ", "စနေ"}, + {"ဇန်", "ဖေ", "မတ်", "ဧ", "မေ", "ဇွန်", "ဇူ", "ဩ", "စက်", "အောက်", "နို", "ဒီ"}, + {"ဇန်နဝါရီ", "ဖေဖော်ဝါရီ", "မတ်", "ဧပြီ", "မေ", "ဇွန်", "ဇူလိုင်", "ဩဂုတ်", "စက်တင်ဘာ", "အောက်တိုဘာ", "နိုဝင်ဘာ", "ဒီဇင်ဘာ"}, + {"နံနက်", "ညနေ"}, +} + +var localeTableMyMM = [5][]string{ + {}, + {"တနင်္ဂနွေ", "တနင်္လာ", "အင်္ဂါ", "ဗုဒ္ဓဟူး", "ကြာသပတေး", "သောကြာ", "စနေ"}, + {"ဇန်", "ဖေ", "မတ်", "ဧ", "မေ", "ဇွန်", "ဇူ", "ဩ", "စက်", "အောက်", "နို", "ဒီ"}, + {"ဇန်နဝါရီ", "ဖေဖော်ဝါရီ", "မတ်", "ဧပြီ", "မေ", "ဇွန်", "ဇူလိုင်", "ဩဂုတ်", "စက်တင်ဘာ", "အောက်တိုဘာ", "နိုဝင်ဘာ", "ဒီဇင်ဘာ"}, + {"နံနက်", "ညနေ"}, +} + +var localeTableMyv = [5][]string{ + {"тар", "атя", "вас", "кун", "кал", "сюк", "шля"}, + {"таргочистэ", "атяньчистэ", "вастаньчистэ", "куншкачистэ", "калоньчистэ", "сюконьчистэ", "шлямочистэ"}, + {}, + {"якшамков", "даволков", "эйзюрков", "чадыков", "панжиков", "аштемков", "медьков", "умарьков", "таштамков", "ожоков", "сундерьков", "ацамков"}, + {}, +} + +var localeTableMyvRU = [5][]string{ + {"тар", "атя", "вас", "кун", "кал", "сюк", "шля"}, + {"таргочистэ", "атяньчистэ", "вастаньчистэ", "куншкачистэ", "калоньчистэ", "сюконьчистэ", "шлямочистэ"}, + {}, + {"якшамков", "даволков", "эйзюрков", "чадыков", "панжиков", "аштемков", "медьков", "умарьков", "таштамков", "ожоков", "сундерьков", "ацамков"}, + {}, +} + +var localeTableMzn = [5][]string{ + {}, + {}, + {}, + {"ژانویه", "فوریه", "مارس", "آوریل", "مه", "ژوئن", "ژوئیه", "اوت", "سپتامبر", "اکتبر", "نوامبر", "دسامبر"}, + {}, +} + +var localeTableMznIR = [5][]string{ + {}, + {}, + {}, + {"ژانویه", "فوریه", "مارس", "آوریل", "مه", "ژوئن", "ژوئیه", "اوت", "سپتامبر", "اکتبر", "نوامبر", "دسامبر"}, + {}, +} + +var localeTableNaq = [5][]string{ + {"Son", "Ma", "De", "Wu", "Do", "Fr", "Sat"}, + 
{"Sontaxtsees", "Mantaxtsees", "Denstaxtsees", "Wunstaxtsees", "Dondertaxtsees", "Fraitaxtsees", "Satertaxtsees"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"}, + {"ǃKhanni", "ǃKhanǀgôab", "ǀKhuuǁkhâb", "ǃHôaǂkhaib", "ǃKhaitsâb", "Gamaǀaeb", "ǂKhoesaob", "Aoǁkhuumûǁkhâb", "Taraǀkhuumûǁkhâb", "ǂNûǁnâiseb", "ǀHooǂgaeb", "Hôasoreǁkhâb"}, + {"ǁgoagas", "ǃuias"}, +} + +var localeTableNaqNA = [5][]string{ + {"Son", "Ma", "De", "Wu", "Do", "Fr", "Sat"}, + {"Sontaxtsees", "Mantaxtsees", "Denstaxtsees", "Wunstaxtsees", "Dondertaxtsees", "Fraitaxtsees", "Satertaxtsees"}, + {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"}, + {"ǃKhanni", "ǃKhanǀgôab", "ǀKhuuǁkhâb", "ǃHôaǂkhaib", "ǃKhaitsâb", "Gamaǀaeb", "ǂKhoesaob", "Aoǁkhuumûǁkhâb", "Taraǀkhuumûǁkhâb", "ǂNûǁnâiseb", "ǀHooǂgaeb", "Hôasoreǁkhâb"}, + {"ǁgoagas", "ǃuias"}, +} + +var localeTableNd = [5][]string{ + {"Son", "Mvu", "Sib", "Sit", "Sin", "Sih", "Mgq"}, + {"Sonto", "Mvulo", "Sibili", "Sithathu", "Sine", "Sihlanu", "Mgqibelo"}, + {"Zib", "Nhlo", "Mbi", "Mab", "Nkw", "Nhla", "Ntu", "Ncw", "Mpan", "Mfu", "Lwe", "Mpal"}, + {"Zibandlela", "Nhlolanja", "Mbimbitho", "Mabasa", "Nkwenkwezi", "Nhlangula", "Ntulikazi", "Ncwabakazi", "Mpandula", "Mfumfu", "Lwezi", "Mpalakazi"}, + {}, +} + +var localeTableNdZW = [5][]string{ + {"Son", "Mvu", "Sib", "Sit", "Sin", "Sih", "Mgq"}, + {"Sonto", "Mvulo", "Sibili", "Sithathu", "Sine", "Sihlanu", "Mgqibelo"}, + {"Zib", "Nhlo", "Mbi", "Mab", "Nkw", "Nhla", "Ntu", "Ncw", "Mpan", "Mfu", "Lwe", "Mpal"}, + {"Zibandlela", "Nhlolanja", "Mbimbitho", "Mabasa", "Nkwenkwezi", "Nhlangula", "Ntulikazi", "Ncwabakazi", "Mpandula", "Mfumfu", "Lwezi", "Mpalakazi"}, + {}, +} + +var localeTableNds = [5][]string{ + {"Sü.", "Ma.", "Di.", "Mi.", "Du.", "Fr.", "Sa."}, + {"Sünndag", "Maandag", "Dingsdag", "Middeweken", "Dunnersdag", "Freedag", "Sünnavend"}, + {"Jan.", "Feb.", "März", "Apr.", "Mai", "Juni", "Juli", "Aug.", "Sep.", "Okt.", "Nov.", "Dez."}, + {"Januaar", "Februaar", "März", "April", "Mai", "Juni", "Juli", "August", "September", "Oktover", "November", "Dezember"}, + {"vm", "nm"}, +} + +var localeTableNdsDE = [5][]string{ + {"Sü.", "Ma.", "Di.", "Mi.", "Du.", "Fr.", "Sa."}, + {"Sünndag", "Maandag", "Dingsdag", "Middeweken", "Dunnersdag", "Freedag", "Sünnavend"}, + {"Jan.", "Feb.", "März", "Apr.", "Mai", "Juni", "Juli", "Aug.", "Sep.", "Okt.", "Nov.", "Dez."}, + {"Januaar", "Februaar", "März", "April", "Mai", "Juni", "Juli", "August", "September", "Oktover", "November", "Dezember"}, + {"vm", "nm"}, +} + +var localeTableNdsNL = [5][]string{ + {"Sü.", "Ma.", "Di.", "Mi.", "Du.", "Fr.", "Sa."}, + {"Sünndag", "Maandag", "Dingsdag", "Middeweken", "Dunnersdag", "Freedag", "Sünnavend"}, + {"Jan.", "Feb.", "März", "Apr.", "Mai", "Juni", "Juli", "Aug.", "Sep.", "Okt.", "Nov.", "Dez."}, + {"Januaar", "Februaar", "März", "April", "Mai", "Juni", "Juli", "August", "September", "Oktover", "November", "Dezember"}, + {"vm", "nm"}, +} + +var localeTableNe = [5][]string{ + {"आइत", "सोम", "मङ्गल", "बुध", "बिहि", "शुक्र", "शनि"}, + {"आइतबार", "सोमबार", "मङ्गलबार", "बुधबार", "बिहिबार", "शुक्रबार", "शनिबार"}, + {}, + {"जनवरी", "फेब्रुअरी", "मार्च", "अप्रिल", "मे", "जुन", "जुलाई", "अगस्ट", "सेप्टेम्बर", "अक्टोबर", "नोभेम्बर", "डिसेम्बर"}, + {"पूर्वाह्न", "अपराह्न"}, +} + +var localeTableNeIN = [5][]string{ + {"आइत", "सोम", "मङ्गल", "बुध", "बिहि", "शुक्र", "शनि"}, + {"आइतबार", "सोमबार", "मङ्गलबार", "बुधबार", "बिहिबार", "शुक्रबार", "शनिबार"}, + {}, + {"जनवरी", "फेब्रुअरी", 
"मार्च", "अप्रिल", "मे", "जुन", "जुलाई", "अगस्ट", "सेप्टेम्बर", "अक्टोबर", "नोभेम्बर", "डिसेम्बर"}, + {"पूर्वाह्न", "अपराह्न"}, +} + +var localeTableNeNP = [5][]string{ + {"आइत", "सोम", "मङ्गल", "बुध", "बिहि", "शुक्र", "शनि"}, + {"आइतबार", "सोमबार", "मङ्गलबार", "बुधबार", "बिहिबार", "शुक्रबार", "शनिबार"}, + {}, + {"जनवरी", "फेब्रुअरी", "मार्च", "अप्रिल", "मे", "जुन", "जुलाई", "अगस्ट", "सेप्टेम्बर", "अक्टोबर", "नोभेम्बर", "डिसेम्बर"}, + {"पूर्वाह्न", "अपराह्न"}, +} + +var localeTableNl = [5][]string{ + {"zo", "ma", "di", "wo", "do", "vr", "za"}, + {"zondag", "maandag", "dinsdag", "woensdag", "donderdag", "vrijdag", "zaterdag"}, + {"jan", "feb", "mrt", "apr", "mei", "jun", "jul", "aug", "sep", "okt", "nov", "dec"}, + {"januari", "februari", "maart", "april", "mei", "juni", "juli", "augustus", "september", "oktober", "november", "december"}, + {"a.m.", "p.m."}, +} + +var localeTableNlAW = [5][]string{ + {"zo", "ma", "di", "wo", "do", "vr", "za"}, + {"zondag", "maandag", "dinsdag", "woensdag", "donderdag", "vrijdag", "zaterdag"}, + {"jan", "feb", "mrt", "apr", "mei", "jun", "jul", "aug", "sep", "okt", "nov", "dec"}, + {"januari", "februari", "maart", "april", "mei", "juni", "juli", "augustus", "september", "oktober", "november", "december"}, + {"a.m.", "p.m."}, +} + +var localeTableNlBE = [5][]string{ + {"zo", "ma", "di", "wo", "do", "vr", "za"}, + {"zondag", "maandag", "dinsdag", "woensdag", "donderdag", "vrijdag", "zaterdag"}, + {"jan", "feb", "mrt", "apr", "mei", "jun", "jul", "aug", "sep", "okt", "nov", "dec"}, + {"januari", "februari", "maart", "april", "mei", "juni", "juli", "augustus", "september", "oktober", "november", "december"}, + {"a.m.", "p.m."}, +} + +var localeTableNlBQ = [5][]string{ + {"zo", "ma", "di", "wo", "do", "vr", "za"}, + {"zondag", "maandag", "dinsdag", "woensdag", "donderdag", "vrijdag", "zaterdag"}, + {"jan", "feb", "mrt", "apr", "mei", "jun", "jul", "aug", "sep", "okt", "nov", "dec"}, + {"januari", "februari", "maart", "april", "mei", "juni", "juli", "augustus", "september", "oktober", "november", "december"}, + {"a.m.", "p.m."}, +} + +var localeTableNlCW = [5][]string{ + {"zo", "ma", "di", "wo", "do", "vr", "za"}, + {"zondag", "maandag", "dinsdag", "woensdag", "donderdag", "vrijdag", "zaterdag"}, + {"jan", "feb", "mrt", "apr", "mei", "jun", "jul", "aug", "sep", "okt", "nov", "dec"}, + {"januari", "februari", "maart", "april", "mei", "juni", "juli", "augustus", "september", "oktober", "november", "december"}, + {"a.m.", "p.m."}, +} + +var localeTableNlNL = [5][]string{ + {"zo", "ma", "di", "wo", "do", "vr", "za"}, + {"zondag", "maandag", "dinsdag", "woensdag", "donderdag", "vrijdag", "zaterdag"}, + {"jan", "feb", "mrt", "apr", "mei", "jun", "jul", "aug", "sep", "okt", "nov", "dec"}, + {"januari", "februari", "maart", "april", "mei", "juni", "juli", "augustus", "september", "oktober", "november", "december"}, + {"a.m.", "p.m."}, +} + +var localeTableNlSR = [5][]string{ + {"zo", "ma", "di", "wo", "do", "vr", "za"}, + {"zondag", "maandag", "dinsdag", "woensdag", "donderdag", "vrijdag", "zaterdag"}, + {"jan", "feb", "mrt", "apr", "mei", "jun", "jul", "aug", "sep", "okt", "nov", "dec"}, + {"januari", "februari", "maart", "april", "mei", "juni", "juli", "augustus", "september", "oktober", "november", "december"}, + {"a.m.", "p.m."}, +} + +var localeTableNlSX = [5][]string{ + {"zo", "ma", "di", "wo", "do", "vr", "za"}, + {"zondag", "maandag", "dinsdag", "woensdag", "donderdag", "vrijdag", "zaterdag"}, + {"jan", "feb", "mrt", "apr", "mei", "jun", "jul", "aug", "sep", 
"okt", "nov", "dec"}, + {"januari", "februari", "maart", "april", "mei", "juni", "juli", "augustus", "september", "oktober", "november", "december"}, + {"a.m.", "p.m."}, +} + +var localeTableNmg = [5][]string{ + {"sɔ́n", "mɔ́n", "smb", "sml", "smn", "mbs", "sas"}, + {"sɔ́ndɔ", "mɔ́ndɔ", "sɔ́ndɔ mafú mába", "sɔ́ndɔ mafú málal", "sɔ́ndɔ mafú mána", "mabágá má sukul", "sásadi"}, + {"ng1", "ng2", "ng3", "ng4", "ng5", "ng6", "ng7", "ng8", "ng9", "ng10", "ng11", "kris"}, + {"ngwɛn matáhra", "ngwɛn ńmba", "ngwɛn ńlal", "ngwɛn ńna", "ngwɛn ńtan", "ngwɛn ńtuó", "ngwɛn hɛmbuɛrí", "ngwɛn lɔmbi", "ngwɛn rɛbvuâ", "ngwɛn wum", "ngwɛn wum navǔr", "krísimin"}, + {"maná", "kugú"}, +} + +var localeTableNmgCM = [5][]string{ + {"sɔ́n", "mɔ́n", "smb", "sml", "smn", "mbs", "sas"}, + {"sɔ́ndɔ", "mɔ́ndɔ", "sɔ́ndɔ mafú mába", "sɔ́ndɔ mafú málal", "sɔ́ndɔ mafú mána", "mabágá má sukul", "sásadi"}, + {"ng1", "ng2", "ng3", "ng4", "ng5", "ng6", "ng7", "ng8", "ng9", "ng10", "ng11", "kris"}, + {"ngwɛn matáhra", "ngwɛn ńmba", "ngwɛn ńlal", "ngwɛn ńna", "ngwɛn ńtan", "ngwɛn ńtuó", "ngwɛn hɛmbuɛrí", "ngwɛn lɔmbi", "ngwɛn rɛbvuâ", "ngwɛn wum", "ngwɛn wum navǔr", "krísimin"}, + {"maná", "kugú"}, +} + +var localeTableNn = [5][]string{ + {"sø.", "må.", "ty.", "on.", "to.", "fr.", "la."}, + {"søndag", "måndag", "tysdag", "onsdag", "torsdag", "fredag", "laurdag"}, + {}, + {}, + {"f.m.", "e.m."}, +} + +var localeTableNnNO = [5][]string{ + {"sø.", "må.", "ty.", "on.", "to.", "fr.", "la."}, + {"søndag", "måndag", "tysdag", "onsdag", "torsdag", "fredag", "laurdag"}, + {}, + {}, + {"f.m.", "e.m."}, +} + +var localeTableNnh = [5][]string{ + {}, + {"lyɛʼɛ́ sẅíŋtè", "mvfò lyɛ̌ʼ", "mbɔ́ɔntè mvfò lyɛ̌ʼ", "tsètsɛ̀ɛ lyɛ̌ʼ", "mbɔ́ɔntè tsetsɛ̀ɛ lyɛ̌ʼ", "mvfò màga lyɛ̌ʼ", "màga lyɛ̌ʼ"}, + {}, + {"saŋ tsetsɛ̀ɛ lùm", "saŋ kàg ngwóŋ", "saŋ lepyè shúm", "saŋ cÿó", "saŋ tsɛ̀ɛ cÿó", "saŋ njÿoláʼ", "saŋ tyɛ̀b tyɛ̀b mbʉ̀ŋ", "saŋ mbʉ̀ŋ", "saŋ ngwɔ̀ʼ mbÿɛ", "saŋ tàŋa tsetsáʼ", "saŋ mejwoŋó", "saŋ lùm"}, + {"mbaʼámbaʼ", "ncwònzém"}, +} + +var localeTableNnhCM = [5][]string{ + {}, + {"lyɛʼɛ́ sẅíŋtè", "mvfò lyɛ̌ʼ", "mbɔ́ɔntè mvfò lyɛ̌ʼ", "tsètsɛ̀ɛ lyɛ̌ʼ", "mbɔ́ɔntè tsetsɛ̀ɛ lyɛ̌ʼ", "mvfò màga lyɛ̌ʼ", "màga lyɛ̌ʼ"}, + {}, + {"saŋ tsetsɛ̀ɛ lùm", "saŋ kàg ngwóŋ", "saŋ lepyè shúm", "saŋ cÿó", "saŋ tsɛ̀ɛ cÿó", "saŋ njÿoláʼ", "saŋ tyɛ̀b tyɛ̀b mbʉ̀ŋ", "saŋ mbʉ̀ŋ", "saŋ ngwɔ̀ʼ mbÿɛ", "saŋ tàŋa tsetsáʼ", "saŋ mejwoŋó", "saŋ lùm"}, + {"mbaʼámbaʼ", "ncwònzém"}, +} + +var localeTableNo = [5][]string{ + {"søn.", "man.", "tir.", "ons.", "tor.", "fre.", "lør."}, + {"søndag", "mandag", "tirsdag", "onsdag", "torsdag", "fredag", "lørdag"}, + {"jan.", "feb.", "mars", "apr.", "mai", "juni", "juli", "aug.", "sep.", "okt.", "nov.", "des."}, + {"januar", "februar", "mars", "april", "mai", "juni", "juli", "august", "september", "oktober", "november", "desember"}, + {"a.m.", "p.m."}, +} + +var localeTableNqo = [5][]string{ + {"ߞߊ߯ߙ", "ߞߐ߬ߓ", "ߞߐ߬ߟߏ߲", "ߞߎߣ", "ߓߌߟ", "ߛߌ߬ߣ", "ߞߍ߲ߘ"}, + {"ߞߊ߯ߙߌߟߏ߲", "ߞߐ߬ߓߊ߬ߟߏ߲", "ߞߐ߬ߟߏ߲", "ߞߎߣߎ߲ߟߏ߲", "ߓߌߟߏ߲", "ߛߌ߬ߣߌ߲߬ߟߏ߲", "ߞߍ߲ߘߍߟߏ߲"}, + {"ߓߌ߲ߠ", "ߞߏ߲ߞ", "ߕߙߊ", "ߞߏ߲ߘ", "ߘߓߊ߬ߕ", "ߥߊ߬ߛ", "ߞߊ߬ߙ", "ߘߓߊ߬ߓ", "ߕߎߟߊߝߌ߲", "ߞߏ߲ߓ", "ߣߍߣ", "ߞߏߟ"}, + {"ߓߌ߲ߠߊߥߎߟߋ߲", "ߞߏ߲ߞߏߜߍ", "ߕߙߊߓߊ", "ߞߏ߲ߞߏߘߌ߬ߓߌ", "ߘߓߊ߬ߕߊ", "ߥߊ߬ߛߌ߬ߥߙߊ", "ߞߊ߬ߙߌߝߐ߭", "ߘߓߊ߬ߓߌߟߊ", "ߕߎߟߊߝߌ߲", "ߞߏ߲ߓߌߕߌ߮", "ߣߍߣߍߓߊ", "ߞߏߟߌ߲ߞߏߟߌ߲"}, + {"ߛ", "ߥ"}, +} + +var localeTableNqoGN = [5][]string{ + {"ߞߊ߯ߙ", "ߞߐ߬ߓ", "ߞߐ߬ߟߏ߲", "ߞߎߣ", "ߓߌߟ", "ߛߌ߬ߣ", "ߞߍ߲ߘ"}, + {"ߞߊ߯ߙߌߟߏ߲", "ߞߐ߬ߓߊ߬ߟߏ߲", "ߞߐ߬ߟߏ߲", "ߞߎߣߎ߲ߟߏ߲", "ߓߌߟߏ߲", "ߛߌ߬ߣߌ߲߬ߟߏ߲", "ߞߍ߲ߘߍߟߏ߲"}, + {"ߓߌ߲ߠ", "ߞߏ߲ߞ", "ߕߙߊ", "ߞߏ߲ߘ", "ߘߓߊ߬ߕ", "ߥߊ߬ߛ", "ߞߊ߬ߙ", "ߘߓߊ߬ߓ", 
"ߕߎߟߊߝߌ߲", "ߞߏ߲ߓ", "ߣߍߣ", "ߞߏߟ"}, + {"ߓߌ߲ߠߊߥߎߟߋ߲", "ߞߏ߲ߞߏߜߍ", "ߕߙߊߓߊ", "ߞߏ߲ߞߏߘߌ߬ߓߌ", "ߘߓߊ߬ߕߊ", "ߥߊ߬ߛߌ߬ߥߙߊ", "ߞߊ߬ߙߌߝߐ߭", "ߘߓߊ߬ߓߌߟߊ", "ߕߎߟߊߝߌ߲", "ߞߏ߲ߓߌߕߌ߮", "ߣߍߣߍߓߊ", "ߞߏߟߌ߲ߞߏߟߌ߲"}, + {"ߛ", "ߥ"}, +} + +var localeTableNr = [5][]string{ + {"Son", "Mvu", "Bil", "Tha", "Ne", "Hla", "Gqi"}, + {"uSonto", "uMvulo", "uLesibili", "Lesithathu", "uLesine", "ngoLesihlanu", "umGqibelo"}, + {"Jan", "Feb", "Mat", "Apr", "Mey", "Jun", "Jul", "Arh", "Sep", "Okt", "Usi", "Dis"}, + {"Janabari", "uFeberbari", "uMatjhi", "u-Apreli", "Meyi", "Juni", "Julayi", "Arhostosi", "Septemba", "Oktoba", "Usinyikhaba", "Disemba"}, + {}, +} + +var localeTableNrZA = [5][]string{ + {"Son", "Mvu", "Bil", "Tha", "Ne", "Hla", "Gqi"}, + {"uSonto", "uMvulo", "uLesibili", "Lesithathu", "uLesine", "ngoLesihlanu", "umGqibelo"}, + {"Jan", "Feb", "Mat", "Apr", "Mey", "Jun", "Jul", "Arh", "Sep", "Okt", "Usi", "Dis"}, + {"Janabari", "uFeberbari", "uMatjhi", "u-Apreli", "Meyi", "Juni", "Julayi", "Arhostosi", "Septemba", "Oktoba", "Usinyikhaba", "Disemba"}, + {}, +} + +var localeTableNso = [5][]string{ + {"Lam", "Mos", "Bed", "Rar", "Ne", "Hla", "Mok"}, + {"Lamorena", "Musopologo", "Labobedi", "Laboraro", "Labone", "Labohlano", "Mokibelo"}, + {"Phere", "Dibo", "Hlak", "Mora", "Mopi", "Phupu", "Mose", "Phato", "Lewe", "Dipha", "Diba", "Manth"}, + {"Pherekgong", "Dibokwane", "Hlakola", "Moranang", "Mopitlo", "Phupu", "Mosegemanye", "Phato", "Lewedi", "Diphalane", "Dibatsela", "Manthole"}, + {"a", "p"}, +} + +var localeTableNsoZA = [5][]string{ + {"Lam", "Mos", "Bed", "Rar", "Ne", "Hla", "Mok"}, + {"Lamorena", "Musopologo", "Labobedi", "Laboraro", "Labone", "Labohlano", "Mokibelo"}, + {"Phere", "Dibo", "Hlak", "Mora", "Mopi", "Phupu", "Mose", "Phato", "Lewe", "Dipha", "Diba", "Manth"}, + {"Pherekgong", "Dibokwane", "Hlakola", "Moranang", "Mopitlo", "Phupu", "Mosegemanye", "Phato", "Lewedi", "Diphalane", "Dibatsela", "Manthole"}, + {"a", "p"}, +} + +var localeTableNus = [5][]string{ + {"Cäŋ", "Jiec", "Rɛw", "Diɔ̱k", "Ŋuaan", "Dhieec", "Bäkɛl"}, + {"Cäŋ kuɔth", "Jiec la̱t", "Rɛw lätni", "Diɔ̱k lätni", "Ŋuaan lätni", "Dhieec lätni", "Bäkɛl lätni"}, + {"Tiop", "Pɛt", "Duɔ̱ɔ̱", "Guak", "Duä", "Kor", "Pay", "Thoo", "Tɛɛ", "Laa", "Kur", "Tid"}, + {"Tiop thar pɛt", "Pɛt", "Duɔ̱ɔ̱ŋ", "Guak", "Duät", "Kornyoot", "Pay yie̱tni", "Tho̱o̱r", "Tɛɛr", "Laath", "Kur", "Tio̱p in di̱i̱t"}, + {"RW", "TŊ"}, +} + +var localeTableNusSS = [5][]string{ + {"Cäŋ", "Jiec", "Rɛw", "Diɔ̱k", "Ŋuaan", "Dhieec", "Bäkɛl"}, + {"Cäŋ kuɔth", "Jiec la̱t", "Rɛw lätni", "Diɔ̱k lätni", "Ŋuaan lätni", "Dhieec lätni", "Bäkɛl lätni"}, + {"Tiop", "Pɛt", "Duɔ̱ɔ̱", "Guak", "Duä", "Kor", "Pay", "Thoo", "Tɛɛ", "Laa", "Kur", "Tid"}, + {"Tiop thar pɛt", "Pɛt", "Duɔ̱ɔ̱ŋ", "Guak", "Duät", "Kornyoot", "Pay yie̱tni", "Tho̱o̱r", "Tɛɛr", "Laath", "Kur", "Tio̱p in di̱i̱t"}, + {"RW", "TŊ"}, +} + +var localeTableNy = [5][]string{ + {"Mul", "Lem", "Wir", "Tat", "Nai", "San", "Wer"}, + {"Lamulungu", "Lolemba", "Lachiwiri", "Lachitatu", "Lachinayi", "Lachisanu", "Loweruka"}, + {"Jan", "Feb", "Mal", "Epu", "Mei", "Jun", "Jul", "Oga", "Sep", "Oku", "Nov", "Dis"}, + {"Januwale", "Febuluwale", "Malichi", "Epulo", "Mei", "Juni", "Julai", "Ogasiti", "Seputemba", "Okutoba", "Novemba", "Disemba"}, + {}, +} + +var localeTableNyMW = [5][]string{ + {"Mul", "Lem", "Wir", "Tat", "Nai", "San", "Wer"}, + {"Lamulungu", "Lolemba", "Lachiwiri", "Lachitatu", "Lachinayi", "Lachisanu", "Loweruka"}, + {"Jan", "Feb", "Mal", "Epu", "Mei", "Jun", "Jul", "Oga", "Sep", "Oku", "Nov", "Dis"}, + {"Januwale", "Febuluwale", "Malichi", 
"Epulo", "Mei", "Juni", "Julai", "Ogasiti", "Seputemba", "Okutoba", "Novemba", "Disemba"}, + {}, +} + +var localeTableNyn = [5][]string{ + {"SAN", "ORK", "OKB", "OKS", "OKN", "OKT", "OMK"}, + {"Sande", "Orwokubanza", "Orwakabiri", "Orwakashatu", "Orwakana", "Orwakataano", "Orwamukaaga"}, + {"KBZ", "KBR", "KST", "KKN", "KTN", "KMK", "KMS", "KMN", "KMW", "KKM", "KNK", "KNB"}, + {"Okwokubanza", "Okwakabiri", "Okwakashatu", "Okwakana", "Okwakataana", "Okwamukaaga", "Okwamushanju", "Okwamunaana", "Okwamwenda", "Okwaikumi", "Okwaikumi na kumwe", "Okwaikumi na ibiri"}, + {}, +} + +var localeTableNynUG = [5][]string{ + {"SAN", "ORK", "OKB", "OKS", "OKN", "OKT", "OMK"}, + {"Sande", "Orwokubanza", "Orwakabiri", "Orwakashatu", "Orwakana", "Orwakataano", "Orwamukaaga"}, + {"KBZ", "KBR", "KST", "KKN", "KTN", "KMK", "KMS", "KMN", "KMW", "KKM", "KNK", "KNB"}, + {"Okwokubanza", "Okwakabiri", "Okwakashatu", "Okwakana", "Okwakataana", "Okwamukaaga", "Okwamushanju", "Okwamunaana", "Okwamwenda", "Okwaikumi", "Okwaikumi na kumwe", "Okwaikumi na ibiri"}, + {}, +} + +var localeTableOc = [5][]string{ + {}, + {"dimenge", "diluns", "dimars", "dimècres", "dijòus", "divendres", "dissabte"}, + {"gen.", "feb.", "març", "abr.", "mai", "junh", "jul.", "ago.", "set.", "oct.", "nov.", "dec."}, + {"de genièr", "de febrièr", "de març", "d’abril", "de mai", "de junh", "de julhet", "d’agost", "de setembre", "d’octòbre", "de novembre", "de decembre"}, + {}, +} + +var localeTableOcES = [5][]string{ + {"dim", "del", "dma", "dmè", "dij", "diu", "dis"}, + {"dimenge", "deluns", "dimars", "dimèrcles", "dijaus", "diuendres", "dissabte"}, + {"gèr", "her", "mar", "abr", "mai", "jun", "jur", "ago", "set", "oct", "nov", "dec"}, + {"gèr", "hereuèr", "març", "abriu", "mai", "junh", "juriòl", "agost", "seteme", "octobre", "noveme", "deseme"}, + {"a.m.", "p.m."}, +} + +var localeTableOcFR = [5][]string{ + {}, + {"dimenge", "diluns", "dimars", "dimècres", "dijòus", "divendres", "dissabte"}, + {"gen.", "feb.", "març", "abr.", "mai", "junh", "jul.", "ago.", "set.", "oct.", "nov.", "dec."}, + {"de genièr", "de febrièr", "de març", "d’abril", "de mai", "de junh", "de julhet", "d’agost", "de setembre", "d’octòbre", "de novembre", "de decembre"}, + {}, +} + +var localeTableOm = [5][]string{ + {"Dil", "Wix", "Qib", "Rob", "Kam", "Jim", "San"}, + {"Dilbata", "Wiixata", "Qibxata", "Roobii", "Kamiisa", "Jimaata", "Sanbata"}, + {"Ama", "Gur", "Bit", "Elb", "Cam", "Wax", "Ado", "Hag", "Ful", "Onk", "Sad", "Mud"}, + {"Amajjii", "Guraandhala", "Bitooteessa", "Elba", "Caamsa", "Waxabajjii", "Adooleessa", "Hagayya", "Fuulbana", "Onkololeessa", "Sadaasa", "Muddee"}, + {"WD", "WB"}, +} + +var localeTableOmET = [5][]string{ + {"Dil", "Wix", "Qib", "Rob", "Kam", "Jim", "San"}, + {"Dilbata", "Wiixata", "Qibxata", "Roobii", "Kamiisa", "Jimaata", "Sanbata"}, + {"Ama", "Gur", "Bit", "Elb", "Cam", "Wax", "Ado", "Hag", "Ful", "Onk", "Sad", "Mud"}, + {"Amajjii", "Guraandhala", "Bitooteessa", "Elba", "Caamsa", "Waxabajjii", "Adooleessa", "Hagayya", "Fuulbana", "Onkololeessa", "Sadaasa", "Muddee"}, + {"WD", "WB"}, +} + +var localeTableOmKE = [5][]string{ + {"Dil", "Wix", "Qib", "Rob", "Kam", "Jim", "San"}, + {"Dilbata", "Wiixata", "Qibxata", "Roobii", "Kamiisa", "Jimaata", "Sanbata"}, + {"Ama", "Gur", "Bit", "Elb", "Cam", "Wax", "Ado", "Hag", "Ful", "Onk", "Sad", "Mud"}, + {"Amajjii", "Guraandhala", "Bitooteessa", "Elba", "Caamsa", "Waxabajjii", "Adooleessa", "Hagayya", "Fuulbana", "Onkololeessa", "Sadaasa", "Muddee"}, + {"WD", "WB"}, +} + +var localeTableOr = 
[5][]string{ + {"ରବି", "ସୋମ", "ମଙ୍ଗଳ", "ବୁଧ", "ଗୁରୁ", "ଶୁକ୍ର", "ଶନି"}, + {"ରବିବାର", "ସୋମବାର", "ମଙ୍ଗଳବାର", "ବୁଧବାର", "ଗୁରୁବାର", "ଶୁକ୍ରବାର", "ଶନିବାର"}, + {}, + {"ଜାନୁଆରୀ", "ଫେବୃଆରୀ", "ମାର୍ଚ୍ଚ", "ଅପ୍ରେଲ", "ମଇ", "ଜୁନ", "ଜୁଲାଇ", "ଅଗଷ୍ଟ", "ସେପ୍ଟେମ୍ବର", "ଅକ୍ଟୋବର", "ନଭେମ୍ବର", "ଡିସେମ୍ବର"}, + {"ପୂ", "ଅ"}, +} + +var localeTableOrIN = [5][]string{ + {"ରବି", "ସୋମ", "ମଙ୍ଗଳ", "ବୁଧ", "ଗୁରୁ", "ଶୁକ୍ର", "ଶନି"}, + {"ରବିବାର", "ସୋମବାର", "ମଙ୍ଗଳବାର", "ବୁଧବାର", "ଗୁରୁବାର", "ଶୁକ୍ରବାର", "ଶନିବାର"}, + {}, + {"ଜାନୁଆରୀ", "ଫେବୃଆରୀ", "ମାର୍ଚ୍ଚ", "ଅପ୍ରେଲ", "ମଇ", "ଜୁନ", "ଜୁଲାଇ", "ଅଗଷ୍ଟ", "ସେପ୍ଟେମ୍ବର", "ଅକ୍ଟୋବର", "ନଭେମ୍ବର", "ଡିସେମ୍ବର"}, + {"ପୂ", "ଅ"}, +} + +var localeTableOs = [5][]string{ + {"хцб", "крс", "дцг", "ӕрт", "цпр", "мрб", "сбт"}, + {"хуыцаубон", "къуырисӕр", "дыццӕг", "ӕртыццӕг", "цыппӕрӕм", "майрӕмбон", "сабат"}, + {"янв.", "фев.", "мар.", "апр.", "майы", "июны", "июлы", "авг.", "сен.", "окт.", "ноя.", "дек."}, + {"январы", "февралы", "мартъийы", "апрелы", "майы", "июны", "июлы", "августы", "сентябры", "октябры", "ноябры", "декабры"}, + {}, +} + +var localeTableOsGE = [5][]string{ + {"хцб", "крс", "дцг", "ӕрт", "цпр", "мрб", "сбт"}, + {"хуыцаубон", "къуырисӕр", "дыццӕг", "ӕртыццӕг", "цыппӕрӕм", "майрӕмбон", "сабат"}, + {"янв.", "фев.", "мар.", "апр.", "майы", "июны", "июлы", "авг.", "сен.", "окт.", "ноя.", "дек."}, + {"январы", "февралы", "мартъийы", "апрелы", "майы", "июны", "июлы", "августы", "сентябры", "октябры", "ноябры", "декабры"}, + {}, +} + +var localeTableOsRU = [5][]string{ + {"хцб", "крс", "дцг", "ӕрт", "цпр", "мрб", "сбт"}, + {"хуыцаубон", "къуырисӕр", "дыццӕг", "ӕртыццӕг", "цыппӕрӕм", "майрӕмбон", "сабат"}, + {"янв.", "фев.", "мар.", "апр.", "майы", "июны", "июлы", "авг.", "сен.", "окт.", "ноя.", "дек."}, + {"январы", "февралы", "мартъийы", "апрелы", "майы", "июны", "июлы", "августы", "сентябры", "октябры", "ноябры", "декабры"}, + {}, +} + +var localeTableOsa = [5][]string{ + {}, + {"𐒹𐓘͘𐓬𐓘 𐓏𐓘𐓤𐓘͘𐓰𐓘𐓤𐓣", "𐒹𐓘͘𐓬𐓘 𐓄𐓘𐓡𐓛͘𐓧𐓣", "𐒹𐓘͘𐓬𐓘 𐓏𐓟𐓵𐓪͘𐓬𐓘", "𐒹𐓘͘𐓬𐓘 𐓏𐓟𐓵𐓘𐓜𐓣", "𐒹𐓘͘𐓬𐓘 𐓏𐓟𐓰𐓪𐓬𐓘", "𐒹𐓘͘𐓬𐓘 𐓈𐓘 𐓵𐓘𐓲𐓘 𐓻𐓣͘", "𐒹𐓘͘𐓬𐓘 𐓂𐓤𐓘𐓸𐓟 𐓣͘𐓤𐓟"}, + {"𐓄𐓘𐓡𐓛͘𐓧𐓟", "𐓵𐓪͘𐓬𐓘", "𐓵𐓘𐓜𐓣", "𐓰𐓪𐓬𐓘", "𐓮𐓘𐓰𐓘", "𐓯𐓘𐓬𐓟", "𐓄𐓟𐓵𐓪͘𐓬𐓘", "𐒼𐓣𐓟𐓰𐓪𐓬𐓘", "𐒿𐓟𐓜𐓛𐓲𐓟𐓷𐓣͘𐓤𐓟", "𐒿𐓟𐓜𐓛", "𐒰𐓧𐓣 𐓏𐓣͘𐓸𐓲𐓣", "𐒰𐓧𐓣 𐓍𐓪͘𐓬𐓘"}, + {"𐓀𐓣͘𐓪͘𐓬𐓘 𐓄𐓘𐓡𐓛͘𐓧𐓟", "𐓀𐓣͘𐓪͘𐓬𐓘 𐓏𐓟𐓵𐓪͘𐓬𐓘", "𐓀𐓣͘𐓪͘𐓬𐓘 𐓏𐓟𐓵𐓘𐓜𐓣", "𐓀𐓣͘𐓪͘𐓬𐓘 𐓏𐓟𐓰𐓪𐓬𐓘", "𐓀𐓣͘𐓪͘𐓬𐓘 𐓏𐓟𐓮𐓘𐓰𐓘", "𐓀𐓣͘𐓪͘𐓬𐓘 𐓏𐓟𐓯𐓘𐓬𐓟", "𐓀𐓣͘𐓪͘𐓬𐓘 𐓄𐓟𐓵𐓪͘𐓬𐓘", "𐓀𐓣͘𐓪͘𐓬𐓘 𐒼𐓣𐓟𐓰𐓪𐓬𐓘", "𐓀𐓣͘𐓪͘𐓬𐓘 𐒿𐓟𐓜𐓛𐓲𐓟𐓷𐓣͘𐓤𐓟", "𐓀𐓣͘𐓪͘𐓬𐓘 𐒿𐓟𐓜𐓛", "𐓀𐓣͘𐓪͘𐓬𐓘 𐒰𐓧𐓣 𐓏𐓣͘𐓸𐓲𐓣", "𐓀𐓣͘𐓪͘𐓬𐓘 𐒰𐓧𐓣 𐓍𐓪͘𐓬𐓘"}, + {}, +} + +var localeTableOsaUS = [5][]string{ + {}, + {"𐒹𐓘͘𐓬𐓘 𐓏𐓘𐓤𐓘͘𐓰𐓘𐓤𐓣", "𐒹𐓘͘𐓬𐓘 𐓄𐓘𐓡𐓛͘𐓧𐓣", "𐒹𐓘͘𐓬𐓘 𐓏𐓟𐓵𐓪͘𐓬𐓘", "𐒹𐓘͘𐓬𐓘 𐓏𐓟𐓵𐓘𐓜𐓣", "𐒹𐓘͘𐓬𐓘 𐓏𐓟𐓰𐓪𐓬𐓘", "𐒹𐓘͘𐓬𐓘 𐓈𐓘 𐓵𐓘𐓲𐓘 𐓻𐓣͘", "𐒹𐓘͘𐓬𐓘 𐓂𐓤𐓘𐓸𐓟 𐓣͘𐓤𐓟"}, + {"𐓄𐓘𐓡𐓛͘𐓧𐓟", "𐓵𐓪͘𐓬𐓘", "𐓵𐓘𐓜𐓣", "𐓰𐓪𐓬𐓘", "𐓮𐓘𐓰𐓘", "𐓯𐓘𐓬𐓟", "𐓄𐓟𐓵𐓪͘𐓬𐓘", "𐒼𐓣𐓟𐓰𐓪𐓬𐓘", "𐒿𐓟𐓜𐓛𐓲𐓟𐓷𐓣͘𐓤𐓟", "𐒿𐓟𐓜𐓛", "𐒰𐓧𐓣 𐓏𐓣͘𐓸𐓲𐓣", "𐒰𐓧𐓣 𐓍𐓪͘𐓬𐓘"}, + {"𐓀𐓣͘𐓪͘𐓬𐓘 𐓄𐓘𐓡𐓛͘𐓧𐓟", "𐓀𐓣͘𐓪͘𐓬𐓘 𐓏𐓟𐓵𐓪͘𐓬𐓘", "𐓀𐓣͘𐓪͘𐓬𐓘 𐓏𐓟𐓵𐓘𐓜𐓣", "𐓀𐓣͘𐓪͘𐓬𐓘 𐓏𐓟𐓰𐓪𐓬𐓘", "𐓀𐓣͘𐓪͘𐓬𐓘 𐓏𐓟𐓮𐓘𐓰𐓘", "𐓀𐓣͘𐓪͘𐓬𐓘 𐓏𐓟𐓯𐓘𐓬𐓟", "𐓀𐓣͘𐓪͘𐓬𐓘 𐓄𐓟𐓵𐓪͘𐓬𐓘", "𐓀𐓣͘𐓪͘𐓬𐓘 𐒼𐓣𐓟𐓰𐓪𐓬𐓘", "𐓀𐓣͘𐓪͘𐓬𐓘 𐒿𐓟𐓜𐓛𐓲𐓟𐓷𐓣͘𐓤𐓟", "𐓀𐓣͘𐓪͘𐓬𐓘 𐒿𐓟𐓜𐓛", "𐓀𐓣͘𐓪͘𐓬𐓘 𐒰𐓧𐓣 𐓏𐓣͘𐓸𐓲𐓣", "𐓀𐓣͘𐓪͘𐓬𐓘 𐒰𐓧𐓣 𐓍𐓪͘𐓬𐓘"}, + {}, +} + +var localeTablePa = [5][]string{ + {"ਐਤ", "ਸੋਮ", "ਮੰਗਲ", "ਬੁੱਧ", "ਵੀਰ", "ਸ਼ੁੱਕਰ", "ਸ਼ਨਿੱਚਰ"}, + {"ਐਤਵਾਰ", "ਸੋਮਵਾਰ", "ਮੰਗਲਵਾਰ", "ਬੁੱਧਵਾਰ", "ਵੀਰਵਾਰ", "ਸ਼ੁੱਕਰਵਾਰ", "ਸ਼ਨਿੱਚਰਵਾਰ"}, + {"ਜਨ", "ਫ਼ਰ", "ਮਾਰਚ", "ਅਪ੍ਰੈ", "ਮਈ", "ਜੂਨ", "ਜੁਲਾ", "ਅਗ", "ਸਤੰ", "ਅਕਤੂ", "ਨਵੰ", "ਦਸੰ"}, + {"ਜਨਵਰੀ", "ਫ਼ਰਵਰੀ", "ਮਾਰਚ", "ਅਪ੍ਰੈਲ", "ਮਈ", "ਜੂਨ", "ਜੁਲਾਈ", "ਅਗਸਤ", "ਸਤੰਬਰ", "ਅਕਤੂਬਰ", "ਨਵੰਬਰ", "ਦਸੰਬਰ"}, + {"ਪੂ.ਦੁ.", "ਬਾ.ਦੁ."}, +} + +var localeTablePaArab = [5][]string{ + {}, + {"اتوار", "پیر", "منگل", "بُدھ", "جمعرات", "جمعہ", "ہفتہ"}, + {}, + {"جنوری", "فروری", "مارچ", "اپریل", "مئ", "جون", "جولائی", "اگست", "ستمبر", "اکتوبر", 
"نومبر", "دسمبر"}, + {}, +} + +var localeTablePaArabPK = [5][]string{ + {}, + {"اتوار", "پیر", "منگل", "بُدھ", "جمعرات", "جمعہ", "ہفتہ"}, + {}, + {"جنوری", "فروری", "مارچ", "اپریل", "مئ", "جون", "جولائی", "اگست", "ستمبر", "اکتوبر", "نومبر", "دسمبر"}, + {}, +} + +var localeTablePaGuru = [5][]string{ + {"ਐਤ", "ਸੋਮ", "ਮੰਗਲ", "ਬੁੱਧ", "ਵੀਰ", "ਸ਼ੁੱਕਰ", "ਸ਼ਨਿੱਚਰ"}, + {"ਐਤਵਾਰ", "ਸੋਮਵਾਰ", "ਮੰਗਲਵਾਰ", "ਬੁੱਧਵਾਰ", "ਵੀਰਵਾਰ", "ਸ਼ੁੱਕਰਵਾਰ", "ਸ਼ਨਿੱਚਰਵਾਰ"}, + {"ਜਨ", "ਫ਼ਰ", "ਮਾਰਚ", "ਅਪ੍ਰੈ", "ਮਈ", "ਜੂਨ", "ਜੁਲਾ", "ਅਗ", "ਸਤੰ", "ਅਕਤੂ", "ਨਵੰ", "ਦਸੰ"}, + {"ਜਨਵਰੀ", "ਫ਼ਰਵਰੀ", "ਮਾਰਚ", "ਅਪ੍ਰੈਲ", "ਮਈ", "ਜੂਨ", "ਜੁਲਾਈ", "ਅਗਸਤ", "ਸਤੰਬਰ", "ਅਕਤੂਬਰ", "ਨਵੰਬਰ", "ਦਸੰਬਰ"}, + {"ਪੂ.ਦੁ.", "ਬਾ.ਦੁ."}, +} + +var localeTablePaGuruIN = [5][]string{ + {"ਐਤ", "ਸੋਮ", "ਮੰਗਲ", "ਬੁੱਧ", "ਵੀਰ", "ਸ਼ੁੱਕਰ", "ਸ਼ਨਿੱਚਰ"}, + {"ਐਤਵਾਰ", "ਸੋਮਵਾਰ", "ਮੰਗਲਵਾਰ", "ਬੁੱਧਵਾਰ", "ਵੀਰਵਾਰ", "ਸ਼ੁੱਕਰਵਾਰ", "ਸ਼ਨਿੱਚਰਵਾਰ"}, + {"ਜਨ", "ਫ਼ਰ", "ਮਾਰਚ", "ਅਪ੍ਰੈ", "ਮਈ", "ਜੂਨ", "ਜੁਲਾ", "ਅਗ", "ਸਤੰ", "ਅਕਤੂ", "ਨਵੰ", "ਦਸੰ"}, + {"ਜਨਵਰੀ", "ਫ਼ਰਵਰੀ", "ਮਾਰਚ", "ਅਪ੍ਰੈਲ", "ਮਈ", "ਜੂਨ", "ਜੁਲਾਈ", "ਅਗਸਤ", "ਸਤੰਬਰ", "ਅਕਤੂਬਰ", "ਨਵੰਬਰ", "ਦਸੰਬਰ"}, + {"ਪੂ.ਦੁ.", "ਬਾ.ਦੁ."}, +} + +var localeTablePap = [5][]string{ + {}, + {"djadumingu", "djaluna", "djamars", "djarason", "djaweps", "djabièrnè", "djasabra"}, + {"Yan", "Feb", "Mar", "Apr", "Mei", "Yün", "Yül", "Oug", "Sèp", "Òkt", "Nov", "Des"}, + {"Yanüari", "Febrüari", "Mart", "Aprel", "Mei", "Yüni", "Yüli", "Ougùstùs", "Sèptèmber", "Òktober", "Novèmber", "Desèmber"}, + {}, +} + +var localeTablePapAW = [5][]string{ + {}, + {"djadumingu", "djaluna", "djamars", "djarason", "djaweps", "djabièrnè", "djasabra"}, + {"Yan", "Feb", "Mar", "Apr", "Mei", "Yün", "Yül", "Oug", "Sèp", "Òkt", "Nov", "Des"}, + {"Yanüari", "Febrüari", "Mart", "Aprel", "Mei", "Yüni", "Yüli", "Ougùstùs", "Sèptèmber", "Òktober", "Novèmber", "Desèmber"}, + {}, +} + +var localeTablePapCW = [5][]string{ + {}, + {"djadumingu", "djaluna", "djamars", "djarason", "djaweps", "djabièrnè", "djasabra"}, + {"Yan", "Feb", "Mar", "Apr", "Mei", "Yün", "Yül", "Oug", "Sèp", "Òkt", "Nov", "Des"}, + {"Yanüari", "Febrüari", "Mart", "Aprel", "Mei", "Yüni", "Yüli", "Ougùstùs", "Sèptèmber", "Òktober", "Novèmber", "Desèmber"}, + {}, +} + +var localeTablePcm = [5][]string{ + {"Sọ́n", "Mọ́n", "Tiú", "Wẹ́n", "Tọ́z", "Fraí", "Sát"}, + {"Sọ́ndè", "Mọ́ndè", "Tiúzdè", "Wẹ́nẹ́zdè", "Tọ́zdè", "Fraídè", "Sátọdè"}, + {"Jén", "Fẹ́b", "Mach", "Épr", "Mee", "Jun", "Jul", "Ọgọ", "Sẹp", "Ọkt", "Nọv", "Dis"}, + {"Jénúári", "Fẹ́búári", "Mach", "Éprel", "Mee", "Jun", "Julai", "Ọgọst", "Sẹptẹ́mba", "Ọktóba", "Nọvẹ́mba", "Disẹ́mba"}, + {"FM", "FI"}, +} + +var localeTablePcmNG = [5][]string{ + {"Sọ́n", "Mọ́n", "Tiú", "Wẹ́n", "Tọ́z", "Fraí", "Sát"}, + {"Sọ́ndè", "Mọ́ndè", "Tiúzdè", "Wẹ́nẹ́zdè", "Tọ́zdè", "Fraídè", "Sátọdè"}, + {"Jén", "Fẹ́b", "Mach", "Épr", "Mee", "Jun", "Jul", "Ọgọ", "Sẹp", "Ọkt", "Nọv", "Dis"}, + {"Jénúári", "Fẹ́búári", "Mach", "Éprel", "Mee", "Jun", "Julai", "Ọgọst", "Sẹptẹ́mba", "Ọktóba", "Nọvẹ́mba", "Disẹ́mba"}, + {"FM", "FI"}, +} + +var localeTablePis = [5][]string{ + {}, + {"Sande", "Mande", "Tiusde", "Wenesde", "Tosde", "Fraede", "Satade"}, + {}, + {"Januare", "Febuare", "Mas", "Eprel", "Mei", "Jun", "Julae", "Ogus", "Septemba", "Oktoba", "Novemba", "Disemba"}, + {}, +} + +var localeTablePisSB = [5][]string{ + {}, + {"Sande", "Mande", "Tiusde", "Wenesde", "Tosde", "Fraede", "Satade"}, + {}, + {"Januare", "Febuare", "Mas", "Eprel", "Mei", "Jun", "Julae", "Ogus", "Septemba", "Oktoba", "Novemba", "Disemba"}, + {}, +} + +var localeTablePl = [5][]string{ + {"niedz.", "pon.", "wt.", "śr.", "czw.", "pt.", "sob."}, + 
{"niedziela", "poniedziałek", "wtorek", "środa", "czwartek", "piątek", "sobota"}, + {"sty", "lut", "mar", "kwi", "maj", "cze", "lip", "sie", "wrz", "paź", "lis", "gru"}, + {"stycznia", "lutego", "marca", "kwietnia", "maja", "czerwca", "lipca", "sierpnia", "września", "października", "listopada", "grudnia"}, + {"a", "p"}, +} + +var localeTablePlPL = [5][]string{ + {"niedz.", "pon.", "wt.", "śr.", "czw.", "pt.", "sob."}, + {"niedziela", "poniedziałek", "wtorek", "środa", "czwartek", "piątek", "sobota"}, + {"sty", "lut", "mar", "kwi", "maj", "cze", "lip", "sie", "wrz", "paź", "lis", "gru"}, + {"stycznia", "lutego", "marca", "kwietnia", "maja", "czerwca", "lipca", "sierpnia", "września", "października", "listopada", "grudnia"}, + {"a", "p"}, +} + +var localeTablePrg = [5][]string{ + {"nad", "pan", "wis", "pus", "ket", "pēn", "sab"}, + {"nadīli", "panadīli", "wisasīdis", "pussisawaiti", "ketwirtiks", "pēntniks", "sabattika"}, + {"rag", "was", "pūl", "sak", "zal", "sīm", "līp", "dag", "sil", "spa", "lap", "sal"}, + {"rags", "wassarins", "pūlis", "sakkis", "zallaws", "sīmenis", "līpa", "daggis", "sillins", "spallins", "lapkrūtis", "sallaws"}, + {}, +} + +var localeTablePrgPL = [5][]string{ + {"nad", "pan", "wis", "pus", "ket", "pēn", "sab"}, + {"nadīli", "panadīli", "wisasīdis", "pussisawaiti", "ketwirtiks", "pēntniks", "sabattika"}, + {"rag", "was", "pūl", "sak", "zal", "sīm", "līp", "dag", "sil", "spa", "lap", "sal"}, + {"rags", "wassarins", "pūlis", "sakkis", "zallaws", "sīmenis", "līpa", "daggis", "sillins", "spallins", "lapkrūtis", "sallaws"}, + {}, +} + +var localeTablePs = [5][]string{ + {}, + {"يونۍ", "دونۍ", "درېنۍ", "څلرنۍ", "پينځنۍ", "جمعه", "اونۍ"}, + {}, + {"جنوري", "فبروري", "مارچ", "اپریل", "مۍ", "جون", "جولای", "اګست", "سېپتمبر", "اکتوبر", "نومبر", "دسمبر"}, + {"غ.م.", "غ.و."}, +} + +var localeTablePsAF = [5][]string{ + {}, + {"يونۍ", "دونۍ", "درېنۍ", "څلرنۍ", "پينځنۍ", "جمعه", "اونۍ"}, + {}, + {"جنوري", "فبروري", "مارچ", "اپریل", "مۍ", "جون", "جولای", "اګست", "سېپتمبر", "اکتوبر", "نومبر", "دسمبر"}, + {"غ.م.", "غ.و."}, +} + +var localeTablePsPK = [5][]string{ + {}, + {"يونۍ", "دونۍ", "درېنۍ", "څلرنۍ", "پينځنۍ", "جمعه", "اونۍ"}, + {}, + {"جنوري", "فبروري", "مارچ", "اپریل", "مۍ", "جون", "جولای", "اګست", "سېپتمبر", "اکتوبر", "نومبر", "دسمبر"}, + {"غ.م.", "غ.و."}, +} + +var localeTablePt = [5][]string{ + {"dom.", "seg.", "ter.", "qua.", "qui.", "sex.", "sáb."}, + {"domingo", "segunda-feira", "terça-feira", "quarta-feira", "quinta-feira", "sexta-feira", "sábado"}, + {"jan.", "fev.", "mar.", "abr.", "mai.", "jun.", "jul.", "ago.", "set.", "out.", "nov.", "dez."}, + {"janeiro", "fevereiro", "março", "abril", "maio", "junho", "julho", "agosto", "setembro", "outubro", "novembro", "dezembro"}, + {}, +} + +var localeTablePtAO = [5][]string{ + {"domingo", "segunda", "terça", "quarta", "quinta", "sexta", "sábado"}, + {"domingo", "segunda-feira", "terça-feira", "quarta-feira", "quinta-feira", "sexta-feira", "sábado"}, + {"jan.", "fev.", "mar.", "abr.", "mai.", "jun.", "jul.", "ago.", "set.", "out.", "nov.", "dez."}, + {"janeiro", "fevereiro", "março", "abril", "maio", "junho", "julho", "agosto", "setembro", "outubro", "novembro", "dezembro"}, + {"a.m.", "p.m."}, +} + +var localeTablePtBR = [5][]string{ + {"dom.", "seg.", "ter.", "qua.", "qui.", "sex.", "sáb."}, + {"domingo", "segunda-feira", "terça-feira", "quarta-feira", "quinta-feira", "sexta-feira", "sábado"}, + {"jan.", "fev.", "mar.", "abr.", "mai.", "jun.", "jul.", "ago.", "set.", "out.", "nov.", "dez."}, + {"janeiro", "fevereiro", 
"março", "abril", "maio", "junho", "julho", "agosto", "setembro", "outubro", "novembro", "dezembro"}, + {}, +} + +var localeTablePtCH = [5][]string{ + {"domingo", "segunda", "terça", "quarta", "quinta", "sexta", "sábado"}, + {"domingo", "segunda-feira", "terça-feira", "quarta-feira", "quinta-feira", "sexta-feira", "sábado"}, + {"jan.", "fev.", "mar.", "abr.", "mai.", "jun.", "jul.", "ago.", "set.", "out.", "nov.", "dez."}, + {"janeiro", "fevereiro", "março", "abril", "maio", "junho", "julho", "agosto", "setembro", "outubro", "novembro", "dezembro"}, + {"a.m.", "p.m."}, +} + +var localeTablePtCV = [5][]string{ + {"domingo", "segunda", "terça", "quarta", "quinta", "sexta", "sábado"}, + {"domingo", "segunda-feira", "terça-feira", "quarta-feira", "quinta-feira", "sexta-feira", "sábado"}, + {"jan.", "fev.", "mar.", "abr.", "mai.", "jun.", "jul.", "ago.", "set.", "out.", "nov.", "dez."}, + {"janeiro", "fevereiro", "março", "abril", "maio", "junho", "julho", "agosto", "setembro", "outubro", "novembro", "dezembro"}, + {"a.m.", "p.m."}, +} + +var localeTablePtGQ = [5][]string{ + {"domingo", "segunda", "terça", "quarta", "quinta", "sexta", "sábado"}, + {"domingo", "segunda-feira", "terça-feira", "quarta-feira", "quinta-feira", "sexta-feira", "sábado"}, + {"jan.", "fev.", "mar.", "abr.", "mai.", "jun.", "jul.", "ago.", "set.", "out.", "nov.", "dez."}, + {"janeiro", "fevereiro", "março", "abril", "maio", "junho", "julho", "agosto", "setembro", "outubro", "novembro", "dezembro"}, + {"a.m.", "p.m."}, +} + +var localeTablePtGW = [5][]string{ + {"domingo", "segunda", "terça", "quarta", "quinta", "sexta", "sábado"}, + {"domingo", "segunda-feira", "terça-feira", "quarta-feira", "quinta-feira", "sexta-feira", "sábado"}, + {"jan.", "fev.", "mar.", "abr.", "mai.", "jun.", "jul.", "ago.", "set.", "out.", "nov.", "dez."}, + {"janeiro", "fevereiro", "março", "abril", "maio", "junho", "julho", "agosto", "setembro", "outubro", "novembro", "dezembro"}, + {"a.m.", "p.m."}, +} + +var localeTablePtLU = [5][]string{ + {"domingo", "segunda", "terça", "quarta", "quinta", "sexta", "sábado"}, + {"domingo", "segunda-feira", "terça-feira", "quarta-feira", "quinta-feira", "sexta-feira", "sábado"}, + {"jan.", "fev.", "mar.", "abr.", "mai.", "jun.", "jul.", "ago.", "set.", "out.", "nov.", "dez."}, + {"janeiro", "fevereiro", "março", "abril", "maio", "junho", "julho", "agosto", "setembro", "outubro", "novembro", "dezembro"}, + {"a.m.", "p.m."}, +} + +var localeTablePtMO = [5][]string{ + {"domingo", "segunda", "terça", "quarta", "quinta", "sexta", "sábado"}, + {"domingo", "segunda-feira", "terça-feira", "quarta-feira", "quinta-feira", "sexta-feira", "sábado"}, + {"jan.", "fev.", "mar.", "abr.", "mai.", "jun.", "jul.", "ago.", "set.", "out.", "nov.", "dez."}, + {"janeiro", "fevereiro", "março", "abril", "maio", "junho", "julho", "agosto", "setembro", "outubro", "novembro", "dezembro"}, + {"a.m.", "p.m."}, +} + +var localeTablePtMZ = [5][]string{ + {"domingo", "segunda", "terça", "quarta", "quinta", "sexta", "sábado"}, + {"domingo", "segunda-feira", "terça-feira", "quarta-feira", "quinta-feira", "sexta-feira", "sábado"}, + {"jan.", "fev.", "mar.", "abr.", "mai.", "jun.", "jul.", "ago.", "set.", "out.", "nov.", "dez."}, + {"janeiro", "fevereiro", "março", "abril", "maio", "junho", "julho", "agosto", "setembro", "outubro", "novembro", "dezembro"}, + {"a.m.", "p.m."}, +} + +var localeTablePtPT = [5][]string{ + {"domingo", "segunda", "terça", "quarta", "quinta", "sexta", "sábado"}, + {"domingo", "segunda-feira", "terça-feira", 
"quarta-feira", "quinta-feira", "sexta-feira", "sábado"}, + {"jan.", "fev.", "mar.", "abr.", "mai.", "jun.", "jul.", "ago.", "set.", "out.", "nov.", "dez."}, + {"janeiro", "fevereiro", "março", "abril", "maio", "junho", "julho", "agosto", "setembro", "outubro", "novembro", "dezembro"}, + {"a.m.", "p.m."}, +} + +var localeTablePtST = [5][]string{ + {"domingo", "segunda", "terça", "quarta", "quinta", "sexta", "sábado"}, + {"domingo", "segunda-feira", "terça-feira", "quarta-feira", "quinta-feira", "sexta-feira", "sábado"}, + {"jan.", "fev.", "mar.", "abr.", "mai.", "jun.", "jul.", "ago.", "set.", "out.", "nov.", "dez."}, + {"janeiro", "fevereiro", "março", "abril", "maio", "junho", "julho", "agosto", "setembro", "outubro", "novembro", "dezembro"}, + {"a.m.", "p.m."}, +} + +var localeTablePtTL = [5][]string{ + {"domingo", "segunda", "terça", "quarta", "quinta", "sexta", "sábado"}, + {"domingo", "segunda-feira", "terça-feira", "quarta-feira", "quinta-feira", "sexta-feira", "sábado"}, + {"jan.", "fev.", "mar.", "abr.", "mai.", "jun.", "jul.", "ago.", "set.", "out.", "nov.", "dez."}, + {"janeiro", "fevereiro", "março", "abril", "maio", "junho", "julho", "agosto", "setembro", "outubro", "novembro", "dezembro"}, + {"a.m.", "p.m."}, +} + +var localeTableQu = [5][]string{ + {"Dom", "Lun", "Mar", "Mié", "Jue", "Vie", "Sab"}, + {"Domingo", "Lunes", "Martes", "Miércoles", "Jueves", "Viernes", "Sábado"}, + {"Ene", "Feb", "Mar", "Abr", "May", "Jun", "Jul", "Ago", "Set", "Oct", "Nov", "Dic"}, + {"Enero", "Febrero", "Marzo", "Abril", "Mayo", "Junio", "Julio", "Agosto", "Setiembre", "Octubre", "Noviembre", "Diciembre"}, + {"a.m.", "p.m."}, +} + +var localeTableQuBO = [5][]string{ + {"Dom", "Lun", "Mar", "Mié", "Jue", "Vie", "Sab"}, + {"Domingo", "Lunes", "Martes", "Miércoles", "Jueves", "Viernes", "Sábado"}, + {"Ene", "Feb", "Mar", "Abr", "May", "Jun", "Jul", "Ago", "Set", "Oct", "Nov", "Dic"}, + {"Enero", "Febrero", "Marzo", "Abril", "Mayo", "Junio", "Julio", "Agosto", "Setiembre", "Octubre", "Noviembre", "Diciembre"}, + {"a.m.", "p.m."}, +} + +var localeTableQuEC = [5][]string{ + {"Dom", "Lun", "Mar", "Mié", "Jue", "Vie", "Sab"}, + {"Domingo", "Lunes", "Martes", "Miércoles", "Jueves", "Viernes", "Sábado"}, + {"Ene", "Feb", "Mar", "Abr", "May", "Jun", "Jul", "Ago", "Set", "Oct", "Nov", "Dic"}, + {"Enero", "Febrero", "Marzo", "Abril", "Mayo", "Junio", "Julio", "Agosto", "Setiembre", "Octubre", "Noviembre", "Diciembre"}, + {"a.m.", "p.m."}, +} + +var localeTableQuPE = [5][]string{ + {"Dom", "Lun", "Mar", "Mié", "Jue", "Vie", "Sab"}, + {"Domingo", "Lunes", "Martes", "Miércoles", "Jueves", "Viernes", "Sábado"}, + {"Ene", "Feb", "Mar", "Abr", "May", "Jun", "Jul", "Ago", "Set", "Oct", "Nov", "Dic"}, + {"Enero", "Febrero", "Marzo", "Abril", "Mayo", "Junio", "Julio", "Agosto", "Setiembre", "Octubre", "Noviembre", "Diciembre"}, + {"a.m.", "p.m."}, +} + +var localeTableRaj = [5][]string{ + {}, + {"रविवार", "सोमवार", "मंगलवार", "बुधवार", "गुरुवार", "शुक्रवार", "शनिवार"}, + {}, + {"जनवरी", "फरवरी", "मार्च", "अप्रैल", "मई", "जून", "जुलाई", "अगस्त", "सितम्बर", "अक्टूबर", "नवंबर", "दिसंबर"}, + {}, +} + +var localeTableRajIN = [5][]string{ + {}, + {"रविवार", "सोमवार", "मंगलवार", "बुधवार", "गुरुवार", "शुक्रवार", "शनिवार"}, + {}, + {"जनवरी", "फरवरी", "मार्च", "अप्रैल", "मई", "जून", "जुलाई", "अगस्त", "सितम्बर", "अक्टूबर", "नवंबर", "दिसंबर"}, + {}, +} + +var localeTableRif = [5][]string{ + {"lḥe", "let", "ttl", "lar", "lex", "jje", "sse"}, + {"lḥed", "letnayen", "ttlat", "larbeɛ", "lexmis", "jjemɛa", "ssebt"}, + {"yen", "feb", 
"mar", "yeb", "may", "yun", "yul", "ɣuc", "cut", "kṭu", "nuw", "duj"}, + {"yennayer", "febrayer", "mars", "yebril", "mayyu", "yunyu", "yulyuz", "ɣucct", "cutenber", "kṭuber", "nuwember", "dujember"}, + {"a", "p"}, +} + +var localeTableRifMA = [5][]string{ + {"lḥe", "let", "ttl", "lar", "lex", "jje", "sse"}, + {"lḥed", "letnayen", "ttlat", "larbeɛ", "lexmis", "jjemɛa", "ssebt"}, + {"yen", "feb", "mar", "yeb", "may", "yun", "yul", "ɣuc", "cut", "kṭu", "nuw", "duj"}, + {"yennayer", "febrayer", "mars", "yebril", "mayyu", "yunyu", "yulyuz", "ɣucct", "cutenber", "kṭuber", "nuwember", "dujember"}, + {"a", "p"}, +} + +var localeTableRm = [5][]string{ + {"du", "gli", "ma", "me", "gie", "ve", "so"}, + {"dumengia", "glindesdi", "mardi", "mesemna", "gievgia", "venderdi", "sonda"}, + {"schan.", "favr.", "mars", "avr.", "matg", "zercl.", "fan.", "avust", "sett.", "oct.", "nov.", "dec."}, + {"da schaner", "da favrer", "da mars", "d’avrigl", "da matg", "da zercladur", "da fanadur", "d’avust", "da settember", "d’october", "da november", "da december"}, + {}, +} + +var localeTableRmCH = [5][]string{ + {"du", "gli", "ma", "me", "gie", "ve", "so"}, + {"dumengia", "glindesdi", "mardi", "mesemna", "gievgia", "venderdi", "sonda"}, + {"schan.", "favr.", "mars", "avr.", "matg", "zercl.", "fan.", "avust", "sett.", "oct.", "nov.", "dec."}, + {"da schaner", "da favrer", "da mars", "d’avrigl", "da matg", "da zercladur", "da fanadur", "d’avust", "da settember", "d’october", "da november", "da december"}, + {}, +} + +var localeTableRn = [5][]string{ + {"cu.", "mbe.", "kab.", "gtu.", "kan.", "gnu.", "gnd."}, + {"Ku w’indwi", "Ku wa mbere", "Ku wa kabiri", "Ku wa gatatu", "Ku wa kane", "Ku wa gatanu", "Ku wa gatandatu"}, + {"Mut.", "Gas.", "Wer.", "Mat.", "Gic.", "Kam.", "Nya.", "Kan.", "Nze.", "Ukw.", "Ugu.", "Uku."}, + {"Nzero", "Ruhuhuma", "Ntwarante", "Ndamukiza", "Rusama", "Ruheshi", "Mukakaro", "Nyandagaro", "Nyakanga", "Gitugutu", "Munyonyo", "Kigarama"}, + {"Z.MU.", "Z.MW."}, +} + +var localeTableRnBI = [5][]string{ + {"cu.", "mbe.", "kab.", "gtu.", "kan.", "gnu.", "gnd."}, + {"Ku w’indwi", "Ku wa mbere", "Ku wa kabiri", "Ku wa gatatu", "Ku wa kane", "Ku wa gatanu", "Ku wa gatandatu"}, + {"Mut.", "Gas.", "Wer.", "Mat.", "Gic.", "Kam.", "Nya.", "Kan.", "Nze.", "Ukw.", "Ugu.", "Uku."}, + {"Nzero", "Ruhuhuma", "Ntwarante", "Ndamukiza", "Rusama", "Ruheshi", "Mukakaro", "Nyandagaro", "Nyakanga", "Gitugutu", "Munyonyo", "Kigarama"}, + {"Z.MU.", "Z.MW."}, +} + +var localeTableRo = [5][]string{ + {"dum.", "lun.", "mar.", "mie.", "joi", "vin.", "sâm."}, + {"duminică", "luni", "marți", "miercuri", "joi", "vineri", "sâmbătă"}, + {"ian.", "feb.", "mar.", "apr.", "mai", "iun.", "iul.", "aug.", "sept.", "oct.", "nov.", "dec."}, + {"ianuarie", "februarie", "martie", "aprilie", "mai", "iunie", "iulie", "august", "septembrie", "octombrie", "noiembrie", "decembrie"}, + {"a.m.", "p.m."}, +} + +var localeTableRoMD = [5][]string{ + {"Dum", "Lun", "Mar", "Mie", "Joi", "Vin", "Sâm"}, + {"duminică", "luni", "marți", "miercuri", "joi", "vineri", "sâmbătă"}, + {"ian.", "feb.", "mar.", "apr.", "mai", "iun.", "iul.", "aug.", "sept.", "oct.", "nov.", "dec."}, + {"ianuarie", "februarie", "martie", "aprilie", "mai", "iunie", "iulie", "august", "septembrie", "octombrie", "noiembrie", "decembrie"}, + {"a.m.", "p.m."}, +} + +var localeTableRoRO = [5][]string{ + {"dum.", "lun.", "mar.", "mie.", "joi", "vin.", "sâm."}, + {"duminică", "luni", "marți", "miercuri", "joi", "vineri", "sâmbătă"}, + {"ian.", "feb.", "mar.", "apr.", "mai", "iun.", "iul.", 
"aug.", "sept.", "oct.", "nov.", "dec."}, + {"ianuarie", "februarie", "martie", "aprilie", "mai", "iunie", "iulie", "august", "septembrie", "octombrie", "noiembrie", "decembrie"}, + {"a.m.", "p.m."}, +} + +var localeTableRof = [5][]string{ + {"Ijp", "Ijt", "Ijn", "Ijtn", "Alh", "Iju", "Ijm"}, + {"Ijumapili", "Ijumatatu", "Ijumanne", "Ijumatano", "Alhamisi", "Ijumaa", "Ijumamosi"}, + {"M1", "M2", "M3", "M4", "M5", "M6", "M7", "M8", "M9", "M10", "M11", "M12"}, + {"Mweri wa kwanza", "Mweri wa kaili", "Mweri wa katatu", "Mweri wa kaana", "Mweri wa tanu", "Mweri wa sita", "Mweri wa saba", "Mweri wa nane", "Mweri wa tisa", "Mweri wa ikumi", "Mweri wa ikumi na moja", "Mweri wa ikumi na mbili"}, + {"kang’ama", "kingoto"}, +} + +var localeTableRofTZ = [5][]string{ + {"Ijp", "Ijt", "Ijn", "Ijtn", "Alh", "Iju", "Ijm"}, + {"Ijumapili", "Ijumatatu", "Ijumanne", "Ijumatano", "Alhamisi", "Ijumaa", "Ijumamosi"}, + {"M1", "M2", "M3", "M4", "M5", "M6", "M7", "M8", "M9", "M10", "M11", "M12"}, + {"Mweri wa kwanza", "Mweri wa kaili", "Mweri wa katatu", "Mweri wa kaana", "Mweri wa tanu", "Mweri wa sita", "Mweri wa saba", "Mweri wa nane", "Mweri wa tisa", "Mweri wa ikumi", "Mweri wa ikumi na moja", "Mweri wa ikumi na mbili"}, + {"kang’ama", "kingoto"}, +} + +var localeTableRu = [5][]string{ + {"вс", "пн", "вт", "ср", "чт", "пт", "сб"}, + {"воскресенье", "понедельник", "вторник", "среда", "четверг", "пятница", "суббота"}, + {"янв.", "февр.", "мар.", "апр.", "мая", "июн.", "июл.", "авг.", "сент.", "окт.", "нояб.", "дек."}, + {"января", "февраля", "марта", "апреля", "мая", "июня", "июля", "августа", "сентября", "октября", "ноября", "декабря"}, + {}, +} + +var localeTableRuBY = [5][]string{ + {"вс", "пн", "вт", "ср", "чт", "пт", "сб"}, + {"воскресенье", "понедельник", "вторник", "среда", "четверг", "пятница", "суббота"}, + {"янв.", "февр.", "мар.", "апр.", "мая", "июн.", "июл.", "авг.", "сент.", "окт.", "нояб.", "дек."}, + {"января", "февраля", "марта", "апреля", "мая", "июня", "июля", "августа", "сентября", "октября", "ноября", "декабря"}, + {}, +} + +var localeTableRuKG = [5][]string{ + {"вс", "пн", "вт", "ср", "чт", "пт", "сб"}, + {"воскресенье", "понедельник", "вторник", "среда", "четверг", "пятница", "суббота"}, + {"янв.", "февр.", "мар.", "апр.", "мая", "июн.", "июл.", "авг.", "сент.", "окт.", "нояб.", "дек."}, + {"января", "февраля", "марта", "апреля", "мая", "июня", "июля", "августа", "сентября", "октября", "ноября", "декабря"}, + {}, +} + +var localeTableRuKZ = [5][]string{ + {"вс", "пн", "вт", "ср", "чт", "пт", "сб"}, + {"воскресенье", "понедельник", "вторник", "среда", "четверг", "пятница", "суббота"}, + {"янв.", "февр.", "мар.", "апр.", "мая", "июн.", "июл.", "авг.", "сент.", "окт.", "нояб.", "дек."}, + {"января", "февраля", "марта", "апреля", "мая", "июня", "июля", "августа", "сентября", "октября", "ноября", "декабря"}, + {}, +} + +var localeTableRuMD = [5][]string{ + {"вс", "пн", "вт", "ср", "чт", "пт", "сб"}, + {"воскресенье", "понедельник", "вторник", "среда", "четверг", "пятница", "суббота"}, + {"янв.", "февр.", "мар.", "апр.", "мая", "июн.", "июл.", "авг.", "сент.", "окт.", "нояб.", "дек."}, + {"января", "февраля", "марта", "апреля", "мая", "июня", "июля", "августа", "сентября", "октября", "ноября", "декабря"}, + {}, +} + +var localeTableRuRU = [5][]string{ + {"вс", "пн", "вт", "ср", "чт", "пт", "сб"}, + {"воскресенье", "понедельник", "вторник", "среда", "четверг", "пятница", "суббота"}, + {"янв.", "февр.", "мар.", "апр.", "мая", "июн.", "июл.", "авг.", "сент.", "окт.", "нояб.", "дек."}, + 
{"января", "февраля", "марта", "апреля", "мая", "июня", "июля", "августа", "сентября", "октября", "ноября", "декабря"}, + {}, +} + +var localeTableRuUA = [5][]string{ + {"вс", "пн", "вт", "ср", "чт", "пт", "сб"}, + {"воскресенье", "понедельник", "вторник", "среда", "четверг", "пятница", "суббота"}, + {"янв.", "февр.", "мар.", "апр.", "мая", "июн.", "июл.", "авг.", "сент.", "окт.", "нояб.", "дек."}, + {"января", "февраля", "марта", "апреля", "мая", "июня", "июля", "августа", "сентября", "октября", "ноября", "декабря"}, + {}, +} + +var localeTableRw = [5][]string{ + {"cyu.", "mbe.", "kab.", "gtu.", "kan.", "gnu.", "gnd."}, + {"Ku cyumweru", "Kuwa mbere", "Kuwa kabiri", "Kuwa gatatu", "Kuwa kane", "Kuwa gatanu", "Kuwa gatandatu"}, + {"mut.", "gas.", "wer.", "mat.", "gic.", "kam.", "nya.", "kan.", "nze.", "ukw.", "ugu.", "uku."}, + {"Mutarama", "Gashyantare", "Werurwe", "Mata", "Gicurasi", "Kamena", "Nyakanga", "Kanama", "Nzeli", "Ukwakira", "Ugushyingo", "Ukuboza"}, + {}, +} + +var localeTableRwRW = [5][]string{ + {"cyu.", "mbe.", "kab.", "gtu.", "kan.", "gnu.", "gnd."}, + {"Ku cyumweru", "Kuwa mbere", "Kuwa kabiri", "Kuwa gatatu", "Kuwa kane", "Kuwa gatanu", "Kuwa gatandatu"}, + {"mut.", "gas.", "wer.", "mat.", "gic.", "kam.", "nya.", "kan.", "nze.", "ukw.", "ugu.", "uku."}, + {"Mutarama", "Gashyantare", "Werurwe", "Mata", "Gicurasi", "Kamena", "Nyakanga", "Kanama", "Nzeli", "Ukwakira", "Ugushyingo", "Ukuboza"}, + {}, +} + +var localeTableRwk = [5][]string{ + {"Jpi", "Jtt", "Jnn", "Jtn", "Alh", "Iju", "Jmo"}, + {"Jumapilyi", "Jumatatuu", "Jumanne", "Jumatanu", "Alhamisi", "Ijumaa", "Jumamosi"}, + {"Jan", "Feb", "Mac", "Apr", "Mei", "Jun", "Jul", "Ago", "Sep", "Okt", "Nov", "Des"}, + {"Januari", "Februari", "Machi", "Aprilyi", "Mei", "Junyi", "Julyai", "Agusti", "Septemba", "Oktoba", "Novemba", "Desemba"}, + {"utuko", "kyiukonyi"}, +} + +var localeTableRwkTZ = [5][]string{ + {"Jpi", "Jtt", "Jnn", "Jtn", "Alh", "Iju", "Jmo"}, + {"Jumapilyi", "Jumatatuu", "Jumanne", "Jumatanu", "Alhamisi", "Ijumaa", "Jumamosi"}, + {"Jan", "Feb", "Mac", "Apr", "Mei", "Jun", "Jul", "Ago", "Sep", "Okt", "Nov", "Des"}, + {"Januari", "Februari", "Machi", "Aprilyi", "Mei", "Junyi", "Julyai", "Agusti", "Septemba", "Oktoba", "Novemba", "Desemba"}, + {"utuko", "kyiukonyi"}, +} + +var localeTableSa = [5][]string{ + {"रवि", "सोम", "मंगल", "बुध", "गुरु", "शुक्र", "शनि"}, + {"रविवासरः", "सोमवासरः", "मंगलवासरः", "बुधवासरः", "गुरुवासर:", "शुक्रवासरः", "शनिवासरः"}, + {"जनवरी:", "फरवरी:", "मार्च:", "अप्रैल:", "मई", "जून:", "जुलाई:", "अगस्त:", "सितंबर:", "अक्तूबर:", "नवंबर:", "दिसंबर:"}, + {"जनवरीमासः", "फरवरीमासः", "मार्चमासः", "अप्रैलमासः", "मईमासः", "जूनमासः", "जुलाईमासः", "अगस्तमासः", "सितंबरमासः", "अक्तूबरमासः", "नवंबरमासः", "दिसंबरमासः"}, + {}, +} + +var localeTableSaIN = [5][]string{ + {"रवि", "सोम", "मंगल", "बुध", "गुरु", "शुक्र", "शनि"}, + {"रविवासरः", "सोमवासरः", "मंगलवासरः", "बुधवासरः", "गुरुवासर:", "शुक्रवासरः", "शनिवासरः"}, + {"जनवरी:", "फरवरी:", "मार्च:", "अप्रैल:", "मई", "जून:", "जुलाई:", "अगस्त:", "सितंबर:", "अक्तूबर:", "नवंबर:", "दिसंबर:"}, + {"जनवरीमासः", "फरवरीमासः", "मार्चमासः", "अप्रैलमासः", "मईमासः", "जूनमासः", "जुलाईमासः", "अगस्तमासः", "सितंबरमासः", "अक्तूबरमासः", "नवंबरमासः", "दिसंबरमासः"}, + {}, +} + +var localeTableSah = [5][]string{ + {"бс", "бн", "оп", "сэ", "чп", "бэ", "сб"}, + {"баскыһыанньа", "бэнидиэнньик", "оптуорунньук", "сэрэдэ", "чэппиэр", "Бээтиҥсэ", "субуота"}, + {"Тохс", "Олун", "Клн", "Мсу", "Ыам", "Бэс", "Отй", "Атр", "Блҕ", "Алт", "Сэт", "Ахс"}, + {"Тохсунньу", "Олунньу", "Кулун тутар", 
"Муус устар", "Ыам ыйын", "Бэс ыйын", "От ыйын", "Атырдьых ыйын", "Балаҕан ыйын", "Алтынньы", "Сэтинньи", "ахсынньы"}, + {"ЭИ", "ЭК"}, +} + +var localeTableSahRU = [5][]string{ + {"бс", "бн", "оп", "сэ", "чп", "бэ", "сб"}, + {"баскыһыанньа", "бэнидиэнньик", "оптуорунньук", "сэрэдэ", "чэппиэр", "Бээтиҥсэ", "субуота"}, + {"Тохс", "Олун", "Клн", "Мсу", "Ыам", "Бэс", "Отй", "Атр", "Блҕ", "Алт", "Сэт", "Ахс"}, + {"Тохсунньу", "Олунньу", "Кулун тутар", "Муус устар", "Ыам ыйын", "Бэс ыйын", "От ыйын", "Атырдьых ыйын", "Балаҕан ыйын", "Алтынньы", "Сэтинньи", "ахсынньы"}, + {"ЭИ", "ЭК"}, +} + +var localeTableSaq = [5][]string{ + {"Are", "Kun", "Ong", "Ine", "Ile", "Sap", "Kwe"}, + {"Mderot ee are", "Mderot ee kuni", "Mderot ee ong’wan", "Mderot ee inet", "Mderot ee ile", "Mderot ee sapa", "Mderot ee kwe"}, + {"Obo", "Waa", "Oku", "Ong", "Ime", "Ile", "Sap", "Isi", "Saa", "Tom", "Tob", "Tow"}, + {"Lapa le obo", "Lapa le waare", "Lapa le okuni", "Lapa le ong’wan", "Lapa le imet", "Lapa le ile", "Lapa le sapa", "Lapa le isiet", "Lapa le saal", "Lapa le tomon", "Lapa le tomon obo", "Lapa le tomon waare"}, + {"Tesiran", "Teipa"}, +} + +var localeTableSaqKE = [5][]string{ + {"Are", "Kun", "Ong", "Ine", "Ile", "Sap", "Kwe"}, + {"Mderot ee are", "Mderot ee kuni", "Mderot ee ong’wan", "Mderot ee inet", "Mderot ee ile", "Mderot ee sapa", "Mderot ee kwe"}, + {"Obo", "Waa", "Oku", "Ong", "Ime", "Ile", "Sap", "Isi", "Saa", "Tom", "Tob", "Tow"}, + {"Lapa le obo", "Lapa le waare", "Lapa le okuni", "Lapa le ong’wan", "Lapa le imet", "Lapa le ile", "Lapa le sapa", "Lapa le isiet", "Lapa le saal", "Lapa le tomon", "Lapa le tomon obo", "Lapa le tomon waare"}, + {"Tesiran", "Teipa"}, +} + +var localeTableSat = [5][]string{ + {"ᱥᱤᱸ", "ᱚᱛ", "ᱵᱟ", "ᱥᱟᱹ", "ᱥᱟᱹᱨ", "ᱡᱟᱹ", "ᱧᱩ"}, + {"ᱥᱤᱸᱜᱮ", "ᱚᱛᱮ", "ᱵᱟᱞᱮ", "ᱥᱟᱹᱜᱩᱱ", "ᱥᱟᱹᱨᱫᱤ", "ᱡᱟᱹᱨᱩᱢ", "ᱧᱩᱦᱩᱢ"}, + {"ᱡᱟᱱ", "ᱯᱷᱟ", "ᱢᱟᱨ", "ᱟᱯᱨ", "ᱢᱮ", "ᱡᱩᱱ", "ᱡᱩᱞ", "ᱟᱜᱟ", "ᱥᱮᱯ", "ᱚᱠᱴ", "ᱱᱟᱣ", "ᱫᱤᱥ"}, + {"ᱡᱟᱱᱣᱟᱨᱤ", "ᱯᱷᱟᱨᱣᱟᱨᱤ", "ᱢᱟᱨᱪ", "ᱟᱯᱨᱮᱞ", "ᱢᱮ", "ᱡᱩᱱ", "ᱡᱩᱞᱟᱭ", "ᱟᱜᱟᱥᱛ", "ᱥᱮᱯᱴᱮᱢᱵᱟᱨ", "ᱚᱠᱴᱚᱵᱟᱨ", "ᱱᱟᱣᱟᱢᱵᱟᱨ", "ᱫᱤᱥᱟᱢᱵᱟᱨ"}, + {}, +} + +var localeTableSbp = [5][]string{ + {"Mul", "Jtt", "Jnn", "Jtn", "Alh", "Iju", "Jmo"}, + {"Mulungu", "Jumatatu", "Jumanne", "Jumatano", "Alahamisi", "Ijumaa", "Jumamosi"}, + {"Mup", "Mwi", "Msh", "Mun", "Mag", "Muj", "Msp", "Mpg", "Mye", "Mok", "Mus", "Muh"}, + {"Mupalangulwa", "Mwitope", "Mushende", "Munyi", "Mushende Magali", "Mujimbi", "Mushipepo", "Mupuguto", "Munyense", "Mokhu", "Musongandembwe", "Muhaano"}, + {"Lwamilawu", "Pashamihe"}, +} + +var localeTableSbpTZ = [5][]string{ + {"Mul", "Jtt", "Jnn", "Jtn", "Alh", "Iju", "Jmo"}, + {"Mulungu", "Jumatatu", "Jumanne", "Jumatano", "Alahamisi", "Ijumaa", "Jumamosi"}, + {"Mup", "Mwi", "Msh", "Mun", "Mag", "Muj", "Msp", "Mpg", "Mye", "Mok", "Mus", "Muh"}, + {"Mupalangulwa", "Mwitope", "Mushende", "Munyi", "Mushende Magali", "Mujimbi", "Mushipepo", "Mupuguto", "Munyense", "Mokhu", "Musongandembwe", "Muhaano"}, + {"Lwamilawu", "Pashamihe"}, +} + +var localeTableSc = [5][]string{ + {"dom", "lun", "mar", "mèr", "giò", "che", "sàb"}, + {"domìniga", "lunis", "martis", "mèrcuris", "giòbia", "chenàbura", "sàbadu"}, + {"ghe", "fre", "mar", "abr", "maj", "làm", "trì", "aus", "cab", "stG", "stA", "nad"}, + {"ghennàrgiu", "freàrgiu", "martzu", "abrile", "maju", "làmpadas", "trìulas", "austu", "cabudanni", "santugaine", "santandria", "nadale"}, + {"m.", "b."}, +} + +var localeTableScIT = [5][]string{ + {"dom", "lun", "mar", "mèr", "giò", "che", "sàb"}, + {"domìniga", "lunis", "martis", "mèrcuris", "giòbia", "chenàbura", 
"sàbadu"}, + {"ghe", "fre", "mar", "abr", "maj", "làm", "trì", "aus", "cab", "stG", "stA", "nad"}, + {"ghennàrgiu", "freàrgiu", "martzu", "abrile", "maju", "làmpadas", "trìulas", "austu", "cabudanni", "santugaine", "santandria", "nadale"}, + {"m.", "b."}, +} + +var localeTableScn = [5][]string{ + {}, + {"dumìnica", "lunnidìa", "martidìa", "mercuridìa", "jovidìa", "vennidìa", "sàbbatu"}, + {"jin", "fri", "mar", "apr", "maj", "giu", "gnt", "agu", "sit", "utt", "nuv", "dic"}, + {"jinnaru", "frivaru", "marzu", "aprili", "maju", "giugnu", "giugnettu", "agustu", "sittèmmiru", "uttòviru", "nuvèmmiru", "dicèmmiru"}, + {}, +} + +var localeTableScnIT = [5][]string{ + {}, + {"dumìnica", "lunnidìa", "martidìa", "mercuridìa", "jovidìa", "vennidìa", "sàbbatu"}, + {"jin", "fri", "mar", "apr", "maj", "giu", "gnt", "agu", "sit", "utt", "nuv", "dic"}, + {"jinnaru", "frivaru", "marzu", "aprili", "maju", "giugnu", "giugnettu", "agustu", "sittèmmiru", "uttòviru", "nuvèmmiru", "dicèmmiru"}, + {}, +} + +var localeTableSd = [5][]string{ + {}, + {"آچر", "سومر", "اڱارو", "اربع", "خميس", "جمعو", "ڇنڇر"}, + {}, + {"جنوري", "فيبروري", "مارچ", "اپريل", "مئي", "جون", "جولاءِ", "آگسٽ", "سيپٽمبر", "آڪٽوبر", "نومبر", "ڊسمبر"}, + {"صبح،منجهند", "شام،منجهند"}, +} + +var localeTableSdArab = [5][]string{ + {}, + {"آچر", "سومر", "اڱارو", "اربع", "خميس", "جمعو", "ڇنڇر"}, + {}, + {"جنوري", "فيبروري", "مارچ", "اپريل", "مئي", "جون", "جولاءِ", "آگسٽ", "سيپٽمبر", "آڪٽوبر", "نومبر", "ڊسمبر"}, + {"صبح،منجهند", "شام،منجهند"}, +} + +var localeTableSdArabPK = [5][]string{ + {}, + {"آچر", "سومر", "اڱارو", "اربع", "خميس", "جمعو", "ڇنڇر"}, + {}, + {"جنوري", "فيبروري", "مارچ", "اپريل", "مئي", "جون", "جولاءِ", "آگسٽ", "سيپٽمبر", "آڪٽوبر", "نومبر", "ڊسمبر"}, + {"صبح،منجهند", "شام،منجهند"}, +} + +var localeTableSdDeva = [5][]string{ + {"आर्त", "सू", "मंग", "बु॒ध", "विस", "जुम", "छंछ"}, + {"आर्तवार", "सूमर", "मंगलु", "बु॒धर", "विस्पत", "जुमो", "छंछर"}, + {"जन", "फर", "मार्च", "अप्रै", "मई", "जून", "जु", "अग", "सप्टे", "ऑक्टो", "नवं", "डिसं"}, + {"जनवरी", "फरवरी", "मार्चु", "अप्रैल", "मई", "जून", "जुलाई", "अगस्ट", "सप्टेंबर", "ओक्टोबर", "नवंबर", "डिसंबर"}, + {}, +} + +var localeTableSdDevaIN = [5][]string{ + {"आर्त", "सू", "मंग", "बु॒ध", "विस", "जुम", "छंछ"}, + {"आर्तवार", "सूमर", "मंगलु", "बु॒धर", "विस्पत", "जुमो", "छंछर"}, + {"जन", "फर", "मार्च", "अप्रै", "मई", "जून", "जु", "अग", "सप्टे", "ऑक्टो", "नवं", "डिसं"}, + {"जनवरी", "फरवरी", "मार्चु", "अप्रैल", "मई", "जून", "जुलाई", "अगस्ट", "सप्टेंबर", "ओक्टोबर", "नवंबर", "डिसंबर"}, + {}, +} + +var localeTableSe = [5][]string{ + {"sotn", "vuos", "maŋ", "gask", "duor", "bear", "láv"}, + {"sotnabeaivi", "vuossárga", "maŋŋebárga", "gaskavahkku", "duorasdat", "bearjadat", "lávvardat"}, + {"ođđj", "guov", "njuk", "cuo", "mies", "geas", "suoi", "borg", "čakč", "golg", "skáb", "juov"}, + {"ođđajagemánnu", "guovvamánnu", "njukčamánnu", "cuoŋománnu", "miessemánnu", "geassemánnu", "suoidnemánnu", "borgemánnu", "čakčamánnu", "golggotmánnu", "skábmamánnu", "juovlamánnu"}, + {"i.b.", "e.b."}, +} + +var localeTableSeFI = [5][]string{ + {"so", "má", "di", "ga", "du", "be", "lá"}, + {"sotnabeaivi", "mánnodat", "disdat", "gaskavahkku", "duorastat", "bearjadat", "lávvordat"}, + {"ođđj", "guov", "njuk", "cuoŋ", "mies", "geas", "suoi", "borg", "čakč", "golg", "skáb", "juov"}, + {"ođđajagemánnu", "guovvamánnu", "njukčamánnu", "cuoŋománnu", "miessemánnu", "geassemánnu", "suoidnemánnu", "borgemánnu", "čakčamánnu", "golggotmánnu", "skábmamánnu", "juovlamánnu"}, + {"ib", "eb"}, +} + +var localeTableSeNO = [5][]string{ + 
{"sotn", "vuos", "maŋ", "gask", "duor", "bear", "láv"}, + {"sotnabeaivi", "vuossárga", "maŋŋebárga", "gaskavahkku", "duorasdat", "bearjadat", "lávvardat"}, + {"ođđj", "guov", "njuk", "cuo", "mies", "geas", "suoi", "borg", "čakč", "golg", "skáb", "juov"}, + {"ođđajagemánnu", "guovvamánnu", "njukčamánnu", "cuoŋománnu", "miessemánnu", "geassemánnu", "suoidnemánnu", "borgemánnu", "čakčamánnu", "golggotmánnu", "skábmamánnu", "juovlamánnu"}, + {"i.b.", "e.b."}, +} + +var localeTableSeSE = [5][]string{ + {"sotn", "vuos", "maŋ", "gask", "duor", "bear", "láv"}, + {"sotnabeaivi", "vuossárga", "maŋŋebárga", "gaskavahkku", "duorasdat", "bearjadat", "lávvardat"}, + {"ođđj", "guov", "njuk", "cuo", "mies", "geas", "suoi", "borg", "čakč", "golg", "skáb", "juov"}, + {"ođđajagemánnu", "guovvamánnu", "njukčamánnu", "cuoŋománnu", "miessemánnu", "geassemánnu", "suoidnemánnu", "borgemánnu", "čakčamánnu", "golggotmánnu", "skábmamánnu", "juovlamánnu"}, + {"i.b.", "e.b."}, +} + +var localeTableSeh = [5][]string{ + {"Dim", "Pos", "Pir", "Tat", "Nai", "Sha", "Sab"}, + {"Dimingu", "Chiposi", "Chipiri", "Chitatu", "Chinai", "Chishanu", "Sabudu"}, + {"Jan", "Fev", "Mar", "Abr", "Mai", "Jun", "Jul", "Aug", "Set", "Otu", "Nov", "Dec"}, + {"Janeiro", "Fevreiro", "Marco", "Abril", "Maio", "Junho", "Julho", "Augusto", "Setembro", "Otubro", "Novembro", "Decembro"}, + {}, +} + +var localeTableSehMZ = [5][]string{ + {"Dim", "Pos", "Pir", "Tat", "Nai", "Sha", "Sab"}, + {"Dimingu", "Chiposi", "Chipiri", "Chitatu", "Chinai", "Chishanu", "Sabudu"}, + {"Jan", "Fev", "Mar", "Abr", "Mai", "Jun", "Jul", "Aug", "Set", "Otu", "Nov", "Dec"}, + {"Janeiro", "Fevreiro", "Marco", "Abril", "Maio", "Junho", "Julho", "Augusto", "Setembro", "Otubro", "Novembro", "Decembro"}, + {}, +} + +var localeTableSes = [5][]string{ + {"Alh", "Ati", "Ata", "Ala", "Alm", "Alz", "Asi"}, + {"Alhadi", "Atinni", "Atalaata", "Alarba", "Alhamiisa", "Alzuma", "Asibti"}, + {"Žan", "Fee", "Mar", "Awi", "Me", "Žuw", "Žuy", "Ut", "Sek", "Okt", "Noo", "Dee"}, + {"Žanwiye", "Feewiriye", "Marsi", "Awiril", "Me", "Žuweŋ", "Žuyye", "Ut", "Sektanbur", "Oktoobur", "Noowanbur", "Deesanbur"}, + {"Adduha", "Aluula"}, +} + +var localeTableSesML = [5][]string{ + {"Alh", "Ati", "Ata", "Ala", "Alm", "Alz", "Asi"}, + {"Alhadi", "Atinni", "Atalaata", "Alarba", "Alhamiisa", "Alzuma", "Asibti"}, + {"Žan", "Fee", "Mar", "Awi", "Me", "Žuw", "Žuy", "Ut", "Sek", "Okt", "Noo", "Dee"}, + {"Žanwiye", "Feewiriye", "Marsi", "Awiril", "Me", "Žuweŋ", "Žuyye", "Ut", "Sektanbur", "Oktoobur", "Noowanbur", "Deesanbur"}, + {"Adduha", "Aluula"}, +} + +var localeTableSg = [5][]string{ + {"Bk1", "Bk2", "Bk3", "Bk4", "Bk5", "Lâp", "Lây"}, + {"Bikua-ôko", "Bïkua-ûse", "Bïkua-ptâ", "Bïkua-usïö", "Bïkua-okü", "Lâpôsö", "Lâyenga"}, + {"Nye", "Ful", "Mbä", "Ngu", "Bêl", "Fön", "Len", "Kük", "Mvu", "Ngb", "Nab", "Kak"}, + {"Nyenye", "Fulundïgi", "Mbängü", "Ngubùe", "Bêläwü", "Föndo", "Lengua", "Kükürü", "Mvuka", "Ngberere", "Nabändüru", "Kakauka"}, + {"ND", "LK"}, +} + +var localeTableSgCF = [5][]string{ + {"Bk1", "Bk2", "Bk3", "Bk4", "Bk5", "Lâp", "Lây"}, + {"Bikua-ôko", "Bïkua-ûse", "Bïkua-ptâ", "Bïkua-usïö", "Bïkua-okü", "Lâpôsö", "Lâyenga"}, + {"Nye", "Ful", "Mbä", "Ngu", "Bêl", "Fön", "Len", "Kük", "Mvu", "Ngb", "Nab", "Kak"}, + {"Nyenye", "Fulundïgi", "Mbängü", "Ngubùe", "Bêläwü", "Föndo", "Lengua", "Kükürü", "Mvuka", "Ngberere", "Nabändüru", "Kakauka"}, + {"ND", "LK"}, +} + +var localeTableShi = [5][]string{ + {"ⴰⵙⴰ", "ⴰⵢⵏ", "ⴰⵙⵉ", "ⴰⴽⵕ", "ⴰⴽⵡ", "ⴰⵙⵉⵎ", "ⴰⵙⵉⴹ"}, + {"ⴰⵙⴰⵎⴰⵙ", "ⴰⵢⵏⴰⵙ", "ⴰⵙⵉⵏⴰⵙ", "ⴰⴽⵕⴰⵙ", 
"ⴰⴽⵡⴰⵙ", "ⵙⵉⵎⵡⴰⵙ", "ⴰⵙⵉⴹⵢⴰⵙ"}, + {"ⵉⵏⵏ", "ⴱⵕⴰ", "ⵎⴰⵕ", "ⵉⴱⵔ", "ⵎⴰⵢ", "ⵢⵓⵏ", "ⵢⵓⵍ", "ⵖⵓⵛ", "ⵛⵓⵜ", "ⴽⵜⵓ", "ⵏⵓⵡ", "ⴷⵓⵊ"}, + {"ⵉⵏⵏⴰⵢⵔ", "ⴱⵕⴰⵢⵕ", "ⵎⴰⵕⵚ", "ⵉⴱⵔⵉⵔ", "ⵎⴰⵢⵢⵓ", "ⵢⵓⵏⵢⵓ", "ⵢⵓⵍⵢⵓⵣ", "ⵖⵓⵛⵜ", "ⵛⵓⵜⴰⵏⴱⵉⵔ", "ⴽⵜⵓⴱⵔ", "ⵏⵓⵡⴰⵏⴱⵉⵔ", "ⴷⵓⵊⴰⵏⴱⵉⵔ"}, + {"ⵜⵉⴼⴰⵡⵜ", "ⵜⴰⴷⴳⴳⵯⴰⵜ"}, +} + +var localeTableShiLatn = [5][]string{ + {"asa", "ayn", "asi", "akṛ", "akw", "asim", "asiḍ"}, + {"asamas", "aynas", "asinas", "akṛas", "akwas", "asimwas", "asiḍyas"}, + {"inn", "bṛa", "maṛ", "ibr", "may", "yun", "yul", "ɣuc", "cut", "ktu", "nuw", "duj"}, + {"innayr", "bṛayṛ", "maṛṣ", "ibrir", "mayyu", "yunyu", "yulyuz", "ɣuct", "cutanbir", "ktubr", "nuwanbir", "dujanbir"}, + {"tifawt", "tadggʷat"}, +} + +var localeTableShiLatnMA = [5][]string{ + {"asa", "ayn", "asi", "akṛ", "akw", "asim", "asiḍ"}, + {"asamas", "aynas", "asinas", "akṛas", "akwas", "asimwas", "asiḍyas"}, + {"inn", "bṛa", "maṛ", "ibr", "may", "yun", "yul", "ɣuc", "cut", "ktu", "nuw", "duj"}, + {"innayr", "bṛayṛ", "maṛṣ", "ibrir", "mayyu", "yunyu", "yulyuz", "ɣuct", "cutanbir", "ktubr", "nuwanbir", "dujanbir"}, + {"tifawt", "tadggʷat"}, +} + +var localeTableShiTfng = [5][]string{ + {"ⴰⵙⴰ", "ⴰⵢⵏ", "ⴰⵙⵉ", "ⴰⴽⵕ", "ⴰⴽⵡ", "ⴰⵙⵉⵎ", "ⴰⵙⵉⴹ"}, + {"ⴰⵙⴰⵎⴰⵙ", "ⴰⵢⵏⴰⵙ", "ⴰⵙⵉⵏⴰⵙ", "ⴰⴽⵕⴰⵙ", "ⴰⴽⵡⴰⵙ", "ⵙⵉⵎⵡⴰⵙ", "ⴰⵙⵉⴹⵢⴰⵙ"}, + {"ⵉⵏⵏ", "ⴱⵕⴰ", "ⵎⴰⵕ", "ⵉⴱⵔ", "ⵎⴰⵢ", "ⵢⵓⵏ", "ⵢⵓⵍ", "ⵖⵓⵛ", "ⵛⵓⵜ", "ⴽⵜⵓ", "ⵏⵓⵡ", "ⴷⵓⵊ"}, + {"ⵉⵏⵏⴰⵢⵔ", "ⴱⵕⴰⵢⵕ", "ⵎⴰⵕⵚ", "ⵉⴱⵔⵉⵔ", "ⵎⴰⵢⵢⵓ", "ⵢⵓⵏⵢⵓ", "ⵢⵓⵍⵢⵓⵣ", "ⵖⵓⵛⵜ", "ⵛⵓⵜⴰⵏⴱⵉⵔ", "ⴽⵜⵓⴱⵔ", "ⵏⵓⵡⴰⵏⴱⵉⵔ", "ⴷⵓⵊⴰⵏⴱⵉⵔ"}, + {"ⵜⵉⴼⴰⵡⵜ", "ⵜⴰⴷⴳⴳⵯⴰⵜ"}, +} + +var localeTableShiTfngMA = [5][]string{ + {"ⴰⵙⴰ", "ⴰⵢⵏ", "ⴰⵙⵉ", "ⴰⴽⵕ", "ⴰⴽⵡ", "ⴰⵙⵉⵎ", "ⴰⵙⵉⴹ"}, + {"ⴰⵙⴰⵎⴰⵙ", "ⴰⵢⵏⴰⵙ", "ⴰⵙⵉⵏⴰⵙ", "ⴰⴽⵕⴰⵙ", "ⴰⴽⵡⴰⵙ", "ⵙⵉⵎⵡⴰⵙ", "ⴰⵙⵉⴹⵢⴰⵙ"}, + {"ⵉⵏⵏ", "ⴱⵕⴰ", "ⵎⴰⵕ", "ⵉⴱⵔ", "ⵎⴰⵢ", "ⵢⵓⵏ", "ⵢⵓⵍ", "ⵖⵓⵛ", "ⵛⵓⵜ", "ⴽⵜⵓ", "ⵏⵓⵡ", "ⴷⵓⵊ"}, + {"ⵉⵏⵏⴰⵢⵔ", "ⴱⵕⴰⵢⵕ", "ⵎⴰⵕⵚ", "ⵉⴱⵔⵉⵔ", "ⵎⴰⵢⵢⵓ", "ⵢⵓⵏⵢⵓ", "ⵢⵓⵍⵢⵓⵣ", "ⵖⵓⵛⵜ", "ⵛⵓⵜⴰⵏⴱⵉⵔ", "ⴽⵜⵓⴱⵔ", "ⵏⵓⵡⴰⵏⴱⵉⵔ", "ⴷⵓⵊⴰⵏⴱⵉⵔ"}, + {"ⵜⵉⴼⴰⵡⵜ", "ⵜⴰⴷⴳⴳⵯⴰⵜ"}, +} + +var localeTableSi = [5][]string{ + {"ඉරිදා", "සඳුදා", "අඟහ", "බදාදා", "බ්‍රහස්", "සිකු", "සෙන"}, + {"ඉරිදා", "සඳුදා", "අඟහරුවාදා", "බදාදා", "බ්‍රහස්පතින්දා", "සිකුරාදා", "සෙනසුරාදා"}, + {"ජන", "පෙබ", "මාර්තු", "අප්‍රේල්", "මැයි", "ජූනි", "ජූලි", "අගෝ", "සැප්", "ඔක්", "නොවැ", "දෙසැ"}, + {"ජනවාරි", "පෙබරවාරි", "මාර්තු", "අප්‍රේල්", "මැයි", "ජූනි", "ජූලි", "අගෝස්තු", "සැප්තැම්බර්", "ඔක්තෝබර්", "නොවැම්බර්", "දෙසැම්බර්"}, + {"පෙ.ව.", "ප.ව."}, +} + +var localeTableSiLK = [5][]string{ + {"ඉරිදා", "සඳුදා", "අඟහ", "බදාදා", "බ්‍රහස්", "සිකු", "සෙන"}, + {"ඉරිදා", "සඳුදා", "අඟහරුවාදා", "බදාදා", "බ්‍රහස්පතින්දා", "සිකුරාදා", "සෙනසුරාදා"}, + {"ජන", "පෙබ", "මාර්තු", "අප්‍රේල්", "මැයි", "ජූනි", "ජූලි", "අගෝ", "සැප්", "ඔක්", "නොවැ", "දෙසැ"}, + {"ජනවාරි", "පෙබරවාරි", "මාර්තු", "අප්‍රේල්", "මැයි", "ජූනි", "ජූලි", "අගෝස්තු", "සැප්තැම්බර්", "ඔක්තෝබර්", "නොවැම්බර්", "දෙසැම්බර්"}, + {"පෙ.ව.", "ප.ව."}, +} + +var localeTableSid = [5][]string{ + {"Sam", "San", "Mak", "Row", "Ham", "Arb", "Qid"}, + {"Sambata", "Sanyo", "Maakisanyo", "Roowe", "Hamuse", "Arbe", "Qidaame"}, + {}, + {}, + {"soodo", "hawwaro"}, +} + +var localeTableSidET = [5][]string{ + {"Sam", "San", "Mak", "Row", "Ham", "Arb", "Qid"}, + {"Sambata", "Sanyo", "Maakisanyo", "Roowe", "Hamuse", "Arbe", "Qidaame"}, + {}, + {}, + {"soodo", "hawwaro"}, +} + +var localeTableSk = [5][]string{ + {"ne", "po", "ut", "st", "št", "pi", "so"}, + {"nedeľa", "pondelok", "utorok", "streda", "štvrtok", "piatok", "sobota"}, + {"jan", "feb", "mar", "apr", "máj", "jún", "júl", "aug", "sep", "okt", "nov", "dec"}, + {"januára", "februára", "marca", "apríla", "mája", "júna", "júla", "augusta", "septembra", 
"októbra", "novembra", "decembra"}, + {}, +} + +var localeTableSkSK = [5][]string{ + {"ne", "po", "ut", "st", "št", "pi", "so"}, + {"nedeľa", "pondelok", "utorok", "streda", "štvrtok", "piatok", "sobota"}, + {"jan", "feb", "mar", "apr", "máj", "jún", "júl", "aug", "sep", "okt", "nov", "dec"}, + {"januára", "februára", "marca", "apríla", "mája", "júna", "júla", "augusta", "septembra", "októbra", "novembra", "decembra"}, + {}, +} + +var localeTableSkr = [5][]string{ + {}, + {"اتوار", "سوموار", "منگل", "ٻدھ", "خمیس", "جمعہ", "چھݨ چھݨ"}, + {}, + {"جنوری", "فروری", "مارچ", "اپریل", "مئی", "جون", "جولائی", "اگست", "ستمبر", "اکتوبر", "نومبر", "دسمبر"}, + {}, +} + +var localeTableSkrPK = [5][]string{ + {}, + {"اتوار", "سوموار", "منگل", "ٻدھ", "خمیس", "جمعہ", "چھݨ چھݨ"}, + {}, + {"جنوری", "فروری", "مارچ", "اپریل", "مئی", "جون", "جولائی", "اگست", "ستمبر", "اکتوبر", "نومبر", "دسمبر"}, + {}, +} + +var localeTableSl = [5][]string{ + {"ned.", "pon.", "tor.", "sre.", "čet.", "pet.", "sob."}, + {"nedelja", "ponedeljek", "torek", "sreda", "četrtek", "petek", "sobota"}, + {"jan.", "feb.", "mar.", "apr.", "maj", "jun.", "jul.", "avg.", "sep.", "okt.", "nov.", "dec."}, + {"januar", "februar", "marec", "april", "maj", "junij", "julij", "avgust", "september", "oktober", "november", "december"}, + {"dop.", "pop."}, +} + +var localeTableSlSI = [5][]string{ + {"ned.", "pon.", "tor.", "sre.", "čet.", "pet.", "sob."}, + {"nedelja", "ponedeljek", "torek", "sreda", "četrtek", "petek", "sobota"}, + {"jan.", "feb.", "mar.", "apr.", "maj", "jun.", "jul.", "avg.", "sep.", "okt.", "nov.", "dec."}, + {"januar", "februar", "marec", "april", "maj", "junij", "julij", "avgust", "september", "oktober", "november", "december"}, + {"dop.", "pop."}, +} + +var localeTableSmn = [5][]string{ + {"pas", "vuo", "maj", "kos", "tuo", "vás", "láv"}, + {"pasepeeivi", "vuossaargâ", "majebaargâ", "koskoho", "tuorâstuv", "vástuppeeivi", "lávurduv"}, + {"uđiv", "kuovâ", "njuhčâ", "cuáŋui", "vyesi", "kesi", "syeini", "porge", "čohčâ", "roovvâd", "skammâ", "juovlâ"}, + {"uđđâivemáánu", "kuovâmáánu", "njuhčâmáánu", "cuáŋuimáánu", "vyesimáánu", "kesimáánu", "syeinimáánu", "porgemáánu", "čohčâmáánu", "roovvâdmáánu", "skammâmáánu", "juovlâmáánu"}, + {"ip.", "ep."}, +} + +var localeTableSmnFI = [5][]string{ + {"pas", "vuo", "maj", "kos", "tuo", "vás", "láv"}, + {"pasepeeivi", "vuossaargâ", "majebaargâ", "koskoho", "tuorâstuv", "vástuppeeivi", "lávurduv"}, + {"uđiv", "kuovâ", "njuhčâ", "cuáŋui", "vyesi", "kesi", "syeini", "porge", "čohčâ", "roovvâd", "skammâ", "juovlâ"}, + {"uđđâivemáánu", "kuovâmáánu", "njuhčâmáánu", "cuáŋuimáánu", "vyesimáánu", "kesimáánu", "syeinimáánu", "porgemáánu", "čohčâmáánu", "roovvâdmáánu", "skammâmáánu", "juovlâmáánu"}, + {"ip.", "ep."}, +} + +var localeTableSn = [5][]string{ + {"Svo", "Muv", "Chp", "Cht", "Chn", "Chs", "Mug"}, + {"Svondo", "Muvhuro", "Chipiri", "Chitatu", "China", "Chishanu", "Mugovera"}, + {"Ndi", "Kuk", "Kur", "Kub", "Chv", "Chk", "Chg", "Nya", "Gun", "Gum", "Mbu", "Zvi"}, + {"Ndira", "Kukadzi", "Kurume", "Kubvumbi", "Chivabvu", "Chikumi", "Chikunguru", "Nyamavhuvhu", "Gunyana", "Gumiguru", "Mbudzi", "Zvita"}, + {"a", "p"}, +} + +var localeTableSnZW = [5][]string{ + {"Svo", "Muv", "Chp", "Cht", "Chn", "Chs", "Mug"}, + {"Svondo", "Muvhuro", "Chipiri", "Chitatu", "China", "Chishanu", "Mugovera"}, + {"Ndi", "Kuk", "Kur", "Kub", "Chv", "Chk", "Chg", "Nya", "Gun", "Gum", "Mbu", "Zvi"}, + {"Ndira", "Kukadzi", "Kurume", "Kubvumbi", "Chivabvu", "Chikumi", "Chikunguru", "Nyamavhuvhu", "Gunyana", "Gumiguru", 
"Mbudzi", "Zvita"}, + {"a", "p"}, +} + +var localeTableSo = [5][]string{ + {"Axd", "Isn", "Tldo", "Arbc", "Khms", "Jmc", "Sbti"}, + {"Axad", "Isniin", "Talaado", "Arbaco", "Khamiis", "Jimco", "Sabti"}, + {"Jan", "Feb", "Mar", "Abr", "May", "Jun", "Lul", "Ogs", "Seb", "Okt", "Nof", "Dis"}, + {"Bisha Koobaad", "Bisha Labaad", "Bisha Saddexaad", "Bisha Afraad", "Bisha Shanaad", "Bisha Lixaad", "Bisha Todobaad", "Bisha Sideedaad", "Bisha Sagaalaad", "Bisha Tobnaad", "Bisha Kow iyo Tobnaad", "Bisha Laba iyo Tobnaad"}, + {"GH", "GD"}, +} + +var localeTableSoDJ = [5][]string{ + {"Axd", "Isn", "Tldo", "Arbc", "Khms", "Jmc", "Sbti"}, + {"Axad", "Isniin", "Talaado", "Arbaco", "Khamiis", "Jimco", "Sabti"}, + {"Jan", "Feb", "Mar", "Abr", "May", "Jun", "Lul", "Ogs", "Seb", "Okt", "Nof", "Dis"}, + {"Bisha Koobaad", "Bisha Labaad", "Bisha Saddexaad", "Bisha Afraad", "Bisha Shanaad", "Bisha Lixaad", "Bisha Todobaad", "Bisha Sideedaad", "Bisha Sagaalaad", "Bisha Tobnaad", "Bisha Kow iyo Tobnaad", "Bisha Laba iyo Tobnaad"}, + {"GH", "GD"}, +} + +var localeTableSoET = [5][]string{ + {"Axd", "Isn", "Tldo", "Arbc", "Khms", "Jmc", "Sbti"}, + {"Axad", "Isniin", "Talaado", "Arbaco", "Khamiis", "Jimco", "Sabti"}, + {"Jan", "Feb", "Mar", "Abr", "May", "Jun", "Lul", "Ogs", "Seb", "Okt", "Nof", "Dis"}, + {"Bisha Koobaad", "Bisha Labaad", "Bisha Saddexaad", "Bisha Afraad", "Bisha Shanaad", "Bisha Lixaad", "Bisha Todobaad", "Bisha Sideedaad", "Bisha Sagaalaad", "Bisha Tobnaad", "Bisha Kow iyo Tobnaad", "Bisha Laba iyo Tobnaad"}, + {"GH", "GD"}, +} + +var localeTableSoKE = [5][]string{ + {"Axd", "Isn", "Tldo", "Arbc", "Khms", "Jmc", "Sbti"}, + {"Axad", "Isniin", "Talaado", "Arbaco", "Khamiis", "Jimco", "Sabti"}, + {"Jan", "Feb", "Mar", "Abr", "May", "Jun", "Lul", "Ogs", "Seb", "Okt", "Nof", "Dis"}, + {"Bisha Koobaad", "Bisha Labaad", "Bisha Saddexaad", "Bisha Afraad", "Bisha Shanaad", "Bisha Lixaad", "Bisha Todobaad", "Bisha Sideedaad", "Bisha Sagaalaad", "Bisha Tobnaad", "Bisha Kow iyo Tobnaad", "Bisha Laba iyo Tobnaad"}, + {"GH", "GD"}, +} + +var localeTableSoSO = [5][]string{ + {"Axd", "Isn", "Tldo", "Arbc", "Khms", "Jmc", "Sbti"}, + {"Axad", "Isniin", "Talaado", "Arbaco", "Khamiis", "Jimco", "Sabti"}, + {"Jan", "Feb", "Mar", "Abr", "May", "Jun", "Lul", "Ogs", "Seb", "Okt", "Nof", "Dis"}, + {"Bisha Koobaad", "Bisha Labaad", "Bisha Saddexaad", "Bisha Afraad", "Bisha Shanaad", "Bisha Lixaad", "Bisha Todobaad", "Bisha Sideedaad", "Bisha Sagaalaad", "Bisha Tobnaad", "Bisha Kow iyo Tobnaad", "Bisha Laba iyo Tobnaad"}, + {"GH", "GD"}, +} + +var localeTableSq = [5][]string{ + {"die", "hën", "mar", "mër", "enj", "pre", "sht"}, + {"e diel", "e hënë", "e martë", "e mërkurë", "e enjte", "e premte", "e shtunë"}, + {"jan", "shk", "mar", "pri", "maj", "qer", "korr", "gush", "sht", "tet", "nën", "dhj"}, + {"janar", "shkurt", "mars", "prill", "maj", "qershor", "korrik", "gusht", "shtator", "tetor", "nëntor", "dhjetor"}, + {"p.d.", "m.d."}, +} + +var localeTableSqAL = [5][]string{ + {"die", "hën", "mar", "mër", "enj", "pre", "sht"}, + {"e diel", "e hënë", "e martë", "e mërkurë", "e enjte", "e premte", "e shtunë"}, + {"jan", "shk", "mar", "pri", "maj", "qer", "korr", "gush", "sht", "tet", "nën", "dhj"}, + {"janar", "shkurt", "mars", "prill", "maj", "qershor", "korrik", "gusht", "shtator", "tetor", "nëntor", "dhjetor"}, + {"p.d.", "m.d."}, +} + +var localeTableSqMK = [5][]string{ + {"die", "hën", "mar", "mër", "enj", "pre", "sht"}, + {"e diel", "e hënë", "e martë", "e mërkurë", "e enjte", "e premte", "e shtunë"}, + {"jan", "shk", 
"mar", "pri", "maj", "qer", "korr", "gush", "sht", "tet", "nën", "dhj"}, + {"janar", "shkurt", "mars", "prill", "maj", "qershor", "korrik", "gusht", "shtator", "tetor", "nëntor", "dhjetor"}, + {"p.d.", "m.d."}, +} + +var localeTableSqXK = [5][]string{ + {"die", "hën", "mar", "mër", "enj", "pre", "sht"}, + {"e diel", "e hënë", "e martë", "e mërkurë", "e enjte", "e premte", "e shtunë"}, + {"jan", "shk", "mar", "pri", "maj", "qer", "korr", "gush", "sht", "tet", "nën", "dhj"}, + {"janar", "shkurt", "mars", "prill", "maj", "qershor", "korrik", "gusht", "shtator", "tetor", "nëntor", "dhjetor"}, + {"p.d.", "m.d."}, +} + +var localeTableSr = [5][]string{ + {"нед", "пон", "уто", "сре", "чет", "пет", "суб"}, + {"недеља", "понедељак", "уторак", "среда", "четвртак", "петак", "субота"}, + {"јан", "феб", "мар", "апр", "мај", "јун", "јул", "авг", "сеп", "окт", "нов", "дец"}, + {"јануар", "фебруар", "март", "април", "мај", "јун", "јул", "август", "септембар", "октобар", "новембар", "децембар"}, + {}, +} + +var localeTableSrCyrl = [5][]string{ + {"нед", "пон", "уто", "сре", "чет", "пет", "суб"}, + {"недеља", "понедељак", "уторак", "среда", "четвртак", "петак", "субота"}, + {"јан", "феб", "мар", "апр", "мај", "јун", "јул", "авг", "сеп", "окт", "нов", "дец"}, + {"јануар", "фебруар", "март", "април", "мај", "јун", "јул", "август", "септембар", "октобар", "новембар", "децембар"}, + {}, +} + +var localeTableSrCyrlBA = [5][]string{ + {"нед", "пон", "уто", "сри", "чет", "пет", "суб"}, + {"недјеља", "понедјељак", "уторак", "сриједа", "четвртак", "петак", "субота"}, + {"јан", "феб", "мар", "апр", "мај", "јун", "јул", "авг", "сеп", "окт", "нов", "дец"}, + {"јануар", "фебруар", "март", "април", "мај", "јун", "јул", "август", "септембар", "октобар", "новембар", "децембар"}, + {"пријеподне", "поподне"}, +} + +var localeTableSrCyrlME = [5][]string{ + {"нед", "пон", "уто", "сре", "чет", "пет", "суб"}, + {"недјеља", "понедељак", "уторак", "сриједа", "четвртак", "петак", "субота"}, + {"јан", "феб", "март", "апр", "мај", "јун", "јул", "авг", "септ", "окт", "нов", "дец"}, + {"јануар", "фебруар", "март", "април", "мај", "јун", "јул", "август", "септембар", "октобар", "новембар", "децембар"}, + {"пријеподне", "поподне"}, +} + +var localeTableSrCyrlRS = [5][]string{ + {"нед", "пон", "уто", "сре", "чет", "пет", "суб"}, + {"недеља", "понедељак", "уторак", "среда", "четвртак", "петак", "субота"}, + {"јан", "феб", "мар", "апр", "мај", "јун", "јул", "авг", "сеп", "окт", "нов", "дец"}, + {"јануар", "фебруар", "март", "април", "мај", "јун", "јул", "август", "септембар", "октобар", "новембар", "децембар"}, + {}, +} + +var localeTableSrCyrlXK = [5][]string{ + {"нед", "пон", "уто", "сре", "чет", "пет", "суб"}, + {"недеља", "понедељак", "уторак", "среда", "четвртак", "петак", "субота"}, + {"јан", "феб", "март", "апр", "мај", "јун", "јул", "авг", "септ", "окт", "нов", "дец"}, + {"јануар", "фебруар", "март", "април", "мај", "јун", "јул", "август", "септембар", "октобар", "новембар", "децембар"}, + {}, +} + +var localeTableSrLatn = [5][]string{ + {"ned", "pon", "uto", "sre", "čet", "pet", "sub"}, + {"nedelja", "ponedeljak", "utorak", "sreda", "četvrtak", "petak", "subota"}, + {"jan", "feb", "mar", "apr", "maj", "jun", "jul", "avg", "sep", "okt", "nov", "dec"}, + {"januar", "februar", "mart", "april", "maj", "jun", "jul", "avgust", "septembar", "oktobar", "novembar", "decembar"}, + {}, +} + +var localeTableSrLatnBA = [5][]string{ + {"ned", "pon", "uto", "sri", "čet", "pet", "sub"}, + {"nedjelja", "ponedjeljak", "utorak", "srijeda", 
"četvrtak", "petak", "subota"}, + {"jan", "feb", "mar", "apr", "maj", "jun", "jul", "avg", "sep", "okt", "nov", "dec"}, + {"januar", "februar", "mart", "april", "maj", "jun", "jul", "avgust", "septembar", "oktobar", "novembar", "decembar"}, + {"prijepodne", "popodne"}, +} + +var localeTableSrLatnME = [5][]string{ + {"ned", "pon", "uto", "sre", "čet", "pet", "sub"}, + {"nedjelja", "ponedeljak", "utorak", "srijeda", "četvrtak", "petak", "subota"}, + {"jan", "feb", "mart", "apr", "maj", "jun", "jul", "avg", "sept", "okt", "nov", "dec"}, + {"januar", "februar", "mart", "april", "maj", "jun", "jul", "avgust", "septembar", "oktobar", "novembar", "decembar"}, + {"prijepodne", "popodne"}, +} + +var localeTableSrLatnRS = [5][]string{ + {"ned", "pon", "uto", "sre", "čet", "pet", "sub"}, + {"nedelja", "ponedeljak", "utorak", "sreda", "četvrtak", "petak", "subota"}, + {"jan", "feb", "mar", "apr", "maj", "jun", "jul", "avg", "sep", "okt", "nov", "dec"}, + {"januar", "februar", "mart", "april", "maj", "jun", "jul", "avgust", "septembar", "oktobar", "novembar", "decembar"}, + {}, +} + +var localeTableSrLatnXK = [5][]string{ + {"ned", "pon", "uto", "sre", "čet", "pet", "sub"}, + {"nedelja", "ponedeljak", "utorak", "sreda", "četvrtak", "petak", "subota"}, + {"jan", "feb", "mart", "apr", "maj", "jun", "jul", "avg", "sept", "okt", "nov", "dec"}, + {"januar", "februar", "mart", "april", "maj", "jun", "jul", "avgust", "septembar", "oktobar", "novembar", "decembar"}, + {}, +} + +var localeTableSs = [5][]string{ + {"Son", "Mso", "Bil", "Tsa", "Ne", "Hla", "Mgc"}, + {"Lisontfo", "uMsombuluko", "Lesibili", "Lesitsatfu", "Lesine", "Lesihlanu", "uMgcibelo"}, + {"Bhi", "Van", "Vol", "Mab", "Nkh", "Nhl", "Kho", "Ngc", "Nyo", "Mph", "Lwe", "Ngo"}, + {"Bhimbidvwane", "iNdlovana", "iNdlovu-lenkhulu", "Mabasa", "iNkhwekhweti", "iNhlaba", "Kholwane", "iNgci", "iNyoni", "iMphala", "Lweti", "iNgongoni"}, + {}, +} + +var localeTableSsSZ = [5][]string{ + {"Son", "Mso", "Bil", "Tsa", "Ne", "Hla", "Mgc"}, + {"Lisontfo", "uMsombuluko", "Lesibili", "Lesitsatfu", "Lesine", "Lesihlanu", "uMgcibelo"}, + {"Bhi", "Van", "Vol", "Mab", "Nkh", "Nhl", "Kho", "Ngc", "Nyo", "Mph", "Lwe", "Ngo"}, + {"Bhimbidvwane", "iNdlovana", "iNdlovu-lenkhulu", "Mabasa", "iNkhwekhweti", "iNhlaba", "Kholwane", "iNgci", "iNyoni", "iMphala", "Lweti", "iNgongoni"}, + {}, +} + +var localeTableSsZA = [5][]string{ + {"Son", "Mso", "Bil", "Tsa", "Ne", "Hla", "Mgc"}, + {"Lisontfo", "uMsombuluko", "Lesibili", "Lesitsatfu", "Lesine", "Lesihlanu", "uMgcibelo"}, + {"Bhi", "Van", "Vol", "Mab", "Nkh", "Nhl", "Kho", "Ngc", "Nyo", "Mph", "Lwe", "Ngo"}, + {"Bhimbidvwane", "iNdlovana", "iNdlovu-lenkhulu", "Mabasa", "iNkhwekhweti", "iNhlaba", "Kholwane", "iNgci", "iNyoni", "iMphala", "Lweti", "iNgongoni"}, + {}, +} + +var localeTableSsy = [5][]string{ + {"Nab", "San", "Sal", "Rab", "Cam", "Jum", "Qun"}, + {"Naba Sambat", "Sani", "Salus", "Rabuq", "Camus", "Jumqata", "Qunxa Sambat"}, + {"Qun", "Nah", "Cig", "Agd", "Cax", "Qas", "Qad", "Leq", "Way", "Dit", "Xim", "Kax"}, + {"Qunxa Garablu", "Kudo", "Ciggilta Kudo", "Agda Baxis", "Caxah Alsa", "Qasa Dirri", "Qado Dirri", "Liiqen", "Waysu", "Diteli", "Ximoli", "Kaxxa Garablu"}, + {"saaku", "carra"}, +} + +var localeTableSsyER = [5][]string{ + {"Nab", "San", "Sal", "Rab", "Cam", "Jum", "Qun"}, + {"Naba Sambat", "Sani", "Salus", "Rabuq", "Camus", "Jumqata", "Qunxa Sambat"}, + {"Qun", "Nah", "Cig", "Agd", "Cax", "Qas", "Qad", "Leq", "Way", "Dit", "Xim", "Kax"}, + {"Qunxa Garablu", "Kudo", "Ciggilta Kudo", "Agda Baxis", "Caxah Alsa", 
"Qasa Dirri", "Qado Dirri", "Liiqen", "Waysu", "Diteli", "Ximoli", "Kaxxa Garablu"}, + {"saaku", "carra"}, +} + +var localeTableSt = [5][]string{ + {"Son", "Mma", "Bed", "Rar", "Ne", "Hla", "Moq"}, + {"Sontaha", "Mmantaha", "Labobedi", "Laboraru", "Labone", "Labohlane", "Moqebelo"}, + {"Phe", "Kol", "Ube", "Mme", "Mot", "Jan", "Upu", "Pha", "Leo", "Mph", "Pun", "Tsh"}, + {"Phesekgong", "Hlakola", "Hlakubele", "Mmese", "Motsheanong", "Phupjane", "Phupu", "Phata", "Leotshe", "Mphalane", "Pundungwane", "Tshitwe"}, + {}, +} + +var localeTableStLS = [5][]string{ + {"Son", "Mma", "Bed", "Rar", "Ne", "Hla", "Moq"}, + {"Sontaha", "Mmantaha", "Labobedi", "Laboraru", "Labone", "Labohlane", "Moqebelo"}, + {"Phe", "Kol", "Ube", "Mme", "Mot", "Jan", "Upu", "Pha", "Leo", "Mph", "Pun", "Tsh"}, + {"Phesekgong", "Hlakola", "Hlakubele", "Mmese", "Motsheanong", "Phupjane", "Phupu", "Phata", "Leotshe", "Mphalane", "Pundungwane", "Tshitwe"}, + {}, +} + +var localeTableStZA = [5][]string{ + {"Son", "Mma", "Bed", "Rar", "Ne", "Hla", "Moq"}, + {"Sontaha", "Mmantaha", "Labobedi", "Laboraru", "Labone", "Labohlane", "Moqebelo"}, + {"Phe", "Kol", "Ube", "Mme", "Mot", "Jan", "Upu", "Pha", "Leo", "Mph", "Pun", "Tsh"}, + {"Phesekgong", "Hlakola", "Hlakubele", "Mmese", "Motsheanong", "Phupjane", "Phupu", "Phata", "Leotshe", "Mphalane", "Pundungwane", "Tshitwe"}, + {}, +} + +var localeTableSu = [5][]string{ + {"Mng", "Sen", "Sal", "Reb", "Kem", "Jum", "Sap"}, + {"Minggu", "Senén", "Salasa", "Rebo", "Kemis", "Jumaah", "Saptu"}, + {"Jan", "Péb", "Mar", "Apr", "Méi", "Jun", "Jul", "Ags", "Sép", "Okt", "Nop", "Dés"}, + {"Januari", "Pébruari", "Maret", "April", "Méi", "Juni", "Juli", "Agustus", "Séptémber", "Oktober", "Nopémber", "Désémber"}, + {}, +} + +var localeTableSuLatn = [5][]string{ + {"Mng", "Sen", "Sal", "Reb", "Kem", "Jum", "Sap"}, + {"Minggu", "Senén", "Salasa", "Rebo", "Kemis", "Jumaah", "Saptu"}, + {"Jan", "Péb", "Mar", "Apr", "Méi", "Jun", "Jul", "Ags", "Sép", "Okt", "Nop", "Dés"}, + {"Januari", "Pébruari", "Maret", "April", "Méi", "Juni", "Juli", "Agustus", "Séptémber", "Oktober", "Nopémber", "Désémber"}, + {}, +} + +var localeTableSuLatnID = [5][]string{ + {"Mng", "Sen", "Sal", "Reb", "Kem", "Jum", "Sap"}, + {"Minggu", "Senén", "Salasa", "Rebo", "Kemis", "Jumaah", "Saptu"}, + {"Jan", "Péb", "Mar", "Apr", "Méi", "Jun", "Jul", "Ags", "Sép", "Okt", "Nop", "Dés"}, + {"Januari", "Pébruari", "Maret", "April", "Méi", "Juni", "Juli", "Agustus", "Séptémber", "Oktober", "Nopémber", "Désémber"}, + {}, +} + +var localeTableSv = [5][]string{ + {"sön", "mån", "tis", "ons", "tors", "fre", "lör"}, + {"söndag", "måndag", "tisdag", "onsdag", "torsdag", "fredag", "lördag"}, + {"jan.", "feb.", "mars", "apr.", "maj", "juni", "juli", "aug.", "sep.", "okt.", "nov.", "dec."}, + {"januari", "februari", "mars", "april", "maj", "juni", "juli", "augusti", "september", "oktober", "november", "december"}, + {"fm", "em"}, +} + +var localeTableSvAX = [5][]string{ + {"sön", "mån", "tis", "ons", "tors", "fre", "lör"}, + {"söndag", "måndag", "tisdag", "onsdag", "torsdag", "fredag", "lördag"}, + {"jan.", "feb.", "mars", "apr.", "maj", "juni", "juli", "aug.", "sep.", "okt.", "nov.", "dec."}, + {"januari", "februari", "mars", "april", "maj", "juni", "juli", "augusti", "september", "oktober", "november", "december"}, + {"fm", "em"}, +} + +var localeTableSvFI = [5][]string{ + {"sön", "mån", "tis", "ons", "tors", "fre", "lör"}, + {"söndag", "måndag", "tisdag", "onsdag", "torsdag", "fredag", "lördag"}, + {"jan.", "feb.", "mars", "apr.", "maj", "juni", 
"juli", "aug.", "sep.", "okt.", "nov.", "dec."}, + {"januari", "februari", "mars", "april", "maj", "juni", "juli", "augusti", "september", "oktober", "november", "december"}, + {"fm", "em"}, +} + +var localeTableSvSE = [5][]string{ + {"sön", "mån", "tis", "ons", "tors", "fre", "lör"}, + {"söndag", "måndag", "tisdag", "onsdag", "torsdag", "fredag", "lördag"}, + {"jan.", "feb.", "mars", "apr.", "maj", "juni", "juli", "aug.", "sep.", "okt.", "nov.", "dec."}, + {"januari", "februari", "mars", "april", "maj", "juni", "juli", "augusti", "september", "oktober", "november", "december"}, + {"fm", "em"}, +} + +var localeTableSw = [5][]string{ + {}, + {"Jumapili", "Jumatatu", "Jumanne", "Jumatano", "Alhamisi", "Ijumaa", "Jumamosi"}, + {"Jan", "Feb", "Mac", "Apr", "Mei", "Jun", "Jul", "Ago", "Sep", "Okt", "Nov", "Des"}, + {"Januari", "Februari", "Machi", "Aprili", "Mei", "Juni", "Julai", "Agosti", "Septemba", "Oktoba", "Novemba", "Desemba"}, + {"am", "pm"}, +} + +var localeTableSwCD = [5][]string{ + {}, + {"Jumapili", "Jumatatu", "Jumanne", "Jumatano", "Alhamisi", "Ijumaa", "Jumamosi"}, + {"Jan", "Feb", "Mac", "Apr", "Mei", "Jun", "Jul", "Ago", "Sep", "Okt", "Nov", "Des"}, + {"Januari", "Februari", "Machi", "Aprili", "Mei", "Juni", "Julai", "Agosti", "Septemba", "Oktoba", "Novemba", "Desemba"}, + {"am", "pm"}, +} + +var localeTableSwKE = [5][]string{ + {}, + {"Jumapili", "Jumatatu", "Jumanne", "Jumatano", "Alhamisi", "Ijumaa", "Jumamosi"}, + {"Jan", "Feb", "Mac", "Apr", "Mei", "Jun", "Jul", "Ago", "Sep", "Okt", "Nov", "Des"}, + {"Januari", "Februari", "Machi", "Aprili", "Mei", "Juni", "Julai", "Agosti", "Septemba", "Oktoba", "Novemba", "Desemba"}, + {"am", "pm"}, +} + +var localeTableSwTZ = [5][]string{ + {}, + {"Jumapili", "Jumatatu", "Jumanne", "Jumatano", "Alhamisi", "Ijumaa", "Jumamosi"}, + {"Jan", "Feb", "Mac", "Apr", "Mei", "Jun", "Jul", "Ago", "Sep", "Okt", "Nov", "Des"}, + {"Januari", "Februari", "Machi", "Aprili", "Mei", "Juni", "Julai", "Agosti", "Septemba", "Oktoba", "Novemba", "Desemba"}, + {"am", "pm"}, +} + +var localeTableSwUG = [5][]string{ + {}, + {"Jumapili", "Jumatatu", "Jumanne", "Jumatano", "Alhamisi", "Ijumaa", "Jumamosi"}, + {"Jan", "Feb", "Mac", "Apr", "Mei", "Jun", "Jul", "Ago", "Sep", "Okt", "Nov", "Des"}, + {"Januari", "Februari", "Machi", "Aprili", "Mei", "Juni", "Julai", "Agosti", "Septemba", "Oktoba", "Novemba", "Desemba"}, + {"am", "pm"}, +} + +var localeTableSyr = [5][]string{ + {"ܚܕ", "ܬܪܝܢ", "ܬܠܬ", "ܐܪܒܥ", "ܚܡܫ", "ܥܪܘ", "ܫܒܬܐ"}, + {"ܚܕܒܫܒܐ", "ܬܪܝܢܒܫܒܐ", "ܬܠܬܒܫܒܐ", "ܐܪܒܥܒܫܒܐ", "ܚܡܫܒܫܒܐ", "ܥܪܘܒܬܐ", "ܫܒܬܐ"}, + {"ܟܢܘܢ ܒ", "ܫܒܛ", "ܐܕܪ", "ܢܝܣܢ", "ܐܝܪ", "ܚܙܝܪܢ", "ܬܡܘܙ", "ܐܒ", "ܐܝܠܘܠ", "ܬܫܪܝܢ ܐ", "ܬܫܪܝܢ ܒ", "ܟܢܘܢ ܐ"}, + {"ܟܢܘܢ ܐܚܪܝܐ", "ܫܒܛ", "ܐܕܪ", "ܢܝܣܢ", "ܐܝܪ", "ܚܙܝܪܢ", "ܬܡܘܙ", "ܐܒ", "ܐܝܠܘܠ", "ܬܫܪܝܢ ܩܕܡܝܐ", "ܬܫܪܝܢ ܐܚܪܝܐ", "ܟܢܘܢ ܩܕܡܝܐ"}, + {"܏ܩܛ‌", "܏ܒܛ‌"}, +} + +var localeTableSyrIQ = [5][]string{ + {"ܚܕ", "ܬܪܝܢ", "ܬܠܬ", "ܐܪܒܥ", "ܚܡܫ", "ܥܪܘ", "ܫܒܬܐ"}, + {"ܚܕܒܫܒܐ", "ܬܪܝܢܒܫܒܐ", "ܬܠܬܒܫܒܐ", "ܐܪܒܥܒܫܒܐ", "ܚܡܫܒܫܒܐ", "ܥܪܘܒܬܐ", "ܫܒܬܐ"}, + {"ܟܢܘܢ ܒ", "ܫܒܛ", "ܐܕܪ", "ܢܝܣܢ", "ܐܝܪ", "ܚܙܝܪܢ", "ܬܡܘܙ", "ܐܒ", "ܐܝܠܘܠ", "ܬܫܪܝܢ ܐ", "ܬܫܪܝܢ ܒ", "ܟܢܘܢ ܐ"}, + {"ܟܢܘܢ ܐܚܪܝܐ", "ܫܒܛ", "ܐܕܪ", "ܢܝܣܢ", "ܐܝܪ", "ܚܙܝܪܢ", "ܬܡܘܙ", "ܐܒ", "ܐܝܠܘܠ", "ܬܫܪܝܢ ܩܕܡܝܐ", "ܬܫܪܝܢ ܐܚܪܝܐ", "ܟܢܘܢ ܩܕܡܝܐ"}, + {"܏ܩܛ‌", "܏ܒܛ‌"}, +} + +var localeTableSyrSY = [5][]string{ + {"ܚܕ", "ܬܪܝܢ", "ܬܠܬ", "ܐܪܒܥ", "ܚܡܫ", "ܥܪܘ", "ܫܒܬܐ"}, + {"ܚܕܒܫܒܐ", "ܬܪܝܢܒܫܒܐ", "ܬܠܬܒܫܒܐ", "ܐܪܒܥܒܫܒܐ", "ܚܡܫܒܫܒܐ", "ܥܪܘܒܬܐ", "ܫܒܬܐ"}, + {"ܟܢܘܢ ܒ", "ܫܒܛ", "ܐܕܪ", "ܢܝܣܢ", "ܐܝܪ", "ܚܙܝܪܢ", "ܬܡܘܙ", "ܐܒ", "ܐܝܠܘܠ", "ܬܫܪܝܢ ܐ", "ܬܫܪܝܢ ܒ", "ܟܢܘܢ ܐ"}, + {"ܟܢܘܢ ܐܚܪܝܐ", "ܫܒܛ", 
"ܐܕܪ", "ܢܝܣܢ", "ܐܝܪ", "ܚܙܝܪܢ", "ܬܡܘܙ", "ܐܒ", "ܐܝܠܘܠ", "ܬܫܪܝܢ ܩܕܡܝܐ", "ܬܫܪܝܢ ܐܚܪܝܐ", "ܟܢܘܢ ܩܕܡܝܐ"}, + {"܏ܩܛ‌", "܏ܒܛ‌"}, +} + +var localeTableSzl = [5][]string{ + {"niy", "pyń", "wto", "str", "szt", "piō", "sob"}, + {"niydziela", "pyńdziałek", "wtorek", "strzoda", "sztwortek", "piōntek", "sobota"}, + {"sty", "lut", "mar", "kwi", "moj", "czy", "lip", "siy", "wrz", "paź", "lis", "gru"}, + {"stycznia", "lutego", "marca", "kwietnia", "moja", "czyrwca", "lipca", "siyrpnia", "września", "października", "listopada", "grudnia"}, + {"dopołedniŏ", "popołedniu"}, +} + +var localeTableSzlPL = [5][]string{ + {"niy", "pyń", "wto", "str", "szt", "piō", "sob"}, + {"niydziela", "pyńdziałek", "wtorek", "strzoda", "sztwortek", "piōntek", "sobota"}, + {"sty", "lut", "mar", "kwi", "moj", "czy", "lip", "siy", "wrz", "paź", "lis", "gru"}, + {"stycznia", "lutego", "marca", "kwietnia", "moja", "czyrwca", "lipca", "siyrpnia", "września", "października", "listopada", "grudnia"}, + {"dopołedniŏ", "popołedniu"}, +} + +var localeTableTa = [5][]string{ + {"ஞாயி.", "திங்.", "செவ்.", "புத.", "வியா.", "வெள்.", "சனி"}, + {"ஞாயிறு", "திங்கள்", "செவ்வாய்", "புதன்", "வியாழன்", "வெள்ளி", "சனி"}, + {"ஜன.", "பிப்.", "மார்.", "ஏப்.", "மே", "ஜூன்", "ஜூலை", "ஆக.", "செப்.", "அக்.", "நவ.", "டிச."}, + {"ஜனவரி", "பிப்ரவரி", "மார்ச்", "ஏப்ரல்", "மே", "ஜூன்", "ஜூலை", "ஆகஸ்ட்", "செப்டம்பர்", "அக்டோபர்", "நவம்பர்", "டிசம்பர்"}, + {}, +} + +var localeTableTaIN = [5][]string{ + {"ஞாயி.", "திங்.", "செவ்.", "புத.", "வியா.", "வெள்.", "சனி"}, + {"ஞாயிறு", "திங்கள்", "செவ்வாய்", "புதன்", "வியாழன்", "வெள்ளி", "சனி"}, + {"ஜன.", "பிப்.", "மார்.", "ஏப்.", "மே", "ஜூன்", "ஜூலை", "ஆக.", "செப்.", "அக்.", "நவ.", "டிச."}, + {"ஜனவரி", "பிப்ரவரி", "மார்ச்", "ஏப்ரல்", "மே", "ஜூன்", "ஜூலை", "ஆகஸ்ட்", "செப்டம்பர்", "அக்டோபர்", "நவம்பர்", "டிசம்பர்"}, + {}, +} + +var localeTableTaLK = [5][]string{ + {"ஞாயி.", "திங்.", "செவ்.", "புத.", "வியா.", "வெள்.", "சனி"}, + {"ஞாயிறு", "திங்கள்", "செவ்வாய்", "புதன்", "வியாழன்", "வெள்ளி", "சனி"}, + {"ஜன.", "பிப்.", "மார்.", "ஏப்.", "மே", "ஜூன்", "ஜூலை", "ஆக.", "செப்.", "அக்.", "நவ.", "டிச."}, + {"ஜனவரி", "பிப்ரவரி", "மார்ச்", "ஏப்ரல்", "மே", "ஜூன்", "ஜூலை", "ஆகஸ்ட்", "செப்டம்பர்", "அக்டோபர்", "நவம்பர்", "டிசம்பர்"}, + {}, +} + +var localeTableTaMY = [5][]string{ + {"ஞாயி.", "திங்.", "செவ்.", "புத.", "வியா.", "வெள்.", "சனி"}, + {"ஞாயிறு", "திங்கள்", "செவ்வாய்", "புதன்", "வியாழன்", "வெள்ளி", "சனி"}, + {"ஜன.", "பிப்.", "மார்.", "ஏப்.", "மே", "ஜூன்", "ஜூலை", "ஆக.", "செப்.", "அக்.", "நவ.", "டிச."}, + {"ஜனவரி", "பிப்ரவரி", "மார்ச்", "ஏப்ரல்", "மே", "ஜூன்", "ஜூலை", "ஆகஸ்ட்", "செப்டம்பர்", "அக்டோபர்", "நவம்பர்", "டிசம்பர்"}, + {}, +} + +var localeTableTaSG = [5][]string{ + {"ஞாயி.", "திங்.", "செவ்.", "புத.", "வியா.", "வெள்.", "சனி"}, + {"ஞாயிறு", "திங்கள்", "செவ்வாய்", "புதன்", "வியாழன்", "வெள்ளி", "சனி"}, + {"ஜன.", "பிப்.", "மார்.", "ஏப்.", "மே", "ஜூன்", "ஜூலை", "ஆக.", "செப்.", "அக்.", "நவ.", "டிச."}, + {"ஜனவரி", "பிப்ரவரி", "மார்ச்", "ஏப்ரல்", "மே", "ஜூன்", "ஜூலை", "ஆகஸ்ட்", "செப்டம்பர்", "அக்டோபர்", "நவம்பர்", "டிசம்பர்"}, + {}, +} + +var localeTableTe = [5][]string{ + {"ఆది", "సోమ", "మంగళ", "బుధ", "గురు", "శుక్ర", "శని"}, + {"ఆదివారం", "సోమవారం", "మంగళవారం", "బుధవారం", "గురువారం", "శుక్రవారం", "శనివారం"}, + {"జన", "ఫిబ్ర", "మార్చి", "ఏప్రి", "మే", "జూన్", "జులై", "ఆగ", "సెప్టెం", "అక్టో", "నవం", "డిసెం"}, + {"జనవరి", "ఫిబ్రవరి", "మార్చి", "ఏప్రిల్", "మే", "జూన్", "జులై", "ఆగస్టు", "సెప్టెంబర్", "అక్టోబర్", "నవంబర్", "డిసెంబర్"}, + {"ఉ", "సా"}, +} + +var localeTableTeIN = [5][]string{ + {"ఆది", "సోమ", "మంగళ", "బుధ", "గురు", "శుక్ర", 
"శని"}, + {"ఆదివారం", "సోమవారం", "మంగళవారం", "బుధవారం", "గురువారం", "శుక్రవారం", "శనివారం"}, + {"జన", "ఫిబ్ర", "మార్చి", "ఏప్రి", "మే", "జూన్", "జులై", "ఆగ", "సెప్టెం", "అక్టో", "నవం", "డిసెం"}, + {"జనవరి", "ఫిబ్రవరి", "మార్చి", "ఏప్రిల్", "మే", "జూన్", "జులై", "ఆగస్టు", "సెప్టెంబర్", "అక్టోబర్", "నవంబర్", "డిసెంబర్"}, + {"ఉ", "సా"}, +} + +var localeTableTeo = [5][]string{ + {"Jum", "Bar", "Aar", "Uni", "Ung", "Kan", "Sab"}, + {"Nakaejuma", "Nakaebarasa", "Nakaare", "Nakauni", "Nakaung’on", "Nakakany", "Nakasabiti"}, + {"Rar", "Muk", "Kwa", "Dun", "Mar", "Mod", "Jol", "Ped", "Sok", "Tib", "Lab", "Poo"}, + {"Orara", "Omuk", "Okwamg’", "Odung’el", "Omaruk", "Omodok’king’ol", "Ojola", "Opedel", "Osokosokoma", "Otibar", "Olabor", "Opoo"}, + {"Taparachu", "Ebongi"}, +} + +var localeTableTeoKE = [5][]string{ + {"Jum", "Bar", "Aar", "Uni", "Ung", "Kan", "Sab"}, + {"Nakaejuma", "Nakaebarasa", "Nakaare", "Nakauni", "Nakaung’on", "Nakakany", "Nakasabiti"}, + {"Rar", "Muk", "Kwa", "Dun", "Mar", "Mod", "Jol", "Ped", "Sok", "Tib", "Lab", "Poo"}, + {"Orara", "Omuk", "Okwamg’", "Odung’el", "Omaruk", "Omodok’king’ol", "Ojola", "Opedel", "Osokosokoma", "Otibar", "Olabor", "Opoo"}, + {"Taparachu", "Ebongi"}, +} + +var localeTableTeoUG = [5][]string{ + {"Jum", "Bar", "Aar", "Uni", "Ung", "Kan", "Sab"}, + {"Nakaejuma", "Nakaebarasa", "Nakaare", "Nakauni", "Nakaung’on", "Nakakany", "Nakasabiti"}, + {"Rar", "Muk", "Kwa", "Dun", "Mar", "Mod", "Jol", "Ped", "Sok", "Tib", "Lab", "Poo"}, + {"Orara", "Omuk", "Okwamg’", "Odung’el", "Omaruk", "Omodok’king’ol", "Ojola", "Opedel", "Osokosokoma", "Otibar", "Olabor", "Opoo"}, + {"Taparachu", "Ebongi"}, +} + +var localeTableTg = [5][]string{ + {"Яшб", "Дшб", "Сшб", "Чшб", "Пшб", "Ҷмъ", "Шнб"}, + {"Якшанбе", "Душанбе", "Сешанбе", "Чоршанбе", "Панҷшанбе", "Ҷумъа", "Шанбе"}, + {"Янв", "Фев", "Мар", "Апр", "Май", "Июн", "Июл", "Авг", "Сен", "Окт", "Ноя", "Дек"}, + {"Январ", "Феврал", "Март", "Апрел", "Май", "Июн", "Июл", "Август", "Сентябр", "Октябр", "Ноябр", "Декабр"}, + {}, +} + +var localeTableTgTJ = [5][]string{ + {"Яшб", "Дшб", "Сшб", "Чшб", "Пшб", "Ҷмъ", "Шнб"}, + {"Якшанбе", "Душанбе", "Сешанбе", "Чоршанбе", "Панҷшанбе", "Ҷумъа", "Шанбе"}, + {"Янв", "Фев", "Мар", "Апр", "Май", "Июн", "Июл", "Авг", "Сен", "Окт", "Ноя", "Дек"}, + {"Январ", "Феврал", "Март", "Апрел", "Май", "Июн", "Июл", "Август", "Сентябр", "Октябр", "Ноябр", "Декабр"}, + {}, +} + +var localeTableTh = [5][]string{ + {"อา.", "จ.", "อ.", "พ.", "พฤ.", "ศ.", "ส."}, + {"วันอาทิตย์", "วันจันทร์", "วันอังคาร", "วันพุธ", "วันพฤหัสบดี", "วันศุกร์", "วันเสาร์"}, + {"ม.ค.", "ก.พ.", "มี.ค.", "เม.ย.", "พ.ค.", "มิ.ย.", "ก.ค.", "ส.ค.", "ก.ย.", "ต.ค.", "พ.ย.", "ธ.ค."}, + {"มกราคม", "กุมภาพันธ์", "มีนาคม", "เมษายน", "พฤษภาคม", "มิถุนายน", "กรกฎาคม", "สิงหาคม", "กันยายน", "ตุลาคม", "พฤศจิกายน", "ธันวาคม"}, + {"a", "p"}, +} + +var localeTableThTH = [5][]string{ + {"อา.", "จ.", "อ.", "พ.", "พฤ.", "ศ.", "ส."}, + {"วันอาทิตย์", "วันจันทร์", "วันอังคาร", "วันพุธ", "วันพฤหัสบดี", "วันศุกร์", "วันเสาร์"}, + {"ม.ค.", "ก.พ.", "มี.ค.", "เม.ย.", "พ.ค.", "มิ.ย.", "ก.ค.", "ส.ค.", "ก.ย.", "ต.ค.", "พ.ย.", "ธ.ค."}, + {"มกราคม", "กุมภาพันธ์", "มีนาคม", "เมษายน", "พฤษภาคม", "มิถุนายน", "กรกฎาคม", "สิงหาคม", "กันยายน", "ตุลาคม", "พฤศจิกายน", "ธันวาคม"}, + {"a", "p"}, +} + +var localeTableTi = [5][]string{ + {"ሰን", "ሰኑ", "ሰሉ", "ረቡ", "ሓሙ", "ዓር", "ቀዳ"}, + {"ሰንበት", "ሰኑይ", "ሰሉስ", "ረቡዕ", "ሓሙስ", "ዓርቢ", "ቀዳም"}, + {"ጥሪ", "ለካ", "መጋ", "ሚያ", "ግን", "ሰነ", "ሓም", "ነሓ", "መስ", "ጥቅ", "ሕዳ", "ታሕ"}, + {"ጥሪ", "ለካቲት", "መጋቢት", "ሚያዝያ", "ግንቦት", "ሰነ", "ሓምለ", "ነሓሰ", 
"መስከረም", "ጥቅምቲ", "ሕዳር", "ታሕሳስ"}, + {"ቅ.ቀ.", "ድ.ቀ."}, +} + +var localeTableTiER = [5][]string{ + {"ሰን", "ሰኑ", "ሰሉ", "ረቡ", "ሓሙ", "ዓር", "ቀዳ"}, + {"ሰንበት", "ሰኑይ", "ሰሉስ", "ረቡዕ", "ሓሙስ", "ዓርቢ", "ቀዳም"}, + {"ጥሪ", "ለካ", "መጋ", "ሚያ", "ግን", "ሰነ", "ሓም", "ነሓ", "መስ", "ጥቅ", "ሕዳ", "ታሕ"}, + {"ጥሪ", "ለካቲት", "መጋቢት", "ሚያዝያ", "ግንቦት", "ሰነ", "ሓምለ", "ነሓሰ", "መስከረም", "ጥቅምቲ", "ሕዳር", "ታሕሳስ"}, + {"ቅ.ቀ.", "ድ.ቀ."}, +} + +var localeTableTiET = [5][]string{ + {"ሰን", "ሰኑ", "ሰሉ", "ረቡ", "ሓሙ", "ዓር", "ቀዳ"}, + {"ሰንበት", "ሰኑይ", "ሰሉስ", "ረቡዕ", "ሓሙስ", "ዓርቢ", "ቀዳም"}, + {"ጥሪ", "ለካ", "መጋ", "ሚያ", "ግን", "ሰነ", "ሓም", "ነሓ", "መስ", "ጥቅ", "ሕዳ", "ታሕ"}, + {"ጥሪ", "ለካቲት", "መጋቢት", "ሚያዝያ", "ግንቦት", "ሰነ", "ሓምለ", "ነሓሰ", "መስከረም", "ጥቅምቲ", "ሕዳር", "ታሕሳስ"}, + {"ቅ.ቀ.", "ድ.ቀ."}, +} + +var localeTableTig = [5][]string{ + {"ሰ/ዓ", "ሰኖ", "ታላሸ", "ኣረር", "ከሚሽ", "ጅምዓ", "ሰ/ን"}, + {"ሰንበት ዓባይ", "ሰኖ", "ታላሸኖ", "ኣረርባዓ", "ከሚሽ", "ጅምዓት", "ሰንበት ንኢሽ"}, + {"ጃንዩ", "ፌብሩ", "ማርች", "ኤፕረ", "ሜይ", "ጁን", "ጁላይ", "ኦገስ", "ሴፕቴ", "ኦክተ", "ኖቬም", "ዲሴም"}, + {"ጃንዩወሪ", "ፌብሩወሪ", "ማርች", "ኤፕረል", "ሜይ", "ጁን", "ጁላይ", "ኦገስት", "ሴፕቴምበር", "ኦክተውበር", "ኖቬምበር", "ዲሴምበር"}, + {"ቀደምሰርምዕል", "ሓቆስርምዕል"}, +} + +var localeTableTigER = [5][]string{ + {"ሰ/ዓ", "ሰኖ", "ታላሸ", "ኣረር", "ከሚሽ", "ጅምዓ", "ሰ/ን"}, + {"ሰንበት ዓባይ", "ሰኖ", "ታላሸኖ", "ኣረርባዓ", "ከሚሽ", "ጅምዓት", "ሰንበት ንኢሽ"}, + {"ጃንዩ", "ፌብሩ", "ማርች", "ኤፕረ", "ሜይ", "ጁን", "ጁላይ", "ኦገስ", "ሴፕቴ", "ኦክተ", "ኖቬም", "ዲሴም"}, + {"ጃንዩወሪ", "ፌብሩወሪ", "ማርች", "ኤፕረል", "ሜይ", "ጁን", "ጁላይ", "ኦገስት", "ሴፕቴምበር", "ኦክተውበር", "ኖቬምበር", "ዲሴምበር"}, + {"ቀደምሰርምዕል", "ሓቆስርምዕል"}, +} + +var localeTableTk = [5][]string{ + {"ýek", "duş", "siş", "çar", "pen", "ann", "şen"}, + {"ýekşenbe", "duşenbe", "sişenbe", "çarşenbe", "penşenbe", "anna", "şenbe"}, + {"ýan", "few", "mart", "apr", "maý", "iýun", "iýul", "awg", "sen", "okt", "noý", "dek"}, + {"ýanwar", "fewral", "mart", "aprel", "maý", "iýun", "iýul", "awgust", "sentýabr", "oktýabr", "noýabr", "dekabr"}, + {"go.öň", "go.soň"}, +} + +var localeTableTkTM = [5][]string{ + {"ýek", "duş", "siş", "çar", "pen", "ann", "şen"}, + {"ýekşenbe", "duşenbe", "sişenbe", "çarşenbe", "penşenbe", "anna", "şenbe"}, + {"ýan", "few", "mart", "apr", "maý", "iýun", "iýul", "awg", "sen", "okt", "noý", "dek"}, + {"ýanwar", "fewral", "mart", "aprel", "maý", "iýun", "iýul", "awgust", "sentýabr", "oktýabr", "noýabr", "dekabr"}, + {"go.öň", "go.soň"}, +} + +var localeTableTn = [5][]string{ + {"Tsh", "Mos", "Labb", "Labr", "Labn", "Labt", "Mat"}, + {"Tshipi", "Mosopulogo", "Labobedi", "Laboraro", "Labone", "Labotlhano", "Matlhatso"}, + {"Fer", "Tlh", "Mop", "Mor", "Mot", "See", "Phu", "Pha", "Lwe", "Dip", "Ngw", "Sed"}, + {"Ferikgong", "Tlhakole", "Mopitlo", "Moranang", "Motsheganang", "Seetebosigo", "Phukwi", "Phatwe", "Lwetse", "Diphalane", "Ngwanatsele", "Sedimonthole"}, + {"a", "p"}, +} + +var localeTableTnBW = [5][]string{ + {"Tsh", "Mos", "Labb", "Labr", "Labn", "Labt", "Mat"}, + {"Tshipi", "Mosopulogo", "Labobedi", "Laboraro", "Labone", "Labotlhano", "Matlhatso"}, + {"Fer", "Tlh", "Mop", "Mor", "Mot", "See", "Phu", "Pha", "Lwe", "Dip", "Ngw", "Sed"}, + {"Ferikgong", "Tlhakole", "Mopitlo", "Moranang", "Motsheganang", "Seetebosigo", "Phukwi", "Phatwe", "Lwetse", "Diphalane", "Ngwanatsele", "Sedimonthole"}, + {"a", "p"}, +} + +var localeTableTnZA = [5][]string{ + {"Tsh", "Mos", "Labb", "Labr", "Labn", "Labt", "Mat"}, + {"Tshipi", "Mosopulogo", "Labobedi", "Laboraro", "Labone", "Labotlhano", "Matlhatso"}, + {"Fer", "Tlh", "Mop", "Mor", "Mot", "See", "Phu", "Pha", "Lwe", "Dip", "Ngw", "Sed"}, + {"Ferikgong", "Tlhakole", "Mopitlo", "Moranang", "Motsheganang", "Seetebosigo", "Phukwi", "Phatwe", "Lwetse", 
"Diphalane", "Ngwanatsele", "Sedimonthole"}, + {"a", "p"}, +} + +var localeTableTo = [5][]string{ + {"Sāp", "Mōn", "Tūs", "Pul", "Tuʻa", "Fal", "Tok"}, + {"Sāpate", "Mōnite", "Tūsite", "Pulelulu", "Tuʻapulelulu", "Falaite", "Tokonaki"}, + {"Sān", "Fēp", "Maʻa", "ʻEpe", "Mē", "Sun", "Siu", "ʻAok", "Sēp", "ʻOka", "Nōv", "Tīs"}, + {"Sānuali", "Fēpueli", "Maʻasi", "ʻEpeleli", "Mē", "Sune", "Siulai", "ʻAokosi", "Sēpitema", "ʻOkatopa", "Nōvema", "Tīsema"}, + {"HH", "EA"}, +} + +var localeTableToTO = [5][]string{ + {"Sāp", "Mōn", "Tūs", "Pul", "Tuʻa", "Fal", "Tok"}, + {"Sāpate", "Mōnite", "Tūsite", "Pulelulu", "Tuʻapulelulu", "Falaite", "Tokonaki"}, + {"Sān", "Fēp", "Maʻa", "ʻEpe", "Mē", "Sun", "Siu", "ʻAok", "Sēp", "ʻOka", "Nōv", "Tīs"}, + {"Sānuali", "Fēpueli", "Maʻasi", "ʻEpeleli", "Mē", "Sune", "Siulai", "ʻAokosi", "Sēpitema", "ʻOkatopa", "Nōvema", "Tīsema"}, + {"HH", "EA"}, +} + +var localeTableTok = [5][]string{ + {}, + {"suno esun #7", "suno esun #1", "suno esun #2", "suno esun #3", "suno esun #4", "suno esun #5", "suno esun #6"}, + {}, + {"mun #1", "mun #2", "mun #3", "mun #4", "mun #5", "mun #6", "mun #7", "mun #8", "mun #9", "mun #10", "mun #11", "mun #12"}, + {}, +} + +var localeTableTok001 = [5][]string{ + {}, + {"suno esun #7", "suno esun #1", "suno esun #2", "suno esun #3", "suno esun #4", "suno esun #5", "suno esun #6"}, + {}, + {"mun #1", "mun #2", "mun #3", "mun #4", "mun #5", "mun #6", "mun #7", "mun #8", "mun #9", "mun #10", "mun #11", "mun #12"}, + {}, +} + +var localeTableTpi = [5][]string{ + {"San", "Man", "Tun", "Tri", "Fon", "Fra", "Sar"}, + {"Sande", "Mande", "Tunde", "Trinde", "Fonde", "Fraide", "Sarere"}, + {"Jan", "Feb", "Mas", "Epr", "Me", "Jun", "Jul", "Oga", "Sep", "Okt", "Nov", "Des"}, + {"Janueri", "Februeri", "Mas", "Epril", "Me", "Jun", "Julai", "Ogas", "Septemba", "Oktoba", "Novemba", "Desemba"}, + {}, +} + +var localeTableTpiPG = [5][]string{ + {"San", "Man", "Tun", "Tri", "Fon", "Fra", "Sar"}, + {"Sande", "Mande", "Tunde", "Trinde", "Fonde", "Fraide", "Sarere"}, + {"Jan", "Feb", "Mas", "Epr", "Me", "Jun", "Jul", "Oga", "Sep", "Okt", "Nov", "Des"}, + {"Janueri", "Februeri", "Mas", "Epril", "Me", "Jun", "Julai", "Ogas", "Septemba", "Oktoba", "Novemba", "Desemba"}, + {}, +} + +var localeTableTr = [5][]string{ + {"Paz", "Pzt", "Sal", "Çar", "Per", "Cum", "Cmt"}, + {"Pazar", "Pazartesi", "Salı", "Çarşamba", "Perşembe", "Cuma", "Cumartesi"}, + {"Oca", "Şub", "Mar", "Nis", "May", "Haz", "Tem", "Ağu", "Eyl", "Eki", "Kas", "Ara"}, + {"Ocak", "Şubat", "Mart", "Nisan", "Mayıs", "Haziran", "Temmuz", "Ağustos", "Eylül", "Ekim", "Kasım", "Aralık"}, + {"ÖÖ", "ÖS"}, +} + +var localeTableTrCY = [5][]string{ + {"Paz", "Pzt", "Sal", "Çar", "Per", "Cum", "Cmt"}, + {"Pazar", "Pazartesi", "Salı", "Çarşamba", "Perşembe", "Cuma", "Cumartesi"}, + {"Oca", "Şub", "Mar", "Nis", "May", "Haz", "Tem", "Ağu", "Eyl", "Eki", "Kas", "Ara"}, + {"Ocak", "Şubat", "Mart", "Nisan", "Mayıs", "Haziran", "Temmuz", "Ağustos", "Eylül", "Ekim", "Kasım", "Aralık"}, + {"ÖÖ", "ÖS"}, +} + +var localeTableTrTR = [5][]string{ + {"Paz", "Pzt", "Sal", "Çar", "Per", "Cum", "Cmt"}, + {"Pazar", "Pazartesi", "Salı", "Çarşamba", "Perşembe", "Cuma", "Cumartesi"}, + {"Oca", "Şub", "Mar", "Nis", "May", "Haz", "Tem", "Ağu", "Eyl", "Eki", "Kas", "Ara"}, + {"Ocak", "Şubat", "Mart", "Nisan", "Mayıs", "Haziran", "Temmuz", "Ağustos", "Eylül", "Ekim", "Kasım", "Aralık"}, + {"ÖÖ", "ÖS"}, +} + +var localeTableTrv = [5][]string{ + {"Emp", "Kin", "Dha", "Tru", "Spa", "Rim", "Mat"}, + {"Jiyax sngayan", "tgKingal jiyax iyax 
sngayan", "tgDha jiyax iyax sngayan", "tgTru jiyax iyax sngayan", "tgSpac jiyax iyax sngayan", "tgRima jiyax iyax sngayan", "tgMataru jiyax iyax sngayan"}, + {"Kii", "Dhi", "Tri", "Spi", "Rii", "Mti", "Emi", "Mai", "Mni", "Mxi", "Mxk", "Mxd"}, + {"Kingal idas", "Dha idas", "Tru idas", "Spat idas", "Rima idas", "Mataru idas", "Empitu idas", "Maspat idas", "Mngari idas", "Maxal idas", "Maxal kingal idas", "Maxal dha idas"}, + {}, +} + +var localeTableTrvTW = [5][]string{ + {"Emp", "Kin", "Dha", "Tru", "Spa", "Rim", "Mat"}, + {"Jiyax sngayan", "tgKingal jiyax iyax sngayan", "tgDha jiyax iyax sngayan", "tgTru jiyax iyax sngayan", "tgSpac jiyax iyax sngayan", "tgRima jiyax iyax sngayan", "tgMataru jiyax iyax sngayan"}, + {"Kii", "Dhi", "Tri", "Spi", "Rii", "Mti", "Emi", "Mai", "Mni", "Mxi", "Mxk", "Mxd"}, + {"Kingal idas", "Dha idas", "Tru idas", "Spat idas", "Rima idas", "Mataru idas", "Empitu idas", "Maspat idas", "Mngari idas", "Maxal idas", "Maxal kingal idas", "Maxal dha idas"}, + {}, +} + +var localeTableTrw = [5][]string{ + {}, + {"ایکشیمے", "دُوشیمے", "گھن آنگا", "چارشیمے", "پَئ شیمے", "شُوگار", "لَو آنگا"}, + {}, + {"جنوری", "فروری", "مارچ", "اپریل", "مئ", "جون", "جولائی", "اگست", "ستمبر", "اکتوبر", "نومبر", "دسمبر"}, + {"a", "p"}, +} + +var localeTableTrwPK = [5][]string{ + {}, + {"ایکشیمے", "دُوشیمے", "گھن آنگا", "چارشیمے", "پَئ شیمے", "شُوگار", "لَو آنگا"}, + {}, + {"جنوری", "فروری", "مارچ", "اپریل", "مئ", "جون", "جولائی", "اگست", "ستمبر", "اکتوبر", "نومبر", "دسمبر"}, + {"a", "p"}, +} + +var localeTableTs = [5][]string{ + {"Son", "Mus", "Bir", "Har", "Ne", "Tlh", "Mug"}, + {"Sonta", "Musumbhunuku", "Ravumbirhi", "Ravunharhu", "Ravumune", "Ravuntlhanu", "Mugqivela"}, + {"Sun", "Yan", "Kul", "Dzi", "Mud", "Kho", "Maw", "Mha", "Ndz", "Nhl", "Huk", "N’w"}, + {"Sunguti", "Nyenyenyani", "Nyenyankulu", "Dzivamisoko", "Mudyaxihi", "Khotavuxika", "Mawuwani", "Mhawuri", "Ndzhati", "Nhlangula", "Hukuri", "N’wendzamhala"}, + {}, +} + +var localeTableTsZA = [5][]string{ + {"Son", "Mus", "Bir", "Har", "Ne", "Tlh", "Mug"}, + {"Sonta", "Musumbhunuku", "Ravumbirhi", "Ravunharhu", "Ravumune", "Ravuntlhanu", "Mugqivela"}, + {"Sun", "Yan", "Kul", "Dzi", "Mud", "Kho", "Maw", "Mha", "Ndz", "Nhl", "Huk", "N’w"}, + {"Sunguti", "Nyenyenyani", "Nyenyankulu", "Dzivamisoko", "Mudyaxihi", "Khotavuxika", "Mawuwani", "Mhawuri", "Ndzhati", "Nhlangula", "Hukuri", "N’wendzamhala"}, + {}, +} + +var localeTableTt = [5][]string{ + {"якш.", "дүш.", "сиш.", "чәр.", "пәнҗ.", "җом.", "шим."}, + {"якшәмбе", "дүшәмбе", "сишәмбе", "чәршәмбе", "пәнҗешәмбе", "җомга", "шимбә"}, + {"гыйн.", "фев.", "мар.", "апр.", "май", "июнь", "июль", "авг.", "сент.", "окт.", "нояб.", "дек."}, + {"гыйнвар", "февраль", "март", "апрель", "май", "июнь", "июль", "август", "сентябрь", "октябрь", "ноябрь", "декабрь"}, + {}, +} + +var localeTableTtRU = [5][]string{ + {"якш.", "дүш.", "сиш.", "чәр.", "пәнҗ.", "җом.", "шим."}, + {"якшәмбе", "дүшәмбе", "сишәмбе", "чәршәмбе", "пәнҗешәмбе", "җомга", "шимбә"}, + {"гыйн.", "фев.", "мар.", "апр.", "май", "июнь", "июль", "авг.", "сент.", "окт.", "нояб.", "дек."}, + {"гыйнвар", "февраль", "март", "апрель", "май", "июнь", "июль", "август", "сентябрь", "октябрь", "ноябрь", "декабрь"}, + {}, +} + +var localeTableTwq = [5][]string{ + {"Alh", "Ati", "Ata", "Ala", "Alm", "Alz", "Asi"}, + {"Alhadi", "Atinni", "Atalaata", "Alarba", "Alhamiisa", "Alzuma", "Asibti"}, + {"Žan", "Fee", "Mar", "Awi", "Me", "Žuw", "Žuy", "Ut", "Sek", "Okt", "Noo", "Dee"}, + {"Žanwiye", "Feewiriye", "Marsi", "Awiril", "Me", "Žuweŋ", "Žuyye", 
"Ut", "Sektanbur", "Oktoobur", "Noowanbur", "Deesanbur"}, + {"Subbaahi", "Zaarikayb"}, +} + +var localeTableTwqNE = [5][]string{ + {"Alh", "Ati", "Ata", "Ala", "Alm", "Alz", "Asi"}, + {"Alhadi", "Atinni", "Atalaata", "Alarba", "Alhamiisa", "Alzuma", "Asibti"}, + {"Žan", "Fee", "Mar", "Awi", "Me", "Žuw", "Žuy", "Ut", "Sek", "Okt", "Noo", "Dee"}, + {"Žanwiye", "Feewiriye", "Marsi", "Awiril", "Me", "Žuweŋ", "Žuyye", "Ut", "Sektanbur", "Oktoobur", "Noowanbur", "Deesanbur"}, + {"Subbaahi", "Zaarikayb"}, +} + +var localeTableTzm = [5][]string{ + {"Asa", "Ayn", "Asn", "Akr", "Akw", "Asm", "Asḍ"}, + {"Asamas", "Aynas", "Asinas", "Akras", "Akwas", "Asimwas", "Asiḍyas"}, + {"Yen", "Yeb", "Mar", "Ibr", "May", "Yun", "Yul", "Ɣuc", "Cut", "Kṭu", "Nwa", "Duj"}, + {"Yennayer", "Yebrayer", "Mars", "Ibrir", "Mayyu", "Yunyu", "Yulyuz", "Ɣuct", "Cutanbir", "Kṭuber", "Nwanbir", "Dujanbir"}, + {"Zdatazal", "Ḍeffiraza"}, +} + +var localeTableTzmMA = [5][]string{ + {"Asa", "Ayn", "Asn", "Akr", "Akw", "Asm", "Asḍ"}, + {"Asamas", "Aynas", "Asinas", "Akras", "Akwas", "Asimwas", "Asiḍyas"}, + {"Yen", "Yeb", "Mar", "Ibr", "May", "Yun", "Yul", "Ɣuc", "Cut", "Kṭu", "Nwa", "Duj"}, + {"Yennayer", "Yebrayer", "Mars", "Ibrir", "Mayyu", "Yunyu", "Yulyuz", "Ɣuct", "Cutanbir", "Kṭuber", "Nwanbir", "Dujanbir"}, + {"Zdatazal", "Ḍeffiraza"}, +} + +var localeTableUg = [5][]string{ + {"يە", "دۈ", "سە", "چا", "پە", "جۈ", "شە"}, + {"يەكشەنبە", "دۈشەنبە", "سەيشەنبە", "چارشەنبە", "پەيشەنبە", "جۈمە", "شەنبە"}, + {}, + {"يانۋار", "فېۋرال", "مارت", "ئاپرېل", "ماي", "ئىيۇن", "ئىيۇل", "ئاۋغۇست", "سېنتەبىر", "ئۆكتەبىر", "نويابىر", "دېكابىر"}, + {"چ.ب", "چ.ك"}, +} + +var localeTableUgCN = [5][]string{ + {"يە", "دۈ", "سە", "چا", "پە", "جۈ", "شە"}, + {"يەكشەنبە", "دۈشەنبە", "سەيشەنبە", "چارشەنبە", "پەيشەنبە", "جۈمە", "شەنبە"}, + {}, + {"يانۋار", "فېۋرال", "مارت", "ئاپرېل", "ماي", "ئىيۇن", "ئىيۇل", "ئاۋغۇست", "سېنتەبىر", "ئۆكتەبىر", "نويابىر", "دېكابىر"}, + {"چ.ب", "چ.ك"}, +} + +var localeTableUk = [5][]string{ + {"нд", "пн", "вт", "ср", "чт", "пт", "сб"}, + {"неділю", "понеділок", "вівторок", "середу", "четвер", "пʼятницю", "суботу"}, + {"січ.", "лют.", "бер.", "квіт.", "трав.", "черв.", "лип.", "серп.", "вер.", "жовт.", "лист.", "груд."}, + {"січня", "лютого", "березня", "квітня", "травня", "червня", "липня", "серпня", "вересня", "жовтня", "листопада", "грудня"}, + {"дп", "пп"}, +} + +var localeTableUkUA = [5][]string{ + {"нд", "пн", "вт", "ср", "чт", "пт", "сб"}, + {"неділю", "понеділок", "вівторок", "середу", "четвер", "пʼятницю", "суботу"}, + {"січ.", "лют.", "бер.", "квіт.", "трав.", "черв.", "лип.", "серп.", "вер.", "жовт.", "лист.", "груд."}, + {"січня", "лютого", "березня", "квітня", "травня", "червня", "липня", "серпня", "вересня", "жовтня", "листопада", "грудня"}, + {"дп", "пп"}, +} + +var localeTableUnd = [5][]string{ + {}, + {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}, + {}, + {"M01", "M02", "M03", "M04", "M05", "M06", "M07", "M08", "M09", "M10", "M11", "M12"}, + {"AM", "PM"}, +} + +var localeTableUr = [5][]string{ + {}, + {"اتوار", "پیر", "منگل", "بدھ", "جمعرات", "جمعہ", "ہفتہ"}, + {}, + {"جنوری", "فروری", "مارچ", "اپریل", "مئی", "جون", "جولائی", "اگست", "ستمبر", "اکتوبر", "نومبر", "دسمبر"}, + {"a", "p"}, +} + +var localeTableUrIN = [5][]string{ + {}, + {"اتوار", "پیر", "منگل", "بدھ", "جمعرات", "جمعہ", "ہفتہ"}, + {}, + {"جنوری", "فروری", "مارچ", "اپریل", "مئی", "جون", "جولائی", "اگست", "ستمبر", "اکتوبر", "نومبر", "دسمبر"}, + {"a", "p"}, +} + +var localeTableUrPK = [5][]string{ + {}, + {"اتوار", "پیر", "منگل", "بدھ", 
"جمعرات", "جمعہ", "ہفتہ"}, + {}, + {"جنوری", "فروری", "مارچ", "اپریل", "مئی", "جون", "جولائی", "اگست", "ستمبر", "اکتوبر", "نومبر", "دسمبر"}, + {"a", "p"}, +} + +var localeTableUz = [5][]string{ + {"Yak", "Dush", "Sesh", "Chor", "Pay", "Jum", "Shan"}, + {"yakshanba", "dushanba", "seshanba", "chorshanba", "payshanba", "juma", "shanba"}, + {"yan", "fev", "mar", "apr", "may", "iyn", "iyl", "avg", "sen", "okt", "noy", "dek"}, + {"yanvar", "fevral", "mart", "aprel", "may", "iyun", "iyul", "avgust", "sentabr", "oktabr", "noyabr", "dekabr"}, + {"TO", "TK"}, +} + +var localeTableUzArab = [5][]string{ + {"ی.", "د.", "س.", "چ.", "پ.", "ج.", "ش."}, + {"یکشنبه", "دوشنبه", "سه‌شنبه", "چهارشنبه", "پنجشنبه", "جمعه", "شنبه"}, + {"جنو", "فبر", "مار", "اپر", "می", "جون", "جول", "اگس", "سپت", "اکت", "نوم", "دسم"}, + {"جنوری", "فبروری", "مارچ", "اپریل", "می", "جون", "جولای", "اگست", "سپتمبر", "اکتوبر", "نومبر", "دسمبر"}, + {}, +} + +var localeTableUzArabAF = [5][]string{ + {"ی.", "د.", "س.", "چ.", "پ.", "ج.", "ش."}, + {"یکشنبه", "دوشنبه", "سه‌شنبه", "چهارشنبه", "پنجشنبه", "جمعه", "شنبه"}, + {"جنو", "فبر", "مار", "اپر", "می", "جون", "جول", "اگس", "سپت", "اکت", "نوم", "دسم"}, + {"جنوری", "فبروری", "مارچ", "اپریل", "می", "جون", "جولای", "اگست", "سپتمبر", "اکتوبر", "نومبر", "دسمبر"}, + {}, +} + +var localeTableUzCyrl = [5][]string{ + {"якш", "душ", "сеш", "чор", "пай", "жум", "шан"}, + {"якшанба", "душанба", "сешанба", "чоршанба", "пайшанба", "жума", "шанба"}, + {"янв", "фев", "мар", "апр", "май", "июн", "июл", "авг", "сен", "окт", "ноя", "дек"}, + {"январ", "феврал", "март", "апрел", "май", "июн", "июл", "август", "сентябр", "октябр", "ноябр", "декабр"}, + {"ТО", "ТК"}, +} + +var localeTableUzCyrlUZ = [5][]string{ + {"якш", "душ", "сеш", "чор", "пай", "жум", "шан"}, + {"якшанба", "душанба", "сешанба", "чоршанба", "пайшанба", "жума", "шанба"}, + {"янв", "фев", "мар", "апр", "май", "июн", "июл", "авг", "сен", "окт", "ноя", "дек"}, + {"январ", "феврал", "март", "апрел", "май", "июн", "июл", "август", "сентябр", "октябр", "ноябр", "декабр"}, + {"ТО", "ТК"}, +} + +var localeTableUzLatn = [5][]string{ + {"Yak", "Dush", "Sesh", "Chor", "Pay", "Jum", "Shan"}, + {"yakshanba", "dushanba", "seshanba", "chorshanba", "payshanba", "juma", "shanba"}, + {"yan", "fev", "mar", "apr", "may", "iyn", "iyl", "avg", "sen", "okt", "noy", "dek"}, + {"yanvar", "fevral", "mart", "aprel", "may", "iyun", "iyul", "avgust", "sentabr", "oktabr", "noyabr", "dekabr"}, + {"TO", "TK"}, +} + +var localeTableUzLatnUZ = [5][]string{ + {"Yak", "Dush", "Sesh", "Chor", "Pay", "Jum", "Shan"}, + {"yakshanba", "dushanba", "seshanba", "chorshanba", "payshanba", "juma", "shanba"}, + {"yan", "fev", "mar", "apr", "may", "iyn", "iyl", "avg", "sen", "okt", "noy", "dek"}, + {"yanvar", "fevral", "mart", "aprel", "may", "iyun", "iyul", "avgust", "sentabr", "oktabr", "noyabr", "dekabr"}, + {"TO", "TK"}, +} + +var localeTableVai = [5][]string{ + {}, + {"ꕞꕌꔵ", "ꗳꗡꘉ", "ꕚꕞꕚ", "ꕉꕞꕒ", "ꕉꔤꕆꕢ", "ꕉꔤꕀꕮ", "ꔻꔬꔳ"}, + {"ꖨꖕꔞ", "ꕒꕡ", "ꕾꖺ", "ꖢꖕ", "ꖑꕱ", "ꖱꘋ", "ꖱꕞ", "ꗛꔕ", "ꕢꕌ", "ꕭꖃ", "ꔞꘋ", "ꖨꖕꗏ"}, + {"ꖨꖕ ꕪꕴ ꔞꔀꕮꕊ", "ꕒꕡꖝꖕ", "ꕾꖺ", "ꖢꖕ", "ꖑꕱ", "ꖱꘋ", "ꖱꕞꔤ", "ꗛꔕ", "ꕢꕌ", "ꕭꖃ", "ꔞꘋꕔꕿ ꕸꖃꗏ", "ꖨꖕ ꕪꕴ ꗏꖺꕮꕊ"}, + {}, +} + +var localeTableVaiLatn = [5][]string{ + {}, + {"lahadi", "tɛɛnɛɛ", "talata", "alaba", "aimisa", "aijima", "siɓiti"}, + {}, + {}, + {}, +} + +var localeTableVaiLatnLR = [5][]string{ + {}, + {"lahadi", "tɛɛnɛɛ", "talata", "alaba", "aimisa", "aijima", "siɓiti"}, + {}, + {}, + {}, +} + +var localeTableVaiVaii = [5][]string{ + {}, + {"ꕞꕌꔵ", "ꗳꗡꘉ", "ꕚꕞꕚ", "ꕉꕞꕒ", "ꕉꔤꕆꕢ", "ꕉꔤꕀꕮ", "ꔻꔬꔳ"}, + 
{"ꖨꖕꔞ", "ꕒꕡ", "ꕾꖺ", "ꖢꖕ", "ꖑꕱ", "ꖱꘋ", "ꖱꕞ", "ꗛꔕ", "ꕢꕌ", "ꕭꖃ", "ꔞꘋ", "ꖨꖕꗏ"}, + {"ꖨꖕ ꕪꕴ ꔞꔀꕮꕊ", "ꕒꕡꖝꖕ", "ꕾꖺ", "ꖢꖕ", "ꖑꕱ", "ꖱꘋ", "ꖱꕞꔤ", "ꗛꔕ", "ꕢꕌ", "ꕭꖃ", "ꔞꘋꕔꕿ ꕸꖃꗏ", "ꖨꖕ ꕪꕴ ꗏꖺꕮꕊ"}, + {}, +} + +var localeTableVaiVaiiLR = [5][]string{ + {}, + {"ꕞꕌꔵ", "ꗳꗡꘉ", "ꕚꕞꕚ", "ꕉꕞꕒ", "ꕉꔤꕆꕢ", "ꕉꔤꕀꕮ", "ꔻꔬꔳ"}, + {"ꖨꖕꔞ", "ꕒꕡ", "ꕾꖺ", "ꖢꖕ", "ꖑꕱ", "ꖱꘋ", "ꖱꕞ", "ꗛꔕ", "ꕢꕌ", "ꕭꖃ", "ꔞꘋ", "ꖨꖕꗏ"}, + {"ꖨꖕ ꕪꕴ ꔞꔀꕮꕊ", "ꕒꕡꖝꖕ", "ꕾꖺ", "ꖢꖕ", "ꖑꕱ", "ꖱꘋ", "ꖱꕞꔤ", "ꗛꔕ", "ꕢꕌ", "ꕭꖃ", "ꔞꘋꕔꕿ ꕸꖃꗏ", "ꖨꖕ ꕪꕴ ꗏꖺꕮꕊ"}, + {}, +} + +var localeTableVe = [5][]string{ + {"Swo", "Mus", "Vhi", "Rar", "Ṋa", "Ṱan", "Mug"}, + {"Swondaha", "Musumbuluwo", "Ḽavhuvhili", "Ḽavhuraru", "Ḽavhuṋa", "Ḽavhuṱanu", "Mugivhela"}, + {"Pha", "Luh", "Ṱhf", "Lam", "Shu", "Lwi", "Lwa", "Ṱha", "Khu", "Tsh", "Ḽar", "Nye"}, + {"Phando", "Luhuhi", "Ṱhafamuhwe", "Lambamai", "Shundunthule", "Fulwi", "Fulwana", "Ṱhangule", "Khubvumedzi", "Tshimedzi", "Ḽara", "Nyendavhusiku"}, + {}, +} + +var localeTableVeZA = [5][]string{ + {"Swo", "Mus", "Vhi", "Rar", "Ṋa", "Ṱan", "Mug"}, + {"Swondaha", "Musumbuluwo", "Ḽavhuvhili", "Ḽavhuraru", "Ḽavhuṋa", "Ḽavhuṱanu", "Mugivhela"}, + {"Pha", "Luh", "Ṱhf", "Lam", "Shu", "Lwi", "Lwa", "Ṱha", "Khu", "Tsh", "Ḽar", "Nye"}, + {"Phando", "Luhuhi", "Ṱhafamuhwe", "Lambamai", "Shundunthule", "Fulwi", "Fulwana", "Ṱhangule", "Khubvumedzi", "Tshimedzi", "Ḽara", "Nyendavhusiku"}, + {}, +} + +var localeTableVec = [5][]string{ + {"dom", "lun", "mar", "mer", "zob", "vèn", "sab"}, + {"doménega", "luni", "marti", "mèrcore", "zoba", "vènare", "sabo"}, + {"jen", "feb", "mar", "apr", "maj", "jug", "luj", "ago", "set", "oto", "nov", "des"}, + {"jenaro", "febraro", "marso", "aprile", "majo", "jugno", "lujo", "agosto", "setenbre", "otobre", "novenbre", "desenbre"}, + {}, +} + +var localeTableVecIT = [5][]string{ + {"dom", "lun", "mar", "mer", "zob", "vèn", "sab"}, + {"doménega", "luni", "marti", "mèrcore", "zoba", "vènare", "sabo"}, + {"jen", "feb", "mar", "apr", "maj", "jug", "luj", "ago", "set", "oto", "nov", "des"}, + {"jenaro", "febraro", "marso", "aprile", "majo", "jugno", "lujo", "agosto", "setenbre", "otobre", "novenbre", "desenbre"}, + {}, +} + +var localeTableVi = [5][]string{ + {"CN", "Th 2", "Th 3", "Th 4", "Th 5", "Th 6", "Th 7"}, + {"Chủ Nhật", "Thứ Hai", "Thứ Ba", "Thứ Tư", "Thứ Năm", "Thứ Sáu", "Thứ Bảy"}, + {"thg 1", "thg 2", "thg 3", "thg 4", "thg 5", "thg 6", "thg 7", "thg 8", "thg 9", "thg 10", "thg 11", "thg 12"}, + {"tháng 1", "tháng 2", "tháng 3", "tháng 4", "tháng 5", "tháng 6", "tháng 7", "tháng 8", "tháng 9", "tháng 10", "tháng 11", "tháng 12"}, + {"SA", "CH"}, +} + +var localeTableViVN = [5][]string{ + {"CN", "Th 2", "Th 3", "Th 4", "Th 5", "Th 6", "Th 7"}, + {"Chủ Nhật", "Thứ Hai", "Thứ Ba", "Thứ Tư", "Thứ Năm", "Thứ Sáu", "Thứ Bảy"}, + {"thg 1", "thg 2", "thg 3", "thg 4", "thg 5", "thg 6", "thg 7", "thg 8", "thg 9", "thg 10", "thg 11", "thg 12"}, + {"tháng 1", "tháng 2", "tháng 3", "tháng 4", "tháng 5", "tháng 6", "tháng 7", "tháng 8", "tháng 9", "tháng 10", "tháng 11", "tháng 12"}, + {"SA", "CH"}, +} + +var localeTableVmw = [5][]string{ + {}, + {"ettiminku", "nihiku noolempwa", "namaanli", "namararu", "namaxexe", "namathanu", "esaabadu"}, + {}, + {"janeiru", "fevereiru", "marsu", "abril", "maiu", "junyu", "julyu", "agostu", "setembru", "outubru", "novembru", "dezembru"}, + {}, +} + +var localeTableVmwMZ = [5][]string{ + {}, + {"ettiminku", "nihiku noolempwa", "namaanli", "namararu", "namaxexe", "namathanu", "esaabadu"}, + {}, + {"janeiru", "fevereiru", "marsu", "abril", "maiu", "junyu", "julyu", "agostu", "setembru", "outubru", "novembru", 
"dezembru"}, + {}, +} + +var localeTableVo = [5][]string{ + {"su.", "mu.", "tu.", "ve.", "dö.", "fr.", "zä."}, + {"sudel", "mudel", "tudel", "vedel", "dödel", "fridel", "zädel"}, + {"yan", "feb", "mäz", "prl", "may", "yun", "yul", "gst", "set", "ton", "nov", "dek"}, + {"yanul", "febul", "mäzul", "prilul", "mayul", "yunul", "yulul", "gustul", "setul", "tobul", "novul", "dekul"}, + {}, +} + +var localeTableVo001 = [5][]string{ + {"su.", "mu.", "tu.", "ve.", "dö.", "fr.", "zä."}, + {"sudel", "mudel", "tudel", "vedel", "dödel", "fridel", "zädel"}, + {"yan", "feb", "mäz", "prl", "may", "yun", "yul", "gst", "set", "ton", "nov", "dek"}, + {"yanul", "febul", "mäzul", "prilul", "mayul", "yunul", "yulul", "gustul", "setul", "tobul", "novul", "dekul"}, + {}, +} + +var localeTableVun = [5][]string{ + {"Jpi", "Jtt", "Jnn", "Jtn", "Alh", "Iju", "Jmo"}, + {"Jumapilyi", "Jumatatuu", "Jumanne", "Jumatanu", "Alhamisi", "Ijumaa", "Jumamosi"}, + {"Jan", "Feb", "Mac", "Apr", "Mei", "Jun", "Jul", "Ago", "Sep", "Okt", "Nov", "Des"}, + {"Januari", "Februari", "Machi", "Aprilyi", "Mei", "Junyi", "Julyai", "Agusti", "Septemba", "Oktoba", "Novemba", "Desemba"}, + {"utuko", "kyiukonyi"}, +} + +var localeTableVunTZ = [5][]string{ + {"Jpi", "Jtt", "Jnn", "Jtn", "Alh", "Iju", "Jmo"}, + {"Jumapilyi", "Jumatatuu", "Jumanne", "Jumatanu", "Alhamisi", "Ijumaa", "Jumamosi"}, + {"Jan", "Feb", "Mac", "Apr", "Mei", "Jun", "Jul", "Ago", "Sep", "Okt", "Nov", "Des"}, + {"Januari", "Februari", "Machi", "Aprilyi", "Mei", "Junyi", "Julyai", "Agusti", "Septemba", "Oktoba", "Novemba", "Desemba"}, + {"utuko", "kyiukonyi"}, +} + +var localeTableWae = [5][]string{ + {"Sun", "Män", "Ziš", "Mit", "Fró", "Fri", "Sam"}, + {"Sunntag", "Mäntag", "Zištag", "Mittwuč", "Fróntag", "Fritag", "Samštag"}, + {"Jen", "Hor", "Mär", "Abr", "Mei", "Brá", "Hei", "Öig", "Her", "Wím", "Win", "Chr"}, + {"Jenner", "Hornig", "Märze", "Abrille", "Meije", "Bráčet", "Heiwet", "Öigšte", "Herbštmánet", "Wímánet", "Wintermánet", "Chrištmánet"}, + {}, +} + +var localeTableWaeCH = [5][]string{ + {"Sun", "Män", "Ziš", "Mit", "Fró", "Fri", "Sam"}, + {"Sunntag", "Mäntag", "Zištag", "Mittwuč", "Fróntag", "Fritag", "Samštag"}, + {"Jen", "Hor", "Mär", "Abr", "Mei", "Brá", "Hei", "Öig", "Her", "Wím", "Win", "Chr"}, + {"Jenner", "Hornig", "Märze", "Abrille", "Meije", "Bráčet", "Heiwet", "Öigšte", "Herbštmánet", "Wímánet", "Wintermánet", "Chrištmánet"}, + {}, +} + +var localeTableWal = [5][]string{ + {}, + {"ወጋ", "ሳይኖ", "ማቆሳኛ", "አሩዋ", "ሃሙሳ", "አርባ", "ቄራ"}, + {"ጃንዩ", "ፌብሩ", "ማርች", "ኤፕረ", "ሜይ", "ጁን", "ጁላይ", "ኦገስ", "ሴፕቴ", "ኦክተ", "ኖቬም", "ዲሴም"}, + {"ጃንዩወሪ", "ፌብሩወሪ", "ማርች", "ኤፕረል", "ሜይ", "ጁን", "ጁላይ", "ኦገስት", "ሴፕቴምበር", "ኦክተውበር", "ኖቬምበር", "ዲሴምበር"}, + {"ማለዶ", "ቃማ"}, +} + +var localeTableWalET = [5][]string{ + {}, + {"ወጋ", "ሳይኖ", "ማቆሳኛ", "አሩዋ", "ሃሙሳ", "አርባ", "ቄራ"}, + {"ጃንዩ", "ፌብሩ", "ማርች", "ኤፕረ", "ሜይ", "ጁን", "ጁላይ", "ኦገስ", "ሴፕቴ", "ኦክተ", "ኖቬም", "ዲሴም"}, + {"ጃንዩወሪ", "ፌብሩወሪ", "ማርች", "ኤፕረል", "ሜይ", "ጁን", "ጁላይ", "ኦገስት", "ሴፕቴምበር", "ኦክተውበር", "ኖቬምበር", "ዲሴምበር"}, + {"ማለዶ", "ቃማ"}, +} + +var localeTableWo = [5][]string{ + {"Dib", "Alt", "Tal", "Àla", "Alx", "Àjj", "Ase"}, + {"Dibéer", "Altine", "Talaata", "Àlarba", "Alxamis", "Àjjuma", "Aseer"}, + {"Sam", "Few", "Mar", "Awr", "Mee", "Suw", "Sul", "Ut", "Sàt", "Okt", "Now", "Des"}, + {"Samwiyee", "Fewriyee", "Mars", "Awril", "Mee", "Suwe", "Sulet", "Ut", "Sàttumbar", "Oktoobar", "Nowàmbar", "Desàmbar"}, + {"Sub", "Ngo"}, +} + +var localeTableWoSN = [5][]string{ + {"Dib", "Alt", "Tal", "Àla", "Alx", "Àjj", "Ase"}, + {"Dibéer", "Altine", "Talaata", "Àlarba", 
"Alxamis", "Àjjuma", "Aseer"}, + {"Sam", "Few", "Mar", "Awr", "Mee", "Suw", "Sul", "Ut", "Sàt", "Okt", "Now", "Des"}, + {"Samwiyee", "Fewriyee", "Mars", "Awril", "Mee", "Suwe", "Sulet", "Ut", "Sàttumbar", "Oktoobar", "Nowàmbar", "Desàmbar"}, + {"Sub", "Ngo"}, +} + +var localeTableXh = [5][]string{ + {"Caw", "Mvu", "Lwesb", "Tha", "Sin", "Hla", "Mgq"}, + {"Cawe", "Mvulo", "Lwesibini", "Lwesithathu", "Lwesine", "Lwesihlanu", "Mgqibelo"}, + {"Jan", "Feb", "Mat", "Epr", "Mey", "Jun", "Jul", "Aga", "Sept", "Okt", "Nov", "Dis"}, + {"Janyuwari", "Februwari", "Matshi", "Epreli", "Meyi", "Juni", "Julayi", "Agasti", "Septemba", "Okthobha", "Novemba", "Disemba"}, + {}, +} + +var localeTableXhZA = [5][]string{ + {"Caw", "Mvu", "Lwesb", "Tha", "Sin", "Hla", "Mgq"}, + {"Cawe", "Mvulo", "Lwesibini", "Lwesithathu", "Lwesine", "Lwesihlanu", "Mgqibelo"}, + {"Jan", "Feb", "Mat", "Epr", "Mey", "Jun", "Jul", "Aga", "Sept", "Okt", "Nov", "Dis"}, + {"Janyuwari", "Februwari", "Matshi", "Epreli", "Meyi", "Juni", "Julayi", "Agasti", "Septemba", "Okthobha", "Novemba", "Disemba"}, + {}, +} + +var localeTableXnr = [5][]string{ + {"तोआर", "सोआर", "मंगल", "बुध", "वीर", "शुक्कर", "शनि"}, + {"तोआर", "सोआर", "मंगलवार", "बुधवार", "वीरवार", "शुक्करवार", "शनिच्चरवार"}, + {"जन॰", "फ़र॰", "मार्च", "अप्रैल", "मई", "जून", "जुल॰", "अग॰", "सित॰", "अक्तू॰", "नव॰", "दिस॰"}, + {"जनवरी", "फ़रवरी", "मार्च", "अप्रैल", "मई", "जून", "जुलाई", "अगस्त", "सितंबर", "अक्तूबर", "नवंबर", "दिसंबर"}, + {"भ्यागा", "दपेहरा/संजा"}, +} + +var localeTableXnrIN = [5][]string{ + {"तोआर", "सोआर", "मंगल", "बुध", "वीर", "शुक्कर", "शनि"}, + {"तोआर", "सोआर", "मंगलवार", "बुधवार", "वीरवार", "शुक्करवार", "शनिच्चरवार"}, + {"जन॰", "फ़र॰", "मार्च", "अप्रैल", "मई", "जून", "जुल॰", "अग॰", "सित॰", "अक्तू॰", "नव॰", "दिस॰"}, + {"जनवरी", "फ़रवरी", "मार्च", "अप्रैल", "मई", "जून", "जुलाई", "अगस्त", "सितंबर", "अक्तूबर", "नवंबर", "दिसंबर"}, + {"भ्यागा", "दपेहरा/संजा"}, +} + +var localeTableXog = [5][]string{ + {"Sabi", "Bala", "Kubi", "Kusa", "Kuna", "Kuta", "Muka"}, + {"Sabiiti", "Balaza", "Owokubili", "Owokusatu", "Olokuna", "Olokutaanu", "Olomukaaga"}, + {"Jan", "Feb", "Mar", "Apu", "Maa", "Juu", "Jul", "Agu", "Seb", "Oki", "Nov", "Des"}, + {"Janwaliyo", "Febwaliyo", "Marisi", "Apuli", "Maayi", "Juuni", "Julaayi", "Agusito", "Sebuttemba", "Okitobba", "Novemba", "Desemba"}, + {"Munkyo", "Eigulo"}, +} + +var localeTableXogUG = [5][]string{ + {"Sabi", "Bala", "Kubi", "Kusa", "Kuna", "Kuta", "Muka"}, + {"Sabiiti", "Balaza", "Owokubili", "Owokusatu", "Olokuna", "Olokutaanu", "Olomukaaga"}, + {"Jan", "Feb", "Mar", "Apu", "Maa", "Juu", "Jul", "Agu", "Seb", "Oki", "Nov", "Des"}, + {"Janwaliyo", "Febwaliyo", "Marisi", "Apuli", "Maayi", "Juuni", "Julaayi", "Agusito", "Sebuttemba", "Okitobba", "Novemba", "Desemba"}, + {"Munkyo", "Eigulo"}, +} + +var localeTableYav = [5][]string{ + {"sd", "md", "mw", "et", "kl", "fl", "ss"}, + {"sɔ́ndiɛ", "móndie", "muányáŋmóndie", "metúkpíápɛ", "kúpélimetúkpiapɛ", "feléte", "séselé"}, + {"o.1", "o.2", "o.3", "o.4", "o.5", "o.6", "o.7", "o.8", "o.9", "o.10", "o.11", "o.12"}, + {"pikítíkítie, oólí ú kutúan", "siɛyɛ́, oóli ú kándíɛ", "ɔnsúmbɔl, oóli ú kátátúɛ", "mesiŋ, oóli ú kénie", "ensil, oóli ú kátánuɛ", "ɔsɔn", "efute", "pisuyú", "imɛŋ i puɔs", "imɛŋ i putúk,oóli ú kátíɛ", "makandikɛ", "pilɔndɔ́"}, + {"kiɛmɛ́ɛm", "kisɛ́ndɛ"}, +} + +var localeTableYavCM = [5][]string{ + {"sd", "md", "mw", "et", "kl", "fl", "ss"}, + {"sɔ́ndiɛ", "móndie", "muányáŋmóndie", "metúkpíápɛ", "kúpélimetúkpiapɛ", "feléte", "séselé"}, + {"o.1", "o.2", "o.3", "o.4", "o.5", "o.6", 
"o.7", "o.8", "o.9", "o.10", "o.11", "o.12"}, + {"pikítíkítie, oólí ú kutúan", "siɛyɛ́, oóli ú kándíɛ", "ɔnsúmbɔl, oóli ú kátátúɛ", "mesiŋ, oóli ú kénie", "ensil, oóli ú kátánuɛ", "ɔsɔn", "efute", "pisuyú", "imɛŋ i puɔs", "imɛŋ i putúk,oóli ú kátíɛ", "makandikɛ", "pilɔndɔ́"}, + {"kiɛmɛ́ɛm", "kisɛ́ndɛ"}, +} + +var localeTableYi = [5][]string{ + {}, + {"זונטיק", "מאָנטיק", "דינסטיק", "מיטוואך", "דאנערשטיק", "פֿרײַטיק", "שבת"}, + {}, + {"יאַנואַר", "פֿעברואַר", "מערץ", "אַפּריל", "מיי", "יוני", "יולי", "אויגוסט", "סעפּטעמבער", "אקטאבער", "נאוועמבער", "דעצעמבער"}, + {"פֿאַרמיטאָג", "נאָכמיטאָג"}, +} + +var localeTableYiUA = [5][]string{ + {}, + {"זונטיק", "מאָנטיק", "דינסטיק", "מיטוואך", "דאנערשטיק", "פֿרײַטיק", "שבת"}, + {}, + {"יאַנואַר", "פֿעברואַר", "מערץ", "אַפּריל", "מיי", "יוני", "יולי", "אויגוסט", "סעפּטעמבער", "אקטאבער", "נאוועמבער", "דעצעמבער"}, + {"פֿאַרמיטאָג", "נאָכמיטאָג"}, +} + +var localeTableYo = [5][]string{ + {"Àìk", "Aj", "Ìsẹ́g", "Ọjọ́r", "Ọjọ́b", "Ẹt", "Àbám"}, + {"Ọjọ́ Àìkú", "Ọjọ́ Ajé", "Ọjọ́ Ìsẹ́gun", "Ọjọ́rú", "Ọjọ́bọ", "Ọjọ́ Ẹtì", "Ọjọ́ Àbámẹ́ta"}, + {"Ṣẹ́r", "Èrèl", "Ẹrẹ̀n", "Ìgb", "Ẹ̀bi", "Òkú", "Agẹ", "Ògú", "Owe", "Ọ̀wà", "Bél", "Ọ̀pẹ"}, + {"Oṣù Ṣẹ́rẹ́", "Oṣù Èrèlè", "Oṣù Ẹrẹ̀nà", "Oṣù Ìgbé", "Oṣù Ẹ̀bibi", "Oṣù Òkúdu", "Oṣù Agẹmọ", "Oṣù Ògún", "Oṣù Owewe", "Oṣù Ọ̀wàrà", "Oṣù Bélú", "Oṣù Ọ̀pẹ̀"}, + {"Àárọ̀", "Ọ̀sán"}, +} + +var localeTableYoBJ = [5][]string{ + {"Àìk", "Aj", "Ìsɛ́g", "Ɔjɔ́r", "Ɔjɔ́b", "Ɛt", "Àbám"}, + {"Ɔjɔ́ Àìkú", "Ɔjɔ́ Ajé", "Ɔjɔ́ Ìsɛ́gun", "Ɔjɔ́rú", "Ɔjɔ́bɔ", "Ɔjɔ́ Ɛtì", "Ɔjɔ́ Àbámɛ́ta"}, + {"Shɛ́r", "Èrèl", "Ɛrɛ̀n", "Ìgb", "Ɛ̀bi", "Òkú", "Agɛ", "Ògú", "Owe", "Ɔ̀wà", "Bél", "Ɔ̀pɛ"}, + {"Oshù Shɛ́rɛ́", "Oshù Èrèlè", "Oshù Ɛrɛ̀nà", "Oshù Ìgbé", "Oshù Ɛ̀bibi", "Oshù Òkúdu", "Oshù Agɛmɔ", "Oshù Ògún", "Oshù Owewe", "Oshù Ɔ̀wàrà", "Oshù Bélú", "Oshù Ɔ̀pɛ̀"}, + {"Àárɔ̀", "Ɔ̀sán"}, +} + +var localeTableYoNG = [5][]string{ + {"Àìk", "Aj", "Ìsẹ́g", "Ọjọ́r", "Ọjọ́b", "Ẹt", "Àbám"}, + {"Ọjọ́ Àìkú", "Ọjọ́ Ajé", "Ọjọ́ Ìsẹ́gun", "Ọjọ́rú", "Ọjọ́bọ", "Ọjọ́ Ẹtì", "Ọjọ́ Àbámẹ́ta"}, + {"Ṣẹ́r", "Èrèl", "Ẹrẹ̀n", "Ìgb", "Ẹ̀bi", "Òkú", "Agẹ", "Ògú", "Owe", "Ọ̀wà", "Bél", "Ọ̀pẹ"}, + {"Oṣù Ṣẹ́rẹ́", "Oṣù Èrèlè", "Oṣù Ẹrẹ̀nà", "Oṣù Ìgbé", "Oṣù Ẹ̀bibi", "Oṣù Òkúdu", "Oṣù Agẹmọ", "Oṣù Ògún", "Oṣù Owewe", "Oṣù Ọ̀wàrà", "Oṣù Bélú", "Oṣù Ọ̀pẹ̀"}, + {"Àárọ̀", "Ọ̀sán"}, +} + +var localeTableYrl = [5][]string{ + {"mit", "mur", "mmk", "mms", "sup", "yuk", "sau"}, + {"mituú", "murakipí", "murakí-mukũi", "murakí-musapíri", "supapá", "yukuakú", "saurú"}, + {"ye", "mk", "ms", "id", "pu", "py", "pm", "ps", "pi", "yp", "yy", "ym"}, + {"yepé", "mukũi", "musapíri", "irũdí", "pú", "pú-yepé", "pú-mukũi", "pú-musapíri", "pú-irũdí", "yepé-putimaã", "yepé-yepé", "yepé-mukũi"}, + {}, +} + +var localeTableYrlBR = [5][]string{ + {"mit", "mur", "mmk", "mms", "sup", "yuk", "sau"}, + {"mituú", "murakipí", "murakí-mukũi", "murakí-musapíri", "supapá", "yukuakú", "saurú"}, + {"ye", "mk", "ms", "id", "pu", "py", "pm", "ps", "pi", "yp", "yy", "ym"}, + {"yepé", "mukũi", "musapíri", "irũdí", "pú", "pú-yepé", "pú-mukũi", "pú-musapíri", "pú-irũdí", "yepé-putimaã", "yepé-yepé", "yepé-mukũi"}, + {}, +} + +var localeTableYrlCO = [5][]string{ + {"mit", "mur", "mmk", "mms", "sup", "yuk", "sau"}, + {"mituú", "murakipí", "murakí-mukũi", "murakí-musapíri", "supapá", "yukuakú", "saurú"}, + {"ye", "mk", "ms", "id", "pu", "py", "pm", "ps", "pi", "yp", "yy", "ym"}, + {"yepé", "mukũi", "musapíri", "irũdí", "pú", "pú-yepé", "pú-mukũi", "pú-musapíri", "pú-irũdí", "yepé-putimaã", "yepé-yepé", "yepé-mukũi"}, + {"a.m.", 
"p.m."}, +} + +var localeTableYrlVE = [5][]string{ + {"mit", "mur", "mmk", "mms", "sup", "yuk", "sau"}, + {"mituú", "murakipí", "murakí-mukũi", "murakí-musapíri", "supapá", "yukuakú", "saurú"}, + {"ye", "mk", "ms", "id", "pu", "py", "pm", "ps", "pi", "yp", "yy", "ym"}, + {"yepé", "mukũi", "musapíri", "irũdí", "pú", "pú-yepé", "pú-mukũi", "pú-musapíri", "pú-irũdí", "yepé-putimaã", "yepé-yepé", "yepé-mukũi"}, + {"a.m.", "p.m."}, +} + +var localeTableYue = [5][]string{ + {}, + {"星期日", "星期一", "星期二", "星期三", "星期四", "星期五", "星期六"}, + {}, + {"1月", "2月", "3月", "4月", "5月", "6月", "7月", "8月", "9月", "10月", "11月", "12月"}, + {"上午", "下午"}, +} + +var localeTableYueHans = [5][]string{ + {"周日", "周一", "周二", "周三", "周四", "周五", "周六"}, + {"星期日", "星期一", "星期二", "星期三", "星期四", "星期五", "星期六"}, + {"1月", "2月", "3月", "4月", "5月", "6月", "7月", "8月", "9月", "10月", "11月", "12月"}, + {"一月", "二月", "三月", "四月", "五月", "六月", "七月", "八月", "九月", "十月", "十一月", "十二月"}, + {"上午", "下午"}, +} + +var localeTableYueHansCN = [5][]string{ + {"周日", "周一", "周二", "周三", "周四", "周五", "周六"}, + {"星期日", "星期一", "星期二", "星期三", "星期四", "星期五", "星期六"}, + {"1月", "2月", "3月", "4月", "5月", "6月", "7月", "8月", "9月", "10月", "11月", "12月"}, + {"一月", "二月", "三月", "四月", "五月", "六月", "七月", "八月", "九月", "十月", "十一月", "十二月"}, + {"上午", "下午"}, +} + +var localeTableYueHant = [5][]string{ + {}, + {"星期日", "星期一", "星期二", "星期三", "星期四", "星期五", "星期六"}, + {}, + {"1月", "2月", "3月", "4月", "5月", "6月", "7月", "8月", "9月", "10月", "11月", "12月"}, + {"上午", "下午"}, +} + +var localeTableYueHantHK = [5][]string{ + {}, + {"星期日", "星期一", "星期二", "星期三", "星期四", "星期五", "星期六"}, + {}, + {"1月", "2月", "3月", "4月", "5月", "6月", "7月", "8月", "9月", "10月", "11月", "12月"}, + {"上午", "下午"}, +} + +var localeTableZa = [5][]string{ + {}, + {"ngoenzsinghgiz", "singhgizit", "singhgizngeih", "singhgizsam", "singhgizseiq", "singhgizhaj", "singhgizroek"}, + {}, + {"ndwenit", "ndwenngeih", "ndwensam", "ndwenseiq", "ndwenngux", "ndwenloeg", "ndwencaet", "ndwenbet", "ndwengouj", "ndwencib", "ndwencib’it", "ndwencibngeih"}, + {}, +} + +var localeTableZaCN = [5][]string{ + {}, + {"ngoenzsinghgiz", "singhgizit", "singhgizngeih", "singhgizsam", "singhgizseiq", "singhgizhaj", "singhgizroek"}, + {}, + {"ndwenit", "ndwenngeih", "ndwensam", "ndwenseiq", "ndwenngux", "ndwenloeg", "ndwencaet", "ndwenbet", "ndwengouj", "ndwencib", "ndwencib’it", "ndwencibngeih"}, + {}, +} + +var localeTableZgh = [5][]string{ + {"ⴰⵙⴰ", "ⴰⵢⵏ", "ⴰⵙⵉ", "ⴰⴽⵕ", "ⴰⴽⵡ", "ⴰⵙⵉⵎ", "ⴰⵙⵉⴹ"}, + {"ⴰⵙⴰⵎⴰⵙ", "ⴰⵢⵏⴰⵙ", "ⴰⵙⵉⵏⴰⵙ", "ⴰⴽⵕⴰⵙ", "ⴰⴽⵡⴰⵙ", "ⴰⵙⵉⵎⵡⴰⵙ", "ⴰⵙⵉⴹⵢⴰⵙ"}, + {"ⵉⵏⵏ", "ⴱⵕⴰ", "ⵎⴰⵕ", "ⵉⴱⵔ", "ⵎⴰⵢ", "ⵢⵓⵏ", "ⵢⵓⵍ", "ⵖⵓⵛ", "ⵛⵓⵜ", "ⴽⵜⵓ", "ⵏⵓⵡ", "ⴷⵓⵊ"}, + {"ⵉⵏⵏⴰⵢⵔ", "ⴱⵕⴰⵢⵕ", "ⵎⴰⵕⵚ", "ⵉⴱⵔⵉⵔ", "ⵎⴰⵢⵢⵓ", "ⵢⵓⵏⵢⵓ", "ⵢⵓⵍⵢⵓⵣ", "ⵖⵓⵛⵜ", "ⵛⵓⵜⴰⵏⴱⵉⵔ", "ⴽⵜⵓⴱⵔ", "ⵏⵓⵡⴰⵏⴱⵉⵔ", "ⴷⵓⵊⴰⵏⴱⵉⵔ"}, + {"ⵜⵉⴼⴰⵡⵜ", "ⵜⴰⴷⴳⴳⵯⴰⵜ"}, +} + +var localeTableZghMA = [5][]string{ + {"ⴰⵙⴰ", "ⴰⵢⵏ", "ⴰⵙⵉ", "ⴰⴽⵕ", "ⴰⴽⵡ", "ⴰⵙⵉⵎ", "ⴰⵙⵉⴹ"}, + {"ⴰⵙⴰⵎⴰⵙ", "ⴰⵢⵏⴰⵙ", "ⴰⵙⵉⵏⴰⵙ", "ⴰⴽⵕⴰⵙ", "ⴰⴽⵡⴰⵙ", "ⴰⵙⵉⵎⵡⴰⵙ", "ⴰⵙⵉⴹⵢⴰⵙ"}, + {"ⵉⵏⵏ", "ⴱⵕⴰ", "ⵎⴰⵕ", "ⵉⴱⵔ", "ⵎⴰⵢ", "ⵢⵓⵏ", "ⵢⵓⵍ", "ⵖⵓⵛ", "ⵛⵓⵜ", "ⴽⵜⵓ", "ⵏⵓⵡ", "ⴷⵓⵊ"}, + {"ⵉⵏⵏⴰⵢⵔ", "ⴱⵕⴰⵢⵕ", "ⵎⴰⵕⵚ", "ⵉⴱⵔⵉⵔ", "ⵎⴰⵢⵢⵓ", "ⵢⵓⵏⵢⵓ", "ⵢⵓⵍⵢⵓⵣ", "ⵖⵓⵛⵜ", "ⵛⵓⵜⴰⵏⴱⵉⵔ", "ⴽⵜⵓⴱⵔ", "ⵏⵓⵡⴰⵏⴱⵉⵔ", "ⴷⵓⵊⴰⵏⴱⵉⵔ"}, + {"ⵜⵉⴼⴰⵡⵜ", "ⵜⴰⴷⴳⴳⵯⴰⵜ"}, +} + +var localeTableZh = [5][]string{ + {"周日", "周一", "周二", "周三", "周四", "周五", "周六"}, + {"星期日", "星期一", "星期二", "星期三", "星期四", "星期五", "星期六"}, + {"1月", "2月", "3月", "4月", "5月", "6月", "7月", "8月", "9月", "10月", "11月", "12月"}, + {"一月", "二月", "三月", "四月", "五月", "六月", "七月", "八月", "九月", "十月", "十一月", "十二月"}, + {"上午", "下午"}, +} + +var localeTableZhHans = [5][]string{ + {"周日", "周一", "周二", "周三", "周四", "周五", "周六"}, + 
{"星期日", "星期一", "星期二", "星期三", "星期四", "星期五", "星期六"}, + {"1月", "2月", "3月", "4月", "5月", "6月", "7月", "8月", "9月", "10月", "11月", "12月"}, + {"一月", "二月", "三月", "四月", "五月", "六月", "七月", "八月", "九月", "十月", "十一月", "十二月"}, + {"上午", "下午"}, +} + +var localeTableZhHansCN = [5][]string{ + {"周日", "周一", "周二", "周三", "周四", "周五", "周六"}, + {"星期日", "星期一", "星期二", "星期三", "星期四", "星期五", "星期六"}, + {"1月", "2月", "3月", "4月", "5月", "6月", "7月", "8月", "9月", "10月", "11月", "12月"}, + {"一月", "二月", "三月", "四月", "五月", "六月", "七月", "八月", "九月", "十月", "十一月", "十二月"}, + {"上午", "下午"}, +} + +var localeTableZhHansHK = [5][]string{ + {"周日", "周一", "周二", "周三", "周四", "周五", "周六"}, + {"星期日", "星期一", "星期二", "星期三", "星期四", "星期五", "星期六"}, + {"1月", "2月", "3月", "4月", "5月", "6月", "7月", "8月", "9月", "10月", "11月", "12月"}, + {"一月", "二月", "三月", "四月", "五月", "六月", "七月", "八月", "九月", "十月", "十一月", "十二月"}, + {"上午", "下午"}, +} + +var localeTableZhHansMO = [5][]string{ + {"周日", "周一", "周二", "周三", "周四", "周五", "周六"}, + {"星期日", "星期一", "星期二", "星期三", "星期四", "星期五", "星期六"}, + {"1月", "2月", "3月", "4月", "5月", "6月", "7月", "8月", "9月", "10月", "11月", "12月"}, + {"一月", "二月", "三月", "四月", "五月", "六月", "七月", "八月", "九月", "十月", "十一月", "十二月"}, + {"上午", "下午"}, +} + +var localeTableZhHansSG = [5][]string{ + {"周日", "周一", "周二", "周三", "周四", "周五", "周六"}, + {"星期日", "星期一", "星期二", "星期三", "星期四", "星期五", "星期六"}, + {"1月", "2月", "3月", "4月", "5月", "6月", "7月", "8月", "9月", "10月", "11月", "12月"}, + {"一月", "二月", "三月", "四月", "五月", "六月", "七月", "八月", "九月", "十月", "十一月", "十二月"}, + {"上午", "下午"}, +} + +var localeTableZhHant = [5][]string{ + {"週日", "週一", "週二", "週三", "週四", "週五", "週六"}, + {"星期日", "星期一", "星期二", "星期三", "星期四", "星期五", "星期六"}, + {}, + {"1月", "2月", "3月", "4月", "5月", "6月", "7月", "8月", "9月", "10月", "11月", "12月"}, + {"上午", "下午"}, +} + +var localeTableZhHantHK = [5][]string{ + {"週日", "週一", "週二", "週三", "週四", "週五", "週六"}, + {"星期日", "星期一", "星期二", "星期三", "星期四", "星期五", "星期六"}, + {}, + {"1月", "2月", "3月", "4月", "5月", "6月", "7月", "8月", "9月", "10月", "11月", "12月"}, + {"上午", "下午"}, +} + +var localeTableZhHantMO = [5][]string{ + {"週日", "週一", "週二", "週三", "週四", "週五", "週六"}, + {"星期日", "星期一", "星期二", "星期三", "星期四", "星期五", "星期六"}, + {}, + {"1月", "2月", "3月", "4月", "5月", "6月", "7月", "8月", "9月", "10月", "11月", "12月"}, + {"上午", "下午"}, +} + +var localeTableZhHantTW = [5][]string{ + {"週日", "週一", "週二", "週三", "週四", "週五", "週六"}, + {"星期日", "星期一", "星期二", "星期三", "星期四", "星期五", "星期六"}, + {}, + {"1月", "2月", "3月", "4月", "5月", "6月", "7月", "8月", "9月", "10月", "11月", "12月"}, + {"上午", "下午"}, +} + +var localeTableZu = [5][]string{ + {"Son", "Mso", "Bil", "Tha", "Sin", "Hla", "Mgq"}, + {"ISonto", "UMsombuluko", "ULwesibili", "ULwesithathu", "ULwesine", "ULwesihlanu", "UMgqibelo"}, + {"Jan", "Feb", "Mas", "Eph", "Mey", "Jun", "Jul", "Aga", "Sep", "Okt", "Nov", "Dis"}, + {"Januwari", "Februwari", "Mashi", "Ephreli", "Meyi", "Juni", "Julayi", "Agasti", "Septhemba", "Okthoba", "Novemba", "Disemba"}, + {"a", "p"}, +} + +var localeTableZuZA = [5][]string{ + {"Son", "Mso", "Bil", "Tha", "Sin", "Hla", "Mgq"}, + {"ISonto", "UMsombuluko", "ULwesibili", "ULwesithathu", "ULwesine", "ULwesihlanu", "UMgqibelo"}, + {"Jan", "Feb", "Mas", "Eph", "Mey", "Jun", "Jul", "Aga", "Sep", "Okt", "Nov", "Dis"}, + {"Januwari", "Februwari", "Mashi", "Ephreli", "Meyi", "Juni", "Julayi", "Agasti", "Septhemba", "Okthoba", "Novemba", "Disemba"}, + {"a", "p"}, +} + +const ( + LocaleAa = "aa" + LocaleAaDJ = "aa-DJ" + LocaleAaER = "aa-ER" + LocaleAaET = "aa-ET" + LocaleAb = "ab" + LocaleAbGE = "ab-GE" + LocaleAf = "af" + LocaleAfNA = "af-NA" + LocaleAfZA = "af-ZA" + LocaleAgq = "agq" + 
LocaleAgqCM = "agq-CM" + LocaleAk = "ak" + LocaleAkGH = "ak-GH" + LocaleAm = "am" + LocaleAmET = "am-ET" + LocaleAn = "an" + LocaleAnES = "an-ES" + LocaleApc = "apc" + LocaleApcSY = "apc-SY" + LocaleAr = "ar" + LocaleAr001 = "ar-001" + LocaleArAE = "ar-AE" + LocaleArBH = "ar-BH" + LocaleArDJ = "ar-DJ" + LocaleArDZ = "ar-DZ" + LocaleArEG = "ar-EG" + LocaleArEH = "ar-EH" + LocaleArER = "ar-ER" + LocaleArIL = "ar-IL" + LocaleArIQ = "ar-IQ" + LocaleArJO = "ar-JO" + LocaleArKM = "ar-KM" + LocaleArKW = "ar-KW" + LocaleArLB = "ar-LB" + LocaleArLY = "ar-LY" + LocaleArMA = "ar-MA" + LocaleArMR = "ar-MR" + LocaleArOM = "ar-OM" + LocaleArPS = "ar-PS" + LocaleArQA = "ar-QA" + LocaleArSA = "ar-SA" + LocaleArSD = "ar-SD" + LocaleArSO = "ar-SO" + LocaleArSS = "ar-SS" + LocaleArSY = "ar-SY" + LocaleArTD = "ar-TD" + LocaleArTN = "ar-TN" + LocaleArYE = "ar-YE" + LocaleAs = "as" + LocaleAsIN = "as-IN" + LocaleAsa = "asa" + LocaleAsaTZ = "asa-TZ" + LocaleAst = "ast" + LocaleAstES = "ast-ES" + LocaleAz = "az" + LocaleAzCyrl = "az-Cyrl" + LocaleAzCyrlAZ = "az-Cyrl-AZ" + LocaleAzLatn = "az-Latn" + LocaleAzLatnAZ = "az-Latn-AZ" + LocaleBal = "bal" + LocaleBalArab = "bal-Arab" + LocaleBalArabPK = "bal-Arab-PK" + LocaleBalLatn = "bal-Latn" + LocaleBalLatnPK = "bal-Latn-PK" + LocaleBas = "bas" + LocaleBasCM = "bas-CM" + LocaleBe = "be" + LocaleBeBY = "be-BY" + LocaleBetarask = "be-tarask" + LocaleBem = "bem" + LocaleBemZM = "bem-ZM" + LocaleBew = "bew" + LocaleBewID = "bew-ID" + LocaleBez = "bez" + LocaleBezTZ = "bez-TZ" + LocaleBg = "bg" + LocaleBgBG = "bg-BG" + LocaleBgc = "bgc" + LocaleBgcIN = "bgc-IN" + LocaleBho = "bho" + LocaleBhoIN = "bho-IN" + LocaleBlo = "blo" + LocaleBloBJ = "blo-BJ" + LocaleBm = "bm" + LocaleBmML = "bm-ML" + LocaleBn = "bn" + LocaleBnBD = "bn-BD" + LocaleBnIN = "bn-IN" + LocaleBo = "bo" + LocaleBoCN = "bo-CN" + LocaleBoIN = "bo-IN" + LocaleBr = "br" + LocaleBrFR = "br-FR" + LocaleBrx = "brx" + LocaleBrxIN = "brx-IN" + LocaleBs = "bs" + LocaleBsCyrl = "bs-Cyrl" + LocaleBsCyrlBA = "bs-Cyrl-BA" + LocaleBsLatn = "bs-Latn" + LocaleBsLatnBA = "bs-Latn-BA" + LocaleByn = "byn" + LocaleBynER = "byn-ER" + LocaleCa = "ca" + LocaleCaAD = "ca-AD" + LocaleCaES = "ca-ES" + LocaleCaESvalencia = "ca-ES-valencia" + LocaleCaFR = "ca-FR" + LocaleCaIT = "ca-IT" + LocaleCad = "cad" + LocaleCadUS = "cad-US" + LocaleCch = "cch" + LocaleCchNG = "cch-NG" + LocaleCcp = "ccp" + LocaleCcpBD = "ccp-BD" + LocaleCcpIN = "ccp-IN" + LocaleCe = "ce" + LocaleCeRU = "ce-RU" + LocaleCeb = "ceb" + LocaleCebPH = "ceb-PH" + LocaleCgg = "cgg" + LocaleCggUG = "cgg-UG" + LocaleChr = "chr" + LocaleChrUS = "chr-US" + LocaleCic = "cic" + LocaleCicUS = "cic-US" + LocaleCkb = "ckb" + LocaleCkbIQ = "ckb-IQ" + LocaleCkbIR = "ckb-IR" + LocaleCo = "co" + LocaleCoFR = "co-FR" + LocaleCs = "cs" + LocaleCsCZ = "cs-CZ" + LocaleCsw = "csw" + LocaleCswCA = "csw-CA" + LocaleCu = "cu" + LocaleCuRU = "cu-RU" + LocaleCv = "cv" + LocaleCvRU = "cv-RU" + LocaleCy = "cy" + LocaleCyGB = "cy-GB" + LocaleDa = "da" + LocaleDaDK = "da-DK" + LocaleDaGL = "da-GL" + LocaleDav = "dav" + LocaleDavKE = "dav-KE" + LocaleDe = "de" + LocaleDeAT = "de-AT" + LocaleDeBE = "de-BE" + LocaleDeCH = "de-CH" + LocaleDeDE = "de-DE" + LocaleDeIT = "de-IT" + LocaleDeLI = "de-LI" + LocaleDeLU = "de-LU" + LocaleDje = "dje" + LocaleDjeNE = "dje-NE" + LocaleDoi = "doi" + LocaleDoiIN = "doi-IN" + LocaleDsb = "dsb" + LocaleDsbDE = "dsb-DE" + LocaleDua = "dua" + LocaleDuaCM = "dua-CM" + LocaleDyo = "dyo" + LocaleDyoSN = "dyo-SN" + LocaleDz = "dz" + LocaleDzBT = "dz-BT" + LocaleEbu = 
"ebu" + LocaleEbuKE = "ebu-KE" + LocaleEe = "ee" + LocaleEeGH = "ee-GH" + LocaleEeTG = "ee-TG" + LocaleEl = "el" + LocaleElCY = "el-CY" + LocaleElGR = "el-GR" + LocaleElpolyton = "el-polyton" + LocaleEn = "en" + LocaleEn001 = "en-001" + LocaleEn150 = "en-150" + LocaleEnAE = "en-AE" + LocaleEnAG = "en-AG" + LocaleEnAI = "en-AI" + LocaleEnAS = "en-AS" + LocaleEnAT = "en-AT" + LocaleEnAU = "en-AU" + LocaleEnBB = "en-BB" + LocaleEnBE = "en-BE" + LocaleEnBI = "en-BI" + LocaleEnBM = "en-BM" + LocaleEnBS = "en-BS" + LocaleEnBW = "en-BW" + LocaleEnBZ = "en-BZ" + LocaleEnCA = "en-CA" + LocaleEnCC = "en-CC" + LocaleEnCH = "en-CH" + LocaleEnCK = "en-CK" + LocaleEnCM = "en-CM" + LocaleEnCX = "en-CX" + LocaleEnCY = "en-CY" + LocaleEnDE = "en-DE" + LocaleEnDG = "en-DG" + LocaleEnDK = "en-DK" + LocaleEnDM = "en-DM" + LocaleEnDsrt = "en-Dsrt" + LocaleEnDsrtUS = "en-Dsrt-US" + LocaleEnER = "en-ER" + LocaleEnFI = "en-FI" + LocaleEnFJ = "en-FJ" + LocaleEnFK = "en-FK" + LocaleEnFM = "en-FM" + LocaleEnGB = "en-GB" + LocaleEnGD = "en-GD" + LocaleEnGG = "en-GG" + LocaleEnGH = "en-GH" + LocaleEnGI = "en-GI" + LocaleEnGM = "en-GM" + LocaleEnGU = "en-GU" + LocaleEnGY = "en-GY" + LocaleEnHK = "en-HK" + LocaleEnID = "en-ID" + LocaleEnIE = "en-IE" + LocaleEnIL = "en-IL" + LocaleEnIM = "en-IM" + LocaleEnIN = "en-IN" + LocaleEnIO = "en-IO" + LocaleEnJE = "en-JE" + LocaleEnJM = "en-JM" + LocaleEnKE = "en-KE" + LocaleEnKI = "en-KI" + LocaleEnKN = "en-KN" + LocaleEnKY = "en-KY" + LocaleEnLC = "en-LC" + LocaleEnLR = "en-LR" + LocaleEnLS = "en-LS" + LocaleEnMG = "en-MG" + LocaleEnMH = "en-MH" + LocaleEnMO = "en-MO" + LocaleEnMP = "en-MP" + LocaleEnMS = "en-MS" + LocaleEnMT = "en-MT" + LocaleEnMU = "en-MU" + LocaleEnMV = "en-MV" + LocaleEnMW = "en-MW" + LocaleEnMY = "en-MY" + LocaleEnNA = "en-NA" + LocaleEnNF = "en-NF" + LocaleEnNG = "en-NG" + LocaleEnNL = "en-NL" + LocaleEnNR = "en-NR" + LocaleEnNU = "en-NU" + LocaleEnNZ = "en-NZ" + LocaleEnPG = "en-PG" + LocaleEnPH = "en-PH" + LocaleEnPK = "en-PK" + LocaleEnPN = "en-PN" + LocaleEnPR = "en-PR" + LocaleEnPW = "en-PW" + LocaleEnRW = "en-RW" + LocaleEnSB = "en-SB" + LocaleEnSC = "en-SC" + LocaleEnSD = "en-SD" + LocaleEnSE = "en-SE" + LocaleEnSG = "en-SG" + LocaleEnSH = "en-SH" + LocaleEnSI = "en-SI" + LocaleEnSL = "en-SL" + LocaleEnSS = "en-SS" + LocaleEnSX = "en-SX" + LocaleEnSZ = "en-SZ" + LocaleEnShaw = "en-Shaw" + LocaleEnShawGB = "en-Shaw-GB" + LocaleEnTC = "en-TC" + LocaleEnTK = "en-TK" + LocaleEnTO = "en-TO" + LocaleEnTT = "en-TT" + LocaleEnTV = "en-TV" + LocaleEnTZ = "en-TZ" + LocaleEnUG = "en-UG" + LocaleEnUM = "en-UM" + LocaleEnUS = "en-US" + LocaleEnUSuvaposix = "en-US-u-va-posix" + LocaleEnVC = "en-VC" + LocaleEnVG = "en-VG" + LocaleEnVI = "en-VI" + LocaleEnVU = "en-VU" + LocaleEnWS = "en-WS" + LocaleEnZA = "en-ZA" + LocaleEnZM = "en-ZM" + LocaleEnZW = "en-ZW" + LocaleEo = "eo" + LocaleEo001 = "eo-001" + LocaleEs = "es" + LocaleEs419 = "es-419" + LocaleEsAR = "es-AR" + LocaleEsBO = "es-BO" + LocaleEsBR = "es-BR" + LocaleEsBZ = "es-BZ" + LocaleEsCL = "es-CL" + LocaleEsCO = "es-CO" + LocaleEsCR = "es-CR" + LocaleEsCU = "es-CU" + LocaleEsDO = "es-DO" + LocaleEsEA = "es-EA" + LocaleEsEC = "es-EC" + LocaleEsES = "es-ES" + LocaleEsGQ = "es-GQ" + LocaleEsGT = "es-GT" + LocaleEsHN = "es-HN" + LocaleEsIC = "es-IC" + LocaleEsMX = "es-MX" + LocaleEsNI = "es-NI" + LocaleEsPA = "es-PA" + LocaleEsPE = "es-PE" + LocaleEsPH = "es-PH" + LocaleEsPR = "es-PR" + LocaleEsPY = "es-PY" + LocaleEsSV = "es-SV" + LocaleEsUS = "es-US" + LocaleEsUY = "es-UY" + LocaleEsVE = "es-VE" + LocaleEt = 
"et" + LocaleEtEE = "et-EE" + LocaleEu = "eu" + LocaleEuES = "eu-ES" + LocaleEwo = "ewo" + LocaleEwoCM = "ewo-CM" + LocaleFa = "fa" + LocaleFaAF = "fa-AF" + LocaleFaIR = "fa-IR" + LocaleFf = "ff" + LocaleFfAdlm = "ff-Adlm" + LocaleFfAdlmBF = "ff-Adlm-BF" + LocaleFfAdlmCM = "ff-Adlm-CM" + LocaleFfAdlmGH = "ff-Adlm-GH" + LocaleFfAdlmGM = "ff-Adlm-GM" + LocaleFfAdlmGN = "ff-Adlm-GN" + LocaleFfAdlmGW = "ff-Adlm-GW" + LocaleFfAdlmLR = "ff-Adlm-LR" + LocaleFfAdlmMR = "ff-Adlm-MR" + LocaleFfAdlmNE = "ff-Adlm-NE" + LocaleFfAdlmNG = "ff-Adlm-NG" + LocaleFfAdlmSL = "ff-Adlm-SL" + LocaleFfAdlmSN = "ff-Adlm-SN" + LocaleFfLatn = "ff-Latn" + LocaleFfLatnBF = "ff-Latn-BF" + LocaleFfLatnCM = "ff-Latn-CM" + LocaleFfLatnGH = "ff-Latn-GH" + LocaleFfLatnGM = "ff-Latn-GM" + LocaleFfLatnGN = "ff-Latn-GN" + LocaleFfLatnGW = "ff-Latn-GW" + LocaleFfLatnLR = "ff-Latn-LR" + LocaleFfLatnMR = "ff-Latn-MR" + LocaleFfLatnNE = "ff-Latn-NE" + LocaleFfLatnNG = "ff-Latn-NG" + LocaleFfLatnSL = "ff-Latn-SL" + LocaleFfLatnSN = "ff-Latn-SN" + LocaleFi = "fi" + LocaleFiFI = "fi-FI" + LocaleFil = "fil" + LocaleFilPH = "fil-PH" + LocaleFo = "fo" + LocaleFoDK = "fo-DK" + LocaleFoFO = "fo-FO" + LocaleFr = "fr" + LocaleFrBE = "fr-BE" + LocaleFrBF = "fr-BF" + LocaleFrBI = "fr-BI" + LocaleFrBJ = "fr-BJ" + LocaleFrBL = "fr-BL" + LocaleFrCA = "fr-CA" + LocaleFrCD = "fr-CD" + LocaleFrCF = "fr-CF" + LocaleFrCG = "fr-CG" + LocaleFrCH = "fr-CH" + LocaleFrCI = "fr-CI" + LocaleFrCM = "fr-CM" + LocaleFrDJ = "fr-DJ" + LocaleFrDZ = "fr-DZ" + LocaleFrFR = "fr-FR" + LocaleFrGA = "fr-GA" + LocaleFrGF = "fr-GF" + LocaleFrGN = "fr-GN" + LocaleFrGP = "fr-GP" + LocaleFrGQ = "fr-GQ" + LocaleFrHT = "fr-HT" + LocaleFrKM = "fr-KM" + LocaleFrLU = "fr-LU" + LocaleFrMA = "fr-MA" + LocaleFrMC = "fr-MC" + LocaleFrMF = "fr-MF" + LocaleFrMG = "fr-MG" + LocaleFrML = "fr-ML" + LocaleFrMQ = "fr-MQ" + LocaleFrMR = "fr-MR" + LocaleFrMU = "fr-MU" + LocaleFrNC = "fr-NC" + LocaleFrNE = "fr-NE" + LocaleFrPF = "fr-PF" + LocaleFrPM = "fr-PM" + LocaleFrRE = "fr-RE" + LocaleFrRW = "fr-RW" + LocaleFrSC = "fr-SC" + LocaleFrSN = "fr-SN" + LocaleFrSY = "fr-SY" + LocaleFrTD = "fr-TD" + LocaleFrTG = "fr-TG" + LocaleFrTN = "fr-TN" + LocaleFrVU = "fr-VU" + LocaleFrWF = "fr-WF" + LocaleFrYT = "fr-YT" + LocaleFrr = "frr" + LocaleFrrDE = "frr-DE" + LocaleFur = "fur" + LocaleFurIT = "fur-IT" + LocaleFy = "fy" + LocaleFyNL = "fy-NL" + LocaleGa = "ga" + LocaleGaGB = "ga-GB" + LocaleGaIE = "ga-IE" + LocaleGaa = "gaa" + LocaleGaaGH = "gaa-GH" + LocaleGd = "gd" + LocaleGdGB = "gd-GB" + LocaleGez = "gez" + LocaleGezER = "gez-ER" + LocaleGezET = "gez-ET" + LocaleGl = "gl" + LocaleGlES = "gl-ES" + LocaleGn = "gn" + LocaleGnPY = "gn-PY" + LocaleGsw = "gsw" + LocaleGswCH = "gsw-CH" + LocaleGswFR = "gsw-FR" + LocaleGswLI = "gsw-LI" + LocaleGu = "gu" + LocaleGuIN = "gu-IN" + LocaleGuz = "guz" + LocaleGuzKE = "guz-KE" + LocaleGv = "gv" + LocaleGvIM = "gv-IM" + LocaleHa = "ha" + LocaleHaArab = "ha-Arab" + LocaleHaArabNG = "ha-Arab-NG" + LocaleHaArabSD = "ha-Arab-SD" + LocaleHaGH = "ha-GH" + LocaleHaNE = "ha-NE" + LocaleHaNG = "ha-NG" + LocaleHaw = "haw" + LocaleHawUS = "haw-US" + LocaleHe = "he" + LocaleHeIL = "he-IL" + LocaleHi = "hi" + LocaleHiIN = "hi-IN" + LocaleHiLatn = "hi-Latn" + LocaleHiLatnIN = "hi-Latn-IN" + LocaleHnj = "hnj" + LocaleHr = "hr" + LocaleHrBA = "hr-BA" + LocaleHrHR = "hr-HR" + LocaleHsb = "hsb" + LocaleHsbDE = "hsb-DE" + LocaleHu = "hu" + LocaleHuHU = "hu-HU" + LocaleHy = "hy" + LocaleHyAM = "hy-AM" + LocaleIa = "ia" + LocaleIa001 = "ia-001" + LocaleId = "id" + LocaleIdID = 
"id-ID" + LocaleIe = "ie" + LocaleIeEE = "ie-EE" + LocaleIg = "ig" + LocaleIgNG = "ig-NG" + LocaleIi = "ii" + LocaleIiCN = "ii-CN" + LocaleIs = "is" + LocaleIsIS = "is-IS" + LocaleIt = "it" + LocaleItCH = "it-CH" + LocaleItIT = "it-IT" + LocaleItSM = "it-SM" + LocaleItVA = "it-VA" + LocaleIu = "iu" + LocaleIuCA = "iu-CA" + LocaleJa = "ja" + LocaleJaJP = "ja-JP" + LocaleJgo = "jgo" + LocaleJgoCM = "jgo-CM" + LocaleJmc = "jmc" + LocaleJmcTZ = "jmc-TZ" + LocaleJv = "jv" + LocaleJvID = "jv-ID" + LocaleKa = "ka" + LocaleKaGE = "ka-GE" + LocaleKab = "kab" + LocaleKabDZ = "kab-DZ" + LocaleKaj = "kaj" + LocaleKajNG = "kaj-NG" + LocaleKam = "kam" + LocaleKamKE = "kam-KE" + LocaleKcg = "kcg" + LocaleKcgNG = "kcg-NG" + LocaleKde = "kde" + LocaleKdeTZ = "kde-TZ" + LocaleKea = "kea" + LocaleKeaCV = "kea-CV" + LocaleKgp = "kgp" + LocaleKgpBR = "kgp-BR" + LocaleKhq = "khq" + LocaleKhqML = "khq-ML" + LocaleKi = "ki" + LocaleKiKE = "ki-KE" + LocaleKk = "kk" + LocaleKkKZ = "kk-KZ" + LocaleKkj = "kkj" + LocaleKkjCM = "kkj-CM" + LocaleKl = "kl" + LocaleKlGL = "kl-GL" + LocaleKln = "kln" + LocaleKlnKE = "kln-KE" + LocaleKm = "km" + LocaleKmKH = "km-KH" + LocaleKn = "kn" + LocaleKnIN = "kn-IN" + LocaleKo = "ko" + LocaleKoCN = "ko-CN" + LocaleKoKP = "ko-KP" + LocaleKoKR = "ko-KR" + LocaleKok = "kok" + LocaleKokIN = "kok-IN" + LocaleKs = "ks" + LocaleKsArab = "ks-Arab" + LocaleKsArabIN = "ks-Arab-IN" + LocaleKsDeva = "ks-Deva" + LocaleKsDevaIN = "ks-Deva-IN" + LocaleKsb = "ksb" + LocaleKsbTZ = "ksb-TZ" + LocaleKsf = "ksf" + LocaleKsfCM = "ksf-CM" + LocaleKsh = "ksh" + LocaleKshDE = "ksh-DE" + LocaleKu = "ku" + LocaleKuTR = "ku-TR" + LocaleKw = "kw" + LocaleKwGB = "kw-GB" + LocaleKxv = "kxv" + LocaleKxvDeva = "kxv-Deva" + LocaleKxvDevaIN = "kxv-Deva-IN" + LocaleKxvOrya = "kxv-Orya" + LocaleKxvOryaIN = "kxv-Orya-IN" + LocaleKxvTelu = "kxv-Telu" + LocaleKxvTeluIN = "kxv-Telu-IN" + LocaleKy = "ky" + LocaleKyKG = "ky-KG" + LocaleLa = "la" + LocaleLaVA = "la-VA" + LocaleLag = "lag" + LocaleLagTZ = "lag-TZ" + LocaleLb = "lb" + LocaleLbLU = "lb-LU" + LocaleLg = "lg" + LocaleLgUG = "lg-UG" + LocaleLij = "lij" + LocaleLijIT = "lij-IT" + LocaleLkt = "lkt" + LocaleLktUS = "lkt-US" + LocaleLmo = "lmo" + LocaleLmoIT = "lmo-IT" + LocaleLn = "ln" + LocaleLnAO = "ln-AO" + LocaleLnCD = "ln-CD" + LocaleLnCF = "ln-CF" + LocaleLnCG = "ln-CG" + LocaleLo = "lo" + LocaleLoLA = "lo-LA" + LocaleLrc = "lrc" + LocaleLrcIQ = "lrc-IQ" + LocaleLrcIR = "lrc-IR" + LocaleLt = "lt" + LocaleLtLT = "lt-LT" + LocaleLu = "lu" + LocaleLuCD = "lu-CD" + LocaleLuo = "luo" + LocaleLuoKE = "luo-KE" + LocaleLuy = "luy" + LocaleLuyKE = "luy-KE" + LocaleLv = "lv" + LocaleLvLV = "lv-LV" + LocaleMai = "mai" + LocaleMaiIN = "mai-IN" + LocaleMas = "mas" + LocaleMasKE = "mas-KE" + LocaleMasTZ = "mas-TZ" + LocaleMer = "mer" + LocaleMerKE = "mer-KE" + LocaleMfe = "mfe" + LocaleMfeMU = "mfe-MU" + LocaleMg = "mg" + LocaleMgMG = "mg-MG" + LocaleMgh = "mgh" + LocaleMghMZ = "mgh-MZ" + LocaleMgo = "mgo" + LocaleMgoCM = "mgo-CM" + LocaleMi = "mi" + LocaleMiNZ = "mi-NZ" + LocaleMk = "mk" + LocaleMkMK = "mk-MK" + LocaleMl = "ml" + LocaleMlIN = "ml-IN" + LocaleMn = "mn" + LocaleMnMN = "mn-MN" + LocaleMnMongMN = "mn-Mong-MN" + LocaleMni = "mni" + LocaleMniBeng = "mni-Beng" + LocaleMniBengIN = "mni-Beng-IN" + LocaleMr = "mr" + LocaleMrIN = "mr-IN" + LocaleMs = "ms" + LocaleMsArab = "ms-Arab" + LocaleMsArabBN = "ms-Arab-BN" + LocaleMsArabMY = "ms-Arab-MY" + LocaleMsBN = "ms-BN" + LocaleMsID = "ms-ID" + LocaleMsMY = "ms-MY" + LocaleMsSG = "ms-SG" + LocaleMt = "mt" + LocaleMtMT = 
"mt-MT" + LocaleMua = "mua" + LocaleMuaCM = "mua-CM" + LocaleMus = "mus" + LocaleMusUS = "mus-US" + LocaleMy = "my" + LocaleMyMM = "my-MM" + LocaleMyv = "myv" + LocaleMyvRU = "myv-RU" + LocaleMzn = "mzn" + LocaleMznIR = "mzn-IR" + LocaleNaq = "naq" + LocaleNaqNA = "naq-NA" + LocaleNd = "nd" + LocaleNdZW = "nd-ZW" + LocaleNds = "nds" + LocaleNdsDE = "nds-DE" + LocaleNdsNL = "nds-NL" + LocaleNe = "ne" + LocaleNeIN = "ne-IN" + LocaleNeNP = "ne-NP" + LocaleNl = "nl" + LocaleNlAW = "nl-AW" + LocaleNlBE = "nl-BE" + LocaleNlBQ = "nl-BQ" + LocaleNlCW = "nl-CW" + LocaleNlNL = "nl-NL" + LocaleNlSR = "nl-SR" + LocaleNlSX = "nl-SX" + LocaleNmg = "nmg" + LocaleNmgCM = "nmg-CM" + LocaleNn = "nn" + LocaleNnNO = "nn-NO" + LocaleNnh = "nnh" + LocaleNnhCM = "nnh-CM" + LocaleNo = "no" + LocaleNqo = "nqo" + LocaleNqoGN = "nqo-GN" + LocaleNr = "nr" + LocaleNrZA = "nr-ZA" + LocaleNso = "nso" + LocaleNsoZA = "nso-ZA" + LocaleNus = "nus" + LocaleNusSS = "nus-SS" + LocaleNy = "ny" + LocaleNyMW = "ny-MW" + LocaleNyn = "nyn" + LocaleNynUG = "nyn-UG" + LocaleOc = "oc" + LocaleOcES = "oc-ES" + LocaleOcFR = "oc-FR" + LocaleOm = "om" + LocaleOmET = "om-ET" + LocaleOmKE = "om-KE" + LocaleOr = "or" + LocaleOrIN = "or-IN" + LocaleOs = "os" + LocaleOsGE = "os-GE" + LocaleOsRU = "os-RU" + LocaleOsa = "osa" + LocaleOsaUS = "osa-US" + LocalePa = "pa" + LocalePaArab = "pa-Arab" + LocalePaArabPK = "pa-Arab-PK" + LocalePaGuru = "pa-Guru" + LocalePaGuruIN = "pa-Guru-IN" + LocalePap = "pap" + LocalePapAW = "pap-AW" + LocalePapCW = "pap-CW" + LocalePcm = "pcm" + LocalePcmNG = "pcm-NG" + LocalePis = "pis" + LocalePisSB = "pis-SB" + LocalePl = "pl" + LocalePlPL = "pl-PL" + LocalePrg = "prg" + LocalePrgPL = "prg-PL" + LocalePs = "ps" + LocalePsAF = "ps-AF" + LocalePsPK = "ps-PK" + LocalePt = "pt" + LocalePtAO = "pt-AO" + LocalePtBR = "pt-BR" + LocalePtCH = "pt-CH" + LocalePtCV = "pt-CV" + LocalePtGQ = "pt-GQ" + LocalePtGW = "pt-GW" + LocalePtLU = "pt-LU" + LocalePtMO = "pt-MO" + LocalePtMZ = "pt-MZ" + LocalePtPT = "pt-PT" + LocalePtST = "pt-ST" + LocalePtTL = "pt-TL" + LocaleQu = "qu" + LocaleQuBO = "qu-BO" + LocaleQuEC = "qu-EC" + LocaleQuPE = "qu-PE" + LocaleRaj = "raj" + LocaleRajIN = "raj-IN" + LocaleRif = "rif" + LocaleRifMA = "rif-MA" + LocaleRm = "rm" + LocaleRmCH = "rm-CH" + LocaleRn = "rn" + LocaleRnBI = "rn-BI" + LocaleRo = "ro" + LocaleRoMD = "ro-MD" + LocaleRoRO = "ro-RO" + LocaleRof = "rof" + LocaleRofTZ = "rof-TZ" + LocaleRu = "ru" + LocaleRuBY = "ru-BY" + LocaleRuKG = "ru-KG" + LocaleRuKZ = "ru-KZ" + LocaleRuMD = "ru-MD" + LocaleRuRU = "ru-RU" + LocaleRuUA = "ru-UA" + LocaleRw = "rw" + LocaleRwRW = "rw-RW" + LocaleRwk = "rwk" + LocaleRwkTZ = "rwk-TZ" + LocaleSa = "sa" + LocaleSaIN = "sa-IN" + LocaleSah = "sah" + LocaleSahRU = "sah-RU" + LocaleSaq = "saq" + LocaleSaqKE = "saq-KE" + LocaleSat = "sat" + LocaleSbp = "sbp" + LocaleSbpTZ = "sbp-TZ" + LocaleSc = "sc" + LocaleScIT = "sc-IT" + LocaleScn = "scn" + LocaleScnIT = "scn-IT" + LocaleSd = "sd" + LocaleSdArab = "sd-Arab" + LocaleSdArabPK = "sd-Arab-PK" + LocaleSdDeva = "sd-Deva" + LocaleSdDevaIN = "sd-Deva-IN" + LocaleSe = "se" + LocaleSeFI = "se-FI" + LocaleSeNO = "se-NO" + LocaleSeSE = "se-SE" + LocaleSeh = "seh" + LocaleSehMZ = "seh-MZ" + LocaleSes = "ses" + LocaleSesML = "ses-ML" + LocaleSg = "sg" + LocaleSgCF = "sg-CF" + LocaleShi = "shi" + LocaleShiLatn = "shi-Latn" + LocaleShiLatnMA = "shi-Latn-MA" + LocaleShiTfng = "shi-Tfng" + LocaleShiTfngMA = "shi-Tfng-MA" + LocaleSi = "si" + LocaleSiLK = "si-LK" + LocaleSid = "sid" + LocaleSidET = "sid-ET" + LocaleSk = "sk" + 
LocaleSkSK = "sk-SK" + LocaleSkr = "skr" + LocaleSkrPK = "skr-PK" + LocaleSl = "sl" + LocaleSlSI = "sl-SI" + LocaleSmn = "smn" + LocaleSmnFI = "smn-FI" + LocaleSn = "sn" + LocaleSnZW = "sn-ZW" + LocaleSo = "so" + LocaleSoDJ = "so-DJ" + LocaleSoET = "so-ET" + LocaleSoKE = "so-KE" + LocaleSoSO = "so-SO" + LocaleSq = "sq" + LocaleSqAL = "sq-AL" + LocaleSqMK = "sq-MK" + LocaleSqXK = "sq-XK" + LocaleSr = "sr" + LocaleSrCyrl = "sr-Cyrl" + LocaleSrCyrlBA = "sr-Cyrl-BA" + LocaleSrCyrlME = "sr-Cyrl-ME" + LocaleSrCyrlRS = "sr-Cyrl-RS" + LocaleSrCyrlXK = "sr-Cyrl-XK" + LocaleSrLatn = "sr-Latn" + LocaleSrLatnBA = "sr-Latn-BA" + LocaleSrLatnME = "sr-Latn-ME" + LocaleSrLatnRS = "sr-Latn-RS" + LocaleSrLatnXK = "sr-Latn-XK" + LocaleSs = "ss" + LocaleSsSZ = "ss-SZ" + LocaleSsZA = "ss-ZA" + LocaleSsy = "ssy" + LocaleSsyER = "ssy-ER" + LocaleSt = "st" + LocaleStLS = "st-LS" + LocaleStZA = "st-ZA" + LocaleSu = "su" + LocaleSuLatn = "su-Latn" + LocaleSuLatnID = "su-Latn-ID" + LocaleSv = "sv" + LocaleSvAX = "sv-AX" + LocaleSvFI = "sv-FI" + LocaleSvSE = "sv-SE" + LocaleSw = "sw" + LocaleSwCD = "sw-CD" + LocaleSwKE = "sw-KE" + LocaleSwTZ = "sw-TZ" + LocaleSwUG = "sw-UG" + LocaleSyr = "syr" + LocaleSyrIQ = "syr-IQ" + LocaleSyrSY = "syr-SY" + LocaleSzl = "szl" + LocaleSzlPL = "szl-PL" + LocaleTa = "ta" + LocaleTaIN = "ta-IN" + LocaleTaLK = "ta-LK" + LocaleTaMY = "ta-MY" + LocaleTaSG = "ta-SG" + LocaleTe = "te" + LocaleTeIN = "te-IN" + LocaleTeo = "teo" + LocaleTeoKE = "teo-KE" + LocaleTeoUG = "teo-UG" + LocaleTg = "tg" + LocaleTgTJ = "tg-TJ" + LocaleTh = "th" + LocaleThTH = "th-TH" + LocaleTi = "ti" + LocaleTiER = "ti-ER" + LocaleTiET = "ti-ET" + LocaleTig = "tig" + LocaleTigER = "tig-ER" + LocaleTk = "tk" + LocaleTkTM = "tk-TM" + LocaleTn = "tn" + LocaleTnBW = "tn-BW" + LocaleTnZA = "tn-ZA" + LocaleTo = "to" + LocaleToTO = "to-TO" + LocaleTok = "tok" + LocaleTok001 = "tok-001" + LocaleTpi = "tpi" + LocaleTpiPG = "tpi-PG" + LocaleTr = "tr" + LocaleTrCY = "tr-CY" + LocaleTrTR = "tr-TR" + LocaleTrv = "trv" + LocaleTrvTW = "trv-TW" + LocaleTrw = "trw" + LocaleTrwPK = "trw-PK" + LocaleTs = "ts" + LocaleTsZA = "ts-ZA" + LocaleTt = "tt" + LocaleTtRU = "tt-RU" + LocaleTwq = "twq" + LocaleTwqNE = "twq-NE" + LocaleTzm = "tzm" + LocaleTzmMA = "tzm-MA" + LocaleUg = "ug" + LocaleUgCN = "ug-CN" + LocaleUk = "uk" + LocaleUkUA = "uk-UA" + LocaleUnd = "und" + LocaleUr = "ur" + LocaleUrIN = "ur-IN" + LocaleUrPK = "ur-PK" + LocaleUz = "uz" + LocaleUzArab = "uz-Arab" + LocaleUzArabAF = "uz-Arab-AF" + LocaleUzCyrl = "uz-Cyrl" + LocaleUzCyrlUZ = "uz-Cyrl-UZ" + LocaleUzLatn = "uz-Latn" + LocaleUzLatnUZ = "uz-Latn-UZ" + LocaleVai = "vai" + LocaleVaiLatn = "vai-Latn" + LocaleVaiLatnLR = "vai-Latn-LR" + LocaleVaiVaii = "vai-Vaii" + LocaleVaiVaiiLR = "vai-Vaii-LR" + LocaleVe = "ve" + LocaleVeZA = "ve-ZA" + LocaleVec = "vec" + LocaleVecIT = "vec-IT" + LocaleVi = "vi" + LocaleViVN = "vi-VN" + LocaleVmw = "vmw" + LocaleVmwMZ = "vmw-MZ" + LocaleVo = "vo" + LocaleVo001 = "vo-001" + LocaleVun = "vun" + LocaleVunTZ = "vun-TZ" + LocaleWae = "wae" + LocaleWaeCH = "wae-CH" + LocaleWal = "wal" + LocaleWalET = "wal-ET" + LocaleWo = "wo" + LocaleWoSN = "wo-SN" + LocaleXh = "xh" + LocaleXhZA = "xh-ZA" + LocaleXnr = "xnr" + LocaleXnrIN = "xnr-IN" + LocaleXog = "xog" + LocaleXogUG = "xog-UG" + LocaleYav = "yav" + LocaleYavCM = "yav-CM" + LocaleYi = "yi" + LocaleYiUA = "yi-UA" + LocaleYo = "yo" + LocaleYoBJ = "yo-BJ" + LocaleYoNG = "yo-NG" + LocaleYrl = "yrl" + LocaleYrlBR = "yrl-BR" + LocaleYrlCO = "yrl-CO" + LocaleYrlVE = "yrl-VE" + LocaleYue = "yue" + 
LocaleYueHans = "yue-Hans" + LocaleYueHansCN = "yue-Hans-CN" + LocaleYueHant = "yue-Hant" + LocaleYueHantHK = "yue-Hant-HK" + LocaleZa = "za" + LocaleZaCN = "za-CN" + LocaleZgh = "zgh" + LocaleZghMA = "zgh-MA" + LocaleZh = "zh" + LocaleZhHans = "zh-Hans" + LocaleZhHansCN = "zh-Hans-CN" + LocaleZhHansHK = "zh-Hans-HK" + LocaleZhHansMO = "zh-Hans-MO" + LocaleZhHansSG = "zh-Hans-SG" + LocaleZhHant = "zh-Hant" + LocaleZhHantHK = "zh-Hant-HK" + LocaleZhHantMO = "zh-Hant-MO" + LocaleZhHantTW = "zh-Hant-TW" + LocaleZu = "zu" + LocaleZuZA = "zu-ZA" +) + +var tables = map[string][5][]string{ + LocaleAa: localeTableAa, + LocaleAaDJ: localeTableAaDJ, + LocaleAaER: localeTableAaER, + LocaleAaET: localeTableAaET, + LocaleAb: localeTableAb, + LocaleAbGE: localeTableAbGE, + LocaleAf: localeTableAf, + LocaleAfNA: localeTableAfNA, + LocaleAfZA: localeTableAfZA, + LocaleAgq: localeTableAgq, + LocaleAgqCM: localeTableAgqCM, + LocaleAk: localeTableAk, + LocaleAkGH: localeTableAkGH, + LocaleAm: localeTableAm, + LocaleAmET: localeTableAmET, + LocaleAn: localeTableAn, + LocaleAnES: localeTableAnES, + LocaleApc: localeTableApc, + LocaleApcSY: localeTableApcSY, + LocaleAr: localeTableAr, + LocaleAr001: localeTableAr001, + LocaleArAE: localeTableArAE, + LocaleArBH: localeTableArBH, + LocaleArDJ: localeTableArDJ, + LocaleArDZ: localeTableArDZ, + LocaleArEG: localeTableArEG, + LocaleArEH: localeTableArEH, + LocaleArER: localeTableArER, + LocaleArIL: localeTableArIL, + LocaleArIQ: localeTableArIQ, + LocaleArJO: localeTableArJO, + LocaleArKM: localeTableArKM, + LocaleArKW: localeTableArKW, + LocaleArLB: localeTableArLB, + LocaleArLY: localeTableArLY, + LocaleArMA: localeTableArMA, + LocaleArMR: localeTableArMR, + LocaleArOM: localeTableArOM, + LocaleArPS: localeTableArPS, + LocaleArQA: localeTableArQA, + LocaleArSA: localeTableArSA, + LocaleArSD: localeTableArSD, + LocaleArSO: localeTableArSO, + LocaleArSS: localeTableArSS, + LocaleArSY: localeTableArSY, + LocaleArTD: localeTableArTD, + LocaleArTN: localeTableArTN, + LocaleArYE: localeTableArYE, + LocaleAs: localeTableAs, + LocaleAsIN: localeTableAsIN, + LocaleAsa: localeTableAsa, + LocaleAsaTZ: localeTableAsaTZ, + LocaleAst: localeTableAst, + LocaleAstES: localeTableAstES, + LocaleAz: localeTableAz, + LocaleAzCyrl: localeTableAzCyrl, + LocaleAzCyrlAZ: localeTableAzCyrlAZ, + LocaleAzLatn: localeTableAzLatn, + LocaleAzLatnAZ: localeTableAzLatnAZ, + LocaleBal: localeTableBal, + LocaleBalArab: localeTableBalArab, + LocaleBalArabPK: localeTableBalArabPK, + LocaleBalLatn: localeTableBalLatn, + LocaleBalLatnPK: localeTableBalLatnPK, + LocaleBas: localeTableBas, + LocaleBasCM: localeTableBasCM, + LocaleBe: localeTableBe, + LocaleBeBY: localeTableBeBY, + LocaleBetarask: localeTableBetarask, + LocaleBem: localeTableBem, + LocaleBemZM: localeTableBemZM, + LocaleBew: localeTableBew, + LocaleBewID: localeTableBewID, + LocaleBez: localeTableBez, + LocaleBezTZ: localeTableBezTZ, + LocaleBg: localeTableBg, + LocaleBgBG: localeTableBgBG, + LocaleBgc: localeTableBgc, + LocaleBgcIN: localeTableBgcIN, + LocaleBho: localeTableBho, + LocaleBhoIN: localeTableBhoIN, + LocaleBlo: localeTableBlo, + LocaleBloBJ: localeTableBloBJ, + LocaleBm: localeTableBm, + LocaleBmML: localeTableBmML, + LocaleBn: localeTableBn, + LocaleBnBD: localeTableBnBD, + LocaleBnIN: localeTableBnIN, + LocaleBo: localeTableBo, + LocaleBoCN: localeTableBoCN, + LocaleBoIN: localeTableBoIN, + LocaleBr: localeTableBr, + LocaleBrFR: localeTableBrFR, + LocaleBrx: localeTableBrx, + LocaleBrxIN: localeTableBrxIN, + LocaleBs: 
localeTableBs, + LocaleBsCyrl: localeTableBsCyrl, + LocaleBsCyrlBA: localeTableBsCyrlBA, + LocaleBsLatn: localeTableBsLatn, + LocaleBsLatnBA: localeTableBsLatnBA, + LocaleByn: localeTableByn, + LocaleBynER: localeTableBynER, + LocaleCa: localeTableCa, + LocaleCaAD: localeTableCaAD, + LocaleCaES: localeTableCaES, + LocaleCaESvalencia: localeTableCaESvalencia, + LocaleCaFR: localeTableCaFR, + LocaleCaIT: localeTableCaIT, + LocaleCad: localeTableCad, + LocaleCadUS: localeTableCadUS, + LocaleCch: localeTableCch, + LocaleCchNG: localeTableCchNG, + LocaleCcp: localeTableCcp, + LocaleCcpBD: localeTableCcpBD, + LocaleCcpIN: localeTableCcpIN, + LocaleCe: localeTableCe, + LocaleCeRU: localeTableCeRU, + LocaleCeb: localeTableCeb, + LocaleCebPH: localeTableCebPH, + LocaleCgg: localeTableCgg, + LocaleCggUG: localeTableCggUG, + LocaleChr: localeTableChr, + LocaleChrUS: localeTableChrUS, + LocaleCic: localeTableCic, + LocaleCicUS: localeTableCicUS, + LocaleCkb: localeTableCkb, + LocaleCkbIQ: localeTableCkbIQ, + LocaleCkbIR: localeTableCkbIR, + LocaleCo: localeTableCo, + LocaleCoFR: localeTableCoFR, + LocaleCs: localeTableCs, + LocaleCsCZ: localeTableCsCZ, + LocaleCsw: localeTableCsw, + LocaleCswCA: localeTableCswCA, + LocaleCu: localeTableCu, + LocaleCuRU: localeTableCuRU, + LocaleCv: localeTableCv, + LocaleCvRU: localeTableCvRU, + LocaleCy: localeTableCy, + LocaleCyGB: localeTableCyGB, + LocaleDa: localeTableDa, + LocaleDaDK: localeTableDaDK, + LocaleDaGL: localeTableDaGL, + LocaleDav: localeTableDav, + LocaleDavKE: localeTableDavKE, + LocaleDe: localeTableDe, + LocaleDeAT: localeTableDeAT, + LocaleDeBE: localeTableDeBE, + LocaleDeCH: localeTableDeCH, + LocaleDeDE: localeTableDeDE, + LocaleDeIT: localeTableDeIT, + LocaleDeLI: localeTableDeLI, + LocaleDeLU: localeTableDeLU, + LocaleDje: localeTableDje, + LocaleDjeNE: localeTableDjeNE, + LocaleDoi: localeTableDoi, + LocaleDoiIN: localeTableDoiIN, + LocaleDsb: localeTableDsb, + LocaleDsbDE: localeTableDsbDE, + LocaleDua: localeTableDua, + LocaleDuaCM: localeTableDuaCM, + LocaleDyo: localeTableDyo, + LocaleDyoSN: localeTableDyoSN, + LocaleDz: localeTableDz, + LocaleDzBT: localeTableDzBT, + LocaleEbu: localeTableEbu, + LocaleEbuKE: localeTableEbuKE, + LocaleEe: localeTableEe, + LocaleEeGH: localeTableEeGH, + LocaleEeTG: localeTableEeTG, + LocaleEl: localeTableEl, + LocaleElCY: localeTableElCY, + LocaleElGR: localeTableElGR, + LocaleElpolyton: localeTableElpolyton, + LocaleEn: localeTableEn, + LocaleEn001: localeTableEn001, + LocaleEn150: localeTableEn150, + LocaleEnAE: localeTableEnAE, + LocaleEnAG: localeTableEnAG, + LocaleEnAI: localeTableEnAI, + LocaleEnAS: localeTableEnAS, + LocaleEnAT: localeTableEnAT, + LocaleEnAU: localeTableEnAU, + LocaleEnBB: localeTableEnBB, + LocaleEnBE: localeTableEnBE, + LocaleEnBI: localeTableEnBI, + LocaleEnBM: localeTableEnBM, + LocaleEnBS: localeTableEnBS, + LocaleEnBW: localeTableEnBW, + LocaleEnBZ: localeTableEnBZ, + LocaleEnCA: localeTableEnCA, + LocaleEnCC: localeTableEnCC, + LocaleEnCH: localeTableEnCH, + LocaleEnCK: localeTableEnCK, + LocaleEnCM: localeTableEnCM, + LocaleEnCX: localeTableEnCX, + LocaleEnCY: localeTableEnCY, + LocaleEnDE: localeTableEnDE, + LocaleEnDG: localeTableEnDG, + LocaleEnDK: localeTableEnDK, + LocaleEnDM: localeTableEnDM, + LocaleEnDsrt: localeTableEnDsrt, + LocaleEnDsrtUS: localeTableEnDsrtUS, + LocaleEnER: localeTableEnER, + LocaleEnFI: localeTableEnFI, + LocaleEnFJ: localeTableEnFJ, + LocaleEnFK: localeTableEnFK, + LocaleEnFM: localeTableEnFM, + LocaleEnGB: localeTableEnGB, + LocaleEnGD: 
localeTableEnGD, + LocaleEnGG: localeTableEnGG, + LocaleEnGH: localeTableEnGH, + LocaleEnGI: localeTableEnGI, + LocaleEnGM: localeTableEnGM, + LocaleEnGU: localeTableEnGU, + LocaleEnGY: localeTableEnGY, + LocaleEnHK: localeTableEnHK, + LocaleEnID: localeTableEnID, + LocaleEnIE: localeTableEnIE, + LocaleEnIL: localeTableEnIL, + LocaleEnIM: localeTableEnIM, + LocaleEnIN: localeTableEnIN, + LocaleEnIO: localeTableEnIO, + LocaleEnJE: localeTableEnJE, + LocaleEnJM: localeTableEnJM, + LocaleEnKE: localeTableEnKE, + LocaleEnKI: localeTableEnKI, + LocaleEnKN: localeTableEnKN, + LocaleEnKY: localeTableEnKY, + LocaleEnLC: localeTableEnLC, + LocaleEnLR: localeTableEnLR, + LocaleEnLS: localeTableEnLS, + LocaleEnMG: localeTableEnMG, + LocaleEnMH: localeTableEnMH, + LocaleEnMO: localeTableEnMO, + LocaleEnMP: localeTableEnMP, + LocaleEnMS: localeTableEnMS, + LocaleEnMT: localeTableEnMT, + LocaleEnMU: localeTableEnMU, + LocaleEnMV: localeTableEnMV, + LocaleEnMW: localeTableEnMW, + LocaleEnMY: localeTableEnMY, + LocaleEnNA: localeTableEnNA, + LocaleEnNF: localeTableEnNF, + LocaleEnNG: localeTableEnNG, + LocaleEnNL: localeTableEnNL, + LocaleEnNR: localeTableEnNR, + LocaleEnNU: localeTableEnNU, + LocaleEnNZ: localeTableEnNZ, + LocaleEnPG: localeTableEnPG, + LocaleEnPH: localeTableEnPH, + LocaleEnPK: localeTableEnPK, + LocaleEnPN: localeTableEnPN, + LocaleEnPR: localeTableEnPR, + LocaleEnPW: localeTableEnPW, + LocaleEnRW: localeTableEnRW, + LocaleEnSB: localeTableEnSB, + LocaleEnSC: localeTableEnSC, + LocaleEnSD: localeTableEnSD, + LocaleEnSE: localeTableEnSE, + LocaleEnSG: localeTableEnSG, + LocaleEnSH: localeTableEnSH, + LocaleEnSI: localeTableEnSI, + LocaleEnSL: localeTableEnSL, + LocaleEnSS: localeTableEnSS, + LocaleEnSX: localeTableEnSX, + LocaleEnSZ: localeTableEnSZ, + LocaleEnShaw: localeTableEnShaw, + LocaleEnShawGB: localeTableEnShawGB, + LocaleEnTC: localeTableEnTC, + LocaleEnTK: localeTableEnTK, + LocaleEnTO: localeTableEnTO, + LocaleEnTT: localeTableEnTT, + LocaleEnTV: localeTableEnTV, + LocaleEnTZ: localeTableEnTZ, + LocaleEnUG: localeTableEnUG, + LocaleEnUM: localeTableEnUM, + LocaleEnUS: localeTableEnUS, + LocaleEnUSuvaposix: localeTableEnUSuvaposix, + LocaleEnVC: localeTableEnVC, + LocaleEnVG: localeTableEnVG, + LocaleEnVI: localeTableEnVI, + LocaleEnVU: localeTableEnVU, + LocaleEnWS: localeTableEnWS, + LocaleEnZA: localeTableEnZA, + LocaleEnZM: localeTableEnZM, + LocaleEnZW: localeTableEnZW, + LocaleEo: localeTableEo, + LocaleEo001: localeTableEo001, + LocaleEs: localeTableEs, + LocaleEs419: localeTableEs419, + LocaleEsAR: localeTableEsAR, + LocaleEsBO: localeTableEsBO, + LocaleEsBR: localeTableEsBR, + LocaleEsBZ: localeTableEsBZ, + LocaleEsCL: localeTableEsCL, + LocaleEsCO: localeTableEsCO, + LocaleEsCR: localeTableEsCR, + LocaleEsCU: localeTableEsCU, + LocaleEsDO: localeTableEsDO, + LocaleEsEA: localeTableEsEA, + LocaleEsEC: localeTableEsEC, + LocaleEsES: localeTableEsES, + LocaleEsGQ: localeTableEsGQ, + LocaleEsGT: localeTableEsGT, + LocaleEsHN: localeTableEsHN, + LocaleEsIC: localeTableEsIC, + LocaleEsMX: localeTableEsMX, + LocaleEsNI: localeTableEsNI, + LocaleEsPA: localeTableEsPA, + LocaleEsPE: localeTableEsPE, + LocaleEsPH: localeTableEsPH, + LocaleEsPR: localeTableEsPR, + LocaleEsPY: localeTableEsPY, + LocaleEsSV: localeTableEsSV, + LocaleEsUS: localeTableEsUS, + LocaleEsUY: localeTableEsUY, + LocaleEsVE: localeTableEsVE, + LocaleEt: localeTableEt, + LocaleEtEE: localeTableEtEE, + LocaleEu: localeTableEu, + LocaleEuES: localeTableEuES, + LocaleEwo: localeTableEwo, + LocaleEwoCM: 
localeTableEwoCM, + LocaleFa: localeTableFa, + LocaleFaAF: localeTableFaAF, + LocaleFaIR: localeTableFaIR, + LocaleFf: localeTableFf, + LocaleFfAdlm: localeTableFfAdlm, + LocaleFfAdlmBF: localeTableFfAdlmBF, + LocaleFfAdlmCM: localeTableFfAdlmCM, + LocaleFfAdlmGH: localeTableFfAdlmGH, + LocaleFfAdlmGM: localeTableFfAdlmGM, + LocaleFfAdlmGN: localeTableFfAdlmGN, + LocaleFfAdlmGW: localeTableFfAdlmGW, + LocaleFfAdlmLR: localeTableFfAdlmLR, + LocaleFfAdlmMR: localeTableFfAdlmMR, + LocaleFfAdlmNE: localeTableFfAdlmNE, + LocaleFfAdlmNG: localeTableFfAdlmNG, + LocaleFfAdlmSL: localeTableFfAdlmSL, + LocaleFfAdlmSN: localeTableFfAdlmSN, + LocaleFfLatn: localeTableFfLatn, + LocaleFfLatnBF: localeTableFfLatnBF, + LocaleFfLatnCM: localeTableFfLatnCM, + LocaleFfLatnGH: localeTableFfLatnGH, + LocaleFfLatnGM: localeTableFfLatnGM, + LocaleFfLatnGN: localeTableFfLatnGN, + LocaleFfLatnGW: localeTableFfLatnGW, + LocaleFfLatnLR: localeTableFfLatnLR, + LocaleFfLatnMR: localeTableFfLatnMR, + LocaleFfLatnNE: localeTableFfLatnNE, + LocaleFfLatnNG: localeTableFfLatnNG, + LocaleFfLatnSL: localeTableFfLatnSL, + LocaleFfLatnSN: localeTableFfLatnSN, + LocaleFi: localeTableFi, + LocaleFiFI: localeTableFiFI, + LocaleFil: localeTableFil, + LocaleFilPH: localeTableFilPH, + LocaleFo: localeTableFo, + LocaleFoDK: localeTableFoDK, + LocaleFoFO: localeTableFoFO, + LocaleFr: localeTableFr, + LocaleFrBE: localeTableFrBE, + LocaleFrBF: localeTableFrBF, + LocaleFrBI: localeTableFrBI, + LocaleFrBJ: localeTableFrBJ, + LocaleFrBL: localeTableFrBL, + LocaleFrCA: localeTableFrCA, + LocaleFrCD: localeTableFrCD, + LocaleFrCF: localeTableFrCF, + LocaleFrCG: localeTableFrCG, + LocaleFrCH: localeTableFrCH, + LocaleFrCI: localeTableFrCI, + LocaleFrCM: localeTableFrCM, + LocaleFrDJ: localeTableFrDJ, + LocaleFrDZ: localeTableFrDZ, + LocaleFrFR: localeTableFrFR, + LocaleFrGA: localeTableFrGA, + LocaleFrGF: localeTableFrGF, + LocaleFrGN: localeTableFrGN, + LocaleFrGP: localeTableFrGP, + LocaleFrGQ: localeTableFrGQ, + LocaleFrHT: localeTableFrHT, + LocaleFrKM: localeTableFrKM, + LocaleFrLU: localeTableFrLU, + LocaleFrMA: localeTableFrMA, + LocaleFrMC: localeTableFrMC, + LocaleFrMF: localeTableFrMF, + LocaleFrMG: localeTableFrMG, + LocaleFrML: localeTableFrML, + LocaleFrMQ: localeTableFrMQ, + LocaleFrMR: localeTableFrMR, + LocaleFrMU: localeTableFrMU, + LocaleFrNC: localeTableFrNC, + LocaleFrNE: localeTableFrNE, + LocaleFrPF: localeTableFrPF, + LocaleFrPM: localeTableFrPM, + LocaleFrRE: localeTableFrRE, + LocaleFrRW: localeTableFrRW, + LocaleFrSC: localeTableFrSC, + LocaleFrSN: localeTableFrSN, + LocaleFrSY: localeTableFrSY, + LocaleFrTD: localeTableFrTD, + LocaleFrTG: localeTableFrTG, + LocaleFrTN: localeTableFrTN, + LocaleFrVU: localeTableFrVU, + LocaleFrWF: localeTableFrWF, + LocaleFrYT: localeTableFrYT, + LocaleFrr: localeTableFrr, + LocaleFrrDE: localeTableFrrDE, + LocaleFur: localeTableFur, + LocaleFurIT: localeTableFurIT, + LocaleFy: localeTableFy, + LocaleFyNL: localeTableFyNL, + LocaleGa: localeTableGa, + LocaleGaGB: localeTableGaGB, + LocaleGaIE: localeTableGaIE, + LocaleGaa: localeTableGaa, + LocaleGaaGH: localeTableGaaGH, + LocaleGd: localeTableGd, + LocaleGdGB: localeTableGdGB, + LocaleGez: localeTableGez, + LocaleGezER: localeTableGezER, + LocaleGezET: localeTableGezET, + LocaleGl: localeTableGl, + LocaleGlES: localeTableGlES, + LocaleGn: localeTableGn, + LocaleGnPY: localeTableGnPY, + LocaleGsw: localeTableGsw, + LocaleGswCH: localeTableGswCH, + LocaleGswFR: localeTableGswFR, + LocaleGswLI: localeTableGswLI, + LocaleGu: 
localeTableGu, + LocaleGuIN: localeTableGuIN, + LocaleGuz: localeTableGuz, + LocaleGuzKE: localeTableGuzKE, + LocaleGv: localeTableGv, + LocaleGvIM: localeTableGvIM, + LocaleHa: localeTableHa, + LocaleHaArab: localeTableHaArab, + LocaleHaArabNG: localeTableHaArabNG, + LocaleHaArabSD: localeTableHaArabSD, + LocaleHaGH: localeTableHaGH, + LocaleHaNE: localeTableHaNE, + LocaleHaNG: localeTableHaNG, + LocaleHaw: localeTableHaw, + LocaleHawUS: localeTableHawUS, + LocaleHe: localeTableHe, + LocaleHeIL: localeTableHeIL, + LocaleHi: localeTableHi, + LocaleHiIN: localeTableHiIN, + LocaleHiLatn: localeTableHiLatn, + LocaleHiLatnIN: localeTableHiLatnIN, + LocaleHnj: localeTableHnj, + LocaleHr: localeTableHr, + LocaleHrBA: localeTableHrBA, + LocaleHrHR: localeTableHrHR, + LocaleHsb: localeTableHsb, + LocaleHsbDE: localeTableHsbDE, + LocaleHu: localeTableHu, + LocaleHuHU: localeTableHuHU, + LocaleHy: localeTableHy, + LocaleHyAM: localeTableHyAM, + LocaleIa: localeTableIa, + LocaleIa001: localeTableIa001, + LocaleId: localeTableId, + LocaleIdID: localeTableIdID, + LocaleIe: localeTableIe, + LocaleIeEE: localeTableIeEE, + LocaleIg: localeTableIg, + LocaleIgNG: localeTableIgNG, + LocaleIi: localeTableIi, + LocaleIiCN: localeTableIiCN, + LocaleIs: localeTableIs, + LocaleIsIS: localeTableIsIS, + LocaleIt: localeTableIt, + LocaleItCH: localeTableItCH, + LocaleItIT: localeTableItIT, + LocaleItSM: localeTableItSM, + LocaleItVA: localeTableItVA, + LocaleIu: localeTableIu, + LocaleIuCA: localeTableIuCA, + LocaleJa: localeTableJa, + LocaleJaJP: localeTableJaJP, + LocaleJgo: localeTableJgo, + LocaleJgoCM: localeTableJgoCM, + LocaleJmc: localeTableJmc, + LocaleJmcTZ: localeTableJmcTZ, + LocaleJv: localeTableJv, + LocaleJvID: localeTableJvID, + LocaleKa: localeTableKa, + LocaleKaGE: localeTableKaGE, + LocaleKab: localeTableKab, + LocaleKabDZ: localeTableKabDZ, + LocaleKaj: localeTableKaj, + LocaleKajNG: localeTableKajNG, + LocaleKam: localeTableKam, + LocaleKamKE: localeTableKamKE, + LocaleKcg: localeTableKcg, + LocaleKcgNG: localeTableKcgNG, + LocaleKde: localeTableKde, + LocaleKdeTZ: localeTableKdeTZ, + LocaleKea: localeTableKea, + LocaleKeaCV: localeTableKeaCV, + LocaleKgp: localeTableKgp, + LocaleKgpBR: localeTableKgpBR, + LocaleKhq: localeTableKhq, + LocaleKhqML: localeTableKhqML, + LocaleKi: localeTableKi, + LocaleKiKE: localeTableKiKE, + LocaleKk: localeTableKk, + LocaleKkKZ: localeTableKkKZ, + LocaleKkj: localeTableKkj, + LocaleKkjCM: localeTableKkjCM, + LocaleKl: localeTableKl, + LocaleKlGL: localeTableKlGL, + LocaleKln: localeTableKln, + LocaleKlnKE: localeTableKlnKE, + LocaleKm: localeTableKm, + LocaleKmKH: localeTableKmKH, + LocaleKn: localeTableKn, + LocaleKnIN: localeTableKnIN, + LocaleKo: localeTableKo, + LocaleKoCN: localeTableKoCN, + LocaleKoKP: localeTableKoKP, + LocaleKoKR: localeTableKoKR, + LocaleKok: localeTableKok, + LocaleKokIN: localeTableKokIN, + LocaleKs: localeTableKs, + LocaleKsArab: localeTableKsArab, + LocaleKsArabIN: localeTableKsArabIN, + LocaleKsDeva: localeTableKsDeva, + LocaleKsDevaIN: localeTableKsDevaIN, + LocaleKsb: localeTableKsb, + LocaleKsbTZ: localeTableKsbTZ, + LocaleKsf: localeTableKsf, + LocaleKsfCM: localeTableKsfCM, + LocaleKsh: localeTableKsh, + LocaleKshDE: localeTableKshDE, + LocaleKu: localeTableKu, + LocaleKuTR: localeTableKuTR, + LocaleKw: localeTableKw, + LocaleKwGB: localeTableKwGB, + LocaleKxv: localeTableKxv, + LocaleKxvDeva: localeTableKxvDeva, + LocaleKxvDevaIN: localeTableKxvDevaIN, + LocaleKxvOrya: localeTableKxvOrya, + LocaleKxvOryaIN: 
localeTableKxvOryaIN, + LocaleKxvTelu: localeTableKxvTelu, + LocaleKxvTeluIN: localeTableKxvTeluIN, + LocaleKy: localeTableKy, + LocaleKyKG: localeTableKyKG, + LocaleLa: localeTableLa, + LocaleLaVA: localeTableLaVA, + LocaleLag: localeTableLag, + LocaleLagTZ: localeTableLagTZ, + LocaleLb: localeTableLb, + LocaleLbLU: localeTableLbLU, + LocaleLg: localeTableLg, + LocaleLgUG: localeTableLgUG, + LocaleLij: localeTableLij, + LocaleLijIT: localeTableLijIT, + LocaleLkt: localeTableLkt, + LocaleLktUS: localeTableLktUS, + LocaleLmo: localeTableLmo, + LocaleLmoIT: localeTableLmoIT, + LocaleLn: localeTableLn, + LocaleLnAO: localeTableLnAO, + LocaleLnCD: localeTableLnCD, + LocaleLnCF: localeTableLnCF, + LocaleLnCG: localeTableLnCG, + LocaleLo: localeTableLo, + LocaleLoLA: localeTableLoLA, + LocaleLrc: localeTableLrc, + LocaleLrcIQ: localeTableLrcIQ, + LocaleLrcIR: localeTableLrcIR, + LocaleLt: localeTableLt, + LocaleLtLT: localeTableLtLT, + LocaleLu: localeTableLu, + LocaleLuCD: localeTableLuCD, + LocaleLuo: localeTableLuo, + LocaleLuoKE: localeTableLuoKE, + LocaleLuy: localeTableLuy, + LocaleLuyKE: localeTableLuyKE, + LocaleLv: localeTableLv, + LocaleLvLV: localeTableLvLV, + LocaleMai: localeTableMai, + LocaleMaiIN: localeTableMaiIN, + LocaleMas: localeTableMas, + LocaleMasKE: localeTableMasKE, + LocaleMasTZ: localeTableMasTZ, + LocaleMer: localeTableMer, + LocaleMerKE: localeTableMerKE, + LocaleMfe: localeTableMfe, + LocaleMfeMU: localeTableMfeMU, + LocaleMg: localeTableMg, + LocaleMgMG: localeTableMgMG, + LocaleMgh: localeTableMgh, + LocaleMghMZ: localeTableMghMZ, + LocaleMgo: localeTableMgo, + LocaleMgoCM: localeTableMgoCM, + LocaleMi: localeTableMi, + LocaleMiNZ: localeTableMiNZ, + LocaleMk: localeTableMk, + LocaleMkMK: localeTableMkMK, + LocaleMl: localeTableMl, + LocaleMlIN: localeTableMlIN, + LocaleMn: localeTableMn, + LocaleMnMN: localeTableMnMN, + LocaleMnMongMN: localeTableMnMongMN, + LocaleMni: localeTableMni, + LocaleMniBeng: localeTableMniBeng, + LocaleMniBengIN: localeTableMniBengIN, + LocaleMr: localeTableMr, + LocaleMrIN: localeTableMrIN, + LocaleMs: localeTableMs, + LocaleMsArab: localeTableMsArab, + LocaleMsArabBN: localeTableMsArabBN, + LocaleMsArabMY: localeTableMsArabMY, + LocaleMsBN: localeTableMsBN, + LocaleMsID: localeTableMsID, + LocaleMsMY: localeTableMsMY, + LocaleMsSG: localeTableMsSG, + LocaleMt: localeTableMt, + LocaleMtMT: localeTableMtMT, + LocaleMua: localeTableMua, + LocaleMuaCM: localeTableMuaCM, + LocaleMus: localeTableMus, + LocaleMusUS: localeTableMusUS, + LocaleMy: localeTableMy, + LocaleMyMM: localeTableMyMM, + LocaleMyv: localeTableMyv, + LocaleMyvRU: localeTableMyvRU, + LocaleMzn: localeTableMzn, + LocaleMznIR: localeTableMznIR, + LocaleNaq: localeTableNaq, + LocaleNaqNA: localeTableNaqNA, + LocaleNd: localeTableNd, + LocaleNdZW: localeTableNdZW, + LocaleNds: localeTableNds, + LocaleNdsDE: localeTableNdsDE, + LocaleNdsNL: localeTableNdsNL, + LocaleNe: localeTableNe, + LocaleNeIN: localeTableNeIN, + LocaleNeNP: localeTableNeNP, + LocaleNl: localeTableNl, + LocaleNlAW: localeTableNlAW, + LocaleNlBE: localeTableNlBE, + LocaleNlBQ: localeTableNlBQ, + LocaleNlCW: localeTableNlCW, + LocaleNlNL: localeTableNlNL, + LocaleNlSR: localeTableNlSR, + LocaleNlSX: localeTableNlSX, + LocaleNmg: localeTableNmg, + LocaleNmgCM: localeTableNmgCM, + LocaleNn: localeTableNn, + LocaleNnNO: localeTableNnNO, + LocaleNnh: localeTableNnh, + LocaleNnhCM: localeTableNnhCM, + LocaleNo: localeTableNo, + LocaleNqo: localeTableNqo, + LocaleNqoGN: localeTableNqoGN, + LocaleNr: localeTableNr, 
+ LocaleNrZA: localeTableNrZA, + LocaleNso: localeTableNso, + LocaleNsoZA: localeTableNsoZA, + LocaleNus: localeTableNus, + LocaleNusSS: localeTableNusSS, + LocaleNy: localeTableNy, + LocaleNyMW: localeTableNyMW, + LocaleNyn: localeTableNyn, + LocaleNynUG: localeTableNynUG, + LocaleOc: localeTableOc, + LocaleOcES: localeTableOcES, + LocaleOcFR: localeTableOcFR, + LocaleOm: localeTableOm, + LocaleOmET: localeTableOmET, + LocaleOmKE: localeTableOmKE, + LocaleOr: localeTableOr, + LocaleOrIN: localeTableOrIN, + LocaleOs: localeTableOs, + LocaleOsGE: localeTableOsGE, + LocaleOsRU: localeTableOsRU, + LocaleOsa: localeTableOsa, + LocaleOsaUS: localeTableOsaUS, + LocalePa: localeTablePa, + LocalePaArab: localeTablePaArab, + LocalePaArabPK: localeTablePaArabPK, + LocalePaGuru: localeTablePaGuru, + LocalePaGuruIN: localeTablePaGuruIN, + LocalePap: localeTablePap, + LocalePapAW: localeTablePapAW, + LocalePapCW: localeTablePapCW, + LocalePcm: localeTablePcm, + LocalePcmNG: localeTablePcmNG, + LocalePis: localeTablePis, + LocalePisSB: localeTablePisSB, + LocalePl: localeTablePl, + LocalePlPL: localeTablePlPL, + LocalePrg: localeTablePrg, + LocalePrgPL: localeTablePrgPL, + LocalePs: localeTablePs, + LocalePsAF: localeTablePsAF, + LocalePsPK: localeTablePsPK, + LocalePt: localeTablePt, + LocalePtAO: localeTablePtAO, + LocalePtBR: localeTablePtBR, + LocalePtCH: localeTablePtCH, + LocalePtCV: localeTablePtCV, + LocalePtGQ: localeTablePtGQ, + LocalePtGW: localeTablePtGW, + LocalePtLU: localeTablePtLU, + LocalePtMO: localeTablePtMO, + LocalePtMZ: localeTablePtMZ, + LocalePtPT: localeTablePtPT, + LocalePtST: localeTablePtST, + LocalePtTL: localeTablePtTL, + LocaleQu: localeTableQu, + LocaleQuBO: localeTableQuBO, + LocaleQuEC: localeTableQuEC, + LocaleQuPE: localeTableQuPE, + LocaleRaj: localeTableRaj, + LocaleRajIN: localeTableRajIN, + LocaleRif: localeTableRif, + LocaleRifMA: localeTableRifMA, + LocaleRm: localeTableRm, + LocaleRmCH: localeTableRmCH, + LocaleRn: localeTableRn, + LocaleRnBI: localeTableRnBI, + LocaleRo: localeTableRo, + LocaleRoMD: localeTableRoMD, + LocaleRoRO: localeTableRoRO, + LocaleRof: localeTableRof, + LocaleRofTZ: localeTableRofTZ, + LocaleRu: localeTableRu, + LocaleRuBY: localeTableRuBY, + LocaleRuKG: localeTableRuKG, + LocaleRuKZ: localeTableRuKZ, + LocaleRuMD: localeTableRuMD, + LocaleRuRU: localeTableRuRU, + LocaleRuUA: localeTableRuUA, + LocaleRw: localeTableRw, + LocaleRwRW: localeTableRwRW, + LocaleRwk: localeTableRwk, + LocaleRwkTZ: localeTableRwkTZ, + LocaleSa: localeTableSa, + LocaleSaIN: localeTableSaIN, + LocaleSah: localeTableSah, + LocaleSahRU: localeTableSahRU, + LocaleSaq: localeTableSaq, + LocaleSaqKE: localeTableSaqKE, + LocaleSat: localeTableSat, + LocaleSbp: localeTableSbp, + LocaleSbpTZ: localeTableSbpTZ, + LocaleSc: localeTableSc, + LocaleScIT: localeTableScIT, + LocaleScn: localeTableScn, + LocaleScnIT: localeTableScnIT, + LocaleSd: localeTableSd, + LocaleSdArab: localeTableSdArab, + LocaleSdArabPK: localeTableSdArabPK, + LocaleSdDeva: localeTableSdDeva, + LocaleSdDevaIN: localeTableSdDevaIN, + LocaleSe: localeTableSe, + LocaleSeFI: localeTableSeFI, + LocaleSeNO: localeTableSeNO, + LocaleSeSE: localeTableSeSE, + LocaleSeh: localeTableSeh, + LocaleSehMZ: localeTableSehMZ, + LocaleSes: localeTableSes, + LocaleSesML: localeTableSesML, + LocaleSg: localeTableSg, + LocaleSgCF: localeTableSgCF, + LocaleShi: localeTableShi, + LocaleShiLatn: localeTableShiLatn, + LocaleShiLatnMA: localeTableShiLatnMA, + LocaleShiTfng: localeTableShiTfng, + LocaleShiTfngMA: 
localeTableShiTfngMA, + LocaleSi: localeTableSi, + LocaleSiLK: localeTableSiLK, + LocaleSid: localeTableSid, + LocaleSidET: localeTableSidET, + LocaleSk: localeTableSk, + LocaleSkSK: localeTableSkSK, + LocaleSkr: localeTableSkr, + LocaleSkrPK: localeTableSkrPK, + LocaleSl: localeTableSl, + LocaleSlSI: localeTableSlSI, + LocaleSmn: localeTableSmn, + LocaleSmnFI: localeTableSmnFI, + LocaleSn: localeTableSn, + LocaleSnZW: localeTableSnZW, + LocaleSo: localeTableSo, + LocaleSoDJ: localeTableSoDJ, + LocaleSoET: localeTableSoET, + LocaleSoKE: localeTableSoKE, + LocaleSoSO: localeTableSoSO, + LocaleSq: localeTableSq, + LocaleSqAL: localeTableSqAL, + LocaleSqMK: localeTableSqMK, + LocaleSqXK: localeTableSqXK, + LocaleSr: localeTableSr, + LocaleSrCyrl: localeTableSrCyrl, + LocaleSrCyrlBA: localeTableSrCyrlBA, + LocaleSrCyrlME: localeTableSrCyrlME, + LocaleSrCyrlRS: localeTableSrCyrlRS, + LocaleSrCyrlXK: localeTableSrCyrlXK, + LocaleSrLatn: localeTableSrLatn, + LocaleSrLatnBA: localeTableSrLatnBA, + LocaleSrLatnME: localeTableSrLatnME, + LocaleSrLatnRS: localeTableSrLatnRS, + LocaleSrLatnXK: localeTableSrLatnXK, + LocaleSs: localeTableSs, + LocaleSsSZ: localeTableSsSZ, + LocaleSsZA: localeTableSsZA, + LocaleSsy: localeTableSsy, + LocaleSsyER: localeTableSsyER, + LocaleSt: localeTableSt, + LocaleStLS: localeTableStLS, + LocaleStZA: localeTableStZA, + LocaleSu: localeTableSu, + LocaleSuLatn: localeTableSuLatn, + LocaleSuLatnID: localeTableSuLatnID, + LocaleSv: localeTableSv, + LocaleSvAX: localeTableSvAX, + LocaleSvFI: localeTableSvFI, + LocaleSvSE: localeTableSvSE, + LocaleSw: localeTableSw, + LocaleSwCD: localeTableSwCD, + LocaleSwKE: localeTableSwKE, + LocaleSwTZ: localeTableSwTZ, + LocaleSwUG: localeTableSwUG, + LocaleSyr: localeTableSyr, + LocaleSyrIQ: localeTableSyrIQ, + LocaleSyrSY: localeTableSyrSY, + LocaleSzl: localeTableSzl, + LocaleSzlPL: localeTableSzlPL, + LocaleTa: localeTableTa, + LocaleTaIN: localeTableTaIN, + LocaleTaLK: localeTableTaLK, + LocaleTaMY: localeTableTaMY, + LocaleTaSG: localeTableTaSG, + LocaleTe: localeTableTe, + LocaleTeIN: localeTableTeIN, + LocaleTeo: localeTableTeo, + LocaleTeoKE: localeTableTeoKE, + LocaleTeoUG: localeTableTeoUG, + LocaleTg: localeTableTg, + LocaleTgTJ: localeTableTgTJ, + LocaleTh: localeTableTh, + LocaleThTH: localeTableThTH, + LocaleTi: localeTableTi, + LocaleTiER: localeTableTiER, + LocaleTiET: localeTableTiET, + LocaleTig: localeTableTig, + LocaleTigER: localeTableTigER, + LocaleTk: localeTableTk, + LocaleTkTM: localeTableTkTM, + LocaleTn: localeTableTn, + LocaleTnBW: localeTableTnBW, + LocaleTnZA: localeTableTnZA, + LocaleTo: localeTableTo, + LocaleToTO: localeTableToTO, + LocaleTok: localeTableTok, + LocaleTok001: localeTableTok001, + LocaleTpi: localeTableTpi, + LocaleTpiPG: localeTableTpiPG, + LocaleTr: localeTableTr, + LocaleTrCY: localeTableTrCY, + LocaleTrTR: localeTableTrTR, + LocaleTrv: localeTableTrv, + LocaleTrvTW: localeTableTrvTW, + LocaleTrw: localeTableTrw, + LocaleTrwPK: localeTableTrwPK, + LocaleTs: localeTableTs, + LocaleTsZA: localeTableTsZA, + LocaleTt: localeTableTt, + LocaleTtRU: localeTableTtRU, + LocaleTwq: localeTableTwq, + LocaleTwqNE: localeTableTwqNE, + LocaleTzm: localeTableTzm, + LocaleTzmMA: localeTableTzmMA, + LocaleUg: localeTableUg, + LocaleUgCN: localeTableUgCN, + LocaleUk: localeTableUk, + LocaleUkUA: localeTableUkUA, + LocaleUnd: localeTableUnd, + LocaleUr: localeTableUr, + LocaleUrIN: localeTableUrIN, + LocaleUrPK: localeTableUrPK, + LocaleUz: localeTableUz, + LocaleUzArab: localeTableUzArab, + 
LocaleUzArabAF: localeTableUzArabAF, + LocaleUzCyrl: localeTableUzCyrl, + LocaleUzCyrlUZ: localeTableUzCyrlUZ, + LocaleUzLatn: localeTableUzLatn, + LocaleUzLatnUZ: localeTableUzLatnUZ, + LocaleVai: localeTableVai, + LocaleVaiLatn: localeTableVaiLatn, + LocaleVaiLatnLR: localeTableVaiLatnLR, + LocaleVaiVaii: localeTableVaiVaii, + LocaleVaiVaiiLR: localeTableVaiVaiiLR, + LocaleVe: localeTableVe, + LocaleVeZA: localeTableVeZA, + LocaleVec: localeTableVec, + LocaleVecIT: localeTableVecIT, + LocaleVi: localeTableVi, + LocaleViVN: localeTableViVN, + LocaleVmw: localeTableVmw, + LocaleVmwMZ: localeTableVmwMZ, + LocaleVo: localeTableVo, + LocaleVo001: localeTableVo001, + LocaleVun: localeTableVun, + LocaleVunTZ: localeTableVunTZ, + LocaleWae: localeTableWae, + LocaleWaeCH: localeTableWaeCH, + LocaleWal: localeTableWal, + LocaleWalET: localeTableWalET, + LocaleWo: localeTableWo, + LocaleWoSN: localeTableWoSN, + LocaleXh: localeTableXh, + LocaleXhZA: localeTableXhZA, + LocaleXnr: localeTableXnr, + LocaleXnrIN: localeTableXnrIN, + LocaleXog: localeTableXog, + LocaleXogUG: localeTableXogUG, + LocaleYav: localeTableYav, + LocaleYavCM: localeTableYavCM, + LocaleYi: localeTableYi, + LocaleYiUA: localeTableYiUA, + LocaleYo: localeTableYo, + LocaleYoBJ: localeTableYoBJ, + LocaleYoNG: localeTableYoNG, + LocaleYrl: localeTableYrl, + LocaleYrlBR: localeTableYrlBR, + LocaleYrlCO: localeTableYrlCO, + LocaleYrlVE: localeTableYrlVE, + LocaleYue: localeTableYue, + LocaleYueHans: localeTableYueHans, + LocaleYueHansCN: localeTableYueHansCN, + LocaleYueHant: localeTableYueHant, + LocaleYueHantHK: localeTableYueHantHK, + LocaleZa: localeTableZa, + LocaleZaCN: localeTableZaCN, + LocaleZgh: localeTableZgh, + LocaleZghMA: localeTableZghMA, + LocaleZh: localeTableZh, + LocaleZhHans: localeTableZhHans, + LocaleZhHansCN: localeTableZhHansCN, + LocaleZhHansHK: localeTableZhHansHK, + LocaleZhHansMO: localeTableZhHansMO, + LocaleZhHansSG: localeTableZhHansSG, + LocaleZhHant: localeTableZhHant, + LocaleZhHantHK: localeTableZhHantHK, + LocaleZhHantMO: localeTableZhHantMO, + LocaleZhHantTW: localeTableZhHantTW, + LocaleZu: localeTableZu, + LocaleZuZA: localeTableZuZA, +} + +const ( + shortDayNamesField = iota + longDayNamesField + shortMonthNamesField + longMonthNamesField + dayPeriodsField +) diff --git a/vendor/github.com/expr-lang/expr/README.md b/vendor/github.com/expr-lang/expr/README.md index bd34c7d2487..6c56c67b675 100644 --- a/vendor/github.com/expr-lang/expr/README.md +++ b/vendor/github.com/expr-lang/expr/README.md @@ -162,6 +162,9 @@ func main() { * [Visually.io](https://visually.io) employs Expr as a business rule engine for its personalization targeting algorithm. * [Akvorado](https://github.com/akvorado/akvorado) utilizes Expr to classify exporters and interfaces in network flows. * [keda.sh](https://keda.sh) uses Expr to allow customization of its Kubernetes-based event-driven autoscaling. +* [Span Digital](https://spandigital.com/) uses Expr in its Knowledge Management products. +* [Xiaohongshu](https://www.xiaohongshu.com/) combines YAML with Expr for dynamic policy delivery. +* [Melrōse](https://melrōse.org) uses Expr to implement its music programming language.
[Add your company too](https://github.com/expr-lang/expr/edit/master/README.md) diff --git a/vendor/github.com/expr-lang/expr/ast/print.go b/vendor/github.com/expr-lang/expr/ast/print.go index fa593ae28b1..6a7d698a99f 100644 --- a/vendor/github.com/expr-lang/expr/ast/print.go +++ b/vendor/github.com/expr-lang/expr/ast/print.go @@ -65,8 +65,7 @@ func (n *BinaryNode) String() string { var lhs, rhs string var lwrap, rwrap bool - lb, ok := n.Left.(*BinaryNode) - if ok { + if lb, ok := n.Left.(*BinaryNode); ok { if operator.Less(lb.Operator, n.Operator) { lwrap = true } @@ -77,9 +76,7 @@ func (n *BinaryNode) String() string { lwrap = true } } - - rb, ok := n.Right.(*BinaryNode) - if ok { + if rb, ok := n.Right.(*BinaryNode); ok { if operator.Less(rb.Operator, n.Operator) { rwrap = true } @@ -88,6 +85,13 @@ func (n *BinaryNode) String() string { } } + if _, ok := n.Left.(*ConditionalNode); ok { + lwrap = true + } + if _, ok := n.Right.(*ConditionalNode); ok { + rwrap = true + } + if lwrap { lhs = fmt.Sprintf("(%s)", n.Left.String()) } else { @@ -108,20 +112,25 @@ func (n *ChainNode) String() string { } func (n *MemberNode) String() string { + node := n.Node.String() + if _, ok := n.Node.(*BinaryNode); ok { + node = fmt.Sprintf("(%s)", node) + } + if n.Optional { if str, ok := n.Property.(*StringNode); ok && utils.IsValidIdentifier(str.Value) { - return fmt.Sprintf("%s?.%s", n.Node.String(), str.Value) + return fmt.Sprintf("%s?.%s", node, str.Value) } else { - return fmt.Sprintf("%s?.[%s]", n.Node.String(), n.Property.String()) + return fmt.Sprintf("%s?.[%s]", node, n.Property.String()) } } if str, ok := n.Property.(*StringNode); ok && utils.IsValidIdentifier(str.Value) { if _, ok := n.Node.(*PointerNode); ok { return fmt.Sprintf(".%s", str.Value) } - return fmt.Sprintf("%s.%s", n.Node.String(), str.Value) + return fmt.Sprintf("%s.%s", node, str.Value) } - return fmt.Sprintf("%s[%s]", n.Node.String(), n.Property.String()) + return fmt.Sprintf("%s[%s]", node, n.Property.String()) } func (n *SliceNode) String() string { @@ -202,5 +211,11 @@ func (n *MapNode) String() string { } func (n *PairNode) String() string { - return fmt.Sprintf("%s: %s", n.Key.String(), n.Value.String()) + if str, ok := n.Key.(*StringNode); ok { + if utils.IsValidIdentifier(str.Value) { + return fmt.Sprintf("%s: %s", str.Value, n.Value.String()) + } + return fmt.Sprintf("%s: %s", str.String(), n.Value.String()) + } + return fmt.Sprintf("(%s): %s", n.Key.String(), n.Value.String()) } diff --git a/vendor/github.com/expr-lang/expr/ast/visitor.go b/vendor/github.com/expr-lang/expr/ast/visitor.go index 287a7558967..90bc9f1d0ee 100644 --- a/vendor/github.com/expr-lang/expr/ast/visitor.go +++ b/vendor/github.com/expr-lang/expr/ast/visitor.go @@ -7,6 +7,9 @@ type Visitor interface { } func Walk(node *Node, v Visitor) { + if *node == nil { + return + } switch n := (*node).(type) { case *NilNode: case *IdentifierNode: diff --git a/vendor/github.com/expr-lang/expr/builtin/builtin.go b/vendor/github.com/expr-lang/expr/builtin/builtin.go index 7bf377df224..cc6f197cdf2 100644 --- a/vendor/github.com/expr-lang/expr/builtin/builtin.go +++ b/vendor/github.com/expr-lang/expr/builtin/builtin.go @@ -83,6 +83,11 @@ var Builtins = []*Function{ Predicate: true, Types: types(new(func([]any, func(any) bool) int)), }, + { + Name: "sum", + Predicate: true, + Types: types(new(func([]any, func(any) bool) int)), + }, { Name: "groupBy", Predicate: true, @@ -387,13 +392,6 @@ var Builtins = []*Function{ return validateAggregateFunc("min", args) }, }, - { - 
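The print.go hunk above adds parentheses wherever re-parsing the printed AST would otherwise change precedence: conditional operands of a binary node, binary receivers of a member access, and non-identifier map keys. A quick local round-trip check, using the vendored parser package (the expression is illustrative, not from this patch):

package main

import (
	"fmt"

	"github.com/expr-lang/expr/parser"
)

func main() {
	// A conditional on the left of "+" must keep its parentheses when the
	// AST is printed back, or the round-tripped expression changes meaning.
	tree, err := parser.Parse("(a ? b : c) + 1")
	if err != nil {
		panic(err)
	}
	fmt.Println(tree.Node.String()) // (a ? b : c) + 1
}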
Name: "sum", - Func: sum, - Validate: func(args []reflect.Type) (reflect.Type, error) { - return validateAggregateFunc("sum", args) - }, - }, { Name: "mean", Func: func(args ...any) (any, error) { @@ -474,9 +472,27 @@ var Builtins = []*Function{ { Name: "now", Func: func(args ...any) (any, error) { - return time.Now(), nil + if len(args) == 0 { + return time.Now(), nil + } + if len(args) == 1 { + if tz, ok := args[0].(*time.Location); ok { + return time.Now().In(tz), nil + } + } + return nil, fmt.Errorf("invalid number of arguments (expected 0, got %d)", len(args)) + }, + Validate: func(args []reflect.Type) (reflect.Type, error) { + if len(args) == 0 { + return timeType, nil + } + if len(args) == 1 { + if args[0] != nil && args[0].AssignableTo(locationType) { + return timeType, nil + } + } + return anyType, fmt.Errorf("invalid number of arguments (expected 0, got %d)", len(args)) }, - Types: types(new(func() time.Time)), }, { Name: "duration", @@ -488,9 +504,17 @@ var Builtins = []*Function{ { Name: "date", Func: func(args ...any) (any, error) { + tz, ok := args[0].(*time.Location) + if ok { + args = args[1:] + } + date := args[0].(string) if len(args) == 2 { layout := args[1].(string) + if tz != nil { + return time.ParseInLocation(layout, date, tz) + } return time.Parse(layout, date) } if len(args) == 3 { @@ -517,18 +541,43 @@ var Builtins = []*Function{ time.RFC1123, } for _, layout := range layouts { - t, err := time.Parse(layout, date) - if err == nil { - return t, nil + if tz == nil { + t, err := time.Parse(layout, date) + if err == nil { + return t, nil + } + } else { + t, err := time.ParseInLocation(layout, date, tz) + if err == nil { + return t, nil + } } } return nil, fmt.Errorf("invalid date %s", date) }, - Types: types( - new(func(string) time.Time), - new(func(string, string) time.Time), - new(func(string, string, string) time.Time), - ), + Validate: func(args []reflect.Type) (reflect.Type, error) { + if len(args) < 1 { + return anyType, fmt.Errorf("invalid number of arguments (expected at least 1, got %d)", len(args)) + } + if args[0] != nil && args[0].AssignableTo(locationType) { + args = args[1:] + } + if len(args) > 3 { + return anyType, fmt.Errorf("invalid number of arguments (expected at most 3, got %d)", len(args)) + } + return timeType, nil + }, + }, + { + Name: "timezone", + Func: func(args ...any) (any, error) { + tz, err := time.LoadLocation(args[0].(string)) + if err != nil { + return nil, err + } + return tz, nil + }, + Types: types(time.LoadLocation), }, { Name: "first", diff --git a/vendor/github.com/expr-lang/expr/builtin/lib.go b/vendor/github.com/expr-lang/expr/builtin/lib.go index e3a6c0aef91..e3cd61b968f 100644 --- a/vendor/github.com/expr-lang/expr/builtin/lib.go +++ b/vendor/github.com/expr-lang/expr/builtin/lib.go @@ -258,45 +258,6 @@ func String(arg any) any { return fmt.Sprintf("%v", arg) } -func sum(args ...any) (any, error) { - var total int - var fTotal float64 - - for _, arg := range args { - rv := reflect.ValueOf(deref.Deref(arg)) - - switch rv.Kind() { - case reflect.Array, reflect.Slice: - size := rv.Len() - for i := 0; i < size; i++ { - elemSum, err := sum(rv.Index(i).Interface()) - if err != nil { - return nil, err - } - switch elemSum := elemSum.(type) { - case int: - total += elemSum - case float64: - fTotal += elemSum - } - } - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - total += int(rv.Int()) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - total += int(rv.Uint()) - 
case reflect.Float32, reflect.Float64: - fTotal += rv.Float() - default: - return nil, fmt.Errorf("invalid argument for sum (type %T)", arg) - } - } - - if fTotal != 0.0 { - return fTotal + float64(total), nil - } - return total, nil -} - func minMax(name string, fn func(any, any) bool, args ...any) (any, error) { var val any for _, arg := range args { diff --git a/vendor/github.com/expr-lang/expr/builtin/utils.go b/vendor/github.com/expr-lang/expr/builtin/utils.go index 7d3b6ee8e70..29a95731a05 100644 --- a/vendor/github.com/expr-lang/expr/builtin/utils.go +++ b/vendor/github.com/expr-lang/expr/builtin/utils.go @@ -3,14 +3,17 @@ package builtin import ( "fmt" "reflect" + "time" ) var ( - anyType = reflect.TypeOf(new(any)).Elem() - integerType = reflect.TypeOf(0) - floatType = reflect.TypeOf(float64(0)) - arrayType = reflect.TypeOf([]any{}) - mapType = reflect.TypeOf(map[any]any{}) + anyType = reflect.TypeOf(new(any)).Elem() + integerType = reflect.TypeOf(0) + floatType = reflect.TypeOf(float64(0)) + arrayType = reflect.TypeOf([]any{}) + mapType = reflect.TypeOf(map[any]any{}) + timeType = reflect.TypeOf(new(time.Time)).Elem() + locationType = reflect.TypeOf(new(time.Location)) ) func kind(t reflect.Type) reflect.Kind { diff --git a/vendor/github.com/expr-lang/expr/checker/checker.go b/vendor/github.com/expr-lang/expr/checker/checker.go index b46178d43a5..c71a98f07e7 100644 --- a/vendor/github.com/expr-lang/expr/checker/checker.go +++ b/vendor/github.com/expr-lang/expr/checker/checker.go @@ -13,6 +13,45 @@ import ( "github.com/expr-lang/expr/parser" ) +// ParseCheck parses input expression and checks its types. Also, it applies +// all provided patchers. In case of error, it returns error with a tree. +func ParseCheck(input string, config *conf.Config) (*parser.Tree, error) { + tree, err := parser.ParseWithConfig(input, config) + if err != nil { + return tree, err + } + + if len(config.Visitors) > 0 { + for i := 0; i < 1000; i++ { + more := false + for _, v := range config.Visitors { + // We need to perform types check, because some visitors may rely on + // types information available in the tree. + _, _ = Check(tree, config) + + ast.Walk(&tree.Node, v) + + if v, ok := v.(interface { + ShouldRepeat() bool + }); ok { + more = more || v.ShouldRepeat() + } + } + if !more { + break + } + } + } + _, err = Check(tree, config) + if err != nil { + return tree, err + } + + return tree, nil +} + +// Check checks types of the expression tree. It returns type of the expression +// and error if any. If config is nil, then default configuration will be used. 
func Check(tree *parser.Tree, config *conf.Config) (t reflect.Type, err error) { if config == nil { config = conf.New(nil) @@ -653,6 +692,10 @@ func (v *checker) BuiltinNode(node *ast.BuiltinNode) (reflect.Type, info) { return v.error(node.Arguments[0], "builtin %v takes only array (got %v)", node.Name, collection) } + if len(node.Arguments) == 1 { + return integerType, info{} + } + v.begin(collection) closure, _ := v.visit(node.Arguments[1]) v.end() @@ -668,6 +711,29 @@ func (v *checker) BuiltinNode(node *ast.BuiltinNode) (reflect.Type, info) { } return v.error(node.Arguments[1], "predicate should has one input and one output param") + case "sum": + collection, _ := v.visit(node.Arguments[0]) + if !isArray(collection) && !isAny(collection) { + return v.error(node.Arguments[0], "builtin %v takes only array (got %v)", node.Name, collection) + } + + if len(node.Arguments) == 2 { + v.begin(collection) + closure, _ := v.visit(node.Arguments[1]) + v.end() + + if isFunc(closure) && + closure.NumOut() == 1 && + closure.NumIn() == 1 && isAny(closure.In(0)) { + return closure.Out(0), info{} + } + } else { + if isAny(collection) { + return anyType, info{} + } + return collection.Elem(), info{} + } + case "find", "findLast": collection, _ := v.visit(node.Arguments[0]) if !isArray(collection) && !isAny(collection) { @@ -978,7 +1044,7 @@ func (v *checker) checkArguments( continue } - if !t.AssignableTo(in) && kind(t) != reflect.Interface { + if !(t.AssignableTo(in) || deref.Type(t).AssignableTo(in)) && kind(t) != reflect.Interface { return anyType, &file.Error{ Location: arg.Location(), Message: fmt.Sprintf("cannot use %v as argument (type %v) to call %v ", t, in, name), @@ -1012,9 +1078,11 @@ func traverseAndReplaceIntegerNodesWithIntegerNodes(node *ast.Node, newType refl case *ast.IntegerNode: (*node).SetType(newType) case *ast.UnaryNode: + (*node).SetType(newType) unaryNode := (*node).(*ast.UnaryNode) traverseAndReplaceIntegerNodesWithIntegerNodes(&unaryNode.Node, newType) case *ast.BinaryNode: + // TODO: Binary node return type is dependent on the type of the operands. We can't just change the type of the node. 
binaryNode := (*node).(*ast.BinaryNode) switch binaryNode.Operator { case "+", "-", "*": diff --git a/vendor/github.com/expr-lang/expr/compiler/compiler.go b/vendor/github.com/expr-lang/expr/compiler/compiler.go index 808b53c9b74..720f6a26528 100644 --- a/vendor/github.com/expr-lang/expr/compiler/compiler.go +++ b/vendor/github.com/expr-lang/expr/compiler/compiler.go @@ -2,6 +2,7 @@ package compiler import ( "fmt" + "math" "reflect" "regexp" @@ -92,6 +93,13 @@ type scope struct { index int } +func (c *compiler) nodeParent() ast.Node { + if len(c.nodes) > 1 { + return c.nodes[len(c.nodes)-2] + } + return nil +} + func (c *compiler) emitLocation(loc file.Location, op Opcode, arg int) int { c.bytecode = append(c.bytecode, op) current := len(c.bytecode) @@ -322,22 +330,46 @@ func (c *compiler) IntegerNode(node *ast.IntegerNode) { case reflect.Int: c.emitPush(node.Value) case reflect.Int8: + if node.Value > math.MaxInt8 || node.Value < math.MinInt8 { + panic(fmt.Sprintf("constant %d overflows int8", node.Value)) + } c.emitPush(int8(node.Value)) case reflect.Int16: + if node.Value > math.MaxInt16 || node.Value < math.MinInt16 { + panic(fmt.Sprintf("constant %d overflows int16", node.Value)) + } c.emitPush(int16(node.Value)) case reflect.Int32: + if node.Value > math.MaxInt32 || node.Value < math.MinInt32 { + panic(fmt.Sprintf("constant %d overflows int32", node.Value)) + } c.emitPush(int32(node.Value)) case reflect.Int64: c.emitPush(int64(node.Value)) case reflect.Uint: + if node.Value < 0 { + panic(fmt.Sprintf("constant %d overflows uint", node.Value)) + } c.emitPush(uint(node.Value)) case reflect.Uint8: + if node.Value > math.MaxUint8 || node.Value < 0 { + panic(fmt.Sprintf("constant %d overflows uint8", node.Value)) + } c.emitPush(uint8(node.Value)) case reflect.Uint16: + if node.Value > math.MaxUint16 || node.Value < 0 { + panic(fmt.Sprintf("constant %d overflows uint16", node.Value)) + } c.emitPush(uint16(node.Value)) case reflect.Uint32: + if node.Value < 0 { + panic(fmt.Sprintf("constant %d overflows uint32", node.Value)) + } c.emitPush(uint32(node.Value)) case reflect.Uint64: + if node.Value < 0 { + panic(fmt.Sprintf("constant %d overflows uint64", node.Value)) + } c.emitPush(uint64(node.Value)) default: c.emitPush(node.Value) @@ -395,34 +427,12 @@ func (c *compiler) UnaryNode(node *ast.UnaryNode) { } func (c *compiler) BinaryNode(node *ast.BinaryNode) { - l := kind(node.Left) - r := kind(node.Right) - - leftIsSimple := isSimpleType(node.Left) - rightIsSimple := isSimpleType(node.Right) - leftAndRightAreSimple := leftIsSimple && rightIsSimple - switch node.Operator { case "==": - c.compile(node.Left) - c.derefInNeeded(node.Left) - c.compile(node.Right) - c.derefInNeeded(node.Right) - - if l == r && l == reflect.Int && leftAndRightAreSimple { - c.emit(OpEqualInt) - } else if l == r && l == reflect.String && leftAndRightAreSimple { - c.emit(OpEqualString) - } else { - c.emit(OpEqual) - } + c.equalBinaryNode(node) case "!=": - c.compile(node.Left) - c.derefInNeeded(node.Left) - c.compile(node.Right) - c.derefInNeeded(node.Right) - c.emit(OpEqual) + c.equalBinaryNode(node) c.emit(OpNot) case "or", "||": @@ -580,6 +590,28 @@ func (c *compiler) BinaryNode(node *ast.BinaryNode) { } } +func (c *compiler) equalBinaryNode(node *ast.BinaryNode) { + l := kind(node.Left.Type()) + r := kind(node.Right.Type()) + + leftIsSimple := isSimpleType(node.Left) + rightIsSimple := isSimpleType(node.Right) + leftAndRightAreSimple := leftIsSimple && rightIsSimple + + c.compile(node.Left) + 
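The IntegerNode hunk above rejects integer constants that would silently wrap when narrowed to the target type. The same guard pattern in isolation (a standalone sketch, not the vendored code):

package main

import (
	"fmt"
	"math"
)

// narrowToInt8 mirrors the compiler's guard: reject constants outside the
// target range instead of letting the conversion wrap silently.
func narrowToInt8(v int) (int8, error) {
	if v > math.MaxInt8 || v < math.MinInt8 {
		return 0, fmt.Errorf("constant %d overflows int8", v)
	}
	return int8(v), nil
}

func main() {
	if _, err := narrowToInt8(300); err != nil {
		fmt.Println(err) // constant 300 overflows int8
	}
}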
c.derefInNeeded(node.Left) + c.compile(node.Right) + c.derefInNeeded(node.Right) + + if l == r && l == reflect.Int && leftAndRightAreSimple { + c.emit(OpEqualInt) + } else if l == r && l == reflect.String && leftAndRightAreSimple { + c.emit(OpEqualString) + } else { + c.emit(OpEqual) + } +} + func isSimpleType(node ast.Node) bool { if node == nil { return false @@ -594,9 +626,21 @@ func isSimpleType(node ast.Node) bool { func (c *compiler) ChainNode(node *ast.ChainNode) { c.chains = append(c.chains, []int{}) c.compile(node.Node) - // Chain activate (got nil somewhere) for _, ph := range c.chains[len(c.chains)-1] { - c.patchJump(ph) + c.patchJump(ph) // If chain activated jump here (got nil somewhere). + } + parent := c.nodeParent() + if binary, ok := parent.(*ast.BinaryNode); ok && binary.Operator == "??" { + // If chain is used in nil coalescing operator, we can omit + // nil push at the end of the chain. The ?? operator will + // handle it. + } else { + // We need to put the nil on the stack, otherwise "typed" + // nil will be used as a result of the chain. + j := c.emit(OpJumpIfNotNil, placeholder) + c.emit(OpPop) + c.emit(OpNil) + c.patchJump(j) + } c.chains = c.chains[:len(c.chains)-1] } @@ -682,9 +726,44 @@ func (c *compiler) SliceNode(node *ast.SliceNode) { } func (c *compiler) CallNode(node *ast.CallNode) { - for _, arg := range node.Arguments { - c.compile(arg) + fn := node.Callee.Type() + if kind(fn) == reflect.Func { + fnInOffset := 0 + fnNumIn := fn.NumIn() + switch callee := node.Callee.(type) { + case *ast.MemberNode: + if prop, ok := callee.Property.(*ast.StringNode); ok { + if _, ok = callee.Node.Type().MethodByName(prop.Value); ok && callee.Node.Type().Kind() != reflect.Interface { + fnInOffset = 1 + fnNumIn-- + } + } + case *ast.IdentifierNode: + if t, ok := c.config.Types[callee.Value]; ok && t.Method { + fnInOffset = 1 + fnNumIn-- + } + } + for i, arg := range node.Arguments { + c.compile(arg) + if k := kind(arg.Type()); k == reflect.Ptr || k == reflect.Interface { + var in reflect.Type + if fn.IsVariadic() && i >= fnNumIn-1 { + in = fn.In(fn.NumIn() - 1).Elem() + } else { + in = fn.In(i + fnInOffset) + } + if k = kind(in); k != reflect.Ptr && k != reflect.Interface { + c.emit(OpDeref) + } + } + } + } else { + for _, arg := range node.Arguments { + c.compile(arg) + } } + if ident, ok := node.Callee.(*ast.IdentifierNode); ok { if c.config != nil { if fn, ok := c.config.Functions[ident.Value]; ok { @@ -800,7 +879,11 @@ func (c *compiler) BuiltinNode(node *ast.BuiltinNode) { c.compile(node.Arguments[0]) c.emit(OpBegin) c.emitLoop(func() { - c.compile(node.Arguments[1]) + if len(node.Arguments) == 2 { + c.compile(node.Arguments[1]) + } else { + c.emit(OpPointer) + } c.emitCond(func() { c.emit(OpIncrementCount) }) @@ -809,6 +892,25 @@ c.emit(OpEnd) return + case "sum": + c.compile(node.Arguments[0]) + c.emit(OpBegin) + c.emit(OpInt, 0) + c.emit(OpSetAcc) + c.emitLoop(func() { + if len(node.Arguments) == 2 { + c.compile(node.Arguments[1]) + } else { + c.emit(OpPointer) + } + c.emit(OpGetAcc) + c.emit(OpAdd) + c.emit(OpSetAcc) + }) + c.emit(OpGetAcc) + c.emit(OpEnd) + return + case "find": c.compile(node.Arguments[0]) c.emit(OpBegin) @@ -1094,7 +1196,7 @@ func (c *compiler) PairNode(node *ast.PairNode) { } func (c *compiler) derefInNeeded(node ast.Node) { - switch kind(node) { + switch kind(node.Type()) { case reflect.Ptr, reflect.Interface: c.emit(OpDeref) } @@ -1113,8 +1215,7 @@ func (c *compiler) optimize() { } } -func
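The ChainNode hunk above pushes an explicit nil when an optional chain short-circuits, unless the chain feeds a ?? operator, which supplies the default itself. The observable behavior, in a sketch of my own (the env shape is illustrative):

package main

import (
	"fmt"

	"github.com/expr-lang/expr"
)

func main() {
	env := map[string]any{"user": nil}

	// The chain short-circuits on the nil user and yields nil;
	// ?? then supplies the default.
	out, err := expr.Eval(`user?.Name ?? "anonymous"`, env)
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // anonymous
}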
kind(node ast.Node) reflect.Kind { - t := node.Type() +func kind(t reflect.Type) reflect.Kind { if t == nil { return reflect.Invalid } diff --git a/vendor/github.com/expr-lang/expr/expr.go b/vendor/github.com/expr-lang/expr/expr.go index ba786c0174c..8c619e1c4db 100644 --- a/vendor/github.com/expr-lang/expr/expr.go +++ b/vendor/github.com/expr-lang/expr/expr.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" "reflect" + "time" "github.com/expr-lang/expr/ast" "github.com/expr-lang/expr/builtin" @@ -12,7 +13,6 @@ import ( "github.com/expr-lang/expr/conf" "github.com/expr-lang/expr/file" "github.com/expr-lang/expr/optimizer" - "github.com/expr-lang/expr/parser" "github.com/expr-lang/expr/patcher" "github.com/expr-lang/expr/vm" ) @@ -183,6 +183,17 @@ func WithContext(name string) Option { }) } +// Timezone sets default timezone for date() and now() builtin functions. +func Timezone(name string) Option { + tz, err := time.LoadLocation(name) + if err != nil { + panic(err) + } + return Patch(patcher.WithTimezone{ + Location: tz, + }) +} + // Compile parses and compiles given input expression to bytecode program. func Compile(input string, ops ...Option) (*vm.Program, error) { config := conf.CreateNew() @@ -194,33 +205,7 @@ func Compile(input string, ops ...Option) (*vm.Program, error) { } config.Check() - tree, err := parser.ParseWithConfig(input, config) - if err != nil { - return nil, err - } - - if len(config.Visitors) > 0 { - for i := 0; i < 1000; i++ { - more := false - for _, v := range config.Visitors { - // We need to perform types check, because some visitors may rely on - // types information available in the tree. - _, _ = checker.Check(tree, config) - - ast.Walk(&tree.Node, v) - - if v, ok := v.(interface { - ShouldRepeat() bool - }); ok { - more = more || v.ShouldRepeat() - } - } - if !more { - break - } - } - } - _, err = checker.Check(tree, config) + tree, err := checker.ParseCheck(input, config) if err != nil { return nil, err } diff --git a/vendor/github.com/expr-lang/expr/file/error.go b/vendor/github.com/expr-lang/expr/file/error.go index edf202b0456..8ff85dfa5f7 100644 --- a/vendor/github.com/expr-lang/expr/file/error.go +++ b/vendor/github.com/expr-lang/expr/file/error.go @@ -8,22 +8,36 @@ import ( type Error struct { Location - Message string - Snippet string - Prev error + Line int `json:"line"` + Column int `json:"column"` + Message string `json:"message"` + Snippet string `json:"snippet"` + Prev error `json:"prev"` } func (e *Error) Error() string { return e.format() } -func (e *Error) Bind(source *Source) *Error { - if snippet, found := source.Snippet(e.Location.Line); found { +func (e *Error) Bind(source Source) *Error { + e.Line = 1 + for i, r := range source { + if i == e.From { + break + } + if r == '\n' { + e.Line++ + e.Column = 0 + } else { + e.Column++ + } + } + if snippet, found := source.Snippet(e.Line); found { snippet := strings.Replace(snippet, "\t", " ", -1) srcLine := "\n | " + snippet var bytes = []byte(snippet) var indLine = "\n | " - for i := 0; i < e.Location.Column && len(bytes) > 0; i++ { + for i := 0; i < e.Column && len(bytes) > 0; i++ { _, sz := utf8.DecodeRune(bytes) bytes = bytes[sz:] if sz > 1 { @@ -54,7 +68,7 @@ func (e *Error) Wrap(err error) { } func (e *Error) format() string { - if e.Location.Empty() { + if e.Snippet == "" { return e.Message } return fmt.Sprintf( diff --git a/vendor/github.com/expr-lang/expr/file/location.go b/vendor/github.com/expr-lang/expr/file/location.go index a92e27f0b1c..6c6bc2427ec 100644 --- 
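The new expr.Timezone option above resolves the zone once at compile time, panicking on an unknown name, and patches date()/now() calls to use it. A sketch, assuming tzdata is present on the host:

package main

import (
	"fmt"

	"github.com/expr-lang/expr"
)

func main() {
	// Timezone is applied when the program is compiled, not at Run time.
	program, err := expr.Compile(`now().Location().String()`, expr.Timezone("Asia/Tokyo"))
	if err != nil {
		panic(err)
	}
	out, err := expr.Run(program, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // Asia/Tokyo
}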
a/vendor/github.com/expr-lang/expr/file/location.go +++ b/vendor/github.com/expr-lang/expr/file/location.go @@ -1,10 +1,6 @@ package file type Location struct { - Line int // The 1-based line of the location. - Column int // The 0-based column number of the location. -} - -func (l Location) Empty() bool { - return l.Column == 0 && l.Line == 0 + From int `json:"from"` + To int `json:"to"` } diff --git a/vendor/github.com/expr-lang/expr/file/source.go b/vendor/github.com/expr-lang/expr/file/source.go index d86a546b100..8e2b2d1540c 100644 --- a/vendor/github.com/expr-lang/expr/file/source.go +++ b/vendor/github.com/expr-lang/expr/file/source.go @@ -1,78 +1,47 @@ package file import ( - "encoding/json" "strings" "unicode/utf8" ) -type Source struct { - contents []rune - lineOffsets []int32 -} - -func NewSource(contents string) *Source { - s := &Source{ - contents: []rune(contents), - } - s.updateOffsets() - return s -} - -func (s *Source) MarshalJSON() ([]byte, error) { - return json.Marshal(s.contents) -} - -func (s *Source) UnmarshalJSON(b []byte) error { - contents := make([]rune, 0) - err := json.Unmarshal(b, &contents) - if err != nil { - return err - } +type Source []rune - s.contents = contents - s.updateOffsets() - return nil +func NewSource(contents string) Source { + return []rune(contents) } -func (s *Source) Content() string { - return string(s.contents) +func (s Source) String() string { + return string(s) } -func (s *Source) Snippet(line int) (string, bool) { +func (s Source) Snippet(line int) (string, bool) { if s == nil { return "", false } - charStart, found := s.findLineOffset(line) - if !found || len(s.contents) == 0 { + lines := strings.Split(string(s), "\n") + lineOffsets := make([]int, len(lines)) + var offset int + for i, line := range lines { + offset = offset + utf8.RuneCountInString(line) + 1 + lineOffsets[i] = offset + } + charStart, found := getLineOffset(lineOffsets, line) + if !found || len(s) == 0 { return "", false } - charEnd, found := s.findLineOffset(line + 1) + charEnd, found := getLineOffset(lineOffsets, line+1) if found { - return string(s.contents[charStart : charEnd-1]), true - } - return string(s.contents[charStart:]), true -} - -// updateOffsets compute line offsets up front as they are referred to frequently. -func (s *Source) updateOffsets() { - lines := strings.Split(string(s.contents), "\n") - offsets := make([]int32, len(lines)) - var offset int32 - for i, line := range lines { - offset = offset + int32(utf8.RuneCountInString(line)) + 1 - offsets[int32(i)] = offset + return string(s[charStart : charEnd-1]), true } - s.lineOffsets = offsets + return string(s[charStart:]), true } -// findLineOffset returns the offset where the (1-indexed) line begins, -// or false if line doesn't exist. 
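With file.Location reduced to rune offsets (From/To), Error.Bind above derives the 1-based line and 0-based column by scanning the source. The same derivation in isolation, as a standalone sketch:

package main

import "fmt"

// lineColumn mirrors Error.Bind: derive a 1-based line and 0-based column
// from a rune offset into the source.
func lineColumn(source []rune, from int) (line, col int) {
	line = 1
	for i, r := range source {
		if i == from {
			break
		}
		if r == '\n' {
			line++
			col = 0
		} else {
			col++
		}
	}
	return line, col
}

func main() {
	src := []rune("foo\nbar + baz")
	fmt.Println(lineColumn(src, 8)) // 2 4
}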
-func (s *Source) findLineOffset(line int) (int32, bool) { +func getLineOffset(lineOffsets []int, line int) (int, bool) { if line == 1 { return 0, true - } else if line > 1 && line <= len(s.lineOffsets) { - offset := s.lineOffsets[line-2] + } else if line > 1 && line <= len(lineOffsets) { + offset := lineOffsets[line-2] return offset, true } return -1, false diff --git a/vendor/github.com/expr-lang/expr/optimizer/optimizer.go b/vendor/github.com/expr-lang/expr/optimizer/optimizer.go index 6d1fb0b546e..4ceb3fa43a7 100644 --- a/vendor/github.com/expr-lang/expr/optimizer/optimizer.go +++ b/vendor/github.com/expr-lang/expr/optimizer/optimizer.go @@ -37,5 +37,7 @@ func Optimize(node *Node, config *conf.Config) error { Walk(node, &filterLast{}) Walk(node, &filterFirst{}) Walk(node, &predicateCombination{}) + Walk(node, &sumArray{}) + Walk(node, &sumMap{}) return nil } diff --git a/vendor/github.com/expr-lang/expr/optimizer/predicate_combination.go b/vendor/github.com/expr-lang/expr/optimizer/predicate_combination.go index 2733781df53..6e8a7f7cfc8 100644 --- a/vendor/github.com/expr-lang/expr/optimizer/predicate_combination.go +++ b/vendor/github.com/expr-lang/expr/optimizer/predicate_combination.go @@ -5,6 +5,14 @@ import ( "github.com/expr-lang/expr/parser/operator" ) +/* +predicateCombination is a visitor that combines multiple predicate calls into a single call. +For example, the following expression: + + all(x, x > 1) && all(x, x < 10) -> all(x, x > 1 && x < 10) + any(x, x > 1) || any(x, x < 10) -> any(x, x > 1 || x < 10) + none(x, x > 1) && none(x, x < 10) -> none(x, x > 1 || x < 10) +*/ type predicateCombination struct{} func (v *predicateCombination) Visit(node *Node) { @@ -36,10 +44,12 @@ func (v *predicateCombination) Visit(node *Node) { } func combinedOperator(fn, op string) (string, bool) { - switch fn { - case "all", "any": + switch { + case fn == "all" && (op == "and" || op == "&&"): + return op, true + case fn == "any" && (op == "or" || op == "||"): return op, true - case "one", "none": + case fn == "none" && (op == "and" || op == "&&"): switch op { case "and": return "or", true diff --git a/vendor/github.com/expr-lang/expr/optimizer/sum_array.go b/vendor/github.com/expr-lang/expr/optimizer/sum_array.go new file mode 100644 index 00000000000..0a05d1f2e6b --- /dev/null +++ b/vendor/github.com/expr-lang/expr/optimizer/sum_array.go @@ -0,0 +1,37 @@ +package optimizer + +import ( + "fmt" + + . "github.com/expr-lang/expr/ast" +) + +type sumArray struct{} + +func (*sumArray) Visit(node *Node) { + if sumBuiltin, ok := (*node).(*BuiltinNode); ok && + sumBuiltin.Name == "sum" && + len(sumBuiltin.Arguments) == 1 { + if array, ok := sumBuiltin.Arguments[0].(*ArrayNode); ok && + len(array.Nodes) >= 2 { + Patch(node, sumArrayFold(array)) + } + } +} + +func sumArrayFold(array *ArrayNode) *BinaryNode { + if len(array.Nodes) > 2 { + return &BinaryNode{ + Operator: "+", + Left: array.Nodes[0], + Right: sumArrayFold(&ArrayNode{Nodes: array.Nodes[1:]}), + } + } else if len(array.Nodes) == 2 { + return &BinaryNode{ + Operator: "+", + Left: array.Nodes[0], + Right: array.Nodes[1], + } + } + panic(fmt.Errorf("sumArrayFold: invalid array length %d", len(array.Nodes))) +} diff --git a/vendor/github.com/expr-lang/expr/optimizer/sum_map.go b/vendor/github.com/expr-lang/expr/optimizer/sum_map.go new file mode 100644 index 00000000000..a41a537327c --- /dev/null +++ b/vendor/github.com/expr-lang/expr/optimizer/sum_map.go @@ -0,0 +1,25 @@ +package optimizer + +import ( + . 
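predicateCombination above now merges all/any/none predicates only under the matching logical operator, and the new sumArray pass folds sum([a, b, c]) into chained additions at compile time. Both forms of the folded case should therefore agree (the snippet is mine):

package main

import (
	"fmt"

	"github.com/expr-lang/expr"
)

func main() {
	// sumArray folds the first form into the second at compile time,
	// so both must evaluate identically.
	for _, code := range []string{`sum([1, 2, 3])`, `1 + 2 + 3`} {
		out, err := expr.Eval(code, nil)
		if err != nil {
			panic(err)
		}
		fmt.Println(out) // 6 (twice)
	}
}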
"github.com/expr-lang/expr/ast" +) + +type sumMap struct{} + +func (*sumMap) Visit(node *Node) { + if sumBuiltin, ok := (*node).(*BuiltinNode); ok && + sumBuiltin.Name == "sum" && + len(sumBuiltin.Arguments) == 1 { + if mapBuiltin, ok := sumBuiltin.Arguments[0].(*BuiltinNode); ok && + mapBuiltin.Name == "map" && + len(mapBuiltin.Arguments) == 2 { + Patch(node, &BuiltinNode{ + Name: "sum", + Arguments: []Node{ + mapBuiltin.Arguments[0], + mapBuiltin.Arguments[1], + }, + }) + } + } +} diff --git a/vendor/github.com/expr-lang/expr/parser/lexer/lexer.go b/vendor/github.com/expr-lang/expr/parser/lexer/lexer.go index c32658637f2..e6b06c09d09 100644 --- a/vendor/github.com/expr-lang/expr/parser/lexer/lexer.go +++ b/vendor/github.com/expr-lang/expr/parser/lexer/lexer.go @@ -3,20 +3,18 @@ package lexer import ( "fmt" "strings" - "unicode/utf8" "github.com/expr-lang/expr/file" ) -func Lex(source *file.Source) ([]Token, error) { +func Lex(source file.Source) ([]Token, error) { l := &lexer{ - input: source.Content(), + source: source, tokens: make([]Token, 0), + start: 0, + end: 0, } - - l.loc = file.Location{Line: 1, Column: 0} - l.prev = l.loc - l.startLoc = l.loc + l.commit() for state := root; state != nil; { state = state(l) @@ -30,34 +28,25 @@ func Lex(source *file.Source) ([]Token, error) { } type lexer struct { - input string + source file.Source tokens []Token - start, end int // current position in input - width int // last rune width - startLoc file.Location // start location - prev, loc file.Location // prev location of end location, end location + start, end int err *file.Error } const eof rune = -1 +func (l *lexer) commit() { + l.start = l.end +} + func (l *lexer) next() rune { - if l.end >= len(l.input) { - l.width = 0 + if l.end >= len(l.source) { + l.end++ return eof } - r, w := utf8.DecodeRuneInString(l.input[l.end:]) - l.width = w - l.end += w - - l.prev = l.loc - if r == '\n' { - l.loc.Line++ - l.loc.Column = 0 - } else { - l.loc.Column++ - } - + r := l.source[l.end] + l.end++ return r } @@ -68,8 +57,7 @@ func (l *lexer) peek() rune { } func (l *lexer) backup() { - l.end -= l.width - l.loc = l.prev + l.end-- } func (l *lexer) emit(t Kind) { @@ -78,35 +66,39 @@ func (l *lexer) emit(t Kind) { func (l *lexer) emitValue(t Kind, value string) { l.tokens = append(l.tokens, Token{ - Location: l.startLoc, + Location: file.Location{From: l.start, To: l.end}, Kind: t, Value: value, }) - l.start = l.end - l.startLoc = l.loc + l.commit() } func (l *lexer) emitEOF() { + from := l.end - 2 + if from < 0 { + from = 0 + } + to := l.end - 1 + if to < 0 { + to = 0 + } l.tokens = append(l.tokens, Token{ - Location: l.prev, // Point to previous position for better error messages. + Location: file.Location{From: from, To: to}, Kind: EOF, }) - l.start = l.end - l.startLoc = l.loc + l.commit() } func (l *lexer) skip() { - l.start = l.end - l.startLoc = l.loc + l.commit() } func (l *lexer) word() string { - return l.input[l.start:l.end] -} - -func (l *lexer) ignore() { - l.start = l.end - l.startLoc = l.loc + // TODO: boundary check is NOT needed here, but for some reason CI fuzz tests are failing. 
+ if l.start > len(l.source) || l.end > len(l.source) { + return "__invalid__" + } + return string(l.source[l.start:l.end]) } func (l *lexer) accept(valid string) bool { @@ -132,18 +124,18 @@ func (l *lexer) skipSpaces() { } func (l *lexer) acceptWord(word string) bool { - pos, loc, prev := l.end, l.loc, l.prev + pos := l.end l.skipSpaces() for _, ch := range word { if l.next() != ch { - l.end, l.loc, l.prev = pos, loc, prev + l.end = pos return false } } if r := l.peek(); r != ' ' && r != eof { - l.end, l.loc, l.prev = pos, loc, prev + l.end = pos return false } @@ -153,8 +145,11 @@ func (l *lexer) acceptWord(word string) bool { func (l *lexer) error(format string, args ...any) stateFn { if l.err == nil { // show first error l.err = &file.Error{ - Location: l.loc, - Message: fmt.Sprintf(format, args...), + Location: file.Location{ + From: l.end - 1, + To: l.end, + }, + Message: fmt.Sprintf(format, args...), } } return nil @@ -230,6 +225,6 @@ func (l *lexer) scanRawString(quote rune) (n int) { ch = l.next() n++ } - l.emitValue(String, l.input[l.start+1:l.end-1]) + l.emitValue(String, string(l.source[l.start+1:l.end-1])) return } diff --git a/vendor/github.com/expr-lang/expr/parser/lexer/state.go b/vendor/github.com/expr-lang/expr/parser/lexer/state.go index 72f02bf4ef7..d351e2f5c8b 100644 --- a/vendor/github.com/expr-lang/expr/parser/lexer/state.go +++ b/vendor/github.com/expr-lang/expr/parser/lexer/state.go @@ -14,7 +14,7 @@ func root(l *lexer) stateFn { l.emitEOF() return nil case utils.IsSpace(r): - l.ignore() + l.skip() return root case r == '\'' || r == '"': l.scanString(r) @@ -83,14 +83,14 @@ func (l *lexer) scanNumber() bool { } } l.acceptRun(digits) - loc, prev, end := l.loc, l.prev, l.end + end := l.end if l.accept(".") { // Lookup for .. operator: if after dot there is another dot (1..2), it maybe a range operator. if l.peek() == '.' { // We can't backup() here, as it would require two backups, // and backup() func supports only one for now. So, save and // restore it here. - l.loc, l.prev, l.end = loc, prev, end + l.end = end return true } l.acceptRun(digits) @@ -147,7 +147,7 @@ func not(l *lexer) stateFn { l.skipSpaces() - pos, loc, prev := l.end, l.loc, l.prev + end := l.end // Get the next word. 
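The lexer rewrite above keeps the save-and-restore of the scan position in scanNumber, so a '.' consumed after digits can still back out when it turns out to begin the '..' range operator. The user-visible behavior:

package main

import (
	"fmt"

	"github.com/expr-lang/expr"
)

func main() {
	// "1..3" must lex as the range 1 .. 3, not as the float "1." followed by ".3".
	out, err := expr.Eval(`1..3`, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // [1 2 3]
}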
for { @@ -164,7 +164,7 @@ func not(l *lexer) stateFn { case "in", "matches", "contains", "startsWith", "endsWith": l.emit(Operator) default: - l.end, l.loc, l.prev = pos, loc, prev + l.end = end } return root } @@ -193,7 +193,7 @@ func singleLineComment(l *lexer) stateFn { break } } - l.ignore() + l.skip() return root } @@ -207,7 +207,7 @@ func multiLineComment(l *lexer) stateFn { break } } - l.ignore() + l.skip() return root } diff --git a/vendor/github.com/expr-lang/expr/parser/parser.go b/vendor/github.com/expr-lang/expr/parser/parser.go index 9cb79cbbb42..77b2a700a31 100644 --- a/vendor/github.com/expr-lang/expr/parser/parser.go +++ b/vendor/github.com/expr-lang/expr/parser/parser.go @@ -33,7 +33,8 @@ var predicates = map[string]struct { "one": {[]arg{expr, closure}}, "filter": {[]arg{expr, closure}}, "map": {[]arg{expr, closure}}, - "count": {[]arg{expr, closure}}, + "count": {[]arg{expr, closure | optional}}, + "sum": {[]arg{expr, closure | optional}}, "find": {[]arg{expr, closure}}, "findIndex": {[]arg{expr, closure}}, "findLast": {[]arg{expr, closure}}, @@ -54,7 +55,7 @@ type parser struct { type Tree struct { Node Node - Source *file.Source + Source file.Source } func Parse(input string) (*Tree, error) { @@ -83,14 +84,16 @@ func ParseWithConfig(input string, config *conf.Config) (*Tree, error) { p.error("unexpected token %v", p.current) } + tree := &Tree{ + Node: node, + Source: source, + } + if p.err != nil { - return nil, p.err.Bind(source) + return tree, p.err.Bind(source) } - return &Tree{ - Node: node, - Source: source, - }, nil + return tree, nil } func (p *parser) error(format string, args ...any) { diff --git a/vendor/github.com/expr-lang/expr/patcher/with_context.go b/vendor/github.com/expr-lang/expr/patcher/with_context.go index 55b6042614f..f9861a2c2f7 100644 --- a/vendor/github.com/expr-lang/expr/patcher/with_context.go +++ b/vendor/github.com/expr-lang/expr/patcher/with_context.go @@ -22,11 +22,18 @@ func (w WithContext) Visit(node *ast.Node) { if fn.Kind() != reflect.Func { return } - if fn.NumIn() == 0 { - return - } - if fn.In(0).String() != "context.Context" { + switch fn.NumIn() { + case 0: return + case 1: + if fn.In(0).String() != "context.Context" { + return + } + default: + if fn.In(0).String() != "context.Context" && + fn.In(1).String() != "context.Context" { + return + } } ast.Patch(node, &ast.CallNode{ Callee: call.Callee, diff --git a/vendor/github.com/expr-lang/expr/patcher/with_timezone.go b/vendor/github.com/expr-lang/expr/patcher/with_timezone.go new file mode 100644 index 00000000000..83eb28e95ac --- /dev/null +++ b/vendor/github.com/expr-lang/expr/patcher/with_timezone.go @@ -0,0 +1,25 @@ +package patcher + +import ( + "time" + + "github.com/expr-lang/expr/ast" +) + +// WithTimezone passes Location to date() and now() functions. 
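parser.go above now marks the closure of count and sum as optional, so the one-argument forms parse: count(xs) counts truthy elements and sum(xs) adds them. A sketch (the env is mine):

package main

import (
	"fmt"

	"github.com/expr-lang/expr"
)

func main() {
	env := map[string]any{"flags": []bool{true, false, true}}

	// One-argument count: counts the truthy elements of the collection.
	out, err := expr.Eval(`count(flags)`, env)
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // 2
}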
+type WithTimezone struct { + Location *time.Location +} + +func (t WithTimezone) Visit(node *ast.Node) { + if btin, ok := (*node).(*ast.BuiltinNode); ok { + switch btin.Name { + case "date", "now": + loc := &ast.ConstantNode{Value: t.Location} + ast.Patch(node, &ast.BuiltinNode{ + Name: btin.Name, + Arguments: append([]ast.Node{loc}, btin.Arguments...), + }) + } + } +} diff --git a/vendor/github.com/expr-lang/expr/vm/program.go b/vendor/github.com/expr-lang/expr/vm/program.go index 98954674412..15ce26f5b28 100644 --- a/vendor/github.com/expr-lang/expr/vm/program.go +++ b/vendor/github.com/expr-lang/expr/vm/program.go @@ -21,7 +21,7 @@ type Program struct { Arguments []int Constants []any - source *file.Source + source file.Source node ast.Node locations []file.Location variables int @@ -32,7 +32,7 @@ type Program struct { // NewProgram returns a new Program. It's used by the compiler. func NewProgram( - source *file.Source, + source file.Source, node ast.Node, locations []file.Location, variables int, @@ -58,7 +58,7 @@ func NewProgram( } // Source returns origin file.Source. -func (program *Program) Source() *file.Source { +func (program *Program) Source() file.Source { return program.source } diff --git a/vendor/github.com/expr-lang/expr/vm/runtime/helpers[generated].go b/vendor/github.com/expr-lang/expr/vm/runtime/helpers[generated].go index 3529fdd5867..d950f111147 100644 --- a/vendor/github.com/expr-lang/expr/vm/runtime/helpers[generated].go +++ b/vendor/github.com/expr-lang/expr/vm/runtime/helpers[generated].go @@ -334,6 +334,344 @@ func Equal(a, b interface{}) bool { case float64: return float64(x) == float64(y) } + case []any: + switch y := b.(type) { + case []string: + if len(x) != len(y) { + return false + } + for i := range x { + if !Equal(x[i], y[i]) { + return false + } + } + return true + case []uint: + if len(x) != len(y) { + return false + } + for i := range x { + if !Equal(x[i], y[i]) { + return false + } + } + return true + case []uint8: + if len(x) != len(y) { + return false + } + for i := range x { + if !Equal(x[i], y[i]) { + return false + } + } + return true + case []uint16: + if len(x) != len(y) { + return false + } + for i := range x { + if !Equal(x[i], y[i]) { + return false + } + } + return true + case []uint32: + if len(x) != len(y) { + return false + } + for i := range x { + if !Equal(x[i], y[i]) { + return false + } + } + return true + case []uint64: + if len(x) != len(y) { + return false + } + for i := range x { + if !Equal(x[i], y[i]) { + return false + } + } + return true + case []int: + if len(x) != len(y) { + return false + } + for i := range x { + if !Equal(x[i], y[i]) { + return false + } + } + return true + case []int8: + if len(x) != len(y) { + return false + } + for i := range x { + if !Equal(x[i], y[i]) { + return false + } + } + return true + case []int16: + if len(x) != len(y) { + return false + } + for i := range x { + if !Equal(x[i], y[i]) { + return false + } + } + return true + case []int32: + if len(x) != len(y) { + return false + } + for i := range x { + if !Equal(x[i], y[i]) { + return false + } + } + return true + case []int64: + if len(x) != len(y) { + return false + } + for i := range x { + if !Equal(x[i], y[i]) { + return false + } + } + return true + case []float32: + if len(x) != len(y) { + return false + } + for i := range x { + if !Equal(x[i], y[i]) { + return false + } + } + return true + case []float64: + if len(x) != len(y) { + return false + } + for i := range x { + if !Equal(x[i], y[i]) { + return false + } + } + return true + 
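The generated Equal cases around this point all stamp out the same shape per concrete slice type: compare lengths, then elements (the []any cases additionally recurse through Equal, which generics alone would not cover for mixed element types). As an illustrative sketch, the shared shape written once:

    package main

    import "fmt"

    // sliceEqual is the shape every generated case expands to:
    // equal lengths, then element-by-element comparison.
    func sliceEqual[T comparable](x, y []T) bool {
        if len(x) != len(y) {
            return false
        }
        for i := range x {
            if x[i] != y[i] {
                return false
            }
        }
        return true
    }

    func main() {
        fmt.Println(sliceEqual([]int{1, 2, 3}, []int{1, 2, 3})) // true
        fmt.Println(sliceEqual([]int{1, 2, 3}, []int{3, 2, 1})) // false
    }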
case []any: + if len(x) != len(y) { + return false + } + for i := range x { + if !Equal(x[i], y[i]) { + return false + } + } + return true + } + case []string: + switch y := b.(type) { + case []any: + return Equal(y, x) + case []string: + if len(x) != len(y) { + return false + } + for i := range x { + if x[i] != y[i] { + return false + } + } + return true + } + case []uint: + switch y := b.(type) { + case []any: + return Equal(y, x) + case []uint: + if len(x) != len(y) { + return false + } + for i := range x { + if x[i] != y[i] { + return false + } + } + return true + } + case []uint8: + switch y := b.(type) { + case []any: + return Equal(y, x) + case []uint8: + if len(x) != len(y) { + return false + } + for i := range x { + if x[i] != y[i] { + return false + } + } + return true + } + case []uint16: + switch y := b.(type) { + case []any: + return Equal(y, x) + case []uint16: + if len(x) != len(y) { + return false + } + for i := range x { + if x[i] != y[i] { + return false + } + } + return true + } + case []uint32: + switch y := b.(type) { + case []any: + return Equal(y, x) + case []uint32: + if len(x) != len(y) { + return false + } + for i := range x { + if x[i] != y[i] { + return false + } + } + return true + } + case []uint64: + switch y := b.(type) { + case []any: + return Equal(y, x) + case []uint64: + if len(x) != len(y) { + return false + } + for i := range x { + if x[i] != y[i] { + return false + } + } + return true + } + case []int: + switch y := b.(type) { + case []any: + return Equal(y, x) + case []int: + if len(x) != len(y) { + return false + } + for i := range x { + if x[i] != y[i] { + return false + } + } + return true + } + case []int8: + switch y := b.(type) { + case []any: + return Equal(y, x) + case []int8: + if len(x) != len(y) { + return false + } + for i := range x { + if x[i] != y[i] { + return false + } + } + return true + } + case []int16: + switch y := b.(type) { + case []any: + return Equal(y, x) + case []int16: + if len(x) != len(y) { + return false + } + for i := range x { + if x[i] != y[i] { + return false + } + } + return true + } + case []int32: + switch y := b.(type) { + case []any: + return Equal(y, x) + case []int32: + if len(x) != len(y) { + return false + } + for i := range x { + if x[i] != y[i] { + return false + } + } + return true + } + case []int64: + switch y := b.(type) { + case []any: + return Equal(y, x) + case []int64: + if len(x) != len(y) { + return false + } + for i := range x { + if x[i] != y[i] { + return false + } + } + return true + } + case []float32: + switch y := b.(type) { + case []any: + return Equal(y, x) + case []float32: + if len(x) != len(y) { + return false + } + for i := range x { + if x[i] != y[i] { + return false + } + } + return true + } + case []float64: + switch y := b.(type) { + case []any: + return Equal(y, x) + case []float64: + if len(x) != len(y) { + return false + } + for i := range x { + if x[i] != y[i] { + return false + } + } + return true + } case string: switch y := b.(type) { case string: diff --git a/vendor/github.com/expr-lang/expr/vm/runtime/runtime.go b/vendor/github.com/expr-lang/expr/vm/runtime/runtime.go index 7da1320de38..cd48a280dc3 100644 --- a/vendor/github.com/expr-lang/expr/vm/runtime/runtime.go +++ b/vendor/github.com/expr-lang/expr/vm/runtime/runtime.go @@ -35,8 +35,12 @@ func Fetch(from, i any) any { switch v.Kind() { case reflect.Array, reflect.Slice, reflect.String: index := ToInt(i) + l := v.Len() if index < 0 { - index = v.Len() + index + index = l + index + } + if index < 0 || index >= l { + 
panic(fmt.Sprintf("index out of range: %v (array length is %v)", index, l)) } value := v.Index(index) if value.IsValid() { diff --git a/vendor/github.com/expr-lang/expr/vm/vm.go b/vendor/github.com/expr-lang/expr/vm/vm.go index 7e933ce7408..fa1223b420f 100644 --- a/vendor/github.com/expr-lang/expr/vm/vm.go +++ b/vendor/github.com/expr-lang/expr/vm/vm.go @@ -274,31 +274,50 @@ func (vm *VM) Run(program *Program, env any) (_ any, err error) { case OpMatches: b := vm.pop() a := vm.pop() + if runtime.IsNil(a) || runtime.IsNil(b) { + vm.push(false) + break + } match, err := regexp.MatchString(b.(string), a.(string)) if err != nil { panic(err) } - vm.push(match) case OpMatchesConst: a := vm.pop() + if runtime.IsNil(a) { + vm.push(false) + break + } r := program.Constants[arg].(*regexp.Regexp) vm.push(r.MatchString(a.(string))) case OpContains: b := vm.pop() a := vm.pop() + if runtime.IsNil(a) || runtime.IsNil(b) { + vm.push(false) + break + } vm.push(strings.Contains(a.(string), b.(string))) case OpStartsWith: b := vm.pop() a := vm.pop() + if runtime.IsNil(a) || runtime.IsNil(b) { + vm.push(false) + break + } vm.push(strings.HasPrefix(a.(string), b.(string))) case OpEndsWith: b := vm.pop() a := vm.pop() + if runtime.IsNil(a) || runtime.IsNil(b) { + vm.push(false) + break + } vm.push(strings.HasSuffix(a.(string), b.(string))) case OpSlice: diff --git a/vendor/github.com/fsnotify/fsnotify/.cirrus.yml b/vendor/github.com/fsnotify/fsnotify/.cirrus.yml index ffc7b992b3c..f4e7dbf37b3 100644 --- a/vendor/github.com/fsnotify/fsnotify/.cirrus.yml +++ b/vendor/github.com/fsnotify/fsnotify/.cirrus.yml @@ -1,7 +1,7 @@ freebsd_task: name: 'FreeBSD' freebsd_instance: - image_family: freebsd-13-2 + image_family: freebsd-14-1 install_script: - pkg update -f - pkg install -y go @@ -9,5 +9,6 @@ freebsd_task: # run tests as user "cirrus" instead of root - pw useradd cirrus -m - chown -R cirrus:cirrus . - - FSNOTIFY_BUFFER=4096 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... - - sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... + - FSNOTIFY_BUFFER=4096 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... + - sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... + - FSNOTIFY_DEBUG=1 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race -v ./... 
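Two defensive runtime changes appear above: Fetch normalizes negative indexes and panics with a descriptive message instead of an opaque reflect panic, and the string opcodes (matches, contains, startsWith, endsWith) treat a nil operand as a non-match rather than a failing type assertion. A standalone sketch of both guards; the helper names are hypothetical, and the real VM uses runtime.IsNil, which also catches typed nils:

    package main

    import (
        "fmt"
        "strings"
    )

    // fetchIndex mirrors the Fetch bounds logic: negative indexes count
    // from the end, and anything still out of range panics descriptively.
    func fetchIndex(xs []any, i int) any {
        l := len(xs)
        if i < 0 {
            i = l + i
        }
        if i < 0 || i >= l {
            panic(fmt.Sprintf("index out of range: %v (array length is %v)", i, l))
        }
        return xs[i]
    }

    // safeContains mirrors the opcode guards: a nil operand simply means
    // "no match" instead of a panicking type assertion.
    func safeContains(a, b any) bool {
        if a == nil || b == nil {
            return false
        }
        return strings.Contains(a.(string), b.(string))
    }

    func main() {
        fmt.Println(fetchIndex([]any{"a", "b", "c"}, -1)) // c
        fmt.Println(safeContains(nil, "x"))               // false
        fmt.Println(safeContains("haystack", "hay"))      // true
    }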
diff --git a/vendor/github.com/fsnotify/fsnotify/.editorconfig b/vendor/github.com/fsnotify/fsnotify/.editorconfig deleted file mode 100644 index fad895851e5..00000000000 --- a/vendor/github.com/fsnotify/fsnotify/.editorconfig +++ /dev/null @@ -1,12 +0,0 @@ -root = true - -[*.go] -indent_style = tab -indent_size = 4 -insert_final_newline = true - -[*.{yml,yaml}] -indent_style = space -indent_size = 2 -insert_final_newline = true -trim_trailing_whitespace = true diff --git a/vendor/github.com/fsnotify/fsnotify/.gitattributes b/vendor/github.com/fsnotify/fsnotify/.gitattributes deleted file mode 100644 index 32f1001be0a..00000000000 --- a/vendor/github.com/fsnotify/fsnotify/.gitattributes +++ /dev/null @@ -1 +0,0 @@ -go.sum linguist-generated diff --git a/vendor/github.com/fsnotify/fsnotify/.gitignore b/vendor/github.com/fsnotify/fsnotify/.gitignore index 391cc076b12..daea9dd6d6d 100644 --- a/vendor/github.com/fsnotify/fsnotify/.gitignore +++ b/vendor/github.com/fsnotify/fsnotify/.gitignore @@ -5,3 +5,6 @@ # Output of go build ./cmd/fsnotify /fsnotify /fsnotify.exe + +/test/kqueue +/test/a.out diff --git a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md index e0e57575496..fa854785d0f 100644 --- a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md +++ b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md @@ -1,8 +1,36 @@ # Changelog -Unreleased ---------- -Nothing yet. +1.8.0 2024-10-31 +---------------- + +### Additions + +- all: add `FSNOTIFY_DEBUG` to print debug logs to stderr ([#619]) + +### Changes and fixes + +- windows: fix behaviour of `WatchList()` to be consistent with other platforms ([#610]) + +- kqueue: ignore events with Ident=0 ([#590]) + +- kqueue: set O_CLOEXEC to prevent passing file descriptors to children ([#617]) + +- kqueue: emit events as "/path/dir/file" instead of "path/link/file" when watching a symlink ([#625]) + +- inotify: don't send event for IN_DELETE_SELF when also watching the parent ([#620]) + +- inotify: fix panic when calling Remove() in a goroutine ([#650]) + +- fen: allow watching subdirectories of watched directories ([#621]) + +[#590]: https://github.com/fsnotify/fsnotify/pull/590 +[#610]: https://github.com/fsnotify/fsnotify/pull/610 +[#617]: https://github.com/fsnotify/fsnotify/pull/617 +[#619]: https://github.com/fsnotify/fsnotify/pull/619 +[#620]: https://github.com/fsnotify/fsnotify/pull/620 +[#621]: https://github.com/fsnotify/fsnotify/pull/621 +[#625]: https://github.com/fsnotify/fsnotify/pull/625 +[#650]: https://github.com/fsnotify/fsnotify/pull/650 1.7.0 - 2023-10-22 ------------------ diff --git a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md index ea379759d51..e4ac2a2fffd 100644 --- a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md +++ b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md @@ -1,7 +1,7 @@ Thank you for your interest in contributing to fsnotify! We try to review and merge PRs in a reasonable timeframe, but please be aware that: -- To avoid "wasted" work, please discus changes on the issue tracker first. You +- To avoid "wasted" work, please discuss changes on the issue tracker first. You can just send PRs, but they may end up being rejected for one reason or the other. @@ -20,6 +20,124 @@ platforms. Testing different platforms locally can be done with something like Use the `-short` flag to make the "stress test" run faster.
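The changelog entries above all concern how events and errors reach the two channels a consumer reads. For orientation, a minimal consumer of the public fsnotify API looks roughly like this (error handling trimmed):

    package main

    import (
        "log"
        "os"

        "github.com/fsnotify/fsnotify"
    )

    func main() {
        w, err := fsnotify.NewWatcher()
        if err != nil {
            log.Fatal(err)
        }
        defer w.Close()

        if err := w.Add(os.TempDir()); err != nil {
            log.Fatal(err)
        }

        for {
            select {
            case ev, ok := <-w.Events:
                if !ok { // channel closed by Close()
                    return
                }
                log.Println("event:", ev.Op, ev.Name)
            case err, ok := <-w.Errors:
                if !ok {
                    return
                }
                log.Println("error:", err)
            }
        }
    }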
+Writing new tests +----------------- +Scripts in the testdata directory allow creating test cases in a "shell-like" +syntax. The basic format is: + + script + + Output: + desired output + +For example: + + # Create a new empty file with some data. + watch / + echo data >/file + + Output: + create /file + write /file + +Just create a new file to add a new test; select which tests to run with +`-run TestScript/[path]`. + +script +------ +The script is a "shell-like" script: + + cmd arg arg + +Comments are supported with `#`: + + # Comment + cmd arg arg # Comment + +All operations are done in a temp directory; a path like "/foo" is rewritten to +"/tmp/TestFoo/foo". + +Arguments can be quoted with `"` or `'`; there are no escapes and they're +functionally identical right now, but this may change in the future, so best to +assume shell-like rules. + + touch "/file with spaces" + +End-of-line escapes with `\` are not supported. + +### Supported commands + + watch path [ops] # Watch the path, reporting events for it. Nothing is + # watched by default. Optionally a list of ops can be + # given, as with AddWith(path, WithOps(...)). + unwatch path # Stop watching the path. + watchlist n # Assert watchlist length. + + stop # Stop running the script; for debugging. + debug [yes/no] # Enable/disable FSNOTIFY_DEBUG (tests are run in + parallel by default, so -parallel=1 is probably a good + idea). + + touch path + mkdir [-p] dir + ln -s target link # Only ln -s supported. + mkfifo path + mknod dev path + mv src dst + rm [-r] path + chmod mode path # Octal only + sleep time-in-ms + + cat path # Read path (does nothing with the data; just reads it). + echo str >>path # Append "str" to "path". + echo str >path # Truncate "path" and write "str". + + require reason # Skip the test if "reason" is true; "skip" and + skip reason # "require" behave identical; it supports both for + # readability. Possible reasons are: + # + # always Always skip this test. + # symlink Symlinks are supported (requires admin + # permissions on Windows). + # mkfifo Platform doesn't support FIFO named sockets. + # mknod Platform doesn't support device nodes. + + +output +------ +After `Output:` the desired output is given; this is indented by convention, but +that's not required. + +The format of that is: + + # Comment + event path # Comment + + system: + event path + system2: + event path + +Every event is one line, and any whitespace between the event and path are +ignored. The path can optionally be surrounded in ". Anything after a "#" is +ignored. + +Platform-specific tests can be added after GOOS; for example: + + watch / + touch /file + + Output: + # Tested if nothing else matches + create /file + + # Windows-specific test. + windows: + write /file + +You can specify multiple platforms with a comma (e.g. "windows, linux:"). +"kqueue" is a shortcut for all kqueue systems (BSD, macOS). + [goon]: https://github.com/arp242/goon [Vagrant]: https://www.vagrantup.com/ diff --git a/vendor/github.com/fsnotify/fsnotify/backend_fen.go b/vendor/github.com/fsnotify/fsnotify/backend_fen.go index 28497f1dd8e..c349c326c71 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_fen.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_fen.go @@ -1,8 +1,8 @@ //go:build solaris -// +build solaris -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh +// FEN backend for illumos (supported) and Solaris (untested, but should work). +// +// See port_create(3c) etc. for docs. 
https://www.illumos.org/man/3C/port_create package fsnotify @@ -12,150 +12,33 @@ import ( "os" "path/filepath" "sync" + "time" + "github.com/fsnotify/fsnotify/internal" "golang.org/x/sys/unix" ) -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. 
moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. +type fen struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error mu sync.Mutex port *unix.EventPort - done chan struct{} // Channel for sending a "quit message" to the reader goroutine - dirs map[string]struct{} // Explicitly watched directories - watches map[string]struct{} // Explicitly watched non-directories + done chan struct{} // Channel for sending a "quit message" to the reader goroutine + dirs map[string]Op // Explicitly watched directories + watches map[string]Op // Explicitly watched non-directories } -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(0) +func newBackend(ev chan Event, errs chan error) (backend, error) { + return newBufferedBackend(0, ev, errs) } -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { - w := &Watcher{ - Events: make(chan Event, sz), - Errors: make(chan error), - dirs: make(map[string]struct{}), - watches: make(map[string]struct{}), +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { + w := &fen{ + Events: ev, + Errors: errs, + dirs: make(map[string]Op), + watches: make(map[string]Op), done: make(chan struct{}), } @@ -171,27 +54,30 @@ func NewBufferedWatcher(sz uint) (*Watcher, error) { // sendEvent attempts to send an event to the user, returning true if the event // was put in the channel successfully and false if the watcher has been closed. 
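The sendEvent/sendError helpers in these hunks share one send-or-abort shape: attempt the channel send, but give up once done is closed, and treat a nil error as trivially sent. The pattern in isolation, as a generic sketch; note that Go's select chooses randomly among ready cases, so the reordered cases are style, not priority:

    package main

    import "fmt"

    // trySend delivers v unless done has already been closed; it reports
    // whether the value was actually placed on the channel.
    func trySend[T any](done <-chan struct{}, ch chan<- T, v T) bool {
        select {
        case <-done:
            return false
        case ch <- v:
            return true
        }
    }

    func main() {
        done := make(chan struct{})
        ch := make(chan string, 1)
        fmt.Println(trySend(done, ch, "hello")) // true
        close(done)
        // ch is now full, so only the done case is ready: the send aborts.
        fmt.Println(trySend(done, ch, "world")) // false
    }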
-func (w *Watcher) sendEvent(name string, op Op) (sent bool) { +func (w *fen) sendEvent(name string, op Op) (sent bool) { select { - case w.Events <- Event{Name: name, Op: op}: - return true case <-w.done: return false + case w.Events <- Event{Name: name, Op: op}: + return true } } // sendError attempts to send an error to the user, returning true if the error // was put in the channel successfully and false if the watcher has been closed. -func (w *Watcher) sendError(err error) (sent bool) { - select { - case w.Errors <- err: +func (w *fen) sendError(err error) (sent bool) { + if err == nil { return true + } + select { case <-w.done: return false + case w.Errors <- err: + return true } } -func (w *Watcher) isClosed() bool { +func (w *fen) isClosed() bool { select { case <-w.done: return true @@ -200,8 +86,7 @@ func (w *Watcher) isClosed() bool { } } -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { +func (w *fen) Close() error { // Take the lock used by associateFile to prevent lingering events from // being processed after the close w.mu.Lock() @@ -213,60 +98,21 @@ func (w *Watcher) Close() error { return w.port.Close() } -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } +func (w *fen) Add(name string) error { return w.AddWith(name) } -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { +func (w *fen) AddWith(name string, opts ...addOpt) error { if w.isClosed() { return ErrClosed } - if w.port.PathIsWatched(name) { - return nil + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), name) } - _ = getOptions(opts...) + with := getOptions(opts...) 
+ if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } // Currently we resolve symlinks that were explicitly requested to be // watched. Otherwise we would use LStat here. @@ -283,7 +129,7 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error { } w.mu.Lock() - w.dirs[name] = struct{}{} + w.dirs[name] = with.op w.mu.Unlock() return nil } @@ -294,26 +140,22 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error { } w.mu.Lock() - w.watches[name] = struct{}{} + w.watches[name] = with.op w.mu.Unlock() return nil } -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { +func (w *fen) Remove(name string) error { if w.isClosed() { return nil } if !w.port.PathIsWatched(name) { return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } // The user has expressed an intent. Immediately remove this name from // whichever watch list it might be in. If it's not in there the delete @@ -346,7 +188,7 @@ func (w *Watcher) Remove(name string) error { } // readEvents contains the main loop that runs in a goroutine watching for events. -func (w *Watcher) readEvents() { +func (w *fen) readEvents() { // If this function returns, the watcher has been closed and we can close // these channels defer func() { @@ -382,17 +224,19 @@ func (w *Watcher) readEvents() { continue } + if debug { + internal.Debug(pevent.Path, pevent.Events) + } + err = w.handleEvent(&pevent) - if err != nil { - if !w.sendError(err) { - return - } + if !w.sendError(err) { + return } } } } -func (w *Watcher) handleDirectory(path string, stat os.FileInfo, follow bool, handler func(string, os.FileInfo, bool) error) error { +func (w *fen) handleDirectory(path string, stat os.FileInfo, follow bool, handler func(string, os.FileInfo, bool) error) error { files, err := os.ReadDir(path) if err != nil { return err @@ -418,7 +262,7 @@ func (w *Watcher) handleDirectory(path string, stat os.FileInfo, follow bool, ha // bitmap matches more than one event type (e.g. the file was both modified and // had the attributes changed between when the association was created and the // when event was returned) -func (w *Watcher) handleEvent(event *unix.PortEvent) error { +func (w *fen) handleEvent(event *unix.PortEvent) error { var ( events = event.Events path = event.Path @@ -510,15 +354,9 @@ func (w *Watcher) handleEvent(event *unix.PortEvent) error { } if events&unix.FILE_MODIFIED != 0 { - if fmode.IsDir() { - if watchedDir { - if err := w.updateDirectory(path); err != nil { - return err - } - } else { - if !w.sendEvent(path, Write) { - return nil - } + if fmode.IsDir() && watchedDir { + if err := w.updateDirectory(path); err != nil { + return err } } else { if !w.sendEvent(path, Write) { @@ -543,7 +381,7 @@ func (w *Watcher) handleEvent(event *unix.PortEvent) error { return nil } -func (w *Watcher) updateDirectory(path string) error { +func (w *fen) updateDirectory(path string) error { // The directory was modified, so we must find unwatched entities and watch // them. 
If something was removed from the directory, nothing will happen, // as everything else should still be watched. @@ -563,10 +401,8 @@ func (w *Watcher) updateDirectory(path string) error { return err } err = w.associateFile(path, finfo, false) - if err != nil { - if !w.sendError(err) { - return nil - } + if !w.sendError(err) { + return nil } if !w.sendEvent(path, Create) { return nil @@ -575,7 +411,7 @@ func (w *Watcher) updateDirectory(path string) error { return nil } -func (w *Watcher) associateFile(path string, stat os.FileInfo, follow bool) error { +func (w *fen) associateFile(path string, stat os.FileInfo, follow bool) error { if w.isClosed() { return ErrClosed } @@ -593,34 +429,34 @@ func (w *Watcher) associateFile(path string, stat os.FileInfo, follow bool) erro // cleared up that discrepancy. The most likely cause is that the event // has fired but we haven't processed it yet. err := w.port.DissociatePath(path) - if err != nil && err != unix.ENOENT { + if err != nil && !errors.Is(err, unix.ENOENT) { return err } } - // FILE_NOFOLLOW means we watch symlinks themselves rather than their - // targets. - events := unix.FILE_MODIFIED | unix.FILE_ATTRIB | unix.FILE_NOFOLLOW - if follow { - // We *DO* follow symlinks for explicitly watched entries. - events = unix.FILE_MODIFIED | unix.FILE_ATTRIB + + var events int + if !follow { + // Watch symlinks themselves rather than their targets unless this entry + // is explicitly watched. + events |= unix.FILE_NOFOLLOW + } + if true { // TODO: implement withOps() + events |= unix.FILE_MODIFIED } - return w.port.AssociatePath(path, stat, - events, - stat.Mode()) + if true { + events |= unix.FILE_ATTRIB + } + return w.port.AssociatePath(path, stat, events, stat.Mode()) } -func (w *Watcher) dissociateFile(path string, stat os.FileInfo, unused bool) error { +func (w *fen) dissociateFile(path string, stat os.FileInfo, unused bool) error { if !w.port.PathIsWatched(path) { return nil } return w.port.DissociatePath(path) } -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { +func (w *fen) WatchList() []string { if w.isClosed() { return nil } @@ -638,3 +474,11 @@ func (w *Watcher) WatchList() []string { return entries } + +func (w *fen) xSupports(op Op) bool { + if op.Has(xUnportableOpen) || op.Has(xUnportableRead) || + op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) { + return false + } + return true +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_inotify.go b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go index 921c1c1e401..36c311694cd 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_inotify.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go @@ -1,8 +1,4 @@ //go:build linux && !appengine -// +build linux,!appengine - -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh package fsnotify @@ -10,127 +6,20 @@ import ( "errors" "fmt" "io" + "io/fs" "os" "path/filepath" "strings" "sync" + "time" "unsafe" + "github.com/fsnotify/fsnotify/internal" "golang.org/x/sys/unix" ) -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. 
For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. 
For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. +type inotify struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error // Store fd here as os.File.Read() will no longer return on close after @@ -139,8 +28,26 @@ type Watcher struct { inotifyFile *os.File watches *watches done chan struct{} // Channel for sending a "quit message" to the reader goroutine - closeMu sync.Mutex + doneMu sync.Mutex doneResp chan struct{} // Channel to respond to Close + + // Store rename cookies in an array, with the index wrapping to 0. Almost + // all of the time what we get is a MOVED_FROM to set the cookie and the + // next event inotify sends will be MOVED_TO to read it. However, this is + // not guaranteed – as described in inotify(7) – and we may get other events + // between the two MOVED_* events (including other MOVED_* ones). + // + // A second issue is that moving a file outside the watched directory will + // trigger a MOVED_FROM to set the cookie, but we never see the MOVED_TO to + // read and delete it. So just storing it in a map would slowly leak memory. + // + // Doing it like this gives us a simple fast LRU-cache that won't allocate. + // Ten items should be more than enough for our purpose, and a loop over + // such a short array is faster than a map access anyway (not that it hugely + // matters since we're talking about hundreds of ns at the most, but still). + cookies [10]koekje + cookieIndex uint8 + cookiesMu sync.Mutex } type ( @@ -150,9 +57,14 @@ type ( path map[string]uint32 // pathname → wd } watch struct { - wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) - flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) - path string // Watch path. + wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) + flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) + path string // Watch path. + recurse bool // Recursion with ./...? + } + koekje struct { + cookie uint32 + path string } ) @@ -179,23 +91,45 @@ func (w *watches) add(ww *watch) { func (w *watches) remove(wd uint32) { w.mu.Lock() defer w.mu.Unlock() - delete(w.path, w.wd[wd].path) + watch := w.wd[wd] // Could have had Remove() called. See #616. 
+ if watch == nil { + return + } + delete(w.path, watch.path) delete(w.wd, wd) } -func (w *watches) removePath(path string) (uint32, bool) { +func (w *watches) removePath(path string) ([]uint32, error) { w.mu.Lock() defer w.mu.Unlock() + path, recurse := recursivePath(path) wd, ok := w.path[path] if !ok { - return 0, false + return nil, fmt.Errorf("%w: %s", ErrNonExistentWatch, path) + } + + watch := w.wd[wd] + if recurse && !watch.recurse { + return nil, fmt.Errorf("can't use /... with non-recursive watch %q", path) } delete(w.path, path) delete(w.wd, wd) + if !watch.recurse { + return []uint32{wd}, nil + } - return wd, true + wds := make([]uint32, 0, 8) + wds = append(wds, wd) + for p, rwd := range w.path { + if filepath.HasPrefix(p, path) { + delete(w.path, p) + delete(w.wd, rwd) + wds = append(wds, rwd) + } + } + return wds, nil } func (w *watches) byPath(path string) *watch { @@ -236,20 +170,11 @@ func (w *watches) updatePath(path string, f func(*watch) (*watch, error)) error return nil } -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(0) +func newBackend(ev chan Event, errs chan error) (backend, error) { + return newBufferedBackend(0, ev, errs) } -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { // Need to set nonblocking mode for SetDeadline to work, otherwise blocking // I/O operations won't terminate on close. fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC | unix.IN_NONBLOCK) @@ -257,12 +182,12 @@ func NewBufferedWatcher(sz uint) (*Watcher, error) { return nil, errno } - w := &Watcher{ + w := &inotify{ + Events: ev, + Errors: errs, fd: fd, inotifyFile: os.NewFile(uintptr(fd), ""), watches: newWatches(), - Events: make(chan Event, sz), - Errors: make(chan error), done: make(chan struct{}), doneResp: make(chan struct{}), } @@ -272,26 +197,29 @@ func NewBufferedWatcher(sz uint) (*Watcher, error) { } // Returns true if the event was sent, or false if watcher is closed. -func (w *Watcher) sendEvent(e Event) bool { +func (w *inotify) sendEvent(e Event) bool { select { - case w.Events <- e: - return true case <-w.done: return false + case w.Events <- e: + return true } } // Returns true if the error was sent, or false if watcher is closed. -func (w *Watcher) sendError(err error) bool { - select { - case w.Errors <- err: +func (w *inotify) sendError(err error) bool { + if err == nil { return true + } + select { case <-w.done: return false + case w.Errors <- err: + return true } } -func (w *Watcher) isClosed() bool { +func (w *inotify) isClosed() bool { select { case <-w.done: return true @@ -300,15 +228,14 @@ func (w *Watcher) isClosed() bool { } } -// Close removes all watches and closes the Events channel. 
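The Close implementation just below serializes the check-then-close of the done channel behind a mutex, making repeated Close calls a no-op instead of a double-close panic. The shape in isolation:

    package main

    import (
        "fmt"
        "sync"
    )

    type watcherLike struct {
        doneMu sync.Mutex
        done   chan struct{}
    }

    func (w *watcherLike) isClosed() bool {
        select {
        case <-w.done:
            return true
        default:
            return false
        }
    }

    // Close is safe to call more than once: the mutex makes the
    // check-then-close sequence atomic, so done is closed exactly once.
    func (w *watcherLike) Close() error {
        w.doneMu.Lock()
        defer w.doneMu.Unlock()
        if w.isClosed() {
            return nil
        }
        close(w.done)
        return nil
    }

    func main() {
        w := &watcherLike{done: make(chan struct{})}
        fmt.Println(w.Close(), w.Close()) // <nil> <nil>, no panic
    }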
-func (w *Watcher) Close() error { - w.closeMu.Lock() +func (w *inotify) Close() error { + w.doneMu.Lock() if w.isClosed() { - w.closeMu.Unlock() + w.doneMu.Unlock() return nil } close(w.done) - w.closeMu.Unlock() + w.doneMu.Unlock() // Causes any blocking reads to return with an error, provided the file // still supports deadline operations. @@ -323,78 +250,104 @@ func (w *Watcher) Close() error { return nil } -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } - -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { +func (w *inotify) Add(name string) error { return w.AddWith(name) } + +func (w *inotify) AddWith(path string, opts ...addOpt) error { if w.isClosed() { return ErrClosed } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), path) + } + + with := getOptions(opts...) + if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } - name = filepath.Clean(name) - _ = getOptions(opts...) + path, recurse := recursivePath(path) + if recurse { + return filepath.WalkDir(path, func(root string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + if !d.IsDir() { + if root == path { + return fmt.Errorf("fsnotify: not a directory: %q", path) + } + return nil + } - var flags uint32 = unix.IN_MOVED_TO | unix.IN_MOVED_FROM | - unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY | - unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF + // Send a Create event when adding new directory from a recursive + // watch; this is for "mkdir -p one/two/three". 
Usually all those + // directories will be created before we can set up watchers on the + // subdirectories, so only "one" would be sent as a Create event and + // not "one/two" and "one/two/three" (inotifywait -r has the same + // problem). + if with.sendCreate && root != path { + w.sendEvent(Event{Name: root, Op: Create}) + } + + return w.add(root, with, true) + }) + } - return w.watches.updatePath(name, func(existing *watch) (*watch, error) { + return w.add(path, with, false) +} + +func (w *inotify) add(path string, with withOpts, recurse bool) error { + var flags uint32 + if with.noFollow { + flags |= unix.IN_DONT_FOLLOW + } + if with.op.Has(Create) { + flags |= unix.IN_CREATE + } + if with.op.Has(Write) { + flags |= unix.IN_MODIFY + } + if with.op.Has(Remove) { + flags |= unix.IN_DELETE | unix.IN_DELETE_SELF + } + if with.op.Has(Rename) { + flags |= unix.IN_MOVED_TO | unix.IN_MOVED_FROM | unix.IN_MOVE_SELF + } + if with.op.Has(Chmod) { + flags |= unix.IN_ATTRIB + } + if with.op.Has(xUnportableOpen) { + flags |= unix.IN_OPEN + } + if with.op.Has(xUnportableRead) { + flags |= unix.IN_ACCESS + } + if with.op.Has(xUnportableCloseWrite) { + flags |= unix.IN_CLOSE_WRITE + } + if with.op.Has(xUnportableCloseRead) { + flags |= unix.IN_CLOSE_NOWRITE + } + return w.register(path, flags, recurse) +} + +func (w *inotify) register(path string, flags uint32, recurse bool) error { + return w.watches.updatePath(path, func(existing *watch) (*watch, error) { if existing != nil { flags |= existing.flags | unix.IN_MASK_ADD } - wd, err := unix.InotifyAddWatch(w.fd, name, flags) + wd, err := unix.InotifyAddWatch(w.fd, path, flags) if wd == -1 { return nil, err } if existing == nil { return &watch{ - wd: uint32(wd), - path: name, - flags: flags, + wd: uint32(wd), + path: path, + flags: flags, + recurse: recurse, }, nil } @@ -404,49 +357,44 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error { }) } -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { +func (w *inotify) Remove(name string) error { if w.isClosed() { return nil } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } return w.remove(filepath.Clean(name)) } -func (w *Watcher) remove(name string) error { - wd, ok := w.watches.removePath(name) - if !ok { - return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) - } - - success, errno := unix.InotifyRmWatch(w.fd, wd) - if success == -1 { - // TODO: Perhaps it's not helpful to return an error here in every case; - // The only two possible errors are: - // - // - EBADF, which happens when w.fd is not a valid file descriptor - // of any kind. - // - EINVAL, which is when fd is not an inotify descriptor or wd - // is not a valid watch descriptor. Watch descriptors are - // invalidated when they are removed explicitly or implicitly; - // explicitly by inotify_rm_watch, implicitly when the file they - // are watching is deleted. 
- return errno +func (w *inotify) remove(name string) error { + wds, err := w.watches.removePath(name) + if err != nil { + return err + } + + for _, wd := range wds { + _, err := unix.InotifyRmWatch(w.fd, wd) + if err != nil { + // TODO: Perhaps it's not helpful to return an error here in every + // case; the only two possible errors are: + // + // EBADF, which happens when w.fd is not a valid file descriptor of + // any kind. + // + // EINVAL, which is when fd is not an inotify descriptor or wd is + // not a valid watch descriptor. Watch descriptors are invalidated + // when they are removed explicitly or implicitly; explicitly by + // inotify_rm_watch, implicitly when the file they are watching is + // deleted. + return err + } } return nil } -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { +func (w *inotify) WatchList() []string { if w.isClosed() { return nil } @@ -463,7 +411,7 @@ func (w *Watcher) WatchList() []string { // readEvents reads from the inotify file descriptor, converts the // received events into Event objects and sends them via the Events channel -func (w *Watcher) readEvents() { +func (w *inotify) readEvents() { defer func() { close(w.doneResp) close(w.Errors) @@ -506,15 +454,17 @@ func (w *Watcher) readEvents() { continue } - var offset uint32 // We don't know how many events we just read into the buffer // While the offset points to at least one whole event... + var offset uint32 for offset <= uint32(n-unix.SizeofInotifyEvent) { var ( // Point "raw" to the event in the buffer raw = (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset])) mask = uint32(raw.Mask) nameLen = uint32(raw.Len) + // Move to the next event in the buffer + next = func() { offset += unix.SizeofInotifyEvent + nameLen } ) if mask&unix.IN_Q_OVERFLOW != 0 { @@ -523,21 +473,53 @@ func (w *Watcher) readEvents() { } } - // If the event happened to the watched directory or the watched file, the kernel - // doesn't append the filename to the event, but we would like to always fill the - // the "Name" field with a valid filename. We retrieve the path of the watch from - // the "paths" map. + /// If the event happened to the watched directory or the watched + /// file, the kernel doesn't append the filename to the event, but + /// we would like to always fill the the "Name" field with a valid + /// filename. We retrieve the path of the watch from the "paths" + /// map. watch := w.watches.byWd(uint32(raw.Wd)) + /// Can be nil if Remove() was called in another goroutine for this + /// path inbetween reading the events from the kernel and reading + /// the internal state. Not much we can do about it, so just skip. + /// See #616. + if watch == nil { + next() + continue + } + + name := watch.path + if nameLen > 0 { + /// Point "bytes" at the first byte of the filename + bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] + /// The filename is padded with NULL bytes. TrimRight() gets rid of those. + name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") + } + + if debug { + internal.Debug(name, raw.Mask, raw.Cookie) + } + + if mask&unix.IN_IGNORED != 0 { //&& event.Op != 0 + next() + continue + } // inotify will automatically remove the watch on deletes; just need // to clean our state here. 
- if watch != nil && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { + if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { w.watches.remove(watch.wd) } + // We can't really update the state when a watched path is moved; // only IN_MOVE_SELF is sent and not IN_MOVED_{FROM,TO}. So remove // the watch. - if watch != nil && mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF { + if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF { + if watch.recurse { + next() // Do nothing + continue + } + err := w.remove(watch.path) if err != nil && !errors.Is(err, ErrNonExistentWatch) { if !w.sendError(err) { @@ -546,34 +528,69 @@ func (w *Watcher) readEvents() { } } - var name string - if watch != nil { - name = watch.path - } - if nameLen > 0 { - // Point "bytes" at the first byte of the filename - bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] - // The filename is padded with NULL bytes. TrimRight() gets rid of those. - name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") + /// Skip if we're watching both this path and the parent; the parent + /// will already send a delete so no need to do it twice. + if mask&unix.IN_DELETE_SELF != 0 { + if _, ok := w.watches.path[filepath.Dir(watch.path)]; ok { + next() + continue + } } - event := w.newEvent(name, mask) + ev := w.newEvent(name, mask, raw.Cookie) + // Need to update watch path for recurse. + if watch.recurse { + isDir := mask&unix.IN_ISDIR == unix.IN_ISDIR + /// New directory created: set up watch on it. + if isDir && ev.Has(Create) { + err := w.register(ev.Name, watch.flags, true) + if !w.sendError(err) { + return + } - // Send the events that are not ignored on the events channel - if mask&unix.IN_IGNORED == 0 { - if !w.sendEvent(event) { - return + // This was a directory rename, so we need to update all + // the children. + // + // TODO: this is of course pretty slow; we should use a + // better data structure for storing all of this, e.g. store + // children in the watch. I have some code for this in my + // kqueue refactor we can use in the future. For now I'm + // okay with this as it's not publicly available. + // Correctness first, performance second. + if ev.renamedFrom != "" { + w.watches.mu.Lock() + for k, ww := range w.watches.wd { + if k == watch.wd || ww.path == ev.Name { + continue + } + if strings.HasPrefix(ww.path, ev.renamedFrom) { + ww.path = strings.Replace(ww.path, ev.renamedFrom, ev.Name, 1) + w.watches.wd[k] = ww + } + } + w.watches.mu.Unlock() + } } } - // Move to the next event in the buffer - offset += unix.SizeofInotifyEvent + nameLen + /// Send the events that are not ignored on the events channel + if !w.sendEvent(ev) { + return + } + next() } } } -// newEvent returns an platform-independent Event based on an inotify mask. -func (w *Watcher) newEvent(name string, mask uint32) Event { +func (w *inotify) isRecursive(path string) bool { + ww := w.watches.byPath(path) + if ww == nil { // path could be a file, so also check the Dir. 
+ ww = w.watches.byPath(filepath.Dir(path)) + } + return ww != nil && ww.recurse +} + +func (w *inotify) newEvent(name string, mask, cookie uint32) Event { e := Event{Name: name} if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { e.Op |= Create @@ -584,11 +601,58 @@ func (w *Watcher) newEvent(name string, mask uint32) Event { if mask&unix.IN_MODIFY == unix.IN_MODIFY { e.Op |= Write } + if mask&unix.IN_OPEN == unix.IN_OPEN { + e.Op |= xUnportableOpen + } + if mask&unix.IN_ACCESS == unix.IN_ACCESS { + e.Op |= xUnportableRead + } + if mask&unix.IN_CLOSE_WRITE == unix.IN_CLOSE_WRITE { + e.Op |= xUnportableCloseWrite + } + if mask&unix.IN_CLOSE_NOWRITE == unix.IN_CLOSE_NOWRITE { + e.Op |= xUnportableCloseRead + } if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { e.Op |= Rename } if mask&unix.IN_ATTRIB == unix.IN_ATTRIB { e.Op |= Chmod } + + if cookie != 0 { + if mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { + w.cookiesMu.Lock() + w.cookies[w.cookieIndex] = koekje{cookie: cookie, path: e.Name} + w.cookieIndex++ + if w.cookieIndex > 9 { + w.cookieIndex = 0 + } + w.cookiesMu.Unlock() + } else if mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { + w.cookiesMu.Lock() + var prev string + for _, c := range w.cookies { + if c.cookie == cookie { + prev = c.path + break + } + } + w.cookiesMu.Unlock() + e.renamedFrom = prev + } + } return e } + +func (w *inotify) xSupports(op Op) bool { + return true // Supports everything. +} + +func (w *inotify) state() { + w.watches.mu.Lock() + defer w.watches.mu.Unlock() + for wd, ww := range w.watches.wd { + fmt.Fprintf(os.Stderr, "%4d: recurse=%t %q\n", wd, ww.recurse, ww.path) + } +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go index 063a0915a07..d8de5ab76fd 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go @@ -1,8 +1,4 @@ //go:build freebsd || openbsd || netbsd || dragonfly || darwin -// +build freebsd openbsd netbsd dragonfly darwin - -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh package fsnotify @@ -11,174 +7,195 @@ import ( "fmt" "os" "path/filepath" + "runtime" "sync" + "time" + "github.com/fsnotify/fsnotify/internal" "golang.org/x/sys/unix" ) -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". 
-// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. 
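+// kqueue implements the fsnotify backend for the BSDs and macOS on top of the
+// kqueue(2) kernel event notification interface.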
+type kqueue struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error - done chan struct{} - kq int // File descriptor (as returned by the kqueue() syscall). - closepipe [2]int // Pipe used for closing. - mu sync.Mutex // Protects access to watcher data - watches map[string]int // Watched file descriptors (key: path). - watchesByDir map[string]map[int]struct{} // Watched file descriptors indexed by the parent directory (key: dirname(path)). - userWatches map[string]struct{} // Watches added with Watcher.Add() - dirFlags map[string]uint32 // Watched directories to fflags used in kqueue. - paths map[int]pathInfo // File descriptors to path names for processing kqueue events. - fileExists map[string]struct{} // Keep track of if we know this file exists (to stop duplicate create events). - isClosed bool // Set to true when Close() is first called + kq int // File descriptor (as returned by the kqueue() syscall). + closepipe [2]int // Pipe used for closing kq. + watches *watches + done chan struct{} + doneMu sync.Mutex } -type pathInfo struct { - name string - isDir bool +type ( + watches struct { + mu sync.RWMutex + wd map[int]watch // wd → watch + path map[string]int // pathname → wd + byDir map[string]map[int]struct{} // dirname(path) → wd + seen map[string]struct{} // Keep track of if we know this file exists. + byUser map[string]struct{} // Watches added with Watcher.Add() + } + watch struct { + wd int + name string + linkName string // In case of links; name is the target, and this is the link. + isDir bool + dirFlags uint32 + } +) + +func newWatches() *watches { + return &watches{ + wd: make(map[int]watch), + path: make(map[string]int), + byDir: make(map[string]map[int]struct{}), + seen: make(map[string]struct{}), + byUser: make(map[string]struct{}), + } } -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(0) +func (w *watches) listPaths(userOnly bool) []string { + w.mu.RLock() + defer w.mu.RUnlock() + + if userOnly { + l := make([]string, 0, len(w.byUser)) + for p := range w.byUser { + l = append(l, p) + } + return l + } + + l := make([]string, 0, len(w.path)) + for p := range w.path { + l = append(l, p) + } + return l } -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { +func (w *watches) watchesInDir(path string) []string { + w.mu.RLock() + defer w.mu.RUnlock() + + l := make([]string, 0, 4) + for fd := range w.byDir[path] { + info := w.wd[fd] + if _, ok := w.byUser[info.name]; !ok { + l = append(l, info.name) + } + } + return l +} + +// Mark path as added by the user. 
+func (w *watches) addUserWatch(path string) { + w.mu.Lock() + defer w.mu.Unlock() + w.byUser[path] = struct{}{} +} + +func (w *watches) addLink(path string, fd int) { + w.mu.Lock() + defer w.mu.Unlock() + + w.path[path] = fd + w.seen[path] = struct{}{} +} + +func (w *watches) add(path, linkPath string, fd int, isDir bool) { + w.mu.Lock() + defer w.mu.Unlock() + + w.path[path] = fd + w.wd[fd] = watch{wd: fd, name: path, linkName: linkPath, isDir: isDir} + + parent := filepath.Dir(path) + byDir, ok := w.byDir[parent] + if !ok { + byDir = make(map[int]struct{}, 1) + w.byDir[parent] = byDir + } + byDir[fd] = struct{}{} +} + +func (w *watches) byWd(fd int) (watch, bool) { + w.mu.RLock() + defer w.mu.RUnlock() + info, ok := w.wd[fd] + return info, ok +} + +func (w *watches) byPath(path string) (watch, bool) { + w.mu.RLock() + defer w.mu.RUnlock() + info, ok := w.wd[w.path[path]] + return info, ok +} + +func (w *watches) updateDirFlags(path string, flags uint32) { + w.mu.Lock() + defer w.mu.Unlock() + + fd := w.path[path] + info := w.wd[fd] + info.dirFlags = flags + w.wd[fd] = info +} + +func (w *watches) remove(fd int, path string) bool { + w.mu.Lock() + defer w.mu.Unlock() + + isDir := w.wd[fd].isDir + delete(w.path, path) + delete(w.byUser, path) + + parent := filepath.Dir(path) + delete(w.byDir[parent], fd) + + if len(w.byDir[parent]) == 0 { + delete(w.byDir, parent) + } + + delete(w.wd, fd) + delete(w.seen, path) + return isDir +} + +func (w *watches) markSeen(path string, exists bool) { + w.mu.Lock() + defer w.mu.Unlock() + if exists { + w.seen[path] = struct{}{} + } else { + delete(w.seen, path) + } +} + +func (w *watches) seenBefore(path string) bool { + w.mu.RLock() + defer w.mu.RUnlock() + _, ok := w.seen[path] + return ok +} + +func newBackend(ev chan Event, errs chan error) (backend, error) { + return newBufferedBackend(0, ev, errs) +} + +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { kq, closepipe, err := newKqueue() if err != nil { return nil, err } - w := &Watcher{ - kq: kq, - closepipe: closepipe, - watches: make(map[string]int), - watchesByDir: make(map[string]map[int]struct{}), - dirFlags: make(map[string]uint32), - paths: make(map[int]pathInfo), - fileExists: make(map[string]struct{}), - userWatches: make(map[string]struct{}), - Events: make(chan Event, sz), - Errors: make(chan error), - done: make(chan struct{}), + w := &kqueue{ + Events: ev, + Errors: errs, + kq: kq, + closepipe: closepipe, + done: make(chan struct{}), + watches: newWatches(), } go w.readEvents() @@ -203,6 +220,8 @@ func newKqueue() (kq int, closepipe [2]int, err error) { unix.Close(kq) return kq, closepipe, err } + unix.CloseOnExec(closepipe[0]) + unix.CloseOnExec(closepipe[1]) // Register changes to listen on the closepipe. changes := make([]unix.Kevent_t, 1) @@ -221,166 +240,108 @@ func newKqueue() (kq int, closepipe [2]int, err error) { } // Returns true if the event was sent, or false if watcher is closed. -func (w *Watcher) sendEvent(e Event) bool { +func (w *kqueue) sendEvent(e Event) bool { select { - case w.Events <- e: - return true case <-w.done: return false + case w.Events <- e: + return true } } // Returns true if the error was sent, or false if watcher is closed. 
-func (w *Watcher) sendError(err error) bool { +func (w *kqueue) sendError(err error) bool { + if err == nil { + return true + } select { + case <-w.done: + return false case w.Errors <- err: return true + } +} + +func (w *kqueue) isClosed() bool { + select { case <-w.done: + return true + default: return false } } -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() +func (w *kqueue) Close() error { + w.doneMu.Lock() + if w.isClosed() { + w.doneMu.Unlock() return nil } - w.isClosed = true + close(w.done) + w.doneMu.Unlock() - // copy paths to remove while locked - pathsToRemove := make([]string, 0, len(w.watches)) - for name := range w.watches { - pathsToRemove = append(pathsToRemove, name) - } - w.mu.Unlock() // Unlock before calling Remove, which also locks + pathsToRemove := w.watches.listPaths(false) for _, name := range pathsToRemove { w.Remove(name) } // Send "quit" message to the reader goroutine. unix.Close(w.closepipe[1]) - close(w.done) - return nil } -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } +func (w *kqueue) Add(name string) error { return w.AddWith(name) } -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { - _ = getOptions(opts...) +func (w *kqueue) AddWith(name string, opts ...addOpt) error { + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } + + with := getOptions(opts...) 
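+	// Fail early when the requested ops include the unportable
+	// open/read/close events, which the kqueue backend can't deliver
+	// (see xSupports below).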
+ if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } - w.mu.Lock() - w.userWatches[name] = struct{}{} - w.mu.Unlock() _, err := w.addWatch(name, noteAllEvents) - return err + if err != nil { + return err + } + w.watches.addUserWatch(name) + return nil } -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { +func (w *kqueue) Remove(name string) error { + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } return w.remove(name, true) } -func (w *Watcher) remove(name string, unwatchFiles bool) error { - name = filepath.Clean(name) - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() +func (w *kqueue) remove(name string, unwatchFiles bool) error { + if w.isClosed() { return nil } - watchfd, ok := w.watches[name] - w.mu.Unlock() + + name = filepath.Clean(name) + info, ok := w.watches.byPath(name) if !ok { return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) } - err := w.register([]int{watchfd}, unix.EV_DELETE, 0) + err := w.register([]int{info.wd}, unix.EV_DELETE, 0) if err != nil { return err } - unix.Close(watchfd) - - w.mu.Lock() - isDir := w.paths[watchfd].isDir - delete(w.watches, name) - delete(w.userWatches, name) - - parentName := filepath.Dir(name) - delete(w.watchesByDir[parentName], watchfd) - - if len(w.watchesByDir[parentName]) == 0 { - delete(w.watchesByDir, parentName) - } + unix.Close(info.wd) - delete(w.paths, watchfd) - delete(w.dirFlags, name) - delete(w.fileExists, name) - w.mu.Unlock() + isDir := w.watches.remove(info.wd, name) // Find all watched paths that are in this directory that are not external. if unwatchFiles && isDir { - var pathsToRemove []string - w.mu.Lock() - for fd := range w.watchesByDir[name] { - path := w.paths[fd] - if _, ok := w.userWatches[path.name]; !ok { - pathsToRemove = append(pathsToRemove, path.name) - } - } - w.mu.Unlock() + pathsToRemove := w.watches.watchesInDir(name) for _, name := range pathsToRemove { // Since these are internal, not much sense in propagating error to // the user, as that will just confuse them with an error about a @@ -391,23 +352,11 @@ func (w *Watcher) remove(name string, unwatchFiles bool) error { return nil } -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { - w.mu.Lock() - defer w.mu.Unlock() - if w.isClosed { +func (w *kqueue) WatchList() []string { + if w.isClosed() { return nil } - - entries := make([]string, 0, len(w.userWatches)) - for pathname := range w.userWatches { - entries = append(entries, pathname) - } - - return entries + return w.watches.listPaths(true) } // Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) @@ -417,34 +366,26 @@ const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | un // described in kevent(2). // // Returns the real path to the file which was added, with symlinks resolved. 
-func (w *Watcher) addWatch(name string, flags uint32) (string, error) { - var isDir bool - name = filepath.Clean(name) - - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() +func (w *kqueue) addWatch(name string, flags uint32) (string, error) { + if w.isClosed() { return "", ErrClosed } - watchfd, alreadyWatching := w.watches[name] - // We already have a watch, but we can still override flags. - if alreadyWatching { - isDir = w.paths[watchfd].isDir - } - w.mu.Unlock() + name = filepath.Clean(name) + + info, alreadyWatching := w.watches.byPath(name) if !alreadyWatching { fi, err := os.Lstat(name) if err != nil { return "", err } - // Don't watch sockets or named pipes + // Don't watch sockets or named pipes. if (fi.Mode()&os.ModeSocket == os.ModeSocket) || (fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe) { return "", nil } - // Follow Symlinks. + // Follow symlinks. if fi.Mode()&os.ModeSymlink == os.ModeSymlink { link, err := os.Readlink(name) if err != nil { @@ -455,18 +396,15 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { return "", nil } - w.mu.Lock() - _, alreadyWatching = w.watches[link] - w.mu.Unlock() - + _, alreadyWatching = w.watches.byPath(link) if alreadyWatching { // Add to watches so we don't get spurious Create events later // on when we diff the directories. - w.watches[name] = 0 - w.fileExists[name] = struct{}{} + w.watches.addLink(name, 0) return link, nil } + info.linkName = name name = link fi, err = os.Lstat(name) if err != nil { @@ -477,7 +415,7 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { // Retry on EINTR; open() can return EINTR in practice on macOS. // See #354, and Go issues 11180 and 39237. for { - watchfd, err = unix.Open(name, openMode, 0) + info.wd, err = unix.Open(name, openMode, 0) if err == nil { break } @@ -488,40 +426,25 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { return "", err } - isDir = fi.IsDir() + info.isDir = fi.IsDir() } - err := w.register([]int{watchfd}, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE, flags) + err := w.register([]int{info.wd}, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE, flags) if err != nil { - unix.Close(watchfd) + unix.Close(info.wd) return "", err } if !alreadyWatching { - w.mu.Lock() - parentName := filepath.Dir(name) - w.watches[name] = watchfd - - watchesByDir, ok := w.watchesByDir[parentName] - if !ok { - watchesByDir = make(map[int]struct{}, 1) - w.watchesByDir[parentName] = watchesByDir - } - watchesByDir[watchfd] = struct{}{} - w.paths[watchfd] = pathInfo{name: name, isDir: isDir} - w.mu.Unlock() + w.watches.add(name, info.linkName, info.wd, info.isDir) } - if isDir { - // Watch the directory if it has not been watched before, or if it was - // watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) - w.mu.Lock() - + // Watch the directory if it has not been watched before, or if it was + // watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) + if info.isDir { watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && - (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE) - // Store flags so this watch can be updated later - w.dirFlags[name] = flags - w.mu.Unlock() + (!alreadyWatching || (info.dirFlags&unix.NOTE_WRITE) != unix.NOTE_WRITE) + w.watches.updateDirFlags(name, flags) if watchDir { if err := w.watchDirectoryFiles(name); err != nil { @@ -534,7 +457,7 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { // readEvents reads from kqueue and converts the received 
kevents into // Event values that it sends down the Events channel. -func (w *Watcher) readEvents() { +func (w *kqueue) readEvents() { defer func() { close(w.Events) close(w.Errors) @@ -543,50 +466,65 @@ func (w *Watcher) readEvents() { }() eventBuffer := make([]unix.Kevent_t, 10) - for closed := false; !closed; { + for { kevents, err := w.read(eventBuffer) // EINTR is okay, the syscall was interrupted before timeout expired. if err != nil && err != unix.EINTR { if !w.sendError(fmt.Errorf("fsnotify.readEvents: %w", err)) { - closed = true + return } - continue } - // Flush the events we received to the Events channel for _, kevent := range kevents { var ( - watchfd = int(kevent.Ident) - mask = uint32(kevent.Fflags) + wd = int(kevent.Ident) + mask = uint32(kevent.Fflags) ) // Shut down the loop when the pipe is closed, but only after all // other events have been processed. - if watchfd == w.closepipe[0] { - closed = true - continue + if wd == w.closepipe[0] { + return } - w.mu.Lock() - path := w.paths[watchfd] - w.mu.Unlock() + path, ok := w.watches.byWd(wd) + if debug { + internal.Debug(path.name, &kevent) + } - event := w.newEvent(path.name, mask) + // On macOS it seems that sometimes an event with Ident=0 is + // delivered, and no other flags/information beyond that, even + // though we never saw such a file descriptor. For example in + // TestWatchSymlink/277 (usually at the end, but sometimes sooner): + // + // fmt.Printf("READ: %2d %#v\n", kevent.Ident, kevent) + // unix.Kevent_t{Ident:0x2a, Filter:-4, Flags:0x25, Fflags:0x2, Data:0, Udata:(*uint8)(nil)} + // unix.Kevent_t{Ident:0x0, Filter:-4, Flags:0x25, Fflags:0x2, Data:0, Udata:(*uint8)(nil)} + // + // The first is a normal event, the second with Ident 0. No error + // flag, no data, no ... nothing. + // + // I read a bit through bsd/kern_event.c from the xnu source, but I + // don't really see an obvious location where this is triggered – + // this doesn't seem intentional, but idk... + // + // Technically fd 0 is a valid descriptor, so only skip it if + // there's no path, and if we're on macOS. + if !ok && kevent.Ident == 0 && runtime.GOOS == "darwin" { + continue + } + + event := w.newEvent(path.name, path.linkName, mask) if event.Has(Rename) || event.Has(Remove) { w.remove(event.Name, false) - w.mu.Lock() - delete(w.fileExists, event.Name) - w.mu.Unlock() + w.watches.markSeen(event.Name, false) } if path.isDir && event.Has(Write) && !event.Has(Remove) { - w.sendDirectoryChangeEvents(event.Name) - } else { - if !w.sendEvent(event) { - closed = true - continue - } + w.dirChange(event.Name) + } else if !w.sendEvent(event) { + return } if event.Has(Remove) { @@ -594,25 +532,34 @@ func (w *Watcher) readEvents() { // mv f1 f2 will delete f2, then create f2. if path.isDir { fileDir := filepath.Clean(event.Name) - w.mu.Lock() - _, found := w.watches[fileDir] - w.mu.Unlock() + _, found := w.watches.byPath(fileDir) if found { - err := w.sendDirectoryChangeEvents(fileDir) - if err != nil { - if !w.sendError(err) { - closed = true - } + // TODO: this branch is never triggered in any test. + // Added in d6220df (2012). + // isDir check added in 8611c35 (2016): https://github.com/fsnotify/fsnotify/pull/111 + // + // I don't really get how this can be triggered either. + // And it wasn't triggered in the patch that added it, + // either. + // + // Original also had a comment: + // make sure the directory exists before we watch for + // changes. 
When we do a recursive watch and perform + // rm -rf, the parent directory might have gone + // missing, ignore the missing directory and let the + // upcoming delete event remove the watch from the + // parent directory. + err := w.dirChange(fileDir) + if !w.sendError(err) { + return } } } else { - filePath := filepath.Clean(event.Name) - if fi, err := os.Lstat(filePath); err == nil { - err := w.sendFileCreatedEventIfNew(filePath, fi) - if err != nil { - if !w.sendError(err) { - closed = true - } + path := filepath.Clean(event.Name) + if fi, err := os.Lstat(path); err == nil { + err := w.sendCreateIfNew(path, fi) + if !w.sendError(err) { + return } } } @@ -622,8 +569,14 @@ func (w *Watcher) readEvents() { } // newEvent returns an platform-independent Event based on kqueue Fflags. -func (w *Watcher) newEvent(name string, mask uint32) Event { +func (w *kqueue) newEvent(name, linkName string, mask uint32) Event { e := Event{Name: name} + if linkName != "" { + // If the user watched "/path/link" then emit events as "/path/link" + // rather than "/path/target". + e.Name = linkName + } + if mask&unix.NOTE_DELETE == unix.NOTE_DELETE { e.Op |= Remove } @@ -645,8 +598,7 @@ func (w *Watcher) newEvent(name string, mask uint32) Event { } // watchDirectoryFiles to mimic inotify when adding a watch on a directory -func (w *Watcher) watchDirectoryFiles(dirPath string) error { - // Get all files +func (w *kqueue) watchDirectoryFiles(dirPath string) error { files, err := os.ReadDir(dirPath) if err != nil { return err @@ -674,9 +626,7 @@ func (w *Watcher) watchDirectoryFiles(dirPath string) error { } } - w.mu.Lock() - w.fileExists[cleanPath] = struct{}{} - w.mu.Unlock() + w.watches.markSeen(cleanPath, true) } return nil @@ -686,7 +636,7 @@ func (w *Watcher) watchDirectoryFiles(dirPath string) error { // // This functionality is to have the BSD watcher match the inotify, which sends // a create event for files created in a watched directory. -func (w *Watcher) sendDirectoryChangeEvents(dir string) error { +func (w *kqueue) dirChange(dir string) error { files, err := os.ReadDir(dir) if err != nil { // Directory no longer exists: we can ignore this safely. kqueue will @@ -694,61 +644,51 @@ func (w *Watcher) sendDirectoryChangeEvents(dir string) error { if errors.Is(err, os.ErrNotExist) { return nil } - return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err) + return fmt.Errorf("fsnotify.dirChange: %w", err) } for _, f := range files { fi, err := f.Info() if err != nil { - return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err) + return fmt.Errorf("fsnotify.dirChange: %w", err) } - err = w.sendFileCreatedEventIfNew(filepath.Join(dir, fi.Name()), fi) + err = w.sendCreateIfNew(filepath.Join(dir, fi.Name()), fi) if err != nil { // Don't need to send an error if this file isn't readable. if errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM) { return nil } - return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err) + return fmt.Errorf("fsnotify.dirChange: %w", err) } } return nil } -// sendFileCreatedEvent sends a create event if the file isn't already being tracked. -func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fi os.FileInfo) (err error) { - w.mu.Lock() - _, doesExist := w.fileExists[filePath] - w.mu.Unlock() - if !doesExist { - if !w.sendEvent(Event{Name: filePath, Op: Create}) { - return +// Send a create event if the file isn't already being tracked, and start +// watching this file. 
+func (w *kqueue) sendCreateIfNew(path string, fi os.FileInfo) error { + if !w.watches.seenBefore(path) { + if !w.sendEvent(Event{Name: path, Op: Create}) { + return nil } } - // like watchDirectoryFiles (but without doing another ReadDir) - filePath, err = w.internalWatch(filePath, fi) + // Like watchDirectoryFiles, but without doing another ReadDir. + path, err := w.internalWatch(path, fi) if err != nil { return err } - - w.mu.Lock() - w.fileExists[filePath] = struct{}{} - w.mu.Unlock() - + w.watches.markSeen(path, true) return nil } -func (w *Watcher) internalWatch(name string, fi os.FileInfo) (string, error) { +func (w *kqueue) internalWatch(name string, fi os.FileInfo) (string, error) { if fi.IsDir() { // mimic Linux providing delete events for subdirectories, but preserve // the flags used if currently watching subdirectory - w.mu.Lock() - flags := w.dirFlags[name] - w.mu.Unlock() - - flags |= unix.NOTE_DELETE | unix.NOTE_RENAME - return w.addWatch(name, flags) + info, _ := w.watches.byPath(name) + return w.addWatch(name, info.dirFlags|unix.NOTE_DELETE|unix.NOTE_RENAME) } // watch file to mimic Linux inotify @@ -756,7 +696,7 @@ func (w *Watcher) internalWatch(name string, fi os.FileInfo) (string, error) { } // Register events with the queue. -func (w *Watcher) register(fds []int, flags int, fflags uint32) error { +func (w *kqueue) register(fds []int, flags int, fflags uint32) error { changes := make([]unix.Kevent_t, len(fds)) for i, fd := range fds { // SetKevent converts int to the platform-specific types. @@ -773,10 +713,21 @@ func (w *Watcher) register(fds []int, flags int, fflags uint32) error { } // read retrieves pending events, or waits until an event occurs. -func (w *Watcher) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) { +func (w *kqueue) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) { n, err := unix.Kevent(w.kq, nil, events, nil) if err != nil { return nil, err } return events[0:n], nil } + +func (w *kqueue) xSupports(op Op) bool { + if runtime.GOOS == "freebsd" { + //return true // Supports everything. + } + if op.Has(xUnportableOpen) || op.Has(xUnportableRead) || + op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) { + return false + } + return true +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_other.go b/vendor/github.com/fsnotify/fsnotify/backend_other.go index d34a23c015f..5eb5dbc66f2 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_other.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_other.go @@ -1,205 +1,23 @@ //go:build appengine || (!darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows) -// +build appengine !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows - -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh package fsnotify import "errors" -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. 
-// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). 
- // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. +type other struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error } -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { +func newBackend(ev chan Event, errs chan error) (backend, error) { return nil, errors.New("fsnotify not supported on the current platform") } - -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { return NewWatcher() } - -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { return nil } - -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { return nil } - -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return nil } - -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. 
-// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { return nil } - -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { return nil } +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { + return newBackend(ev, errs) +} +func (w *other) Close() error { return nil } +func (w *other) WatchList() []string { return nil } +func (w *other) Add(name string) error { return nil } +func (w *other) AddWith(name string, opts ...addOpt) error { return nil } +func (w *other) Remove(name string) error { return nil } +func (w *other) xSupports(op Op) bool { return false } diff --git a/vendor/github.com/fsnotify/fsnotify/backend_windows.go b/vendor/github.com/fsnotify/fsnotify/backend_windows.go index 9bc91e5d613..c54a6308383 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_windows.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_windows.go @@ -1,12 +1,8 @@ //go:build windows -// +build windows // Windows backend based on ReadDirectoryChangesW() // // https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-readdirectorychangesw -// -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh package fsnotify @@ -19,123 +15,15 @@ import ( "runtime" "strings" "sync" + "time" "unsafe" + "github.com/fsnotify/fsnotify/internal" "golang.org/x/sys/windows" ) -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. 
-// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. +type readDirChangesW struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error port windows.Handle // Handle to completion port @@ -147,48 +35,40 @@ type Watcher struct { closed bool // Set to true when Close() is first called } -// NewWatcher creates a new Watcher. 
-func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(50) +func newBackend(ev chan Event, errs chan error) (backend, error) { + return newBufferedBackend(50, ev, errs) } -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { port, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0) if err != nil { return nil, os.NewSyscallError("CreateIoCompletionPort", err) } - w := &Watcher{ + w := &readDirChangesW{ + Events: ev, + Errors: errs, port: port, watches: make(watchMap), input: make(chan *input, 1), - Events: make(chan Event, sz), - Errors: make(chan error), quit: make(chan chan<- error, 1), } go w.readEvents() return w, nil } -func (w *Watcher) isClosed() bool { +func (w *readDirChangesW) isClosed() bool { w.mu.Lock() defer w.mu.Unlock() return w.closed } -func (w *Watcher) sendEvent(name string, mask uint64) bool { +func (w *readDirChangesW) sendEvent(name, renamedFrom string, mask uint64) bool { if mask == 0 { return false } event := w.newEvent(name, uint32(mask)) + event.renamedFrom = renamedFrom select { case ch := <-w.quit: w.quit <- ch @@ -198,17 +78,19 @@ func (w *Watcher) sendEvent(name string, mask uint64) bool { } // Returns true if the error was sent, or false if watcher is closed. -func (w *Watcher) sendError(err error) bool { +func (w *readDirChangesW) sendError(err error) bool { + if err == nil { + return true + } select { case w.Errors <- err: return true case <-w.quit: + return false } - return false } -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { +func (w *readDirChangesW) Close() error { if w.isClosed() { return nil } @@ -226,57 +108,21 @@ func (w *Watcher) Close() error { return <-ch } -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. 
-// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } +func (w *readDirChangesW) Add(name string) error { return w.AddWith(name) } -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { +func (w *readDirChangesW) AddWith(name string, opts ...addOpt) error { if w.isClosed() { return ErrClosed } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), filepath.ToSlash(name)) + } with := getOptions(opts...) + if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } if with.bufsize < 4096 { return fmt.Errorf("fsnotify.WithBufferSize: buffer size cannot be smaller than 4096 bytes") } @@ -295,18 +141,14 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error { return <-in.reply } -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { +func (w *readDirChangesW) Remove(name string) error { if w.isClosed() { return nil } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), filepath.ToSlash(name)) + } in := &input{ op: opRemoveWatch, @@ -320,11 +162,7 @@ func (w *Watcher) Remove(name string) error { return <-in.reply } -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. 
-func (w *Watcher) WatchList() []string { +func (w *readDirChangesW) WatchList() []string { if w.isClosed() { return nil } @@ -335,7 +173,13 @@ func (w *Watcher) WatchList() []string { entries := make([]string, 0, len(w.watches)) for _, entry := range w.watches { for _, watchEntry := range entry { - entries = append(entries, watchEntry.path) + for name := range watchEntry.names { + entries = append(entries, filepath.Join(watchEntry.path, name)) + } + // the directory itself is being watched + if watchEntry.mask != 0 { + entries = append(entries, watchEntry.path) + } } } @@ -361,7 +205,7 @@ const ( sysFSIGNORED = 0x8000 ) -func (w *Watcher) newEvent(name string, mask uint32) Event { +func (w *readDirChangesW) newEvent(name string, mask uint32) Event { e := Event{Name: name} if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO { e.Op |= Create @@ -417,7 +261,7 @@ type ( watchMap map[uint32]indexMap ) -func (w *Watcher) wakeupReader() error { +func (w *readDirChangesW) wakeupReader() error { err := windows.PostQueuedCompletionStatus(w.port, 0, 0, nil) if err != nil { return os.NewSyscallError("PostQueuedCompletionStatus", err) @@ -425,7 +269,7 @@ func (w *Watcher) wakeupReader() error { return nil } -func (w *Watcher) getDir(pathname string) (dir string, err error) { +func (w *readDirChangesW) getDir(pathname string) (dir string, err error) { attr, err := windows.GetFileAttributes(windows.StringToUTF16Ptr(pathname)) if err != nil { return "", os.NewSyscallError("GetFileAttributes", err) @@ -439,7 +283,7 @@ func (w *Watcher) getDir(pathname string) (dir string, err error) { return } -func (w *Watcher) getIno(path string) (ino *inode, err error) { +func (w *readDirChangesW) getIno(path string) (ino *inode, err error) { h, err := windows.CreateFile(windows.StringToUTF16Ptr(path), windows.FILE_LIST_DIRECTORY, windows.FILE_SHARE_READ|windows.FILE_SHARE_WRITE|windows.FILE_SHARE_DELETE, @@ -482,9 +326,8 @@ func (m watchMap) set(ino *inode, watch *watch) { } // Must run within the I/O thread. -func (w *Watcher) addWatch(pathname string, flags uint64, bufsize int) error { - //pathname, recurse := recursivePath(pathname) - recurse := false +func (w *readDirChangesW) addWatch(pathname string, flags uint64, bufsize int) error { + pathname, recurse := recursivePath(pathname) dir, err := w.getDir(pathname) if err != nil { @@ -538,7 +381,7 @@ func (w *Watcher) addWatch(pathname string, flags uint64, bufsize int) error { } // Must run within the I/O thread. -func (w *Watcher) remWatch(pathname string) error { +func (w *readDirChangesW) remWatch(pathname string) error { pathname, recurse := recursivePath(pathname) dir, err := w.getDir(pathname) @@ -566,11 +409,11 @@ func (w *Watcher) remWatch(pathname string) error { return fmt.Errorf("%w: %s", ErrNonExistentWatch, pathname) } if pathname == dir { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + w.sendEvent(watch.path, "", watch.mask&sysFSIGNORED) watch.mask = 0 } else { name := filepath.Base(pathname) - w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED) + w.sendEvent(filepath.Join(watch.path, name), "", watch.names[name]&sysFSIGNORED) delete(watch.names, name) } @@ -578,23 +421,23 @@ func (w *Watcher) remWatch(pathname string) error { } // Must run within the I/O thread. 
-func (w *Watcher) deleteWatch(watch *watch) { +func (w *readDirChangesW) deleteWatch(watch *watch) { for name, mask := range watch.names { if mask&provisional == 0 { - w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED) + w.sendEvent(filepath.Join(watch.path, name), "", mask&sysFSIGNORED) } delete(watch.names, name) } if watch.mask != 0 { if watch.mask&provisional == 0 { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + w.sendEvent(watch.path, "", watch.mask&sysFSIGNORED) } watch.mask = 0 } } // Must run within the I/O thread. -func (w *Watcher) startRead(watch *watch) error { +func (w *readDirChangesW) startRead(watch *watch) error { err := windows.CancelIo(watch.ino.handle) if err != nil { w.sendError(os.NewSyscallError("CancelIo", err)) @@ -624,7 +467,7 @@ func (w *Watcher) startRead(watch *watch) error { err := os.NewSyscallError("ReadDirectoryChanges", rdErr) if rdErr == windows.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { // Watched directory was probably removed - w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) + w.sendEvent(watch.path, "", watch.mask&sysFSDELETESELF) err = nil } w.deleteWatch(watch) @@ -637,7 +480,7 @@ func (w *Watcher) startRead(watch *watch) error { // readEvents reads from the I/O completion port, converts the // received events into Event objects and sends them via the Events channel. // Entry point to the I/O thread. -func (w *Watcher) readEvents() { +func (w *readDirChangesW) readEvents() { var ( n uint32 key uintptr @@ -700,7 +543,7 @@ func (w *Watcher) readEvents() { } case windows.ERROR_ACCESS_DENIED: // Watched directory was probably removed - w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) + w.sendEvent(watch.path, "", watch.mask&sysFSDELETESELF) w.deleteWatch(watch) w.startRead(watch) continue @@ -733,6 +576,10 @@ func (w *Watcher) readEvents() { name := windows.UTF16ToString(buf) fullname := filepath.Join(watch.path, name) + if debug { + internal.Debug(fullname, raw.Action) + } + var mask uint64 switch raw.Action { case windows.FILE_ACTION_REMOVED: @@ -761,21 +608,22 @@ func (w *Watcher) readEvents() { } } - sendNameEvent := func() { - w.sendEvent(fullname, watch.names[name]&mask) - } if raw.Action != windows.FILE_ACTION_RENAMED_NEW_NAME { - sendNameEvent() + w.sendEvent(fullname, "", watch.names[name]&mask) } if raw.Action == windows.FILE_ACTION_REMOVED { - w.sendEvent(fullname, watch.names[name]&sysFSIGNORED) + w.sendEvent(fullname, "", watch.names[name]&sysFSIGNORED) delete(watch.names, name) } - w.sendEvent(fullname, watch.mask&w.toFSnotifyFlags(raw.Action)) + if watch.rename != "" && raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME { + w.sendEvent(fullname, filepath.Join(watch.path, watch.rename), watch.mask&w.toFSnotifyFlags(raw.Action)) + } else { + w.sendEvent(fullname, "", watch.mask&w.toFSnotifyFlags(raw.Action)) + } + if raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME { - fullname = filepath.Join(watch.path, watch.rename) - sendNameEvent() + w.sendEvent(filepath.Join(watch.path, watch.rename), "", watch.names[name]&mask) } // Move to the next event in the buffer @@ -787,8 +635,7 @@ func (w *Watcher) readEvents() { // Error! 
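+			// The reported offset ran past the number of bytes the kernel
+			// returned, so the buffer was too small and events were likely
+			// dropped.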
if offset >= n { //lint:ignore ST1005 Windows should be capitalized - w.sendError(errors.New( - "Windows system assumed buffer larger than it is, events have likely been missed")) + w.sendError(errors.New("Windows system assumed buffer larger than it is, events have likely been missed")) break } } @@ -799,7 +646,7 @@ func (w *Watcher) readEvents() { } } -func (w *Watcher) toWindowsFlags(mask uint64) uint32 { +func (w *readDirChangesW) toWindowsFlags(mask uint64) uint32 { var m uint32 if mask&sysFSMODIFY != 0 { m |= windows.FILE_NOTIFY_CHANGE_LAST_WRITE @@ -810,7 +657,7 @@ func (w *Watcher) toWindowsFlags(mask uint64) uint32 { return m } -func (w *Watcher) toFSnotifyFlags(action uint32) uint64 { +func (w *readDirChangesW) toFSnotifyFlags(action uint32) uint64 { switch action { case windows.FILE_ACTION_ADDED: return sysFSCREATE @@ -825,3 +672,11 @@ func (w *Watcher) toFSnotifyFlags(action uint32) uint64 { } return 0 } + +func (w *readDirChangesW) xSupports(op Op) bool { + if op.Has(xUnportableOpen) || op.Has(xUnportableRead) || + op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) { + return false + } + return true +} diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go index 24c99cc4999..0760efe9160 100644 --- a/vendor/github.com/fsnotify/fsnotify/fsnotify.go +++ b/vendor/github.com/fsnotify/fsnotify/fsnotify.go @@ -3,19 +3,146 @@ // // Currently supported systems: // -// Linux 2.6.32+ via inotify -// BSD, macOS via kqueue -// Windows via ReadDirectoryChangesW -// illumos via FEN +// - Linux via inotify +// - BSD, macOS via kqueue +// - Windows via ReadDirectoryChangesW +// - illumos via FEN +// +// # FSNOTIFY_DEBUG +// +// Set the FSNOTIFY_DEBUG environment variable to "1" to print debug messages to +// stderr. This can be useful to track down some problems, especially in cases +// where fsnotify is used as an indirect dependency. +// +// Every event will be printed as soon as there's something useful to print, +// with as little processing from fsnotify. +// +// Example output: +// +// FSNOTIFY_DEBUG: 11:34:23.633087586 256:IN_CREATE → "/tmp/file-1" +// FSNOTIFY_DEBUG: 11:34:23.633202319 4:IN_ATTRIB → "/tmp/file-1" +// FSNOTIFY_DEBUG: 11:34:28.989728764 512:IN_DELETE → "/tmp/file-1" package fsnotify import ( "errors" "fmt" + "os" "path/filepath" "strings" ) +// Watcher watches a set of paths, delivering events on a channel. +// +// A watcher should not be copied (e.g. pass it by pointer, rather than by +// value). +// +// # Linux notes +// +// When a file is removed a Remove event won't be emitted until all file +// descriptors are closed, and deletes will always emit a Chmod. For example: +// +// fp := os.Open("file") +// os.Remove("file") // Triggers Chmod +// fp.Close() // Triggers Remove +// +// This is the event that inotify sends, so not much can be changed about this. +// +// The fs.inotify.max_user_watches sysctl variable specifies the upper limit +// for the number of watches per user, and fs.inotify.max_user_instances +// specifies the maximum number of inotify instances per user. Every Watcher you +// create is an "instance", and every path you add is a "watch". 
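+//
+// As a worked example: two Watchers with three added paths each count as two
+// instances and six watches against these limits.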
+// +// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and +// /proc/sys/fs/inotify/max_user_instances +// +// To increase them you can use sysctl or write the value to the /proc file: +// +// # Default values on Linux 5.18 +// sysctl fs.inotify.max_user_watches=124983 +// sysctl fs.inotify.max_user_instances=128 +// +// To make the changes persist on reboot edit /etc/sysctl.conf or +// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check +// your distro's documentation): +// +// fs.inotify.max_user_watches=124983 +// fs.inotify.max_user_instances=128 +// +// Reaching the limit will result in a "no space left on device" or "too many open +// files" error. +// +// # kqueue notes (macOS, BSD) +// +// kqueue requires opening a file descriptor for every file that's being watched, +// so if you're watching a directory with five files then that's six file +// descriptors. You will run into your system's "max open files" limit faster on +// these platforms. +// +// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to +// control the maximum number of open files, as well as /etc/login.conf on BSD +// systems. +// +// # Windows notes +// +// Paths can be added as "C:\\path\\to\\dir", but forward slashes +// ("C:/path/to/dir") will also work. +// +// When a watched directory is removed it will always send an event for the +// directory itself, but may not send events for all files in that directory. +// Sometimes it will send events for all files, sometimes it will send no +// events, and often only for some files. +// +// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest +// value that is guaranteed to work with SMB filesystems. If you have many +// events in quick succession this may not be enough, and you will have to use +// [WithBufferSize] to increase the value. +type Watcher struct { + b backend + + // Events sends the filesystem change events. + // + // fsnotify can send the following events; a "path" here can refer to a + // file, directory, symbolic link, or special file like a FIFO. + // + // fsnotify.Create A new path was created; this may be followed by one + // or more Write events if data also gets written to a + // file. + // + // fsnotify.Remove A path was removed. + // + // fsnotify.Rename A path was renamed. A rename is always sent with the + // old path as Event.Name, and a Create event will be + // sent with the new name. Renames are only sent for + // paths that are currently watched; e.g. moving an + // unmonitored file into a monitored directory will + // show up as just a Create. Similarly, renaming a file + // to outside a monitored directory will show up as + // only a Rename. + // + // fsnotify.Write A file or named pipe was written to. A Truncate will + // also trigger a Write. A single "write action" + // initiated by the user may show up as one or multiple + // writes, depending on when the system syncs things to + // disk. For example when compiling a large Go program + // you may get hundreds of Write events, and you may + // want to wait until you've stopped receiving them + // (see the dedup example in cmd/fsnotify). + // + // Some systems may send a Write event for directories + // when the directory content changes. + // + // fsnotify.Chmod Attributes were changed. On Linux this is also sent + // when a file is removed (or more accurately, when a + // link to an inode is removed). On kqueue it's sent + // when a file is truncated. On Windows it's never + // sent.
+ Events chan Event + + // Errors sends any errors. + Errors chan error +} + // Event represents a file system notification. type Event struct { // Path to the file or directory. @@ -30,6 +157,16 @@ type Event struct { // This is a bitmask and some systems may send multiple operations at once. // Use the Event.Has() method instead of comparing with ==. Op Op + + // Create events will have this set to the old path if it's a rename. This + // only works when both the source and destination are watched. It's not + // reliable when watching individual files, only directories. + // + // For example "mv /tmp/file /tmp/rename" will emit: + // + // Event{Op: Rename, Name: "/tmp/file"} + // Event{Op: Create, Name: "/tmp/rename", RenamedFrom: "/tmp/file"} + renamedFrom string } // Op describes a set of file operations. @@ -50,7 +187,7 @@ const ( // example "remove to trash" is often a rename). Remove - // The path was renamed to something else; any watched on it will be + // The path was renamed to something else; any watches on it will be // removed. Rename @@ -60,15 +197,155 @@ const ( // get triggered very frequently by some software. For example, Spotlight // indexing on macOS, anti-virus software, backup software, etc. Chmod + + // File descriptor was opened. + // + // Only works on Linux and FreeBSD. + xUnportableOpen + + // File was read from. + // + // Only works on Linux and FreeBSD. + xUnportableRead + + // File opened for writing was closed. + // + // Only works on Linux and FreeBSD. + // + // The advantage of using this over Write is that it's more reliable than + // waiting for Write events to stop. It's also faster (if you're not + // listening to Write events): copying a file of a few GB can easily + // generate tens of thousands of Write events in a short span of time. + xUnportableCloseWrite + + // File opened for reading was closed. + // + // Only works on Linux and FreeBSD. + xUnportableCloseRead ) -// Common errors that can be reported. var ( + // ErrNonExistentWatch is used when Remove() is called on a path that's not + // added. ErrNonExistentWatch = errors.New("fsnotify: can't remove non-existent watch") - ErrEventOverflow = errors.New("fsnotify: queue or buffer overflow") - ErrClosed = errors.New("fsnotify: watcher already closed") + + // ErrClosed is used when trying to operate on a closed Watcher. + ErrClosed = errors.New("fsnotify: watcher already closed") + + // ErrEventOverflow is reported from the Errors channel when there are too + // many events: + // + // - inotify: inotify returns IN_Q_OVERFLOW – because there are too + // many queued events (the fs.inotify.max_queued_events + // sysctl can be used to increase this). + // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. + // - kqueue, fen: Not used. + ErrEventOverflow = errors.New("fsnotify: queue or buffer overflow") + + // ErrUnsupported is returned by AddWith() when WithOps() specified an + // Unportable event that's not supported on this platform. + xErrUnsupported = errors.New("fsnotify: not supported with this backend") ) +// NewWatcher creates a new Watcher. +func NewWatcher() (*Watcher, error) { + ev, errs := make(chan Event), make(chan error) + b, err := newBackend(ev, errs) + if err != nil { + return nil, err + } + return &Watcher{b: b, Events: ev, Errors: errs}, nil +} + +// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events +// channel. 
+// +// The main use case for this is situations with a very large number of events +// where the kernel buffer size can't be increased (e.g. due to lack of +// permissions). An unbuffered Watcher will perform better for almost all use +// cases, and whenever possible you will be better off increasing the kernel +// buffers instead of adding a large userspace buffer. +func NewBufferedWatcher(sz uint) (*Watcher, error) { + ev, errs := make(chan Event), make(chan error) + b, err := newBufferedBackend(sz, ev, errs) + if err != nil { + return nil, err + } + return &Watcher{b: b, Events: ev, Errors: errs}, nil +} + +// Add starts monitoring the path for changes. +// +// A path can only be watched once; watching it more than once is a no-op and will +// not return an error. Paths that do not yet exist on the filesystem cannot be +// watched. +// +// A watch will be automatically removed if the watched path is deleted or +// renamed. The exception is the Windows backend, which doesn't remove the +// watcher on renames. +// +// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special +// filesystems (/proc, /sys, etc.) generally don't work. +// +// Returns [ErrClosed] if [Watcher.Close] was called. +// +// See [Watcher.AddWith] for a version that allows adding options. +// +// # Watching directories +// +// All files in a directory are monitored, including new files that are created +// after the watcher is started. Subdirectories are not watched (i.e. it's +// non-recursive). +// +// # Watching files +// +// Watching individual files (rather than directories) is generally not +// recommended as many programs (especially editors) update files atomically: they +// write to a temporary file which is then moved to the destination, +// overwriting the original (or some variant thereof); the upshot is that a +// power failure or crash won't leave a half-written file. The watch on the +// original file is now lost, as that path no longer exists. +// +// Watch the parent directory and use Event.Name to filter out files you're not +// interested in. There is an example of this in cmd/fsnotify/file.go. +func (w *Watcher) Add(path string) error { return w.b.Add(path) } + +// AddWith is like [Watcher.Add], but allows adding options. When using Add() +// the defaults described below are used. +// +// Possible options are: +// +// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on +// other platforms. The default is 64K (65536 bytes). +func (w *Watcher) AddWith(path string, opts ...addOpt) error { return w.b.AddWith(path, opts...) } + +// Remove stops monitoring the path for changes. +// +// Directories are always removed non-recursively. For example, if you added +// /tmp/dir and /tmp/dir/subdir then you will need to remove both. +// +// Removing a path that has not yet been added returns [ErrNonExistentWatch]. +// +// Returns nil if [Watcher.Close] was called. +func (w *Watcher) Remove(path string) error { return w.b.Remove(path) } + +// Close removes all watches and closes the Events channel. +func (w *Watcher) Close() error { return w.b.Close() } + +// WatchList returns all paths explicitly added with [Watcher.Add] (and not +// yet removed). +// +// Returns nil if [Watcher.Close] was called. +func (w *Watcher) WatchList() []string { return w.b.WatchList() } + +// Supports reports if all the listed operations are supported by this platform. +// +// Create, Write, Remove, Rename, and Chmod are always supported.
It can only +// return false for an Op starting with Unportable. +func (w *Watcher) xSupports(op Op) bool { return w.b.xSupports(op) } + func (o Op) String() string { var b strings.Builder if o.Has(Create) { @@ -80,6 +357,18 @@ func (o Op) String() string { if o.Has(Write) { b.WriteString("|WRITE") } + if o.Has(xUnportableOpen) { + b.WriteString("|OPEN") + } + if o.Has(xUnportableRead) { + b.WriteString("|READ") + } + if o.Has(xUnportableCloseWrite) { + b.WriteString("|CLOSE_WRITE") + } + if o.Has(xUnportableCloseRead) { + b.WriteString("|CLOSE_READ") + } if o.Has(Rename) { b.WriteString("|RENAME") } @@ -100,24 +389,48 @@ func (e Event) Has(op Op) bool { return e.Op.Has(op) } // String returns a string representation of the event with its path. func (e Event) String() string { + if e.renamedFrom != "" { + return fmt.Sprintf("%-13s %q ← %q", e.Op.String(), e.Name, e.renamedFrom) + } return fmt.Sprintf("%-13s %q", e.Op.String(), e.Name) } type ( + backend interface { + Add(string) error + AddWith(string, ...addOpt) error + Remove(string) error + WatchList() []string + Close() error + xSupports(Op) bool + } addOpt func(opt *withOpts) withOpts struct { - bufsize int + bufsize int + op Op + noFollow bool + sendCreate bool } ) +var debug = func() bool { + // Check for exactly "1" (rather than mere existence) so we can add + // options/flags in the future. I don't know if we ever want that, but it's + // nice to leave the option open. + return os.Getenv("FSNOTIFY_DEBUG") == "1" +}() + var defaultOpts = withOpts{ bufsize: 65536, // 64K + op: Create | Write | Remove | Rename | Chmod, } func getOptions(opts ...addOpt) withOpts { with := defaultOpts for _, o := range opts { - o(&with) + if o != nil { + o(&with) + } } return with } @@ -136,9 +449,44 @@ func WithBufferSize(bytes int) addOpt { return func(opt *withOpts) { opt.bufsize = bytes } } +// WithOps sets which operations to listen for. The default is [Create], +// [Write], [Remove], [Rename], and [Chmod]. +// +// Excluding operations you're not interested in can save quite a bit of CPU +// time; in some use cases there may be hundreds of thousands of useless Write +// or Chmod operations per second. +// +// This can also be used to add unportable operations not supported by all +// platforms; unportable operations all start with "Unportable": +// [UnportableOpen], [UnportableRead], [UnportableCloseWrite], and +// [UnportableCloseRead]. +// +// AddWith returns an error when using an unportable operation that's not +// supported. Use [Watcher.Support] to check for support. +func withOps(op Op) addOpt { + return func(opt *withOpts) { opt.op = op } +} + +// WithNoFollow disables following symlinks, so the symlinks themselves are +// watched. +func withNoFollow() addOpt { + return func(opt *withOpts) { opt.noFollow = true } +} + +// "Internal" option for recursive watches on inotify. +func withCreate() addOpt { + return func(opt *withOpts) { opt.sendCreate = true } +} + +var enableRecurse = false + // Check if this path is recursive (ends with "/..." or "\..."), and return the // path with the /... stripped. func recursivePath(path string) (string, bool) { + path = filepath.Clean(path) + if !enableRecurse { // Only enabled in tests for now. + return path, false + } if filepath.Base(path) == "..."
{ return filepath.Dir(path), true } diff --git a/vendor/github.com/fsnotify/fsnotify/internal/darwin.go b/vendor/github.com/fsnotify/fsnotify/internal/darwin.go new file mode 100644 index 00000000000..b0eab10090d --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/darwin.go @@ -0,0 +1,39 @@ +//go:build darwin + +package internal + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +var ( + SyscallEACCES = syscall.EACCES + UnixEACCES = unix.EACCES +) + +var maxfiles uint64 + +// Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/ +func SetRlimit() { + var l syscall.Rlimit + err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l) + if err == nil && l.Cur != l.Max { + l.Cur = l.Max + syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l) + } + maxfiles = l.Cur + + if n, err := syscall.SysctlUint32("kern.maxfiles"); err == nil && uint64(n) < maxfiles { + maxfiles = uint64(n) + } + + if n, err := syscall.SysctlUint32("kern.maxfilesperproc"); err == nil && uint64(n) < maxfiles { + maxfiles = uint64(n) + } +} + +func Maxfiles() uint64 { return maxfiles } +func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) } +func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, dev) } diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go new file mode 100644 index 00000000000..928319fb09a --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go @@ -0,0 +1,57 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ABSOLUTE", unix.NOTE_ABSOLUTE}, + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_BACKGROUND", unix.NOTE_BACKGROUND}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_CRITICAL", unix.NOTE_CRITICAL}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXITSTATUS", unix.NOTE_EXITSTATUS}, + {"NOTE_EXIT_CSERROR", unix.NOTE_EXIT_CSERROR}, + {"NOTE_EXIT_DECRYPTFAIL", unix.NOTE_EXIT_DECRYPTFAIL}, + {"NOTE_EXIT_DETAIL", unix.NOTE_EXIT_DETAIL}, + {"NOTE_EXIT_DETAIL_MASK", unix.NOTE_EXIT_DETAIL_MASK}, + {"NOTE_EXIT_MEMORY", unix.NOTE_EXIT_MEMORY}, + {"NOTE_EXIT_REPARENTED", unix.NOTE_EXIT_REPARENTED}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FFAND", unix.NOTE_FFAND}, + {"NOTE_FFCOPY", unix.NOTE_FFCOPY}, + {"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK}, + {"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK}, + {"NOTE_FFNOP", unix.NOTE_FFNOP}, + {"NOTE_FFOR", unix.NOTE_FFOR}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_FUNLOCK", unix.NOTE_FUNLOCK}, + {"NOTE_LEEWAY", unix.NOTE_LEEWAY}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_MACHTIME", unix.NOTE_MACHTIME}, + {"NOTE_MACH_CONTINUOUS_TIME", unix.NOTE_MACH_CONTINUOUS_TIME}, + {"NOTE_NONE", unix.NOTE_NONE}, + {"NOTE_NSECONDS", unix.NOTE_NSECONDS}, + {"NOTE_OOB", unix.NOTE_OOB}, + //{"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, -0x100000 (?!) 
+ {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_REAP", unix.NOTE_REAP}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_SECONDS", unix.NOTE_SECONDS}, + {"NOTE_SIGNAL", unix.NOTE_SIGNAL}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRIGGER", unix.NOTE_TRIGGER}, + {"NOTE_USECONDS", unix.NOTE_USECONDS}, + {"NOTE_VM_ERROR", unix.NOTE_VM_ERROR}, + {"NOTE_VM_PRESSURE", unix.NOTE_VM_PRESSURE}, + {"NOTE_VM_PRESSURE_SUDDEN_TERMINATE", unix.NOTE_VM_PRESSURE_SUDDEN_TERMINATE}, + {"NOTE_VM_PRESSURE_TERMINATE", unix.NOTE_VM_PRESSURE_TERMINATE}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go new file mode 100644 index 00000000000..3186b0c3491 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go @@ -0,0 +1,33 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FFAND", unix.NOTE_FFAND}, + {"NOTE_FFCOPY", unix.NOTE_FFCOPY}, + {"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK}, + {"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK}, + {"NOTE_FFNOP", unix.NOTE_FFNOP}, + {"NOTE_FFOR", unix.NOTE_FFOR}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_OOB", unix.NOTE_OOB}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRIGGER", unix.NOTE_TRIGGER}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go new file mode 100644 index 00000000000..f69fdb930f5 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go @@ -0,0 +1,42 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ABSTIME", unix.NOTE_ABSTIME}, + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_CLOSE", unix.NOTE_CLOSE}, + {"NOTE_CLOSE_WRITE", unix.NOTE_CLOSE_WRITE}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FFAND", unix.NOTE_FFAND}, + {"NOTE_FFCOPY", unix.NOTE_FFCOPY}, + {"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK}, + {"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK}, + {"NOTE_FFNOP", unix.NOTE_FFNOP}, + {"NOTE_FFOR", unix.NOTE_FFOR}, + {"NOTE_FILE_POLL", unix.NOTE_FILE_POLL}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_MSECONDS", unix.NOTE_MSECONDS}, + {"NOTE_NSECONDS", unix.NOTE_NSECONDS}, + {"NOTE_OPEN", unix.NOTE_OPEN}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_READ", unix.NOTE_READ}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_SECONDS", unix.NOTE_SECONDS}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRIGGER", unix.NOTE_TRIGGER}, + {"NOTE_USECONDS", unix.NOTE_USECONDS}, + 
{"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go new file mode 100644 index 00000000000..607e683bd73 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go @@ -0,0 +1,32 @@ +//go:build freebsd || openbsd || netbsd || dragonfly || darwin + +package internal + +import ( + "fmt" + "os" + "strings" + "time" + + "golang.org/x/sys/unix" +) + +func Debug(name string, kevent *unix.Kevent_t) { + mask := uint32(kevent.Fflags) + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %10d:%-60s → %q\n", + time.Now().Format("15:04:05.000000000"), mask, strings.Join(l, " | "), name) +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go new file mode 100644 index 00000000000..35c734be431 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go @@ -0,0 +1,56 @@ +package internal + +import ( + "fmt" + "os" + "strings" + "time" + + "golang.org/x/sys/unix" +) + +func Debug(name string, mask, cookie uint32) { + names := []struct { + n string + m uint32 + }{ + {"IN_ACCESS", unix.IN_ACCESS}, + {"IN_ATTRIB", unix.IN_ATTRIB}, + {"IN_CLOSE", unix.IN_CLOSE}, + {"IN_CLOSE_NOWRITE", unix.IN_CLOSE_NOWRITE}, + {"IN_CLOSE_WRITE", unix.IN_CLOSE_WRITE}, + {"IN_CREATE", unix.IN_CREATE}, + {"IN_DELETE", unix.IN_DELETE}, + {"IN_DELETE_SELF", unix.IN_DELETE_SELF}, + {"IN_IGNORED", unix.IN_IGNORED}, + {"IN_ISDIR", unix.IN_ISDIR}, + {"IN_MODIFY", unix.IN_MODIFY}, + {"IN_MOVE", unix.IN_MOVE}, + {"IN_MOVED_FROM", unix.IN_MOVED_FROM}, + {"IN_MOVED_TO", unix.IN_MOVED_TO}, + {"IN_MOVE_SELF", unix.IN_MOVE_SELF}, + {"IN_OPEN", unix.IN_OPEN}, + {"IN_Q_OVERFLOW", unix.IN_Q_OVERFLOW}, + {"IN_UNMOUNT", unix.IN_UNMOUNT}, + } + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + var c string + if cookie > 0 { + c = fmt.Sprintf("(cookie: %d) ", cookie) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %-30s → %s%q\n", + time.Now().Format("15:04:05.000000000"), strings.Join(l, "|"), c, name) +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go new file mode 100644 index 00000000000..e5b3b6f6943 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go @@ -0,0 +1,25 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go 
b/vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go new file mode 100644 index 00000000000..1dd455bc5a4 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go @@ -0,0 +1,28 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + // {"NOTE_CHANGE", unix.NOTE_CHANGE}, // Not on 386? + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EOF", unix.NOTE_EOF}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRUNCATE", unix.NOTE_TRUNCATE}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go new file mode 100644 index 00000000000..f1b2e73bd5b --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go @@ -0,0 +1,45 @@ +package internal + +import ( + "fmt" + "os" + "strings" + "time" + + "golang.org/x/sys/unix" +) + +func Debug(name string, mask int32) { + names := []struct { + n string + m int32 + }{ + {"FILE_ACCESS", unix.FILE_ACCESS}, + {"FILE_MODIFIED", unix.FILE_MODIFIED}, + {"FILE_ATTRIB", unix.FILE_ATTRIB}, + {"FILE_TRUNC", unix.FILE_TRUNC}, + {"FILE_NOFOLLOW", unix.FILE_NOFOLLOW}, + {"FILE_DELETE", unix.FILE_DELETE}, + {"FILE_RENAME_TO", unix.FILE_RENAME_TO}, + {"FILE_RENAME_FROM", unix.FILE_RENAME_FROM}, + {"UNMOUNTED", unix.UNMOUNTED}, + {"MOUNTEDOVER", unix.MOUNTEDOVER}, + {"FILE_EXCEPTION", unix.FILE_EXCEPTION}, + } + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %10d:%-30s → %q\n", + time.Now().Format("15:04:05.000000000"), mask, strings.Join(l, " | "), name) +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go new file mode 100644 index 00000000000..52bf4ce53b5 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go @@ -0,0 +1,40 @@ +package internal + +import ( + "fmt" + "os" + "path/filepath" + "strings" + "time" + + "golang.org/x/sys/windows" +) + +func Debug(name string, mask uint32) { + names := []struct { + n string + m uint32 + }{ + {"FILE_ACTION_ADDED", windows.FILE_ACTION_ADDED}, + {"FILE_ACTION_REMOVED", windows.FILE_ACTION_REMOVED}, + {"FILE_ACTION_MODIFIED", windows.FILE_ACTION_MODIFIED}, + {"FILE_ACTION_RENAMED_OLD_NAME", windows.FILE_ACTION_RENAMED_OLD_NAME}, + {"FILE_ACTION_RENAMED_NEW_NAME", windows.FILE_ACTION_RENAMED_NEW_NAME}, + } + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %-65s → %q\n", + time.Now().Format("15:04:05.000000000"), strings.Join(l, " | "), filepath.ToSlash(name)) +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go 
b/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go new file mode 100644 index 00000000000..547df1df84b --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go @@ -0,0 +1,31 @@ +//go:build freebsd + +package internal + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +var ( + SyscallEACCES = syscall.EACCES + UnixEACCES = unix.EACCES +) + +var maxfiles uint64 + +func SetRlimit() { + // Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/ + var l syscall.Rlimit + err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l) + if err == nil && l.Cur != l.Max { + l.Cur = l.Max + syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l) + } + maxfiles = uint64(l.Cur) +} + +func Maxfiles() uint64 { return maxfiles } +func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) } +func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, uint64(dev)) } diff --git a/vendor/github.com/fsnotify/fsnotify/internal/internal.go b/vendor/github.com/fsnotify/fsnotify/internal/internal.go new file mode 100644 index 00000000000..7daa45e19ee --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/internal.go @@ -0,0 +1,2 @@ +// Package internal contains some helpers. +package internal diff --git a/vendor/github.com/fsnotify/fsnotify/internal/unix.go b/vendor/github.com/fsnotify/fsnotify/internal/unix.go new file mode 100644 index 00000000000..30976ce9739 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/unix.go @@ -0,0 +1,31 @@ +//go:build !windows && !darwin && !freebsd + +package internal + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +var ( + SyscallEACCES = syscall.EACCES + UnixEACCES = unix.EACCES +) + +var maxfiles uint64 + +func SetRlimit() { + // Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/ + var l syscall.Rlimit + err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l) + if err == nil && l.Cur != l.Max { + l.Cur = l.Max + syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l) + } + maxfiles = uint64(l.Cur) +} + +func Maxfiles() uint64 { return maxfiles } +func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) } +func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, dev) } diff --git a/vendor/github.com/fsnotify/fsnotify/internal/unix2.go b/vendor/github.com/fsnotify/fsnotify/internal/unix2.go new file mode 100644 index 00000000000..37dfeddc289 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/unix2.go @@ -0,0 +1,7 @@ +//go:build !windows + +package internal + +func HasPrivilegesForSymlink() bool { + return true +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/windows.go b/vendor/github.com/fsnotify/fsnotify/internal/windows.go new file mode 100644 index 00000000000..a72c6495490 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/windows.go @@ -0,0 +1,41 @@ +//go:build windows + +package internal + +import ( + "errors" + + "golang.org/x/sys/windows" +) + +// Just a dummy. 
+var ( + SyscallEACCES = errors.New("dummy") + UnixEACCES = errors.New("dummy") +) + +func SetRlimit() {} +func Maxfiles() uint64 { return 1<<64 - 1 } +func Mkfifo(path string, mode uint32) error { return errors.New("no FIFOs on Windows") } +func Mknod(path string, mode uint32, dev int) error { return errors.New("no device nodes on Windows") } + +func HasPrivilegesForSymlink() bool { + var sid *windows.SID + err := windows.AllocateAndInitializeSid( + &windows.SECURITY_NT_AUTHORITY, + 2, + windows.SECURITY_BUILTIN_DOMAIN_RID, + windows.DOMAIN_ALIAS_RID_ADMINS, + 0, 0, 0, 0, 0, 0, + &sid) + if err != nil { + return false + } + defer windows.FreeSid(sid) + token := windows.Token(0) + member, err := token.IsMember(sid) + if err != nil { + return false + } + return member || token.IsElevated() +} diff --git a/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh b/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh deleted file mode 100644 index 99012ae6539..00000000000 --- a/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh +++ /dev/null @@ -1,259 +0,0 @@ -#!/usr/bin/env zsh -[ "${ZSH_VERSION:-}" = "" ] && echo >&2 "Only works with zsh" && exit 1 -setopt err_exit no_unset pipefail extended_glob - -# Simple script to update the godoc comments on all watchers so you don't need -# to update the same comment 5 times. - -watcher=$(</tmp/x - print -r -- $cmt >>/tmp/x - tail -n+$(( end + 1 )) $file >>/tmp/x - mv /tmp/x $file - done -} - -set-cmt '^type Watcher struct ' $watcher -set-cmt '^func NewWatcher(' $new -set-cmt '^func NewBufferedWatcher(' $newbuffered -set-cmt '^func (w \*Watcher) Add(' $add -set-cmt '^func (w \*Watcher) AddWith(' $addwith -set-cmt '^func (w \*Watcher) Remove(' $remove -set-cmt '^func (w \*Watcher) Close(' $close -set-cmt '^func (w \*Watcher) WatchList(' $watchlist -set-cmt '^[[:space:]]*Events *chan Event$' $events -set-cmt '^[[:space:]]*Errors *chan error$' $errors diff --git a/vendor/github.com/fsnotify/fsnotify/system_bsd.go b/vendor/github.com/fsnotify/fsnotify/system_bsd.go index 4322b0b8855..f65e8fe3edc 100644 --- a/vendor/github.com/fsnotify/fsnotify/system_bsd.go +++ b/vendor/github.com/fsnotify/fsnotify/system_bsd.go @@ -1,5 +1,4 @@ //go:build freebsd || openbsd || netbsd || dragonfly -// +build freebsd openbsd netbsd dragonfly package fsnotify diff --git a/vendor/github.com/fsnotify/fsnotify/system_darwin.go b/vendor/github.com/fsnotify/fsnotify/system_darwin.go index 5da5ffa78fe..a29fc7aab62 100644 --- a/vendor/github.com/fsnotify/fsnotify/system_darwin.go +++ b/vendor/github.com/fsnotify/fsnotify/system_darwin.go @@ -1,5 +1,4 @@ //go:build darwin -// +build darwin package fsnotify diff --git a/vendor/github.com/go-kit/kit/LICENSE b/vendor/github.com/go-kit/kit/LICENSE deleted file mode 100644 index 9d83342acdc..00000000000 --- a/vendor/github.com/go-kit/kit/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Peter Bourgon - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - diff --git a/vendor/github.com/go-kit/kit/metrics/README.md b/vendor/github.com/go-kit/kit/metrics/README.md deleted file mode 100644 index 5aa791a750d..00000000000 --- a/vendor/github.com/go-kit/kit/metrics/README.md +++ /dev/null @@ -1,98 +0,0 @@ -# package metrics - -`package metrics` provides a set of uniform interfaces for service instrumentation. -It has - [counters](http://prometheus.io/docs/concepts/metric_types/#counter), - [gauges](http://prometheus.io/docs/concepts/metric_types/#gauge), and - [histograms](http://prometheus.io/docs/concepts/metric_types/#histogram), -and provides adapters to popular metrics packages, like - [expvar](https://golang.org/pkg/expvar), - [StatsD](https://github.com/etsy/statsd), and - [Prometheus](https://prometheus.io). - -## Rationale - -Code instrumentation is absolutely essential to achieve - [observability](https://speakerdeck.com/mattheath/observability-in-micro-service-architectures) - into a distributed system. -Metrics and instrumentation tools have coalesced around a few well-defined idioms. -`package metrics` provides a common, minimal interface those idioms for service authors. - -## Usage - -A simple counter, exported via expvar. - -```go -import ( - "github.com/go-kit/kit/metrics" - "github.com/go-kit/kit/metrics/expvar" -) - -func main() { - var myCount metrics.Counter - myCount = expvar.NewCounter("my_count") - myCount.Add(1) -} -``` - -A histogram for request duration, - exported via a Prometheus summary with dynamically-computed quantiles. - -```go -import ( - "time" - - stdprometheus "github.com/prometheus/client_golang/prometheus" - - "github.com/go-kit/kit/metrics" - "github.com/go-kit/kit/metrics/prometheus" -) - -func main() { - var dur metrics.Histogram = prometheus.NewSummaryFrom(stdprometheus.SummaryOpts{ - Namespace: "myservice", - Subsystem: "api", - Name: "request_duration_seconds", - Help: "Total time spent serving requests.", - }, []string{}) - // ... -} - -func handleRequest(dur metrics.Histogram) { - defer func(begin time.Time) { dur.Observe(time.Since(begin).Seconds()) }(time.Now()) - // handle request -} -``` - -A gauge for the number of goroutines currently running, exported via StatsD. - -```go -import ( - "context" - "net" - "os" - "runtime" - "time" - - "github.com/go-kit/kit/metrics" - "github.com/go-kit/kit/metrics/statsd" -) - -func main() { - statsd := statsd.New("foo_svc.", log.NewNopLogger()) - report := time.NewTicker(5 * time.Second) - defer report.Stop() - go statsd.SendLoop(context.Background(), report.C, "tcp", "statsd.internal:8125") - goroutines := statsd.NewGauge("goroutine_count") - go exportGoroutines(goroutines) - // ... -} - -func exportGoroutines(g metrics.Gauge) { - for range time.Tick(time.Second) { - g.Set(float64(runtime.NumGoroutine())) - } -} -``` - -For more information, see [the package documentation](https://godoc.org/github.com/go-kit/kit/metrics). 
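The go-kit/kit/metrics package deleted here exposed Counter, Gauge, and Histogram interfaces over pluggable backends. For readers tracking the removal, here is a minimal sketch of equivalent instrumentation written directly against prometheus/client_golang; the metric names, namespace, and HTTP handler are illustrative assumptions, not code from this patch:

```go
package main

import (
	"net/http"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

var (
	// Counter: monotonically increasing, the direct analogue of
	// go-kit's metrics.Counter backed by Prometheus.
	requests = promauto.NewCounterVec(prometheus.CounterOpts{
		Namespace: "myservice",
		Name:      "requests_total",
		Help:      "Total number of received requests.",
	}, []string{"method"})

	// Histogram: unlike the deleted generic/expvar histograms, which
	// computed streaming quantiles, Prometheus histograms count
	// observations into fixed buckets.
	duration = promauto.NewHistogram(prometheus.HistogramOpts{
		Namespace: "myservice",
		Name:      "request_duration_seconds",
		Help:      "Time spent serving requests.",
		Buckets:   prometheus.DefBuckets,
	})
)

func handle(w http.ResponseWriter, r *http.Request) {
	// Observe the request duration on return, mirroring go-kit's
	// defer-Observe idiom.
	defer func(begin time.Time) {
		duration.Observe(time.Since(begin).Seconds())
	}(time.Now())
	requests.WithLabelValues(r.Method).Inc()
	w.WriteHeader(http.StatusNoContent)
}

func main() {
	http.HandleFunc("/", handle)
	http.Handle("/metrics", promhttp.Handler())
	http.ListenAndServe(":8080", nil)
}
```

Registering via promauto and scraping /metrics replaces both the go-kit interfaces and their expvar/Prometheus adapters in one step; the labels passed to WithLabelValues play the role of go-kit's With(labelValues...).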
diff --git a/vendor/github.com/go-kit/kit/metrics/doc.go b/vendor/github.com/go-kit/kit/metrics/doc.go deleted file mode 100644 index 25cda4f7c81..00000000000 --- a/vendor/github.com/go-kit/kit/metrics/doc.go +++ /dev/null @@ -1,97 +0,0 @@ -// Package metrics provides a framework for application instrumentation. It's -// primarily designed to help you get started with good and robust -// instrumentation, and to help you migrate from a less-capable system like -// Graphite to a more-capable system like Prometheus. If your organization has -// already standardized on an instrumentation system like Prometheus, and has no -// plans to change, it may make sense to use that system's instrumentation -// library directly. -// -// This package provides three core metric abstractions (Counter, Gauge, and -// Histogram) and implementations for almost all common instrumentation -// backends. Each metric has an observation method (Add, Set, or Observe, -// respectively) used to record values, and a With method to "scope" the -// observation by various parameters. For example, you might have a Histogram to -// record request durations, parameterized by the method that's being called. -// -// var requestDuration metrics.Histogram -// // ... -// requestDuration.With("method", "MyMethod").Observe(time.Since(begin)) -// -// This allows a single high-level metrics object (requestDuration) to work with -// many code paths somewhat dynamically. The concept of With is fully supported -// in some backends like Prometheus, and not supported in other backends like -// Graphite. So, With may be a no-op, depending on the concrete implementation -// you choose. Please check the implementation to know for sure. For -// implementations that don't provide With, it's necessary to fully parameterize -// each metric in the metric name, e.g. -// -// // Statsd -// c := statsd.NewCounter("request_duration_MyMethod_200") -// c.Add(1) -// -// // Prometheus -// c := prometheus.NewCounter(stdprometheus.CounterOpts{ -// Name: "request_duration", -// ... -// }, []string{"method", "status_code"}) -// c.With("method", "MyMethod", "status_code", strconv.Itoa(code)).Add(1) -// -// Usage -// -// Metrics are dependencies, and should be passed to the components that need -// them in the same way you'd construct and pass a database handle, or reference -// to another component. Metrics should *not* be created in the global scope. -// Instead, instantiate metrics in your func main, using whichever concrete -// implementation is appropriate for your organization. -// -// latency := prometheus.NewSummaryFrom(stdprometheus.SummaryOpts{ -// Namespace: "myteam", -// Subsystem: "foosvc", -// Name: "request_latency_seconds", -// Help: "Incoming request latency in seconds.", -// }, []string{"method", "status_code"}) -// -// Write your components to take the metrics they will use as parameters to -// their constructors. Use the interface types, not the concrete types. That is, -// -// // NewAPI takes metrics.Histogram, not *prometheus.Summary -// func NewAPI(s Store, logger log.Logger, latency metrics.Histogram) *API { -// // ... -// } -// -// func (a *API) ServeFoo(w http.ResponseWriter, r *http.Request) { -// begin := time.Now() -// // ... -// a.latency.Observe(time.Since(begin).Seconds()) -// } -// -// Finally, pass the metrics as dependencies when building your object graph. -// This should happen in func main, not in the global scope. 
-// -// api := NewAPI(store, logger, latency) -// http.ListenAndServe("/", api) -// -// Note that metrics are "write-only" interfaces. -// -// Implementation details -// -// All metrics are safe for concurrent use. Considerable design influence has -// been taken from https://github.com/codahale/metrics and -// https://prometheus.io. -// -// Each telemetry system has different semantics for label values, push vs. -// pull, support for histograms, etc. These properties influence the design of -// their respective packages. This table attempts to summarize the key points of -// distinction. -// -// SYSTEM DIM COUNTERS GAUGES HISTOGRAMS -// dogstatsd n batch, push-aggregate batch, push-aggregate native, batch, push-each -// statsd 1 batch, push-aggregate batch, push-aggregate native, batch, push-each -// graphite 1 batch, push-aggregate batch, push-aggregate synthetic, batch, push-aggregate -// expvar 1 atomic atomic synthetic, batch, in-place expose -// influx n custom custom custom -// prometheus n native native native -// pcp 1 native native native -// cloudwatch n batch push-aggregate batch push-aggregate synthetic, batch, push-aggregate -// -package metrics diff --git a/vendor/github.com/go-kit/kit/metrics/expvar/expvar.go b/vendor/github.com/go-kit/kit/metrics/expvar/expvar.go deleted file mode 100644 index ce6f3b836a0..00000000000 --- a/vendor/github.com/go-kit/kit/metrics/expvar/expvar.go +++ /dev/null @@ -1,94 +0,0 @@ -// Package expvar provides expvar backends for metrics. -// Label values are not supported. -package expvar - -import ( - "expvar" - "sync" - - "github.com/go-kit/kit/metrics" - "github.com/go-kit/kit/metrics/generic" -) - -// Counter implements the counter metric with an expvar float. -// Label values are not supported. -type Counter struct { - f *expvar.Float -} - -// NewCounter creates an expvar Float with the given name, and returns an object -// that implements the Counter interface. -func NewCounter(name string) *Counter { - return &Counter{ - f: expvar.NewFloat(name), - } -} - -// With is a no-op. -func (c *Counter) With(labelValues ...string) metrics.Counter { return c } - -// Add implements Counter. -func (c *Counter) Add(delta float64) { c.f.Add(delta) } - -// Gauge implements the gauge metric with an expvar float. -// Label values are not supported. -type Gauge struct { - f *expvar.Float -} - -// NewGauge creates an expvar Float with the given name, and returns an object -// that implements the Gauge interface. -func NewGauge(name string) *Gauge { - return &Gauge{ - f: expvar.NewFloat(name), - } -} - -// With is a no-op. -func (g *Gauge) With(labelValues ...string) metrics.Gauge { return g } - -// Set implements Gauge. -func (g *Gauge) Set(value float64) { g.f.Set(value) } - -// Add implements metrics.Gauge. -func (g *Gauge) Add(delta float64) { g.f.Add(delta) } - -// Histogram implements the histogram metric with a combination of the generic -// Histogram object and several expvar Floats, one for each of the 50th, 90th, -// 95th, and 99th quantiles of observed values, with the quantile attached to -// the name as a suffix. Label values are not supported. -type Histogram struct { - mtx sync.Mutex - h *generic.Histogram - p50 *expvar.Float - p90 *expvar.Float - p95 *expvar.Float - p99 *expvar.Float -} - -// NewHistogram returns a Histogram object with the given name and number of -// buckets in the underlying histogram object. 50 is a good default number of -// buckets. 
-func NewHistogram(name string, buckets int) *Histogram { - return &Histogram{ - h: generic.NewHistogram(name, buckets), - p50: expvar.NewFloat(name + ".p50"), - p90: expvar.NewFloat(name + ".p90"), - p95: expvar.NewFloat(name + ".p95"), - p99: expvar.NewFloat(name + ".p99"), - } -} - -// With is a no-op. -func (h *Histogram) With(labelValues ...string) metrics.Histogram { return h } - -// Observe implements Histogram. -func (h *Histogram) Observe(value float64) { - h.mtx.Lock() - defer h.mtx.Unlock() - h.h.Observe(value) - h.p50.Set(h.h.Quantile(0.50)) - h.p90.Set(h.h.Quantile(0.90)) - h.p95.Set(h.h.Quantile(0.95)) - h.p99.Set(h.h.Quantile(0.99)) -} diff --git a/vendor/github.com/go-kit/kit/metrics/generic/generic.go b/vendor/github.com/go-kit/kit/metrics/generic/generic.go deleted file mode 100644 index 39216c04f09..00000000000 --- a/vendor/github.com/go-kit/kit/metrics/generic/generic.go +++ /dev/null @@ -1,247 +0,0 @@ -// Package generic implements generic versions of each of the metric types. They -// can be embedded by other implementations, and converted to specific formats -// as necessary. -package generic - -import ( - "fmt" - "io" - "math" - "sync" - "sync/atomic" - - "github.com/VividCortex/gohistogram" - - "github.com/go-kit/kit/metrics" - "github.com/go-kit/kit/metrics/internal/lv" -) - -// Counter is an in-memory implementation of a Counter. -type Counter struct { - bits uint64 // bits has to be the first word in order to be 64-aligned on 32-bit - Name string - lvs lv.LabelValues -} - -// NewCounter returns a new, usable Counter. -func NewCounter(name string) *Counter { - return &Counter{ - Name: name, - } -} - -// With implements Counter. -func (c *Counter) With(labelValues ...string) metrics.Counter { - return &Counter{ - Name: c.Name, - bits: atomic.LoadUint64(&c.bits), - lvs: c.lvs.With(labelValues...), - } -} - -// Add implements Counter. -func (c *Counter) Add(delta float64) { - for { - var ( - old = atomic.LoadUint64(&c.bits) - newf = math.Float64frombits(old) + delta - new = math.Float64bits(newf) - ) - if atomic.CompareAndSwapUint64(&c.bits, old, new) { - break - } - } -} - -// Value returns the current value of the counter. -func (c *Counter) Value() float64 { - return math.Float64frombits(atomic.LoadUint64(&c.bits)) -} - -// ValueReset returns the current value of the counter, and resets it to zero. -// This is useful for metrics backends whose counter aggregations expect deltas, -// like Graphite. -func (c *Counter) ValueReset() float64 { - for { - var ( - old = atomic.LoadUint64(&c.bits) - newf = 0.0 - new = math.Float64bits(newf) - ) - if atomic.CompareAndSwapUint64(&c.bits, old, new) { - return math.Float64frombits(old) - } - } -} - -// LabelValues returns the set of label values attached to the counter. -func (c *Counter) LabelValues() []string { - return c.lvs -} - -// Gauge is an in-memory implementation of a Gauge. -type Gauge struct { - bits uint64 // bits has to be the first word in order to be 64-aligned on 32-bit - Name string - lvs lv.LabelValues -} - -// NewGauge returns a new, usable Gauge. -func NewGauge(name string) *Gauge { - return &Gauge{ - Name: name, - } -} - -// With implements Gauge. -func (g *Gauge) With(labelValues ...string) metrics.Gauge { - return &Gauge{ - Name: g.Name, - bits: atomic.LoadUint64(&g.bits), - lvs: g.lvs.With(labelValues...), - } -} - -// Set implements Gauge. -func (g *Gauge) Set(value float64) { - atomic.StoreUint64(&g.bits, math.Float64bits(value)) -} - -// Add implements metrics.Gauge. 
-func (g *Gauge) Add(delta float64) { - for { - var ( - old = atomic.LoadUint64(&g.bits) - newf = math.Float64frombits(old) + delta - new = math.Float64bits(newf) - ) - if atomic.CompareAndSwapUint64(&g.bits, old, new) { - break - } - } -} - -// Value returns the current value of the gauge. -func (g *Gauge) Value() float64 { - return math.Float64frombits(atomic.LoadUint64(&g.bits)) -} - -// LabelValues returns the set of label values attached to the gauge. -func (g *Gauge) LabelValues() []string { - return g.lvs -} - -// Histogram is an in-memory implementation of a streaming histogram, based on -// VividCortex/gohistogram. It dynamically computes quantiles, so it's not -// suitable for aggregation. -type Histogram struct { - Name string - lvs lv.LabelValues - h *safeHistogram -} - -// NewHistogram returns a numeric histogram based on VividCortex/gohistogram. A -// good default value for buckets is 50. -func NewHistogram(name string, buckets int) *Histogram { - return &Histogram{ - Name: name, - h: &safeHistogram{Histogram: gohistogram.NewHistogram(buckets)}, - } -} - -// With implements Histogram. -func (h *Histogram) With(labelValues ...string) metrics.Histogram { - return &Histogram{ - Name: h.Name, - lvs: h.lvs.With(labelValues...), - h: h.h, - } -} - -// Observe implements Histogram. -func (h *Histogram) Observe(value float64) { - h.h.Lock() - defer h.h.Unlock() - h.h.Add(value) -} - -// Quantile returns the value of the quantile q, 0.0 < q < 1.0. -func (h *Histogram) Quantile(q float64) float64 { - h.h.RLock() - defer h.h.RUnlock() - return h.h.Quantile(q) -} - -// LabelValues returns the set of label values attached to the histogram. -func (h *Histogram) LabelValues() []string { - return h.lvs -} - -// Print writes a string representation of the histogram to the passed writer. -// Useful for printing to a terminal. -func (h *Histogram) Print(w io.Writer) { - h.h.RLock() - defer h.h.RUnlock() - fmt.Fprint(w, h.h.String()) -} - -// safeHistogram exists as gohistogram.Histogram is not goroutine-safe. -type safeHistogram struct { - sync.RWMutex - gohistogram.Histogram -} - -// Bucket is a range in a histogram which aggregates observations. -type Bucket struct { - From, To, Count int64 -} - -// Quantile is a pair of a quantile (0..100) and its observed maximum value. -type Quantile struct { - Quantile int // 0..100 - Value int64 -} - -// SimpleHistogram is an in-memory implementation of a Histogram. It only tracks -// an approximate moving average, so is likely too naïve for many use cases. -type SimpleHistogram struct { - mtx sync.RWMutex - lvs lv.LabelValues - avg float64 - n uint64 -} - -// NewSimpleHistogram returns a SimpleHistogram, ready for observations. -func NewSimpleHistogram() *SimpleHistogram { - return &SimpleHistogram{} -} - -// With implements Histogram. -func (h *SimpleHistogram) With(labelValues ...string) metrics.Histogram { - return &SimpleHistogram{ - lvs: h.lvs.With(labelValues...), - avg: h.avg, - n: h.n, - } -} - -// Observe implements Histogram. -func (h *SimpleHistogram) Observe(value float64) { - h.mtx.Lock() - defer h.mtx.Unlock() - h.n++ - h.avg -= h.avg / float64(h.n) - h.avg += value / float64(h.n) -} - -// ApproximateMovingAverage returns the approximate moving average of observations. -func (h *SimpleHistogram) ApproximateMovingAverage() float64 { - h.mtx.RLock() - defer h.mtx.RUnlock() - return h.avg -} - -// LabelValues returns the set of label values attached to the histogram. 
-func (h *SimpleHistogram) LabelValues() []string { - return h.lvs -} diff --git a/vendor/github.com/go-kit/kit/metrics/internal/lv/labelvalues.go b/vendor/github.com/go-kit/kit/metrics/internal/lv/labelvalues.go deleted file mode 100644 index 8bb1ba09414..00000000000 --- a/vendor/github.com/go-kit/kit/metrics/internal/lv/labelvalues.go +++ /dev/null @@ -1,14 +0,0 @@ -package lv - -// LabelValues is a type alias that provides validation on its With method. -// Metrics may include it as a member to help them satisfy With semantics and -// save some code duplication. -type LabelValues []string - -// With validates the input, and returns a new aggregate labelValues. -func (lvs LabelValues) With(labelValues ...string) LabelValues { - if len(labelValues)%2 != 0 { - labelValues = append(labelValues, "unknown") - } - return append(lvs, labelValues...) -} diff --git a/vendor/github.com/go-kit/kit/metrics/internal/lv/space.go b/vendor/github.com/go-kit/kit/metrics/internal/lv/space.go deleted file mode 100644 index 371964a35ae..00000000000 --- a/vendor/github.com/go-kit/kit/metrics/internal/lv/space.go +++ /dev/null @@ -1,145 +0,0 @@ -package lv - -import "sync" - -// NewSpace returns an N-dimensional vector space. -func NewSpace() *Space { - return &Space{} -} - -// Space represents an N-dimensional vector space. Each name and unique label -// value pair establishes a new dimension and point within that dimension. Order -// matters, i.e. [a=1 b=2] identifies a different timeseries than [b=2 a=1]. -type Space struct { - mtx sync.RWMutex - nodes map[string]*node -} - -// Observe locates the time series identified by the name and label values in -// the vector space, and appends the value to the list of observations. -func (s *Space) Observe(name string, lvs LabelValues, value float64) { - s.nodeFor(name).observe(lvs, value) -} - -// Add locates the time series identified by the name and label values in -// the vector space, and appends the delta to the last value in the list of -// observations. -func (s *Space) Add(name string, lvs LabelValues, delta float64) { - s.nodeFor(name).add(lvs, delta) -} - -// Walk traverses the vector space and invokes fn for each non-empty time series -// which is encountered. Return false to abort the traversal. -func (s *Space) Walk(fn func(name string, lvs LabelValues, observations []float64) bool) { - s.mtx.RLock() - defer s.mtx.RUnlock() - for name, node := range s.nodes { - f := func(lvs LabelValues, observations []float64) bool { return fn(name, lvs, observations) } - if !node.walk(LabelValues{}, f) { - return - } - } -} - -// Reset empties the current space and returns a new Space with the old -// contents. Reset a Space to get an immutable copy suitable for walking. -func (s *Space) Reset() *Space { - s.mtx.Lock() - defer s.mtx.Unlock() - n := NewSpace() - n.nodes, s.nodes = s.nodes, n.nodes - return n -} - -func (s *Space) nodeFor(name string) *node { - s.mtx.Lock() - defer s.mtx.Unlock() - if s.nodes == nil { - s.nodes = map[string]*node{} - } - n, ok := s.nodes[name] - if !ok { - n = &node{} - s.nodes[name] = n - } - return n -} - -// node exists at a specific point in the N-dimensional vector space of all -// possible label values. The node collects observations and has child nodes -// with greater specificity. 
-type node struct { - mtx sync.RWMutex - observations []float64 - children map[pair]*node -} - -type pair struct{ label, value string } - -func (n *node) observe(lvs LabelValues, value float64) { - n.mtx.Lock() - defer n.mtx.Unlock() - if len(lvs) <= 0 { - n.observations = append(n.observations, value) - return - } - if len(lvs) < 2 { - panic("too few LabelValues; programmer error!") - } - head, tail := pair{lvs[0], lvs[1]}, lvs[2:] - if n.children == nil { - n.children = map[pair]*node{} - } - child, ok := n.children[head] - if !ok { - child = &node{} - n.children[head] = child - } - child.observe(tail, value) -} - -func (n *node) add(lvs LabelValues, delta float64) { - n.mtx.Lock() - defer n.mtx.Unlock() - if len(lvs) <= 0 { - var value float64 - if len(n.observations) > 0 { - value = last(n.observations) + delta - } else { - value = delta - } - n.observations = append(n.observations, value) - return - } - if len(lvs) < 2 { - panic("too few LabelValues; programmer error!") - } - head, tail := pair{lvs[0], lvs[1]}, lvs[2:] - if n.children == nil { - n.children = map[pair]*node{} - } - child, ok := n.children[head] - if !ok { - child = &node{} - n.children[head] = child - } - child.add(tail, delta) -} - -func (n *node) walk(lvs LabelValues, fn func(LabelValues, []float64) bool) bool { - n.mtx.RLock() - defer n.mtx.RUnlock() - if len(n.observations) > 0 && !fn(lvs, n.observations) { - return false - } - for p, child := range n.children { - if !child.walk(append(lvs, p.label, p.value), fn) { - return false - } - } - return true -} - -func last(a []float64) float64 { - return a[len(a)-1] -} diff --git a/vendor/github.com/go-kit/kit/metrics/metrics.go b/vendor/github.com/go-kit/kit/metrics/metrics.go deleted file mode 100644 index a7ba1b1fe3f..00000000000 --- a/vendor/github.com/go-kit/kit/metrics/metrics.go +++ /dev/null @@ -1,25 +0,0 @@ -package metrics - -// Counter describes a metric that accumulates values monotonically. -// An example of a counter is the number of received HTTP requests. -type Counter interface { - With(labelValues ...string) Counter - Add(delta float64) -} - -// Gauge describes a metric that takes specific values over time. -// An example of a gauge is the current depth of a job queue. -type Gauge interface { - With(labelValues ...string) Gauge - Set(value float64) - Add(delta float64) -} - -// Histogram describes a metric that takes repeated observations of the same -// kind of thing, and produces a statistical summary of those observations, -// typically expressed as quantiles or buckets. An example of a histogram is -// HTTP request latencies. -type Histogram interface { - With(labelValues ...string) Histogram - Observe(value float64) -} diff --git a/vendor/github.com/go-kit/kit/metrics/timer.go b/vendor/github.com/go-kit/kit/metrics/timer.go deleted file mode 100644 index e12d9cd5c49..00000000000 --- a/vendor/github.com/go-kit/kit/metrics/timer.go +++ /dev/null @@ -1,36 +0,0 @@ -package metrics - -import "time" - -// Timer acts as a stopwatch, sending observations to a wrapped histogram. -// It's a bit of helpful syntax sugar for h.Observe(time.Since(x)). -type Timer struct { - h Histogram - t time.Time - u time.Duration -} - -// NewTimer wraps the given histogram and records the current time. -func NewTimer(h Histogram) *Timer { - return &Timer{ - h: h, - t: time.Now(), - u: time.Second, - } -} - -// ObserveDuration captures the number of seconds since the timer was -// constructed, and forwards that observation to the histogram. 
-func (t *Timer) ObserveDuration() { - d := float64(time.Since(t.t).Nanoseconds()) / float64(t.u) - if d < 0 { - d = 0 - } - t.h.Observe(d) -} - -// Unit sets the unit of the float64 emitted by the timer. -// By default, the timer emits seconds. -func (t *Timer) Unit(u time.Duration) { - t.u = u -} diff --git a/vendor/github.com/go-viper/mapstructure/v2/CHANGELOG.md b/vendor/github.com/go-viper/mapstructure/v2/CHANGELOG.md index ae634d1cc08..afd44e5f5fc 100644 --- a/vendor/github.com/go-viper/mapstructure/v2/CHANGELOG.md +++ b/vendor/github.com/go-viper/mapstructure/v2/CHANGELOG.md @@ -1,3 +1,6 @@ +> [!WARNING] +> As of v2 of this library, the change log can be found in GitHub releases. + ## 1.5.1 * Wrap errors so they're compatible with `errors.Is` and `errors.As` [GH-282] diff --git a/vendor/github.com/go-viper/mapstructure/v2/README.md b/vendor/github.com/go-viper/mapstructure/v2/README.md index 2b28db89489..dd5ec69ddf7 100644 --- a/vendor/github.com/go-viper/mapstructure/v2/README.md +++ b/vendor/github.com/go-viper/mapstructure/v2/README.md @@ -19,6 +19,27 @@ structure. go get github.com/go-viper/mapstructure/v2 ``` +## Migrating from `github.com/mitchellh/mapstructure` + +[@mitchellh](https://github.com/mitchellh) announced his intent to archive some of his unmaintained projects (see [here](https://gist.github.com/mitchellh/90029601268e59a29e64e55bab1c5bdc) and [here](https://github.com/mitchellh/mapstructure/issues/349)). This repository has achieved the "blessed fork" status. + +You can migrate to this package by changing your import paths in your Go files to `github.com/go-viper/mapstructure/v2`. +The API is the same, so you don't need to change anything else. + +Here is a script that can help you with the migration: + +```shell +sed -i 's/github.com\/mitchellh\/mapstructure/github.com\/go-viper\/mapstructure\/v2/g' $(find . -type f -name '*.go') +``` + +If you need more time to migrate your code, that is absolutely fine. + +Some of the latest fixes are backported to the v1 release branch of this package, so you can use the Go modules `replace` directive in your `go.mod` until you are ready to migrate: + +``` +replace github.com/mitchellh/mapstructure => github.com/go-viper/mapstructure v1.6.0 +``` + ## Usage & Example For usage and examples see the [documentation](https://pkg.go.dev/mod/github.com/go-viper/mapstructure/v2).
diff --git a/vendor/github.com/go-viper/mapstructure/v2/decode_hooks.go b/vendor/github.com/go-viper/mapstructure/v2/decode_hooks.go index 840d6adce0e..1f3c69d4b8c 100644 --- a/vendor/github.com/go-viper/mapstructure/v2/decode_hooks.go +++ b/vendor/github.com/go-viper/mapstructure/v2/decode_hooks.go @@ -6,6 +6,7 @@ import ( "fmt" "net" "net/netip" + "net/url" "reflect" "strconv" "strings" @@ -36,6 +37,30 @@ func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc { return nil } +// cachedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns +// it into a closure to be used directly. +// If the type fails to convert, we return a closure that always errors, to keep the previous behaviour. +func cachedDecodeHook(raw DecodeHookFunc) func(from reflect.Value, to reflect.Value) (interface{}, error) { + switch f := typedDecodeHook(raw).(type) { + case DecodeHookFuncType: + return func(from reflect.Value, to reflect.Value) (interface{}, error) { + return f(from.Type(), to.Type(), from.Interface()) + } + case DecodeHookFuncKind: + return func(from reflect.Value, to reflect.Value) (interface{}, error) { + return f(from.Kind(), to.Kind(), from.Interface()) + } + case DecodeHookFuncValue: + return func(from reflect.Value, to reflect.Value) (interface{}, error) { + return f(from, to) + } + default: + return func(from reflect.Value, to reflect.Value) (interface{}, error) { + return nil, errors.New("invalid decode hook signature") + } + } +} + // DecodeHookExec executes the given decode hook. This should be used // since it'll naturally degrade to the older backwards compatible DecodeHookFunc // that took reflect.Kind instead of reflect.Type. @@ -61,13 +86,17 @@ func DecodeHookExec( // The composed funcs are called in order, with the result of the // previous transformation. func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc { + cached := make([]func(from reflect.Value, to reflect.Value) (interface{}, error), 0, len(fs)) + for _, f := range fs { + cached = append(cached, cachedDecodeHook(f)) + } return func(f reflect.Value, t reflect.Value) (interface{}, error) { var err error data := f.Interface() newFrom := f - for _, f1 := range fs { - data, err = DecodeHookExec(f1, newFrom, t) + for _, c := range cached { + data, err = c(newFrom, t) if err != nil { return nil, err } @@ -81,13 +110,17 @@ func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc { // OrComposeDecodeHookFunc executes all input hook functions until one of them returns no error. In that case its value is returned. // If all hooks return an error, OrComposeDecodeHookFunc returns an error concatenating all error messages. func OrComposeDecodeHookFunc(ff ...DecodeHookFunc) DecodeHookFunc { + cached := make([]func(from reflect.Value, to reflect.Value) (interface{}, error), 0, len(ff)) + for _, f := range ff { + cached = append(cached, cachedDecodeHook(f)) + } return func(a, b reflect.Value) (interface{}, error) { var allErrs string var out interface{} var err error - for _, f := range ff { - out, err = DecodeHookExec(f, a, b) + for _, c := range cached { + out, err = c(a, b) if err != nil { allErrs += err.Error() + "\n" continue @@ -144,6 +177,26 @@ func StringToTimeDurationHookFunc() DecodeHookFunc { } } +// StringToURLHookFunc returns a DecodeHookFunc that converts +// strings to *url.URL.
+func StringToURLHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}, + ) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(&url.URL{}) { + return data, nil + } + + // Convert it by parsing + return url.Parse(data.(string)) + } +} + // StringToIPHookFunc returns a DecodeHookFunc that converts // strings to net.IP func StringToIPHookFunc() DecodeHookFunc { @@ -332,3 +385,246 @@ func StringToNetIPAddrPortHookFunc() DecodeHookFunc { return netip.ParseAddrPort(data.(string)) } } + +// StringToBasicTypeHookFunc returns a DecodeHookFunc that converts +// strings to basic types. +// int8, uint8, int16, uint16, int32, uint32, int64, uint64, int, uint, float32, float64, bool, byte, rune, complex64, complex128 +func StringToBasicTypeHookFunc() DecodeHookFunc { + return ComposeDecodeHookFunc( + StringToInt8HookFunc(), + StringToUint8HookFunc(), + StringToInt16HookFunc(), + StringToUint16HookFunc(), + StringToInt32HookFunc(), + StringToUint32HookFunc(), + StringToInt64HookFunc(), + StringToUint64HookFunc(), + StringToIntHookFunc(), + StringToUintHookFunc(), + StringToFloat32HookFunc(), + StringToFloat64HookFunc(), + StringToBoolHookFunc(), + // byte and rune are aliases for uint8 and int32 respectively + // StringToByteHookFunc(), + // StringToRuneHookFunc(), + StringToComplex64HookFunc(), + StringToComplex128HookFunc(), + ) +} + +// StringToInt8HookFunc returns a DecodeHookFunc that converts +// strings to int8. +func StringToInt8HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Int8 { + return data, nil + } + + // Convert it by parsing + i64, err := strconv.ParseInt(data.(string), 0, 8) + return int8(i64), err + } +} + +// StringToUint8HookFunc returns a DecodeHookFunc that converts +// strings to uint8. +func StringToUint8HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Uint8 { + return data, nil + } + + // Convert it by parsing + u64, err := strconv.ParseUint(data.(string), 0, 8) + return uint8(u64), err + } +} + +// StringToInt16HookFunc returns a DecodeHookFunc that converts +// strings to int16. +func StringToInt16HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Int16 { + return data, nil + } + + // Convert it by parsing + i64, err := strconv.ParseInt(data.(string), 0, 16) + return int16(i64), err + } +} + +// StringToUint16HookFunc returns a DecodeHookFunc that converts +// strings to uint16. +func StringToUint16HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Uint16 { + return data, nil + } + + // Convert it by parsing + u64, err := strconv.ParseUint(data.(string), 0, 16) + return uint16(u64), err + } +} + +// StringToInt32HookFunc returns a DecodeHookFunc that converts +// strings to int32. 
+func StringToInt32HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Int32 { + return data, nil + } + + // Convert it by parsing + i64, err := strconv.ParseInt(data.(string), 0, 32) + return int32(i64), err + } +} + +// StringToUint32HookFunc returns a DecodeHookFunc that converts +// strings to uint32. +func StringToUint32HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Uint32 { + return data, nil + } + + // Convert it by parsing + u64, err := strconv.ParseUint(data.(string), 0, 32) + return uint32(u64), err + } +} + +// StringToInt64HookFunc returns a DecodeHookFunc that converts +// strings to int64. +func StringToInt64HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Int64 { + return data, nil + } + + // Convert it by parsing + return strconv.ParseInt(data.(string), 0, 64) + } +} + +// StringToUint64HookFunc returns a DecodeHookFunc that converts +// strings to uint64. +func StringToUint64HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Uint64 { + return data, nil + } + + // Convert it by parsing + return strconv.ParseUint(data.(string), 0, 64) + } +} + +// StringToIntHookFunc returns a DecodeHookFunc that converts +// strings to int. +func StringToIntHookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Int { + return data, nil + } + + // Convert it by parsing + i64, err := strconv.ParseInt(data.(string), 0, 0) + return int(i64), err + } +} + +// StringToUintHookFunc returns a DecodeHookFunc that converts +// strings to uint. +func StringToUintHookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Uint { + return data, nil + } + + // Convert it by parsing + u64, err := strconv.ParseUint(data.(string), 0, 0) + return uint(u64), err + } +} + +// StringToFloat32HookFunc returns a DecodeHookFunc that converts +// strings to float32. +func StringToFloat32HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Float32 { + return data, nil + } + + // Convert it by parsing + f64, err := strconv.ParseFloat(data.(string), 32) + return float32(f64), err + } +} + +// StringToFloat64HookFunc returns a DecodeHookFunc that converts +// strings to float64. +func StringToFloat64HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Float64 { + return data, nil + } + + // Convert it by parsing + return strconv.ParseFloat(data.(string), 64) + } +} + +// StringToBoolHookFunc returns a DecodeHookFunc that converts +// strings to bool. 
+func StringToBoolHookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Bool { + return data, nil + } + + // Convert it by parsing + return strconv.ParseBool(data.(string)) + } +} + +// StringToByteHookFunc returns a DecodeHookFunc that converts +// strings to byte. +func StringToByteHookFunc() DecodeHookFunc { + return StringToUint8HookFunc() +} + +// StringToRuneHookFunc returns a DecodeHookFunc that converts +// strings to rune. +func StringToRuneHookFunc() DecodeHookFunc { + return StringToInt32HookFunc() +} + +// StringToComplex64HookFunc returns a DecodeHookFunc that converts +// strings to complex64. +func StringToComplex64HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Complex64 { + return data, nil + } + + // Convert it by parsing + c128, err := strconv.ParseComplex(data.(string), 64) + return complex64(c128), err + } +} + +// StringToComplex128HookFunc returns a DecodeHookFunc that converts +// strings to complex128. +func StringToComplex128HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Complex128 { + return data, nil + } + + // Convert it by parsing + return strconv.ParseComplex(data.(string), 128) + } +} diff --git a/vendor/github.com/go-viper/mapstructure/v2/error.go b/vendor/github.com/go-viper/mapstructure/v2/error.go deleted file mode 100644 index 47a99e5af3f..00000000000 --- a/vendor/github.com/go-viper/mapstructure/v2/error.go +++ /dev/null @@ -1,50 +0,0 @@ -package mapstructure - -import ( - "errors" - "fmt" - "sort" - "strings" -) - -// Error implements the error interface and can represents multiple -// errors that occur in the course of a single decode. -type Error struct { - Errors []string -} - -func (e *Error) Error() string { - points := make([]string, len(e.Errors)) - for i, err := range e.Errors { - points[i] = fmt.Sprintf("* %s", err) - } - - sort.Strings(points) - return fmt.Sprintf( - "%d error(s) decoding:\n\n%s", - len(e.Errors), strings.Join(points, "\n")) -} - -// WrappedErrors implements the errwrap.Wrapper interface to make this -// return value more useful with the errwrap and go-multierror libraries. -func (e *Error) WrappedErrors() []error { - if e == nil { - return nil - } - - result := make([]error, len(e.Errors)) - for i, e := range e.Errors { - result[i] = errors.New(e) - } - - return result -} - -func appendErrors(errors []string, err error) []string { - switch e := err.(type) { - case *Error: - return append(errors, e.Errors...) 
- default: - return append(errors, e.Error()) - } -} diff --git a/vendor/github.com/go-viper/mapstructure/v2/flake.lock b/vendor/github.com/go-viper/mapstructure/v2/flake.lock index 5a387d32994..4bea8154e04 100644 --- a/vendor/github.com/go-viper/mapstructure/v2/flake.lock +++ b/vendor/github.com/go-viper/mapstructure/v2/flake.lock @@ -1,22 +1,84 @@ { "nodes": { + "cachix": { + "inputs": { + "devenv": "devenv_2", + "flake-compat": [ + "devenv", + "flake-compat" + ], + "nixpkgs": [ + "devenv", + "nixpkgs" + ], + "pre-commit-hooks": [ + "devenv", + "pre-commit-hooks" + ] + }, + "locked": { + "lastModified": 1712055811, + "narHash": "sha256-7FcfMm5A/f02yyzuavJe06zLa9hcMHsagE28ADcmQvk=", + "owner": "cachix", + "repo": "cachix", + "rev": "02e38da89851ec7fec3356a5c04bc8349cae0e30", + "type": "github" + }, + "original": { + "owner": "cachix", + "repo": "cachix", + "type": "github" + } + }, "devenv": { "inputs": { - "flake-compat": "flake-compat", + "cachix": "cachix", + "flake-compat": "flake-compat_2", + "nix": "nix_2", + "nixpkgs": "nixpkgs_2", + "pre-commit-hooks": "pre-commit-hooks" + }, + "locked": { + "lastModified": 1717245169, + "narHash": "sha256-+mW3rTBjGU8p1THJN0lX/Dd/8FbnF+3dB+mJuSaxewE=", + "owner": "cachix", + "repo": "devenv", + "rev": "c3f9f053c077c6f88a3de5276d9178c62baa3fc3", + "type": "github" + }, + "original": { + "owner": "cachix", + "repo": "devenv", + "type": "github" + } + }, + "devenv_2": { + "inputs": { + "flake-compat": [ + "devenv", + "cachix", + "flake-compat" + ], "nix": "nix", "nixpkgs": "nixpkgs", - "pre-commit-hooks": "pre-commit-hooks" + "poetry2nix": "poetry2nix", + "pre-commit-hooks": [ + "devenv", + "cachix", + "pre-commit-hooks" + ] }, "locked": { - "lastModified": 1702549996, - "narHash": "sha256-mEN+8gjWUXRxBCcixeth+jlDNuzxbpFwZNOEc4K22vw=", + "lastModified": 1708704632, + "narHash": "sha256-w+dOIW60FKMaHI1q5714CSibk99JfYxm0CzTinYWr+Q=", "owner": "cachix", "repo": "devenv", - "rev": "e681a99ffe2d2882f413a5d771129223c838ddce", + "rev": "2ee4450b0f4b95a1b90f2eb5ffea98b90e48c196", "type": "github" }, "original": { "owner": "cachix", + "ref": "python-rewrite", "repo": "devenv", "type": "github" } @@ -37,16 +99,32 @@ "type": "github" } }, + "flake-compat_2": { + "flake": false, + "locked": { + "lastModified": 1696426674, + "narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=", + "owner": "edolstra", + "repo": "flake-compat", + "rev": "0f9255e01c2351cc7d116c072cb317785dd33b33", + "type": "github" + }, + "original": { + "owner": "edolstra", + "repo": "flake-compat", + "type": "github" + } + }, "flake-parts": { "inputs": { "nixpkgs-lib": "nixpkgs-lib" }, "locked": { - "lastModified": 1701473968, - "narHash": "sha256-YcVE5emp1qQ8ieHUnxt1wCZCC3ZfAS+SRRWZ2TMda7E=", + "lastModified": 1717285511, + "narHash": "sha256-iKzJcpdXih14qYVcZ9QC9XuZYnPc6T8YImb6dX166kw=", "owner": "hercules-ci", "repo": "flake-parts", - "rev": "34fed993f1674c8d06d58b37ce1e0fe5eebcb9f5", + "rev": "2a55567fcf15b1b1c7ed712a2c6fadaec7412ea8", "type": "github" }, "original": { @@ -60,11 +138,29 @@ "systems": "systems" }, "locked": { - "lastModified": 1685518550, - "narHash": "sha256-o2d0KcvaXzTrPRIo0kOLV0/QXHhDQ5DTi+OxcjO8xqY=", + "lastModified": 1689068808, + "narHash": "sha256-6ixXo3wt24N/melDWjq70UuHQLxGV8jZvooRanIHXw0=", + "owner": "numtide", + "repo": "flake-utils", + "rev": "919d646de7be200f3bf08cb76ae1f09402b6f9b4", + "type": "github" + }, + "original": { + "owner": "numtide", + "repo": "flake-utils", + "type": "github" + } + }, + "flake-utils_2": { + "inputs": { + "systems": 
"systems_2" + }, + "locked": { + "lastModified": 1710146030, + "narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=", "owner": "numtide", "repo": "flake-utils", - "rev": "a1720a10a6cfe8234c0e93907ffe81be440f4cef", + "rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a", "type": "github" }, "original": { @@ -82,11 +178,11 @@ ] }, "locked": { - "lastModified": 1660459072, - "narHash": "sha256-8DFJjXG8zqoONA1vXtgeKXy68KdJL5UaXR8NtVMUbx8=", + "lastModified": 1709087332, + "narHash": "sha256-HG2cCnktfHsKV0s4XW83gU3F57gaTljL9KNSuG6bnQs=", "owner": "hercules-ci", "repo": "gitignore.nix", - "rev": "a20de23b925fd8264fd7fad6454652e142fd7f73", + "rev": "637db329424fd7e46cf4185293b9cc8c88c95394", "type": "github" }, "original": { @@ -95,53 +191,90 @@ "type": "github" } }, - "lowdown-src": { - "flake": false, + "nix": { + "inputs": { + "flake-compat": "flake-compat", + "nixpkgs": [ + "devenv", + "cachix", + "devenv", + "nixpkgs" + ], + "nixpkgs-regression": "nixpkgs-regression" + }, "locked": { - "lastModified": 1633514407, - "narHash": "sha256-Dw32tiMjdK9t3ETl5fzGrutQTzh2rufgZV4A/BbxuD4=", - "owner": "kristapsdz", - "repo": "lowdown", - "rev": "d2c2b44ff6c27b936ec27358a2653caaef8f73b8", + "lastModified": 1712911606, + "narHash": "sha256-BGvBhepCufsjcUkXnEEXhEVjwdJAwPglCC2+bInc794=", + "owner": "domenkozar", + "repo": "nix", + "rev": "b24a9318ea3f3600c1e24b4a00691ee912d4de12", "type": "github" }, "original": { - "owner": "kristapsdz", - "repo": "lowdown", + "owner": "domenkozar", + "ref": "devenv-2.21", + "repo": "nix", "type": "github" } }, - "nix": { + "nix-github-actions": { + "inputs": { + "nixpkgs": [ + "devenv", + "cachix", + "devenv", + "poetry2nix", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1688870561, + "narHash": "sha256-4UYkifnPEw1nAzqqPOTL2MvWtm3sNGw1UTYTalkTcGY=", + "owner": "nix-community", + "repo": "nix-github-actions", + "rev": "165b1650b753316aa7f1787f3005a8d2da0f5301", + "type": "github" + }, + "original": { + "owner": "nix-community", + "repo": "nix-github-actions", + "type": "github" + } + }, + "nix_2": { "inputs": { - "lowdown-src": "lowdown-src", + "flake-compat": [ + "devenv", + "flake-compat" + ], "nixpkgs": [ "devenv", "nixpkgs" ], - "nixpkgs-regression": "nixpkgs-regression" + "nixpkgs-regression": "nixpkgs-regression_2" }, "locked": { - "lastModified": 1676545802, - "narHash": "sha256-EK4rZ+Hd5hsvXnzSzk2ikhStJnD63odF7SzsQ8CuSPU=", + "lastModified": 1712911606, + "narHash": "sha256-BGvBhepCufsjcUkXnEEXhEVjwdJAwPglCC2+bInc794=", "owner": "domenkozar", "repo": "nix", - "rev": "7c91803598ffbcfe4a55c44ac6d49b2cf07a527f", + "rev": "b24a9318ea3f3600c1e24b4a00691ee912d4de12", "type": "github" }, "original": { "owner": "domenkozar", - "ref": "relaxed-flakes", + "ref": "devenv-2.21", "repo": "nix", "type": "github" } }, "nixpkgs": { "locked": { - "lastModified": 1678875422, - "narHash": "sha256-T3o6NcQPwXjxJMn2shz86Chch4ljXgZn746c2caGxd8=", + "lastModified": 1692808169, + "narHash": "sha256-x9Opq06rIiwdwGeK2Ykj69dNc2IvUH1fY55Wm7atwrE=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "126f49a01de5b7e35a43fd43f891ecf6d3a51459", + "rev": "9201b5ff357e781bf014d0330d18555695df7ba8", "type": "github" }, "original": { @@ -153,23 +286,33 @@ }, "nixpkgs-lib": { "locked": { - "dir": "lib", - "lastModified": 1701253981, - "narHash": "sha256-ztaDIyZ7HrTAfEEUt9AtTDNoCYxUdSd6NrRHaYOIxtk=", + "lastModified": 1717284937, + "narHash": "sha256-lIbdfCsf8LMFloheeE6N31+BMIeixqyQWbSr2vk79EQ=", + "type": "tarball", + "url": 
"https://github.com/NixOS/nixpkgs/archive/eb9ceca17df2ea50a250b6b27f7bf6ab0186f198.tar.gz" + }, + "original": { + "type": "tarball", + "url": "https://github.com/NixOS/nixpkgs/archive/eb9ceca17df2ea50a250b6b27f7bf6ab0186f198.tar.gz" + } + }, + "nixpkgs-regression": { + "locked": { + "lastModified": 1643052045, + "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "e92039b55bcd58469325ded85d4f58dd5a4eaf58", + "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", "type": "github" }, "original": { - "dir": "lib", "owner": "NixOS", - "ref": "nixos-unstable", "repo": "nixpkgs", + "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", "type": "github" } }, - "nixpkgs-regression": { + "nixpkgs-regression_2": { "locked": { "lastModified": 1643052045, "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=", @@ -187,27 +330,43 @@ }, "nixpkgs-stable": { "locked": { - "lastModified": 1685801374, - "narHash": "sha256-otaSUoFEMM+LjBI1XL/xGB5ao6IwnZOXc47qhIgJe8U=", + "lastModified": 1710695816, + "narHash": "sha256-3Eh7fhEID17pv9ZxrPwCLfqXnYP006RKzSs0JptsN84=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "c37ca420157f4abc31e26f436c1145f8951ff373", + "rev": "614b4613980a522ba49f0d194531beddbb7220d3", "type": "github" }, "original": { "owner": "NixOS", - "ref": "nixos-23.05", + "ref": "nixos-23.11", "repo": "nixpkgs", "type": "github" } }, "nixpkgs_2": { "locked": { - "lastModified": 1702539185, - "narHash": "sha256-KnIRG5NMdLIpEkZTnN5zovNYc0hhXjAgv6pfd5Z4c7U=", + "lastModified": 1713361204, + "narHash": "sha256-TA6EDunWTkc5FvDCqU3W2T3SFn0gRZqh6D/hJnM02MM=", + "owner": "cachix", + "repo": "devenv-nixpkgs", + "rev": "285676e87ad9f0ca23d8714a6ab61e7e027020c6", + "type": "github" + }, + "original": { + "owner": "cachix", + "ref": "rolling", + "repo": "devenv-nixpkgs", + "type": "github" + } + }, + "nixpkgs_3": { + "locked": { + "lastModified": 1717112898, + "narHash": "sha256-7R2ZvOnvd9h8fDd65p0JnB7wXfUvreox3xFdYWd1BnY=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "aa9d4729cbc99dabacb50e3994dcefb3ea0f7447", + "rev": "6132b0f6e344ce2fe34fc051b72fb46e34f668e0", "type": "github" }, "original": { @@ -217,13 +376,38 @@ "type": "github" } }, + "poetry2nix": { + "inputs": { + "flake-utils": "flake-utils", + "nix-github-actions": "nix-github-actions", + "nixpkgs": [ + "devenv", + "cachix", + "devenv", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1692876271, + "narHash": "sha256-IXfZEkI0Mal5y1jr6IRWMqK8GW2/f28xJenZIPQqkY0=", + "owner": "nix-community", + "repo": "poetry2nix", + "rev": "d5006be9c2c2417dafb2e2e5034d83fabd207ee3", + "type": "github" + }, + "original": { + "owner": "nix-community", + "repo": "poetry2nix", + "type": "github" + } + }, "pre-commit-hooks": { "inputs": { "flake-compat": [ "devenv", "flake-compat" ], - "flake-utils": "flake-utils", + "flake-utils": "flake-utils_2", "gitignore": "gitignore", "nixpkgs": [ "devenv", @@ -232,11 +416,11 @@ "nixpkgs-stable": "nixpkgs-stable" }, "locked": { - "lastModified": 1688056373, - "narHash": "sha256-2+SDlNRTKsgo3LBRiMUcoEUb6sDViRNQhzJquZ4koOI=", + "lastModified": 1713775815, + "narHash": "sha256-Wu9cdYTnGQQwtT20QQMg7jzkANKQjwBD9iccfGKkfls=", "owner": "cachix", "repo": "pre-commit-hooks.nix", - "rev": "5843cf069272d92b60c3ed9e55b7a8989c01d4c7", + "rev": "2ac4dcbf55ed43f3be0bae15e181f08a57af24a4", "type": "github" }, "original": { @@ -249,7 +433,7 @@ "inputs": { "devenv": "devenv", "flake-parts": "flake-parts", - "nixpkgs": "nixpkgs_2" + "nixpkgs": "nixpkgs_3" } }, "systems": { @@ -266,6 
+450,21 @@ "repo": "default", "type": "github" } + }, + "systems_2": { + "locked": { + "lastModified": 1681028828, + "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", + "owner": "nix-systems", + "repo": "default", + "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", + "type": "github" + }, + "original": { + "owner": "nix-systems", + "repo": "default", + "type": "github" + } } }, "root": "root", diff --git a/vendor/github.com/go-viper/mapstructure/v2/internal/errors/errors.go b/vendor/github.com/go-viper/mapstructure/v2/internal/errors/errors.go new file mode 100644 index 00000000000..d1c15e474f4 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/internal/errors/errors.go @@ -0,0 +1,11 @@ +package errors + +import "errors" + +func New(text string) error { + return errors.New(text) +} + +func As(err error, target interface{}) bool { + return errors.As(err, target) +} diff --git a/vendor/github.com/go-viper/mapstructure/v2/internal/errors/join.go b/vendor/github.com/go-viper/mapstructure/v2/internal/errors/join.go new file mode 100644 index 00000000000..d74e3a0b5a4 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/internal/errors/join.go @@ -0,0 +1,9 @@ +//go:build go1.20 + +package errors + +import "errors" + +func Join(errs ...error) error { + return errors.Join(errs...) +} diff --git a/vendor/github.com/go-viper/mapstructure/v2/internal/errors/join_go1_19.go b/vendor/github.com/go-viper/mapstructure/v2/internal/errors/join_go1_19.go new file mode 100644 index 00000000000..700b40229cb --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/internal/errors/join_go1_19.go @@ -0,0 +1,61 @@ +//go:build !go1.20 + +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package errors + +// Join returns an error that wraps the given errors. +// Any nil error values are discarded. +// Join returns nil if every value in errs is nil. +// The error formats as the concatenation of the strings obtained +// by calling the Error method of each element of errs, with a newline +// between each string. +// +// A non-nil error returned by Join implements the Unwrap() []error method. +func Join(errs ...error) error { + n := 0 + for _, err := range errs { + if err != nil { + n++ + } + } + if n == 0 { + return nil + } + e := &joinError{ + errs: make([]error, 0, n), + } + for _, err := range errs { + if err != nil { + e.errs = append(e.errs, err) + } + } + return e +} + +type joinError struct { + errs []error +} + +func (e *joinError) Error() string { + // Since Join returns nil if every value in errs is nil, + // e.errs cannot be empty. + if len(e.errs) == 1 { + return e.errs[0].Error() + } + + b := []byte(e.errs[0].Error()) + for _, err := range e.errs[1:] { + b = append(b, '\n') + b = append(b, err.Error()...) + } + // At this point, b has at least one byte '\n'. 
+ // return unsafe.String(&b[0], len(b)) + return string(b) +} + +func (e *joinError) Unwrap() []error { + return e.errs +} diff --git a/vendor/github.com/go-viper/mapstructure/v2/mapstructure.go b/vendor/github.com/go-viper/mapstructure/v2/mapstructure.go index 27f21bc7219..e77e63ba383 100644 --- a/vendor/github.com/go-viper/mapstructure/v2/mapstructure.go +++ b/vendor/github.com/go-viper/mapstructure/v2/mapstructure.go @@ -160,12 +160,13 @@ package mapstructure import ( "encoding/json" - "errors" "fmt" "reflect" "sort" "strconv" "strings" + + "github.com/go-viper/mapstructure/v2/internal/errors" ) // DecodeHookFunc is the callback function that can be used for @@ -265,6 +266,10 @@ type DecoderConfig struct { // defaults to "mapstructure" TagName string + // The option of the value in the tag that indicates a field should + // be squashed. This defaults to "squash". + SquashTagOption string + // IgnoreUntaggedFields ignores all struct fields without explicit // TagName, comparable to `mapstructure:"-"` as default behaviour. IgnoreUntaggedFields bool @@ -273,6 +278,10 @@ type DecoderConfig struct { // field name or tag. Defaults to `strings.EqualFold`. This can be used // to implement case-sensitive tag values, support snake casing, etc. MatchName func(mapKey, fieldName string) bool + + // DecodeNil, if set to true, will cause the DecodeHook (if present) to run + // even if the input is nil. This can be used to provide default values. + DecodeNil bool } // A Decoder takes a raw interface value and turns it into structured // structure. The top-level Decode method is just a convenience that sets // up the most basic Decoder. type Decoder struct { - config *DecoderConfig + config *DecoderConfig + cachedDecodeHook func(from reflect.Value, to reflect.Value) (interface{}, error) } // Metadata contains information about decoding a structure that @@ -400,6 +410,10 @@ func NewDecoder(config *DecoderConfig) (*Decoder, error) { config.TagName = "mapstructure" } + if config.SquashTagOption == "" { + config.SquashTagOption = "squash" + } + if config.MatchName == nil { config.MatchName = strings.EqualFold } @@ -407,6 +421,9 @@ func NewDecoder(config *DecoderConfig) (*Decoder, error) { result := &Decoder{ config: config, } + if config.DecodeHook != nil { + result.cachedDecodeHook = cachedDecodeHook(config.DecodeHook) + } return result, nil } @@ -414,22 +431,37 @@ func NewDecoder(config *DecoderConfig) (*Decoder, error) { // Decode decodes the given raw interface to the target pointer specified // by the configuration. func (d *Decoder) Decode(input interface{}) error { - return d.decode("", input, reflect.ValueOf(d.config.Result).Elem()) + err := d.decode("", input, reflect.ValueOf(d.config.Result).Elem()) + + // Retain some of the original behavior when multiple errors occur + var joinedErr interface{ Unwrap() []error } + if errors.As(err, &joinedErr) { + return fmt.Errorf("decoding failed due to the following error(s):\n\n%w", err) + } + + return err +} + +// isNil returns true if the input is nil or a typed nil pointer. +func isNil(input interface{}) bool { + if input == nil { + return true + } + val := reflect.ValueOf(input) + return val.Kind() == reflect.Ptr && val.IsNil() } // Decodes an unknown data type into a specific reflection value.
func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) error { - var inputVal reflect.Value - if input != nil { - inputVal = reflect.ValueOf(input) - - // We need to check here if input is a typed nil. Typed nils won't - // match the "input == nil" below so we check that here. - if inputVal.Kind() == reflect.Ptr && inputVal.IsNil() { - input = nil - } + var ( + inputVal = reflect.ValueOf(input) + outputKind = getKind(outVal) + decodeNil = d.config.DecodeNil && d.cachedDecodeHook != nil + ) + if isNil(input) { + // Typed nils won't match the "input == nil" below, so reset input. + input = nil } - if input == nil { // If the data is nil, then we don't set anything, unless ZeroFields is set // to true. @@ -440,30 +472,46 @@ func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) e d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) } } - return nil + if !decodeNil { + return nil + } } - if !inputVal.IsValid() { - // If the input value is invalid, then we just set the value - // to be the zero value. - outVal.Set(reflect.Zero(outVal.Type())) - if d.config.Metadata != nil && name != "" { - d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) + if !decodeNil { + // If the input value is invalid, then we just set the value + // to be the zero value. + outVal.Set(reflect.Zero(outVal.Type())) + if d.config.Metadata != nil && name != "" { + d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) + } + return nil + } + // Hooks need a valid inputVal, so reset it to zero value of outVal type. + switch outputKind { + case reflect.Struct, reflect.Map: + var mapVal map[string]interface{} + inputVal = reflect.ValueOf(mapVal) // create nil map pointer + case reflect.Slice, reflect.Array: + var sliceVal []interface{} + inputVal = reflect.ValueOf(sliceVal) // create nil slice pointer + default: + inputVal = reflect.Zero(outVal.Type()) } - return nil } - if d.config.DecodeHook != nil { + if d.cachedDecodeHook != nil { // We have a DecodeHook, so let's pre-process the input. 
var err error - input, err = DecodeHookExec(d.config.DecodeHook, inputVal, outVal) + input, err = d.cachedDecodeHook(inputVal, outVal) if err != nil { return fmt.Errorf("error decoding '%s': %w", name, err) } } + if isNil(input) { + return nil + } var err error - outputKind := getKind(outVal) addMetaKey := true switch outputKind { case reflect.Bool: @@ -478,6 +526,8 @@ func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) e err = d.decodeUint(name, input, outVal) case reflect.Float32: err = d.decodeFloat(name, input, outVal) + case reflect.Complex64: + err = d.decodeComplex(name, input, outVal) case reflect.Struct: err = d.decodeStruct(name, input, outVal) case reflect.Map: @@ -742,8 +792,8 @@ func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) e } default: return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", - name, val.Type(), dataVal.Type(), data) + "'%s' expected type '%s', got unconvertible type '%#v', value: '%#v'", + name, val, dataVal, data) } return nil @@ -796,6 +846,22 @@ func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) return nil } +func (d *Decoder) decodeComplex(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + + switch { + case dataKind == reflect.Complex64: + val.SetComplex(dataVal.Complex()) + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) + } + + return nil +} + func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) error { valType := val.Type() valKeyType := valType.Key() @@ -863,7 +929,7 @@ func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val refle valElemType := valType.Elem() // Accumulate errors - errors := make([]string, 0) + var errs []error // If the input data is empty, then we just match what the input data is. if dataVal.Len() == 0 { @@ -885,7 +951,7 @@ func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val refle // First decode the key into the proper type currentKey := reflect.Indirect(reflect.New(valKeyType)) if err := d.decode(fieldName, k.Interface(), currentKey); err != nil { - errors = appendErrors(errors, err) + errs = append(errs, err) continue } @@ -893,7 +959,7 @@ func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val refle v := dataVal.MapIndex(k).Interface() currentVal := reflect.Indirect(reflect.New(valElemType)) if err := d.decode(fieldName, v, currentVal); err != nil { - errors = appendErrors(errors, err) + errs = append(errs, err) continue } @@ -903,12 +969,7 @@ func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val refle // Set the built up map to the value val.Set(valMap) - // If we had errors, return those - if len(errors) > 0 { - return &Error{errors} - } - - return nil + return errors.Join(errs...) } func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { @@ -951,7 +1012,7 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re } // If "squash" is specified in the tag, we squash the field down. - squash = squash || strings.Index(tagValue[index+1:], "squash") != -1 + squash = squash || strings.Contains(tagValue[index+1:], d.config.SquashTagOption) if squash { // When squashing, the embedded type can be a pointer to a struct. 
if v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.Struct { @@ -1146,7 +1207,7 @@ func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) } // Accumulate any errors - errors := make([]string, 0) + var errs []error for i := 0; i < dataVal.Len(); i++ { currentData := dataVal.Index(i).Interface() @@ -1157,19 +1218,14 @@ func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) fieldName := name + "[" + strconv.Itoa(i) + "]" if err := d.decode(fieldName, currentData, currentField); err != nil { - errors = appendErrors(errors, err) + errs = append(errs, err) } } // Finally, set the value to the slice we built up val.Set(valSlice) - // If there were errors, we return those - if len(errors) > 0 { - return &Error{errors} - } - - return nil + return errors.Join(errs...) } func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value) error { @@ -1215,7 +1271,7 @@ func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value) } // Accumulate any errors - errors := make([]string, 0) + var errs []error for i := 0; i < dataVal.Len(); i++ { currentData := dataVal.Index(i).Interface() @@ -1223,19 +1279,14 @@ func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value) fieldName := name + "[" + strconv.Itoa(i) + "]" if err := d.decode(fieldName, currentData, currentField); err != nil { - errors = appendErrors(errors, err) + errs = append(errs, err) } } // Finally, set the value to the array we built up val.Set(valArray) - // If there were errors, we return those - if len(errors) > 0 { - return &Error{errors} - } - - return nil + return errors.Join(errs...) } func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error { @@ -1297,7 +1348,8 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e } targetValKeysUnused := make(map[interface{}]struct{}) - errors := make([]string, 0) + + var errs []error // This slice will keep track of all the structs we'll be decoding. // There can be more than one struct if there are embedded structs @@ -1338,7 +1390,7 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e // We always parse the tags cause we're looking for other tags too tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",") for _, tag := range tagParts[1:] { - if tag == "squash" { + if tag == d.config.SquashTagOption { squash = true break } @@ -1350,11 +1402,15 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e } if squash { - if fieldVal.Kind() != reflect.Struct { - errors = appendErrors(errors, - fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldVal.Kind())) - } else { + switch fieldVal.Kind() { + case reflect.Struct: structs = append(structs, fieldVal) + case reflect.Interface: + if !fieldVal.IsNil() { + structs = append(structs, fieldVal.Elem().Elem()) + } + default: + errs = append(errs, fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldVal.Kind())) } continue } @@ -1431,7 +1487,7 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e } if err := d.decode(fieldName, rawMapVal.Interface(), fieldValue); err != nil { - errors = appendErrors(errors, err) + errs = append(errs, err) } } @@ -1446,7 +1502,7 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e // Decode it as-if we were just decoding this map onto our map. 
if err := d.decodeMap(name, remain, remainField.val); err != nil { - errors = appendErrors(errors, err) + errs = append(errs, err) } // Set the map to nil so we have none so that the next check will @@ -1462,7 +1518,7 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e sort.Strings(keys) err := fmt.Errorf("'%s' has invalid keys: %s", name, strings.Join(keys, ", ")) - errors = appendErrors(errors, err) + errs = append(errs, err) } if d.config.ErrorUnset && len(targetValKeysUnused) > 0 { @@ -1473,11 +1529,11 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e sort.Strings(keys) err := fmt.Errorf("'%s' has unset fields: %s", name, strings.Join(keys, ", ")) - errors = appendErrors(errors, err) + errs = append(errs, err) } - if len(errors) > 0 { - return &Error{errors} + if err := errors.Join(errs...); err != nil { + return err } // Add the unused keys to the list of unused keys if we're tracking metadata @@ -1531,6 +1587,8 @@ func getKind(val reflect.Value) reflect.Kind { return reflect.Uint case kind >= reflect.Float32 && kind <= reflect.Float64: return reflect.Float32 + case kind >= reflect.Complex64 && kind <= reflect.Complex128: + return reflect.Complex64 default: return kind } diff --git a/vendor/github.com/goccy/go-json/internal/decoder/compile.go b/vendor/github.com/goccy/go-json/internal/decoder/compile.go index fab6437647b..8ad50936c0c 100644 --- a/vendor/github.com/goccy/go-json/internal/decoder/compile.go +++ b/vendor/github.com/goccy/go-json/internal/decoder/compile.go @@ -5,6 +5,7 @@ import ( "fmt" "reflect" "strings" + "sync" "sync/atomic" "unicode" "unsafe" @@ -17,22 +18,27 @@ var ( typeAddr *runtime.TypeAddr cachedDecoderMap unsafe.Pointer // map[uintptr]decoder cachedDecoder []Decoder + initOnce sync.Once ) -func init() { - typeAddr = runtime.AnalyzeTypeAddr() - if typeAddr == nil { - typeAddr = &runtime.TypeAddr{} - } - cachedDecoder = make([]Decoder, typeAddr.AddrRange>>typeAddr.AddrShift+1) +func initDecoder() { + initOnce.Do(func() { + typeAddr = runtime.AnalyzeTypeAddr() + if typeAddr == nil { + typeAddr = &runtime.TypeAddr{} + } + cachedDecoder = make([]Decoder, typeAddr.AddrRange>>typeAddr.AddrShift+1) + }) } func loadDecoderMap() map[uintptr]Decoder { + initDecoder() p := atomic.LoadPointer(&cachedDecoderMap) return *(*map[uintptr]Decoder)(unsafe.Pointer(&p)) } func storeDecoder(typ uintptr, dec Decoder, m map[uintptr]Decoder) { + initDecoder() newDecoderMap := make(map[uintptr]Decoder, len(m)+1) newDecoderMap[typ] = dec diff --git a/vendor/github.com/goccy/go-json/internal/decoder/compile_norace.go b/vendor/github.com/goccy/go-json/internal/decoder/compile_norace.go index eb7e2b1345d..025ca85b5e2 100644 --- a/vendor/github.com/goccy/go-json/internal/decoder/compile_norace.go +++ b/vendor/github.com/goccy/go-json/internal/decoder/compile_norace.go @@ -10,6 +10,7 @@ import ( ) func CompileToGetDecoder(typ *runtime.Type) (Decoder, error) { + initDecoder() typeptr := uintptr(unsafe.Pointer(typ)) if typeptr > typeAddr.MaxTypeAddr { return compileToGetDecoderSlowPath(typeptr, typ) diff --git a/vendor/github.com/goccy/go-json/internal/decoder/compile_race.go b/vendor/github.com/goccy/go-json/internal/decoder/compile_race.go index 49cdda4a172..023b817c368 100644 --- a/vendor/github.com/goccy/go-json/internal/decoder/compile_race.go +++ b/vendor/github.com/goccy/go-json/internal/decoder/compile_race.go @@ -13,6 +13,7 @@ import ( var decMu sync.RWMutex func CompileToGetDecoder(typ *runtime.Type) (Decoder, error) { 
+ initDecoder() typeptr := uintptr(unsafe.Pointer(typ)) if typeptr > typeAddr.MaxTypeAddr { return compileToGetDecoderSlowPath(typeptr, typ) diff --git a/vendor/github.com/goccy/go-json/internal/encoder/compiler.go b/vendor/github.com/goccy/go-json/internal/encoder/compiler.go index 37b7aa38e26..b107636890a 100644 --- a/vendor/github.com/goccy/go-json/internal/encoder/compiler.go +++ b/vendor/github.com/goccy/go-json/internal/encoder/compiler.go @@ -5,6 +5,7 @@ import ( "encoding" "encoding/json" "reflect" + "sync" "sync/atomic" "unsafe" @@ -24,14 +25,17 @@ var ( cachedOpcodeSets []*OpcodeSet cachedOpcodeMap unsafe.Pointer // map[uintptr]*OpcodeSet typeAddr *runtime.TypeAddr + initEncoderOnce sync.Once ) -func init() { - typeAddr = runtime.AnalyzeTypeAddr() - if typeAddr == nil { - typeAddr = &runtime.TypeAddr{} - } - cachedOpcodeSets = make([]*OpcodeSet, typeAddr.AddrRange>>typeAddr.AddrShift+1) +func initEncoder() { + initEncoderOnce.Do(func() { + typeAddr = runtime.AnalyzeTypeAddr() + if typeAddr == nil { + typeAddr = &runtime.TypeAddr{} + } + cachedOpcodeSets = make([]*OpcodeSet, typeAddr.AddrRange>>typeAddr.AddrShift+1) + }) } func loadOpcodeMap() map[uintptr]*OpcodeSet { diff --git a/vendor/github.com/goccy/go-json/internal/encoder/compiler_norace.go b/vendor/github.com/goccy/go-json/internal/encoder/compiler_norace.go index 20c93cbf709..b6f45a49b0e 100644 --- a/vendor/github.com/goccy/go-json/internal/encoder/compiler_norace.go +++ b/vendor/github.com/goccy/go-json/internal/encoder/compiler_norace.go @@ -4,6 +4,7 @@ package encoder func CompileToGetCodeSet(ctx *RuntimeContext, typeptr uintptr) (*OpcodeSet, error) { + initEncoder() if typeptr > typeAddr.MaxTypeAddr || typeptr < typeAddr.BaseTypeAddr { codeSet, err := compileToGetCodeSetSlowPath(typeptr) if err != nil { diff --git a/vendor/github.com/goccy/go-json/internal/encoder/compiler_race.go b/vendor/github.com/goccy/go-json/internal/encoder/compiler_race.go index 13ba23fdff8..47b482f7fb6 100644 --- a/vendor/github.com/goccy/go-json/internal/encoder/compiler_race.go +++ b/vendor/github.com/goccy/go-json/internal/encoder/compiler_race.go @@ -10,6 +10,7 @@ import ( var setsMu sync.RWMutex func CompileToGetCodeSet(ctx *RuntimeContext, typeptr uintptr) (*OpcodeSet, error) { + initEncoder() if typeptr > typeAddr.MaxTypeAddr || typeptr < typeAddr.BaseTypeAddr { codeSet, err := compileToGetCodeSetSlowPath(typeptr) if err != nil { diff --git a/vendor/github.com/goccy/go-json/internal/encoder/encoder.go b/vendor/github.com/goccy/go-json/internal/encoder/encoder.go index 14eb6a0d643..b436f5b21ff 100644 --- a/vendor/github.com/goccy/go-json/internal/encoder/encoder.go +++ b/vendor/github.com/goccy/go-json/internal/encoder/encoder.go @@ -406,6 +406,11 @@ func AppendMarshalJSON(ctx *RuntimeContext, code *Opcode, b []byte, v interface{ rv = newV } } + + if rv.Kind() == reflect.Ptr && rv.IsNil() { + return AppendNull(ctx, b), nil + } + v = rv.Interface() var bb []byte if (code.Flags & MarshalerContextFlags) != 0 { diff --git a/vendor/github.com/google/s2a-go/internal/proto/common_go_proto/common.pb.go b/vendor/github.com/google/s2a-go/internal/proto/common_go_proto/common.pb.go index 16278a1d995..fcd049de922 100644 --- a/vendor/github.com/google/s2a-go/internal/proto/common_go_proto/common.pb.go +++ b/vendor/github.com/google/s2a-go/internal/proto/common_go_proto/common.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.34.2 // protoc v3.21.12 // source: internal/proto/common/common.proto @@ -145,8 +145,8 @@ type Identity struct { // *Identity_SpiffeId // *Identity_Hostname // *Identity_Uid - // *Identity_MdbUsername - // *Identity_GaiaId + // *Identity_Username + // *Identity_GcpId IdentityOneof isIdentity_IdentityOneof `protobuf_oneof:"identity_oneof"` // Additional identity-specific attributes. Attributes map[string]string `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` @@ -212,16 +212,16 @@ func (x *Identity) GetUid() string { return "" } -func (x *Identity) GetMdbUsername() string { - if x, ok := x.GetIdentityOneof().(*Identity_MdbUsername); ok { - return x.MdbUsername +func (x *Identity) GetUsername() string { + if x, ok := x.GetIdentityOneof().(*Identity_Username); ok { + return x.Username } return "" } -func (x *Identity) GetGaiaId() string { - if x, ok := x.GetIdentityOneof().(*Identity_GaiaId); ok { - return x.GaiaId +func (x *Identity) GetGcpId() string { + if x, ok := x.GetIdentityOneof().(*Identity_GcpId); ok { + return x.GcpId } return "" } @@ -252,14 +252,14 @@ type Identity_Uid struct { Uid string `protobuf:"bytes,4,opt,name=uid,proto3,oneof"` } -type Identity_MdbUsername struct { - // The MDB username of a connection endpoint. - MdbUsername string `protobuf:"bytes,5,opt,name=mdb_username,json=mdbUsername,proto3,oneof"` +type Identity_Username struct { + // The username of a connection endpoint. + Username string `protobuf:"bytes,5,opt,name=username,proto3,oneof"` } -type Identity_GaiaId struct { - // The Gaia ID of a connection endpoint. - GaiaId string `protobuf:"bytes,6,opt,name=gaia_id,json=gaiaId,proto3,oneof"` +type Identity_GcpId struct { + // The GCP ID of a connection endpoint. 
+ GcpId string `protobuf:"bytes,6,opt,name=gcp_id,json=gcpId,proto3,oneof"` } func (*Identity_SpiffeId) isIdentity_IdentityOneof() {} @@ -268,9 +268,9 @@ func (*Identity_Hostname) isIdentity_IdentityOneof() {} func (*Identity_Uid) isIdentity_IdentityOneof() {} -func (*Identity_MdbUsername) isIdentity_IdentityOneof() {} +func (*Identity_Username) isIdentity_IdentityOneof() {} -func (*Identity_GaiaId) isIdentity_IdentityOneof() {} +func (*Identity_GcpId) isIdentity_IdentityOneof() {} var File_internal_proto_common_common_proto protoreflect.FileDescriptor @@ -278,38 +278,37 @@ var file_internal_proto_common_common_proto_rawDesc = []byte{ 0x0a, 0x22, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, - 0xb1, 0x02, 0x0a, 0x08, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x1d, 0x0a, 0x09, + 0xa8, 0x02, 0x0a, 0x08, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x1d, 0x0a, 0x09, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x03, 0x75, 0x69, 0x64, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, 0x75, 0x69, 0x64, 0x12, 0x23, 0x0a, - 0x0c, 0x6d, 0x64, 0x62, 0x5f, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x6d, 0x64, 0x62, 0x55, 0x73, 0x65, 0x72, 0x6e, 0x61, - 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x07, 0x67, 0x61, 0x69, 0x61, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x06, 0x67, 0x61, 0x69, 0x61, 0x49, 0x64, 0x12, 0x43, 0x0a, - 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x23, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, - 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, - 0x65, 0x73, 0x1a, 0x3d, 0x0a, 0x0f, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, - 0x01, 0x42, 0x10, 0x0a, 0x0e, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x6f, 0x6e, - 0x65, 0x6f, 0x66, 0x2a, 0x5b, 0x0a, 0x0b, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, - 0x74, 0x65, 0x12, 0x16, 0x0a, 0x12, 0x41, 0x45, 0x53, 0x5f, 0x31, 0x32, 0x38, 0x5f, 0x47, 0x43, - 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x00, 0x12, 0x16, 0x0a, 0x12, 0x41, 0x45, - 0x53, 0x5f, 0x32, 0x35, 0x36, 0x5f, 0x47, 0x43, 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, - 0x10, 0x01, 0x12, 0x1c, 0x0a, 0x18, 0x43, 0x48, 0x41, 0x43, 0x48, 0x41, 0x32, 0x30, 0x5f, 0x50, - 0x4f, 0x4c, 0x59, 0x31, 0x33, 0x30, 0x35, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x02, - 0x2a, 0x24, 0x0a, 0x0a, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 
0x6e, 0x12, 0x0a, - 0x0a, 0x06, 0x54, 0x4c, 0x53, 0x31, 0x5f, 0x32, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x54, 0x4c, - 0x53, 0x31, 0x5f, 0x33, 0x10, 0x01, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, 0x61, 0x2f, - 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, - 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, 0x75, 0x69, 0x64, 0x12, 0x1c, 0x0a, + 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, + 0x00, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x17, 0x0a, 0x06, 0x67, + 0x63, 0x70, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x05, 0x67, + 0x63, 0x70, 0x49, 0x64, 0x12, 0x43, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, + 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x2e, 0x41, 0x74, + 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x61, + 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x1a, 0x3d, 0x0a, 0x0f, 0x41, 0x74, 0x74, + 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x10, 0x0a, 0x0e, 0x69, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x2a, 0x5b, 0x0a, 0x0b, 0x43, 0x69, + 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, 0x12, 0x16, 0x0a, 0x12, 0x41, 0x45, 0x53, + 0x5f, 0x31, 0x32, 0x38, 0x5f, 0x47, 0x43, 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, + 0x00, 0x12, 0x16, 0x0a, 0x12, 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36, 0x5f, 0x47, 0x43, 0x4d, + 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x01, 0x12, 0x1c, 0x0a, 0x18, 0x43, 0x48, 0x41, + 0x43, 0x48, 0x41, 0x32, 0x30, 0x5f, 0x50, 0x4f, 0x4c, 0x59, 0x31, 0x33, 0x30, 0x35, 0x5f, 0x53, + 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x02, 0x2a, 0x24, 0x0a, 0x0a, 0x54, 0x4c, 0x53, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0a, 0x0a, 0x06, 0x54, 0x4c, 0x53, 0x31, 0x5f, 0x32, 0x10, + 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x54, 0x4c, 0x53, 0x31, 0x5f, 0x33, 0x10, 0x01, 0x42, 0x36, 0x5a, + 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x67, 0x6f, 0x5f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -326,7 +325,7 @@ func file_internal_proto_common_common_proto_rawDescGZIP() []byte { var file_internal_proto_common_common_proto_enumTypes = make([]protoimpl.EnumInfo, 2) var file_internal_proto_common_common_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_internal_proto_common_common_proto_goTypes = []interface{}{ +var file_internal_proto_common_common_proto_goTypes = []any{ (Ciphersuite)(0), // 0: s2a.proto.Ciphersuite (TLSVersion)(0), // 1: 
s2a.proto.TLSVersion (*Identity)(nil), // 2: s2a.proto.Identity @@ -347,7 +346,7 @@ func file_internal_proto_common_common_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_internal_proto_common_common_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_common_common_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Identity); i { case 0: return &v.state @@ -360,12 +359,12 @@ func file_internal_proto_common_common_proto_init() { } } } - file_internal_proto_common_common_proto_msgTypes[0].OneofWrappers = []interface{}{ + file_internal_proto_common_common_proto_msgTypes[0].OneofWrappers = []any{ (*Identity_SpiffeId)(nil), (*Identity_Hostname)(nil), (*Identity_Uid)(nil), - (*Identity_MdbUsername)(nil), - (*Identity_GaiaId)(nil), + (*Identity_Username)(nil), + (*Identity_GcpId)(nil), } type x struct{} out := protoimpl.TypeBuilder{ diff --git a/vendor/github.com/google/s2a-go/internal/proto/s2a_context_go_proto/s2a_context.pb.go b/vendor/github.com/google/s2a-go/internal/proto/s2a_context_go_proto/s2a_context.pb.go index f4f763ae102..2af3ee3dc1c 100644 --- a/vendor/github.com/google/s2a-go/internal/proto/s2a_context_go_proto/s2a_context.pb.go +++ b/vendor/github.com/google/s2a-go/internal/proto/s2a_context_go_proto/s2a_context.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.34.2 // protoc v3.21.12 // source: internal/proto/s2a_context/s2a_context.proto @@ -209,7 +209,7 @@ func file_internal_proto_s2a_context_s2a_context_proto_rawDescGZIP() []byte { } var file_internal_proto_s2a_context_s2a_context_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_internal_proto_s2a_context_s2a_context_proto_goTypes = []interface{}{ +var file_internal_proto_s2a_context_s2a_context_proto_goTypes = []any{ (*S2AContext)(nil), // 0: s2a.proto.S2AContext (common_go_proto.TLSVersion)(0), // 1: s2a.proto.TLSVersion (common_go_proto.Ciphersuite)(0), // 2: s2a.proto.Ciphersuite @@ -233,7 +233,7 @@ func file_internal_proto_s2a_context_s2a_context_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_internal_proto_s2a_context_s2a_context_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_s2a_context_s2a_context_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*S2AContext); i { case 0: return &v.state diff --git a/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a.pb.go b/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a.pb.go index 0a86ebee592..8919232fd88 100644 --- a/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a.pb.go +++ b/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.34.2 // protoc v3.21.12 // source: internal/proto/s2a/s2a.proto @@ -1171,7 +1171,7 @@ func file_internal_proto_s2a_s2a_proto_rawDescGZIP() []byte { } var file_internal_proto_s2a_s2a_proto_msgTypes = make([]protoimpl.MessageInfo, 10) -var file_internal_proto_s2a_s2a_proto_goTypes = []interface{}{ +var file_internal_proto_s2a_s2a_proto_goTypes = []any{ (*AuthenticationMechanism)(nil), // 0: s2a.proto.AuthenticationMechanism (*ClientSessionStartReq)(nil), // 1: s2a.proto.ClientSessionStartReq (*ServerSessionStartReq)(nil), // 2: s2a.proto.ServerSessionStartReq @@ -1226,7 +1226,7 @@ func file_internal_proto_s2a_s2a_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_internal_proto_s2a_s2a_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_s2a_s2a_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*AuthenticationMechanism); i { case 0: return &v.state @@ -1238,7 +1238,7 @@ func file_internal_proto_s2a_s2a_proto_init() { return nil } } - file_internal_proto_s2a_s2a_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_s2a_s2a_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*ClientSessionStartReq); i { case 0: return &v.state @@ -1250,7 +1250,7 @@ func file_internal_proto_s2a_s2a_proto_init() { return nil } } - file_internal_proto_s2a_s2a_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_s2a_s2a_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*ServerSessionStartReq); i { case 0: return &v.state @@ -1262,7 +1262,7 @@ func file_internal_proto_s2a_s2a_proto_init() { return nil } } - file_internal_proto_s2a_s2a_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_s2a_s2a_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*SessionNextReq); i { case 0: return &v.state @@ -1274,7 +1274,7 @@ func file_internal_proto_s2a_s2a_proto_init() { return nil } } - file_internal_proto_s2a_s2a_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_s2a_s2a_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*ResumptionTicketReq); i { case 0: return &v.state @@ -1286,7 +1286,7 @@ func file_internal_proto_s2a_s2a_proto_init() { return nil } } - file_internal_proto_s2a_s2a_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_s2a_s2a_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*SessionReq); i { case 0: return &v.state @@ -1298,7 +1298,7 @@ func file_internal_proto_s2a_s2a_proto_init() { return nil } } - file_internal_proto_s2a_s2a_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_s2a_s2a_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*SessionState); i { case 0: return &v.state @@ -1310,7 +1310,7 @@ func file_internal_proto_s2a_s2a_proto_init() { return nil } } - file_internal_proto_s2a_s2a_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_s2a_s2a_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*SessionResult); i { case 0: return &v.state @@ -1322,7 +1322,7 @@ func file_internal_proto_s2a_s2a_proto_init() { return nil } } - file_internal_proto_s2a_s2a_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_s2a_s2a_proto_msgTypes[8].Exporter = 
func(v any, i int) any { switch v := v.(*SessionStatus); i { case 0: return &v.state @@ -1334,7 +1334,7 @@ func file_internal_proto_s2a_s2a_proto_init() { return nil } } - file_internal_proto_s2a_s2a_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_s2a_s2a_proto_msgTypes[9].Exporter = func(v any, i int) any { switch v := v.(*SessionResp); i { case 0: return &v.state @@ -1347,10 +1347,10 @@ func file_internal_proto_s2a_s2a_proto_init() { } } } - file_internal_proto_s2a_s2a_proto_msgTypes[0].OneofWrappers = []interface{}{ + file_internal_proto_s2a_s2a_proto_msgTypes[0].OneofWrappers = []any{ (*AuthenticationMechanism_Token)(nil), } - file_internal_proto_s2a_s2a_proto_msgTypes[5].OneofWrappers = []interface{}{ + file_internal_proto_s2a_s2a_proto_msgTypes[5].OneofWrappers = []any{ (*SessionReq_ClientStart)(nil), (*SessionReq_ServerStart)(nil), (*SessionReq_Next)(nil), diff --git a/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a_grpc.pb.go b/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a_grpc.pb.go index 0fa582fc874..8fac3841be5 100644 --- a/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a_grpc.pb.go +++ b/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a_grpc.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.3.0 +// - protoc-gen-go-grpc v1.4.0 // - protoc v3.21.12 // source: internal/proto/s2a/s2a.proto @@ -29,8 +29,8 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 +// Requires gRPC-Go v1.62.0 or later. +const _ = grpc.SupportPackageIsVersion8 const ( S2AService_SetUpSession_FullMethodName = "/s2a.proto.S2AService/SetUpSession" @@ -61,11 +61,12 @@ func NewS2AServiceClient(cc grpc.ClientConnInterface) S2AServiceClient { } func (c *s2AServiceClient) SetUpSession(ctx context.Context, opts ...grpc.CallOption) (S2AService_SetUpSessionClient, error) { - stream, err := c.cc.NewStream(ctx, &S2AService_ServiceDesc.Streams[0], S2AService_SetUpSession_FullMethodName, opts...) + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &S2AService_ServiceDesc.Streams[0], S2AService_SetUpSession_FullMethodName, cOpts...) if err != nil { return nil, err } - x := &s2AServiceSetUpSessionClient{stream} + x := &s2AServiceSetUpSessionClient{ClientStream: stream} return x, nil } @@ -129,7 +130,7 @@ func RegisterS2AServiceServer(s grpc.ServiceRegistrar, srv S2AServiceServer) { } func _S2AService_SetUpSession_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(S2AServiceServer).SetUpSession(&s2AServiceSetUpSessionServer{stream}) + return srv.(S2AServiceServer).SetUpSession(&s2AServiceSetUpSessionServer{ServerStream: stream}) } type S2AService_SetUpSessionServer interface { diff --git a/vendor/github.com/google/s2a-go/internal/proto/v2/common_go_proto/common.pb.go b/vendor/github.com/google/s2a-go/internal/proto/v2/common_go_proto/common.pb.go index c84bed97748..e9aa5d14c0d 100644 --- a/vendor/github.com/google/s2a-go/internal/proto/v2/common_go_proto/common.pb.go +++ b/vendor/github.com/google/s2a-go/internal/proto/v2/common_go_proto/common.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.34.2 // protoc v3.21.12 // source: internal/proto/v2/common/common.proto @@ -256,62 +256,218 @@ func (AlpnProtocol) EnumDescriptor() ([]byte, []int) { return file_internal_proto_v2_common_common_proto_rawDescGZIP(), []int{3} } +type Identity struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to IdentityOneof: + // + // *Identity_SpiffeId + // *Identity_Hostname + // *Identity_Uid + // *Identity_Username + // *Identity_GcpId + IdentityOneof isIdentity_IdentityOneof `protobuf_oneof:"identity_oneof"` + // Additional identity-specific attributes. + Attributes map[string]string `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *Identity) Reset() { + *x = Identity{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_common_common_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Identity) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Identity) ProtoMessage() {} + +func (x *Identity) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_common_common_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Identity.ProtoReflect.Descriptor instead. +func (*Identity) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_common_common_proto_rawDescGZIP(), []int{0} +} + +func (m *Identity) GetIdentityOneof() isIdentity_IdentityOneof { + if m != nil { + return m.IdentityOneof + } + return nil +} + +func (x *Identity) GetSpiffeId() string { + if x, ok := x.GetIdentityOneof().(*Identity_SpiffeId); ok { + return x.SpiffeId + } + return "" +} + +func (x *Identity) GetHostname() string { + if x, ok := x.GetIdentityOneof().(*Identity_Hostname); ok { + return x.Hostname + } + return "" +} + +func (x *Identity) GetUid() string { + if x, ok := x.GetIdentityOneof().(*Identity_Uid); ok { + return x.Uid + } + return "" +} + +func (x *Identity) GetUsername() string { + if x, ok := x.GetIdentityOneof().(*Identity_Username); ok { + return x.Username + } + return "" +} + +func (x *Identity) GetGcpId() string { + if x, ok := x.GetIdentityOneof().(*Identity_GcpId); ok { + return x.GcpId + } + return "" +} + +func (x *Identity) GetAttributes() map[string]string { + if x != nil { + return x.Attributes + } + return nil +} + +type isIdentity_IdentityOneof interface { + isIdentity_IdentityOneof() +} + +type Identity_SpiffeId struct { + // The SPIFFE ID of a connection endpoint. + SpiffeId string `protobuf:"bytes,1,opt,name=spiffe_id,json=spiffeId,proto3,oneof"` +} + +type Identity_Hostname struct { + // The hostname of a connection endpoint. + Hostname string `protobuf:"bytes,2,opt,name=hostname,proto3,oneof"` +} + +type Identity_Uid struct { + // The UID of a connection endpoint. + Uid string `protobuf:"bytes,4,opt,name=uid,proto3,oneof"` +} + +type Identity_Username struct { + // The username of a connection endpoint. + Username string `protobuf:"bytes,5,opt,name=username,proto3,oneof"` +} + +type Identity_GcpId struct { + // The GCP ID of a connection endpoint. 
+ GcpId string `protobuf:"bytes,6,opt,name=gcp_id,json=gcpId,proto3,oneof"` +} + +func (*Identity_SpiffeId) isIdentity_IdentityOneof() {} + +func (*Identity_Hostname) isIdentity_IdentityOneof() {} + +func (*Identity_Uid) isIdentity_IdentityOneof() {} + +func (*Identity_Username) isIdentity_IdentityOneof() {} + +func (*Identity_GcpId) isIdentity_IdentityOneof() {} + var File_internal_proto_v2_common_common_proto protoreflect.FileDescriptor var file_internal_proto_v2_common_common_proto_rawDesc = []byte{ 0x0a, 0x25, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2a, 0xee, 0x02, 0x0a, 0x0b, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, - 0x73, 0x75, 0x69, 0x74, 0x65, 0x12, 0x1b, 0x0a, 0x17, 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, - 0x55, 0x49, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, - 0x10, 0x00, 0x12, 0x33, 0x0a, 0x2f, 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, 0x54, - 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x57, 0x49, - 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x31, 0x32, 0x38, 0x5f, 0x47, 0x43, 0x4d, 0x5f, 0x53, - 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x01, 0x12, 0x33, 0x0a, 0x2f, 0x43, 0x49, 0x50, 0x48, 0x45, - 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, 0x45, 0x43, 0x44, - 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36, 0x5f, - 0x47, 0x43, 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x02, 0x12, 0x39, 0x0a, 0x35, - 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, - 0x45, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x43, 0x48, 0x41, - 0x43, 0x48, 0x41, 0x32, 0x30, 0x5f, 0x50, 0x4f, 0x4c, 0x59, 0x31, 0x33, 0x30, 0x35, 0x5f, 0x53, - 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x03, 0x12, 0x31, 0x0a, 0x2d, 0x43, 0x49, 0x50, 0x48, 0x45, - 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, 0x52, 0x53, 0x41, - 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x31, 0x32, 0x38, 0x5f, 0x47, 0x43, - 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x04, 0x12, 0x31, 0x0a, 0x2d, 0x43, 0x49, + 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x22, 0xab, 0x02, 0x0a, 0x08, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x74, 0x79, 0x12, 0x1d, 0x0a, 0x09, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x49, + 0x64, 0x12, 0x1c, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x12, 0x0a, 0x03, 0x75, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, + 0x75, 0x69, 0x64, 0x12, 0x1c, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x17, 0x0a, 0x06, 0x67, 0x63, 0x70, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x09, 0x48, 0x00, 0x52, 0x05, 0x67, 0x63, 0x70, 0x49, 0x64, 0x12, 0x46, 0x0a, 0x0a, 0x61, 0x74, + 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, + 0x2e, 0x73, 
0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x49, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, + 0x65, 0x73, 0x1a, 0x3d, 0x0a, 0x0f, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x42, 0x10, 0x0a, 0x0e, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x6f, 0x6e, + 0x65, 0x6f, 0x66, 0x2a, 0xee, 0x02, 0x0a, 0x0b, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, + 0x69, 0x74, 0x65, 0x12, 0x1b, 0x0a, 0x17, 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, + 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, + 0x12, 0x33, 0x0a, 0x2f, 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, + 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, + 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x31, 0x32, 0x38, 0x5f, 0x47, 0x43, 0x4d, 0x5f, 0x53, 0x48, 0x41, + 0x32, 0x35, 0x36, 0x10, 0x01, 0x12, 0x33, 0x0a, 0x2f, 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, + 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, + 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36, 0x5f, 0x47, 0x43, + 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x02, 0x12, 0x39, 0x0a, 0x35, 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, - 0x52, 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36, - 0x5f, 0x47, 0x43, 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x05, 0x12, 0x37, 0x0a, - 0x33, 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, - 0x48, 0x45, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x43, 0x48, 0x41, 0x43, - 0x48, 0x41, 0x32, 0x30, 0x5f, 0x50, 0x4f, 0x4c, 0x59, 0x31, 0x33, 0x30, 0x35, 0x5f, 0x53, 0x48, - 0x41, 0x32, 0x35, 0x36, 0x10, 0x06, 0x2a, 0x7d, 0x0a, 0x0a, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, - 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, - 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, - 0x5f, 0x31, 0x5f, 0x30, 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, - 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x31, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x54, - 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x32, 0x10, 0x03, - 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, - 0x31, 0x5f, 0x33, 0x10, 0x04, 0x2a, 0x69, 0x0a, 0x0e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x53, 0x69, 0x64, 0x65, 0x12, 0x1f, 0x0a, 0x1b, 0x43, 0x4f, 0x4e, 0x4e, 0x45, - 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, - 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1a, 0x0a, 0x16, 0x43, 0x4f, 0x4e, 0x4e, - 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 
0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, - 0x4e, 0x54, 0x10, 0x01, 0x12, 0x1a, 0x0a, 0x16, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x49, - 0x4f, 0x4e, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x02, - 0x2a, 0x79, 0x0a, 0x0c, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, - 0x12, 0x1d, 0x0a, 0x19, 0x41, 0x4c, 0x50, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, - 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, - 0x16, 0x0a, 0x12, 0x41, 0x4c, 0x50, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, - 0x5f, 0x47, 0x52, 0x50, 0x43, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x41, 0x4c, 0x50, 0x4e, 0x5f, - 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x48, 0x54, 0x54, 0x50, 0x32, 0x10, 0x02, - 0x12, 0x19, 0x0a, 0x15, 0x41, 0x4c, 0x50, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, - 0x4c, 0x5f, 0x48, 0x54, 0x54, 0x50, 0x31, 0x5f, 0x31, 0x10, 0x03, 0x42, 0x39, 0x5a, 0x37, 0x67, - 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x67, 0x6f, - 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x43, 0x48, 0x41, 0x43, 0x48, + 0x41, 0x32, 0x30, 0x5f, 0x50, 0x4f, 0x4c, 0x59, 0x31, 0x33, 0x30, 0x35, 0x5f, 0x53, 0x48, 0x41, + 0x32, 0x35, 0x36, 0x10, 0x03, 0x12, 0x31, 0x0a, 0x2d, 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, + 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x57, + 0x49, 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x31, 0x32, 0x38, 0x5f, 0x47, 0x43, 0x4d, 0x5f, + 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x04, 0x12, 0x31, 0x0a, 0x2d, 0x43, 0x49, 0x50, 0x48, + 0x45, 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, 0x52, 0x53, + 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36, 0x5f, 0x47, + 0x43, 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x05, 0x12, 0x37, 0x0a, 0x33, 0x43, + 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, + 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x43, 0x48, 0x41, 0x43, 0x48, 0x41, + 0x32, 0x30, 0x5f, 0x50, 0x4f, 0x4c, 0x59, 0x31, 0x33, 0x30, 0x35, 0x5f, 0x53, 0x48, 0x41, 0x32, + 0x35, 0x36, 0x10, 0x06, 0x2a, 0x7d, 0x0a, 0x0a, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, + 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, + 0x13, 0x0a, 0x0f, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x31, + 0x5f, 0x30, 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, + 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x31, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x4c, 0x53, + 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x32, 0x10, 0x03, 0x12, 0x13, + 0x0a, 0x0f, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, + 0x33, 0x10, 0x04, 0x2a, 0x69, 0x0a, 0x0e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x53, 0x69, 0x64, 0x65, 0x12, 0x1f, 0x0a, 0x1b, 0x43, 0x4f, 0x4e, 
0x4e, 0x45, 0x43, 0x54, + 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, + 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1a, 0x0a, 0x16, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, + 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, + 0x10, 0x01, 0x12, 0x1a, 0x0a, 0x16, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, + 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x02, 0x2a, 0x79, + 0x0a, 0x0c, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x1d, + 0x0a, 0x19, 0x41, 0x4c, 0x50, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, + 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x16, 0x0a, + 0x12, 0x41, 0x4c, 0x50, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x47, + 0x52, 0x50, 0x43, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x41, 0x4c, 0x50, 0x4e, 0x5f, 0x50, 0x52, + 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x48, 0x54, 0x54, 0x50, 0x32, 0x10, 0x02, 0x12, 0x19, + 0x0a, 0x15, 0x41, 0x4c, 0x50, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, + 0x48, 0x54, 0x54, 0x50, 0x31, 0x5f, 0x31, 0x10, 0x03, 0x42, 0x39, 0x5a, 0x37, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, + 0x32, 0x61, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x67, 0x6f, 0x5f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -327,18 +483,22 @@ func file_internal_proto_v2_common_common_proto_rawDescGZIP() []byte { } var file_internal_proto_v2_common_common_proto_enumTypes = make([]protoimpl.EnumInfo, 4) -var file_internal_proto_v2_common_common_proto_goTypes = []interface{}{ +var file_internal_proto_v2_common_common_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_internal_proto_v2_common_common_proto_goTypes = []any{ (Ciphersuite)(0), // 0: s2a.proto.v2.Ciphersuite (TLSVersion)(0), // 1: s2a.proto.v2.TLSVersion (ConnectionSide)(0), // 2: s2a.proto.v2.ConnectionSide (AlpnProtocol)(0), // 3: s2a.proto.v2.AlpnProtocol + (*Identity)(nil), // 4: s2a.proto.v2.Identity + nil, // 5: s2a.proto.v2.Identity.AttributesEntry } var file_internal_proto_v2_common_common_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name + 5, // 0: s2a.proto.v2.Identity.attributes:type_name -> s2a.proto.v2.Identity.AttributesEntry + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name } func init() { file_internal_proto_v2_common_common_proto_init() } @@ -346,19 +506,41 @@ func file_internal_proto_v2_common_common_proto_init() { if File_internal_proto_v2_common_common_proto != nil { return } + if !protoimpl.UnsafeEnabled { + file_internal_proto_v2_common_common_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*Identity); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + 
return nil + } + } + } + file_internal_proto_v2_common_common_proto_msgTypes[0].OneofWrappers = []any{ + (*Identity_SpiffeId)(nil), + (*Identity_Hostname)(nil), + (*Identity_Uid)(nil), + (*Identity_Username)(nil), + (*Identity_GcpId)(nil), + } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_internal_proto_v2_common_common_proto_rawDesc, NumEnums: 4, - NumMessages: 0, + NumMessages: 2, NumExtensions: 0, NumServices: 0, }, GoTypes: file_internal_proto_v2_common_common_proto_goTypes, DependencyIndexes: file_internal_proto_v2_common_common_proto_depIdxs, EnumInfos: file_internal_proto_v2_common_common_proto_enumTypes, + MessageInfos: file_internal_proto_v2_common_common_proto_msgTypes, }.Build() File_internal_proto_v2_common_common_proto = out.File file_internal_proto_v2_common_common_proto_rawDesc = nil diff --git a/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto/s2a_context.pb.go b/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto/s2a_context.pb.go index b7fd871c7a7..418331a4bde 100644 --- a/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto/s2a_context.pb.go +++ b/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto/s2a_context.pb.go @@ -14,14 +14,14 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.34.2 // protoc v3.21.12 // source: internal/proto/v2/s2a_context/s2a_context.proto package s2a_context_go_proto import ( - common_go_proto "github.com/google/s2a-go/internal/proto/common_go_proto" + common_go_proto "github.com/google/s2a-go/internal/proto/v2/common_go_proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -64,7 +64,7 @@ type S2AContext struct { // certificate chain was NOT validated successfully. PeerCertificateChainFingerprints []string `protobuf:"bytes,4,rep,name=peer_certificate_chain_fingerprints,json=peerCertificateChainFingerprints,proto3" json:"peer_certificate_chain_fingerprints,omitempty"` // The local identity used during session setup. - LocalIdentity *common_go_proto.Identity `protobuf:"bytes,5,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` + LocalIdentity *common_go_proto.Identity `protobuf:"bytes,9,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` // The SHA256 hash of the DER-encoding of the local leaf certificate used in // the handshake. 
LocalLeafCertFingerprint []byte `protobuf:"bytes,6,opt,name=local_leaf_cert_fingerprint,json=localLeafCertFingerprint,proto3" json:"local_leaf_cert_fingerprint,omitempty"` @@ -151,35 +151,36 @@ var file_internal_proto_v2_s2a_context_s2a_context_proto_rawDesc = []byte{ 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x1a, - 0x22, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, - 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x22, 0xd9, 0x02, 0x0a, 0x0a, 0x53, 0x32, 0x41, 0x43, 0x6f, 0x6e, 0x74, 0x65, - 0x78, 0x74, 0x12, 0x2d, 0x0a, 0x13, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, - 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x10, 0x6c, 0x65, 0x61, 0x66, 0x43, 0x65, 0x72, 0x74, 0x53, 0x70, 0x69, 0x66, 0x66, 0x65, 0x49, - 0x64, 0x12, 0x24, 0x0a, 0x0e, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x75, - 0x72, 0x69, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x6c, 0x65, 0x61, 0x66, 0x43, - 0x65, 0x72, 0x74, 0x55, 0x72, 0x69, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6c, 0x65, 0x61, 0x66, 0x5f, - 0x63, 0x65, 0x72, 0x74, 0x5f, 0x64, 0x6e, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x03, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x10, 0x6c, 0x65, 0x61, 0x66, 0x43, 0x65, 0x72, 0x74, 0x44, 0x6e, 0x73, - 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x4d, 0x0a, 0x23, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, - 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, - 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x20, 0x70, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, - 0x69, 0x6e, 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, - 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, - 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, - 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, - 0x12, 0x3d, 0x0a, 0x1b, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x63, - 0x65, 0x72, 0x74, 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x18, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x4c, 0x65, 0x61, 0x66, - 0x43, 0x65, 0x72, 0x74, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x42, - 0x3e, 0x5a, 0x3c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, - 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, - 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x25, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x2e, 0x70, 
0x72, 0x6f, 0x74, 0x6f, 0x22, 0xee, 0x02, 0x0a, 0x0a, 0x53, 0x32, 0x41, 0x43, 0x6f, + 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x2d, 0x0a, 0x13, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x63, 0x65, + 0x72, 0x74, 0x5f, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x10, 0x6c, 0x65, 0x61, 0x66, 0x43, 0x65, 0x72, 0x74, 0x53, 0x70, 0x69, 0x66, + 0x66, 0x65, 0x49, 0x64, 0x12, 0x24, 0x0a, 0x0e, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x63, 0x65, 0x72, + 0x74, 0x5f, 0x75, 0x72, 0x69, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x6c, 0x65, + 0x61, 0x66, 0x43, 0x65, 0x72, 0x74, 0x55, 0x72, 0x69, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6c, 0x65, + 0x61, 0x66, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x64, 0x6e, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x73, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x6c, 0x65, 0x61, 0x66, 0x43, 0x65, 0x72, 0x74, + 0x44, 0x6e, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x4d, 0x0a, 0x23, 0x70, 0x65, 0x65, 0x72, + 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, + 0x69, 0x6e, 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x73, 0x18, + 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x20, 0x70, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x46, 0x69, 0x6e, 0x67, 0x65, + 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x73, 0x12, 0x3d, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, + 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x16, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x49, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x3d, 0x0a, 0x1b, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, + 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, + 0x70, 0x72, 0x69, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x18, 0x6c, 0x6f, 0x63, + 0x61, 0x6c, 0x4c, 0x65, 0x61, 0x66, 0x43, 0x65, 0x72, 0x74, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, + 0x70, 0x72, 0x69, 0x6e, 0x74, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x07, 0x10, + 0x08, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x42, 0x3e, 0x5a, 0x3c, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, 0x61, + 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x67, + 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -195,12 +196,12 @@ func file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescGZIP() []byte { } var file_internal_proto_v2_s2a_context_s2a_context_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_internal_proto_v2_s2a_context_s2a_context_proto_goTypes = []interface{}{ +var file_internal_proto_v2_s2a_context_s2a_context_proto_goTypes = []any{ (*S2AContext)(nil), // 0: s2a.proto.v2.S2AContext - (*common_go_proto.Identity)(nil), // 1: s2a.proto.Identity + (*common_go_proto.Identity)(nil), // 1: s2a.proto.v2.Identity } var file_internal_proto_v2_s2a_context_s2a_context_proto_depIdxs = []int32{ - 1, // 0: s2a.proto.v2.S2AContext.local_identity:type_name -> s2a.proto.Identity + 1, // 0: s2a.proto.v2.S2AContext.local_identity:type_name -> s2a.proto.v2.Identity 1, // [1:1] is the sub-list for method 
output_type 1, // [1:1] is the sub-list for method input_type 1, // [1:1] is the sub-list for extension type_name @@ -214,7 +215,7 @@ func file_internal_proto_v2_s2a_context_s2a_context_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_internal_proto_v2_s2a_context_s2a_context_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_internal_proto_v2_s2a_context_s2a_context_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*S2AContext); i { case 0: return &v.state diff --git a/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go b/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go index e843450c7ed..548f31da2d5 100644 --- a/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go +++ b/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go @@ -14,14 +14,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.34.2 // protoc v3.21.12 // source: internal/proto/v2/s2a/s2a.proto package s2a_go_proto import ( - common_go_proto1 "github.com/google/s2a-go/internal/proto/common_go_proto" common_go_proto "github.com/google/s2a-go/internal/proto/v2/common_go_proto" s2a_context_go_proto "github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" @@ -292,6 +291,12 @@ const ( // The connect-to-Google verification mode uses the trust bundle for // connecting to Google, e.g. *.mtls.googleapis.com endpoints. ValidatePeerCertificateChainReq_CONNECT_TO_GOOGLE ValidatePeerCertificateChainReq_VerificationMode = 2 + // Internal use only. + ValidatePeerCertificateChainReq_RESERVED_CUSTOM_VERIFICATION_MODE_3 ValidatePeerCertificateChainReq_VerificationMode = 3 + // Internal use only. + ValidatePeerCertificateChainReq_RESERVED_CUSTOM_VERIFICATION_MODE_4 ValidatePeerCertificateChainReq_VerificationMode = 4 + // Internal use only. + ValidatePeerCertificateChainReq_RESERVED_CUSTOM_VERIFICATION_MODE_5 ValidatePeerCertificateChainReq_VerificationMode = 5 ) // Enum value maps for ValidatePeerCertificateChainReq_VerificationMode. @@ -300,11 +305,17 @@ var ( 0: "UNSPECIFIED", 1: "SPIFFE", 2: "CONNECT_TO_GOOGLE", + 3: "RESERVED_CUSTOM_VERIFICATION_MODE_3", + 4: "RESERVED_CUSTOM_VERIFICATION_MODE_4", + 5: "RESERVED_CUSTOM_VERIFICATION_MODE_5", } ValidatePeerCertificateChainReq_VerificationMode_value = map[string]int32{ - "UNSPECIFIED": 0, - "SPIFFE": 1, - "CONNECT_TO_GOOGLE": 2, + "UNSPECIFIED": 0, + "SPIFFE": 1, + "CONNECT_TO_GOOGLE": 2, + "RESERVED_CUSTOM_VERIFICATION_MODE_3": 3, + "RESERVED_CUSTOM_VERIFICATION_MODE_4": 4, + "RESERVED_CUSTOM_VERIFICATION_MODE_5": 5, } ) @@ -454,7 +465,7 @@ type AuthenticationMechanism struct { // mechanism. Otherwise, S2A assumes that the authentication mechanism is // associated with the default identity. If the default identity cannot be // determined, the request is rejected. 
- Identity *common_go_proto1.Identity `protobuf:"bytes,1,opt,name=identity,proto3" json:"identity,omitempty"` + Identity *common_go_proto.Identity `protobuf:"bytes,3,opt,name=identity,proto3" json:"identity,omitempty"` // Types that are assignable to MechanismOneof: // // *AuthenticationMechanism_Token @@ -493,7 +504,7 @@ func (*AuthenticationMechanism) Descriptor() ([]byte, []int) { return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{1} } -func (x *AuthenticationMechanism) GetIdentity() *common_go_proto1.Identity { +func (x *AuthenticationMechanism) GetIdentity() *common_go_proto.Identity { if x != nil { return x.Identity } @@ -1185,7 +1196,7 @@ type SessionReq struct { // identity is not populated, S2A will try to deduce the managed identity to // use from the SNI extension. If that also fails, S2A uses the default // identity (if one exists). - LocalIdentity *common_go_proto1.Identity `protobuf:"bytes,1,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` + LocalIdentity *common_go_proto.Identity `protobuf:"bytes,7,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` // The authentication mechanisms that the application wishes to use to // authenticate to S2A, ordered by preference. S2A will always use the first // authentication mechanism that matches the managed identity. @@ -1231,7 +1242,7 @@ func (*SessionReq) Descriptor() ([]byte, []int) { return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{11} } -func (x *SessionReq) GetLocalIdentity() *common_go_proto1.Identity { +func (x *SessionReq) GetLocalIdentity() *common_go_proto.Identity { if x != nil { return x.LocalIdentity } @@ -1790,358 +1801,365 @@ var file_internal_proto_v2_s2a_s2a_proto_rawDesc = []byte{ 0x0a, 0x1f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x1a, - 0x22, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, - 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x1a, 0x25, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, - 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2f, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, - 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, 0x6f, - 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x87, 0x01, 0x0a, 0x0a, - 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x36, 0x0a, 0x17, 0x65, 0x6e, - 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x6e, 0x65, 0x67, 0x6f, 0x74, 0x69, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x65, 0x6e, 0x61, - 0x62, 0x6c, 0x65, 0x41, 0x6c, 0x70, 0x6e, 0x4e, 0x65, 0x67, 0x6f, 0x74, 0x69, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0e, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x73, 0x32, 0x61, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x72, - 0x6f, 0x74, 
0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x0d, 0x61, 0x6c, 0x70, 0x6e, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x22, 0x75, 0x0a, 0x17, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, - 0x12, 0x2f, 0x0a, 0x08, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, - 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x08, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, - 0x79, 0x12, 0x16, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x48, 0x00, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x11, 0x0a, 0x0f, 0x6d, 0x65, 0x63, - 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x36, 0x0a, 0x06, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, - 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x74, - 0x61, 0x69, 0x6c, 0x73, 0x22, 0x71, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x45, - 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x69, 0x64, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x53, 0x69, 0x64, 0x65, 0x52, 0x0e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x53, 0x69, 0x64, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x6e, 0x69, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x03, 0x73, 0x6e, 0x69, 0x22, 0xf1, 0x0b, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x54, - 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x73, 0x70, 0x12, 0x78, 0x0a, 0x18, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x6c, - 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x43, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x16, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x6c, 0x73, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x78, 0x0a, - 0x18, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x3c, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, - 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x6c, 0x73, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, - 0x16, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 
0x6e, 0x1a, 0xcf, 0x02, 0x0a, 0x16, 0x43, 0x6c, 0x69, 0x65, + 0x25, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, + 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x87, 0x01, 0x0a, 0x0a, 0x41, 0x6c, 0x70, 0x6e, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x36, 0x0a, 0x17, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, + 0x5f, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x6e, 0x65, 0x67, 0x6f, 0x74, 0x69, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x41, + 0x6c, 0x70, 0x6e, 0x4e, 0x65, 0x67, 0x6f, 0x74, 0x69, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, + 0x0a, 0x0e, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x6f, 0x6c, 0x52, 0x0d, 0x61, 0x6c, 0x70, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, + 0x73, 0x22, 0x7e, 0x0a, 0x17, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x12, 0x32, 0x0a, 0x08, + 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, + 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x49, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x08, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, + 0x12, 0x16, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, + 0x00, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x11, 0x0a, 0x0f, 0x6d, 0x65, 0x63, 0x68, + 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x4a, 0x04, 0x08, 0x01, 0x10, + 0x02, 0x22, 0x36, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, + 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, + 0x18, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x22, 0x71, 0x0a, 0x16, 0x47, 0x65, 0x74, + 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x71, 0x12, 0x45, 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x73, 0x69, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x73, + 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x69, 0x64, 0x65, 0x52, 0x0e, 0x63, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x69, 0x64, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x6e, + 0x69, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x73, 0x6e, 0x69, 0x22, 0xf1, 0x0b, 0x0a, + 0x17, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x78, 0x0a, 0x18, 0x63, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 
0x66, 0x69, 0x67, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x73, 0x32, 0x61, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, + 0x70, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x16, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x63, - 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x12, - 0x40, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, - 0x74, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x73, 0x32, 0x61, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, - 0x75, 0x69, 0x74, 0x65, 0x52, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, - 0x65, 0x73, 0x12, 0x39, 0x0a, 0x0b, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x52, 0x0a, 0x61, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4a, 0x04, 0x08, - 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x1a, 0xfa, 0x06, 0x0a, 0x16, 0x53, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, - 0x6e, 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 
0x2e, 0x73, - 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, 0x53, 0x56, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x54, 0x6c, 0x73, 0x56, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, - 0x75, 0x69, 0x74, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x73, 0x32, - 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x69, 0x70, 0x68, 0x65, - 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, 0x52, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, - 0x69, 0x74, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x16, 0x74, 0x6c, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x75, - 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x74, 0x6c, 0x73, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x93, 0x01, 0x0a, 0x1a, 0x72, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x65, - 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x55, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, - 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x6c, 0x73, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x18, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, - 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, - 0x12, 0x3c, 0x0a, 0x1b, 0x6d, 0x61, 0x78, 0x5f, 0x6f, 0x76, 0x65, 0x72, 0x68, 0x65, 0x61, 0x64, - 0x5f, 0x6f, 0x66, 0x5f, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x61, 0x65, 0x61, 0x64, 0x18, - 0x09, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x17, 0x6d, 0x61, 0x78, 0x4f, 0x76, 0x65, 0x72, 0x68, 0x65, - 0x61, 0x64, 0x4f, 0x66, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x65, 0x61, 0x64, 0x12, 0x39, - 0x0a, 0x0b, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x0b, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x76, 0x32, 0x2e, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0a, 0x61, - 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x9e, 0x02, 0x0a, 0x18, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, - 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x23, 0x0a, 0x1f, 0x44, 0x4f, 0x4e, 0x54, 0x5f, - 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x43, - 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x10, 0x01, 0x12, 0x2e, 0x0a, 0x2a, - 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x43, - 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x42, 0x55, 0x54, 0x5f, 0x44, - 0x4f, 0x4e, 0x54, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x02, 0x12, 0x29, 0x0a, 0x25, - 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x43, - 0x45, 0x52, 
0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x56, - 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x03, 0x12, 0x3a, 0x0a, 0x36, 0x52, 0x45, 0x51, 0x55, 0x45, - 0x53, 0x54, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x5f, 0x43, - 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, - 0x45, 0x5f, 0x42, 0x55, 0x54, 0x5f, 0x44, 0x4f, 0x4e, 0x54, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, - 0x59, 0x10, 0x04, 0x12, 0x35, 0x0a, 0x31, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x41, - 0x4e, 0x44, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, - 0x54, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x41, 0x4e, - 0x44, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, - 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x42, 0x13, 0x0a, 0x11, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xb0, 0x03, 0x0a, 0x1d, - 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, - 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x5d, 0x0a, - 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x3f, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, - 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, - 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x50, 0x72, + 0x6f, 0x6e, 0x12, 0x78, 0x0a, 0x18, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x74, 0x6c, 0x73, + 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x53, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x16, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x6c, 0x73, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xcf, 0x02, 0x0a, + 0x16, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, + 0x68, 0x61, 0x69, 0x6e, 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x5f, 0x74, 0x6c, 0x73, 0x5f, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, + 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, 0x53, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x54, 0x6c, 0x73, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x6c, + 0x73, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, + 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 
0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x54, 0x6c, + 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0c, 0x63, 0x69, 0x70, 0x68, + 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x19, + 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x69, + 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, 0x52, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, + 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x0b, 0x61, 0x6c, 0x70, 0x6e, 0x5f, + 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x73, + 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6c, 0x70, 0x6e, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0a, 0x61, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x1a, 0xfa, + 0x06, 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x5f, 0x74, 0x6c, + 0x73, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, + 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x54, 0x6c, + 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, + 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, + 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x61, 0x78, + 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0c, 0x63, 0x69, + 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0e, + 0x32, 0x19, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, + 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, 0x52, 0x0c, 0x63, 0x69, 0x70, + 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x16, 0x74, 0x6c, 0x73, + 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x61, 0x62, + 0x6c, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x74, 0x6c, 0x73, 0x52, 0x65, + 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, + 0x93, 0x01, 0x0a, 0x1a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x55, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x53, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 
0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x18, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x3c, 0x0a, 0x1b, 0x6d, 0x61, 0x78, 0x5f, 0x6f, 0x76, 0x65, + 0x72, 0x68, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x66, 0x5f, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x5f, + 0x61, 0x65, 0x61, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x17, 0x6d, 0x61, 0x78, 0x4f, + 0x76, 0x65, 0x72, 0x68, 0x65, 0x61, 0x64, 0x4f, 0x66, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x41, + 0x65, 0x61, 0x64, 0x12, 0x39, 0x0a, 0x0b, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x52, 0x0a, 0x61, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x9e, + 0x02, 0x0a, 0x18, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55, + 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x23, 0x0a, 0x1f, + 0x44, 0x4f, 0x4e, 0x54, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x43, 0x4c, 0x49, + 0x45, 0x4e, 0x54, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x10, + 0x01, 0x12, 0x2e, 0x0a, 0x2a, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x43, 0x4c, 0x49, + 0x45, 0x4e, 0x54, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, + 0x42, 0x55, 0x54, 0x5f, 0x44, 0x4f, 0x4e, 0x54, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, + 0x02, 0x12, 0x29, 0x0a, 0x25, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x43, 0x4c, 0x49, + 0x45, 0x4e, 0x54, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, + 0x41, 0x4e, 0x44, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x03, 0x12, 0x3a, 0x0a, 0x36, + 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x52, 0x45, 0x51, 0x55, + 0x49, 0x52, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, + 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x42, 0x55, 0x54, 0x5f, 0x44, 0x4f, 0x4e, 0x54, 0x5f, + 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x04, 0x12, 0x35, 0x0a, 0x31, 0x52, 0x45, 0x51, 0x55, + 0x45, 0x53, 0x54, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x5f, + 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, + 0x54, 0x45, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x05, 0x4a, + 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x42, 0x13, 0x0a, 0x11, 0x74, + 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x22, 0xb0, 0x03, 0x0a, 0x1d, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, + 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x71, 0x12, 0x5d, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, + 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 
0x6e, 0x52, + 0x65, 0x71, 0x2e, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x51, 0x0a, 0x13, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x61, + 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, + 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x69, + 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, + 0x52, 0x12, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72, + 0x69, 0x74, 0x68, 0x6d, 0x12, 0x1d, 0x0a, 0x09, 0x72, 0x61, 0x77, 0x5f, 0x62, 0x79, 0x74, 0x65, + 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x08, 0x72, 0x61, 0x77, 0x42, 0x79, + 0x74, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0d, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x64, 0x69, + 0x67, 0x65, 0x73, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0c, 0x73, 0x68, + 0x61, 0x32, 0x35, 0x36, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0d, 0x73, 0x68, + 0x61, 0x33, 0x38, 0x34, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0c, 0x48, 0x00, 0x52, 0x0c, 0x73, 0x68, 0x61, 0x33, 0x38, 0x34, 0x44, 0x69, 0x67, 0x65, 0x73, + 0x74, 0x12, 0x25, 0x0a, 0x0d, 0x73, 0x68, 0x61, 0x35, 0x31, 0x32, 0x5f, 0x64, 0x69, 0x67, 0x65, + 0x73, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0c, 0x73, 0x68, 0x61, 0x35, + 0x31, 0x32, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x22, 0x3d, 0x0a, 0x13, 0x50, 0x72, 0x69, 0x76, + 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, + 0x12, 0x08, 0x0a, 0x04, 0x53, 0x49, 0x47, 0x4e, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45, + 0x43, 0x52, 0x59, 0x50, 0x54, 0x10, 0x02, 0x42, 0x0a, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, + 0x74, 0x65, 0x73, 0x22, 0x3d, 0x0a, 0x1e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x51, 0x0a, 0x13, - 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, - 0x74, 0x68, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, 0x73, 0x32, 0x61, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x52, 0x12, 0x73, 0x69, 0x67, - 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, - 0x1d, 0x0a, 0x09, 0x72, 0x61, 0x77, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x0c, 0x48, 0x00, 0x52, 0x08, 0x72, 0x61, 0x77, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x25, - 0x0a, 0x0d, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0c, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x44, - 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0d, 0x73, 0x68, 0x61, 0x33, 0x38, 0x34, 0x5f, - 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0c, - 0x73, 0x68, 0x61, 0x33, 0x38, 0x34, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0d, - 0x73, 0x68, 
0x61, 0x35, 0x31, 0x32, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x07, 0x20, - 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0c, 0x73, 0x68, 0x61, 0x35, 0x31, 0x32, 0x44, 0x69, 0x67, - 0x65, 0x73, 0x74, 0x22, 0x3d, 0x0a, 0x13, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, - 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, - 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x53, - 0x49, 0x47, 0x4e, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45, 0x43, 0x52, 0x59, 0x50, 0x54, - 0x10, 0x02, 0x42, 0x0a, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x22, 0x3d, - 0x0a, 0x1e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, - 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, - 0x12, 0x1b, 0x0a, 0x09, 0x6f, 0x75, 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x08, 0x6f, 0x75, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xe7, 0x01, - 0x0a, 0x20, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x71, 0x12, 0x63, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x45, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, - 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x6f, 0x70, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, - 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, - 0x65, 0x73, 0x22, 0x43, 0x0a, 0x16, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0f, 0x0a, 0x0b, - 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0b, 0x0a, - 0x07, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45, - 0x43, 0x52, 0x59, 0x50, 0x54, 0x10, 0x02, 0x22, 0x40, 0x0a, 0x21, 0x4f, 0x66, 0x66, 0x6c, 0x6f, - 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, - 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1b, 0x0a, 0x09, - 0x6f, 0x75, 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x08, 0x6f, 0x75, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xf8, 0x04, 0x0a, 0x1f, 0x56, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x52, 0x0a, - 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3e, 0x2e, 0x73, 0x32, + 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1b, 0x0a, 0x09, 0x6f, 0x75, 0x74, 0x5f, 0x62, 0x79, 0x74, + 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x6f, 0x75, 0x74, 0x42, 0x79, 0x74, + 0x65, 0x73, 0x22, 0xe7, 0x01, 0x0a, 0x20, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, + 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 
0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x63, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x45, 0x2e, 0x73, 0x32, 0x61, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, + 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6d, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x08, + 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, + 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x43, 0x0a, 0x16, 0x52, 0x65, 0x73, 0x75, 0x6d, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, + 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54, 0x10, 0x01, 0x12, + 0x0b, 0x0a, 0x07, 0x44, 0x45, 0x43, 0x52, 0x59, 0x50, 0x54, 0x10, 0x02, 0x22, 0x40, 0x0a, 0x21, + 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, + 0x70, 0x12, 0x1b, 0x0a, 0x09, 0x6f, 0x75, 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x6f, 0x75, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xf4, + 0x05, 0x0a, 0x1f, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, + 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, + 0x65, 0x71, 0x12, 0x52, 0x0a, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x3e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, + 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x2e, + 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, + 0x52, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x12, 0x5b, 0x0a, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x5f, 0x70, 0x65, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, - 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6d, 0x6f, 0x64, - 0x65, 0x12, 0x5b, 0x0a, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x65, 0x65, 0x72, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, - 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, - 0x69, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x65, 0x65, 0x72, - 0x48, 0x00, 0x52, 0x0a, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 
0x65, 0x65, 0x72, 0x12, 0x5b, - 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, - 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, - 0x65, 0x71, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x50, 0x65, 0x65, 0x72, 0x48, 0x00, 0x52, - 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x50, 0x65, 0x65, 0x72, 0x1a, 0x39, 0x0a, 0x0a, 0x43, - 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x65, 0x65, 0x72, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, - 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0c, 0x52, 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x1a, 0xb5, 0x01, 0x0a, 0x0a, 0x53, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x50, 0x65, 0x65, 0x72, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, - 0x52, 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, - 0x69, 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x68, 0x6f, 0x73, - 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x51, 0x0a, 0x25, 0x73, - 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x75, 0x6e, 0x72, 0x65, 0x73, 0x74, - 0x72, 0x69, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x22, 0x73, 0x65, 0x72, 0x69, - 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x55, 0x6e, 0x72, 0x65, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, - 0x65, 0x64, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x46, - 0x0a, 0x10, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, - 0x64, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, - 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x50, 0x49, 0x46, 0x46, 0x45, 0x10, 0x01, 0x12, - 0x15, 0x0a, 0x11, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x5f, 0x54, 0x4f, 0x5f, 0x47, 0x4f, - 0x4f, 0x47, 0x4c, 0x45, 0x10, 0x02, 0x42, 0x0c, 0x0a, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x6f, - 0x6e, 0x65, 0x6f, 0x66, 0x22, 0xb2, 0x02, 0x0a, 0x20, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, - 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, - 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x6c, 0x0a, 0x11, 0x76, 0x61, 0x6c, - 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, - 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, - 0x52, 0x65, 0x73, 0x70, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x10, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x2d, 0x0a, 0x12, 0x76, 0x61, 0x6c, 0x69, 
0x64, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x11, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x44, - 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x32, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, - 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x32, 0x41, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, - 0x74, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x22, 0x3d, 0x0a, 0x10, 0x56, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x0f, - 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, - 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, - 0x46, 0x41, 0x49, 0x4c, 0x55, 0x52, 0x45, 0x10, 0x02, 0x22, 0x97, 0x05, 0x0a, 0x0a, 0x53, 0x65, - 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x3a, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, - 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, - 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, - 0x74, 0x69, 0x74, 0x79, 0x12, 0x62, 0x0a, 0x19, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, - 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x52, 0x18, - 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, - 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x73, 0x12, 0x61, 0x0a, 0x19, 0x67, 0x65, 0x74, 0x5f, - 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x73, 0x32, - 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6c, - 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, - 0x71, 0x48, 0x00, 0x52, 0x16, 0x67, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x77, 0x0a, 0x21, 0x6f, - 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x5f, 0x6b, - 0x65, 0x79, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x71, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, - 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x1d, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, - 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x65, 0x71, 0x12, 0x80, 0x01, 0x0a, 0x24, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, - 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, - 0x6f, 0x70, 0x65, 
0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x20, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, + 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x50, 0x65, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0a, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, + 0x65, 0x65, 0x72, 0x12, 0x5b, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x65, + 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, + 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x50, 0x65, + 0x65, 0x72, 0x48, 0x00, 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x50, 0x65, 0x65, 0x72, + 0x1a, 0x39, 0x0a, 0x0a, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x65, 0x65, 0x72, 0x12, 0x2b, + 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, + 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x1a, 0xb5, 0x01, 0x0a, 0x0a, + 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x50, 0x65, 0x65, 0x72, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, + 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x51, 0x0a, 0x25, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x75, + 0x6e, 0x72, 0x65, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x63, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x22, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x55, 0x6e, 0x72, 0x65, 0x73, + 0x74, 0x72, 0x69, 0x63, 0x74, 0x65, 0x64, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x22, 0xc1, 0x01, 0x0a, 0x10, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, + 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x50, 0x49, + 0x46, 0x46, 0x45, 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, + 0x5f, 0x54, 0x4f, 0x5f, 0x47, 0x4f, 0x4f, 0x47, 0x4c, 0x45, 0x10, 0x02, 0x12, 0x27, 0x0a, 0x23, + 0x52, 0x45, 0x53, 0x45, 0x52, 0x56, 0x45, 0x44, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, + 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x4f, 0x44, + 0x45, 0x5f, 0x33, 0x10, 0x03, 0x12, 0x27, 0x0a, 0x23, 0x52, 0x45, 0x53, 0x45, 0x52, 0x56, 0x45, + 0x44, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 
0x4d, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x43, + 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x34, 0x10, 0x04, 0x12, 0x27, + 0x0a, 0x23, 0x52, 0x45, 0x53, 0x45, 0x52, 0x56, 0x45, 0x44, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, + 0x4d, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, + 0x4f, 0x44, 0x45, 0x5f, 0x35, 0x10, 0x05, 0x42, 0x0c, 0x0a, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x5f, + 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0xb2, 0x02, 0x0a, 0x20, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x6c, 0x0a, 0x11, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, + 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, + 0x6e, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x10, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x2d, 0x0a, 0x12, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x32, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, + 0x78, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x32, 0x41, 0x43, 0x6f, 0x6e, 0x74, 0x65, + 0x78, 0x74, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x22, 0x3d, 0x0a, 0x10, 0x56, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, + 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, + 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x01, 0x12, 0x0b, 0x0a, + 0x07, 0x46, 0x41, 0x49, 0x4c, 0x55, 0x52, 0x45, 0x10, 0x02, 0x22, 0xa0, 0x05, 0x0a, 0x0a, 0x53, + 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x3d, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, + 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x16, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, + 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, + 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x62, 0x0a, 0x19, 0x61, 0x75, 0x74, 0x68, + 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x65, 0x63, 0x68, 0x61, + 0x6e, 0x69, 0x73, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x73, 0x32, + 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65, + 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, + 0x73, 0x6d, 0x52, 0x18, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x73, 0x12, 0x61, 0x0a, 0x19, + 0x67, 0x65, 0x74, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 
0x66, 0x69, 0x67, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x24, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, + 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x16, 0x67, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, + 0x77, 0x0a, 0x21, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, + 0x74, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x72, 0x65, 0x71, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x73, 0x32, 0x61, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, + 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x1d, 0x6f, 0x66, 0x66, 0x6c, 0x6f, + 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x80, 0x01, 0x0a, 0x24, 0x6f, 0x66, 0x66, + 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6b, 0x65, 0x79, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, + 0x71, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x7d, 0x0a, 0x23, 0x76, 0x61, 0x6c, 0x69, 0x64, - 0x61, 0x74, 0x65, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, - 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, - 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x1f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, - 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, - 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x42, 0x0b, 0x0a, 0x09, 0x72, 0x65, 0x71, 0x5f, 0x6f, 0x6e, - 0x65, 0x6f, 0x66, 0x22, 0xb4, 0x04, 0x0a, 0x0b, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x73, 0x70, 0x12, 0x2c, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x12, 0x64, 0x0a, 0x1a, 0x67, 0x65, 0x74, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 
0x17, - 0x67, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x7a, 0x0a, 0x22, 0x6f, 0x66, 0x66, 0x6c, 0x6f, - 0x61, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6f, - 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, - 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, - 0x70, 0x48, 0x00, 0x52, 0x1e, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, - 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x73, 0x70, 0x12, 0x83, 0x01, 0x0a, 0x25, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x5f, - 0x72, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6f, - 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x21, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, - 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x80, 0x01, 0x0a, 0x24, 0x76, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, 0x72, 0x74, - 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x72, 0x65, - 0x73, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x20, 0x6f, 0x66, 0x66, 0x6c, 0x6f, + 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x7d, 0x0a, 0x23, 0x76, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, 0x72, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x72, + 0x65, 0x71, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, - 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x20, 0x76, 0x61, 0x6c, 0x69, - 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x42, 0x0c, 0x0a, 0x0a, - 0x72, 0x65, 0x73, 0x70, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x2a, 0xa2, 0x03, 0x0a, 0x12, 0x53, - 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, - 0x6d, 0x12, 0x1c, 0x0a, 0x18, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, - 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, + 0x68, 0x61, 0x69, 
0x6e, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x1f, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x42, 0x0b, 0x0a, 0x09, 0x72, 0x65, + 0x71, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x22, 0xb4, 0x04, + 0x0a, 0x0b, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x2c, 0x0a, + 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, + 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x64, 0x0a, 0x1a, 0x67, + 0x65, 0x74, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x25, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, + 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x17, 0x67, 0x65, 0x74, 0x54, 0x6c, 0x73, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, + 0x70, 0x12, 0x7a, 0x0a, 0x22, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x70, 0x72, 0x69, + 0x76, 0x61, 0x74, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, + 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, + 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x1e, 0x6f, + 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, + 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x83, 0x01, + 0x0a, 0x25, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, + 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, + 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, + 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, + 0x52, 0x21, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x73, 0x70, 0x12, 0x80, 0x01, 0x0a, 0x24, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x5f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, + 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, + 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, + 0x73, 0x70, 0x48, 0x00, 0x52, 0x20, 0x76, 
0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, + 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, + 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x42, 0x0c, 0x0a, 0x0a, 0x72, 0x65, 0x73, 0x70, 0x5f, 0x6f, + 0x6e, 0x65, 0x6f, 0x66, 0x2a, 0xa2, 0x03, 0x0a, 0x12, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x1c, 0x0a, 0x18, 0x53, + 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, + 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x32, 0x41, + 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, + 0x43, 0x53, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x01, 0x12, 0x21, 0x0a, 0x1d, + 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, + 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x02, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, - 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, - 0x10, 0x01, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, - 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x53, 0x48, 0x41, - 0x33, 0x38, 0x34, 0x10, 0x02, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, - 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, - 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, 0x10, 0x03, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x32, 0x41, 0x5f, - 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x53, - 0x45, 0x43, 0x50, 0x32, 0x35, 0x36, 0x52, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, - 0x04, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, - 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x53, 0x45, 0x43, 0x50, 0x33, 0x38, 0x34, 0x52, - 0x31, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x05, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x32, - 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, - 0x5f, 0x53, 0x45, 0x43, 0x50, 0x35, 0x32, 0x31, 0x52, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x35, 0x31, - 0x32, 0x10, 0x06, 0x12, 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, + 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, + 0x10, 0x03, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, + 0x47, 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x53, 0x45, 0x43, 0x50, 0x32, 0x35, 0x36, + 0x52, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x04, 0x12, 0x27, 0x0a, 0x23, 0x53, + 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, + 0x41, 0x5f, 0x53, 0x45, 0x43, 0x50, 0x33, 0x38, 0x34, 0x52, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x33, + 0x38, 0x34, 0x10, 0x05, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, + 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x53, 0x45, 0x43, 0x50, 0x35, + 0x32, 0x31, 0x52, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, 0x10, 0x06, 0x12, 0x24, 0x0a, + 0x20, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, + 0x41, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, 0x5f, 
0x53, 0x48, 0x41, 0x32, 0x35, + 0x36, 0x10, 0x07, 0x12, 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, - 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x07, 0x12, 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41, + 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x08, 0x12, 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, - 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x08, 0x12, - 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, - 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, 0x5f, 0x53, 0x48, 0x41, - 0x35, 0x31, 0x32, 0x10, 0x09, 0x12, 0x18, 0x0a, 0x14, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, - 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x44, 0x32, 0x35, 0x35, 0x31, 0x39, 0x10, 0x0a, 0x32, - 0x57, 0x0a, 0x0a, 0x53, 0x32, 0x41, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x49, 0x0a, - 0x0c, 0x53, 0x65, 0x74, 0x55, 0x70, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x2e, - 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x73, - 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x1a, 0x19, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, + 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, 0x5f, 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, 0x10, 0x09, 0x12, + 0x18, 0x0a, 0x14, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, + 0x45, 0x44, 0x32, 0x35, 0x35, 0x31, 0x39, 0x10, 0x0a, 0x32, 0x57, 0x0a, 0x0a, 0x53, 0x32, 0x41, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x49, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x55, 0x70, + 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, - 0x73, 0x70, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, - 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, - 0x61, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x71, 0x1a, 0x19, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, + 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x28, 0x01, + 0x30, 0x01, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, + 0x61, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, } var ( @@ -2158,7 +2176,7 @@ func file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP() []byte { var file_internal_proto_v2_s2a_s2a_proto_enumTypes = make([]protoimpl.EnumInfo, 6) var file_internal_proto_v2_s2a_s2a_proto_msgTypes = make([]protoimpl.MessageInfo, 17) -var file_internal_proto_v2_s2a_s2a_proto_goTypes = []interface{}{ +var file_internal_proto_v2_s2a_s2a_proto_goTypes = []any{ (SignatureAlgorithm)(0), // 0: s2a.proto.v2.SignatureAlgorithm 
 	(GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate)(0), // 1: s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration.RequestClientCertificate
 	(OffloadPrivateKeyOperationReq_PrivateKeyOperation)(0), // 2: s2a.proto.v2.OffloadPrivateKeyOperationReq.PrivateKeyOperation
@@ -2183,7 +2201,7 @@ var file_internal_proto_v2_s2a_s2a_proto_goTypes = []interface{}{
 	(*ValidatePeerCertificateChainReq_ClientPeer)(nil), // 21: s2a.proto.v2.ValidatePeerCertificateChainReq.ClientPeer
 	(*ValidatePeerCertificateChainReq_ServerPeer)(nil), // 22: s2a.proto.v2.ValidatePeerCertificateChainReq.ServerPeer
 	(common_go_proto.AlpnProtocol)(0),                  // 23: s2a.proto.v2.AlpnProtocol
-	(*common_go_proto1.Identity)(nil),                  // 24: s2a.proto.Identity
+	(*common_go_proto.Identity)(nil),                   // 24: s2a.proto.v2.Identity
 	(common_go_proto.ConnectionSide)(0),                // 25: s2a.proto.v2.ConnectionSide
 	(*s2a_context_go_proto.S2AContext)(nil),            // 26: s2a.proto.v2.S2AContext
 	(common_go_proto.TLSVersion)(0),                    // 27: s2a.proto.v2.TLSVersion
@@ -2191,7 +2209,7 @@ var file_internal_proto_v2_s2a_s2a_proto_goTypes = []interface{}{
 }
 var file_internal_proto_v2_s2a_s2a_proto_depIdxs = []int32{
 	23, // 0: s2a.proto.v2.AlpnPolicy.alpn_protocols:type_name -> s2a.proto.v2.AlpnProtocol
-	24, // 1: s2a.proto.v2.AuthenticationMechanism.identity:type_name -> s2a.proto.Identity
+	24, // 1: s2a.proto.v2.AuthenticationMechanism.identity:type_name -> s2a.proto.v2.Identity
 	25, // 2: s2a.proto.v2.GetTlsConfigurationReq.connection_side:type_name -> s2a.proto.v2.ConnectionSide
 	19, // 3: s2a.proto.v2.GetTlsConfigurationResp.client_tls_configuration:type_name -> s2a.proto.v2.GetTlsConfigurationResp.ClientTlsConfiguration
 	20, // 4: s2a.proto.v2.GetTlsConfigurationResp.server_tls_configuration:type_name -> s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration
@@ -2203,7 +2221,7 @@ var file_internal_proto_v2_s2a_s2a_proto_depIdxs = []int32{
 	22, // 10: s2a.proto.v2.ValidatePeerCertificateChainReq.server_peer:type_name -> s2a.proto.v2.ValidatePeerCertificateChainReq.ServerPeer
 	5,  // 11: s2a.proto.v2.ValidatePeerCertificateChainResp.validation_result:type_name -> s2a.proto.v2.ValidatePeerCertificateChainResp.ValidationResult
 	26, // 12: s2a.proto.v2.ValidatePeerCertificateChainResp.context:type_name -> s2a.proto.v2.S2AContext
-	24, // 13: s2a.proto.v2.SessionReq.local_identity:type_name -> s2a.proto.Identity
+	24, // 13: s2a.proto.v2.SessionReq.local_identity:type_name -> s2a.proto.v2.Identity
 	7,  // 14: s2a.proto.v2.SessionReq.authentication_mechanisms:type_name -> s2a.proto.v2.AuthenticationMechanism
 	9,  // 15: s2a.proto.v2.SessionReq.get_tls_configuration_req:type_name -> s2a.proto.v2.GetTlsConfigurationReq
 	11, // 16: s2a.proto.v2.SessionReq.offload_private_key_operation_req:type_name -> s2a.proto.v2.OffloadPrivateKeyOperationReq
@@ -2238,7 +2256,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() {
 		return
 	}
 	if !protoimpl.UnsafeEnabled {
-		file_internal_proto_v2_s2a_s2a_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+		file_internal_proto_v2_s2a_s2a_proto_msgTypes[0].Exporter = func(v any, i int) any {
 			switch v := v.(*AlpnPolicy); i {
 			case 0:
 				return &v.state
@@ -2250,7 +2268,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() {
 				return nil
 			}
 		}
-		file_internal_proto_v2_s2a_s2a_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+		file_internal_proto_v2_s2a_s2a_proto_msgTypes[1].Exporter = func(v any, i int) any {
 			switch v := v.(*AuthenticationMechanism); i {
 			case 0:
 				return &v.state
@@ -2262,7 +2280,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() {
 				return nil
 			}
 		}
-		file_internal_proto_v2_s2a_s2a_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+		file_internal_proto_v2_s2a_s2a_proto_msgTypes[2].Exporter = func(v any, i int) any {
 			switch v := v.(*Status); i {
 			case 0:
 				return &v.state
@@ -2274,7 +2292,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() {
 				return nil
 			}
 		}
-		file_internal_proto_v2_s2a_s2a_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+		file_internal_proto_v2_s2a_s2a_proto_msgTypes[3].Exporter = func(v any, i int) any {
 			switch v := v.(*GetTlsConfigurationReq); i {
 			case 0:
 				return &v.state
@@ -2286,7 +2304,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() {
 				return nil
 			}
 		}
-		file_internal_proto_v2_s2a_s2a_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+		file_internal_proto_v2_s2a_s2a_proto_msgTypes[4].Exporter = func(v any, i int) any {
 			switch v := v.(*GetTlsConfigurationResp); i {
 			case 0:
 				return &v.state
@@ -2298,7 +2316,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() {
 				return nil
 			}
 		}
-		file_internal_proto_v2_s2a_s2a_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+		file_internal_proto_v2_s2a_s2a_proto_msgTypes[5].Exporter = func(v any, i int) any {
 			switch v := v.(*OffloadPrivateKeyOperationReq); i {
 			case 0:
 				return &v.state
@@ -2310,7 +2328,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() {
 				return nil
 			}
 		}
-		file_internal_proto_v2_s2a_s2a_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+		file_internal_proto_v2_s2a_s2a_proto_msgTypes[6].Exporter = func(v any, i int) any {
 			switch v := v.(*OffloadPrivateKeyOperationResp); i {
 			case 0:
 				return &v.state
@@ -2322,7 +2340,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() {
 				return nil
 			}
 		}
-		file_internal_proto_v2_s2a_s2a_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+		file_internal_proto_v2_s2a_s2a_proto_msgTypes[7].Exporter = func(v any, i int) any {
 			switch v := v.(*OffloadResumptionKeyOperationReq); i {
 			case 0:
 				return &v.state
@@ -2334,7 +2352,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() {
 				return nil
 			}
 		}
-		file_internal_proto_v2_s2a_s2a_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+		file_internal_proto_v2_s2a_s2a_proto_msgTypes[8].Exporter = func(v any, i int) any {
 			switch v := v.(*OffloadResumptionKeyOperationResp); i {
 			case 0:
 				return &v.state
@@ -2346,7 +2364,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() {
 				return nil
 			}
 		}
-		file_internal_proto_v2_s2a_s2a_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+		file_internal_proto_v2_s2a_s2a_proto_msgTypes[9].Exporter = func(v any, i int) any {
 			switch v := v.(*ValidatePeerCertificateChainReq); i {
 			case 0:
 				return &v.state
@@ -2358,7 +2376,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() {
 				return nil
 			}
 		}
-		file_internal_proto_v2_s2a_s2a_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+		file_internal_proto_v2_s2a_s2a_proto_msgTypes[10].Exporter = func(v any, i int) any {
 			switch v := v.(*ValidatePeerCertificateChainResp); i {
 			case 0:
 				return &v.state
@@ -2370,7 +2388,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() {
 				return nil
 			}
 		}
-		file_internal_proto_v2_s2a_s2a_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+		file_internal_proto_v2_s2a_s2a_proto_msgTypes[11].Exporter = func(v any, i int) any {
 			switch v := v.(*SessionReq); i {
 			case 0:
 				return &v.state
@@ -2382,7 +2400,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() {
 				return nil
 			}
 		}
-		file_internal_proto_v2_s2a_s2a_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+		file_internal_proto_v2_s2a_s2a_proto_msgTypes[12].Exporter = func(v any, i int) any {
 			switch v := v.(*SessionResp); i {
 			case 0:
 				return &v.state
@@ -2394,7 +2412,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() {
 				return nil
 			}
 		}
-		file_internal_proto_v2_s2a_s2a_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
+		file_internal_proto_v2_s2a_s2a_proto_msgTypes[13].Exporter = func(v any, i int) any {
 			switch v := v.(*GetTlsConfigurationResp_ClientTlsConfiguration); i {
 			case 0:
 				return &v.state
@@ -2406,7 +2424,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() {
 				return nil
 			}
 		}
-		file_internal_proto_v2_s2a_s2a_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
+		file_internal_proto_v2_s2a_s2a_proto_msgTypes[14].Exporter = func(v any, i int) any {
 			switch v := v.(*GetTlsConfigurationResp_ServerTlsConfiguration); i {
 			case 0:
 				return &v.state
@@ -2418,7 +2436,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() {
 				return nil
 			}
 		}
-		file_internal_proto_v2_s2a_s2a_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
+		file_internal_proto_v2_s2a_s2a_proto_msgTypes[15].Exporter = func(v any, i int) any {
 			switch v := v.(*ValidatePeerCertificateChainReq_ClientPeer); i {
 			case 0:
 				return &v.state
@@ -2430,7 +2448,7 @@ func file_internal_proto_v2_s2a_s2a_proto_init() {
 				return nil
 			}
 		}
-		file_internal_proto_v2_s2a_s2a_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
+		file_internal_proto_v2_s2a_s2a_proto_msgTypes[16].Exporter = func(v any, i int) any {
 			switch v := v.(*ValidatePeerCertificateChainReq_ServerPeer); i {
 			case 0:
 				return &v.state
@@ -2443,30 +2461,30 @@ func file_internal_proto_v2_s2a_s2a_proto_init() {
 			}
 		}
 	}
-	file_internal_proto_v2_s2a_s2a_proto_msgTypes[1].OneofWrappers = []interface{}{
+	file_internal_proto_v2_s2a_s2a_proto_msgTypes[1].OneofWrappers = []any{
 		(*AuthenticationMechanism_Token)(nil),
 	}
-	file_internal_proto_v2_s2a_s2a_proto_msgTypes[4].OneofWrappers = []interface{}{
+	file_internal_proto_v2_s2a_s2a_proto_msgTypes[4].OneofWrappers = []any{
 		(*GetTlsConfigurationResp_ClientTlsConfiguration_)(nil),
 		(*GetTlsConfigurationResp_ServerTlsConfiguration_)(nil),
 	}
-	file_internal_proto_v2_s2a_s2a_proto_msgTypes[5].OneofWrappers = []interface{}{
+	file_internal_proto_v2_s2a_s2a_proto_msgTypes[5].OneofWrappers = []any{
 		(*OffloadPrivateKeyOperationReq_RawBytes)(nil),
 		(*OffloadPrivateKeyOperationReq_Sha256Digest)(nil),
 		(*OffloadPrivateKeyOperationReq_Sha384Digest)(nil),
 		(*OffloadPrivateKeyOperationReq_Sha512Digest)(nil),
 	}
-	file_internal_proto_v2_s2a_s2a_proto_msgTypes[9].OneofWrappers = []interface{}{
+	file_internal_proto_v2_s2a_s2a_proto_msgTypes[9].OneofWrappers = []any{
 		(*ValidatePeerCertificateChainReq_ClientPeer_)(nil),
 		(*ValidatePeerCertificateChainReq_ServerPeer_)(nil),
 	}
-	file_internal_proto_v2_s2a_s2a_proto_msgTypes[11].OneofWrappers = []interface{}{
+	file_internal_proto_v2_s2a_s2a_proto_msgTypes[11].OneofWrappers = []any{
 		(*SessionReq_GetTlsConfigurationReq)(nil),
 		(*SessionReq_OffloadPrivateKeyOperationReq)(nil),
 		(*SessionReq_OffloadResumptionKeyOperationReq)(nil),
 		(*SessionReq_ValidatePeerCertificateChainReq)(nil),
 	}
-	file_internal_proto_v2_s2a_s2a_proto_msgTypes[12].OneofWrappers = []interface{}{
+	file_internal_proto_v2_s2a_s2a_proto_msgTypes[12].OneofWrappers = []any{
 		(*SessionResp_GetTlsConfigurationResp)(nil),
 		(*SessionResp_OffloadPrivateKeyOperationResp)(nil),
 		(*SessionResp_OffloadResumptionKeyOperationResp)(nil),
diff --git a/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a_grpc.pb.go b/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a_grpc.pb.go
index 2566df6c304..c93f75a78b0 100644
--- a/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a_grpc.pb.go
+++ b/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a_grpc.pb.go
@@ -14,7 +14,7 @@
 // Code generated by protoc-gen-go-grpc. DO NOT EDIT.
 // versions:
-// - protoc-gen-go-grpc v1.3.0
+// - protoc-gen-go-grpc v1.4.0
 // - protoc v3.21.12
 // source: internal/proto/v2/s2a/s2a.proto
@@ -29,8 +29,8 @@ import (
 // This is a compile-time assertion to ensure that this generated file
 // is compatible with the grpc package it is being compiled against.
-// Requires gRPC-Go v1.32.0 or later.
-const _ = grpc.SupportPackageIsVersion7
+// Requires gRPC-Go v1.62.0 or later.
+const _ = grpc.SupportPackageIsVersion8
 const (
 	S2AService_SetUpSession_FullMethodName = "/s2a.proto.v2.S2AService/SetUpSession"
@@ -54,11 +54,12 @@ func NewS2AServiceClient(cc grpc.ClientConnInterface) S2AServiceClient {
 }
 func (c *s2AServiceClient) SetUpSession(ctx context.Context, opts ...grpc.CallOption) (S2AService_SetUpSessionClient, error) {
-	stream, err := c.cc.NewStream(ctx, &S2AService_ServiceDesc.Streams[0], S2AService_SetUpSession_FullMethodName, opts...)
+	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
+	stream, err := c.cc.NewStream(ctx, &S2AService_ServiceDesc.Streams[0], S2AService_SetUpSession_FullMethodName, cOpts...)
 	if err != nil {
 		return nil, err
 	}
-	x := &s2AServiceSetUpSessionClient{stream}
+	x := &s2AServiceSetUpSessionClient{ClientStream: stream}
 	return x, nil
 }
@@ -115,7 +116,7 @@ func RegisterS2AServiceServer(s grpc.ServiceRegistrar, srv S2AServiceServer) {
 }
 func _S2AService_SetUpSession_Handler(srv interface{}, stream grpc.ServerStream) error {
-	return srv.(S2AServiceServer).SetUpSession(&s2AServiceSetUpSessionServer{stream})
+	return srv.(S2AServiceServer).SetUpSession(&s2AServiceSetUpSessionServer{ServerStream: stream})
 }
 type S2AService_SetUpSessionServer interface {
diff --git a/vendor/github.com/google/s2a-go/internal/record/record.go b/vendor/github.com/google/s2a-go/internal/record/record.go
index c60515510a7..e76509ef01a 100644
--- a/vendor/github.com/google/s2a-go/internal/record/record.go
+++ b/vendor/github.com/google/s2a-go/internal/record/record.go
@@ -378,11 +378,6 @@ func (p *conn) Read(b []byte) (n int, err error) {
 		if len(p.handshakeBuf) > 0 {
 			return 0, errors.New("application data received while processing fragmented handshake messages")
 		}
-		if p.ticketState == receivingTickets {
-			p.ticketState = notReceivingTickets
-			grpclog.Infof("Sending session tickets to S2A.")
-			p.ticketSender.sendTicketsToS2A(p.sessionTickets, p.callComplete)
-		}
 	case alert:
 		return 0, p.handleAlertMessage()
 	case handshake:
@@ -500,17 +495,7 @@ func (p *conn) buildRecord(plaintext []byte, recordType byte, recordStartIndex i
 }
 func (p *conn) Close() error {
-	p.readMutex.Lock()
-	defer p.readMutex.Unlock()
-	p.writeMutex.Lock()
-	defer p.writeMutex.Unlock()
-	// If p.ticketState is equal to notReceivingTickets, then S2A has
-	// been sent a flight of session tickets, and we must wait for the
-	// call to S2A to complete before closing the record protocol.
- if p.ticketState == notReceivingTickets { - <-p.callComplete - grpclog.Infof("Safe to close the connection because sending tickets to S2A is (already) complete.") - } + // Close the connection immediately. return p.Conn.Close() } @@ -663,7 +648,7 @@ func (p *conn) handleHandshakeMessage() error { // Several handshake messages may be coalesced into a single record. // Continue reading them until the handshake buffer is empty. for len(p.handshakeBuf) > 0 { - handshakeMsgType, msgLen, msg, rawMsg, ok := p.parseHandshakeMsg() + handshakeMsgType, msgLen, msg, _, ok := p.parseHandshakeMsg() if !ok { // The handshake could not be fully parsed, so read in another // record and try again later. @@ -681,20 +666,7 @@ func (p *conn) handleHandshakeMessage() error { return err } case tlsHandshakeNewSessionTicketType: - // Ignore tickets that are received after a batch of tickets has - // been sent to S2A. - if p.ticketState == notReceivingTickets { - continue - } - if p.ticketState == ticketsNotYetReceived { - p.ticketState = receivingTickets - } - p.sessionTickets = append(p.sessionTickets, rawMsg) - if len(p.sessionTickets) == maxAllowedTickets { - p.ticketState = notReceivingTickets - grpclog.Infof("Sending session tickets to S2A.") - p.ticketSender.sendTicketsToS2A(p.sessionTickets, p.callComplete) - } + // Do nothing for session ticket. default: return errors.New("unknown handshake message type") } diff --git a/vendor/github.com/google/s2a-go/internal/tokenmanager/tokenmanager.go b/vendor/github.com/google/s2a-go/internal/tokenmanager/tokenmanager.go index ec96ba3b6a6..4057e70c8ad 100644 --- a/vendor/github.com/google/s2a-go/internal/tokenmanager/tokenmanager.go +++ b/vendor/github.com/google/s2a-go/internal/tokenmanager/tokenmanager.go @@ -23,7 +23,8 @@ import ( "fmt" "os" - commonpb "github.com/google/s2a-go/internal/proto/common_go_proto" + commonpbv1 "github.com/google/s2a-go/internal/proto/common_go_proto" + commonpb "github.com/google/s2a-go/internal/proto/v2/common_go_proto" ) const ( @@ -37,7 +38,7 @@ type AccessTokenManager interface { DefaultToken() (token string, err error) // Token returns a token that an application with local identity equal to // identity must use to authenticate to S2A. - Token(identity *commonpb.Identity) (token string, err error) + Token(identity interface{}) (token string, err error) } type singleTokenAccessTokenManager struct { @@ -65,6 +66,14 @@ func (m *singleTokenAccessTokenManager) DefaultToken() (string, error) { } // Token always returns the token managed by the singleTokenAccessTokenManager. -func (m *singleTokenAccessTokenManager) Token(*commonpb.Identity) (string, error) { +func (m *singleTokenAccessTokenManager) Token(identity interface{}) (string, error) { + switch v := identity.(type) { + case *commonpbv1.Identity: + // valid type. + case *commonpb.Identity: + // valid type. 
+ default: + return "", fmt.Errorf("Incorrect identity type: %v", v) + } return m.token, nil } diff --git a/vendor/github.com/google/s2a-go/internal/v2/s2av2.go b/vendor/github.com/google/s2a-go/internal/v2/s2av2.go index 85a8379d833..a6402ee48cc 100644 --- a/vendor/github.com/google/s2a-go/internal/v2/s2av2.go +++ b/vendor/github.com/google/s2a-go/internal/v2/s2av2.go @@ -28,7 +28,6 @@ import ( "os" "time" - "github.com/golang/protobuf/proto" "github.com/google/s2a-go/fallback" "github.com/google/s2a-go/internal/handshaker/service" "github.com/google/s2a-go/internal/tokenmanager" @@ -38,8 +37,9 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" + "google.golang.org/protobuf/proto" - commonpbv1 "github.com/google/s2a-go/internal/proto/common_go_proto" + commonpb "github.com/google/s2a-go/internal/proto/v2/common_go_proto" s2av2pb "github.com/google/s2a-go/internal/proto/v2/s2a_go_proto" ) @@ -59,9 +59,9 @@ type s2av2TransportCreds struct { transportCreds credentials.TransportCredentials tokenManager *tokenmanager.AccessTokenManager // localIdentity should only be used by the client. - localIdentity *commonpbv1.Identity + localIdentity *commonpb.Identity // localIdentities should only be used by the server. - localIdentities []*commonpbv1.Identity + localIdentities []*commonpb.Identity verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode fallbackClientHandshake fallback.ClientHandshake getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error) @@ -70,7 +70,7 @@ type s2av2TransportCreds struct { // NewClientCreds returns a client-side transport credentials object that uses // the S2Av2 to establish a secure connection with a server. -func NewClientCreds(s2av2Address string, transportCreds credentials.TransportCredentials, localIdentity *commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, fallbackClientHandshakeFunc fallback.ClientHandshake, getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error), serverAuthorizationPolicy []byte) (credentials.TransportCredentials, error) { +func NewClientCreds(s2av2Address string, transportCreds credentials.TransportCredentials, localIdentity *commonpb.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, fallbackClientHandshakeFunc fallback.ClientHandshake, getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error), serverAuthorizationPolicy []byte) (credentials.TransportCredentials, error) { // Create an AccessTokenManager instance to use to authenticate to S2Av2. accessTokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager() @@ -101,7 +101,7 @@ func NewClientCreds(s2av2Address string, transportCreds credentials.TransportCre // NewServerCreds returns a server-side transport credentials object that uses // the S2Av2 to establish a secure connection with a client. 
-func NewServerCreds(s2av2Address string, transportCreds credentials.TransportCredentials, localIdentities []*commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error)) (credentials.TransportCredentials, error) { +func NewServerCreds(s2av2Address string, transportCreds credentials.TransportCredentials, localIdentities []*commonpb.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error)) (credentials.TransportCredentials, error) { // Create an AccessTokenManager instance to use to authenticate to S2Av2. accessTokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager() creds := &s2av2TransportCreds{ @@ -183,13 +183,7 @@ func (c *s2av2TransportCreds) ClientHandshake(ctx context.Context, serverAuthori } creds := credentials.NewTLS(config) - var conn net.Conn - var authInfo credentials.AuthInfo - retry.Run(timeoutCtx, - func() error { - conn, authInfo, err = creds.ClientHandshake(timeoutCtx, serverName, rawConn) - return err - }) + conn, authInfo, err := creds.ClientHandshake(timeoutCtx, serverName, rawConn) if err != nil { grpclog.Infof("Failed to do client handshake using S2Av2: %v", err) if c.fallbackClientHandshake != nil { @@ -197,7 +191,7 @@ func (c *s2av2TransportCreds) ClientHandshake(ctx context.Context, serverAuthori } return nil, nil, err } - grpclog.Infof("Successfully done client handshake using S2Av2 to: %s", serverName) + grpclog.Infof("client-side handshake is done using S2Av2 to: %s", serverName) return conn, authInfo, err } @@ -247,13 +241,7 @@ func (c *s2av2TransportCreds) ServerHandshake(rawConn net.Conn) (net.Conn, crede } creds := credentials.NewTLS(config) - var conn net.Conn - var authInfo credentials.AuthInfo - retry.Run(ctx, - func() error { - conn, authInfo, err = creds.ServerHandshake(rawConn) - return err - }) + conn, authInfo, err := creds.ServerHandshake(rawConn) if err != nil { grpclog.Infof("Failed to do server handshake using S2Av2: %v", err) return nil, nil, err @@ -280,15 +268,15 @@ func (c *s2av2TransportCreds) Clone() credentials.TransportCredentials { tokenManager = *c.tokenManager } verificationMode := c.verificationMode - var localIdentity *commonpbv1.Identity + var localIdentity *commonpb.Identity if c.localIdentity != nil { - localIdentity = proto.Clone(c.localIdentity).(*commonpbv1.Identity) + localIdentity = proto.Clone(c.localIdentity).(*commonpb.Identity) } - var localIdentities []*commonpbv1.Identity + var localIdentities []*commonpb.Identity if c.localIdentities != nil { - localIdentities = make([]*commonpbv1.Identity, len(c.localIdentities)) + localIdentities = make([]*commonpb.Identity, len(c.localIdentities)) for i, localIdentity := range c.localIdentities { - localIdentities[i] = proto.Clone(localIdentity).(*commonpbv1.Identity) + localIdentities[i] = proto.Clone(localIdentity).(*commonpb.Identity) } } creds := &s2av2TransportCreds{ diff --git a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/tlsconfigstore.go b/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/tlsconfigstore.go index 4d919132295..fa0002e36b7 100644 --- a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/tlsconfigstore.go +++ b/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/tlsconfigstore.go @@ -33,7 +33,6 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" - commonpbv1 
"github.com/google/s2a-go/internal/proto/common_go_proto" commonpb "github.com/google/s2a-go/internal/proto/v2/common_go_proto" s2av2pb "github.com/google/s2a-go/internal/proto/v2/s2a_go_proto" ) @@ -44,8 +43,8 @@ const ( ) // GetTLSConfigurationForClient returns a tls.Config instance for use by a client application. -func GetTLSConfigurationForClient(serverHostname string, s2AStream stream.S2AStream, tokenManager tokenmanager.AccessTokenManager, localIdentity *commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, serverAuthorizationPolicy []byte) (*tls.Config, error) { - authMechanisms := getAuthMechanisms(tokenManager, []*commonpbv1.Identity{localIdentity}) +func GetTLSConfigurationForClient(serverHostname string, s2AStream stream.S2AStream, tokenManager tokenmanager.AccessTokenManager, localIdentity *commonpb.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, serverAuthorizationPolicy []byte) (*tls.Config, error) { + authMechanisms := getAuthMechanisms(tokenManager, []*commonpb.Identity{localIdentity}) if grpclog.V(1) { grpclog.Infof("Sending request to S2Av2 for client TLS config.") @@ -126,7 +125,7 @@ func GetTLSConfigurationForClient(serverHostname string, s2AStream stream.S2AStr } // GetTLSConfigurationForServer returns a tls.Config instance for use by a server application. -func GetTLSConfigurationForServer(s2AStream stream.S2AStream, tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode) (*tls.Config, error) { +func GetTLSConfigurationForServer(s2AStream stream.S2AStream, tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpb.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode) (*tls.Config, error) { return &tls.Config{ GetConfigForClient: ClientConfig(tokenManager, localIdentities, verificationMode, s2AStream), }, nil @@ -136,7 +135,7 @@ func GetTLSConfigurationForServer(s2AStream stream.S2AStream, tokenManager token // connection with a client, based on SNI communicated during ClientHello. // Ensures that server presents the correct certificate to establish a TLS // connection. 
-func ClientConfig(tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, s2AStream stream.S2AStream) func(chi *tls.ClientHelloInfo) (*tls.Config, error) { +func ClientConfig(tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpb.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, s2AStream stream.S2AStream) func(chi *tls.ClientHelloInfo) (*tls.Config, error) { return func(chi *tls.ClientHelloInfo) (*tls.Config, error) { tlsConfig, err := getServerConfigFromS2Av2(tokenManager, localIdentities, chi.ServerName, s2AStream) if err != nil { @@ -219,9 +218,9 @@ func getTLSCipherSuite(tlsCipherSuite commonpb.Ciphersuite) uint16 { } } -func getServerConfigFromS2Av2(tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpbv1.Identity, sni string, s2AStream stream.S2AStream) (*s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration, error) { +func getServerConfigFromS2Av2(tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpb.Identity, sni string, s2AStream stream.S2AStream) (*s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration, error) { authMechanisms := getAuthMechanisms(tokenManager, localIdentities) - var locID *commonpbv1.Identity + var locID *commonpb.Identity if localIdentities != nil { locID = localIdentities[0] } @@ -283,7 +282,7 @@ func getTLSClientAuthType(tlsConfig *s2av2pb.GetTlsConfigurationResp_ServerTlsCo return clientAuth } -func getAuthMechanisms(tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpbv1.Identity) []*s2av2pb.AuthenticationMechanism { +func getAuthMechanisms(tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpb.Identity) []*s2av2pb.AuthenticationMechanism { if tokenManager == nil { return nil } diff --git a/vendor/github.com/google/s2a-go/s2a.go b/vendor/github.com/google/s2a-go/s2a.go index 5ecb06f930e..cc79bd09a67 100644 --- a/vendor/github.com/google/s2a-go/s2a.go +++ b/vendor/github.com/google/s2a-go/s2a.go @@ -29,7 +29,6 @@ import ( "sync" "time" - "github.com/golang/protobuf/proto" "github.com/google/s2a-go/fallback" "github.com/google/s2a-go/internal/handshaker" "github.com/google/s2a-go/internal/handshaker/service" @@ -38,8 +37,10 @@ import ( "github.com/google/s2a-go/retry" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" + "google.golang.org/protobuf/proto" - commonpb "github.com/google/s2a-go/internal/proto/common_go_proto" + commonpbv1 "github.com/google/s2a-go/internal/proto/common_go_proto" + commonpb "github.com/google/s2a-go/internal/proto/v2/common_go_proto" s2av2pb "github.com/google/s2a-go/internal/proto/v2/s2a_go_proto" ) @@ -54,17 +55,17 @@ const ( // credentials.TransportCredentials interface. type s2aTransportCreds struct { info *credentials.ProtocolInfo - minTLSVersion commonpb.TLSVersion - maxTLSVersion commonpb.TLSVersion + minTLSVersion commonpbv1.TLSVersion + maxTLSVersion commonpbv1.TLSVersion // tlsCiphersuites contains the ciphersuites used in the S2A connection. // Note that these are currently unconfigurable. - tlsCiphersuites []commonpb.Ciphersuite + tlsCiphersuites []commonpbv1.Ciphersuite // localIdentity should only be used by the client. - localIdentity *commonpb.Identity + localIdentity *commonpbv1.Identity // localIdentities should only be used by the server. 
- localIdentities []*commonpb.Identity + localIdentities []*commonpbv1.Identity // targetIdentities should only be used by the client. - targetIdentities []*commonpb.Identity + targetIdentities []*commonpbv1.Identity isClient bool s2aAddr string ensureProcessSessionTickets *sync.WaitGroup @@ -76,7 +77,7 @@ func NewClientCreds(opts *ClientOptions) (credentials.TransportCredentials, erro if opts == nil { return nil, errors.New("nil client options") } - var targetIdentities []*commonpb.Identity + var targetIdentities []*commonpbv1.Identity for _, targetIdentity := range opts.TargetIdentities { protoTargetIdentity, err := toProtoIdentity(targetIdentity) if err != nil { @@ -93,12 +94,12 @@ func NewClientCreds(opts *ClientOptions) (credentials.TransportCredentials, erro info: &credentials.ProtocolInfo{ SecurityProtocol: s2aSecurityProtocol, }, - minTLSVersion: commonpb.TLSVersion_TLS1_3, - maxTLSVersion: commonpb.TLSVersion_TLS1_3, - tlsCiphersuites: []commonpb.Ciphersuite{ - commonpb.Ciphersuite_AES_128_GCM_SHA256, - commonpb.Ciphersuite_AES_256_GCM_SHA384, - commonpb.Ciphersuite_CHACHA20_POLY1305_SHA256, + minTLSVersion: commonpbv1.TLSVersion_TLS1_3, + maxTLSVersion: commonpbv1.TLSVersion_TLS1_3, + tlsCiphersuites: []commonpbv1.Ciphersuite{ + commonpbv1.Ciphersuite_AES_128_GCM_SHA256, + commonpbv1.Ciphersuite_AES_256_GCM_SHA384, + commonpbv1.Ciphersuite_CHACHA20_POLY1305_SHA256, }, localIdentity: localIdentity, targetIdentities: targetIdentities, @@ -112,7 +113,11 @@ func NewClientCreds(opts *ClientOptions) (credentials.TransportCredentials, erro if opts.FallbackOpts != nil && opts.FallbackOpts.FallbackClientHandshakeFunc != nil { fallbackFunc = opts.FallbackOpts.FallbackClientHandshakeFunc } - return v2.NewClientCreds(opts.S2AAddress, opts.TransportCreds, localIdentity, verificationMode, fallbackFunc, opts.getS2AStream, opts.serverAuthorizationPolicy) + v2LocalIdentity, err := toV2ProtoIdentity(opts.LocalIdentity) + if err != nil { + return nil, err + } + return v2.NewClientCreds(opts.S2AAddress, opts.TransportCreds, v2LocalIdentity, verificationMode, fallbackFunc, opts.getS2AStream, opts.serverAuthorizationPolicy) } // NewServerCreds returns a server-side transport credentials object that uses @@ -121,7 +126,7 @@ func NewServerCreds(opts *ServerOptions) (credentials.TransportCredentials, erro if opts == nil { return nil, errors.New("nil server options") } - var localIdentities []*commonpb.Identity + var localIdentities []*commonpbv1.Identity for _, localIdentity := range opts.LocalIdentities { protoLocalIdentity, err := toProtoIdentity(localIdentity) if err != nil { @@ -134,12 +139,12 @@ func NewServerCreds(opts *ServerOptions) (credentials.TransportCredentials, erro info: &credentials.ProtocolInfo{ SecurityProtocol: s2aSecurityProtocol, }, - minTLSVersion: commonpb.TLSVersion_TLS1_3, - maxTLSVersion: commonpb.TLSVersion_TLS1_3, - tlsCiphersuites: []commonpb.Ciphersuite{ - commonpb.Ciphersuite_AES_128_GCM_SHA256, - commonpb.Ciphersuite_AES_256_GCM_SHA384, - commonpb.Ciphersuite_CHACHA20_POLY1305_SHA256, + minTLSVersion: commonpbv1.TLSVersion_TLS1_3, + maxTLSVersion: commonpbv1.TLSVersion_TLS1_3, + tlsCiphersuites: []commonpbv1.Ciphersuite{ + commonpbv1.Ciphersuite_AES_128_GCM_SHA256, + commonpbv1.Ciphersuite_AES_256_GCM_SHA384, + commonpbv1.Ciphersuite_CHACHA20_POLY1305_SHA256, }, localIdentities: localIdentities, isClient: false, @@ -147,7 +152,15 @@ func NewServerCreds(opts *ServerOptions) (credentials.TransportCredentials, erro }, nil } verificationMode := 
getVerificationMode(opts.VerificationMode) - return v2.NewServerCreds(opts.S2AAddress, opts.TransportCreds, localIdentities, verificationMode, opts.getS2AStream) + var v2LocalIdentities []*commonpb.Identity + for _, localIdentity := range opts.LocalIdentities { + protoLocalIdentity, err := toV2ProtoIdentity(localIdentity) + if err != nil { + return nil, err + } + v2LocalIdentities = append(v2LocalIdentities, protoLocalIdentity) + } + return v2.NewServerCreds(opts.S2AAddress, opts.TransportCreds, v2LocalIdentities, verificationMode, opts.getS2AStream) } // ClientHandshake initiates a client-side TLS handshake using the S2A. @@ -248,22 +261,22 @@ func (c *s2aTransportCreds) Info() credentials.ProtocolInfo { func (c *s2aTransportCreds) Clone() credentials.TransportCredentials { info := *c.info - var localIdentity *commonpb.Identity + var localIdentity *commonpbv1.Identity if c.localIdentity != nil { - localIdentity = proto.Clone(c.localIdentity).(*commonpb.Identity) + localIdentity = proto.Clone(c.localIdentity).(*commonpbv1.Identity) } - var localIdentities []*commonpb.Identity + var localIdentities []*commonpbv1.Identity if c.localIdentities != nil { - localIdentities = make([]*commonpb.Identity, len(c.localIdentities)) + localIdentities = make([]*commonpbv1.Identity, len(c.localIdentities)) for i, localIdentity := range c.localIdentities { - localIdentities[i] = proto.Clone(localIdentity).(*commonpb.Identity) + localIdentities[i] = proto.Clone(localIdentity).(*commonpbv1.Identity) } } - var targetIdentities []*commonpb.Identity + var targetIdentities []*commonpbv1.Identity if c.targetIdentities != nil { - targetIdentities = make([]*commonpb.Identity, len(c.targetIdentities)) + targetIdentities = make([]*commonpbv1.Identity, len(c.targetIdentities)) for i, targetIdentity := range c.targetIdentities { - targetIdentities[i] = proto.Clone(targetIdentity).(*commonpb.Identity) + targetIdentities[i] = proto.Clone(targetIdentity).(*commonpbv1.Identity) } } return &s2aTransportCreds{ @@ -351,6 +364,12 @@ func getVerificationMode(verificationMode VerificationModeType) s2av2pb.Validate return s2av2pb.ValidatePeerCertificateChainReq_CONNECT_TO_GOOGLE case Spiffe: return s2av2pb.ValidatePeerCertificateChainReq_SPIFFE + case ReservedCustomVerificationMode3: + return s2av2pb.ValidatePeerCertificateChainReq_RESERVED_CUSTOM_VERIFICATION_MODE_3 + case ReservedCustomVerificationMode4: + return s2av2pb.ValidatePeerCertificateChainReq_RESERVED_CUSTOM_VERIFICATION_MODE_4 + case ReservedCustomVerificationMode5: + return s2av2pb.ValidatePeerCertificateChainReq_RESERVED_CUSTOM_VERIFICATION_MODE_5 default: return s2av2pb.ValidatePeerCertificateChainReq_UNSPECIFIED } @@ -396,24 +415,20 @@ func NewS2ADialTLSContextFunc(opts *ClientOptions) func(ctx context.Context, net defer cancel() var s2aTLSConfig *tls.Config + var c net.Conn retry.Run(timeoutCtx, func() error { s2aTLSConfig, err = factory.Build(timeoutCtx, &TLSClientConfigOptions{ ServerName: serverName, }) - return err - }) - if err != nil { - grpclog.Infof("error building S2A TLS config: %v", err) - return fallback(err) - } + if err != nil { + grpclog.Infof("error building S2A TLS config: %v", err) + return err + } - s2aDialer := &tls.Dialer{ - Config: s2aTLSConfig, - } - var c net.Conn - retry.Run(timeoutCtx, - func() error { + s2aDialer := &tls.Dialer{ + Config: s2aTLSConfig, + } c, err = s2aDialer.DialContext(timeoutCtx, network, addr) return err }) diff --git a/vendor/github.com/google/s2a-go/s2a_options.go b/vendor/github.com/google/s2a-go/s2a_options.go 
index fcdbc1621bd..5bbf31bf412 100644 --- a/vendor/github.com/google/s2a-go/s2a_options.go +++ b/vendor/github.com/google/s2a-go/s2a_options.go @@ -28,7 +28,8 @@ import ( "github.com/google/s2a-go/stream" "google.golang.org/grpc/credentials" - s2apb "github.com/google/s2a-go/internal/proto/common_go_proto" + s2apbv1 "github.com/google/s2a-go/internal/proto/common_go_proto" + s2apb "github.com/google/s2a-go/internal/proto/v2/common_go_proto" ) // Identity is the interface for S2A identities. @@ -76,9 +77,12 @@ type VerificationModeType int // Three types of verification modes. const ( - Unspecified = iota - ConnectToGoogle + Unspecified VerificationModeType = iota Spiffe + ConnectToGoogle + ReservedCustomVerificationMode3 + ReservedCustomVerificationMode4 + ReservedCustomVerificationMode5 ) // ClientOptions contains the client-side options used to establish a secure @@ -198,7 +202,23 @@ func DefaultServerOptions(s2aAddress string) *ServerOptions { } } -func toProtoIdentity(identity Identity) (*s2apb.Identity, error) { +func toProtoIdentity(identity Identity) (*s2apbv1.Identity, error) { + if identity == nil { + return nil, nil + } + switch id := identity.(type) { + case *spiffeID: + return &s2apbv1.Identity{IdentityOneof: &s2apbv1.Identity_SpiffeId{SpiffeId: id.Name()}}, nil + case *hostname: + return &s2apbv1.Identity{IdentityOneof: &s2apbv1.Identity_Hostname{Hostname: id.Name()}}, nil + case *uid: + return &s2apbv1.Identity{IdentityOneof: &s2apbv1.Identity_Uid{Uid: id.Name()}}, nil + default: + return nil, errors.New("unrecognized identity type") + } +} + +func toV2ProtoIdentity(identity Identity) (*s2apb.Identity, error) { if identity == nil { return nil, nil } diff --git a/vendor/github.com/gorilla/handlers/.editorconfig b/vendor/github.com/gorilla/handlers/.editorconfig new file mode 100644 index 00000000000..c6b74c3e0d0 --- /dev/null +++ b/vendor/github.com/gorilla/handlers/.editorconfig @@ -0,0 +1,20 @@ +; https://editorconfig.org/ + +root = true + +[*] +insert_final_newline = true +charset = utf-8 +trim_trailing_whitespace = true +indent_style = space +indent_size = 2 + +[{Makefile,go.mod,go.sum,*.go,.gitmodules}] +indent_style = tab +indent_size = 4 + +[*.md] +indent_size = 4 +trim_trailing_whitespace = false + +eclint_indent_style = unset \ No newline at end of file diff --git a/vendor/github.com/gorilla/handlers/.gitignore b/vendor/github.com/gorilla/handlers/.gitignore new file mode 100644 index 00000000000..577a89e8138 --- /dev/null +++ b/vendor/github.com/gorilla/handlers/.gitignore @@ -0,0 +1,2 @@ +# Output of the go test coverage tool +coverage.coverprofile diff --git a/vendor/github.com/gorilla/handlers/LICENSE b/vendor/github.com/gorilla/handlers/LICENSE index 66ea3c8ae71..bb9d80bc9b6 100644 --- a/vendor/github.com/gorilla/handlers/LICENSE +++ b/vendor/github.com/gorilla/handlers/LICENSE @@ -1,22 +1,27 @@ -Copyright (c) 2013 The Gorilla Handlers Authors. All rights reserved. +Copyright (c) 2023 The Gorilla Authors. All rights reserved. Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: +modification, are permitted provided that the following conditions are +met: - Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. - Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/gorilla/handlers/Makefile b/vendor/github.com/gorilla/handlers/Makefile new file mode 100644 index 00000000000..003b784f7ed --- /dev/null +++ b/vendor/github.com/gorilla/handlers/Makefile @@ -0,0 +1,34 @@ +GO_LINT=$(shell which golangci-lint 2> /dev/null || echo '') +GO_LINT_URI=github.com/golangci/golangci-lint/cmd/golangci-lint@latest + +GO_SEC=$(shell which gosec 2> /dev/null || echo '') +GO_SEC_URI=github.com/securego/gosec/v2/cmd/gosec@latest + +GO_VULNCHECK=$(shell which govulncheck 2> /dev/null || echo '') +GO_VULNCHECK_URI=golang.org/x/vuln/cmd/govulncheck@latest + +.PHONY: verify +verify: sec govulncheck lint test + +.PHONY: lint +lint: + $(if $(GO_LINT), ,go install $(GO_LINT_URI)) + @echo "##### Running golangci-lint #####" + golangci-lint run -v + +.PHONY: sec +sec: + $(if $(GO_SEC), ,go install $(GO_SEC_URI)) + @echo "##### Running gosec #####" + gosec ./... + +.PHONY: govulncheck +govulncheck: + $(if $(GO_VULNCHECK), ,go install $(GO_VULNCHECK_URI)) + @echo "##### Running govulncheck #####" + govulncheck ./... + +.PHONY: test +test: + @echo "##### Running tests #####" + go test -race -cover -coverprofile=coverage.coverprofile -covermode=atomic -v ./... 
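# (Editorial note, not part of the upstream patch: the
# $(if $(GO_LINT), ,go install $(GO_LINT_URI)) pattern above installs each
# tool only when the `which` probe found nothing on PATH, so `make verify`
# bootstraps gosec, govulncheck, and golangci-lint on first run before
# chaining them with the race-enabled, coverage-profiled test target.)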
diff --git a/vendor/github.com/gorilla/handlers/README.md b/vendor/github.com/gorilla/handlers/README.md index 6eba66bf302..02555b2642c 100644 --- a/vendor/github.com/gorilla/handlers/README.md +++ b/vendor/github.com/gorilla/handlers/README.md @@ -1,10 +1,10 @@ -gorilla/handlers -================ +# gorilla/handlers + +![Testing](https://github.com/gorilla/handlers/actions/workflows/test.yml/badge.svg) +[![Codecov](https://codecov.io/github/gorilla/handlers/branch/main/graph/badge.svg)](https://codecov.io/github/gorilla/handlers) [![GoDoc](https://godoc.org/github.com/gorilla/handlers?status.svg)](https://godoc.org/github.com/gorilla/handlers) -[![CircleCI](https://circleci.com/gh/gorilla/handlers.svg?style=svg)](https://circleci.com/gh/gorilla/handlers) [![Sourcegraph](https://sourcegraph.com/github.com/gorilla/handlers/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/handlers?badge) - Package handlers is a collection of handlers (aka "HTTP middleware") for use with Go's `net/http` package (or any framework supporting `http.Handler`), including: diff --git a/vendor/github.com/gorilla/handlers/canonical.go b/vendor/github.com/gorilla/handlers/canonical.go index 8437fefc1ef..7121f5307be 100644 --- a/vendor/github.com/gorilla/handlers/canonical.go +++ b/vendor/github.com/gorilla/handlers/canonical.go @@ -21,12 +21,11 @@ type canonical struct { // // Example: // -// r := mux.NewRouter() -// canonical := handlers.CanonicalHost("http://www.gorillatoolkit.org", 302) -// r.HandleFunc("/route", YourHandler) -// -// log.Fatal(http.ListenAndServe(":7000", canonical(r))) +// r := mux.NewRouter() +// canonical := handlers.CanonicalHost("http://www.gorillatoolkit.org", 302) +// r.HandleFunc("/route", YourHandler) // +// log.Fatal(http.ListenAndServe(":7000", canonical(r))) func CanonicalHost(domain string, code int) func(h http.Handler) http.Handler { fn := func(h http.Handler) http.Handler { return canonical{h, domain, code} diff --git a/vendor/github.com/gorilla/handlers/compress.go b/vendor/github.com/gorilla/handlers/compress.go index 1e95f1ccbfa..d6f589503b5 100644 --- a/vendor/github.com/gorilla/handlers/compress.go +++ b/vendor/github.com/gorilla/handlers/compress.go @@ -44,13 +44,13 @@ type flusher interface { Flush() error } -func (w *compressResponseWriter) Flush() { +func (cw *compressResponseWriter) Flush() { // Flush compressed data if compressor supports it. - if f, ok := w.compressor.(flusher); ok { - f.Flush() + if f, ok := cw.compressor.(flusher); ok { + _ = f.Flush() } // Flush HTTP response. - if f, ok := w.w.(http.Flusher); ok { + if f, ok := cw.w.(http.Flusher); ok { f.Flush() } } diff --git a/vendor/github.com/gorilla/handlers/cors.go b/vendor/github.com/gorilla/handlers/cors.go index 0dcdffb3d32..8af9c096e5e 100644 --- a/vendor/github.com/gorilla/handlers/cors.go +++ b/vendor/github.com/gorilla/handlers/cors.go @@ -26,14 +26,14 @@ type cors struct { type OriginValidator func(string) bool var ( - defaultCorsOptionStatusCode = 200 - defaultCorsMethods = []string{"GET", "HEAD", "POST"} + defaultCorsOptionStatusCode = http.StatusOK + defaultCorsMethods = []string{http.MethodGet, http.MethodHead, http.MethodPost} defaultCorsHeaders = []string{"Accept", "Accept-Language", "Content-Language", "Origin"} - // (WebKit/Safari v9 sends the Origin header by default in AJAX requests) + // (WebKit/Safari v9 sends the Origin header by default in AJAX requests). 
) const ( - corsOptionMethod string = "OPTIONS" + corsOptionMethod string = http.MethodOptions corsAllowOriginHeader string = "Access-Control-Allow-Origin" corsExposeHeadersHeader string = "Access-Control-Expose-Headers" corsMaxAgeHeader string = "Access-Control-Max-Age" @@ -101,10 +101,8 @@ func (ch *cors) ServeHTTP(w http.ResponseWriter, r *http.Request) { if !ch.isMatch(method, defaultCorsMethods) { w.Header().Set(corsAllowMethodsHeader, method) } - } else { - if len(ch.exposedHeaders) > 0 { - w.Header().Set(corsExposeHeadersHeader, strings.Join(ch.exposedHeaders, ",")) - } + } else if len(ch.exposedHeaders) > 0 { + w.Header().Set(corsExposeHeadersHeader, strings.Join(ch.exposedHeaders, ",")) } if ch.allowCredentials { @@ -141,22 +139,21 @@ func (ch *cors) ServeHTTP(w http.ResponseWriter, r *http.Request) { // CORS provides Cross-Origin Resource Sharing middleware. // Example: // -// import ( -// "net/http" -// -// "github.com/gorilla/handlers" -// "github.com/gorilla/mux" -// ) +// import ( +// "net/http" // -// func main() { -// r := mux.NewRouter() -// r.HandleFunc("/users", UserEndpoint) -// r.HandleFunc("/projects", ProjectEndpoint) +// "github.com/gorilla/handlers" +// "github.com/gorilla/mux" +// ) // -// // Apply the CORS middleware to our top-level router, with the defaults. -// http.ListenAndServe(":8000", handlers.CORS()(r)) -// } +// func main() { +// r := mux.NewRouter() +// r.HandleFunc("/users", UserEndpoint) +// r.HandleFunc("/projects", ProjectEndpoint) // +// // Apply the CORS middleware to our top-level router, with the defaults. +// http.ListenAndServe(":8000", handlers.CORS()(r)) +// } func CORS(opts ...CORSOption) func(http.Handler) http.Handler { return func(h http.Handler) http.Handler { ch := parseCORSOptions(opts...) @@ -174,7 +171,7 @@ func parseCORSOptions(opts ...CORSOption) *cors { } for _, option := range opts { - option(ch) + _ = option(ch) //TODO: @bharat-rajani, return error to caller if not nil? } return ch diff --git a/vendor/github.com/gorilla/handlers/handlers.go b/vendor/github.com/gorilla/handlers/handlers.go index 0509482ad7a..9b92fce3333 100644 --- a/vendor/github.com/gorilla/handlers/handlers.go +++ b/vendor/github.com/gorilla/handlers/handlers.go @@ -35,7 +35,7 @@ func (h MethodHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { } sort.Strings(allow) w.Header().Set("Allow", strings.Join(allow, ", ")) - if req.Method == "OPTIONS" { + if req.Method == http.MethodOptions { w.WriteHeader(http.StatusOK) } else { http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) @@ -44,7 +44,7 @@ func (h MethodHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { } // responseLogger is wrapper of http.ResponseWriter that keeps track of its HTTP -// status code and body size +// status code and body size. type responseLogger struct { w http.ResponseWriter status int @@ -97,7 +97,7 @@ func isContentType(h http.Header, contentType string) bool { // Only PUT, POST, and PATCH requests are considered. 
func ContentTypeHandler(h http.Handler, contentTypes ...string) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if !(r.Method == "PUT" || r.Method == "POST" || r.Method == "PATCH") { + if !(r.Method == http.MethodPut || r.Method == http.MethodPost || r.Method == http.MethodPatch) { h.ServeHTTP(w, r) return } @@ -108,7 +108,10 @@ func ContentTypeHandler(h http.Handler, contentTypes ...string) http.Handler { return } } - http.Error(w, fmt.Sprintf("Unsupported content type %q; expected one of %q", r.Header.Get("Content-Type"), contentTypes), http.StatusUnsupportedMediaType) + http.Error(w, fmt.Sprintf("Unsupported content type %q; expected one of %q", + r.Header.Get("Content-Type"), + contentTypes), + http.StatusUnsupportedMediaType) }) } @@ -133,12 +136,12 @@ const ( // Form method takes precedence over header method. func HTTPMethodOverrideHandler(h http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.Method == "POST" { + if r.Method == http.MethodPost { om := r.FormValue(HTTPMethodOverrideFormKey) if om == "" { om = r.Header.Get(HTTPMethodOverrideHeader) } - if om == "PUT" || om == "PATCH" || om == "DELETE" { + if om == http.MethodPut || om == http.MethodPatch || om == http.MethodDelete { r.Method = om } } diff --git a/vendor/github.com/gorilla/handlers/logging.go b/vendor/github.com/gorilla/handlers/logging.go index 228465eba00..2badb6fbff8 100644 --- a/vendor/github.com/gorilla/handlers/logging.go +++ b/vendor/github.com/gorilla/handlers/logging.go @@ -18,7 +18,7 @@ import ( // Logging -// LogFormatterParams is the structure any formatter will be handed when time to log comes +// LogFormatterParams is the structure any formatter will be handed when time to log comes. type LogFormatterParams struct { Request *http.Request URL url.URL @@ -27,7 +27,7 @@ type LogFormatterParams struct { Size int } -// LogFormatter gives the signature of the formatter function passed to CustomLoggingHandler +// LogFormatter gives the signature of the formatter function passed to CustomLoggingHandler. type LogFormatter func(writer io.Writer, params LogFormatterParams) // loggingHandler is the http.Handler implementation for LoggingHandlerTo and its @@ -46,7 +46,10 @@ func (h loggingHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { h.handler.ServeHTTP(w, req) if req.MultipartForm != nil { - req.MultipartForm.RemoveAll() + err := req.MultipartForm.RemoveAll() + if err != nil { + return + } } params := LogFormatterParams{ @@ -76,7 +79,7 @@ const lowerhex = "0123456789abcdef" func appendQuoted(buf []byte, s string) []byte { var runeTmp [utf8.UTFMax]byte - for width := 0; len(s) > 0; s = s[width:] { + for width := 0; len(s) > 0; s = s[width:] { //nolint: wastedassign //TODO: why width starts from 0and reassigned as 1 r := rune(s[0]) width = 1 if r >= utf8.RuneSelf { @@ -191,7 +194,7 @@ func buildCommonLogLine(req *http.Request, url url.URL, ts time.Time, status int func writeLog(writer io.Writer, params LogFormatterParams) { buf := buildCommonLogLine(params.Request, params.URL, params.TimeStamp, params.StatusCode, params.Size) buf = append(buf, '\n') - writer.Write(buf) + _, _ = writer.Write(buf) } // writeCombinedLog writes a log entry for req to w in Apache Combined Log Format. @@ -204,7 +207,7 @@ func writeCombinedLog(writer io.Writer, params LogFormatterParams) { buf = append(buf, `" "`...) 
buf = appendQuoted(buf, params.Request.UserAgent()) buf = append(buf, '"', '\n') - writer.Write(buf) + _, _ = writer.Write(buf) } // CombinedLoggingHandler return a http.Handler that wraps h and logs requests to out in @@ -212,7 +215,7 @@ func writeCombinedLog(writer io.Writer, params LogFormatterParams) { // // See http://httpd.apache.org/docs/2.2/logs.html#combined for a description of this format. // -// LoggingHandler always sets the ident field of the log to - +// LoggingHandler always sets the ident field of the log to -. func CombinedLoggingHandler(out io.Writer, h http.Handler) http.Handler { return loggingHandler{out, h, writeCombinedLog} } @@ -226,19 +229,18 @@ func CombinedLoggingHandler(out io.Writer, h http.Handler) http.Handler { // // Example: // -// r := mux.NewRouter() -// r.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { -// w.Write([]byte("This is a catch-all route")) -// }) -// loggedRouter := handlers.LoggingHandler(os.Stdout, r) -// http.ListenAndServe(":1123", loggedRouter) -// +// r := mux.NewRouter() +// r.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { +// w.Write([]byte("This is a catch-all route")) +// }) +// loggedRouter := handlers.LoggingHandler(os.Stdout, r) +// http.ListenAndServe(":1123", loggedRouter) func LoggingHandler(out io.Writer, h http.Handler) http.Handler { return loggingHandler{out, h, writeLog} } // CustomLoggingHandler provides a way to supply a custom log formatter -// while taking advantage of the mechanisms in this package +// while taking advantage of the mechanisms in this package. func CustomLoggingHandler(out io.Writer, h http.Handler, f LogFormatter) http.Handler { return loggingHandler{out, h, f} } diff --git a/vendor/github.com/gorilla/handlers/proxy_headers.go b/vendor/github.com/gorilla/handlers/proxy_headers.go index ed939dcef5d..281d753e95a 100644 --- a/vendor/github.com/gorilla/handlers/proxy_headers.go +++ b/vendor/github.com/gorilla/handlers/proxy_headers.go @@ -18,7 +18,7 @@ var ( var ( // RFC7239 defines a new "Forwarded: " header designed to replace the // existing use of X-Forwarded-* headers. - // e.g. Forwarded: for=192.0.2.60;proto=https;by=203.0.113.43 + // e.g. Forwarded: for=192.0.2.60;proto=https;by=203.0.113.43. forwarded = http.CanonicalHeaderKey("Forwarded") // Allows for a sub-match of the first value after 'for=' to the next // comma, semi-colon or space. The match is case-insensitive. @@ -67,7 +67,9 @@ func ProxyHeaders(h http.Handler) http.Handler { func getIP(r *http.Request) string { var addr string - if fwd := r.Header.Get(xForwardedFor); fwd != "" { + switch { + case r.Header.Get(xForwardedFor) != "": + fwd := r.Header.Get(xForwardedFor) // Only grab the first (client) address. Note that '192.168.0.1, // 10.1.1.1' is a valid key for X-Forwarded-For where addresses after // the first may represent forwarding proxies earlier in the chain. @@ -76,17 +78,15 @@ func getIP(r *http.Request) string { s = len(fwd) } addr = fwd[:s] - } else if fwd := r.Header.Get(xRealIP); fwd != "" { - // X-Real-IP should only contain one IP address (the client making the - // request). - addr = fwd - } else if fwd := r.Header.Get(forwarded); fwd != "" { + case r.Header.Get(xRealIP) != "": + addr = r.Header.Get(xRealIP) + case r.Header.Get(forwarded) != "": // match should contain at least two elements if the protocol was // specified in the Forwarded header. The first element will always be // the 'for=' capture, which we ignore. 
In the case of multiple IP // addresses (for=8.8.8.8, 8.8.4.4,172.16.1.20 is valid) we only // extract the first, which should be the client IP. - if match := forRegex.FindStringSubmatch(fwd); len(match) > 1 { + if match := forRegex.FindStringSubmatch(r.Header.Get(forwarded)); len(match) > 1 { // IPv6 addresses in Forwarded headers are quoted-strings. We strip // these quotes. addr = strings.Trim(match[1], `"`) diff --git a/vendor/github.com/gorilla/handlers/recovery.go b/vendor/github.com/gorilla/handlers/recovery.go index 4c4c1d9c6ce..0d4f955ecbd 100644 --- a/vendor/github.com/gorilla/handlers/recovery.go +++ b/vendor/github.com/gorilla/handlers/recovery.go @@ -36,12 +36,12 @@ func parseRecoveryOptions(h http.Handler, opts ...RecoveryOption) http.Handler { // // Example: // -// r := mux.NewRouter() -// r.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { -// panic("Unexpected error!") -// }) +// r := mux.NewRouter() +// r.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { +// panic("Unexpected error!") +// }) // -// http.ListenAndServe(":1123", handlers.RecoveryHandler()(r)) +// http.ListenAndServe(":1123", handlers.RecoveryHandler()(r)) func RecoveryHandler(opts ...RecoveryOption) func(h http.Handler) http.Handler { return func(h http.Handler) http.Handler { r := &recoveryHandler{handler: h} @@ -50,20 +50,22 @@ func RecoveryHandler(opts ...RecoveryOption) func(h http.Handler) http.Handler { } // RecoveryLogger is a functional option to override -// the default logger +// the default logger. func RecoveryLogger(logger RecoveryHandlerLogger) RecoveryOption { return func(h http.Handler) { - r := h.(*recoveryHandler) + r := h.(*recoveryHandler) //nolint:errcheck //TODO: + // @bharat-rajani should return type-assertion error but would break the API? r.logger = logger } } // PrintRecoveryStack is a functional option to enable // or disable printing stack traces on panic. -func PrintRecoveryStack(print bool) RecoveryOption { +func PrintRecoveryStack(shouldPrint bool) RecoveryOption { return func(h http.Handler) { - r := h.(*recoveryHandler) - r.printStack = print + r := h.(*recoveryHandler) //nolint:errcheck //TODO: + // @bharat-rajani should return type-assertion error but would break the API? + r.printStack = shouldPrint } } diff --git a/vendor/github.com/grafana/dskit/test/diff.go b/vendor/github.com/grafana/dskit/test/diff.go new file mode 100644 index 00000000000..3cc7ea63892 --- /dev/null +++ b/vendor/github.com/grafana/dskit/test/diff.go @@ -0,0 +1,28 @@ +// Provenance-includes-location: https://github.com/weaveworks/common/blob/main/test/diff.go +// Provenance-includes-license: Apache-2.0 +// Provenance-includes-copyright: Weaveworks Ltd. + +package test + +import ( + "github.com/davecgh/go-spew/spew" + "github.com/pmezard/go-difflib/difflib" +) + +// Diff diffs two arbitrary data structures, giving human-readable output. 
+func Diff(want, have interface{}) string { + config := spew.NewDefaultConfig() + // Set ContinueOnMethod to true if you cannot see a difference and + // want to look beyond the String() method + config.ContinueOnMethod = false + config.SortKeys = true + config.SpewKeys = true + text, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{ + A: difflib.SplitLines(config.Sdump(want)), + B: difflib.SplitLines(config.Sdump(have)), + FromFile: "want", + ToFile: "have", + Context: 3, + }) + return "\n" + text +} diff --git a/vendor/github.com/grafana/dskit/test/poll.go b/vendor/github.com/grafana/dskit/test/poll.go new file mode 100644 index 00000000000..05ba41235ac --- /dev/null +++ b/vendor/github.com/grafana/dskit/test/poll.go @@ -0,0 +1,26 @@ +package test + +import ( + "reflect" + "testing" + "time" +) + +// Poll repeatedly calls a function until the function returns the correct response or until poll timeout. +func Poll(t testing.TB, d time.Duration, want interface{}, have func() interface{}) { + t.Helper() + deadline := time.Now().Add(d) + for { + if time.Now().After(deadline) { + break + } + if reflect.DeepEqual(want, have()) { + return + } + time.Sleep(d / 100) + } + h := have() + if !reflect.DeepEqual(want, h) { + t.Fatalf("expected %v, got %v", want, h) + } +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/common.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/common.go deleted file mode 100644 index 3e905cf1e32..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/common.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2017 Michal Witkowski. All Rights Reserved. -// See LICENSE for licensing terms. - -package grpc_logging - -import ( - "context" - "io" - - "github.com/golang/protobuf/proto" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -// ErrorToCode function determines the error code of an error -// This makes using custom errors with grpc middleware easier -type ErrorToCode func(err error) codes.Code - -func DefaultErrorToCode(err error) codes.Code { - return status.Code(err) -} - -// Decider function defines rules for suppressing any interceptor logs -type Decider func(fullMethodName string, err error) bool - -// DefaultDeciderMethod is the default implementation of decider to see if you should log the call -// by default this if always true so all calls are logged -func DefaultDeciderMethod(fullMethodName string, err error) bool { - return true -} - -// ServerPayloadLoggingDecider is a user-provided function for deciding whether to log the server-side -// request/response payloads -type ServerPayloadLoggingDecider func(ctx context.Context, fullMethodName string, servingObject interface{}) bool - -// ClientPayloadLoggingDecider is a user-provided function for deciding whether to log the client-side -// request/response payloads -type ClientPayloadLoggingDecider func(ctx context.Context, fullMethodName string) bool - -// JsonPbMarshaller is a marshaller that serializes protobuf messages. -type JsonPbMarshaler interface { - Marshal(out io.Writer, pb proto.Message) error -} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/doc.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/doc.go deleted file mode 100644 index d8fcea081a6..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/doc.go +++ /dev/null @@ -1,32 +0,0 @@ -// -/* -grpc_logging is a "parent" package for gRPC logging middlewares. 
- -General functionality of all middleware - -The gRPC logging middleware populates request-scoped data to `grpc_ctxtags.Tags` that relate to the current gRPC call -(e.g. service and method names). - -Once the gRPC logging middleware has added the gRPC specific Tags to the ctx they will then be written with the logs -that are made using the `ctx_logrus` or `ctx_zap` loggers. - -All logging middleware will emit a final log statement. It is based on the error returned by the handler function, -the gRPC status code, an error (if any) and it will emit at a level controlled via `WithLevels`. - -This parent package - -This particular package is intended for use by other middleware, logging or otherwise. It contains interfaces that other -logging middlewares *could* share . This allows code to be shared between different implementations. - -Field names - -All field names of loggers follow the OpenTracing semantics definitions, with `grpc.` prefix if needed: -https://github.com/opentracing/specification/blob/master/semantic_conventions.md - -Implementations - -There are three implementations at the moment: logrus, zap and kit - -See relevant packages below. -*/ -package grpc_logging diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/settable/doc.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/settable/doc.go deleted file mode 100644 index c447ec6b9e5..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/settable/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// -/* -grpc_logsettable contains a thread-safe wrapper around grpc-logging -infrastructure. - -The go-grpc assumes that logger can be only configured once as the `SetLoggerV2` -method is: -```Not mutex-protected, should be called before any gRPC functions.``` - -This package allows to supply parent logger once ("before any grpc"), but -later change underlying implementation in thread-safe way when needed. - -It's in particular useful for testing, where each testcase might need its own -logger. -*/ -package grpc_logsettable diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/settable/logsettable.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/settable/logsettable.go deleted file mode 100644 index 9e403b2b2d0..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/settable/logsettable.go +++ /dev/null @@ -1,99 +0,0 @@ -package grpc_logsettable - -import ( - "io/ioutil" - "sync" - - "google.golang.org/grpc/grpclog" -) - -// SettableLoggerV2 is thread-safe. -type SettableLoggerV2 interface { - grpclog.LoggerV2 - // Sets given logger as the underlying implementation. - Set(loggerv2 grpclog.LoggerV2) - // Sets `discard` logger as the underlying implementation. - Reset() -} - -// ReplaceGrpcLoggerV2 creates and configures SettableLoggerV2 as grpc logger. 
-func ReplaceGrpcLoggerV2() SettableLoggerV2 { - settable := &settableLoggerV2{} - settable.Reset() - grpclog.SetLoggerV2(settable) - return settable -} - -// SettableLoggerV2 implements SettableLoggerV2 -type settableLoggerV2 struct { - log grpclog.LoggerV2 - mu sync.RWMutex -} - -func (s *settableLoggerV2) Set(log grpclog.LoggerV2) { - s.mu.Lock() - defer s.mu.Unlock() - s.log = log -} - -func (s *settableLoggerV2) Reset() { - s.Set(grpclog.NewLoggerV2(ioutil.Discard, ioutil.Discard, ioutil.Discard)) -} - -func (s *settableLoggerV2) get() grpclog.LoggerV2 { - s.mu.RLock() - defer s.mu.RUnlock() - return s.log -} - -func (s *settableLoggerV2) Info(args ...interface{}) { - s.get().Info(args) -} - -func (s *settableLoggerV2) Infoln(args ...interface{}) { - s.get().Infoln(args) -} - -func (s *settableLoggerV2) Infof(format string, args ...interface{}) { - s.get().Infof(format, args) -} - -func (s *settableLoggerV2) Warning(args ...interface{}) { - s.get().Warning(args) -} - -func (s *settableLoggerV2) Warningln(args ...interface{}) { - s.get().Warningln(args) -} - -func (s *settableLoggerV2) Warningf(format string, args ...interface{}) { - s.get().Warningf(format, args) -} - -func (s *settableLoggerV2) Error(args ...interface{}) { - s.get().Error(args) -} - -func (s *settableLoggerV2) Errorln(args ...interface{}) { - s.get().Errorln(args) -} - -func (s *settableLoggerV2) Errorf(format string, args ...interface{}) { - s.get().Errorf(format, args) -} - -func (s *settableLoggerV2) Fatal(args ...interface{}) { - s.get().Fatal(args) -} - -func (s *settableLoggerV2) Fatalln(args ...interface{}) { - s.get().Fatalln(args) -} - -func (s *settableLoggerV2) Fatalf(format string, args ...interface{}) { - s.get().Fatalf(format, args) -} - -func (s *settableLoggerV2) V(l int) bool { - return s.get().V(l) -} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/client_interceptors.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/client_interceptors.go deleted file mode 100644 index de80c1cc02e..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/client_interceptors.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2017 Michal Witkowski. All Rights Reserved. -// See LICENSE for licensing terms. - -package grpc_zap - -import ( - "context" - "path" - "time" - - "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" - "google.golang.org/grpc" -) - -var ( - // ClientField is used in every client-side log statement made through grpc_zap. Can be overwritten before initialization. - ClientField = zap.String("span.kind", "client") -) - -// UnaryClientInterceptor returns a new unary client interceptor that optionally logs the execution of external gRPC calls. -func UnaryClientInterceptor(logger *zap.Logger, opts ...Option) grpc.UnaryClientInterceptor { - o := evaluateClientOpt(opts) - return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { - fields := newClientLoggerFields(ctx, method) - startTime := time.Now() - err := invoker(ctx, method, req, reply, cc, opts...) - newCtx := ctxzap.ToContext(ctx, logger.With(fields...)) - logFinalClientLine(newCtx, o, startTime, err, "finished client unary call") - return err - } -} - -// StreamClientInterceptor returns a new streaming client interceptor that optionally logs the execution of external gRPC calls. 
-func StreamClientInterceptor(logger *zap.Logger, opts ...Option) grpc.StreamClientInterceptor { - o := evaluateClientOpt(opts) - return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { - fields := newClientLoggerFields(ctx, method) - startTime := time.Now() - clientStream, err := streamer(ctx, desc, cc, method, opts...) - newCtx := ctxzap.ToContext(ctx, logger.With(fields...)) - logFinalClientLine(newCtx, o, startTime, err, "finished client streaming call") - return clientStream, err - } -} - -func logFinalClientLine(ctx context.Context, o *options, startTime time.Time, err error, msg string) { - code := o.codeFunc(err) - level := o.levelFunc(code) - duration := o.durationFunc(time.Now().Sub(startTime)) - o.messageFunc(ctx, msg, level, code, err, duration) -} - -func newClientLoggerFields(ctx context.Context, fullMethodString string) []zapcore.Field { - service := path.Dir(fullMethodString)[1:] - method := path.Base(fullMethodString) - return []zapcore.Field{ - SystemField, - ClientField, - zap.String("grpc.service", service), - zap.String("grpc.method", method), - } -} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/context.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/context.go deleted file mode 100644 index 56f6408a620..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/context.go +++ /dev/null @@ -1,21 +0,0 @@ -package grpc_zap - -import ( - "context" - - "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" -) - -// AddFields adds zap fields to the logger. -// Deprecated: should use the ctxzap.AddFields instead -func AddFields(ctx context.Context, fields ...zapcore.Field) { - ctxzap.AddFields(ctx, fields...) -} - -// Extract takes the call-scoped Logger from grpc_zap middleware. -// Deprecated: should use the ctxzap.Extract instead -func Extract(ctx context.Context) *zap.Logger { - return ctxzap.Extract(ctx) -} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap/context.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap/context.go deleted file mode 100644 index 1d8ae49a19b..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap/context.go +++ /dev/null @@ -1,88 +0,0 @@ -package ctxzap - -import ( - "context" - - "github.com/grpc-ecosystem/go-grpc-middleware/tags" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" -) - -type ctxMarker struct{} - -type ctxLogger struct { - logger *zap.Logger - fields []zapcore.Field -} - -var ( - ctxMarkerKey = &ctxMarker{} - nullLogger = zap.NewNop() -) - -// AddFields adds zap fields to the logger. -func AddFields(ctx context.Context, fields ...zapcore.Field) { - l, ok := ctx.Value(ctxMarkerKey).(*ctxLogger) - if !ok || l == nil { - return - } - l.fields = append(l.fields, fields...) -} - -// Extract takes the call-scoped Logger from grpc_zap middleware. -// -// It always returns a Logger that has all the grpc_ctxtags updated. -func Extract(ctx context.Context) *zap.Logger { - l, ok := ctx.Value(ctxMarkerKey).(*ctxLogger) - if !ok || l == nil { - return nullLogger - } - // Add grpc_ctxtags tags metadata until now. - fields := TagsToFields(ctx) - // Add zap fields added until now. - fields = append(fields, l.fields...) - return l.logger.With(fields...) 
-} - -// TagsToFields transforms the Tags on the supplied context into zap fields. -func TagsToFields(ctx context.Context) []zapcore.Field { - fields := []zapcore.Field{} - tags := grpc_ctxtags.Extract(ctx) - for k, v := range tags.Values() { - fields = append(fields, zap.Any(k, v)) - } - return fields -} - -// ToContext adds the zap.Logger to the context for extraction later. -// Returning the new context that has been created. -func ToContext(ctx context.Context, logger *zap.Logger) context.Context { - l := &ctxLogger{ - logger: logger, - } - return context.WithValue(ctx, ctxMarkerKey, l) -} - -// Debug is equivalent to calling Debug on the zap.Logger in the context. -// It is a no-op if the context does not contain a zap.Logger. -func Debug(ctx context.Context, msg string, fields ...zap.Field) { - Extract(ctx).WithOptions(zap.AddCallerSkip(1)).Debug(msg, fields...) -} - -// Info is equivalent to calling Info on the zap.Logger in the context. -// It is a no-op if the context does not contain a zap.Logger. -func Info(ctx context.Context, msg string, fields ...zap.Field) { - Extract(ctx).WithOptions(zap.AddCallerSkip(1)).Info(msg, fields...) -} - -// Warn is equivalent to calling Warn on the zap.Logger in the context. -// It is a no-op if the context does not contain a zap.Logger. -func Warn(ctx context.Context, msg string, fields ...zap.Field) { - Extract(ctx).WithOptions(zap.AddCallerSkip(1)).Warn(msg, fields...) -} - -// Error is equivalent to calling Error on the zap.Logger in the context. -// It is a no-op if the context does not contain a zap.Logger. -func Error(ctx context.Context, msg string, fields ...zap.Field) { - Extract(ctx).WithOptions(zap.AddCallerSkip(1)).Error(msg, fields...) -} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap/doc.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap/doc.go deleted file mode 100644 index 3591e3585ea..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap/doc.go +++ /dev/null @@ -1,14 +0,0 @@ -/* -`ctxzap` is a ctxlogger that is backed by Zap - -It accepts a user-configured `zap.Logger` that will be used for logging. The same `zap.Logger` will -be populated into the `context.Context` passed into gRPC handler code. - -You can use `ctxzap.Extract` to log into a request-scoped `zap.Logger` instance in your handler code. - -As `ctxzap.Extract` will iterate all tags on from `grpc_ctxtags` it is therefore expensive so it is advised that you -extract once at the start of the function from the context and reuse it for the remainder of the function (see examples). - -Please see examples and tests for examples of use. -*/ -package ctxzap diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/doc.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/doc.go deleted file mode 100644 index ffa6b5c3194..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/doc.go +++ /dev/null @@ -1,75 +0,0 @@ -/* -`grpc_zap` is a gRPC logging middleware backed by ZAP loggers - -It accepts a user-configured `zap.Logger` that will be used for logging completed gRPC calls. The same `zap.Logger` will -be used for logging completed gRPC calls, and be populated into the `context.Context` passed into gRPC handler code. - -On calling `StreamServerInterceptor` or `UnaryServerInterceptor` this logging middleware will add gRPC call information -to the ctx so that it will be present on subsequent use of the `ctx_zap` logger. 
- -If a deadline is present on the gRPC request the grpc.request.deadline tag is populated when the request begins. grpc.request.deadline -is a string representing the time (RFC3339) when the current call will expire. - -This package also implements request and response *payload* logging, both for server-side and client-side. These will be -logged as structured `jsonpb` fields for every message received/sent (both unary and streaming). For that please use -`Payload*Interceptor` functions for that. Please note that the user-provided function that determines whether to log -the full request/response payload needs to be written with care, this can significantly slow down gRPC. - -ZAP can also be made as a backend for gRPC library internals. For that use `ReplaceGrpcLoggerV2`. - - -*Server Interceptor* -Below is a JSON formatted example of a log that would be logged by the server interceptor: - - { - "level": "info", // string zap log levels - "msg": "finished unary call", // string log message - - "grpc.code": "OK", // string grpc status code - "grpc.method": "Ping", // string method name - "grpc.service": "mwitkow.testproto.TestService", // string full name of the called service - "grpc.start_time": "2006-01-02T15:04:05Z07:00", // string RFC3339 representation of the start time - "grpc.request.deadline": "2006-01-02T15:04:05Z07:00", // string RFC3339 deadline of the current request if supplied - "grpc.request.value": "something", // string value on the request - "grpc.time_ms": 1.345, // float32 run time of the call in ms - - "peer.address": { - "IP": "127.0.0.1", // string IP address of calling party - "Port": 60216, // int port call is coming in on - "Zone": "" // string peer zone for caller - }, - "span.kind": "server", // string client | server - "system": "grpc" // string - - "custom_field": "custom_value", // string user defined field - "custom_tags.int": 1337, // int user defined tag on the ctx - "custom_tags.string": "something", // string user defined tag on the ctx - } - -*Payload Interceptor* -Below is a JSON formatted example of a log that would be logged by the payload interceptor: - - { - "level": "info", // string zap log levels - "msg": "client request payload logged as grpc.request.content", // string log message - - "grpc.request.content": { // object content of RPC request - "msg" : { // object ZAP specific inner object - "value": "something", // string defined by caller - "sleepTimeMs": 9999 // int defined by caller - } - }, - "grpc.method": "Ping", // string method being called - "grpc.service": "mwitkow.testproto.TestService", // string service being called - - "span.kind": "client", // string client | server - "system": "grpc" // string - } - -Note - due to implementation ZAP differs from Logrus in the "grpc.request.content" object by having an inner "msg" object. - - -Please see examples and tests for examples of use. -Please see settable_test.go for canonical integration through "zaptest" with golang testing infrastructure. -*/ -package grpc_zap diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/grpclogger.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/grpclogger.go deleted file mode 100644 index 4cdee602048..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/grpclogger.go +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright 2017 Michal Witkowski. All Rights Reserved. -// See LICENSE for licensing terms. 
-
-package grpc_zap
-
-import (
-	"fmt"
-
-	grpc_logsettable "github.com/grpc-ecosystem/go-grpc-middleware/logging/settable"
-	"go.uber.org/zap"
-	"google.golang.org/grpc/grpclog"
-)
-
-// ReplaceGrpcLogger sets the given zap.Logger as a gRPC-level logger.
-// This should be called *before* any other initialization, preferably from init() functions.
-// Deprecated: use ReplaceGrpcLoggerV2.
-func ReplaceGrpcLogger(logger *zap.Logger) {
-	zgl := &zapGrpcLogger{logger.With(SystemField, zap.Bool("grpc_log", true))}
-	grpclog.SetLogger(zgl)
-}
-
-type zapGrpcLogger struct {
-	logger *zap.Logger
-}
-
-func (l *zapGrpcLogger) Fatal(args ...interface{}) {
-	l.logger.Fatal(fmt.Sprint(args...))
-}
-
-func (l *zapGrpcLogger) Fatalf(format string, args ...interface{}) {
-	l.logger.Fatal(fmt.Sprintf(format, args...))
-}
-
-func (l *zapGrpcLogger) Fatalln(args ...interface{}) {
-	l.logger.Fatal(fmt.Sprint(args...))
-}
-
-func (l *zapGrpcLogger) Print(args ...interface{}) {
-	l.logger.Info(fmt.Sprint(args...))
-}
-
-func (l *zapGrpcLogger) Printf(format string, args ...interface{}) {
-	l.logger.Info(fmt.Sprintf(format, args...))
-}
-
-func (l *zapGrpcLogger) Println(args ...interface{}) {
-	l.logger.Info(fmt.Sprint(args...))
-}
-
-// ReplaceGrpcLoggerV2 replaces the grpclog.LoggerV2 with the provided logger.
-// It should be called before any gRPC functions. Logging verbosity defaults to info level.
-// To adjust gRPC logging verbosity, see ReplaceGrpcLoggerV2WithVerbosity.
-func ReplaceGrpcLoggerV2(logger *zap.Logger) {
-	ReplaceGrpcLoggerV2WithVerbosity(logger, 0)
-}
-
-// ReplaceGrpcLoggerV2WithVerbosity replaces the grpclog.LoggerV2 with the provided logger and verbosity.
-// It should be called before any gRPC functions.
-// verbosity correlates to grpclog's verbosity levels. A higher verbosity value results in more logging.
-func ReplaceGrpcLoggerV2WithVerbosity(logger *zap.Logger, verbosity int) {
-	zgl := &zapGrpcLoggerV2{
-		logger:    logger.With(SystemField, zap.Bool("grpc_log", true)).WithOptions(zap.AddCallerSkip(2)),
-		verbosity: verbosity,
-	}
-	grpclog.SetLoggerV2(zgl)
-}
-
-// SetGrpcLoggerV2 replaces the grpclog.LoggerV2 with the provided logger.
-// It can be used even after the gRPC infrastructure has been initialized.
-func SetGrpcLoggerV2(settable grpc_logsettable.SettableLoggerV2, logger *zap.Logger) {
-	SetGrpcLoggerV2WithVerbosity(settable, logger, 0)
-}
-
-// SetGrpcLoggerV2WithVerbosity replaces the grpclog.LoggerV2 with the provided logger and verbosity.
-// It can be used even after the gRPC infrastructure has been initialized.
-func SetGrpcLoggerV2WithVerbosity(settable grpc_logsettable.SettableLoggerV2, logger *zap.Logger, verbosity int) { - zgl := &zapGrpcLoggerV2{ - logger: logger.With(SystemField, zap.Bool("grpc_log", true)), - verbosity: verbosity, - } - settable.Set(zgl) -} - -type zapGrpcLoggerV2 struct { - logger *zap.Logger - verbosity int -} - -func (l *zapGrpcLoggerV2) Info(args ...interface{}) { - l.logger.Info(fmt.Sprint(args...)) -} - -func (l *zapGrpcLoggerV2) Infoln(args ...interface{}) { - l.logger.Info(fmt.Sprint(args...)) -} - -func (l *zapGrpcLoggerV2) Infof(format string, args ...interface{}) { - l.logger.Info(fmt.Sprintf(format, args...)) -} - -func (l *zapGrpcLoggerV2) Warning(args ...interface{}) { - l.logger.Warn(fmt.Sprint(args...)) -} - -func (l *zapGrpcLoggerV2) Warningln(args ...interface{}) { - l.logger.Warn(fmt.Sprint(args...)) -} - -func (l *zapGrpcLoggerV2) Warningf(format string, args ...interface{}) { - l.logger.Warn(fmt.Sprintf(format, args...)) -} - -func (l *zapGrpcLoggerV2) Error(args ...interface{}) { - l.logger.Error(fmt.Sprint(args...)) -} - -func (l *zapGrpcLoggerV2) Errorln(args ...interface{}) { - l.logger.Error(fmt.Sprint(args...)) -} - -func (l *zapGrpcLoggerV2) Errorf(format string, args ...interface{}) { - l.logger.Error(fmt.Sprintf(format, args...)) -} - -func (l *zapGrpcLoggerV2) Fatal(args ...interface{}) { - l.logger.Fatal(fmt.Sprint(args...)) -} - -func (l *zapGrpcLoggerV2) Fatalln(args ...interface{}) { - l.logger.Fatal(fmt.Sprint(args...)) -} - -func (l *zapGrpcLoggerV2) Fatalf(format string, args ...interface{}) { - l.logger.Fatal(fmt.Sprintf(format, args...)) -} - -func (l *zapGrpcLoggerV2) V(level int) bool { - // Check whether the verbosity of the current log ('level') is within the specified threshold ('l.verbosity'). - // As in https://github.com/grpc/grpc-go/blob/41e044e1c82fcf6a5801d6cbd7ecf952505eecb1/grpclog/loggerv2.go#L199-L201. 
- return level <= l.verbosity -} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/options.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/options.go deleted file mode 100644 index b59db9ad69e..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/options.go +++ /dev/null @@ -1,217 +0,0 @@ -package grpc_zap - -import ( - "context" - "time" - - grpc_logging "github.com/grpc-ecosystem/go-grpc-middleware/logging" - "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" - "google.golang.org/grpc/codes" -) - -var ( - defaultOptions = &options{ - levelFunc: DefaultCodeToLevel, - shouldLog: grpc_logging.DefaultDeciderMethod, - codeFunc: grpc_logging.DefaultErrorToCode, - durationFunc: DefaultDurationToField, - messageFunc: DefaultMessageProducer, - timestampFormat: time.RFC3339, - } -) - -type options struct { - levelFunc CodeToLevel - shouldLog grpc_logging.Decider - codeFunc grpc_logging.ErrorToCode - durationFunc DurationToField - messageFunc MessageProducer - timestampFormat string -} - -func evaluateServerOpt(opts []Option) *options { - optCopy := &options{} - *optCopy = *defaultOptions - optCopy.levelFunc = DefaultCodeToLevel - for _, o := range opts { - o(optCopy) - } - return optCopy -} - -func evaluateClientOpt(opts []Option) *options { - optCopy := &options{} - *optCopy = *defaultOptions - optCopy.levelFunc = DefaultClientCodeToLevel - for _, o := range opts { - o(optCopy) - } - return optCopy -} - -type Option func(*options) - -// CodeToLevel function defines the mapping between gRPC return codes and interceptor log level. -type CodeToLevel func(code codes.Code) zapcore.Level - -// DurationToField function defines how to produce duration fields for logging -type DurationToField func(duration time.Duration) zapcore.Field - -// WithDecider customizes the function for deciding if the gRPC interceptor logs should log. -func WithDecider(f grpc_logging.Decider) Option { - return func(o *options) { - o.shouldLog = f - } -} - -// WithLevels customizes the function for mapping gRPC return codes and interceptor log level statements. -func WithLevels(f CodeToLevel) Option { - return func(o *options) { - o.levelFunc = f - } -} - -// WithCodes customizes the function for mapping errors to error codes. -func WithCodes(f grpc_logging.ErrorToCode) Option { - return func(o *options) { - o.codeFunc = f - } -} - -// WithDurationField customizes the function for mapping request durations to Zap fields. -func WithDurationField(f DurationToField) Option { - return func(o *options) { - o.durationFunc = f - } -} - -// WithMessageProducer customizes the function for message formation. -func WithMessageProducer(f MessageProducer) Option { - return func(o *options) { - o.messageFunc = f - } -} - -// WithTimestampFormat customizes the timestamps emitted in the log fields. -func WithTimestampFormat(format string) Option { - return func(o *options) { - o.timestampFormat = format - } -} - -// DefaultCodeToLevel is the default implementation of gRPC return codes and interceptor log level for server side. 
-func DefaultCodeToLevel(code codes.Code) zapcore.Level { - switch code { - case codes.OK: - return zap.InfoLevel - case codes.Canceled: - return zap.InfoLevel - case codes.Unknown: - return zap.ErrorLevel - case codes.InvalidArgument: - return zap.InfoLevel - case codes.DeadlineExceeded: - return zap.WarnLevel - case codes.NotFound: - return zap.InfoLevel - case codes.AlreadyExists: - return zap.InfoLevel - case codes.PermissionDenied: - return zap.WarnLevel - case codes.Unauthenticated: - return zap.InfoLevel // unauthenticated requests can happen - case codes.ResourceExhausted: - return zap.WarnLevel - case codes.FailedPrecondition: - return zap.WarnLevel - case codes.Aborted: - return zap.WarnLevel - case codes.OutOfRange: - return zap.WarnLevel - case codes.Unimplemented: - return zap.ErrorLevel - case codes.Internal: - return zap.ErrorLevel - case codes.Unavailable: - return zap.WarnLevel - case codes.DataLoss: - return zap.ErrorLevel - default: - return zap.ErrorLevel - } -} - -// DefaultClientCodeToLevel is the default implementation of gRPC return codes to log levels for client side. -func DefaultClientCodeToLevel(code codes.Code) zapcore.Level { - switch code { - case codes.OK: - return zap.DebugLevel - case codes.Canceled: - return zap.DebugLevel - case codes.Unknown: - return zap.InfoLevel - case codes.InvalidArgument: - return zap.DebugLevel - case codes.DeadlineExceeded: - return zap.InfoLevel - case codes.NotFound: - return zap.DebugLevel - case codes.AlreadyExists: - return zap.DebugLevel - case codes.PermissionDenied: - return zap.InfoLevel - case codes.Unauthenticated: - return zap.InfoLevel // unauthenticated requests can happen - case codes.ResourceExhausted: - return zap.DebugLevel - case codes.FailedPrecondition: - return zap.DebugLevel - case codes.Aborted: - return zap.DebugLevel - case codes.OutOfRange: - return zap.DebugLevel - case codes.Unimplemented: - return zap.WarnLevel - case codes.Internal: - return zap.WarnLevel - case codes.Unavailable: - return zap.WarnLevel - case codes.DataLoss: - return zap.WarnLevel - default: - return zap.InfoLevel - } -} - -// DefaultDurationToField is the default implementation of converting request duration to a Zap field. -var DefaultDurationToField = DurationToTimeMillisField - -// DurationToTimeMillisField converts the duration to milliseconds and uses the key `grpc.time_ms`. -func DurationToTimeMillisField(duration time.Duration) zapcore.Field { - return zap.Float32("grpc.time_ms", durationToMilliseconds(duration)) -} - -// DurationToDurationField uses a Duration field to log the request duration -// and leaves it up to Zap's encoder settings to determine how that is output. -func DurationToDurationField(duration time.Duration) zapcore.Field { - return zap.Duration("grpc.duration", duration) -} - -func durationToMilliseconds(duration time.Duration) float32 { - return float32(duration.Nanoseconds()/1000) / 1000 -} - -// MessageProducer produces a user defined log message -type MessageProducer func(ctx context.Context, msg string, level zapcore.Level, code codes.Code, err error, duration zapcore.Field) - -// DefaultMessageProducer writes the default message -func DefaultMessageProducer(ctx context.Context, msg string, level zapcore.Level, code codes.Code, err error, duration zapcore.Field) { - // re-extract logger from newCtx, as it may have extra fields that changed in the holder. 
-	ctxzap.Extract(ctx).Check(level, msg).Write(
-		zap.Error(err),
-		zap.String("grpc.code", code.String()),
-		duration,
-	)
-}
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/payload_interceptors.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/payload_interceptors.go
deleted file mode 100644
index 329019b1722..00000000000
--- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/payload_interceptors.go
+++ /dev/null
@@ -1,150 +0,0 @@
-package grpc_zap
-
-import (
-	"bytes"
-	"context"
-	"fmt"
-
-	"github.com/golang/protobuf/jsonpb"
-	"github.com/golang/protobuf/proto"
-	"github.com/grpc-ecosystem/go-grpc-middleware/logging"
-	"github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap"
-	"go.uber.org/zap"
-	"go.uber.org/zap/zapcore"
-	"google.golang.org/grpc"
-)
-
-var (
-	// JsonPbMarshaller is the marshaller used for serializing protobuf messages.
-	// If needed, this variable can be reassigned with a different marshaller with the same Marshal() signature.
-	JsonPbMarshaller grpc_logging.JsonPbMarshaler = &jsonpb.Marshaler{}
-)
-
-// PayloadUnaryServerInterceptor returns a new unary server interceptor that logs the payloads of requests.
-//
-// This *only* works when placed *after* the `grpc_zap.UnaryServerInterceptor`. However, the logging can be done to a
-// separate instance of the logger.
-func PayloadUnaryServerInterceptor(logger *zap.Logger, decider grpc_logging.ServerPayloadLoggingDecider) grpc.UnaryServerInterceptor {
-	return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
-		if !decider(ctx, info.FullMethod, info.Server) {
-			return handler(ctx, req)
-		}
-		// Use the provided zap.Logger for logging but use the fields from context.
-		logEntry := logger.With(append(serverCallFields(info.FullMethod), ctxzap.TagsToFields(ctx)...)...)
-		logProtoMessageAsJson(logEntry, req, "grpc.request.content", "server request payload logged as grpc.request.content field")
-		resp, err := handler(ctx, req)
-		if err == nil {
-			logProtoMessageAsJson(logEntry, resp, "grpc.response.content", "server response payload logged as grpc.response.content field")
-		}
-		return resp, err
-	}
-}
-
-// PayloadStreamServerInterceptor returns a new streaming server interceptor that logs the payloads of requests.
-//
-// This *only* works when placed *after* the `grpc_zap.StreamServerInterceptor`. However, the logging can be done to a
-// separate instance of the logger.
-func PayloadStreamServerInterceptor(logger *zap.Logger, decider grpc_logging.ServerPayloadLoggingDecider) grpc.StreamServerInterceptor {
-	return func(srv interface{}, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
-		if !decider(stream.Context(), info.FullMethod, srv) {
-			return handler(srv, stream)
-		}
-		logEntry := logger.With(append(serverCallFields(info.FullMethod), ctxzap.TagsToFields(stream.Context())...)...)
-		newStream := &loggingServerStream{ServerStream: stream, logger: logEntry}
-		return handler(srv, newStream)
-	}
-}
-
-// PayloadUnaryClientInterceptor returns a new unary client interceptor that logs the payloads of requests and responses.
-func PayloadUnaryClientInterceptor(logger *zap.Logger, decider grpc_logging.ClientPayloadLoggingDecider) grpc.UnaryClientInterceptor { - return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { - if !decider(ctx, method) { - return invoker(ctx, method, req, reply, cc, opts...) - } - logEntry := logger.With(newClientLoggerFields(ctx, method)...) - logProtoMessageAsJson(logEntry, req, "grpc.request.content", "client request payload logged as grpc.request.content") - err := invoker(ctx, method, req, reply, cc, opts...) - if err == nil { - logProtoMessageAsJson(logEntry, reply, "grpc.response.content", "client response payload logged as grpc.response.content") - } - return err - } -} - -// PayloadStreamClientInterceptor returns a new streaming client interceptor that logs the payloads of requests and responses. -func PayloadStreamClientInterceptor(logger *zap.Logger, decider grpc_logging.ClientPayloadLoggingDecider) grpc.StreamClientInterceptor { - return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { - if !decider(ctx, method) { - return streamer(ctx, desc, cc, method, opts...) - } - logEntry := logger.With(newClientLoggerFields(ctx, method)...) - clientStream, err := streamer(ctx, desc, cc, method, opts...) - newStream := &loggingClientStream{ClientStream: clientStream, logger: logEntry} - return newStream, err - } -} - -type loggingClientStream struct { - grpc.ClientStream - logger *zap.Logger -} - -func (l *loggingClientStream) SendMsg(m interface{}) error { - err := l.ClientStream.SendMsg(m) - if err == nil { - logProtoMessageAsJson(l.logger, m, "grpc.request.content", "server request payload logged as grpc.request.content field") - } - return err -} - -func (l *loggingClientStream) RecvMsg(m interface{}) error { - err := l.ClientStream.RecvMsg(m) - if err == nil { - logProtoMessageAsJson(l.logger, m, "grpc.response.content", "server response payload logged as grpc.response.content field") - } - return err -} - -type loggingServerStream struct { - grpc.ServerStream - logger *zap.Logger -} - -func (l *loggingServerStream) SendMsg(m interface{}) error { - err := l.ServerStream.SendMsg(m) - if err == nil { - logProtoMessageAsJson(l.logger, m, "grpc.response.content", "server response payload logged as grpc.response.content field") - } - return err -} - -func (l *loggingServerStream) RecvMsg(m interface{}) error { - err := l.ServerStream.RecvMsg(m) - if err == nil { - logProtoMessageAsJson(l.logger, m, "grpc.request.content", "server request payload logged as grpc.request.content field") - } - return err -} - -func logProtoMessageAsJson(logger *zap.Logger, pbMsg interface{}, key string, msg string) { - if p, ok := pbMsg.(proto.Message); ok { - logger.Check(zapcore.InfoLevel, msg).Write(zap.Object(key, &jsonpbObjectMarshaler{pb: p})) - } -} - -type jsonpbObjectMarshaler struct { - pb proto.Message -} - -func (j *jsonpbObjectMarshaler) MarshalLogObject(e zapcore.ObjectEncoder) error { - // ZAP jsonEncoder deals with AddReflect by using json.MarshalObject. The same thing applies for consoleEncoder. 
- return e.AddReflected("msg", j) -} - -func (j *jsonpbObjectMarshaler) MarshalJSON() ([]byte, error) { - b := &bytes.Buffer{} - if err := JsonPbMarshaller.Marshal(b, j.pb); err != nil { - return nil, fmt.Errorf("jsonpb serializer failed: %v", err) - } - return b.Bytes(), nil -} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/server_interceptors.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/server_interceptors.go deleted file mode 100644 index 1db035897eb..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/server_interceptors.go +++ /dev/null @@ -1,85 +0,0 @@ -package grpc_zap - -import ( - "context" - "path" - "time" - - grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware" - "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" - "google.golang.org/grpc" -) - -var ( - // SystemField is used in every log statement made through grpc_zap. Can be overwritten before any initialization code. - SystemField = zap.String("system", "grpc") - - // ServerField is used in every server-side log statement made through grpc_zap.Can be overwritten before initialization. - ServerField = zap.String("span.kind", "server") -) - -// UnaryServerInterceptor returns a new unary server interceptors that adds zap.Logger to the context. -func UnaryServerInterceptor(logger *zap.Logger, opts ...Option) grpc.UnaryServerInterceptor { - o := evaluateServerOpt(opts) - return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { - startTime := time.Now() - - newCtx := newLoggerForCall(ctx, logger, info.FullMethod, startTime, o.timestampFormat) - - resp, err := handler(newCtx, req) - if !o.shouldLog(info.FullMethod, err) { - return resp, err - } - code := o.codeFunc(err) - level := o.levelFunc(code) - duration := o.durationFunc(time.Since(startTime)) - - o.messageFunc(newCtx, "finished unary call with code "+code.String(), level, code, err, duration) - return resp, err - } -} - -// StreamServerInterceptor returns a new streaming server interceptor that adds zap.Logger to the context. 
-func StreamServerInterceptor(logger *zap.Logger, opts ...Option) grpc.StreamServerInterceptor { - o := evaluateServerOpt(opts) - return func(srv interface{}, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { - startTime := time.Now() - newCtx := newLoggerForCall(stream.Context(), logger, info.FullMethod, startTime, o.timestampFormat) - wrapped := grpc_middleware.WrapServerStream(stream) - wrapped.WrappedContext = newCtx - - err := handler(srv, wrapped) - if !o.shouldLog(info.FullMethod, err) { - return err - } - code := o.codeFunc(err) - level := o.levelFunc(code) - duration := o.durationFunc(time.Since(startTime)) - - o.messageFunc(newCtx, "finished streaming call with code "+code.String(), level, code, err, duration) - return err - } -} - -func serverCallFields(fullMethodString string) []zapcore.Field { - service := path.Dir(fullMethodString)[1:] - method := path.Base(fullMethodString) - return []zapcore.Field{ - SystemField, - ServerField, - zap.String("grpc.service", service), - zap.String("grpc.method", method), - } -} - -func newLoggerForCall(ctx context.Context, logger *zap.Logger, fullMethodString string, start time.Time, timestampFormat string) context.Context { - var f []zapcore.Field - f = append(f, zap.String("grpc.start_time", start.Format(timestampFormat))) - if d, ok := ctx.Deadline(); ok { - f = append(f, zap.String("grpc.request.deadline", d.Format(timestampFormat))) - } - callLog := logger.With(append(f, serverCallFields(fullMethodString)...)...) - return ctxzap.ToContext(ctx, callLog) -} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/backoff.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/backoff.go deleted file mode 100644 index ad35f09a87f..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/backoff.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2016 Michal Witkowski. All Rights Reserved. -// See LICENSE for licensing terms. - -package grpc_retry - -import ( - "time" - - "github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils" -) - -// BackoffLinear is very simple: it waits for a fixed period of time between calls. -func BackoffLinear(waitBetween time.Duration) BackoffFunc { - return func(attempt uint) time.Duration { - return waitBetween - } -} - -// BackoffLinearWithJitter waits a set period of time, allowing for jitter (fractional adjustment). -// -// For example waitBetween=1s and jitter=0.10 can generate waits between 900ms and 1100ms. -func BackoffLinearWithJitter(waitBetween time.Duration, jitterFraction float64) BackoffFunc { - return func(attempt uint) time.Duration { - return backoffutils.JitterUp(waitBetween, jitterFraction) - } -} - -// BackoffExponential produces increasing intervals for each attempt. -// -// The scalar is multiplied times 2 raised to the current attempt. So the first -// retry with a scalar of 100ms is 100ms, while the 5th attempt would be 1.6s. -func BackoffExponential(scalar time.Duration) BackoffFunc { - return func(attempt uint) time.Duration { - return scalar * time.Duration(backoffutils.ExponentBase2(attempt)) - } -} - -// BackoffExponentialWithJitter creates an exponential backoff like -// BackoffExponential does, but adds jitter. 
-func BackoffExponentialWithJitter(scalar time.Duration, jitterFraction float64) BackoffFunc { - return func(attempt uint) time.Duration { - return backoffutils.JitterUp(scalar*time.Duration(backoffutils.ExponentBase2(attempt)), jitterFraction) - } -} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/context.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/context.go deleted file mode 100644 index 0da1658bbe8..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/context.go +++ /dev/null @@ -1,78 +0,0 @@ -package grpc_ctxtags - -import ( - "context" -) - -type ctxMarker struct{} - -var ( - // ctxMarkerKey is the Context value marker used by *all* logging middleware. - // The logging middleware object must interf - ctxMarkerKey = &ctxMarker{} - // NoopTags is a trivial, minimum overhead implementation of Tags for which all operations are no-ops. - NoopTags = &noopTags{} -) - -// Tags is the interface used for storing request tags between Context calls. -// The default implementation is *not* thread safe, and should be handled only in the context of the request. -type Tags interface { - // Set sets the given key in the metadata tags. - Set(key string, value interface{}) Tags - // Has checks if the given key exists. - Has(key string) bool - // Values returns a map of key to values. - // Do not modify the underlying map, please use Set instead. - Values() map[string]interface{} -} - -type mapTags struct { - values map[string]interface{} -} - -func (t *mapTags) Set(key string, value interface{}) Tags { - t.values[key] = value - return t -} - -func (t *mapTags) Has(key string) bool { - _, ok := t.values[key] - return ok -} - -func (t *mapTags) Values() map[string]interface{} { - return t.values -} - -type noopTags struct{} - -func (t *noopTags) Set(key string, value interface{}) Tags { - return t -} - -func (t *noopTags) Has(key string) bool { - return false -} - -func (t *noopTags) Values() map[string]interface{} { - return nil -} - -// Extracts returns a pre-existing Tags object in the Context. -// If the context wasn't set in a tag interceptor, a no-op Tag storage is returned that will *not* be propagated in context. -func Extract(ctx context.Context) Tags { - t, ok := ctx.Value(ctxMarkerKey).(Tags) - if !ok { - return NoopTags - } - - return t -} - -func SetInContext(ctx context.Context, tags Tags) context.Context { - return context.WithValue(ctx, ctxMarkerKey, tags) -} - -func NewTags() Tags { - return &mapTags{values: make(map[string]interface{})} -} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/doc.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/doc.go deleted file mode 100644 index 960638d0fa3..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/doc.go +++ /dev/null @@ -1,22 +0,0 @@ -/* -`grpc_ctxtags` adds a Tag object to the context that can be used by other middleware to add context about a request. - -Request Context Tags - -Tags describe information about the request, and can be set and used by other middleware, or handlers. Tags are used -for logging and tracing of requests. Tags are populated both upwards, *and* downwards in the interceptor-handler stack. - -You can automatically extract tags (in `grpc.request.`) from request payloads. - -For unary and server-streaming methods, pass in the `WithFieldExtractor` option. 
For client-streams and bidirectional-streams, you can -use `WithFieldExtractorForInitialReq` which will extract the tags from the first message passed from client to server. -Note the tags will not be modified for subsequent requests, so this option only makes sense when the initial message -establishes the meta-data for the stream. - -If a user doesn't use the interceptors that initialize the `Tags` object, all operations following from an `Extract(ctx)` -will be no-ops. This is to ensure that code doesn't panic if the interceptors weren't used. - -Tags fields are typed, and shallow and should follow the OpenTracing semantics convention: -https://github.com/opentracing/specification/blob/master/semantic_conventions.md -*/ -package grpc_ctxtags diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/fieldextractor.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/fieldextractor.go deleted file mode 100644 index a4073ab49b2..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/fieldextractor.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2017 Michal Witkowski. All Rights Reserved. -// See LICENSE for licensing terms. - -package grpc_ctxtags - -import ( - "reflect" -) - -// RequestFieldExtractorFunc is a user-provided function that extracts field information from a gRPC request. -// It is called from tags middleware on arrival of unary request or a server-stream request. -// Keys and values will be added to the context tags of the request. If there are no fields, you should return a nil. -type RequestFieldExtractorFunc func(fullMethod string, req interface{}) map[string]interface{} - -type requestFieldsExtractor interface { - // ExtractRequestFields is a method declared on a Protobuf message that extracts fields from the interface. - // The values from the extracted fields should be set in the appendToMap, in order to avoid allocations. - ExtractRequestFields(appendToMap map[string]interface{}) -} - -// CodeGenRequestFieldExtractor is a function that relies on code-generated functions that export log fields from requests. -// These are usually coming from a protoc-plugin that generates additional information based on custom field options. -func CodeGenRequestFieldExtractor(fullMethod string, req interface{}) map[string]interface{} { - if ext, ok := req.(requestFieldsExtractor); ok { - retMap := make(map[string]interface{}) - ext.ExtractRequestFields(retMap) - if len(retMap) == 0 { - return nil - } - return retMap - } - return nil -} - -// TagBasedRequestFieldExtractor is a function that relies on Go struct tags to export log fields from requests. -// These are usually coming from a protoc-plugin, such as Gogo protobuf. -// -// message Metadata { -// repeated string tags = 1 [ (gogoproto.moretags) = "log_field:\"meta_tags\"" ]; -// } -// -// The tagName is configurable using the tagName variable. Here it would be "log_field". -func TagBasedRequestFieldExtractor(tagName string) RequestFieldExtractorFunc { - return func(fullMethod string, req interface{}) map[string]interface{} { - retMap := make(map[string]interface{}) - reflectMessageTags(req, retMap, tagName) - if len(retMap) == 0 { - return nil - } - return retMap - } -} - -func reflectMessageTags(msg interface{}, existingMap map[string]interface{}, tagName string) { - v := reflect.ValueOf(msg) - // Only deal with pointers to structs. - if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct { - return - } - // Deref the pointer get to the struct. 
- v = v.Elem() - t := v.Type() - for i := 0; i < v.NumField(); i++ { - field := v.Field(i) - kind := field.Kind() - // Only recurse down direct pointers, which should only be to nested structs. - if (kind == reflect.Ptr || kind == reflect.Interface) && field.CanInterface() { - reflectMessageTags(field.Interface(), existingMap, tagName) - } - // In case of arrays/slices (repeated fields) go down to the concrete type. - if kind == reflect.Array || kind == reflect.Slice { - if field.Len() == 0 { - continue - } - kind = field.Index(0).Kind() - } - // Only be interested in - if (kind >= reflect.Bool && kind <= reflect.Float64) || kind == reflect.String { - if tag := t.Field(i).Tag.Get(tagName); tag != "" { - existingMap[tag] = field.Interface() - } - } - } - return -} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/interceptors.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/interceptors.go deleted file mode 100644 index a7ced60f5b9..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/interceptors.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2017 Michal Witkowski. All Rights Reserved. -// See LICENSE for licensing terms. - -package grpc_ctxtags - -import ( - "context" - - "google.golang.org/grpc" - "google.golang.org/grpc/peer" - - "github.com/grpc-ecosystem/go-grpc-middleware" -) - -// UnaryServerInterceptor returns a new unary server interceptors that sets the values for request tags. -func UnaryServerInterceptor(opts ...Option) grpc.UnaryServerInterceptor { - o := evaluateOptions(opts) - return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { - newCtx := newTagsForCtx(ctx) - if o.requestFieldsFunc != nil { - setRequestFieldTags(newCtx, o.requestFieldsFunc, info.FullMethod, req) - } - return handler(newCtx, req) - } -} - -// StreamServerInterceptor returns a new streaming server interceptor that sets the values for request tags. -func StreamServerInterceptor(opts ...Option) grpc.StreamServerInterceptor { - o := evaluateOptions(opts) - return func(srv interface{}, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { - newCtx := newTagsForCtx(stream.Context()) - if o.requestFieldsFunc == nil { - // Short-circuit, don't do the expensive bit of allocating a wrappedStream. - wrappedStream := grpc_middleware.WrapServerStream(stream) - wrappedStream.WrappedContext = newCtx - return handler(srv, wrappedStream) - } - wrapped := &wrappedStream{stream, info, o, newCtx, true} - err := handler(srv, wrapped) - return err - } -} - -// wrappedStream is a thin wrapper around grpc.ServerStream that allows modifying context and extracts log fields from the initial message. -type wrappedStream struct { - grpc.ServerStream - info *grpc.StreamServerInfo - opts *options - // WrappedContext is the wrapper's own Context. You can assign it. - WrappedContext context.Context - initial bool -} - -// Context returns the wrapper's WrappedContext, overwriting the nested grpc.ServerStream.Context() -func (w *wrappedStream) Context() context.Context { - return w.WrappedContext -} - -func (w *wrappedStream) RecvMsg(m interface{}) error { - err := w.ServerStream.RecvMsg(m) - // We only do log fields extraction on the single-request of a server-side stream. 
- if !w.info.IsClientStream || w.opts.requestFieldsFromInitial && w.initial { - w.initial = false - - setRequestFieldTags(w.Context(), w.opts.requestFieldsFunc, w.info.FullMethod, m) - } - return err -} - -func newTagsForCtx(ctx context.Context) context.Context { - t := NewTags() - if peer, ok := peer.FromContext(ctx); ok { - t.Set("peer.address", peer.Addr.String()) - } - return SetInContext(ctx, t) -} - -func setRequestFieldTags(ctx context.Context, f RequestFieldExtractorFunc, fullMethodName string, req interface{}) { - if valMap := f(fullMethodName, req); valMap != nil { - t := Extract(ctx) - for k, v := range valMap { - t.Set("grpc.request."+k, v) - } - } -} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/options.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/options.go deleted file mode 100644 index 952775f88d4..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/tags/options.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2017 Michal Witkowski. All Rights Reserved. -// See LICENSE for licensing terms. - -package grpc_ctxtags - -var ( - defaultOptions = &options{ - requestFieldsFunc: nil, - } -) - -type options struct { - requestFieldsFunc RequestFieldExtractorFunc - requestFieldsFromInitial bool -} - -func evaluateOptions(opts []Option) *options { - optCopy := &options{} - *optCopy = *defaultOptions - for _, o := range opts { - o(optCopy) - } - return optCopy -} - -type Option func(*options) - -// WithFieldExtractor customizes the function for extracting log fields from protobuf messages, for -// unary and server-streamed methods only. -func WithFieldExtractor(f RequestFieldExtractorFunc) Option { - return func(o *options) { - o.requestFieldsFunc = f - } -} - -// WithFieldExtractorForInitialReq customizes the function for extracting log fields from protobuf messages, -// for all unary and streaming methods. For client-streams and bidirectional-streams, the tags will be -// extracted from the first message from the client. -func WithFieldExtractorForInitialReq(f RequestFieldExtractorFunc) Option { - return func(o *options) { - o.requestFieldsFunc = f - o.requestFieldsFromInitial = true - } -} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils/backoff.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils/backoff.go deleted file mode 100644 index 4e69a6305aa..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils/backoff.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2016 Michal Witkowski. All Rights Reserved. -// See LICENSE for licensing terms. - -/* -Backoff Helper Utilities - -Implements common backoff features. -*/ -package backoffutils - -import ( - "math/rand" - "time" -) - -// JitterUp adds random jitter to the duration. -// -// This adds or subtracts time from the duration within a given jitter fraction. -// For example for 10s and jitter 0.1, it will return a time within [9s, 11s]) -func JitterUp(duration time.Duration, jitter float64) time.Duration { - multiplier := jitter * (rand.Float64()*2 - 1) - return time.Duration(float64(duration) * (1 + multiplier)) -} - -// ExponentBase2 computes 2^(a-1) where a >= 1. If a is 0, the result is 0. 
-func ExponentBase2(a uint) uint { - return (1 << a) >> 1 -} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/doc.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/doc.go deleted file mode 100644 index 1ed9bb499b3..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2016 Michal Witkowski. All Rights Reserved. -// See LICENSE for licensing terms. - -/* -Package `metautils` provides convenience functions for dealing with gRPC metadata.MD objects inside -Context handlers. - -While the upstream grpc-go package contains decent functionality (see https://github.com/grpc/grpc-go/blob/master/Documentation/grpc-metadata.md) -they are hard to use. - -The majority of functions center around the NiceMD, which is a convenience wrapper around metadata.MD. For example -the following code allows you to easily extract incoming metadata (server handler) and put it into a new client context -metadata. - - nmd := metautils.ExtractIncoming(serverCtx).Clone(":authorization", ":custom") - clientCtx := nmd.Set("x-client-header", "2").Set("x-another", "3").ToOutgoing(ctx) -*/ - -package metautils diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/nicemd.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/nicemd.go deleted file mode 100644 index 15225d710ac..00000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/nicemd.go +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright 2016 Michal Witkowski. All Rights Reserved. -// See LICENSE for licensing terms. - -package metautils - -import ( - "context" - "strings" - - "google.golang.org/grpc/metadata" -) - -// NiceMD is a convenience wrapper defining extra functions on the metadata. -type NiceMD metadata.MD - -// ExtractIncoming extracts an inbound metadata from the server-side context. -// -// This function always returns a NiceMD wrapper of the metadata.MD, in case the context doesn't have metadata it returns -// a new empty NiceMD. -func ExtractIncoming(ctx context.Context) NiceMD { - md, ok := metadata.FromIncomingContext(ctx) - if !ok { - return NiceMD(metadata.Pairs()) - } - return NiceMD(md) -} - -// ExtractOutgoing extracts an outbound metadata from the client-side context. -// -// This function always returns a NiceMD wrapper of the metadata.MD, in case the context doesn't have metadata it returns -// a new empty NiceMD. -func ExtractOutgoing(ctx context.Context) NiceMD { - md, ok := metadata.FromOutgoingContext(ctx) - if !ok { - return NiceMD(metadata.Pairs()) - } - return NiceMD(md) -} - -// Clone performs a *deep* copy of the metadata.MD. -// -// You can specify the lower-case copiedKeys to only copy certain allow-listed keys. If no keys are explicitly allow-listed -// all keys get copied. -func (m NiceMD) Clone(copiedKeys ...string) NiceMD { - newMd := NiceMD(metadata.Pairs()) - for k, vv := range m { - found := false - if len(copiedKeys) == 0 { - found = true - } else { - for _, allowedKey := range copiedKeys { - if strings.EqualFold(allowedKey, k) { - found = true - break - } - } - } - if !found { - continue - } - newMd[k] = make([]string, len(vv)) - copy(newMd[k], vv) - } - return newMd -} - -// ToOutgoing sets the given NiceMD as a client-side context for dispatching. 
-func (m NiceMD) ToOutgoing(ctx context.Context) context.Context {
-	return metadata.NewOutgoingContext(ctx, metadata.MD(m))
-}
-
-// ToIncoming sets the given NiceMD as a server-side context for dispatching.
-//
-// This is mostly useful in ServerInterceptors.
-func (m NiceMD) ToIncoming(ctx context.Context) context.Context {
-	return metadata.NewIncomingContext(ctx, metadata.MD(m))
-}
-
-// Get retrieves a single value from the metadata.
-//
-// It works analogously to http.Header.Get, returning the first value if there are many set. If the value is not set,
-// an empty string is returned.
-//
-// The function is binary-key safe.
-func (m NiceMD) Get(key string) string {
-	k := strings.ToLower(key)
-	vv, ok := m[k]
-	if !ok {
-		return ""
-	}
-	return vv[0]
-}
-
-// Del removes all values for the given key from the metadata.
-//
-// It works analogously to http.Header.Del, deleting all values if they exist.
-//
-// The function is binary-key safe.
-func (m NiceMD) Del(key string) NiceMD {
-	k := strings.ToLower(key)
-	delete(m, k)
-	return m
-}
-
-// Set sets the given value in the metadata.
-//
-// It works analogously to http.Header.Set, overwriting all previous metadata values.
-//
-// The function is binary-key safe.
-func (m NiceMD) Set(key string, value string) NiceMD {
-	k := strings.ToLower(key)
-	m[k] = []string{value}
-	return m
-}
-
-// Add appends a value to the metadata for the given key.
-//
-// It works analogously to http.Header.Add, as it appends to any existing values associated with key.
-//
-// The function is binary-key safe.
-func (m NiceMD) Add(key string, value string) NiceMD {
-	k := strings.ToLower(key)
-	m[k] = append(m[k], value)
-	return m
-}
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/COPYRIGHT b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/COPYRIGHT
new file mode 100644
index 00000000000..3b13627cdbb
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/COPYRIGHT
@@ -0,0 +1,2 @@
+Copyright (c) The go-grpc-middleware Authors.
+Licensed under the Apache License 2.0.
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/LICENSE b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/LICENSE
new file mode 100644
index 00000000000..b2b065037fc
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/retry/backoff.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/retry/backoff.go new file mode 100644 index 00000000000..5b8aaa683f1 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/retry/backoff.go @@ -0,0 +1,55 @@ +// Copyright (c) The go-grpc-middleware Authors. +// Licensed under the Apache License 2.0. + +package retry + +import ( + "context" + "math/rand" + "time" +) + +// BackoffLinear is very simple: it waits for a fixed period of time between calls. +func BackoffLinear(waitBetween time.Duration) BackoffFunc { + return func(ctx context.Context, attempt uint) time.Duration { + return waitBetween + } +} + +// jitterUp adds random jitter to the duration. +// This adds or subtracts time from the duration within a given jitter fraction. 
+// For example, for 10s and jitter 0.1, it will return a time within [9s, 11s].
+func jitterUp(duration time.Duration, jitter float64) time.Duration {
+	multiplier := jitter * (rand.Float64()*2 - 1)
+	return time.Duration(float64(duration) * (1 + multiplier))
+}
+
+// exponentBase2 computes 2^(a-1) where a >= 1. If a is 0, the result is 0.
+func exponentBase2(a uint) uint {
+	return (1 << a) >> 1
+}
+
+// BackoffLinearWithJitter waits a set period of time, allowing for jitter (fractional adjustment).
+// For example, waitBetween=1s and jitter=0.10 can generate waits between 900ms and 1100ms.
+func BackoffLinearWithJitter(waitBetween time.Duration, jitterFraction float64) BackoffFunc {
+	return func(ctx context.Context, attempt uint) time.Duration {
+		return jitterUp(waitBetween, jitterFraction)
+	}
+}
+
+// BackoffExponential produces increasing intervals for each attempt.
+// The scalar is multiplied by 2 raised to the current attempt. So the first
+// retry with a scalar of 100ms is 100ms, while the 5th attempt would be 1.6s.
+func BackoffExponential(scalar time.Duration) BackoffFunc {
+	return func(ctx context.Context, attempt uint) time.Duration {
+		return scalar * time.Duration(exponentBase2(attempt))
+	}
+}
+
+// BackoffExponentialWithJitter creates an exponential backoff like
+// BackoffExponential does, but adds jitter.
+func BackoffExponentialWithJitter(scalar time.Duration, jitterFraction float64) BackoffFunc {
+	return func(ctx context.Context, attempt uint) time.Duration {
+		return jitterUp(scalar*time.Duration(exponentBase2(attempt)), jitterFraction)
+	}
+}
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/doc.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/retry/doc.go
similarity index 68%
rename from vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/doc.go
rename to vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/retry/doc.go
index f8ba7198a5a..cad659022ee 100644
--- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/doc.go
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/retry/doc.go
@@ -1,10 +1,10 @@
-// Copyright 2016 Michal Witkowski. All Rights Reserved.
-// See LICENSE for licensing terms.
+// Copyright (c) The go-grpc-middleware Authors.
+// Licensed under the Apache License 2.0.
 
 /*
-`grpc_retry` provides client-side request retry logic for gRPC.
+Package retry provides client-side request retry logic for gRPC.
 
-Client-Side Request Retry Interceptor
+# Client-Side Request Retry Interceptor
 
 It allows for automatic retry, inside the generated gRPC code of requests based on the gRPC status
 of the reply. It supports unary (1:1), and server stream (1:n) requests.
@@ -12,14 +12,14 @@ of the reply. It supports unary (1:1), and server stream (1:n) requests.
 By default the interceptors *are disabled*, preventing accidental use of retries. You can easily
 override the number of retries (setting them to more than 0) with a `grpc.ClientOption`, e.g.:
 
-	myclient.Ping(ctx, goodPing, grpc_retry.WithMax(5))
+	myclient.Ping(ctx, goodPing, grpc_retry.WithMax(5))
 
 Other default options are: retry on `ResourceExhausted` and `Unavailable` gRPC codes, use a 50ms
 linear backoff with 10% jitter.
 
 For chained interceptors, the retry interceptor will call every interceptor that follows it
-whenever a retry happens.
+whenever a retry happens.
 
 Please see examples for more advanced use.
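A minimal wiring sketch for the interceptor and the backoff helpers above, assuming an illustrative target address and insecure transport credentials (neither is part of this patch):

	package example

	import (
		"time"

		"github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/retry"
		"google.golang.org/grpc"
		"google.golang.org/grpc/credentials/insecure"
	)

	// dialWithRetries installs the retry interceptor at dial time.
	// Retries are off by default; WithMax turns them on, and the
	// exponential helper spaces attempts out (~100ms, 200ms, 400ms, ...),
	// each varied by up to 10% of jitter.
	func dialWithRetries(addr string) (*grpc.ClientConn, error) {
		return grpc.Dial(addr,
			grpc.WithTransportCredentials(insecure.NewCredentials()),
			grpc.WithUnaryInterceptor(retry.UnaryClientInterceptor(
				retry.WithMax(5),
				retry.WithBackoff(retry.BackoffExponentialWithJitter(100*time.Millisecond, 0.10)),
			)),
		)
	}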
*/ -package grpc_retry +package retry diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/options.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/retry/options.go similarity index 66% rename from vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/options.go rename to vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/retry/options.go index 7a633e29347..e1466598679 100644 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/options.go +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/retry/options.go @@ -1,7 +1,7 @@ -// Copyright 2016 Michal Witkowski. All Rights Reserved. -// See LICENSE for licensing terms. +// Copyright (c) The go-grpc-middleware Authors. +// Licensed under the Apache License 2.0. -package grpc_retry +package retry import ( "context" @@ -9,6 +9,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" ) var ( @@ -22,10 +23,11 @@ var ( max: 0, // disabled perCallTimeout: 0, // disabled includeHeader: true, - codes: DefaultRetriableCodes, - backoffFunc: BackoffFuncContext(func(ctx context.Context, attempt uint) time.Duration { - return BackoffLinearWithJitter(50*time.Millisecond /*jitter*/, 0.10)(attempt) + backoffFunc: BackoffLinearWithJitter(50*time.Millisecond /*jitter*/, 0.10), + onRetryCallback: OnRetryCallback(func(ctx context.Context, attempt uint, err error) { + logTrace(ctx, "grpc_retry attempt: %d, backoff for %v", attempt, err) }), + retriableFunc: newRetriableFuncForCodes(DefaultRetriableCodes), } ) @@ -34,16 +36,14 @@ var ( // They are called with an identifier of the attempt, and should return a time the system client should // hold off for. If the time returned is longer than the `context.Context.Deadline` of the request // the deadline of the request takes precedence and the wait will be interrupted before proceeding -// with the next iteration. -type BackoffFunc func(attempt uint) time.Duration - -// BackoffFuncContext denotes a family of functions that control the backoff duration between call retries. -// -// They are called with an identifier of the attempt, and should return a time the system client should -// hold off for. If the time returned is longer than the `context.Context.Deadline` of the request -// the deadline of the request takes precedence and the wait will be interrupted before proceeding // with the next iteration. The context can be used to extract request scoped metadata and context values. -type BackoffFuncContext func(ctx context.Context, attempt uint) time.Duration +type BackoffFunc func(ctx context.Context, attempt uint) time.Duration + +// OnRetryCallback is the type of function called when a retry occurs. +type OnRetryCallback func(ctx context.Context, attempt uint, err error) + +// RetriableFunc denotes a family of functions that control which error should be retried. +type RetriableFunc func(err error) bool // Disable disables the retry behaviour on this call, or this interceptor. // @@ -62,16 +62,16 @@ func WithMax(maxRetries uint) CallOption { // WithBackoff sets the `BackoffFunc` used to control time between retries. func WithBackoff(bf BackoffFunc) CallOption { return CallOption{applyFunc: func(o *options) { - o.backoffFunc = BackoffFuncContext(func(ctx context.Context, attempt uint) time.Duration { - return bf(attempt) - }) + o.backoffFunc = bf }} } -// WithBackoffContext sets the `BackoffFuncContext` used to control time between retries. 
-func WithBackoffContext(bf BackoffFuncContext) CallOption {
+// WithOnRetryCallback sets the callback to use when a retry occurs.
+//
+// By default, when no callback function is provided, a log line is printed to the trace.
+func WithOnRetryCallback(fn OnRetryCallback) CallOption {
 	return CallOption{applyFunc: func(o *options) {
-		o.backoffFunc = bf
+		o.onRetryCallback = fn
 	}}
 }
 
@@ -82,7 +82,7 @@ func WithBackoffContext(bf BackoffFuncContext) CallOption {
 // You cannot automatically retry on Cancelled and Deadline, please use `WithPerRetryTimeout` for these.
 func WithCodes(retryCodes ...codes.Code) CallOption {
 	return CallOption{applyFunc: func(o *options) {
-		o.codes = retryCodes
+		o.retriableFunc = newRetriableFuncForCodes(retryCodes)
 	}}
 }
 
@@ -104,12 +104,20 @@ func WithPerRetryTimeout(timeout time.Duration) CallOption {
 	}}
 }
 
+// WithRetriable sets which errors should be retried.
+func WithRetriable(retriableFunc RetriableFunc) CallOption {
+	return CallOption{applyFunc: func(o *options) {
+		o.retriableFunc = retriableFunc
+	}}
+}
+
 type options struct {
-	max            uint
-	perCallTimeout time.Duration
-	includeHeader  bool
-	codes          []codes.Code
-	backoffFunc    BackoffFuncContext
+	max             uint
+	perCallTimeout  time.Duration
+	includeHeader   bool
+	backoffFunc     BackoffFunc
+	onRetryCallback OnRetryCallback
+	retriableFunc   RetriableFunc
 }
 
 // CallOption is a grpc.CallOption that is local to grpc_retry.
@@ -140,3 +148,20 @@ func filterCallOptions(callOptions []grpc.CallOption) (grpcOptions []grpc.CallOp
 	}
 	return grpcOptions, retryOptions
 }
+
+// newRetriableFuncForCodes returns a retriable function for the given codes.
+func newRetriableFuncForCodes(codes []codes.Code) func(err error) bool {
+	return func(err error) bool {
+		errCode := status.Code(err)
+		if isContextError(err) {
+			// context errors are not retriable based on user settings.
+			return false
+		}
+		for _, code := range codes {
+			if code == errCode {
+				return true
+			}
+		}
+		return false
+	}
+}
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/retry.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/retry/retry.go
similarity index 80%
rename from vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/retry.go
rename to vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/retry/retry.go
index 003bbd9066e..2b77084155f 100644
--- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/retry.go
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/retry/retry.go
@@ -1,25 +1,25 @@
-// Copyright 2016 Michal Witkowski. All Rights Reserved.
-// See LICENSE for licensing terms.
+// Copyright (c) The go-grpc-middleware Authors.
+// Licensed under the Apache License 2.0.
 
-package grpc_retry
+package retry
 
 import (
 	"context"
+	"fmt"
 	"io"
-	"strconv"
 	"sync"
 	"time"
 
-	"github.com/grpc-ecosystem/go-grpc-middleware/util/metautils"
+	"github.com/grpc-ecosystem/go-grpc-middleware/v2/metadata"
 	"golang.org/x/net/trace"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/metadata"
+	grpcMetadata "google.golang.org/grpc/metadata"
 	"google.golang.org/grpc/status"
 )
 
 const (
-	AttemptMetadataKey = "x-retry-attempty"
+	AttemptMetadataKey = "x-retry-attempt"
 )
 
 // UnaryClientInterceptor returns a new retrying unary client interceptor.
@@ -28,7 +28,7 @@ const (
 // changed through options (e.g. WithMax) on creation of the interceptor or on call (through grpc.CallOptions).
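The same CallOptions can also be supplied on an individual invocation, including the WithRetriable and WithOnRetryCallback hooks added in this version. A hedged sketch, where the pinger interface stands in for any generated gRPC client and is not part of this patch:

	package example

	import (
		"context"
		"log"
		"time"

		"github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/retry"
		"google.golang.org/grpc"
		"google.golang.org/grpc/codes"
		"google.golang.org/grpc/status"
	)

	// pinger stands in for a generated gRPC client; illustrative only.
	type pinger interface {
		Ping(ctx context.Context, msg string, opts ...grpc.CallOption) (string, error)
	}

	func pingOnce(ctx context.Context, c pinger) (string, error) {
		return c.Ping(ctx, "ping",
			retry.WithMax(3),
			retry.WithPerRetryTimeout(200*time.Millisecond),
			// Replace the default code set: retry only on Unavailable.
			retry.WithRetriable(func(err error) bool {
				return status.Code(err) == codes.Unavailable
			}),
			// Observe each failed attempt before the next backoff wait.
			retry.WithOnRetryCallback(func(ctx context.Context, attempt uint, err error) {
				log.Printf("attempt %d failed: %v", attempt, err)
			}),
		)
	}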
func UnaryClientInterceptor(optFuncs ...CallOption) grpc.UnaryClientInterceptor { intOpts := reuseOrNewWithCallOptions(defaultOptions, optFuncs) - return func(parentCtx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + return func(parentCtx context.Context, method string, req, reply any, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { grpcOpts, retryOpts := filterCallOptions(opts) callOpts := reuseOrNewWithCallOptions(intOpts, retryOpts) // short circuit for simplicity, and avoiding allocations. @@ -40,13 +40,14 @@ func UnaryClientInterceptor(optFuncs ...CallOption) grpc.UnaryClientInterceptor if err := waitRetryBackoff(attempt, parentCtx, callOpts); err != nil { return err } - callCtx := perCallContext(parentCtx, callOpts, attempt) + callCtx, cancel := perCallContext(parentCtx, callOpts, attempt) + defer cancel() // Clean up potential resources. lastErr = invoker(callCtx, method, req, reply, cc, grpcOpts...) // TODO(mwitkow): Maybe dial and transport errors should be retriable? if lastErr == nil { return nil } - logTrace(parentCtx, "grpc_retry attempt: %d, got err: %v", attempt, lastErr) + callOpts.onRetryCallback(parentCtx, attempt, lastErr) if isContextError(lastErr) { if parentCtx.Err() != nil { logTrace(parentCtx, "grpc_retry attempt: %d, parent context error: %v", attempt, parentCtx.Err()) @@ -85,7 +86,7 @@ func StreamClientInterceptor(optFuncs ...CallOption) grpc.StreamClientIntercepto return streamer(parentCtx, desc, cc, method, grpcOpts...) } if desc.ClientStreams { - return nil, status.Errorf(codes.Unimplemented, "grpc_retry: cannot retry on ClientStreams, set grpc_retry.Disable()") + return nil, status.Error(codes.Unimplemented, "grpc_retry: cannot retry on ClientStreams, set grpc_retry.Disable()") } var lastErr error @@ -93,10 +94,8 @@ func StreamClientInterceptor(optFuncs ...CallOption) grpc.StreamClientIntercepto if err := waitRetryBackoff(attempt, parentCtx, callOpts); err != nil { return nil, err } - callCtx := perCallContext(parentCtx, callOpts, 0) - var newStreamer grpc.ClientStream - newStreamer, lastErr = streamer(callCtx, desc, cc, method, grpcOpts...) + newStreamer, lastErr = streamer(parentCtx, desc, cc, method, grpcOpts...) if lastErr == nil { retryingStreamer := &serverStreamingRetryingStream{ ClientStream: newStreamer, @@ -108,8 +107,7 @@ func StreamClientInterceptor(optFuncs ...CallOption) grpc.StreamClientIntercepto } return retryingStreamer, nil } - - logTrace(parentCtx, "grpc_retry attempt: %d, got err: %v", attempt, lastErr) + callOpts.onRetryCallback(parentCtx, attempt, lastErr) if isContextError(lastErr) { if parentCtx.Err() != nil { logTrace(parentCtx, "grpc_retry attempt: %d, parent context error: %v", attempt, parentCtx.Err()) @@ -135,8 +133,8 @@ func StreamClientInterceptor(optFuncs ...CallOption) grpc.StreamClientIntercepto // a new ClientStream according to the retry policy. 
 type serverStreamingRetryingStream struct {
 	grpc.ClientStream
-	bufferedSends []interface{} // single message that the client can sen
-	wasClosedSend bool          // indicates that CloseSend was closed
+	bufferedSends []any // single message that the client can send
+	wasClosedSend bool  // indicates that CloseSend was called
 	parentCtx     context.Context
 	callOpts      *options
 	streamerCall  func(ctx context.Context) (grpc.ClientStream, error)
@@ -155,7 +153,7 @@ func (s *serverStreamingRetryingStream) getStream() grpc.ClientStream {
 	return s.ClientStream
 }
 
-func (s *serverStreamingRetryingStream) SendMsg(m interface{}) error {
+func (s *serverStreamingRetryingStream) SendMsg(m any) error {
 	s.mu.Lock()
 	s.bufferedSends = append(s.bufferedSends, m)
 	s.mu.Unlock()
@@ -169,15 +167,15 @@ func (s *serverStreamingRetryingStream) CloseSend() error {
 	return s.getStream().CloseSend()
 }
 
-func (s *serverStreamingRetryingStream) Header() (metadata.MD, error) {
+func (s *serverStreamingRetryingStream) Header() (grpcMetadata.MD, error) {
 	return s.getStream().Header()
 }
 
-func (s *serverStreamingRetryingStream) Trailer() metadata.MD {
+func (s *serverStreamingRetryingStream) Trailer() grpcMetadata.MD {
 	return s.getStream().Trailer()
 }
 
-func (s *serverStreamingRetryingStream) RecvMsg(m interface{}) error {
+func (s *serverStreamingRetryingStream) RecvMsg(m any) error {
 	attemptRetry, lastErr := s.receiveMsgAndIndicateRetry(m)
 	if !attemptRetry {
 		return lastErr // success or hard failure
@@ -187,8 +185,8 @@ func (s *serverStreamingRetryingStream) RecvMsg(m interface{}) error {
 		if err := waitRetryBackoff(attempt, s.parentCtx, s.callOpts); err != nil {
 			return err
 		}
-		callCtx := perCallContext(s.parentCtx, s.callOpts, attempt)
-		newStream, err := s.reestablishStreamAndResendBuffer(callCtx)
+		s.callOpts.onRetryCallback(s.parentCtx, attempt, lastErr)
+		newStream, err := s.reestablishStreamAndResendBuffer(s.parentCtx)
 		if err != nil {
 			// Retry dial and transport errors of establishing stream as grpc doesn't retry.
if isRetriable(err, s.callOpts) { @@ -199,7 +197,7 @@ func (s *serverStreamingRetryingStream) RecvMsg(m interface{}) error { s.setStream(newStream) attemptRetry, lastErr = s.receiveMsgAndIndicateRetry(m) - //fmt.Printf("Received message and indicate: %v %v\n", attemptRetry, lastErr) + if !attemptRetry { return lastErr } @@ -207,7 +205,7 @@ func (s *serverStreamingRetryingStream) RecvMsg(m interface{}) error { return lastErr } -func (s *serverStreamingRetryingStream) receiveMsgAndIndicateRetry(m interface{}) (bool, error) { +func (s *serverStreamingRetryingStream) receiveMsgAndIndicateRetry(m any) (bool, error) { err := s.getStream().RecvMsg(m) if err == nil || err == io.EOF { return false, err @@ -226,9 +224,7 @@ func (s *serverStreamingRetryingStream) receiveMsgAndIndicateRetry(m interface{} return isRetriable(err, s.callOpts), err } -func (s *serverStreamingRetryingStream) reestablishStreamAndResendBuffer( - callCtx context.Context, -) (grpc.ClientStream, error) { +func (s *serverStreamingRetryingStream) reestablishStreamAndResendBuffer(callCtx context.Context) (grpc.ClientStream, error) { s.mu.RLock() bufferedSends := s.bufferedSends s.mu.RUnlock() @@ -260,7 +256,9 @@ func waitRetryBackoff(attempt uint, parentCtx context.Context, callOpts *options timer := time.NewTimer(waitTime) select { case <-parentCtx.Done(): - timer.Stop() + if !timer.Stop() { + <-timer.C + } return contextErrToGrpcErr(parentCtx.Err()) case <-timer.C: } @@ -269,15 +267,8 @@ func waitRetryBackoff(attempt uint, parentCtx context.Context, callOpts *options } func isRetriable(err error, callOpts *options) bool { - errCode := status.Code(err) - if isContextError(err) { - // context errors are not retriable based on user settings. - return false - } - for _, code := range callOpts.codes { - if code == errCode { - return true - } + if callOpts.retriableFunc != nil { + return callOpts.retriableFunc(err) } return false } @@ -287,16 +278,18 @@ func isContextError(err error) bool { return code == codes.DeadlineExceeded || code == codes.Canceled } -func perCallContext(parentCtx context.Context, callOpts *options, attempt uint) context.Context { +func perCallContext(parentCtx context.Context, callOpts *options, attempt uint) (context.Context, context.CancelFunc) { + cancel := context.CancelFunc(func() {}) + ctx := parentCtx if callOpts.perCallTimeout != 0 { - ctx, _ = context.WithTimeout(ctx, callOpts.perCallTimeout) + ctx, cancel = context.WithTimeout(ctx, callOpts.perCallTimeout) } if attempt > 0 && callOpts.includeHeader { - mdClone := metautils.ExtractOutgoing(ctx).Clone().Set(AttemptMetadataKey, strconv.FormatUint(uint64(attempt), 10)) + mdClone := metadata.ExtractOutgoing(ctx).Clone().Set(AttemptMetadataKey, fmt.Sprintf("%d", attempt)) ctx = mdClone.ToOutgoing(ctx) } - return ctx + return ctx, cancel } func contextErrToGrpcErr(err error) error { @@ -310,7 +303,7 @@ func contextErrToGrpcErr(err error) error { } } -func logTrace(ctx context.Context, format string, a ...interface{}) { +func logTrace(ctx context.Context, format string, a ...any) { tr, ok := trace.FromContext(ctx) if !ok { return diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/metadata/doc.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/metadata/doc.go new file mode 100644 index 00000000000..3aac1b3648f --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/metadata/doc.go @@ -0,0 +1,19 @@ +// Copyright (c) The go-grpc-middleware Authors. +// Licensed under the Apache License 2.0. 
+
+/*
+Package `metadata` provides convenience functions for dealing with gRPC metadata.MD objects inside
+Context handlers.
+
+While the upstream grpc-go package contains decent functionality (see https://github.com/grpc/grpc-go/blob/master/Documentation/grpc-metadata.md),
+it is hard to use.
+
+The majority of functions center around the MD, which is a convenience wrapper around metadata.MD. For example,
+the following code allows you to easily extract incoming metadata (server handler) and put it into a new client context
+metadata.
+
+	md := metadata.ExtractIncoming(serverCtx).Clone(":authorization", ":custom")
+	clientCtx := md.Set("x-client-header", "2").Set("x-another", "3").ToOutgoing(ctx)
+*/
+
+package metadata
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/metadata/metadata.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/metadata/metadata.go
new file mode 100644
index 00000000000..2eeda06857c
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/metadata/metadata.go
@@ -0,0 +1,126 @@
+// Copyright (c) The go-grpc-middleware Authors.
+// Licensed under the Apache License 2.0.
+
+package metadata
+
+import (
+	"context"
+	"strings"
+
+	grpcMetadata "google.golang.org/grpc/metadata"
+)
+
+// MD is a convenience wrapper defining extra functions on the metadata.
+type MD grpcMetadata.MD
+
+// ExtractIncoming extracts inbound metadata from the server-side context.
+//
+// This function always returns an MD wrapper of the grpcMetadata.MD; if the context doesn't have metadata, it returns
+// a new empty MD.
+func ExtractIncoming(ctx context.Context) MD {
+	md, ok := grpcMetadata.FromIncomingContext(ctx)
+	if !ok {
+		return MD(grpcMetadata.Pairs())
+	}
+	return MD(md)
+}
+
+// ExtractOutgoing extracts outbound metadata from the client-side context.
+//
+// This function always returns an MD wrapper of the grpcMetadata.MD; if the context doesn't have metadata, it returns
+// a new empty MD.
+func ExtractOutgoing(ctx context.Context) MD {
+	md, ok := grpcMetadata.FromOutgoingContext(ctx)
+	if !ok {
+		return MD(grpcMetadata.Pairs())
+	}
+	return MD(md)
+}
+
+// Clone performs a *deep* copy of the grpcMetadata.MD.
+//
+// You can specify the lower-case copiedKeys to only copy certain whitelisted keys. If no keys are explicitly whitelisted,
+// all keys get copied.
+func (m MD) Clone(copiedKeys ...string) MD {
+	newMd := MD(grpcMetadata.Pairs())
+	for k, vv := range m {
+		found := false
+		if len(copiedKeys) == 0 {
+			found = true
+		} else {
+			for _, allowedKey := range copiedKeys {
+				if strings.EqualFold(allowedKey, k) {
+					found = true
+					break
+				}
+			}
+		}
+		if !found {
+			continue
+		}
+		newMd[k] = make([]string, len(vv))
+		copy(newMd[k], vv)
+	}
+	return newMd
+}
+
+// ToOutgoing sets the given MD as a client-side context for dispatching.
+func (m MD) ToOutgoing(ctx context.Context) context.Context {
+	return grpcMetadata.NewOutgoingContext(ctx, grpcMetadata.MD(m))
+}
+
+// ToIncoming sets the given MD as a server-side context for dispatching.
+//
+// This is mostly useful in ServerInterceptors.
+func (m MD) ToIncoming(ctx context.Context) context.Context {
+	return grpcMetadata.NewIncomingContext(ctx, grpcMetadata.MD(m))
+}
+
+// Get retrieves a single value from the metadata.
+//
+// It works analogously to http.Header.Get, returning the first value if there are many set. If the value is not set,
+// an empty string is returned.
+//
+// The function is binary-key safe.
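Before the accessor implementations that follow, a small round-trip sketch of the MD wrapper; the header names are illustrative, and the "-bin" behaviour relies on the encodeKeyValue helper in single_key.go further below:

	package example

	import (
		"context"
		"fmt"

		"github.com/grpc-ecosystem/go-grpc-middleware/v2/metadata"
	)

	func annotate(ctx context.Context) context.Context {
		// Deep-copy whatever is already outgoing; an empty MD if nothing is.
		md := metadata.ExtractOutgoing(ctx).Clone()
		md.Set("x-request-id", "42")
		// Values under "-bin" keys are base64-encoded, so arbitrary bytes
		// survive the trip through gRPC metadata.
		md.Set("x-token-bin", string([]byte{0x00, 0x01, 0x02}))
		fmt.Println(md.Get("x-request-id")) // "42"
		return md.ToOutgoing(ctx)
	}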
+func (m MD) Get(key string) string {
+	k, _ := encodeKeyValue(key, "")
+	vv, ok := m[k]
+	if !ok {
+		return ""
+	}
+	return vv[0]
+}
+
+// Del deletes all values for the given key from the metadata.
+//
+// It works analogously to http.Header.Del, deleting all values if they exist.
+//
+// The function is binary-key safe.
+func (m MD) Del(key string) MD {
+	k, _ := encodeKeyValue(key, "")
+	delete(m, k)
+	return m
+}
+
+// Set sets the given value in a metadata.
+//
+// It works analogously to http.Header.Set, overwriting all previous metadata values.
+//
+// The function is binary-key safe.
+func (m MD) Set(key string, value string) MD {
+	k, v := encodeKeyValue(key, value)
+	m[k] = []string{v}
+	return m
+}
+
+// Add appends the given value to the values already stored under the key.
+//
+// It works analogously to http.Header.Add, as it appends to any existing values associated with key.
+//
+// The function is binary-key safe.
+func (m MD) Add(key string, value string) MD {
+	k, v := encodeKeyValue(key, value)
+	m[k] = append(m[k], v)
+	return m
+}
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/metadata/single_key.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/metadata/single_key.go
new file mode 100644
index 00000000000..e758c092f93
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/metadata/single_key.go
@@ -0,0 +1,21 @@
+// Copyright (c) The go-grpc-middleware Authors.
+// Licensed under the Apache License 2.0.
+
+package metadata
+
+import (
+	"encoding/base64"
+	"strings"
+)
+
+const (
+	binHdrSuffix = "-bin"
+)
+
+// encodeKeyValue lower-cases the key and base64-encodes the value when the
+// key carries the "-bin" suffix, mirroring gRPC's binary header convention.
+func encodeKeyValue(k, v string) (string, string) {
+	k = strings.ToLower(k)
+	if strings.HasSuffix(k, binHdrSuffix) {
+		return k, base64.StdEncoding.EncodeToString([]byte(v))
+	}
+	return k, v
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel
index 78d7c9f5c88..a65d88eb865 100644
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel
@@ -73,7 +73,7 @@ go_test(
         "@org_golang_google_genproto_googleapis_api//httpbody",
         "@org_golang_google_genproto_googleapis_rpc//errdetails",
         "@org_golang_google_genproto_googleapis_rpc//status",
-        "@org_golang_google_grpc//:go_default_library",
+        "@org_golang_google_grpc//:grpc",
         "@org_golang_google_grpc//codes",
         "@org_golang_google_grpc//health/grpc_health_v1",
         "@org_golang_google_grpc//metadata",
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go
index 5dd4e447862..2f2b342431d 100644
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go
@@ -49,6 +49,7 @@ var malformedHTTPHeaders = map[string]struct{}{
 type (
 	rpcMethodKey       struct{}
 	httpPathPatternKey struct{}
+	httpPatternKey     struct{}
 
 	AnnotateContextOption func(ctx context.Context) context.Context
 )
@@ -404,3 +405,13 @@ func HTTPPathPattern(ctx context.Context) (string, bool) {
 func withHTTPPathPattern(ctx context.Context, httpPathPattern string) context.Context {
 	return context.WithValue(ctx, httpPathPatternKey{}, httpPathPattern)
 }
+
+// HTTPPattern returns the HTTP path pattern struct relating to the HTTP handler, if one exists.
+func HTTPPattern(ctx context.Context) (Pattern, bool) { + v, ok := ctx.Value(httpPatternKey{}).(Pattern) + return v, ok +} + +func withHTTPPattern(ctx context.Context, httpPattern Pattern) context.Context { + return context.WithValue(ctx, httpPatternKey{}, httpPattern) +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go index d7b15fcfb3f..2e50082ad11 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go @@ -94,7 +94,7 @@ func Int64(val string) (int64, error) { } // Int64Slice converts 'val' where individual integers are separated by -// 'sep' into a int64 slice. +// 'sep' into an int64 slice. func Int64Slice(val, sep string) ([]int64, error) { s := strings.Split(val, sep) values := make([]int64, len(s)) @@ -118,7 +118,7 @@ func Int32(val string) (int32, error) { } // Int32Slice converts 'val' where individual integers are separated by -// 'sep' into a int32 slice. +// 'sep' into an int32 slice. func Int32Slice(val, sep string) ([]int32, error) { s := strings.Split(val, sep) values := make([]int32, len(s)) @@ -190,7 +190,7 @@ func Bytes(val string) ([]byte, error) { } // BytesSlice converts 'val' where individual bytes sequences, encoded in URL-safe -// base64 without padding, are separated by 'sep' into a slice of bytes slices slice. +// base64 without padding, are separated by 'sep' into a slice of byte slices. func BytesSlice(val, sep string) ([][]byte, error) { s := strings.Split(val, sep) values := make([][]byte, len(s)) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go index 5682998699a..41cd4f5030e 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go @@ -81,6 +81,21 @@ func HTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.R mux.errorHandler(ctx, mux, marshaler, w, r, err) } +// HTTPStreamError uses the mux-configured stream error handler to notify error to the client without closing the connection. +func HTTPStreamError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) { + st := mux.streamErrorHandler(ctx, err) + msg := errorChunk(st) + buf, err := marshaler.Marshal(msg) + if err != nil { + grpclog.Errorf("Failed to marshal an error: %v", err) + return + } + if _, err := w.Write(buf); err != nil { + grpclog.Errorf("Failed to notify error to client: %v", err) + return + } +} + // DefaultHTTPErrorHandler is the default error handler. // If "err" is a gRPC Status, the function replies with the status code mapped by HTTPStatusFromCode. // If "err" is a HTTPStatusError, the function replies with the status code provide by that struct. 
This is @@ -93,6 +108,7 @@ func HTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.R func DefaultHTTPErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) { // return Internal when Marshal failed const fallback = `{"code": 13, "message": "failed to marshal error message"}` + const fallbackRewriter = `{"code": 13, "message": "failed to rewrite error message"}` var customStatus *HTTPStatusError if errors.As(err, &customStatus) { @@ -100,19 +116,28 @@ func DefaultHTTPErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marsh } s := status.Convert(err) - pb := s.Proto() w.Header().Del("Trailer") w.Header().Del("Transfer-Encoding") - contentType := marshaler.ContentType(pb) + respRw, err := mux.forwardResponseRewriter(ctx, s.Proto()) + if err != nil { + grpclog.Errorf("Failed to rewrite error message %q: %v", s, err) + w.WriteHeader(http.StatusInternalServerError) + if _, err := io.WriteString(w, fallbackRewriter); err != nil { + grpclog.Errorf("Failed to write response: %v", err) + } + return + } + + contentType := marshaler.ContentType(respRw) w.Header().Set("Content-Type", contentType) if s.Code() == codes.Unauthenticated { w.Header().Set("WWW-Authenticate", s.Message()) } - buf, merr := marshaler.Marshal(pb) + buf, merr := marshaler.Marshal(respRw) if merr != nil { grpclog.Errorf("Failed to marshal error message %q: %v", s, merr) w.WriteHeader(http.StatusInternalServerError) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go index 9005d6a0bf4..2fcd7af3c40 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go @@ -155,7 +155,7 @@ func buildPathsBlindly(name string, in interface{}) []string { return paths } -// fieldMaskPathItem stores a in-progress deconstruction of a path for a fieldmask +// fieldMaskPathItem stores an in-progress deconstruction of a path for a fieldmask type fieldMaskPathItem struct { // the list of prior fields leading up to node connected by dots path string diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go index de1eef1f4f8..0fa90765661 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go @@ -3,6 +3,7 @@ package runtime import ( "context" "errors" + "fmt" "io" "net/http" "net/textproto" @@ -55,20 +56,33 @@ func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshal return } + respRw, err := mux.forwardResponseRewriter(ctx, resp) + if err != nil { + grpclog.Errorf("Rewrite error: %v", err) + handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter) + return + } + if !wroteHeader { - w.Header().Set("Content-Type", marshaler.ContentType(resp)) + var contentType string + if sct, ok := marshaler.(StreamContentType); ok { + contentType = sct.StreamContentType(respRw) + } else { + contentType = marshaler.ContentType(respRw) + } + w.Header().Set("Content-Type", contentType) } var buf []byte - httpBody, isHTTPBody := resp.(*httpbody.HttpBody) + httpBody, isHTTPBody := respRw.(*httpbody.HttpBody) switch { - case resp == nil: + case respRw == nil: buf, err = marshaler.Marshal(errorChunk(status.New(codes.Internal, "empty 
response"))) case isHTTPBody: buf = httpBody.GetData() default: - result := map[string]interface{}{"result": resp} - if rb, ok := resp.(responseBody); ok { + result := map[string]interface{}{"result": respRw} + if rb, ok := respRw.(responseBody); ok { result["result"] = rb.XXX_ResponseBody() } @@ -164,12 +178,17 @@ func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marsha HTTPError(ctx, mux, marshaler, w, req, err) return } + respRw, err := mux.forwardResponseRewriter(ctx, resp) + if err != nil { + grpclog.Errorf("Rewrite error: %v", err) + HTTPError(ctx, mux, marshaler, w, req, err) + return + } var buf []byte - var err error - if rb, ok := resp.(responseBody); ok { + if rb, ok := respRw.(responseBody); ok { buf, err = marshaler.Marshal(rb.XXX_ResponseBody()) } else { - buf, err = marshaler.Marshal(resp) + buf, err = marshaler.Marshal(respRw) } if err != nil { grpclog.Errorf("Marshal error: %v", err) @@ -181,7 +200,7 @@ func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marsha w.Header().Set("Content-Length", strconv.Itoa(len(buf))) } - if _, err = w.Write(buf); err != nil { + if _, err = w.Write(buf); err != nil && !errors.Is(err, http.ErrBodyNotAllowed) { grpclog.Errorf("Failed to write response: %v", err) } @@ -201,8 +220,7 @@ func handleForwardResponseOptions(ctx context.Context, w http.ResponseWriter, re } for _, opt := range opts { if err := opt(ctx, w, resp); err != nil { - grpclog.Errorf("Error handling ForwardResponseOptions: %v", err) - return err + return fmt.Errorf("error handling ForwardResponseOptions: %w", err) } } return nil diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go index 2c0d25ff493..b1dfc37af9b 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go @@ -48,3 +48,11 @@ type Delimited interface { // Delimiter returns the record separator for the stream. Delimiter() []byte } + +// StreamContentType defines the streaming content type. +type StreamContentType interface { + // StreamContentType returns the content type for a stream. This shares the + // same behaviour as for `Marshaler.ContentType`, but is called, if present, + // in the case of a streamed response. + StreamContentType(v interface{}) string +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go index 0b051e6e894..07c28112c89 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go @@ -86,8 +86,8 @@ func (m marshalerRegistry) add(mime string, marshaler Marshaler) error { // It allows for a mapping of case-sensitive Content-Type MIME type string to runtime.Marshaler interfaces. // // For example, you could allow the client to specify the use of the runtime.JSONPb marshaler -// with a "application/jsonpb" Content-Type and the use of the runtime.JSONBuiltin marshaler -// with a "application/json" Content-Type. +// with an "application/jsonpb" Content-Type and the use of the runtime.JSONBuiltin marshaler +// with an "application/json" Content-Type. // "*" can be used to match any Content-Type. // This can be attached to a ServerMux with the marshaler option. 
func makeMarshalerMIMERegistry() marshalerRegistry { diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go index ed9a7e4387d..60c2065ddcb 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go @@ -48,12 +48,19 @@ var encodedPathSplitter = regexp.MustCompile("(/|%2F)") // A HandlerFunc handles a specific pair of path pattern and HTTP method. type HandlerFunc func(w http.ResponseWriter, r *http.Request, pathParams map[string]string) +// A Middleware handler wraps another HandlerFunc to do some pre- and/or post-processing of the request. This is used as an alternative to gRPC interceptors when using the direct-to-implementation +// registration methods. It is generally recommended to use gRPC client or server interceptors instead +// where possible. +type Middleware func(HandlerFunc) HandlerFunc + // ServeMux is a request multiplexer for grpc-gateway. // It matches http requests to patterns and invokes the corresponding handler. type ServeMux struct { // handlers maps HTTP method to a list of handlers. handlers map[string][]handler + middlewares []Middleware forwardResponseOptions []func(context.Context, http.ResponseWriter, proto.Message) error + forwardResponseRewriter ForwardResponseRewriter marshalers marshalerRegistry incomingHeaderMatcher HeaderMatcherFunc outgoingHeaderMatcher HeaderMatcherFunc @@ -69,6 +76,24 @@ type ServeMux struct { // ServeMuxOption is an option that can be given to a ServeMux on construction. type ServeMuxOption func(*ServeMux) +// ForwardResponseRewriter is the signature of a function that is capable of rewriting messages +// before they are forwarded in a unary, stream, or error response. +type ForwardResponseRewriter func(ctx context.Context, response proto.Message) (any, error) + +// WithForwardResponseRewriter returns a ServeMuxOption that allows for implementers to insert logic +// that can rewrite the final response before it is forwarded. +// +// The response rewriter function is called during unary message forwarding, stream message +// forwarding and when errors are being forwarded. +// +// NOTE: Using this option will likely make what is generated by `protoc-gen-openapiv2` incorrect. +// Since this option involves making runtime changes to the response shape or type. +func WithForwardResponseRewriter(fwdResponseRewriter ForwardResponseRewriter) ServeMuxOption { + return func(sm *ServeMux) { + sm.forwardResponseRewriter = fwdResponseRewriter + } +} + // WithForwardResponseOption returns a ServeMuxOption representing the forwardResponseOption. // // forwardResponseOption is an option that will be called on the relevant context.Context, @@ -89,6 +114,15 @@ func WithUnescapingMode(mode UnescapingMode) ServeMuxOption { } } +// WithMiddlewares sets server middleware for all handlers. This is useful as an alternative to gRPC +// interceptors when using the direct-to-implementation registration methods and cannot rely +// on gRPC interceptors. It's recommended to use gRPC interceptors instead if possible. +func WithMiddlewares(middlewares ...Middleware) ServeMuxOption { + return func(serveMux *ServeMux) { + serveMux.middlewares = append(serveMux.middlewares, middlewares...) + } +} + // SetQueryParameterParser sets the query parameter parser, used to populate message from query parameters. 
// Configuring this will mean the generated OpenAPI output is no longer correct, and it should be // done with careful consideration. @@ -277,13 +311,14 @@ func WithHealthzEndpoint(healthCheckClient grpc_health_v1.HealthClient) ServeMux // NewServeMux returns a new ServeMux whose internal mapping is empty. func NewServeMux(opts ...ServeMuxOption) *ServeMux { serveMux := &ServeMux{ - handlers: make(map[string][]handler), - forwardResponseOptions: make([]func(context.Context, http.ResponseWriter, proto.Message) error, 0), - marshalers: makeMarshalerMIMERegistry(), - errorHandler: DefaultHTTPErrorHandler, - streamErrorHandler: DefaultStreamErrorHandler, - routingErrorHandler: DefaultRoutingErrorHandler, - unescapingMode: UnescapingModeDefault, + handlers: make(map[string][]handler), + forwardResponseOptions: make([]func(context.Context, http.ResponseWriter, proto.Message) error, 0), + forwardResponseRewriter: func(ctx context.Context, response proto.Message) (any, error) { return response, nil }, + marshalers: makeMarshalerMIMERegistry(), + errorHandler: DefaultHTTPErrorHandler, + streamErrorHandler: DefaultStreamErrorHandler, + routingErrorHandler: DefaultRoutingErrorHandler, + unescapingMode: UnescapingModeDefault, } for _, opt := range opts { @@ -305,6 +340,9 @@ func NewServeMux(opts ...ServeMuxOption) *ServeMux { // Handle associates "h" to the pair of HTTP method and path pattern. func (s *ServeMux) Handle(meth string, pat Pattern, h HandlerFunc) { + if len(s.middlewares) > 0 { + h = chainMiddlewares(s.middlewares)(h) + } s.handlers[meth] = append([]handler{{pat: pat, h: h}}, s.handlers[meth]...) } @@ -405,7 +443,7 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { } continue } - h.h(w, r, pathParams) + s.handleHandler(h, w, r, pathParams) return } @@ -458,7 +496,7 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { s.errorHandler(ctx, s, outboundMarshaler, w, r, sterr) return } - h.h(w, r, pathParams) + s.handleHandler(h, w, r, pathParams) return } _, outboundMarshaler := MarshalerForRequest(s, r) @@ -484,3 +522,16 @@ type handler struct { pat Pattern h HandlerFunc } + +func (s *ServeMux) handleHandler(h handler, w http.ResponseWriter, r *http.Request, pathParams map[string]string) { + h.h(w, r.WithContext(withHTTPPattern(r.Context(), h.pat)), pathParams) +} + +func chainMiddlewares(mws []Middleware) Middleware { + return func(next HandlerFunc) HandlerFunc { + for i := len(mws); i > 0; i-- { + next = mws[i-1](next) + } + return next + } +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go index d549407f20f..f710036b350 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go @@ -40,7 +40,7 @@ func Float32P(val string) (*float32, error) { } // Int64P parses the given string representation of an integer -// and returns a pointer to a int64 whose value is same as the parsed integer. +// and returns a pointer to an int64 whose value is same as the parsed integer. func Int64P(val string) (*int64, error) { i, err := Int64(val) if err != nil { @@ -50,7 +50,7 @@ func Int64P(val string) (*int64, error) { } // Int32P parses the given string representation of an integer -// and returns a pointer to a int32 whose value is same as the parsed integer. +// and returns a pointer to an int32 whose value is same as the parsed integer. 
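Returning to the ServeMux options above, a sketch of how WithMiddlewares and WithForwardResponseRewriter compose; the logging middleware and the pass-through rewriter are illustrative stand-ins:

	package example

	import (
		"context"
		"log"
		"net/http"

		"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
		"google.golang.org/protobuf/proto"
	)

	func newAnnotatedMux() *runtime.ServeMux {
		// chainMiddlewares applies the first middleware outermost, so this
		// logger runs before any later middleware and the handler itself.
		logging := func(next runtime.HandlerFunc) runtime.HandlerFunc {
			return func(w http.ResponseWriter, r *http.Request, pathParams map[string]string) {
				log.Printf("%s %s", r.Method, r.URL.Path)
				next(w, r, pathParams)
			}
		}
		return runtime.NewServeMux(
			runtime.WithMiddlewares(logging),
			// A pass-through rewriter, shaped like the default installed by
			// NewServeMux; a real one could wrap or redact responses.
			runtime.WithForwardResponseRewriter(func(_ context.Context, resp proto.Message) (any, error) {
				return resp, nil
			}),
		)
	}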
func Int32P(val string) (*int32, error) { i, err := Int32(val) if err != nil { diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go index fe634174b85..93fb09922fb 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go @@ -291,7 +291,11 @@ func parseMessage(msgDescriptor protoreflect.MessageDescriptor, value string) (p if err != nil { return protoreflect.Value{}, err } - msg = timestamppb.New(t) + timestamp := timestamppb.New(t) + if ok := timestamp.IsValid(); !ok { + return protoreflect.Value{}, fmt.Errorf("%s before 0001-01-01", value) + } + msg = timestamp case "google.protobuf.Duration": d, err := time.ParseDuration(value) if err != nil { diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go index dfe7de4864a..38ca39cc538 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go @@ -1,6 +1,6 @@ package utilities -// An OpCode is a opcode of compiled path patterns. +// OpCode is an opcode of compiled path patterns. type OpCode int // These constants are the valid values of OpCode. diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go index d224ab776c0..66aa5f2dcc5 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go @@ -5,7 +5,7 @@ import ( "strings" ) -// flagInterface is an cut down interface to `flag` +// flagInterface is a cut down interface to `flag` type flagInterface interface { Var(value flag.Value, name string, usage string) } diff --git a/vendor/github.com/hashicorp/go-plugin/.gitignore b/vendor/github.com/hashicorp/go-plugin/.gitignore deleted file mode 100644 index 4befed30a1c..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -.DS_Store -.idea diff --git a/vendor/github.com/hashicorp/go-plugin/CHANGELOG.md b/vendor/github.com/hashicorp/go-plugin/CHANGELOG.md deleted file mode 100644 index 3d0379c500e..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/CHANGELOG.md +++ /dev/null @@ -1,102 +0,0 @@ -## v1.6.0 - -CHANGES: - -* plugin: Plugins written in other languages can optionally start to advertise whether they support gRPC broker multiplexing. - If the environment variable `PLUGIN_MULTIPLEX_GRPC` is set, it is safe to include a seventh field containing a boolean - value in the `|`-separated protocol negotiation line. 
- -ENHANCEMENTS: - -* Support muxing gRPC broker connections over a single listener [[GH-288](https://github.com/hashicorp/go-plugin/pull/288)] -* client: Configurable buffer size for reading plugin log lines [[GH-265](https://github.com/hashicorp/go-plugin/pull/265)] -* Use `buf` for proto generation [[GH-286](https://github.com/hashicorp/go-plugin/pull/286)] -* deps: bump golang.org/x/net to v0.17.0 [[GH-285](https://github.com/hashicorp/go-plugin/pull/285)] -* deps: bump golang.org/x/sys to v0.13.0 [[GH-285](https://github.com/hashicorp/go-plugin/pull/285)] -* deps: bump golang.org/x/text to v0.13.0 [[GH-285](https://github.com/hashicorp/go-plugin/pull/285)] - -## v1.5.2 - -ENHANCEMENTS: - -client: New `UnixSocketConfig.TempDir` option allows setting the directory to use when creating plugin-specific Unix socket directories [[GH-282](https://github.com/hashicorp/go-plugin/pull/282)] - -## v1.5.1 - -BUGS: - -* server: `PLUGIN_UNIX_SOCKET_DIR` is consistently used for gRPC broker sockets as well as the initial socket [[GH-277](https://github.com/hashicorp/go-plugin/pull/277)] - -ENHANCEMENTS: - -* client: New `UnixSocketConfig` option in `ClientConfig` to support making the client's Unix sockets group-writable [[GH-277](https://github.com/hashicorp/go-plugin/pull/277)] - -## v1.5.0 - -ENHANCEMENTS: - -* client: New `runner.Runner` interface to support clients providing custom plugin command runner implementations [[GH-270](https://github.com/hashicorp/go-plugin/pull/270)] - * Accessible via new `ClientConfig` field `RunnerFunc`, which is mutually exclusive with `Cmd` and `Reattach` - * Reattaching support via `ReattachConfig` field `ReattachFunc` -* client: New `ClientConfig` field `SkipHostEnv` allows omitting the client process' own environment variables from the plugin command's environment [[GH-270](https://github.com/hashicorp/go-plugin/pull/270)] -* client: Add `ID()` method to `Client` for retrieving the pid or other unique ID of a running plugin [[GH-272](https://github.com/hashicorp/go-plugin/pull/272)] -* server: Support setting the directory to create Unix sockets in with the env var `PLUGIN_UNIX_SOCKET_DIR` [[GH-270](https://github.com/hashicorp/go-plugin/pull/270)] -* server: Support setting group write permission and a custom group name or gid owner with the env var `PLUGIN_UNIX_SOCKET_GROUP` [[GH-270](https://github.com/hashicorp/go-plugin/pull/270)] - -## v1.4.11-rc1 - -ENHANCEMENTS: - -* deps: bump protoreflect to v1.15.1 [[GH-264](https://github.com/hashicorp/go-plugin/pull/264)] - -## v1.4.10 - -BUG FIXES: - -* additional notes: ensure to close files [[GH-241](https://github.com/hashicorp/go-plugin/pull/241)] - -ENHANCEMENTS: - -* deps: Remove direct dependency on golang.org/x/net [[GH-240](https://github.com/hashicorp/go-plugin/pull/240)] - -## v1.4.9 - -ENHANCEMENTS: - -* client: Remove log warning introduced in 1.4.5 when SecureConfig is nil. 
[[GH-238](https://github.com/hashicorp/go-plugin/pull/238)] - -## v1.4.8 - -BUG FIXES: - -* Fix windows build: [[GH-227](https://github.com/hashicorp/go-plugin/pull/227)] - -## v1.4.7 - -ENHANCEMENTS: - -* More detailed error message on plugin start failure: [[GH-223](https://github.com/hashicorp/go-plugin/pull/223)] - -## v1.4.6 - -BUG FIXES: - -* server: Prevent gRPC broker goroutine leak when using `GRPCServer` type `GracefulStop()` or `Stop()` methods [[GH-220](https://github.com/hashicorp/go-plugin/pull/220)] - -## v1.4.5 - -ENHANCEMENTS: - -* client: log warning when SecureConfig is nil [[GH-207](https://github.com/hashicorp/go-plugin/pull/207)] - - -## v1.4.4 - -ENHANCEMENTS: - -* client: increase level of plugin exit logs [[GH-195](https://github.com/hashicorp/go-plugin/pull/195)] - -BUG FIXES: - -* Bidirectional communication: fix bidirectional communication when AutoMTLS is enabled [[GH-193](https://github.com/hashicorp/go-plugin/pull/193)] -* RPC: Trim a spurious log message for plugins using RPC [[GH-186](https://github.com/hashicorp/go-plugin/pull/186)] diff --git a/vendor/github.com/hashicorp/go-plugin/LICENSE b/vendor/github.com/hashicorp/go-plugin/LICENSE deleted file mode 100644 index 042324fb7e1..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/LICENSE +++ /dev/null @@ -1,355 +0,0 @@ -Copyright (c) 2016 HashiCorp, Inc. - -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. “Contributor” - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. “Contributor Version” - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. - -1.3. “Contribution” - - means Covered Software of a particular Contributor. - -1.4. “Covered Software” - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. “Incompatible With Secondary Licenses” - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. - -1.6. “Executable Form” - - means any form of the work other than Source Code Form. - -1.7. “Larger Work” - - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. - -1.8. “License” - - means this document. - -1.9. “Licensable” - - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. - -1.10. “Modifications” - - means any of the following: - - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. 
“Patent Claims” of a Contributor - - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. - -1.12. “Secondary License” - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. “Source Code Form” - - means the form of the work preferred for making modifications. - -1.14. “You” (or “Your”) - - means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party’s - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. - - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. - -2.6. 
Fair Use - - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. 
Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. - Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. - -7. 
Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, then -You may include the notice in a location (such as a LICENSE file in a relevant -directory) where a recipient would be likely to look for such a notice. 
- -You may add additional accurate notices of copyright ownership. - -Exhibit B - “Incompatible With Secondary Licenses” Notice - - This Source Code Form is “Incompatible - With Secondary Licenses”, as defined by - the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/go-plugin/README.md b/vendor/github.com/hashicorp/go-plugin/README.md deleted file mode 100644 index 50baee06e15..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/README.md +++ /dev/null @@ -1,165 +0,0 @@ -# Go Plugin System over RPC - -`go-plugin` is a Go (golang) plugin system over RPC. It is the plugin system -that has been in use by HashiCorp tooling for over 4 years. While initially -created for [Packer](https://www.packer.io), it is additionally in use by -[Terraform](https://www.terraform.io), [Nomad](https://www.nomadproject.io), -[Vault](https://www.vaultproject.io), -[Boundary](https://www.boundaryproject.io), -and [Waypoint](https://www.waypointproject.io). - -While the plugin system is over RPC, it is currently only designed to work -over a local [reliable] network. Plugins over a real network are not supported -and will lead to unexpected behavior. - -This plugin system has been used on millions of machines across many different -projects and has proven to be battle hardened and ready for production use. - -## Features - -The HashiCorp plugin system supports a number of features: - -**Plugins are Go interface implementations.** This makes writing and consuming -plugins feel very natural. To a plugin author: you just implement an -interface as if it were going to run in the same process. For a plugin user: -you just use and call functions on an interface as if it were in the same -process. This plugin system handles the communication in between. - -**Cross-language support.** Plugins can be written (and consumed) by -almost every major language. This library supports serving plugins via -[gRPC](http://www.grpc.io). gRPC-based plugins enable plugins to be written -in any language. - -**Complex arguments and return values are supported.** This library -provides APIs for handling complex arguments and return values such -as interfaces, `io.Reader/Writer`, etc. We do this by giving you a library -(`MuxBroker`) for creating new connections between the client/server to -serve additional interfaces or transfer raw data. - -**Bidirectional communication.** Because the plugin system supports -complex arguments, the host process can send it interface implementations -and the plugin can call back into the host process. - -**Built-in Logging.** Any plugins that use the `log` standard library -will have log data automatically sent to the host process. The host -process will mirror this output prefixed with the path to the plugin -binary. This makes debugging with plugins simple. If the host system -uses [hclog](https://github.com/hashicorp/go-hclog) then the log data -will be structured. If the plugin also uses hclog, logs from the plugin -will be sent to the host hclog and be structured. - -**Protocol Versioning.** A very basic "protocol version" is supported that -can be incremented to invalidate any previous plugins. This is useful when -interface signatures are changing, protocol level changes are necessary, -etc. When a protocol version is incompatible, a human friendly error -message is shown to the end user. - -**Stdout/Stderr Syncing.** While plugins are subprocesses, they can continue -to use stdout/stderr as usual and the output will get mirrored back to -the host process. 
The host process can control what `io.Writer` these -streams go to to prevent this from happening. - -**TTY Preservation.** Plugin subprocesses are connected to the identical -stdin file descriptor as the host process, allowing software that requires -a TTY to work. For example, a plugin can execute `ssh` and even though there -are multiple subprocesses and RPC happening, it will look and act perfectly -to the end user. - -**Host upgrade while a plugin is running.** Plugins can be "reattached" -so that the host process can be upgraded while the plugin is still running. -This requires the host/plugin to know this is possible and daemonize -properly. `NewClient` takes a `ReattachConfig` to determine if and how to -reattach. - -**Cryptographically Secure Plugins.** Plugins can be verified with an expected -checksum and RPC communications can be configured to use TLS. The host process -must be properly secured to protect this configuration. - -## Architecture - -The HashiCorp plugin system works by launching subprocesses and communicating -over RPC (using standard `net/rpc` or [gRPC](http://www.grpc.io)). A single -connection is made between any plugin and the host process. For net/rpc-based -plugins, we use a [connection multiplexing](https://github.com/hashicorp/yamux) -library to multiplex any other connections on top. For gRPC-based plugins, -the HTTP2 protocol handles multiplexing. - -This architecture has a number of benefits: - - * Plugins can't crash your host process: A panic in a plugin doesn't - panic the plugin user. - - * Plugins are very easy to write: just write a Go application and `go build`. - Or use any other language to write a gRPC server with a tiny amount of - boilerplate to support go-plugin. - - * Plugins are very easy to install: just put the binary in a location where - the host will find it (depends on the host but this library also provides - helpers), and the plugin host handles the rest. - - * Plugins can be relatively secure: The plugin only has access to the - interfaces and args given to it, not to the entire memory space of the - process. Additionally, go-plugin can communicate with the plugin over - TLS. - -## Usage - -To use the plugin system, you must take the following steps. These are -high-level steps that must be done. Examples are available in the -`examples/` directory. - - 1. Choose the interface(s) you want to expose for plugins. - - 2. For each interface, implement an implementation of that interface - that communicates over a `net/rpc` connection or over a - [gRPC](http://www.grpc.io) connection or both. You'll have to implement - both a client and server implementation. - - 3. Create a `Plugin` implementation that knows how to create the RPC - client/server for a given plugin type. - - 4. Plugin authors call `plugin.Serve` to serve a plugin from the - `main` function. - - 5. Plugin users use `plugin.Client` to launch a subprocess and request - an interface implementation over RPC. - -That's it! In practice, step 2 is the most tedious and time consuming step. -Even so, it isn't very difficult and you can see examples in the `examples/` -directory as well as throughout our various open source projects. - -For complete API documentation, see [GoDoc](https://godoc.org/github.com/hashicorp/go-plugin). - -## Roadmap - -Our plugin system is constantly evolving. As we use the plugin system for -new projects or for new features in existing projects, we constantly find -improvements we can make. 
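A minimal host-side sketch of the usage steps described above, assuming a hypothetical `Greeter` interface and plugin binary `./plugin-greeter`; the handshake values are placeholders, not part of this library:

package main

import (
	"fmt"
	"log"
	"net/rpc"
	"os/exec"

	"github.com/hashicorp/go-plugin"
)

// Greeter is the interface the host wants plugins to implement (step 1).
type Greeter interface{ Greet() string }

// GreeterRPC is the client-side stub that forwards calls over net/rpc (step 2).
type GreeterRPC struct{ client *rpc.Client }

func (g *GreeterRPC) Greet() string {
	var resp string
	if err := g.client.Call("Plugin.Greet", new(interface{}), &resp); err != nil {
		log.Fatal(err)
	}
	return resp
}

// GreeterPlugin implements plugin.Plugin (step 3); only the host side is
// fleshed out in this sketch.
type GreeterPlugin struct{}

func (GreeterPlugin) Server(*plugin.MuxBroker) (interface{}, error) {
	return nil, fmt.Errorf("host side only in this sketch")
}

func (GreeterPlugin) Client(_ *plugin.MuxBroker, c *rpc.Client) (interface{}, error) {
	return &GreeterRPC{client: c}, nil
}

func main() {
	// Step 5: launch the subprocess and dispense the interface over RPC.
	client := plugin.NewClient(&plugin.ClientConfig{
		HandshakeConfig: plugin.HandshakeConfig{
			ProtocolVersion:  1,
			MagicCookieKey:   "BASIC_PLUGIN", // placeholder handshake values
			MagicCookieValue: "hello",
		},
		Plugins: plugin.PluginSet{"greeter": GreeterPlugin{}},
		Cmd:     exec.Command("./plugin-greeter"), // hypothetical plugin binary
	})
	defer client.Kill()

	rpcClient, err := client.Client()
	if err != nil {
		log.Fatal(err)
	}
	raw, err := rpcClient.Dispense("greeter")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(raw.(Greeter).Greet())
}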
- -At this point in time, the roadmap for the plugin system is: - -**Semantic Versioning.** Plugins will be able to implement a semantic version. -This plugin system will give host processes a system for constraining -versions. This is in addition to the protocol versioning already present -which is more for larger underlying changes. - -## What About Shared Libraries? - -When we started using plugins (late 2012, early 2013), plugins over RPC -were the only option since Go didn't support dynamic library loading. Today, -Go supports the [plugin](https://golang.org/pkg/plugin/) standard library with -a number of limitations. Since 2012, our plugin system has stabilized -from tens of millions of users using it, and has many benefits we've come to -value greatly. - -For example, we use this plugin system in -[Vault](https://www.vaultproject.io) where dynamic library loading is -not acceptable for security reasons. That is an extreme -example, but we believe our library system has more upsides than downsides -over dynamic library loading and since we've had it built and tested for years, -we'll continue to use it. - -Shared libraries have one major advantage over our system which is much -higher performance. In real world scenarios across our various tools, -we've never required any more performance out of our plugin system and it -has seen very high throughput, so this isn't a concern for us at the moment. diff --git a/vendor/github.com/hashicorp/go-plugin/buf.gen.yaml b/vendor/github.com/hashicorp/go-plugin/buf.gen.yaml deleted file mode 100644 index 033d0153b2a..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/buf.gen.yaml +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - -version: v1 -plugins: - - plugin: buf.build/protocolbuffers/go - out: . - opt: - - paths=source_relative - - plugin: buf.build/grpc/go:v1.3.0 - out: . - opt: - - paths=source_relative - - require_unimplemented_servers=false diff --git a/vendor/github.com/hashicorp/go-plugin/buf.yaml b/vendor/github.com/hashicorp/go-plugin/buf.yaml deleted file mode 100644 index 3d0da4c7199..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/buf.yaml +++ /dev/null @@ -1,7 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: MPL-2.0 - -version: v1 -build: - excludes: - - examples/ \ No newline at end of file diff --git a/vendor/github.com/hashicorp/go-plugin/client.go b/vendor/github.com/hashicorp/go-plugin/client.go deleted file mode 100644 index 73f6b35151c..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/client.go +++ /dev/null @@ -1,1239 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package plugin - -import ( - "bufio" - "context" - "crypto/subtle" - "crypto/tls" - "crypto/x509" - "encoding/base64" - "errors" - "fmt" - "hash" - "io" - "io/ioutil" - "net" - "os" - "os/exec" - "path/filepath" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" - - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-plugin/internal/cmdrunner" - "github.com/hashicorp/go-plugin/internal/grpcmux" - "github.com/hashicorp/go-plugin/runner" - "google.golang.org/grpc" -) - -// If this is 1, then we've called CleanupClients. This can be used -// by plugin RPC implementations to change error behavior since you -// can expected network connection errors at this point. This should be -// read by using sync/atomic. 
-var Killed uint32 = 0 - -// This is a slice of the "managed" clients which are cleaned up when -// calling Cleanup -var managedClients = make([]*Client, 0, 5) -var managedClientsLock sync.Mutex - -// Error types -var ( - // ErrProcessNotFound is returned when a client is instantiated to - // reattach to an existing process and it isn't found. - ErrProcessNotFound = cmdrunner.ErrProcessNotFound - - // ErrChecksumsDoNotMatch is returned when binary's checksum doesn't match - // the one provided in the SecureConfig. - ErrChecksumsDoNotMatch = errors.New("checksums did not match") - - // ErrSecureNoChecksum is returned when an empty checksum is provided to the - // SecureConfig. - ErrSecureConfigNoChecksum = errors.New("no checksum provided") - - // ErrSecureNoHash is returned when a nil Hash object is provided to the - // SecureConfig. - ErrSecureConfigNoHash = errors.New("no hash implementation provided") - - // ErrSecureConfigAndReattach is returned when both Reattach and - // SecureConfig are set. - ErrSecureConfigAndReattach = errors.New("only one of Reattach or SecureConfig can be set") - - // ErrGRPCBrokerMuxNotSupported is returned when the client requests - // multiplexing over the gRPC broker, but the plugin does not support the - // feature. In most cases, this should be resolvable by updating and - // rebuilding the plugin, or restarting the plugin with - // ClientConfig.GRPCBrokerMultiplex set to false. - ErrGRPCBrokerMuxNotSupported = errors.New("client requested gRPC broker multiplexing but plugin does not support the feature") -) - -// defaultPluginLogBufferSize is the default size of the buffer used to read from stderr for plugin log lines. -const defaultPluginLogBufferSize = 64 * 1024 - -// Client handles the lifecycle of a plugin application. It launches -// plugins, connects to them, dispenses interface implementations, and handles -// killing the process. -// -// Plugin hosts should use one Client for each plugin executable. To -// dispense a plugin type, use the `Client.Client` function, and then -// cal `Dispense`. This awkward API is mostly historical but is used to split -// the client that deals with subprocess management and the client that -// does RPC management. -// -// See NewClient and ClientConfig for using a Client. -type Client struct { - config *ClientConfig - exited bool - l sync.Mutex - address net.Addr - runner runner.AttachedRunner - client ClientProtocol - protocol Protocol - logger hclog.Logger - doneCtx context.Context - ctxCancel context.CancelFunc - negotiatedVersion int - - // clientWaitGroup is used to manage the lifecycle of the plugin management - // goroutines. - clientWaitGroup sync.WaitGroup - - // stderrWaitGroup is used to prevent the command's Wait() function from - // being called before we've finished reading from the stderr pipe. - stderrWaitGroup sync.WaitGroup - - // processKilled is used for testing only, to flag when the process was - // forcefully killed. - processKilled bool - - unixSocketCfg UnixSocketConfig - - grpcMuxerOnce sync.Once - grpcMuxer *grpcmux.GRPCClientMuxer -} - -// NegotiatedVersion returns the protocol version negotiated with the server. -// This is only valid after Start() is called. -func (c *Client) NegotiatedVersion() int { - return c.negotiatedVersion -} - -// ID returns a unique ID for the running plugin. By default this is the process -// ID (pid), but it could take other forms if RunnerFunc was provided. 
-func (c *Client) ID() string { - c.l.Lock() - defer c.l.Unlock() - - if c.runner != nil { - return c.runner.ID() - } - - return "" -} - -// ClientConfig is the configuration used to initialize a new -// plugin client. After being used to initialize a plugin client, -// that configuration must not be modified again. -type ClientConfig struct { - // HandshakeConfig is the configuration that must match servers. - HandshakeConfig - - // Plugins are the plugins that can be consumed. - // The implied version of this PluginSet is the Handshake.ProtocolVersion. - Plugins PluginSet - - // VersionedPlugins is a map of PluginSets for specific protocol versions. - // These can be used to negotiate a compatible version between client and - // server. If this is set, Handshake.ProtocolVersion is not required. - VersionedPlugins map[int]PluginSet - - // One of the following must be set, but not both. - // - // Cmd is the unstarted subprocess for starting the plugin. If this is - // set, then the Client starts the plugin process on its own and connects - // to it. - // - // Reattach is configuration for reattaching to an existing plugin process - // that is already running. This isn't common. - Cmd *exec.Cmd - Reattach *ReattachConfig - - // RunnerFunc allows consumers to provide their own implementation of - // runner.Runner and control the context within which a plugin is executed. - // The cmd argument will have been copied from the config and populated with - // environment variables that a go-plugin server expects to read such as - // AutoMTLS certs and the magic cookie key. - RunnerFunc func(l hclog.Logger, cmd *exec.Cmd, tmpDir string) (runner.Runner, error) - - // SecureConfig is configuration for verifying the integrity of the - // executable. It can not be used with Reattach. - SecureConfig *SecureConfig - - // TLSConfig is used to enable TLS on the RPC client. - TLSConfig *tls.Config - - // Managed represents if the client should be managed by the - // plugin package or not. If true, then by calling CleanupClients, - // it will automatically be cleaned up. Otherwise, the client - // user is fully responsible for making sure to Kill all plugin - // clients. By default the client is _not_ managed. - Managed bool - - // The minimum and maximum port to use for communicating with - // the subprocess. If not set, this defaults to 10,000 and 25,000 - // respectively. - MinPort, MaxPort uint - - // StartTimeout is the timeout to wait for the plugin to say it - // has started successfully. - StartTimeout time.Duration - - // If non-nil, then the stderr of the client will be written to here - // (as well as the log). This is the original os.Stderr of the subprocess. - // This isn't the output of synced stderr. - Stderr io.Writer - - // SyncStdout, SyncStderr can be set to override the - // respective os.Std* values in the plugin. Care should be taken to - // avoid races here. If these are nil, then this will be set to - // ioutil.Discard. - SyncStdout io.Writer - SyncStderr io.Writer - - // AllowedProtocols is a list of allowed protocols. If this isn't set, - // then only netrpc is allowed. This is so that older go-plugin systems - // can show friendly errors if they see a plugin with an unknown - // protocol. - // - // By setting this, you can cause an error immediately on plugin start - // if an unsupported protocol is used with a good error message. - // - // If this isn't set at all (nil value), then only net/rpc is accepted. - // This is done for legacy reasons. 
You must explicitly opt-in to - // new protocols. - AllowedProtocols []Protocol - - // Logger is the logger that the client will used. If none is provided, - // it will default to hclog's default logger. - Logger hclog.Logger - - // PluginLogBufferSize is the buffer size(bytes) to read from stderr for plugin log lines. - // If this is 0, then the default of 64KB is used. - PluginLogBufferSize int - - // AutoMTLS has the client and server automatically negotiate mTLS for - // transport authentication. This ensures that only the original client will - // be allowed to connect to the server, and all other connections will be - // rejected. The client will also refuse to connect to any server that isn't - // the original instance started by the client. - // - // In this mode of operation, the client generates a one-time use tls - // certificate, sends the public x.509 certificate to the new server, and - // the server generates a one-time use tls certificate, and sends the public - // x.509 certificate back to the client. These are used to authenticate all - // rpc connections between the client and server. - // - // Setting AutoMTLS to true implies that the server must support the - // protocol, and correctly negotiate the tls certificates, or a connection - // failure will result. - // - // The client should not set TLSConfig, nor should the server set a - // TLSProvider, because AutoMTLS implies that a new certificate and tls - // configuration will be generated at startup. - // - // You cannot Reattach to a server with this option enabled. - AutoMTLS bool - - // GRPCDialOptions allows plugin users to pass custom grpc.DialOption - // to create gRPC connections. This only affects plugins using the gRPC - // protocol. - GRPCDialOptions []grpc.DialOption - - // GRPCBrokerMultiplex turns on multiplexing for the gRPC broker. The gRPC - // broker will multiplex all brokered gRPC servers over the plugin's original - // listener socket instead of making a new listener for each server. The - // go-plugin library currently only includes a Go implementation for the - // server (i.e. plugin) side of gRPC broker multiplexing. - // - // Does not support reattaching. - // - // Multiplexed gRPC streams MUST be established sequentially, i.e. after - // calling AcceptAndServe from one side, wait for the other side to Dial - // before calling AcceptAndServe again. - GRPCBrokerMultiplex bool - - // SkipHostEnv allows plugins to run without inheriting the parent process' - // environment variables. - SkipHostEnv bool - - // UnixSocketConfig configures additional options for any Unix sockets - // that are created. Not normally required. Not supported on Windows. - UnixSocketConfig *UnixSocketConfig -} - -type UnixSocketConfig struct { - // If set, go-plugin will change the owner of any Unix sockets created to - // this group, and set them as group-writable. Can be a name or gid. The - // client process must be a member of this group or chown will fail. - Group string - - // TempDir specifies the base directory to use when creating a plugin-specific - // temporary directory. It is expected to already exist and be writable. If - // not set, defaults to the directory chosen by os.MkdirTemp. - TempDir string - - // The directory to create Unix sockets in. Internally created and managed - // by go-plugin and deleted when the plugin is killed. Will be created - // inside TempDir if specified. - socketDir string -} - -// ReattachConfig is used to configure a client to reattach to an -// already-running plugin process. 
You can retrieve this information by -// calling ReattachConfig on Client. -type ReattachConfig struct { - Protocol Protocol - ProtocolVersion int - Addr net.Addr - Pid int - - // ReattachFunc allows consumers to provide their own implementation of - // runner.AttachedRunner and attach to something other than a plain process. - // At least one of Pid or ReattachFunc must be set. - ReattachFunc runner.ReattachFunc - - // Test is set to true if this is reattaching to to a plugin in "test mode" - // (see ServeConfig.Test). In this mode, client.Kill will NOT kill the - // process and instead will rely on the plugin to terminate itself. This - // should not be used in non-test environments. - Test bool -} - -// SecureConfig is used to configure a client to verify the integrity of an -// executable before running. It does this by verifying the checksum is -// expected. Hash is used to specify the hashing method to use when checksumming -// the file. The configuration is verified by the client by calling the -// SecureConfig.Check() function. -// -// The host process should ensure the checksum was provided by a trusted and -// authoritative source. The binary should be installed in such a way that it -// can not be modified by an unauthorized user between the time of this check -// and the time of execution. -type SecureConfig struct { - Checksum []byte - Hash hash.Hash -} - -// Check takes the filepath to an executable and returns true if the checksum of -// the file matches the checksum provided in the SecureConfig. -func (s *SecureConfig) Check(filePath string) (bool, error) { - if len(s.Checksum) == 0 { - return false, ErrSecureConfigNoChecksum - } - - if s.Hash == nil { - return false, ErrSecureConfigNoHash - } - - file, err := os.Open(filePath) - if err != nil { - return false, err - } - defer file.Close() - - _, err = io.Copy(s.Hash, file) - if err != nil { - return false, err - } - - sum := s.Hash.Sum(nil) - - return subtle.ConstantTimeCompare(sum, s.Checksum) == 1, nil -} - -// This makes sure all the managed subprocesses are killed and properly -// logged. This should be called before the parent process running the -// plugins exits. -// -// This must only be called _once_. -func CleanupClients() { - // Set the killed to true so that we don't get unexpected panics - atomic.StoreUint32(&Killed, 1) - - // Kill all the managed clients in parallel and use a WaitGroup - // to wait for them all to finish up. - var wg sync.WaitGroup - managedClientsLock.Lock() - for _, client := range managedClients { - wg.Add(1) - - go func(client *Client) { - client.Kill() - wg.Done() - }(client) - } - managedClientsLock.Unlock() - - wg.Wait() -} - -// NewClient creates a new plugin client which manages the lifecycle of an external -// plugin and gets the address for the RPC connection. -// -// The client must be cleaned up at some point by calling Kill(). If -// the client is a managed client (created with ClientConfig.Managed) you -// can just call CleanupClients at the end of your program and they will -// be properly cleaned. 
-func NewClient(config *ClientConfig) (c *Client) { - if config.MinPort == 0 && config.MaxPort == 0 { - config.MinPort = 10000 - config.MaxPort = 25000 - } - - if config.StartTimeout == 0 { - config.StartTimeout = 1 * time.Minute - } - - if config.Stderr == nil { - config.Stderr = ioutil.Discard - } - - if config.SyncStdout == nil { - config.SyncStdout = io.Discard - } - if config.SyncStderr == nil { - config.SyncStderr = io.Discard - } - - if config.AllowedProtocols == nil { - config.AllowedProtocols = []Protocol{ProtocolNetRPC} - } - - if config.Logger == nil { - config.Logger = hclog.New(&hclog.LoggerOptions{ - Output: hclog.DefaultOutput, - Level: hclog.Trace, - Name: "plugin", - }) - } - - if config.PluginLogBufferSize == 0 { - config.PluginLogBufferSize = defaultPluginLogBufferSize - } - - c = &Client{ - config: config, - logger: config.Logger, - } - if config.Managed { - managedClientsLock.Lock() - managedClients = append(managedClients, c) - managedClientsLock.Unlock() - } - - return -} - -// Client returns the protocol client for this connection. -// -// Subsequent calls to this will return the same client. -func (c *Client) Client() (ClientProtocol, error) { - _, err := c.Start() - if err != nil { - return nil, err - } - - c.l.Lock() - defer c.l.Unlock() - - if c.client != nil { - return c.client, nil - } - - switch c.protocol { - case ProtocolNetRPC: - c.client, err = newRPCClient(c) - - case ProtocolGRPC: - c.client, err = newGRPCClient(c.doneCtx, c) - - default: - return nil, fmt.Errorf("unknown server protocol: %s", c.protocol) - } - - if err != nil { - c.client = nil - return nil, err - } - - return c.client, nil -} - -// Tells whether or not the underlying process has exited. -func (c *Client) Exited() bool { - c.l.Lock() - defer c.l.Unlock() - return c.exited -} - -// killed is used in tests to check if a process failed to exit gracefully, and -// needed to be killed. -func (c *Client) killed() bool { - c.l.Lock() - defer c.l.Unlock() - return c.processKilled -} - -// End the executing subprocess (if it is running) and perform any cleanup -// tasks necessary such as capturing any remaining logs and so on. -// -// This method blocks until the process successfully exits. -// -// This method can safely be called multiple times. -func (c *Client) Kill() { - // Grab a lock to read some private fields. - c.l.Lock() - runner := c.runner - addr := c.address - hostSocketDir := c.unixSocketCfg.socketDir - c.l.Unlock() - - // If there is no runner or ID, there is nothing to kill. - if runner == nil || runner.ID() == "" { - return - } - - defer func() { - // Wait for the all client goroutines to finish. - c.clientWaitGroup.Wait() - - if hostSocketDir != "" { - os.RemoveAll(hostSocketDir) - } - - // Make sure there is no reference to the old process after it has been - // killed. - c.l.Lock() - c.runner = nil - c.l.Unlock() - }() - - // We need to check for address here. It is possible that the plugin - // started (process != nil) but has no address (addr == nil) if the - // plugin failed at startup. If we do have an address, we need to close - // the plugin net connections. - graceful := false - if addr != nil { - // Close the client to cleanly exit the process. - client, err := c.Client() - if err == nil { - err = client.Close() - - // If there is no error, then we attempt to wait for a graceful - // exit. If there was an error, we assume that graceful cleanup - // won't happen and just force kill. - graceful = err == nil - if err != nil { - // If there was an error just log it. 
We're going to force - // kill in a moment anyways. - c.logger.Warn("error closing client during Kill", "err", err) - } - } else { - c.logger.Error("client", "error", err) - } - } - - // If we're attempting a graceful exit, then we wait for a short period - // of time to allow that to happen. To wait for this we just wait on the - // doneCh which would be closed if the process exits. - if graceful { - select { - case <-c.doneCtx.Done(): - c.logger.Debug("plugin exited") - return - case <-time.After(2 * time.Second): - } - } - - // If graceful exiting failed, just kill it - c.logger.Warn("plugin failed to exit gracefully") - if err := runner.Kill(context.Background()); err != nil { - c.logger.Debug("error killing plugin", "error", err) - } - - c.l.Lock() - c.processKilled = true - c.l.Unlock() -} - -// Start the underlying subprocess, communicating with it to negotiate -// a port for RPC connections, and returning the address to connect via RPC. -// -// This method is safe to call multiple times. Subsequent calls have no effect. -// Once a client has been started once, it cannot be started again, even if -// it was killed. -func (c *Client) Start() (addr net.Addr, err error) { - c.l.Lock() - defer c.l.Unlock() - - if c.address != nil { - return c.address, nil - } - - // If one of cmd or reattach isn't set, then it is an error. We wrap - // this in a {} for scoping reasons, and hopeful that the escape - // analysis will pop the stack here. - { - var mutuallyExclusiveOptions int - if c.config.Cmd != nil { - mutuallyExclusiveOptions += 1 - } - if c.config.Reattach != nil { - mutuallyExclusiveOptions += 1 - } - if c.config.RunnerFunc != nil { - mutuallyExclusiveOptions += 1 - } - if mutuallyExclusiveOptions != 1 { - return nil, fmt.Errorf("exactly one of Cmd, or Reattach, or RunnerFunc must be set") - } - - if c.config.SecureConfig != nil && c.config.Reattach != nil { - return nil, ErrSecureConfigAndReattach - } - - if c.config.GRPCBrokerMultiplex && c.config.Reattach != nil { - return nil, fmt.Errorf("gRPC broker multiplexing is not supported with Reattach config") - } - } - - if c.config.Reattach != nil { - return c.reattach() - } - - if c.config.VersionedPlugins == nil { - c.config.VersionedPlugins = make(map[int]PluginSet) - } - - // handle all plugins as versioned, using the handshake config as the default. - version := int(c.config.ProtocolVersion) - - // Make sure we're not overwriting a real version 0. If ProtocolVersion was - // non-zero, then we have to just assume the user made sure that - // VersionedPlugins doesn't conflict. - if _, ok := c.config.VersionedPlugins[version]; !ok && c.config.Plugins != nil { - c.config.VersionedPlugins[version] = c.config.Plugins - } - - var versionStrings []string - for v := range c.config.VersionedPlugins { - versionStrings = append(versionStrings, strconv.Itoa(v)) - } - - env := []string{ - fmt.Sprintf("%s=%s", c.config.MagicCookieKey, c.config.MagicCookieValue), - fmt.Sprintf("PLUGIN_MIN_PORT=%d", c.config.MinPort), - fmt.Sprintf("PLUGIN_MAX_PORT=%d", c.config.MaxPort), - fmt.Sprintf("PLUGIN_PROTOCOL_VERSIONS=%s", strings.Join(versionStrings, ",")), - } - if c.config.GRPCBrokerMultiplex { - env = append(env, fmt.Sprintf("%s=true", envMultiplexGRPC)) - } - - cmd := c.config.Cmd - if cmd == nil { - // It's only possible to get here if RunnerFunc is non-nil, but we'll - // still use cmd as a spec to populate metadata for the external - // implementation to consume. 
- cmd = exec.Command("") - } - if !c.config.SkipHostEnv { - cmd.Env = append(cmd.Env, os.Environ()...) - } - cmd.Env = append(cmd.Env, env...) - cmd.Stdin = os.Stdin - - if c.config.SecureConfig != nil { - if ok, err := c.config.SecureConfig.Check(cmd.Path); err != nil { - return nil, fmt.Errorf("error verifying checksum: %s", err) - } else if !ok { - return nil, ErrChecksumsDoNotMatch - } - } - - // Setup a temporary certificate for client/server mtls, and send the public - // certificate to the plugin. - if c.config.AutoMTLS { - c.logger.Info("configuring client automatic mTLS") - certPEM, keyPEM, err := generateCert() - if err != nil { - c.logger.Error("failed to generate client certificate", "error", err) - return nil, err - } - cert, err := tls.X509KeyPair(certPEM, keyPEM) - if err != nil { - c.logger.Error("failed to parse client certificate", "error", err) - return nil, err - } - - cmd.Env = append(cmd.Env, fmt.Sprintf("PLUGIN_CLIENT_CERT=%s", certPEM)) - - c.config.TLSConfig = &tls.Config{ - Certificates: []tls.Certificate{cert}, - ClientAuth: tls.RequireAndVerifyClientCert, - MinVersion: tls.VersionTLS12, - ServerName: "localhost", - } - } - - if c.config.UnixSocketConfig != nil { - c.unixSocketCfg = *c.config.UnixSocketConfig - } - - if c.unixSocketCfg.Group != "" { - cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", EnvUnixSocketGroup, c.unixSocketCfg.Group)) - } - - var runner runner.Runner - switch { - case c.config.RunnerFunc != nil: - c.unixSocketCfg.socketDir, err = os.MkdirTemp(c.unixSocketCfg.TempDir, "plugin-dir") - if err != nil { - return nil, err - } - // os.MkdirTemp creates folders with 0o700, so if we have a group - // configured we need to make it group-writable. - if c.unixSocketCfg.Group != "" { - err = setGroupWritable(c.unixSocketCfg.socketDir, c.unixSocketCfg.Group, 0o770) - if err != nil { - return nil, err - } - } - cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", EnvUnixSocketDir, c.unixSocketCfg.socketDir)) - c.logger.Trace("created temporary directory for unix sockets", "dir", c.unixSocketCfg.socketDir) - - runner, err = c.config.RunnerFunc(c.logger, cmd, c.unixSocketCfg.socketDir) - if err != nil { - return nil, err - } - default: - runner, err = cmdrunner.NewCmdRunner(c.logger, cmd) - if err != nil { - return nil, err - } - - } - - c.runner = runner - startCtx, startCtxCancel := context.WithTimeout(context.Background(), c.config.StartTimeout) - defer startCtxCancel() - err = runner.Start(startCtx) - if err != nil { - return nil, err - } - - // Make sure the command is properly cleaned up if there is an error - defer func() { - rErr := recover() - - if err != nil || rErr != nil { - runner.Kill(context.Background()) - } - - if rErr != nil { - panic(rErr) - } - }() - - // Create a context for when we kill - c.doneCtx, c.ctxCancel = context.WithCancel(context.Background()) - - // Start goroutine that logs the stderr - c.clientWaitGroup.Add(1) - c.stderrWaitGroup.Add(1) - // logStderr calls Done() - go c.logStderr(runner.Name(), runner.Stderr()) - - c.clientWaitGroup.Add(1) - go func() { - // ensure the context is cancelled when we're done - defer c.ctxCancel() - - defer c.clientWaitGroup.Done() - - // wait to finish reading from stderr since the stderr pipe reader - // will be closed by the subsequent call to cmd.Wait(). - c.stderrWaitGroup.Wait() - - // Wait for the command to end. 
- err := runner.Wait(context.Background()) - if err != nil { - c.logger.Error("plugin process exited", "plugin", runner.Name(), "id", runner.ID(), "error", err.Error()) - } else { - // Log and make sure to flush the logs right away - c.logger.Info("plugin process exited", "plugin", runner.Name(), "id", runner.ID()) - } - - os.Stderr.Sync() - - // Set that we exited, which takes a lock - c.l.Lock() - defer c.l.Unlock() - c.exited = true - }() - - // Start a goroutine that is going to be reading the lines - // out of stdout - linesCh := make(chan string) - c.clientWaitGroup.Add(1) - go func() { - defer c.clientWaitGroup.Done() - defer close(linesCh) - - scanner := bufio.NewScanner(runner.Stdout()) - for scanner.Scan() { - linesCh <- scanner.Text() - } - if scanner.Err() != nil { - c.logger.Error("error encountered while scanning stdout", "error", scanner.Err()) - } - }() - - // Make sure after we exit we read the lines from stdout forever - // so they don't block since it is a pipe. - // The scanner goroutine above will close this, but track it with a wait - // group for completeness. - c.clientWaitGroup.Add(1) - defer func() { - go func() { - defer c.clientWaitGroup.Done() - for range linesCh { - } - }() - }() - - // Some channels for the next step - timeout := time.After(c.config.StartTimeout) - - // Start looking for the address - c.logger.Debug("waiting for RPC address", "plugin", runner.Name()) - select { - case <-timeout: - err = errors.New("timeout while waiting for plugin to start") - case <-c.doneCtx.Done(): - err = errors.New("plugin exited before we could connect") - case line, ok := <-linesCh: - // Trim the line and split by "|" in order to get the parts of - // the output. - line = strings.TrimSpace(line) - parts := strings.Split(line, "|") - if len(parts) < 4 { - errText := fmt.Sprintf("Unrecognized remote plugin message: %s", line) - if !ok { - errText += "\n" + "Failed to read any lines from plugin's stdout" - } - additionalNotes := runner.Diagnose(context.Background()) - if additionalNotes != "" { - errText += "\n" + additionalNotes - } - err = errors.New(errText) - return - } - - // Check the core protocol. Wrapped in a {} for scoping. - { - var coreProtocol int - coreProtocol, err = strconv.Atoi(parts[0]) - if err != nil { - err = fmt.Errorf("Error parsing core protocol version: %s", err) - return - } - - if coreProtocol != CoreProtocolVersion { - err = fmt.Errorf("Incompatible core API version with plugin. "+ - "Plugin version: %s, Core version: %d\n\n"+ - "To fix this, the plugin usually only needs to be recompiled.\n"+ - "Please report this to the plugin author.", parts[0], CoreProtocolVersion) - return - } - } - - // Test the API version - version, pluginSet, err := c.checkProtoVersion(parts[1]) - if err != nil { - return addr, err - } - - // set the Plugins value to the compatible set, so the version - // doesn't need to be passed through to the ClientProtocol - // implementation. - c.config.Plugins = pluginSet - c.negotiatedVersion = version - c.logger.Debug("using plugin", "version", version) - - network, address, err := runner.PluginToHost(parts[2], parts[3]) - if err != nil { - return addr, err - } - - switch network { - case "tcp": - addr, err = net.ResolveTCPAddr("tcp", address) - case "unix": - addr, err = net.ResolveUnixAddr("unix", address) - default: - err = fmt.Errorf("Unknown address type: %s", address) - } - - // If we have a server type, then record that. We default to net/rpc - // for backwards compatibility. 
- c.protocol = ProtocolNetRPC - if len(parts) >= 5 { - c.protocol = Protocol(parts[4]) - } - - found := false - for _, p := range c.config.AllowedProtocols { - if p == c.protocol { - found = true - break - } - } - if !found { - err = fmt.Errorf("Unsupported plugin protocol %q. Supported: %v", - c.protocol, c.config.AllowedProtocols) - return addr, err - } - - // See if we have a TLS certificate from the server. - // Checking if the length is > 50 rules out catching the unused "extra" - // data returned from some older implementations. - if len(parts) >= 6 && len(parts[5]) > 50 { - err := c.loadServerCert(parts[5]) - if err != nil { - return nil, fmt.Errorf("error parsing server cert: %s", err) - } - } - - if c.config.GRPCBrokerMultiplex && c.protocol == ProtocolGRPC { - if len(parts) <= 6 { - return nil, fmt.Errorf("%w; for Go plugins, you will need to update the "+ - "github.com/hashicorp/go-plugin dependency and recompile", ErrGRPCBrokerMuxNotSupported) - } - if muxSupported, err := strconv.ParseBool(parts[6]); err != nil { - return nil, fmt.Errorf("error parsing %q as a boolean for gRPC broker multiplexing support", parts[6]) - } else if !muxSupported { - return nil, ErrGRPCBrokerMuxNotSupported - } - } - } - - c.address = addr - return -} - -// loadServerCert is used by AutoMTLS to read an x.509 cert returned by the -// server, and load it as the RootCA and ClientCA for the client TLSConfig. -func (c *Client) loadServerCert(cert string) error { - certPool := x509.NewCertPool() - - asn1, err := base64.RawStdEncoding.DecodeString(cert) - if err != nil { - return err - } - - x509Cert, err := x509.ParseCertificate([]byte(asn1)) - if err != nil { - return err - } - - certPool.AddCert(x509Cert) - - c.config.TLSConfig.RootCAs = certPool - c.config.TLSConfig.ClientCAs = certPool - return nil -} - -func (c *Client) reattach() (net.Addr, error) { - reattachFunc := c.config.Reattach.ReattachFunc - // For backwards compatibility default to cmdrunner.ReattachFunc - if reattachFunc == nil { - reattachFunc = cmdrunner.ReattachFunc(c.config.Reattach.Pid, c.config.Reattach.Addr) - } - - r, err := reattachFunc() - if err != nil { - return nil, err - } - - // Create a context for when we kill - c.doneCtx, c.ctxCancel = context.WithCancel(context.Background()) - - c.clientWaitGroup.Add(1) - // Goroutine to mark exit status - go func(r runner.AttachedRunner) { - defer c.clientWaitGroup.Done() - - // ensure the context is cancelled when we're done - defer c.ctxCancel() - - // Wait for the process to die - r.Wait(context.Background()) - - // Log so we can see it - c.logger.Debug("reattached plugin process exited") - - // Mark it - c.l.Lock() - defer c.l.Unlock() - c.exited = true - }(r) - - // Set the address and protocol - c.address = c.config.Reattach.Addr - c.protocol = c.config.Reattach.Protocol - if c.protocol == "" { - // Default the protocol to net/rpc for backwards compatibility - c.protocol = ProtocolNetRPC - } - - if c.config.Reattach.Test { - c.negotiatedVersion = c.config.Reattach.ProtocolVersion - } else { - // If we're in test mode, we do NOT set the runner. This avoids the - // runner being killed (the only purpose we have for setting c.runner - // when reattaching), since in test mode the process is responsible for - // exiting on its own. - c.runner = r - } - - return c.address, nil -} - -// checkProtoVersion returns the negotiated version and PluginSet. -// This returns an error if the server returned an incompatible protocol -// version, or an invalid handshake response. 
-func (c *Client) checkProtoVersion(protoVersion string) (int, PluginSet, error) { - serverVersion, err := strconv.Atoi(protoVersion) - if err != nil { - return 0, nil, fmt.Errorf("Error parsing protocol version %q: %s", protoVersion, err) - } - - // record these for the error message - var clientVersions []int - - // all versions, including the legacy ProtocolVersion have been added to - // the versions set - for version, plugins := range c.config.VersionedPlugins { - clientVersions = append(clientVersions, version) - - if serverVersion != version { - continue - } - return version, plugins, nil - } - - return 0, nil, fmt.Errorf("Incompatible API version with plugin. "+ - "Plugin version: %d, Client versions: %d", serverVersion, clientVersions) -} - -// ReattachConfig returns the information that must be provided to NewClient -// to reattach to the plugin process that this client started. This is -// useful for plugins that detach from their parent process. -// -// If this returns nil then the process hasn't been started yet. Please -// call Start or Client before calling this. -// -// Clients who specified a RunnerFunc will need to populate their own -// ReattachFunc in the returned ReattachConfig before it can be used. -func (c *Client) ReattachConfig() *ReattachConfig { - c.l.Lock() - defer c.l.Unlock() - - if c.address == nil { - return nil - } - - if c.config.Cmd != nil && c.config.Cmd.Process == nil { - return nil - } - - // If we connected via reattach, just return the information as-is - if c.config.Reattach != nil { - return c.config.Reattach - } - - reattach := &ReattachConfig{ - Protocol: c.protocol, - Addr: c.address, - } - - if c.config.Cmd != nil && c.config.Cmd.Process != nil { - reattach.Pid = c.config.Cmd.Process.Pid - } - - return reattach -} - -// Protocol returns the protocol of server on the remote end. This will -// start the plugin process if it isn't already started. Errors from -// starting the plugin are surpressed and ProtocolInvalid is returned. It -// is recommended you call Start explicitly before calling Protocol to ensure -// no errors occur. -func (c *Client) Protocol() Protocol { - _, err := c.Start() - if err != nil { - return ProtocolInvalid - } - - return c.protocol -} - -func netAddrDialer(addr net.Addr) func(string, time.Duration) (net.Conn, error) { - return func(_ string, _ time.Duration) (net.Conn, error) { - // Connect to the client - conn, err := net.Dial(addr.Network(), addr.String()) - if err != nil { - return nil, err - } - if tcpConn, ok := conn.(*net.TCPConn); ok { - // Make sure to set keep alive so that the connection doesn't die - tcpConn.SetKeepAlive(true) - } - - return conn, nil - } -} - -// dialer is compatible with grpc.WithDialer and creates the connection -// to the plugin. -func (c *Client) dialer(_ string, timeout time.Duration) (net.Conn, error) { - muxer, err := c.getGRPCMuxer(c.address) - if err != nil { - return nil, err - } - - var conn net.Conn - if muxer.Enabled() { - conn, err = muxer.Dial() - if err != nil { - return nil, err - } - } else { - conn, err = netAddrDialer(c.address)("", timeout) - if err != nil { - return nil, err - } - } - - // If we have a TLS config we wrap our connection. We only do this - // for net/rpc since gRPC uses its own mechanism for TLS. 
- if c.protocol == ProtocolNetRPC && c.config.TLSConfig != nil { - conn = tls.Client(conn, c.config.TLSConfig) - } - - return conn, nil -} - -func (c *Client) getGRPCMuxer(addr net.Addr) (*grpcmux.GRPCClientMuxer, error) { - if c.protocol != ProtocolGRPC || !c.config.GRPCBrokerMultiplex { - return nil, nil - } - - var err error - c.grpcMuxerOnce.Do(func() { - c.grpcMuxer, err = grpcmux.NewGRPCClientMuxer(c.logger, addr) - }) - if err != nil { - return nil, err - } - - return c.grpcMuxer, nil -} - -func (c *Client) logStderr(name string, r io.Reader) { - defer c.clientWaitGroup.Done() - defer c.stderrWaitGroup.Done() - l := c.logger.Named(filepath.Base(name)) - - reader := bufio.NewReaderSize(r, c.config.PluginLogBufferSize) - // continuation indicates the previous line was a prefix - continuation := false - - for { - line, isPrefix, err := reader.ReadLine() - switch { - case err == io.EOF: - return - case err != nil: - l.Error("reading plugin stderr", "error", err) - return - } - - c.config.Stderr.Write(line) - - // The line was longer than our max token size, so it's likely - // incomplete and won't unmarshal. - if isPrefix || continuation { - l.Debug(string(line)) - - // if we're finishing a continued line, add the newline back in - if !isPrefix { - c.config.Stderr.Write([]byte{'\n'}) - } - - continuation = isPrefix - continue - } - - c.config.Stderr.Write([]byte{'\n'}) - - entry, err := parseJSON(line) - // If output is not JSON format, print directly to Debug - if err != nil { - // Attempt to infer the desired log level from the commonly used - // string prefixes - switch line := string(line); { - case strings.HasPrefix(line, "[TRACE]"): - l.Trace(line) - case strings.HasPrefix(line, "[DEBUG]"): - l.Debug(line) - case strings.HasPrefix(line, "[INFO]"): - l.Info(line) - case strings.HasPrefix(line, "[WARN]"): - l.Warn(line) - case strings.HasPrefix(line, "[ERROR]"): - l.Error(line) - default: - l.Debug(line) - } - } else { - out := flattenKVPairs(entry.KVPairs) - - out = append(out, "timestamp", entry.Timestamp.Format(hclog.TimeFormat)) - switch hclog.LevelFromString(entry.Level) { - case hclog.Trace: - l.Trace(entry.Message, out...) - case hclog.Debug: - l.Debug(entry.Message, out...) - case hclog.Info: - l.Info(entry.Message, out...) - case hclog.Warn: - l.Warn(entry.Message, out...) - case hclog.Error: - l.Error(entry.Message, out...) - default: - // if there was no log level, it's likely this is unexpected - // json from something other than hclog, and we should output - // it verbatim. - l.Debug(string(line)) - } - } - } -} diff --git a/vendor/github.com/hashicorp/go-plugin/constants.go b/vendor/github.com/hashicorp/go-plugin/constants.go deleted file mode 100644 index e7f5bbe5f7c..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/constants.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package plugin - -const ( - // EnvUnixSocketDir specifies the directory that _plugins_ should create unix - // sockets in. Does not affect client behavior. - EnvUnixSocketDir = "PLUGIN_UNIX_SOCKET_DIR" - - // EnvUnixSocketGroup specifies the owning, writable group to set for Unix - // sockets created by _plugins_. Does not affect client behavior. 
- EnvUnixSocketGroup = "PLUGIN_UNIX_SOCKET_GROUP" - - envMultiplexGRPC = "PLUGIN_MULTIPLEX_GRPC" -) diff --git a/vendor/github.com/hashicorp/go-plugin/discover.go b/vendor/github.com/hashicorp/go-plugin/discover.go deleted file mode 100644 index c5b96242b1a..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/discover.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package plugin - -import ( - "path/filepath" -) - -// Discover discovers plugins that are in a given directory. -// -// The directory doesn't need to be absolute. For example, "." will work fine. -// -// This currently assumes any file matching the glob is a plugin. -// In the future this may be smarter about checking that a file is -// executable and so on. -// -// TODO: test -func Discover(glob, dir string) ([]string, error) { - var err error - - // Make the directory absolute if it isn't already - if !filepath.IsAbs(dir) { - dir, err = filepath.Abs(dir) - if err != nil { - return nil, err - } - } - - return filepath.Glob(filepath.Join(dir, glob)) -} diff --git a/vendor/github.com/hashicorp/go-plugin/error.go b/vendor/github.com/hashicorp/go-plugin/error.go deleted file mode 100644 index e62a21913f4..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/error.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package plugin - -// This is a type that wraps error types so that they can be messaged -// across RPC channels. Since "error" is an interface, we can't always -// gob-encode the underlying structure. This is a valid error interface -// implementer that we will push across. -type BasicError struct { - Message string -} - -// NewBasicError is used to create a BasicError. -// -// err is allowed to be nil. -func NewBasicError(err error) *BasicError { - if err == nil { - return nil - } - - return &BasicError{err.Error()} -} - -func (e *BasicError) Error() string { - return e.Message -} diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_broker.go b/vendor/github.com/hashicorp/go-plugin/grpc_broker.go deleted file mode 100644 index 5b17e37fef0..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/grpc_broker.go +++ /dev/null @@ -1,654 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package plugin - -import ( - "context" - "crypto/tls" - "errors" - "fmt" - "log" - "net" - "sync" - "sync/atomic" - "time" - - "github.com/hashicorp/go-plugin/internal/grpcmux" - "github.com/hashicorp/go-plugin/internal/plugin" - "github.com/hashicorp/go-plugin/runner" - - "github.com/oklog/run" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials" -) - -// streamer interface is used in the broker to send/receive connection -// information. -type streamer interface { - Send(*plugin.ConnInfo) error - Recv() (*plugin.ConnInfo, error) - Close() -} - -// sendErr is used to pass errors back during a send. -type sendErr struct { - i *plugin.ConnInfo - ch chan error -} - -// gRPCBrokerServer is used by the plugin to start a stream and to send -// connection information to/from the plugin. Implements GRPCBrokerServer and -// streamer interfaces. -type gRPCBrokerServer struct { - plugin.UnimplementedGRPCBrokerServer - - // send is used to send connection info to the gRPC stream. - send chan *sendErr - - // recv is used to receive connection info from the gRPC stream. - recv chan *plugin.ConnInfo - - // quit closes down the stream. 
- quit chan struct{} - - // o is used to ensure we close the quit channel only once. - o sync.Once -} - -func newGRPCBrokerServer() *gRPCBrokerServer { - return &gRPCBrokerServer{ - send: make(chan *sendErr), - recv: make(chan *plugin.ConnInfo), - quit: make(chan struct{}), - } -} - -// StartStream implements the GRPCBrokerServer interface and will block until -// the quit channel is closed or the context reports Done. The stream will pass -// connection information to/from the client. -func (s *gRPCBrokerServer) StartStream(stream plugin.GRPCBroker_StartStreamServer) error { - doneCh := stream.Context().Done() - defer s.Close() - - // Proccess send stream - go func() { - for { - select { - case <-doneCh: - return - case <-s.quit: - return - case se := <-s.send: - err := stream.Send(se.i) - se.ch <- err - } - } - }() - - // Process receive stream - for { - i, err := stream.Recv() - if err != nil { - return err - } - select { - case <-doneCh: - return nil - case <-s.quit: - return nil - case s.recv <- i: - } - } - - return nil -} - -// Send is used by the GRPCBroker to pass connection information into the stream -// to the client. -func (s *gRPCBrokerServer) Send(i *plugin.ConnInfo) error { - ch := make(chan error) - defer close(ch) - - select { - case <-s.quit: - return errors.New("broker closed") - case s.send <- &sendErr{ - i: i, - ch: ch, - }: - } - - return <-ch -} - -// Recv is used by the GRPCBroker to pass connection information that has been -// sent from the client from the stream to the broker. -func (s *gRPCBrokerServer) Recv() (*plugin.ConnInfo, error) { - select { - case <-s.quit: - return nil, errors.New("broker closed") - case i := <-s.recv: - return i, nil - } -} - -// Close closes the quit channel, shutting down the stream. -func (s *gRPCBrokerServer) Close() { - s.o.Do(func() { - close(s.quit) - }) -} - -// gRPCBrokerClientImpl is used by the client to start a stream and to send -// connection information to/from the client. Implements GRPCBrokerClient and -// streamer interfaces. -type gRPCBrokerClientImpl struct { - // client is the underlying GRPC client used to make calls to the server. - client plugin.GRPCBrokerClient - - // send is used to send connection info to the gRPC stream. - send chan *sendErr - - // recv is used to receive connection info from the gRPC stream. - recv chan *plugin.ConnInfo - - // quit closes down the stream. - quit chan struct{} - - // o is used to ensure we close the quit channel only once. - o sync.Once -} - -func newGRPCBrokerClient(conn *grpc.ClientConn) *gRPCBrokerClientImpl { - return &gRPCBrokerClientImpl{ - client: plugin.NewGRPCBrokerClient(conn), - send: make(chan *sendErr), - recv: make(chan *plugin.ConnInfo), - quit: make(chan struct{}), - } -} - -// StartStream implements the GRPCBrokerClient interface and will block until -// the quit channel is closed or the context reports Done. The stream will pass -// connection information to/from the plugin. 
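[Editor's aside — the client-side StartStream implementation follows below.] Both streamer implementations use the same send-with-reply trick: Send ships the payload together with a fresh reply channel, and the stream goroutine writes the result of stream.Send back on it, so the caller blocks until the wire operation completes. A minimal standalone sketch of that pattern under illustrative names (request, worker, send are ours):

package main

import (
	"errors"
	"fmt"
)

// request pairs a payload with a reply channel, mirroring sendErr above.
type request struct {
	payload string
	errCh   chan error
}

// worker drains requests and reports each outcome back to the sender,
// like the goroutine inside StartStream.
func worker(reqs <-chan request, quit <-chan struct{}) {
	for {
		select {
		case <-quit:
			return
		case r := <-reqs:
			// pretend to forward r.payload over a stream
			r.errCh <- nil
		}
	}
}

// send blocks until the worker has processed the payload, or the quit
// channel closes, exactly like the Send methods above.
func send(reqs chan<- request, quit <-chan struct{}, payload string) error {
	errCh := make(chan error)
	defer close(errCh)
	select {
	case <-quit:
		return errors.New("closed")
	case reqs <- request{payload: payload, errCh: errCh}:
	}
	return <-errCh
}

func main() {
	reqs := make(chan request)
	quit := make(chan struct{})
	go worker(reqs, quit)
	fmt.Println(send(reqs, quit, "hello")) // <nil>
	close(quit)
}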
-func (s *gRPCBrokerClientImpl) StartStream() error { - ctx, cancelFunc := context.WithCancel(context.Background()) - defer cancelFunc() - defer s.Close() - - stream, err := s.client.StartStream(ctx) - if err != nil { - return err - } - doneCh := stream.Context().Done() - - go func() { - for { - select { - case <-doneCh: - return - case <-s.quit: - return - case se := <-s.send: - err := stream.Send(se.i) - se.ch <- err - } - } - }() - - for { - i, err := stream.Recv() - if err != nil { - return err - } - select { - case <-doneCh: - return nil - case <-s.quit: - return nil - case s.recv <- i: - } - } - - return nil -} - -// Send is used by the GRPCBroker to pass connection information into the stream -// to the plugin. -func (s *gRPCBrokerClientImpl) Send(i *plugin.ConnInfo) error { - ch := make(chan error) - defer close(ch) - - select { - case <-s.quit: - return errors.New("broker closed") - case s.send <- &sendErr{ - i: i, - ch: ch, - }: - } - - return <-ch -} - -// Recv is used by the GRPCBroker to pass connection information that has been -// sent from the plugin to the broker. -func (s *gRPCBrokerClientImpl) Recv() (*plugin.ConnInfo, error) { - select { - case <-s.quit: - return nil, errors.New("broker closed") - case i := <-s.recv: - return i, nil - } -} - -// Close closes the quit channel, shutting down the stream. -func (s *gRPCBrokerClientImpl) Close() { - s.o.Do(func() { - close(s.quit) - }) -} - -// GRPCBroker is responsible for brokering connections by unique ID. -// -// It is used by plugins to create multiple gRPC connections and data -// streams between the plugin process and the host process. -// -// This allows a plugin to request a channel with a specific ID to connect to -// or accept a connection from, and the broker handles the details of -// holding these channels open while they're being negotiated. -// -// The Plugin interface has access to these for both Server and Client. -// The broker can be used by either (optionally) to reserve and connect to -// new streams. This is useful for complex args and return values, -// or anything else you might need a data stream for. -type GRPCBroker struct { - nextId uint32 - streamer streamer - tls *tls.Config - doneCh chan struct{} - o sync.Once - - clientStreams map[uint32]*gRPCBrokerPending - serverStreams map[uint32]*gRPCBrokerPending - - unixSocketCfg UnixSocketConfig - addrTranslator runner.AddrTranslator - - dialMutex sync.Mutex - - muxer grpcmux.GRPCMuxer - - sync.Mutex -} - -type gRPCBrokerPending struct { - ch chan *plugin.ConnInfo - doneCh chan struct{} - once sync.Once -} - -func newGRPCBroker(s streamer, tls *tls.Config, unixSocketCfg UnixSocketConfig, addrTranslator runner.AddrTranslator, muxer grpcmux.GRPCMuxer) *GRPCBroker { - return &GRPCBroker{ - streamer: s, - tls: tls, - doneCh: make(chan struct{}), - - clientStreams: make(map[uint32]*gRPCBrokerPending), - serverStreams: make(map[uint32]*gRPCBrokerPending), - muxer: muxer, - - unixSocketCfg: unixSocketCfg, - addrTranslator: addrTranslator, - } -} - -// Accept accepts a connection by ID. -// -// This should not be called multiple times with the same ID at one time. 
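[Editor's aside — the Accept implementation follows below.] As a usage note for the broker API described in the GRPCBroker doc comment: one side reserves an ID, serves on it, and sends the ID to the peer over the main plugin RPC; the peer then dials that ID. A hedged sketch using only NextId and AcceptAndServe, whose signatures appear in this file; serveCallback and register are illustrative names:

package example

import (
	plugin "github.com/hashicorp/go-plugin"
	"google.golang.org/grpc"
)

// serveCallback reserves a fresh broker stream ID and serves a gRPC
// server on it. The returned ID would be sent to the peer, which calls
// broker.Dial(id) to reach the registered services.
func serveCallback(broker *plugin.GRPCBroker, register func(*grpc.Server)) uint32 {
	id := broker.NextId()
	go broker.AcceptAndServe(id, func(opts []grpc.ServerOption) *grpc.Server {
		s := grpc.NewServer(opts...)
		register(s) // attach service implementations here
		return s
	})
	return id
}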
-func (b *GRPCBroker) Accept(id uint32) (net.Listener, error) { - if b.muxer.Enabled() { - p := b.getServerStream(id) - go func() { - err := b.listenForKnocks(id) - if err != nil { - log.Printf("[ERR]: error listening for knocks, id: %d, error: %s", id, err) - } - }() - - ln, err := b.muxer.Listener(id, p.doneCh) - if err != nil { - return nil, err - } - - ln = &rmListener{ - Listener: ln, - close: func() error { - // We could have multiple listeners on the same ID, so use sync.Once - // for closing doneCh to ensure we don't get a panic. - p.once.Do(func() { - close(p.doneCh) - }) - - b.Lock() - defer b.Unlock() - - // No longer need to listen for knocks once the listener is closed. - delete(b.serverStreams, id) - - return nil - }, - } - - return ln, nil - } - - listener, err := serverListener(b.unixSocketCfg) - if err != nil { - return nil, err - } - - advertiseNet := listener.Addr().Network() - advertiseAddr := listener.Addr().String() - if b.addrTranslator != nil { - advertiseNet, advertiseAddr, err = b.addrTranslator.HostToPlugin(advertiseNet, advertiseAddr) - if err != nil { - return nil, err - } - } - err = b.streamer.Send(&plugin.ConnInfo{ - ServiceId: id, - Network: advertiseNet, - Address: advertiseAddr, - }) - if err != nil { - return nil, err - } - - return listener, nil -} - -// AcceptAndServe is used to accept a specific stream ID and immediately -// serve a gRPC server on that stream ID. This is used to easily serve -// complex arguments. Each AcceptAndServe call opens a new listener socket and -// sends the connection info down the stream to the dialer. Since a new -// connection is opened every call, these calls should be used sparingly. -// Multiple gRPC server implementations can be registered to a single -// AcceptAndServe call. -func (b *GRPCBroker) AcceptAndServe(id uint32, newGRPCServer func([]grpc.ServerOption) *grpc.Server) { - ln, err := b.Accept(id) - if err != nil { - log.Printf("[ERR] plugin: plugin acceptAndServe error: %s", err) - return - } - defer ln.Close() - - var opts []grpc.ServerOption - if b.tls != nil { - opts = []grpc.ServerOption{grpc.Creds(credentials.NewTLS(b.tls))} - } - - server := newGRPCServer(opts) - - // Here we use a run group to close this goroutine if the server is shutdown - // or the broker is shutdown. - var g run.Group - { - // Serve on the listener, if shutting down call GracefulStop. - g.Add(func() error { - return server.Serve(ln) - }, func(err error) { - server.GracefulStop() - }) - } - { - // block on the closeCh or the doneCh. If we are shutting down close the - // closeCh. - closeCh := make(chan struct{}) - g.Add(func() error { - select { - case <-b.doneCh: - case <-closeCh: - } - return nil - }, func(err error) { - close(closeCh) - }) - } - - // Block until we are done - g.Run() -} - -// Close closes the stream and all servers. -func (b *GRPCBroker) Close() error { - b.streamer.Close() - b.o.Do(func() { - close(b.doneCh) - }) - return nil -} - -func (b *GRPCBroker) listenForKnocks(id uint32) error { - p := b.getServerStream(id) - for { - select { - case msg := <-p.ch: - // Shouldn't be possible. - if msg.ServiceId != id { - return fmt.Errorf("knock received with wrong service ID; expected %d but got %d", id, msg.ServiceId) - } - - // Also shouldn't be possible. - if msg.Knock == nil || !msg.Knock.Knock || msg.Knock.Ack { - return fmt.Errorf("knock received for service ID %d with incorrect values; knock=%+v", id, msg.Knock) - } - - // Successful knock, open the door for the given ID. 
- var ackError string - err := b.muxer.AcceptKnock(id) - if err != nil { - ackError = err.Error() - } - - // Send back an acknowledgement to allow the client to start dialling. - err = b.streamer.Send(&plugin.ConnInfo{ - ServiceId: id, - Knock: &plugin.ConnInfo_Knock{ - Knock: true, - Ack: true, - Error: ackError, - }, - }) - if err != nil { - return fmt.Errorf("error sending back knock acknowledgement: %w", err) - } - case <-p.doneCh: - return nil - } - } -} - -func (b *GRPCBroker) knock(id uint32) error { - // Send a knock. - err := b.streamer.Send(&plugin.ConnInfo{ - ServiceId: id, - Knock: &plugin.ConnInfo_Knock{ - Knock: true, - }, - }) - if err != nil { - return err - } - - // Wait for the ack. - p := b.getClientStream(id) - select { - case msg := <-p.ch: - if msg.ServiceId != id { - return fmt.Errorf("handshake failed for multiplexing on id %d; got response for %d", id, msg.ServiceId) - } - if msg.Knock == nil || !msg.Knock.Knock || !msg.Knock.Ack { - return fmt.Errorf("handshake failed for multiplexing on id %d; expected knock and ack, but got %+v", id, msg.Knock) - } - if msg.Knock.Error != "" { - return fmt.Errorf("failed to knock for id %d: %s", id, msg.Knock.Error) - } - case <-time.After(5 * time.Second): - return fmt.Errorf("timeout waiting for multiplexing knock handshake on id %d", id) - } - - return nil -} - -func (b *GRPCBroker) muxDial(id uint32) func(string, time.Duration) (net.Conn, error) { - return func(string, time.Duration) (net.Conn, error) { - b.dialMutex.Lock() - defer b.dialMutex.Unlock() - - // Tell the other side the listener ID it should give the next stream to. - err := b.knock(id) - if err != nil { - return nil, fmt.Errorf("failed to knock before dialling client: %w", err) - } - - conn, err := b.muxer.Dial() - if err != nil { - return nil, err - } - - return conn, nil - } -} - -// Dial opens a connection by ID. -func (b *GRPCBroker) Dial(id uint32) (conn *grpc.ClientConn, err error) { - if b.muxer.Enabled() { - return dialGRPCConn(b.tls, b.muxDial(id)) - } - - var c *plugin.ConnInfo - - // Open the stream - p := b.getClientStream(id) - select { - case c = <-p.ch: - close(p.doneCh) - case <-time.After(5 * time.Second): - return nil, fmt.Errorf("timeout waiting for connection info") - } - - network, address := c.Network, c.Address - if b.addrTranslator != nil { - network, address, err = b.addrTranslator.PluginToHost(network, address) - if err != nil { - return nil, err - } - } - - var addr net.Addr - switch network { - case "tcp": - addr, err = net.ResolveTCPAddr("tcp", address) - case "unix": - addr, err = net.ResolveUnixAddr("unix", address) - default: - err = fmt.Errorf("Unknown address type: %s", c.Address) - } - if err != nil { - return nil, err - } - - return dialGRPCConn(b.tls, netAddrDialer(addr)) -} - -// NextId returns a unique ID to use next. -// -// It is possible for very long-running plugin hosts to wrap this value, -// though it would require a very large amount of calls. In practice -// we've never seen it happen. -func (m *GRPCBroker) NextId() uint32 { - return atomic.AddUint32(&m.nextId, 1) -} - -// Run starts the brokering and should be executed in a goroutine, since it -// blocks forever, or until the session closes. -// -// Uses of GRPCBroker never need to call this. It is called internally by -// the plugin host/client. 
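[Editor's aside — Run itself follows below.] For orientation, the knock/listenForKnocks exchange above boils down to two messages. Shown as bare literals rather than a runnable program, since the generated ConnInfo types live in go-plugin's internal plugin package and cannot be imported outside this repository:

// client -> server: route the next multiplexed stream to this ID
knock := &plugin.ConnInfo{
	ServiceId: id,
	Knock:     &plugin.ConnInfo_Knock{Knock: true},
}

// server -> client: acknowledgement; Error carries any AcceptKnock failure
ack := &plugin.ConnInfo{
	ServiceId: id,
	Knock:     &plugin.ConnInfo_Knock{Knock: true, Ack: true},
}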
-func (m *GRPCBroker) Run() { - for { - msg, err := m.streamer.Recv() - if err != nil { - // Once we receive an error, just exit - break - } - - // Initialize the waiter - var p *gRPCBrokerPending - if msg.Knock != nil && msg.Knock.Knock && !msg.Knock.Ack { - p = m.getServerStream(msg.ServiceId) - // The server side doesn't close the channel immediately as it needs - // to continuously listen for knocks. - } else { - p = m.getClientStream(msg.ServiceId) - go m.timeoutWait(msg.ServiceId, p) - } - select { - case p.ch <- msg: - default: - } - } -} - -// getClientStream is a buffer to receive new connection info and knock acks -// by stream ID. -func (m *GRPCBroker) getClientStream(id uint32) *gRPCBrokerPending { - m.Lock() - defer m.Unlock() - - p, ok := m.clientStreams[id] - if ok { - return p - } - - m.clientStreams[id] = &gRPCBrokerPending{ - ch: make(chan *plugin.ConnInfo, 1), - doneCh: make(chan struct{}), - } - return m.clientStreams[id] -} - -// getServerStream is a buffer to receive knocks to a multiplexed stream ID -// that its side is listening on. Not used unless multiplexing is enabled. -func (m *GRPCBroker) getServerStream(id uint32) *gRPCBrokerPending { - m.Lock() - defer m.Unlock() - - p, ok := m.serverStreams[id] - if ok { - return p - } - - m.serverStreams[id] = &gRPCBrokerPending{ - ch: make(chan *plugin.ConnInfo, 1), - doneCh: make(chan struct{}), - } - return m.serverStreams[id] -} - -func (m *GRPCBroker) timeoutWait(id uint32, p *gRPCBrokerPending) { - // Wait for the stream to either be picked up and connected, or - // for a timeout. - select { - case <-p.doneCh: - case <-time.After(5 * time.Second): - } - - m.Lock() - defer m.Unlock() - - // Delete the stream so no one else can grab it - delete(m.clientStreams, id) -} diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_client.go b/vendor/github.com/hashicorp/go-plugin/grpc_client.go deleted file mode 100644 index 627649d8394..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/grpc_client.go +++ /dev/null @@ -1,134 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package plugin - -import ( - "context" - "crypto/tls" - "fmt" - "math" - "net" - "time" - - "github.com/hashicorp/go-plugin/internal/plugin" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/health/grpc_health_v1" -) - -func dialGRPCConn(tls *tls.Config, dialer func(string, time.Duration) (net.Conn, error), dialOpts ...grpc.DialOption) (*grpc.ClientConn, error) { - // Build dialing options. - opts := make([]grpc.DialOption, 0) - - // We use a custom dialer so that we can connect over unix domain sockets. - opts = append(opts, grpc.WithDialer(dialer)) - - // Fail right away - opts = append(opts, grpc.FailOnNonTempDialError(true)) - - // If we have no TLS configuration set, we need to explicitly tell grpc - // that we're connecting with an insecure connection. - if tls == nil { - opts = append(opts, grpc.WithInsecure()) - } else { - opts = append(opts, grpc.WithTransportCredentials( - credentials.NewTLS(tls))) - } - - opts = append(opts, - grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(math.MaxInt32)), - grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(math.MaxInt32))) - - // Add our custom options if we have any - opts = append(opts, dialOpts...) - - // Connect. Note the first parameter is unused because we use a custom - // dialer that has the state to see the address. - conn, err := grpc.Dial("unused", opts...) 
- if err != nil { - return nil, err - } - - return conn, nil -} - -// newGRPCClient creates a new GRPCClient. The Client argument is expected -// to be successfully started already with a lock held. -func newGRPCClient(doneCtx context.Context, c *Client) (*GRPCClient, error) { - conn, err := dialGRPCConn(c.config.TLSConfig, c.dialer, c.config.GRPCDialOptions...) - if err != nil { - return nil, err - } - - muxer, err := c.getGRPCMuxer(c.address) - if err != nil { - return nil, err - } - - // Start the broker. - brokerGRPCClient := newGRPCBrokerClient(conn) - broker := newGRPCBroker(brokerGRPCClient, c.config.TLSConfig, c.unixSocketCfg, c.runner, muxer) - go broker.Run() - go brokerGRPCClient.StartStream() - - // Start the stdio client - stdioClient, err := newGRPCStdioClient(doneCtx, c.logger.Named("stdio"), conn) - if err != nil { - return nil, err - } - go stdioClient.Run(c.config.SyncStdout, c.config.SyncStderr) - - cl := &GRPCClient{ - Conn: conn, - Plugins: c.config.Plugins, - doneCtx: doneCtx, - broker: broker, - controller: plugin.NewGRPCControllerClient(conn), - } - - return cl, nil -} - -// GRPCClient connects to a GRPCServer over gRPC to dispense plugin types. -type GRPCClient struct { - Conn *grpc.ClientConn - Plugins map[string]Plugin - - doneCtx context.Context - broker *GRPCBroker - - controller plugin.GRPCControllerClient -} - -// ClientProtocol impl. -func (c *GRPCClient) Close() error { - c.broker.Close() - c.controller.Shutdown(c.doneCtx, &plugin.Empty{}) - return c.Conn.Close() -} - -// ClientProtocol impl. -func (c *GRPCClient) Dispense(name string) (interface{}, error) { - raw, ok := c.Plugins[name] - if !ok { - return nil, fmt.Errorf("unknown plugin type: %s", name) - } - - p, ok := raw.(GRPCPlugin) - if !ok { - return nil, fmt.Errorf("plugin %q doesn't support gRPC", name) - } - - return p.GRPCClient(c.doneCtx, c.broker, c.Conn) -} - -// ClientProtocol impl. -func (c *GRPCClient) Ping() error { - client := grpc_health_v1.NewHealthClient(c.Conn) - _, err := client.Check(context.Background(), &grpc_health_v1.HealthCheckRequest{ - Service: GRPCServiceName, - }) - - return err -} diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_controller.go b/vendor/github.com/hashicorp/go-plugin/grpc_controller.go deleted file mode 100644 index 2085356cd34..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/grpc_controller.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package plugin - -import ( - "context" - - "github.com/hashicorp/go-plugin/internal/plugin" -) - -// GRPCControllerServer handles shutdown calls to terminate the server when the -// plugin client is closed. -type grpcControllerServer struct { - server *GRPCServer -} - -// Shutdown stops the grpc server. It first will attempt a graceful stop, then a -// full stop on the server. -func (s *grpcControllerServer) Shutdown(ctx context.Context, _ *plugin.Empty) (*plugin.Empty, error) { - resp := &plugin.Empty{} - - // TODO: figure out why GracefullStop doesn't work. - s.server.Stop() - return resp, nil -} diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_server.go b/vendor/github.com/hashicorp/go-plugin/grpc_server.go deleted file mode 100644 index a5f40c7f06e..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/grpc_server.go +++ /dev/null @@ -1,167 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package plugin - -import ( - "bytes" - "crypto/tls" - "encoding/json" - "fmt" - "io" - "net" - - hclog "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-plugin/internal/grpcmux" - "github.com/hashicorp/go-plugin/internal/plugin" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/health" - "google.golang.org/grpc/health/grpc_health_v1" - "google.golang.org/grpc/reflection" -) - -// GRPCServiceName is the name of the service that the health check should -// return as passing. -const GRPCServiceName = "plugin" - -// DefaultGRPCServer can be used with the "GRPCServer" field for Server -// as a default factory method to create a gRPC server with no extra options. -func DefaultGRPCServer(opts []grpc.ServerOption) *grpc.Server { - return grpc.NewServer(opts...) -} - -// GRPCServer is a ServerType implementation that serves plugins over -// gRPC. This allows plugins to easily be written for other languages. -// -// The GRPCServer outputs a custom configuration as a base64-encoded -// JSON structure represented by the GRPCServerConfig config structure. -type GRPCServer struct { - // Plugins are the list of plugins to serve. - Plugins map[string]Plugin - - // Server is the actual server that will accept connections. This - // will be used for plugin registration as well. - Server func([]grpc.ServerOption) *grpc.Server - - // TLS should be the TLS configuration if available. If this is nil, - // the connection will not have transport security. - TLS *tls.Config - - // DoneCh is the channel that is closed when this server has exited. - DoneCh chan struct{} - - // Stdout/StderrLis are the readers for stdout/stderr that will be copied - // to the stdout/stderr connection that is output. - Stdout io.Reader - Stderr io.Reader - - config GRPCServerConfig - server *grpc.Server - broker *GRPCBroker - stdioServer *grpcStdioServer - - logger hclog.Logger - - muxer *grpcmux.GRPCServerMuxer -} - -// ServerProtocol impl. -func (s *GRPCServer) Init() error { - // Create our server - var opts []grpc.ServerOption - if s.TLS != nil { - opts = append(opts, grpc.Creds(credentials.NewTLS(s.TLS))) - } - s.server = s.Server(opts) - - // Register the health service - healthCheck := health.NewServer() - healthCheck.SetServingStatus( - GRPCServiceName, grpc_health_v1.HealthCheckResponse_SERVING) - grpc_health_v1.RegisterHealthServer(s.server, healthCheck) - - // Register the reflection service - reflection.Register(s.server) - - // Register the broker service - brokerServer := newGRPCBrokerServer() - plugin.RegisterGRPCBrokerServer(s.server, brokerServer) - s.broker = newGRPCBroker(brokerServer, s.TLS, unixSocketConfigFromEnv(), nil, s.muxer) - go s.broker.Run() - - // Register the controller - controllerServer := &grpcControllerServer{server: s} - plugin.RegisterGRPCControllerServer(s.server, controllerServer) - - // Register the stdio service - s.stdioServer = newGRPCStdioServer(s.logger, s.Stdout, s.Stderr) - plugin.RegisterGRPCStdioServer(s.server, s.stdioServer) - - // Register all our plugins onto the gRPC server. - for k, raw := range s.Plugins { - p, ok := raw.(GRPCPlugin) - if !ok { - return fmt.Errorf("%q is not a GRPC-compatible plugin", k) - } - - if err := p.GRPCServer(s.broker, s.server); err != nil { - return fmt.Errorf("error registering %q: %s", k, err) - } - } - - return nil -} - -// Stop calls Stop on the underlying grpc.Server and Close on the underlying -// grpc.Broker if present. 
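[Editor's aside — the Stop and GracefulStop implementations follow below.] As a usage note for the Init wiring above: a plugin binary typically hands a GRPCServer factory to plugin.Serve, which performs the handshake and serves until shutdown. A minimal sketch with illustrative handshake values; CounterPlugin is a hypothetical GRPCPlugin implementation, not part of this file:

package main

import (
	plugin "github.com/hashicorp/go-plugin"
)

func main() {
	plugin.Serve(&plugin.ServeConfig{
		// Host and plugin must agree on these; the values are illustrative.
		HandshakeConfig: plugin.HandshakeConfig{
			ProtocolVersion:  1,
			MagicCookieKey:   "EXAMPLE_PLUGIN",
			MagicCookieValue: "hello",
		},
		Plugins: map[string]plugin.Plugin{
			"counter": &CounterPlugin{}, // hypothetical GRPCPlugin implementation
		},
		// DefaultGRPCServer builds a grpc.Server with no extra options.
		GRPCServer: plugin.DefaultGRPCServer,
	})
}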
-func (s *GRPCServer) Stop() { - s.server.Stop() - - if s.broker != nil { - s.broker.Close() - s.broker = nil - } -} - -// GracefulStop calls GracefulStop on the underlying grpc.Server and Close on -// the underlying grpc.Broker if present. -func (s *GRPCServer) GracefulStop() { - s.server.GracefulStop() - - if s.broker != nil { - s.broker.Close() - s.broker = nil - } -} - -// Config is the GRPCServerConfig encoded as JSON then base64. -func (s *GRPCServer) Config() string { - // Create a buffer that will contain our final contents - var buf bytes.Buffer - - // Wrap the base64 encoding with JSON encoding. - if err := json.NewEncoder(&buf).Encode(s.config); err != nil { - // We panic since ths shouldn't happen under any scenario. We - // carefully control the structure being encoded here and it should - // always be successful. - panic(err) - } - - return buf.String() -} - -func (s *GRPCServer) Serve(lis net.Listener) { - defer close(s.DoneCh) - err := s.server.Serve(lis) - if err != nil { - s.logger.Error("grpc server", "error", err) - } -} - -// GRPCServerConfig is the extra configuration passed along for consumers -// to facilitate using GRPC plugins. -type GRPCServerConfig struct { - StdoutAddr string `json:"stdout_addr"` - StderrAddr string `json:"stderr_addr"` -} diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_stdio.go b/vendor/github.com/hashicorp/go-plugin/grpc_stdio.go deleted file mode 100644 index ae06c116313..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/grpc_stdio.go +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package plugin - -import ( - "bufio" - "bytes" - "context" - "io" - - empty "github.com/golang/protobuf/ptypes/empty" - hclog "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-plugin/internal/plugin" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -// grpcStdioBuffer is the buffer size we try to fill when sending a chunk of -// stdio data. This is currently 1 KB for no reason other than that seems like -// enough (stdio data isn't that common) and is fairly low. -const grpcStdioBuffer = 1 * 1024 - -// grpcStdioServer implements the Stdio service and streams stdiout/stderr. -type grpcStdioServer struct { - stdoutCh <-chan []byte - stderrCh <-chan []byte -} - -// newGRPCStdioServer creates a new grpcStdioServer and starts the stream -// copying for the given out and err readers. -// -// This must only be called ONCE per srcOut, srcErr. -func newGRPCStdioServer(log hclog.Logger, srcOut, srcErr io.Reader) *grpcStdioServer { - stdoutCh := make(chan []byte) - stderrCh := make(chan []byte) - - // Begin copying the streams - go copyChan(log, stdoutCh, srcOut) - go copyChan(log, stderrCh, srcErr) - - // Construct our server - return &grpcStdioServer{ - stdoutCh: stdoutCh, - stderrCh: stderrCh, - } -} - -// StreamStdio streams our stdout/err as the response. -func (s *grpcStdioServer) StreamStdio( - _ *empty.Empty, - srv plugin.GRPCStdio_StreamStdioServer, -) error { - // Share the same data value between runs. Sending this over the wire - // marshals it so we can reuse this. 
- var data plugin.StdioData - - for { - // Read our data - select { - case data.Data = <-s.stdoutCh: - data.Channel = plugin.StdioData_STDOUT - - case data.Data = <-s.stderrCh: - data.Channel = plugin.StdioData_STDERR - - case <-srv.Context().Done(): - return nil - } - - // Not sure if this is possible, but if we somehow got here and - // we didn't populate any data at all, then just continue. - if len(data.Data) == 0 { - continue - } - - // Send our data to the client. - if err := srv.Send(&data); err != nil { - return err - } - } -} - -// grpcStdioClient wraps the stdio service as a client to copy -// the stdio data to output writers. -type grpcStdioClient struct { - log hclog.Logger - stdioClient plugin.GRPCStdio_StreamStdioClient -} - -// newGRPCStdioClient creates a grpcStdioClient. This will perform the -// initial connection to the stdio service. If the stdio service is unavailable -// then this will be a no-op. This allows this to work without error for -// plugins that don't support this. -func newGRPCStdioClient( - ctx context.Context, - log hclog.Logger, - conn *grpc.ClientConn, -) (*grpcStdioClient, error) { - client := plugin.NewGRPCStdioClient(conn) - - // Connect immediately to the endpoint - stdioClient, err := client.StreamStdio(ctx, &empty.Empty{}) - - // If we get an Unavailable or Unimplemented error, this means that the plugin isn't - // updated and linking to the latest version of go-plugin that supports - // this. We fall back to the previous behavior of just not syncing anything. - if status.Code(err) == codes.Unavailable || status.Code(err) == codes.Unimplemented { - log.Warn("stdio service not available, stdout/stderr syncing unavailable") - stdioClient = nil - err = nil - } - if err != nil { - return nil, err - } - - return &grpcStdioClient{ - log: log, - stdioClient: stdioClient, - }, nil -} - -// Run starts the loop that receives stdio data and writes it to the given -// writers. This blocks and should be run in a goroutine. -func (c *grpcStdioClient) Run(stdout, stderr io.Writer) { - // This will be nil if stdio is not supported by the plugin - if c.stdioClient == nil { - c.log.Warn("stdio service unavailable, run will do nothing") - return - } - - for { - c.log.Trace("waiting for stdio data") - data, err := c.stdioClient.Recv() - if err != nil { - if err == io.EOF || - status.Code(err) == codes.Unavailable || - status.Code(err) == codes.Canceled || - status.Code(err) == codes.Unimplemented || - err == context.Canceled { - c.log.Debug("received EOF, stopping recv loop", "err", err) - return - } - - c.log.Error("error receiving data", "err", err) - return - } - - // Determine our output writer based on channel - var w io.Writer - switch data.Channel { - case plugin.StdioData_STDOUT: - w = stdout - - case plugin.StdioData_STDERR: - w = stderr - - default: - c.log.Warn("unknown channel, dropping", "channel", data.Channel) - continue - } - - // Write! In the event of an error we just continue. - if c.log.IsTrace() { - c.log.Trace("received data", "channel", data.Channel.String(), "len", len(data.Data)) - } - if _, err := io.Copy(w, bytes.NewReader(data.Data)); err != nil { - c.log.Error("failed to copy all bytes", "err", err) - } - } -} - -// copyChan copies an io.Reader into a channel. -func copyChan(log hclog.Logger, dst chan<- []byte, src io.Reader) { - bufsrc := bufio.NewReader(src) - - for { - // Make our data buffer. We allocate a new one per loop iteration - // so that we can send it over the channel. 
- var data [1024]byte - - // Read the data, this will block until data is available - n, err := bufsrc.Read(data[:]) - - // We have to check if we have data BEFORE err != nil. The bufio - // docs guarantee n == 0 on EOF but its better to be safe here. - if n > 0 { - // We have data! Send it on the channel. This will block if there - // is no reader on the other side. We expect that go-plugin will - // connect immediately to the stdio server to drain this so we want - // this block to happen for backpressure. - dst <- data[:n] - } - - // If we hit EOF we're done copying - if err == io.EOF { - log.Debug("stdio EOF, exiting copy loop") - return - } - - // Any other error we just exit the loop. We don't expect there to - // be errors since our use case for this is reading/writing from - // a in-process pipe (os.Pipe). - if err != nil { - log.Warn("error copying stdio data, stopping copy", "err", err) - return - } - } -} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/addr_translator.go b/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/addr_translator.go deleted file mode 100644 index 1854d2dd531..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/addr_translator.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package cmdrunner - -// addrTranslator implements stateless identity functions, as the host and plugin -// run in the same context wrt Unix and network addresses. -type addrTranslator struct{} - -func (*addrTranslator) PluginToHost(pluginNet, pluginAddr string) (string, string, error) { - return pluginNet, pluginAddr, nil -} - -func (*addrTranslator) HostToPlugin(hostNet, hostAddr string) (string, string, error) { - return hostNet, hostAddr, nil -} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/cmd_reattach.go b/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/cmd_reattach.go deleted file mode 100644 index dce1a86a88e..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/cmd_reattach.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package cmdrunner - -import ( - "context" - "fmt" - "net" - "os" - - "github.com/hashicorp/go-plugin/runner" -) - -// ReattachFunc returns a function that allows reattaching to a plugin running -// as a plain process. The process may or may not be a child process. -func ReattachFunc(pid int, addr net.Addr) runner.ReattachFunc { - return func() (runner.AttachedRunner, error) { - p, err := os.FindProcess(pid) - if err != nil { - // On Unix systems, FindProcess never returns an error. - // On Windows, for non-existent pids it returns: - // os.SyscallError - 'OpenProcess: the paremter is incorrect' - return nil, ErrProcessNotFound - } - - // Attempt to connect to the addr since on Unix systems FindProcess - // doesn't actually return an error if it can't find the process. - conn, err := net.Dial(addr.Network(), addr.String()) - if err != nil { - p.Kill() - return nil, ErrProcessNotFound - } - conn.Close() - - return &CmdAttachedRunner{ - pid: pid, - process: p, - }, nil - } -} - -// CmdAttachedRunner is mostly a subset of CmdRunner, except the Wait function -// does not assume the process is a child of the host process, and so uses a -// different implementation to wait on the process. 
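[Editor's aside — the CmdAttachedRunner type follows below.] A hedged sketch of the reattach flow these types support: the original host persists client.ReattachConfig(), and a later host hands it back via ClientConfig.Reattach instead of launching a new process. The handshake and pluginMap parameters stand in for whatever values the original host used:

package example

import (
	plugin "github.com/hashicorp/go-plugin"
)

// reattach builds a client that connects to an already-running plugin
// process instead of starting one. saved would typically come from the
// original client's ReattachConfig(), persisted across host restarts.
func reattach(saved *plugin.ReattachConfig, handshake plugin.HandshakeConfig, pluginMap map[string]plugin.Plugin) *plugin.Client {
	return plugin.NewClient(&plugin.ClientConfig{
		HandshakeConfig: handshake,
		Plugins:         pluginMap,
		Reattach:        saved, // skips Cmd entirely and dials saved.Addr
	})
}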
-type CmdAttachedRunner struct { - pid int - process *os.Process - - addrTranslator -} - -func (c *CmdAttachedRunner) Wait(_ context.Context) error { - return pidWait(c.pid) -} - -func (c *CmdAttachedRunner) Kill(_ context.Context) error { - return c.process.Kill() -} - -func (c *CmdAttachedRunner) ID() string { - return fmt.Sprintf("%d", c.pid) -} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/cmd_runner.go b/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/cmd_runner.go deleted file mode 100644 index b26fea928e2..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/cmd_runner.go +++ /dev/null @@ -1,129 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package cmdrunner - -import ( - "context" - "errors" - "fmt" - "io" - "os" - "os/exec" - - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-plugin/runner" -) - -var ( - _ runner.Runner = (*CmdRunner)(nil) - - // ErrProcessNotFound is returned when a client is instantiated to - // reattach to an existing process and it isn't found. - ErrProcessNotFound = errors.New("Reattachment process not found") -) - -const unrecognizedRemotePluginMessage = `This usually means - the plugin was not compiled for this architecture, - the plugin is missing dynamic-link libraries necessary to run, - the plugin is not executable by this process due to file permissions, or - the plugin failed to negotiate the initial go-plugin protocol handshake -%s` - -// CmdRunner implements the runner.Runner interface. It mostly just passes through -// to exec.Cmd methods. -type CmdRunner struct { - logger hclog.Logger - cmd *exec.Cmd - - stdout io.ReadCloser - stderr io.ReadCloser - - // Cmd info is persisted early, since the process information will be removed - // after Kill is called. - path string - pid int - - addrTranslator -} - -// NewCmdRunner returns an implementation of runner.Runner for running a plugin -// as a subprocess. It must be passed a cmd that hasn't yet been started. -func NewCmdRunner(logger hclog.Logger, cmd *exec.Cmd) (*CmdRunner, error) { - stdout, err := cmd.StdoutPipe() - if err != nil { - return nil, err - } - - stderr, err := cmd.StderrPipe() - if err != nil { - return nil, err - } - - return &CmdRunner{ - logger: logger, - cmd: cmd, - stdout: stdout, - stderr: stderr, - path: cmd.Path, - }, nil -} - -func (c *CmdRunner) Start(_ context.Context) error { - c.logger.Debug("starting plugin", "path", c.cmd.Path, "args", c.cmd.Args) - err := c.cmd.Start() - if err != nil { - return err - } - - c.pid = c.cmd.Process.Pid - c.logger.Debug("plugin started", "path", c.path, "pid", c.pid) - return nil -} - -func (c *CmdRunner) Wait(_ context.Context) error { - return c.cmd.Wait() -} - -func (c *CmdRunner) Kill(_ context.Context) error { - if c.cmd.Process != nil { - err := c.cmd.Process.Kill() - // Swallow ErrProcessDone, we support calling Kill multiple times. - if !errors.Is(err, os.ErrProcessDone) { - return err - } - return nil - } - - return nil -} - -func (c *CmdRunner) Stdout() io.ReadCloser { - return c.stdout -} - -func (c *CmdRunner) Stderr() io.ReadCloser { - return c.stderr -} - -func (c *CmdRunner) Name() string { - return c.path -} - -func (c *CmdRunner) ID() string { - return fmt.Sprintf("%d", c.pid) -} - -// peTypes is a list of Portable Executable (PE) machine types from https://learn.microsoft.com/en-us/windows/win32/debug/pe-format -// mapped to GOARCH types. It is not comprehensive, and only includes machine types that Go supports. 
-var peTypes = map[uint16]string{ - 0x14c: "386", - 0x1c0: "arm", - 0x6264: "loong64", - 0x8664: "amd64", - 0xaa64: "arm64", -} - -func (c *CmdRunner) Diagnose(_ context.Context) string { - return fmt.Sprintf(unrecognizedRemotePluginMessage, additionalNotesAboutCommand(c.cmd.Path)) -} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/notes_unix.go b/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/notes_unix.go deleted file mode 100644 index ce04cfebc67..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/notes_unix.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -//go:build !windows -// +build !windows - -package cmdrunner - -import ( - "debug/elf" - "debug/macho" - "debug/pe" - "fmt" - "os" - "os/user" - "runtime" - "strconv" - "syscall" -) - -// additionalNotesAboutCommand tries to get additional information about a command that might help diagnose -// why it won't run correctly. It runs as a best effort only. -func additionalNotesAboutCommand(path string) string { - notes := "" - stat, err := os.Stat(path) - if err != nil { - return notes - } - - notes += "\nAdditional notes about plugin:\n" - notes += fmt.Sprintf(" Path: %s\n", path) - notes += fmt.Sprintf(" Mode: %s\n", stat.Mode()) - statT, ok := stat.Sys().(*syscall.Stat_t) - if ok { - currentUsername := "?" - if u, err := user.LookupId(strconv.FormatUint(uint64(os.Getuid()), 10)); err == nil { - currentUsername = u.Username - } - currentGroup := "?" - if g, err := user.LookupGroupId(strconv.FormatUint(uint64(os.Getgid()), 10)); err == nil { - currentGroup = g.Name - } - username := "?" - if u, err := user.LookupId(strconv.FormatUint(uint64(statT.Uid), 10)); err == nil { - username = u.Username - } - group := "?" - if g, err := user.LookupGroupId(strconv.FormatUint(uint64(statT.Gid), 10)); err == nil { - group = g.Name - } - notes += fmt.Sprintf(" Owner: %d [%s] (current: %d [%s])\n", statT.Uid, username, os.Getuid(), currentUsername) - notes += fmt.Sprintf(" Group: %d [%s] (current: %d [%s])\n", statT.Gid, group, os.Getgid(), currentGroup) - } - - if elfFile, err := elf.Open(path); err == nil { - defer elfFile.Close() - notes += fmt.Sprintf(" ELF architecture: %s (current architecture: %s)\n", elfFile.Machine, runtime.GOARCH) - } else if machoFile, err := macho.Open(path); err == nil { - defer machoFile.Close() - notes += fmt.Sprintf(" MachO architecture: %s (current architecture: %s)\n", machoFile.Cpu, runtime.GOARCH) - } else if peFile, err := pe.Open(path); err == nil { - defer peFile.Close() - machine, ok := peTypes[peFile.Machine] - if !ok { - machine = "unknown" - } - notes += fmt.Sprintf(" PE architecture: %s (current architecture: %s)\n", machine, runtime.GOARCH) - } - return notes -} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/notes_windows.go b/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/notes_windows.go deleted file mode 100644 index 39c51dd1e08..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/notes_windows.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -//go:build windows -// +build windows - -package cmdrunner - -import ( - "debug/elf" - "debug/macho" - "debug/pe" - "fmt" - "os" - "runtime" -) - -// additionalNotesAboutCommand tries to get additional information about a command that might help diagnose -// why it won't run correctly. It runs as a best effort only. 
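[Editor's aside — the Windows variant of additionalNotesAboutCommand follows below.] The core of the diagnosis above is just comparing the binary's machine type against the host's GOARCH. A stripped-down, stdlib-only illustration for the ELF case (run as: program <path-to-binary>):

package main

import (
	"debug/elf"
	"fmt"
	"os"
	"runtime"
)

// Prints a binary's ELF machine type next to the host's GOARCH, the
// same comparison the diagnosis above uses to surface arch mismatches.
func main() {
	f, err := elf.Open(os.Args[1])
	if err != nil {
		fmt.Fprintln(os.Stderr, "not an ELF binary:", err)
		os.Exit(1)
	}
	defer f.Close()
	fmt.Printf("ELF machine: %s (host architecture: %s)\n", f.Machine, runtime.GOARCH)
}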
-func additionalNotesAboutCommand(path string) string { - notes := "" - stat, err := os.Stat(path) - if err != nil { - return notes - } - - notes += "\nAdditional notes about plugin:\n" - notes += fmt.Sprintf(" Path: %s\n", path) - notes += fmt.Sprintf(" Mode: %s\n", stat.Mode()) - - if elfFile, err := elf.Open(path); err == nil { - defer elfFile.Close() - notes += fmt.Sprintf(" ELF architecture: %s (current architecture: %s)\n", elfFile.Machine, runtime.GOARCH) - } else if machoFile, err := macho.Open(path); err == nil { - defer machoFile.Close() - notes += fmt.Sprintf(" MachO architecture: %s (current architecture: %s)\n", machoFile.Cpu, runtime.GOARCH) - } else if peFile, err := pe.Open(path); err == nil { - defer peFile.Close() - machine, ok := peTypes[peFile.Machine] - if !ok { - machine = "unknown" - } - notes += fmt.Sprintf(" PE architecture: %s (current architecture: %s)\n", machine, runtime.GOARCH) - } - return notes -} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/process.go b/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/process.go deleted file mode 100644 index 6c34dc7747f..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/process.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package cmdrunner - -import "time" - -// pidAlive checks whether a pid is alive. -func pidAlive(pid int) bool { - return _pidAlive(pid) -} - -// pidWait blocks for a process to exit. -func pidWait(pid int) error { - ticker := time.NewTicker(1 * time.Second) - defer ticker.Stop() - - for range ticker.C { - if !pidAlive(pid) { - break - } - } - - return nil -} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/process_posix.go b/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/process_posix.go deleted file mode 100644 index bf3fc5b683e..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/process_posix.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -//go:build !windows -// +build !windows - -package cmdrunner - -import ( - "os" - "syscall" -) - -// _pidAlive tests whether a process is alive or not by sending it Signal 0, -// since Go otherwise has no way to test this. -func _pidAlive(pid int) bool { - proc, err := os.FindProcess(pid) - if err == nil { - err = proc.Signal(syscall.Signal(0)) - } - - return err == nil -} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/process_windows.go b/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/process_windows.go deleted file mode 100644 index 6c39df28f7e..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/process_windows.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package cmdrunner - -import ( - "syscall" -) - -const ( - // Weird name but matches the MSDN docs - exit_STILL_ACTIVE = 259 - - processDesiredAccess = syscall.STANDARD_RIGHTS_READ | - syscall.PROCESS_QUERY_INFORMATION | - syscall.SYNCHRONIZE -) - -// _pidAlive tests whether a process is alive or not -func _pidAlive(pid int) bool { - h, err := syscall.OpenProcess(processDesiredAccess, false, uint32(pid)) - if err != nil { - return false - } - defer syscall.CloseHandle(h) - - var ec uint32 - if e := syscall.GetExitCodeProcess(h, &ec); e != nil { - return false - } - - return ec == exit_STILL_ACTIVE -} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/blocked_client_listener.go b/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/blocked_client_listener.go deleted file mode 100644 index e8a3a152a13..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/blocked_client_listener.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package grpcmux - -import ( - "io" - "net" - - "github.com/hashicorp/yamux" -) - -var _ net.Listener = (*blockedClientListener)(nil) - -// blockedClientListener accepts connections for a specific gRPC broker stream -// ID on the client (host) side of the connection. -type blockedClientListener struct { - session *yamux.Session - waitCh chan struct{} - doneCh <-chan struct{} -} - -func newBlockedClientListener(session *yamux.Session, doneCh <-chan struct{}) *blockedClientListener { - return &blockedClientListener{ - waitCh: make(chan struct{}, 1), - doneCh: doneCh, - session: session, - } -} - -func (b *blockedClientListener) Accept() (net.Conn, error) { - select { - case <-b.waitCh: - return b.session.Accept() - case <-b.doneCh: - return nil, io.EOF - } -} - -func (b *blockedClientListener) Addr() net.Addr { - return b.session.Addr() -} - -func (b *blockedClientListener) Close() error { - // We don't close the session, the client muxer is responsible for that. - return nil -} - -func (b *blockedClientListener) unblock() { - b.waitCh <- struct{}{} -} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/blocked_server_listener.go b/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/blocked_server_listener.go deleted file mode 100644 index 0edb2c05d26..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/blocked_server_listener.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package grpcmux - -import ( - "io" - "net" -) - -var _ net.Listener = (*blockedServerListener)(nil) - -// blockedServerListener accepts connections for a specific gRPC broker stream -// ID on the server (plugin) side of the connection. 
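[Editor's aside — the blockedServerListener type follows below.] The gating idea shared by both blocked listeners can be shown in isolation: Accept parks on a channel until a knock routes a connection to this stream ID. A minimal sketch under an illustrative name (gatedListener is ours):

package example

import (
	"io"
	"net"
)

// gatedListener parks Accept on a gate channel, mirroring how the
// blocked listeners hold connections until unblocked by a knock.
type gatedListener struct {
	inner  net.Listener
	gate   chan struct{} // one token per permitted Accept
	doneCh <-chan struct{}
}

func (g *gatedListener) Accept() (net.Conn, error) {
	select {
	case <-g.gate:
		return g.inner.Accept()
	case <-g.doneCh:
		return nil, io.EOF
	}
}

func (g *gatedListener) Addr() net.Addr { return g.inner.Addr() }
func (g *gatedListener) Close() error   { return nil } // owner closes inner

// unblock permits exactly one pending or future Accept to proceed.
func (g *gatedListener) unblock() { g.gate <- struct{}{} }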
-type blockedServerListener struct { - addr net.Addr - acceptCh chan acceptResult - doneCh <-chan struct{} -} - -type acceptResult struct { - conn net.Conn - err error -} - -func newBlockedServerListener(addr net.Addr, doneCh <-chan struct{}) *blockedServerListener { - return &blockedServerListener{ - addr: addr, - acceptCh: make(chan acceptResult), - doneCh: doneCh, - } -} - -func (b *blockedServerListener) Accept() (net.Conn, error) { - select { - case accept := <-b.acceptCh: - return accept.conn, accept.err - case <-b.doneCh: - return nil, io.EOF - } -} - -func (b *blockedServerListener) Addr() net.Addr { - return b.addr -} - -func (b *blockedServerListener) Close() error { - return nil -} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/grpc_client_muxer.go b/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/grpc_client_muxer.go deleted file mode 100644 index b203ba467b2..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/grpc_client_muxer.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package grpcmux - -import ( - "fmt" - "net" - "sync" - - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/yamux" -) - -var _ GRPCMuxer = (*GRPCClientMuxer)(nil) - -// GRPCClientMuxer implements the client (host) side of the gRPC broker's -// GRPCMuxer interface for multiplexing multiple gRPC broker connections over -// a single net.Conn. -// -// The client dials the initial net.Conn eagerly, and creates a yamux.Session -// as the implementation for multiplexing any additional connections. -// -// Each net.Listener returned from Listener will block until the client receives -// a knock that matches its gRPC broker stream ID. There is no default listener -// on the client, as it is a client for the gRPC broker's control services. (See -// GRPCServerMuxer for more details). -type GRPCClientMuxer struct { - logger hclog.Logger - session *yamux.Session - - acceptMutex sync.Mutex - acceptListeners map[uint32]*blockedClientListener -} - -func NewGRPCClientMuxer(logger hclog.Logger, addr net.Addr) (*GRPCClientMuxer, error) { - // Eagerly establish the underlying connection as early as possible. 
- logger.Debug("making new client mux initial connection", "addr", addr) - conn, err := net.Dial(addr.Network(), addr.String()) - if err != nil { - return nil, err - } - if tcpConn, ok := conn.(*net.TCPConn); ok { - // Make sure to set keep alive so that the connection doesn't die - _ = tcpConn.SetKeepAlive(true) - } - - cfg := yamux.DefaultConfig() - cfg.Logger = logger.Named("yamux").StandardLogger(&hclog.StandardLoggerOptions{ - InferLevels: true, - }) - cfg.LogOutput = nil - sess, err := yamux.Client(conn, cfg) - if err != nil { - return nil, err - } - - logger.Debug("client muxer connected", "addr", addr) - m := &GRPCClientMuxer{ - logger: logger, - session: sess, - acceptListeners: make(map[uint32]*blockedClientListener), - } - - return m, nil -} - -func (m *GRPCClientMuxer) Enabled() bool { - return m != nil -} - -func (m *GRPCClientMuxer) Listener(id uint32, doneCh <-chan struct{}) (net.Listener, error) { - ln := newBlockedClientListener(m.session, doneCh) - - m.acceptMutex.Lock() - m.acceptListeners[id] = ln - m.acceptMutex.Unlock() - - return ln, nil -} - -func (m *GRPCClientMuxer) AcceptKnock(id uint32) error { - m.acceptMutex.Lock() - defer m.acceptMutex.Unlock() - - ln, ok := m.acceptListeners[id] - if !ok { - return fmt.Errorf("no listener for id %d", id) - } - ln.unblock() - return nil -} - -func (m *GRPCClientMuxer) Dial() (net.Conn, error) { - stream, err := m.session.Open() - if err != nil { - return nil, fmt.Errorf("error dialling new client stream: %w", err) - } - - return stream, nil -} - -func (m *GRPCClientMuxer) Close() error { - return m.session.Close() -} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/grpc_muxer.go b/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/grpc_muxer.go deleted file mode 100644 index c52aaf553e9..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/grpc_muxer.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package grpcmux - -import ( - "net" -) - -// GRPCMuxer enables multiple implementations of net.Listener to accept -// connections over a single "main" multiplexed net.Conn, and dial multiple -// client connections over the same multiplexed net.Conn. -// -// The first multiplexed connection is used to serve the gRPC broker's own -// control services: plugin.GRPCBroker, plugin.GRPCController, plugin.GRPCStdio. -// -// Clients must "knock" before dialling, to tell the server side that the -// next net.Conn should be accepted onto a specific stream ID. The knock is a -// bidirectional streaming message on the plugin.GRPCBroker service. -type GRPCMuxer interface { - // Enabled determines whether multiplexing should be used. It saves users - // of the interface from having to compare an interface with nil, which - // is a bit awkward to do correctly. - Enabled() bool - - // Listener returns a multiplexed listener that will wait until AcceptKnock - // is called with a matching ID before its Accept function returns. - Listener(id uint32, doneCh <-chan struct{}) (net.Listener, error) - - // AcceptKnock unblocks the listener with the matching ID, and returns an - // error if it hasn't been created yet. - AcceptKnock(id uint32) error - - // Dial makes a new multiplexed client connection. To dial a specific ID, - // a knock must be sent first. - Dial() (net.Conn, error) - - // Close closes connections and releases any resources associated with the - // muxer. 
- Close() error -} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/grpc_server_muxer.go b/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/grpc_server_muxer.go deleted file mode 100644 index 27696ee769d..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/grpc_server_muxer.go +++ /dev/null @@ -1,190 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package grpcmux - -import ( - "errors" - "fmt" - "net" - "sync" - "time" - - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/yamux" -) - -var _ GRPCMuxer = (*GRPCServerMuxer)(nil) -var _ net.Listener = (*GRPCServerMuxer)(nil) - -// GRPCServerMuxer implements the server (plugin) side of the gRPC broker's -// GRPCMuxer interface for multiplexing multiple gRPC broker connections over -// a single net.Conn. -// -// The server side needs a listener to serve the gRPC broker's control services, -// which includes the service we will receive knocks on. That means we always -// accept the first connection onto a "default" main listener, and if we accept -// any further connections without receiving a knock first, they are also given -// to the default listener. -// -// When creating additional multiplexed listeners for specific stream IDs, we -// can't control the order in which gRPC servers will call Accept() on each -// listener, but we do need to control which gRPC server accepts which connection. -// As such, each multiplexed listener blocks waiting on a channel. It will be -// unblocked when a knock is received for the matching stream ID. -type GRPCServerMuxer struct { - addr net.Addr - logger hclog.Logger - - sessionErrCh chan error - sess *yamux.Session - - knockCh chan uint32 - - acceptMutex sync.Mutex - acceptChannels map[uint32]chan acceptResult -} - -func NewGRPCServerMuxer(logger hclog.Logger, ln net.Listener) *GRPCServerMuxer { - m := &GRPCServerMuxer{ - addr: ln.Addr(), - logger: logger, - - sessionErrCh: make(chan error), - - knockCh: make(chan uint32, 1), - acceptChannels: make(map[uint32]chan acceptResult), - } - - go m.acceptSession(ln) - - return m -} - -// acceptSessionAndMuxAccept is responsible for establishing the yamux session, -// and then kicking off the acceptLoop function. -func (m *GRPCServerMuxer) acceptSession(ln net.Listener) { - defer close(m.sessionErrCh) - - m.logger.Debug("accepting initial connection", "addr", m.addr) - conn, err := ln.Accept() - if err != nil { - m.sessionErrCh <- err - return - } - - m.logger.Debug("initial server connection accepted", "addr", m.addr) - cfg := yamux.DefaultConfig() - cfg.Logger = m.logger.Named("yamux").StandardLogger(&hclog.StandardLoggerOptions{ - InferLevels: true, - }) - cfg.LogOutput = nil - m.sess, err = yamux.Server(conn, cfg) - if err != nil { - m.sessionErrCh <- err - return - } -} - -func (m *GRPCServerMuxer) session() (*yamux.Session, error) { - select { - case err := <-m.sessionErrCh: - if err != nil { - return nil, err - } - case <-time.After(5 * time.Second): - return nil, errors.New("timed out waiting for connection to be established") - } - - // Should never happen. - if m.sess == nil { - return nil, errors.New("no connection established and no error received") - } - - return m.sess, nil -} - -// Accept accepts all incoming connections and routes them to the correct -// stream ID based on the most recent knock received. 
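The Accept implementation continues below; its core decision is a non-blocking poll of knockCh for every connection the yamux session yields: a pending knock diverts the conn to the brokered listener with that ID, otherwise the conn goes to the default listener. A standalone sketch of just that routing step (names are illustrative):

    package main

    import "fmt"

    // route mirrors the select inside GRPCServerMuxer.Accept.
    func route(knockCh chan uint32) string {
        select {
        case id := <-knockCh:
            return fmt.Sprintf("brokered listener %d", id)
        default:
            return "default listener"
        }
    }

    func main() {
        knockCh := make(chan uint32, 1) // capacity 1, as in NewGRPCServerMuxer

        fmt.Println(route(knockCh)) // "default listener": no knock pending

        knockCh <- 7                // AcceptKnock(7) ran before the conn arrived
        fmt.Println(route(knockCh)) // "brokered listener 7"
    }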
-func (m *GRPCServerMuxer) Accept() (net.Conn, error) { - session, err := m.session() - if err != nil { - return nil, fmt.Errorf("error establishing yamux session: %w", err) - } - - for { - conn, acceptErr := session.Accept() - - select { - case id := <-m.knockCh: - m.acceptMutex.Lock() - acceptCh, ok := m.acceptChannels[id] - m.acceptMutex.Unlock() - - if !ok { - if conn != nil { - _ = conn.Close() - } - return nil, fmt.Errorf("received knock on ID %d that doesn't have a listener", id) - } - m.logger.Debug("sending conn to brokered listener", "id", id) - acceptCh <- acceptResult{ - conn: conn, - err: acceptErr, - } - default: - m.logger.Debug("sending conn to default listener") - return conn, acceptErr - } - } -} - -func (m *GRPCServerMuxer) Addr() net.Addr { - return m.addr -} - -func (m *GRPCServerMuxer) Close() error { - session, err := m.session() - if err != nil { - return err - } - - return session.Close() -} - -func (m *GRPCServerMuxer) Enabled() bool { - return m != nil -} - -func (m *GRPCServerMuxer) Listener(id uint32, doneCh <-chan struct{}) (net.Listener, error) { - sess, err := m.session() - if err != nil { - return nil, err - } - - ln := newBlockedServerListener(sess.Addr(), doneCh) - m.acceptMutex.Lock() - m.acceptChannels[id] = ln.acceptCh - m.acceptMutex.Unlock() - - return ln, nil -} - -func (m *GRPCServerMuxer) Dial() (net.Conn, error) { - sess, err := m.session() - if err != nil { - return nil, err - } - - stream, err := sess.OpenStream() - if err != nil { - return nil, fmt.Errorf("error dialling new server stream: %w", err) - } - - return stream, nil -} - -func (m *GRPCServerMuxer) AcceptKnock(id uint32) error { - m.knockCh <- id - return nil -} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go deleted file mode 100644 index acc6dc9c77f..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go +++ /dev/null @@ -1,264 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.31.0 -// protoc (unknown) -// source: internal/plugin/grpc_broker.proto - -package plugin - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. 
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type ConnInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ServiceId uint32 `protobuf:"varint,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"` - Network string `protobuf:"bytes,2,opt,name=network,proto3" json:"network,omitempty"` - Address string `protobuf:"bytes,3,opt,name=address,proto3" json:"address,omitempty"` - Knock *ConnInfo_Knock `protobuf:"bytes,4,opt,name=knock,proto3" json:"knock,omitempty"` -} - -func (x *ConnInfo) Reset() { - *x = ConnInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_plugin_grpc_broker_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ConnInfo) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ConnInfo) ProtoMessage() {} - -func (x *ConnInfo) ProtoReflect() protoreflect.Message { - mi := &file_internal_plugin_grpc_broker_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ConnInfo.ProtoReflect.Descriptor instead. -func (*ConnInfo) Descriptor() ([]byte, []int) { - return file_internal_plugin_grpc_broker_proto_rawDescGZIP(), []int{0} -} - -func (x *ConnInfo) GetServiceId() uint32 { - if x != nil { - return x.ServiceId - } - return 0 -} - -func (x *ConnInfo) GetNetwork() string { - if x != nil { - return x.Network - } - return "" -} - -func (x *ConnInfo) GetAddress() string { - if x != nil { - return x.Address - } - return "" -} - -func (x *ConnInfo) GetKnock() *ConnInfo_Knock { - if x != nil { - return x.Knock - } - return nil -} - -type ConnInfo_Knock struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Knock bool `protobuf:"varint,1,opt,name=knock,proto3" json:"knock,omitempty"` - Ack bool `protobuf:"varint,2,opt,name=ack,proto3" json:"ack,omitempty"` - Error string `protobuf:"bytes,3,opt,name=error,proto3" json:"error,omitempty"` -} - -func (x *ConnInfo_Knock) Reset() { - *x = ConnInfo_Knock{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_plugin_grpc_broker_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ConnInfo_Knock) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ConnInfo_Knock) ProtoMessage() {} - -func (x *ConnInfo_Knock) ProtoReflect() protoreflect.Message { - mi := &file_internal_plugin_grpc_broker_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ConnInfo_Knock.ProtoReflect.Descriptor instead. 
-func (*ConnInfo_Knock) Descriptor() ([]byte, []int) { - return file_internal_plugin_grpc_broker_proto_rawDescGZIP(), []int{0, 0} -} - -func (x *ConnInfo_Knock) GetKnock() bool { - if x != nil { - return x.Knock - } - return false -} - -func (x *ConnInfo_Knock) GetAck() bool { - if x != nil { - return x.Ack - } - return false -} - -func (x *ConnInfo_Knock) GetError() string { - if x != nil { - return x.Error - } - return "" -} - -var File_internal_plugin_grpc_broker_proto protoreflect.FileDescriptor - -var file_internal_plugin_grpc_broker_proto_rawDesc = []byte{ - 0x0a, 0x21, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, - 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x62, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x22, 0xd2, 0x01, 0x0a, 0x08, - 0x43, 0x6f, 0x6e, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x6e, 0x65, 0x74, 0x77, 0x6f, - 0x72, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, - 0x6b, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x2c, 0x0a, 0x05, 0x6b, - 0x6e, 0x6f, 0x63, 0x6b, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x70, 0x6c, 0x75, - 0x67, 0x69, 0x6e, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4b, 0x6e, 0x6f, - 0x63, 0x6b, 0x52, 0x05, 0x6b, 0x6e, 0x6f, 0x63, 0x6b, 0x1a, 0x45, 0x0a, 0x05, 0x4b, 0x6e, 0x6f, - 0x63, 0x6b, 0x12, 0x14, 0x0a, 0x05, 0x6b, 0x6e, 0x6f, 0x63, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x05, 0x6b, 0x6e, 0x6f, 0x63, 0x6b, 0x12, 0x10, 0x0a, 0x03, 0x61, 0x63, 0x6b, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x61, 0x63, 0x6b, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, - 0x72, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, - 0x32, 0x43, 0x0a, 0x0a, 0x47, 0x52, 0x50, 0x43, 0x42, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x12, 0x35, - 0x0a, 0x0b, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x10, 0x2e, - 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x1a, - 0x10, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x49, 0x6e, 0x66, - 0x6f, 0x28, 0x01, 0x30, 0x01, 0x42, 0x0a, 0x5a, 0x08, 0x2e, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, - 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_internal_plugin_grpc_broker_proto_rawDescOnce sync.Once - file_internal_plugin_grpc_broker_proto_rawDescData = file_internal_plugin_grpc_broker_proto_rawDesc -) - -func file_internal_plugin_grpc_broker_proto_rawDescGZIP() []byte { - file_internal_plugin_grpc_broker_proto_rawDescOnce.Do(func() { - file_internal_plugin_grpc_broker_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_plugin_grpc_broker_proto_rawDescData) - }) - return file_internal_plugin_grpc_broker_proto_rawDescData -} - -var file_internal_plugin_grpc_broker_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_internal_plugin_grpc_broker_proto_goTypes = []interface{}{ - (*ConnInfo)(nil), // 0: plugin.ConnInfo - (*ConnInfo_Knock)(nil), // 1: plugin.ConnInfo.Knock -} -var file_internal_plugin_grpc_broker_proto_depIdxs = []int32{ - 1, // 0: 
plugin.ConnInfo.knock:type_name -> plugin.ConnInfo.Knock - 0, // 1: plugin.GRPCBroker.StartStream:input_type -> plugin.ConnInfo - 0, // 2: plugin.GRPCBroker.StartStream:output_type -> plugin.ConnInfo - 2, // [2:3] is the sub-list for method output_type - 1, // [1:2] is the sub-list for method input_type - 1, // [1:1] is the sub-list for extension type_name - 1, // [1:1] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name -} - -func init() { file_internal_plugin_grpc_broker_proto_init() } -func file_internal_plugin_grpc_broker_proto_init() { - if File_internal_plugin_grpc_broker_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_internal_plugin_grpc_broker_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ConnInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_internal_plugin_grpc_broker_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ConnInfo_Knock); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_internal_plugin_grpc_broker_proto_rawDesc, - NumEnums: 0, - NumMessages: 2, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_internal_plugin_grpc_broker_proto_goTypes, - DependencyIndexes: file_internal_plugin_grpc_broker_proto_depIdxs, - MessageInfos: file_internal_plugin_grpc_broker_proto_msgTypes, - }.Build() - File_internal_plugin_grpc_broker_proto = out.File - file_internal_plugin_grpc_broker_proto_rawDesc = nil - file_internal_plugin_grpc_broker_proto_goTypes = nil - file_internal_plugin_grpc_broker_proto_depIdxs = nil -} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.proto b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.proto deleted file mode 100644 index c92cd645cb6..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.proto +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -syntax = "proto3"; -package plugin; -option go_package = "./plugin"; - -message ConnInfo { - uint32 service_id = 1; - string network = 2; - string address = 3; - message Knock { - bool knock = 1; - bool ack = 2; - string error = 3; - } - Knock knock = 4; -} - -service GRPCBroker { - rpc StartStream(stream ConnInfo) returns (stream ConnInfo); -} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker_grpc.pb.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker_grpc.pb.go deleted file mode 100644 index 1b0f80705d8..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker_grpc.pb.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
-// versions: -// - protoc-gen-go-grpc v1.3.0 -// - protoc (unknown) -// source: internal/plugin/grpc_broker.proto - -package plugin - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 - -const ( - GRPCBroker_StartStream_FullMethodName = "/plugin.GRPCBroker/StartStream" -) - -// GRPCBrokerClient is the client API for GRPCBroker service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -type GRPCBrokerClient interface { - StartStream(ctx context.Context, opts ...grpc.CallOption) (GRPCBroker_StartStreamClient, error) -} - -type gRPCBrokerClient struct { - cc grpc.ClientConnInterface -} - -func NewGRPCBrokerClient(cc grpc.ClientConnInterface) GRPCBrokerClient { - return &gRPCBrokerClient{cc} -} - -func (c *gRPCBrokerClient) StartStream(ctx context.Context, opts ...grpc.CallOption) (GRPCBroker_StartStreamClient, error) { - stream, err := c.cc.NewStream(ctx, &GRPCBroker_ServiceDesc.Streams[0], GRPCBroker_StartStream_FullMethodName, opts...) - if err != nil { - return nil, err - } - x := &gRPCBrokerStartStreamClient{stream} - return x, nil -} - -type GRPCBroker_StartStreamClient interface { - Send(*ConnInfo) error - Recv() (*ConnInfo, error) - grpc.ClientStream -} - -type gRPCBrokerStartStreamClient struct { - grpc.ClientStream -} - -func (x *gRPCBrokerStartStreamClient) Send(m *ConnInfo) error { - return x.ClientStream.SendMsg(m) -} - -func (x *gRPCBrokerStartStreamClient) Recv() (*ConnInfo, error) { - m := new(ConnInfo) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// GRPCBrokerServer is the server API for GRPCBroker service. -// All implementations should embed UnimplementedGRPCBrokerServer -// for forward compatibility -type GRPCBrokerServer interface { - StartStream(GRPCBroker_StartStreamServer) error -} - -// UnimplementedGRPCBrokerServer should be embedded to have forward compatible implementations. -type UnimplementedGRPCBrokerServer struct { -} - -func (UnimplementedGRPCBrokerServer) StartStream(GRPCBroker_StartStreamServer) error { - return status.Errorf(codes.Unimplemented, "method StartStream not implemented") -} - -// UnsafeGRPCBrokerServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to GRPCBrokerServer will -// result in compilation errors. 
-type UnsafeGRPCBrokerServer interface { - mustEmbedUnimplementedGRPCBrokerServer() -} - -func RegisterGRPCBrokerServer(s grpc.ServiceRegistrar, srv GRPCBrokerServer) { - s.RegisterService(&GRPCBroker_ServiceDesc, srv) -} - -func _GRPCBroker_StartStream_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(GRPCBrokerServer).StartStream(&gRPCBrokerStartStreamServer{stream}) -} - -type GRPCBroker_StartStreamServer interface { - Send(*ConnInfo) error - Recv() (*ConnInfo, error) - grpc.ServerStream -} - -type gRPCBrokerStartStreamServer struct { - grpc.ServerStream -} - -func (x *gRPCBrokerStartStreamServer) Send(m *ConnInfo) error { - return x.ServerStream.SendMsg(m) -} - -func (x *gRPCBrokerStartStreamServer) Recv() (*ConnInfo, error) { - m := new(ConnInfo) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// GRPCBroker_ServiceDesc is the grpc.ServiceDesc for GRPCBroker service. -// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var GRPCBroker_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "plugin.GRPCBroker", - HandlerType: (*GRPCBrokerServer)(nil), - Methods: []grpc.MethodDesc{}, - Streams: []grpc.StreamDesc{ - { - StreamName: "StartStream", - Handler: _GRPCBroker_StartStream_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "internal/plugin/grpc_broker.proto", -} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go deleted file mode 100644 index 8ca48e0d92d..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.31.0 -// protoc (unknown) -// source: internal/plugin/grpc_controller.proto - -package plugin - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type Empty struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *Empty) Reset() { - *x = Empty{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_plugin_grpc_controller_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Empty) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Empty) ProtoMessage() {} - -func (x *Empty) ProtoReflect() protoreflect.Message { - mi := &file_internal_plugin_grpc_controller_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Empty.ProtoReflect.Descriptor instead. 
-func (*Empty) Descriptor() ([]byte, []int) { - return file_internal_plugin_grpc_controller_proto_rawDescGZIP(), []int{0} -} - -var File_internal_plugin_grpc_controller_proto protoreflect.FileDescriptor - -var file_internal_plugin_grpc_controller_proto_rawDesc = []byte{ - 0x0a, 0x25, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, - 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, - 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x22, - 0x07, 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x32, 0x3a, 0x0a, 0x0e, 0x47, 0x52, 0x50, 0x43, - 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x12, 0x28, 0x0a, 0x08, 0x53, 0x68, - 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x12, 0x0d, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, - 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x0d, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x45, - 0x6d, 0x70, 0x74, 0x79, 0x42, 0x0a, 0x5a, 0x08, 0x2e, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_internal_plugin_grpc_controller_proto_rawDescOnce sync.Once - file_internal_plugin_grpc_controller_proto_rawDescData = file_internal_plugin_grpc_controller_proto_rawDesc -) - -func file_internal_plugin_grpc_controller_proto_rawDescGZIP() []byte { - file_internal_plugin_grpc_controller_proto_rawDescOnce.Do(func() { - file_internal_plugin_grpc_controller_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_plugin_grpc_controller_proto_rawDescData) - }) - return file_internal_plugin_grpc_controller_proto_rawDescData -} - -var file_internal_plugin_grpc_controller_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_internal_plugin_grpc_controller_proto_goTypes = []interface{}{ - (*Empty)(nil), // 0: plugin.Empty -} -var file_internal_plugin_grpc_controller_proto_depIdxs = []int32{ - 0, // 0: plugin.GRPCController.Shutdown:input_type -> plugin.Empty - 0, // 1: plugin.GRPCController.Shutdown:output_type -> plugin.Empty - 1, // [1:2] is the sub-list for method output_type - 0, // [0:1] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_internal_plugin_grpc_controller_proto_init() } -func file_internal_plugin_grpc_controller_proto_init() { - if File_internal_plugin_grpc_controller_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_internal_plugin_grpc_controller_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Empty); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_internal_plugin_grpc_controller_proto_rawDesc, - NumEnums: 0, - NumMessages: 1, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_internal_plugin_grpc_controller_proto_goTypes, - DependencyIndexes: file_internal_plugin_grpc_controller_proto_depIdxs, - MessageInfos: file_internal_plugin_grpc_controller_proto_msgTypes, - }.Build() - File_internal_plugin_grpc_controller_proto = out.File - file_internal_plugin_grpc_controller_proto_rawDesc = nil - file_internal_plugin_grpc_controller_proto_goTypes = nil - file_internal_plugin_grpc_controller_proto_depIdxs = nil 
-} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.proto b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.proto deleted file mode 100644 index 2755fa638b5..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.proto +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -syntax = "proto3"; -package plugin; -option go_package = "./plugin"; - -message Empty { -} - -// The GRPCController is responsible for telling the plugin server to shutdown. -service GRPCController { - rpc Shutdown(Empty) returns (Empty); -} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller_grpc.pb.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller_grpc.pb.go deleted file mode 100644 index 427611aa00f..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller_grpc.pb.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. -// versions: -// - protoc-gen-go-grpc v1.3.0 -// - protoc (unknown) -// source: internal/plugin/grpc_controller.proto - -package plugin - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 - -const ( - GRPCController_Shutdown_FullMethodName = "/plugin.GRPCController/Shutdown" -) - -// GRPCControllerClient is the client API for GRPCController service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -type GRPCControllerClient interface { - Shutdown(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) -} - -type gRPCControllerClient struct { - cc grpc.ClientConnInterface -} - -func NewGRPCControllerClient(cc grpc.ClientConnInterface) GRPCControllerClient { - return &gRPCControllerClient{cc} -} - -func (c *gRPCControllerClient) Shutdown(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { - out := new(Empty) - err := c.cc.Invoke(ctx, GRPCController_Shutdown_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// GRPCControllerServer is the server API for GRPCController service. -// All implementations should embed UnimplementedGRPCControllerServer -// for forward compatibility -type GRPCControllerServer interface { - Shutdown(context.Context, *Empty) (*Empty, error) -} - -// UnimplementedGRPCControllerServer should be embedded to have forward compatible implementations. -type UnimplementedGRPCControllerServer struct { -} - -func (UnimplementedGRPCControllerServer) Shutdown(context.Context, *Empty) (*Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Shutdown not implemented") -} - -// UnsafeGRPCControllerServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to GRPCControllerServer will -// result in compilation errors. 
-type UnsafeGRPCControllerServer interface { - mustEmbedUnimplementedGRPCControllerServer() -} - -func RegisterGRPCControllerServer(s grpc.ServiceRegistrar, srv GRPCControllerServer) { - s.RegisterService(&GRPCController_ServiceDesc, srv) -} - -func _GRPCController_Shutdown_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(GRPCControllerServer).Shutdown(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: GRPCController_Shutdown_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(GRPCControllerServer).Shutdown(ctx, req.(*Empty)) - } - return interceptor(ctx, in, info, handler) -} - -// GRPCController_ServiceDesc is the grpc.ServiceDesc for GRPCController service. -// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var GRPCController_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "plugin.GRPCController", - HandlerType: (*GRPCControllerServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Shutdown", - Handler: _GRPCController_Shutdown_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "internal/plugin/grpc_controller.proto", -} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.pb.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.pb.go deleted file mode 100644 index 139cbb4a90b..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.pb.go +++ /dev/null @@ -1,225 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.31.0 -// protoc (unknown) -// source: internal/plugin/grpc_stdio.proto - -package plugin - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - emptypb "google.golang.org/protobuf/types/known/emptypb" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type StdioData_Channel int32 - -const ( - StdioData_INVALID StdioData_Channel = 0 - StdioData_STDOUT StdioData_Channel = 1 - StdioData_STDERR StdioData_Channel = 2 -) - -// Enum value maps for StdioData_Channel. 
-var ( - StdioData_Channel_name = map[int32]string{ - 0: "INVALID", - 1: "STDOUT", - 2: "STDERR", - } - StdioData_Channel_value = map[string]int32{ - "INVALID": 0, - "STDOUT": 1, - "STDERR": 2, - } -) - -func (x StdioData_Channel) Enum() *StdioData_Channel { - p := new(StdioData_Channel) - *p = x - return p -} - -func (x StdioData_Channel) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (StdioData_Channel) Descriptor() protoreflect.EnumDescriptor { - return file_internal_plugin_grpc_stdio_proto_enumTypes[0].Descriptor() -} - -func (StdioData_Channel) Type() protoreflect.EnumType { - return &file_internal_plugin_grpc_stdio_proto_enumTypes[0] -} - -func (x StdioData_Channel) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use StdioData_Channel.Descriptor instead. -func (StdioData_Channel) EnumDescriptor() ([]byte, []int) { - return file_internal_plugin_grpc_stdio_proto_rawDescGZIP(), []int{0, 0} -} - -// StdioData is a single chunk of stdout or stderr data that is streamed -// from GRPCStdio. -type StdioData struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Channel StdioData_Channel `protobuf:"varint,1,opt,name=channel,proto3,enum=plugin.StdioData_Channel" json:"channel,omitempty"` - Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` -} - -func (x *StdioData) Reset() { - *x = StdioData{} - if protoimpl.UnsafeEnabled { - mi := &file_internal_plugin_grpc_stdio_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *StdioData) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StdioData) ProtoMessage() {} - -func (x *StdioData) ProtoReflect() protoreflect.Message { - mi := &file_internal_plugin_grpc_stdio_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StdioData.ProtoReflect.Descriptor instead. 
-func (*StdioData) Descriptor() ([]byte, []int) { - return file_internal_plugin_grpc_stdio_proto_rawDescGZIP(), []int{0} -} - -func (x *StdioData) GetChannel() StdioData_Channel { - if x != nil { - return x.Channel - } - return StdioData_INVALID -} - -func (x *StdioData) GetData() []byte { - if x != nil { - return x.Data - } - return nil -} - -var File_internal_plugin_grpc_stdio_proto protoreflect.FileDescriptor - -var file_internal_plugin_grpc_stdio_proto_rawDesc = []byte{ - 0x0a, 0x20, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, - 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x73, 0x74, 0x64, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x12, 0x06, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, - 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x84, 0x01, 0x0a, 0x09, 0x53, 0x74, 0x64, 0x69, - 0x6f, 0x44, 0x61, 0x74, 0x61, 0x12, 0x33, 0x0a, 0x07, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, - 0x53, 0x74, 0x64, 0x69, 0x6f, 0x44, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, - 0x6c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, - 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x2e, - 0x0a, 0x07, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x56, - 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x44, 0x4f, 0x55, 0x54, - 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x44, 0x45, 0x52, 0x52, 0x10, 0x02, 0x32, 0x47, - 0x0a, 0x09, 0x47, 0x52, 0x50, 0x43, 0x53, 0x74, 0x64, 0x69, 0x6f, 0x12, 0x3a, 0x0a, 0x0b, 0x53, - 0x74, 0x72, 0x65, 0x61, 0x6d, 0x53, 0x74, 0x64, 0x69, 0x6f, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, - 0x74, 0x79, 0x1a, 0x11, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x53, 0x74, 0x64, 0x69, - 0x6f, 0x44, 0x61, 0x74, 0x61, 0x30, 0x01, 0x42, 0x0a, 0x5a, 0x08, 0x2e, 0x2f, 0x70, 0x6c, 0x75, - 0x67, 0x69, 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_internal_plugin_grpc_stdio_proto_rawDescOnce sync.Once - file_internal_plugin_grpc_stdio_proto_rawDescData = file_internal_plugin_grpc_stdio_proto_rawDesc -) - -func file_internal_plugin_grpc_stdio_proto_rawDescGZIP() []byte { - file_internal_plugin_grpc_stdio_proto_rawDescOnce.Do(func() { - file_internal_plugin_grpc_stdio_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_plugin_grpc_stdio_proto_rawDescData) - }) - return file_internal_plugin_grpc_stdio_proto_rawDescData -} - -var file_internal_plugin_grpc_stdio_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_internal_plugin_grpc_stdio_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_internal_plugin_grpc_stdio_proto_goTypes = []interface{}{ - (StdioData_Channel)(0), // 0: plugin.StdioData.Channel - (*StdioData)(nil), // 1: plugin.StdioData - (*emptypb.Empty)(nil), // 2: google.protobuf.Empty -} -var file_internal_plugin_grpc_stdio_proto_depIdxs = []int32{ - 0, // 0: plugin.StdioData.channel:type_name -> plugin.StdioData.Channel - 2, // 1: plugin.GRPCStdio.StreamStdio:input_type -> google.protobuf.Empty - 1, // 2: plugin.GRPCStdio.StreamStdio:output_type -> plugin.StdioData - 2, // [2:3] is the sub-list for method output_type 
- 1, // [1:2] is the sub-list for method input_type - 1, // [1:1] is the sub-list for extension type_name - 1, // [1:1] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name -} - -func init() { file_internal_plugin_grpc_stdio_proto_init() } -func file_internal_plugin_grpc_stdio_proto_init() { - if File_internal_plugin_grpc_stdio_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_internal_plugin_grpc_stdio_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StdioData); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_internal_plugin_grpc_stdio_proto_rawDesc, - NumEnums: 1, - NumMessages: 1, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_internal_plugin_grpc_stdio_proto_goTypes, - DependencyIndexes: file_internal_plugin_grpc_stdio_proto_depIdxs, - EnumInfos: file_internal_plugin_grpc_stdio_proto_enumTypes, - MessageInfos: file_internal_plugin_grpc_stdio_proto_msgTypes, - }.Build() - File_internal_plugin_grpc_stdio_proto = out.File - file_internal_plugin_grpc_stdio_proto_rawDesc = nil - file_internal_plugin_grpc_stdio_proto_goTypes = nil - file_internal_plugin_grpc_stdio_proto_depIdxs = nil -} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.proto b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.proto deleted file mode 100644 index f48ac76c978..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.proto +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -syntax = "proto3"; -package plugin; -option go_package = "./plugin"; - -import "google/protobuf/empty.proto"; - -// GRPCStdio is a service that is automatically run by the plugin process -// to stream any stdout/err data so that it can be mirrored on the plugin -// host side. -service GRPCStdio { - // StreamStdio returns a stream that contains all the stdout/stderr. - // This RPC endpoint must only be called ONCE. Once stdio data is consumed - // it is not sent again. - // - // Callers should connect early to prevent blocking on the plugin process. - rpc StreamStdio(google.protobuf.Empty) returns (stream StdioData); -} - -// StdioData is a single chunk of stdout or stderr data that is streamed -// from GRPCStdio. -message StdioData { - enum Channel { - INVALID = 0; - STDOUT = 1; - STDERR = 2; - } - - Channel channel = 1; - bytes data = 2; -} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio_grpc.pb.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio_grpc.pb.go deleted file mode 100644 index f82b1503502..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio_grpc.pb.go +++ /dev/null @@ -1,148 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
-// versions: -// - protoc-gen-go-grpc v1.3.0 -// - protoc (unknown) -// source: internal/plugin/grpc_stdio.proto - -package plugin - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - emptypb "google.golang.org/protobuf/types/known/emptypb" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 - -const ( - GRPCStdio_StreamStdio_FullMethodName = "/plugin.GRPCStdio/StreamStdio" -) - -// GRPCStdioClient is the client API for GRPCStdio service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -type GRPCStdioClient interface { - // StreamStdio returns a stream that contains all the stdout/stderr. - // This RPC endpoint must only be called ONCE. Once stdio data is consumed - // it is not sent again. - // - // Callers should connect early to prevent blocking on the plugin process. - StreamStdio(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (GRPCStdio_StreamStdioClient, error) -} - -type gRPCStdioClient struct { - cc grpc.ClientConnInterface -} - -func NewGRPCStdioClient(cc grpc.ClientConnInterface) GRPCStdioClient { - return &gRPCStdioClient{cc} -} - -func (c *gRPCStdioClient) StreamStdio(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (GRPCStdio_StreamStdioClient, error) { - stream, err := c.cc.NewStream(ctx, &GRPCStdio_ServiceDesc.Streams[0], GRPCStdio_StreamStdio_FullMethodName, opts...) - if err != nil { - return nil, err - } - x := &gRPCStdioStreamStdioClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type GRPCStdio_StreamStdioClient interface { - Recv() (*StdioData, error) - grpc.ClientStream -} - -type gRPCStdioStreamStdioClient struct { - grpc.ClientStream -} - -func (x *gRPCStdioStreamStdioClient) Recv() (*StdioData, error) { - m := new(StdioData) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// GRPCStdioServer is the server API for GRPCStdio service. -// All implementations should embed UnimplementedGRPCStdioServer -// for forward compatibility -type GRPCStdioServer interface { - // StreamStdio returns a stream that contains all the stdout/stderr. - // This RPC endpoint must only be called ONCE. Once stdio data is consumed - // it is not sent again. - // - // Callers should connect early to prevent blocking on the plugin process. - StreamStdio(*emptypb.Empty, GRPCStdio_StreamStdioServer) error -} - -// UnimplementedGRPCStdioServer should be embedded to have forward compatible implementations. -type UnimplementedGRPCStdioServer struct { -} - -func (UnimplementedGRPCStdioServer) StreamStdio(*emptypb.Empty, GRPCStdio_StreamStdioServer) error { - return status.Errorf(codes.Unimplemented, "method StreamStdio not implemented") -} - -// UnsafeGRPCStdioServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to GRPCStdioServer will -// result in compilation errors. 
-type UnsafeGRPCStdioServer interface { - mustEmbedUnimplementedGRPCStdioServer() -} - -func RegisterGRPCStdioServer(s grpc.ServiceRegistrar, srv GRPCStdioServer) { - s.RegisterService(&GRPCStdio_ServiceDesc, srv) -} - -func _GRPCStdio_StreamStdio_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(emptypb.Empty) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(GRPCStdioServer).StreamStdio(m, &gRPCStdioStreamStdioServer{stream}) -} - -type GRPCStdio_StreamStdioServer interface { - Send(*StdioData) error - grpc.ServerStream -} - -type gRPCStdioStreamStdioServer struct { - grpc.ServerStream -} - -func (x *gRPCStdioStreamStdioServer) Send(m *StdioData) error { - return x.ServerStream.SendMsg(m) -} - -// GRPCStdio_ServiceDesc is the grpc.ServiceDesc for GRPCStdio service. -// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var GRPCStdio_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "plugin.GRPCStdio", - HandlerType: (*GRPCStdioServer)(nil), - Methods: []grpc.MethodDesc{}, - Streams: []grpc.StreamDesc{ - { - StreamName: "StreamStdio", - Handler: _GRPCStdio_StreamStdio_Handler, - ServerStreams: true, - }, - }, - Metadata: "internal/plugin/grpc_stdio.proto", -} diff --git a/vendor/github.com/hashicorp/go-plugin/log_entry.go b/vendor/github.com/hashicorp/go-plugin/log_entry.go deleted file mode 100644 index ab963d56b54..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/log_entry.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package plugin - -import ( - "encoding/json" - "time" -) - -// logEntry is the JSON payload that gets sent to Stderr from the plugin to the host -type logEntry struct { - Message string `json:"@message"` - Level string `json:"@level"` - Timestamp time.Time `json:"timestamp"` - KVPairs []*logEntryKV `json:"kv_pairs"` -} - -// logEntryKV is a key value pair within the Output payload -type logEntryKV struct { - Key string `json:"key"` - Value interface{} `json:"value"` -} - -// flattenKVPairs is used to flatten KVPair slice into []interface{} -// for hclog consumption. -func flattenKVPairs(kvs []*logEntryKV) []interface{} { - var result []interface{} - for _, kv := range kvs { - result = append(result, kv.Key) - result = append(result, kv.Value) - } - - return result -} - -// parseJSON handles parsing JSON output -func parseJSON(input []byte) (*logEntry, error) { - var raw map[string]interface{} - entry := &logEntry{} - - err := json.Unmarshal(input, &raw) - if err != nil { - return nil, err - } - - // Parse hclog-specific objects - if v, ok := raw["@message"]; ok { - entry.Message = v.(string) - delete(raw, "@message") - } - - if v, ok := raw["@level"]; ok { - entry.Level = v.(string) - delete(raw, "@level") - } - - if v, ok := raw["@timestamp"]; ok { - t, err := time.Parse("2006-01-02T15:04:05.000000Z07:00", v.(string)) - if err != nil { - return nil, err - } - entry.Timestamp = t - delete(raw, "@timestamp") - } - - // Parse dynamic KV args from the hclog payload. - for k, v := range raw { - entry.KVPairs = append(entry.KVPairs, &logEntryKV{ - Key: k, - Value: v, - }) - } - - return entry, nil -} diff --git a/vendor/github.com/hashicorp/go-plugin/mtls.go b/vendor/github.com/hashicorp/go-plugin/mtls.go deleted file mode 100644 index 09ecafaf45a..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/mtls.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
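parseJSON above separates hclog's @-prefixed fields from the remaining dynamic key/value pairs (the mtls.go deletion resumes after this sketch). A runnable sketch of the same parsing rules; the input line is invented, and real hclog output may differ:

    package main

    import (
        "encoding/json"
        "fmt"
        "time"
    )

    func main() {
        // Hypothetical hclog line read from a plugin's stderr.
        line := []byte(`{"@level":"debug","@message":"opened stream","@timestamp":"2024-12-20T17:02:16.000000Z","id":7}`)

        var raw map[string]interface{}
        if err := json.Unmarshal(line, &raw); err != nil {
            panic(err)
        }

        // The three @-keys map onto the logEntry fields...
        msg := raw["@message"].(string)
        level := raw["@level"].(string)
        ts, err := time.Parse("2006-01-02T15:04:05.000000Z07:00", raw["@timestamp"].(string))
        if err != nil {
            panic(err)
        }
        delete(raw, "@message")
        delete(raw, "@level")
        delete(raw, "@timestamp")

        // ...and whatever is left becomes KVPairs. encoding/json decodes JSON
        // numbers into float64, so "id":7 arrives as float64(7).
        fmt.Println(level, msg, ts.UTC(), raw)
    }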
-// SPDX-License-Identifier: MPL-2.0 - -package plugin - -import ( - "bytes" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/x509" - "crypto/x509/pkix" - "encoding/pem" - "math/big" - "time" -) - -// generateCert generates a temporary certificate for plugin authentication. The -// certificate and private key are returns in PEM format. -func generateCert() (cert []byte, privateKey []byte, err error) { - key, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader) - if err != nil { - return nil, nil, err - } - - serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) - sn, err := rand.Int(rand.Reader, serialNumberLimit) - if err != nil { - return nil, nil, err - } - - host := "localhost" - - template := &x509.Certificate{ - Subject: pkix.Name{ - CommonName: host, - Organization: []string{"HashiCorp"}, - }, - DNSNames: []string{host}, - ExtKeyUsage: []x509.ExtKeyUsage{ - x509.ExtKeyUsageClientAuth, - x509.ExtKeyUsageServerAuth, - }, - KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment | x509.KeyUsageKeyAgreement | x509.KeyUsageCertSign, - BasicConstraintsValid: true, - SerialNumber: sn, - NotBefore: time.Now().Add(-30 * time.Second), - NotAfter: time.Now().Add(262980 * time.Hour), - IsCA: true, - } - - der, err := x509.CreateCertificate(rand.Reader, template, template, key.Public(), key) - if err != nil { - return nil, nil, err - } - - var certOut bytes.Buffer - if err := pem.Encode(&certOut, &pem.Block{Type: "CERTIFICATE", Bytes: der}); err != nil { - return nil, nil, err - } - - keyBytes, err := x509.MarshalECPrivateKey(key) - if err != nil { - return nil, nil, err - } - - var keyOut bytes.Buffer - if err := pem.Encode(&keyOut, &pem.Block{Type: "EC PRIVATE KEY", Bytes: keyBytes}); err != nil { - return nil, nil, err - } - - cert = certOut.Bytes() - privateKey = keyOut.Bytes() - - return cert, privateKey, nil -} diff --git a/vendor/github.com/hashicorp/go-plugin/mux_broker.go b/vendor/github.com/hashicorp/go-plugin/mux_broker.go deleted file mode 100644 index 4eb1208fbb7..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/mux_broker.go +++ /dev/null @@ -1,207 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package plugin - -import ( - "encoding/binary" - "fmt" - "log" - "net" - "sync" - "sync/atomic" - "time" - - "github.com/hashicorp/yamux" -) - -// MuxBroker is responsible for brokering multiplexed connections by unique ID. -// -// It is used by plugins to multiplex multiple RPC connections and data -// streams on top of a single connection between the plugin process and the -// host process. -// -// This allows a plugin to request a channel with a specific ID to connect to -// or accept a connection from, and the broker handles the details of -// holding these channels open while they're being negotiated. -// -// The Plugin interface has access to these for both Server and Client. -// The broker can be used by either (optionally) to reserve and connect to -// new multiplexed streams. This is useful for complex args and return values, -// or anything else you might need a data stream for. -type MuxBroker struct { - nextId uint32 - session *yamux.Session - streams map[uint32]*muxBrokerPending - - sync.Mutex -} - -type muxBrokerPending struct { - ch chan net.Conn - doneCh chan struct{} -} - -func newMuxBroker(s *yamux.Session) *MuxBroker { - return &MuxBroker{ - session: s, - streams: make(map[uint32]*muxBrokerPending), - } -} - -// Accept accepts a connection by ID. 
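generateCert above returns its certificate and key as PEM blocks, which plug directly into crypto/tls (MuxBroker's Accept doc resumes below). A hedged, in-package fragment of that wiring; the mTLS call site is not shown in this diff, so the names and config here are assumptions:

    // Fragment: assumes `import "crypto/tls"` and the same package as generateCert.
    certPEM, keyPEM, err := generateCert()
    if err != nil {
        return err
    }
    keypair, err := tls.X509KeyPair(certPEM, keyPEM)
    if err != nil {
        return err
    }
    cfg := &tls.Config{
        Certificates: []tls.Certificate{keypair},
        MinVersion:   tls.VersionTLS12,
    }
    _ = cfg // hand to tls.Client/tls.Server on the plugin connection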
-// -// This should not be called multiple times with the same ID at one time. -func (m *MuxBroker) Accept(id uint32) (net.Conn, error) { - var c net.Conn - p := m.getStream(id) - select { - case c = <-p.ch: - close(p.doneCh) - case <-time.After(5 * time.Second): - m.Lock() - defer m.Unlock() - delete(m.streams, id) - - return nil, fmt.Errorf("timeout waiting for accept") - } - - // Ack our connection - if err := binary.Write(c, binary.LittleEndian, id); err != nil { - c.Close() - return nil, err - } - - return c, nil -} - -// AcceptAndServe is used to accept a specific stream ID and immediately -// serve an RPC server on that stream ID. This is used to easily serve -// complex arguments. -// -// The served interface is always registered to the "Plugin" name. -func (m *MuxBroker) AcceptAndServe(id uint32, v interface{}) { - conn, err := m.Accept(id) - if err != nil { - log.Printf("[ERR] plugin: plugin acceptAndServe error: %s", err) - return - } - - serve(conn, "Plugin", v) -} - -// Close closes the connection and all sub-connections. -func (m *MuxBroker) Close() error { - return m.session.Close() -} - -// Dial opens a connection by ID. -func (m *MuxBroker) Dial(id uint32) (net.Conn, error) { - // Open the stream - stream, err := m.session.OpenStream() - if err != nil { - return nil, err - } - - // Write the stream ID onto the wire. - if err := binary.Write(stream, binary.LittleEndian, id); err != nil { - stream.Close() - return nil, err - } - - // Read the ack that we connected. Then we're off! - var ack uint32 - if err := binary.Read(stream, binary.LittleEndian, &ack); err != nil { - stream.Close() - return nil, err - } - if ack != id { - stream.Close() - return nil, fmt.Errorf("bad ack: %d (expected %d)", ack, id) - } - - return stream, nil -} - -// NextId returns a unique ID to use next. -// -// It is possible for very long-running plugin hosts to wrap this value, -// though it would require a very large amount of RPC calls. In practice -// we've never seen it happen. -func (m *MuxBroker) NextId() uint32 { - return atomic.AddUint32(&m.nextId, 1) -} - -// Run starts the brokering and should be executed in a goroutine, since it -// blocks forever, or until the session closes. -// -// Uses of MuxBroker never need to call this. It is called internally by -// the plugin host/client. -func (m *MuxBroker) Run() { - for { - stream, err := m.session.AcceptStream() - if err != nil { - // Once we receive an error, just exit - break - } - - // Read the stream ID from the stream - var id uint32 - if err := binary.Read(stream, binary.LittleEndian, &id); err != nil { - stream.Close() - continue - } - - // Initialize the waiter - p := m.getStream(id) - select { - case p.ch <- stream: - default: - } - - // Wait for a timeout - go m.timeoutWait(id, p) - } -} - -func (m *MuxBroker) getStream(id uint32) *muxBrokerPending { - m.Lock() - defer m.Unlock() - - p, ok := m.streams[id] - if ok { - return p - } - - m.streams[id] = &muxBrokerPending{ - ch: make(chan net.Conn, 1), - doneCh: make(chan struct{}), - } - return m.streams[id] -} - -func (m *MuxBroker) timeoutWait(id uint32, p *muxBrokerPending) { - // Wait for the stream to either be picked up and connected, or - // for a timeout. - timeout := false - select { - case <-p.doneCh: - case <-time.After(5 * time.Second): - timeout = true - } - - m.Lock() - defer m.Unlock() - - // Delete the stream so no one else can grab it - delete(m.streams, id) - - // If we timed out, then check if we have a channel in the buffer, - // and if so, close it. 
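Dial and Accept above share a four-byte handshake (timeoutWait's final drain continues below): the dialer writes the stream ID as a little-endian uint32 and the accepter echoes it back as an ack. A self-contained sketch of that wire exchange over net.Pipe:

    package main

    import (
        "encoding/binary"
        "fmt"
        "net"
    )

    func main() {
        dialSide, acceptSide := net.Pipe()

        go func() {
            // Accept side (as in MuxBroker.Accept): read the ID, ack it back.
            var id uint32
            if err := binary.Read(acceptSide, binary.LittleEndian, &id); err != nil {
                return
            }
            _ = binary.Write(acceptSide, binary.LittleEndian, id)
        }()

        // Dial side (as in MuxBroker.Dial): send the ID, then verify the ack.
        const id uint32 = 7
        if err := binary.Write(dialSide, binary.LittleEndian, id); err != nil {
            panic(err)
        }
        var ack uint32
        if err := binary.Read(dialSide, binary.LittleEndian, &ack); err != nil {
            panic(err)
        }
        fmt.Println(ack == id) // true: the stream is now bound to ID 7
    }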
- if timeout { - select { - case s := <-p.ch: - s.Close() - } - } -} diff --git a/vendor/github.com/hashicorp/go-plugin/plugin.go b/vendor/github.com/hashicorp/go-plugin/plugin.go deleted file mode 100644 index 184749b96ef..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/plugin.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -// The plugin package exposes functions and helpers for communicating to -// plugins which are implemented as standalone binary applications. -// -// plugin.Client fully manages the lifecycle of executing the application, -// connecting to it, and returning the RPC client for dispensing plugins. -// -// plugin.Serve fully manages listeners to expose an RPC server from a binary -// that plugin.Client can connect to. -package plugin - -import ( - "context" - "errors" - "net/rpc" - - "google.golang.org/grpc" -) - -// Plugin is the interface that is implemented to serve/connect to an -// inteface implementation. -type Plugin interface { - // Server should return the RPC server compatible struct to serve - // the methods that the Client calls over net/rpc. - Server(*MuxBroker) (interface{}, error) - - // Client returns an interface implementation for the plugin you're - // serving that communicates to the server end of the plugin. - Client(*MuxBroker, *rpc.Client) (interface{}, error) -} - -// GRPCPlugin is the interface that is implemented to serve/connect to -// a plugin over gRPC. -type GRPCPlugin interface { - // GRPCServer should register this plugin for serving with the - // given GRPCServer. Unlike Plugin.Server, this is only called once - // since gRPC plugins serve singletons. - GRPCServer(*GRPCBroker, *grpc.Server) error - - // GRPCClient should return the interface implementation for the plugin - // you're serving via gRPC. The provided context will be canceled by - // go-plugin in the event of the plugin process exiting. - GRPCClient(context.Context, *GRPCBroker, *grpc.ClientConn) (interface{}, error) -} - -// NetRPCUnsupportedPlugin implements Plugin but returns errors for the -// Server and Client functions. This will effectively disable support for -// net/rpc based plugins. -// -// This struct can be embedded in your struct. -type NetRPCUnsupportedPlugin struct{} - -func (p NetRPCUnsupportedPlugin) Server(*MuxBroker) (interface{}, error) { - return nil, errors.New("net/rpc plugin protocol not supported") -} - -func (p NetRPCUnsupportedPlugin) Client(*MuxBroker, *rpc.Client) (interface{}, error) { - return nil, errors.New("net/rpc plugin protocol not supported") -} diff --git a/vendor/github.com/hashicorp/go-plugin/process.go b/vendor/github.com/hashicorp/go-plugin/process.go deleted file mode 100644 index b88446361da..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/process.go +++ /dev/null @@ -1,4 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package plugin diff --git a/vendor/github.com/hashicorp/go-plugin/protocol.go b/vendor/github.com/hashicorp/go-plugin/protocol.go deleted file mode 100644 index e4b7be38378..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/protocol.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package plugin - -import ( - "io" - "net" -) - -// Protocol is an enum representing the types of protocols. 
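-// (Editor's note: the negotiated Protocol is advertised by the plugin as the
-// fifth field of the handshake line written to stdout in Serve, i.e.
-// CORE-VERSION|PROTO-VERSION|NETWORK|ADDRESS|PROTOCOL|TLS-CERT.)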
-type Protocol string - -const ( - ProtocolInvalid Protocol = "" - ProtocolNetRPC Protocol = "netrpc" - ProtocolGRPC Protocol = "grpc" -) - -// ServerProtocol is an interface that must be implemented for new plugin -// protocols to be servers. -type ServerProtocol interface { - // Init is called once to configure and initialize the protocol, but - // not start listening. This is the point at which all validation should - // be done and errors returned. - Init() error - - // Config is extra configuration to be outputted to stdout. This will - // be automatically base64 encoded to ensure it can be parsed properly. - // This can be an empty string if additional configuration is not needed. - Config() string - - // Serve is called to serve connections on the given listener. This should - // continue until the listener is closed. - Serve(net.Listener) -} - -// ClientProtocol is an interface that must be implemented for new plugin -// protocols to be clients. -type ClientProtocol interface { - io.Closer - - // Dispense dispenses a new instance of the plugin with the given name. - Dispense(string) (interface{}, error) - - // Ping checks that the client connection is still healthy. - Ping() error -} diff --git a/vendor/github.com/hashicorp/go-plugin/rpc_client.go b/vendor/github.com/hashicorp/go-plugin/rpc_client.go deleted file mode 100644 index 142454df80d..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/rpc_client.go +++ /dev/null @@ -1,173 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package plugin - -import ( - "crypto/tls" - "fmt" - "io" - "net" - "net/rpc" - - "github.com/hashicorp/yamux" -) - -// RPCClient connects to an RPCServer over net/rpc to dispense plugin types. -type RPCClient struct { - broker *MuxBroker - control *rpc.Client - plugins map[string]Plugin - - // These are the streams used for the various stdout/err overrides - stdout, stderr net.Conn -} - -// newRPCClient creates a new RPCClient. The Client argument is expected -// to be successfully started already with a lock held. -func newRPCClient(c *Client) (*RPCClient, error) { - // Connect to the client - conn, err := net.Dial(c.address.Network(), c.address.String()) - if err != nil { - return nil, err - } - if tcpConn, ok := conn.(*net.TCPConn); ok { - // Make sure to set keep alive so that the connection doesn't die - tcpConn.SetKeepAlive(true) - } - - if c.config.TLSConfig != nil { - conn = tls.Client(conn, c.config.TLSConfig) - } - - // Create the actual RPC client - result, err := NewRPCClient(conn, c.config.Plugins) - if err != nil { - conn.Close() - return nil, err - } - - // Begin the stream syncing so that stdin, out, err work properly - err = result.SyncStreams( - c.config.SyncStdout, - c.config.SyncStderr) - if err != nil { - result.Close() - return nil, err - } - - return result, nil -} - -// NewRPCClient creates a client from an already-open connection-like value. -// Dial is typically used instead. -func NewRPCClient(conn io.ReadWriteCloser, plugins map[string]Plugin) (*RPCClient, error) { - // Create the yamux client so we can multiplex - mux, err := yamux.Client(conn, nil) - if err != nil { - conn.Close() - return nil, err - } - - // Connect to the control stream. 
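-	// (Editor's note: stream order is the implicit protocol here. The client
-	// opens the control stream first and then two more streams for stdout
-	// and stderr; RPCServer.ServeConn accepts them in the same order.)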
- control, err := mux.Open() - if err != nil { - mux.Close() - return nil, err - } - - // Connect stdout, stderr streams - stdstream := make([]net.Conn, 2) - for i, _ := range stdstream { - stdstream[i], err = mux.Open() - if err != nil { - mux.Close() - return nil, err - } - } - - // Create the broker and start it up - broker := newMuxBroker(mux) - go broker.Run() - - // Build the client using our broker and control channel. - return &RPCClient{ - broker: broker, - control: rpc.NewClient(control), - plugins: plugins, - stdout: stdstream[0], - stderr: stdstream[1], - }, nil -} - -// SyncStreams should be called to enable syncing of stdout, -// stderr with the plugin. -// -// This will return immediately and the syncing will continue to happen -// in the background. You do not need to launch this in a goroutine itself. -// -// This should never be called multiple times. -func (c *RPCClient) SyncStreams(stdout io.Writer, stderr io.Writer) error { - go copyStream("stdout", stdout, c.stdout) - go copyStream("stderr", stderr, c.stderr) - return nil -} - -// Close closes the connection. The client is no longer usable after this -// is called. -func (c *RPCClient) Close() error { - // Call the control channel and ask it to gracefully exit. If this - // errors, then we save it so that we always return an error but we - // want to try to close the other channels anyways. - var empty struct{} - returnErr := c.control.Call("Control.Quit", true, &empty) - - // Close the other streams we have - if err := c.control.Close(); err != nil { - return err - } - if err := c.stdout.Close(); err != nil { - return err - } - if err := c.stderr.Close(); err != nil { - return err - } - if err := c.broker.Close(); err != nil { - return err - } - - // Return back the error we got from Control.Quit. This is very important - // since we MUST return non-nil error if this fails so that Client.Kill - // will properly try a process.Kill. - return returnErr -} - -func (c *RPCClient) Dispense(name string) (interface{}, error) { - p, ok := c.plugins[name] - if !ok { - return nil, fmt.Errorf("unknown plugin type: %s", name) - } - - var id uint32 - if err := c.control.Call( - "Dispenser.Dispense", name, &id); err != nil { - return nil, err - } - - conn, err := c.broker.Dial(id) - if err != nil { - return nil, err - } - - return p.Client(c.broker, rpc.NewClient(conn)) -} - -// Ping pings the connection to ensure it is still alive. -// -// The error from the RPC call is returned exactly if you want to inspect -// it for further error analysis. Any error returned from here would indicate -// that the connection to the plugin is not healthy. -func (c *RPCClient) Ping() error { - var empty struct{} - return c.control.Call("Control.Ping", true, &empty) -} diff --git a/vendor/github.com/hashicorp/go-plugin/rpc_server.go b/vendor/github.com/hashicorp/go-plugin/rpc_server.go deleted file mode 100644 index cec0a3d93a2..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/rpc_server.go +++ /dev/null @@ -1,209 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package plugin - -import ( - "errors" - "fmt" - "io" - "log" - "net" - "net/rpc" - "sync" - - "github.com/hashicorp/yamux" -) - -// RPCServer listens for network connections and then dispenses interface -// implementations over net/rpc. -// -// After setting the fields below, they shouldn't be read again directly -// from the structure which may be reading/writing them concurrently. 
-type RPCServer struct {
-	Plugins map[string]Plugin
-
-	// Stdout, Stderr are what this server will use instead of the
-	// normal stdin/out/err. This is because, due to the multi-process
-	// nature of our plugin system, we can't use the normal process
-	// values, so we make our own custom ones that we pipe across.
-	Stdout io.Reader
-	Stderr io.Reader
-
-	// DoneCh should be set to a non-nil channel that will be closed
-	// when the control requests the RPC server to end.
-	DoneCh chan<- struct{}
-
-	lock sync.Mutex
-}
-
-// ServerProtocol impl.
-func (s *RPCServer) Init() error { return nil }
-
-// ServerProtocol impl.
-func (s *RPCServer) Config() string { return "" }
-
-// ServerProtocol impl.
-func (s *RPCServer) Serve(lis net.Listener) {
-	defer s.done()
-
-	for {
-		conn, err := lis.Accept()
-		if err != nil {
-			severity := "ERR"
-			if errors.Is(err, net.ErrClosed) {
-				severity = "DEBUG"
-			}
-			log.Printf("[%s] plugin: plugin server: %s", severity, err)
-			return
-		}
-
-		go s.ServeConn(conn)
-	}
-}
-
-// ServeConn runs a single connection.
-//
-// ServeConn blocks, serving the connection until the client hangs up.
-func (s *RPCServer) ServeConn(conn io.ReadWriteCloser) {
-	// First create the yamux server to wrap this connection
-	mux, err := yamux.Server(conn, nil)
-	if err != nil {
-		conn.Close()
-		log.Printf("[ERR] plugin: error creating yamux server: %s", err)
-		return
-	}
-
-	// Accept the control connection
-	control, err := mux.Accept()
-	if err != nil {
-		mux.Close()
-		if err != io.EOF {
-			log.Printf("[ERR] plugin: error accepting control connection: %s", err)
-		}
-
-		return
-	}
-
-	// Connect the stdstreams (out, err)
-	stdstream := make([]net.Conn, 2)
-	for i := range stdstream {
-		stdstream[i], err = mux.Accept()
-		if err != nil {
-			mux.Close()
-			log.Printf("[ERR] plugin: accepting stream %d: %s", i, err)
-			return
-		}
-	}
-
-	// Copy std streams out to the proper place
-	go copyStream("stdout", stdstream[0], s.Stdout)
-	go copyStream("stderr", stdstream[1], s.Stderr)
-
-	// Create the broker and start it up
-	broker := newMuxBroker(mux)
-	go broker.Run()
-
-	// Use the control connection to build the dispenser and serve the
-	// connection.
-	server := rpc.NewServer()
-	server.RegisterName("Control", &controlServer{
-		server: s,
-	})
-	server.RegisterName("Dispenser", &dispenseServer{
-		broker:  broker,
-		plugins: s.Plugins,
-	})
-	server.ServeConn(control)
-}
-
-// done is called internally by the control server to trigger the
-// doneCh to close which is listened to by the main process to cleanly
-// exit.
-func (s *RPCServer) done() {
-	s.lock.Lock()
-	defer s.lock.Unlock()
-
-	if s.DoneCh != nil {
-		close(s.DoneCh)
-		s.DoneCh = nil
-	}
-}
-
-// controlServer is the RPC server that handles control requests from the
-// client, such as pings and the quit request.
-type controlServer struct {
-	server *RPCServer
-}
-
-// Ping can be called to verify that the connection (and likely the plugin
-// binary) is still alive.
-func (c *controlServer) Ping(
-	null bool, response *struct{},
-) error {
-	*response = struct{}{}
-	return nil
-}
-
-func (c *controlServer) Quit(
-	null bool, response *struct{},
-) error {
-	// End the server
-	c.server.done()
-
-	// Always return success
-	*response = struct{}{}
-
-	return nil
-}
-
-// dispenseServer dispenses various interface implementations for Terraform.
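-// (Editor's sketch, not part of the original source: a dispense round trip
-// looks like
-//
-//	client: control.Call("Dispenser.Dispense", name, &id)
-//	server: impl, _ := p.Server(broker); id = broker.NextId()
-//	server: go func() { conn, _ := broker.Accept(id); serve(conn, "Plugin", impl) }()
-//	client: conn, _ := broker.Dial(id); return p.Client(broker, rpc.NewClient(conn))
-//
-// which is what RPCClient.Dispense above and dispenseServer.Dispense below
-// implement.)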
-type dispenseServer struct { - broker *MuxBroker - plugins map[string]Plugin -} - -func (d *dispenseServer) Dispense( - name string, response *uint32, -) error { - // Find the function to create this implementation - p, ok := d.plugins[name] - if !ok { - return fmt.Errorf("unknown plugin type: %s", name) - } - - // Create the implementation first so we know if there is an error. - impl, err := p.Server(d.broker) - if err != nil { - // We turn the error into an errors error so that it works across RPC - return errors.New(err.Error()) - } - - // Reserve an ID for our implementation - id := d.broker.NextId() - *response = id - - // Run the rest in a goroutine since it can only happen once this RPC - // call returns. We wait for a connection for the plugin implementation - // and serve it. - go func() { - conn, err := d.broker.Accept(id) - if err != nil { - log.Printf("[ERR] go-plugin: plugin dispense error: %s: %s", name, err) - return - } - - serve(conn, "Plugin", impl) - }() - - return nil -} - -func serve(conn io.ReadWriteCloser, name string, v interface{}) { - server := rpc.NewServer() - if err := server.RegisterName(name, v); err != nil { - log.Printf("[ERR] go-plugin: plugin dispense error: %s", err) - return - } - - server.ServeConn(conn) -} diff --git a/vendor/github.com/hashicorp/go-plugin/runner/runner.go b/vendor/github.com/hashicorp/go-plugin/runner/runner.go deleted file mode 100644 index e638ae5f8ee..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/runner/runner.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package runner - -import ( - "context" - "io" -) - -// Runner defines the interface required by go-plugin to manage the lifecycle of -// of a plugin and attempt to negotiate a connection with it. Note that this -// is orthogonal to the protocol and transport used, which is negotiated over stdout. -type Runner interface { - // Start should start the plugin and ensure any work required for servicing - // other interface methods is done. If the context is cancelled, it should - // only abort any attempts to _start_ the plugin. Waiting and shutdown are - // handled separately. - Start(ctx context.Context) error - - // Diagnose makes a best-effort attempt to return any debug information that - // might help users understand why a plugin failed to start and negotiate a - // connection. - Diagnose(ctx context.Context) string - - // Stdout is used to negotiate the go-plugin protocol. - Stdout() io.ReadCloser - - // Stderr is used for forwarding plugin logs to the host process logger. - Stderr() io.ReadCloser - - // Name is a human-friendly name for the plugin, such as the path to the - // executable. It does not have to be unique. - Name() string - - AttachedRunner -} - -// AttachedRunner defines a limited subset of Runner's interface to represent the -// reduced responsibility for plugin lifecycle when attaching to an already running -// plugin. -type AttachedRunner interface { - // Wait should wait until the plugin stops running, whether in response to - // an out of band signal or in response to calling Kill(). - Wait(ctx context.Context) error - - // Kill should stop the plugin and perform any cleanup required. - Kill(ctx context.Context) error - - // ID is a unique identifier to represent the running plugin. e.g. pid or - // container ID. - ID() string - - AddrTranslator -} - -// AddrTranslator translates addresses between the execution context of the host -// process and the plugin. 
For example, if the plugin is in a container, the file -// path for a Unix socket may be different between the host and the container. -// -// It is only intended to be used by the host process. -type AddrTranslator interface { - // Called before connecting on any addresses received back from the plugin. - PluginToHost(pluginNet, pluginAddr string) (hostNet string, hostAddr string, err error) - - // Called on any host process addresses before they are sent to the plugin. - HostToPlugin(hostNet, hostAddr string) (pluginNet string, pluginAddr string, err error) -} - -// ReattachFunc can be passed to a client's reattach config to reattach to an -// already running plugin instead of starting it ourselves. -type ReattachFunc func() (AttachedRunner, error) diff --git a/vendor/github.com/hashicorp/go-plugin/server.go b/vendor/github.com/hashicorp/go-plugin/server.go deleted file mode 100644 index e741bc7fa18..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/server.go +++ /dev/null @@ -1,665 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package plugin - -import ( - "context" - "crypto/tls" - "crypto/x509" - "encoding/base64" - "errors" - "fmt" - "io" - "net" - "os" - "os/signal" - "os/user" - "runtime" - "sort" - "strconv" - "strings" - - hclog "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-plugin/internal/grpcmux" - "google.golang.org/grpc" -) - -// CoreProtocolVersion is the ProtocolVersion of the plugin system itself. -// We will increment this whenever we change any protocol behavior. This -// will invalidate any prior plugins but will at least allow us to iterate -// on the core in a safe way. We will do our best to do this very -// infrequently. -const CoreProtocolVersion = 1 - -// HandshakeConfig is the configuration used by client and servers to -// handshake before starting a plugin connection. This is embedded by -// both ServeConfig and ClientConfig. -// -// In practice, the plugin host creates a HandshakeConfig that is exported -// and plugins then can easily consume it. -type HandshakeConfig struct { - // ProtocolVersion is the version that clients must match on to - // agree they can communicate. This should match the ProtocolVersion - // set on ClientConfig when using a plugin. - // This field is not required if VersionedPlugins are being used in the - // Client or Server configurations. - ProtocolVersion uint - - // MagicCookieKey and value are used as a very basic verification - // that a plugin is intended to be launched. This is not a security - // measure, just a UX feature. If the magic cookie doesn't match, - // we show human-friendly output. - MagicCookieKey string - MagicCookieValue string -} - -// PluginSet is a set of plugins provided to be registered in the plugin -// server. -type PluginSet map[string]Plugin - -// ServeConfig configures what sorts of plugins are served. -type ServeConfig struct { - // HandshakeConfig is the configuration that must match clients. - HandshakeConfig - - // TLSProvider is a function that returns a configured tls.Config. - TLSProvider func() (*tls.Config, error) - - // Plugins are the plugins that are served. - // The implied version of this PluginSet is the Handshake.ProtocolVersion. - Plugins PluginSet - - // VersionedPlugins is a map of PluginSets for specific protocol versions. - // These can be used to negotiate a compatible version between client and - // server. If this is set, Handshake.ProtocolVersion is not required. 
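-	// (Editorial example with hypothetical plugin types:
-	//
-	//	VersionedPlugins: map[int]PluginSet{
-	//		2: {"kv": &KVPluginV2{}},
-	//		3: {"kv": &KVPluginV3{}},
-	//	}
-	//
-	// lets one binary serve either protocol version 2 or 3, whichever the
-	// client also supports.)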
- VersionedPlugins map[int]PluginSet - - // GRPCServer should be non-nil to enable serving the plugins over - // gRPC. This is a function to create the server when needed with the - // given server options. The server options populated by go-plugin will - // be for TLS if set. You may modify the input slice. - // - // Note that the grpc.Server will automatically be registered with - // the gRPC health checking service. This is not optional since go-plugin - // relies on this to implement Ping(). - GRPCServer func([]grpc.ServerOption) *grpc.Server - - // Logger is used to pass a logger into the server. If none is provided the - // server will create a default logger. - Logger hclog.Logger - - // Test, if non-nil, will put plugin serving into "test mode". This is - // meant to be used as part of `go test` within a plugin's codebase to - // launch the plugin in-process and output a ReattachConfig. - // - // This changes the behavior of the server in a number of ways to - // accomodate the expectation of running in-process: - // - // * The handshake cookie is not validated. - // * Stdout/stderr will receive plugin reads and writes - // * Connection information will not be sent to stdout - // - Test *ServeTestConfig -} - -// ServeTestConfig configures plugin serving for test mode. See ServeConfig.Test. -type ServeTestConfig struct { - // Context, if set, will force the plugin serving to end when cancelled. - // This is only a test configuration because the non-test configuration - // expects to take over the process and therefore end on an interrupt or - // kill signal. For tests, we need to kill the plugin serving routinely - // and this provides a way to do so. - // - // If you want to wait for the plugin process to close before moving on, - // you can wait on CloseCh. - Context context.Context - - // If this channel is non-nil, we will send the ReattachConfig via - // this channel. This can be encoded (via JSON recommended) to the - // plugin client to attach to this plugin. - ReattachConfigCh chan<- *ReattachConfig - - // CloseCh, if non-nil, will be closed when serving exits. This can be - // used along with Context to determine when the server is fully shut down. - // If this is not set, you can still use Context on its own, but note there - // may be a period of time between canceling the context and the plugin - // server being shut down. - CloseCh chan<- struct{} - - // SyncStdio, if true, will enable the client side "SyncStdout/Stderr" - // functionality to work. This defaults to false because the implementation - // of making this work within test environments is particularly messy - // and SyncStdio functionality is fairly rare, so we default to the simple - // scenario. - SyncStdio bool -} - -func unixSocketConfigFromEnv() UnixSocketConfig { - return UnixSocketConfig{ - Group: os.Getenv(EnvUnixSocketGroup), - socketDir: os.Getenv(EnvUnixSocketDir), - } -} - -// protocolVersion determines the protocol version and plugin set to be used by -// the server. In the event that there is no suitable version, the last version -// in the config is returned leaving the client to report the incompatibility. 
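-// (Editor's note: the host advertises the protocol versions it accepts in the
-// PLUGIN_PROTOCOL_VERSIONS environment variable, comma-separated; both that
-// list and opts.VersionedPlugins are walked newest-first, so the highest
-// mutually supported version wins.)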
-func protocolVersion(opts *ServeConfig) (int, Protocol, PluginSet) { - protoVersion := int(opts.ProtocolVersion) - pluginSet := opts.Plugins - protoType := ProtocolNetRPC - // Check if the client sent a list of acceptable versions - var clientVersions []int - if vs := os.Getenv("PLUGIN_PROTOCOL_VERSIONS"); vs != "" { - for _, s := range strings.Split(vs, ",") { - v, err := strconv.Atoi(s) - if err != nil { - fmt.Fprintf(os.Stderr, "server sent invalid plugin version %q", s) - continue - } - clientVersions = append(clientVersions, v) - } - } - - // We want to iterate in reverse order, to ensure we match the newest - // compatible plugin version. - sort.Sort(sort.Reverse(sort.IntSlice(clientVersions))) - - // set the old un-versioned fields as if they were versioned plugins - if opts.VersionedPlugins == nil { - opts.VersionedPlugins = make(map[int]PluginSet) - } - - if pluginSet != nil { - opts.VersionedPlugins[protoVersion] = pluginSet - } - - // Sort the version to make sure we match the latest first - var versions []int - for v := range opts.VersionedPlugins { - versions = append(versions, v) - } - - sort.Sort(sort.Reverse(sort.IntSlice(versions))) - - // See if we have multiple versions of Plugins to choose from - for _, version := range versions { - // Record each version, since we guarantee that this returns valid - // values even if they are not a protocol match. - protoVersion = version - pluginSet = opts.VersionedPlugins[version] - - // If we have a configured gRPC server we should select a protocol - if opts.GRPCServer != nil { - // All plugins in a set must use the same transport, so check the first - // for the protocol type - for _, p := range pluginSet { - switch p.(type) { - case GRPCPlugin: - protoType = ProtocolGRPC - default: - protoType = ProtocolNetRPC - } - break - } - } - - for _, clientVersion := range clientVersions { - if clientVersion == protoVersion { - return protoVersion, protoType, pluginSet - } - } - } - - // Return the lowest version as the fallback. - // Since we iterated over all the versions in reverse order above, these - // values are from the lowest version number plugins (which may be from - // a combination of the Handshake.ProtocolVersion and ServeConfig.Plugins - // fields). This allows serving the oldest version of our plugins to a - // legacy client that did not send a PLUGIN_PROTOCOL_VERSIONS list. - return protoVersion, protoType, pluginSet -} - -// Serve serves the plugins given by ServeConfig. -// -// Serve doesn't return until the plugin is done being executed. Any -// fixable errors will be output to os.Stderr and the process will -// exit with a status code of 1. Serve will panic for unexpected -// conditions where a user's fix is unknown. -// -// This is the method that plugins should call in their main() functions. -func Serve(opts *ServeConfig) { - exitCode := -1 - // We use this to trigger an `os.Exit` so that we can execute our other - // deferred functions. In test mode, we just output the err to stderr - // and return. - defer func() { - if opts.Test == nil && exitCode >= 0 { - os.Exit(exitCode) - } - - if opts.Test != nil && opts.Test.CloseCh != nil { - close(opts.Test.CloseCh) - } - }() - - if opts.Test == nil { - // Validate the handshake config - if opts.MagicCookieKey == "" || opts.MagicCookieValue == "" { - fmt.Fprintf(os.Stderr, - "Misconfigured ServeConfig given to serve this plugin: no magic cookie\n"+ - "key or value was set. 
Please notify the plugin author and report\n"+ - "this as a bug.\n") - exitCode = 1 - return - } - - // First check the cookie - if os.Getenv(opts.MagicCookieKey) != opts.MagicCookieValue { - fmt.Fprintf(os.Stderr, - "This binary is a plugin. These are not meant to be executed directly.\n"+ - "Please execute the program that consumes these plugins, which will\n"+ - "load any plugins automatically\n") - exitCode = 1 - return - } - } - - // negotiate the version and plugins - // start with default version in the handshake config - protoVersion, protoType, pluginSet := protocolVersion(opts) - - logger := opts.Logger - if logger == nil { - // internal logger to os.Stderr - logger = hclog.New(&hclog.LoggerOptions{ - Level: hclog.Trace, - Output: os.Stderr, - JSONFormat: true, - }) - } - - // Register a listener so we can accept a connection - listener, err := serverListener(unixSocketConfigFromEnv()) - if err != nil { - logger.Error("plugin init error", "error", err) - return - } - - // Close the listener on return. We wrap this in a func() on purpose - // because the "listener" reference may change to TLS. - defer func() { - listener.Close() - }() - - var tlsConfig *tls.Config - if opts.TLSProvider != nil { - tlsConfig, err = opts.TLSProvider() - if err != nil { - logger.Error("plugin tls init", "error", err) - return - } - } - - var serverCert string - clientCert := os.Getenv("PLUGIN_CLIENT_CERT") - // If the client is configured using AutoMTLS, the certificate will be here, - // and we need to generate our own in response. - if tlsConfig == nil && clientCert != "" { - logger.Info("configuring server automatic mTLS") - clientCertPool := x509.NewCertPool() - if !clientCertPool.AppendCertsFromPEM([]byte(clientCert)) { - logger.Error("client cert provided but failed to parse", "cert", clientCert) - } - - certPEM, keyPEM, err := generateCert() - if err != nil { - logger.Error("failed to generate server certificate", "error", err) - panic(err) - } - - cert, err := tls.X509KeyPair(certPEM, keyPEM) - if err != nil { - logger.Error("failed to parse server certificate", "error", err) - panic(err) - } - - tlsConfig = &tls.Config{ - Certificates: []tls.Certificate{cert}, - ClientAuth: tls.RequireAndVerifyClientCert, - ClientCAs: clientCertPool, - MinVersion: tls.VersionTLS12, - RootCAs: clientCertPool, - ServerName: "localhost", - } - - // We send back the raw leaf cert data for the client rather than the - // PEM, since the protocol can't handle newlines. - serverCert = base64.RawStdEncoding.EncodeToString(cert.Certificate[0]) - } - - // Create the channel to tell us when we're done - doneCh := make(chan struct{}) - - // Create our new stdout, stderr files. These will override our built-in - // stdout/stderr so that it works across the stream boundary. - var stdout_r, stderr_r io.Reader - stdout_r, stdout_w, err := os.Pipe() - if err != nil { - fmt.Fprintf(os.Stderr, "Error preparing plugin: %s\n", err) - os.Exit(1) - } - stderr_r, stderr_w, err := os.Pipe() - if err != nil { - fmt.Fprintf(os.Stderr, "Error preparing plugin: %s\n", err) - os.Exit(1) - } - - // If we're in test mode, we tee off the reader and write the data - // as-is to our normal Stdout and Stderr so that they continue working - // while stdio works. This is because in test mode, we assume we're running - // in `go test` or some equivalent and we want output to go to standard - // locations. - if opts.Test != nil { - // TODO(mitchellh): This isn't super ideal because a TeeReader - // only works if the reader side is actively read. 
If we never - // connect via a plugin client, the output still gets swallowed. - stdout_r = io.TeeReader(stdout_r, os.Stdout) - stderr_r = io.TeeReader(stderr_r, os.Stderr) - } - - // Build the server type - var server ServerProtocol - switch protoType { - case ProtocolNetRPC: - // If we have a TLS configuration then we wrap the listener - // ourselves and do it at that level. - if tlsConfig != nil { - listener = tls.NewListener(listener, tlsConfig) - } - - // Create the RPC server to dispense - server = &RPCServer{ - Plugins: pluginSet, - Stdout: stdout_r, - Stderr: stderr_r, - DoneCh: doneCh, - } - - case ProtocolGRPC: - var muxer *grpcmux.GRPCServerMuxer - if multiplex, _ := strconv.ParseBool(os.Getenv(envMultiplexGRPC)); multiplex { - muxer = grpcmux.NewGRPCServerMuxer(logger, listener) - listener = muxer - } - - // Create the gRPC server - server = &GRPCServer{ - Plugins: pluginSet, - Server: opts.GRPCServer, - TLS: tlsConfig, - Stdout: stdout_r, - Stderr: stderr_r, - DoneCh: doneCh, - logger: logger, - muxer: muxer, - } - - default: - panic("unknown server protocol: " + protoType) - } - - // Initialize the servers - if err := server.Init(); err != nil { - logger.Error("protocol init", "error", err) - return - } - - logger.Debug("plugin address", "network", listener.Addr().Network(), "address", listener.Addr().String()) - - // Output the address and service name to stdout so that the client can - // bring it up. In test mode, we don't do this because clients will - // attach via a reattach config. - if opts.Test == nil { - const grpcBrokerMultiplexingSupported = true - protocolLine := fmt.Sprintf("%d|%d|%s|%s|%s|%s", - CoreProtocolVersion, - protoVersion, - listener.Addr().Network(), - listener.Addr().String(), - protoType, - serverCert) - - // Old clients will error with new plugins if we blindly append the - // seventh segment for gRPC broker multiplexing support, because old - // client code uses strings.SplitN(line, "|", 6), which means a seventh - // segment will get appended to the sixth segment as "sixthpart|true". - // - // If the environment variable is set, we assume the client is new enough - // to handle a seventh segment, as it should now use - // strings.Split(line, "|") and always handle each segment individually. - if os.Getenv(envMultiplexGRPC) != "" { - protocolLine += fmt.Sprintf("|%v", grpcBrokerMultiplexingSupported) - } - fmt.Printf("%s\n", protocolLine) - os.Stdout.Sync() - } else if ch := opts.Test.ReattachConfigCh; ch != nil { - // Send back the reattach config that can be used. This isn't - // quite ready if they connect immediately but the client should - // retry a few times. - ch <- &ReattachConfig{ - Protocol: protoType, - ProtocolVersion: protoVersion, - Addr: listener.Addr(), - Pid: os.Getpid(), - Test: true, - } - } - - // Eat the interrupts. In test mode we disable this so that go test - // can be cancelled properly. - if opts.Test == nil { - ch := make(chan os.Signal, 1) - signal.Notify(ch, os.Interrupt) - go func() { - count := 0 - for { - <-ch - count++ - logger.Trace("plugin received interrupt signal, ignoring", "count", count) - } - }() - } - - // Set our stdout, stderr to the stdio stream that clients can retrieve - // using ClientConfig.SyncStdout/err. We only do this for non-test mode - // or if the test mode explicitly requests it. - // - // In test mode, we use a multiwriter so that the data continues going - // to the normal stdout/stderr so output can show up in test logs. 
We - // also send to the stdio stream so that clients can continue working - // if they depend on that. - if opts.Test == nil || opts.Test.SyncStdio { - if opts.Test != nil { - // In test mode we need to maintain the original values so we can - // reset it. - defer func(out, err *os.File) { - os.Stdout = out - os.Stderr = err - }(os.Stdout, os.Stderr) - } - os.Stdout = stdout_w - os.Stderr = stderr_w - } - - // Accept connections and wait for completion - go server.Serve(listener) - - ctx := context.Background() - if opts.Test != nil && opts.Test.Context != nil { - ctx = opts.Test.Context - } - select { - case <-ctx.Done(): - // Cancellation. We can stop the server by closing the listener. - // This isn't graceful at all but this is currently only used by - // tests and its our only way to stop. - listener.Close() - - // If this is a grpc server, then we also ask the server itself to - // end which will kill all connections. There isn't an easy way to do - // this for net/rpc currently but net/rpc is more and more unused. - if s, ok := server.(*GRPCServer); ok { - s.Stop() - } - - // Wait for the server itself to shut down - <-doneCh - - case <-doneCh: - // Note that given the documentation of Serve we should probably be - // setting exitCode = 0 and using os.Exit here. That's how it used to - // work before extracting this library. However, for years we've done - // this so we'll keep this functionality. - } -} - -func serverListener(unixSocketCfg UnixSocketConfig) (net.Listener, error) { - if runtime.GOOS == "windows" { - return serverListener_tcp() - } - - return serverListener_unix(unixSocketCfg) -} - -func serverListener_tcp() (net.Listener, error) { - envMinPort := os.Getenv("PLUGIN_MIN_PORT") - envMaxPort := os.Getenv("PLUGIN_MAX_PORT") - - var minPort, maxPort int64 - var err error - - switch { - case len(envMinPort) == 0: - minPort = 0 - default: - minPort, err = strconv.ParseInt(envMinPort, 10, 32) - if err != nil { - return nil, fmt.Errorf("Couldn't get value from PLUGIN_MIN_PORT: %v", err) - } - } - - switch { - case len(envMaxPort) == 0: - maxPort = 0 - default: - maxPort, err = strconv.ParseInt(envMaxPort, 10, 32) - if err != nil { - return nil, fmt.Errorf("Couldn't get value from PLUGIN_MAX_PORT: %v", err) - } - } - - if minPort > maxPort { - return nil, fmt.Errorf("PLUGIN_MIN_PORT value of %d is greater than PLUGIN_MAX_PORT value of %d", minPort, maxPort) - } - - for port := minPort; port <= maxPort; port++ { - address := fmt.Sprintf("127.0.0.1:%d", port) - listener, err := net.Listen("tcp", address) - if err == nil { - return listener, nil - } - } - - return nil, errors.New("Couldn't bind plugin TCP listener") -} - -func serverListener_unix(unixSocketCfg UnixSocketConfig) (net.Listener, error) { - tf, err := os.CreateTemp(unixSocketCfg.socketDir, "plugin") - if err != nil { - return nil, err - } - path := tf.Name() - - // Close the file and remove it because it has to not exist for - // the domain socket. - if err := tf.Close(); err != nil { - return nil, err - } - if err := os.Remove(path); err != nil { - return nil, err - } - - l, err := net.Listen("unix", path) - if err != nil { - return nil, err - } - - // By default, unix sockets are only writable by the owner. Set up a custom - // group owner and group write permissions if configured. - if unixSocketCfg.Group != "" { - err = setGroupWritable(path, unixSocketCfg.Group, 0o660) - if err != nil { - return nil, err - } - } - - // Wrap the listener in rmListener so that the Unix domain socket file - // is removed on close. 
- return newDeleteFileListener(l, path), nil -} - -func setGroupWritable(path, groupString string, mode os.FileMode) error { - groupID, err := strconv.Atoi(groupString) - if err != nil { - group, err := user.LookupGroup(groupString) - if err != nil { - return fmt.Errorf("failed to find gid from %q: %w", groupString, err) - } - groupID, err = strconv.Atoi(group.Gid) - if err != nil { - return fmt.Errorf("failed to parse %q group's gid as an integer: %w", groupString, err) - } - } - - err = os.Chown(path, -1, groupID) - if err != nil { - return err - } - - err = os.Chmod(path, mode) - if err != nil { - return err - } - - return nil -} - -// rmListener is an implementation of net.Listener that forwards most -// calls to the listener but also calls an additional close function. We -// use this to cleanup the unix domain socket on close, as well as clean -// up multiplexed listeners. -type rmListener struct { - net.Listener - close func() error -} - -func newDeleteFileListener(ln net.Listener, path string) *rmListener { - return &rmListener{ - Listener: ln, - close: func() error { - return os.Remove(path) - }, - } -} - -func (l *rmListener) Close() error { - // Close the listener itself - if err := l.Listener.Close(); err != nil { - return err - } - - // Remove the file - return l.close() -} diff --git a/vendor/github.com/hashicorp/go-plugin/server_mux.go b/vendor/github.com/hashicorp/go-plugin/server_mux.go deleted file mode 100644 index 6b14b0c291d..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/server_mux.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package plugin - -import ( - "fmt" - "os" -) - -// ServeMuxMap is the type that is used to configure ServeMux -type ServeMuxMap map[string]*ServeConfig - -// ServeMux is like Serve, but serves multiple types of plugins determined -// by the argument given on the command-line. -// -// This command doesn't return until the plugin is done being executed. Any -// errors are logged or output to stderr. -func ServeMux(m ServeMuxMap) { - if len(os.Args) != 2 { - fmt.Fprintf(os.Stderr, - "Invoked improperly. This is an internal command that shouldn't\n"+ - "be manually invoked.\n") - os.Exit(1) - } - - opts, ok := m[os.Args[1]] - if !ok { - fmt.Fprintf(os.Stderr, "Unknown plugin: %s\n", os.Args[1]) - os.Exit(1) - } - - Serve(opts) -} diff --git a/vendor/github.com/hashicorp/go-plugin/stream.go b/vendor/github.com/hashicorp/go-plugin/stream.go deleted file mode 100644 index a2348642d86..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/stream.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package plugin - -import ( - "io" - "log" -) - -func copyStream(name string, dst io.Writer, src io.Reader) { - if src == nil { - panic(name + ": src is nil") - } - if dst == nil { - panic(name + ": dst is nil") - } - if _, err := io.Copy(dst, src); err != nil && err != io.EOF { - log.Printf("[ERR] plugin: stream copy '%s' error: %s", name, err) - } -} diff --git a/vendor/github.com/hashicorp/go-plugin/testing.go b/vendor/github.com/hashicorp/go-plugin/testing.go deleted file mode 100644 index a8735dfc8c7..00000000000 --- a/vendor/github.com/hashicorp/go-plugin/testing.go +++ /dev/null @@ -1,185 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package plugin - -import ( - "bytes" - "context" - "io" - "net" - "net/rpc" - - hclog "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-plugin/internal/grpcmux" - "github.com/mitchellh/go-testing-interface" - "google.golang.org/grpc" -) - -// TestOptions allows specifying options that can affect the behavior of the -// test functions -type TestOptions struct { - //ServerStdout causes the given value to be used in place of a blank buffer - //for RPCServer's Stdout - ServerStdout io.ReadCloser - - //ServerStderr causes the given value to be used in place of a blank buffer - //for RPCServer's Stderr - ServerStderr io.ReadCloser -} - -// The testing file contains test helpers that you can use outside of -// this package for making it easier to test plugins themselves. - -// TestConn is a helper function for returning a client and server -// net.Conn connected to each other. -func TestConn(t testing.T) (net.Conn, net.Conn) { - // Listen to any local port. This listener will be closed - // after a single connection is established. - l, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - t.Fatalf("err: %s", err) - } - - // Start a goroutine to accept our client connection - var serverConn net.Conn - doneCh := make(chan struct{}) - go func() { - defer close(doneCh) - defer l.Close() - var err error - serverConn, err = l.Accept() - if err != nil { - t.Fatalf("err: %s", err) - } - }() - - // Connect to the server - clientConn, err := net.Dial("tcp", l.Addr().String()) - if err != nil { - t.Fatalf("err: %s", err) - } - - // Wait for the server side to acknowledge it has connected - <-doneCh - - return clientConn, serverConn -} - -// TestRPCConn returns a rpc client and server connected to each other. -func TestRPCConn(t testing.T) (*rpc.Client, *rpc.Server) { - clientConn, serverConn := TestConn(t) - - server := rpc.NewServer() - go server.ServeConn(serverConn) - - client := rpc.NewClient(clientConn) - return client, server -} - -// TestPluginRPCConn returns a plugin RPC client and server that are connected -// together and configured. -func TestPluginRPCConn(t testing.T, ps map[string]Plugin, opts *TestOptions) (*RPCClient, *RPCServer) { - // Create two net.Conns we can use to shuttle our control connection - clientConn, serverConn := TestConn(t) - - // Start up the server - server := &RPCServer{Plugins: ps, Stdout: new(bytes.Buffer), Stderr: new(bytes.Buffer)} - if opts != nil { - if opts.ServerStdout != nil { - server.Stdout = opts.ServerStdout - } - if opts.ServerStderr != nil { - server.Stderr = opts.ServerStderr - } - } - go server.ServeConn(serverConn) - - // Connect the client to the server - client, err := NewRPCClient(clientConn, ps) - if err != nil { - t.Fatalf("err: %s", err) - } - - return client, server -} - -// TestGRPCConn returns a gRPC client conn and grpc server that are connected -// together and configured. The register function is used to register services -// prior to the Serve call. This is used to test gRPC connections. 
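-// (Editorial sketch with hypothetical generated stubs pb/echoServer:
-//
-//	conn, server := TestGRPCConn(t, func(s *grpc.Server) {
-//		pb.RegisterEchoServer(s, &echoServer{})
-//	})
-//	defer server.Stop()
-//	defer conn.Close()
-//
-// exercises a service end to end over a real localhost connection.)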
-func TestGRPCConn(t testing.T, register func(*grpc.Server)) (*grpc.ClientConn, *grpc.Server) { - // Create a listener - l, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - t.Fatalf("err: %s", err) - } - - server := grpc.NewServer() - register(server) - go server.Serve(l) - - // Connect to the server - conn, err := grpc.Dial( - l.Addr().String(), - grpc.WithBlock(), - grpc.WithInsecure()) - if err != nil { - t.Fatalf("err: %s", err) - } - - // Connection successful, close the listener - l.Close() - - return conn, server -} - -// TestPluginGRPCConn returns a plugin gRPC client and server that are connected -// together and configured. This is used to test gRPC connections. -func TestPluginGRPCConn(t testing.T, multiplex bool, ps map[string]Plugin) (*GRPCClient, *GRPCServer) { - // Create a listener - ln, err := serverListener(UnixSocketConfig{}) - if err != nil { - t.Fatal(err) - } - - logger := hclog.New(&hclog.LoggerOptions{ - Level: hclog.Debug, - }) - - // Start up the server - var muxer *grpcmux.GRPCServerMuxer - if multiplex { - muxer = grpcmux.NewGRPCServerMuxer(logger, ln) - ln = muxer - } - server := &GRPCServer{ - Plugins: ps, - DoneCh: make(chan struct{}), - Server: DefaultGRPCServer, - Stdout: new(bytes.Buffer), - Stderr: new(bytes.Buffer), - logger: logger, - muxer: muxer, - } - if err := server.Init(); err != nil { - t.Fatalf("err: %s", err) - } - go server.Serve(ln) - - client := &Client{ - address: ln.Addr(), - protocol: ProtocolGRPC, - config: &ClientConfig{ - Plugins: ps, - GRPCBrokerMultiplex: multiplex, - }, - logger: logger, - } - - grpcClient, err := newGRPCClient(context.Background(), client) - if err != nil { - t.Fatal(err) - } - - return grpcClient, server -} diff --git a/vendor/github.com/hashicorp/yamux/.gitignore b/vendor/github.com/hashicorp/golang-lru/.gitignore similarity index 100% rename from vendor/github.com/hashicorp/yamux/.gitignore rename to vendor/github.com/hashicorp/golang-lru/.gitignore diff --git a/vendor/github.com/hashicorp/golang-lru/.golangci.yml b/vendor/github.com/hashicorp/golang-lru/.golangci.yml new file mode 100644 index 00000000000..49202fc41e6 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/.golangci.yml @@ -0,0 +1,30 @@ +linters: + enable: + - megacheck + - revive + - govet + - unconvert + - megacheck + - gas + - gocyclo + - dupl + - misspell + - unparam + - unused + - typecheck + - ineffassign + - stylecheck + - exportloopref + - gocritic + - nakedret + - gosimple + - prealloc + fast: false + disable-all: true + +issues: + exclude-rules: + - path: _test\.go + linters: + - dupl + exclude-use-default: false diff --git a/vendor/github.com/hashicorp/golang-lru/2q.go b/vendor/github.com/hashicorp/golang-lru/2q.go new file mode 100644 index 00000000000..15fcad0306e --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/2q.go @@ -0,0 +1,222 @@ +package lru + +import ( + "fmt" + "sync" + + "github.com/hashicorp/golang-lru/simplelru" +) + +const ( + // Default2QRecentRatio is the ratio of the 2Q cache dedicated + // to recently added entries that have only been accessed once. + Default2QRecentRatio = 0.25 + + // Default2QGhostEntries is the default ratio of ghost + // entries kept to track entries recently evicted + Default2QGhostEntries = 0.50 +) + +// TwoQueueCache is a thread-safe fixed size 2Q cache. +// 2Q is an enhancement over the standard LRU cache +// in that it tracks both frequently and recently used +// entries separately. 
This avoids a burst in access to new
+// entries from evicting frequently used entries. It adds some
+// additional tracking overhead to the standard LRU cache, and is
+// computationally about 2x the cost, and adds some metadata
+// overhead. The ARCCache is similar, but does not require setting
+// any parameters.
+type TwoQueueCache struct {
+	size       int
+	recentSize int
+
+	recent      simplelru.LRUCache
+	frequent    simplelru.LRUCache
+	recentEvict simplelru.LRUCache
+	lock        sync.RWMutex
+}
+
+// New2Q creates a new TwoQueueCache using the default
+// values for the parameters.
+func New2Q(size int) (*TwoQueueCache, error) {
+	return New2QParams(size, Default2QRecentRatio, Default2QGhostEntries)
+}
+
+// New2QParams creates a new TwoQueueCache using the provided
+// parameter values.
+func New2QParams(size int, recentRatio, ghostRatio float64) (*TwoQueueCache, error) {
+	if size <= 0 {
+		return nil, fmt.Errorf("invalid size")
+	}
+	if recentRatio < 0.0 || recentRatio > 1.0 {
+		return nil, fmt.Errorf("invalid recent ratio")
+	}
+	if ghostRatio < 0.0 || ghostRatio > 1.0 {
+		return nil, fmt.Errorf("invalid ghost ratio")
+	}
+
+	// Determine the sub-sizes
+	recentSize := int(float64(size) * recentRatio)
+	evictSize := int(float64(size) * ghostRatio)
+
+	// Allocate the LRUs
+	recent, err := simplelru.NewLRU(size, nil)
+	if err != nil {
+		return nil, err
+	}
+	frequent, err := simplelru.NewLRU(size, nil)
+	if err != nil {
+		return nil, err
+	}
+	recentEvict, err := simplelru.NewLRU(evictSize, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// Initialize the cache
+	c := &TwoQueueCache{
+		size:        size,
+		recentSize:  recentSize,
+		recent:      recent,
+		frequent:    frequent,
+		recentEvict: recentEvict,
+	}
+	return c, nil
+}
+
+// Get looks up a key's value from the cache.
+func (c *TwoQueueCache) Get(key interface{}) (value interface{}, ok bool) {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	// Check if this is a frequent value
+	if val, ok := c.frequent.Get(key); ok {
+		return val, ok
+	}
+
+	// If the value is contained in recent, then we
+	// promote it to frequent
+	if val, ok := c.recent.Peek(key); ok {
+		c.recent.Remove(key)
+		c.frequent.Add(key, val)
+		return val, ok
+	}
+
+	// No hit
+	return nil, false
+}
+
+// Add adds a value to the cache.
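+// (Editor's note: Add below encodes the 2Q admission policy: a key already in
+// recent or in the recentEvict ghost list is promoted straight to frequent,
+// while a brand-new key lands in recent after ensureSpace makes room.)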
+func (c *TwoQueueCache) Add(key, value interface{}) { + c.lock.Lock() + defer c.lock.Unlock() + + // Check if the value is frequently used already, + // and just update the value + if c.frequent.Contains(key) { + c.frequent.Add(key, value) + return + } + + // Check if the value is recently used, and promote + // the value into the frequent list + if c.recent.Contains(key) { + c.recent.Remove(key) + c.frequent.Add(key, value) + return + } + + // If the value was recently evicted, add it to the + // frequently used list + if c.recentEvict.Contains(key) { + c.ensureSpace(true) + c.recentEvict.Remove(key) + c.frequent.Add(key, value) + return + } + + // Add to the recently seen list + c.ensureSpace(false) + c.recent.Add(key, value) +} + +// ensureSpace is used to ensure we have space in the cache +func (c *TwoQueueCache) ensureSpace(recentEvict bool) { + // If we have space, nothing to do + recentLen := c.recent.Len() + freqLen := c.frequent.Len() + if recentLen+freqLen < c.size { + return + } + + // If the recent buffer is larger than + // the target, evict from there + if recentLen > 0 && (recentLen > c.recentSize || (recentLen == c.recentSize && !recentEvict)) { + k, _, _ := c.recent.RemoveOldest() + c.recentEvict.Add(k, nil) + return + } + + // Remove from the frequent list otherwise + c.frequent.RemoveOldest() +} + +// Len returns the number of items in the cache. +func (c *TwoQueueCache) Len() int { + c.lock.RLock() + defer c.lock.RUnlock() + return c.recent.Len() + c.frequent.Len() +} + +// Keys returns a slice of the keys in the cache. +// The frequently used keys are first in the returned slice. +func (c *TwoQueueCache) Keys() []interface{} { + c.lock.RLock() + defer c.lock.RUnlock() + k1 := c.frequent.Keys() + k2 := c.recent.Keys() + return append(k1, k2...) +} + +// Remove removes the provided key from the cache. +func (c *TwoQueueCache) Remove(key interface{}) { + c.lock.Lock() + defer c.lock.Unlock() + if c.frequent.Remove(key) { + return + } + if c.recent.Remove(key) { + return + } + if c.recentEvict.Remove(key) { + return + } +} + +// Purge is used to completely clear the cache. +func (c *TwoQueueCache) Purge() { + c.lock.Lock() + defer c.lock.Unlock() + c.recent.Purge() + c.frequent.Purge() + c.recentEvict.Purge() +} + +// Contains is used to check if the cache contains a key +// without updating recency or frequency. +func (c *TwoQueueCache) Contains(key interface{}) bool { + c.lock.RLock() + defer c.lock.RUnlock() + return c.frequent.Contains(key) || c.recent.Contains(key) +} + +// Peek is used to inspect the cache value of a key +// without updating recency or frequency. +func (c *TwoQueueCache) Peek(key interface{}) (value interface{}, ok bool) { + c.lock.RLock() + defer c.lock.RUnlock() + if val, ok := c.frequent.Peek(key); ok { + return val, ok + } + return c.recent.Peek(key) +} diff --git a/vendor/github.com/hashicorp/golang-lru/README.md b/vendor/github.com/hashicorp/golang-lru/README.md new file mode 100644 index 00000000000..03bcfb5b76b --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/README.md @@ -0,0 +1,7 @@ +golang-lru +========== + +Please upgrade to github.com/hashicorp/golang-lru/v2 for all new code as v1 will +not be updated anymore. The v2 version supports generics and is faster; old code +can specify a specific tag, e.g. github.com/hashicorp/golang-lru/v1.0.2 for +backwards compatibility. 
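For reviewers unfamiliar with the package being vendored in: a minimal, self-contained sketch of the TwoQueueCache API added above (illustrative only, not part of the patch; it assumes the usual import path github.com/hashicorp/golang-lru, whose package name is lru):

package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

func main() {
	// A 128-entry 2Q cache: by default 25% of the capacity is targeted at
	// entries that have been seen only once (the "recent" list).
	cache, err := lru.New2Q(128)
	if err != nil {
		panic(err)
	}

	cache.Add("alpha", 1) // new key, enters the recent list
	if v, ok := cache.Get("alpha"); ok {
		// A hit on a recent entry promotes it to the frequent list.
		fmt.Println("alpha =", v)
	}
	fmt.Println("entries:", cache.Len())
}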
diff --git a/vendor/github.com/hashicorp/golang-lru/arc.go b/vendor/github.com/hashicorp/golang-lru/arc.go
new file mode 100644
index 00000000000..e396f8428aa
--- /dev/null
+++ b/vendor/github.com/hashicorp/golang-lru/arc.go
@@ -0,0 +1,256 @@
+package lru
+
+import (
+	"sync"
+
+	"github.com/hashicorp/golang-lru/simplelru"
+)
+
+// ARCCache is a thread-safe fixed size Adaptive Replacement Cache (ARC).
+// ARC is an enhancement over the standard LRU cache in that it tracks both
+// frequency and recency of use. This avoids a burst in access to new
+// entries from evicting the frequently used older entries. It adds some
+// additional tracking overhead to a standard LRU cache, computationally
+// it is roughly 2x the cost, and the extra memory overhead is linear
+// with the size of the cache. ARC has been patented by IBM, but is
+// similar to the TwoQueueCache (2Q) which requires setting parameters.
+type ARCCache struct {
+	size int // Size is the total capacity of the cache
+	p    int // P is the dynamic preference towards T1 or T2
+
+	t1 simplelru.LRUCache // T1 is the LRU for recently accessed items
+	b1 simplelru.LRUCache // B1 is the LRU for evictions from t1
+
+	t2 simplelru.LRUCache // T2 is the LRU for frequently accessed items
+	b2 simplelru.LRUCache // B2 is the LRU for evictions from t2
+
+	lock sync.RWMutex
+}
+
+// NewARC creates an ARC of the given size
+func NewARC(size int) (*ARCCache, error) {
+	// Create the sub LRUs
+	b1, err := simplelru.NewLRU(size, nil)
+	if err != nil {
+		return nil, err
+	}
+	b2, err := simplelru.NewLRU(size, nil)
+	if err != nil {
+		return nil, err
+	}
+	t1, err := simplelru.NewLRU(size, nil)
+	if err != nil {
+		return nil, err
+	}
+	t2, err := simplelru.NewLRU(size, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// Initialize the ARC
+	c := &ARCCache{
+		size: size,
+		p:    0,
+		t1:   t1,
+		b1:   b1,
+		t2:   t2,
+		b2:   b2,
+	}
+	return c, nil
+}
+
+// Get looks up a key's value from the cache.
+func (c *ARCCache) Get(key interface{}) (value interface{}, ok bool) {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	// If the value is contained in T1 (recent), then
+	// promote it to T2 (frequent)
+	if val, ok := c.t1.Peek(key); ok {
+		c.t1.Remove(key)
+		c.t2.Add(key, val)
+		return val, ok
+	}
+
+	// Check if the value is contained in T2 (frequent)
+	if val, ok := c.t2.Get(key); ok {
+		return val, ok
+	}
+
+	// No hit
+	return nil, false
+}
+
+// Add adds a value to the cache.
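+// (Editor's note: Add below is where ARC adapts: a hit in the b1 ghost list
+// means the recency side was sized too small, so the target p grows; a hit in
+// b2 shrinks it; replace() then evicts from t1 or t2 to honor the current p.)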
+func (c *ARCCache) Add(key, value interface{}) { + c.lock.Lock() + defer c.lock.Unlock() + + // Check if the value is contained in T1 (recent), and potentially + // promote it to frequent T2 + if c.t1.Contains(key) { + c.t1.Remove(key) + c.t2.Add(key, value) + return + } + + // Check if the value is already in T2 (frequent) and update it + if c.t2.Contains(key) { + c.t2.Add(key, value) + return + } + + // Check if this value was recently evicted as part of the + // recently used list + if c.b1.Contains(key) { + // T1 set is too small, increase P appropriately + delta := 1 + b1Len := c.b1.Len() + b2Len := c.b2.Len() + if b2Len > b1Len { + delta = b2Len / b1Len + } + if c.p+delta >= c.size { + c.p = c.size + } else { + c.p += delta + } + + // Potentially need to make room in the cache + if c.t1.Len()+c.t2.Len() >= c.size { + c.replace(false) + } + + // Remove from B1 + c.b1.Remove(key) + + // Add the key to the frequently used list + c.t2.Add(key, value) + return + } + + // Check if this value was recently evicted as part of the + // frequently used list + if c.b2.Contains(key) { + // T2 set is too small, decrease P appropriately + delta := 1 + b1Len := c.b1.Len() + b2Len := c.b2.Len() + if b1Len > b2Len { + delta = b1Len / b2Len + } + if delta >= c.p { + c.p = 0 + } else { + c.p -= delta + } + + // Potentially need to make room in the cache + if c.t1.Len()+c.t2.Len() >= c.size { + c.replace(true) + } + + // Remove from B2 + c.b2.Remove(key) + + // Add the key to the frequently used list + c.t2.Add(key, value) + return + } + + // Potentially need to make room in the cache + if c.t1.Len()+c.t2.Len() >= c.size { + c.replace(false) + } + + // Keep the size of the ghost buffers trim + if c.b1.Len() > c.size-c.p { + c.b1.RemoveOldest() + } + if c.b2.Len() > c.p { + c.b2.RemoveOldest() + } + + // Add to the recently seen list + c.t1.Add(key, value) +} + +// replace is used to adaptively evict from either T1 or T2 +// based on the current learned value of P +func (c *ARCCache) replace(b2ContainsKey bool) { + t1Len := c.t1.Len() + if t1Len > 0 && (t1Len > c.p || (t1Len == c.p && b2ContainsKey)) { + k, _, ok := c.t1.RemoveOldest() + if ok { + c.b1.Add(k, nil) + } + } else { + k, _, ok := c.t2.RemoveOldest() + if ok { + c.b2.Add(k, nil) + } + } +} + +// Len returns the number of cached entries +func (c *ARCCache) Len() int { + c.lock.RLock() + defer c.lock.RUnlock() + return c.t1.Len() + c.t2.Len() +} + +// Keys returns all the cached keys +func (c *ARCCache) Keys() []interface{} { + c.lock.RLock() + defer c.lock.RUnlock() + k1 := c.t1.Keys() + k2 := c.t2.Keys() + return append(k1, k2...) +} + +// Remove is used to purge a key from the cache +func (c *ARCCache) Remove(key interface{}) { + c.lock.Lock() + defer c.lock.Unlock() + if c.t1.Remove(key) { + return + } + if c.t2.Remove(key) { + return + } + if c.b1.Remove(key) { + return + } + if c.b2.Remove(key) { + return + } +} + +// Purge is used to clear the cache +func (c *ARCCache) Purge() { + c.lock.Lock() + defer c.lock.Unlock() + c.t1.Purge() + c.t2.Purge() + c.b1.Purge() + c.b2.Purge() +} + +// Contains is used to check if the cache contains a key +// without updating recency or frequency. +func (c *ARCCache) Contains(key interface{}) bool { + c.lock.RLock() + defer c.lock.RUnlock() + return c.t1.Contains(key) || c.t2.Contains(key) +} + +// Peek is used to inspect the cache value of a key +// without updating recency or frequency. 
+func (c *ARCCache) Peek(key interface{}) (value interface{}, ok bool) { + c.lock.RLock() + defer c.lock.RUnlock() + if val, ok := c.t1.Peek(key); ok { + return val, ok + } + return c.t2.Peek(key) +} diff --git a/vendor/github.com/hashicorp/golang-lru/doc.go b/vendor/github.com/hashicorp/golang-lru/doc.go new file mode 100644 index 00000000000..2547df979d0 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/doc.go @@ -0,0 +1,21 @@ +// Package lru provides three different LRU caches of varying sophistication. +// +// Cache is a simple LRU cache. It is based on the +// LRU implementation in groupcache: +// https://github.com/golang/groupcache/tree/master/lru +// +// TwoQueueCache tracks frequently used and recently used entries separately. +// This avoids a burst of accesses from taking out frequently used entries, +// at the cost of about 2x computational overhead and some extra bookkeeping. +// +// ARCCache is an adaptive replacement cache. It tracks recent evictions as +// well as recent usage in both the frequent and recent caches. Its +// computational overhead is comparable to TwoQueueCache, but the memory +// overhead is linear with the size of the cache. +// +// ARC has been patented by IBM, so do not use it if that is problematic for +// your program. +// +// All caches in this package take locks while operating, and are therefore +// thread-safe for consumers. +package lru diff --git a/vendor/github.com/hashicorp/golang-lru/lru.go b/vendor/github.com/hashicorp/golang-lru/lru.go new file mode 100644 index 00000000000..895d8e3ea0c --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/lru.go @@ -0,0 +1,231 @@ +package lru + +import ( + "sync" + + "github.com/hashicorp/golang-lru/simplelru" +) + +const ( + // DefaultEvictedBufferSize defines the default buffer size to store evicted key/val + DefaultEvictedBufferSize = 16 +) + +// Cache is a thread-safe fixed size LRU cache. +type Cache struct { + lru *simplelru.LRU + evictedKeys, evictedVals []interface{} + onEvictedCB func(k, v interface{}) + lock sync.RWMutex +} + +// New creates an LRU of the given size. +func New(size int) (*Cache, error) { + return NewWithEvict(size, nil) +} + +// NewWithEvict constructs a fixed size cache with the given eviction +// callback. +func NewWithEvict(size int, onEvicted func(key, value interface{})) (c *Cache, err error) { + // create a cache with default settings + c = &Cache{ + onEvictedCB: onEvicted, + } + if onEvicted != nil { + c.initEvictBuffers() + onEvicted = c.onEvicted + } + c.lru, err = simplelru.NewLRU(size, onEvicted) + return +} + +func (c *Cache) initEvictBuffers() { + c.evictedKeys = make([]interface{}, 0, DefaultEvictedBufferSize) + c.evictedVals = make([]interface{}, 0, DefaultEvictedBufferSize) +} + +// onEvicted saves the evicted key/val so they can be sent to the externally +// registered callback outside of the critical section +func (c *Cache) onEvicted(k, v interface{}) { + c.evictedKeys = append(c.evictedKeys, k) + c.evictedVals = append(c.evictedVals, v) +} + +// Purge is used to completely clear the cache. +func (c *Cache) Purge() { + var ks, vs []interface{} + c.lock.Lock() + c.lru.Purge() + if c.onEvictedCB != nil && len(c.evictedKeys) > 0 { + ks, vs = c.evictedKeys, c.evictedVals + c.initEvictBuffers() + } + c.lock.Unlock() + // invoke callback outside of critical section + if c.onEvictedCB != nil { + for i := 0; i < len(ks); i++ { + c.onEvictedCB(ks[i], vs[i]) + } + } +} + +// Add adds a value to the cache. Returns true if an eviction occurred.
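+// At most one entry can be evicted per Add, so only the first element of +// the evicted key/val buffers is read; the callback is invoked after the +// lock is released.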
+func (c *Cache) Add(key, value interface{}) (evicted bool) { + var k, v interface{} + c.lock.Lock() + evicted = c.lru.Add(key, value) + if c.onEvictedCB != nil && evicted { + k, v = c.evictedKeys[0], c.evictedVals[0] + c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0] + } + c.lock.Unlock() + if c.onEvictedCB != nil && evicted { + c.onEvictedCB(k, v) + } + return +} + +// Get looks up a key's value from the cache. +func (c *Cache) Get(key interface{}) (value interface{}, ok bool) { + c.lock.Lock() + value, ok = c.lru.Get(key) + c.lock.Unlock() + return value, ok +} + +// Contains checks if a key is in the cache, without updating the +// recent-ness or deleting it for being stale. +func (c *Cache) Contains(key interface{}) bool { + c.lock.RLock() + containKey := c.lru.Contains(key) + c.lock.RUnlock() + return containKey +} + +// Peek returns the key value (or undefined if not found) without updating +// the "recently used"-ness of the key. +func (c *Cache) Peek(key interface{}) (value interface{}, ok bool) { + c.lock.RLock() + value, ok = c.lru.Peek(key) + c.lock.RUnlock() + return value, ok +} + +// ContainsOrAdd checks if a key is in the cache without updating the +// recent-ness or deleting it for being stale, and if not, adds the value. +// Returns whether found and whether an eviction occurred. +func (c *Cache) ContainsOrAdd(key, value interface{}) (ok, evicted bool) { + var k, v interface{} + c.lock.Lock() + if c.lru.Contains(key) { + c.lock.Unlock() + return true, false + } + evicted = c.lru.Add(key, value) + if c.onEvictedCB != nil && evicted { + k, v = c.evictedKeys[0], c.evictedVals[0] + c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0] + } + c.lock.Unlock() + if c.onEvictedCB != nil && evicted { + c.onEvictedCB(k, v) + } + return false, evicted +} + +// PeekOrAdd checks if a key is in the cache without updating the +// recent-ness or deleting it for being stale, and if not, adds the value. +// Returns whether found and whether an eviction occurred. +func (c *Cache) PeekOrAdd(key, value interface{}) (previous interface{}, ok, evicted bool) { + var k, v interface{} + c.lock.Lock() + previous, ok = c.lru.Peek(key) + if ok { + c.lock.Unlock() + return previous, true, false + } + evicted = c.lru.Add(key, value) + if c.onEvictedCB != nil && evicted { + k, v = c.evictedKeys[0], c.evictedVals[0] + c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0] + } + c.lock.Unlock() + if c.onEvictedCB != nil && evicted { + c.onEvictedCB(k, v) + } + return nil, false, evicted +} + +// Remove removes the provided key from the cache. +func (c *Cache) Remove(key interface{}) (present bool) { + var k, v interface{} + c.lock.Lock() + present = c.lru.Remove(key) + if c.onEvictedCB != nil && present { + k, v = c.evictedKeys[0], c.evictedVals[0] + c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0] + } + c.lock.Unlock() + if c.onEvictedCB != nil && present { + c.onEvictedCB(k, v) + } + return +} + +// Resize changes the cache size. +func (c *Cache) Resize(size int) (evicted int) { + var ks, vs []interface{} + c.lock.Lock() + evicted = c.lru.Resize(size) + if c.onEvictedCB != nil && evicted > 0 { + ks, vs = c.evictedKeys, c.evictedVals + c.initEvictBuffers() + } + c.lock.Unlock() + if c.onEvictedCB != nil && evicted > 0 { + for i := 0; i < len(ks); i++ { + c.onEvictedCB(ks[i], vs[i]) + } + } + return evicted +} + +// RemoveOldest removes the oldest item from the cache. 
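+// The removed entry is reported to the eviction callback, which, as in Add +// and Remove, runs outside of the critical section.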
+func (c *Cache) RemoveOldest() (key, value interface{}, ok bool) { + var k, v interface{} + c.lock.Lock() + key, value, ok = c.lru.RemoveOldest() + if c.onEvictedCB != nil && ok { + k, v = c.evictedKeys[0], c.evictedVals[0] + c.evictedKeys, c.evictedVals = c.evictedKeys[:0], c.evictedVals[:0] + } + c.lock.Unlock() + if c.onEvictedCB != nil && ok { + c.onEvictedCB(k, v) + } + return +} + +// GetOldest returns the oldest entry +func (c *Cache) GetOldest() (key, value interface{}, ok bool) { + c.lock.RLock() + key, value, ok = c.lru.GetOldest() + c.lock.RUnlock() + return +} + +// Keys returns a slice of the keys in the cache, from oldest to newest. +func (c *Cache) Keys() []interface{} { + c.lock.RLock() + keys := c.lru.Keys() + c.lock.RUnlock() + return keys +} + +// Len returns the number of items in the cache. +func (c *Cache) Len() int { + c.lock.RLock() + length := c.lru.Len() + c.lock.RUnlock() + return length +} diff --git a/vendor/github.com/hashicorp/golang-lru/testing.go b/vendor/github.com/hashicorp/golang-lru/testing.go new file mode 100644 index 00000000000..492760782c5 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/testing.go @@ -0,0 +1,16 @@ +package lru + +import ( + "crypto/rand" + "math" + "math/big" + "testing" +) + +func getRand(tb testing.TB) int64 { + out, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt64)) + if err != nil { + tb.Fatal(err) + } + return out.Int64() +} diff --git a/vendor/github.com/hashicorp/yamux/LICENSE b/vendor/github.com/hashicorp/yamux/LICENSE deleted file mode 100644 index f0e5c79e181..00000000000 --- a/vendor/github.com/hashicorp/yamux/LICENSE +++ /dev/null @@ -1,362 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. "Contributor" - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. "Contributor Version" - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor's Contribution. - -1.3. "Contribution" - - means Covered Software of a particular Contributor. - -1.4. "Covered Software" - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. "Incompatible With Secondary Licenses" - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the terms of - a Secondary License. - -1.6. "Executable Form" - - means any form of the work other than Source Code Form. - -1.7. "Larger Work" - - means a work that combines Covered Software with other material, in a - separate file or files, that is not Covered Software. - -1.8. "License" - - means this document. - -1.9. "Licensable" - - means having the right to grant, to the maximum extent possible, whether - at the time of the initial grant or subsequently, any and all of the - rights conveyed by this License. - -1.10. "Modifications" - - means any of the following: - - a. any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. 
"Patent Claims" of a Contributor - - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the License, - by the making, using, selling, offering for sale, having made, import, - or transfer of either its Contributions or its Contributor Version. - -1.12. "Secondary License" - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. "Source Code Form" - - means the form of the work preferred for making modifications. - -1.14. "You" (or "Your") - - means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, "control" means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution - become effective for each Contribution on the date the Contributor first - distributes such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under - this License. No additional rights or licenses will be implied from the - distribution or licensing of Covered Software under this License. - Notwithstanding Section 2.1(b) above, no patent license is granted by a - Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of - its Contributions. - - This License does not grant any rights in the trademarks, service marks, - or logos of any Contributor (except as may be necessary to comply with - the notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this - License (see Section 10.2) or under the terms of a Secondary License (if - permitted under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its - Contributions are its original creation(s) or it has sufficient rights to - grant the rights to its Contributions conveyed by this License. - -2.6. 
Fair Use - - This License is not intended to limit any rights You have under - applicable copyright doctrines of fair use, fair dealing, or other - equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under - the terms of this License. You must inform recipients that the Source - Code Form of the Covered Software is governed by the terms of this - License, and how they can obtain a copy of this License. You may not - attempt to alter or restrict the recipients' rights in the Source Code - Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter the - recipients' rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for - the Covered Software. If the Larger Work is a combination of Covered - Software with a work governed by one or more Secondary Licenses, and the - Covered Software is not Incompatible With Secondary Licenses, this - License permits You to additionally distribute such Covered Software - under the terms of such Secondary License(s), so that the recipient of - the Larger Work may, at their option, further distribute the Covered - Software under the terms of either this License or such Secondary - License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices - (including copyright notices, patent notices, disclaimers of warranty, or - limitations of liability) contained within the Source Code Form of the - Covered Software, except that You may alter any license notices to the - extent required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on - behalf of any Contributor. You must make it absolutely clear that any - such warranty, support, indemnity, or liability obligation is offered by - You alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. 
Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, - judicial order, or regulation then You must: (a) comply with the terms of - this License to the maximum extent possible; and (b) describe the - limitations and the code they affect. Such description must be placed in a - text file included with all distributions of the Covered Software under - this License. Except to the extent prohibited by statute or regulation, - such description must be sufficiently detailed for a recipient of ordinary - skill to be able to understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing - basis, if such Contributor fails to notify You of the non-compliance by - some reasonable means prior to 60 days after You have come back into - compliance. Moreover, Your grants from a particular Contributor are - reinstated on an ongoing basis if such Contributor notifies You of the - non-compliance by some reasonable means, this is the first time You have - received notice of non-compliance with this License from such - Contributor, and You become compliant prior to 30 days after Your receipt - of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, - counter-claims, and cross-claims) alleging that a Contributor Version - directly or indirectly infringes any patent, then the rights granted to - You by any and all Contributors for the Covered Software under Section - 2.1 of this License shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an "as is" basis, - without warranty of any kind, either expressed, implied, or statutory, - including, without limitation, warranties that the Covered Software is free - of defects, merchantable, fit for a particular purpose or non-infringing. - The entire risk as to the quality and performance of the Covered Software - is with You. Should any Covered Software prove defective in any respect, - You (not any Contributor) assume the cost of any necessary servicing, - repair, or correction. This disclaimer of warranty constitutes an essential - part of this License. No use of any Covered Software is authorized under - this License except under this disclaimer. - -7. 
Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from - such party's negligence to the extent applicable law prohibits such - limitation. Some jurisdictions do not allow the exclusion or limitation of - incidental or consequential damages, so this exclusion and limitation may - not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts - of a jurisdiction where the defendant maintains its principal place of - business and such litigation shall be governed by laws of that - jurisdiction, without reference to its conflict-of-law provisions. Nothing - in this Section shall prevent a party's ability to bring cross-claims or - counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. Any law or regulation which provides that - the language of a contract shall be construed against the drafter shall not - be used to construe this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version - of the License under which You originally received the Covered Software, - or under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a - modified version of this License if you rename the license and remove - any references to the name of the license steward (except to note that - such modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary - Licenses If You choose to distribute Source Code Form that is - Incompatible With Secondary Licenses under the terms of this version of - the License, the notice described in Exhibit B of this License must be - attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, -then You may include the notice in a location (such as a LICENSE file in a -relevant directory) where a recipient would be likely to look for such a -notice. 
- -You may add additional accurate notices of copyright ownership. - -Exhibit B - "Incompatible With Secondary Licenses" Notice - - This Source Code Form is "Incompatible - With Secondary Licenses", as defined by - the Mozilla Public License, v. 2.0. \ No newline at end of file diff --git a/vendor/github.com/hashicorp/yamux/README.md b/vendor/github.com/hashicorp/yamux/README.md deleted file mode 100644 index d4db7fc99be..00000000000 --- a/vendor/github.com/hashicorp/yamux/README.md +++ /dev/null @@ -1,86 +0,0 @@ -# Yamux - -Yamux (Yet another Multiplexer) is a multiplexing library for Golang. -It relies on an underlying connection to provide reliability -and ordering, such as TCP or Unix domain sockets, and provides -stream-oriented multiplexing. It is inspired by SPDY but is not -interoperable with it. - -Yamux features include: - -* Bi-directional streams - * Streams can be opened by either client or server - * Useful for NAT traversal - * Server-side push support -* Flow control - * Avoid starvation - * Back-pressure to prevent overwhelming a receiver -* Keep Alives - * Enables persistent connections over a load balancer -* Efficient - * Enables thousands of logical streams with low overhead - -## Documentation - -For complete documentation, see the associated [Godoc](http://godoc.org/github.com/hashicorp/yamux). - -## Specification - -The full specification for Yamux is provided in the `spec.md` file. -It can be used as a guide to implementors of interoperable libraries. - -## Usage - -Using Yamux is remarkably simple: - -```go - -func client() { - // Get a TCP connection - conn, err := net.Dial(...) - if err != nil { - panic(err) - } - - // Setup client side of yamux - session, err := yamux.Client(conn, nil) - if err != nil { - panic(err) - } - - // Open a new stream - stream, err := session.Open() - if err != nil { - panic(err) - } - - // Stream implements net.Conn - stream.Write([]byte("ping")) -} - -func server() { - // Accept a TCP connection - conn, err := listener.Accept() - if err != nil { - panic(err) - } - - // Setup server side of yamux - session, err := yamux.Server(conn, nil) - if err != nil { - panic(err) - } - - // Accept a stream - stream, err := session.Accept() - if err != nil { - panic(err) - } - - // Listen for a message - buf := make([]byte, 4) - stream.Read(buf) -} - -``` - diff --git a/vendor/github.com/hashicorp/yamux/addr.go b/vendor/github.com/hashicorp/yamux/addr.go deleted file mode 100644 index f6a00199cdd..00000000000 --- a/vendor/github.com/hashicorp/yamux/addr.go +++ /dev/null @@ -1,60 +0,0 @@ -package yamux - -import ( - "fmt" - "net" -) - -// hasAddr is used to get the address from the underlying connection -type hasAddr interface { - LocalAddr() net.Addr - RemoteAddr() net.Addr -} - -// yamuxAddr is used when we cannot get the underlying address -type yamuxAddr struct { - Addr string -} - -func (*yamuxAddr) Network() string { - return "yamux" -} - -func (y *yamuxAddr) String() string { - return fmt.Sprintf("yamux:%s", y.Addr) -} - -// Addr is used to get the address of the listener. -func (s *Session) Addr() net.Addr { - return s.LocalAddr() -} - -// LocalAddr is used to get the local address of the -// underlying connection. 
-func (s *Session) LocalAddr() net.Addr { - addr, ok := s.conn.(hasAddr) - if !ok { - return &yamuxAddr{"local"} - } - return addr.LocalAddr() -} - -// RemoteAddr is used to get the address of the remote end -// of the underlying connection -func (s *Session) RemoteAddr() net.Addr { - addr, ok := s.conn.(hasAddr) - if !ok { - return &yamuxAddr{"remote"} - } - return addr.RemoteAddr() -} - -// LocalAddr returns the local address -func (s *Stream) LocalAddr() net.Addr { - return s.session.LocalAddr() -} - -// RemoteAddr returns the remote address -func (s *Stream) RemoteAddr() net.Addr { - return s.session.RemoteAddr() -} diff --git a/vendor/github.com/hashicorp/yamux/const.go b/vendor/github.com/hashicorp/yamux/const.go deleted file mode 100644 index 2fdbf844a8e..00000000000 --- a/vendor/github.com/hashicorp/yamux/const.go +++ /dev/null @@ -1,182 +0,0 @@ -package yamux - -import ( - "encoding/binary" - "fmt" -) - -// NetError implements net.Error -type NetError struct { - err error - timeout bool - temporary bool -} - -func (e *NetError) Error() string { - return e.err.Error() -} - -func (e *NetError) Timeout() bool { - return e.timeout -} - -func (e *NetError) Temporary() bool { - return e.temporary -} - -var ( - // ErrInvalidVersion means we received a frame with an - // invalid version - ErrInvalidVersion = fmt.Errorf("invalid protocol version") - - // ErrInvalidMsgType means we received a frame with an - // invalid message type - ErrInvalidMsgType = fmt.Errorf("invalid msg type") - - // ErrSessionShutdown is used if there is a shutdown during - // an operation - ErrSessionShutdown = fmt.Errorf("session shutdown") - - // ErrStreamsExhausted is returned if we have no more - // stream ids to issue - ErrStreamsExhausted = fmt.Errorf("streams exhausted") - - // ErrDuplicateStream is used if a duplicate stream is - // opened inbound - ErrDuplicateStream = fmt.Errorf("duplicate stream initiated") - - // ErrRecvWindowExceeded indicates the window was exceeded - ErrRecvWindowExceeded = fmt.Errorf("recv window exceeded") - - // ErrTimeout is used when we reach an IO deadline - ErrTimeout = &NetError{ - err: fmt.Errorf("i/o deadline reached"), - - // Error should meet net.Error interface for timeouts for compatibility - // with standard library expectations, such as http servers. - timeout: true, - } - - // ErrStreamClosed is returned when using a closed stream - ErrStreamClosed = fmt.Errorf("stream closed") - - // ErrUnexpectedFlag is set when we get an unexpected flag - ErrUnexpectedFlag = fmt.Errorf("unexpected flag") - - // ErrRemoteGoAway is used when we get a go away from the other side - ErrRemoteGoAway = fmt.Errorf("remote end is not accepting connections") - - // ErrConnectionReset is sent if a stream is reset. This can happen - // if the backlog is exceeded, or if there was a remote GoAway. - ErrConnectionReset = fmt.Errorf("connection reset") - - // ErrConnectionWriteTimeout indicates that we hit the "safety valve" - // timeout writing to the underlying stream connection. - ErrConnectionWriteTimeout = fmt.Errorf("connection write timeout") - - // ErrKeepAliveTimeout is sent if a missed keepalive caused the stream close - ErrKeepAliveTimeout = fmt.Errorf("keepalive timeout") -) - -const ( - // protoVersion is the only version we support - protoVersion uint8 = 0 -) - -const ( - // Data is used for data frames. They are followed - // by length bytes worth of payload. - typeData uint8 = iota - - // WindowUpdate is used to change the window of - // a given stream.
The length indicates the delta - update to the window. - typeWindowUpdate - - // Ping is sent as a keep-alive or to measure - // the RTT. The StreamID and Length value are echoed - // back in the response. - typePing - - // GoAway is sent to terminate a session. The StreamID - // should be 0 and the length is an error code. - typeGoAway -) - -const ( - // SYN is sent to signal a new stream. May - // be sent with a data payload - flagSYN uint16 = 1 << iota - - // ACK is sent to acknowledge a new stream. May - // be sent with a data payload - flagACK - - // FIN is sent to half-close the given stream. - // May be sent with a data payload. - flagFIN - - // RST is used to hard close a given stream. - flagRST -) - -const ( - // initialStreamWindow is the initial stream window size - initialStreamWindow uint32 = 256 * 1024 -) - -const ( - // goAwayNormal is sent on a normal termination - goAwayNormal uint32 = iota - - // goAwayProtoErr sent on a protocol error - goAwayProtoErr - - // goAwayInternalErr sent on an internal error - goAwayInternalErr -) - -const ( - sizeOfVersion = 1 - sizeOfType = 1 - sizeOfFlags = 2 - sizeOfStreamID = 4 - sizeOfLength = 4 - headerSize = sizeOfVersion + sizeOfType + sizeOfFlags + - sizeOfStreamID + sizeOfLength -) - -type header []byte - -func (h header) Version() uint8 { - return h[0] -} - -func (h header) MsgType() uint8 { - return h[1] -} - -func (h header) Flags() uint16 { - return binary.BigEndian.Uint16(h[2:4]) -} - -func (h header) StreamID() uint32 { - return binary.BigEndian.Uint32(h[4:8]) -} - -func (h header) Length() uint32 { - return binary.BigEndian.Uint32(h[8:12]) -} - -func (h header) String() string { - return fmt.Sprintf("Vsn:%d Type:%d Flags:%d StreamID:%d Length:%d", - h.Version(), h.MsgType(), h.Flags(), h.StreamID(), h.Length()) -} - -func (h header) encode(msgType uint8, flags uint16, streamID uint32, length uint32) { - h[0] = protoVersion - h[1] = msgType - binary.BigEndian.PutUint16(h[2:4], flags) - binary.BigEndian.PutUint32(h[4:8], streamID) - binary.BigEndian.PutUint32(h[8:12], length) -} diff --git a/vendor/github.com/hashicorp/yamux/mux.go b/vendor/github.com/hashicorp/yamux/mux.go deleted file mode 100644 index 0c3e67b022a..00000000000 --- a/vendor/github.com/hashicorp/yamux/mux.go +++ /dev/null @@ -1,114 +0,0 @@ -package yamux - -import ( - "fmt" - "io" - "log" - "os" - "time" -) - -// Config is used to tune the Yamux session -type Config struct { - // AcceptBacklog is used to limit how many streams may be - // waiting for an accept. - AcceptBacklog int - - // EnableKeepAlive is used to send periodic keep-alive - // messages using a ping. - EnableKeepAlive bool - - // KeepAliveInterval is how often to perform the keep alive - KeepAliveInterval time.Duration - - // ConnectionWriteTimeout is meant to be a "safety valve" timeout after - // which we will suspect a problem with the underlying connection and - // close it. This is only applied to writes, where there's generally - // an expectation that things will move along quickly. - ConnectionWriteTimeout time.Duration - - // MaxStreamWindowSize is used to control the maximum - // window size that we allow for a stream. - MaxStreamWindowSize uint32 - - // StreamOpenTimeout is the maximum amount of time that a stream will - // be allowed to remain in pending state while waiting for an ack from the peer. - // Once the timeout is reached the session will be gracefully closed. - // A zero value disables the StreamOpenTimeout, allowing unbounded - // blocking on OpenStream calls.
- StreamOpenTimeout time.Duration - - // StreamCloseTimeout is the maximum time that a stream will be allowed to - // be in a half-closed state when `Close` is called before forcibly - // closing the connection. Forcibly closed connections will empty the - // receive buffer, drop any future packets received for that stream, - // and send a RST to the remote side. - StreamCloseTimeout time.Duration - - // LogOutput is used to control the log destination. Either Logger or - // LogOutput can be set, not both. - LogOutput io.Writer - - // Logger is used to pass in the logger to be used. Either Logger or - // LogOutput can be set, not both. - Logger *log.Logger -} - -// DefaultConfig is used to return a default configuration -func DefaultConfig() *Config { - return &Config{ - AcceptBacklog: 256, - EnableKeepAlive: true, - KeepAliveInterval: 30 * time.Second, - ConnectionWriteTimeout: 10 * time.Second, - MaxStreamWindowSize: initialStreamWindow, - StreamCloseTimeout: 5 * time.Minute, - StreamOpenTimeout: 75 * time.Second, - LogOutput: os.Stderr, - } -} - -// VerifyConfig is used to verify the sanity of configuration -func VerifyConfig(config *Config) error { - if config.AcceptBacklog <= 0 { - return fmt.Errorf("backlog must be positive") - } - if config.KeepAliveInterval == 0 { - return fmt.Errorf("keep-alive interval must be positive") - } - if config.MaxStreamWindowSize < initialStreamWindow { - return fmt.Errorf("MaxStreamWindowSize must be larger than %d", initialStreamWindow) - } - if config.LogOutput != nil && config.Logger != nil { - return fmt.Errorf("both Logger and LogOutput may not be set, select one") - } else if config.LogOutput == nil && config.Logger == nil { - return fmt.Errorf("one of Logger or LogOutput must be set, select one") - } - return nil -} - -// Server is used to initialize a new server-side connection. -// There must be at most one server-side connection. If a nil config is -// provided, the default configuration will be used. -func Server(conn io.ReadWriteCloser, config *Config) (*Session, error) { - if config == nil { - config = DefaultConfig() - } - if err := VerifyConfig(config); err != nil { - return nil, err - } - return newSession(config, conn, false), nil -} - -// Client is used to initialize a new client-side connection. -// There must be at most one client-side connection. -func Client(conn io.ReadWriteCloser, config *Config) (*Session, error) { - if config == nil { - config = DefaultConfig() - } - - if err := VerifyConfig(config); err != nil { - return nil, err - } - return newSession(config, conn, true), nil -} diff --git a/vendor/github.com/hashicorp/yamux/session.go b/vendor/github.com/hashicorp/yamux/session.go deleted file mode 100644 index 38fe3ed1f06..00000000000 --- a/vendor/github.com/hashicorp/yamux/session.go +++ /dev/null @@ -1,732 +0,0 @@ -package yamux - -import ( - "bufio" - "bytes" - "fmt" - "io" - "io/ioutil" - "log" - "math" - "net" - "strings" - "sync" - "sync/atomic" - "time" -) - -// Session is used to wrap a reliable ordered connection and to -// multiplex it into multiple streams. -type Session struct { - // remoteGoAway indicates the remote side does - // not want further connections. Must be first for alignment. - remoteGoAway int32 - - // localGoAway indicates that we should stop - // accepting further connections. Must be first for alignment. - localGoAway int32 - - // nextStreamID is the next stream we should - // send. This depends on whether we are a client or server.
- nextStreamID uint32 - - // config holds our configuration - config *Config - - // logger is used for our logs - logger *log.Logger - - // conn is the underlying connection - conn io.ReadWriteCloser - - // bufRead is a buffered reader - bufRead *bufio.Reader - - // pings is used to track inflight pings - pings map[uint32]chan struct{} - pingID uint32 - pingLock sync.Mutex - - // streams maps a stream id to a stream, and inflight has an entry - // for any outgoing stream that has not yet been established. Both are - // protected by streamLock. - streams map[uint32]*Stream - inflight map[uint32]struct{} - streamLock sync.Mutex - - // synCh acts like a semaphore. It is sized to the AcceptBacklog which - // is assumed to be symmetric between the client and server. This allows - // the client to avoid exceeding the backlog and instead blocks the open. - synCh chan struct{} - - // acceptCh is used to pass ready streams to the client - acceptCh chan *Stream - - // sendCh is used to mark a stream as ready to send, - // or to send a header out directly. - sendCh chan *sendReady - - // recvDoneCh is closed when recv() exits to avoid a race - // between stream registration and stream shutdown - recvDoneCh chan struct{} - sendDoneCh chan struct{} - - // shutdown is used to safely close a session - shutdown bool - shutdownErr error - shutdownCh chan struct{} - shutdownLock sync.Mutex - shutdownErrLock sync.Mutex -} - -// sendReady is used to either mark a stream as ready -// or to directly send a header -type sendReady struct { - Hdr []byte - mu sync.Mutex // Protects Body from unsafe reads. - Body []byte - Err chan error -} - -// newSession is used to construct a new session -func newSession(config *Config, conn io.ReadWriteCloser, client bool) *Session { - logger := config.Logger - if logger == nil { - logger = log.New(config.LogOutput, "", log.LstdFlags) - } - - s := &Session{ - config: config, - logger: logger, - conn: conn, - bufRead: bufio.NewReader(conn), - pings: make(map[uint32]chan struct{}), - streams: make(map[uint32]*Stream), - inflight: make(map[uint32]struct{}), - synCh: make(chan struct{}, config.AcceptBacklog), - acceptCh: make(chan *Stream, config.AcceptBacklog), - sendCh: make(chan *sendReady, 64), - recvDoneCh: make(chan struct{}), - sendDoneCh: make(chan struct{}), - shutdownCh: make(chan struct{}), - } - if client { - s.nextStreamID = 1 - } else { - s.nextStreamID = 2 - } - go s.recv() - go s.send() - if config.EnableKeepAlive { - go s.keepalive() - } - return s -} - -// IsClosed does a safe check to see if we have shutdown -func (s *Session) IsClosed() bool { - select { - case <-s.shutdownCh: - return true - default: - return false - } -} - -// CloseChan returns a read-only channel which is closed as -// soon as the session is closed. 
-func (s *Session) CloseChan() <-chan struct{} { - return s.shutdownCh -} - -// NumStreams returns the number of currently open streams -func (s *Session) NumStreams() int { - s.streamLock.Lock() - num := len(s.streams) - s.streamLock.Unlock() - return num -} - -// Open is used to create a new stream as a net.Conn -func (s *Session) Open() (net.Conn, error) { - conn, err := s.OpenStream() - if err != nil { - return nil, err - } - return conn, nil -} - -// OpenStream is used to create a new stream -func (s *Session) OpenStream() (*Stream, error) { - if s.IsClosed() { - return nil, ErrSessionShutdown - } - if atomic.LoadInt32(&s.remoteGoAway) == 1 { - return nil, ErrRemoteGoAway - } - - // Block if we have too many inflight SYNs - select { - case s.synCh <- struct{}{}: - case <-s.shutdownCh: - return nil, ErrSessionShutdown - } - -GET_ID: - // Get an ID, and check for stream exhaustion - id := atomic.LoadUint32(&s.nextStreamID) - if id >= math.MaxUint32-1 { - return nil, ErrStreamsExhausted - } - if !atomic.CompareAndSwapUint32(&s.nextStreamID, id, id+2) { - goto GET_ID - } - - // Register the stream - stream := newStream(s, id, streamInit) - s.streamLock.Lock() - s.streams[id] = stream - s.inflight[id] = struct{}{} - s.streamLock.Unlock() - - if s.config.StreamOpenTimeout > 0 { - go s.setOpenTimeout(stream) - } - - // Send the window update to create - if err := stream.sendWindowUpdate(); err != nil { - select { - case <-s.synCh: - default: - s.logger.Printf("[ERR] yamux: aborted stream open without inflight syn semaphore") - } - return nil, err - } - return stream, nil -} - -// setOpenTimeout implements a timeout for streams that are opened but not established. -// If the StreamOpenTimeout is exceeded we assume the peer is unable to ACK, -// and close the session. -// The number of running timers is bounded by the capacity of the synCh. -func (s *Session) setOpenTimeout(stream *Stream) { - timer := time.NewTimer(s.config.StreamOpenTimeout) - defer timer.Stop() - - select { - case <-stream.establishCh: - return - case <-s.shutdownCh: - return - case <-timer.C: - // Timeout reached while waiting for ACK. - // Close the session to force connection re-establishment. - s.logger.Printf("[ERR] yamux: aborted stream open (destination=%s): %v", s.RemoteAddr().String(), ErrTimeout.err) - s.Close() - } -} - -// Accept is used to block until the next available stream -// is ready to be accepted. -func (s *Session) Accept() (net.Conn, error) { - conn, err := s.AcceptStream() - if err != nil { - return nil, err - } - return conn, err -} - -// AcceptStream is used to block until the next available stream -// is ready to be accepted. -func (s *Session) AcceptStream() (*Stream, error) { - select { - case stream := <-s.acceptCh: - if err := stream.sendWindowUpdate(); err != nil { - return nil, err - } - return stream, nil - case <-s.shutdownCh: - return nil, s.shutdownErr - } -} - -// Close is used to close the session and all streams. -// Attempts to send a GoAway before closing the connection. 
-func (s *Session) Close() error { - s.shutdownLock.Lock() - defer s.shutdownLock.Unlock() - - if s.shutdown { - return nil - } - s.shutdown = true - - s.shutdownErrLock.Lock() - if s.shutdownErr == nil { - s.shutdownErr = ErrSessionShutdown - } - s.shutdownErrLock.Unlock() - - close(s.shutdownCh) - - s.conn.Close() - <-s.recvDoneCh - - s.streamLock.Lock() - defer s.streamLock.Unlock() - for _, stream := range s.streams { - stream.forceClose() - } - <-s.sendDoneCh - return nil -} - -// exitErr is used to handle an error that is causing the -// session to terminate. -func (s *Session) exitErr(err error) { - s.shutdownErrLock.Lock() - if s.shutdownErr == nil { - s.shutdownErr = err - } - s.shutdownErrLock.Unlock() - s.Close() -} - -// GoAway can be used to prevent accepting further -// connections. It does not close the underlying conn. -func (s *Session) GoAway() error { - return s.waitForSend(s.goAway(goAwayNormal), nil) -} - -// goAway is used to send a goAway message -func (s *Session) goAway(reason uint32) header { - atomic.SwapInt32(&s.localGoAway, 1) - hdr := header(make([]byte, headerSize)) - hdr.encode(typeGoAway, 0, 0, reason) - return hdr -} - -// Ping is used to measure the RTT response time -func (s *Session) Ping() (time.Duration, error) { - // Get a channel for the ping - ch := make(chan struct{}) - - // Get a new ping id, mark as pending - s.pingLock.Lock() - id := s.pingID - s.pingID++ - s.pings[id] = ch - s.pingLock.Unlock() - - // Send the ping request - hdr := header(make([]byte, headerSize)) - hdr.encode(typePing, flagSYN, 0, id) - if err := s.waitForSend(hdr, nil); err != nil { - return 0, err - } - - // Wait for a response - start := time.Now() - select { - case <-ch: - case <-time.After(s.config.ConnectionWriteTimeout): - s.pingLock.Lock() - delete(s.pings, id) // Ignore it if a response comes later. - s.pingLock.Unlock() - return 0, ErrTimeout - case <-s.shutdownCh: - return 0, ErrSessionShutdown - } - - // Compute the RTT - return time.Now().Sub(start), nil -} - -// keepalive is a long running goroutine that periodically does -// a ping to keep the connection alive. -func (s *Session) keepalive() { - for { - select { - case <-time.After(s.config.KeepAliveInterval): - _, err := s.Ping() - if err != nil { - if err != ErrSessionShutdown { - s.logger.Printf("[ERR] yamux: keepalive failed: %v", err) - s.exitErr(ErrKeepAliveTimeout) - } - return - } - case <-s.shutdownCh: - return - } - } -} - -// waitForSendErr waits to send a header, checking for a potential shutdown -func (s *Session) waitForSend(hdr header, body []byte) error { - errCh := make(chan error, 1) - return s.waitForSendErr(hdr, body, errCh) -} - -// waitForSendErr waits to send a header with optional data, checking for a -// potential shutdown. Since there's the expectation that sends can happen -// in a timely manner, we enforce the connection write timeout here. -func (s *Session) waitForSendErr(hdr header, body []byte, errCh chan error) error { - t := timerPool.Get() - timer := t.(*time.Timer) - timer.Reset(s.config.ConnectionWriteTimeout) - defer func() { - timer.Stop() - select { - case <-timer.C: - default: - } - timerPool.Put(t) - }() - - ready := &sendReady{Hdr: hdr, Body: body, Err: errCh} - select { - case s.sendCh <- ready: - case <-s.shutdownCh: - return ErrSessionShutdown - case <-timer.C: - return ErrConnectionWriteTimeout - } - - bodyCopy := func() { - if body == nil { - return // A nil body is ignored. 
- } - - // In the event of session shutdown or connection write timeout, - // we need to prevent `send` from reading the body buffer after - // returning from this function since the caller may re-use the - // underlying array. - ready.mu.Lock() - defer ready.mu.Unlock() - - if ready.Body == nil { - return // Body was already copied in `send`. - } - newBody := make([]byte, len(body)) - copy(newBody, body) - ready.Body = newBody - } - - select { - case err := <-errCh: - return err - case <-s.shutdownCh: - bodyCopy() - return ErrSessionShutdown - case <-timer.C: - bodyCopy() - return ErrConnectionWriteTimeout - } -} - -// sendNoWait does a send without waiting. Since there's the expectation that -// the send happens right here, we enforce the connection write timeout if we -// can't queue the header to be sent. -func (s *Session) sendNoWait(hdr header) error { - t := timerPool.Get() - timer := t.(*time.Timer) - timer.Reset(s.config.ConnectionWriteTimeout) - defer func() { - timer.Stop() - select { - case <-timer.C: - default: - } - timerPool.Put(t) - }() - - select { - case s.sendCh <- &sendReady{Hdr: hdr}: - return nil - case <-s.shutdownCh: - return ErrSessionShutdown - case <-timer.C: - return ErrConnectionWriteTimeout - } -} - -// send is a long running goroutine that sends data -func (s *Session) send() { - if err := s.sendLoop(); err != nil { - s.exitErr(err) - } -} - -func (s *Session) sendLoop() error { - defer close(s.sendDoneCh) - var bodyBuf bytes.Buffer - for { - bodyBuf.Reset() - - select { - case ready := <-s.sendCh: - // Send a header if ready - if ready.Hdr != nil { - _, err := s.conn.Write(ready.Hdr) - if err != nil { - s.logger.Printf("[ERR] yamux: Failed to write header: %v", err) - asyncSendErr(ready.Err, err) - return err - } - } - - ready.mu.Lock() - if ready.Body != nil { - // Copy the body into the buffer to avoid - // holding a mutex lock during the write. 
- _, err := bodyBuf.Write(ready.Body) - if err != nil { - ready.Body = nil - ready.mu.Unlock() - s.logger.Printf("[ERR] yamux: Failed to copy body into buffer: %v", err) - asyncSendErr(ready.Err, err) - return err - } - ready.Body = nil - } - ready.mu.Unlock() - - if bodyBuf.Len() > 0 { - // Send data from a body if given - _, err := s.conn.Write(bodyBuf.Bytes()) - if err != nil { - s.logger.Printf("[ERR] yamux: Failed to write body: %v", err) - asyncSendErr(ready.Err, err) - return err - } - } - - // No error, successful send - asyncSendErr(ready.Err, nil) - case <-s.shutdownCh: - return nil - } - } -} - -// recv is a long running goroutine that accepts new data -func (s *Session) recv() { - if err := s.recvLoop(); err != nil { - s.exitErr(err) - } -} - -// Ensure that the index of the handler (typeData/typeWindowUpdate/etc) matches the message type -var ( - handlers = []func(*Session, header) error{ - typeData: (*Session).handleStreamMessage, - typeWindowUpdate: (*Session).handleStreamMessage, - typePing: (*Session).handlePing, - typeGoAway: (*Session).handleGoAway, - } -) - -// recvLoop continues to receive data until a fatal error is encountered -func (s *Session) recvLoop() error { - defer close(s.recvDoneCh) - hdr := header(make([]byte, headerSize)) - for { - // Read the header - if _, err := io.ReadFull(s.bufRead, hdr); err != nil { - if err != io.EOF && !strings.Contains(err.Error(), "closed") && !strings.Contains(err.Error(), "reset by peer") { - s.logger.Printf("[ERR] yamux: Failed to read header: %v", err) - } - return err - } - - // Verify the version - if hdr.Version() != protoVersion { - s.logger.Printf("[ERR] yamux: Invalid protocol version: %d", hdr.Version()) - return ErrInvalidVersion - } - - mt := hdr.MsgType() - if mt < typeData || mt > typeGoAway { - return ErrInvalidMsgType - } - - if err := handlers[mt](s, hdr); err != nil { - return err - } - } -} - -// handleStreamMessage handles either a data or window update frame -func (s *Session) handleStreamMessage(hdr header) error { - // Check for a new stream creation - id := hdr.StreamID() - flags := hdr.Flags() - if flags&flagSYN == flagSYN { - if err := s.incomingStream(id); err != nil { - return err - } - } - - // Get the stream - s.streamLock.Lock() - stream := s.streams[id] - s.streamLock.Unlock() - - // If we do not have a stream, likely we sent a RST - if stream == nil { - // Drain any data on the wire - if hdr.MsgType() == typeData && hdr.Length() > 0 { - s.logger.Printf("[WARN] yamux: Discarding data for stream: %d", id) - if _, err := io.CopyN(ioutil.Discard, s.bufRead, int64(hdr.Length())); err != nil { - s.logger.Printf("[ERR] yamux: Failed to discard data: %v", err) - return nil - } - } else { - s.logger.Printf("[WARN] yamux: frame for missing stream: %v", hdr) - } - return nil - } - - // Check if this is a window update - if hdr.MsgType() == typeWindowUpdate { - if err := stream.incrSendWindow(hdr, flags); err != nil { - if sendErr := s.sendNoWait(s.goAway(goAwayProtoErr)); sendErr != nil { - s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr) - } - return err - } - return nil - } - - // Read the new data - if err := stream.readData(hdr, flags, s.bufRead); err != nil { - if sendErr := s.sendNoWait(s.goAway(goAwayProtoErr)); sendErr != nil { - s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr) - } - return err - } - return nil -} - -// handlePing is invoked for a typePing frame -func (s *Session) handlePing(hdr header) error { - flags := hdr.Flags() - pingID := hdr.Length()
- - // Check if this is a query, respond back in a separate context so we - // don't interfere with the receiving thread blocking for the write. - if flags&flagSYN == flagSYN { - go func() { - hdr := header(make([]byte, headerSize)) - hdr.encode(typePing, flagACK, 0, pingID) - if err := s.sendNoWait(hdr); err != nil { - s.logger.Printf("[WARN] yamux: failed to send ping reply: %v", err) - } - }() - return nil - } - - // Handle a response - s.pingLock.Lock() - ch := s.pings[pingID] - if ch != nil { - delete(s.pings, pingID) - close(ch) - } - s.pingLock.Unlock() - return nil -} - -// handleGoAway is invoked for a typeGoAway frame -func (s *Session) handleGoAway(hdr header) error { - code := hdr.Length() - switch code { - case goAwayNormal: - atomic.SwapInt32(&s.remoteGoAway, 1) - case goAwayProtoErr: - s.logger.Printf("[ERR] yamux: received protocol error go away") - return fmt.Errorf("yamux protocol error") - case goAwayInternalErr: - s.logger.Printf("[ERR] yamux: received internal error go away") - return fmt.Errorf("remote yamux internal error") - default: - s.logger.Printf("[ERR] yamux: received unexpected go away") - return fmt.Errorf("unexpected go away received") - } - return nil -} - -// incomingStream is used to create a new incoming stream -func (s *Session) incomingStream(id uint32) error { - // Reject immediately if we are doing a go away - if atomic.LoadInt32(&s.localGoAway) == 1 { - hdr := header(make([]byte, headerSize)) - hdr.encode(typeWindowUpdate, flagRST, id, 0) - return s.sendNoWait(hdr) - } - - // Allocate a new stream - stream := newStream(s, id, streamSYNReceived) - - s.streamLock.Lock() - defer s.streamLock.Unlock() - - // Check if stream already exists - if _, ok := s.streams[id]; ok { - s.logger.Printf("[ERR] yamux: duplicate stream declared") - if sendErr := s.sendNoWait(s.goAway(goAwayProtoErr)); sendErr != nil { - s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr) - } - return ErrDuplicateStream - } - - // Register the stream - s.streams[id] = stream - - // Check if we've exceeded the backlog - select { - case s.acceptCh <- stream: - return nil - default: - // Backlog exceeded! RST the stream - s.logger.Printf("[WARN] yamux: backlog exceeded, forcing connection reset") - delete(s.streams, id) - hdr := header(make([]byte, headerSize)) - hdr.encode(typeWindowUpdate, flagRST, id, 0) - return s.sendNoWait(hdr) - } -} - -// closeStream is used to close a stream once both sides have -// issued a close. If there was an in-flight SYN and the stream -// was not yet established, then this will give the credit back. -func (s *Session) closeStream(id uint32) { - s.streamLock.Lock() - if _, ok := s.inflight[id]; ok { - select { - case <-s.synCh: - default: - s.logger.Printf("[ERR] yamux: SYN tracking out of sync") - } - } - delete(s.streams, id) - s.streamLock.Unlock() -} - -// establishStream is used to mark a stream that was in the -// SYN Sent state as established.
-func (s *Session) establishStream(id uint32) { - s.streamLock.Lock() - if _, ok := s.inflight[id]; ok { - delete(s.inflight, id) - } else { - s.logger.Printf("[ERR] yamux: established stream without inflight SYN (no tracking entry)") - } - select { - case <-s.synCh: - default: - s.logger.Printf("[ERR] yamux: established stream without inflight SYN (didn't have semaphore)") - } - s.streamLock.Unlock() -} diff --git a/vendor/github.com/hashicorp/yamux/spec.md b/vendor/github.com/hashicorp/yamux/spec.md deleted file mode 100644 index 183d797bdea..00000000000 --- a/vendor/github.com/hashicorp/yamux/spec.md +++ /dev/null @@ -1,140 +0,0 @@ -# Specification - -We use this document to detail the internal specification of Yamux. -This is used both as a guide for implementing Yamux and for building -alternative interoperable libraries. - -# Framing - -Yamux uses a streaming connection underneath, but imposes a message -framing so that it can be shared between many logical streams. Each -frame contains a header like: - -* Version (8 bits) -* Type (8 bits) -* Flags (16 bits) -* StreamID (32 bits) -* Length (32 bits) - -This means that each header has a 12 byte overhead. -All fields are encoded in network order (big endian). -Each field is described below: - -## Version Field - -The version field is used for future backward compatibility. At the -current time, the field is always set to 0, to indicate the initial -version. - -## Type Field - -The type field is used to switch the frame message type. The following -message types are supported: - -* 0x0 Data - Used to transmit data. May transmit zero length payloads - depending on the flags. - -* 0x1 Window Update - Used to update the sender's receive window size. - This is used to implement per-session flow control. - -* 0x2 Ping - Used to measure RTT. It can also be used to heart-beat - and do keep-alives over TCP. - -* 0x3 Go Away - Used to close a session. - -## Flag Field - -The flags field is used to provide additional information related -to the message type. The following flags are supported: - -* 0x1 SYN - Signals the start of a new stream. May be sent with a data or - window update message. Also sent with a ping to indicate outbound. - -* 0x2 ACK - Acknowledges the start of a new stream. May be sent with a data - or window update message. Also sent with a ping to indicate response. - -* 0x4 FIN - Performs a half-close of a stream. May be sent with a data - message or window update. - -* 0x8 RST - Reset a stream immediately. May be sent with a data or - window update message. - -## StreamID Field - -The StreamID field is used to identify the logical stream the frame -is addressing. The client side should use odd IDs, and the server even. -This prevents any collisions. Additionally, the 0 ID is reserved to represent -the session. - -Both Ping and Go Away messages should always use the 0 StreamID. - -## Length Field - -The meaning of the length field depends on the message type: - -* Data - provides the length of bytes following the header -* Window update - provides a delta update to the window size -* Ping - Contains an opaque value, echoed back -* Go Away - Contains an error code - -# Message Flow - -There is no explicit connection setup, as Yamux relies on an underlying -transport to be provided. However, there is a distinction between client -and server side of the connection. - -## Opening a stream - -To open a stream, an initial data or window update frame is sent -with a new StreamID.
-# Message Flow
-
-There is no explicit connection setup, as Yamux relies on an underlying
-transport to be provided. However, there is a distinction between client
-and server side of the connection.
-
-## Opening a stream
-
-To open a stream, an initial data or window update frame is sent
-with a new StreamID. The SYN flag should be set to signal a new stream.
-
-The receiver must then reply with either a data or window update frame
-with the StreamID along with the ACK flag to accept the stream or with
-the RST flag to reject the stream.
-
-Because we are relying on the reliable stream underneath, a stream
-can begin sending data once the SYN flag is sent. The corresponding
-ACK does not need to be received. This is particularly well suited
-for an RPC system where a client wants to open a stream and immediately
-fire a request without waiting for the RTT of the ACK.
-
-This does introduce the possibility of a stream being rejected
-after data has already been sent. This is a slight semantic difference
-from TCP, where the connection cannot be refused after it is opened.
-Clients should be prepared to handle this by checking for an error
-that indicates an RST was received.
-
-## Closing a stream
-
-To close a stream, either side sends a data or window update frame
-along with the FIN flag. This does a half-close indicating the sender
-will send no further data.
-
-Once both sides have closed their half of the stream, the stream is
-fully closed.
-
-Alternatively, if an error occurs, the RST flag can be used to
-hard close a stream immediately.
-
-## Flow Control
-
-Yamux initially starts each stream with a 256KB window size.
-There is no window size for the session.
-
-To prevent streams from stalling, window update frames should be
-sent regularly. Yamux can be configured to provide a larger limit for
-window sizes. Both sides assume the initial 256KB window, but can
-immediately send a window update as part of the SYN/ACK indicating a
-larger window.
-
-Only the bytes sent in Data frames are tracked as part of the window
-size, so both sides should count those bytes alone.
-
-## Session termination
-
-When a session is being terminated, the Go Away message should
-be sent. The Length should be set to one of the following to
-provide an error code:
-
-* 0x0 Normal termination
-* 0x1 Protocol error
-* 0x2 Internal error
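To make the flow-control arithmetic from the Flow Control section above concrete, this self-contained Go sketch computes a window-update delta the way the deleted stream.go's sendWindowUpdate (shown further below) does: the delta is the configured maximum window, minus buffered-but-unread bytes, minus the currently advertised receive window, and small deltas are suppressed to reduce chatter. The function and constant names are illustrative assumptions:

    package main

    import "fmt"

    const initialStreamWindow = 256 * 1024 // 256KB, per the spec

    // windowDelta mirrors the update rule in the deleted stream.go:
    // delta = maxWindow - buffered - currentRecvWindow, and updates smaller
    // than half the max window are skipped until more data is consumed.
    func windowDelta(maxWindow, buffered, recvWindow uint32) (delta uint32, send bool) {
    	delta = (maxWindow - buffered) - recvWindow
    	if delta < maxWindow/2 {
    		return 0, false // not yet worth a Window Update frame
    	}
    	return delta, true
    }

    func main() {
    	// After the application has consumed most of a 256KB window and only
    	// 4KB remains buffered, a large delta is announced to the peer.
    	delta, send := windowDelta(initialStreamWindow, 4*1024, 64*1024)
    	fmt.Println(delta, send) // 192512 true
    }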
diff --git a/vendor/github.com/hashicorp/yamux/stream.go b/vendor/github.com/hashicorp/yamux/stream.go
deleted file mode 100644
index 23d08fcc8da..00000000000
--- a/vendor/github.com/hashicorp/yamux/stream.go
+++ /dev/null
@@ -1,544 +0,0 @@
-package yamux
-
-import (
-	"bytes"
-	"errors"
-	"io"
-	"sync"
-	"sync/atomic"
-	"time"
-)
-
-type streamState int
-
-const (
-	streamInit streamState = iota
-	streamSYNSent
-	streamSYNReceived
-	streamEstablished
-	streamLocalClose
-	streamRemoteClose
-	streamClosed
-	streamReset
-)
-
-// Stream is used to represent a logical stream
-// within a session.
-type Stream struct {
-	recvWindow uint32
-	sendWindow uint32
-
-	id      uint32
-	session *Session
-
-	state     streamState
-	stateLock sync.Mutex
-
-	recvBuf  *bytes.Buffer
-	recvLock sync.Mutex
-
-	controlHdr     header
-	controlErr     chan error
-	controlHdrLock sync.Mutex
-
-	sendHdr  header
-	sendErr  chan error
-	sendLock sync.Mutex
-
-	recvNotifyCh chan struct{}
-	sendNotifyCh chan struct{}
-
-	readDeadline  atomic.Value // time.Time
-	writeDeadline atomic.Value // time.Time
-
-	// establishCh is notified if the stream is established or being closed.
-	establishCh chan struct{}
-
-	// closeTimer is set with stateLock held to honor the StreamCloseTimeout
-	// setting on Session.
-	closeTimer *time.Timer
-}
-
-// newStream is used to construct a new stream within
-// a given session for an ID
-func newStream(session *Session, id uint32, state streamState) *Stream {
-	s := &Stream{
-		id:           id,
-		session:      session,
-		state:        state,
-		controlHdr:   header(make([]byte, headerSize)),
-		controlErr:   make(chan error, 1),
-		sendHdr:      header(make([]byte, headerSize)),
-		sendErr:      make(chan error, 1),
-		recvWindow:   initialStreamWindow,
-		sendWindow:   initialStreamWindow,
-		recvNotifyCh: make(chan struct{}, 1),
-		sendNotifyCh: make(chan struct{}, 1),
-		establishCh:  make(chan struct{}, 1),
-	}
-	s.readDeadline.Store(time.Time{})
-	s.writeDeadline.Store(time.Time{})
-	return s
-}
-
-// Session returns the associated stream session
-func (s *Stream) Session() *Session {
-	return s.session
-}
-
-// StreamID returns the ID of this stream
-func (s *Stream) StreamID() uint32 {
-	return s.id
-}
-
-// Read is used to read from the stream
-func (s *Stream) Read(b []byte) (n int, err error) {
-	defer asyncNotify(s.recvNotifyCh)
-START:
-	s.stateLock.Lock()
-	switch s.state {
-	case streamLocalClose:
-		fallthrough
-	case streamRemoteClose:
-		fallthrough
-	case streamClosed:
-		s.recvLock.Lock()
-		if s.recvBuf == nil || s.recvBuf.Len() == 0 {
-			s.recvLock.Unlock()
-			s.stateLock.Unlock()
-			return 0, io.EOF
-		}
-		s.recvLock.Unlock()
-	case streamReset:
-		s.stateLock.Unlock()
-		return 0, ErrConnectionReset
-	}
-	s.stateLock.Unlock()
-
-	// If there is no data available, block
-	s.recvLock.Lock()
-	if s.recvBuf == nil || s.recvBuf.Len() == 0 {
-		s.recvLock.Unlock()
-		goto WAIT
-	}
-
-	// Read any bytes
-	n, _ = s.recvBuf.Read(b)
-	s.recvLock.Unlock()
-
-	// Send a window update potentially
-	err = s.sendWindowUpdate()
-	if err == ErrSessionShutdown {
-		err = nil
-	}
-	return n, err
-
-WAIT:
-	var timeout <-chan time.Time
-	var timer *time.Timer
-	readDeadline := s.readDeadline.Load().(time.Time)
-	if !readDeadline.IsZero() {
-		delay := readDeadline.Sub(time.Now())
-		timer = time.NewTimer(delay)
-		timeout = timer.C
-	}
-	select {
-	case <-s.recvNotifyCh:
-		if timer != nil {
-			timer.Stop()
-		}
-		goto START
-	case <-timeout:
-		return 0, ErrTimeout
-	}
-}
-
-// Write is used to write to the stream
-func (s *Stream) Write(b []byte) (n int, err error) {
-	s.sendLock.Lock()
-	defer s.sendLock.Unlock()
-	total := 0
-	for total < len(b) {
-		n, err := s.write(b[total:])
-		total += n
-		if err != nil {
-			return total, err
-		}
-	}
-	return total, nil
-}
-
-// write is used to write to the stream, may return on
-// a short write.
-func (s *Stream) write(b []byte) (n int, err error) {
-	var flags uint16
-	var max uint32
-	var body []byte
-START:
-	s.stateLock.Lock()
-	switch s.state {
-	case streamLocalClose:
-		fallthrough
-	case streamClosed:
-		s.stateLock.Unlock()
-		return 0, ErrStreamClosed
-	case streamReset:
-		s.stateLock.Unlock()
-		return 0, ErrConnectionReset
-	}
-	s.stateLock.Unlock()
-
-	// If there is no send window available, block
-	window := atomic.LoadUint32(&s.sendWindow)
-	if window == 0 {
-		goto WAIT
-	}
-
-	// Determine the flags if any
-	flags = s.sendFlags()
-
-	// Send up to our send window
-	max = min(window, uint32(len(b)))
-	body = b[:max]
-
-	// Send the header
-	s.sendHdr.encode(typeData, flags, s.id, max)
-	if err = s.session.waitForSendErr(s.sendHdr, body, s.sendErr); err != nil {
-		if errors.Is(err, ErrSessionShutdown) || errors.Is(err, ErrConnectionWriteTimeout) {
-			// Message left in ready queue, header re-use is unsafe.
- s.sendHdr = header(make([]byte, headerSize)) - } - return 0, err - } - - // Reduce our send window - atomic.AddUint32(&s.sendWindow, ^uint32(max-1)) - - // Unlock - return int(max), err - -WAIT: - var timeout <-chan time.Time - writeDeadline := s.writeDeadline.Load().(time.Time) - if !writeDeadline.IsZero() { - delay := writeDeadline.Sub(time.Now()) - timeout = time.After(delay) - } - select { - case <-s.sendNotifyCh: - goto START - case <-timeout: - return 0, ErrTimeout - } - return 0, nil -} - -// sendFlags determines any flags that are appropriate -// based on the current stream state -func (s *Stream) sendFlags() uint16 { - s.stateLock.Lock() - defer s.stateLock.Unlock() - var flags uint16 - switch s.state { - case streamInit: - flags |= flagSYN - s.state = streamSYNSent - case streamSYNReceived: - flags |= flagACK - s.state = streamEstablished - } - return flags -} - -// sendWindowUpdate potentially sends a window update enabling -// further writes to take place. Must be invoked with the lock. -func (s *Stream) sendWindowUpdate() error { - s.controlHdrLock.Lock() - defer s.controlHdrLock.Unlock() - - // Determine the delta update - max := s.session.config.MaxStreamWindowSize - var bufLen uint32 - s.recvLock.Lock() - if s.recvBuf != nil { - bufLen = uint32(s.recvBuf.Len()) - } - delta := (max - bufLen) - s.recvWindow - - // Determine the flags if any - flags := s.sendFlags() - - // Check if we can omit the update - if delta < (max/2) && flags == 0 { - s.recvLock.Unlock() - return nil - } - - // Update our window - s.recvWindow += delta - s.recvLock.Unlock() - - // Send the header - s.controlHdr.encode(typeWindowUpdate, flags, s.id, delta) - if err := s.session.waitForSendErr(s.controlHdr, nil, s.controlErr); err != nil { - if errors.Is(err, ErrSessionShutdown) || errors.Is(err, ErrConnectionWriteTimeout) { - // Message left in ready queue, header re-use is unsafe. - s.controlHdr = header(make([]byte, headerSize)) - } - return err - } - return nil -} - -// sendClose is used to send a FIN -func (s *Stream) sendClose() error { - s.controlHdrLock.Lock() - defer s.controlHdrLock.Unlock() - - flags := s.sendFlags() - flags |= flagFIN - s.controlHdr.encode(typeWindowUpdate, flags, s.id, 0) - if err := s.session.waitForSendErr(s.controlHdr, nil, s.controlErr); err != nil { - if errors.Is(err, ErrSessionShutdown) || errors.Is(err, ErrConnectionWriteTimeout) { - // Message left in ready queue, header re-use is unsafe. - s.controlHdr = header(make([]byte, headerSize)) - } - return err - } - return nil -} - -// Close is used to close the stream -func (s *Stream) Close() error { - closeStream := false - s.stateLock.Lock() - switch s.state { - // Opened means we need to signal a close - case streamSYNSent: - fallthrough - case streamSYNReceived: - fallthrough - case streamEstablished: - s.state = streamLocalClose - goto SEND_CLOSE - - case streamLocalClose: - case streamRemoteClose: - s.state = streamClosed - closeStream = true - goto SEND_CLOSE - - case streamClosed: - case streamReset: - default: - panic("unhandled state") - } - s.stateLock.Unlock() - return nil -SEND_CLOSE: - // This shouldn't happen (the more realistic scenario to cancel the - // timer is via processFlags) but just in case this ever happens, we - // cancel the timer to prevent dangling timers. - if s.closeTimer != nil { - s.closeTimer.Stop() - s.closeTimer = nil - } - - // If we have a StreamCloseTimeout set we start the timeout timer. 
- // We do this only if we're not already closing the stream since that - // means this was a graceful close. - // - // This prevents memory leaks if one side (this side) closes and the - // remote side poorly behaves and never responds with a FIN to complete - // the close. After the specified timeout, we clean our resources up no - // matter what. - if !closeStream && s.session.config.StreamCloseTimeout > 0 { - s.closeTimer = time.AfterFunc( - s.session.config.StreamCloseTimeout, s.closeTimeout) - } - - s.stateLock.Unlock() - s.sendClose() - s.notifyWaiting() - if closeStream { - s.session.closeStream(s.id) - } - return nil -} - -// closeTimeout is called after StreamCloseTimeout during a close to -// close this stream. -func (s *Stream) closeTimeout() { - // Close our side forcibly - s.forceClose() - - // Free the stream from the session map - s.session.closeStream(s.id) - - // Send a RST so the remote side closes too. - s.sendLock.Lock() - defer s.sendLock.Unlock() - hdr := header(make([]byte, headerSize)) - hdr.encode(typeWindowUpdate, flagRST, s.id, 0) - s.session.sendNoWait(hdr) -} - -// forceClose is used for when the session is exiting -func (s *Stream) forceClose() { - s.stateLock.Lock() - s.state = streamClosed - s.stateLock.Unlock() - s.notifyWaiting() -} - -// processFlags is used to update the state of the stream -// based on set flags, if any. Lock must be held -func (s *Stream) processFlags(flags uint16) error { - s.stateLock.Lock() - defer s.stateLock.Unlock() - - // Close the stream without holding the state lock - closeStream := false - defer func() { - if closeStream { - if s.closeTimer != nil { - // Stop our close timeout timer since we gracefully closed - s.closeTimer.Stop() - } - - s.session.closeStream(s.id) - } - }() - - if flags&flagACK == flagACK { - if s.state == streamSYNSent { - s.state = streamEstablished - } - asyncNotify(s.establishCh) - s.session.establishStream(s.id) - } - if flags&flagFIN == flagFIN { - switch s.state { - case streamSYNSent: - fallthrough - case streamSYNReceived: - fallthrough - case streamEstablished: - s.state = streamRemoteClose - s.notifyWaiting() - case streamLocalClose: - s.state = streamClosed - closeStream = true - s.notifyWaiting() - default: - s.session.logger.Printf("[ERR] yamux: unexpected FIN flag in state %d", s.state) - return ErrUnexpectedFlag - } - } - if flags&flagRST == flagRST { - s.state = streamReset - closeStream = true - s.notifyWaiting() - } - return nil -} - -// notifyWaiting notifies all the waiting channels -func (s *Stream) notifyWaiting() { - asyncNotify(s.recvNotifyCh) - asyncNotify(s.sendNotifyCh) - asyncNotify(s.establishCh) -} - -// incrSendWindow updates the size of our send window -func (s *Stream) incrSendWindow(hdr header, flags uint16) error { - if err := s.processFlags(flags); err != nil { - return err - } - - // Increase window, unblock a sender - atomic.AddUint32(&s.sendWindow, hdr.Length()) - asyncNotify(s.sendNotifyCh) - return nil -} - -// readData is used to handle a data frame -func (s *Stream) readData(hdr header, flags uint16, conn io.Reader) error { - if err := s.processFlags(flags); err != nil { - return err - } - - // Check that our recv window is not exceeded - length := hdr.Length() - if length == 0 { - return nil - } - - // Wrap in a limited reader - conn = &io.LimitedReader{R: conn, N: int64(length)} - - // Copy into buffer - s.recvLock.Lock() - - if length > s.recvWindow { - s.session.logger.Printf("[ERR] yamux: receive window exceeded (stream: %d, remain: %d, recv: %d)", s.id, 
s.recvWindow, length) - s.recvLock.Unlock() - return ErrRecvWindowExceeded - } - - if s.recvBuf == nil { - // Allocate the receive buffer just-in-time to fit the full data frame. - // This way we can read in the whole packet without further allocations. - s.recvBuf = bytes.NewBuffer(make([]byte, 0, length)) - } - copiedLength, err := io.Copy(s.recvBuf, conn) - if err != nil { - s.session.logger.Printf("[ERR] yamux: Failed to read stream data: %v", err) - s.recvLock.Unlock() - return err - } - - // Decrement the receive window - s.recvWindow -= uint32(copiedLength) - s.recvLock.Unlock() - - // Unblock any readers - asyncNotify(s.recvNotifyCh) - return nil -} - -// SetDeadline sets the read and write deadlines -func (s *Stream) SetDeadline(t time.Time) error { - if err := s.SetReadDeadline(t); err != nil { - return err - } - if err := s.SetWriteDeadline(t); err != nil { - return err - } - return nil -} - -// SetReadDeadline sets the deadline for blocked and future Read calls. -func (s *Stream) SetReadDeadline(t time.Time) error { - s.readDeadline.Store(t) - asyncNotify(s.recvNotifyCh) - return nil -} - -// SetWriteDeadline sets the deadline for blocked and future Write calls -func (s *Stream) SetWriteDeadline(t time.Time) error { - s.writeDeadline.Store(t) - asyncNotify(s.sendNotifyCh) - return nil -} - -// Shrink is used to compact the amount of buffers utilized -// This is useful when using Yamux in a connection pool to reduce -// the idle memory utilization. -func (s *Stream) Shrink() { - s.recvLock.Lock() - if s.recvBuf != nil && s.recvBuf.Len() == 0 { - s.recvBuf = nil - } - s.recvLock.Unlock() -} diff --git a/vendor/github.com/hashicorp/yamux/util.go b/vendor/github.com/hashicorp/yamux/util.go deleted file mode 100644 index 8a73e9249a6..00000000000 --- a/vendor/github.com/hashicorp/yamux/util.go +++ /dev/null @@ -1,43 +0,0 @@ -package yamux - -import ( - "sync" - "time" -) - -var ( - timerPool = &sync.Pool{ - New: func() interface{} { - timer := time.NewTimer(time.Hour * 1e6) - timer.Stop() - return timer - }, - } -) - -// asyncSendErr is used to try an async send of an error -func asyncSendErr(ch chan error, err error) { - if ch == nil { - return - } - select { - case ch <- err: - default: - } -} - -// asyncNotify is used to signal a waiting goroutine -func asyncNotify(ch chan struct{}) { - select { - case ch <- struct{}{}: - default: - } -} - -// min computes the minimum of two values -func min(a, b uint32) uint32 { - if a < b { - return a - } - return b -} diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/configmanager/grpc/manager.go b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/configmanager/grpc/manager.go index f941a178e6c..cf5c47eee75 100644 --- a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/configmanager/grpc/manager.go +++ b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/configmanager/grpc/manager.go @@ -1,16 +1,5 @@ // Copyright (c) 2018 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package grpc @@ -47,6 +36,6 @@ func (s *ConfigManagerProxy) GetSamplingStrategy(ctx context.Context, serviceNam } // GetBaggageRestrictions returns baggage restrictions from collector. -func (s *ConfigManagerProxy) GetBaggageRestrictions(_ context.Context, _ string) ([]*baggage.BaggageRestriction, error) { +func (*ConfigManagerProxy) GetBaggageRestrictions(_ context.Context, _ string) ([]*baggage.BaggageRestriction, error) { return nil, errors.New("baggage not implemented") } diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/configmanager/manager.go b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/configmanager/manager.go index de12a5d6f74..0041ed9414b 100644 --- a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/configmanager/manager.go +++ b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/configmanager/manager.go @@ -1,17 +1,6 @@ // Copyright (c) 2019 The Jaeger Authors. // Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package configmanager diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/configmanager/metrics.go b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/configmanager/metrics.go index 08b64d76678..cec0f5d3c1a 100644 --- a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/configmanager/metrics.go +++ b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/configmanager/metrics.go @@ -1,16 +1,5 @@ // Copyright (c) 2018 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package configmanager diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/customtransport/buffered_read_transport.go b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/customtransport/buffered_read_transport.go index 929457939ad..24058042b1f 100644 --- a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/customtransport/buffered_read_transport.go +++ b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/customtransport/buffered_read_transport.go @@ -1,17 +1,6 @@ // Copyright (c) 2019 The Jaeger Authors. // Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package customtransport @@ -36,19 +25,19 @@ func NewTBufferedReadTransport(readBuf *bytes.Buffer) (*TBufferedReadTransport, // IsOpen does nothing as transport is not maintaining the connection // Required to maintain thrift.TTransport interface -func (p *TBufferedReadTransport) IsOpen() bool { +func (*TBufferedReadTransport) IsOpen() bool { return true } // Open does nothing as transport is not maintaining the connection // Required to maintain thrift.TTransport interface -func (p *TBufferedReadTransport) Open() error { +func (*TBufferedReadTransport) Open() error { return nil } // Close does nothing as transport is not maintaining the connection // Required to maintain thrift.TTransport interface -func (p *TBufferedReadTransport) Close() error { +func (*TBufferedReadTransport) Close() error { return nil } @@ -60,6 +49,7 @@ func (p *TBufferedReadTransport) Read(buf []byte) (int, error) { // RemainingBytes returns the number of bytes left to be read from the readBuf func (p *TBufferedReadTransport) RemainingBytes() uint64 { + //nolint: gosec // G115 return uint64(p.readBuf.Len()) } @@ -72,6 +62,6 @@ func (p *TBufferedReadTransport) Write(buf []byte) (int, error) { // Flush does nothing as udp server does not write responses back // Required to maintain thrift.TTransport interface -func (p *TBufferedReadTransport) Flush(_ context.Context) error { +func (*TBufferedReadTransport) Flush(_ context.Context) error { return nil } diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/httpserver/srv.go b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/httpserver/srv.go index 698bad33a7e..384b4327e19 100644 --- a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/httpserver/srv.go +++ b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/httpserver/srv.go @@ -1,17 +1,6 @@ // Copyright (c) 2019 The Jaeger Authors. // Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package httpserver diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/processors/processor.go b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/processors/processor.go index 2ade7a29b52..947ca423104 100644 --- a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/processors/processor.go +++ b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/processors/processor.go @@ -1,17 +1,6 @@ // Copyright (c) 2019 The Jaeger Authors. // Copyright (c) 2017 Uber Technologies, Inc. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package processors diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/processors/thrift_processor.go b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/processors/thrift_processor.go index f16aea5cd86..ba3ca9d3f50 100644 --- a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/processors/thrift_processor.go +++ b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/processors/thrift_processor.go @@ -1,17 +1,6 @@ // Copyright (c) 2019 The Jaeger Authors. // Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package processors @@ -67,7 +56,7 @@ func NewThriftProcessor( "number of processors must be greater than 0, called with %d", numProcessors) } protocolPool := &sync.Pool{ - New: func() interface{} { + New: func() any { trans := &customtransport.TBufferedReadTransport{} return factory.GetProtocol(trans) }, diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/client_metrics.go b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/client_metrics.go index c93b60f0b38..7b237582a5f 100644 --- a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/client_metrics.go +++ b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/client_metrics.go @@ -1,16 +1,5 @@ // Copyright (c) 2020 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package reporter @@ -143,7 +132,7 @@ func (r *ClientMetricsReporter) expireClientMetricsLoop() { func (r *ClientMetricsReporter) expireClientMetrics(t time.Time) { var size int64 - r.lastReceivedClientStats.Range(func(k, v interface{}) bool { + r.lastReceivedClientStats.Range(func(k, v any) bool { stats := v.(*lastReceivedClientStats) stats.lock.Lock() defer stats.lock.Unlock() @@ -186,12 +175,12 @@ func (r *ClientMetricsReporter) updateClientMetrics(batch *jaeger.Batch) { func (s *lastReceivedClientStats) update( batchSeqNo int64, stats *jaeger.ClientStats, - metrics *clientMetrics, + cMetrics *clientMetrics, ) { s.lock.Lock() defer s.lock.Unlock() - metrics.BatchesReceived.Inc(1) + cMetrics.BatchesReceived.Inc(1) if s.batchSeqNo >= batchSeqNo { // Ignore out of order batches. Once we receive a batch with a larger-than-seen number, @@ -202,11 +191,11 @@ func (s *lastReceivedClientStats) update( // do not update counters on the first batch, because it may cause a huge spike in totals // if the client has been running for a while already, but the agent just started. if s.batchSeqNo > 0 { - metrics.BatchesSent.Inc(batchSeqNo - s.batchSeqNo) + cMetrics.BatchesSent.Inc(batchSeqNo - s.batchSeqNo) if stats != nil { - metrics.FailedToEmitSpans.Inc(stats.FailedToEmitSpans - s.failedToEmitSpans) - metrics.TooLargeDroppedSpans.Inc(stats.TooLargeDroppedSpans - s.tooLargeDroppedSpans) - metrics.FullQueueDroppedSpans.Inc(stats.FullQueueDroppedSpans - s.fullQueueDroppedSpans) + cMetrics.FailedToEmitSpans.Inc(stats.FailedToEmitSpans - s.failedToEmitSpans) + cMetrics.TooLargeDroppedSpans.Inc(stats.TooLargeDroppedSpans - s.tooLargeDroppedSpans) + cMetrics.FullQueueDroppedSpans.Inc(stats.FullQueueDroppedSpans - s.fullQueueDroppedSpans) } } diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/connect_metrics.go b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/connect_metrics.go index 7af947eab1c..833f0c67f07 100644 --- a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/connect_metrics.go +++ b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/connect_metrics.go @@ -1,16 +1,5 @@ // Copyright (c) 2020 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package reporter diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/flags.go b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/flags.go index c3ec9616789..5dca6444f18 100644 --- a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/flags.go +++ b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/flags.go @@ -1,22 +1,10 @@ // Copyright (c) 2018 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package reporter import ( "flag" - "fmt" "github.com/spf13/viper" "go.uber.org/zap" @@ -44,15 +32,15 @@ type Options struct { } // AddFlags adds flags for Options. -func AddFlags(flags *flag.FlagSet) { - flags.String(reporterType, string(GRPC), fmt.Sprintf("Reporter type to use e.g. %s", string(GRPC))) +func AddFlags(flagSet *flag.FlagSet) { + flagSet.String(reporterType, string(GRPC), "Reporter type to use e.g. "+string(GRPC)) if !setupcontext.IsAllInOne() { - flags.String(agentTags, "", "One or more tags to be added to the Process tags of all spans passing through this agent. Ex: key1=value1,key2=${envVar:defaultValue}") + flagSet.String(agentTags, "", "One or more tags to be added to the Process tags of all spans passing through this agent. Ex: key1=value1,key2=${envVar:defaultValue}") } } // InitFromViper initializes Options with properties retrieved from Viper. -func (b *Options) InitFromViper(v *viper.Viper, logger *zap.Logger) *Options { +func (b *Options) InitFromViper(v *viper.Viper, _ *zap.Logger) *Options { b.ReporterType = Type(v.GetString(reporterType)) if !setupcontext.IsAllInOne() { if len(v.GetString(agentTags)) > 0 { diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/grpc/builder.go b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/grpc/builder.go index 9290107dd3a..a8953a8e644 100644 --- a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/grpc/builder.go +++ b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/grpc/builder.go @@ -1,16 +1,5 @@ // Copyright (c) 2018-2019 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package grpc @@ -20,7 +9,7 @@ import ( "fmt" "strings" - grpc_retry "github.com/grpc-ecosystem/go-grpc-middleware/retry" + "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/retry" "go.uber.org/zap" "google.golang.org/grpc" "google.golang.org/grpc/connectivity" @@ -99,7 +88,7 @@ func (b *ConnBuilder) CreateConnection(ctx context.Context, logger *zap.Logger, } } dialOptions = append(dialOptions, grpc.WithDefaultServiceConfig(grpcresolver.GRPCServiceConfig)) - dialOptions = append(dialOptions, grpc.WithUnaryInterceptor(grpc_retry.UnaryClientInterceptor(grpc_retry.WithMax(b.MaxRetry)))) + dialOptions = append(dialOptions, grpc.WithUnaryInterceptor(retry.UnaryClientInterceptor(retry.WithMax(b.MaxRetry)))) dialOptions = append(dialOptions, b.AdditionalDialOptions...) conn, err := grpc.NewClient(dialTarget, dialOptions...) 
diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/grpc/collector_proxy.go b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/grpc/collector_proxy.go index ec41f729b06..554926459b0 100644 --- a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/grpc/collector_proxy.go +++ b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/grpc/collector_proxy.go @@ -1,16 +1,5 @@ // Copyright (c) 2018 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package grpc diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/grpc/flags.go b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/grpc/flags.go index 72289e4b2fd..47645c20cde 100644 --- a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/grpc/flags.go +++ b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/grpc/flags.go @@ -1,16 +1,5 @@ // Copyright (c) 2018 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package grpc @@ -27,7 +16,7 @@ import ( const ( gRPCPrefix = "reporter.grpc" collectorHostPort = gRPCPrefix + ".host-port" - retry = gRPCPrefix + ".retry.max" + retryFlag = gRPCPrefix + ".retry.max" defaultMaxRetry = 3 discoveryMinPeers = gRPCPrefix + ".discovery.min-peers" ) @@ -38,7 +27,7 @@ var tlsFlagsConfig = tlscfg.ClientFlagsConfig{ // AddFlags adds flags for Options. 
func AddFlags(flags *flag.FlagSet) { - flags.Uint(retry, defaultMaxRetry, "Sets the maximum number of retries for a call") + flags.Uint(retryFlag, defaultMaxRetry, "Sets the maximum number of retries for a call") flags.Int(discoveryMinPeers, 3, "Max number of collectors to which the agent will try to connect at any given time") flags.String(collectorHostPort, "", "Comma-separated string representing host:port of a static list of collectors to connect to directly") tlsFlagsConfig.AddFlags(flags) @@ -50,12 +39,12 @@ func (b *ConnBuilder) InitFromViper(v *viper.Viper) (*ConnBuilder, error) { if hostPorts != "" { b.CollectorHostPorts = strings.Split(hostPorts, ",") } - b.MaxRetry = uint(v.GetInt(retry)) - if tls, err := tlsFlagsConfig.InitFromViper(v); err == nil { - b.TLS = tls - } else { + b.MaxRetry = v.GetUint(retryFlag) + tls, err := tlsFlagsConfig.InitFromViper(v) + if err != nil { return b, fmt.Errorf("failed to process TLS options: %w", err) } + b.TLS = tls b.DiscoveryMinPeers = v.GetInt(discoveryMinPeers) return b, nil } diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/grpc/reporter.go b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/grpc/reporter.go index 20ba5241497..879a93aa482 100644 --- a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/grpc/reporter.go +++ b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/grpc/reporter.go @@ -1,17 +1,6 @@ // Copyright (c) 2019 The Jaeger Authors. // Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package grpc diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/metrics.go b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/metrics.go index a1edaca36c2..9b29c3f59f4 100644 --- a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/metrics.go +++ b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/metrics.go @@ -1,16 +1,5 @@ // Copyright (c) 2018 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package reporter diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/reporter.go b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/reporter.go index 550a4a2dd9b..0ce8fe95815 100644 --- a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/reporter.go +++ b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/reporter/reporter.go @@ -1,17 +1,6 @@ // Copyright (c) 2019 The Jaeger Authors. // Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package reporter diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/servers/server.go b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/servers/server.go index 42d6f85fcf2..1a41e8be267 100644 --- a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/servers/server.go +++ b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/servers/server.go @@ -1,17 +1,6 @@ // Copyright (c) 2019 The Jaeger Authors. // Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package servers diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/servers/tbuffered_server.go b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/servers/tbuffered_server.go index 6a9d49b45ad..a00e8f38609 100644 --- a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/servers/tbuffered_server.go +++ b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/servers/tbuffered_server.go @@ -1,17 +1,6 @@ // Copyright (c) 2019 The Jaeger Authors. // Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package servers @@ -79,7 +68,7 @@ func NewTBufferedServer( dataChan := make(chan *ReadBuf, maxQueueSize) readBufPool := &sync.Pool{ - New: func() interface{} { + New: func() any { return &ReadBuf{bytes: make([]byte, maxPacketSize)} }, } diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/servers/thriftudp/socket_buffer.go b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/servers/thriftudp/socket_buffer.go index 5dd4763593f..fe6eb10cf76 100644 --- a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/servers/thriftudp/socket_buffer.go +++ b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/servers/thriftudp/socket_buffer.go @@ -1,16 +1,5 @@ // Copyright (c) 2020 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 //go:build !windows // +build !windows diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/servers/thriftudp/socket_buffer_windows.go b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/servers/thriftudp/socket_buffer_windows.go index dd763ec4c86..b4ad3b066d1 100644 --- a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/servers/thriftudp/socket_buffer_windows.go +++ b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/servers/thriftudp/socket_buffer_windows.go @@ -1,16 +1,5 @@ // Copyright (c) 2020 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package thriftudp diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/servers/thriftudp/transport.go b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/servers/thriftudp/transport.go index 4579502061f..81db276ff07 100644 --- a/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/servers/thriftudp/transport.go +++ b/vendor/github.com/jaegertracing/jaeger/cmd/agent/app/servers/thriftudp/transport.go @@ -1,17 +1,6 @@ // Copyright (c) 2019 The Jaeger Authors. // Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package thriftudp @@ -93,7 +82,7 @@ func NewTUDPServerTransport(hostPort string) (*TUDPTransport, error) { // Open does nothing as connection is opened on creation // Required to maintain thrift.TTransport interface -func (p *TUDPTransport) Open() error { +func (*TUDPTransport) Open() error { return nil } @@ -131,7 +120,7 @@ func (p *TUDPTransport) Read(buf []byte) (int, error) { // RemainingBytes returns the max number of bytes (same as Thrift's StreamTransport) as we // do not know how many bytes we have left. -func (p *TUDPTransport) RemainingBytes() uint64 { +func (*TUDPTransport) RemainingBytes() uint64 { const maxSize = ^uint64(0) return maxSize } diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/all-in-one/setupcontext/setupcontext.go b/vendor/github.com/jaegertracing/jaeger/cmd/all-in-one/setupcontext/setupcontext.go index 2701bd88117..396c1bd2428 100644 --- a/vendor/github.com/jaegertracing/jaeger/cmd/all-in-one/setupcontext/setupcontext.go +++ b/vendor/github.com/jaegertracing/jaeger/cmd/all-in-one/setupcontext/setupcontext.go @@ -1,16 +1,5 @@ // Copyright (c) 2019 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package setupcontext diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/collector/app/sampling/model/sampling.go b/vendor/github.com/jaegertracing/jaeger/cmd/collector/app/sampling/model/sampling.go index 32193039adc..9504506756c 100644 --- a/vendor/github.com/jaegertracing/jaeger/cmd/collector/app/sampling/model/sampling.go +++ b/vendor/github.com/jaegertracing/jaeger/cmd/collector/app/sampling/model/sampling.go @@ -1,17 +1,6 @@ // Copyright (c) 2019 The Jaeger Authors. // Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package model diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/collector/app/sampling/samplingstrategy/factory.go b/vendor/github.com/jaegertracing/jaeger/cmd/collector/app/sampling/samplingstrategy/factory.go new file mode 100644 index 00000000000..243d08595ec --- /dev/null +++ b/vendor/github.com/jaegertracing/jaeger/cmd/collector/app/sampling/samplingstrategy/factory.go @@ -0,0 +1,28 @@ +// Copyright (c) 2018 The Jaeger Authors. 
+// SPDX-License-Identifier: Apache-2.0
+
+package samplingstrategy
+
+import (
+	"go.uber.org/zap"
+
+	"github.com/jaegertracing/jaeger/pkg/metrics"
+	"github.com/jaegertracing/jaeger/storage"
+)
+
+// Factory defines an interface for a factory that can create implementations of different sampling strategy components.
+// Implementations are also encouraged to implement plugin.Configurable interface.
+//
+// # See also
+//
+// plugin.Configurable
+type Factory interface {
+	// Initialize performs internal initialization of the factory.
+	Initialize(metricsFactory metrics.Factory, ssFactory storage.SamplingStoreFactory, logger *zap.Logger) error
+
+	// CreateStrategyProvider initializes and returns Provider and optionally Aggregator.
+	CreateStrategyProvider() (Provider, Aggregator, error)
+
+	// Close closes the factory
+	Close() error
+}
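Because the new Factory contract returns two components from a single constructor call, a short sketch may help. This is a hypothetical no-op implementation written against the interface above; the package path, file layout, and noopFactory name are assumptions for illustration, and only the method signatures come from the diff:

    // Package samplingstrategyexample is a hypothetical consumer package.
    package samplingstrategyexample

    import (
    	"go.uber.org/zap"

    	"github.com/jaegertracing/jaeger/cmd/collector/app/sampling/samplingstrategy"
    	"github.com/jaegertracing/jaeger/pkg/metrics"
    	"github.com/jaegertracing/jaeger/storage"
    )

    type noopFactory struct {
    	logger *zap.Logger
    }

    // Initialize stores the logger; a real factory would also wire up
    // metrics and the sampling store.
    func (f *noopFactory) Initialize(_ metrics.Factory, _ storage.SamplingStoreFactory, logger *zap.Logger) error {
    	f.logger = logger
    	return nil
    }

    // CreateStrategyProvider returns a nil Aggregator, which the contract
    // permits since the Aggregator is optional.
    func (*noopFactory) CreateStrategyProvider() (samplingstrategy.Provider, samplingstrategy.Aggregator, error) {
    	return nil, nil, nil
    }

    func (*noopFactory) Close() error { return nil }

    // Compile-time check that noopFactory satisfies the interface.
    var _ samplingstrategy.Factory = (*noopFactory)(nil)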
diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/collector/app/sampling/strategystore/interface.go b/vendor/github.com/jaegertracing/jaeger/cmd/collector/app/sampling/samplingstrategy/interface.go
similarity index 56%
rename from vendor/github.com/jaegertracing/jaeger/cmd/collector/app/sampling/strategystore/interface.go
rename to vendor/github.com/jaegertracing/jaeger/cmd/collector/app/sampling/samplingstrategy/interface.go
index 90d9464918d..e47688a7319 100644
--- a/vendor/github.com/jaegertracing/jaeger/cmd/collector/app/sampling/strategystore/interface.go
+++ b/vendor/github.com/jaegertracing/jaeger/cmd/collector/app/sampling/samplingstrategy/interface.go
@@ -1,29 +1,20 @@
 // Copyright (c) 2018 The Jaeger Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package strategystore
+// SPDX-License-Identifier: Apache-2.0
+
+package samplingstrategy
 
 import (
 	"context"
 	"io"
 
+	"go.uber.org/zap"
+
 	"github.com/jaegertracing/jaeger/model"
 	"github.com/jaegertracing/jaeger/proto-gen/api_v2"
 )
 
-// StrategyStore keeps track of service specific sampling strategies.
-type StrategyStore interface {
+// Provider keeps track of service specific sampling strategies.
+type Provider interface {
 	// Close() from io.Closer stops the processor from calculating probabilities.
 	io.Closer
 
@@ -36,6 +27,10 @@ type Aggregator interface {
 	// Close() from io.Closer stops the aggregator from aggregating throughput.
 	io.Closer
 
+	// The HandleRootSpan function processes a span, checking if it's a root span.
+	// If it is, it extracts sampler parameters, then calls RecordThroughput.
+	HandleRootSpan(span *model.Span, logger *zap.Logger)
+
 	// RecordThroughput records throughput for an operation for aggregation.
 	RecordThroughput(service, operation string, samplerType model.SamplerType, probability float64)
diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/collector/app/sampling/strategystore/factory.go b/vendor/github.com/jaegertracing/jaeger/cmd/collector/app/sampling/strategystore/factory.go
deleted file mode 100644
index 5fab77b62bd..00000000000
--- a/vendor/github.com/jaegertracing/jaeger/cmd/collector/app/sampling/strategystore/factory.go
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright (c) 2018 The Jaeger Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package strategystore
-
-import (
-	"go.uber.org/zap"
-
-	"github.com/jaegertracing/jaeger/pkg/metrics"
-	"github.com/jaegertracing/jaeger/storage"
-)
-
-// Factory defines an interface for a factory that can create implementations of different strategy storage components.
-// Implementations are also encouraged to implement plugin.Configurable interface.
-//
-// # See also
-//
-// plugin.Configurable
-type Factory interface {
-	// Initialize performs internal initialization of the factory.
-	Initialize(metricsFactory metrics.Factory, ssFactory storage.SamplingStoreFactory, logger *zap.Logger) error
-
-	// CreateStrategyStore initializes the StrategyStore and returns it.
-	CreateStrategyStore() (StrategyStore, Aggregator, error)
-}
diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/collector/app/sanitizer/zipkin/span_sanitizer.go b/vendor/github.com/jaegertracing/jaeger/cmd/collector/app/sanitizer/zipkin/span_sanitizer.go
index 8ba8a71f665..1e9fb29e250 100644
--- a/vendor/github.com/jaegertracing/jaeger/cmd/collector/app/sanitizer/zipkin/span_sanitizer.go
+++ b/vendor/github.com/jaegertracing/jaeger/cmd/collector/app/sanitizer/zipkin/span_sanitizer.go
@@ -1,17 +1,6 @@
 // Copyright (c) 2019 The Jaeger Authors.
 // Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0 package zipkin @@ -69,7 +58,7 @@ func NewSpanDurationSanitizer() Sanitizer { type spanDurationSanitizer struct{} -func (s *spanDurationSanitizer) Sanitize(span *zc.Span) *zc.Span { +func (*spanDurationSanitizer) Sanitize(span *zc.Span) *zc.Span { if span.Duration == nil { duration := defaultDuration if len(span.Annotations) >= 2 { @@ -116,7 +105,7 @@ func NewSpanStartTimeSanitizer() Sanitizer { type spanStartTimeSanitizer struct{} -func (s *spanStartTimeSanitizer) Sanitize(span *zc.Span) *zc.Span { +func (*spanStartTimeSanitizer) Sanitize(span *zc.Span) *zc.Span { if span.Timestamp != nil || len(span.Annotations) == 0 { return span } @@ -143,7 +132,7 @@ func NewParentIDSanitizer() Sanitizer { type parentIDSanitizer struct{} -func (s *parentIDSanitizer) Sanitize(span *zc.Span) *zc.Span { +func (*parentIDSanitizer) Sanitize(span *zc.Span) *zc.Span { if span.ParentID == nil || *span.ParentID != 0 { return span } @@ -166,7 +155,7 @@ func NewErrorTagSanitizer() Sanitizer { type errorTagSanitizer struct{} -func (s *errorTagSanitizer) Sanitize(span *zc.Span) *zc.Span { +func (*errorTagSanitizer) Sanitize(span *zc.Span) *zc.Span { for _, binAnno := range span.BinaryAnnotations { if binAnno.AnnotationType != zc.AnnotationType_BOOL && strings.EqualFold("error", binAnno.Key) { binAnno.AnnotationType = zc.AnnotationType_BOOL diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/internal/flags/.nocover b/vendor/github.com/jaegertracing/jaeger/cmd/internal/flags/.nocover deleted file mode 100644 index 46911e9377b..00000000000 --- a/vendor/github.com/jaegertracing/jaeger/cmd/internal/flags/.nocover +++ /dev/null @@ -1,2 +0,0 @@ -FIXME - diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/internal/flags/admin.go b/vendor/github.com/jaegertracing/jaeger/cmd/internal/flags/admin.go index ea4df675a04..e868eff7f3b 100644 --- a/vendor/github.com/jaegertracing/jaeger/cmd/internal/flags/admin.go +++ b/vendor/github.com/jaegertracing/jaeger/cmd/internal/flags/admin.go @@ -1,16 +1,5 @@ // Copyright (c) 2019 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package flags @@ -170,6 +159,8 @@ func (s *AdminServer) registerPprofHandlers() { // Close stops the HTTP server func (s *AdminServer) Close() error { - _ = s.tlsCertWatcherCloser.Close() - return s.server.Shutdown(context.Background()) + return errors.Join( + s.tlsCertWatcherCloser.Close(), + s.server.Shutdown(context.Background()), + ) } diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/internal/flags/doc.go b/vendor/github.com/jaegertracing/jaeger/cmd/internal/flags/doc.go index a65f3a8a0cd..e7b6b877678 100644 --- a/vendor/github.com/jaegertracing/jaeger/cmd/internal/flags/doc.go +++ b/vendor/github.com/jaegertracing/jaeger/cmd/internal/flags/doc.go @@ -1,17 +1,6 @@ // Copyright (c) 2019 The Jaeger Authors. // Copyright (c) 2017 Uber Technologies, Inc. 
diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/internal/flags/doc.go b/vendor/github.com/jaegertracing/jaeger/cmd/internal/flags/doc.go
index a65f3a8a0cd..e7b6b877678 100644
--- a/vendor/github.com/jaegertracing/jaeger/cmd/internal/flags/doc.go
+++ b/vendor/github.com/jaegertracing/jaeger/cmd/internal/flags/doc.go
@@ -1,17 +1,6 @@
// Copyright (c) 2019 The Jaeger Authors.
// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0

// Package flags defines command line flags that are shared by several jaeger components.
// They are defined in this shared location so that if several components are wired into
diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/internal/flags/flags.go b/vendor/github.com/jaegertracing/jaeger/cmd/internal/flags/flags.go
index 272ad8dd4fe..7eaac2f1794 100644
--- a/vendor/github.com/jaegertracing/jaeger/cmd/internal/flags/flags.go
+++ b/vendor/github.com/jaegertracing/jaeger/cmd/internal/flags/flags.go
@@ -1,17 +1,6 @@
// Copyright (c) 2019 The Jaeger Authors.
// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0

package flags

@@ -29,6 +18,7 @@ import (

const (
    spanStorageType = "span-storage.type" // deprecated
    logLevel        = "log-level"
+   logEncoding     = "log-encoding" // json or console
    configFile      = "config-file"
)

@@ -98,23 +88,26 @@ type SharedFlags struct {
}

type logging struct {
-   Level string
+   Level    string
+   Encoding string
}

// AddFlags adds flags for SharedFlags
func AddFlags(flagSet *flag.FlagSet) {
    flagSet.String(spanStorageType, "", "(deprecated) please use SPAN_STORAGE_TYPE environment variable. Run this binary with the 'env' command for help.")
-   AddLoggingFlag(flagSet)
+   AddLoggingFlags(flagSet)
}

-// AddLoggingFlag adds logging flag for SharedFlags
-func AddLoggingFlag(flagSet *flag.FlagSet) {
+// AddLoggingFlags adds logging flags for SharedFlags
+func AddLoggingFlags(flagSet *flag.FlagSet) {
    flagSet.String(logLevel, "info", "Minimal allowed log Level. For more levels see https://github.com/uber-go/zap")
+   flagSet.String(logEncoding, "json", "Log encoding. Supported values are 'json' and 'console'.")
}

// InitFromViper initializes SharedFlags with properties from viper
func (flags *SharedFlags) InitFromViper(v *viper.Viper) *SharedFlags {
    flags.Logging.Level = v.GetString(logLevel)
+   flags.Logging.Encoding = v.GetString(logEncoding)
    return flags
}

@@ -126,5 +119,9 @@ func (flags *SharedFlags) NewLogger(conf zap.Config, options ...zap.Option) (*za
        return nil, err
    }
    conf.Level = zap.NewAtomicLevelAt(level)
+   conf.Encoding = flags.Logging.Encoding
+   if flags.Logging.Encoding == "console" {
+       conf.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
+   }
    return conf.Build(options...)
}
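The new log-encoding flag above switches the shared zap logger between JSON and console output, with ISO8601 timestamps for the console case. A minimal standalone sketch of the same zap configuration (not Jaeger's actual wiring, just the library calls the diff relies on):

package main

import (
    "go.uber.org/zap"
    "go.uber.org/zap/zapcore"
)

// newLogger builds a production zap logger with a configurable encoding,
// mirroring what SharedFlags.NewLogger does with the log-encoding flag.
func newLogger(encoding string) (*zap.Logger, error) {
    conf := zap.NewProductionConfig()
    conf.Encoding = encoding // "json" or "console"
    if encoding == "console" {
        // Human-readable timestamps for console output.
        conf.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
    }
    return conf.Build()
}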
diff --git a/vendor/github.com/jaegertracing/jaeger/cmd/internal/flags/service.go b/vendor/github.com/jaegertracing/jaeger/cmd/internal/flags/service.go
index 622a960a4e2..7a276d59d18 100644
--- a/vendor/github.com/jaegertracing/jaeger/cmd/internal/flags/service.go
+++ b/vendor/github.com/jaegertracing/jaeger/cmd/internal/flags/service.go
@@ -1,16 +1,5 @@
// Copyright (c) 2019 The Jaeger Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0

package flags

@@ -22,9 +11,10 @@ import (
    "os/signal"
    "syscall"

-   grpcZap "github.com/grpc-ecosystem/go-grpc-middleware/logging/zap"
    "github.com/spf13/viper"
    "go.uber.org/zap"
+   "go.uber.org/zap/zapgrpc"
+   "google.golang.org/grpc/grpclog"

    "github.com/jaegertracing/jaeger/internal/metrics/metricsbuilder"
    "github.com/jaegertracing/jaeger/pkg/healthcheck"
@@ -67,7 +57,7 @@ func NewService(adminPort int) *Service {
func (s *Service) AddFlags(flagSet *flag.FlagSet) {
    AddConfigFileFlag(flagSet)
    if s.NoStorage {
-       AddLoggingFlag(flagSet)
+       AddLoggingFlags(flagSet)
    } else {
        AddFlags(flagSet)
    }
@@ -84,16 +74,15 @@ func (s *Service) Start(v *viper.Viper) error {
    sFlags := new(SharedFlags).InitFromViper(v)
    newProdConfig := zap.NewProductionConfig()
    newProdConfig.Sampling = nil
-   if logger, err := sFlags.NewLogger(newProdConfig); err == nil {
-       s.Logger = logger
-       grpcZap.ReplaceGrpcLoggerV2(logger.WithOptions(
-           // grpclog is not consistent with the depth of call tree before it's dispatched to zap,
-           // but Skip(2) still shows grpclog as caller, while Skip(3) shows actual grpc packages.
-           zap.AddCallerSkip(3),
-       ))
-   } else {
+   logger, err := sFlags.NewLogger(newProdConfig)
+   if err != nil {
        return fmt.Errorf("cannot create logger: %w", err)
    }
+   s.Logger = logger
+   grpclog.SetLoggerV2(zapgrpc.NewLogger(
+       logger.WithOptions(
+           zap.AddCallerSkip(5), // ensure the actual caller:lineNo is shown
+       )))

    metricsBuilder := new(metricsbuilder.Builder).InitFromViper(v)
    metricsFactory, err := metricsBuilder.CreateMetricsFactory("")
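The change above replaces the archived go-grpc-middleware zap bridge with zap's own zapgrpc adapter. A minimal sketch of the same wiring, assuming an already-built *zap.Logger; the caller-skip value is depth tuning for grpclog's call stack and may need adjustment across grpc-go versions:

package main

import (
    "go.uber.org/zap"
    "go.uber.org/zap/zapgrpc"
    "google.golang.org/grpc/grpclog"
)

func main() {
    logger := zap.NewExample()
    defer func() { _ = logger.Sync() }()
    // Route grpc-go's internal logging through zap.
    grpclog.SetLoggerV2(zapgrpc.NewLogger(logger.WithOptions(zap.AddCallerSkip(5))))
}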
diff --git a/vendor/github.com/jaegertracing/jaeger/internal/metrics/expvar/cache.go b/vendor/github.com/jaegertracing/jaeger/internal/metrics/expvar/cache.go
deleted file mode 100644
index f25ddba563b..00000000000
--- a/vendor/github.com/jaegertracing/jaeger/internal/metrics/expvar/cache.go
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright (c) 2022 The Jaeger Authors.
-// Copyright (c) 2018 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package expvar
-
-import (
-    "sync"
-
-    "github.com/jaegertracing/jaeger/pkg/metrics"
-)
-
-type cache struct {
-    lock       sync.Mutex
-    counters   map[string]metrics.Counter
-    gauges     map[string]metrics.Gauge
-    timers     map[string]metrics.Timer
-    histograms map[string]metrics.Histogram
-}
-
-func newCache() *cache {
-    return &cache{
-        counters:   make(map[string]metrics.Counter),
-        gauges:     make(map[string]metrics.Gauge),
-        timers:     make(map[string]metrics.Timer),
-        histograms: make(map[string]metrics.Histogram),
-    }
-}
-
-func (r *cache) getOrSetCounter(name string, create func() metrics.Counter) metrics.Counter {
-    r.lock.Lock()
-    defer r.lock.Unlock()
-    c, ok := r.counters[name]
-    if !ok {
-        c = create()
-        r.counters[name] = c
-    }
-    return c
-}
-
-func (r *cache) getOrSetGauge(name string, create func() metrics.Gauge) metrics.Gauge {
-    r.lock.Lock()
-    defer r.lock.Unlock()
-    g, ok := r.gauges[name]
-    if !ok {
-        g = create()
-        r.gauges[name] = g
-    }
-    return g
-}
-
-func (r *cache) getOrSetTimer(name string, create func() metrics.Timer) metrics.Timer {
-    r.lock.Lock()
-    defer r.lock.Unlock()
-    t, ok := r.timers[name]
-    if !ok {
-        t = create()
-        r.timers[name] = t
-    }
-    return t
-}
-
-func (r *cache) getOrSetHistogram(name string, create func() metrics.Histogram) metrics.Histogram {
-    r.lock.Lock()
-    defer r.lock.Unlock()
-    t, ok := r.histograms[name]
-    if !ok {
-        t = create()
-        r.histograms[name] = t
-    }
-    return t
-}
diff --git a/vendor/github.com/jaegertracing/jaeger/internal/metrics/expvar/factory.go b/vendor/github.com/jaegertracing/jaeger/internal/metrics/expvar/factory.go
deleted file mode 100644
index 6ce562e5ef1..00000000000
--- a/vendor/github.com/jaegertracing/jaeger/internal/metrics/expvar/factory.go
+++ /dev/null
@@ -1,134 +0,0 @@
-// Copyright (c) 2022 The Jaeger Authors.
-// Copyright (c) 2018 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package expvar
-
-import (
-    "sort"
-
-    kexpvar "github.com/go-kit/kit/metrics/expvar"
-
-    "github.com/jaegertracing/jaeger/pkg/metrics"
-)
-
-// NewFactory creates a new metrics factory using go-kit expvar package.
-// buckets is the number of buckets to be used in histograms.
-// Custom buckets passed via options are not supported.
-func NewFactory(buckets int) metrics.Factory {
-    return &factory{
-        buckets:  buckets,
-        scope:    "",
-        scopeSep: ".",
-        tagsSep:  ".",
-        tagKVSep: "_",
-        cache:    newCache(),
-    }
-}
-
-type factory struct {
-    buckets int
-
-    scope    string
-    tags     map[string]string
-    scopeSep string
-    tagsSep  string
-    tagKVSep string
-    cache    *cache
-}
-
-var _ metrics.Factory = (*factory)(nil)
-
-func (f *factory) subScope(name string) string {
-    if f.scope == "" {
-        return name
-    }
-    if name == "" {
-        return f.scope
-    }
-    return f.scope + f.scopeSep + name
-}
-
-func (f *factory) mergeTags(tags map[string]string) map[string]string {
-    ret := make(map[string]string, len(f.tags)+len(tags))
-    for k, v := range f.tags {
-        ret[k] = v
-    }
-    for k, v := range tags {
-        ret[k] = v
-    }
-    return ret
-}
-
-func (f *factory) getKey(name string, tags map[string]string) string {
-    fullName := f.subScope(name)
-    fullTags := f.mergeTags(tags)
-    return makeKey(fullName, fullTags, f.tagsSep, f.tagKVSep)
-}
-
-// getKey converts name+tags into a single string of the form
-// "name|tag1=value1|...|tagN=valueN", where tag names are
-// sorted alphabetically.
-func makeKey(name string, tags map[string]string, tagsSep string, tagKVSep string) string {
-    keys := make([]string, 0, len(tags))
-    for k := range tags {
-        keys = append(keys, k)
-    }
-    sort.Strings(keys)
-    key := name
-    for _, k := range keys {
-        key = key + tagsSep + k + tagKVSep + tags[k]
-    }
-    return key
-}
-
-func (f *factory) Counter(options metrics.Options) metrics.Counter {
-    key := f.getKey(options.Name, options.Tags)
-    return f.cache.getOrSetCounter(key, func() metrics.Counter {
-        return NewCounter(kexpvar.NewCounter(key))
-    })
-}
-
-func (f *factory) Gauge(options metrics.Options) metrics.Gauge {
-    key := f.getKey(options.Name, options.Tags)
-    return f.cache.getOrSetGauge(key, func() metrics.Gauge {
-        return NewGauge(kexpvar.NewGauge(key))
-    })
-}
-
-func (f *factory) Timer(options metrics.TimerOptions) metrics.Timer {
-    key := f.getKey(options.Name, options.Tags)
-    return f.cache.getOrSetTimer(key, func() metrics.Timer {
-        return NewTimer(kexpvar.NewHistogram(key, f.buckets))
-    })
-}
-
-func (f *factory) Histogram(options metrics.HistogramOptions) metrics.Histogram {
-    key := f.getKey(options.Name, options.Tags)
-    return f.cache.getOrSetHistogram(key, func() metrics.Histogram {
-        return NewHistogram(kexpvar.NewHistogram(key, f.buckets))
-    })
-}
-
-func (f *factory) Namespace(options metrics.NSOptions) metrics.Factory {
-    return &factory{
-        buckets:  f.buckets,
-        scope:    f.subScope(options.Name),
-        tags:     f.mergeTags(options.Tags),
-        scopeSep: f.scopeSep,
-        tagsSep:  f.tagsSep,
-        tagKVSep: f.tagKVSep,
-        cache:    f.cache,
-    }
-}
diff --git a/vendor/github.com/jaegertracing/jaeger/internal/metrics/expvar/metrics.go b/vendor/github.com/jaegertracing/jaeger/internal/metrics/expvar/metrics.go
deleted file mode 100644
index f3cbf740b1a..00000000000
--- a/vendor/github.com/jaegertracing/jaeger/internal/metrics/expvar/metrics.go
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright (c) 2022 The Jaeger Authors.
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package expvar
-
-import (
-    "time"
-
-    kit "github.com/go-kit/kit/metrics"
-)
-
-// Counter is an adapter from go-kit Counter to jaeger-lib Counter
-type Counter struct {
-    counter kit.Counter
-}
-
-// NewCounter creates a new Counter
-func NewCounter(counter kit.Counter) *Counter {
-    return &Counter{counter: counter}
-}
-
-// Inc adds the given value to the counter.
-func (c *Counter) Inc(delta int64) {
-    c.counter.Add(float64(delta))
-}
-
-// Gauge is an adapter from go-kit Gauge to jaeger-lib Gauge
-type Gauge struct {
-    gauge kit.Gauge
-}
-
-// NewGauge creates a new Gauge
-func NewGauge(gauge kit.Gauge) *Gauge {
-    return &Gauge{gauge: gauge}
-}
-
-// Update the gauge to the value passed in.
-func (g *Gauge) Update(value int64) {
-    g.gauge.Set(float64(value))
-}
-
-// Timer is an adapter from go-kit Histogram to jaeger-lib Timer
-type Timer struct {
-    hist kit.Histogram
-}
-
-// NewTimer creates a new Timer
-func NewTimer(hist kit.Histogram) *Timer {
-    return &Timer{hist: hist}
-}
-
-// Record saves the time passed in.
-func (t *Timer) Record(delta time.Duration) {
-    t.hist.Observe(delta.Seconds())
-}
-
-// Histogram is an adapter from go-kit Histogram to jaeger-lib Histogram
-type Histogram struct {
-    hist kit.Histogram
-}
-
-// NewHistogram creates a new Histogram
-func NewHistogram(hist kit.Histogram) *Histogram {
-    return &Histogram{hist: hist}
-}
-
-// Record saves the value passed in.
-func (t *Histogram) Record(value float64) {
-    t.hist.Observe(value)
-}
diff --git a/vendor/github.com/jaegertracing/jaeger/internal/metrics/metricsbuilder/builder.go b/vendor/github.com/jaegertracing/jaeger/internal/metrics/metricsbuilder/builder.go
index 502b7a4aa3d..16c82f117b0 100644
--- a/vendor/github.com/jaegertracing/jaeger/internal/metrics/metricsbuilder/builder.go
+++ b/vendor/github.com/jaegertracing/jaeger/internal/metrics/metricsbuilder/builder.go
@@ -1,32 +1,18 @@
// Copyright (c) 2019 The Jaeger Authors.
// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0

package metricsbuilder

import (
    "errors"
-   "expvar"
    "flag"
-   "log"
    "net/http"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promhttp"
    "github.com/spf13/viper"

-   jexpvar "github.com/jaegertracing/jaeger/internal/metrics/expvar"
    jprom "github.com/jaegertracing/jaeger/internal/metrics/prometheus"
    "github.com/jaegertracing/jaeger/pkg/metrics"
)

@@ -47,14 +33,12 @@ type Builder struct {
    handler http.Handler
}

-const expvarDepr = "(deprecated, will be removed after 2024-01-01 or in release v1.53.0, whichever is later) "
-
// AddFlags adds flags for Builder.
func AddFlags(flags *flag.FlagSet) {
    flags.String(
        metricsBackend,
        defaultMetricsBackend,
-       "Defines which metrics backend to use for metrics reporting: prometheus, none, or expvar "+expvarDepr)
+       "Defines which metrics backend to use for metrics reporting: prometheus or none")
    flags.String(
        metricsHTTPRoute,
        defaultMetricsRoute,
@@ -77,12 +61,6 @@ func (b *Builder) CreateMetricsFactory(namespace string) (metrics.Factory, error
        b.handler = promhttp.HandlerFor(prometheus.DefaultGatherer, promhttp.HandlerOpts{DisableCompression: true})
        return metricsFactory, nil
    }
-   if b.Backend == "expvar" {
-       metricsFactory := jexpvar.NewFactory(10).Namespace(metrics.NSOptions{Name: namespace, Tags: nil})
-       b.handler = expvar.Handler()
-       log.Printf("using expvar as metrics backend " + expvarDepr)
-       return metricsFactory, nil
-   }
    if b.Backend == "none" || b.Backend == "" {
        return metrics.NullFactory, nil
    }
diff --git a/vendor/github.com/jaegertracing/jaeger/internal/metrics/prometheus/cache.go b/vendor/github.com/jaegertracing/jaeger/internal/metrics/prometheus/cache.go
index 40791ebb708..733c9e3f33e 100644
--- a/vendor/github.com/jaegertracing/jaeger/internal/metrics/prometheus/cache.go
+++ b/vendor/github.com/jaegertracing/jaeger/internal/metrics/prometheus/cache.go
@@ -1,16 +1,5 @@
// Copyright (c) 2017 The Jaeger Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0

package prometheus

@@ -81,6 +70,6 @@ func (c *vectorCache) getOrMakeHistogramVec(opts prometheus.HistogramOpts, label
    return hv
}

-func (c *vectorCache) getCacheKey(name string, labels []string) string {
+func (*vectorCache) getCacheKey(name string, labels []string) string {
    return strings.Join(append([]string{name}, labels...), "||")
}
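The getCacheKey change above only drops an unused receiver name, but the key scheme it keeps is worth noting: one Prometheus vector is cached per metric name plus label *names* (not values), joined with "||". A tiny standalone sketch of that key construction:

package main

import (
    "fmt"
    "strings"
)

// cacheKey mirrors the vectorCache scheme: the same metric name with the
// same set of label names always maps to the same cached vector.
func cacheKey(name string, labels []string) string {
    return strings.Join(append([]string{name}, labels...), "||")
}

func main() {
    fmt.Println(cacheKey("http_requests", []string{"method", "status"}))
    // Output: http_requests||method||status
}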
diff --git a/vendor/github.com/jaegertracing/jaeger/internal/metrics/prometheus/factory.go b/vendor/github.com/jaegertracing/jaeger/internal/metrics/prometheus/factory.go
index 0f8b3a959d7..6bb4c8faceb 100644
--- a/vendor/github.com/jaegertracing/jaeger/internal/metrics/prometheus/factory.go
+++ b/vendor/github.com/jaegertracing/jaeger/internal/metrics/prometheus/factory.go
@@ -1,16 +1,5 @@
// Copyright (c) 2017 The Jaeger Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0

package prometheus

@@ -280,7 +269,7 @@ func (f *Factory) mergeTags(tags map[string]string) map[string]string {
    return ret
}

-func (f *Factory) tagNames(tags map[string]string) []string {
+func (*Factory) tagNames(tags map[string]string) []string {
    ret := make([]string, 0, len(tags))
    for k := range tags {
        ret = append(ret, k)
@@ -289,7 +278,7 @@ func (f *Factory) tagNames(tags map[string]string) []string {
    return ret
}

-func (f *Factory) tagsAsLabelValues(labels []string, tags map[string]string) []string {
+func (*Factory) tagsAsLabelValues(labels []string, tags map[string]string) []string {
    ret := make([]string, 0, len(tags))
    for _, l := range labels {
        ret = append(ret, tags[l])
diff --git a/vendor/github.com/jaegertracing/jaeger/model/converter/json/doc.go b/vendor/github.com/jaegertracing/jaeger/model/converter/json/doc.go
index 547372cb96a..50b692bacbf 100644
--- a/vendor/github.com/jaegertracing/jaeger/model/converter/json/doc.go
+++ b/vendor/github.com/jaegertracing/jaeger/model/converter/json/doc.go
@@ -1,17 +1,6 @@
// Copyright (c) 2019 The Jaeger Authors.
// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0

// Package json allows converting model.Trace to external JSON data model.
package json
diff --git a/vendor/github.com/jaegertracing/jaeger/model/converter/json/from_domain.go b/vendor/github.com/jaegertracing/jaeger/model/converter/json/from_domain.go
index 26c2e47d6f0..fb0754424c7 100644
--- a/vendor/github.com/jaegertracing/jaeger/model/converter/json/from_domain.go
+++ b/vendor/github.com/jaegertracing/jaeger/model/converter/json/from_domain.go
@@ -1,17 +1,6 @@
// Copyright (c) 2019 The Jaeger Authors.
// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0

package json

@@ -110,17 +99,17 @@ func (fd fromDomain) convertReferences(span *model.Span) []json.Reference {
    return out
}

-func (fd fromDomain) convertRefType(refType model.SpanRefType) json.ReferenceType {
+func (fromDomain) convertRefType(refType model.SpanRefType) json.ReferenceType {
    if refType == model.FollowsFrom {
        return json.FollowsFrom
    }
    return json.ChildOf
}

-func (fd fromDomain) convertKeyValues(keyValues model.KeyValues) []json.KeyValue {
+func (fromDomain) convertKeyValues(keyValues model.KeyValues) []json.KeyValue {
    out := make([]json.KeyValue, len(keyValues))
    for i, kv := range keyValues {
-       var value interface{}
+       var value any
        switch kv.VType {
        case model.StringType:
            value = kv.VStr
@@ -146,7 +135,7 @@ func (fd fromDomain) convertKeyValues(keyValues model.KeyValues) []json.KeyValue
    return out
}

-func (fd fromDomain) convertKeyValuesString(keyValues model.KeyValues) []json.KeyValue {
+func (fromDomain) convertKeyValuesString(keyValues model.KeyValues) []json.KeyValue {
    out := make([]json.KeyValue, len(keyValues))
    for i, kv := range keyValues {
        out[i] = json.KeyValue{
diff --git a/vendor/github.com/jaegertracing/jaeger/model/converter/json/process_hashtable.go b/vendor/github.com/jaegertracing/jaeger/model/converter/json/process_hashtable.go
index 5a95af66f9e..fb6eaf4c3de 100644
--- a/vendor/github.com/jaegertracing/jaeger/model/converter/json/process_hashtable.go
+++ b/vendor/github.com/jaegertracing/jaeger/model/converter/json/process_hashtable.go
@@ -1,17 +1,6 @@
// Copyright (c) 2019 The Jaeger Authors.
// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0

package json
diff --git a/vendor/github.com/jaegertracing/jaeger/model/converter/json/sampling.go b/vendor/github.com/jaegertracing/jaeger/model/converter/json/sampling.go
index b124114b6a5..829670d146f 100644
--- a/vendor/github.com/jaegertracing/jaeger/model/converter/json/sampling.go
+++ b/vendor/github.com/jaegertracing/jaeger/model/converter/json/sampling.go
@@ -1,16 +1,5 @@
// Copyright (c) 2023 The Jaeger Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0

package json
diff --git a/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/jaeger/doc.go b/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/jaeger/doc.go
index 26050003d63..4ed14a3000e 100644
--- a/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/jaeger/doc.go
+++ b/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/jaeger/doc.go
@@ -1,17 +1,6 @@
// Copyright (c) 2019 The Jaeger Authors.
// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0

// Package jaeger allows converting model.Trace to/from jaeger.thrift model.
package jaeger
diff --git a/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/jaeger/from_domain.go b/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/jaeger/from_domain.go
index 8ae83e02c57..b115c361504 100644
--- a/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/jaeger/from_domain.go
+++ b/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/jaeger/from_domain.go
@@ -1,17 +1,6 @@
// Copyright (c) 2019 The Jaeger Authors.
// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0

package jaeger

@@ -44,7 +33,7 @@ func FromDomainSpan(span *model.Span) *jaeger.Span {

type domainToJaegerTransformer struct{}

-func (d domainToJaegerTransformer) keyValueToTag(kv *model.KeyValue) *jaeger.Tag {
+func (domainToJaegerTransformer) keyValueToTag(kv *model.KeyValue) *jaeger.Tag {
    if kv.VType == model.StringType {
        stringValue := kv.VStr
        return &jaeger.Tag{
@@ -112,6 +101,7 @@ func (d domainToJaegerTransformer) convertLogs(logs []model.Log) []*jaeger.Log {
    jaegerLogs := make([]*jaeger.Log, len(logs))
    for idx, log := range logs {
        jaegerLogs[idx] = &jaeger.Log{
+           //nolint: gosec // G115
            Timestamp: int64(model.TimeAsEpochMicroseconds(log.Timestamp)),
            Fields:    d.convertKeyValuesToTags(log.Fields),
        }
@@ -119,9 +109,10 @@ func (d domainToJaegerTransformer) convertLogs(logs []model.Log) []*jaeger.Log {
    return jaegerLogs
}

-func (d domainToJaegerTransformer) convertSpanRefs(refs []model.SpanRef) []*jaeger.SpanRef {
+func (domainToJaegerTransformer) convertSpanRefs(refs []model.SpanRef) []*jaeger.SpanRef {
    jaegerSpanRefs := make([]*jaeger.SpanRef, len(refs))
    for idx, ref := range refs {
+       //nolint: gosec // G115
        jaegerSpanRefs[idx] = &jaeger.SpanRef{
            RefType:    jaeger.SpanRefType(ref.RefType),
            TraceIdLow: int64(ref.TraceID.Low),
@@ -137,6 +128,7 @@ func (d domainToJaegerTransformer) transformSpan(span *model.Span) *jaeger.Span
    logs := d.convertLogs(span.Logs)
    refs := d.convertSpanRefs(span.References)

+   //nolint: gosec // G115
    jaegerSpan := &jaeger.Span{
        TraceIdLow:  int64(span.TraceID.Low),
        TraceIdHigh: int64(span.TraceID.High),
diff --git a/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/jaeger/sampling_from_domain.go b/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/jaeger/sampling_from_domain.go
index c776072cd19..d456b018921 100644
--- a/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/jaeger/sampling_from_domain.go
+++ b/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/jaeger/sampling_from_domain.go
@@ -1,16 +1,5 @@
// Copyright (c) 2018 The Jaeger Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0

package jaeger

@@ -55,7 +44,10 @@ func convertRateLimitingFromDomain(s *api_v2.RateLimitingSamplingStrategy) (*sam
    if s.MaxTracesPerSecond > math.MaxInt16 {
        return nil, errors.New("maxTracesPerSecond is higher than int16")
    }
-   return &sampling.RateLimitingSamplingStrategy{MaxTracesPerSecond: int16(s.GetMaxTracesPerSecond())}, nil
+   return &sampling.RateLimitingSamplingStrategy{
+       //nolint: gosec // G115
+       MaxTracesPerSecond: int16(s.GetMaxTracesPerSecond()),
+   }, nil
}

func convertPerOperationFromDomain(s *api_v2.PerOperationSamplingStrategies) *sampling.PerOperationSamplingStrategies {
diff --git a/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/jaeger/sampling_to_domain.go b/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/jaeger/sampling_to_domain.go
index 876e50a4a83..23cea6ccbf2 100644
--- a/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/jaeger/sampling_to_domain.go
+++ b/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/jaeger/sampling_to_domain.go
@@ -1,16 +1,5 @@
// Copyright (c) 2018 The Jaeger Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0

package jaeger
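convertRateLimitingFromDomain above only silences gosec's G115 (integer narrowing) after an explicit math.MaxInt16 bounds check, which is the pattern behind most of the //nolint annotations in this vendor sync. A standalone sketch of the same guard, using illustrative names:

package main

import (
    "errors"
    "fmt"
    "math"
)

// toInt16 narrows an int32 only after proving it fits, so the subsequent
// conversion cannot silently overflow; this mirrors the guard that precedes
// the //nolint: gosec // G115 annotation in the diff.
func toInt16(v int32) (int16, error) {
    if v > math.MaxInt16 || v < math.MinInt16 {
        return 0, errors.New("value does not fit in int16")
    }
    return int16(v), nil
}

func main() {
    n, err := toInt16(40000)
    fmt.Println(n, err) // 0 value does not fit in int16
}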
diff --git a/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/jaeger/to_domain.go b/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/jaeger/to_domain.go
index 2738776123f..b3d082ea5a4 100644
--- a/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/jaeger/to_domain.go
+++ b/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/jaeger/to_domain.go
@@ -1,17 +1,6 @@
// Copyright (c) 2019 The Jaeger Authors.
// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0

package jaeger

@@ -58,6 +47,7 @@ func (td toDomain) ToDomainSpan(jSpan *jaeger.Span, jProcess *jaeger.Process) *m
}

func (td toDomain) transformSpan(jSpan *jaeger.Span, mProcess *model.Process) *model.Span {
+   //nolint: gosec // G115
    traceID := model.NewTraceID(uint64(jSpan.TraceIdHigh), uint64(jSpan.TraceIdLow))
    // allocate extra space for future append operation
    tags := td.getTags(jSpan.Tags, 1)
@@ -66,24 +56,29 @@ func (td toDomain) transformSpan(jSpan *jaeger.Span, mProcess *model.Process) *m
    // might still have these IDs without representing them in the References, so we
    // convert it back into child-of reference.
    if jSpan.ParentSpanId != 0 {
+       //nolint: gosec // G115
        parentSpanID := model.NewSpanID(uint64(jSpan.ParentSpanId))
        refs = model.MaybeAddParentSpanID(traceID, parentSpanID, refs)
    }
    return &model.Span{
-       TraceID:       traceID,
+       TraceID: traceID,
+       //nolint: gosec // G115
        SpanID:        model.NewSpanID(uint64(jSpan.SpanId)),
        OperationName: jSpan.OperationName,
        References:    refs,
-       Flags:         model.Flags(jSpan.Flags),
-       StartTime:     model.EpochMicrosecondsAsTime(uint64(jSpan.StartTime)),
-       Duration:      model.MicrosecondsAsDuration(uint64(jSpan.Duration)),
-       Tags:          tags,
-       Logs:          td.getLogs(jSpan.Logs),
-       Process:       mProcess,
+       //nolint: gosec // G115
+       Flags: model.Flags(jSpan.Flags),
+       //nolint: gosec // G115
+       StartTime: model.EpochMicrosecondsAsTime(uint64(jSpan.StartTime)),
+       //nolint: gosec // G115
+       Duration: model.MicrosecondsAsDuration(uint64(jSpan.Duration)),
+       Tags:     tags,
+       Logs:     td.getLogs(jSpan.Logs),
+       Process:  mProcess,
    }
}

-func (td toDomain) getReferences(jRefs []*jaeger.SpanRef) []model.SpanRef {
+func (toDomain) getReferences(jRefs []*jaeger.SpanRef) []model.SpanRef {
    if len(jRefs) == 0 {
        return nil
    }
@@ -91,9 +86,12 @@ func (td toDomain) getReferences(jRefs []*jaeger.SpanRef) []model.SpanRef {
    mRefs := make([]model.SpanRef, len(jRefs))
    for idx, jRef := range jRefs {
        mRefs[idx] = model.SpanRef{
+           //nolint: gosec // G115
            RefType: model.SpanRefType(int(jRef.RefType)),
+           //nolint: gosec // G115
            TraceID: model.NewTraceID(uint64(jRef.TraceIdHigh), uint64(jRef.TraceIdLow)),
-           SpanID:  model.NewSpanID(uint64(jRef.SpanId)),
+           //nolint: gosec // G115
+           SpanID: model.NewSpanID(uint64(jRef.SpanId)),
        }
    }

@@ -127,7 +125,7 @@ func (td toDomain) getTags(tags []*jaeger.Tag, extraSpace int) model.KeyValues {
    return retMe
}

-func (td toDomain) getTag(tag *jaeger.Tag) model.KeyValue {
+func (toDomain) getTag(tag *jaeger.Tag) model.KeyValue {
    switch tag.VType {
    case jaeger.TagType_BOOL:
        return model.Bool(tag.Key, tag.GetVBool())
@@ -151,6 +149,7 @@ func (td toDomain) getLogs(logs []*jaeger.Log) []model.Log {
    retMe := make([]model.Log, len(logs))
    for i, log := range logs {
        retMe[i] = model.Log{
+           //nolint: gosec // G115
            Timestamp: model.EpochMicrosecondsAsTime(uint64(log.Timestamp)),
            Fields:    td.getTags(log.Fields, 0),
        }
diff --git a/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/zipkin/deserialize.go b/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/zipkin/deserialize.go
index 821cf3914fb..7f581895e30 100644
--- a/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/zipkin/deserialize.go
+++ b/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/zipkin/deserialize.go
@@ -1,17 +1,6 @@
// Copyright (c) 2019 The Jaeger Authors.
// Copyright (c) 2018 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0

package zipkin
diff --git a/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/zipkin/doc.go b/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/zipkin/doc.go
index 7b7b461270a..4f47098fd16 100644
--- a/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/zipkin/doc.go
+++ b/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/zipkin/doc.go
@@ -1,17 +1,6 @@
// Copyright (c) 2019 The Jaeger Authors.
// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0

// Package zipkin allows converting model.Trace to/from zipkin.thrift model.
package zipkin
diff --git a/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/zipkin/process_hashtable.go b/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/zipkin/process_hashtable.go
index cd75fca2b0c..05c5a32d8c8 100644
--- a/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/zipkin/process_hashtable.go
+++ b/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/zipkin/process_hashtable.go
@@ -1,17 +1,6 @@
// Copyright (c) 2019 The Jaeger Authors.
// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0

package zipkin
diff --git a/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/zipkin/to_domain.go b/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/zipkin/to_domain.go
index f4dd3655f74..c287d958c1a 100644
--- a/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/zipkin/to_domain.go
+++ b/vendor/github.com/jaegertracing/jaeger/model/converter/thrift/zipkin/to_domain.go
@@ -1,17 +1,6 @@
// Copyright (c) 2019 The Jaeger Authors.
// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0

package zipkin

@@ -92,7 +81,7 @@ type toDomain struct{}

func (td toDomain) ToDomain(zSpans []*zipkincore.Span) (*model.Trace, error) {
    var errs []error
    processes := newProcessHashtable()
-   trace := &model.Trace{}
+   trc := &model.Trace{}
    for _, zSpan := range zSpans {
        jSpans, err := td.ToDomainSpans(zSpan)
        if err != nil {
@@ -101,10 +90,10 @@ func (td toDomain) ToDomain(zSpans []*zipkincore.Span) (*model.Trace, error) {
        for _, jSpan := range jSpans {
            // remove duplicate Process instances
            jSpan.Process = processes.add(jSpan.Process)
-           trace.Spans = append(trace.Spans, jSpan)
+           trc.Spans = append(trc.Spans, jSpan)
        }
    }
-   return trace, errors.Join(errs...)
+   return trc, errors.Join(errs...)
}

func (td toDomain) ToDomainSpans(zSpan *zipkincore.Span) ([]*model.Span, error) {
@@ -116,7 +105,7 @@ func (td toDomain) ToDomainSpans(zSpan *zipkincore.Span) ([]*model.Span, error)
    return jSpans, err
}

-func (td toDomain) findAnnotation(zSpan *zipkincore.Span, value string) *zipkincore.Annotation {
+func (toDomain) findAnnotation(zSpan *zipkincore.Span, value string) *zipkincore.Annotation {
    for _, ann := range zSpan.Annotations {
        if ann.Value == value {
            return ann
@@ -135,9 +124,11 @@ func (td toDomain) transformSpan(zSpan *zipkincore.Span) []*model.Span {
    if zSpan.TraceIDHigh != nil {
        traceIDHigh = *zSpan.TraceIDHigh
    }
+   //nolint: gosec // G115
    traceID := model.NewTraceID(uint64(traceIDHigh), uint64(zSpan.TraceID))
    var refs []model.SpanRef
    if zSpan.ParentID != nil {
+       //nolint: gosec // G115
        parentSpanID := model.NewSpanID(uint64(*zSpan.ParentID))
        refs = model.MaybeAddParentSpanID(traceID, parentSpanID, refs)
    }
@@ -147,15 +138,18 @@ func (td toDomain) transformSpan(zSpan *zipkincore.Span) []*model.Span {
    startTime, duration := td.getStartTimeAndDuration(zSpan)

    result := []*model.Span{{
-       TraceID:       traceID,
+       TraceID: traceID,
+       //nolint: gosec // G115
        SpanID:        model.NewSpanID(uint64(zSpan.ID)),
        OperationName: zSpan.Name,
        References:    refs,
        Flags:         flags,
-       StartTime:     model.EpochMicrosecondsAsTime(uint64(startTime)),
-       Duration:      model.MicrosecondsAsDuration(uint64(duration)),
-       Tags:          tags,
-       Logs:          td.getLogs(zSpan.Annotations),
+       //nolint: gosec // G115
+       StartTime: model.EpochMicrosecondsAsTime(uint64(startTime)),
+       //nolint: gosec // G115
+       Duration: model.MicrosecondsAsDuration(uint64(duration)),
+       Tags:     tags,
+       Logs:     td.getLogs(zSpan.Annotations),
    }}

    cs := td.findAnnotation(zSpan, zipkincore.CLIENT_SEND)
@@ -163,7 +157,8 @@ func (td toDomain) transformSpan(zSpan *zipkincore.Span) []*model.Span {
    if cs != nil && sr != nil {
        // if the span is client and server we split it into two separate spans
        s := &model.Span{
-           TraceID:       traceID,
+           TraceID: traceID,
+           //nolint: gosec // G115
            SpanID:        model.NewSpanID(uint64(zSpan.ID)),
            OperationName: zSpan.Name,
            References:    refs,
@@ -172,14 +167,18 @@ func (td toDomain) transformSpan(zSpan *zipkincore.Span) []*model.Span {
        // if the first span is a client span we create server span and vice-versa.
        if result[0].IsRPCClient() {
            s.Tags = []model.KeyValue{model.String(keySpanKind, trace.SpanKindServer.String())}
+           //nolint: gosec // G115
            s.StartTime = model.EpochMicrosecondsAsTime(uint64(sr.Timestamp))
            if ss := td.findAnnotation(zSpan, zipkincore.SERVER_SEND); ss != nil {
+               //nolint: gosec // G115
                s.Duration = model.MicrosecondsAsDuration(uint64(ss.Timestamp - sr.Timestamp))
            }
        } else {
            s.Tags = []model.KeyValue{model.String(keySpanKind, trace.SpanKindClient.String())}
+           //nolint: gosec // G115
            s.StartTime = model.EpochMicrosecondsAsTime(uint64(cs.Timestamp))
            if cr := td.findAnnotation(zSpan, zipkincore.CLIENT_RECV); cr != nil {
+               //nolint: gosec // G115
                s.Duration = model.MicrosecondsAsDuration(uint64(cr.Timestamp - cs.Timestamp))
            }
        }
@@ -189,7 +188,7 @@ func (td toDomain) transformSpan(zSpan *zipkincore.Span) []*model.Span {
}

// getFlags takes a Zipkin Span and deduces the proper flags settings
-func (td toDomain) getFlags(zSpan *zipkincore.Span) model.Flags {
+func (toDomain) getFlags(zSpan *zipkincore.Span) model.Flags {
    f := model.Flags(0)
    if zSpan.Debug {
        f.SetDebug()
@@ -198,9 +197,9 @@
    }

// Get a correct start time to use for the span if it's not set directly
-func (td toDomain) getStartTimeAndDuration(zSpan *zipkincore.Span) (int64, int64) {
-   timestamp := zSpan.GetTimestamp()
-   duration := zSpan.GetDuration()
+func (td toDomain) getStartTimeAndDuration(zSpan *zipkincore.Span) (timestamp, duration int64) {
+   timestamp = zSpan.GetTimestamp()
+   duration = zSpan.GetDuration()
    if timestamp == 0 {
        cs := td.findAnnotation(zSpan, zipkincore.CLIENT_SEND)
        sr := td.findAnnotation(zSpan, zipkincore.SERVER_RECV)
@@ -231,6 +230,7 @@ func (td toDomain) generateProcess(zSpan *zipkincore.Span) (*model.Process, erro
    serviceName, ipv4, err := td.findServiceNameAndIP(zSpan)
    if ipv4 != 0 {
        // If the ip process tag already exists, don't add it again
+       //nolint: gosec // G115
        tags = append(tags, model.Int64(IPTagName, int64(uint64(ipv4))))
    }
    return model.NewProcess(serviceName, tags), err
@@ -261,16 +261,17 @@ func (td toDomain) findServiceNameAndIP(zSpan *zipkincore.Span) (string, int32,
    }
    err := fmt.Errorf(
        "cannot find service name in Zipkin span [traceID=%x, spanID=%x]",
+       //nolint: gosec // G115
        uint64(zSpan.TraceID), uint64(zSpan.ID))
    return UnknownServiceName, 0, err
}

-func (td toDomain) isCoreAnnotation(annotation *zipkincore.Annotation) bool {
+func (toDomain) isCoreAnnotation(annotation *zipkincore.Annotation) bool {
    _, ok := coreAnnotations[annotation.Value]
    return ok
}

-func (td toDomain) isProcessTag(binaryAnnotation *zipkincore.BinaryAnnotation) bool {
+func (toDomain) isProcessTag(binaryAnnotation *zipkincore.BinaryAnnotation) bool {
    _, ok := processTagAnnotations[binaryAnnotation.Key]
    return ok
}
@@ -309,7 +310,7 @@ func (td toDomain) getTags(binAnnotations []*zipkincore.BinaryAnnotation, tagInc
    return retMe
}

-func (td toDomain) transformBinaryAnnotation(binaryAnnotation *zipkincore.BinaryAnnotation) (model.KeyValue, error) {
+func (toDomain) transformBinaryAnnotation(binaryAnnotation *zipkincore.BinaryAnnotation) (model.KeyValue, error) {
    switch binaryAnnotation.AnnotationType {
    case zipkincore.AnnotationType_BOOL:
        vBool := bytes.Equal(binaryAnnotation.Value, trueByteSlice)
@@ -346,7 +347,7 @@ func (td toDomain) transformBinaryAnnotation(binaryAnnotation *zipkincore.Binary
    return model.KeyValue{}, fmt.Errorf("unknown zipkin annotation type: %d", binaryAnnotation.AnnotationType)
}
-func bytesToNumber(b []byte, number interface{}) error {
+func bytesToNumber(b []byte, number any) error {
    buf := bytes.NewReader(b)
    return binary.Read(buf, binary.BigEndian, number)
}
@@ -364,6 +365,7 @@ func (td toDomain) getLogs(annotations []*zipkincore.Annotation) []model.Log {
        }
        logFields := td.getLogFields(a)
        jLog := model.Log{
+           //nolint: gosec // G115
            Timestamp: model.EpochMicrosecondsAsTime(uint64(a.Timestamp)),
            Fields:    logFields,
        }
@@ -372,7 +374,7 @@ func (td toDomain) getLogs(annotations []*zipkincore.Annotation) []model.Log {
    return retMe
}

-func (td toDomain) getLogFields(annotation *zipkincore.Annotation) []model.KeyValue {
+func (toDomain) getLogFields(annotation *zipkincore.Annotation) []model.KeyValue {
    var logFields map[string]string
    // Since Zipkin format does not support kv-logging, some clients encode those Logs
    // as annotations with JSON value. Therefore, we try JSON decoding first.
@@ -388,7 +390,7 @@ func (td toDomain) getLogFields(annotation *zipkincore.Annotation) []model.KeyVa
    return []model.KeyValue{model.String(DefaultLogFieldKey, annotation.Value)}
}

-func (td toDomain) getSpanKindTag(annotations []*zipkincore.Annotation) (model.KeyValue, bool) {
+func (toDomain) getSpanKindTag(annotations []*zipkincore.Annotation) (model.KeyValue, bool) {
    for _, a := range annotations {
        if spanKind, ok := coreAnnotations[a.Value]; ok {
            return model.String(keySpanKind, spanKind), true
@@ -397,12 +399,13 @@ func (td toDomain) getSpanKindTag(annotations []*zipkincore.Annotation) (model.K
    return model.KeyValue{}, false
}

-func (td toDomain) getPeerTags(endpoint *zipkincore.Endpoint, tags []model.KeyValue) []model.KeyValue {
+func (toDomain) getPeerTags(endpoint *zipkincore.Endpoint, tags []model.KeyValue) []model.KeyValue {
    if endpoint == nil {
        return tags
    }
    tags = append(tags, model.String(peerservice, endpoint.ServiceName))
    if endpoint.Ipv4 != 0 {
+       //nolint: gosec // G115
        ipv4 := int64(uint32(endpoint.Ipv4))
        tags = append(tags, model.Int64(peerHostIPv4, ipv4))
    }
@@ -412,6 +415,7 @@ func (td toDomain) getPeerTags(endpoint *zipkincore.Endpoint, tags []model.KeyVa
        tags = append(tags, model.Binary(peerHostIPv6, endpoint.Ipv6))
    }
    if endpoint.Port != 0 {
+       //nolint: gosec // G115
        port := int64(uint16(endpoint.Port))
        tags = append(tags, model.Int64(peerPort, port))
    }
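bytesToNumber above decodes Zipkin's big-endian binary annotation values by handing binary.Read a pointer of the caller's choosing. A standalone sketch of that decoding, with an illustrative call site:

package main

import (
    "bytes"
    "encoding/binary"
    "fmt"
)

// bytesToNumber fills *number* (which must be a pointer to a fixed-size
// value) from big-endian bytes, as in the vendored converter.
func bytesToNumber(b []byte, number any) error {
    return binary.Read(bytes.NewReader(b), binary.BigEndian, number)
}

func main() {
    var v int64
    if err := bytesToNumber([]byte{0, 0, 0, 0, 0, 0, 0, 42}, &v); err == nil {
        fmt.Println(v) // 42
    }
}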
diff --git a/vendor/github.com/jaegertracing/jaeger/model/dependencies.go b/vendor/github.com/jaegertracing/jaeger/model/dependencies.go
index d1a86ba0d7c..8a0f2745b1f 100644
--- a/vendor/github.com/jaegertracing/jaeger/model/dependencies.go
+++ b/vendor/github.com/jaegertracing/jaeger/model/dependencies.go
@@ -1,17 +1,6 @@
// Copyright (c) 2019 The Jaeger Authors.
// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0

package model

@@ -22,8 +11,9 @@ const (

// ApplyDefaults applies defaults to the DependencyLink.
func (d DependencyLink) ApplyDefaults() DependencyLink {
-   if d.Source == "" {
-       d.Source = JaegerDependencyLinkSource
+   dd := d
+   if dd.Source == "" {
+       dd.Source = JaegerDependencyLinkSource
    }
-   return d
+   return dd
}
diff --git a/vendor/github.com/jaegertracing/jaeger/model/doc.go b/vendor/github.com/jaegertracing/jaeger/model/doc.go
index b44aea615a6..bb8daf13905 100644
--- a/vendor/github.com/jaegertracing/jaeger/model/doc.go
+++ b/vendor/github.com/jaegertracing/jaeger/model/doc.go
@@ -1,17 +1,6 @@
// Copyright (c) 2019 The Jaeger Authors.
// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0

// Package model describes the internal data model for Trace and Span
package model
diff --git a/vendor/github.com/jaegertracing/jaeger/model/hash.go b/vendor/github.com/jaegertracing/jaeger/model/hash.go
index 890843a8aaf..5b4a4dc442a 100644
--- a/vendor/github.com/jaegertracing/jaeger/model/hash.go
+++ b/vendor/github.com/jaegertracing/jaeger/model/hash.go
@@ -1,17 +1,6 @@
// Copyright (c) 2019 The Jaeger Authors.
// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0

package model
diff --git a/vendor/github.com/jaegertracing/jaeger/model/ids.go b/vendor/github.com/jaegertracing/jaeger/model/ids.go
index dfdc9070574..27c7d3846b0 100644
--- a/vendor/github.com/jaegertracing/jaeger/model/ids.go
+++ b/vendor/github.com/jaegertracing/jaeger/model/ids.go
@@ -1,23 +1,13 @@
// Copyright (c) 2019 The Jaeger Authors.
// Copyright (c) 2018 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0

package model

import (
    "encoding/base64"
    "encoding/binary"
+   "errors"
    "fmt"
    "strconv"

@@ -87,23 +77,23 @@ func TraceIDFromBytes(data []byte) (TraceID, error) {
    case len(data) == traceIDShortBytesLen:
        t.Low = binary.BigEndian.Uint64(data)
    default:
-       return TraceID{}, fmt.Errorf("invalid length for TraceID")
+       return TraceID{}, errors.New("invalid length for TraceID")
    }
    return t, nil
}

// MarshalText is called by encoding/json, which we do not want people to use.
-func (t TraceID) MarshalText() ([]byte, error) {
-   return nil, fmt.Errorf("unsupported method TraceID.MarshalText; please use github.com/gogo/protobuf/jsonpb for marshalling")
+func (TraceID) MarshalText() ([]byte, error) {
+   return nil, errors.New("unsupported method TraceID.MarshalText; please use github.com/gogo/protobuf/jsonpb for marshalling")
}

// UnmarshalText is called by encoding/json, which we do not want people to use.
-func (t *TraceID) UnmarshalText(text []byte) error {
-   return fmt.Errorf("unsupported method TraceID.UnmarshalText; please use github.com/gogo/protobuf/jsonpb for marshalling")
+func (*TraceID) UnmarshalText([]byte /* text */) error {
+   return errors.New("unsupported method TraceID.UnmarshalText; please use github.com/gogo/protobuf/jsonpb for marshalling")
}

// Size returns the size of this datum in protobuf. It is always 16 bytes.
-func (t *TraceID) Size() int {
+func (*TraceID) Size() int {
    return 16
}

@@ -124,7 +114,7 @@ func (t *TraceID) Unmarshal(data []byte) error {

func marshalBytes(dst []byte, src []byte) (n int, err error) {
    if len(dst) < len(src) {
-       return 0, fmt.Errorf("buffer is too short")
+       return 0, errors.New("buffer is too short")
    }
    return copy(dst, src), nil
}

@@ -181,23 +171,23 @@ func SpanIDFromString(s string) (SpanID, error) {

// SpanIDFromBytes creates a SpanID from a list of bytes
func SpanIDFromBytes(data []byte) (SpanID, error) {
    if len(data) != traceIDShortBytesLen {
-       return SpanID(0), fmt.Errorf("invalid length for SpanID")
+       return SpanID(0), errors.New("invalid length for SpanID")
    }
    return NewSpanID(binary.BigEndian.Uint64(data)), nil
}

// MarshalText is called by encoding/json, which we do not want people to use.
-func (s SpanID) MarshalText() ([]byte, error) {
-   return nil, fmt.Errorf("unsupported method SpanID.MarshalText; please use github.com/gogo/protobuf/jsonpb for marshalling")
+func (SpanID) MarshalText() ([]byte, error) {
+   return nil, errors.New("unsupported method SpanID.MarshalText; please use github.com/gogo/protobuf/jsonpb for marshalling")
}

// UnmarshalText is called by encoding/json, which we do not want people to use.
-func (s *SpanID) UnmarshalText(text []byte) error {
-   return fmt.Errorf("unsupported method SpanID.UnmarshalText; please use github.com/gogo/protobuf/jsonpb for marshalling")
+func (*SpanID) UnmarshalText([]byte /* text */) error {
+   return errors.New("unsupported method SpanID.UnmarshalText; please use github.com/gogo/protobuf/jsonpb for marshalling")
}

// Size returns the size of this datum in protobuf. It is always 8 bytes.
-func (s *SpanID) Size() int {
+func (*SpanID) Size() int {
    return 8
}
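SpanIDFromBytes above accepts exactly eight big-endian bytes. A standalone sketch of that round-trip under a simplified SpanID type (the real model package adds protobuf plumbing around this):

package main

import (
    "encoding/binary"
    "errors"
    "fmt"
)

type SpanID uint64

// spanIDFromBytes mirrors the length check and big-endian decode above.
func spanIDFromBytes(data []byte) (SpanID, error) {
    if len(data) != 8 {
        return 0, errors.New("invalid length for SpanID")
    }
    return SpanID(binary.BigEndian.Uint64(data)), nil
}

func main() {
    b := make([]byte, 8)
    binary.BigEndian.PutUint64(b, 0xdeadbeef)
    id, err := spanIDFromBytes(b)
    fmt.Printf("%x %v\n", uint64(id), err) // deadbeef <nil>
}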
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Package json defines the external JSON representation for Jaeger traces. package json diff --git a/vendor/github.com/jaegertracing/jaeger/model/json/model.go b/vendor/github.com/jaegertracing/jaeger/model/json/model.go index 8d72dc498e5..6ee34f93fc7 100644 --- a/vendor/github.com/jaegertracing/jaeger/model/json/model.go +++ b/vendor/github.com/jaegertracing/jaeger/model/json/model.go @@ -1,17 +1,6 @@ // Copyright (c) 2019 The Jaeger Authors. // Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package json @@ -98,9 +87,9 @@ type Log struct { // KeyValue is a key-value pair with typed value. type KeyValue struct { - Key string `json:"key"` - Type ValueType `json:"type,omitempty"` - Value interface{} `json:"value"` + Key string `json:"key"` + Type ValueType `json:"type,omitempty"` + Value any `json:"value"` } // DependencyLink shows dependencies between services diff --git a/vendor/github.com/jaegertracing/jaeger/model/keyvalue.go b/vendor/github.com/jaegertracing/jaeger/model/keyvalue.go index 44f3e1bdb03..68fc91ea3a4 100644 --- a/vendor/github.com/jaegertracing/jaeger/model/keyvalue.go +++ b/vendor/github.com/jaegertracing/jaeger/model/keyvalue.go @@ -1,17 +1,6 @@ // Copyright (c) 2019 The Jaeger Authors. // Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package model @@ -102,8 +91,8 @@ func (kv *KeyValue) Binary() []byte { return nil } -// Value returns typed values stored in KeyValue as interface{}. -func (kv *KeyValue) Value() interface{} { +// Value returns typed values stored in KeyValue as any. 
+func (kv *KeyValue) Value() any { switch kv.VType { case StringType: return kv.VStr @@ -210,6 +199,7 @@ func (kv KeyValue) Hash(w io.Writer) error { if _, err := w.Write([]byte(kv.Key)); err != nil { return err } + //nolint: gosec // G115 if err := binary.Write(w, binary.BigEndian, uint16(kv.VType)); err != nil { return err } diff --git a/vendor/github.com/jaegertracing/jaeger/model/process.go b/vendor/github.com/jaegertracing/jaeger/model/process.go index 1babaeb1823..f2a73151886 100644 --- a/vendor/github.com/jaegertracing/jaeger/model/process.go +++ b/vendor/github.com/jaegertracing/jaeger/model/process.go @@ -1,17 +1,6 @@ // Copyright (c) 2019 The Jaeger Authors. // Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package model diff --git a/vendor/github.com/jaegertracing/jaeger/model/sort.go b/vendor/github.com/jaegertracing/jaeger/model/sort.go index b2556d8cc4c..91d82219085 100644 --- a/vendor/github.com/jaegertracing/jaeger/model/sort.go +++ b/vendor/github.com/jaegertracing/jaeger/model/sort.go @@ -1,17 +1,6 @@ // Copyright (c) 2019 The Jaeger Authors. // Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package model diff --git a/vendor/github.com/jaegertracing/jaeger/model/span.go b/vendor/github.com/jaegertracing/jaeger/model/span.go index 833763efe90..51d96cae40a 100644 --- a/vendor/github.com/jaegertracing/jaeger/model/span.go +++ b/vendor/github.com/jaegertracing/jaeger/model/span.go @@ -1,17 +1,6 @@ // Copyright (c) 2019 The Jaeger Authors. // Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package model diff --git a/vendor/github.com/jaegertracing/jaeger/model/spanref.go b/vendor/github.com/jaegertracing/jaeger/model/spanref.go index 92acc21e9a3..39d0e13820d 100644 --- a/vendor/github.com/jaegertracing/jaeger/model/spanref.go +++ b/vendor/github.com/jaegertracing/jaeger/model/spanref.go @@ -1,17 +1,6 @@ // Copyright (c) 2019 The Jaeger Authors. // Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package model diff --git a/vendor/github.com/jaegertracing/jaeger/model/time.go b/vendor/github.com/jaegertracing/jaeger/model/time.go index 34a66a7efc4..153e52b7609 100644 --- a/vendor/github.com/jaegertracing/jaeger/model/time.go +++ b/vendor/github.com/jaegertracing/jaeger/model/time.go @@ -1,17 +1,6 @@ // Copyright (c) 2019 The Jaeger Authors. // Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package model @@ -23,22 +12,26 @@ import ( func EpochMicrosecondsAsTime(ts uint64) time.Time { seconds := ts / 1000000 nanos := 1000 * (ts % 1000000) + //nolint: gosec // G115 return time.Unix(int64(seconds), int64(nanos)).UTC() } // TimeAsEpochMicroseconds converts time.Time to microseconds since epoch, // which is the format the StartTime field is stored in the Span. func TimeAsEpochMicroseconds(t time.Time) uint64 { + //nolint: gosec // G115 return uint64(t.UnixNano() / 1000) } // MicrosecondsAsDuration converts duration in microseconds to time.Duration value. func MicrosecondsAsDuration(v uint64) time.Duration { + //nolint: gosec // G115 return time.Duration(v) * time.Microsecond } // DurationAsMicroseconds converts time.Duration to microseconds, // which is the format the Duration field is stored in the Span. func DurationAsMicroseconds(d time.Duration) uint64 { + //nolint: gosec // G115 return uint64(d.Nanoseconds() / 1000) } diff --git a/vendor/github.com/jaegertracing/jaeger/model/trace.go b/vendor/github.com/jaegertracing/jaeger/model/trace.go index fcc31b07ae3..234e2adb143 100644 --- a/vendor/github.com/jaegertracing/jaeger/model/trace.go +++ b/vendor/github.com/jaegertracing/jaeger/model/trace.go @@ -1,17 +1,6 @@ // Copyright (c) 2019 The Jaeger Authors. // Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package model diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/bearertoken/context.go b/vendor/github.com/jaegertracing/jaeger/pkg/bearertoken/context.go deleted file mode 100644 index 4f9221fe631..00000000000 --- a/vendor/github.com/jaegertracing/jaeger/pkg/bearertoken/context.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (c) 2021 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package bearertoken - -import "context" - -type contextKeyType int - -const contextKey = contextKeyType(iota) - -// StoragePropagationKey is a key for viper configuration to pass this option to storage plugins. -const StoragePropagationKey = "storage.propagate.token" - -// ContextWithBearerToken set bearer token in context. -func ContextWithBearerToken(ctx context.Context, token string) context.Context { - if token == "" { - return ctx - } - return context.WithValue(ctx, contextKey, token) -} - -// GetBearerToken from context, or empty string if there is no token. -func GetBearerToken(ctx context.Context) (string, bool) { - val, ok := ctx.Value(contextKey).(string) - return val, ok -} diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/bearertoken/http.go b/vendor/github.com/jaegertracing/jaeger/pkg/bearertoken/http.go deleted file mode 100644 index 8e264f9b444..00000000000 --- a/vendor/github.com/jaegertracing/jaeger/pkg/bearertoken/http.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright (c) 2019 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package bearertoken - -import ( - "net/http" - "strings" - - "go.uber.org/zap" -) - -// PropagationHandler returns a http.Handler containing the logic to extract -// the Bearer token from the Authorization header of the http.Request and insert it into request.Context -// for propagation. The token can be accessed via GetBearerToken. 
-func PropagationHandler(logger *zap.Logger, h http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - authHeaderValue := r.Header.Get("Authorization") - // If no Authorization header is present, try with X-Forwarded-Access-Token - if authHeaderValue == "" { - authHeaderValue = r.Header.Get("X-Forwarded-Access-Token") - } - if authHeaderValue != "" { - headerValue := strings.Split(authHeaderValue, " ") - token := "" - switch { - case len(headerValue) == 2: - // Make sure we only capture bearer token , not other types like Basic auth. - if headerValue[0] == "Bearer" { - token = headerValue[1] - } - case len(headerValue) == 1: - // Treat the entire value as a token. - token = authHeaderValue - default: - logger.Warn("Invalid authorization header value, skipping token propagation") - } - h.ServeHTTP(w, r.WithContext(ContextWithBearerToken(ctx, token))) - } else { - h.ServeHTTP(w, r.WithContext(ctx)) - } - }) -} diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/bearertoken/transport.go b/vendor/github.com/jaegertracing/jaeger/pkg/bearertoken/transport.go deleted file mode 100644 index 7e13e6300b5..00000000000 --- a/vendor/github.com/jaegertracing/jaeger/pkg/bearertoken/transport.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright (c) 2021 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package bearertoken - -import ( - "errors" - "net/http" -) - -// RoundTripper wraps another http.RoundTripper and injects -// an authentication header with bearer token into requests. -type RoundTripper struct { - // Transport is the underlying http.RoundTripper being wrapped. Required. - Transport http.RoundTripper - - // StaticToken is the pre-configured bearer token. Optional. - StaticToken string - - // OverrideFromCtx enables reading bearer token from Context. - OverrideFromCtx bool -} - -// RoundTrip injects the outbound Authorization header with the -// token provided in the inbound request. -func (tr RoundTripper) RoundTrip(r *http.Request) (*http.Response, error) { - if tr.Transport == nil { - return nil, errors.New("no http.RoundTripper provided") - } - token := tr.StaticToken - if tr.OverrideFromCtx { - headerToken, _ := GetBearerToken(r.Context()) - if headerToken != "" { - token = headerToken - } - } - if token != "" { - r.Header.Set("Authorization", "Bearer "+token) - } - return tr.Transport.RoundTrip(r) -} diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/clientcfg/clientcfghttp/cfgmgr.go b/vendor/github.com/jaegertracing/jaeger/pkg/clientcfg/clientcfghttp/cfgmgr.go index 621162a36fe..81531004f38 100644 --- a/vendor/github.com/jaegertracing/jaeger/pkg/clientcfg/clientcfghttp/cfgmgr.go +++ b/vendor/github.com/jaegertracing/jaeger/pkg/clientcfg/clientcfghttp/cfgmgr.go @@ -1,16 +1,5 @@ // Copyright (c) 2020 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
 
 package clientcfghttp
 
@@ -18,20 +7,20 @@ import (
 	"context"
 	"errors"
 
-	"github.com/jaegertracing/jaeger/cmd/collector/app/sampling/strategystore"
+	"github.com/jaegertracing/jaeger/cmd/collector/app/sampling/samplingstrategy"
 	"github.com/jaegertracing/jaeger/proto-gen/api_v2"
 	"github.com/jaegertracing/jaeger/thrift-gen/baggage"
 )
 
 // ConfigManager implements ClientConfigManager.
 type ConfigManager struct {
-	SamplingStrategyStore strategystore.StrategyStore
-	BaggageManager        baggage.BaggageRestrictionManager
+	SamplingProvider samplingstrategy.Provider
+	BaggageManager   baggage.BaggageRestrictionManager
 }
 
 // GetSamplingStrategy implements ClientConfigManager.GetSamplingStrategy.
 func (c *ConfigManager) GetSamplingStrategy(ctx context.Context, serviceName string) (*api_v2.SamplingStrategyResponse, error) {
-	return c.SamplingStrategyStore.GetSamplingStrategy(ctx, serviceName)
+	return c.SamplingProvider.GetSamplingStrategy(ctx, serviceName)
 }
 
 // GetBaggageRestrictions implements ClientConfigManager.GetBaggageRestrictions.
diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/clientcfg/clientcfghttp/handler.go b/vendor/github.com/jaegertracing/jaeger/pkg/clientcfg/clientcfghttp/handler.go
index 2c7bbc12f09..6b885fffbe2 100644
--- a/vendor/github.com/jaegertracing/jaeger/pkg/clientcfg/clientcfghttp/handler.go
+++ b/vendor/github.com/jaegertracing/jaeger/pkg/clientcfg/clientcfghttp/handler.go
@@ -1,17 +1,6 @@
 // Copyright (c) 2019 The Jaeger Authors.
 // Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
 
 package clientcfghttp
 
@@ -109,6 +98,17 @@ func (h *HTTPHandler) RegisterRoutes(router *mux.Router) {
 	}).Methods(http.MethodGet)
 }
 
+// RegisterRoutesWithHTTP registers configuration handlers with HTTP Router.
+func (h *HTTPHandler) RegisterRoutesWithHTTP(router *http.ServeMux) { + prefix := h.params.BasePath + router.HandleFunc( + prefix+"/", + func(w http.ResponseWriter, r *http.Request) { + h.serveSamplingHTTP(w, r, h.encodeThriftLegacy) + }, + ) +} + func (h *HTTPHandler) serviceFromRequest(w http.ResponseWriter, r *http.Request) (string, error) { services := r.URL.Query()["service"] if len(services) != 1 { @@ -119,9 +119,9 @@ func (h *HTTPHandler) serviceFromRequest(w http.ResponseWriter, r *http.Request) return services[0], nil } -func (h *HTTPHandler) writeJSON(w http.ResponseWriter, json []byte) error { +func (h *HTTPHandler) writeJSON(w http.ResponseWriter, jsonData []byte) error { w.Header().Add("Content-Type", mimeTypeApplicationJSON) - if _, err := w.Write(json); err != nil { + if _, err := w.Write(jsonData); err != nil { h.metrics.WriteFailures.Inc(1) return err } @@ -213,8 +213,8 @@ var samplingStrategyTypes = []api_v2.SamplingStrategyType{ // // Thrift 0.9.3 classes generate this JSON: // {"strategyType":"PROBABILISTIC","probabilisticSampling":{"samplingRate":0.5}} -func (h *HTTPHandler) encodeThriftEnums092(json []byte) []byte { - str := string(json) +func (*HTTPHandler) encodeThriftEnums092(jsonData []byte) []byte { + str := string(jsonData) for _, strategyType := range samplingStrategyTypes { str = strings.Replace( str, diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/config/tlscfg/cert_watcher.go b/vendor/github.com/jaegertracing/jaeger/pkg/config/tlscfg/cert_watcher.go index 106249c3647..cff06f4e0a4 100644 --- a/vendor/github.com/jaegertracing/jaeger/pkg/config/tlscfg/cert_watcher.go +++ b/vendor/github.com/jaegertracing/jaeger/pkg/config/tlscfg/cert_watcher.go @@ -1,16 +1,5 @@ // Copyright (c) 2020 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package tlscfg diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/config/tlscfg/certpool_unix.go b/vendor/github.com/jaegertracing/jaeger/pkg/config/tlscfg/certpool_unix.go index b5525e51bc5..15943a21c97 100644 --- a/vendor/github.com/jaegertracing/jaeger/pkg/config/tlscfg/certpool_unix.go +++ b/vendor/github.com/jaegertracing/jaeger/pkg/config/tlscfg/certpool_unix.go @@ -1,16 +1,5 @@ // Copyright (c) 2021 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 //go:build !windows // +build !windows diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/config/tlscfg/certpool_windows.go b/vendor/github.com/jaegertracing/jaeger/pkg/config/tlscfg/certpool_windows.go index 56aa4f7746c..fae3a4b2ebf 100644 --- a/vendor/github.com/jaegertracing/jaeger/pkg/config/tlscfg/certpool_windows.go +++ b/vendor/github.com/jaegertracing/jaeger/pkg/config/tlscfg/certpool_windows.go @@ -1,16 +1,5 @@ // Copyright (c) 2021 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 //go:build windows // +build windows diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/config/tlscfg/ciphersuites.go b/vendor/github.com/jaegertracing/jaeger/pkg/config/tlscfg/ciphersuites.go index 4c71db9379b..dc667deed02 100644 --- a/vendor/github.com/jaegertracing/jaeger/pkg/config/tlscfg/ciphersuites.go +++ b/vendor/github.com/jaegertracing/jaeger/pkg/config/tlscfg/ciphersuites.go @@ -1,16 +1,5 @@ // Copyright (c) 2022 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package tlscfg diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/config/tlscfg/flags.go b/vendor/github.com/jaegertracing/jaeger/pkg/config/tlscfg/flags.go index 96de6b7cf4e..4488bc9dbfe 100644 --- a/vendor/github.com/jaegertracing/jaeger/pkg/config/tlscfg/flags.go +++ b/vendor/github.com/jaegertracing/jaeger/pkg/config/tlscfg/flags.go @@ -1,16 +1,5 @@ // Copyright (c) 2019 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package tlscfg diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/config/tlscfg/options.go b/vendor/github.com/jaegertracing/jaeger/pkg/config/tlscfg/options.go index 2707887831d..4604313e157 100644 --- a/vendor/github.com/jaegertracing/jaeger/pkg/config/tlscfg/options.go +++ b/vendor/github.com/jaegertracing/jaeger/pkg/config/tlscfg/options.go @@ -1,28 +1,19 @@ // Copyright (c) 2019 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package tlscfg import ( "crypto/tls" "crypto/x509" + "errors" "fmt" "io" "os" "path/filepath" "time" + "go.opentelemetry.io/collector/config/configtls" "go.uber.org/zap" ) @@ -39,88 +30,88 @@ type Options struct { MaxVersion string `mapstructure:"max_version"` SkipHostVerify bool `mapstructure:"skip_host_verify"` ReloadInterval time.Duration `mapstructure:"reload_interval"` - certWatcher *certWatcher `mapstructure:"-"` + certWatcher *certWatcher } var systemCertPool = x509.SystemCertPool // to allow overriding in unit test // Config loads TLS certificates and returns a TLS Config. -func (p *Options) Config(logger *zap.Logger) (*tls.Config, error) { +func (o *Options) Config(logger *zap.Logger) (*tls.Config, error) { var minVersionId, maxVersionId uint16 - certPool, err := p.loadCertPool() + certPool, err := o.loadCertPool() if err != nil { return nil, fmt.Errorf("failed to load CA CertPool: %w", err) } - cipherSuiteIds, err := CipherSuiteNamesToIDs(p.CipherSuites) + cipherSuiteIds, err := CipherSuiteNamesToIDs(o.CipherSuites) if err != nil { return nil, fmt.Errorf("failed to get cipher suite ids from cipher suite names: %w", err) } - if p.MinVersion != "" { - minVersionId, err = VersionNameToID(p.MinVersion) + if o.MinVersion != "" { + minVersionId, err = VersionNameToID(o.MinVersion) if err != nil { return nil, fmt.Errorf("failed to get minimum tls version: %w", err) } } - if p.MaxVersion != "" { - maxVersionId, err = VersionNameToID(p.MaxVersion) + if o.MaxVersion != "" { + maxVersionId, err = VersionNameToID(o.MaxVersion) if err != nil { return nil, fmt.Errorf("failed to get maximum tls version: %w", err) } } - if p.MinVersion != "" && p.MaxVersion != "" { + if o.MinVersion != "" && o.MaxVersion != "" { if minVersionId > maxVersionId { - return nil, fmt.Errorf("minimum tls version can't be greater than maximum tls version") + return nil, errors.New("minimum tls version can't be greater than maximum tls version") } } tlsCfg := &tls.Config{ RootCAs: certPool, - ServerName: p.ServerName, - InsecureSkipVerify: p.SkipHostVerify, /* #nosec G402*/ + ServerName: o.ServerName, + InsecureSkipVerify: o.SkipHostVerify, /* #nosec G402*/ CipherSuites: cipherSuiteIds, MinVersion: minVersionId, MaxVersion: maxVersionId, } - if p.ClientCAPath != "" { + if o.ClientCAPath != "" { // TODO this should be moved to certWatcher, since it already loads key pair certPool := x509.NewCertPool() - if err := addCertToPool(p.ClientCAPath, certPool); err != nil { + if err := 
addCertToPool(o.ClientCAPath, certPool); err != nil {
 			return nil, err
 		}
 		tlsCfg.ClientCAs = certPool
 		tlsCfg.ClientAuth = tls.RequireAndVerifyClientCert
 	}
 
-	certWatcher, err := newCertWatcher(*p, logger, tlsCfg.RootCAs, tlsCfg.ClientCAs)
+	certWatcher, err := newCertWatcher(*o, logger, tlsCfg.RootCAs, tlsCfg.ClientCAs)
 	if err != nil {
 		return nil, err
 	}
-	p.certWatcher = certWatcher
+	o.certWatcher = certWatcher
 
-	if (p.CertPath == "" && p.KeyPath != "") || (p.CertPath != "" && p.KeyPath == "") {
-		return nil, fmt.Errorf("for client auth via TLS, either both client certificate and key must be supplied, or neither")
+	if (o.CertPath == "" && o.KeyPath != "") || (o.CertPath != "" && o.KeyPath == "") {
+		return nil, errors.New("for client auth via TLS, either both client certificate and key must be supplied, or neither")
 	}
-	if p.CertPath != "" && p.KeyPath != "" {
+	if o.CertPath != "" && o.KeyPath != "" {
 		tlsCfg.GetCertificate = func(*tls.ClientHelloInfo) (*tls.Certificate, error) {
-			return p.certWatcher.certificate(), nil
+			return o.certWatcher.certificate(), nil
 		}
 		// GetClientCertificate is used on the client side when server is configured with tls.RequireAndVerifyClientCert e.g. mTLS
 		tlsCfg.GetClientCertificate = func(*tls.CertificateRequestInfo) (*tls.Certificate, error) {
-			return p.certWatcher.certificate(), nil
+			return o.certWatcher.certificate(), nil
 		}
 	}
 
 	return tlsCfg, nil
 }
 
-func (p Options) loadCertPool() (*x509.CertPool, error) {
-	if len(p.CAPath) == 0 { // no truststore given, use SystemCertPool
+func (o Options) loadCertPool() (*x509.CertPool, error) {
+	if len(o.CAPath) == 0 { // no truststore given, use SystemCertPool
 		certPool, err := loadSystemCertPool()
 		if err != nil {
 			return nil, fmt.Errorf("failed to load SystemCertPool: %w", err)
@@ -129,12 +120,55 @@ func (p Options) loadCertPool() (*x509.CertPool, error) {
 	}
 	certPool := x509.NewCertPool() // setup user specified truststore
-	if err := addCertToPool(p.CAPath, certPool); err != nil {
+	if err := addCertToPool(o.CAPath, certPool); err != nil {
 		return nil, err
 	}
 	return certPool, nil
 }
 
+func (o *Options) ToOtelClientConfig() configtls.ClientConfig {
+	return configtls.ClientConfig{
+		Insecure:           !o.Enabled,
+		InsecureSkipVerify: o.SkipHostVerify,
+		ServerName:         o.ServerName,
+		Config: configtls.Config{
+			CAFile:         o.CAPath,
+			CertFile:       o.CertPath,
+			KeyFile:        o.KeyPath,
+			CipherSuites:   o.CipherSuites,
+			MinVersion:     o.MinVersion,
+			MaxVersion:     o.MaxVersion,
+			ReloadInterval: o.ReloadInterval,
+		},
+	}
+}
+
+// ToOtelServerConfig provides a mapping from Options to OTEL's TLS Server Configuration.
+func (o *Options) ToOtelServerConfig() *configtls.ServerConfig {
+	if !o.Enabled {
+		return nil
+	}
+
+	cfg := &configtls.ServerConfig{
+		ClientCAFile: o.ClientCAPath,
+		Config: configtls.Config{
+			CAFile:         o.CAPath,
+			CertFile:       o.CertPath,
+			KeyFile:        o.KeyPath,
+			CipherSuites:   o.CipherSuites,
+			MinVersion:     o.MinVersion,
+			MaxVersion:     o.MaxVersion,
+			ReloadInterval: o.ReloadInterval,
+		},
+	}
+
+	if o.ReloadInterval > 0 {
+		cfg.ReloadClientCAFile = true
+	}
+
+	return cfg
+}
+
 func addCertToPool(caPath string, certPool *x509.CertPool) error {
 	caPEM, err := os.ReadFile(filepath.Clean(caPath))
 	if err != nil {
@@ -150,9 +184,9 @@ func addCertToPool(caPath string, certPool *x509.CertPool) error {
 var _ io.Closer = (*Options)(nil)
 
 // Close shuts down the embedded certificate watcher.
-func (p *Options) Close() error { - if p.certWatcher != nil { - return p.certWatcher.Close() +func (o *Options) Close() error { + if o.certWatcher != nil { + return o.certWatcher.Close() } return nil } diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/discovery/discoverer.go b/vendor/github.com/jaegertracing/jaeger/pkg/discovery/discoverer.go index ae1780d8b33..5385b3c0c71 100644 --- a/vendor/github.com/jaegertracing/jaeger/pkg/discovery/discoverer.go +++ b/vendor/github.com/jaegertracing/jaeger/pkg/discovery/discoverer.go @@ -1,17 +1,6 @@ // Copyright (c) 2019 The Jaeger Authors. // Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package discovery diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/discovery/grpcresolver/grpc_resolver.go b/vendor/github.com/jaegertracing/jaeger/pkg/discovery/grpcresolver/grpc_resolver.go index 2329870f7ca..84800e26ad1 100644 --- a/vendor/github.com/jaegertracing/jaeger/pkg/discovery/grpcresolver/grpc_resolver.go +++ b/vendor/github.com/jaegertracing/jaeger/pkg/discovery/grpcresolver/grpc_resolver.go @@ -1,16 +1,5 @@ // Copyright (c) 2019 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package grpcresolver @@ -85,7 +74,7 @@ func New( } // Build returns itself for Resolver, because it's both a builder and a resolver. -func (r *Resolver) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { +func (r *Resolver) Build(_ resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) { r.cc = cc // Update conn states if proactively updates already work @@ -106,7 +95,7 @@ func (r *Resolver) Scheme() string { // ResolveNow is a noop for Resolver since resolver is already firing r.cc.UpdatesState every time // it receives updates of new instance from discoCh -func (r *Resolver) ResolveNow(o resolver.ResolveNowOptions) {} +func (*Resolver) ResolveNow(resolver.ResolveNowOptions) {} func (r *Resolver) watcher() { defer r.closing.Done() diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/discovery/notifier.go b/vendor/github.com/jaegertracing/jaeger/pkg/discovery/notifier.go index fba50b71db7..487ca20bab7 100644 --- a/vendor/github.com/jaegertracing/jaeger/pkg/discovery/notifier.go +++ b/vendor/github.com/jaegertracing/jaeger/pkg/discovery/notifier.go @@ -1,17 +1,6 @@ // Copyright (c) 2019 The Jaeger Authors. 
// Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package discovery diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/distributedlock/interface.go b/vendor/github.com/jaegertracing/jaeger/pkg/distributedlock/interface.go index 1abf600f4c7..41033984407 100644 --- a/vendor/github.com/jaegertracing/jaeger/pkg/distributedlock/interface.go +++ b/vendor/github.com/jaegertracing/jaeger/pkg/distributedlock/interface.go @@ -1,17 +1,6 @@ // Copyright (c) 2019 The Jaeger Authors. // Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package distributedlock diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/fswatcher/fswatcher.go b/vendor/github.com/jaegertracing/jaeger/pkg/fswatcher/fswatcher.go index d3c929d1d9b..36f194b6522 100644 --- a/vendor/github.com/jaegertracing/jaeger/pkg/fswatcher/fswatcher.go +++ b/vendor/github.com/jaegertracing/jaeger/pkg/fswatcher/fswatcher.go @@ -1,22 +1,11 @@ // Copyright (c) 2021 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package fswatcher import ( "crypto/sha256" - "fmt" + "encoding/hex" "io" "os" "path" @@ -86,11 +75,11 @@ func (w *FSWatcher) setupWatchedPaths(filepaths []string) error { if p == "" { continue } - if h, err := hashFile(p); err == nil { - w.fileHashContentMap[p] = h - } else { + h, err := hashFile(p) + if err != nil { return err } + w.fileHashContentMap[p] = h dir := path.Dir(p) if _, ok := uniqueDirs[dir]; !ok { if err := w.watcher.Add(dir); err != nil { @@ -143,10 +132,10 @@ func (w *FSWatcher) Close() error { } // isModified returns true if the file has been modified since the last check. 
-func (w *FSWatcher) isModified(filepath string, previousHash string) (bool, string) { - hash, err := hashFile(filepath) +func (w *FSWatcher) isModified(filePathName string, previousHash string) (bool, string) { + hash, err := hashFile(filePathName) if err != nil { - w.logger.Warn("Unable to read the file", zap.String("file", filepath), zap.Error(err)) + w.logger.Warn("Unable to read the file", zap.String("file", filePathName), zap.Error(err)) return true, "" } return previousHash != hash, hash @@ -165,5 +154,5 @@ func hashFile(file string) (string, error) { return "", err } - return fmt.Sprintf("%x", h.Sum(nil)), nil + return hex.EncodeToString(h.Sum(nil)), nil } diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/gogocodec/codec.go b/vendor/github.com/jaegertracing/jaeger/pkg/gogocodec/codec.go index f7df1c2d2ec..55322baa89c 100644 --- a/vendor/github.com/jaegertracing/jaeger/pkg/gogocodec/codec.go +++ b/vendor/github.com/jaegertracing/jaeger/pkg/gogocodec/codec.go @@ -1,16 +1,5 @@ // Copyright (c) 2021 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package gogocodec @@ -22,6 +11,7 @@ import ( gogoproto "github.com/gogo/protobuf/proto" "google.golang.org/grpc/encoding" "google.golang.org/grpc/encoding/proto" + "google.golang.org/grpc/mem" ) const ( @@ -29,7 +19,7 @@ const ( jaegerModelPkgPath = "github.com/jaegertracing/jaeger/model" ) -var defaultCodec encoding.Codec +var defaultCodec encoding.CodecV2 // CustomType is an interface that Gogo expects custom types to implement. // https://github.com/gogo/protobuf/blob/master/custom_types.md @@ -45,44 +35,45 @@ type CustomType interface { } func init() { - defaultCodec = encoding.GetCodec(proto.Name) + defaultCodec = encoding.GetCodecV2(proto.Name) defaultCodec.Name() // ensure it's not nil - encoding.RegisterCodec(newCodec()) + encoding.RegisterCodecV2(newCodec()) } // gogoCodec forces the use of gogo proto marshalling/unmarshalling for // Jaeger proto types (package jaeger/gen-proto). 
type gogoCodec struct{} -var _ encoding.Codec = (*gogoCodec)(nil) +var _ encoding.CodecV2 = (*gogoCodec)(nil) func newCodec() *gogoCodec { return &gogoCodec{} } // Name implements encoding.Codec -func (c *gogoCodec) Name() string { +func (*gogoCodec) Name() string { return proto.Name } // Marshal implements encoding.Codec -func (c *gogoCodec) Marshal(v interface{}) ([]byte, error) { +func (*gogoCodec) Marshal(v any) (mem.BufferSlice, error) { t := reflect.TypeOf(v) elem := t.Elem() // use gogo proto only for Jaeger types if useGogo(elem) { - return gogoproto.Marshal(v.(gogoproto.Message)) + bytes, err := gogoproto.Marshal(v.(gogoproto.Message)) + return mem.BufferSlice{mem.SliceBuffer(bytes)}, err } return defaultCodec.Marshal(v) } // Unmarshal implements encoding.Codec -func (c *gogoCodec) Unmarshal(data []byte, v interface{}) error { +func (*gogoCodec) Unmarshal(data mem.BufferSlice, v any) error { t := reflect.TypeOf(v) elem := t.Elem() // only for collections // use gogo proto only for Jaeger types if useGogo(elem) { - return gogoproto.Unmarshal(data, v.(gogoproto.Message)) + return gogoproto.Unmarshal(data.Materialize(), v.(gogoproto.Message)) } return defaultCodec.Unmarshal(data, v) } diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/healthcheck/handler.go b/vendor/github.com/jaegertracing/jaeger/pkg/healthcheck/handler.go index 1a714ea70c6..bbc50a803d7 100644 --- a/vendor/github.com/jaegertracing/jaeger/pkg/healthcheck/handler.go +++ b/vendor/github.com/jaegertracing/jaeger/pkg/healthcheck/handler.go @@ -1,17 +1,6 @@ // Copyright (c) 2019 The Jaeger Authors. // Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package healthcheck @@ -106,7 +95,7 @@ func (hc *HealthCheck) Handler() http.Handler { }) } -func (hc *HealthCheck) createRespBody(state state, template healthCheckResponse) []byte { +func (*HealthCheck) createRespBody(state state, template healthCheckResponse) []byte { resp := template // clone if state.status == Ready { resp.UpSince = state.upSince diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/metrics/counter.go b/vendor/github.com/jaegertracing/jaeger/pkg/metrics/counter.go index e594940857e..ad9d94febf7 100644 --- a/vendor/github.com/jaegertracing/jaeger/pkg/metrics/counter.go +++ b/vendor/github.com/jaegertracing/jaeger/pkg/metrics/counter.go @@ -1,17 +1,6 @@ // Copyright (c) 2022 The Jaeger Authors. // Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package metrics diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/metrics/factory.go b/vendor/github.com/jaegertracing/jaeger/pkg/metrics/factory.go index e77257127d9..faeaf3d1585 100644 --- a/vendor/github.com/jaegertracing/jaeger/pkg/metrics/factory.go +++ b/vendor/github.com/jaegertracing/jaeger/pkg/metrics/factory.go @@ -1,17 +1,6 @@ // Copyright (c) 2022 The Jaeger Authors. // Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package metrics @@ -64,19 +53,19 @@ var NullFactory Factory = nullFactory{} type nullFactory struct{} -func (nullFactory) Counter(options Options) Counter { +func (nullFactory) Counter(Options) Counter { return NullCounter } -func (nullFactory) Timer(options TimerOptions) Timer { +func (nullFactory) Timer(TimerOptions) Timer { return NullTimer } -func (nullFactory) Gauge(options Options) Gauge { +func (nullFactory) Gauge(Options) Gauge { return NullGauge } -func (nullFactory) Histogram(options HistogramOptions) Histogram { +func (nullFactory) Histogram(HistogramOptions) Histogram { return NullHistogram } -func (nullFactory) Namespace(scope NSOptions) Factory { return NullFactory } +func (nullFactory) Namespace(NSOptions /* scope */) Factory { return NullFactory } diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/metrics/gauge.go b/vendor/github.com/jaegertracing/jaeger/pkg/metrics/gauge.go index 53631d451d6..3445b7ac32d 100644 --- a/vendor/github.com/jaegertracing/jaeger/pkg/metrics/gauge.go +++ b/vendor/github.com/jaegertracing/jaeger/pkg/metrics/gauge.go @@ -1,17 +1,6 @@ // Copyright (c) 2022 The Jaeger Authors. // Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package metrics diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/metrics/histogram.go b/vendor/github.com/jaegertracing/jaeger/pkg/metrics/histogram.go index d3bd6174fe8..b737e74c476 100644 --- a/vendor/github.com/jaegertracing/jaeger/pkg/metrics/histogram.go +++ b/vendor/github.com/jaegertracing/jaeger/pkg/metrics/histogram.go @@ -1,16 +1,5 @@ // Copyright (c) 2018 The Jaeger Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package metrics diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/metrics/metrics.go b/vendor/github.com/jaegertracing/jaeger/pkg/metrics/metrics.go index 86cf913bc94..b81e3e4a775 100644 --- a/vendor/github.com/jaegertracing/jaeger/pkg/metrics/metrics.go +++ b/vendor/github.com/jaegertracing/jaeger/pkg/metrics/metrics.go @@ -1,17 +1,6 @@ // Copyright (c) 2022 The Jaeger Authors. // Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package metrics @@ -32,7 +21,7 @@ import ( // of type Counter or Gauge or Timer. // // Errors during Init lead to a panic. -func MustInit(metrics interface{}, factory Factory, globalTags map[string]string) { +func MustInit(metrics any, factory Factory, globalTags map[string]string) { if err := Init(metrics, factory, globalTags); err != nil { panic(err.Error()) } @@ -40,7 +29,7 @@ func MustInit(metrics interface{}, factory Factory, globalTags map[string]string // Init does the same as MustInit, but returns an error instead of // panicking. -func Init(m interface{}, factory Factory, globalTags map[string]string) error { +func Init(m any, factory Factory, globalTags map[string]string) error { // Allow user to opt out of reporting metrics by passing in nil. if factory == nil { factory = NullFactory @@ -101,7 +90,7 @@ func Init(m interface{}, factory Factory, globalTags map[string]string) error { } } help := field.Tag.Get("help") - var obj interface{} + var obj any switch { case field.Type.AssignableTo(counterPtrType): obj = factory.Counter(Options{ diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/metrics/package.go b/vendor/github.com/jaegertracing/jaeger/pkg/metrics/package.go index 9764382db10..a0587cbd06e 100644 --- a/vendor/github.com/jaegertracing/jaeger/pkg/metrics/package.go +++ b/vendor/github.com/jaegertracing/jaeger/pkg/metrics/package.go @@ -1,17 +1,6 @@ // Copyright (c) 2019 The Jaeger Authors. // Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 // Package metrics provides an internal abstraction for metrics API, // and command line flags for configuring the metrics backend. diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/metrics/stopwatch.go b/vendor/github.com/jaegertracing/jaeger/pkg/metrics/stopwatch.go index 0ca70979260..4685eaea9d9 100644 --- a/vendor/github.com/jaegertracing/jaeger/pkg/metrics/stopwatch.go +++ b/vendor/github.com/jaegertracing/jaeger/pkg/metrics/stopwatch.go @@ -1,17 +1,6 @@ // Copyright (c) 2022 The Jaeger Authors. // Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package metrics diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/metrics/timer.go b/vendor/github.com/jaegertracing/jaeger/pkg/metrics/timer.go index 75df952d747..9396e50b8c8 100644 --- a/vendor/github.com/jaegertracing/jaeger/pkg/metrics/timer.go +++ b/vendor/github.com/jaegertracing/jaeger/pkg/metrics/timer.go @@ -1,17 +1,6 @@ // Copyright (c) 2022 The Jaeger Authors. // Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package metrics diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/netutils/port.go b/vendor/github.com/jaegertracing/jaeger/pkg/netutils/port.go index 201a18adade..9b880cce6e2 100644 --- a/vendor/github.com/jaegertracing/jaeger/pkg/netutils/port.go +++ b/vendor/github.com/jaegertracing/jaeger/pkg/netutils/port.go @@ -1,16 +1,5 @@ // Copyright (c) 2020 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package netutils diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/recoveryhandler/zap.go b/vendor/github.com/jaegertracing/jaeger/pkg/recoveryhandler/zap.go index 678c179dade..0a65c6cea0e 100644 --- a/vendor/github.com/jaegertracing/jaeger/pkg/recoveryhandler/zap.go +++ b/vendor/github.com/jaegertracing/jaeger/pkg/recoveryhandler/zap.go @@ -1,17 +1,6 @@ // Copyright (c) 2019 The Jaeger Authors. // Copyright (c) 2017-2018 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package recoveryhandler @@ -29,7 +18,7 @@ type zapRecoveryWrapper struct { } // Println logs an error message with the given fields -func (z zapRecoveryWrapper) Println(args ...interface{}) { +func (z zapRecoveryWrapper) Println(args ...any) { z.logger.Error(fmt.Sprint(args...)) } diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/version/build.go b/vendor/github.com/jaegertracing/jaeger/pkg/version/build.go index 6d4fc208cc9..3100d5b32d3 100644 --- a/vendor/github.com/jaegertracing/jaeger/pkg/version/build.go +++ b/vendor/github.com/jaegertracing/jaeger/pkg/version/build.go @@ -1,16 +1,5 @@ // Copyright (c) 2017 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package version diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/version/command.go b/vendor/github.com/jaegertracing/jaeger/pkg/version/command.go index 343c1a477ac..67c824bdffd 100644 --- a/vendor/github.com/jaegertracing/jaeger/pkg/version/command.go +++ b/vendor/github.com/jaegertracing/jaeger/pkg/version/command.go @@ -1,16 +1,5 @@ // Copyright (c) 2017 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package version @@ -30,7 +19,7 @@ func Command() *cobra.Command { Use: "version", Short: "Print the version.", Long: `Print the version and build information.`, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(cmd *cobra.Command, _ /* args */ []string) error { json, err := json.Marshal(info) if err != nil { return err diff --git a/vendor/github.com/jaegertracing/jaeger/pkg/version/handler.go b/vendor/github.com/jaegertracing/jaeger/pkg/version/handler.go index adbc7c55e58..bb094af94a9 100644 --- a/vendor/github.com/jaegertracing/jaeger/pkg/version/handler.go +++ b/vendor/github.com/jaegertracing/jaeger/pkg/version/handler.go @@ -1,16 +1,5 @@ // Copyright (c) 2017 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package version @@ -24,12 +13,12 @@ import ( // RegisterHandler registers version handler to /version func RegisterHandler(mu *http.ServeMux, logger *zap.Logger) { info := Get() - json, err := json.Marshal(info) + jsonData, err := json.Marshal(info) if err != nil { logger.Fatal("Could not get Jaeger version", zap.Error(err)) } mu.HandleFunc("/version", func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(200) - w.Write(json) + w.Write(jsonData) }) } diff --git a/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/shared/archive.go b/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/shared/archive.go index 41741e7b950..2bd14685d27 100644 --- a/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/shared/archive.go +++ b/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/shared/archive.go @@ -1,16 +1,5 @@ // Copyright (c) 2020 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package shared @@ -44,7 +33,7 @@ type archiveWriter struct { // GetTrace takes a traceID and returns a Trace associated with that traceID from Archive Storage func (r *archiveReader) GetTrace(ctx context.Context, traceID model.TraceID) (*model.Trace, error) { - stream, err := r.client.GetArchiveTrace(upgradeContext(ctx), &storage_v1.GetTraceRequest{ + stream, err := r.client.GetArchiveTrace(ctx, &storage_v1.GetTraceRequest{ TraceID: traceID, }) if status.Code(err) == codes.NotFound { @@ -58,22 +47,22 @@ func (r *archiveReader) GetTrace(ctx context.Context, traceID model.TraceID) (*m } // GetServices not used in archiveReader -func (r *archiveReader) GetServices(ctx context.Context) ([]string, error) { +func (*archiveReader) GetServices(context.Context) ([]string, error) { return nil, errors.New("GetServices not implemented") } // GetOperations not used in archiveReader -func (r *archiveReader) GetOperations(ctx context.Context, query spanstore.OperationQueryParameters) ([]spanstore.Operation, error) { +func (*archiveReader) GetOperations(context.Context, spanstore.OperationQueryParameters) ([]spanstore.Operation, error) { return nil, errors.New("GetOperations not implemented") } // FindTraces not used in archiveReader -func (r *archiveReader) FindTraces(ctx context.Context, query *spanstore.TraceQueryParameters) ([]*model.Trace, error) { +func (*archiveReader) FindTraces(context.Context, *spanstore.TraceQueryParameters) ([]*model.Trace, error) { return nil, errors.New("FindTraces not implemented") } // FindTraceIDs not used in archiveReader -func (r *archiveReader) FindTraceIDs(ctx context.Context, query *spanstore.TraceQueryParameters) ([]model.TraceID, error) { +func (*archiveReader) FindTraceIDs(context.Context, *spanstore.TraceQueryParameters) ([]model.TraceID, error) { return nil, errors.New("FindTraceIDs not implemented") } diff --git a/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/shared/grpc_client.go b/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/shared/grpc_client.go index 9b24b6c18eb..abe296b70e3 100644 --- a/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/shared/grpc_client.go +++ b/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/shared/grpc_client.go @@ -1,16 +1,5 @@ // Copyright (c) 2019 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
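Note on the grpc_client.go changes that follow: the client type is exported (grpcClient becomes GRPCClient) and NewGRPCClient now takes two connections, a traced one for read paths and an untraced one for write paths, presumably so that writing spans does not itself emit spans. A minimal caller-side wiring sketch; newStorageClient, target, and the dial options are illustrative and not part of this patch:

package main

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	"github.com/jaegertracing/jaeger/plugin/storage/grpc/shared"
)

func newStorageClient(target string) (*shared.GRPCClient, error) {
	// In real wiring the read connection would carry tracing interceptors;
	// this sketch omits them. Both connections may point at the same target.
	traced, err := grpc.NewClient(target, grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		return nil, err
	}
	// Writes use a separate connection with no tracing instrumentation attached.
	untraced, err := grpc.NewClient(target, grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		return nil, err
	}
	return shared.NewGRPCClient(traced, untraced), nil
}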
+// SPDX-License-Identifier: Apache-2.0 package shared @@ -23,11 +12,10 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" "github.com/jaegertracing/jaeger/model" - "github.com/jaegertracing/jaeger/pkg/bearertoken" + _ "github.com/jaegertracing/jaeger/pkg/gogocodec" // force gogo codec registration "github.com/jaegertracing/jaeger/proto-gen/storage_v1" "github.com/jaegertracing/jaeger/storage/dependencystore" "github.com/jaegertracing/jaeger/storage/spanstore" @@ -37,16 +25,13 @@ import ( const BearerTokenKey = "bearer.token" var ( - _ StoragePlugin = (*grpcClient)(nil) - _ ArchiveStoragePlugin = (*grpcClient)(nil) - _ PluginCapabilities = (*grpcClient)(nil) - - // upgradeContext composites several steps of upgrading context - upgradeContext = composeContextUpgradeFuncs(upgradeContextWithBearerToken) + _ StoragePlugin = (*GRPCClient)(nil) + _ ArchiveStoragePlugin = (*GRPCClient)(nil) + _ PluginCapabilities = (*GRPCClient)(nil) ) -// grpcClient implements shared.StoragePlugin and reads/writes spans and dependencies -type grpcClient struct { +// GRPCClient implements shared.StoragePlugin and reads/writes spans and dependencies +type GRPCClient struct { readerClient storage_v1.SpanReaderPluginClient writerClient storage_v1.SpanWriterPluginClient archiveReaderClient storage_v1.ArchiveSpanReaderPluginClient @@ -56,78 +41,48 @@ type grpcClient struct { streamWriterClient storage_v1.StreamingSpanWriterPluginClient } -func NewGRPCClient(c *grpc.ClientConn) *grpcClient { - return &grpcClient{ - readerClient: storage_v1.NewSpanReaderPluginClient(c), - writerClient: storage_v1.NewSpanWriterPluginClient(c), - archiveReaderClient: storage_v1.NewArchiveSpanReaderPluginClient(c), - archiveWriterClient: storage_v1.NewArchiveSpanWriterPluginClient(c), - capabilitiesClient: storage_v1.NewPluginCapabilitiesClient(c), - depsReaderClient: storage_v1.NewDependenciesReaderPluginClient(c), - streamWriterClient: storage_v1.NewStreamingSpanWriterPluginClient(c), - } -} - -// ContextUpgradeFunc is a functional type that can be composed to upgrade context -type ContextUpgradeFunc func(ctx context.Context) context.Context - -// composeContextUpgradeFuncs composes ContextUpgradeFunc and returns a composed function -// to run the given func in strict order. -func composeContextUpgradeFuncs(funcs ...ContextUpgradeFunc) ContextUpgradeFunc { - return func(ctx context.Context) context.Context { - for _, fun := range funcs { - ctx = fun(ctx) - } - return ctx - } -} - -// upgradeContextWithBearerToken turns the context into a gRPC outgoing context with bearer token -// in the request metadata, if the original context has bearer token attached. -// Otherwise returns original context. 
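The context-upgrade helper removed just below means bearer tokens are no longer attached to outgoing gRPC metadata implicitly by this client. Callers that relied on that behavior can do it themselves; a minimal sketch, assuming the caller holds the token (the helper name withBearerToken is illustrative):

package shared // placement illustrative; any caller-side package works

import (
	"context"

	"google.golang.org/grpc/metadata"
)

// withBearerToken mirrors the removed upgrade step: copy any outgoing
// metadata and set the token under BearerTokenKey ("bearer.token").
func withBearerToken(ctx context.Context, token string) context.Context {
	md, ok := metadata.FromOutgoingContext(ctx)
	if ok {
		md = md.Copy() // avoid mutating metadata shared with other contexts
	} else {
		md = metadata.New(nil)
	}
	md.Set(BearerTokenKey, token)
	return metadata.NewOutgoingContext(ctx, md)
}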
-func upgradeContextWithBearerToken(ctx context.Context) context.Context { - bearerToken, hasToken := bearertoken.GetBearerToken(ctx) - if hasToken { - md, ok := metadata.FromOutgoingContext(ctx) - if !ok { - md = metadata.New(nil) - } - md.Set(BearerTokenKey, bearerToken) - return metadata.NewOutgoingContext(ctx, md) +func NewGRPCClient(tracedConn *grpc.ClientConn, untracedConn *grpc.ClientConn) *GRPCClient { + return &GRPCClient{ + readerClient: storage_v1.NewSpanReaderPluginClient(tracedConn), + writerClient: storage_v1.NewSpanWriterPluginClient(untracedConn), + archiveReaderClient: storage_v1.NewArchiveSpanReaderPluginClient(tracedConn), + archiveWriterClient: storage_v1.NewArchiveSpanWriterPluginClient(untracedConn), + capabilitiesClient: storage_v1.NewPluginCapabilitiesClient(tracedConn), + depsReaderClient: storage_v1.NewDependenciesReaderPluginClient(tracedConn), + streamWriterClient: storage_v1.NewStreamingSpanWriterPluginClient(untracedConn), } - return ctx } // DependencyReader implements shared.StoragePlugin. -func (c *grpcClient) DependencyReader() dependencystore.Reader { +func (c *GRPCClient) DependencyReader() dependencystore.Reader { return c } // SpanReader implements shared.StoragePlugin. -func (c *grpcClient) SpanReader() spanstore.Reader { +func (c *GRPCClient) SpanReader() spanstore.Reader { return c } // SpanWriter implements shared.StoragePlugin. -func (c *grpcClient) SpanWriter() spanstore.Writer { +func (c *GRPCClient) SpanWriter() spanstore.Writer { return c } -func (c *grpcClient) StreamingSpanWriter() spanstore.Writer { +func (c *GRPCClient) StreamingSpanWriter() spanstore.Writer { return newStreamingSpanWriter(c.streamWriterClient) } -func (c *grpcClient) ArchiveSpanReader() spanstore.Reader { +func (c *GRPCClient) ArchiveSpanReader() spanstore.Reader { return &archiveReader{client: c.archiveReaderClient} } -func (c *grpcClient) ArchiveSpanWriter() spanstore.Writer { +func (c *GRPCClient) ArchiveSpanWriter() spanstore.Writer { return &archiveWriter{client: c.archiveWriterClient} } // GetTrace takes a traceID and returns a Trace associated with that traceID -func (c *grpcClient) GetTrace(ctx context.Context, traceID model.TraceID) (*model.Trace, error) { - stream, err := c.readerClient.GetTrace(upgradeContext(ctx), &storage_v1.GetTraceRequest{ +func (c *GRPCClient) GetTrace(ctx context.Context, traceID model.TraceID) (*model.Trace, error) { + stream, err := c.readerClient.GetTrace(ctx, &storage_v1.GetTraceRequest{ TraceID: traceID, }) if status.Code(err) == codes.NotFound { @@ -141,8 +96,8 @@ func (c *grpcClient) GetTrace(ctx context.Context, traceID model.TraceID) (*mode } // GetServices returns a list of all known services -func (c *grpcClient) GetServices(ctx context.Context) ([]string, error) { - resp, err := c.readerClient.GetServices(upgradeContext(ctx), &storage_v1.GetServicesRequest{}) +func (c *GRPCClient) GetServices(ctx context.Context) ([]string, error) { + resp, err := c.readerClient.GetServices(ctx, &storage_v1.GetServicesRequest{}) if err != nil { return nil, fmt.Errorf("plugin error: %w", err) } @@ -151,11 +106,11 @@ func (c *grpcClient) GetServices(ctx context.Context) ([]string, error) { } // GetOperations returns the operations of a given service -func (c *grpcClient) GetOperations( +func (c *GRPCClient) GetOperations( ctx context.Context, query spanstore.OperationQueryParameters, ) ([]spanstore.Operation, error) { - resp, err := c.readerClient.GetOperations(upgradeContext(ctx), &storage_v1.GetOperationsRequest{ + resp, err := 
c.readerClient.GetOperations(ctx, &storage_v1.GetOperationsRequest{ Service: query.ServiceName, SpanKind: query.SpanKind, }) @@ -182,8 +137,8 @@ func (c *grpcClient) GetOperations( } // FindTraces retrieves traces that match the traceQuery -func (c *grpcClient) FindTraces(ctx context.Context, query *spanstore.TraceQueryParameters) ([]*model.Trace, error) { - stream, err := c.readerClient.FindTraces(upgradeContext(ctx), &storage_v1.FindTracesRequest{ +func (c *GRPCClient) FindTraces(ctx context.Context, query *spanstore.TraceQueryParameters) ([]*model.Trace, error) { + stream, err := c.readerClient.FindTraces(ctx, &storage_v1.FindTracesRequest{ Query: &storage_v1.TraceQueryParameters{ ServiceName: query.ServiceName, OperationName: query.OperationName, @@ -192,7 +147,8 @@ func (c *grpcClient) FindTraces(ctx context.Context, query *spanstore.TraceQuery StartTimeMax: query.StartTimeMax, DurationMin: query.DurationMin, DurationMax: query.DurationMax, - NumTraces: int32(query.NumTraces), + //nolint: gosec // G115 + NumTraces: int32(query.NumTraces), }, }) if err != nil { @@ -208,7 +164,7 @@ func (c *grpcClient) FindTraces(ctx context.Context, query *spanstore.TraceQuery } for i, span := range received.Spans { - if span.TraceID != traceID { + if trace == nil || span.TraceID != traceID { trace = &model.Trace{} traceID = span.TraceID traces = append(traces, trace) @@ -220,8 +176,8 @@ func (c *grpcClient) FindTraces(ctx context.Context, query *spanstore.TraceQuery } // FindTraceIDs retrieves traceIDs that match the traceQuery -func (c *grpcClient) FindTraceIDs(ctx context.Context, query *spanstore.TraceQueryParameters) ([]model.TraceID, error) { - resp, err := c.readerClient.FindTraceIDs(upgradeContext(ctx), &storage_v1.FindTraceIDsRequest{ +func (c *GRPCClient) FindTraceIDs(ctx context.Context, query *spanstore.TraceQueryParameters) ([]model.TraceID, error) { + resp, err := c.readerClient.FindTraceIDs(ctx, &storage_v1.FindTraceIDsRequest{ Query: &storage_v1.TraceQueryParameters{ ServiceName: query.ServiceName, OperationName: query.OperationName, @@ -230,7 +186,8 @@ func (c *grpcClient) FindTraceIDs(ctx context.Context, query *spanstore.TraceQue StartTimeMax: query.StartTimeMax, DurationMin: query.DurationMin, DurationMax: query.DurationMax, - NumTraces: int32(query.NumTraces), + //nolint: gosec // G115 + NumTraces: int32(query.NumTraces), }, }) if err != nil { @@ -241,7 +198,7 @@ func (c *grpcClient) FindTraceIDs(ctx context.Context, query *spanstore.TraceQue } // WriteSpan saves the span -func (c *grpcClient) WriteSpan(ctx context.Context, span *model.Span) error { +func (c *GRPCClient) WriteSpan(ctx context.Context, span *model.Span) error { _, err := c.writerClient.WriteSpan(ctx, &storage_v1.WriteSpanRequest{ Span: span, }) @@ -252,7 +209,7 @@ func (c *grpcClient) WriteSpan(ctx context.Context, span *model.Span) error { return nil } -func (c *grpcClient) Close() error { +func (c *GRPCClient) Close() error { _, err := c.writerClient.Close(context.Background(), &storage_v1.CloseWriterRequest{}) if err != nil && status.Code(err) != codes.Unimplemented { return fmt.Errorf("plugin error: %w", err) @@ -262,7 +219,7 @@ func (c *grpcClient) Close() error { } // GetDependencies returns all interservice dependencies -func (c *grpcClient) GetDependencies(ctx context.Context, endTs time.Time, lookback time.Duration) ([]model.DependencyLink, error) { +func (c *GRPCClient) GetDependencies(ctx context.Context, endTs time.Time, lookback time.Duration) ([]model.DependencyLink, error) { resp, err := 
c.depsReaderClient.GetDependencies(ctx, &storage_v1.GetDependenciesRequest{ EndTime: endTs, StartTime: endTs.Add(-lookback), @@ -274,7 +231,7 @@ func (c *grpcClient) GetDependencies(ctx context.Context, endTs time.Time, lookb return resp.Dependencies, nil } -func (c *grpcClient) Capabilities() (*Capabilities, error) { +func (c *GRPCClient) Capabilities() (*Capabilities, error) { capabilities, err := c.capabilitiesClient.Capabilities(context.Background(), &storage_v1.CapabilitiesRequest{}) if status.Code(err) == codes.Unimplemented { return &Capabilities{}, nil diff --git a/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/shared/grpc_handler.go b/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/shared/grpc_handler.go index 98f27a53540..564a625ebf0 100644 --- a/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/shared/grpc_handler.go +++ b/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/shared/grpc_handler.go @@ -1,16 +1,5 @@ // Copyright (c) 2019 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package shared @@ -22,9 +11,12 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" + "google.golang.org/grpc/health" + "google.golang.org/grpc/health/grpc_health_v1" "google.golang.org/grpc/status" "github.com/jaegertracing/jaeger/model" + _ "github.com/jaegertracing/jaeger/pkg/gogocodec" // force gogo codec registration "github.com/jaegertracing/jaeger/proto-gen/storage_v1" "github.com/jaegertracing/jaeger/storage/dependencystore" "github.com/jaegertracing/jaeger/storage/spanstore" @@ -83,7 +75,7 @@ func NewGRPCHandlerWithPlugins( } // Register registers the server as gRPC methods handler. 
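The Register change that follows adds a second parameter: callers must now pass a *health.Server, on which each storage_v1 service is marked SERVING before the health service itself is registered. A minimal server wiring sketch; serve and addr are illustrative:

package main

import (
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/health"

	"github.com/jaegertracing/jaeger/plugin/storage/grpc/shared"
)

func serve(handler *shared.GRPCHandler, addr string) error {
	srv := grpc.NewServer()
	hs := health.NewServer() // Register sets per-service statuses and registers it on srv
	if err := handler.Register(srv, hs); err != nil {
		return err
	}
	lis, err := net.Listen("tcp", addr)
	if err != nil {
		return err
	}
	return srv.Serve(lis)
}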
-func (s *GRPCHandler) Register(ss *grpc.Server) error { +func (s *GRPCHandler) Register(ss *grpc.Server, hs *health.Server) error { storage_v1.RegisterSpanReaderPluginServer(ss, s) storage_v1.RegisterSpanWriterPluginServer(ss, s) storage_v1.RegisterArchiveSpanReaderPluginServer(ss, s) @@ -91,6 +83,16 @@ func (s *GRPCHandler) Register(ss *grpc.Server) error { storage_v1.RegisterPluginCapabilitiesServer(ss, s) storage_v1.RegisterDependenciesReaderPluginServer(ss, s) storage_v1.RegisterStreamingSpanWriterPluginServer(ss, s) + + hs.SetServingStatus("jaeger.storage.v1.SpanReaderPlugin", grpc_health_v1.HealthCheckResponse_SERVING) + hs.SetServingStatus("jaeger.storage.v1.SpanWriterPlugin", grpc_health_v1.HealthCheckResponse_SERVING) + hs.SetServingStatus("jaeger.storage.v1.ArchiveSpanReaderPlugin", grpc_health_v1.HealthCheckResponse_SERVING) + hs.SetServingStatus("jaeger.storage.v1.ArchiveSpanWriterPlugin", grpc_health_v1.HealthCheckResponse_SERVING) + hs.SetServingStatus("jaeger.storage.v1.PluginCapabilities", grpc_health_v1.HealthCheckResponse_SERVING) + hs.SetServingStatus("jaeger.storage.v1.DependenciesReaderPlugin", grpc_health_v1.HealthCheckResponse_SERVING) + hs.SetServingStatus("jaeger.storage.v1.StreamingSpanWriterPlugin", grpc_health_v1.HealthCheckResponse_SERVING) + grpc_health_v1.RegisterHealthServer(ss, hs) + return nil } @@ -136,23 +138,22 @@ func (s *GRPCHandler) WriteSpan(ctx context.Context, r *storage_v1.WriteSpanRequ return &storage_v1.WriteSpanResponse{}, nil } -func (s *GRPCHandler) Close(ctx context.Context, r *storage_v1.CloseWriterRequest) (*storage_v1.CloseWriterResponse, error) { +func (s *GRPCHandler) Close(context.Context, *storage_v1.CloseWriterRequest) (*storage_v1.CloseWriterResponse, error) { if closer, ok := s.impl.SpanWriter().(io.Closer); ok { if err := closer.Close(); err != nil { return nil, err } return &storage_v1.CloseWriterResponse{}, nil - } else { - return nil, status.Error(codes.Unimplemented, "span writer does not support graceful shutdown") } + return nil, status.Error(codes.Unimplemented, "span writer does not support graceful shutdown") } // GetTrace takes a traceID and streams a Trace associated with that traceID func (s *GRPCHandler) GetTrace(r *storage_v1.GetTraceRequest, stream storage_v1.SpanReaderPlugin_GetTraceServer) error { trace, err := s.impl.SpanReader().GetTrace(stream.Context(), r.TraceID) if errors.Is(err, spanstore.ErrTraceNotFound) { - return status.Errorf(codes.NotFound, spanstore.ErrTraceNotFound.Error()) + return status.Error(codes.NotFound, spanstore.ErrTraceNotFound.Error()) } if err != nil { return err @@ -167,7 +168,7 @@ func (s *GRPCHandler) GetTrace(r *storage_v1.GetTraceRequest, stream storage_v1. 
} // GetServices returns a list of all known services -func (s *GRPCHandler) GetServices(ctx context.Context, r *storage_v1.GetServicesRequest) (*storage_v1.GetServicesResponse, error) { +func (s *GRPCHandler) GetServices(ctx context.Context, _ *storage_v1.GetServicesRequest) (*storage_v1.GetServicesResponse, error) { services, err := s.impl.SpanReader().GetServices(ctx) if err != nil { return nil, err @@ -247,7 +248,7 @@ func (s *GRPCHandler) FindTraceIDs(ctx context.Context, r *storage_v1.FindTraceI }, nil } -func (s *GRPCHandler) sendSpans(spans []*model.Span, sendFn func(*storage_v1.SpansResponseChunk) error) error { +func (*GRPCHandler) sendSpans(spans []*model.Span, sendFn func(*storage_v1.SpansResponseChunk) error) error { chunk := make([]model.Span, 0, len(spans)) for i := 0; i < len(spans); i += spanBatchSize { chunk = chunk[:0] @@ -262,7 +263,7 @@ func (s *GRPCHandler) sendSpans(spans []*model.Span, sendFn func(*storage_v1.Spa return nil } -func (s *GRPCHandler) Capabilities(ctx context.Context, request *storage_v1.CapabilitiesRequest) (*storage_v1.CapabilitiesResponse, error) { +func (s *GRPCHandler) Capabilities(context.Context, *storage_v1.CapabilitiesRequest) (*storage_v1.CapabilitiesResponse, error) { return &storage_v1.CapabilitiesResponse{ ArchiveSpanReader: s.impl.ArchiveSpanReader() != nil, ArchiveSpanWriter: s.impl.ArchiveSpanWriter() != nil, @@ -277,7 +278,7 @@ func (s *GRPCHandler) GetArchiveTrace(r *storage_v1.GetTraceRequest, stream stor } trace, err := reader.GetTrace(stream.Context(), r.TraceID) if errors.Is(err, spanstore.ErrTraceNotFound) { - return status.Errorf(codes.NotFound, spanstore.ErrTraceNotFound.Error()) + return status.Error(codes.NotFound, spanstore.ErrTraceNotFound.Error()) } if err != nil { return err diff --git a/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/shared/interface.go b/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/shared/interface.go index c6345a29b82..9abc2f75dce 100644 --- a/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/shared/interface.go +++ b/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/shared/interface.go @@ -1,40 +1,13 @@ // Copyright (c) 2019 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package shared import ( - "github.com/hashicorp/go-plugin" - "github.com/jaegertracing/jaeger/storage/dependencystore" "github.com/jaegertracing/jaeger/storage/spanstore" ) -// StoragePluginIdentifier is the identifier that is shared by plugin and host. -const StoragePluginIdentifier = "storage_plugin" - -// Handshake is a common handshake that is shared by plugin and host. -var Handshake = plugin.HandshakeConfig{ - MagicCookieKey: "STORAGE_PLUGIN", - MagicCookieValue: "jaeger", -} - -// PluginMap is the map of plugins we can dispense. -var PluginMap = map[string]plugin.Plugin{ - StoragePluginIdentifier: &StorageGRPCPlugin{}, -} - // StoragePlugin is the interface we're exposing as a plugin. 
type StoragePlugin interface { SpanReader() spanstore.Reader diff --git a/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/shared/plugin.go b/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/shared/plugin.go deleted file mode 100644 index b4a3fabf93e..00000000000 --- a/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/shared/plugin.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright (c) 2020 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package shared - -import ( - "context" - - "github.com/hashicorp/go-plugin" - "google.golang.org/grpc" - - _ "github.com/jaegertracing/jaeger/pkg/gogocodec" // force gogo codec registration -) - -// Ensure plugin.GRPCPlugin API match. -var _ plugin.GRPCPlugin = (*StorageGRPCPlugin)(nil) - -// StorageGRPCPlugin is the implementation of plugin.GRPCPlugin. -type StorageGRPCPlugin struct { - plugin.Plugin - - // Concrete implementation, This is only used for plugins that are written in Go. - Impl StoragePlugin - ArchiveImpl ArchiveStoragePlugin - StreamImpl StreamingSpanWriterPlugin -} - -// RegisterHandlers registers the plugin with the server -func (p *StorageGRPCPlugin) RegisterHandlers(s *grpc.Server) error { - handler := NewGRPCHandlerWithPlugins(p.Impl, p.ArchiveImpl, p.StreamImpl) - return handler.Register(s) -} - -// GRPCServer implements plugin.GRPCPlugin. It is used by go-plugin to create a grpc plugin server. -func (p *StorageGRPCPlugin) GRPCServer(_ *plugin.GRPCBroker, s *grpc.Server) error { - return p.RegisterHandlers(s) -} - -// GRPCClient implements plugin.GRPCPlugin. It is used by go-plugin to create a grpc plugin client. -func (*StorageGRPCPlugin) GRPCClient(_ context.Context, _ *plugin.GRPCBroker, conn *grpc.ClientConn) (interface{}, error) { - return NewGRPCClient(conn), nil -} diff --git a/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/shared/streaming_writer.go b/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/shared/streaming_writer.go index fad5d6c0db1..b186404afdd 100644 --- a/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/shared/streaming_writer.go +++ b/vendor/github.com/jaegertracing/jaeger/plugin/storage/grpc/shared/streaming_writer.go @@ -1,16 +1,5 @@ // Copyright (c) 2022 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package shared @@ -78,7 +67,7 @@ func (s *streamingSpanWriter) getStream(ctx context.Context) (storage_v1.Streami if ok { return st, nil } - return nil, fmt.Errorf("plugin is closed") + return nil, errors.New("plugin is closed") default: return s.client.WriteSpanStream(ctx) } diff --git a/vendor/github.com/jaegertracing/jaeger/ports/ports.go b/vendor/github.com/jaegertracing/jaeger/ports/ports.go index caf035fc761..c1244dfb52b 100644 --- a/vendor/github.com/jaegertracing/jaeger/ports/ports.go +++ b/vendor/github.com/jaegertracing/jaeger/ports/ports.go @@ -1,16 +1,5 @@ // Copyright (c) 2019 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package ports @@ -20,17 +9,6 @@ import ( ) const ( - // AgentJaegerThriftCompactUDP is the default port for receiving Jaeger Thrift over UDP in compact encoding - AgentJaegerThriftCompactUDP = 6831 - // AgentJaegerThriftBinaryUDP is the default port for receiving Jaeger Thrift over UDP in binary encoding - AgentJaegerThriftBinaryUDP = 6832 - // AgentZipkinThriftCompactUDP is the default port for receiving Zipkin Thrift over UDP in binary encoding - AgentZipkinThriftCompactUDP = 5775 - // AgentConfigServerHTTP is the default port for the agent's HTTP config server (e.g. /sampling endpoint) - AgentConfigServerHTTP = 5778 - // AgentAdminHTTP is the default admin HTTP port (health check, metrics, etc.) - AgentAdminHTTP = 14271 - // CollectorGRPC is the default port for gRPC server for sending spans CollectorGRPC = 14250 // CollectorHTTP is the default port for HTTP server for sending spans (e.g. /api/traces endpoint) @@ -40,6 +18,13 @@ const ( // CollectorZipkin is the port for Zipkin server for sending spans CollectorZipkin = 9411 + // CollectorV2SamplingHTTP is the HTTP port for remote sampling extension + CollectorV2SamplingHTTP = 5778 + // CollectorV2SamplingGRPC is the gRPC port for remote sampling extension + CollectorV2SamplingGRPC = 5779 + // CollectorV2HealthChecks is the port for health checks extension + CollectorV2HealthChecks = 13133 + // QueryGRPC is the default port of GRPC requests for Query trace retrieval QueryGRPC = 16685 // QueryHTTP is the default port for UI and Query API (e.g.
/api/* endpoints) @@ -61,15 +46,6 @@ func PortToHostPort(port int) string { return ":" + strconv.Itoa(port) } -// GetAddressFromCLIOptions gets listening address based on port (deprecated flags) or host:port (new flags) -func GetAddressFromCLIOptions(port int, hostPort string) string { - if port != 0 { - return PortToHostPort(port) - } - - return FormatHostPort(hostPort) -} - // FormatHostPort returns hostPort in a usable format (host:port) if it wasn't already func FormatHostPort(hostPort string) string { if hostPort == "" { diff --git a/vendor/github.com/jaegertracing/jaeger/storage/dependencystore/interface.go b/vendor/github.com/jaegertracing/jaeger/storage/dependencystore/interface.go index 4a68149862d..07f8ddd3123 100644 --- a/vendor/github.com/jaegertracing/jaeger/storage/dependencystore/interface.go +++ b/vendor/github.com/jaegertracing/jaeger/storage/dependencystore/interface.go @@ -1,17 +1,6 @@ // Copyright (c) 2019 The Jaeger Authors. // Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package dependencystore diff --git a/vendor/github.com/jaegertracing/jaeger/storage/doc.go b/vendor/github.com/jaegertracing/jaeger/storage/doc.go index 4b3b595ca6a..a2aaeda2514 100644 --- a/vendor/github.com/jaegertracing/jaeger/storage/doc.go +++ b/vendor/github.com/jaegertracing/jaeger/storage/doc.go @@ -1,17 +1,6 @@ // Copyright (c) 2019 The Jaeger Authors. // Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 // Package storage is the collection of different storage interfaces that are shared by two or more components. // diff --git a/vendor/github.com/jaegertracing/jaeger/storage/factory.go b/vendor/github.com/jaegertracing/jaeger/storage/factory.go index b56e6fdc07f..f3e8ea7f4af 100644 --- a/vendor/github.com/jaegertracing/jaeger/storage/factory.go +++ b/vendor/github.com/jaegertracing/jaeger/storage/factory.go @@ -1,21 +1,11 @@ // Copyright (c) 2019 The Jaeger Authors. // Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package storage import ( + "context" "errors" "go.uber.org/zap" @@ -53,7 +43,7 @@ type Factory interface { // Only meant to be used from integration tests. type Purger interface { // Purge removes all data from the storage. - Purge() error + Purge(context.Context) error } // SamplingStoreFactory defines an interface that is capable of returning the necessary backends for diff --git a/vendor/github.com/jaegertracing/jaeger/storage/metricsstore/interface.go b/vendor/github.com/jaegertracing/jaeger/storage/metricsstore/interface.go index dc62fd0fa56..05b1f67be97 100644 --- a/vendor/github.com/jaegertracing/jaeger/storage/metricsstore/interface.go +++ b/vendor/github.com/jaegertracing/jaeger/storage/metricsstore/interface.go @@ -1,16 +1,5 @@ // Copyright (c) 2021 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package metricsstore diff --git a/vendor/github.com/jaegertracing/jaeger/storage/samplingstore/interface.go b/vendor/github.com/jaegertracing/jaeger/storage/samplingstore/interface.go index 7c10aab8f40..ba2dbdf3cd8 100644 --- a/vendor/github.com/jaegertracing/jaeger/storage/samplingstore/interface.go +++ b/vendor/github.com/jaegertracing/jaeger/storage/samplingstore/interface.go @@ -1,17 +1,6 @@ // Copyright (c) 2019 The Jaeger Authors. // Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package samplingstore diff --git a/vendor/github.com/jaegertracing/jaeger/storage/spanstore/composite.go b/vendor/github.com/jaegertracing/jaeger/storage/spanstore/composite.go index 68f9607448c..e4f79141c0c 100644 --- a/vendor/github.com/jaegertracing/jaeger/storage/spanstore/composite.go +++ b/vendor/github.com/jaegertracing/jaeger/storage/spanstore/composite.go @@ -1,17 +1,6 @@ // Copyright (c) 2019 The Jaeger Authors. // Copyright (c) 2017 Uber Technologies, Inc. 
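With the factory.go change above, storage.Purger implementations must accept a context in Purge. An illustrative in-memory implementation (the type and field names are hypothetical, not part of this patch):

package main

import "context"

// memPurger is a toy backend; a real implementation would delete its
// stored data here and honor ctx for cancellation and deadlines.
type memPurger struct {
	traces map[string][]byte
}

func (p *memPurger) Purge(ctx context.Context) error {
	if err := ctx.Err(); err != nil { // bail out if already cancelled
		return err
	}
	p.traces = map[string][]byte{}
	return nil
}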
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package spanstore diff --git a/vendor/github.com/jaegertracing/jaeger/storage/spanstore/downsampling_writer.go b/vendor/github.com/jaegertracing/jaeger/storage/spanstore/downsampling_writer.go index dd354e4d080..ceb5040ac18 100644 --- a/vendor/github.com/jaegertracing/jaeger/storage/spanstore/downsampling_writer.go +++ b/vendor/github.com/jaegertracing/jaeger/storage/spanstore/downsampling_writer.go @@ -1,16 +1,5 @@ // Copyright (c) 2019 The Jaeger Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-License-Identifier: Apache-2.0 package spanstore @@ -100,7 +89,7 @@ func NewSampler(ratio float64, hashSalt string) *Sampler { } hashSaltBytes := []byte(hashSalt) pool := &sync.Pool{ - New: func() interface{} { + New: func() any { buffer := make([]byte, len(hashSaltBytes)+traceIDByteSize) copy(buffer, hashSaltBytes) return &hasher{ diff --git a/vendor/github.com/jaegertracing/jaeger/storage/spanstore/interface.go b/vendor/github.com/jaegertracing/jaeger/storage/spanstore/interface.go index 534cf8a2280..c4c29181502 100644 --- a/vendor/github.com/jaegertracing/jaeger/storage/spanstore/interface.go +++ b/vendor/github.com/jaegertracing/jaeger/storage/spanstore/interface.go @@ -1,17 +1,6 @@ // Copyright (c) 2019 The Jaeger Authors. // Copyright (c) 2017 Uber Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-License-Identifier: Apache-2.0 package spanstore diff --git a/vendor/github.com/klauspost/cpuid/v2/README.md b/vendor/github.com/klauspost/cpuid/v2/README.md index 21508edbdb3..f06ba51c56b 100644 --- a/vendor/github.com/klauspost/cpuid/v2/README.md +++ b/vendor/github.com/klauspost/cpuid/v2/README.md @@ -281,6 +281,7 @@ Exit Code 1 | AMXBF16 | Tile computational operations on BFLOAT16 numbers | | AMXINT8 | Tile computational operations on 8-bit integers | | AMXFP16 | Tile computational operations on FP16 numbers | +| AMXFP8 | Tile computational operations on FP8 numbers | | AMXTILE | Tile architecture | | APX_F | Intel APX | | AVX | AVX functions | diff --git a/vendor/github.com/klauspost/cpuid/v2/cpuid.go b/vendor/github.com/klauspost/cpuid/v2/cpuid.go index 53bc18ca719..db99eb62f7b 100644 --- a/vendor/github.com/klauspost/cpuid/v2/cpuid.go +++ b/vendor/github.com/klauspost/cpuid/v2/cpuid.go @@ -55,6 +55,12 @@ const ( Qualcomm Marvell + QEMU + QNX + ACRN + SRE + Apple + lastVendor ) @@ -75,6 +81,7 @@ const ( AMXBF16 // Tile computational operations on BFLOAT16 numbers AMXFP16 // Tile computational operations on FP16 numbers AMXINT8 // Tile computational operations on 8-bit integers + AMXFP8 // Tile computational operations on FP8 numbers AMXTILE // Tile architecture APX_F // Intel APX AVX // AVX functions @@ -296,20 +303,22 @@ const ( // CPUInfo contains information about the detected system CPU. type CPUInfo struct { - BrandName string // Brand name reported by the CPU - VendorID Vendor // Comparable CPU vendor ID - VendorString string // Raw vendor string. - featureSet flagSet // Features of the CPU - PhysicalCores int // Number of physical processor cores in your CPU. Will be 0 if undetectable. - ThreadsPerCore int // Number of threads per physical core. Will be 1 if undetectable. - LogicalCores int // Number of physical cores times threads that can run on each core through the use of hyperthreading. Will be 0 if undetectable. - Family int // CPU family number - Model int // CPU model number - Stepping int // CPU stepping info - CacheLine int // Cache line size in bytes. Will be 0 if undetectable. - Hz int64 // Clock speed, if known, 0 otherwise. Will attempt to contain base clock speed. - BoostFreq int64 // Max clock speed, if known, 0 otherwise - Cache struct { + BrandName string // Brand name reported by the CPU + VendorID Vendor // Comparable CPU vendor ID + VendorString string // Raw vendor string. + HypervisorVendorID Vendor // Hypervisor vendor + HypervisorVendorString string // Raw hypervisor vendor string + featureSet flagSet // Features of the CPU + PhysicalCores int // Number of physical processor cores in your CPU. Will be 0 if undetectable. + ThreadsPerCore int // Number of threads per physical core. Will be 1 if undetectable. + LogicalCores int // Number of physical cores times threads that can run on each core through the use of hyperthreading. Will be 0 if undetectable. + Family int // CPU family number + Model int // CPU model number + Stepping int // CPU stepping info + CacheLine int // Cache line size in bytes. Will be 0 if undetectable. + Hz int64 // Clock speed, if known, 0 otherwise. Will attempt to contain base clock speed. + BoostFreq int64 // Max clock speed, if known, 0 otherwise + Cache struct { L1I int // L1 Instruction Cache (per core or shared). Will be -1 if undetected L1D int // L1 Data Cache (per core or shared). Will be -1 if undetected L2 int // L2 Cache (per core or shared). 
Will be -1 if undetected @@ -318,8 +327,9 @@ type CPUInfo struct { SGX SGXSupport AMDMemEncryption AMDMemEncryptionSupport AVX10Level uint8 - maxFunc uint32 - maxExFunc uint32 + + maxFunc uint32 + maxExFunc uint32 } var cpuid func(op uint32) (eax, ebx, ecx, edx uint32) @@ -503,7 +513,7 @@ func (c CPUInfo) FeatureSet() []string { // Uses the RDTSCP instruction. The value 0 is returned // if the CPU does not support the instruction. func (c CPUInfo) RTCounter() uint64 { - if !c.Supports(RDTSCP) { + if !c.Has(RDTSCP) { return 0 } a, _, _, d := rdtscpAsm() @@ -515,13 +525,22 @@ func (c CPUInfo) RTCounter() uint64 { // about the current cpu/core the code is running on. // If the RDTSCP instruction isn't supported on the CPU, the value 0 is returned. func (c CPUInfo) Ia32TscAux() uint32 { - if !c.Supports(RDTSCP) { + if !c.Has(RDTSCP) { return 0 } _, _, ecx, _ := rdtscpAsm() return ecx } +// SveLengths returns arm SVE vector and predicate lengths. +// Will return 0, 0 if SVE is not enabled or otherwise unable to detect. +func (c CPUInfo) SveLengths() (vl, pl uint64) { + if !c.Has(SVE) { + return 0, 0 + } + return getVectorLength() +} + // LogicalCPU will return the Logical CPU the code is currently executing on. // This is likely to change when the OS re-schedules the running thread // to another CPU. @@ -781,11 +800,16 @@ func threadsPerCore() int { _, b, _, _ := cpuidex(0xb, 0) if b&0xffff == 0 { if vend == AMD { - // Workaround for AMD returning 0, assume 2 if >= Zen 2 - // It will be more correct than not. + // if >= Zen 2 0x8000001e EBX 15-8 bits means threads per core. + // The number of threads per core is ThreadsPerCore+1 + // See PPR for AMD Family 17h Models 00h-0Fh (page 82) fam, _, _ := familyModel() _, _, _, d := cpuid(1) if (d&(1<<28)) != 0 && fam >= 23 { + if maxExtendedFunction() >= 0x8000001e { + _, b, _, _ := cpuid(0x8000001e) + return int((b>>8)&0xff) + 1 + } return 2 } } @@ -877,7 +901,9 @@ var vendorMapping = map[string]Vendor{ "GenuineTMx86": Transmeta, "Geode by NSC": NSC, "VIA VIA VIA ": VIA, - "KVMKVMKVMKVM": KVM, + "KVMKVMKVM": KVM, + "Linux KVM Hv": KVM, + "TCGTCGTCGTCG": QEMU, "Microsoft Hv": MSVM, "VMwareVMware": VMware, "XenVMMXenVMM": XenHVM, @@ -887,6 +913,10 @@ var vendorMapping = map[string]Vendor{ "SiS SiS SiS ": SiS, "RiseRiseRise": SiS, "Genuine RDC": RDC, + "QNXQVMBSQG": QNX, + "ACRNACRNACRN": ACRN, + "SRESRESRESRE": SRE, + "Apple VZ": Apple, } func vendorID() (Vendor, string) { @@ -899,6 +929,17 @@ func vendorID() (Vendor, string) { return vend, v } +func hypervisorVendorID() (Vendor, string) { + // https://lwn.net/Articles/301888/ + _, b, c, d := cpuid(0x40000000) + v := string(valAsString(b, c, d)) + vend, ok := vendorMapping[v] + if !ok { + return VendorUnknown, v + } + return vend, v +} + func cacheLine() int { if maxFunctionID() < 0x1 { return 0 @@ -1271,6 +1312,7 @@ func support() flagSet { fs.setIf(ebx&(1<<31) != 0, AVX512VL) // ecx fs.setIf(ecx&(1<<1) != 0, AVX512VBMI) + fs.setIf(ecx&(1<<3) != 0, AMXFP8) fs.setIf(ecx&(1<<6) != 0, AVX512VBMI2) fs.setIf(ecx&(1<<11) != 0, AVX512VNNI) fs.setIf(ecx&(1<<12) != 0, AVX512BITALG) diff --git a/vendor/github.com/klauspost/cpuid/v2/cpuid_arm64.s b/vendor/github.com/klauspost/cpuid/v2/cpuid_arm64.s index b31d6aec43f..b196f78eb44 100644 --- a/vendor/github.com/klauspost/cpuid/v2/cpuid_arm64.s +++ b/vendor/github.com/klauspost/cpuid/v2/cpuid_arm64.s @@ -24,3 +24,13 @@ TEXT ·getInstAttributes(SB), 7, $0 MOVD R1, instAttrReg1+8(FP) RET +TEXT ·getVectorLength(SB), 7, $0 + WORD $0xd2800002 // mov x2, #0 + WORD 
$0x04225022 // addvl x2, x2, #1 + WORD $0xd37df042 // lsl x2, x2, #3 + WORD $0xd2800003 // mov x3, #0 + WORD $0x04635023 // addpl x3, x3, #1 + WORD $0xd37df063 // lsl x3, x3, #3 + MOVD R2, vl+0(FP) + MOVD R3, pl+8(FP) + RET diff --git a/vendor/github.com/klauspost/cpuid/v2/detect_arm64.go b/vendor/github.com/klauspost/cpuid/v2/detect_arm64.go index 9a53504a042..566743d2204 100644 --- a/vendor/github.com/klauspost/cpuid/v2/detect_arm64.go +++ b/vendor/github.com/klauspost/cpuid/v2/detect_arm64.go @@ -10,6 +10,7 @@ import "runtime" func getMidr() (midr uint64) func getProcFeatures() (procFeatures uint64) func getInstAttributes() (instAttrReg0, instAttrReg1 uint64) +func getVectorLength() (vl, pl uint64) func initCPU() { cpuid = func(uint32) (a, b, c, d uint32) { return 0, 0, 0, 0 } @@ -24,7 +25,7 @@ func addInfo(c *CPUInfo, safe bool) { detectOS(c) // ARM64 disabled since it may crash if interrupt is not intercepted by OS. - if safe && !c.Supports(ARMCPUID) && runtime.GOOS != "freebsd" { + if safe && !c.Has(ARMCPUID) && runtime.GOOS != "freebsd" { return } midr := getMidr() diff --git a/vendor/github.com/klauspost/cpuid/v2/detect_ref.go b/vendor/github.com/klauspost/cpuid/v2/detect_ref.go index 9636c2bc17c..574f9389c07 100644 --- a/vendor/github.com/klauspost/cpuid/v2/detect_ref.go +++ b/vendor/github.com/klauspost/cpuid/v2/detect_ref.go @@ -10,6 +10,8 @@ func initCPU() { cpuidex = func(x, y uint32) (a, b, c, d uint32) { return 0, 0, 0, 0 } xgetbv = func(uint32) (a, b uint32) { return 0, 0 } rdtscpAsm = func() (a, b, c, d uint32) { return 0, 0, 0, 0 } + } func addInfo(info *CPUInfo, safe bool) {} +func getVectorLength() (vl, pl uint64) { return 0, 0 } diff --git a/vendor/github.com/klauspost/cpuid/v2/detect_x86.go b/vendor/github.com/klauspost/cpuid/v2/detect_x86.go index 799b400c2ec..f924c9d8399 100644 --- a/vendor/github.com/klauspost/cpuid/v2/detect_x86.go +++ b/vendor/github.com/klauspost/cpuid/v2/detect_x86.go @@ -32,7 +32,10 @@ func addInfo(c *CPUInfo, safe bool) { c.LogicalCores = logicalCores() c.PhysicalCores = physicalCores() c.VendorID, c.VendorString = vendorID() + c.HypervisorVendorID, c.HypervisorVendorString = hypervisorVendorID() c.AVX10Level = c.supportAVX10() c.cacheSize() c.frequencies() } + +func getVectorLength() (vl, pl uint64) { return 0, 0 } diff --git a/vendor/github.com/klauspost/cpuid/v2/featureid_string.go b/vendor/github.com/klauspost/cpuid/v2/featureid_string.go index 3a256031039..e7f874a7e8d 100644 --- a/vendor/github.com/klauspost/cpuid/v2/featureid_string.go +++ b/vendor/github.com/klauspost/cpuid/v2/featureid_string.go @@ -15,224 +15,225 @@ func _() { _ = x[AMXBF16-5] _ = x[AMXFP16-6] _ = x[AMXINT8-7] - _ = x[AMXTILE-8] - _ = x[APX_F-9] - _ = x[AVX-10] - _ = x[AVX10-11] - _ = x[AVX10_128-12] - _ = x[AVX10_256-13] - _ = x[AVX10_512-14] - _ = x[AVX2-15] - _ = x[AVX512BF16-16] - _ = x[AVX512BITALG-17] - _ = x[AVX512BW-18] - _ = x[AVX512CD-19] - _ = x[AVX512DQ-20] - _ = x[AVX512ER-21] - _ = x[AVX512F-22] - _ = x[AVX512FP16-23] - _ = x[AVX512IFMA-24] - _ = x[AVX512PF-25] - _ = x[AVX512VBMI-26] - _ = x[AVX512VBMI2-27] - _ = x[AVX512VL-28] - _ = x[AVX512VNNI-29] - _ = x[AVX512VP2INTERSECT-30] - _ = x[AVX512VPOPCNTDQ-31] - _ = x[AVXIFMA-32] - _ = x[AVXNECONVERT-33] - _ = x[AVXSLOW-34] - _ = x[AVXVNNI-35] - _ = x[AVXVNNIINT8-36] - _ = x[AVXVNNIINT16-37] - _ = x[BHI_CTRL-38] - _ = x[BMI1-39] - _ = x[BMI2-40] - _ = x[CETIBT-41] - _ = x[CETSS-42] - _ = x[CLDEMOTE-43] - _ = x[CLMUL-44] - _ = x[CLZERO-45] - _ = x[CMOV-46] - _ = x[CMPCCXADD-47] - _ = 
x[CMPSB_SCADBS_SHORT-48] - _ = x[CMPXCHG8-49] - _ = x[CPBOOST-50] - _ = x[CPPC-51] - _ = x[CX16-52] - _ = x[EFER_LMSLE_UNS-53] - _ = x[ENQCMD-54] - _ = x[ERMS-55] - _ = x[F16C-56] - _ = x[FLUSH_L1D-57] - _ = x[FMA3-58] - _ = x[FMA4-59] - _ = x[FP128-60] - _ = x[FP256-61] - _ = x[FSRM-62] - _ = x[FXSR-63] - _ = x[FXSROPT-64] - _ = x[GFNI-65] - _ = x[HLE-66] - _ = x[HRESET-67] - _ = x[HTT-68] - _ = x[HWA-69] - _ = x[HYBRID_CPU-70] - _ = x[HYPERVISOR-71] - _ = x[IA32_ARCH_CAP-72] - _ = x[IA32_CORE_CAP-73] - _ = x[IBPB-74] - _ = x[IBPB_BRTYPE-75] - _ = x[IBRS-76] - _ = x[IBRS_PREFERRED-77] - _ = x[IBRS_PROVIDES_SMP-78] - _ = x[IBS-79] - _ = x[IBSBRNTRGT-80] - _ = x[IBSFETCHSAM-81] - _ = x[IBSFFV-82] - _ = x[IBSOPCNT-83] - _ = x[IBSOPCNTEXT-84] - _ = x[IBSOPSAM-85] - _ = x[IBSRDWROPCNT-86] - _ = x[IBSRIPINVALIDCHK-87] - _ = x[IBS_FETCH_CTLX-88] - _ = x[IBS_OPDATA4-89] - _ = x[IBS_OPFUSE-90] - _ = x[IBS_PREVENTHOST-91] - _ = x[IBS_ZEN4-92] - _ = x[IDPRED_CTRL-93] - _ = x[INT_WBINVD-94] - _ = x[INVLPGB-95] - _ = x[KEYLOCKER-96] - _ = x[KEYLOCKERW-97] - _ = x[LAHF-98] - _ = x[LAM-99] - _ = x[LBRVIRT-100] - _ = x[LZCNT-101] - _ = x[MCAOVERFLOW-102] - _ = x[MCDT_NO-103] - _ = x[MCOMMIT-104] - _ = x[MD_CLEAR-105] - _ = x[MMX-106] - _ = x[MMXEXT-107] - _ = x[MOVBE-108] - _ = x[MOVDIR64B-109] - _ = x[MOVDIRI-110] - _ = x[MOVSB_ZL-111] - _ = x[MOVU-112] - _ = x[MPX-113] - _ = x[MSRIRC-114] - _ = x[MSRLIST-115] - _ = x[MSR_PAGEFLUSH-116] - _ = x[NRIPS-117] - _ = x[NX-118] - _ = x[OSXSAVE-119] - _ = x[PCONFIG-120] - _ = x[POPCNT-121] - _ = x[PPIN-122] - _ = x[PREFETCHI-123] - _ = x[PSFD-124] - _ = x[RDPRU-125] - _ = x[RDRAND-126] - _ = x[RDSEED-127] - _ = x[RDTSCP-128] - _ = x[RRSBA_CTRL-129] - _ = x[RTM-130] - _ = x[RTM_ALWAYS_ABORT-131] - _ = x[SBPB-132] - _ = x[SERIALIZE-133] - _ = x[SEV-134] - _ = x[SEV_64BIT-135] - _ = x[SEV_ALTERNATIVE-136] - _ = x[SEV_DEBUGSWAP-137] - _ = x[SEV_ES-138] - _ = x[SEV_RESTRICTED-139] - _ = x[SEV_SNP-140] - _ = x[SGX-141] - _ = x[SGXLC-142] - _ = x[SHA-143] - _ = x[SME-144] - _ = x[SME_COHERENT-145] - _ = x[SPEC_CTRL_SSBD-146] - _ = x[SRBDS_CTRL-147] - _ = x[SRSO_MSR_FIX-148] - _ = x[SRSO_NO-149] - _ = x[SRSO_USER_KERNEL_NO-150] - _ = x[SSE-151] - _ = x[SSE2-152] - _ = x[SSE3-153] - _ = x[SSE4-154] - _ = x[SSE42-155] - _ = x[SSE4A-156] - _ = x[SSSE3-157] - _ = x[STIBP-158] - _ = x[STIBP_ALWAYSON-159] - _ = x[STOSB_SHORT-160] - _ = x[SUCCOR-161] - _ = x[SVM-162] - _ = x[SVMDA-163] - _ = x[SVMFBASID-164] - _ = x[SVML-165] - _ = x[SVMNP-166] - _ = x[SVMPF-167] - _ = x[SVMPFT-168] - _ = x[SYSCALL-169] - _ = x[SYSEE-170] - _ = x[TBM-171] - _ = x[TDX_GUEST-172] - _ = x[TLB_FLUSH_NESTED-173] - _ = x[TME-174] - _ = x[TOPEXT-175] - _ = x[TSCRATEMSR-176] - _ = x[TSXLDTRK-177] - _ = x[VAES-178] - _ = x[VMCBCLEAN-179] - _ = x[VMPL-180] - _ = x[VMSA_REGPROT-181] - _ = x[VMX-182] - _ = x[VPCLMULQDQ-183] - _ = x[VTE-184] - _ = x[WAITPKG-185] - _ = x[WBNOINVD-186] - _ = x[WRMSRNS-187] - _ = x[X87-188] - _ = x[XGETBV1-189] - _ = x[XOP-190] - _ = x[XSAVE-191] - _ = x[XSAVEC-192] - _ = x[XSAVEOPT-193] - _ = x[XSAVES-194] - _ = x[AESARM-195] - _ = x[ARMCPUID-196] - _ = x[ASIMD-197] - _ = x[ASIMDDP-198] - _ = x[ASIMDHP-199] - _ = x[ASIMDRDM-200] - _ = x[ATOMICS-201] - _ = x[CRC32-202] - _ = x[DCPOP-203] - _ = x[EVTSTRM-204] - _ = x[FCMA-205] - _ = x[FP-206] - _ = x[FPHP-207] - _ = x[GPA-208] - _ = x[JSCVT-209] - _ = x[LRCPC-210] - _ = x[PMULL-211] - _ = x[SHA1-212] - _ = x[SHA2-213] - _ = x[SHA3-214] - _ = x[SHA512-215] - _ = x[SM3-216] - _ = x[SM4-217] - _ = x[SVE-218] - _ = x[lastID-219] 
+ _ = x[AMXFP8-8] + _ = x[AMXTILE-9] + _ = x[APX_F-10] + _ = x[AVX-11] + _ = x[AVX10-12] + _ = x[AVX10_128-13] + _ = x[AVX10_256-14] + _ = x[AVX10_512-15] + _ = x[AVX2-16] + _ = x[AVX512BF16-17] + _ = x[AVX512BITALG-18] + _ = x[AVX512BW-19] + _ = x[AVX512CD-20] + _ = x[AVX512DQ-21] + _ = x[AVX512ER-22] + _ = x[AVX512F-23] + _ = x[AVX512FP16-24] + _ = x[AVX512IFMA-25] + _ = x[AVX512PF-26] + _ = x[AVX512VBMI-27] + _ = x[AVX512VBMI2-28] + _ = x[AVX512VL-29] + _ = x[AVX512VNNI-30] + _ = x[AVX512VP2INTERSECT-31] + _ = x[AVX512VPOPCNTDQ-32] + _ = x[AVXIFMA-33] + _ = x[AVXNECONVERT-34] + _ = x[AVXSLOW-35] + _ = x[AVXVNNI-36] + _ = x[AVXVNNIINT8-37] + _ = x[AVXVNNIINT16-38] + _ = x[BHI_CTRL-39] + _ = x[BMI1-40] + _ = x[BMI2-41] + _ = x[CETIBT-42] + _ = x[CETSS-43] + _ = x[CLDEMOTE-44] + _ = x[CLMUL-45] + _ = x[CLZERO-46] + _ = x[CMOV-47] + _ = x[CMPCCXADD-48] + _ = x[CMPSB_SCADBS_SHORT-49] + _ = x[CMPXCHG8-50] + _ = x[CPBOOST-51] + _ = x[CPPC-52] + _ = x[CX16-53] + _ = x[EFER_LMSLE_UNS-54] + _ = x[ENQCMD-55] + _ = x[ERMS-56] + _ = x[F16C-57] + _ = x[FLUSH_L1D-58] + _ = x[FMA3-59] + _ = x[FMA4-60] + _ = x[FP128-61] + _ = x[FP256-62] + _ = x[FSRM-63] + _ = x[FXSR-64] + _ = x[FXSROPT-65] + _ = x[GFNI-66] + _ = x[HLE-67] + _ = x[HRESET-68] + _ = x[HTT-69] + _ = x[HWA-70] + _ = x[HYBRID_CPU-71] + _ = x[HYPERVISOR-72] + _ = x[IA32_ARCH_CAP-73] + _ = x[IA32_CORE_CAP-74] + _ = x[IBPB-75] + _ = x[IBPB_BRTYPE-76] + _ = x[IBRS-77] + _ = x[IBRS_PREFERRED-78] + _ = x[IBRS_PROVIDES_SMP-79] + _ = x[IBS-80] + _ = x[IBSBRNTRGT-81] + _ = x[IBSFETCHSAM-82] + _ = x[IBSFFV-83] + _ = x[IBSOPCNT-84] + _ = x[IBSOPCNTEXT-85] + _ = x[IBSOPSAM-86] + _ = x[IBSRDWROPCNT-87] + _ = x[IBSRIPINVALIDCHK-88] + _ = x[IBS_FETCH_CTLX-89] + _ = x[IBS_OPDATA4-90] + _ = x[IBS_OPFUSE-91] + _ = x[IBS_PREVENTHOST-92] + _ = x[IBS_ZEN4-93] + _ = x[IDPRED_CTRL-94] + _ = x[INT_WBINVD-95] + _ = x[INVLPGB-96] + _ = x[KEYLOCKER-97] + _ = x[KEYLOCKERW-98] + _ = x[LAHF-99] + _ = x[LAM-100] + _ = x[LBRVIRT-101] + _ = x[LZCNT-102] + _ = x[MCAOVERFLOW-103] + _ = x[MCDT_NO-104] + _ = x[MCOMMIT-105] + _ = x[MD_CLEAR-106] + _ = x[MMX-107] + _ = x[MMXEXT-108] + _ = x[MOVBE-109] + _ = x[MOVDIR64B-110] + _ = x[MOVDIRI-111] + _ = x[MOVSB_ZL-112] + _ = x[MOVU-113] + _ = x[MPX-114] + _ = x[MSRIRC-115] + _ = x[MSRLIST-116] + _ = x[MSR_PAGEFLUSH-117] + _ = x[NRIPS-118] + _ = x[NX-119] + _ = x[OSXSAVE-120] + _ = x[PCONFIG-121] + _ = x[POPCNT-122] + _ = x[PPIN-123] + _ = x[PREFETCHI-124] + _ = x[PSFD-125] + _ = x[RDPRU-126] + _ = x[RDRAND-127] + _ = x[RDSEED-128] + _ = x[RDTSCP-129] + _ = x[RRSBA_CTRL-130] + _ = x[RTM-131] + _ = x[RTM_ALWAYS_ABORT-132] + _ = x[SBPB-133] + _ = x[SERIALIZE-134] + _ = x[SEV-135] + _ = x[SEV_64BIT-136] + _ = x[SEV_ALTERNATIVE-137] + _ = x[SEV_DEBUGSWAP-138] + _ = x[SEV_ES-139] + _ = x[SEV_RESTRICTED-140] + _ = x[SEV_SNP-141] + _ = x[SGX-142] + _ = x[SGXLC-143] + _ = x[SHA-144] + _ = x[SME-145] + _ = x[SME_COHERENT-146] + _ = x[SPEC_CTRL_SSBD-147] + _ = x[SRBDS_CTRL-148] + _ = x[SRSO_MSR_FIX-149] + _ = x[SRSO_NO-150] + _ = x[SRSO_USER_KERNEL_NO-151] + _ = x[SSE-152] + _ = x[SSE2-153] + _ = x[SSE3-154] + _ = x[SSE4-155] + _ = x[SSE42-156] + _ = x[SSE4A-157] + _ = x[SSSE3-158] + _ = x[STIBP-159] + _ = x[STIBP_ALWAYSON-160] + _ = x[STOSB_SHORT-161] + _ = x[SUCCOR-162] + _ = x[SVM-163] + _ = x[SVMDA-164] + _ = x[SVMFBASID-165] + _ = x[SVML-166] + _ = x[SVMNP-167] + _ = x[SVMPF-168] + _ = x[SVMPFT-169] + _ = x[SYSCALL-170] + _ = x[SYSEE-171] + _ = x[TBM-172] + _ = x[TDX_GUEST-173] + _ = x[TLB_FLUSH_NESTED-174] + _ = x[TME-175] + _ = 
x[TOPEXT-176] + _ = x[TSCRATEMSR-177] + _ = x[TSXLDTRK-178] + _ = x[VAES-179] + _ = x[VMCBCLEAN-180] + _ = x[VMPL-181] + _ = x[VMSA_REGPROT-182] + _ = x[VMX-183] + _ = x[VPCLMULQDQ-184] + _ = x[VTE-185] + _ = x[WAITPKG-186] + _ = x[WBNOINVD-187] + _ = x[WRMSRNS-188] + _ = x[X87-189] + _ = x[XGETBV1-190] + _ = x[XOP-191] + _ = x[XSAVE-192] + _ = x[XSAVEC-193] + _ = x[XSAVEOPT-194] + _ = x[XSAVES-195] + _ = x[AESARM-196] + _ = x[ARMCPUID-197] + _ = x[ASIMD-198] + _ = x[ASIMDDP-199] + _ = x[ASIMDHP-200] + _ = x[ASIMDRDM-201] + _ = x[ATOMICS-202] + _ = x[CRC32-203] + _ = x[DCPOP-204] + _ = x[EVTSTRM-205] + _ = x[FCMA-206] + _ = x[FP-207] + _ = x[FPHP-208] + _ = x[GPA-209] + _ = x[JSCVT-210] + _ = x[LRCPC-211] + _ = x[PMULL-212] + _ = x[SHA1-213] + _ = x[SHA2-214] + _ = x[SHA3-215] + _ = x[SHA512-216] + _ = x[SM3-217] + _ = x[SM4-218] + _ = x[SVE-219] + _ = x[lastID-220] _ = x[firstID-0] } -const _FeatureID_name = "firstIDADXAESNIAMD3DNOWAMD3DNOWEXTAMXBF16AMXFP16AMXINT8AMXTILEAPX_FAVXAVX10AVX10_128AVX10_256AVX10_512AVX2AVX512BF16AVX512BITALGAVX512BWAVX512CDAVX512DQAVX512ERAVX512FAVX512FP16AVX512IFMAAVX512PFAVX512VBMIAVX512VBMI2AVX512VLAVX512VNNIAVX512VP2INTERSECTAVX512VPOPCNTDQAVXIFMAAVXNECONVERTAVXSLOWAVXVNNIAVXVNNIINT8AVXVNNIINT16BHI_CTRLBMI1BMI2CETIBTCETSSCLDEMOTECLMULCLZEROCMOVCMPCCXADDCMPSB_SCADBS_SHORTCMPXCHG8CPBOOSTCPPCCX16EFER_LMSLE_UNSENQCMDERMSF16CFLUSH_L1DFMA3FMA4FP128FP256FSRMFXSRFXSROPTGFNIHLEHRESETHTTHWAHYBRID_CPUHYPERVISORIA32_ARCH_CAPIA32_CORE_CAPIBPBIBPB_BRTYPEIBRSIBRS_PREFERREDIBRS_PROVIDES_SMPIBSIBSBRNTRGTIBSFETCHSAMIBSFFVIBSOPCNTIBSOPCNTEXTIBSOPSAMIBSRDWROPCNTIBSRIPINVALIDCHKIBS_FETCH_CTLXIBS_OPDATA4IBS_OPFUSEIBS_PREVENTHOSTIBS_ZEN4IDPRED_CTRLINT_WBINVDINVLPGBKEYLOCKERKEYLOCKERWLAHFLAMLBRVIRTLZCNTMCAOVERFLOWMCDT_NOMCOMMITMD_CLEARMMXMMXEXTMOVBEMOVDIR64BMOVDIRIMOVSB_ZLMOVUMPXMSRIRCMSRLISTMSR_PAGEFLUSHNRIPSNXOSXSAVEPCONFIGPOPCNTPPINPREFETCHIPSFDRDPRURDRANDRDSEEDRDTSCPRRSBA_CTRLRTMRTM_ALWAYS_ABORTSBPBSERIALIZESEVSEV_64BITSEV_ALTERNATIVESEV_DEBUGSWAPSEV_ESSEV_RESTRICTEDSEV_SNPSGXSGXLCSHASMESME_COHERENTSPEC_CTRL_SSBDSRBDS_CTRLSRSO_MSR_FIXSRSO_NOSRSO_USER_KERNEL_NOSSESSE2SSE3SSE4SSE42SSE4ASSSE3STIBPSTIBP_ALWAYSONSTOSB_SHORTSUCCORSVMSVMDASVMFBASIDSVMLSVMNPSVMPFSVMPFTSYSCALLSYSEETBMTDX_GUESTTLB_FLUSH_NESTEDTMETOPEXTTSCRATEMSRTSXLDTRKVAESVMCBCLEANVMPLVMSA_REGPROTVMXVPCLMULQDQVTEWAITPKGWBNOINVDWRMSRNSX87XGETBV1XOPXSAVEXSAVECXSAVEOPTXSAVESAESARMARMCPUIDASIMDASIMDDPASIMDHPASIMDRDMATOMICSCRC32DCPOPEVTSTRMFCMAFPFPHPGPAJSCVTLRCPCPMULLSHA1SHA2SHA3SHA512SM3SM4SVElastID" +const _FeatureID_name = 
"firstIDADXAESNIAMD3DNOWAMD3DNOWEXTAMXBF16AMXFP16AMXINT8AMXFP8AMXTILEAPX_FAVXAVX10AVX10_128AVX10_256AVX10_512AVX2AVX512BF16AVX512BITALGAVX512BWAVX512CDAVX512DQAVX512ERAVX512FAVX512FP16AVX512IFMAAVX512PFAVX512VBMIAVX512VBMI2AVX512VLAVX512VNNIAVX512VP2INTERSECTAVX512VPOPCNTDQAVXIFMAAVXNECONVERTAVXSLOWAVXVNNIAVXVNNIINT8AVXVNNIINT16BHI_CTRLBMI1BMI2CETIBTCETSSCLDEMOTECLMULCLZEROCMOVCMPCCXADDCMPSB_SCADBS_SHORTCMPXCHG8CPBOOSTCPPCCX16EFER_LMSLE_UNSENQCMDERMSF16CFLUSH_L1DFMA3FMA4FP128FP256FSRMFXSRFXSROPTGFNIHLEHRESETHTTHWAHYBRID_CPUHYPERVISORIA32_ARCH_CAPIA32_CORE_CAPIBPBIBPB_BRTYPEIBRSIBRS_PREFERREDIBRS_PROVIDES_SMPIBSIBSBRNTRGTIBSFETCHSAMIBSFFVIBSOPCNTIBSOPCNTEXTIBSOPSAMIBSRDWROPCNTIBSRIPINVALIDCHKIBS_FETCH_CTLXIBS_OPDATA4IBS_OPFUSEIBS_PREVENTHOSTIBS_ZEN4IDPRED_CTRLINT_WBINVDINVLPGBKEYLOCKERKEYLOCKERWLAHFLAMLBRVIRTLZCNTMCAOVERFLOWMCDT_NOMCOMMITMD_CLEARMMXMMXEXTMOVBEMOVDIR64BMOVDIRIMOVSB_ZLMOVUMPXMSRIRCMSRLISTMSR_PAGEFLUSHNRIPSNXOSXSAVEPCONFIGPOPCNTPPINPREFETCHIPSFDRDPRURDRANDRDSEEDRDTSCPRRSBA_CTRLRTMRTM_ALWAYS_ABORTSBPBSERIALIZESEVSEV_64BITSEV_ALTERNATIVESEV_DEBUGSWAPSEV_ESSEV_RESTRICTEDSEV_SNPSGXSGXLCSHASMESME_COHERENTSPEC_CTRL_SSBDSRBDS_CTRLSRSO_MSR_FIXSRSO_NOSRSO_USER_KERNEL_NOSSESSE2SSE3SSE4SSE42SSE4ASSSE3STIBPSTIBP_ALWAYSONSTOSB_SHORTSUCCORSVMSVMDASVMFBASIDSVMLSVMNPSVMPFSVMPFTSYSCALLSYSEETBMTDX_GUESTTLB_FLUSH_NESTEDTMETOPEXTTSCRATEMSRTSXLDTRKVAESVMCBCLEANVMPLVMSA_REGPROTVMXVPCLMULQDQVTEWAITPKGWBNOINVDWRMSRNSX87XGETBV1XOPXSAVEXSAVECXSAVEOPTXSAVESAESARMARMCPUIDASIMDASIMDDPASIMDHPASIMDRDMATOMICSCRC32DCPOPEVTSTRMFCMAFPFPHPGPAJSCVTLRCPCPMULLSHA1SHA2SHA3SHA512SM3SM4SVElastID" -var _FeatureID_index = [...]uint16{0, 7, 10, 15, 23, 34, 41, 48, 55, 62, 67, 70, 75, 84, 93, 102, 106, 116, 128, 136, 144, 152, 160, 167, 177, 187, 195, 205, 216, 224, 234, 252, 267, 274, 286, 293, 300, 311, 323, 331, 335, 339, 345, 350, 358, 363, 369, 373, 382, 400, 408, 415, 419, 423, 437, 443, 447, 451, 460, 464, 468, 473, 478, 482, 486, 493, 497, 500, 506, 509, 512, 522, 532, 545, 558, 562, 573, 577, 591, 608, 611, 621, 632, 638, 646, 657, 665, 677, 693, 707, 718, 728, 743, 751, 762, 772, 779, 788, 798, 802, 805, 812, 817, 828, 835, 842, 850, 853, 859, 864, 873, 880, 888, 892, 895, 901, 908, 921, 926, 928, 935, 942, 948, 952, 961, 965, 970, 976, 982, 988, 998, 1001, 1017, 1021, 1030, 1033, 1042, 1057, 1070, 1076, 1090, 1097, 1100, 1105, 1108, 1111, 1123, 1137, 1147, 1159, 1166, 1185, 1188, 1192, 1196, 1200, 1205, 1210, 1215, 1220, 1234, 1245, 1251, 1254, 1259, 1268, 1272, 1277, 1282, 1288, 1295, 1300, 1303, 1312, 1328, 1331, 1337, 1347, 1355, 1359, 1368, 1372, 1384, 1387, 1397, 1400, 1407, 1415, 1422, 1425, 1432, 1435, 1440, 1446, 1454, 1460, 1466, 1474, 1479, 1486, 1493, 1501, 1508, 1513, 1518, 1525, 1529, 1531, 1535, 1538, 1543, 1548, 1553, 1557, 1561, 1565, 1571, 1574, 1577, 1580, 1586} +var _FeatureID_index = [...]uint16{0, 7, 10, 15, 23, 34, 41, 48, 55, 61, 68, 73, 76, 81, 90, 99, 108, 112, 122, 134, 142, 150, 158, 166, 173, 183, 193, 201, 211, 222, 230, 240, 258, 273, 280, 292, 299, 306, 317, 329, 337, 341, 345, 351, 356, 364, 369, 375, 379, 388, 406, 414, 421, 425, 429, 443, 449, 453, 457, 466, 470, 474, 479, 484, 488, 492, 499, 503, 506, 512, 515, 518, 528, 538, 551, 564, 568, 579, 583, 597, 614, 617, 627, 638, 644, 652, 663, 671, 683, 699, 713, 724, 734, 749, 757, 768, 778, 785, 794, 804, 808, 811, 818, 823, 834, 841, 848, 856, 859, 865, 870, 879, 886, 894, 898, 901, 907, 914, 927, 932, 934, 941, 948, 954, 958, 967, 971, 976, 982, 988, 994, 1004, 1007, 1023, 1027, 1036, 1039, 1048, 1063, 1076, 1082, 1096, 1103, 
1106, 1111, 1114, 1117, 1129, 1143, 1153, 1165, 1172, 1191, 1194, 1198, 1202, 1206, 1211, 1216, 1221, 1226, 1240, 1251, 1257, 1260, 1265, 1274, 1278, 1283, 1288, 1294, 1301, 1306, 1309, 1318, 1334, 1337, 1343, 1353, 1361, 1365, 1374, 1378, 1390, 1393, 1403, 1406, 1413, 1421, 1428, 1431, 1438, 1441, 1446, 1452, 1460, 1466, 1472, 1480, 1485, 1492, 1499, 1507, 1514, 1519, 1524, 1531, 1535, 1537, 1541, 1544, 1549, 1554, 1559, 1563, 1567, 1571, 1577, 1580, 1583, 1586, 1592} func (i FeatureID) String() string { if i < 0 || i >= FeatureID(len(_FeatureID_index)-1) { @@ -270,12 +271,17 @@ func _() { _ = x[AMCC-23] _ = x[Qualcomm-24] _ = x[Marvell-25] - _ = x[lastVendor-26] + _ = x[QEMU-26] + _ = x[QNX-27] + _ = x[ACRN-28] + _ = x[SRE-29] + _ = x[Apple-30] + _ = x[lastVendor-31] } -const _Vendor_name = "VendorUnknownIntelAMDVIATransmetaNSCKVMMSVMVMwareXenHVMBhyveHygonSiSRDCAmpereARMBroadcomCaviumDECFujitsuInfineonMotorolaNVIDIAAMCCQualcommMarvelllastVendor" +const _Vendor_name = "VendorUnknownIntelAMDVIATransmetaNSCKVMMSVMVMwareXenHVMBhyveHygonSiSRDCAmpereARMBroadcomCaviumDECFujitsuInfineonMotorolaNVIDIAAMCCQualcommMarvellQEMUQNXACRNSREApplelastVendor" -var _Vendor_index = [...]uint8{0, 13, 18, 21, 24, 33, 36, 39, 43, 49, 55, 60, 65, 68, 71, 77, 80, 88, 94, 97, 104, 112, 120, 126, 130, 138, 145, 155} +var _Vendor_index = [...]uint8{0, 13, 18, 21, 24, 33, 36, 39, 43, 49, 55, 60, 65, 68, 71, 77, 80, 88, 94, 97, 104, 112, 120, 126, 130, 138, 145, 149, 152, 156, 159, 164, 174} func (i Vendor) String() string { if i < 0 || i >= Vendor(len(_Vendor_index)-1) { diff --git a/vendor/github.com/knadh/koanf/v2/README.md b/vendor/github.com/knadh/koanf/v2/README.md index d7412704ee5..b4947e556f6 100644 --- a/vendor/github.com/knadh/koanf/v2/README.md +++ b/vendor/github.com/knadh/koanf/v2/README.md @@ -8,7 +8,7 @@ koanf v2 has modules (Providers) for reading configuration from a variety of sou All external dependencies in providers and parsers are detached from the core and can be installed separately as necessary. -[![Run Tests](https://github.com/knadh/koanf/actions/workflows/test.yml/badge.svg)](https://github.com/knadh/koanf/actions/workflows/test.yml) [![GoDoc](https://godoc.org/github.com/knadh/koanf?status.svg)](https://godoc.org/github.com/knadh/koanf) +[![Run Tests](https://github.com/knadh/koanf/actions/workflows/test.yml/badge.svg)](https://github.com/knadh/koanf/actions/workflows/test.yml) [![GoDoc](https://pkg.go.dev/badge/github.com/knadh/koanf?utm_source=godoc)](https://pkg.go.dev/github.com/knadh/koanf/v2) ### Installation @@ -26,11 +26,10 @@ go get -u github.com/knadh/koanf/providers/file # Install the necessary Parser(s). -# Available: toml, json, yaml, dotenv, hcl, hjson, nestedtext +# Available: toml, toml/v2, json, yaml, dotenv, hcl, hjson, nestedtext # go get -u github.com/knadh/koanf/parsers/$parser go get -u github.com/knadh/koanf/parsers/toml - ``` [See the list](#api) of all bundled Providers and Parsers. @@ -146,6 +145,9 @@ func main() { k.Print() }) + // To stop a file watcher, call: + // f.Unwatch() + // Block forever (and manually make a change to mock/mock.json) to // reload the config. log.Println("waiting forever. 
Try making a change to mock/mock.json to live reload") @@ -168,6 +170,10 @@ import ( "github.com/knadh/koanf/v2" "github.com/knadh/koanf/parsers/toml" + + // TOML version 2 is available at: + // "github.com/knadh/koanf/parsers/toml/v2" + "github.com/knadh/koanf/providers/file" "github.com/knadh/koanf/providers/posflag" flag "github.com/spf13/pflag" @@ -317,7 +323,7 @@ func main() { ``` ### Unmarshalling and marshalling -`Parser`s can be used to unmarshal and scan the values in a Koanf instance into a struct based on the field tags, and to marshal a Koanf instance back into serialized bytes, for example, back to JSON or YAML, to write back to files. +`Parser`s can be used to unmarshal and scan the values in a Koanf instance into a struct based on the field tags, and to marshal a Koanf instance back into serialized bytes, for example, to JSON or YAML files. ```go package main @@ -366,7 +372,7 @@ func main() { fmt.Println(out) // Marshal the instance back to JSON. - // The paser instance can be anything, eg: json.Paser(), yaml.Parser() etc. + // The parser instance can be anything, eg: json.Parser(), yaml.Parser() etc. b, _ := k.Marshal(parser) fmt.Println(string(b)) } @@ -677,6 +683,7 @@ Install with `go get -u github.com/knadh/koanf/parsers/$parser` | json | `json.Parser()` | Parses JSON bytes into a nested map | | yaml | `yaml.Parser()` | Parses YAML bytes into a nested map | | toml | `toml.Parser()` | Parses TOML bytes into a nested map | +| toml/v2 | `toml.Parser()` | Parses TOML bytes into a nested map (using go-toml v2) | | dotenv | `dotenv.Parser()` | Parses DotEnv bytes into a flat map | | hcl | `hcl.Parser(flattenSlices bool)` | Parses Hashicorp HCL bytes into a nested map. `flattenSlices` is recommended to be set to true. [Read more](https://github.com/hashicorp/hcl/issues/162).
| | nestedtext | `nestedtext.Parser()` | Parses NestedText bytes into a flat map | diff --git a/vendor/github.com/knadh/koanf/v2/go.work.sum b/vendor/github.com/knadh/koanf/v2/go.work.sum index b879b3bf7de..eab7bcf845e 100644 --- a/vendor/github.com/knadh/koanf/v2/go.work.sum +++ b/vendor/github.com/knadh/koanf/v2/go.work.sum @@ -125,7 +125,6 @@ github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91 github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q= github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= @@ -137,18 +136,18 @@ github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFb github.com/hashicorp/consul/sdk v0.13.1/go.mod h1:SW/mM4LbKfqmMvcFu8v+eiQQ7oitXEFeiBe9StxERb0= github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0= go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= +golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= +golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= diff --git a/vendor/github.com/lufia/plan9stats/cpu.go b/vendor/github.com/lufia/plan9stats/cpu.go index a101b911906..eaff362c345 100644 --- a/vendor/github.com/lufia/plan9stats/cpu.go +++ b/vendor/github.com/lufia/plan9stats/cpu.go @@ -178,9 +178,12 @@ func ReadCPUStats(ctx context.Context, opts ...Option) (*CPUStats, error) { var up uint32parser pids := make([]uint32, len(names)) for i, s := range 
names { + if s == "trace" { + continue + } pids[i] = up.Parse(s) } - if up.err != nil { + if err := up.err; err != nil { return nil, err } sort.Slice(pids, func(i, j int) bool { diff --git a/vendor/github.com/magefile/mage/LICENSE b/vendor/github.com/magefile/mage/LICENSE new file mode 100644 index 00000000000..d0632bc1458 --- /dev/null +++ b/vendor/github.com/magefile/mage/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2017 the Mage authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/magefile/mage/mg/color.go b/vendor/github.com/magefile/mage/mg/color.go new file mode 100644 index 00000000000..3e27103325a --- /dev/null +++ b/vendor/github.com/magefile/mage/mg/color.go @@ -0,0 +1,80 @@ +package mg + +// Color is ANSI color type +type Color int + +// If you add/change/remove any items in this constant, +// you will need to run "stringer -type=Color" in this directory again. +// NOTE: Please keep the list in an alphabetical order. +const ( + Black Color = iota + Red + Green + Yellow + Blue + Magenta + Cyan + White + BrightBlack + BrightRed + BrightGreen + BrightYellow + BrightBlue + BrightMagenta + BrightCyan + BrightWhite +) + +// AnsiColor are ANSI color codes for supported terminal colors. +var ansiColor = map[Color]string{ + Black: "\u001b[30m", + Red: "\u001b[31m", + Green: "\u001b[32m", + Yellow: "\u001b[33m", + Blue: "\u001b[34m", + Magenta: "\u001b[35m", + Cyan: "\u001b[36m", + White: "\u001b[37m", + BrightBlack: "\u001b[30;1m", + BrightRed: "\u001b[31;1m", + BrightGreen: "\u001b[32;1m", + BrightYellow: "\u001b[33;1m", + BrightBlue: "\u001b[34;1m", + BrightMagenta: "\u001b[35;1m", + BrightCyan: "\u001b[36;1m", + BrightWhite: "\u001b[37;1m", +} + +// AnsiColorReset is an ANSI color code to reset the terminal color. +const AnsiColorReset = "\033[0m" + +// DefaultTargetAnsiColor is a default ANSI color for colorizing targets. +// It is set to Cyan as an arbitrary color, because it has a neutral meaning +var DefaultTargetAnsiColor = ansiColor[Cyan] + +func toLowerCase(s string) string { + // this is a naive implementation + // borrowed from https://golang.org/src/strings/strings.go + // and only considers alphabetical characters [a-zA-Z] + // so that we don't depend on the "strings" package + buf := make([]byte, len(s)) + for i := 0; i < len(s); i++ { + c := s[i] + if 'A' <= c && c <= 'Z' { + c += 'a' - 'A' + } + buf[i] = c + } + return string(buf) +} + +func getAnsiColor(color string) (string, bool) { + colorLower := toLowerCase(color) + for k, v := range ansiColor { + colorConstLower := toLowerCase(k.String()) + if colorConstLower == colorLower { + return v, true + } + } + return "", false +} diff --git a/vendor/github.com/magefile/mage/mg/color_string.go b/vendor/github.com/magefile/mage/mg/color_string.go new file mode 100644 index 00000000000..06debca5404 --- /dev/null +++ b/vendor/github.com/magefile/mage/mg/color_string.go @@ -0,0 +1,38 @@ +// Code generated by "stringer -type=Color"; DO NOT EDIT. + +package mg + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[Black-0] + _ = x[Red-1] + _ = x[Green-2] + _ = x[Yellow-3] + _ = x[Blue-4] + _ = x[Magenta-5] + _ = x[Cyan-6] + _ = x[White-7] + _ = x[BrightBlack-8] + _ = x[BrightRed-9] + _ = x[BrightGreen-10] + _ = x[BrightYellow-11] + _ = x[BrightBlue-12] + _ = x[BrightMagenta-13] + _ = x[BrightCyan-14] + _ = x[BrightWhite-15] +} + +const _Color_name = "BlackRedGreenYellowBlueMagentaCyanWhiteBrightBlackBrightRedBrightGreenBrightYellowBrightBlueBrightMagentaBrightCyanBrightWhite" + +var _Color_index = [...]uint8{0, 5, 8, 13, 19, 23, 30, 34, 39, 50, 59, 70, 82, 92, 105, 115, 126} + +func (i Color) String() string { + if i < 0 || i >= Color(len(_Color_index)-1) { + return "Color(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _Color_name[_Color_index[i]:_Color_index[i+1]] +} diff --git a/vendor/github.com/magefile/mage/mg/deps.go b/vendor/github.com/magefile/mage/mg/deps.go new file mode 100644 index 00000000000..f0c2509b833 --- /dev/null +++ b/vendor/github.com/magefile/mage/mg/deps.go @@ -0,0 +1,211 @@ +package mg + +import ( + "context" + "fmt" + "log" + "os" + "reflect" + "runtime" + "strings" + "sync" +) + +var logger = log.New(os.Stderr, "", 0) + +type onceMap struct { + mu *sync.Mutex + m map[onceKey]*onceFun +} + +type onceKey struct { + Name string + ID string +} + +func (o *onceMap) LoadOrStore(f Fn) *onceFun { + defer o.mu.Unlock() + o.mu.Lock() + + key := onceKey{ + Name: f.Name(), + ID: f.ID(), + } + existing, ok := o.m[key] + if ok { + return existing + } + one := &onceFun{ + once: &sync.Once{}, + fn: f, + displayName: displayName(f.Name()), + } + o.m[key] = one + return one +} + +var onces = &onceMap{ + mu: &sync.Mutex{}, + m: map[onceKey]*onceFun{}, +} + +// SerialDeps is like Deps except it runs each dependency serially, instead of +// in parallel. This can be useful for resource intensive dependencies that +// shouldn't be run at the same time. +func SerialDeps(fns ...interface{}) { + funcs := checkFns(fns) + ctx := context.Background() + for i := range fns { + runDeps(ctx, funcs[i:i+1]) + } +} + +// SerialCtxDeps is like CtxDeps except it runs each dependency serially, +// instead of in parallel. This can be useful for resource intensive +// dependencies that shouldn't be run at the same time. +func SerialCtxDeps(ctx context.Context, fns ...interface{}) { + funcs := checkFns(fns) + for i := range fns { + runDeps(ctx, funcs[i:i+1]) + } +} + +// CtxDeps runs the given functions as dependencies of the calling function. +// Dependencies must only be of type: +// func() +// func() error +// func(context.Context) +// func(context.Context) error +// Or a similar method on a mg.Namespace type. +// Or an mg.Fn interface. +// +// The function calling Deps is guaranteed that all dependent functions will be +// run exactly once when Deps returns. Dependent functions may in turn declare +// their own dependencies using Deps. Each dependency is run in its own +// goroutine. Each function is given the context provided if the function +// prototype allows for it. +func CtxDeps(ctx context.Context, fns ...interface{}) { + funcs := checkFns(fns) + runDeps(ctx, funcs) +} + +// runDeps assumes you've already called checkFns.
+func runDeps(ctx context.Context, fns []Fn) { + mu := &sync.Mutex{} + var errs []string + var exit int + wg := &sync.WaitGroup{} + for _, f := range fns { + fn := onces.LoadOrStore(f) + wg.Add(1) + go func() { + defer func() { + if v := recover(); v != nil { + mu.Lock() + if err, ok := v.(error); ok { + exit = changeExit(exit, ExitStatus(err)) + } else { + exit = changeExit(exit, 1) + } + errs = append(errs, fmt.Sprint(v)) + mu.Unlock() + } + wg.Done() + }() + if err := fn.run(ctx); err != nil { + mu.Lock() + errs = append(errs, fmt.Sprint(err)) + exit = changeExit(exit, ExitStatus(err)) + mu.Unlock() + } + }() + } + + wg.Wait() + if len(errs) > 0 { + panic(Fatal(exit, strings.Join(errs, "\n"))) + } +} + +func checkFns(fns []interface{}) []Fn { + funcs := make([]Fn, len(fns)) + for i, f := range fns { + if fn, ok := f.(Fn); ok { + funcs[i] = fn + continue + } + + // Check if the target provided is not a function so we can give a clear warning + t := reflect.TypeOf(f) + if t == nil || t.Kind() != reflect.Func { + panic(fmt.Errorf("non-function used as a target dependency: %T. The mg.Deps, mg.SerialDeps and mg.CtxDeps functions accept function names, such as mg.Deps(TargetA, TargetB)", f)) + } + + funcs[i] = F(f) + } + return funcs +} + +// Deps runs the given functions in parallel, exactly once. Dependencies must +// only be of type: +// func() +// func() error +// func(context.Context) +// func(context.Context) error +// Or a similar method on a mg.Namespace type. +// Or an mg.Fn interface. +// +// This is a way to build up a tree of dependencies with each dependency +// defining its own dependencies. Functions must have the same signature as a +// Mage target, i.e. optional context argument, optional error return. +func Deps(fns ...interface{}) { + CtxDeps(context.Background(), fns...) +} + +func changeExit(old, new int) int { + if new == 0 { + return old + } + if old == 0 { + return new + } + if old == new { + return old + } + // both different and both non-zero, just set + // exit to 1. Nothing more we can do. + return 1 +} + +// funcName returns the unique name for the function +func funcName(i interface{}) string { + return runtime.FuncForPC(reflect.ValueOf(i).Pointer()).Name() +} + +func displayName(name string) string { + splitByPackage := strings.Split(name, ".") + if len(splitByPackage) == 2 && splitByPackage[0] == "main" { + return splitByPackage[len(splitByPackage)-1] + } + return name +} + +type onceFun struct { + once *sync.Once + fn Fn + err error + + displayName string +} + +// run will run the function exactly once and capture the error output. Further runs simply return +// the same error output. +func (o *onceFun) run(ctx context.Context) error { + o.once.Do(func() { + if Verbose() { + logger.Println("Running dependency:", displayName(o.fn.Name())) + } + o.err = o.fn.Run(ctx) + }) + return o.err +} diff --git a/vendor/github.com/magefile/mage/mg/errors.go b/vendor/github.com/magefile/mage/mg/errors.go new file mode 100644 index 00000000000..2dd780fe3db --- /dev/null +++ b/vendor/github.com/magefile/mage/mg/errors.go @@ -0,0 +1,51 @@ +package mg + +import ( + "errors" + "fmt" +) + +type fatalErr struct { + code int + error +} + +func (f fatalErr) ExitStatus() int { + return f.code +} + +type exitStatus interface { + ExitStatus() int +} + +// Fatal returns an error that will cause mage to print out the +// given args and exit with the given exit code.
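+// +// Illustrative sketch only, not part of the vendored source: a magefile target might abort with a specific exit code like so: +// +// func Release() error { +// if os.Getenv("TAG") == "" { +// return mg.Fatal(2, "TAG environment variable is required") +// } +// return nil +// }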
+func Fatal(code int, args ...interface{}) error { + return fatalErr{ + code: code, + error: errors.New(fmt.Sprint(args...)), + } +} + +// Fatalf returns an error that will cause mage to print out the +// given message and exit with the given exit code. +func Fatalf(code int, format string, args ...interface{}) error { + return fatalErr{ + code: code, + error: fmt.Errorf(format, args...), + } +} + +// ExitStatus queries the error for an exit status. If the error is nil, it +// returns 0. If the error does not implement ExitStatus() int, it returns 1. +// Otherwise it returns the value from ExitStatus(). +func ExitStatus(err error) int { + if err == nil { + return 0 + } + exit, ok := err.(exitStatus) + if !ok { + return 1 + } + return exit.ExitStatus() +} diff --git a/vendor/github.com/magefile/mage/mg/fn.go b/vendor/github.com/magefile/mage/mg/fn.go new file mode 100644 index 00000000000..3856857acea --- /dev/null +++ b/vendor/github.com/magefile/mage/mg/fn.go @@ -0,0 +1,192 @@ +package mg + +import ( + "context" + "encoding/json" + "fmt" + "reflect" + "time" +) + +// Fn represents a function that can be run with mg.Deps. Package, Name, and ID must combine to +// uniquely identify a function, while ensuring the "same" function has identical values. These are +// used as a map key to find and run (or not run) the function. +type Fn interface { + // Name should return the fully qualified name of the function. Usually + // it's best to use runtime.FuncForPC(reflect.ValueOf(f).Pointer()).Name(). + Name() string + + // ID should be an additional uniqueness qualifier in case the name is insufficiently unique. + // This can be the case for functions that take arguments (mg.F json-encodes an array of the + // args). + ID() string + + // Run should run the function. + Run(ctx context.Context) error +} + +// F takes a function that is compatible as a mage target, and any args that need to be passed to +// it, and wraps it in an mg.Fn that mg.Deps can run. Args must be passed in the same order as they +// are declared by the function. Note that you do not need to and should not pass a context.Context +// to F, even if the target takes a context. Compatible args are int, bool, string, and +// time.Duration. +func F(target interface{}, args ...interface{}) Fn { + hasContext, isNamespace, err := checkF(target, args) + if err != nil { + panic(err) + } + id, err := json.Marshal(args) + if err != nil { + panic(fmt.Errorf("can't convert args into a mage-compatible id for mg.Deps: %s", err)) + } + return fn{ + name: funcName(target), + id: string(id), + f: func(ctx context.Context) error { + v := reflect.ValueOf(target) + count := len(args) + if hasContext { + count++ + } + if isNamespace { + count++ + } + vargs := make([]reflect.Value, count) + x := 0 + if isNamespace { + vargs[0] = reflect.ValueOf(struct{}{}) + x++ + } + if hasContext { + vargs[x] = reflect.ValueOf(ctx) + x++ + } + for y := range args { + vargs[x+y] = reflect.ValueOf(args[y]) + } + ret := v.Call(vargs) + if len(ret) > 0 { + // we only allow functions with a single error return, so this should be safe. + if ret[0].IsNil() { + return nil + } + return ret[0].Interface().(error) + } + return nil + }, + } +} + +type fn struct { + name string + id string + f func(ctx context.Context) error +} + +// Name returns the fully qualified name of the function. +func (f fn) Name() string { + return f.name +} + +// ID returns a hash of the argument values passed in +func (f fn) ID() string { + return f.id +} + +// Run runs the function.
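+// +// Illustrative sketch only, not part of the vendored source: given a hypothetical target func BuildArch(goos, goarch string) error, it could be scheduled as a dependency with arguments via F, e.g. +// +// func Build() { +// mg.Deps(mg.F(BuildArch, "linux", "amd64")) +// }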
+func (f fn) Run(ctx context.Context) error { + return f.f(ctx) +} + +func checkF(target interface{}, args []interface{}) (hasContext, isNamespace bool, _ error) { + t := reflect.TypeOf(target) + if t == nil || t.Kind() != reflect.Func { + return false, false, fmt.Errorf("non-function passed to mg.F: %T. The mg.F function accepts function names, such as mg.F(TargetA, \"arg1\", \"arg2\")", target) + } + + if t.NumOut() > 1 { + return false, false, fmt.Errorf("target has too many return values, must be zero or just an error: %T", target) + } + if t.NumOut() == 1 && t.Out(0) != errType { + return false, false, fmt.Errorf("target's return value is not an error") + } + + // more inputs than slots is an error if not variadic + if len(args) > t.NumIn() && !t.IsVariadic() { + return false, false, fmt.Errorf("too many arguments for target, got %d for %T", len(args), target) + } + + if t.NumIn() == 0 { + return false, false, nil + } + + x := 0 + inputs := t.NumIn() + + if t.In(0).AssignableTo(emptyType) { + // nameSpace func + isNamespace = true + x++ + // callers must leave off the namespace value + inputs-- + } + if t.NumIn() > x && t.In(x) == ctxType { + // callers must leave off the context + inputs-- + + // let the upper function know it should pass us a context. + hasContext = true + + // skip checking the first argument in the below loop if it's a context, since first arg is + // special. + x++ + } + + if t.IsVariadic() { + if len(args) < inputs-1 { + return false, false, fmt.Errorf("too few arguments for target, got %d for %T", len(args), target) + + } + } else if len(args) != inputs { + return false, false, fmt.Errorf("wrong number of arguments for target, got %d for %T", len(args), target) + } + + for _, arg := range args { + argT := t.In(x) + if t.IsVariadic() && x == t.NumIn()-1 { + // For the variadic argument, use the slice element type. + argT = argT.Elem() + } + if !argTypes[argT] { + return false, false, fmt.Errorf("argument %d (%s), is not a supported argument type", x, argT) + } + passedT := reflect.TypeOf(arg) + if argT != passedT { + return false, false, fmt.Errorf("argument %d expected to be %s, but is %s", x, argT, passedT) + } + if x < t.NumIn()-1 { + x++ + } + } + return hasContext, isNamespace, nil +} + +// Here we define the types that are supported as arguments/returns +var ( + ctxType = reflect.TypeOf(func(context.Context) {}).In(0) + errType = reflect.TypeOf(func() error { return nil }).Out(0) + emptyType = reflect.TypeOf(struct{}{}) + + intType = reflect.TypeOf(int(0)) + stringType = reflect.TypeOf(string("")) + boolType = reflect.TypeOf(bool(false)) + durType = reflect.TypeOf(time.Second) + + // don't put ctx in here, this is for non-context types + argTypes = map[reflect.Type]bool{ + intType: true, + boolType: true, + stringType: true, + durType: true, + } +) diff --git a/vendor/github.com/magefile/mage/mg/runtime.go b/vendor/github.com/magefile/mage/mg/runtime.go new file mode 100644 index 00000000000..9a8de12ce71 --- /dev/null +++ b/vendor/github.com/magefile/mage/mg/runtime.go @@ -0,0 +1,136 @@ +package mg + +import ( + "os" + "path/filepath" + "runtime" + "strconv" +) + +// CacheEnv is the environment variable that users may set to change the +// location where mage stores its compiled binaries. +const CacheEnv = "MAGEFILE_CACHE" + +// VerboseEnv is the environment variable that indicates the user requested +// verbose mode when running a magefile. 
+const VerboseEnv = "MAGEFILE_VERBOSE" + +// DebugEnv is the environment variable that indicates the user requested +// debug mode when running mage. +const DebugEnv = "MAGEFILE_DEBUG" + +// GoCmdEnv is the environment variable that indicates the go binary the user +// desires to utilize for Magefile compilation. +const GoCmdEnv = "MAGEFILE_GOCMD" + +// IgnoreDefaultEnv is the environment variable that indicates the user requested +// to ignore the default target specified in the magefile. +const IgnoreDefaultEnv = "MAGEFILE_IGNOREDEFAULT" + +// HashFastEnv is the environment variable that indicates the user requested to +// use a quick hash of magefiles to determine whether or not the magefile binary +// needs to be rebuilt. This results in faster runtimes, but means that mage +// will fail to rebuild if a dependency has changed. To force a rebuild, run +// mage with the -f flag. +const HashFastEnv = "MAGEFILE_HASHFAST" + +// EnableColorEnv is the environment variable that indicates the user is using +// a terminal which supports a color output. The default is false for backwards +// compatibility. When the value is true and the detected terminal does support colors +// then the list of mage targets will be displayed in ANSI color. When the value +// is true but the detected terminal does not support colors, then the list of +// mage targets will be displayed in the default colors (e.g. black and white). +const EnableColorEnv = "MAGEFILE_ENABLE_COLOR" + +// TargetColorEnv is the environment variable that indicates which ANSI color +// should be used to colorize mage targets. This is only applicable when +// the MAGEFILE_ENABLE_COLOR environment variable is true. +// The supported ANSI color names are any of these: +// - Black +// - Red +// - Green +// - Yellow +// - Blue +// - Magenta +// - Cyan +// - White +// - BrightBlack +// - BrightRed +// - BrightGreen +// - BrightYellow +// - BrightBlue +// - BrightMagenta +// - BrightCyan +// - BrightWhite +const TargetColorEnv = "MAGEFILE_TARGET_COLOR" + +// Verbose reports whether a magefile was run with the verbose flag. +func Verbose() bool { + b, _ := strconv.ParseBool(os.Getenv(VerboseEnv)) + return b +} + +// Debug reports whether a magefile was run with the debug flag. +func Debug() bool { + b, _ := strconv.ParseBool(os.Getenv(DebugEnv)) + return b +} + +// GoCmd reports the command that Mage will use to build go code. By default mage runs +// the "go" binary in the PATH. +func GoCmd() string { + if cmd := os.Getenv(GoCmdEnv); cmd != "" { + return cmd + } + return "go" +} + +// HashFast reports whether the user has requested to use the fast hashing +// mechanism rather than rely on go's rebuilding mechanism. +func HashFast() bool { + b, _ := strconv.ParseBool(os.Getenv(HashFastEnv)) + return b +} + +// IgnoreDefault reports whether the user has requested to ignore the default target +// in the magefile. +func IgnoreDefault() bool { + b, _ := strconv.ParseBool(os.Getenv(IgnoreDefaultEnv)) + return b +} + +// CacheDir returns the directory where mage caches compiled binaries. It +// defaults to $HOME/.magefile, but may be overridden by the MAGEFILE_CACHE +// environment variable. +func CacheDir() string { + d := os.Getenv(CacheEnv) + if d != "" { + return d + } + switch runtime.GOOS { + case "windows": + return filepath.Join(os.Getenv("HOMEDRIVE"), os.Getenv("HOMEPATH"), "magefile") + default: + return filepath.Join(os.Getenv("HOME"), ".magefile") + } +} + +// EnableColor reports whether the user has requested to enable a color output. 
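+// +// Illustrative only, not part of the vendored source; the variable is typically set on the command line, e.g. +// +// MAGEFILE_ENABLE_COLOR=true mage -l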
+func EnableColor() bool { + b, _ := strconv.ParseBool(os.Getenv(EnableColorEnv)) + return b +} + +// TargetColor returns the configured ANSI color name for a color output. +func TargetColor() string { + s, exists := os.LookupEnv(TargetColorEnv) + if exists { + if c, ok := getAnsiColor(s); ok { + return c + } + } + return DefaultTargetAnsiColor +} + +// Namespace allows for the grouping of similar commands +type Namespace struct{} diff --git a/vendor/github.com/magefile/mage/sh/cmd.go b/vendor/github.com/magefile/mage/sh/cmd.go new file mode 100644 index 00000000000..312de65ae34 --- /dev/null +++ b/vendor/github.com/magefile/mage/sh/cmd.go @@ -0,0 +1,184 @@ +package sh + +import ( + "bytes" + "fmt" + "io" + "log" + "os" + "os/exec" + "strings" + + "github.com/magefile/mage/mg" +) + +// RunCmd returns a function that will call Run with the given command. This is +// useful for creating command aliases to make your scripts easier to read, like +// this: +// +// // in a helper file somewhere +// var g0 = sh.RunCmd("go") // go is a keyword :( +// +// // somewhere in your main code +// if err := g0("install", "github.com/gohugo/hugo"); err != nil { +// return err +// } +// +// Args passed to command get baked in as args to the command when you run it. +// Any args passed in when you run the returned function will be appended to the +// original args. For example, this is equivalent to the above: +// +// var goInstall = sh.RunCmd("go", "install") goInstall("github.com/gohugo/hugo") +// +// RunCmd uses Exec underneath, so see those docs for more details. +func RunCmd(cmd string, args ...string) func(args ...string) error { + return func(args2 ...string) error { + return Run(cmd, append(args, args2...)...) + } +} + +// OutCmd is like RunCmd except the command returns the output of the +// command. +func OutCmd(cmd string, args ...string) func(args ...string) (string, error) { + return func(args2 ...string) (string, error) { + return Output(cmd, append(args, args2...)...) + } +} + +// Run is like RunWith, but doesn't specify any environment variables. +func Run(cmd string, args ...string) error { + return RunWith(nil, cmd, args...) +} + +// RunV is like Run, but always sends the command's stdout to os.Stdout. +func RunV(cmd string, args ...string) error { + _, err := Exec(nil, os.Stdout, os.Stderr, cmd, args...) + return err +} + +// RunWith runs the given command, directing stderr to this program's stderr and +// printing stdout to stdout if mage was run with -v. It adds env to the +// environment variables for the command being run. Environment variables should +// be in the format name=value. +func RunWith(env map[string]string, cmd string, args ...string) error { + var output io.Writer + if mg.Verbose() { + output = os.Stdout + } + _, err := Exec(env, output, os.Stderr, cmd, args...) + return err +} + +// RunWithV is like RunWith, but always sends the command's stdout to os.Stdout. +func RunWithV(env map[string]string, cmd string, args ...string) error { + _, err := Exec(env, os.Stdout, os.Stderr, cmd, args...) + return err +} + +// Output runs the command and returns the text from stdout. +func Output(cmd string, args ...string) (string, error) { + buf := &bytes.Buffer{} + _, err := Exec(nil, buf, os.Stderr, cmd, args...) + return strings.TrimSuffix(buf.String(), "\n"), err +} + +// OutputWith is like RunWith, but returns what is written to stdout.
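+// +// Illustrative sketch only, not part of the vendored source: +// +// out, err := sh.OutputWith(map[string]string{"CGO_ENABLED": "0"}, "go", "env", "CGO_ENABLED") +// // out == "0" on success, since the override is visible to the child process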
+func OutputWith(env map[string]string, cmd string, args ...string) (string, error) { + buf := &bytes.Buffer{} + _, err := Exec(env, buf, os.Stderr, cmd, args...) + return strings.TrimSuffix(buf.String(), "\n"), err +} + +// Exec executes the command, piping its stdout and stderr to the given +// writers. If the command fails, it will return an error that, if returned +// from a target or mg.Deps call, will cause mage to exit with the same code as +// the command failed with. Env is a list of environment variables to set when +// running the command, these override the current environment variables set +// (which are also passed to the command). cmd and args may include references +// to environment variables in $FOO format, in which case these will be +// expanded before the command is run. +// +// Ran reports if the command ran (rather than was not found or not executable). +// Code reports the exit code the command returned if it ran. If err == nil, ran +// is always true and code is always 0. +func Exec(env map[string]string, stdout, stderr io.Writer, cmd string, args ...string) (ran bool, err error) { + expand := func(s string) string { + s2, ok := env[s] + if ok { + return s2 + } + return os.Getenv(s) + } + cmd = os.Expand(cmd, expand) + for i := range args { + args[i] = os.Expand(args[i], expand) + } + ran, code, err := run(env, stdout, stderr, cmd, args...) + if err == nil { + return true, nil + } + if ran { + return ran, mg.Fatalf(code, `running "%s %s" failed with exit code %d`, cmd, strings.Join(args, " "), code) + } + return ran, fmt.Errorf(`failed to run "%s %s: %v"`, cmd, strings.Join(args, " "), err) +} + +func run(env map[string]string, stdout, stderr io.Writer, cmd string, args ...string) (ran bool, code int, err error) { + c := exec.Command(cmd, args...) + c.Env = os.Environ() + for k, v := range env { + c.Env = append(c.Env, k+"="+v) + } + c.Stderr = stderr + c.Stdout = stdout + c.Stdin = os.Stdin + + var quoted []string + for i := range args { + quoted = append(quoted, fmt.Sprintf("%q", args[i])) + } + // To protect against logging from doing exec in global variables + if mg.Verbose() { + log.Println("exec:", cmd, strings.Join(quoted, " ")) + } + err = c.Run() + return CmdRan(err), ExitStatus(err), err +} + +// CmdRan examines the error to determine if it was generated as a result of a +// command running via os/exec.Command. If the error is nil, or the command ran +// (even if it exited with a non-zero exit code), CmdRan reports true. If the +// error is an unrecognized type, or it is an error from exec.Command that says +// the command failed to run (usually due to the command not existing or not +// being executable), it reports false. +func CmdRan(err error) bool { + if err == nil { + return true + } + ee, ok := err.(*exec.ExitError) + if ok { + return ee.Exited() + } + return false +} + +type exitStatus interface { + ExitStatus() int +} + +// ExitStatus returns the exit status of the error if it is an exec.ExitError +// or if it implements ExitStatus() int. +// 0 if it is nil or 1 if it is a different error.
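+// +// Illustrative sketch only, not part of the vendored source: a caller can propagate a failed command's exit code, e.g. +// +// if err := sh.Run("go", "vet", "./..."); err != nil { +// os.Exit(sh.ExitStatus(err)) +// }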
+func ExitStatus(err error) int { + if err == nil { + return 0 + } + if e, ok := err.(exitStatus); ok { + return e.ExitStatus() + } + if e, ok := err.(*exec.ExitError); ok { + if ex, ok := e.Sys().(exitStatus); ok { + return ex.ExitStatus() + } + } + return 1 +} diff --git a/vendor/github.com/magefile/mage/sh/helpers.go b/vendor/github.com/magefile/mage/sh/helpers.go new file mode 100644 index 00000000000..f5d20a2712b --- /dev/null +++ b/vendor/github.com/magefile/mage/sh/helpers.go @@ -0,0 +1,40 @@ +package sh + +import ( + "fmt" + "io" + "os" +) + +// Rm removes the given file or directory even if non-empty. It will not return +// an error if the target doesn't exist, only if the target cannot be removed. +func Rm(path string) error { + err := os.RemoveAll(path) + if err == nil || os.IsNotExist(err) { + return nil + } + return fmt.Errorf(`failed to remove %s: %v`, path, err) +} + +// Copy robustly copies the source file to the destination, overwriting the destination if necessary. +func Copy(dst string, src string) error { + from, err := os.Open(src) + if err != nil { + return fmt.Errorf(`can't copy %s: %v`, src, err) + } + defer from.Close() + finfo, err := from.Stat() + if err != nil { + return fmt.Errorf(`can't stat %s: %v`, src, err) + } + to, err := os.OpenFile(dst, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, finfo.Mode()) + if err != nil { + return fmt.Errorf(`can't copy to %s: %v`, dst, err) + } + defer to.Close() + _, err = io.Copy(to, from) + if err != nil { + return fmt.Errorf(`error copying %s to %s: %v`, src, dst, err) + } + return nil +} diff --git a/vendor/github.com/minio/minio-go/v7/api-prompt-object.go b/vendor/github.com/minio/minio-go/v7/api-prompt-object.go deleted file mode 100644 index dac062a75b0..00000000000 --- a/vendor/github.com/minio/minio-go/v7/api-prompt-object.go +++ /dev/null @@ -1,78 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2024 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "bytes" - "context" - "io" - "net/http" - - "github.com/goccy/go-json" - "github.com/minio/minio-go/v7/pkg/s3utils" -) - -// PromptObject performs language model inference with the prompt and referenced object as context. -// Inference is performed using a Lambda handler that can process the prompt and object. -// Currently, this functionality is limited to certain MinIO servers. -func (c *Client) PromptObject(ctx context.Context, bucketName, objectName, prompt string, opts PromptObjectOptions) (io.ReadCloser, error) { - // Input validation. 
- if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return nil, ErrorResponse{ - StatusCode: http.StatusBadRequest, - Code: "InvalidBucketName", - Message: err.Error(), - } - } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return nil, ErrorResponse{ - StatusCode: http.StatusBadRequest, - Code: "XMinioInvalidObjectName", - Message: err.Error(), - } - } - - opts.AddLambdaArnToReqParams(opts.LambdaArn) - opts.SetHeader("Content-Type", "application/json") - opts.AddPromptArg("prompt", prompt) - promptReqBytes, err := json.Marshal(opts.PromptArgs) - if err != nil { - return nil, err - } - - // Execute POST on bucket/object. - resp, err := c.executeMethod(ctx, http.MethodPost, requestMetadata{ - bucketName: bucketName, - objectName: objectName, - queryValues: opts.toQueryValues(), - customHeader: opts.Header(), - contentSHA256Hex: sum256Hex(promptReqBytes), - contentBody: bytes.NewReader(promptReqBytes), - contentLength: int64(len(promptReqBytes)), - }) - if err != nil { - return nil, err - } - - if resp.StatusCode != http.StatusOK { - defer closeResponse(resp) - return nil, httpRespToErrorResponse(resp, bucketName, objectName) - } - - return resp.Body, nil -} diff --git a/vendor/github.com/minio/minio-go/v7/api-prompt-options.go b/vendor/github.com/minio/minio-go/v7/api-prompt-options.go deleted file mode 100644 index 4493a75d4c7..00000000000 --- a/vendor/github.com/minio/minio-go/v7/api-prompt-options.go +++ /dev/null @@ -1,84 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2024 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "net/http" - "net/url" -) - -// PromptObjectOptions provides options to PromptObject call. -// LambdaArn is the ARN of the Prompt Lambda to be invoked. -// PromptArgs is a map of key-value pairs to be passed to the inference action on the Prompt Lambda. -// "prompt" is a reserved key and should not be used as a key in PromptArgs. -type PromptObjectOptions struct { - LambdaArn string - PromptArgs map[string]any - headers map[string]string - reqParams url.Values -} - -// Header returns the http.Header representation of the POST options. -func (o PromptObjectOptions) Header() http.Header { - headers := make(http.Header, len(o.headers)) - for k, v := range o.headers { - headers.Set(k, v) - } - return headers -} - -// AddPromptArg Add a key value pair to the prompt arguments where the key is a string and -// the value is a JSON serializable. -func (o *PromptObjectOptions) AddPromptArg(key string, value any) { - if o.PromptArgs == nil { - o.PromptArgs = make(map[string]any) - } - o.PromptArgs[key] = value -} - -// AddLambdaArnToReqParams adds the lambdaArn to the request query string parameters. -func (o *PromptObjectOptions) AddLambdaArnToReqParams(lambdaArn string) { - if o.reqParams == nil { - o.reqParams = make(url.Values) - } - o.reqParams.Add("lambdaArn", lambdaArn) -} - -// SetHeader adds a key value pair to the options. 
-// key-value pair will be part of the HTTP POST request
-// headers.
-func (o *PromptObjectOptions) SetHeader(key, value string) {
-	if o.headers == nil {
-		o.headers = make(map[string]string)
-	}
-	o.headers[http.CanonicalHeaderKey(key)] = value
-}
-
-// toQueryValues - Convert the reqParams in Options to query string parameters.
-func (o *PromptObjectOptions) toQueryValues() url.Values {
-	urlValues := make(url.Values)
-	if o.reqParams != nil {
-		for key, values := range o.reqParams {
-			for _, value := range values {
-				urlValues.Add(key, value)
-			}
-		}
-	}
-
-	return urlValues
-}
diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-fan-out.go b/vendor/github.com/minio/minio-go/v7/api-put-object-fan-out.go
index 3023b949cd4..0ae9142e1d3 100644
--- a/vendor/github.com/minio/minio-go/v7/api-put-object-fan-out.go
+++ b/vendor/github.com/minio/minio-go/v7/api-put-object-fan-out.go
@@ -85,10 +85,7 @@ func (c *Client) PutObjectFanOut(ctx context.Context, bucket string, fanOutData
 	policy.SetEncryption(fanOutReq.SSE)
 
 	// Set checksum headers if any.
-	err := policy.SetChecksum(fanOutReq.Checksum)
-	if err != nil {
-		return nil, err
-	}
+	policy.SetChecksum(fanOutReq.Checksum)
 
 	url, formData, err := c.PresignedPostPolicy(ctx, policy)
 	if err != nil {
diff --git a/vendor/github.com/minio/minio-go/v7/api.go b/vendor/github.com/minio/minio-go/v7/api.go
index 88e8d434777..380ec4fdefe 100644
--- a/vendor/github.com/minio/minio-go/v7/api.go
+++ b/vendor/github.com/minio/minio-go/v7/api.go
@@ -133,7 +133,7 @@ type Options struct {
 // Global constants.
 const (
 	libraryName    = "minio-go"
-	libraryVersion = "v7.0.81"
+	libraryVersion = "v7.0.80"
 )
 
 // User Agent should always follow the below style.
diff --git a/vendor/github.com/minio/minio-go/v7/functional_tests.go b/vendor/github.com/minio/minio-go/v7/functional_tests.go
index 43383d13486..c0180b36b70 100644
--- a/vendor/github.com/minio/minio-go/v7/functional_tests.go
+++ b/vendor/github.com/minio/minio-go/v7/functional_tests.go
@@ -160,7 +160,7 @@ func logError(testName, function string, args map[string]interface{}, startTime
 	} else {
 		logFailure(testName, function, args, startTime, alert, message, err)
 		if !isRunOnFail() {
-			panic(fmt.Sprintf("Test failed with message: %s, err: %v", message, err))
+			panic(err)
 		}
 	}
 }
@@ -393,42 +393,6 @@ func getFuncNameLoc(caller int) string {
 	return strings.TrimPrefix(runtime.FuncForPC(pc).Name(), "main.")
 }
 
-type ClientConfig struct {
-	// MinIO client configuration
-	TraceOn         bool // Turn on tracing of HTTP requests and responses to stderr
-	CredsV2         bool // Use V2 credentials if true, otherwise use v4
-	TrailingHeaders bool // Send trailing headers in requests
-}
-
-func NewClient(config ClientConfig) (*minio.Client, error) {
-	// Instantiate new MinIO client
-	var creds *credentials.Credentials
-	if config.CredsV2 {
-		creds = credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), "")
-	} else {
-		creds = credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), "")
-	}
-	opts := &minio.Options{
-		Creds:           creds,
-		Transport:       createHTTPTransport(),
-		Secure:          mustParseBool(os.Getenv(enableHTTPS)),
-		TrailingHeaders: config.TrailingHeaders,
-	}
-	client, err := minio.New(os.Getenv(serverEndpoint), opts)
-	if err != nil {
-		return nil, err
-	}
-
-	if config.TraceOn {
-		client.TraceOn(os.Stderr)
-	}
-
-	// Set user agent.
-	client.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
-
-	return client, nil
-}
-
 // Tests bucket re-create errors.
 func testMakeBucketError() {
 	region := "eu-central-1"
 
@@ -443,12 +407,27 @@ func testMakeBucketError() {
 		"region": region,
 	}
 
-	c, err := NewClient(ClientConfig{})
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Transport: createHTTPTransport(),
+		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client creation failed", err)
 		return
 	}
 
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
 	args["bucketName"] = bucketName
@@ -483,12 +462,20 @@ func testMetadataSizeLimit() {
 		"objectName":        "",
 		"opts.UserMetadata": "",
 	}
+	rand.Seed(startTime.Unix())
 
-	c, err := NewClient(ClientConfig{})
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Transport: createHTTPTransport(),
+		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client creation failed", err)
 		return
 	}
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
 	args["bucketName"] = bucketName
@@ -544,12 +531,27 @@ func testMakeBucketRegions() {
 		"region": region,
 	}
 
-	c, err := NewClient(ClientConfig{})
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client creation failed", err)
 		return
 	}
 
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
 	args["bucketName"] = bucketName
@@ -596,12 +598,27 @@ func testPutObjectReadAt() {
 		"opts": "objectContentType",
 	}
 
-	c, err := NewClient(ClientConfig{})
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
 		return
 	}
 
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
 	args["bucketName"] = bucketName
@@ -680,12 +697,27 @@ func testListObjectVersions() {
 		"recursive": "",
 	}
 
-	c, err := NewClient(ClientConfig{})
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
 		return
 	}
 
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
 	args["bucketName"] = bucketName
@@ -785,12 +817,27 @@ func testStatObjectWithVersioning() {
 	function := "StatObject"
 	args := map[string]interface{}{}
 
-	c, err := NewClient(ClientConfig{})
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
 		return
 	}
 
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
 	args["bucketName"] = bucketName
@@ -888,12 +935,27 @@ func testGetObjectWithVersioning() {
 	function := "GetObject()"
 	args := map[string]interface{}{}
 
-	c, err := NewClient(ClientConfig{})
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
 		return
 	}
 
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
 	args["bucketName"] = bucketName
@@ -1013,12 +1075,27 @@ func testPutObjectWithVersioning() {
 	function := "GetObject()"
 	args := map[string]interface{}{}
 
-	c, err := NewClient(ClientConfig{})
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
 		return
 	}
 
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
 	args["bucketName"] = bucketName
@@ -1146,12 +1223,28 @@ func testListMultipartUpload() {
 	function := "GetObject()"
 	args := map[string]interface{}{}
 
-	c, err := NewClient(ClientConfig{})
+	// Instantiate new minio client object.
+	opts := &minio.Options{
+		Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+		Transport: createHTTPTransport(),
+		Secure: mustParseBool(os.Getenv(enableHTTPS)),
+	}
+	c, err := minio.New(os.Getenv(serverEndpoint), opts)
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
 		return
 	}
-	core := minio.Core{Client: c}
+	core, err := minio.NewCore(os.Getenv(serverEndpoint), opts)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MinIO core client object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
@@ -1254,12 +1347,27 @@ func testCopyObjectWithVersioning() {
 	function := "CopyObject()"
 	args := map[string]interface{}{}
 
-	c, err := NewClient(ClientConfig{})
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
 		return
 	}
 
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
 	args["bucketName"] = bucketName
@@ -1377,12 +1485,27 @@ func testConcurrentCopyObjectWithVersioning() {
 	function := "CopyObject()"
 	args := map[string]interface{}{}
 
-	c, err := NewClient(ClientConfig{})
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
 		return
 	}
 
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
 	args["bucketName"] = bucketName
@@ -1523,12 +1646,27 @@ func testComposeObjectWithVersioning() {
 	function := "ComposeObject()"
 	args := map[string]interface{}{}
 
-	c, err := NewClient(ClientConfig{})
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
 		return
 	}
 
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
 	args["bucketName"] = bucketName
@@ -1649,12 +1787,27 @@ func testRemoveObjectWithVersioning() {
 	function := "DeleteObject()"
 	args := map[string]interface{}{}
 
-	c, err := NewClient(ClientConfig{})
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
 		return
 	}
 
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
 	args["bucketName"] = bucketName
@@ -1747,12 +1900,27 @@ func testRemoveObjectsWithVersioning() {
 	function := "DeleteObjects()"
 	args := map[string]interface{}{}
 
-	c, err := NewClient(ClientConfig{})
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
 		return
 	}
 
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+	// Enable tracing, write to stdout.
+	// c.TraceOn(os.Stderr)
+
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
 	args["bucketName"] = bucketName
@@ -1828,12 +1996,27 @@ func testObjectTaggingWithVersioning() {
 	function := "{Get,Set,Remove}ObjectTagging()"
 	args := map[string]interface{}{}
 
-	c, err := NewClient(ClientConfig{})
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
 		return
 	}
 
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
 	args["bucketName"] = bucketName
@@ -1981,12 +2164,27 @@ func testPutObjectWithChecksums() {
 		return
 	}
 
-	c, err := NewClient(ClientConfig{})
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
 		return
 	}
 
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
 	args["bucketName"] = bucketName
@@ -2032,7 +2230,7 @@ func testPutObjectWithChecksums() {
 		h := test.cs.Hasher()
 		h.Reset()
 
-		// Test with a bad CRC - we haven't called h.Write(b), so this is a checksum of empty data
+		// Test with Wrong CRC.
 		meta[test.cs.Key()] = base64.StdEncoding.EncodeToString(h.Sum(nil))
 		args["metadata"] = meta
 		args["range"] = "false"
@@ -2152,12 +2350,28 @@ func testPutObjectWithTrailingChecksums() {
 		return
 	}
 
-	c, err := NewClient(ClientConfig{TrailingHeaders: true})
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			TrailingHeaders: true,
+		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
 		return
 	}
 
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
 	args["bucketName"] = bucketName
@@ -2327,12 +2541,28 @@ func testPutMultipartObjectWithChecksums(trailing bool) {
 		return
 	}
 
-	c, err := NewClient(ClientConfig{TrailingHeaders: trailing})
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			TrailingHeaders: trailing,
+		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
 		return
 	}
 
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
 	args["bucketName"] = bucketName
@@ -2390,7 +2620,7 @@ func testPutMultipartObjectWithChecksums(trailing bool) {
 	cmpChecksum := func(got, want string) {
 		if want != got {
 			logError(testName, function, args, startTime, "", "checksum mismatch", fmt.Errorf("want %s, got %s", want, got))
-			// fmt.Printf("want %s, got %s\n", want, got)
+			//fmt.Printf("want %s, got %s\n", want, got)
 			return
 		}
 	}
@@ -2511,12 +2741,25 @@ func testTrailingChecksums() {
 		return
 	}
 
-	c, err := NewClient(ClientConfig{TrailingHeaders: true})
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			TrailingHeaders: true,
+		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
 		return
 	}
 
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
 	args["bucketName"] = bucketName
@@ -2638,6 +2881,7 @@ func testTrailingChecksums() {
 		test.ChecksumCRC32C = hashMultiPart(b, int(test.PO.PartSize), test.hasher)
 
 		// Set correct CRC.
+		// c.TraceOn(os.Stderr)
 		resp, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), test.PO)
 		if err != nil {
 			logError(testName, function, args, startTime, "", "PutObject failed", err)
@@ -2689,8 +2933,6 @@ func testTrailingChecksums() {
 		delete(args, "metadata")
 	}
-
-
 	logSuccess(testName, function, args, startTime)
 }
 
 // Test PutObject with custom checksums.
@@ -2710,12 +2952,25 @@ func testPutObjectWithAutomaticChecksums() {
 		return
 	}
 
-	c, err := NewClient(ClientConfig{TrailingHeaders: true})
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			TrailingHeaders: true,
+		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
 		return
 	}
 
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
 	args["bucketName"] = bucketName
@@ -2742,6 +2997,8 @@ func testPutObjectWithAutomaticChecksums() {
 		{header: "x-amz-checksum-crc32c", hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli))},
 	}
 
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
 	// defer c.TraceOff()
 
 	for i, test := range tests {
@@ -2851,12 +3108,20 @@ func testGetObjectAttributes() {
 		return
 	}
 
-	c, err := NewClient(ClientConfig{TrailingHeaders: true})
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			TrailingHeaders: true,
+			Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
 		return
 	}
 
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
 	args["bucketName"] = bucketName
 	err = c.MakeBucket(
@@ -3050,12 +3315,19 @@ func testGetObjectAttributesSSECEncryption() {
 		return
 	}
 
-	c, err := NewClient(ClientConfig{TrailingHeaders: true})
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			TrailingHeaders: true,
+			Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+			Transport: createHTTPTransport(),
+		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
 		return
 	}
 
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
 	args["bucketName"] = bucketName
 	err = c.MakeBucket(
@@ -3129,12 +3401,19 @@ func testGetObjectAttributesErrorCases() {
 		return
 	}
 
-	c, err := NewClient(ClientConfig{TrailingHeaders: true})
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			TrailingHeaders: true,
+			Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
 		return
 	}
 
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
 	unknownBucket := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-bucket-")
 	unknownObject := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-object-")
@@ -3378,12 +3657,27 @@ func testPutObjectWithMetadata() {
 		return
 	}
 
-	c, err := NewClient(ClientConfig{})
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
 		return
 	}
 
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
 	args["bucketName"] = bucketName
@@ -3470,12 +3764,27 @@ func testPutObjectWithContentLanguage() {
 		"opts": "",
 	}
 
-	c, err := NewClient(ClientConfig{})
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
 		return
 	}
 
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
 	args["bucketName"] = bucketName
@@ -3525,12 +3834,27 @@ func testPutObjectStreaming() {
 		"opts": "",
 	}
 
-	c, err := NewClient(ClientConfig{})
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
 		return
 	}
 
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
 	args["bucketName"] = bucketName
@@ -3582,12 +3906,27 @@ func testGetObjectSeekEnd() {
 	function := "GetObject(bucketName, objectName)"
 	args := map[string]interface{}{}
 
-	c, err := NewClient(ClientConfig{})
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
 		return
 	}
 
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
 	args["bucketName"] = bucketName
@@ -3690,12 +4029,27 @@ func testGetObjectClosedTwice() {
 	function := "GetObject(bucketName, objectName)"
 	args := map[string]interface{}{}
 
-	c, err := NewClient(ClientConfig{})
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
 		return
	}
 
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
 	args["bucketName"] = bucketName
@@ -3766,13 +4120,26 @@ func testRemoveObjectsContext() {
 		"bucketName": "",
 	}
 
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
 	// Instantiate new minio client.
-	c, err := NewClient(ClientConfig{})
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
 		return
 	}
 
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
 	args["bucketName"] = bucketName
@@ -3850,12 +4217,27 @@ func testRemoveMultipleObjects() {
 		"bucketName": "",
 	}
 
-	c, err := NewClient(ClientConfig{})
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
 		return
 	}
 
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
 	args["bucketName"] = bucketName
@@ -3919,12 +4301,27 @@ func testRemoveMultipleObjectsWithResult() {
 		"bucketName": "",
 	}
 
-	c, err := NewClient(ClientConfig{})
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
 		return
 	}
 
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
 	args["bucketName"] = bucketName
@@ -4040,12 +4437,27 @@ func testFPutObjectMultipart() {
 		"opts": "",
 	}
 
-	c, err := NewClient(ClientConfig{})
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
 		return
 	}
 
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
 	args["bucketName"] = bucketName
@@ -4131,12 +4543,27 @@ func testFPutObject() {
 		"opts": "",
 	}
 
-	c, err := NewClient(ClientConfig{})
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
 		return
 	}
 
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
 	location := "us-east-1"
@@ -4286,13 +4713,27 @@ func testFPutObjectContext() {
 		"fileName": "",
 		"opts": "",
 	}
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
 
-	c, err := NewClient(ClientConfig{})
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
 		return
 	}
 
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
 	args["bucketName"] = bucketName
@@ -4373,13 +4814,27 @@ func testFPutObjectContextV2() {
 		"objectName": "",
 		"opts": "minio.PutObjectOptions{ContentType:objectContentType}",
 	}
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
 
-	c, err := NewClient(ClientConfig{CredsV2: true})
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
 		return
 	}
 
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
 	args["bucketName"] = bucketName
@@ -4464,12 +4919,24 @@ func testPutObjectContext() {
 		"opts": "",
 	}
 
-	c, err := NewClient(ClientConfig{})
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
 		return
 	}
 
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
 	// Make a new bucket.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
 	args["bucketName"] = bucketName
@@ -4522,12 +4989,27 @@ func testGetObjectS3Zip() {
 	function := "GetObject(bucketName, objectName)"
 	args := map[string]interface{}{"x-minio-extract": true}
 
-	c, err := NewClient(ClientConfig{})
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
 		return
 	}
 
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
 	args["bucketName"] = bucketName
@@ -4691,12 +5173,27 @@ func testGetObjectReadSeekFunctional() {
 	function := "GetObject(bucketName, objectName)"
 	args := map[string]interface{}{}
 
-	c, err := NewClient(ClientConfig{})
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
 		return
 	}
 
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
 	args["bucketName"] = bucketName
@@ -4846,12 +5343,27 @@ func testGetObjectReadAtFunctional() {
 	function := "GetObject(bucketName, objectName)"
 	args := map[string]interface{}{}
 
-	c, err := NewClient(ClientConfig{})
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
 		return
 	}
 
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
 	args["bucketName"] = bucketName
@@ -5009,12 +5521,27 @@ func testGetObjectReadAtWhenEOFWasReached() {
 	function := "GetObject(bucketName, objectName)"
 	args := map[string]interface{}{}
 
-	c, err := NewClient(ClientConfig{})
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
 		return
 	}
 
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
 	args["bucketName"] = bucketName
@@ -5114,12 +5641,27 @@ func testPresignedPostPolicy() {
 		"policy": "",
 	}
 
-	c, err := NewClient(ClientConfig{})
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
 		return
 	}
 
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
 
@@ -5147,22 +5689,50 @@ func testPresignedPostPolicy() {
 		return
 	}
 
+	// Save the data
+	_, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+
 	policy := minio.NewPostPolicy()
+
+	if err := policy.SetBucket(""); err == nil {
+		logError(testName, function, args, startTime, "", "SetBucket did not fail for invalid conditions", err)
+		return
+	}
+	if err := policy.SetKey(""); err == nil {
+		logError(testName, function, args, startTime, "", "SetKey did not fail for invalid conditions", err)
+		return
+	}
+	if err := policy.SetExpires(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC)); err == nil {
+		logError(testName, function, args, startTime, "", "SetExpires did not fail for invalid conditions", err)
+		return
+	}
+	if err := policy.SetContentType(""); err == nil {
+		logError(testName, function, args, startTime, "", "SetContentType did not fail for invalid conditions", err)
+		return
+	}
+	if err := policy.SetContentLengthRange(1024*1024, 1024); err == nil {
+		logError(testName, function, args, startTime, "", "SetContentLengthRange did not fail for invalid conditions", err)
+		return
+	}
+	if err := policy.SetUserMetadata("", ""); err == nil {
+		logError(testName, function, args, startTime, "", "SetUserMetadata did not fail for invalid conditions", err)
+		return
+	}
+
 	policy.SetBucket(bucketName)
 	policy.SetKey(objectName)
 	policy.SetExpires(time.Now().UTC().AddDate(0, 0, 10)) // expires in 10 days
 	policy.SetContentType("binary/octet-stream")
 	policy.SetContentLengthRange(10, 1024*1024)
 	policy.SetUserMetadata(metadataKey, metadataValue)
-	policy.SetContentEncoding("gzip")
 
 	// Add CRC32C
 	checksum := minio.ChecksumCRC32C.ChecksumBytes(buf)
-	err = policy.SetChecksum(checksum)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "SetChecksum failed", err)
-		return
-	}
+	policy.SetChecksum(checksum)
 
 	args["policy"] = policy.String()
 
@@ -5255,209 +5825,24 @@ func testPresignedPostPolicy() {
 		scheme = "http://"
 	}
 
-	expectedLocation := scheme + os.Getenv(serverEndpoint) + "/" + bucketName + "/" + objectName
-	expectedLocationBucketDNS := scheme + bucketName + "." + os.Getenv(serverEndpoint) + "/" + objectName
-
-	if !strings.Contains(expectedLocation, ".amazonaws.com/") {
-		// Test when not against AWS S3.
-		if val, ok := res.Header["Location"]; ok {
-			if val[0] != expectedLocation && val[0] != expectedLocationBucketDNS {
-				logError(testName, function, args, startTime, "", fmt.Sprintf("Location in header response is incorrect. Want %q or %q, got %q", expectedLocation, expectedLocationBucketDNS, val[0]), err)
-				return
-			}
-		} else {
-			logError(testName, function, args, startTime, "", "Location not found in header response", err)
-			return
-		}
-	}
-	wantChecksumCrc32c := checksum.Encoded()
-	if got := res.Header.Get("X-Amz-Checksum-Crc32c"); got != wantChecksumCrc32c {
-		logError(testName, function, args, startTime, "", fmt.Sprintf("Want checksum %q, got %q", wantChecksumCrc32c, got), nil)
-		return
-	}
-
-	// Ensure that when we subsequently GetObject, the checksum is returned
-	gopts := minio.GetObjectOptions{Checksum: true}
-	r, err := c.GetObject(context.Background(), bucketName, objectName, gopts)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "GetObject failed", err)
-		return
-	}
-	st, err := r.Stat()
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Stat failed", err)
-		return
-	}
-	if st.ChecksumCRC32C != wantChecksumCrc32c {
-		logError(testName, function, args, startTime, "", fmt.Sprintf("Want checksum %s, got %s", wantChecksumCrc32c, st.ChecksumCRC32C), nil)
-		return
-	}
-
-	logSuccess(testName, function, args, startTime)
-}
-
-// testPresignedPostPolicyWrongFile tests that when we have a policy with a checksum, we cannot POST the wrong file
-func testPresignedPostPolicyWrongFile() {
-	// initialize logging params
-	startTime := time.Now()
-	testName := getFuncName()
-	function := "PresignedPostPolicy(policy)"
-	args := map[string]interface{}{
-		"policy": "",
-	}
-
-	c, err := NewClient(ClientConfig{})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
-		return
-	}
-
-	// Generate a new random bucket name.
-	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
-
-	// Make a new bucket in 'us-east-1' (source bucket).
-	err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
-	if err != nil {
-		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
-		return
-	}
-
-	defer cleanupBucket(bucketName, c)
-
-	// Generate 33K of data.
-	reader := getDataReader("datafile-33-kB")
-	defer reader.Close()
-
-	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
-	// Azure requires the key to not start with a number
-	metadataKey := randString(60, rand.NewSource(time.Now().UnixNano()), "user")
-	metadataValue := randString(60, rand.NewSource(time.Now().UnixNano()), "")
-
-	buf, err := io.ReadAll(reader)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "ReadAll failed", err)
-		return
-	}
-
-	policy := minio.NewPostPolicy()
-	policy.SetBucket(bucketName)
-	policy.SetKey(objectName)
-	policy.SetExpires(time.Now().UTC().AddDate(0, 0, 10)) // expires in 10 days
-	policy.SetContentType("binary/octet-stream")
-	policy.SetContentLengthRange(10, 1024*1024)
-	policy.SetUserMetadata(metadataKey, metadataValue)
-
-	// Add CRC32C of the 33kB file that the policy will explicitly allow.
-	checksum := minio.ChecksumCRC32C.ChecksumBytes(buf)
-	err = policy.SetChecksum(checksum)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "SetChecksum failed", err)
-		return
-	}
-
-	args["policy"] = policy.String()
-
-	presignedPostPolicyURL, formData, err := c.PresignedPostPolicy(context.Background(), policy)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "PresignedPostPolicy failed", err)
-		return
-	}
-
-	// At this stage, we have a policy that allows us to upload datafile-33-kB.
-	// Test that uploading datafile-10-kB, with a different checksum, fails as expected
-	filePath := getMintDataDirFilePath("datafile-10-kB")
-	if filePath == "" {
-		// Make a temp file with 10 KB data.
-		file, err := os.CreateTemp(os.TempDir(), "PresignedPostPolicyTest")
-		if err != nil {
-			logError(testName, function, args, startTime, "", "TempFile creation failed", err)
-			return
-		}
-		if _, err = io.Copy(file, getDataReader("datafile-10-kB")); err != nil {
-			logError(testName, function, args, startTime, "", "Copy failed", err)
-			return
-		}
-		if err = file.Close(); err != nil {
-			logError(testName, function, args, startTime, "", "File Close failed", err)
-			return
-		}
-		filePath = file.Name()
-	}
-	fileReader := getDataReader("datafile-10-kB")
-	defer fileReader.Close()
-	buf10k, err := io.ReadAll(fileReader)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "ReadAll failed", err)
-		return
-	}
-	otherChecksum := minio.ChecksumCRC32C.ChecksumBytes(buf10k)
-
-	var formBuf bytes.Buffer
-	writer := multipart.NewWriter(&formBuf)
-	for k, v := range formData {
-		if k == "x-amz-checksum-crc32c" {
-			v = otherChecksum.Encoded()
-		}
-		writer.WriteField(k, v)
-	}
-
-	// Add file to post request
-	f, err := os.Open(filePath)
-	defer f.Close()
-	if err != nil {
-		logError(testName, function, args, startTime, "", "File open failed", err)
-		return
-	}
-	w, err := writer.CreateFormFile("file", filePath)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "CreateFormFile failed", err)
-		return
-	}
-	_, err = io.Copy(w, f)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "Copy failed", err)
-		return
-	}
-	writer.Close()
-
-	httpClient := &http.Client{
-		Timeout: 30 * time.Second,
-		Transport: createHTTPTransport(),
-	}
-	args["url"] = presignedPostPolicyURL.String()
-
-	req, err := http.NewRequest(http.MethodPost, presignedPostPolicyURL.String(), bytes.NewReader(formBuf.Bytes()))
-	if err != nil {
-		logError(testName, function, args, startTime, "", "HTTP request failed", err)
-		return
-	}
-
-	req.Header.Set("Content-Type", writer.FormDataContentType())
-
-	// Make the POST request with the form data.
-	res, err := httpClient.Do(req)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "HTTP request failed", err)
-		return
-	}
-	defer res.Body.Close()
-	if res.StatusCode != http.StatusForbidden {
-		logError(testName, function, args, startTime, "", "HTTP request unexpected status", errors.New(res.Status))
-		return
-	}
-
-	// Read the response body, ensure it has checksum failure message
-	resBody, err := io.ReadAll(res.Body)
-	if err != nil {
-		logError(testName, function, args, startTime, "", "ReadAll failed", err)
-		return
-	}
+	expectedLocation := scheme + os.Getenv(serverEndpoint) + "/" + bucketName + "/" + objectName
+	expectedLocationBucketDNS := scheme + bucketName + "." + os.Getenv(serverEndpoint) + "/" + objectName
 
-	// Normalize the response body, because S3 uses quotes around the policy condition components
-	// in the error message, MinIO does not.
-	resBodyStr := strings.ReplaceAll(string(resBody), `"`, "")
-	if !strings.Contains(resBodyStr, "Policy Condition failed: [eq, $x-amz-checksum-crc32c, aHnJMw==]") {
-		logError(testName, function, args, startTime, "", "Unexpected response body", errors.New(resBodyStr))
+	if !strings.Contains(expectedLocation, "s3.amazonaws.com/") {
+		// Test when not against AWS S3.
+		if val, ok := res.Header["Location"]; ok {
+			if val[0] != expectedLocation && val[0] != expectedLocationBucketDNS {
+				logError(testName, function, args, startTime, "", fmt.Sprintf("Location in header response is incorrect. Want %q or %q, got %q", expectedLocation, expectedLocationBucketDNS, val[0]), err)
+				return
+			}
+		} else {
+			logError(testName, function, args, startTime, "", "Location not found in header response", err)
+			return
+		}
+	}
+	want := checksum.Encoded()
+	if got := res.Header.Get("X-Amz-Checksum-Crc32c"); got != want {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Want checksum %q, got %q", want, got), nil)
 		return
 	}
 
@@ -5472,12 +5857,27 @@ func testCopyObject() {
 	function := "CopyObject(dst, src)"
 	args := map[string]interface{}{}
 
-	c, err := NewClient(ClientConfig{})
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
 		return
 	}
 
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
 
@@ -5652,12 +6052,27 @@ func testSSECEncryptedGetObjectReadSeekFunctional() {
 	function := "GetObject(bucketName, objectName)"
 	args := map[string]interface{}{}
 
-	c, err := NewClient(ClientConfig{})
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
 		return
 	}
 
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
 	args["bucketName"] = bucketName
@@ -5820,12 +6235,27 @@ func testSSES3EncryptedGetObjectReadSeekFunctional() {
 	function := "GetObject(bucketName, objectName)"
 	args := map[string]interface{}{}
 
-	c, err := NewClient(ClientConfig{})
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
 		return
 	}
 
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
 	args["bucketName"] = bucketName
@@ -5986,12 +6416,27 @@ func testSSECEncryptedGetObjectReadAtFunctional() {
 	function := "GetObject(bucketName, objectName)"
 	args := map[string]interface{}{}
 
-	c, err := NewClient(ClientConfig{})
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
 		return
 	}
 
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
 	args["bucketName"] = bucketName
@@ -6155,12 +6600,27 @@ func testSSES3EncryptedGetObjectReadAtFunctional() {
 	function := "GetObject(bucketName, objectName)"
 	args := map[string]interface{}{}
 
-	c, err := NewClient(ClientConfig{})
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
 		return
 	}
 
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
 	args["bucketName"] = bucketName
@@ -6325,13 +6785,27 @@ func testSSECEncryptionPutGet() {
 		"objectName": "",
 		"sse": "",
 	}
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
 
-	c, err := NewClient(ClientConfig{})
+	// Instantiate new minio client object
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
 		return
	}
 
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
 	args["bucketName"] = bucketName
@@ -6421,13 +6895,27 @@ func testSSECEncryptionFPut() {
 		"contentType": "",
 		"sse": "",
 	}
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
 
-	c, err := NewClient(ClientConfig{})
+	// Instantiate new minio client object
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
 		return
 	}
 
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
 	args["bucketName"] = bucketName
@@ -6530,13 +7018,27 @@ func testSSES3EncryptionPutGet() {
 		"objectName": "",
 		"sse": "",
 	}
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
 
-	c, err := NewClient(ClientConfig{})
+	// Instantiate new minio client object
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
 		return
 	}
 
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
 	// Generate a new random bucket name.
 	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
 	args["bucketName"] = bucketName
@@ -6624,13 +7126,27 @@ func testSSES3EncryptionFPut() {
 		"contentType": "",
 		"sse": "",
 	}
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
 
-	c, err := NewClient(ClientConfig{})
+	// Instantiate new minio client object
+	c, err := minio.New(os.Getenv(serverEndpoint),
+		&minio.Options{
+			Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+			Transport: createHTTPTransport(),
+			Secure: mustParseBool(os.Getenv(enableHTTPS)),
+		})
 	if err != nil {
 		logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
 		return
 	}
 
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -6739,12 +7255,26 @@ func testBucketNotification() { return } - c, err := NewClient(ClientConfig{}) + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } + // Enable to debug + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + bucketName := os.Getenv("NOTIFY_BUCKET") args["bucketName"] = bucketName @@ -6820,12 +7350,26 @@ func testFunctional() { functionAll := "" args := map[string]interface{}{} - c, err := NewClient(ClientConfig{}) + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { logError(testName, function, nil, startTime, "", "MinIO client object creation failed", err) return } + // Enable to debug + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -7485,12 +8029,24 @@ func testGetObjectModified() { function := "GetObject(bucketName, objectName)" args := map[string]interface{}{} - c, err := NewClient(ClientConfig{}) + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + // Make a new bucket. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -7569,12 +8125,24 @@ func testPutObjectUploadSeekedObject() { "contentType": "binary/octet-stream", } - c, err := NewClient(ClientConfig{}) + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + // Make a new bucket. 
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -7677,12 +8245,27 @@ func testMakeBucketErrorV2() { "region": "eu-west-1", } - c, err := NewClient(ClientConfig{CredsV2: true}) + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return } + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") region := "eu-west-1" @@ -7722,12 +8305,27 @@ func testGetObjectClosedTwiceV2() { "region": "eu-west-1", } - c, err := NewClient(ClientConfig{CredsV2: true}) + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return } + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -7798,12 +8396,27 @@ func testFPutObjectV2() { "opts": "", } - c, err := NewClient(ClientConfig{CredsV2: true}) + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return } + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -7944,12 +8557,27 @@ func testMakeBucketRegionsV2() { "region": "eu-west-1", } - c, err := NewClient(ClientConfig{CredsV2: true}) + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return } + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. 
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -7992,12 +8620,27 @@ func testGetObjectReadSeekFunctionalV2() { function := "GetObject(bucketName, objectName)" args := map[string]interface{}{} - c, err := NewClient(ClientConfig{CredsV2: true}) + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return } + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -8132,12 +8775,27 @@ func testGetObjectReadAtFunctionalV2() { function := "GetObject(bucketName, objectName)" args := map[string]interface{}{} - c, err := NewClient(ClientConfig{CredsV2: true}) + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return } + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -8279,12 +8937,27 @@ func testCopyObjectV2() { function := "CopyObject(destination, source)" args := map[string]interface{}{} - c, err := NewClient(ClientConfig{CredsV2: true}) + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return } + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + // Generate a new random bucket name. 
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") @@ -8483,7 +9156,13 @@ func testComposeObjectErrorCasesV2() { function := "ComposeObject(destination, sourceList)" args := map[string]interface{}{} - c, err := NewClient(ClientConfig{CredsV2: true}) + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return @@ -8575,7 +9254,13 @@ func testCompose10KSourcesV2() { function := "ComposeObject(destination, sourceList)" args := map[string]interface{}{} - c, err := NewClient(ClientConfig{CredsV2: true}) + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return @@ -8591,7 +9276,13 @@ func testEncryptedEmptyObject() { function := "PutObject(bucketName, objectName, reader, objectSize, opts)" args := map[string]interface{}{} - c, err := NewClient(ClientConfig{}) + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) return @@ -8739,7 +9430,7 @@ func testEncryptedCopyObjectWrapper(c *minio.Client, bucketName string, sseSrc, dstEncryption = sseDst } // 3. 
get copied object and check if content is equal - coreClient := minio.Core{Client: c} + coreClient := minio.Core{c} reader, _, _, err := coreClient.GetObject(context.Background(), bucketName, "dstObject", minio.GetObjectOptions{ServerSideEncryption: dstEncryption}) if err != nil { logError(testName, function, args, startTime, "", "GetObject failed", err) @@ -8846,7 +9537,13 @@ func testUnencryptedToSSECCopyObject() { function := "CopyObject(destination, source)" args := map[string]interface{}{} - c, err := NewClient(ClientConfig{}) + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return @@ -8855,6 +9552,7 @@ func testUnencryptedToSSECCopyObject() { bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject")) + // c.TraceOn(os.Stderr) testEncryptedCopyObjectWrapper(c, bucketName, nil, sseDst) } @@ -8866,7 +9564,13 @@ func testUnencryptedToSSES3CopyObject() { function := "CopyObject(destination, source)" args := map[string]interface{}{} - c, err := NewClient(ClientConfig{}) + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return @@ -8876,6 +9580,7 @@ func testUnencryptedToSSES3CopyObject() { var sseSrc encrypt.ServerSide sseDst := encrypt.NewSSE() + // c.TraceOn(os.Stderr) testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) } @@ -8887,7 +9592,13 @@ func testUnencryptedToUnencryptedCopyObject() { function := "CopyObject(destination, source)" args := map[string]interface{}{} - c, err := NewClient(ClientConfig{}) + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return @@ -8896,6 +9607,7 @@ func testUnencryptedToUnencryptedCopyObject() { bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") var sseSrc, sseDst encrypt.ServerSide + // c.TraceOn(os.Stderr) testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) } @@ -8907,7 +9619,13 @@ func testEncryptedSSECToSSECCopyObject() { function := "CopyObject(destination, source)" args := map[string]interface{}{} - c, err := NewClient(ClientConfig{}) + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return @@ -8917,6 +9635,7 @@ func 
testEncryptedSSECToSSECCopyObject() { sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject")) sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject")) + // c.TraceOn(os.Stderr) testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) } @@ -8928,7 +9647,13 @@ func testEncryptedSSECToSSES3CopyObject() { function := "CopyObject(destination, source)" args := map[string]interface{}{} - c, err := NewClient(ClientConfig{}) + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return @@ -8938,6 +9663,7 @@ func testEncryptedSSECToSSES3CopyObject() { sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject")) sseDst := encrypt.NewSSE() + // c.TraceOn(os.Stderr) testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) } @@ -8949,7 +9675,13 @@ func testEncryptedSSECToUnencryptedCopyObject() { function := "CopyObject(destination, source)" args := map[string]interface{}{} - c, err := NewClient(ClientConfig{}) + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return @@ -8959,6 +9691,7 @@ func testEncryptedSSECToUnencryptedCopyObject() { sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject")) var sseDst encrypt.ServerSide + // c.TraceOn(os.Stderr) testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) } @@ -8970,7 +9703,13 @@ func testEncryptedSSES3ToSSECCopyObject() { function := "CopyObject(destination, source)" args := map[string]interface{}{} - c, err := NewClient(ClientConfig{}) + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return @@ -8980,6 +9719,7 @@ func testEncryptedSSES3ToSSECCopyObject() { sseSrc := encrypt.NewSSE() sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject")) + // c.TraceOn(os.Stderr) testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) } @@ -8991,7 +9731,13 @@ func testEncryptedSSES3ToSSES3CopyObject() { function := "CopyObject(destination, source)" args := map[string]interface{}{} - c, err := NewClient(ClientConfig{}) + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return @@ -9001,6 
+9747,7 @@ func testEncryptedSSES3ToSSES3CopyObject() { sseSrc := encrypt.NewSSE() sseDst := encrypt.NewSSE() + // c.TraceOn(os.Stderr) testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) } @@ -9012,7 +9759,13 @@ func testEncryptedSSES3ToUnencryptedCopyObject() { function := "CopyObject(destination, source)" args := map[string]interface{}{} - c, err := NewClient(ClientConfig{}) + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return @@ -9022,6 +9775,7 @@ func testEncryptedSSES3ToUnencryptedCopyObject() { sseSrc := encrypt.NewSSE() var sseDst encrypt.ServerSide + // c.TraceOn(os.Stderr) testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) } @@ -9033,7 +9787,13 @@ func testEncryptedCopyObjectV2() { function := "CopyObject(destination, source)" args := map[string]interface{}{} - c, err := NewClient(ClientConfig{CredsV2: true}) + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return @@ -9043,6 +9803,7 @@ func testEncryptedCopyObjectV2() { sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject")) sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject")) + // c.TraceOn(os.Stderr) testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) } @@ -9053,7 +9814,13 @@ func testDecryptedCopyObject() { function := "CopyObject(destination, source)" args := map[string]interface{}{} - c, err := NewClient(ClientConfig{}) + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) return @@ -9107,14 +9874,26 @@ func testSSECMultipartEncryptedToSSECCopyObjectPart() { function := "CopyObjectPart(destination, source)" args := map[string]interface{}{} - client, err := NewClient(ClientConfig{}) + // Instantiate new minio client object + client, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) return } // Instantiate new core client object. - c := minio.Core{Client: client} + c := minio.Core{client} + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. 
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") @@ -9293,14 +10072,26 @@ func testSSECEncryptedToSSECCopyObjectPart() { function := "CopyObjectPart(destination, source)" args := map[string]interface{}{} - client, err := NewClient(ClientConfig{}) + // Instantiate new minio client object + client, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) return } // Instantiate new core client object. - c := minio.Core{Client: client} + c := minio.Core{client} + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") @@ -9459,14 +10250,26 @@ func testSSECEncryptedToUnencryptedCopyPart() { function := "CopyObjectPart(destination, source)" args := map[string]interface{}{} - client, err := NewClient(ClientConfig{}) + // Instantiate new minio client object + client, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) return } // Instantiate new core client object. - c := minio.Core{Client: client} + c := minio.Core{client} + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") @@ -9624,14 +10427,26 @@ func testSSECEncryptedToSSES3CopyObjectPart() { function := "CopyObjectPart(destination, source)" args := map[string]interface{}{} - client, err := NewClient(ClientConfig{}) + // Instantiate new minio client object + client, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) return } // Instantiate new core client object. - c := minio.Core{Client: client} + c := minio.Core{client} + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. 
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") @@ -9792,14 +10607,26 @@ func testUnencryptedToSSECCopyObjectPart() { function := "CopyObjectPart(destination, source)" args := map[string]interface{}{} - client, err := NewClient(ClientConfig{}) + // Instantiate new minio client object + client, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) return } // Instantiate new core client object. - c := minio.Core{Client: client} + c := minio.Core{client} + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") @@ -9955,14 +10782,26 @@ func testUnencryptedToUnencryptedCopyPart() { function := "CopyObjectPart(destination, source)" args := map[string]interface{}{} - client, err := NewClient(ClientConfig{}) + // Instantiate new minio client object + client, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) return } // Instantiate new core client object. - c := minio.Core{Client: client} + c := minio.Core{client} + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") @@ -10114,14 +10953,26 @@ func testUnencryptedToSSES3CopyObjectPart() { function := "CopyObjectPart(destination, source)" args := map[string]interface{}{} - client, err := NewClient(ClientConfig{}) + // Instantiate new minio client object + client, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) return } // Instantiate new core client object. - c := minio.Core{Client: client} + c := minio.Core{client} + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. 
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") @@ -10275,14 +11126,26 @@ func testSSES3EncryptedToSSECCopyObjectPart() { function := "CopyObjectPart(destination, source)" args := map[string]interface{}{} - client, err := NewClient(ClientConfig{}) + // Instantiate new minio client object + client, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) return } // Instantiate new core client object. - c := minio.Core{Client: client} + c := minio.Core{client} + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") @@ -10439,14 +11302,26 @@ func testSSES3EncryptedToUnencryptedCopyPart() { function := "CopyObjectPart(destination, source)" args := map[string]interface{}{} - client, err := NewClient(ClientConfig{}) + // Instantiate new minio client object + client, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) return } // Instantiate new core client object. - c := minio.Core{Client: client} + c := minio.Core{client} + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") @@ -10599,14 +11474,26 @@ func testSSES3EncryptedToSSES3CopyObjectPart() { function := "CopyObjectPart(destination, source)" args := map[string]interface{}{} - client, err := NewClient(ClientConfig{}) + // Instantiate new minio client object + client, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) return } // Instantiate new core client object. - c := minio.Core{Client: client} + c := minio.Core{client} + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) // Generate a new random bucket name. 
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") @@ -10761,12 +11648,19 @@ func testUserMetadataCopying() { function := "CopyObject(destination, source)" args := map[string]interface{}{} - c, err := NewClient(ClientConfig{}) + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } + // c.TraceOn(os.Stderr) testUserMetadataCopyingWrapper(c) } @@ -10931,12 +11825,19 @@ func testUserMetadataCopyingV2() { function := "CopyObject(destination, source)" args := map[string]interface{}{} - c, err := NewClient(ClientConfig{CredsV2: true}) + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) return } + // c.TraceOn(os.Stderr) testUserMetadataCopyingWrapper(c) } @@ -10947,7 +11848,13 @@ func testStorageClassMetadataPutObject() { args := map[string]interface{}{} testName := getFuncName() - c, err := NewClient(ClientConfig{}) + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) return @@ -11029,7 +11936,13 @@ func testStorageClassInvalidMetadataPutObject() { args := map[string]interface{}{} testName := getFuncName() - c, err := NewClient(ClientConfig{}) + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) return @@ -11066,7 +11979,13 @@ func testStorageClassMetadataCopyObject() { args := map[string]interface{}{} testName := getFuncName() - c, err := NewClient(ClientConfig{}) + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + Transport: createHTTPTransport(), + }) if err != nil { logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) return @@ -11187,12 +12106,27 @@ func testPutObjectNoLengthV2() { "opts": "", } - c, err := NewClient(ClientConfig{CredsV2: true}) + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. 
+ c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) return } + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -11248,12 +12182,27 @@ func testPutObjectsUnknownV2() { "opts": "", } - c, err := NewClient(ClientConfig{CredsV2: true}) + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) return } + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -11324,12 +12273,27 @@ func testPutObject0ByteV2() { "opts": "", } - c, err := NewClient(ClientConfig{CredsV2: true}) + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) return } + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + // Generate a new random bucket name. 
bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -11374,7 +12338,13 @@ func testComposeObjectErrorCases() { function := "ComposeObject(destination, sourceList)" args := map[string]interface{}{} - c, err := NewClient(ClientConfig{}) + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return @@ -11391,7 +12361,13 @@ func testCompose10KSources() { function := "ComposeObject(destination, sourceList)" args := map[string]interface{}{} - c, err := NewClient(ClientConfig{}) + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return @@ -11409,12 +12385,26 @@ func testFunctionalV2() { functionAll := "" args := map[string]interface{}{} - c, err := NewClient(ClientConfig{CredsV2: true}) + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + Transport: createHTTPTransport(), + }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) return } + // Enable to debug + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") location := "us-east-1" @@ -11848,13 +12838,27 @@ func testGetObjectContext() { "bucketName": "", "objectName": "", } + // Seed random based on current time. + rand.Seed(time.Now().Unix()) - c, err := NewClient(ClientConfig{}) + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) return } + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -11937,13 +12941,27 @@ func testFGetObjectContext() { "objectName": "", "fileName": "", } + // Seed random based on current time. + rand.Seed(time.Now().Unix()) - c, err := NewClient(ClientConfig{}) + // Instantiate new minio client object. 
+ c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) return } + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -12015,12 +13033,24 @@ func testGetObjectRanges() { defer cancel() rng := rand.NewSource(time.Now().UnixNano()) - c, err := NewClient(ClientConfig{}) + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) return } + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + // Generate a new random bucket name. bucketName := randString(60, rng, "minio-go-test-") args["bucketName"] = bucketName @@ -12110,13 +13140,27 @@ func testGetObjectACLContext() { "bucketName": "", "objectName": "", } + // Seed random based on current time. + rand.Seed(time.Now().Unix()) - c, err := NewClient(ClientConfig{}) + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) return } + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -12274,12 +13318,24 @@ func testPutObjectContextV2() { "size": "", "opts": "", } - c, err := NewClient(ClientConfig{CredsV2: true}) + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) return } + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + // Make a new bucket. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -12334,13 +13390,27 @@ func testGetObjectContextV2() { "bucketName": "", "objectName": "", } + // Seed random based on current time. 
+ rand.Seed(time.Now().Unix()) - c, err := NewClient(ClientConfig{CredsV2: true}) + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) return } + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -12421,13 +13491,27 @@ func testFGetObjectContextV2() { "objectName": "", "fileName": "", } + // Seed random based on current time. + rand.Seed(time.Now().Unix()) - c, err := NewClient(ClientConfig{CredsV2: true}) + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) return } + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -12496,13 +13580,27 @@ func testListObjects() { "objectPrefix": "", "recursive": "true", } + // Seed random based on current time. + rand.Seed(time.Now().Unix()) - c, err := NewClient(ClientConfig{}) + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) return } + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -12586,12 +13684,24 @@ func testCors() { "cors": "", } - c, err := NewClient(ClientConfig{}) + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. 
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + // Create or reuse a bucket that will get cors settings applied to it and deleted when done bucketName := os.Getenv("MINIO_GO_TEST_BUCKET_CORS") if bucketName == "" { @@ -13310,12 +14420,24 @@ func testCorsSetGetDelete() { "cors": "", } - c, err := NewClient(ClientConfig{}) + // Instantiate new minio client object + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -13397,13 +14519,27 @@ func testRemoveObjects() { "objectPrefix": "", "recursive": "true", } + // Seed random based on current time. + rand.Seed(time.Now().Unix()) - c, err := NewClient(ClientConfig{}) + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) return } + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -13517,13 +14653,27 @@ func testGetBucketTagging() { args := map[string]interface{}{ "bucketName": "", } + // Seed random based on current time. + rand.Seed(time.Now().Unix()) - c, err := NewClient(ClientConfig{}) + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) return } + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -13559,13 +14709,27 @@ func testSetBucketTagging() { "bucketName": "", "tags": "", } + // Seed random based on current time. + rand.Seed(time.Now().Unix()) - c, err := NewClient(ClientConfig{}) + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) return } + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. 
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -13631,13 +14795,27 @@ func testRemoveBucketTagging() { args := map[string]interface{}{ "bucketName": "", } + // Seed random based on current time. + rand.Seed(time.Now().Unix()) - c, err := NewClient(ClientConfig{}) + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) return } + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + // Generate a new random bucket name. bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") args["bucketName"] = bucketName @@ -13783,7 +14961,6 @@ func main() { testGetObjectReadAtFunctional() testGetObjectReadAtWhenEOFWasReached() testPresignedPostPolicy() - testPresignedPostPolicyWrongFile() testCopyObject() testComposeObjectErrorCases() testCompose10KSources() diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go index 787f0a38d69..f1c76c78ea0 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go @@ -58,10 +58,9 @@ type WebIdentityResult struct { // WebIdentityToken - web identity token with expiry. type WebIdentityToken struct { - Token string - AccessToken string - RefreshToken string - Expiry int + Token string + AccessToken string + Expiry int } // A STSWebIdentity retrieves credentials from MinIO service, and keeps track if diff --git a/vendor/github.com/minio/minio-go/v7/post-policy.go b/vendor/github.com/minio/minio-go/v7/post-policy.go index 26bf441b56f..19687e027d0 100644 --- a/vendor/github.com/minio/minio-go/v7/post-policy.go +++ b/vendor/github.com/minio/minio-go/v7/post-policy.go @@ -85,7 +85,7 @@ func (p *PostPolicy) SetExpires(t time.Time) error { // SetKey - Sets an object name for the policy based upload. func (p *PostPolicy) SetKey(key string) error { - if strings.TrimSpace(key) == "" { + if strings.TrimSpace(key) == "" || key == "" { return errInvalidArgument("Object name is empty.") } policyCond := policyCondition{ @@ -118,7 +118,7 @@ func (p *PostPolicy) SetKeyStartsWith(keyStartsWith string) error { // SetBucket - Sets bucket at which objects will be uploaded to. 
func (p *PostPolicy) SetBucket(bucketName string) error { - if strings.TrimSpace(bucketName) == "" { + if strings.TrimSpace(bucketName) == "" || bucketName == "" { return errInvalidArgument("Bucket name is empty.") } policyCond := policyCondition{ @@ -135,7 +135,7 @@ func (p *PostPolicy) SetBucket(bucketName string) error { // SetCondition - Sets condition for credentials, date and algorithm func (p *PostPolicy) SetCondition(matchType, condition, value string) error { - if strings.TrimSpace(value) == "" { + if strings.TrimSpace(value) == "" || value == "" { return errInvalidArgument("No value specified for condition") } @@ -156,7 +156,7 @@ func (p *PostPolicy) SetCondition(matchType, condition, value string) error { // SetTagging - Sets tagging for the object for this policy based upload. func (p *PostPolicy) SetTagging(tagging string) error { - if strings.TrimSpace(tagging) == "" { + if strings.TrimSpace(tagging) == "" || tagging == "" { return errInvalidArgument("No tagging specified.") } _, err := tags.ParseObjectXML(strings.NewReader(tagging)) @@ -178,7 +178,7 @@ func (p *PostPolicy) SetTagging(tagging string) error { // SetContentType - Sets content-type of the object for this policy // based upload. func (p *PostPolicy) SetContentType(contentType string) error { - if strings.TrimSpace(contentType) == "" { + if strings.TrimSpace(contentType) == "" || contentType == "" { return errInvalidArgument("No content type specified.") } policyCond := policyCondition{ @@ -211,7 +211,7 @@ func (p *PostPolicy) SetContentTypeStartsWith(contentTypeStartsWith string) erro // SetContentDisposition - Sets content-disposition of the object for this policy func (p *PostPolicy) SetContentDisposition(contentDisposition string) error { - if strings.TrimSpace(contentDisposition) == "" { + if strings.TrimSpace(contentDisposition) == "" || contentDisposition == "" { return errInvalidArgument("No content disposition specified.") } policyCond := policyCondition{ @@ -226,44 +226,27 @@ func (p *PostPolicy) SetContentDisposition(contentDisposition string) error { return nil } -// SetContentEncoding - Sets content-encoding of the object for this policy -func (p *PostPolicy) SetContentEncoding(contentEncoding string) error { - if strings.TrimSpace(contentEncoding) == "" { - return errInvalidArgument("No content encoding specified.") - } - policyCond := policyCondition{ - matchType: "eq", - condition: "$Content-Encoding", - value: contentEncoding, - } - if err := p.addNewPolicy(policyCond); err != nil { - return err - } - p.formData["Content-Encoding"] = contentEncoding - return nil -} - // SetContentLengthRange - Set new min and max content length // condition for all incoming uploads. -func (p *PostPolicy) SetContentLengthRange(minLen, maxLen int64) error { - if minLen > maxLen { +func (p *PostPolicy) SetContentLengthRange(min, max int64) error { + if min > max { return errInvalidArgument("Minimum limit is larger than maximum limit.") } - if minLen < 0 { + if min < 0 { return errInvalidArgument("Minimum limit cannot be negative.") } - if maxLen <= 0 { + if max <= 0 { return errInvalidArgument("Maximum limit cannot be non-positive.") } - p.contentLengthRange.min = minLen - p.contentLengthRange.max = maxLen + p.contentLengthRange.min = min + p.contentLengthRange.max = max return nil } // SetSuccessActionRedirect - Sets the redirect success url of the object for this policy // based upload. 
func (p *PostPolicy) SetSuccessActionRedirect(redirect string) error { - if strings.TrimSpace(redirect) == "" { + if strings.TrimSpace(redirect) == "" || redirect == "" { return errInvalidArgument("Redirect is empty") } policyCond := policyCondition{ @@ -281,7 +264,7 @@ func (p *PostPolicy) SetSuccessActionRedirect(redirect string) error { // SetSuccessStatusAction - Sets the status success code of the object for this policy // based upload. func (p *PostPolicy) SetSuccessStatusAction(status string) error { - if strings.TrimSpace(status) == "" { + if strings.TrimSpace(status) == "" || status == "" { return errInvalidArgument("Status is empty") } policyCond := policyCondition{ @@ -299,10 +282,10 @@ func (p *PostPolicy) SetSuccessStatusAction(status string) error { // SetUserMetadata - Set user metadata as a key/value couple. // Can be retrieved through a HEAD request or an event. func (p *PostPolicy) SetUserMetadata(key, value string) error { - if strings.TrimSpace(key) == "" { + if strings.TrimSpace(key) == "" || key == "" { return errInvalidArgument("Key is empty") } - if strings.TrimSpace(value) == "" { + if strings.TrimSpace(value) == "" || value == "" { return errInvalidArgument("Value is empty") } headerName := fmt.Sprintf("x-amz-meta-%s", key) @@ -321,7 +304,7 @@ func (p *PostPolicy) SetUserMetadata(key, value string) error { // SetUserMetadataStartsWith - Set how an user metadata should starts with. // Can be retrieved through a HEAD request or an event. func (p *PostPolicy) SetUserMetadataStartsWith(key, value string) error { - if strings.TrimSpace(key) == "" { + if strings.TrimSpace(key) == "" || key == "" { return errInvalidArgument("Key is empty") } headerName := fmt.Sprintf("x-amz-meta-%s", key) @@ -338,29 +321,11 @@ func (p *PostPolicy) SetUserMetadataStartsWith(key, value string) error { } // SetChecksum sets the checksum of the request. -func (p *PostPolicy) SetChecksum(c Checksum) error { +func (p *PostPolicy) SetChecksum(c Checksum) { if c.IsSet() { p.formData[amzChecksumAlgo] = c.Type.String() p.formData[c.Type.Key()] = c.Encoded() - - policyCond := policyCondition{ - matchType: "eq", - condition: fmt.Sprintf("$%s", amzChecksumAlgo), - value: c.Type.String(), - } - if err := p.addNewPolicy(policyCond); err != nil { - return err - } - policyCond = policyCondition{ - matchType: "eq", - condition: fmt.Sprintf("$%s", c.Type.Key()), - value: c.Encoded(), - } - if err := p.addNewPolicy(policyCond); err != nil { - return err - } } - return nil } // SetEncryption - sets encryption headers for POST API diff --git a/vendor/github.com/minio/minio-go/v7/retry-continous.go b/vendor/github.com/minio/minio-go/v7/retry-continous.go index 81fcf16f1b9..bfeea95f30d 100644 --- a/vendor/github.com/minio/minio-go/v7/retry-continous.go +++ b/vendor/github.com/minio/minio-go/v7/retry-continous.go @@ -20,7 +20,7 @@ package minio import "time" // newRetryTimerContinous creates a timer with exponentially increasing delays forever. 
-func (c *Client) newRetryTimerContinous(baseSleep, maxSleep time.Duration, jitter float64, doneCh chan struct{}) <-chan int {
+func (c *Client) newRetryTimerContinous(unit, cap time.Duration, jitter float64, doneCh chan struct{}) <-chan int {
 	attemptCh := make(chan int)
 
 	// normalize jitter to the range [0, 1.0]
@@ -39,10 +39,10 @@ func (c *Client) newRetryTimerContinous(baseSleep, maxSleep time.Duration, jitte
 		if attempt > maxAttempt {
 			attempt = maxAttempt
 		}
-		// sleep = random_between(0, min(maxSleep, base * 2 ** attempt))
-		sleep := baseSleep * time.Duration(1<<uint(attempt))
-		if sleep > maxSleep {
-			sleep = maxSleep
+		// sleep = random_between(0, min(cap, base * 2 ** attempt))
+		sleep := unit * time.Duration(1<<uint(attempt))
+		if sleep > cap {
+			sleep = cap
 		}
 		if jitter != NoJitter {
 			sleep -= time.Duration(c.random.Float64() * float64(sleep) * jitter)
diff --git a/vendor/github.com/minio/minio-go/v7/retry.go b/vendor/github.com/minio/minio-go/v7/retry.go
index 4cc45920c4a..d15eb59013e 100644
--- a/vendor/github.com/minio/minio-go/v7/retry.go
+++ b/vendor/github.com/minio/minio-go/v7/retry.go
@@ -45,7 +45,7 @@ var DefaultRetryCap = time.Second
 
 // newRetryTimer creates a timer with exponentially increasing
 // delays until the maximum retry attempts are reached.
-func (c *Client) newRetryTimer(ctx context.Context, maxRetry int, baseSleep, maxSleep time.Duration, jitter float64) <-chan int {
+func (c *Client) newRetryTimer(ctx context.Context, maxRetry int, unit, cap time.Duration, jitter float64) <-chan int {
 	attemptCh := make(chan int)
 
 	// computes the exponential backoff duration according to
@@ -59,10 +59,10 @@ func (c *Client) newRetryTimer(ctx context.Context, maxRetry int, baseSleep, max
 			jitter = MaxJitter
 		}
 
-		// sleep = random_between(0, min(maxSleep, base * 2 ** attempt))
-		sleep := baseSleep * time.Duration(1<<uint(attempt))
-		if sleep > maxSleep {
-			sleep = maxSleep
+		// sleep = random_between(0, min(cap, base * 2 ** attempt))
+		sleep := unit * time.Duration(1<<uint(attempt))
+		if sleep > cap {
+			sleep = cap
 		}
 		if jitter != NoJitter {
 			sleep -= time.Duration(c.random.Float64() * float64(sleep) * jitter)
diff --git a/vendor/github.com/mitchellh/go-testing-interface/.travis.yml b/vendor/github.com/mitchellh/go-testing-interface/.travis.yml
deleted file mode 100644
index 928d000ec49..00000000000
--- a/vendor/github.com/mitchellh/go-testing-interface/.travis.yml
+++ /dev/null
@@ -1,13 +0,0 @@
-language: go
-
-go:
-  - 1.8
-  - 1.x
-  - tip
-
-script:
-  - go test
-
-matrix:
-  allow_failures:
-    - go: tip
diff --git a/vendor/github.com/mitchellh/go-testing-interface/README.md b/vendor/github.com/mitchellh/go-testing-interface/README.md
deleted file mode 100644
index 26781bbae88..00000000000
--- a/vendor/github.com/mitchellh/go-testing-interface/README.md
+++ /dev/null
@@ -1,52 +0,0 @@
-# go-testing-interface
-
-go-testing-interface is a Go library that exports an interface that
-`*testing.T` implements as well as a runtime version you can use in its
-place.
-
-The purpose of this library is so that you can export test helpers as a
-public API without depending on the "testing" package, since you can't
-create a `*testing.T` struct manually. This lets you, for example, use the
-public testing APIs to generate mock data at runtime, rather than just at
-test time.
-
-## Usage & Example
-
-For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/go-testing-interface).
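The retry hunks above both implement the commented formula sleep = random_between(0, min(cap, unit * 2 ** attempt)). A self-contained sketch of that computation, with capDur substituted for the builtin-shadowing cap parameter and a hypothetical maxAttempt bound:

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// backoff mirrors the patched minio-go logic; capDur stands in for the
// cap parameter (renamed here to avoid shadowing Go's builtin).
func backoff(unit, capDur time.Duration, attempt int, jitter float64) time.Duration {
	const maxAttempt = 30 // hypothetical bound; keeps the shift well-defined
	if attempt > maxAttempt {
		attempt = maxAttempt
	}
	sleep := unit * time.Duration(1<<uint(attempt))
	if sleep > capDur {
		sleep = capDur
	}
	if jitter > 0 {
		// Subtract a random fraction of the sleep, as the code above does.
		sleep -= time.Duration(rand.Float64() * float64(sleep) * jitter)
	}
	return sleep
}

func main() {
	for attempt := 0; attempt < 5; attempt++ {
		fmt.Println(backoff(100*time.Millisecond, time.Second, attempt, 0.5))
	}
}
```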
- -Given a test helper written using `go-testing-interface` like this: - - import "github.com/mitchellh/go-testing-interface" - - func TestHelper(t testing.T) { - t.Fatal("I failed") - } - -You can call the test helper in a real test easily: - - import "testing" - - func TestThing(t *testing.T) { - TestHelper(t) - } - -You can also call the test helper at runtime if needed: - - import "github.com/mitchellh/go-testing-interface" - - func main() { - TestHelper(&testing.RuntimeT{}) - } - -## Why?! - -**Why would I call a test helper that takes a *testing.T at runtime?** - -You probably shouldn't. The only use case I've seen (and I've had) for this -is to implement a "dev mode" for a service where the test helpers are used -to populate mock data, create a mock DB, perhaps run service dependencies -in-memory, etc. - -Outside of a "dev mode", I've never seen a use case for this and I think -there shouldn't be one since the point of the `testing.T` interface is that -you can fail immediately. diff --git a/vendor/github.com/mitchellh/go-testing-interface/testing.go b/vendor/github.com/mitchellh/go-testing-interface/testing.go deleted file mode 100644 index 204afb42005..00000000000 --- a/vendor/github.com/mitchellh/go-testing-interface/testing.go +++ /dev/null @@ -1,84 +0,0 @@ -// +build !go1.9 - -package testing - -import ( - "fmt" - "log" -) - -// T is the interface that mimics the standard library *testing.T. -// -// In unit tests you can just pass a *testing.T struct. At runtime, outside -// of tests, you can pass in a RuntimeT struct from this package. -type T interface { - Error(args ...interface{}) - Errorf(format string, args ...interface{}) - Fail() - FailNow() - Failed() bool - Fatal(args ...interface{}) - Fatalf(format string, args ...interface{}) - Log(args ...interface{}) - Logf(format string, args ...interface{}) - Name() string - Skip(args ...interface{}) - SkipNow() - Skipf(format string, args ...interface{}) - Skipped() bool -} - -// RuntimeT implements T and can be instantiated and run at runtime to -// mimic *testing.T behavior. Unlike *testing.T, this will simply panic -// for calls to Fatal. For calls to Error, you'll have to check the errors -// list to determine whether to exit yourself. Name and Skip methods are -// unimplemented noops. 
-type RuntimeT struct { - failed bool -} - -func (t *RuntimeT) Error(args ...interface{}) { - log.Println(fmt.Sprintln(args...)) - t.Fail() -} - -func (t *RuntimeT) Errorf(format string, args ...interface{}) { - log.Println(fmt.Sprintf(format, args...)) - t.Fail() -} - -func (t *RuntimeT) Fatal(args ...interface{}) { - log.Println(fmt.Sprintln(args...)) - t.FailNow() -} - -func (t *RuntimeT) Fatalf(format string, args ...interface{}) { - log.Println(fmt.Sprintf(format, args...)) - t.FailNow() -} - -func (t *RuntimeT) Fail() { - t.failed = true -} - -func (t *RuntimeT) FailNow() { - panic("testing.T failed, see logs for output (if any)") -} - -func (t *RuntimeT) Failed() bool { - return t.failed -} - -func (t *RuntimeT) Log(args ...interface{}) { - log.Println(fmt.Sprintln(args...)) -} - -func (t *RuntimeT) Logf(format string, args ...interface{}) { - log.Println(fmt.Sprintf(format, args...)) -} - -func (t *RuntimeT) Name() string { return "" } -func (t *RuntimeT) Skip(args ...interface{}) {} -func (t *RuntimeT) SkipNow() {} -func (t *RuntimeT) Skipf(format string, args ...interface{}) {} -func (t *RuntimeT) Skipped() bool { return false } diff --git a/vendor/github.com/mitchellh/go-testing-interface/testing_go19.go b/vendor/github.com/mitchellh/go-testing-interface/testing_go19.go deleted file mode 100644 index 31b42cadf8d..00000000000 --- a/vendor/github.com/mitchellh/go-testing-interface/testing_go19.go +++ /dev/null @@ -1,108 +0,0 @@ -// +build go1.9 - -// NOTE: This is a temporary copy of testing.go for Go 1.9 with the addition -// of "Helper" to the T interface. Go 1.9 at the time of typing is in RC -// and is set for release shortly. We'll support this on master as the default -// as soon as 1.9 is released. - -package testing - -import ( - "fmt" - "log" -) - -// T is the interface that mimics the standard library *testing.T. -// -// In unit tests you can just pass a *testing.T struct. At runtime, outside -// of tests, you can pass in a RuntimeT struct from this package. -type T interface { - Error(args ...interface{}) - Errorf(format string, args ...interface{}) - Fail() - FailNow() - Failed() bool - Fatal(args ...interface{}) - Fatalf(format string, args ...interface{}) - Log(args ...interface{}) - Logf(format string, args ...interface{}) - Name() string - Skip(args ...interface{}) - SkipNow() - Skipf(format string, args ...interface{}) - Skipped() bool - Helper() -} - -// RuntimeT implements T and can be instantiated and run at runtime to -// mimic *testing.T behavior. Unlike *testing.T, this will simply panic -// for calls to Fatal. For calls to Error, you'll have to check the errors -// list to determine whether to exit yourself. -type RuntimeT struct { - skipped bool - failed bool -} - -func (t *RuntimeT) Error(args ...interface{}) { - log.Println(fmt.Sprintln(args...)) - t.Fail() -} - -func (t *RuntimeT) Errorf(format string, args ...interface{}) { - log.Printf(format, args...) - t.Fail() -} - -func (t *RuntimeT) Fail() { - t.failed = true -} - -func (t *RuntimeT) FailNow() { - panic("testing.T failed, see logs for output (if any)") -} - -func (t *RuntimeT) Failed() bool { - return t.failed -} - -func (t *RuntimeT) Fatal(args ...interface{}) { - log.Print(args...) - t.FailNow() -} - -func (t *RuntimeT) Fatalf(format string, args ...interface{}) { - log.Printf(format, args...) 
- t.FailNow() -} - -func (t *RuntimeT) Log(args ...interface{}) { - log.Println(fmt.Sprintln(args...)) -} - -func (t *RuntimeT) Logf(format string, args ...interface{}) { - log.Println(fmt.Sprintf(format, args...)) -} - -func (t *RuntimeT) Name() string { - return "" -} - -func (t *RuntimeT) Skip(args ...interface{}) { - log.Print(args...) - t.SkipNow() -} - -func (t *RuntimeT) SkipNow() { - t.skipped = true -} - -func (t *RuntimeT) Skipf(format string, args ...interface{}) { - log.Printf(format, args...) - t.SkipNow() -} - -func (t *RuntimeT) Skipped() bool { - return t.skipped -} - -func (t *RuntimeT) Helper() {} diff --git a/vendor/github.com/mostynb/go-grpc-compression/internal/zstd/zstd.go b/vendor/github.com/mostynb/go-grpc-compression/internal/zstd/zstd.go new file mode 100644 index 00000000000..66e5091136a --- /dev/null +++ b/vendor/github.com/mostynb/go-grpc-compression/internal/zstd/zstd.go @@ -0,0 +1,146 @@ +// Copyright 2020 Mostyn Bramley-Moore. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package zstd is a wrapper for using github.com/klauspost/compress/zstd +// with gRPC. +package zstd + +import ( + "bytes" + "errors" + "io" + "runtime" + "sync" + + "github.com/klauspost/compress/zstd" + "google.golang.org/grpc/encoding" +) + +const Name = "zstd" + +var encoderOptions = []zstd.EOption{ + // The default zstd window size is 8MB, which is much larger than the + // typical RPC message and wastes a bunch of memory. + zstd.WithWindowSize(512 * 1024), +} + +var decoderOptions = []zstd.DOption{ + // If the decoder concurrency level is not 1, we would need to call + // Close() to avoid leaking resources when the object is released + // from compressor.decoderPool. + zstd.WithDecoderConcurrency(1), +} + +// We will set a finalizer on these objects, so when the go-grpc code is +// finished with them, they will be added back to compressor.decoderPool. +type decoderWrapper struct { + *zstd.Decoder +} + +type compressor struct { + encoder *zstd.Encoder + decoderPool sync.Pool // To hold *zstd.Decoder's. +} + +func PretendInit(clobbering bool) { + if !clobbering && encoding.GetCompressor(Name) != nil { + return + } + + enc, _ := zstd.NewWriter(nil, encoderOptions...) + c := &compressor{ + encoder: enc, + } + encoding.RegisterCompressor(c) +} + +var ErrNotInUse = errors.New("SetLevel ineffective because another zstd compressor has been registered") + +// SetLevel updates the registered compressor to use a particular compression +// level. NOTE: this function must only be called from an init function, and +// is not threadsafe. 
+func SetLevel(level zstd.EncoderLevel) error { + c, ok := encoding.GetCompressor(Name).(*compressor) + if !ok { + return ErrNotInUse + } + + enc, err := zstd.NewWriter(nil, zstd.WithEncoderLevel(level)) + if err != nil { + return err + } + + c.encoder = enc + return nil +} + +func (c *compressor) Compress(w io.Writer) (io.WriteCloser, error) { + return &zstdWriteCloser{ + enc: c.encoder, + writer: w, + }, nil +} + +type zstdWriteCloser struct { + enc *zstd.Encoder + writer io.Writer // Compressed data will be written here. + buf bytes.Buffer // Buffer uncompressed data here, compress on Close. +} + +func (z *zstdWriteCloser) Write(p []byte) (int, error) { + return z.buf.Write(p) +} + +func (z *zstdWriteCloser) Close() error { + compressed := z.enc.EncodeAll(z.buf.Bytes(), nil) + _, err := io.Copy(z.writer, bytes.NewReader(compressed)) + return err +} + +func (c *compressor) Decompress(r io.Reader) (io.Reader, error) { + var err error + var found bool + var decoder *zstd.Decoder + + // Note: avoid the use of zstd.Decoder.DecodeAll here, since + // malicious payloads could DoS us with a decompression bomb. + + decoder, found = c.decoderPool.Get().(*zstd.Decoder) + if !found { + decoder, err = zstd.NewReader(r, decoderOptions...) + if err != nil { + return nil, err + } + } else { + err = decoder.Reset(r) + if err != nil { + c.decoderPool.Put(decoder) + return nil, err + } + } + + wrapper := &decoderWrapper{Decoder: decoder} + runtime.SetFinalizer(wrapper, func(dw *decoderWrapper) { + err := dw.Reset(nil) + if err == nil { + c.decoderPool.Put(dw.Decoder) + } + }) + + return wrapper, nil +} + +func (c *compressor) Name() string { + return Name +} diff --git a/vendor/github.com/mostynb/go-grpc-compression/nonclobbering/zstd/zstd.go b/vendor/github.com/mostynb/go-grpc-compression/nonclobbering/zstd/zstd.go new file mode 100644 index 00000000000..18b94d93fb0 --- /dev/null +++ b/vendor/github.com/mostynb/go-grpc-compression/nonclobbering/zstd/zstd.go @@ -0,0 +1,52 @@ +// Copyright 2023 Mostyn Bramley-Moore. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package github.com/mostynb/go-grpc-compression/nonclobbering/zstd is a +// wrapper for using github.com/klauspost/compress/zstd with gRPC. +// +// If you import this package, it will only register itself as the encoder +// for the "zstd" compressor if no other compressors have already been +// registered with that name. +// +// If you do want to override previously registered "zstd" compressors, +// then you should instead import +// github.com/mostynb/go-grpc-compression/zstd +package zstd + +import ( + internalzstd "github.com/mostynb/go-grpc-compression/internal/zstd" + + "github.com/klauspost/compress/zstd" +) + +const Name = internalzstd.Name + +func init() { + clobbering := false + internalzstd.PretendInit(clobbering) +} + +var ErrNotInUse = internalzstd.ErrNotInUse + +// SetLevel updates the registered compressor to use a particular compression +// level. 
Returns ErrNotInUse if this module isn't registered (because it has +// been overridden by another encoder with the same name), or any error +// returned by zstd.NewWriter(nil, zstd.WithEncoderLevel(level). +// +// NOTE: this function is not threadsafe and must only be called from an init +// function or from the main goroutine before any other goroutines have been +// created. +func SetLevel(level zstd.EncoderLevel) error { + return internalzstd.SetLevel(level) +} diff --git a/vendor/github.com/oklog/run/.gitignore b/vendor/github.com/oklog/run/.gitignore deleted file mode 100644 index a1338d68517..00000000000 --- a/vendor/github.com/oklog/run/.gitignore +++ /dev/null @@ -1,14 +0,0 @@ -# Binaries for programs and plugins -*.exe -*.dll -*.so -*.dylib - -# Test binary, build with `go test -c` -*.test - -# Output of the go coverage tool, specifically when used with LiteIDE -*.out - -# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 -.glide/ diff --git a/vendor/github.com/oklog/run/README.md b/vendor/github.com/oklog/run/README.md deleted file mode 100644 index eba7d11cf3a..00000000000 --- a/vendor/github.com/oklog/run/README.md +++ /dev/null @@ -1,75 +0,0 @@ -# run - -[![GoDoc](https://godoc.org/github.com/oklog/run?status.svg)](https://godoc.org/github.com/oklog/run) -[![Build Status](https://img.shields.io/endpoint.svg?url=https%3A%2F%2Factions-badge.atrox.dev%2Foklog%2Frun%2Fbadge&style=flat-square&label=build)](https://github.com/oklog/run/actions?query=workflow%3ATest) -[![Go Report Card](https://goreportcard.com/badge/github.com/oklog/run)](https://goreportcard.com/report/github.com/oklog/run) -[![Apache 2 licensed](https://img.shields.io/badge/license-Apache2-blue.svg)](https://raw.githubusercontent.com/oklog/run/master/LICENSE) - -run.Group is a universal mechanism to manage goroutine lifecycles. - -Create a zero-value run.Group, and then add actors to it. Actors are defined as -a pair of functions: an **execute** function, which should run synchronously; -and an **interrupt** function, which, when invoked, should cause the execute -function to return. Finally, invoke Run, which concurrently runs all of the -actors, waits until the first actor exits, invokes the interrupt functions, and -finally returns control to the caller only once all actors have returned. This -general-purpose API allows callers to model pretty much any runnable task, and -achieve well-defined lifecycle semantics for the group. - -run.Group was written to manage component lifecycles in func main for -[OK Log](https://github.com/oklog/oklog). -But it's useful in any circumstance where you need to orchestrate multiple -goroutines as a unit whole. -[Click here](https://www.youtube.com/watch?v=LHe1Cb_Ud_M&t=15m45s) to see a -video of a talk where run.Group is described. - -## Examples - -### context.Context - -```go -ctx, cancel := context.WithCancel(context.Background()) -g.Add(func() error { - return myProcess(ctx, ...) -}, func(error) { - cancel() -}) -``` - -### net.Listener - -```go -ln, _ := net.Listen("tcp", ":8080") -g.Add(func() error { - return http.Serve(ln, nil) -}, func(error) { - ln.Close() -}) -``` - -### io.ReadCloser - -```go -var conn io.ReadCloser = ... 
-g.Add(func() error { - s := bufio.NewScanner(conn) - for s.Scan() { - println(s.Text()) - } - return s.Err() -}, func(error) { - conn.Close() -}) -``` - -## Comparisons - -Package run is somewhat similar to package -[errgroup](https://godoc.org/golang.org/x/sync/errgroup), -except it doesn't require actor goroutines to understand context semantics. - -It's somewhat similar to package -[tomb.v1](https://godoc.org/gopkg.in/tomb.v1) or -[tomb.v2](https://godoc.org/gopkg.in/tomb.v2), -except it has a much smaller API surface, delegating e.g. staged shutdown of -goroutines to the caller. diff --git a/vendor/github.com/oklog/run/actors.go b/vendor/github.com/oklog/run/actors.go deleted file mode 100644 index ef93495d3f0..00000000000 --- a/vendor/github.com/oklog/run/actors.go +++ /dev/null @@ -1,38 +0,0 @@ -package run - -import ( - "context" - "fmt" - "os" - "os/signal" -) - -// SignalHandler returns an actor, i.e. an execute and interrupt func, that -// terminates with SignalError when the process receives one of the provided -// signals, or the parent context is canceled. -func SignalHandler(ctx context.Context, signals ...os.Signal) (execute func() error, interrupt func(error)) { - ctx, cancel := context.WithCancel(ctx) - return func() error { - c := make(chan os.Signal, 1) - signal.Notify(c, signals...) - select { - case sig := <-c: - return SignalError{Signal: sig} - case <-ctx.Done(): - return ctx.Err() - } - }, func(error) { - cancel() - } -} - -// SignalError is returned by the signal handler's execute function -// when it terminates due to a received signal. -type SignalError struct { - Signal os.Signal -} - -// Error implements the error interface. -func (e SignalError) Error() string { - return fmt.Sprintf("received signal %s", e.Signal) -} diff --git a/vendor/github.com/oklog/run/group.go b/vendor/github.com/oklog/run/group.go deleted file mode 100644 index 832d47dd169..00000000000 --- a/vendor/github.com/oklog/run/group.go +++ /dev/null @@ -1,62 +0,0 @@ -// Package run implements an actor-runner with deterministic teardown. It is -// somewhat similar to package errgroup, except it does not require actor -// goroutines to understand context semantics. This makes it suitable for use in -// more circumstances; for example, goroutines which are handling connections -// from net.Listeners, or scanning input from a closable io.Reader. -package run - -// Group collects actors (functions) and runs them concurrently. -// When one actor (function) returns, all actors are interrupted. -// The zero value of a Group is useful. -type Group struct { - actors []actor -} - -// Add an actor (function) to the group. Each actor must be pre-emptable by an -// interrupt function. That is, if interrupt is invoked, execute should return. -// Also, it must be safe to call interrupt even after execute has returned. -// -// The first actor (function) to return interrupts all running actors. -// The error is passed to the interrupt functions, and is returned by Run. -func (g *Group) Add(execute func() error, interrupt func(error)) { - g.actors = append(g.actors, actor{execute, interrupt}) -} - -// Run all actors (functions) concurrently. -// When the first actor returns, all others are interrupted. -// Run only returns when all actors have exited. -// Run returns the error returned by the first exiting actor. -func (g *Group) Run() error { - if len(g.actors) == 0 { - return nil - } - - // Run each actor. 
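The actor model described in the README being removed here is small enough to demonstrate end to end. A minimal sketch of run.Group with two hypothetical actors:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/oklog/run"
)

func main() {
	var g run.Group // the zero value is useful

	// Actor 1: blocks until its context is canceled.
	ctx, cancel := context.WithCancel(context.Background())
	g.Add(func() error {
		<-ctx.Done()
		return ctx.Err()
	}, func(error) {
		cancel()
	})

	// Actor 2: finishes first, which interrupts actor 1.
	g.Add(func() error {
		time.Sleep(100 * time.Millisecond)
		return fmt.Errorf("worker finished")
	}, func(error) {})

	// Run returns the first actor's error once all actors have exited.
	fmt.Println("group exited:", g.Run())
}
```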
- errors := make(chan error, len(g.actors)) - for _, a := range g.actors { - go func(a actor) { - errors <- a.execute() - }(a) - } - - // Wait for the first actor to stop. - err := <-errors - - // Signal all actors to stop. - for _, a := range g.actors { - a.interrupt(err) - } - - // Wait for all actors to stop. - for i := 1; i < cap(errors); i++ { - <-errors - } - - // Return the original error. - return err -} - -type actor struct { - execute func() error - interrupt func(error) -} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/README.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/README.md index d5ac9644199..1fcb128a8a5 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/README.md +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/README.md @@ -8,7 +8,7 @@ | Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Aexporter%2Fkafka%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Aexporter%2Fkafka) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Aexporter%2Fkafka%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Aexporter%2Fkafka) | | [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@pavolloffay](https://www.github.com/pavolloffay), [@MovieStoreGuy](https://www.github.com/MovieStoreGuy) | -[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta +[beta]: https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-stability.md#beta [core]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol [contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib @@ -24,8 +24,8 @@ The following settings can be optionally configured: - `brokers` (default = localhost:9092): The list of kafka brokers. - `resolve_canonical_bootstrap_servers_only` (default = false): Whether to resolve then reverse-lookup broker IPs during startup. - `client_id` (default = "sarama"): The client ID to configure the Sarama Kafka client with. The client ID will be used for all produce requests. -- `topic` (default = otlp_spans for traces, otlp_metrics for metrics, otlp_logs for logs): The name of the kafka topic to export to. -- `topic_from_attribute` (default = ""): Specify the resource attribute whose value should be used as the message's topic. This option, when set, will take precedence over the default topic. If `topic_from_attribute` is not set, the message's topic will be set to the value of the configuration option `topic` instead. +- `topic` (default = otlp_spans for traces, otlp_metrics for metrics, otlp_logs for logs): The name of the default kafka topic to export to. See [Destination Topic](#destination-topic) below for more details. +- `topic_from_attribute` (default = ""): Specify the resource attribute whose value should be used as the message's topic. 
See [Destination Topic](#destination-topic) below for more details. - `encoding` (default = otlp_proto): The encoding of the traces sent to kafka. All available encodings: - `otlp_proto`: payload is Protobuf serialized from `ExportTraceServiceRequest` if set as a traces exporter or `ExportMetricsServiceRequest` for metrics or `ExportLogsServiceRequest` for logs. - `otlp_json`: payload is JSON serialized from `ExportTraceServiceRequest` if set as a traces exporter or `ExportMetricsServiceRequest` for metrics or `ExportLogsServiceRequest` for logs. @@ -38,6 +38,7 @@ The following settings can be optionally configured: - `raw`: if the log record body is a byte array, it is sent as is. Otherwise, it is serialized to JSON. Resource and record attributes are discarded. - `partition_traces_by_id` (default = false): configures the exporter to include the trace ID as the message key in trace messages sent to kafka. *Please note:* this setting does not have any effect on Jaeger encoding exporters since Jaeger exporters include trace ID as the message key by default. - `partition_metrics_by_resource_attributes` (default = false) configures the exporter to include the hash of sorted resource attributes as the message partitioning key in metric messages sent to kafka. +- `partition_logs_by_resource_attributes` (default = false) configures the exporter to include the hash of sorted resource attributes as the message partitioning key in log messages sent to kafka. - `auth` - `plain_text` - `username`: The username to use. @@ -45,9 +46,9 @@ The following settings can be optionally configured: - `sasl` - `username`: The username to use. - `password`: The password to use - - `mechanism`: The SASL mechanism to use (SCRAM-SHA-256, SCRAM-SHA-512, AWS_MSK_IAM or PLAIN) + - `mechanism`: The SASL mechanism to use (SCRAM-SHA-256, SCRAM-SHA-512, AWS_MSK_IAM, AWS_MSK_IAM_OAUTHBEARER or PLAIN) - `version` (default = 0): The SASL protocol version to use (0 or 1) - - `aws_msk.region`: AWS Region in case of AWS_MSK_IAM mechanism + - `aws_msk.region`: AWS Region in case of AWS_MSK_IAM or AWS_MSK_IAM_OAUTHBEARER mechanism - `aws_msk.broker_addr`: MSK Broker address in case of AWS_MSK_IAM mechanism - `tls`: see [TLS Configuration Settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/configtls/README.md) for the full set of available options. - `ca_file`: path to the CA cert. For a client this verifies the server certificate. Should @@ -68,6 +69,7 @@ The following settings can be optionally configured: - `password`: The Kerberos password used for authenticate with KDC - `config_file`: Path to Kerberos configuration. i.e /etc/krb5.conf - `keytab_file`: Path to keytab file. i.e /etc/security/kafka.keytab + - `disable_fast_negotiation`: Disable PA-FX-FAST negotiation (Pre-Authentication Framework - Fast). Some common Kerberos implementations do not support PA-FX-FAST negotiation. This is set to `false` by default. - `metadata` - `full` (default = true): Whether to maintain a full set of metadata. When disabled, the client does not make the initial request to broker at the @@ -103,3 +105,9 @@ exporters: - localhost:9092 protocol_version: 2.0.0 ``` + +## Destination Topic +The destination topic can be defined in a few different ways and takes priority in the following order: +1. When `topic_from_attribute` is configured, and the corresponding attribute is found on the ingested data, the value of this attribute is used. +2. 
If a prior component in the collector pipeline sets the topic on the context via the `topic.WithTopic` function (from the `github.com/open-telemetry/opentelemetry-collector-contrib/pkg/kafka/topic` package), the value set in the context is used. +3. Finally, the `topic` configuration is used as a default/fallback destination. diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/config.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/config.go index d048b27a88f..aac2cf77c09 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/config.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/config.go @@ -17,9 +17,9 @@ import ( // Config defines configuration for Kafka exporter. type Config struct { - exporterhelper.TimeoutSettings `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct. - exporterhelper.QueueSettings `mapstructure:"sending_queue"` - configretry.BackOffConfig `mapstructure:"retry_on_failure"` + TimeoutSettings exporterhelper.TimeoutConfig `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct. + QueueSettings exporterhelper.QueueConfig `mapstructure:"sending_queue"` + configretry.BackOffConfig `mapstructure:"retry_on_failure"` // The list of kafka brokers (default localhost:9092) Brokers []string `mapstructure:"brokers"` @@ -53,6 +53,8 @@ type Config struct { PartitionMetricsByResourceAttributes bool `mapstructure:"partition_metrics_by_resource_attributes"` + PartitionLogsByResourceAttributes bool `mapstructure:"partition_logs_by_resource_attributes"` + // Metadata is the namespace for metadata management properties used by the // Client, and shared by the Producer/Consumer. Metadata Metadata `mapstructure:"metadata"` @@ -142,10 +144,10 @@ func validateSASLConfig(c *kafka.SASLConfig) error { } switch c.Mechanism { - case "PLAIN", "AWS_MSK_IAM", "SCRAM-SHA-256", "SCRAM-SHA-512": + case "PLAIN", "AWS_MSK_IAM", "AWS_MSK_IAM_OAUTHBEARER", "SCRAM-SHA-256", "SCRAM-SHA-512": // Do nothing, valid mechanism default: - return fmt.Errorf("auth.sasl.mechanism should be one of 'PLAIN', 'AWS_MSK_IAM', 'SCRAM-SHA-256' or 'SCRAM-SHA-512'. configured value %v", c.Mechanism) + return fmt.Errorf("auth.sasl.mechanism should be one of 'PLAIN', 'AWS_MSK_IAM', 'AWS_MSK_IAM_OAUTHBEARER', 'SCRAM-SHA-256' or 'SCRAM-SHA-512'. configured value %v", c.Mechanism) } if c.Version < 0 || c.Version > 1 { diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/factory.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/factory.go index d990a17dab8..eb366d7cfa8 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/factory.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/factory.go @@ -40,45 +40,16 @@ const ( defaultFluxMaxMessages = 0 // partitioning metrics by resource attributes is disabled by default defaultPartitionMetricsByResourceAttributesEnabled = false + // partitioning logs by resource attributes is disabled by default + defaultPartitionLogsByResourceAttributesEnabled = false ) // FactoryOption applies changes to kafkaExporterFactory. type FactoryOption func(factory *kafkaExporterFactory) -// withTracesMarshalers adds tracesMarshalers. 
-func withTracesMarshalers(tracesMarshalers ...TracesMarshaler) FactoryOption { - return func(factory *kafkaExporterFactory) { - for _, marshaler := range tracesMarshalers { - factory.tracesMarshalers[marshaler.Encoding()] = marshaler - } - } -} - -// withMetricsMarshalers adds additional metric marshalers to the exporter factory. -func withMetricsMarshalers(metricMarshalers ...MetricsMarshaler) FactoryOption { - return func(factory *kafkaExporterFactory) { - for _, marshaler := range metricMarshalers { - factory.metricsMarshalers[marshaler.Encoding()] = marshaler - } - } -} - -// withLogsMarshalers adds additional log marshalers to the exporter factory. -func withLogsMarshalers(logsMarshalers ...LogsMarshaler) FactoryOption { - return func(factory *kafkaExporterFactory) { - for _, marshaler := range logsMarshalers { - factory.logsMarshalers[marshaler.Encoding()] = marshaler - } - } -} - // NewFactory creates Kafka exporter factory. func NewFactory(options ...FactoryOption) exporter.Factory { - f := &kafkaExporterFactory{ - tracesMarshalers: tracesMarshalers(), - metricsMarshalers: metricsMarshalers(), - logsMarshalers: logsMarshalers(), - } + f := &kafkaExporterFactory{} for _, o := range options { o(f) } @@ -93,15 +64,16 @@ func NewFactory(options ...FactoryOption) exporter.Factory { func createDefaultConfig() component.Config { return &Config{ - TimeoutSettings: exporterhelper.NewDefaultTimeoutSettings(), + TimeoutSettings: exporterhelper.NewDefaultTimeoutConfig(), BackOffConfig: configretry.NewDefaultBackOffConfig(), - QueueSettings: exporterhelper.NewDefaultQueueSettings(), + QueueSettings: exporterhelper.NewDefaultQueueConfig(), Brokers: []string{defaultBroker}, ClientID: defaultClientID, // using an empty topic to track when it has not been set by user, default is based on traces or metrics. Topic: "", Encoding: defaultEncoding, PartitionMetricsByResourceAttributes: defaultPartitionMetricsByResourceAttributesEnabled, + PartitionLogsByResourceAttributes: defaultPartitionLogsByResourceAttributesEnabled, Metadata: Metadata{ Full: defaultMetadataFull, Retry: MetadataRetry{ @@ -118,15 +90,11 @@ func createDefaultConfig() component.Config { } } -type kafkaExporterFactory struct { - tracesMarshalers map[string]TracesMarshaler - metricsMarshalers map[string]MetricsMarshaler - logsMarshalers map[string]LogsMarshaler -} +type kafkaExporterFactory struct{} func (f *kafkaExporterFactory) createTracesExporter( ctx context.Context, - set exporter.CreateSettings, + set exporter.Settings, cfg component.Config, ) (exporter.Traces, error) { oCfg := *(cfg.(*Config)) // Clone the config @@ -136,11 +104,8 @@ func (f *kafkaExporterFactory) createTracesExporter( if oCfg.Encoding == "otlp_json" { set.Logger.Info("otlp_json is considered experimental and should not be used in a production environment") } - exp, err := newTracesExporter(oCfg, set, f.tracesMarshalers) - if err != nil { - return nil, err - } - return exporterhelper.NewTracesExporter( + exp := newTracesExporter(oCfg, set) + return exporterhelper.NewTraces( ctx, set, &oCfg, @@ -148,7 +113,7 @@ func (f *kafkaExporterFactory) createTracesExporter( exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}), // Disable exporterhelper Timeout, because we cannot pass a Context to the Producer, // and will rely on the sarama Producer Timeout logic. 
- exporterhelper.WithTimeout(exporterhelper.TimeoutSettings{Timeout: 0}), + exporterhelper.WithTimeout(exporterhelper.TimeoutConfig{Timeout: 0}), exporterhelper.WithRetry(oCfg.BackOffConfig), exporterhelper.WithQueue(oCfg.QueueSettings), exporterhelper.WithStart(exp.start), @@ -157,7 +122,7 @@ func (f *kafkaExporterFactory) createTracesExporter( func (f *kafkaExporterFactory) createMetricsExporter( ctx context.Context, - set exporter.CreateSettings, + set exporter.Settings, cfg component.Config, ) (exporter.Metrics, error) { oCfg := *(cfg.(*Config)) // Clone the config @@ -167,11 +132,8 @@ func (f *kafkaExporterFactory) createMetricsExporter( if oCfg.Encoding == "otlp_json" { set.Logger.Info("otlp_json is considered experimental and should not be used in a production environment") } - exp, err := newMetricsExporter(oCfg, set, f.metricsMarshalers) - if err != nil { - return nil, err - } - return exporterhelper.NewMetricsExporter( + exp := newMetricsExporter(oCfg, set) + return exporterhelper.NewMetrics( ctx, set, &oCfg, @@ -179,7 +141,7 @@ func (f *kafkaExporterFactory) createMetricsExporter( exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}), // Disable exporterhelper Timeout, because we cannot pass a Context to the Producer, // and will rely on the sarama Producer Timeout logic. - exporterhelper.WithTimeout(exporterhelper.TimeoutSettings{Timeout: 0}), + exporterhelper.WithTimeout(exporterhelper.TimeoutConfig{Timeout: 0}), exporterhelper.WithRetry(oCfg.BackOffConfig), exporterhelper.WithQueue(oCfg.QueueSettings), exporterhelper.WithStart(exp.start), @@ -188,7 +150,7 @@ func (f *kafkaExporterFactory) createMetricsExporter( func (f *kafkaExporterFactory) createLogsExporter( ctx context.Context, - set exporter.CreateSettings, + set exporter.Settings, cfg component.Config, ) (exporter.Logs, error) { oCfg := *(cfg.(*Config)) // Clone the config @@ -198,11 +160,8 @@ func (f *kafkaExporterFactory) createLogsExporter( if oCfg.Encoding == "otlp_json" { set.Logger.Info("otlp_json is considered experimental and should not be used in a production environment") } - exp, err := newLogsExporter(oCfg, set, f.logsMarshalers) - if err != nil { - return nil, err - } - return exporterhelper.NewLogsExporter( + exp := newLogsExporter(oCfg, set) + return exporterhelper.NewLogs( ctx, set, &oCfg, @@ -210,7 +169,7 @@ func (f *kafkaExporterFactory) createLogsExporter( exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}), // Disable exporterhelper Timeout, because we cannot pass a Context to the Producer, // and will rely on the sarama Producer Timeout logic. 
- exporterhelper.WithTimeout(exporterhelper.TimeoutSettings{Timeout: 0}), + exporterhelper.WithTimeout(exporterhelper.TimeoutConfig{Timeout: 0}), exporterhelper.WithRetry(oCfg.BackOffConfig), exporterhelper.WithQueue(oCfg.QueueSettings), exporterhelper.WithStart(exp.start), diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/internal/metadata/generated_status.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/internal/metadata/generated_status.go index 63e9bce921a..e139063fd64 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/internal/metadata/generated_status.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/internal/metadata/generated_status.go @@ -7,7 +7,8 @@ import ( ) var ( - Type = component.MustNewType("kafka") + Type = component.MustNewType("kafka") + ScopeName = "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter" ) const ( diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/internal/metadata/generated_telemetry.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/internal/metadata/generated_telemetry.go deleted file mode 100644 index 5f4488d9269..00000000000 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/internal/metadata/generated_telemetry.go +++ /dev/null @@ -1,17 +0,0 @@ -// Code generated by mdatagen. DO NOT EDIT. - -package metadata - -import ( - "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/trace" -) - -func Meter(settings component.TelemetrySettings) metric.Meter { - return settings.MeterProvider.Meter("otelcol/kafka") -} - -func Tracer(settings component.TelemetrySettings) trace.Tracer { - return settings.TracerProvider.Tracer("otelcol/kafka") -} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/jaeger_marshaler.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/jaeger_marshaler.go index abc73c22f18..50640c83763 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/jaeger_marshaler.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/jaeger_marshaler.go @@ -22,10 +22,7 @@ type jaegerMarshaler struct { var _ TracesMarshaler = (*jaegerMarshaler)(nil) func (j jaegerMarshaler) Marshal(traces ptrace.Traces, topic string) ([]*sarama.ProducerMessage, error) { - batches, err := jaeger.ProtoFromTraces(traces) - if err != nil { - return nil, err - } + batches := jaeger.ProtoFromTraces(traces) var messages []*sarama.ProducerMessage var errs error @@ -58,8 +55,7 @@ type jaegerSpanMarshaler interface { encoding() string } -type jaegerProtoSpanMarshaler struct { -} +type jaegerProtoSpanMarshaler struct{} var _ jaegerSpanMarshaler = (*jaegerProtoSpanMarshaler)(nil) diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/kafka_exporter.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/kafka_exporter.go index 59fc26e647b..6720237fabe 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/kafka_exporter.go +++ 
b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/kafka_exporter.go @@ -19,6 +19,7 @@ import ( "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/kafka/topic" ) var errUnrecognizedEncoding = fmt.Errorf("unrecognized encoding") @@ -40,8 +41,8 @@ func (ke kafkaErrors) Error() string { return fmt.Sprintf("Failed to deliver %d messages due to %s", ke.count, ke.err) } -func (e *kafkaTracesProducer) tracesPusher(_ context.Context, td ptrace.Traces) error { - messages, err := e.marshaler.Marshal(td, getTopic(&e.cfg, td.ResourceSpans())) +func (e *kafkaTracesProducer) tracesPusher(ctx context.Context, td ptrace.Traces) error { + messages, err := e.marshaler.Marshal(td, getTopic(ctx, &e.cfg, td.ResourceSpans())) if err != nil { return consumererror.NewPermanent(err) } @@ -65,8 +66,24 @@ func (e *kafkaTracesProducer) Close(context.Context) error { return e.producer.Close() } -func (e *kafkaTracesProducer) start(_ context.Context, _ component.Host) error { - producer, err := newSaramaProducer(e.cfg) +func (e *kafkaTracesProducer) start(ctx context.Context, host component.Host) error { + // extensions take precedence over internal encodings + if marshaler, errExt := loadEncodingExtension[ptrace.Marshaler]( + host, + e.cfg.Encoding, + ); errExt == nil { + e.marshaler = &tracesEncodingMarshaler{ + marshaler: *marshaler, + encoding: e.cfg.Encoding, + } + } + if marshaler, errInt := createTracesMarshaler(e.cfg); e.marshaler == nil && errInt == nil { + e.marshaler = marshaler + } + if e.marshaler == nil { + return errUnrecognizedEncoding + } + producer, err := newSaramaProducer(ctx, e.cfg) if err != nil { return err } @@ -82,8 +99,8 @@ type kafkaMetricsProducer struct { logger *zap.Logger } -func (e *kafkaMetricsProducer) metricsDataPusher(_ context.Context, md pmetric.Metrics) error { - messages, err := e.marshaler.Marshal(md, getTopic(&e.cfg, md.ResourceMetrics())) +func (e *kafkaMetricsProducer) metricsDataPusher(ctx context.Context, md pmetric.Metrics) error { + messages, err := e.marshaler.Marshal(md, getTopic(ctx, &e.cfg, md.ResourceMetrics())) if err != nil { return consumererror.NewPermanent(err) } @@ -107,8 +124,24 @@ func (e *kafkaMetricsProducer) Close(context.Context) error { return e.producer.Close() } -func (e *kafkaMetricsProducer) start(_ context.Context, _ component.Host) error { - producer, err := newSaramaProducer(e.cfg) +func (e *kafkaMetricsProducer) start(ctx context.Context, host component.Host) error { + // extensions take precedence over internal encodings + if marshaler, errExt := loadEncodingExtension[pmetric.Marshaler]( + host, + e.cfg.Encoding, + ); errExt == nil { + e.marshaler = &metricsEncodingMarshaler{ + marshaler: *marshaler, + encoding: e.cfg.Encoding, + } + } + if marshaler, errInt := createMetricMarshaler(e.cfg); e.marshaler == nil && errInt == nil { + e.marshaler = marshaler + } + if e.marshaler == nil { + return errUnrecognizedEncoding + } + producer, err := newSaramaProducer(ctx, e.cfg) if err != nil { return err } @@ -124,8 +157,8 @@ type kafkaLogsProducer struct { logger *zap.Logger } -func (e *kafkaLogsProducer) logsDataPusher(_ context.Context, ld plog.Logs) error { - messages, err := e.marshaler.Marshal(ld, getTopic(&e.cfg, ld.ResourceLogs())) +func (e *kafkaLogsProducer) logsDataPusher(ctx context.Context, ld plog.Logs) error { + messages, err := e.marshaler.Marshal(ld, getTopic(ctx, &e.cfg, ld.ResourceLogs())) if 
err != nil { return consumererror.NewPermanent(err) } @@ -149,8 +182,24 @@ func (e *kafkaLogsProducer) Close(context.Context) error { return e.producer.Close() } -func (e *kafkaLogsProducer) start(_ context.Context, _ component.Host) error { - producer, err := newSaramaProducer(e.cfg) +func (e *kafkaLogsProducer) start(ctx context.Context, host component.Host) error { + // extensions take precedence over internal encodings + if marshaler, errExt := loadEncodingExtension[plog.Marshaler]( + host, + e.cfg.Encoding, + ); errExt == nil { + e.marshaler = &logsEncodingMarshaler{ + marshaler: *marshaler, + encoding: e.cfg.Encoding, + } + } + if marshaler, errInt := createLogMarshaler(e.cfg); e.marshaler == nil && errInt == nil { + e.marshaler = marshaler + } + if e.marshaler == nil { + return errUnrecognizedEncoding + } + producer, err := newSaramaProducer(ctx, e.cfg) if err != nil { return err } @@ -158,7 +207,7 @@ func (e *kafkaLogsProducer) start(_ context.Context, _ component.Host) error { return nil } -func newSaramaProducer(config Config) (sarama.SyncProducer, error) { +func newSaramaProducer(ctx context.Context, config Config) (sarama.SyncProducer, error) { c := sarama.NewConfig() c.ClientID = config.ClientID @@ -168,7 +217,7 @@ func newSaramaProducer(config Config) (sarama.SyncProducer, error) { c.Producer.Return.Errors = true c.Producer.RequiredAcks = config.Producer.RequiredAcks // Because sarama does not accept a Context for every message, set the Timeout here. - c.Producer.Timeout = config.Timeout + c.Producer.Timeout = config.TimeoutSettings.Timeout c.Metadata.Full = config.Metadata.Full c.Metadata.Retry.Max = config.Metadata.Retry.Max c.Metadata.Retry.Backoff = config.Metadata.Retry.Backoff @@ -187,7 +236,7 @@ func newSaramaProducer(config Config) (sarama.SyncProducer, error) { c.Version = version } - if err := kafka.ConfigureAuthentication(config.Authentication, c); err != nil { + if err := kafka.ConfigureAuthentication(ctx, config.Authentication, c); err != nil { return nil, err } @@ -204,56 +253,26 @@ func newSaramaProducer(config Config) (sarama.SyncProducer, error) { return producer, nil } -func newMetricsExporter(config Config, set exporter.CreateSettings, marshalers map[string]MetricsMarshaler) (*kafkaMetricsProducer, error) { - marshaler := marshalers[config.Encoding] - if marshaler == nil { - return nil, errUnrecognizedEncoding - } - if config.PartitionMetricsByResourceAttributes { - if keyableMarshaler, ok := marshaler.(KeyableMetricsMarshaler); ok { - keyableMarshaler.Key() - } - } - +func newMetricsExporter(config Config, set exporter.Settings) *kafkaMetricsProducer { return &kafkaMetricsProducer{ - cfg: config, - marshaler: marshaler, - logger: set.Logger, - }, nil - + cfg: config, + logger: set.Logger, + } } // newTracesExporter creates Kafka exporter. 
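newSaramaProducer above maps exporter settings onto sarama's producer configuration. A minimal sketch, assuming a placeholder broker address, client ID, and topic:

```go
package main

import (
	"log"
	"time"

	"github.com/IBM/sarama"
)

func main() {
	c := sarama.NewConfig()
	c.ClientID = "example-client" // placeholder
	// SyncProducer requires successes and errors to be returned.
	c.Producer.Return.Successes = true
	c.Producer.Return.Errors = true
	c.Producer.RequiredAcks = sarama.WaitForAll
	// The exporter disables exporterhelper's timeout and relies on this one.
	c.Producer.Timeout = 5 * time.Second
	c.Metadata.Full = true

	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, c)
	if err != nil {
		log.Fatalln("producer creation failed:", err)
	}
	defer producer.Close()

	_, _, err = producer.SendMessage(&sarama.ProducerMessage{
		Topic: "otlp_spans",
		Value: sarama.ByteEncoder([]byte("payload")),
	})
	if err != nil {
		log.Fatalln("send failed:", err)
	}
}
```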
-func newTracesExporter(config Config, set exporter.CreateSettings, marshalers map[string]TracesMarshaler) (*kafkaTracesProducer, error) { - marshaler := marshalers[config.Encoding] - if marshaler == nil { - return nil, errUnrecognizedEncoding - } - if config.PartitionTracesByID { - if keyableMarshaler, ok := marshaler.(KeyableTracesMarshaler); ok { - keyableMarshaler.Key() - } - } - +func newTracesExporter(config Config, set exporter.Settings) *kafkaTracesProducer { return &kafkaTracesProducer{ - cfg: config, - marshaler: marshaler, - logger: set.Logger, - }, nil -} - -func newLogsExporter(config Config, set exporter.CreateSettings, marshalers map[string]LogsMarshaler) (*kafkaLogsProducer, error) { - marshaler := marshalers[config.Encoding] - if marshaler == nil { - return nil, errUnrecognizedEncoding + cfg: config, + logger: set.Logger, } +} +func newLogsExporter(config Config, set exporter.Settings) *kafkaLogsProducer { return &kafkaLogsProducer{ - cfg: config, - marshaler: marshaler, - logger: set.Logger, - }, nil - + cfg: config, + logger: set.Logger, + } } type resourceSlice[T any] interface { @@ -265,15 +284,45 @@ type resource interface { Resource() pcommon.Resource } -func getTopic[T resource](cfg *Config, resources resourceSlice[T]) string { - if cfg.TopicFromAttribute == "" { - return cfg.Topic - } - for i := 0; i < resources.Len(); i++ { - rv, ok := resources.At(i).Resource().Attributes().Get(cfg.TopicFromAttribute) - if ok && rv.Str() != "" { - return rv.Str() +func getTopic[T resource](ctx context.Context, cfg *Config, resources resourceSlice[T]) string { + if cfg.TopicFromAttribute != "" { + for i := 0; i < resources.Len(); i++ { + rv, ok := resources.At(i).Resource().Attributes().Get(cfg.TopicFromAttribute) + if ok && rv.Str() != "" { + return rv.Str() + } } } + contextTopic, ok := topic.FromContext(ctx) + if ok { + return contextTopic + } return cfg.Topic } + +// loadEncodingExtension tries to load an available extension for the given encoding. +func loadEncodingExtension[T any](host component.Host, encoding string) (*T, error) { + extensionID, err := encodingToComponentID(encoding) + if err != nil { + return nil, err + } + encodingExtension, ok := host.GetExtensions()[*extensionID] + if !ok { + return nil, fmt.Errorf("unknown encoding extension %q", encoding) + } + unmarshaler, ok := encodingExtension.(T) + if !ok { + return nil, fmt.Errorf("extension %q is not an unmarshaler", encoding) + } + return &unmarshaler, nil +} + +// encodingToComponentID converts an encoding string to a component ID using the given encoding as type. 
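The new getTopic resolves the destination in the priority order documented in the README: resource attribute first, then a context override, then the configured default. A simplified sketch; topicKey and resolveTopic are illustrative stand-ins, not the real pkg/kafka/topic API:

```go
package main

import (
	"context"
	"fmt"
)

type topicKey struct{} // stand-in for the context key used by pkg/kafka/topic

func resolveTopic(ctx context.Context, attrTopic, cfgTopic string) string {
	if attrTopic != "" { // 1. topic_from_attribute match on the ingested data
		return attrTopic
	}
	if t, ok := ctx.Value(topicKey{}).(string); ok && t != "" { // 2. context override
		return t
	}
	return cfgTopic // 3. configured default/fallback
}

func main() {
	ctx := context.WithValue(context.Background(), topicKey{}, "from-context")
	fmt.Println(resolveTopic(ctx, "", "otlp_spans")) // "from-context"
}
```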
+func encodingToComponentID(encoding string) (*component.ID, error) { + componentType, err := component.NewType(encoding) + if err != nil { + return nil, fmt.Errorf("invalid component type: %w", err) + } + id := component.NewID(componentType) + return &id, nil +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/marshaler.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/marshaler.go index 4acb625407f..ba8f00ff5cf 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/marshaler.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/marshaler.go @@ -4,6 +4,8 @@ package kafkaexporter // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter" import ( + "fmt" + "github.com/IBM/sarama" "go.opentelemetry.io/collector/pdata/plog" "go.opentelemetry.io/collector/pdata/pmetric" @@ -39,42 +41,129 @@ type LogsMarshaler interface { Encoding() string } -// tracesMarshalers returns map of supported encodings with TracesMarshaler. -func tracesMarshalers() map[string]TracesMarshaler { - otlpPb := newPdataTracesMarshaler(&ptrace.ProtoMarshaler{}, defaultEncoding) - otlpJSON := newPdataTracesMarshaler(&ptrace.JSONMarshaler{}, "otlp_json") - zipkinProto := newPdataTracesMarshaler(zipkinv2.NewProtobufTracesMarshaler(), "zipkin_proto") - zipkinJSON := newPdataTracesMarshaler(zipkinv2.NewJSONTracesMarshaler(), "zipkin_json") +// creates TracesMarshaler based on the provided config +func createTracesMarshaler(config Config) (TracesMarshaler, error) { + encoding := config.Encoding + partitionTracesByID := config.PartitionTracesByID + jaegerProto := jaegerMarshaler{marshaler: jaegerProtoSpanMarshaler{}} jaegerJSON := jaegerMarshaler{marshaler: newJaegerJSONMarshaler()} - return map[string]TracesMarshaler{ - otlpPb.Encoding(): otlpPb, - otlpJSON.Encoding(): otlpJSON, - zipkinProto.Encoding(): zipkinProto, - zipkinJSON.Encoding(): zipkinJSON, - jaegerProto.Encoding(): jaegerProto, - jaegerJSON.Encoding(): jaegerJSON, + + switch encoding { + case defaultEncoding: + return newPdataTracesMarshaler(&ptrace.ProtoMarshaler{}, defaultEncoding, partitionTracesByID), nil + case "otlp_json": + return newPdataTracesMarshaler(&ptrace.JSONMarshaler{}, "otlp_json", partitionTracesByID), nil + case "zipkin_proto": + return newPdataTracesMarshaler(zipkinv2.NewProtobufTracesMarshaler(), "zipkin_proto", partitionTracesByID), nil + case "zipkin_json": + return newPdataTracesMarshaler(zipkinv2.NewJSONTracesMarshaler(), "zipkin_json", partitionTracesByID), nil + case jaegerProtoSpanMarshaler{}.encoding(): + return jaegerProto, nil + case jaegerJSON.Encoding(): + return jaegerJSON, nil + default: + return nil, errUnrecognizedEncoding } } -// metricsMarshalers returns map of supported encodings and MetricsMarshaler -func metricsMarshalers() map[string]MetricsMarshaler { - otlpPb := newPdataMetricsMarshaler(&pmetric.ProtoMarshaler{}, defaultEncoding) - otlpJSON := newPdataMetricsMarshaler(&pmetric.JSONMarshaler{}, "otlp_json") - return map[string]MetricsMarshaler{ - otlpPb.Encoding(): otlpPb, - otlpJSON.Encoding(): otlpJSON, +// creates MetricsMarshaler based on the provided config +func createMetricMarshaler(config Config) (MetricsMarshaler, error) { + encoding := config.Encoding + partitionMetricsByResources := config.PartitionMetricsByResourceAttributes + switch encoding { + case defaultEncoding: + return 
newPdataMetricsMarshaler(&pmetric.ProtoMarshaler{}, defaultEncoding, partitionMetricsByResources), nil + case "otlp_json": + return newPdataMetricsMarshaler(&pmetric.JSONMarshaler{}, "otlp_json", partitionMetricsByResources), nil + default: + return nil, errUnrecognizedEncoding } } -// logsMarshalers returns map of supported encodings and LogsMarshaler -func logsMarshalers() map[string]LogsMarshaler { - otlpPb := newPdataLogsMarshaler(&plog.ProtoMarshaler{}, defaultEncoding) - otlpJSON := newPdataLogsMarshaler(&plog.JSONMarshaler{}, "otlp_json") +// creates LogsMarshalers based on the provided config +func createLogMarshaler(config Config) (LogsMarshaler, error) { + encoding := config.Encoding + partitionLogsByAttributes := config.PartitionLogsByResourceAttributes + raw := newRawMarshaler() - return map[string]LogsMarshaler{ - otlpPb.Encoding(): otlpPb, - otlpJSON.Encoding(): otlpJSON, - raw.Encoding(): raw, + switch encoding { + case defaultEncoding: + return newPdataLogsMarshaler(&plog.ProtoMarshaler{}, defaultEncoding, partitionLogsByAttributes), nil + case "otlp_json": + return newPdataLogsMarshaler(&plog.JSONMarshaler{}, "otlp_json", partitionLogsByAttributes), nil + case raw.Encoding(): + return raw, nil + default: + return nil, errUnrecognizedEncoding } } + +// tracesEncodingMarshaler is a wrapper around ptrace.Marshaler that implements TracesMarshaler. +type tracesEncodingMarshaler struct { + marshaler ptrace.Marshaler + encoding string +} + +func (t *tracesEncodingMarshaler) Marshal(traces ptrace.Traces, topic string) ([]*sarama.ProducerMessage, error) { + var messages []*sarama.ProducerMessage + data, err := t.marshaler.MarshalTraces(traces) + if err != nil { + return nil, fmt.Errorf("failed to marshal traces: %w", err) + } + messages = append(messages, &sarama.ProducerMessage{ + Topic: topic, + Value: sarama.ByteEncoder(data), + }) + return messages, nil +} + +func (t *tracesEncodingMarshaler) Encoding() string { + return t.encoding +} + +// metricsEncodingMarshaler is a wrapper around pmetric.Marshaler that implements MetricsMarshaler. +type metricsEncodingMarshaler struct { + marshaler pmetric.Marshaler + encoding string +} + +func (m *metricsEncodingMarshaler) Marshal(metrics pmetric.Metrics, topic string) ([]*sarama.ProducerMessage, error) { + var messages []*sarama.ProducerMessage + data, err := m.marshaler.MarshalMetrics(metrics) + if err != nil { + return nil, fmt.Errorf("failed to marshal metrics: %w", err) + } + messages = append(messages, &sarama.ProducerMessage{ + Topic: topic, + Value: sarama.ByteEncoder(data), + }) + return messages, nil +} + +func (m *metricsEncodingMarshaler) Encoding() string { + return m.encoding +} + +// logsEncodingMarshaler is a wrapper around plog.Marshaler that implements LogsMarshaler. 
+type logsEncodingMarshaler struct { + marshaler plog.Marshaler + encoding string +} + +func (l *logsEncodingMarshaler) Marshal(logs plog.Logs, topic string) ([]*sarama.ProducerMessage, error) { + var messages []*sarama.ProducerMessage + data, err := l.marshaler.MarshalLogs(logs) + if err != nil { + return nil, fmt.Errorf("failed to marshal logs: %w", err) + } + messages = append(messages, &sarama.ProducerMessage{ + Topic: topic, + Value: sarama.ByteEncoder(data), + }) + return messages, nil +} + +func (l *logsEncodingMarshaler) Encoding() string { + return l.encoding +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/metadata.yaml b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/metadata.yaml index 7c168398950..ea264ca5e48 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/metadata.yaml +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/metadata.yaml @@ -1,5 +1,4 @@ type: kafka -scope_name: otelcol/kafka status: class: exporter diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/pdata_marshaler.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/pdata_marshaler.go index 3429cdd8316..ae9726f2cbe 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/pdata_marshaler.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/pdata_marshaler.go @@ -15,60 +15,72 @@ import ( ) type pdataLogsMarshaler struct { - marshaler plog.Marshaler - encoding string + marshaler plog.Marshaler + encoding string + partitionedByResources bool } func (p pdataLogsMarshaler) Marshal(ld plog.Logs, topic string) ([]*sarama.ProducerMessage, error) { - bts, err := p.marshaler.MarshalLogs(ld) - if err != nil { - return nil, err - } - return []*sarama.ProducerMessage{ - { + var msgs []*sarama.ProducerMessage + if p.partitionedByResources { + logs := ld.ResourceLogs() + + for i := 0; i < logs.Len(); i++ { + resourceMetrics := logs.At(i) + hash := pdatautil.MapHash(resourceMetrics.Resource().Attributes()) + + newLogs := plog.NewLogs() + resourceMetrics.CopyTo(newLogs.ResourceLogs().AppendEmpty()) + + bts, err := p.marshaler.MarshalLogs(newLogs) + if err != nil { + return nil, err + } + msgs = append(msgs, &sarama.ProducerMessage{ + Topic: topic, + Value: sarama.ByteEncoder(bts), + Key: sarama.ByteEncoder(hash[:]), + }) + } + } else { + bts, err := p.marshaler.MarshalLogs(ld) + if err != nil { + return nil, err + } + msgs = append(msgs, &sarama.ProducerMessage{ Topic: topic, Value: sarama.ByteEncoder(bts), - }, - }, nil + }) + } + return msgs, nil } func (p pdataLogsMarshaler) Encoding() string { return p.encoding } -func newPdataLogsMarshaler(marshaler plog.Marshaler, encoding string) LogsMarshaler { +func newPdataLogsMarshaler(marshaler plog.Marshaler, encoding string, partitionedByResources bool) LogsMarshaler { return pdataLogsMarshaler{ - marshaler: marshaler, - encoding: encoding, + marshaler: marshaler, + encoding: encoding, + partitionedByResources: partitionedByResources, } } -// KeyableMetricsMarshaler is an extension of the MetricsMarshaler interface intended to provide partition key capabilities -// for metrics messages -type KeyableMetricsMarshaler interface { - MetricsMarshaler - Key() -} - type pdataMetricsMarshaler struct { - marshaler 
pmetric.Marshaler - encoding string - keyed bool -} - -// Key configures the pdataMetricsMarshaler to set the message key on the kafka messages -func (p *pdataMetricsMarshaler) Key() { - p.keyed = true + marshaler pmetric.Marshaler + encoding string + partitionedByResources bool } func (p pdataMetricsMarshaler) Marshal(ld pmetric.Metrics, topic string) ([]*sarama.ProducerMessage, error) { var msgs []*sarama.ProducerMessage - if p.keyed { + if p.partitionedByResources { metrics := ld.ResourceMetrics() for i := 0; i < metrics.Len(); i++ { resourceMetrics := metrics.At(i) - var hash = pdatautil.MapHash(resourceMetrics.Resource().Attributes()) + hash := pdatautil.MapHash(resourceMetrics.Resource().Attributes()) newMetrics := pmetric.NewMetrics() resourceMetrics.CopyTo(newMetrics.ResourceMetrics().AppendEmpty()) @@ -101,29 +113,23 @@ func (p pdataMetricsMarshaler) Encoding() string { return p.encoding } -func newPdataMetricsMarshaler(marshaler pmetric.Marshaler, encoding string) MetricsMarshaler { +func newPdataMetricsMarshaler(marshaler pmetric.Marshaler, encoding string, partitionedByResources bool) MetricsMarshaler { return &pdataMetricsMarshaler{ - marshaler: marshaler, - encoding: encoding, + marshaler: marshaler, + encoding: encoding, + partitionedByResources: partitionedByResources, } } -// KeyableTracesMarshaler is an extension of the TracesMarshaler interface intended to provide partition key capabilities -// for trace messages -type KeyableTracesMarshaler interface { - TracesMarshaler - Key() -} - type pdataTracesMarshaler struct { - marshaler ptrace.Marshaler - encoding string - keyed bool + marshaler ptrace.Marshaler + encoding string + partitionedByTraceID bool } func (p *pdataTracesMarshaler) Marshal(td ptrace.Traces, topic string) ([]*sarama.ProducerMessage, error) { var msgs []*sarama.ProducerMessage - if p.keyed { + if p.partitionedByTraceID { for _, trace := range batchpersignal.SplitTraces(td) { bts, err := p.marshaler.MarshalTraces(trace) if err != nil { @@ -134,7 +140,6 @@ func (p *pdataTracesMarshaler) Marshal(td ptrace.Traces, topic string) ([]*saram Value: sarama.ByteEncoder(bts), Key: sarama.ByteEncoder(traceutil.TraceIDToHexOrEmptyString(trace.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).TraceID())), }) - } } else { bts, err := p.marshaler.MarshalTraces(td) @@ -154,14 +159,10 @@ func (p *pdataTracesMarshaler) Encoding() string { return p.encoding } -// Key configures the pdataTracesMarshaler to set the message key on the kafka messages -func (p *pdataTracesMarshaler) Key() { - p.keyed = true -} - -func newPdataTracesMarshaler(marshaler ptrace.Marshaler, encoding string) TracesMarshaler { +func newPdataTracesMarshaler(marshaler ptrace.Marshaler, encoding string, partitionedByTraceID bool) TracesMarshaler { return &pdataTracesMarshaler{ - marshaler: marshaler, - encoding: encoding, + marshaler: marshaler, + encoding: encoding, + partitionedByTraceID: partitionedByTraceID, } } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/raw_marshaler.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/raw_marshaler.go index 166be57f3e0..5a9e283436e 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/raw_marshaler.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/raw_marshaler.go @@ -14,8 +14,7 @@ import ( var errUnsupported = errors.New("unsupported serialization") -type rawMarshaler 
struct { -} +type rawMarshaler struct{} func newRawMarshaler() rawMarshaler { return rawMarshaler{} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter/README.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter/README.md index 97b53cd0172..ce07fcf541b 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter/README.md +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter/README.md @@ -8,7 +8,7 @@ | Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Aexporter%2Fzipkin%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Aexporter%2Fzipkin) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Aexporter%2Fzipkin%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Aexporter%2Fzipkin) | | [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@MovieStoreGuy](https://www.github.com/MovieStoreGuy), [@andrzej-stencel](https://www.github.com/andrzej-stencel), [@crobert-1](https://www.github.com/crobert-1) | -[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta +[beta]: https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-stability.md#beta [core]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol [contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter/config.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter/config.go index 5f18554fa17..a4202794c38 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter/config.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter/config.go @@ -14,8 +14,8 @@ import ( // Config defines configuration settings for the Zipkin exporter. type Config struct { - exporterhelper.QueueSettings `mapstructure:"sending_queue"` - configretry.BackOffConfig `mapstructure:"retry_on_failure"` + QueueSettings exporterhelper.QueueConfig `mapstructure:"sending_queue"` + configretry.BackOffConfig `mapstructure:"retry_on_failure"` // Configures the exporter client. // The Endpoint to send the Zipkin trace data to (e.g.: http://some.url:9411/api/v2/spans). 
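A note on the kafkaexporter hunks earlier in this patch: getTopic now consults a second source, a topic carried on the request context via the new pkg/kafka/topic package vendored later in this patch. The resource attribute named by topic_from_attribute still takes precedence, then the context value, then the static cfg.Topic. Below is a hedged sketch of the context round trip; only topic.FromContext appears in this diff, so the WithTopic injector shown here is an assumption about that package's API:

    import (
        "context"

        "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/kafka/topic"
    )

    // perRequestTopic sketches a per-request topic override. WithTopic is
    // assumed to be the injector that pairs with the topic.FromContext call
    // made by getTopic; it is not shown in the hunks above.
    func perRequestTopic() {
        ctx := topic.WithTopic(context.Background(), "traces-override")
        if t, ok := topic.FromContext(ctx); ok {
            _ = t // "traces-override" is used before falling back to cfg.Topic
        }
    }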
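The zipkinexporter config hunk above and the factory hunk below track the collector v0.116.0 exporterhelper renames: the embedded exporterhelper.QueueSettings becomes a named QueueSettings field of type exporterhelper.QueueConfig, NewTracesExporter becomes NewTraces, exporter.CreateSettings becomes exporter.Settings, and TimeoutSettings becomes TimeoutConfig. A minimal sketch of the post-rename config shape (the concrete values are illustrative, not taken from this patch; imports assumed from go.opentelemetry.io/collector):

    // QueueConfig replaces the deprecated QueueSettings type, and
    // NewDefaultQueueConfig replaces NewDefaultQueueSettings.
    cfg := &Config{
        BackOffConfig: configretry.NewDefaultBackOffConfig(),
        QueueSettings: exporterhelper.NewDefaultQueueConfig(),
    }
    cfg.QueueSettings.NumConsumers = 4 // illustrative; QueueConfig keeps the same field names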
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter/factory.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter/factory.go index d3d7f423f22..b853cbcb310 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter/factory.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter/factory.go @@ -38,7 +38,7 @@ func createDefaultConfig() component.Config { defaultClientHTTPSettings.WriteBufferSize = 512 * 1024 return &Config{ BackOffConfig: configretry.NewDefaultBackOffConfig(), - QueueSettings: exporterhelper.NewDefaultQueueSettings(), + QueueSettings: exporterhelper.NewDefaultQueueConfig(), ClientConfig: defaultClientHTTPSettings, Format: defaultFormat, DefaultServiceName: defaultServiceName, @@ -47,7 +47,7 @@ func createDefaultConfig() component.Config { func createTracesExporter( ctx context.Context, - set exporter.CreateSettings, + set exporter.Settings, cfg component.Config, ) (exporter.Traces, error) { zc := cfg.(*Config) @@ -56,14 +56,14 @@ func createTracesExporter( if err != nil { return nil, err } - return exporterhelper.NewTracesExporter( + return exporterhelper.NewTraces( ctx, set, cfg, ze.pushTraces, exporterhelper.WithStart(ze.start), // explicitly disable since we rely on http.Client timeout logic. - exporterhelper.WithTimeout(exporterhelper.TimeoutSettings{Timeout: 0}), + exporterhelper.WithTimeout(exporterhelper.TimeoutConfig{Timeout: 0}), exporterhelper.WithQueue(zc.QueueSettings), exporterhelper.WithRetry(zc.BackOffConfig)) } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter/internal/metadata/generated_status.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter/internal/metadata/generated_status.go index 9c236a37f60..ee966007d28 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter/internal/metadata/generated_status.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter/internal/metadata/generated_status.go @@ -7,7 +7,8 @@ import ( ) var ( - Type = component.MustNewType("zipkin") + Type = component.MustNewType("zipkin") + ScopeName = "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter" ) const ( diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter/internal/metadata/generated_telemetry.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter/internal/metadata/generated_telemetry.go deleted file mode 100644 index 8a3acfd214e..00000000000 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter/internal/metadata/generated_telemetry.go +++ /dev/null @@ -1,17 +0,0 @@ -// Code generated by mdatagen. DO NOT EDIT. 
- -package metadata - -import ( - "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/trace" -) - -func Meter(settings component.TelemetrySettings) metric.Meter { - return settings.MeterProvider.Meter("otelcol/zipkin") -} - -func Tracer(settings component.TelemetrySettings) trace.Tracer { - return settings.TracerProvider.Tracer("otelcol/zipkin") -} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter/metadata.yaml b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter/metadata.yaml index 61a9ba824a7..2d12aa880db 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter/metadata.yaml +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter/metadata.yaml @@ -1,5 +1,4 @@ type: zipkin -scope_name: otelcol/zipkin status: class: exporter diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter/zipkin.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter/zipkin.go index 23e45bb1352..825a00277a6 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter/zipkin.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter/zipkin.go @@ -74,7 +74,7 @@ func (ze *zipkinExporter) pushTraces(ctx context.Context, td ptrace.Traces) erro return consumererror.NewPermanent(fmt.Errorf("failed to push trace data via Zipkin exporter: %w", err)) } - req, err := http.NewRequestWithContext(ctx, "POST", ze.url, bytes.NewReader(body)) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, ze.url, bytes.NewReader(body)) if err != nil { return fmt.Errorf("failed to push trace data via Zipkin exporter: %w", err) } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/localhostgate/featuregate.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/localhostgate/featuregate.go deleted file mode 100644 index 238d909f0b6..00000000000 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/localhostgate/featuregate.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// package localhostgate defines a feature gate that controls whether server-like receivers and extensions use localhost as the default host for their endpoints. -// This package is duplicated across core and contrib to avoid exposing the feature gate as part of the public API. -// To do this we define a `registerOrLoad` helper and try to register the gate in both modules. -// IMPORTANT NOTE: ANY CHANGES TO THIS PACKAGE MUST BE MIRRORED IN THE CORE COUNTERPART. -// See https://github.com/open-telemetry/opentelemetry-collector/blob/main/internal/localhostgate/featuregate.go -package localhostgate // import "github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/localhostgate" - -import ( - "errors" - "fmt" - - "go.opentelemetry.io/collector/featuregate" - "go.uber.org/zap" -) - -const UseLocalHostAsDefaultHostID = "component.UseLocalHostAsDefaultHost" - -// UseLocalHostAsDefaultHostfeatureGate is the feature gate that controls whether -// server-like receivers and extensions such as the OTLP receiver use localhost as the default host for their endpoints. 
-var UseLocalHostAsDefaultHostfeatureGate = mustRegisterOrLoad( - featuregate.GlobalRegistry(), - UseLocalHostAsDefaultHostID, - featuregate.StageAlpha, - featuregate.WithRegisterDescription("controls whether server-like receivers and extensions such as the OTLP receiver use localhost as the default host for their endpoints"), -) - -// mustRegisterOrLoad tries to register the feature gate and loads it if it already exists. -// It panics on any other error. -func mustRegisterOrLoad(reg *featuregate.Registry, id string, stage featuregate.Stage, opts ...featuregate.RegisterOption) *featuregate.Gate { - gate, err := reg.Register(id, stage, opts...) - - if errors.Is(err, featuregate.ErrAlreadyRegistered) { - // Gate is already registered; find it. - // Only a handful of feature gates are registered, so it's fine to iterate over all of them. - reg.VisitAll(func(g *featuregate.Gate) { - if g.ID() == id { - gate = g - return - } - }) - } else if err != nil { - panic(err) - } - - return gate -} - -// EndpointForPort gets the endpoint for a given port using localhost or 0.0.0.0 depending on the feature gate. -func EndpointForPort(port int) string { - host := "localhost" - if !UseLocalHostAsDefaultHostfeatureGate.IsEnabled() { - host = "0.0.0.0" - } - return fmt.Sprintf("%s:%d", host, port) -} - -// LogAboutUseLocalHostAsDefault logs about the upcoming change from 0.0.0.0 to localhost on server-like components. -func LogAboutUseLocalHostAsDefault(logger *zap.Logger) { - if !UseLocalHostAsDefaultHostfeatureGate.IsEnabled() { - logger.Warn( - "The default endpoints for all servers in components will change to use localhost instead of 0.0.0.0 in a future version. Use the feature gate to preview the new default.", - zap.String("feature gate ID", UseLocalHostAsDefaultHostID), - ) - } -} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/testutil/testutil.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/testutil/testutil.go new file mode 100644 index 00000000000..823815fbb6c --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/testutil/testutil.go @@ -0,0 +1,153 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package testutil // import "github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/testutil" + +import ( + "fmt" + "net" + "os/exec" + "runtime" + "strconv" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/featuregate" +) + +type portpair struct { + first string + last string +} + +// GetAvailableLocalAddress finds an available local port on tcp network and returns an endpoint +// describing it. The port is available for opening when this function returns +// provided that there is no race by some other code to grab the same port +// immediately. +func GetAvailableLocalAddress(tb testing.TB) string { + return GetAvailableLocalNetworkAddress(tb, "tcp") +} + +// GetAvailableLocalNetworkAddress finds an available local port on specified network and returns an endpoint +// describing it. The port is available for opening when this function returns +// provided that there is no race by some other code to grab the same port +// immediately. +func GetAvailableLocalNetworkAddress(tb testing.TB, network string) string { + // Retry has been added for windows as net.Listen can return a port that is not actually available. 
Details can be + // found in https://github.com/docker/for-win/issues/3171 but to summarize, Hyper-V will reserve ranges of ports + // which do not show up under the "netstat -ano" but can only be found by + // "netsh interface ipv4 show excludedportrange protocol=tcp". We'll use []exclusions to hold those ranges and + // retry if the port returned by GetAvailableLocalAddress falls in one of those ranges. + var exclusions []portpair + + portFound := false + if runtime.GOOS == "windows" { + exclusions = getExclusionsList(tb) + } + + var endpoint string + for !portFound { + endpoint = findAvailableAddress(tb, network) + _, port, err := net.SplitHostPort(endpoint) + require.NoError(tb, err) + portFound = true + if runtime.GOOS == "windows" { + for _, pair := range exclusions { + if port >= pair.first && port <= pair.last { + portFound = false + break + } + } + } + } + + return endpoint +} + +func findAvailableAddress(tb testing.TB, network string) string { + switch network { + // net.Listen supported network strings + case "tcp", "tcp4", "tcp6", "unix", "unixpacket": + ln, err := net.Listen(network, "localhost:0") + require.NoError(tb, err, "Failed to get a free local port") + // There is a possible race if something else takes this same port before + // the test uses it, however, that is unlikely in practice. + defer func() { + assert.NoError(tb, ln.Close()) + }() + return ln.Addr().String() + // net.ListenPacket supported network strings + case "udp", "udp4", "udp6", "unixgram": + ln, err := net.ListenPacket(network, "localhost:0") + require.NoError(tb, err, "Failed to get a free local port") + // There is a possible race if something else takes this same port before + // the test uses it, however, that is unlikely in practice. + defer func() { + assert.NoError(tb, ln.Close()) + }() + return ln.LocalAddr().String() + } + return "" +} + +// Get excluded ports on Windows from the command: netsh interface ipv4 show excludedportrange protocol=tcp +func getExclusionsList(tb testing.TB) []portpair { + cmdTCP := exec.Command("netsh", "interface", "ipv4", "show", "excludedportrange", "protocol=tcp") + outputTCP, errTCP := cmdTCP.CombinedOutput() + require.NoError(tb, errTCP) + exclusions := createExclusionsList(tb, string(outputTCP)) + + cmdUDP := exec.Command("netsh", "interface", "ipv4", "show", "excludedportrange", "protocol=udp") + outputUDP, errUDP := cmdUDP.CombinedOutput() + require.NoError(tb, errUDP) + exclusions = append(exclusions, createExclusionsList(tb, string(outputUDP))...) + + return exclusions +} + +func createExclusionsList(tb testing.TB, exclusionsText string) []portpair { + var exclusions []portpair + + parts := strings.Split(exclusionsText, "--------") + require.Len(tb, parts, 3) + portsText := strings.Split(parts[2], "*") + require.Greater(tb, len(portsText), 1) // original text may have a suffix like " - Administered port exclusions."
+ lines := strings.Split(portsText[0], "\n") + for _, line := range lines { + if strings.TrimSpace(line) != "" { + entries := strings.Fields(strings.TrimSpace(line)) + require.Len(tb, entries, 2) + pair := portpair{entries[0], entries[1]} + exclusions = append(exclusions, pair) + } + } + return exclusions +} + +// Force the state of feature gate for a test +// usage: defer SetFeatureGateForTest("gateName", true)() +func SetFeatureGateForTest(tb testing.TB, gate *featuregate.Gate, enabled bool) func() { + originalValue := gate.IsEnabled() + require.NoError(tb, featuregate.GlobalRegistry().Set(gate.ID(), enabled)) + return func() { + require.NoError(tb, featuregate.GlobalRegistry().Set(gate.ID(), originalValue)) + } +} + +func GetAvailablePort(tb testing.TB) int { + endpoint := GetAvailableLocalAddress(tb) + _, port, err := net.SplitHostPort(endpoint) + require.NoError(tb, err) + + portInt, err := strconv.Atoi(port) + require.NoError(tb, err) + + return portInt +} + +// EndpointForPort gets the endpoint for a given port using localhost. +func EndpointForPort(port int) string { + return fmt.Sprintf("localhost:%d", port) +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/parseutils/parser.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/parseutils/parser.go index 2758161ec56..9c9df67d19c 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/parseutils/parser.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/parseutils/parser.go @@ -18,6 +18,7 @@ func SplitString(input, delimiter string) ([]string, error) { current := "" delimiterLength := len(delimiter) quoteChar := "" // "" means we are not in quotes + escaped := false for i := 0; i < len(input); i++ { if quoteChar == "" && i+delimiterLength <= len(input) && input[i:i+delimiterLength] == delimiter { // delimiter @@ -31,13 +32,19 @@ func SplitString(input, delimiter string) ([]string, error) { continue } - if quoteChar == "" && (input[i] == '"' || input[i] == '\'') { // start of quote - quoteChar = string(input[i]) - continue - } - if string(input[i]) == quoteChar { // end of quote - quoteChar = "" - continue + if !escaped { // consider quote termination so long as previous character wasn't backslash + if quoteChar == "" && (input[i] == '"' || input[i] == '\'') { // start of quote + quoteChar = string(input[i]) + continue + } + if string(input[i]) == quoteChar { // end of quote + quoteChar = "" + continue + } + // Only if we weren't escaped could the next character result in escaped state + escaped = input[i] == '\\' // potentially escaping next character + } else { + escaped = false } current += string(input[i]) diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/parseutils/uri.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/parseutils/uri.go new file mode 100644 index 00000000000..cd154b9d36c --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/parseutils/uri.go @@ -0,0 +1,164 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package parseutils // import "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/parseutils" + +import ( + "net/url" + "strconv" + "strings" + + semconv "go.opentelemetry.io/collector/semconv/v1.27.0" +) + +const ( + // replace 
once conventions include these + AttributeURLUserInfo = "url.user_info" + AttributeURLUsername = "url.username" + AttributeURLPassword = "url.password" +) + +// ParseURI takes an absolute or relative URI and returns the parsed values. +func ParseURI(value string, semconvCompliant bool) (map[string]any, error) { + m := make(map[string]any) + + if strings.HasPrefix(value, "?") { + // remove the query string '?' prefix before parsing + v, err := url.ParseQuery(value[1:]) + if err != nil { + return nil, err + } + return queryToMap(v, m), nil + } + + var x *url.URL + var err error + var mappingFn func(*url.URL, map[string]any) (map[string]any, error) + + if semconvCompliant { + mappingFn = urlToSemconvMap + x, err = url.Parse(value) + if err != nil { + return nil, err + } + } else { + x, err = url.ParseRequestURI(value) + if err != nil { + return nil, err + } + + mappingFn = urlToMap + } + return mappingFn(x, m) +} + +// urlToSemconvMap converts a url.URL to a map, excluding any values that are not set. +func urlToSemconvMap(parsedURI *url.URL, m map[string]any) (map[string]any, error) { + m[semconv.AttributeURLOriginal] = parsedURI.String() + m[semconv.AttributeURLDomain] = parsedURI.Hostname() + m[semconv.AttributeURLScheme] = parsedURI.Scheme + m[semconv.AttributeURLPath] = parsedURI.Path + + if portString := parsedURI.Port(); len(portString) > 0 { + port, err := strconv.Atoi(portString) + if err != nil { + return nil, err + } + m[semconv.AttributeURLPort] = port + } + + if fragment := parsedURI.Fragment; len(fragment) > 0 { + m[semconv.AttributeURLFragment] = fragment + } + + if parsedURI.User != nil { + m[AttributeURLUserInfo] = parsedURI.User.String() + + if username := parsedURI.User.Username(); len(username) > 0 { + m[AttributeURLUsername] = username + } + + if pwd, isSet := parsedURI.User.Password(); isSet { + m[AttributeURLPassword] = pwd + } + } + + if query := parsedURI.RawQuery; len(query) > 0 { + m[semconv.AttributeURLQuery] = query + } + + if periodIdx := strings.LastIndex(parsedURI.Path, "."); periodIdx != -1 { + if periodIdx < len(parsedURI.Path)-1 { + m[semconv.AttributeURLExtension] = parsedURI.Path[periodIdx+1:] + } + } + + return m, nil +} + +// urlToMap converts a url.URL to a map, excluding any values that are not set. +func urlToMap(p *url.URL, m map[string]any) (map[string]any, error) { + scheme := p.Scheme + if scheme != "" { + m["scheme"] = scheme + } + + user := p.User.Username() + if user != "" { + m["user"] = user + } + + host := p.Hostname() + if host != "" { + m["host"] = host + } + + port := p.Port() + if port != "" { + m["port"] = port + } + + path := p.EscapedPath() + if path != "" { + m["path"] = path + } + + return queryToMap(p.Query(), m), nil +} + +// queryToMap converts a query string url.Values to a map.
+func queryToMap(query url.Values, m map[string]any) map[string]any { + // no-op if query is empty, do not create the key m["query"] + if len(query) == 0 { + return m + } + + /* 'parameter' will represent url.Values + map[string]any{ + "parameter-a": []any{ + "a", + "b", + }, + "parameter-b": []any{ + "x", + "y", + }, + } + */ + parameters := map[string]any{} + for param, values := range query { + parameters[param] = queryParamValuesToMap(values) + } + m["query"] = parameters + return m +} + +// queryParamValuesToMap takes query string parameter values and +// returns an []interface populated with the values +func queryParamValuesToMap(values []string) []any { + v := make([]any, len(values)) + for i, value := range values { + v[i] = value + } + return v +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/textutils/encoding.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/textutils/encoding.go index 209c2cbc775..512b6a6f7da 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/textutils/encoding.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/textutils/encoding.go @@ -74,7 +74,7 @@ var encodingOverrides = map[string]encoding.Encoding{ } func lookupEncoding(enc string) (encoding.Encoding, error) { - if e, ok := encodingOverrides[strings.ToLower(enc)]; ok { + if e, ok := EncodingOverridesMap.Get(strings.ToLower(enc)); ok { return e, nil } e, err := ianaindex.IANA.Encoding(enc) @@ -94,3 +94,12 @@ func IsNop(enc string) bool { } return e == encoding.Nop } + +var EncodingOverridesMap = encodingOverridesMap{} + +type encodingOverridesMap struct{} + +func (e *encodingOverridesMap) Get(key string) (encoding.Encoding, bool) { + v, ok := encodingOverrides[key] + return v, ok +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/timeutils/internal/ctimefmt/ctimefmt.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/timeutils/internal/ctimefmt/ctimefmt.go index 52170879a57..adb0c5dded5 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/timeutils/internal/ctimefmt/ctimefmt.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/timeutils/internal/ctimefmt/ctimefmt.go @@ -16,8 +16,11 @@ import ( "time" ) -var ctimeRegexp = regexp.MustCompile(`%.`) -var decimalsRegexp = regexp.MustCompile(`\d`) +var ( + ctimeRegexp = regexp.MustCompile(`%.`) + invalidFractionalSecondsStrptime = regexp.MustCompile(`[^.,]%[Lfs]`) + decimalsRegexp = regexp.MustCompile(`\d`) +) var ctimeSubstitutes = map[string]string{ "%Y": "2006", @@ -121,10 +124,6 @@ func Parse(format, value string) (time.Time, error) { // ToNative converts ctime-like format string to Go native layout // (which is used by time.Time.Format() and time.Parse() functions). 
func ToNative(format string) (string, error) { - if match := decimalsRegexp.FindString(format); match != "" { - return "", errors.New("format string should not contain decimals") - } - var errs []error replaceFunc := func(directive string) string { if subst, ok := ctimeSubstitutes[directive]; ok { @@ -141,3 +140,26 @@ func ToNative(format string) (string, error) { return replaced, nil } + +func Validate(format string) error { + if match := decimalsRegexp.FindString(format); match != "" { + return errors.New("format string should not contain decimals") + } + + if match := invalidFractionalSecondsStrptime.FindString(format); match != "" { + return fmt.Errorf("invalid fractional seconds directive: '%s'. must be preceded with '.' or ','", match) + } + + directives := ctimeRegexp.FindAllString(format, -1) + + var errs []error + for _, directive := range directives { + if _, ok := ctimeSubstitutes[directive]; !ok { + errs = append(errs, errors.New("unsupported ctimefmt.ToNative() directive: "+directive)) + } + } + if len(errs) != 0 { + return fmt.Errorf("invalid strptime format: %v", errs) + } + return nil +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/timeutils/parser.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/timeutils/parser.go index 8af480096f2..82c5523292e 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/timeutils/parser.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/timeutils/parser.go @@ -4,13 +4,19 @@ package timeutils // import "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/timeutils" import ( + "errors" "fmt" + "regexp" "strings" "time" + "github.com/elastic/lunes" + strptime "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/timeutils/internal/ctimefmt" ) +var invalidFractionalSecondsGoTime = regexp.MustCompile(`[^.,9]9+`) + func StrptimeToGotime(layout string) (string, error) { return strptime.ToNative(layout) } @@ -23,6 +29,17 @@ func ParseStrptime(layout string, value any, location *time.Location) (time.Time return ParseGotime(goLayout, value, location) } +// ParseLocalizedStrptime is like ParseLocalizedGotime, but instead of using the native Go time layout, +// it uses the ctime-like format. +func ParseLocalizedStrptime(layout string, value any, location *time.Location, language string) (time.Time, error) { + goLayout, err := strptime.ToNative(layout) + if err != nil { + return time.Time{}, err + } + + return ParseLocalizedGotime(goLayout, value, location, language) +} + func GetLocation(location *string, layout *string) (*time.Location, error) { if location != nil && *location != "" { // If location is specified, it must be in the local timezone database @@ -41,6 +58,24 @@ func GetLocation(location *string, layout *string) (*time.Location, error) { return time.Local, nil } +// ParseLocalizedGotime is like ParseGotime, but instead of parsing a formatted time in +// English, it parses a value in foreign language, and returns the [time.Time] it represents. +// The language argument must be a well-formed BCP 47 language tag (e.g.: "en", "en-US"), and +// a known CLDR locale. 
+func ParseLocalizedGotime(layout string, value any, location *time.Location, language string) (time.Time, error) { + stringValue, err := convertParsingValue(value) + if err != nil { + return time.Time{}, err + } + + translatedVal, err := lunes.Translate(layout, stringValue, language) + if err != nil { + return time.Time{}, err + } + + return ParseGotime(layout, translatedVal, location) +} + func ParseGotime(layout string, value any, location *time.Location) (time.Time, error) { timeValue, err := parseGotime(layout, value, location) if err != nil { @@ -50,14 +85,9 @@ func ParseGotime(layout string, value any, location *time.Location) (time.Time, } func parseGotime(layout string, value any, location *time.Location) (time.Time, error) { - var str string - switch v := value.(type) { - case string: - str = v - case []byte: - str = string(v) - default: - return time.Time{}, fmt.Errorf("type %T cannot be parsed as a time", value) + str, err := convertParsingValue(value) + if err != nil { + return time.Time{}, err } result, err := time.ParseInLocation(layout, str, location) @@ -86,6 +116,20 @@ func parseGotime(layout string, value any, location *time.Location) (time.Time, return resultLoc, locErr } +func convertParsingValue(value any) (string, error) { + var str string + switch v := value.(type) { + case string: + str = v + case []byte: + str = string(v) + default: + return "", fmt.Errorf("type %T cannot be parsed as a time", value) + } + + return str, nil +} + // SetTimestampYear sets the year of a timestamp to the current year. // This is needed because year is missing from some time formats, such as rfc3164. func SetTimestampYear(t time.Time) time.Time { @@ -104,5 +148,35 @@ func SetTimestampYear(t time.Time) time.Time { return d } +// ValidateStrptime checks the given strptime layout and returns an error if it detects any known issues +// that prevent it from being parsed. +func ValidateStrptime(layout string) error { + return strptime.Validate(layout) +} + +func ValidateGotime(layout string) error { + if match := invalidFractionalSecondsGoTime.FindString(layout); match != "" { + return fmt.Errorf("invalid fractional seconds directive: '%s'. must be preceded with '.' or ','", match) + } + + return nil +} + +// ValidateLocale checks the given locale and returns an error if the language tag +// is not supported by the localized parser functions. 
+func ValidateLocale(locale string) error { + _, err := lunes.NewDefaultLocale(locale) + if err == nil { + return nil + } + + var e *lunes.ErrUnsupportedLocale + if errors.As(err, &e) { + return fmt.Errorf("unsupported locale '%s', value must be a supported BCP 47 language tag", locale) + } + + return fmt.Errorf("invalid locale '%s': %w", locale, err) +} + // Allows tests to override with deterministic value var Now = time.Now diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/expr/matcher.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/expr/matcher.go index f73e9f8e8a8..037b4888d46 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/expr/matcher.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/expr/matcher.go @@ -25,6 +25,16 @@ func Not[K any](matcher BoolExpr[K]) BoolExpr[K] { return notMatcher[K]{matcher: matcher} } +type alwaysTrueMatcher[K any] struct{} + +func (alm alwaysTrueMatcher[K]) Eval(_ context.Context, _ K) (bool, error) { + return true, nil +} + +func AlwaysTrue[K any]() BoolExpr[K] { + return alwaysTrueMatcher[K]{} +} + type orMatcher[K any] struct { matchers []BoolExpr[K] } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterlog/filterlog.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterlog/filterlog.go index b394397c193..41d324d86db 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterlog/filterlog.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterlog/filterlog.go @@ -28,7 +28,6 @@ var useOTTLBridge = featuregate.GlobalRegistry().MustRegister( // The logic determining if a log should be processed is based on include and exclude settings. // Include properties are checked before exclude settings are checked. func NewSkipExpr(mp *filterconfig.MatchConfig) (expr.BoolExpr[ottllog.TransformContext], error) { - if useOTTLBridge.IsEnabled() { return filterottl.NewLogSkipExprBridge(mp) } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filtermatcher/attributematcher.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filtermatcher/attributematcher.go index a4261a0676c..0a1d2c6d18c 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filtermatcher/attributematcher.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filtermatcher/attributematcher.go @@ -37,7 +37,6 @@ func NewAttributesMatcher(config filterset.Config, attributes []filterconfig.Att // Convert attribute values from mp representation to in-memory representation. 
var rawAttributes []AttributeMatcher for _, attribute := range attributes { - if attribute.Key == "" { return nil, errors.New("can't have empty key in the list of attributes") } @@ -73,7 +72,6 @@ func NewAttributesMatcher(config filterset.Config, attributes []filterconfig.Att } default: return nil, filterset.NewUnrecognizedMatchTypeError(config.MatchType) - } } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterottl/filter.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterottl/filter.go index 6324c8a35bd..705b2acf5a4 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterottl/filter.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterottl/filter.go @@ -6,12 +6,12 @@ package filterottl // import "github.com/open-telemetry/opentelemetry-collector- import ( "go.opentelemetry.io/collector/component" - "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/expr" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottldatapoint" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottllog" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlmetric" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlresource" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlscope" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspan" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspanevent" ) @@ -19,7 +19,7 @@ import ( // NewBoolExprForSpan creates a BoolExpr[ottlspan.TransformContext] that will return true if any of the given OTTL conditions evaluate to true. // The passed in functions should use the ottlspan.TransformContext. // If a function named `match` is not present in the function map it will be added automatically so that parsing works as expected -func NewBoolExprForSpan(conditions []string, functions map[string]ottl.Factory[ottlspan.TransformContext], errorMode ottl.ErrorMode, set component.TelemetrySettings) (expr.BoolExpr[ottlspan.TransformContext], error) { +func NewBoolExprForSpan(conditions []string, functions map[string]ottl.Factory[ottlspan.TransformContext], errorMode ottl.ErrorMode, set component.TelemetrySettings) (*ottl.ConditionSequence[ottlspan.TransformContext], error) { parser, err := ottlspan.NewParser(functions, set) if err != nil { return nil, err @@ -35,7 +35,7 @@ func NewBoolExprForSpan(conditions []string, functions map[string]ottl.Factory[o // NewBoolExprForSpanEvent creates a BoolExpr[ottlspanevent.TransformContext] that will return true if any of the given OTTL conditions evaluate to true. // The passed in functions should use the ottlspanevent.TransformContext. 
// If a function named `match` is not present in the function map it will be added automatically so that parsing works as expected -func NewBoolExprForSpanEvent(conditions []string, functions map[string]ottl.Factory[ottlspanevent.TransformContext], errorMode ottl.ErrorMode, set component.TelemetrySettings) (expr.BoolExpr[ottlspanevent.TransformContext], error) { +func NewBoolExprForSpanEvent(conditions []string, functions map[string]ottl.Factory[ottlspanevent.TransformContext], errorMode ottl.ErrorMode, set component.TelemetrySettings) (*ottl.ConditionSequence[ottlspanevent.TransformContext], error) { parser, err := ottlspanevent.NewParser(functions, set) if err != nil { return nil, err @@ -51,7 +51,7 @@ func NewBoolExprForSpanEvent(conditions []string, functions map[string]ottl.Fact // NewBoolExprForMetric creates a BoolExpr[ottlmetric.TransformContext] that will return true if any of the given OTTL conditions evaluate to true. // The passed in functions should use the ottlmetric.TransformContext. // If a function named `match` is not present in the function map it will be added automatically so that parsing works as expected -func NewBoolExprForMetric(conditions []string, functions map[string]ottl.Factory[ottlmetric.TransformContext], errorMode ottl.ErrorMode, set component.TelemetrySettings) (expr.BoolExpr[ottlmetric.TransformContext], error) { +func NewBoolExprForMetric(conditions []string, functions map[string]ottl.Factory[ottlmetric.TransformContext], errorMode ottl.ErrorMode, set component.TelemetrySettings) (*ottl.ConditionSequence[ottlmetric.TransformContext], error) { parser, err := ottlmetric.NewParser(functions, set) if err != nil { return nil, err @@ -67,7 +67,7 @@ func NewBoolExprForMetric(conditions []string, functions map[string]ottl.Factory // NewBoolExprForDataPoint creates a BoolExpr[ottldatapoint.TransformContext] that will return true if any of the given OTTL conditions evaluate to true. // The passed in functions should use the ottldatapoint.TransformContext. // If a function named `match` is not present in the function map it will be added automatically so that parsing works as expected -func NewBoolExprForDataPoint(conditions []string, functions map[string]ottl.Factory[ottldatapoint.TransformContext], errorMode ottl.ErrorMode, set component.TelemetrySettings) (expr.BoolExpr[ottldatapoint.TransformContext], error) { +func NewBoolExprForDataPoint(conditions []string, functions map[string]ottl.Factory[ottldatapoint.TransformContext], errorMode ottl.ErrorMode, set component.TelemetrySettings) (*ottl.ConditionSequence[ottldatapoint.TransformContext], error) { parser, err := ottldatapoint.NewParser(functions, set) if err != nil { return nil, err @@ -83,7 +83,7 @@ func NewBoolExprForDataPoint(conditions []string, functions map[string]ottl.Fact // NewBoolExprForLog creates a BoolExpr[ottllog.TransformContext] that will return true if any of the given OTTL conditions evaluate to true. // The passed in functions should use the ottllog.TransformContext. 
// If a function named `match` is not present in the function map it will be added automatically so that parsing works as expected -func NewBoolExprForLog(conditions []string, functions map[string]ottl.Factory[ottllog.TransformContext], errorMode ottl.ErrorMode, set component.TelemetrySettings) (expr.BoolExpr[ottllog.TransformContext], error) { +func NewBoolExprForLog(conditions []string, functions map[string]ottl.Factory[ottllog.TransformContext], errorMode ottl.ErrorMode, set component.TelemetrySettings) (*ottl.ConditionSequence[ottllog.TransformContext], error) { parser, err := ottllog.NewParser(functions, set) if err != nil { return nil, err @@ -99,7 +99,7 @@ func NewBoolExprForLog(conditions []string, functions map[string]ottl.Factory[ot // NewBoolExprForResource creates a BoolExpr[ottlresource.TransformContext] that will return true if any of the given OTTL conditions evaluate to true. // The passed in functions should use the ottlresource.TransformContext. // If a function named `match` is not present in the function map it will be added automatically so that parsing works as expected -func NewBoolExprForResource(conditions []string, functions map[string]ottl.Factory[ottlresource.TransformContext], errorMode ottl.ErrorMode, set component.TelemetrySettings) (expr.BoolExpr[ottlresource.TransformContext], error) { +func NewBoolExprForResource(conditions []string, functions map[string]ottl.Factory[ottlresource.TransformContext], errorMode ottl.ErrorMode, set component.TelemetrySettings) (*ottl.ConditionSequence[ottlresource.TransformContext], error) { parser, err := ottlresource.NewParser(functions, set) if err != nil { return nil, err @@ -111,3 +111,19 @@ func NewBoolExprForResource(conditions []string, functions map[string]ottl.Facto c := ottlresource.NewConditionSequence(statements, set, ottlresource.WithConditionSequenceErrorMode(errorMode)) return &c, nil } + +// NewBoolExprForScope creates a BoolExpr[ottlscope.TransformContext] that will return true if any of the given OTTL conditions evaluate to true. +// The passed in functions should use the ottlscope.TransformContext.
+// If a function named `match` is not present in the function map it will be added automatically so that parsing works as expected +func NewBoolExprForScope(conditions []string, functions map[string]ottl.Factory[ottlscope.TransformContext], errorMode ottl.ErrorMode, set component.TelemetrySettings) (*ottl.ConditionSequence[ottlscope.TransformContext], error) { + parser, err := ottlscope.NewParser(functions, set) + if err != nil { + return nil, err + } + statements, err := parser.ParseConditions(conditions) + if err != nil { + return nil, err + } + c := ottlscope.NewConditionSequence(statements, set, ottlscope.WithConditionSequenceErrorMode(errorMode)) + return &c, nil +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterottl/functions.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterottl/functions.go index 355a148f6cb..3612d184966 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterottl/functions.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterottl/functions.go @@ -14,13 +14,17 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottllog" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlmetric" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlresource" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlscope" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspan" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspanevent" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs" ) func StandardSpanFuncs() map[string]ottl.Factory[ottlspan.TransformContext] { - return ottlfuncs.StandardConverters[ottlspan.TransformContext]() + m := ottlfuncs.StandardConverters[ottlspan.TransformContext]() + isRootSpanFactory := ottlfuncs.NewIsRootSpanFactory() + m[isRootSpanFactory.Name()] = isRootSpanFactory + return m } func StandardSpanEventFuncs() map[string]ottl.Factory[ottlspanevent.TransformContext] { @@ -40,6 +44,10 @@ func StandardDataPointFuncs() map[string]ottl.Factory[ottldatapoint.TransformCon return ottlfuncs.StandardConverters[ottldatapoint.TransformContext]() } +func StandardScopeFuncs() map[string]ottl.Factory[ottlscope.TransformContext] { + return ottlfuncs.StandardConverters[ottlscope.TransformContext]() +} + func StandardLogFuncs() map[string]ottl.Factory[ottllog.TransformContext] { return ottlfuncs.StandardConverters[ottllog.TransformContext]() } @@ -68,7 +76,7 @@ func createHasAttributeOnDatapointFunction(_ ottl.FunctionContext, oArgs ottl.Ar } func hasAttributeOnDatapoint(key string, expectedVal string) (ottl.ExprFunc[ottlmetric.TransformContext], error) { - return func(ctx context.Context, tCtx ottlmetric.TransformContext) (any, error) { + return func(_ context.Context, tCtx ottlmetric.TransformContext) (any, error) { return checkDataPoints(tCtx, key, &expectedVal) }, nil } @@ -92,7 +100,7 @@ func createHasAttributeKeyOnDatapointFunction(_ ottl.FunctionContext, oArgs ottl } func hasAttributeKeyOnDatapoint(key string) (ottl.ExprFunc[ottlmetric.TransformContext], error) { - return func(ctx context.Context, tCtx ottlmetric.TransformContext) (any, error) { + return func(_ context.Context, tCtx ottlmetric.TransformContext) (any, error) { return checkDataPoints(tCtx, key, nil) }, nil } diff 
--git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset/config.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset/config.go index 7a60b641651..f6c6c757d54 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset/config.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset/config.go @@ -22,9 +22,7 @@ const ( MatchTypeFieldName = "match_type" ) -var ( - validMatchTypes = []MatchType{Regexp, Strict} -) +var validMatchTypes = []MatchType{Regexp, Strict} // Config configures the matching behavior of a FilterSet. type Config struct { diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterspan/filterspan.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterspan/filterspan.go index 2ee4358991f..734b8a01b75 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterspan/filterspan.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterspan/filterspan.go @@ -9,7 +9,7 @@ import ( "go.opentelemetry.io/collector/featuregate" "go.opentelemetry.io/collector/pdata/pcommon" - conventions "go.opentelemetry.io/collector/semconv/v1.6.1" + conventions "go.opentelemetry.io/collector/semconv/v1.27.0" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/traceutil" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/expr" diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka/authentication.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka/authentication.go index 104d6152a1b..ca6ce4c3519 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka/authentication.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka/authentication.go @@ -7,9 +7,11 @@ import ( "context" "crypto/sha256" "crypto/sha512" + "crypto/tls" "fmt" "github.com/IBM/sarama" + "github.com/aws/aws-msk-iam-sasl-signer-go/signer" "go.opentelemetry.io/collector/config/configtls" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka/awsmsk" @@ -35,7 +37,7 @@ type SASLConfig struct { Username string `mapstructure:"username"` // Password to be used on authentication Password string `mapstructure:"password"` - // SASL Mechanism to be used, possible values are: (PLAIN, AWS_MSK_IAM, SCRAM-SHA-256 or SCRAM-SHA-512). + // SASL Mechanism to be used, possible values are: (PLAIN, AWS_MSK_IAM, AWS_MSK_IAM_OAUTHBEARER, SCRAM-SHA-256 or SCRAM-SHA-512). Mechanism string `mapstructure:"mechanism"` // SASL Protocol Version to be used, possible values are: (0, 1). Defaults to 0. Version int `mapstructure:"version"` @@ -44,27 +46,37 @@ type SASLConfig struct { } // AWSMSKConfig defines the additional SASL authentication -// measures needed to use AWS_MSK_IAM mechanism +// measures needed to use AWS_MSK_IAM and AWS_MSK_IAM_OAUTHBEARER mechanism type AWSMSKConfig struct { // Region is the AWS region the MSK cluster is based in Region string `mapstructure:"region"` // BrokerAddr is the client is connecting to in order to perform the auth required BrokerAddr string `mapstructure:"broker_addr"` + // Context + ctx context.Context } -// KerberosConfig defines kereros configuration. 
+// Token returns the AWS session token for the AWS_MSK_IAM_OAUTHBEARER mechanism +func (c *AWSMSKConfig) Token() (*sarama.AccessToken, error) { + token, _, err := signer.GenerateAuthToken(c.ctx, c.Region) + + return &sarama.AccessToken{Token: token}, err +} + +// KerberosConfig defines kerberos configuration. +type KerberosConfig struct { - ServiceName string `mapstructure:"service_name"` - Realm string `mapstructure:"realm"` - UseKeyTab bool `mapstructure:"use_keytab"` - Username string `mapstructure:"username"` - Password string `mapstructure:"password" json:"-"` - ConfigPath string `mapstructure:"config_file"` - KeyTabPath string `mapstructure:"keytab_file"` + ServiceName string `mapstructure:"service_name"` + Realm string `mapstructure:"realm"` + UseKeyTab bool `mapstructure:"use_keytab"` + Username string `mapstructure:"username"` + Password string `mapstructure:"password" json:"-"` + ConfigPath string `mapstructure:"config_file"` + KeyTabPath string `mapstructure:"keytab_file"` + DisablePAFXFAST bool `mapstructure:"disable_fast_negotiation"` } // ConfigureAuthentication configures authentication in sarama.Config. -func ConfigureAuthentication(config Authentication, saramaConfig *sarama.Config) error { +func ConfigureAuthentication(ctx context.Context, config Authentication, saramaConfig *sarama.Config) error { if config.PlainText != nil { configurePlaintext(*config.PlainText, saramaConfig) } @@ -74,7 +86,7 @@ func ConfigureAuthentication(config Authentication, saramaConfig *sarama.Config) } } if config.SASL != nil { - if err := configureSASL(*config.SASL, saramaConfig); err != nil { + if err := configureSASL(ctx, *config.SASL, saramaConfig); err != nil { return err } } @@ -91,13 +103,12 @@ func configurePlaintext(config PlainTextConfig, saramaConfig *sarama.Config) { saramaConfig.Net.SASL.Password = config.Password } -func configureSASL(config SASLConfig, saramaConfig *sarama.Config) error { - - if config.Username == "" { +func configureSASL(ctx context.Context, config SASLConfig, saramaConfig *sarama.Config) error { + if config.Username == "" && config.Mechanism != "AWS_MSK_IAM_OAUTHBEARER" { return fmt.Errorf("username have to be provided") } - if config.Password == "" { + if config.Password == "" && config.Mechanism != "AWS_MSK_IAM_OAUTHBEARER" { return fmt.Errorf("password have to be provided") } @@ -119,8 +130,15 @@ func configureSASL(config SASLConfig, saramaConfig *sarama.Config) error { return awsmsk.NewIAMSASLClient(config.AWSMSK.BrokerAddr, config.AWSMSK.Region, saramaConfig.ClientID) } saramaConfig.Net.SASL.Mechanism = awsmsk.Mechanism + case "AWS_MSK_IAM_OAUTHBEARER": + config.AWSMSK.ctx = ctx + saramaConfig.Net.SASL.Mechanism = sarama.SASLTypeOAuth + saramaConfig.Net.SASL.TokenProvider = &config.AWSMSK + tlsConfig := tls.Config{} + saramaConfig.Net.TLS.Enable = true + saramaConfig.Net.TLS.Config = &tlsConfig default: - return fmt.Errorf(`invalid SASL Mechanism %q: can be either "PLAIN", "AWS_MSK_IAM", "SCRAM-SHA-256" or "SCRAM-SHA-512"`, config.Mechanism) + return fmt.Errorf(`invalid SASL Mechanism %q: can be either "PLAIN", "AWS_MSK_IAM", "AWS_MSK_IAM_OAUTHBEARER", "SCRAM-SHA-256" or "SCRAM-SHA-512"`, config.Mechanism) } switch config.Version { @@ -159,4 +177,5 @@ func configureKerberos(config KerberosConfig, saramaConfig *sarama.Config) { saramaConfig.Net.SASL.GSSAPI.Username = config.Username saramaConfig.Net.SASL.GSSAPI.Realm = config.Realm saramaConfig.Net.SASL.GSSAPI.ServiceName = config.ServiceName + saramaConfig.Net.SASL.GSSAPI.DisablePAFXFAST =
config.DisablePAFXFAST } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka/scram_client.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka/scram_client.go index 0c3f83d8baa..269dcfbd771 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka/scram_client.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka/scram_client.go @@ -34,7 +34,6 @@ func (x *XDGSCRAMClient) Begin(userName, password, authzID string) (err error) { // completes is also an error. func (x *XDGSCRAMClient) Step(challenge string) (response string, err error) { return x.ClientConversation.Step(challenge) - } // Done returns true if the conversation is completed or has errored. diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/kafka/topic/LICENSE b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/kafka/topic/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/kafka/topic/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/collector/confmap/converter/expandconverter/Makefile b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/kafka/topic/Makefile similarity index 100% rename from vendor/go.opentelemetry.io/collector/confmap/converter/expandconverter/Makefile rename to vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/kafka/topic/Makefile diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/kafka/topic/README.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/kafka/topic/README.md new file mode 100644 index 00000000000..663d969e5d6 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/kafka/topic/README.md @@ -0,0 +1,4 @@ +# Kafka Topic Context Accessor + +This module is used for accessing the topic within a context. +See the [kafka exporter readme](../../../exporter/kafkaexporter/README.md#destination-topic) for more details. 
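The kafka_ctx.go file added below exposes just two helpers, `WithTopic` and `FromContext`. A minimal sketch of how a caller might use them; the topic name and the wiring around it are hypothetical:

```go
package main

import (
	"context"
	"fmt"

	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/kafka/topic"
)

func main() {
	// Stash a destination topic in the context; a Kafka exporter that
	// honors this key can use it to override its configured topic.
	ctx := topic.WithTopic(context.Background(), "otlp_spans")

	// Retrieve the topic further down the pipeline.
	if t, ok := topic.FromContext(ctx); ok {
		fmt.Println("publishing to topic:", t)
	}
}
```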
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/kafka/topic/kafka_ctx.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/kafka/topic/kafka_ctx.go new file mode 100644 index 00000000000..603863615fb --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/kafka/topic/kafka_ctx.go @@ -0,0 +1,19 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package topic // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/kafka/topic" + +import ( + "context" +) + +func WithTopic(ctx context.Context, topic string) context.Context { + return context.WithValue(ctx, topicContextKey{}, topic) +} + +func FromContext(ctx context.Context) (string, bool) { + contextTopic, ok := ctx.Value(topicContextKey{}).(string) + return contextTopic, ok +} + +type topicContextKey struct{} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/kafka/topic/metadata.yaml b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/kafka/topic/metadata.yaml new file mode 100644 index 00000000000..708ccce63f8 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/kafka/topic/metadata.yaml @@ -0,0 +1,6 @@ +type: topic + +status: + class: pkg + codeowners: + active: [pavolloffay, MovieStoreGuy] diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/CONTRIBUTING.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/CONTRIBUTING.md index c644810d2c4..e5e54971b6c 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/CONTRIBUTING.md +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/CONTRIBUTING.md @@ -6,20 +6,57 @@ This guide is specific to the OpenTelemetry Transformation Language. All guidel - Changes to the OpenTelemetry Transformation Language should be made independent of any component that depend on the package. Whenever possible, try not to submit PRs that change both the OTTL and a dependent component. Instead, submit a PR that updates the OTTL and then, once merged, update the component as needed. -## New Values +## Adding New Editors/Converters -When adding new values to the grammar you must: +Before raising a PR with a new Editor or Converter, raise an issue to verify its acceptance. While acceptance ultimately depends on the specific use case, consider these guidelines for an early assessment. -1. Update the `Value` struct with the new value. This may also mean adding new token(s) to the lexer. -2. Update `NewFunctionCall` to be able to handle calling functions with this new value. -3. Update `NewGetter` to be able to handle the new value. -4. Add new unit tests. +Your proposal likely will be accepted if: + +- The proposed functionality is missing, +- The proposed solution significantly improves user experience and readability for very common use cases, +- The proposed solution is more performant in cases where it is possible to achieve the same result with existing options. +- The proposed solution makes use of packages from the Go standard library to offer functionality possible through an existing option in a more standard or reliable manner. + +It will be up for discussion if: + +- Your proposal solves an issue that can be achieved in another way but does not improve user experience or performance.
+- The proposed functionality is not obviously applicable to the needs of a significant number of OTTL users. +- Your proposal extracts data into a structure with enumerable keys or values and OpenTelemetry semantic conventions do not cover the shape or values for this data. + +Your proposal likely won't be accepted if: + +- User experience is worse and assumes a highly technical user, +- The performance of your proposal very negatively affects the processing pipeline. + +As with code, OTTL aims for readability first. This means: -## New Functions +- Using short, meaningful, and descriptive names, +- Ensuring naming consistency across Editors and Converters, +- Avoiding deep nesting to achieve desired transformations, +- Ensuring Editors and Converters have a single responsibility. + +### Implementation guidelines All new functions must be added via a new file. Function files must start with `func_`. Functions must be placed in `ottlfuncs`. Unit tests must be added for all new functions. Unit test files must start with `func_` and end in `_test`. Unit tests must be placed in the same directory as the function. Functions that are not specific to a pipeline should be tested independently of any specific pipeline. Functions that are specific to a pipeline should be tests against that pipeline. End-to-end tests must be added in the `e2e` directory. -Function names should follow the [Function Syntax Guidelines](ottlfuncs/README.md#function-syntax) +#### Naming and Parameter Guidelines + +Functions should be named and formatted according to the following standards. + +- Function names MUST start with a verb unless it is a Factory that creates a new type. +- Converters MUST be UpperCamelCase. +- Function names that contain multiple words MUST separate those words with `_`. +- Functions that interact with multiple items MUST have plurality in the name. Ex: `truncate_all`, `keep_keys`, `replace_all_matches`. +- Functions that interact with a single item MUST NOT have plurality in the name. If a function would interact with multiple items due to a condition, like `where`, it is still considered singular. Ex: `set`, `delete`, `replace_match`. +- Functions that change a specific target MUST set the target as the first parameter. +## New Values + +When adding new values to the grammar you must: + +1. Update the `Value` struct with the new value. This may also mean adding new token(s) to the lexer. +2. Update `NewFunctionCall` to be able to handle calling functions with this new value. +3. Update `NewGetter` to be able to handle the new value. +4. Add new unit tests. diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/LANGUAGE.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/LANGUAGE.md index 05df67e0ffe..ee4545228a7 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/LANGUAGE.md +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/LANGUAGE.md @@ -1,6 +1,6 @@ ## Grammar -The OTTL grammar includes function invocations, Values and Boolean Expressions. These parts all fit into a Statement, which is the basis of execution in the OTTL. +OTTL grammar includes function invocations, Values and Boolean Expressions. These parts all fit into a Statement, which is the basis of execution in OTTL. ### Design principles @@ -18,9 +18,9 @@ An Editor is made up of 2 parts: - a string identifier. The string identifier must start with a lowercase letter. 
- zero or more Values (comma separated) surrounded by parentheses (`()`). -**The OTTL has no built-in Editors.** +**OTTL has no built-in Editors.** Users must supply a map between string identifiers and Editor implementations. -The OTTL will use this map to determine which implementation to call when executing a Statement. +OTTL will use this map to determine which implementation to call when executing a Statement. See [ottlfuncs](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl/ottlfuncs#editors) for pre-made, usable Editors. ### Converters @@ -32,9 +32,9 @@ Converters are made up of 3 parts: - zero or more Values (comma separated) surrounded by parentheses (`()`). - a combination of zero or more a string key (`["key"]`) or int key (`[0]`) -**The OTTL has no built-in Converters.** +**OTTL has no built-in Converters.** Users must include Converters in the same map that Editors are supplied. -The OTTL will use this map and reflection to generate Converters that can then be invoked by the user. +OTTL will use this map and reflection to generate Converters that can then be invoked by the user. See [ottlfuncs](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl/ottlfuncs#converters) for pre-made, usable Converters. When keys are supplied the value returned by the Converter will be indexed by the keys in order. @@ -69,6 +69,7 @@ The following types are supported for single-value parameters in OTTL functions: - `IntLikeGetter` - `BoolGetter` - `BoolLikeGetter` +- `ByteSliceLikeGetter` - `Enum` - `string` - `float64` @@ -88,7 +89,7 @@ For slice parameters, the following types are supported: - `string` - `float64` - `int64` -- `uint8`. Byte slice literals are parsed as byte slices by the OTTL. +- `uint8`. Byte slice literals are parsed as byte slices by OTTL. - `Getter` To make a parameter optional, use the `Optional` type, which takes a type argument for the underlying @@ -113,10 +114,11 @@ Values are passed as function parameters or are used in a Boolean Expression. Va - [Enums](#enums) - [Converters](#converters) - [Math Expressions](#math-expressions) +- [Maps](#maps) ### Paths -A Path Value is a reference to a telemetry field. Paths are made up of lowercase identifiers, dots (`.`), and square brackets combined with a string key (`["key"]`) or int key (`[0]`). **The interpretation of a Path is NOT implemented by the OTTL.** Instead, the user must provide a `PathExpressionParser` that the OTTL can use to interpret paths. As a result, how the Path parts are used is up to the user. However, it is recommended that the parts be used like so: +A Path Value is a reference to a telemetry field. Paths are made up of lowercase identifiers, dots (`.`), and square brackets combined with a string key (`["key"]`) or int key (`[0]`). **The interpretation of a Path is NOT implemented by OTTL.** Instead, the user must provide a `PathExpressionParser` that OTTL can use to interpret paths. As a result, how the Path parts are used is up to the user. However, it is recommended that the parts be used like so: - Identifiers are used to map to a telemetry field. - Dots (`.`) are used to separate nested fields. @@ -154,13 +156,23 @@ Example List Values: - `["1", "2", "3"]` - `["a", attributes["key"], Concat(["a", "b"], "-")]` +### Maps + +A Map Value comprises a set of key Value pairs. 
+ +Example Map Values: +- `{}` +- `{"foo": "bar"}` +- `{"foo": {"a": 2}}` +- `{"foo": {"a": attributes["key"]}}` + ### Literals Literals are literal interpretations of the Value into a Go value. Accepted literals are: - Strings. Strings are represented as literals by surrounding the string in double quotes (`""`). -- Ints. Ints are represented by any digit, optionally prepended by plus (`+`) or minus (`-`). Internally the OTTL represents all ints as `int64` -- Floats. Floats are represented by digits separated by a dot (`.`), optionally prepended by plus (`+`) or minus (`-`). The leading digit is optional. Internally the OTTL represents all Floats as `float64`. +- Ints. Ints are represented by any digit, optionally prepended by plus (`+`) or minus (`-`). Internally OTTL represents all ints as `int64` +- Floats. Floats are represented by digits separated by a dot (`.`), optionally prepended by plus (`+`) or minus (`-`). The leading digit is optional. Internally OTTL represents all Floats as `float64`. - Bools. Bools are represented by the exact strings `true` and `false`. - Nil. Nil is represented by the exact string `nil`. - Byte slices. Byte slices are represented via a hex string prefaced with `0x` @@ -175,7 +187,7 @@ Example Literals ### Enums -Enums are uppercase identifiers that get interpreted during parsing and converted to an `int64`. **The interpretation of an Enum is NOT implemented by the OTTL.** Instead, the user must provide a `EnumParser` that the OTTL can use to interpret the Enum. The `EnumParser` returns an `int64` instead of a function, which means that the Enum's numeric value is retrieved during parsing instead of during execution. +Enums are uppercase identifiers that get interpreted during parsing and converted to an `int64`. **The interpretation of an Enum is NOT implemented by OTTL.** Instead, the user must provide an `EnumParser` that OTTL can use to interpret the Enum. The `EnumParser` returns an `int64` instead of a function, which means that the Enum's numeric value is retrieved during parsing instead of during execution. Within the grammar Enums are always used as `int64`. As a result, the Enum's symbol can be used as if it is an Int value. @@ -280,7 +292,7 @@ Examples: ## Accessing signal telemetry -Access to signal telemetry is provided to OTTL functions through a `TransformContext` that is created by the user and passed during statement evaluation. To allow functions to operate on the `TransformContext`, the OTTL provides `Getter`, `Setter`, and `GetSetter` interfaces. +Access to signal telemetry is provided to OTTL functions through a `TransformContext` that is created by the user and passed during statement evaluation. To allow functions to operate on the `TransformContext`, OTTL provides `Getter`, `Setter`, and `GetSetter` interfaces. ### Getters and Setters @@ -293,6 +305,6 @@ Getters allow for reading the following types of data. See the respective sectio It is possible to update the Value in a telemetry field using a Setter. For read and write access, the `GetSetter` interface extends both interfaces. -## Logging inside a OTTL function -To emit logs inside a OTTL function, add a parameter of type [`component.TelemetrySettings`](https://pkg.go.dev/go.opentelemetry.io/collector/component#TelemetrySettings) to the function signature. The OTTL will then inject the TelemetrySettings that were passed to `NewParser` into the function. TelemetrySettings can be used to emit logs.
+To emit logs inside an OTTL function, add a parameter of type [`component.TelemetrySettings`](https://pkg.go.dev/go.opentelemetry.io/collector/component#TelemetrySettings) to the function signature. OTTL will then inject the TelemetrySettings that were passed to `NewParser` into the function. TelemetrySettings can be used to emit logs. diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/README.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/README.md index 2c25f746009..b7d7a125f45 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/README.md +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/README.md @@ -4,14 +4,19 @@ | ------------- |-----------| | Stability | [alpha]: traces, metrics, logs | | Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Apkg%2Fottl%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Apkg%2Fottl) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Apkg%2Fottl%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Apkg%2Fottl) | -| [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@TylerHelmuth](https://www.github.com/TylerHelmuth), [@kentquirk](https://www.github.com/kentquirk), [@bogdandrutu](https://www.github.com/bogdandrutu), [@evan-bradley](https://www.github.com/evan-bradley) | +| [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@TylerHelmuth](https://www.github.com/TylerHelmuth), [@kentquirk](https://www.github.com/kentquirk), [@bogdandrutu](https://www.github.com/bogdandrutu), [@evan-bradley](https://www.github.com/evan-bradley) \| Seeking more code owners! | -[alpha]: https://github.com/open-telemetry/opentelemetry-collector#alpha +[alpha]: https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-stability.md#alpha -The OpenTelemetry Transformation Language is a language for transforming open telemetry data based on the [OpenTelemetry Collector Processing Exploration](https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/processing.md). +The OpenTelemetry Transformation Language (OTTL) is a small, domain-specific programming language intended to process data with OpenTelemetry-native concepts and constructs. -This package reads in OTTL statements and converts them to invokable functions/booleans based on the OTTL's grammar. +This package implements everything necessary to use OTTL in a Collector component or in another user-facing system. + +- [Getting Started](#getting-started) +- [Where to use OTTL](#where-to-use-ottl) +- [Troubleshooting](#troubleshooting) +- [Resources](#resources) ## Getting Started @@ -19,114 +24,52 @@ If you're looking to write OTTL statements for a component's configuration check See [OTTL Functions](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl/ottlfuncs#ottl-functions) for a list of functions available for use in the OTTL statements of most components. 
-OTTL Contexts define how you access the fields on a piece of telemetry. See the table to find the exact list of available fields: +OTTL Contexts define how you access the fields on a given telemetry item. See the table to find the exact list of available fields: | Telemetry | OTTL Context | |-------------------------|--------------------------------------------------------------------------------------------------------------------------------------------| -| `Resource` | [Resource](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/ottl/contexts/ottlresource/README.md) | -| `Instrumentation Scope` | [Instrumentation Scope](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/ottl/contexts/ottlscope/README.md) | -| `Span` | [Span](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/ottl/contexts/ottlspan/README.md) | -| `Span Event` | [SpanEvent](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/ottl/contexts/ottlspanevent/README.md) | -| `Metric` | [Metric](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/ottl/contexts/ottlmetric/README.md) | -| `Datapoint` | [DataPoint](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/ottl/contexts/ottldatapoint/README.md) | -| `Log` | [Log](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/ottl/contexts/ottllog/README.md) | - -### Component Creators - -If you're looking to use OTTL in your component, check out [the OTTL grammar](./LANGUAGE.md). - -## Examples - -These examples contain a SQL-like declarative language. Applied statements interact with only one signal, but statements can be declared across multiple signals. Functions used in examples are indicative of what could be useful. 
- -### Remove a forbidden attribute - -``` -traces: - delete(attributes["http.request.header.authorization"]) -metrics: - delete(attributes["http.request.header.authorization"]) -logs: - delete(attributes["http.request.header.authorization"]) -``` - -### Remove all attributes except for some - -``` -traces: - keep_keys(attributes, ["http.method", "http.status_code"]) -metrics: - keep_keys(attributes, ["http.method", "http.status_code"]) -logs: - keep_keys(attributes, ["http.method", "http.status_code"]) -``` - -### Reduce cardinality of an attribute - -``` -traces: - replace_match(attributes["http.target"], "/user/*/list/*", "/user/{userId}/list/{listId}") -``` - -### Reduce cardinality of a span name +| `Resource` | [Resource](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl/contexts/ottlresource/README.md) | +| `Instrumentation Scope` | [Instrumentation Scope](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl/contexts/ottlscope/README.md) | +| `Span` | [Span](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl/contexts/ottlspan/README.md) | +| `Span Event` | [SpanEvent](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl/contexts/ottlspanevent/README.md) | +| `Metric` | [Metric](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl/contexts/ottlmetric/README.md) | +| `Datapoint` | [DataPoint](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl/contexts/ottldatapoint/README.md) | +| `Log` | [Log](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl/contexts/ottllog/README.md) | -``` -traces: - replace_match(name, "GET /user/*/list/*", "GET /user/{userId}/list/{listId}") -``` - -### Reduce cardinality of any matching attribute +To understand what OTTL offers as a language, check out [OTTL's grammar doc](./LANGUAGE.md). -``` -traces: - replace_all_matches(attributes, "/user/*/list/*", "/user/{userId}/list/{listId}") -``` - -### Decrease the size of the telemetry payload - -``` -traces: - delete(resource.attributes["process.command_line"]) -metrics: - delete(resource.attributes["process.command_line"]) -logs: - delete(resource.attributes["process.command_line"]) -``` +## Where to use OTTL -### Attach information from resource into telemetry +- To modify your data as it passes through a pipeline, use the [transform processor](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/transformprocessor/README.md). +- To remove data from your pipeline, use the [filter processor](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/filterprocessor/README.md). +- To select spans to be sampled, use the [tail sampling processor](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/tailsamplingprocessor/README.md). +- To route data between pipelines, use the [routing connector](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/connector/routingconnector/README.md). 
-``` -metrics: - set(attributes["k8s_pod"], resource.attributes["k8s.pod.name"]) -``` +## Troubleshooting -### Decorate error spans with additional information +When using OTTL you can enable debug logging in the collector to print out useful information, +such as the current Statement/Condition and the current TransformContext, to help you troubleshoot +why a statement is not behaving as you expect. This feature is very verbose, but provides you an accurate +view into how OTTL views the underlying data. -``` -traces: - set(attributes["whose_fault"], "theirs") where attributes["http.status"] == 400 or attributes["http.status"] == 404 - set(attributes["whose_fault"], "ours") where attributes["http.status"] == 500 +```yaml +service: + telemetry: + logs: + level: debug ``` -### Update a spans ID - ``` -logs: - set(span_id, SpanID(0x0000000000000000)) -traces: - set(span_id, SpanID(0x0000000000000000)) +2024-05-29T16:38:09.600-0600 debug ottl@v0.101.0/parser.go:265 initial TransformContext {"kind": "processor", "name": "transform", "pipeline": "logs", "TransformContext": {"resource": {"attributes": {}, "dropped_attribute_count": 0}, "scope": {"attributes": {}, "dropped_attribute_count": 0, "name": "", "version": ""}, "log_record": {"attributes": {"log.file.name": "test.log"}, "body": "test", "dropped_attribute_count": 0, "flags": 0, "observed_time_unix_nano": 1717022289500721000, "severity_number": 0, "severity_text": "", "span_id": "", "time_unix_nano": 0, "trace_id": ""}, "cache": {}}} +2024-05-29T16:38:09.600-0600 debug ottl@v0.101.0/parser.go:268 TransformContext after statement execution {"kind": "processor", "name": "transform", "pipeline": "logs", "statement": "set(resource.attributes[\"test\"], \"pass\")", "condition matched": true, "TransformContext": {"resource": {"attributes": {"test": "pass"}, "dropped_attribute_count": 0}, "scope": {"attributes": {}, "dropped_attribute_count": 0, "name": "", "version": ""}, "log_record": {"attributes": {"log.file.name": "test.log"}, "body": "test", "dropped_attribute_count": 0, "flags": 0, "observed_time_unix_nano": 1717022289500721000, "severity_number": 0, "severity_text": "", "span_id": "", "time_unix_nano": 0, "trace_id": ""}, "cache": {}}} +2024-05-29T16:38:09.600-0600 debug ottl@v0.101.0/parser.go:268 TransformContext after statement execution {"kind": "processor", "name": "transform", "pipeline": "logs", "statement": "set(instrumentation_scope.attributes[\"test\"], [\"pass\"])", "condition matched": true, "TransformContext": {"resource": {"attributes": {"test": "pass"}, "dropped_attribute_count": 0}, "scope": {"attributes": {"test": ["pass"]}, "dropped_attribute_count": 0, "name": "", "version": ""}, "log_record": {"attributes": {"log.file.name": "test.log"}, "body": "test", "dropped_attribute_count": 0, "flags": 0, "observed_time_unix_nano": 1717022289500721000, "severity_number": 0, "severity_text": "", "span_id": "", "time_unix_nano": 0, "trace_id": ""}, "cache": {}}} +2024-05-29T16:38:09.601-0600 debug ottl@v0.101.0/parser.go:268 TransformContext after statement execution {"kind": "processor", "name": "transform", "pipeline": "logs", "statement": "set(attributes[\"test\"], true)", "condition matched": true, "TransformContext": {"resource": {"attributes": {"test": "pass"}, "dropped_attribute_count": 0}, "scope": {"attributes": {"test": ["pass"]}, "dropped_attribute_count": 0, "name": "", "version": ""}, "log_record": {"attributes": {"log.file.name": "test.log", "test": true}, "body": "test", "dropped_attribute_count": 0, "flags": 0, 
"observed_time_unix_nano": 1717022289500721000, "severity_number": 0, "severity_text": "", "span_id": "", "time_unix_nano": 0, "trace_id": ""}, "cache": {}}} ``` -### Convert metric name to snake case +## Resources -``` -metrics: - set(metric.name, ConvertCase(metric.name, "snake")) -``` - -### Check if an attribute exists +These are previous conference presentations given about OTTL: -``` -traces: - set(attributes["test-passed"], true) where attributes["target-attribute"] != nil -``` +- [OTTL Me Why Transforming Telemetry in the OpenTelemetry Collector Just Got Better](https://youtu.be/uVs0oUV72CE) +- [Managing Observability Data at the Edge with the OpenTelemetry Collector and OTTL](https://youtu.be/GO0ulYLxy_8) +- [The OTTL Cookbook: A Collection of Solutions to Common Problems](https://www.youtube.com/watch?v=UGTU0-KT_60) \ No newline at end of file diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/boolean_value.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/boolean_value.go index 9bd2df9171b..fd3378d343c 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/boolean_value.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/boolean_value.go @@ -94,7 +94,6 @@ func (p *Parser[K]) newComparisonEvaluator(comparison *comparison) (BoolExpr[K], } return p.compare(a, b, comparison.Op), nil }}, nil - } func (p *Parser[K]) newBoolExpr(expr *booleanExpression) (BoolExpr[K], error) { diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/compare.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/compare.go index 50d1109f00c..0c20542f2a8 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/compare.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/compare.go @@ -7,7 +7,6 @@ import ( "bytes" "time" - "go.uber.org/zap" "golang.org/x/exp/constraints" ) @@ -17,9 +16,7 @@ import ( // invalidComparison returns false for everything except ne (where it returns true to indicate that the // objects were definitely not equivalent). -// It also gives us an opportunity to log something. 
-func (p *Parser[K]) invalidComparison(msg string, op compareOp) bool { - p.telemetrySettings.Logger.Debug(msg, zap.Any("op", op)) +func (p *Parser[K]) invalidComparison(op compareOp) bool { return op == ne } @@ -87,7 +84,7 @@ func (p *Parser[K]) compareBool(a bool, b any, op compareOp) bool { case bool: return compareBools(a, v, op) default: - return p.invalidComparison("bool to non-bool", op) + return p.invalidComparison(op) } } @@ -96,7 +93,7 @@ func (p *Parser[K]) compareString(a string, b any, op compareOp) bool { case string: return comparePrimitives(a, v, op) default: - return p.invalidComparison("string to non-string", op) + return p.invalidComparison(op) } } @@ -110,7 +107,7 @@ func (p *Parser[K]) compareByte(a []byte, b any, op compareOp) bool { } return compareBytes(a, v, op) default: - return p.invalidComparison("Bytes to non-Bytes", op) + return p.invalidComparison(op) } } @@ -121,7 +118,7 @@ func (p *Parser[K]) compareInt64(a int64, b any, op compareOp) bool { case float64: return comparePrimitives(float64(a), v, op) default: - return p.invalidComparison("int to non-numeric value", op) + return p.invalidComparison(op) } } @@ -132,7 +129,7 @@ func (p *Parser[K]) compareFloat64(a float64, b any, op compareOp) bool { case float64: return comparePrimitives(a, v, op) default: - return p.invalidComparison("float to non-numeric value", op) + return p.invalidComparison(op) } } @@ -143,7 +140,7 @@ func (p *Parser[K]) compareDuration(a time.Duration, b any, op compareOp) bool { vnsecs := v.Nanoseconds() return comparePrimitives(ansecs, vnsecs, op) default: - return p.invalidComparison("cannot compare invalid duration", op) + return p.invalidComparison(op) } } @@ -164,10 +161,10 @@ func (p *Parser[K]) compareTime(a time.Time, b any, op compareOp) bool { case gt: return a.After(v) default: - return p.invalidComparison("invalid comparison operator", op) + return p.invalidComparison(op) } default: - return p.invalidComparison("time to non-time value", op) + return p.invalidComparison(op) } } @@ -211,7 +208,7 @@ func (p *Parser[K]) compare(a any, b any, op compareOp) bool { case ne: return a != b default: - return p.invalidComparison("unsupported type for inequality on left", op) + return p.invalidComparison(op) } } } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/context_inferrer.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/context_inferrer.go new file mode 100644 index 00000000000..da4ade78327 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/context_inferrer.go @@ -0,0 +1,77 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package ottl // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" + +import "math" + +var defaultContextInferPriority = []string{ + "log", + "metric", + "datapoint", + "spanevent", + "span", + "resource", + "scope", + "instrumentation_scope", +} + +// contextInferrer is an interface used to infer the OTTL context from statements paths. +type contextInferrer interface { + // infer returns the OTTL context inferred from the given statements paths. 
+ infer(statements []string) (string, error) +} + +type priorityContextInferrer struct { + contextPriority map[string]int +} + +func (s *priorityContextInferrer) infer(statements []string) (string, error) { + var inferredContext string + var inferredContextPriority int + + for _, statement := range statements { + parsed, err := parseStatement(statement) + if err != nil { + return inferredContext, err + } + + for _, p := range getParsedStatementPaths(parsed) { + pathContextPriority, ok := s.contextPriority[p.Context] + if !ok { + // Lowest priority + pathContextPriority = math.MaxInt + } + + if inferredContext == "" || pathContextPriority < inferredContextPriority { + inferredContext = p.Context + inferredContextPriority = pathContextPriority + } + } + } + + return inferredContext, nil +} + +// defaultPriorityContextInferrer is like newPriorityContextInferrer, but uses the default +// context priorities. +func defaultPriorityContextInferrer() contextInferrer { + return newPriorityContextInferrer(defaultContextInferPriority) +} + +// newPriorityContextInferrer creates a new priority-based context inferrer. +// To infer the context, it compares all [ottl.Path.Context] values, prioritizing them based +// on the provided contextsPriority argument: the lower a context's position in the array, +// the higher its priority. +// Unknown/non-prioritized contexts found on the statements are given the lowest priority, +// so they are only inferred when no prioritized context is present. +func newPriorityContextInferrer(contextsPriority []string) contextInferrer { + contextPriority := make(map[string]int, len(contextsPriority)) + for i, ctx := range contextsPriority { + contextPriority[ctx] = i + } + return &priorityContextInferrer{ + contextPriority: contextPriority, + } +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/logging/logging.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/logging/logging.go new file mode 100644 index 00000000000..4837506d55d --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/logging/logging.go @@ -0,0 +1,390 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package logging // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/logging" + +import ( + "encoding/hex" + "errors" + + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" + "go.uber.org/zap/zapcore" +) + +type Slice pcommon.Slice + +func (s Slice) MarshalLogArray(encoder zapcore.ArrayEncoder) error { + ss := pcommon.Slice(s) + var err error + for i := 0; i < ss.Len(); i++ { + v := ss.At(i) + switch v.Type() { + case pcommon.ValueTypeStr: + encoder.AppendString(v.Str()) + case pcommon.ValueTypeBool: + encoder.AppendBool(v.Bool()) + case pcommon.ValueTypeInt: + encoder.AppendInt64(v.Int()) + case pcommon.ValueTypeDouble: + encoder.AppendFloat64(v.Double()) + case pcommon.ValueTypeMap: + err = errors.Join(err, encoder.AppendObject(Map(v.Map()))) + case pcommon.ValueTypeSlice: + err = errors.Join(err, encoder.AppendArray(Slice(v.Slice()))) + case pcommon.ValueTypeBytes: + encoder.AppendByteString(v.Bytes().AsRaw()) + } + } + return err +} +
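Each wrapper in this new logging package adapts a pdata type to zap's `ObjectMarshaler`/`ArrayMarshaler` interfaces, which is what lets OTTL emit the structured debug logs shown in the README troubleshooting section above. A rough sketch of the intended usage; illustrative only, since the package is `internal` to the contrib module and not importable from outside it:

```go
package main

import (
	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.uber.org/zap"

	// internal package: only importable from within opentelemetry-collector-contrib;
	// referenced here purely for illustration.
	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/logging"
)

func main() {
	logger, _ := zap.NewDevelopment()

	res := pcommon.NewResource()
	res.Attributes().PutStr("service.name", "tempo")

	// logging.Resource satisfies zapcore.ObjectMarshaler, so zap encodes the
	// pdata structure directly, with no intermediate map conversion.
	logger.Debug("resource under transformation", zap.Object("resource", logging.Resource(res)))
}
```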
+type Map pcommon.Map + +func (m Map) MarshalLogObject(encoder zapcore.ObjectEncoder) error { + mm := pcommon.Map(m) + var err error + mm.Range(func(k string, v pcommon.Value) bool { + switch v.Type() { + case pcommon.ValueTypeStr: + encoder.AddString(k, v.Str()) + case pcommon.ValueTypeBool: + encoder.AddBool(k, v.Bool()) + case pcommon.ValueTypeInt: + encoder.AddInt64(k, v.Int()) + case pcommon.ValueTypeDouble: + encoder.AddFloat64(k, v.Double()) + case pcommon.ValueTypeMap: + err = errors.Join(err, encoder.AddObject(k, Map(v.Map()))) + case pcommon.ValueTypeSlice: + err = errors.Join(err, encoder.AddArray(k, Slice(v.Slice()))) + case pcommon.ValueTypeBytes: + encoder.AddByteString(k, v.Bytes().AsRaw()) + } + return true + }) + return err +} + +type Resource pcommon.Resource + +func (r Resource) MarshalLogObject(encoder zapcore.ObjectEncoder) error { + rr := pcommon.Resource(r) + err := encoder.AddObject("attributes", Map(rr.Attributes())) + encoder.AddUint32("dropped_attribute_count", rr.DroppedAttributesCount()) + return err +} + +type InstrumentationScope pcommon.InstrumentationScope + +func (i InstrumentationScope) MarshalLogObject(encoder zapcore.ObjectEncoder) error { + is := pcommon.InstrumentationScope(i) + err := encoder.AddObject("attributes", Map(is.Attributes())) + encoder.AddUint32("dropped_attribute_count", is.DroppedAttributesCount()) + encoder.AddString("name", is.Name()) + encoder.AddString("version", is.Version()) + return err +} + +type Span ptrace.Span + +func (s Span) MarshalLogObject(encoder zapcore.ObjectEncoder) error { + ss := ptrace.Span(s) + parentSpanID := ss.ParentSpanID() + spanID := ss.SpanID() + traceID := ss.TraceID() + err := encoder.AddObject("attributes", Map(ss.Attributes())) + encoder.AddUint32("dropped_attribute_count", ss.DroppedAttributesCount()) + encoder.AddUint32("dropped_events_count", ss.DroppedEventsCount()) + encoder.AddUint32("dropped_links_count", ss.DroppedLinksCount()) + encoder.AddUint64("end_time_unix_nano", uint64(ss.EndTimestamp())) + err = errors.Join(err, encoder.AddArray("events", SpanEventSlice(ss.Events()))) + encoder.AddString("kind", ss.Kind().String()) + err = errors.Join(err, encoder.AddArray("links", SpanLinkSlice(ss.Links()))) + encoder.AddString("name", ss.Name()) + encoder.AddString("parent_span_id", hex.EncodeToString(parentSpanID[:])) + encoder.AddString("span_id", hex.EncodeToString(spanID[:])) + encoder.AddUint64("start_time_unix_nano", uint64(ss.StartTimestamp())) + encoder.AddString("status.code", ss.Status().Code().String()) + encoder.AddString("status.message", ss.Status().Message()) + encoder.AddString("trace_id", hex.EncodeToString(traceID[:])) + encoder.AddString("trace_state", ss.TraceState().AsRaw()) + return err +} + +type SpanEventSlice ptrace.SpanEventSlice + +func (s SpanEventSlice) MarshalLogArray(encoder zapcore.ArrayEncoder) error { + ses := ptrace.SpanEventSlice(s) + var err error + for i := 0; i < ses.Len(); i++ { + err = errors.Join(err, encoder.AppendObject(SpanEvent(ses.At(i)))) + } + return err +} + +type SpanEvent ptrace.SpanEvent + +func (s SpanEvent) MarshalLogObject(encoder zapcore.ObjectEncoder) error { + se := ptrace.SpanEvent(s) + err := encoder.AddObject("attributes", Map(se.Attributes())) + encoder.AddUint32("dropped_attribute_count", se.DroppedAttributesCount()) + encoder.AddString("name", se.Name()) + encoder.AddUint64("time_unix_nano", uint64(se.Timestamp())) + return err +} + +type SpanLinkSlice ptrace.SpanLinkSlice + +func (s SpanLinkSlice) MarshalLogArray(encoder
zapcore.ArrayEncoder) error { + sls := ptrace.SpanLinkSlice(s) + var err error + for i := 0; i < sls.Len(); i++ { + err = errors.Join(err, encoder.AppendObject(SpanLink(sls.At(i)))) + } + return err +} + +type SpanLink ptrace.SpanLink + +func (s SpanLink) MarshalLogObject(encoder zapcore.ObjectEncoder) error { + sl := ptrace.SpanLink(s) + spanID := sl.SpanID() + traceID := sl.TraceID() + err := encoder.AddObject("attributes", Map(sl.Attributes())) + encoder.AddUint32("dropped_attribute_count", sl.DroppedAttributesCount()) + encoder.AddUint32("flags", sl.Flags()) + encoder.AddString("span_id", hex.EncodeToString(spanID[:])) + encoder.AddString("trace_id", hex.EncodeToString(traceID[:])) + encoder.AddString("trace_state", sl.TraceState().AsRaw()) + return err +} + +type Metric pmetric.Metric + +func (m Metric) MarshalLogObject(encoder zapcore.ObjectEncoder) error { + mm := pmetric.Metric(m) + encoder.AddString("description", mm.Description()) + encoder.AddString("name", mm.Name()) + encoder.AddString("unit", mm.Unit()) + encoder.AddString("type", mm.Type().String()) + + var err error + switch mm.Type() { + case pmetric.MetricTypeSum: + encoder.AddString("aggregation_temporality", mm.Sum().AggregationTemporality().String()) + encoder.AddBool("is_monotonic", mm.Sum().IsMonotonic()) + err = encoder.AddArray("datapoints", NumberDataPointSlice(mm.Sum().DataPoints())) + case pmetric.MetricTypeGauge: + err = encoder.AddArray("datapoints", NumberDataPointSlice(mm.Gauge().DataPoints())) + case pmetric.MetricTypeHistogram: + encoder.AddString("aggregation_temporality", mm.Histogram().AggregationTemporality().String()) + err = encoder.AddArray("datapoints", HistogramDataPointSlice(mm.Histogram().DataPoints())) + case pmetric.MetricTypeExponentialHistogram: + encoder.AddString("aggregation_temporality", mm.ExponentialHistogram().AggregationTemporality().String()) + err = encoder.AddArray("datapoints", ExponentialHistogramDataPointSlice(mm.ExponentialHistogram().DataPoints())) + case pmetric.MetricTypeSummary: + err = encoder.AddArray("datapoints", SummaryDataPointSlice(mm.Summary().DataPoints())) + } + + return err +} + +type NumberDataPointSlice pmetric.NumberDataPointSlice + +func (n NumberDataPointSlice) MarshalLogArray(encoder zapcore.ArrayEncoder) error { + ndps := pmetric.NumberDataPointSlice(n) + var err error + for i := 0; i < ndps.Len(); i++ { + err = errors.Join(err, encoder.AppendObject(NumberDataPoint(ndps.At(i)))) + } + return err +} + +type NumberDataPoint pmetric.NumberDataPoint + +func (n NumberDataPoint) MarshalLogObject(encoder zapcore.ObjectEncoder) error { + ndp := pmetric.NumberDataPoint(n) + err := encoder.AddObject("attributes", Map(ndp.Attributes())) + err = errors.Join(err, encoder.AddArray("exemplars", ExemplarSlice(ndp.Exemplars()))) + encoder.AddUint32("flags", uint32(ndp.Flags())) + encoder.AddUint64("start_time_unix_nano", uint64(ndp.StartTimestamp())) + encoder.AddUint64("time_unix_nano", uint64(ndp.Timestamp())) + if ndp.ValueType() == pmetric.NumberDataPointValueTypeInt { + encoder.AddInt64("value_int", ndp.IntValue()) + } + if ndp.ValueType() == pmetric.NumberDataPointValueTypeDouble { + encoder.AddFloat64("value_double", ndp.DoubleValue()) + } + + return err +} + +type HistogramDataPointSlice pmetric.HistogramDataPointSlice + +func (h HistogramDataPointSlice) MarshalLogArray(encoder zapcore.ArrayEncoder) error { + hdps := pmetric.HistogramDataPointSlice(h) + var err error + for i := 0; i < hdps.Len(); i++ { + err = errors.Join(err, 
+type NumberDataPointSlice pmetric.NumberDataPointSlice
+
+func (n NumberDataPointSlice) MarshalLogArray(encoder zapcore.ArrayEncoder) error {
+	ndps := pmetric.NumberDataPointSlice(n)
+	var err error
+	for i := 0; i < ndps.Len(); i++ {
+		err = errors.Join(err, encoder.AppendObject(NumberDataPoint(ndps.At(i))))
+	}
+	return err
+}
+
+type NumberDataPoint pmetric.NumberDataPoint
+
+func (n NumberDataPoint) MarshalLogObject(encoder zapcore.ObjectEncoder) error {
+	ndp := pmetric.NumberDataPoint(n)
+	err := encoder.AddObject("attributes", Map(ndp.Attributes()))
+	err = errors.Join(err, encoder.AddArray("exemplars", ExemplarSlice(ndp.Exemplars())))
+	encoder.AddUint32("flags", uint32(ndp.Flags()))
+	encoder.AddUint64("start_time_unix_nano", uint64(ndp.StartTimestamp()))
+	encoder.AddUint64("time_unix_nano", uint64(ndp.Timestamp()))
+	if ndp.ValueType() == pmetric.NumberDataPointValueTypeInt {
+		encoder.AddInt64("value_int", ndp.IntValue())
+	}
+	if ndp.ValueType() == pmetric.NumberDataPointValueTypeDouble {
+		encoder.AddFloat64("value_double", ndp.DoubleValue())
+	}
+
+	return err
+}
+
+type HistogramDataPointSlice pmetric.HistogramDataPointSlice
+
+func (h HistogramDataPointSlice) MarshalLogArray(encoder zapcore.ArrayEncoder) error {
+	hdps := pmetric.HistogramDataPointSlice(h)
+	var err error
+	for i := 0; i < hdps.Len(); i++ {
+		err = errors.Join(err, encoder.AppendObject(HistogramDataPoint(hdps.At(i))))
+	}
+	return err
+}
+
+type HistogramDataPoint pmetric.HistogramDataPoint
+
+func (h HistogramDataPoint) MarshalLogObject(encoder zapcore.ObjectEncoder) error {
+	hdp := pmetric.HistogramDataPoint(h)
+	err := encoder.AddObject("attributes", Map(hdp.Attributes()))
+	err = errors.Join(err, encoder.AddArray("bucket_counts", UInt64Slice(hdp.BucketCounts())))
+	encoder.AddUint64("count", hdp.Count())
+	err = errors.Join(err, encoder.AddArray("exemplars", ExemplarSlice(hdp.Exemplars())))
+	err = errors.Join(err, encoder.AddArray("explicit_bounds", Float64Slice(hdp.ExplicitBounds())))
+	encoder.AddUint32("flags", uint32(hdp.Flags()))
+	encoder.AddFloat64("max", hdp.Max())
+	encoder.AddFloat64("min", hdp.Min())
+	encoder.AddUint64("start_time_unix_nano", uint64(hdp.StartTimestamp()))
+	encoder.AddFloat64("sum", hdp.Sum())
+	encoder.AddUint64("time_unix_nano", uint64(hdp.Timestamp()))
+
+	return err
+}
+
+type ExponentialHistogramDataPointSlice pmetric.ExponentialHistogramDataPointSlice
+
+func (e ExponentialHistogramDataPointSlice) MarshalLogArray(encoder zapcore.ArrayEncoder) error {
+	ehdps := pmetric.ExponentialHistogramDataPointSlice(e)
+	var err error
+	for i := 0; i < ehdps.Len(); i++ {
+		err = errors.Join(err, encoder.AppendObject(ExponentialHistogramDataPoint(ehdps.At(i))))
+	}
+	return err
+}
+
+type ExponentialHistogramDataPoint pmetric.ExponentialHistogramDataPoint
+
+func (e ExponentialHistogramDataPoint) MarshalLogObject(encoder zapcore.ObjectEncoder) error {
+	ehdp := pmetric.ExponentialHistogramDataPoint(e)
+	err := encoder.AddObject("attributes", Map(ehdp.Attributes()))
+	encoder.AddUint64("count", ehdp.Count())
+	err = errors.Join(err, encoder.AddArray("exemplars", ExemplarSlice(ehdp.Exemplars())))
+	encoder.AddUint32("flags", uint32(ehdp.Flags()))
+	encoder.AddFloat64("max", ehdp.Max())
+	encoder.AddFloat64("min", ehdp.Min())
+	err = errors.Join(err, encoder.AddObject("negative", ExponentialHistogramDataPointBuckets(ehdp.Negative())))
+	err = errors.Join(err, encoder.AddObject("positive", ExponentialHistogramDataPointBuckets(ehdp.Positive())))
+	encoder.AddInt32("scale", ehdp.Scale())
+	encoder.AddUint64("start_time_unix_nano", uint64(ehdp.StartTimestamp()))
+	encoder.AddFloat64("sum", ehdp.Sum())
+	encoder.AddUint64("time_unix_nano", uint64(ehdp.Timestamp()))
+	encoder.AddUint64("zero_count", ehdp.ZeroCount())
+	encoder.AddFloat64("zero_threshold", ehdp.ZeroThreshold())
+	return err
+}
+
+type ExponentialHistogramDataPointBuckets pmetric.ExponentialHistogramDataPointBuckets
+
+func (e ExponentialHistogramDataPointBuckets) MarshalLogObject(encoder zapcore.ObjectEncoder) error {
+	b := pmetric.ExponentialHistogramDataPointBuckets(e)
+	err := encoder.AddArray("bucket_counts", UInt64Slice(b.BucketCounts()))
+	encoder.AddInt32("offset", b.Offset())
+	return err
+}
+
+type SummaryDataPointSlice pmetric.SummaryDataPointSlice
+
+func (s SummaryDataPointSlice) MarshalLogArray(encoder zapcore.ArrayEncoder) error {
+	sdps := pmetric.SummaryDataPointSlice(s)
+	var err error
+	for i := 0; i < sdps.Len(); i++ {
+		err = errors.Join(err, encoder.AppendObject(SummaryDataPoint(sdps.At(i))))
+	}
+	return err
+}
+
+type SummaryDataPoint pmetric.SummaryDataPoint
+
+func (s SummaryDataPoint) MarshalLogObject(encoder zapcore.ObjectEncoder) error {
+	sdp := pmetric.SummaryDataPoint(s)
+	err := encoder.AddObject("attributes", Map(sdp.Attributes()))
+	encoder.AddUint64("count", sdp.Count())
+	encoder.AddUint32("flags", uint32(sdp.Flags()))
+	encoder.AddUint64("start_time_unix_nano", uint64(sdp.StartTimestamp()))
+	encoder.AddFloat64("sum", sdp.Sum())
+	encoder.AddUint64("time_unix_nano", uint64(sdp.Timestamp()))
+	err = errors.Join(err, encoder.AddArray("quantile_values", SummaryDataPointValueAtQuantileSlice(sdp.QuantileValues())))
+
+	return err
+}
+
+type SummaryDataPointValueAtQuantileSlice pmetric.SummaryDataPointValueAtQuantileSlice
+
+func (s SummaryDataPointValueAtQuantileSlice) MarshalLogArray(encoder zapcore.ArrayEncoder) error {
+	qs := pmetric.SummaryDataPointValueAtQuantileSlice(s)
+	var err error
+	for i := 0; i < qs.Len(); i++ {
+		err = errors.Join(err, encoder.AppendObject(SummaryDataPointValueAtQuantile(qs.At(i))))
+	}
+	return err
+}
+
+type SummaryDataPointValueAtQuantile pmetric.SummaryDataPointValueAtQuantile
+
+func (s SummaryDataPointValueAtQuantile) MarshalLogObject(encoder zapcore.ObjectEncoder) error {
+	q := pmetric.SummaryDataPointValueAtQuantile(s)
+	encoder.AddFloat64("value", q.Value())
+	encoder.AddFloat64("quantile", q.Quantile())
+	return nil
+}
+
+type UInt64Slice pcommon.UInt64Slice
+
+func (u UInt64Slice) MarshalLogArray(encoder zapcore.ArrayEncoder) error {
+	uis := pcommon.UInt64Slice(u)
+	for i := 0; i < uis.Len(); i++ {
+		encoder.AppendUint64(uis.At(i))
+	}
+	return nil
+}
+
+type Float64Slice pcommon.Float64Slice
+
+func (f Float64Slice) MarshalLogArray(encoder zapcore.ArrayEncoder) error {
+	fs := pcommon.Float64Slice(f)
+	for i := 0; i < fs.Len(); i++ {
+		encoder.AppendFloat64(fs.At(i))
+	}
+	return nil
+}
+
+type ExemplarSlice pmetric.ExemplarSlice
+
+func (e ExemplarSlice) MarshalLogArray(encoder zapcore.ArrayEncoder) error {
+	es := pmetric.ExemplarSlice(e)
+	var err error
+	for i := 0; i < es.Len(); i++ {
+		ee := es.At(i)
+		err = errors.Join(err, encoder.AppendObject(Exemplar(ee)))
+	}
+	return err
+}
+
+type Exemplar pmetric.Exemplar
+
+func (e Exemplar) MarshalLogObject(encoder zapcore.ObjectEncoder) error {
+	ee := pmetric.Exemplar(e)
+	spanID := ee.SpanID()
+	traceID := ee.TraceID()
+	err := encoder.AddObject("filtered_attributes", Map(ee.FilteredAttributes()))
+	encoder.AddString("span_id", hex.EncodeToString(spanID[:]))
+	encoder.AddUint64("time_unix_nano", uint64(ee.Timestamp()))
+	encoder.AddString("trace_id", hex.EncodeToString(traceID[:]))
+	if ee.ValueType() == pmetric.ExemplarValueTypeInt {
+		encoder.AddInt64("value_int", ee.IntValue())
+	}
+	if ee.ValueType() == pmetric.ExemplarValueTypeDouble {
+		encoder.AddFloat64("value_double", ee.DoubleValue())
+	}
+	return err
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/metric.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/metric.go
index 97aa011d118..e2944a73df4 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/metric.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/metric.go
@@ -53,10 +53,10 @@ func MetricPathGetSetter[K MetricContext](path ottl.Path[K]) (ottl.GetSetter[K],
 
 func accessMetric[K MetricContext]() ottl.StandardGetSetter[K] {
 	return ottl.StandardGetSetter[K]{
-		Getter: func(ctx context.Context, tCtx K) (any, error) {
+		Getter: func(_ context.Context, tCtx K) (any, error) {
 			return tCtx.GetMetric(), nil
 		},
-		Setter: func(ctx context.Context, tCtx K, val any) error {
+		Setter: func(_ context.Context, tCtx K, val any) error {
 			if newMetric, ok := val.(pmetric.Metric); ok {
 				newMetric.CopyTo(tCtx.GetMetric())
 			}
@@ -67,10 +67,10 @@ func accessMetric[K MetricContext]() ottl.StandardGetSetter[K] {
 
 func accessName[K MetricContext]() ottl.StandardGetSetter[K] {
 	return ottl.StandardGetSetter[K]{
-		Getter: func(ctx context.Context, tCtx K) (any, error) {
+		Getter: func(_ context.Context, tCtx K) (any, error) {
 			return tCtx.GetMetric().Name(), nil
 		},
-		Setter: func(ctx context.Context, tCtx K, val any) error {
+		Setter: func(_ context.Context, tCtx K, val any) error {
 			if str, ok := val.(string); ok {
 				tCtx.GetMetric().SetName(str)
 			}
@@ -81,10 +81,10 @@ func accessName[K MetricContext]() ottl.StandardGetSetter[K] {
 
 func accessDescription[K MetricContext]() ottl.StandardGetSetter[K] {
 	return ottl.StandardGetSetter[K]{
-		Getter: func(ctx context.Context, tCtx K) (any, error) {
+		Getter: func(_ context.Context, tCtx K) (any, error) {
 			return tCtx.GetMetric().Description(), nil
 		},
-		Setter: func(ctx context.Context, tCtx K, val any) error {
+		Setter: func(_ context.Context, tCtx K, val any) error {
 			if str, ok := val.(string); ok {
 				tCtx.GetMetric().SetDescription(str)
 			}
@@ -95,10 +95,10 @@ func accessDescription[K MetricContext]() ottl.StandardGetSetter[K] {
 
 func accessUnit[K MetricContext]() ottl.StandardGetSetter[K] {
 	return ottl.StandardGetSetter[K]{
-		Getter: func(ctx context.Context, tCtx K) (any, error) {
+		Getter: func(_ context.Context, tCtx K) (any, error) {
 			return tCtx.GetMetric().Unit(), nil
 		},
-		Setter: func(ctx context.Context, tCtx K, val any) error {
+		Setter: func(_ context.Context, tCtx K, val any) error {
 			if str, ok := val.(string); ok {
 				tCtx.GetMetric().SetUnit(str)
 			}
@@ -109,10 +109,10 @@ func accessUnit[K MetricContext]() ottl.StandardGetSetter[K] {
 
 func accessType[K MetricContext]() ottl.StandardGetSetter[K] {
 	return ottl.StandardGetSetter[K]{
-		Getter: func(ctx context.Context, tCtx K) (any, error) {
+		Getter: func(_ context.Context, tCtx K) (any, error) {
 			return int64(tCtx.GetMetric().Type()), nil
 		},
-		Setter: func(ctx context.Context, tCtx K, val any) error {
+		Setter: func(_ context.Context, _ K, _ any) error {
 			// TODO Implement methods so correctly convert data types.
 			// https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/10130
 			return nil
@@ -122,7 +122,7 @@ func accessType[K MetricContext]() ottl.StandardGetSetter[K] {
 
 func accessAggTemporality[K MetricContext]() ottl.StandardGetSetter[K] {
 	return ottl.StandardGetSetter[K]{
-		Getter: func(ctx context.Context, tCtx K) (any, error) {
+		Getter: func(_ context.Context, tCtx K) (any, error) {
 			metric := tCtx.GetMetric()
 			switch metric.Type() {
 			case pmetric.MetricTypeSum:
@@ -134,7 +134,7 @@ func accessAggTemporality[K MetricContext]() ottl.StandardGetSetter[K] {
 			}
 			return nil, nil
 		},
-		Setter: func(ctx context.Context, tCtx K, val any) error {
+		Setter: func(_ context.Context, tCtx K, val any) error {
 			if newAggTemporality, ok := val.(int64); ok {
 				metric := tCtx.GetMetric()
 				switch metric.Type() {
@@ -153,14 +153,14 @@ func accessAggTemporality[K MetricContext]() ottl.StandardGetSetter[K] {
 
 func accessIsMonotonic[K MetricContext]() ottl.StandardGetSetter[K] {
 	return ottl.StandardGetSetter[K]{
-		Getter: func(ctx context.Context, tCtx K) (any, error) {
+		Getter: func(_ context.Context, tCtx K) (any, error) {
 			metric := tCtx.GetMetric()
 			if metric.Type() == pmetric.MetricTypeSum {
 				return metric.Sum().IsMonotonic(), nil
 			}
 			return nil, nil
 		},
-		Setter: func(ctx context.Context, tCtx K, val any) error {
+		Setter: func(_ context.Context, tCtx K, val any) error {
 			if newIsMonotonic, ok := val.(bool); ok {
 				metric := tCtx.GetMetric()
 				if metric.Type() == pmetric.MetricTypeSum {
@@ -174,7 +174,7 @@ func accessIsMonotonic[K MetricContext]() ottl.StandardGetSetter[K] {
 
 func accessDataPoints[K MetricContext]() ottl.StandardGetSetter[K] {
 	return ottl.StandardGetSetter[K]{
-		Getter: func(ctx context.Context, tCtx K) (any, error) {
+		Getter: func(_ context.Context, tCtx K) (any, error) {
 			metric := tCtx.GetMetric()
 			switch metric.Type() {
 			case pmetric.MetricTypeSum:
@@ -190,7 +190,7 @@ func accessDataPoints[K MetricContext]() ottl.StandardGetSetter[K] {
 			}
 			return nil, nil
 		},
-		Setter: func(ctx context.Context, tCtx K, val any) error {
+		Setter: func(_ context.Context, tCtx K, val any) error {
 			metric := tCtx.GetMetric()
 			switch metric.Type() {
 			case pmetric.MetricTypeSum:
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/path.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/path.go
index c7d9d802b66..954d1432964 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/path.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/path.go
@@ -12,6 +12,7 @@ import (
 var _ ottl.Path[any] = &TestPath[any]{}
 
 type TestPath[K any] struct {
+	C        string
 	N        string
 	KeySlice []ottl.Key[K]
 	NextPath *TestPath[K]
@@ -21,6 +22,10 @@ func (p *TestPath[K]) Name() string {
 	return p.N
 }
 
+func (p *TestPath[K]) Context() string {
+	return p.C
+}
+
 func (p *TestPath[K]) Next() ottl.Path[K] {
 	if p.NextPath == nil {
 		return nil
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/resource.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/resource.go
index 3606f5d6bd6..2dfee7fce9f 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/resource.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/resource.go
@@ -13,6 +13,7 @@ import (
 
 type ResourceContext interface {
 	GetResource() pcommon.Resource
+	GetResourceSchemaURLItem() SchemaURLItem
 }
 
 func ResourcePathGetSetter[K ResourceContext](path ottl.Path[K]) (ottl.GetSetter[K], error) {
@@ -27,6 +28,8 @@ func ResourcePathGetSetter[K ResourceContext](path ottl.Path[K]) (ottl.GetSetter
 		return accessResourceAttributesKey[K](path.Keys()), nil
 	case "dropped_attributes_count":
 		return accessResourceDroppedAttributesCount[K](), nil
+	case "schema_url":
+		return accessResourceSchemaURLItem[K](), nil
 	default:
 		return nil, FormatDefaultErrorMessage(path.Name(), path.String(), "Resource", ResourceContextRef)
 	}
@@ -34,10 +37,10 @@ func ResourcePathGetSetter[K ResourceContext](path ottl.Path[K]) (ottl.GetSetter
 
 func accessResource[K ResourceContext]() ottl.StandardGetSetter[K] {
 	return ottl.StandardGetSetter[K]{
-		Getter: func(ctx context.Context, tCtx K) (any, error) {
+		Getter: func(_ context.Context, tCtx K) (any, error) {
 			return tCtx.GetResource(), nil
 		},
-		Setter: func(ctx context.Context, tCtx K, val any) error {
+		Setter: func(_ context.Context, tCtx K, val any) error {
 			if newRes, ok := val.(pcommon.Resource); ok {
 				newRes.CopyTo(tCtx.GetResource())
 			}
@@ -48,10 +51,10 @@ func accessResource[K ResourceContext]() ottl.StandardGetSetter[K] {
 
 func accessResourceAttributes[K ResourceContext]() ottl.StandardGetSetter[K] {
 	return ottl.StandardGetSetter[K]{
-		Getter: func(ctx context.Context, tCtx K) (any, error) {
+		Getter: func(_ context.Context, tCtx K) (any, error) {
 			return tCtx.GetResource().Attributes(), nil
 		},
-		Setter: func(ctx context.Context, tCtx K, val any) error {
+		Setter: func(_ context.Context, tCtx K, val any) error {
 			if attrs, ok := val.(pcommon.Map); ok {
 				attrs.CopyTo(tCtx.GetResource().Attributes())
 			}
@@ -73,10 +76,10 @@ func accessResourceAttributesKey[K ResourceContext](keys []ottl.Key[K]) ottl.Sta
 
 func accessResourceDroppedAttributesCount[K ResourceContext]() ottl.StandardGetSetter[K] {
 	return ottl.StandardGetSetter[K]{
-		Getter: func(ctx context.Context, tCtx K) (any, error) {
+		Getter: func(_ context.Context, tCtx K) (any, error) {
 			return int64(tCtx.GetResource().DroppedAttributesCount()), nil
 		},
-		Setter: func(ctx context.Context, tCtx K, val any) error {
+		Setter: func(_ context.Context, tCtx K, val any) error {
 			if i, ok := val.(int64); ok {
 				tCtx.GetResource().SetDroppedAttributesCount(uint32(i))
 			}
@@ -84,3 +87,17 @@ func accessResourceDroppedAttributesCount[K ResourceContext]() ottl.StandardGetS
 		},
 	}
 }
+
+func accessResourceSchemaURLItem[K ResourceContext]() ottl.StandardGetSetter[K] {
+	return ottl.StandardGetSetter[K]{
+		Getter: func(_ context.Context, tCtx K) (any, error) {
+			return tCtx.GetResourceSchemaURLItem().SchemaUrl(), nil
+		},
+		Setter: func(_ context.Context, tCtx K, val any) error {
+			if schemaURL, ok := val.(string); ok {
+				tCtx.GetResourceSchemaURLItem().SetSchemaUrl(schemaURL)
+			}
+			return nil
+		},
+	}
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/schema.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/schema.go
new file mode 100644
index 00000000000..c5b6702200e
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/schema.go
@@ -0,0 +1,11 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+package internal // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal"
+
+//revive:disable:var-naming The methods in this interface are defined by pdata types.
+type SchemaURLItem interface {
+	SchemaUrl() string
+	SetSchemaUrl(v string)
+}
+
+//revive:enable:var-naming
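Reviewer note: the new SchemaURLItem interface is satisfied by the generated pdata container types, which is why the contexts can hand back pmetric.ResourceMetrics / plog.ResourceLogs directly. A small sketch confirming that (the schema URL value is just an example):

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pmetric"
)

// SchemaURLItem mirrors the interface added in schema.go above.
type SchemaURLItem interface {
	SchemaUrl() string
	SetSchemaUrl(v string)
}

func main() {
	rm := pmetric.NewResourceMetrics()
	var item SchemaURLItem = rm // compiles: ResourceMetrics has SchemaUrl/SetSchemaUrl
	item.SetSchemaUrl("https://opentelemetry.io/schemas/1.24.0")
	fmt.Println(item.SchemaUrl())
}
```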
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/scope.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/scope.go
index dcf85ae6881..6bc5d735200 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/scope.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/scope.go
@@ -13,6 +13,7 @@ import (
 
 type InstrumentationScopeContext interface {
 	GetInstrumentationScope() pcommon.InstrumentationScope
+	GetScopeSchemaURLItem() SchemaURLItem
 }
 
 func ScopePathGetSetter[K InstrumentationScopeContext](path ottl.Path[K]) (ottl.GetSetter[K], error) {
@@ -32,6 +33,8 @@ func ScopePathGetSetter[K InstrumentationScopeContext](path ottl.Path[K]) (ottl.
 		return accessInstrumentationScopeAttributesKey[K](mapKeys), nil
 	case "dropped_attributes_count":
 		return accessInstrumentationScopeDroppedAttributesCount[K](), nil
+	case "schema_url":
+		return accessInstrumentationScopeSchemaURLItem[K](), nil
 	default:
 		return nil, FormatDefaultErrorMessage(path.Name(), path.String(), "Instrumentation Scope", InstrumentationScopeRef)
 	}
@@ -39,10 +42,10 @@ func ScopePathGetSetter[K InstrumentationScopeContext](path ottl.Path[K]) (ottl.
 
 func accessInstrumentationScope[K InstrumentationScopeContext]() ottl.StandardGetSetter[K] {
 	return ottl.StandardGetSetter[K]{
-		Getter: func(ctx context.Context, tCtx K) (any, error) {
+		Getter: func(_ context.Context, tCtx K) (any, error) {
 			return tCtx.GetInstrumentationScope(), nil
 		},
-		Setter: func(ctx context.Context, tCtx K, val any) error {
+		Setter: func(_ context.Context, tCtx K, val any) error {
 			if newIl, ok := val.(pcommon.InstrumentationScope); ok {
 				newIl.CopyTo(tCtx.GetInstrumentationScope())
 			}
@@ -53,10 +56,10 @@ func accessInstrumentationScope[K InstrumentationScopeContext]() ottl.StandardGe
 
 func accessInstrumentationScopeAttributes[K InstrumentationScopeContext]() ottl.StandardGetSetter[K] {
 	return ottl.StandardGetSetter[K]{
-		Getter: func(ctx context.Context, tCtx K) (any, error) {
+		Getter: func(_ context.Context, tCtx K) (any, error) {
 			return tCtx.GetInstrumentationScope().Attributes(), nil
 		},
-		Setter: func(ctx context.Context, tCtx K, val any) error {
+		Setter: func(_ context.Context, tCtx K, val any) error {
 			if attrs, ok := val.(pcommon.Map); ok {
 				attrs.CopyTo(tCtx.GetInstrumentationScope().Attributes())
 			}
@@ -78,10 +81,10 @@ func accessInstrumentationScopeAttributesKey[K InstrumentationScopeContext](keys
 
 func accessInstrumentationScopeName[K InstrumentationScopeContext]() ottl.StandardGetSetter[K] {
 	return ottl.StandardGetSetter[K]{
-		Getter: func(ctx context.Context, tCtx K) (any, error) {
+		Getter: func(_ context.Context, tCtx K) (any, error) {
 			return tCtx.GetInstrumentationScope().Name(), nil
 		},
-		Setter: func(ctx context.Context, tCtx K, val any) error {
+		Setter: func(_ context.Context, tCtx K, val any) error {
 			if str, ok := val.(string); ok {
 				tCtx.GetInstrumentationScope().SetName(str)
 			}
@@ -92,10 +95,10 @@ func accessInstrumentationScopeName[K InstrumentationScopeContext]() ottl.Standa
 
 func accessInstrumentationScopeVersion[K InstrumentationScopeContext]() ottl.StandardGetSetter[K] {
 	return ottl.StandardGetSetter[K]{
-		Getter: func(ctx context.Context, tCtx K) (any, error) {
+		Getter: func(_ context.Context, tCtx K) (any, error) {
 			return tCtx.GetInstrumentationScope().Version(), nil
 		},
-		Setter: func(ctx context.Context, tCtx K, val any) error {
+		Setter: func(_ context.Context, tCtx K, val any) error {
 			if str, ok := val.(string); ok {
 				tCtx.GetInstrumentationScope().SetVersion(str)
 			}
@@ -106,10 +109,10 @@ func accessInstrumentationScopeVersion[K InstrumentationScopeContext]() ottl.Sta
 
 func accessInstrumentationScopeDroppedAttributesCount[K InstrumentationScopeContext]() ottl.StandardGetSetter[K] {
 	return ottl.StandardGetSetter[K]{
-		Getter: func(ctx context.Context, tCtx K) (any, error) {
+		Getter: func(_ context.Context, tCtx K) (any, error) {
 			return int64(tCtx.GetInstrumentationScope().DroppedAttributesCount()), nil
 		},
-		Setter: func(ctx context.Context, tCtx K, val any) error {
+		Setter: func(_ context.Context, tCtx K, val any) error {
 			if i, ok := val.(int64); ok {
 				tCtx.GetInstrumentationScope().SetDroppedAttributesCount(uint32(i))
 			}
@@ -117,3 +120,17 @@ func accessInstrumentationScopeDroppedAttributesCount[K InstrumentationScopeCont
 		},
 	}
 }
+
+func accessInstrumentationScopeSchemaURLItem[K InstrumentationScopeContext]() ottl.StandardGetSetter[K] {
+	return ottl.StandardGetSetter[K]{
+		Getter: func(_ context.Context, tCtx K) (any, error) {
+			return tCtx.GetScopeSchemaURLItem().SchemaUrl(), nil
+		},
+		Setter: func(_ context.Context, tCtx K, val any) error {
+			if schemaURL, ok := val.(string); ok {
+				tCtx.GetScopeSchemaURLItem().SetSchemaUrl(schemaURL)
+			}
+			return nil
+		},
+	}
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/span.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/span.go
index e7270cbc0d6..607cb2e110f 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/span.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/span.go
@@ -134,10 +134,10 @@ func SpanPathGetSetter[K SpanContext](path ottl.Path[K]) (ottl.GetSetter[K], err
 
 func accessSpan[K SpanContext]() ottl.StandardGetSetter[K] {
 	return ottl.StandardGetSetter[K]{
-		Getter: func(ctx context.Context, tCtx K) (any, error) {
+		Getter: func(_ context.Context, tCtx K) (any, error) {
 			return tCtx.GetSpan(), nil
 		},
-		Setter: func(ctx context.Context, tCtx K, val any) error {
+		Setter: func(_ context.Context, tCtx K, val any) error {
 			if newSpan, ok := val.(ptrace.Span); ok {
 				newSpan.CopyTo(tCtx.GetSpan())
 			}
@@ -148,10 +148,10 @@ func accessSpan[K SpanContext]() ottl.StandardGetSetter[K] {
 
 func accessTraceID[K SpanContext]() ottl.StandardGetSetter[K] {
 	return ottl.StandardGetSetter[K]{
-		Getter: func(ctx context.Context, tCtx K) (any, error) {
+		Getter: func(_ context.Context, tCtx K) (any, error) {
 			return tCtx.GetSpan().TraceID(), nil
 		},
-		Setter: func(ctx context.Context, tCtx K, val any) error {
+		Setter: func(_ context.Context, tCtx K, val any) error {
 			if newTraceID, ok := val.(pcommon.TraceID); ok {
 				tCtx.GetSpan().SetTraceID(newTraceID)
 			}
@@ -162,11 +162,11 @@ func accessTraceID[K SpanContext]() ottl.StandardGetSetter[K] {
 
 func accessStringTraceID[K SpanContext]() ottl.StandardGetSetter[K] {
 	return ottl.StandardGetSetter[K]{
-		Getter: func(ctx context.Context, tCtx K) (any, error) {
+		Getter: func(_ context.Context, tCtx K) (any, error) {
 			id := tCtx.GetSpan().TraceID()
 			return hex.EncodeToString(id[:]), nil
 		},
-		Setter: func(ctx context.Context, tCtx K, val any) error {
+		Setter: func(_ context.Context, tCtx K, val any) error {
 			if str, ok := val.(string); ok {
 				id, err := ParseTraceID(str)
 				if err != nil {
@@ -181,10 +181,10 @@ func accessStringTraceID[K SpanContext]() ottl.StandardGetSetter[K] {
 
 func accessSpanID[K SpanContext]() ottl.StandardGetSetter[K] {
 	return ottl.StandardGetSetter[K]{
-		Getter: func(ctx context.Context, tCtx K) (any, error) {
+		Getter: func(_ context.Context, tCtx K) (any, error) {
 			return tCtx.GetSpan().SpanID(), nil
 		},
-		Setter: func(ctx context.Context, tCtx K, val any) error {
+		Setter: func(_ context.Context, tCtx K, val any) error {
 			if newSpanID, ok := val.(pcommon.SpanID); ok {
 				tCtx.GetSpan().SetSpanID(newSpanID)
 			}
@@ -195,11 +195,11 @@ func accessSpanID[K SpanContext]() ottl.StandardGetSetter[K] {
 
 func accessStringSpanID[K SpanContext]() ottl.StandardGetSetter[K] {
 	return ottl.StandardGetSetter[K]{
-		Getter: func(ctx context.Context, tCtx K) (any, error) {
+		Getter: func(_ context.Context, tCtx K) (any, error) {
 			id := tCtx.GetSpan().SpanID()
 			return hex.EncodeToString(id[:]), nil
 		},
-		Setter: func(ctx context.Context, tCtx K, val any) error {
+		Setter: func(_ context.Context, tCtx K, val any) error {
 			if str, ok := val.(string); ok {
 				id, err := ParseSpanID(str)
 				if err != nil {
@@ -214,10 +214,10 @@ func accessStringSpanID[K SpanContext]() ottl.StandardGetSetter[K] {
 
 func accessTraceState[K SpanContext]() ottl.StandardGetSetter[K] {
 	return ottl.StandardGetSetter[K]{
-		Getter: func(ctx context.Context, tCtx K) (any, error) {
+		Getter: func(_ context.Context, tCtx K) (any, error) {
 			return tCtx.GetSpan().TraceState().AsRaw(), nil
 		},
-		Setter: func(ctx context.Context, tCtx K, val any) error {
+		Setter: func(_ context.Context, tCtx K, val any) error {
 			if str, ok := val.(string); ok {
 				tCtx.GetSpan().TraceState().FromRaw(str)
 			}
@@ -266,10 +266,10 @@ func accessTraceStateKey[K SpanContext](keys []ottl.Key[K]) (ottl.StandardGetSet
 
 func accessParentSpanID[K SpanContext]() ottl.StandardGetSetter[K] {
 	return ottl.StandardGetSetter[K]{
-		Getter: func(ctx context.Context, tCtx K) (any, error) {
+		Getter: func(_ context.Context, tCtx K) (any, error) {
 			return tCtx.GetSpan().ParentSpanID(), nil
 		},
-		Setter: func(ctx context.Context, tCtx K, val any) error {
+		Setter: func(_ context.Context, tCtx K, val any) error {
 			if newParentSpanID, ok := val.(pcommon.SpanID); ok {
 				tCtx.GetSpan().SetParentSpanID(newParentSpanID)
 			}
@@ -280,11 +280,11 @@ func accessParentSpanID[K SpanContext]() ottl.StandardGetSetter[K] {
 
 func accessStringParentSpanID[K SpanContext]() ottl.StandardGetSetter[K] {
 	return ottl.StandardGetSetter[K]{
-		Getter: func(ctx context.Context, tCtx K) (any, error) {
+		Getter: func(_ context.Context, tCtx K) (any, error) {
 			id := tCtx.GetSpan().ParentSpanID()
 			return hex.EncodeToString(id[:]), nil
 		},
-		Setter: func(ctx context.Context, tCtx K, val any) error {
+		Setter: func(_ context.Context, tCtx K, val any) error {
 			if str, ok := val.(string); ok {
 				id, err := ParseSpanID(str)
 				if err != nil {
@@ -299,10 +299,10 @@ func accessStringParentSpanID[K SpanContext]() ottl.StandardGetSetter[K] {
 
 func accessSpanName[K SpanContext]() ottl.StandardGetSetter[K] {
 	return ottl.StandardGetSetter[K]{
-		Getter: func(ctx context.Context, tCtx K) (any, error) {
+		Getter: func(_ context.Context, tCtx K) (any, error) {
 			return tCtx.GetSpan().Name(), nil
 		},
-		Setter: func(ctx context.Context, tCtx K, val any) error {
+		Setter: func(_ context.Context, tCtx K, val any) error {
 			if str, ok := val.(string); ok {
 				tCtx.GetSpan().SetName(str)
 			}
@@ -313,10 +313,10 @@ func accessSpanName[K SpanContext]() ottl.StandardGetSetter[K] {
 
 func accessKind[K SpanContext]() ottl.StandardGetSetter[K] {
 	return ottl.StandardGetSetter[K]{
-		Getter: func(ctx context.Context, tCtx K) (any, error) {
+		Getter: func(_ context.Context, tCtx K) (any, error) {
 			return int64(tCtx.GetSpan().Kind()), nil
 		},
-		Setter: func(ctx context.Context, tCtx K, val any) error {
+		Setter: func(_ context.Context, tCtx K, val any) error {
 			if i, ok := val.(int64); ok {
 				tCtx.GetSpan().SetKind(ptrace.SpanKind(i))
 			}
@@ -327,10 +327,10 @@ func accessKind[K SpanContext]() ottl.StandardGetSetter[K] {
 
 func accessStringKind[K SpanContext]() ottl.StandardGetSetter[K] {
 	return ottl.StandardGetSetter[K]{
-		Getter: func(ctx context.Context, tCtx K) (any, error) {
+		Getter: func(_ context.Context, tCtx K) (any, error) {
 			return tCtx.GetSpan().Kind().String(), nil
 		},
-		Setter: func(ctx context.Context, tCtx K, val any) error {
+		Setter: func(_ context.Context, tCtx K, val any) error {
 			if s, ok := val.(string); ok {
 				var kind ptrace.SpanKind
 				switch s {
@@ -358,10 +358,10 @@ func accessStringKind[K SpanContext]() ottl.StandardGetSetter[K] {
 
 func accessDeprecatedStringKind[K SpanContext]() ottl.StandardGetSetter[K] {
 	return ottl.StandardGetSetter[K]{
-		Getter: func(ctx context.Context, tCtx K) (any, error) {
+		Getter: func(_ context.Context, tCtx K) (any, error) {
 			return traceutil.SpanKindStr(tCtx.GetSpan().Kind()), nil
 		},
-		Setter: func(ctx context.Context, tCtx K, val any) error {
+		Setter: func(_ context.Context, tCtx K, val any) error {
 			if s, ok := val.(string); ok {
 				var kind ptrace.SpanKind
 				switch s {
@@ -389,10 +389,10 @@ func accessDeprecatedStringKind[K SpanContext]() ottl.StandardGetSetter[K] {
 
 func accessStartTimeUnixNano[K SpanContext]() ottl.StandardGetSetter[K] {
 	return ottl.StandardGetSetter[K]{
-		Getter: func(ctx context.Context, tCtx K) (any, error) {
+		Getter: func(_ context.Context, tCtx K) (any, error) {
 			return tCtx.GetSpan().StartTimestamp().AsTime().UnixNano(), nil
 		},
-		Setter: func(ctx context.Context, tCtx K, val any) error {
+		Setter: func(_ context.Context, tCtx K, val any) error {
 			if i, ok := val.(int64); ok {
 				tCtx.GetSpan().SetStartTimestamp(pcommon.NewTimestampFromTime(time.Unix(0, i)))
 			}
@@ -403,10 +403,10 @@ func accessStartTimeUnixNano[K SpanContext]() ottl.StandardGetSetter[K] {
 
 func accessEndTimeUnixNano[K SpanContext]() ottl.StandardGetSetter[K] {
 	return ottl.StandardGetSetter[K]{
-		Getter: func(ctx context.Context, tCtx K) (any, error) {
+		Getter: func(_ context.Context, tCtx K) (any, error) {
 			return tCtx.GetSpan().EndTimestamp().AsTime().UnixNano(), nil
 		},
-		Setter: func(ctx context.Context, tCtx K, val any) error {
+		Setter: func(_ context.Context, tCtx K, val any) error {
 			if i, ok := val.(int64); ok {
 				tCtx.GetSpan().SetEndTimestamp(pcommon.NewTimestampFromTime(time.Unix(0, i)))
 			}
@@ -417,10 +417,10 @@ func accessEndTimeUnixNano[K SpanContext]() ottl.StandardGetSetter[K] {
 
 func accessStartTime[K SpanContext]() ottl.StandardGetSetter[K] {
 	return ottl.StandardGetSetter[K]{
-		Getter: func(ctx context.Context, tCtx K) (any, error) {
+		Getter: func(_ context.Context, tCtx K) (any, error) {
 			return tCtx.GetSpan().StartTimestamp().AsTime(), nil
 		},
-		Setter: func(ctx context.Context, tCtx K, val any) error {
+		Setter: func(_ context.Context, tCtx K, val any) error {
 			if i, ok := val.(time.Time); ok {
 				tCtx.GetSpan().SetStartTimestamp(pcommon.NewTimestampFromTime(i))
 			}
@@ -431,10 +431,10 @@ func accessStartTime[K SpanContext]() ottl.StandardGetSetter[K] {
 
 func accessEndTime[K SpanContext]() ottl.StandardGetSetter[K] {
 	return ottl.StandardGetSetter[K]{
-		Getter: func(ctx context.Context, tCtx K) (any, error) {
+		Getter: func(_ context.Context, tCtx K) (any, error) {
 			return tCtx.GetSpan().EndTimestamp().AsTime(), nil
 		},
-		Setter: func(ctx context.Context, tCtx K, val any) error {
+		Setter: func(_ context.Context, tCtx K, val any) error {
 			if i, ok := val.(time.Time); ok {
 				tCtx.GetSpan().SetEndTimestamp(pcommon.NewTimestampFromTime(i))
 			}
@@ -445,10 +445,10 @@ func accessEndTime[K SpanContext]() ottl.StandardGetSetter[K] {
 
 func accessAttributes[K SpanContext]() ottl.StandardGetSetter[K] {
 	return ottl.StandardGetSetter[K]{
-		Getter: func(ctx context.Context, tCtx K) (any, error) {
+		Getter: func(_ context.Context, tCtx K) (any, error) {
 			return tCtx.GetSpan().Attributes(), nil
 		},
-		Setter: func(ctx context.Context, tCtx K, val any) error {
+		Setter: func(_ context.Context, tCtx K, val any) error {
 			if attrs, ok := val.(pcommon.Map); ok {
 				attrs.CopyTo(tCtx.GetSpan().Attributes())
 			}
@@ -470,10 +470,10 @@ func accessAttributesKey[K SpanContext](keys []ottl.Key[K]) ottl.StandardGetSett
 
 func accessSpanDroppedAttributesCount[K SpanContext]() ottl.StandardGetSetter[K] {
 	return ottl.StandardGetSetter[K]{
-		Getter: func(ctx context.Context, tCtx K) (any, error) {
+		Getter: func(_ context.Context, tCtx K) (any, error) {
 			return int64(tCtx.GetSpan().DroppedAttributesCount()), nil
 		},
-		Setter: func(ctx context.Context, tCtx K, val any) error {
+		Setter: func(_ context.Context, tCtx K, val any) error {
 			if i, ok := val.(int64); ok {
 				tCtx.GetSpan().SetDroppedAttributesCount(uint32(i))
 			}
@@ -484,12 +484,12 @@ func accessSpanDroppedAttributesCount[K SpanContext]() ottl.StandardGetSetter[K]
 
 func accessEvents[K SpanContext]() ottl.StandardGetSetter[K] {
 	return ottl.StandardGetSetter[K]{
-		Getter: func(ctx context.Context, tCtx K) (any, error) {
+		Getter: func(_ context.Context, tCtx K) (any, error) {
 			return tCtx.GetSpan().Events(), nil
 		},
-		Setter: func(ctx context.Context, tCtx K, val any) error {
+		Setter: func(_ context.Context, tCtx K, val any) error {
 			if slc, ok := val.(ptrace.SpanEventSlice); ok {
-				tCtx.GetSpan().Events().RemoveIf(func(event ptrace.SpanEvent) bool {
+				tCtx.GetSpan().Events().RemoveIf(func(_ ptrace.SpanEvent) bool {
 					return true
 				})
 				slc.CopyTo(tCtx.GetSpan().Events())
@@ -501,10 +501,10 @@ func accessEvents[K SpanContext]() ottl.StandardGetSetter[K] {
 
 func accessDroppedEventsCount[K SpanContext]() ottl.StandardGetSetter[K] {
 	return ottl.StandardGetSetter[K]{
-		Getter: func(ctx context.Context, tCtx K) (any, error) {
+		Getter: func(_ context.Context, tCtx K) (any, error) {
 			return int64(tCtx.GetSpan().DroppedEventsCount()), nil
 		},
-		Setter: func(ctx context.Context, tCtx K, val any) error {
+		Setter: func(_ context.Context, tCtx K, val any) error {
 			if i, ok := val.(int64); ok {
 				tCtx.GetSpan().SetDroppedEventsCount(uint32(i))
 			}
@@ -515,12 +515,12 @@ func accessDroppedEventsCount[K SpanContext]() ottl.StandardGetSetter[K] {
 
 func accessLinks[K SpanContext]() ottl.StandardGetSetter[K] {
 	return ottl.StandardGetSetter[K]{
-		Getter: func(ctx context.Context, tCtx K) (any, error) {
+		Getter: func(_ context.Context, tCtx K) (any, error) {
 			return tCtx.GetSpan().Links(), nil
 		},
-		Setter: func(ctx context.Context, tCtx K, val any) error {
+		Setter: func(_ context.Context, tCtx K, val any) error {
 			if slc, ok := val.(ptrace.SpanLinkSlice); ok {
-				tCtx.GetSpan().Links().RemoveIf(func(event ptrace.SpanLink) bool {
+				tCtx.GetSpan().Links().RemoveIf(func(_ ptrace.SpanLink) bool {
 					return true
 				})
 				slc.CopyTo(tCtx.GetSpan().Links())
@@ -532,10 +532,10 @@ func accessLinks[K SpanContext]() ottl.StandardGetSetter[K] {
 
 func accessDroppedLinksCount[K SpanContext]() ottl.StandardGetSetter[K] {
 	return ottl.StandardGetSetter[K]{
-		Getter: func(ctx context.Context, tCtx K) (any, error) {
+		Getter: func(_ context.Context, tCtx K) (any, error) {
 			return int64(tCtx.GetSpan().DroppedLinksCount()), nil
 		},
-		Setter: func(ctx context.Context, tCtx K, val any) error {
+		Setter: func(_ context.Context, tCtx K, val any) error {
 			if i, ok := val.(int64); ok {
 				tCtx.GetSpan().SetDroppedLinksCount(uint32(i))
 			}
@@ -546,10 +546,10 @@ func accessDroppedLinksCount[K SpanContext]() ottl.StandardGetSetter[K] {
 
 func accessStatus[K SpanContext]() ottl.StandardGetSetter[K] {
 	return ottl.StandardGetSetter[K]{
-		Getter: func(ctx context.Context, tCtx K) (any, error) {
+		Getter: func(_ context.Context, tCtx K) (any, error) {
 			return tCtx.GetSpan().Status(), nil
 		},
-		Setter: func(ctx context.Context, tCtx K, val any) error {
+		Setter: func(_ context.Context, tCtx K, val any) error {
 			if status, ok := val.(ptrace.Status); ok {
 				status.CopyTo(tCtx.GetSpan().Status())
 			}
@@ -560,10 +560,10 @@ func accessStatus[K SpanContext]() ottl.StandardGetSetter[K] {
 
 func accessStatusCode[K SpanContext]() ottl.StandardGetSetter[K] {
 	return ottl.StandardGetSetter[K]{
-		Getter: func(ctx context.Context, tCtx K) (any, error) {
+		Getter: func(_ context.Context, tCtx K) (any, error) {
 			return int64(tCtx.GetSpan().Status().Code()), nil
 		},
-		Setter: func(ctx context.Context, tCtx K, val any) error {
+		Setter: func(_ context.Context, tCtx K, val any) error {
 			if i, ok := val.(int64); ok {
 				tCtx.GetSpan().Status().SetCode(ptrace.StatusCode(i))
 			}
@@ -574,10 +574,10 @@ func accessStatusCode[K SpanContext]() ottl.StandardGetSetter[K] {
 
 func accessStatusMessage[K SpanContext]() ottl.StandardGetSetter[K] {
 	return ottl.StandardGetSetter[K]{
-		Getter: func(ctx context.Context, tCtx K) (any, error) {
+		Getter: func(_ context.Context, tCtx K) (any, error) {
 			return tCtx.GetSpan().Status().Message(), nil
 		},
-		Setter: func(ctx context.Context, tCtx K, val any) error {
+		Setter: func(_ context.Context, tCtx K, val any) error {
 			if str, ok := val.(string); ok {
 				tCtx.GetSpan().Status().SetMessage(str)
 			}
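Reviewer note: the events/links setters above clear the destination slice with RemoveIf before copying the replacement in. A standalone sketch of that clear-and-copy idiom against the public pdata API (illustrative, not vendored code):

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/ptrace"
)

func main() {
	span := ptrace.NewSpan()
	span.Events().AppendEmpty().SetName("old")

	replacement := ptrace.NewSpanEventSlice()
	replacement.AppendEmpty().SetName("new")

	// Drop every existing event, then copy the new slice in, exactly as
	// the accessEvents setter does.
	span.Events().RemoveIf(func(_ ptrace.SpanEvent) bool { return true })
	replacement.CopyTo(span.Events())

	fmt.Println(span.Events().Len(), span.Events().At(0).Name()) // 1 new
}
```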
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottldatapoint/datapoint.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottldatapoint/datapoint.go
index 37a39068d3e..9c50d85cd72 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottldatapoint/datapoint.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottldatapoint/datapoint.go
@@ -5,23 +5,29 @@ package ottldatapoint // import "github.com/open-telemetry/opentelemetry-collect
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"time"
 
 	"go.opentelemetry.io/collector/component"
 	"go.opentelemetry.io/collector/pdata/pcommon"
 	"go.opentelemetry.io/collector/pdata/pmetric"
+	"go.uber.org/zap/zapcore"
 
 	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal"
+	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/logging"
 )
 
 const (
 	contextName = "DataPoint"
 )
 
-var _ internal.ResourceContext = TransformContext{}
-var _ internal.InstrumentationScopeContext = TransformContext{}
+var (
+	_ internal.ResourceContext             = (*TransformContext)(nil)
+	_ internal.InstrumentationScopeContext = (*TransformContext)(nil)
+	_ zapcore.ObjectMarshaler              = (*TransformContext)(nil)
+)
 
 type TransformContext struct {
 	dataPoint any
@@ -30,11 +36,33 @@ type TransformContext struct {
 	instrumentationScope pcommon.InstrumentationScope
 	resource             pcommon.Resource
 	cache                pcommon.Map
+	scopeMetrics         pmetric.ScopeMetrics
+	resourceMetrics      pmetric.ResourceMetrics
+}
+
+func (tCtx TransformContext) MarshalLogObject(encoder zapcore.ObjectEncoder) error {
+	err := encoder.AddObject("resource", logging.Resource(tCtx.resource))
+	err = errors.Join(err, encoder.AddObject("scope", logging.InstrumentationScope(tCtx.instrumentationScope)))
+	err = errors.Join(err, encoder.AddObject("metric", logging.Metric(tCtx.metric)))
+
+	switch dp := tCtx.dataPoint.(type) {
+	case pmetric.NumberDataPoint:
+		err = encoder.AddObject("datapoint", logging.NumberDataPoint(dp))
+	case pmetric.HistogramDataPoint:
+		err = encoder.AddObject("datapoint", logging.HistogramDataPoint(dp))
+	case pmetric.ExponentialHistogramDataPoint:
+		err = encoder.AddObject("datapoint", logging.ExponentialHistogramDataPoint(dp))
+	case pmetric.SummaryDataPoint:
+		err = encoder.AddObject("datapoint", logging.SummaryDataPoint(dp))
+	}
+
+	err = errors.Join(err, encoder.AddObject("cache", logging.Map(tCtx.cache)))
+	return err
 }
 
 type Option func(*ottl.Parser[TransformContext])
 
-func NewTransformContext(dataPoint any, metric pmetric.Metric, metrics pmetric.MetricSlice, instrumentationScope pcommon.InstrumentationScope, resource pcommon.Resource) TransformContext {
+func NewTransformContext(dataPoint any, metric pmetric.Metric, metrics pmetric.MetricSlice, instrumentationScope pcommon.InstrumentationScope, resource pcommon.Resource, scopeMetrics pmetric.ScopeMetrics, resourceMetrics pmetric.ResourceMetrics) TransformContext {
 	return TransformContext{
 		dataPoint:            dataPoint,
 		metric:               metric,
@@ -42,6 +70,8 @@ func NewTransformContext(dataPoint any, metric pmetric.Metric, metrics pmetric.M
 		instrumentationScope: instrumentationScope,
 		resource:             resource,
 		cache:                pcommon.NewMap(),
+		scopeMetrics:         scopeMetrics,
+		resourceMetrics:      resourceMetrics,
 	}
 }
 
@@ -69,6 +99,14 @@ func (tCtx TransformContext) getCache() pcommon.Map {
 	return tCtx.cache
 }
 
+func (tCtx TransformContext) GetScopeSchemaURLItem() internal.SchemaURLItem {
+	return tCtx.scopeMetrics
+}
+
+func (tCtx TransformContext) GetResourceSchemaURLItem() internal.SchemaURLItem {
+	return tCtx.resourceMetrics
+}
+
 func NewParser(functions map[string]ottl.Factory[TransformContext], telemetrySettings component.TelemetrySettings, options ...Option) (ottl.Parser[TransformContext], error) {
 	pep := pathExpressionParser{telemetrySettings}
 	p, err := ottl.NewParser[TransformContext](
@@ -227,10 +265,10 @@ func (pep *pathExpressionParser) parsePath(path ottl.Path[TransformContext]) (ot
 
 func accessCache() ottl.StandardGetSetter[TransformContext] {
 	return ottl.StandardGetSetter[TransformContext]{
-		Getter: func(ctx context.Context, tCtx TransformContext) (any, error) {
+		Getter: func(_ context.Context, tCtx TransformContext) (any, error) {
 			return tCtx.getCache(), nil
 		},
-		Setter: func(ctx context.Context, tCtx TransformContext, val any) error {
+		Setter: func(_ context.Context, tCtx TransformContext, val any) error {
 			if m, ok := val.(pcommon.Map); ok {
 				m.CopyTo(tCtx.getCache())
 			}
@@ -252,7 +290,7 @@ func accessCacheKey(key []ottl.Key[TransformContext]) ottl.StandardGetSetter[Tra
 
 func accessAttributes() ottl.StandardGetSetter[TransformContext] {
 	return ottl.StandardGetSetter[TransformContext]{
-		Getter: func(ctx context.Context, tCtx TransformContext) (any, error) {
+		Getter: func(_ context.Context, tCtx TransformContext) (any, error) {
 			switch tCtx.GetDataPoint().(type) {
 			case pmetric.NumberDataPoint:
 				return tCtx.GetDataPoint().(pmetric.NumberDataPoint).Attributes(), nil
@@ -265,7 +303,7 @@ func accessAttributes() ottl.StandardGetSetter[TransformContext] {
 			}
 			return nil, nil
 		},
-		Setter: func(ctx context.Context, tCtx TransformContext, val any) error {
+		Setter: func(_ context.Context, tCtx TransformContext, val any) error {
 			switch tCtx.GetDataPoint().(type) {
 			case pmetric.NumberDataPoint:
 				if attrs, ok := val.(pcommon.Map); ok {
@@ -322,7 +360,7 @@ func accessAttributesKey(key []ottl.Key[TransformContext]) ottl.StandardGetSette
 
 func accessStartTimeUnixNano() ottl.StandardGetSetter[TransformContext] {
 	return ottl.StandardGetSetter[TransformContext]{
-		Getter: func(ctx context.Context, tCtx TransformContext) (any, error) {
+		Getter: func(_ context.Context, tCtx TransformContext) (any, error) {
 			switch tCtx.GetDataPoint().(type) {
 			case pmetric.NumberDataPoint:
 				return tCtx.GetDataPoint().(pmetric.NumberDataPoint).StartTimestamp().AsTime().UnixNano(), nil
@@ -335,7 +373,7 @@ func accessStartTimeUnixNano() ottl.StandardGetSetter[TransformContext] {
 			}
 			return nil, nil
 		},
-		Setter: func(ctx context.Context, tCtx TransformContext, val any) error {
+		Setter: func(_ context.Context, tCtx TransformContext, val any) error {
 			if newTime, ok := val.(int64); ok {
 				switch tCtx.GetDataPoint().(type) {
 				case pmetric.NumberDataPoint:
@@ -355,7 +393,7 @@ func accessStartTimeUnixNano() ottl.StandardGetSetter[TransformContext] {
 
 func accessStartTime() ottl.StandardGetSetter[TransformContext] {
 	return ottl.StandardGetSetter[TransformContext]{
-		Getter: func(ctx context.Context, tCtx TransformContext) (any, error) {
+		Getter: func(_ context.Context, tCtx TransformContext) (any, error) {
 			switch tCtx.GetDataPoint().(type) {
 			case pmetric.NumberDataPoint:
 				return tCtx.GetDataPoint().(pmetric.NumberDataPoint).StartTimestamp().AsTime(), nil
@@ -368,7 +406,7 @@ func accessStartTime() ottl.StandardGetSetter[TransformContext] {
 			}
 			return nil, nil
 		},
-		Setter: func(ctx context.Context, tCtx TransformContext, val any) error {
+		Setter: func(_ context.Context, tCtx TransformContext, val any) error {
 			if newTime, ok := val.(time.Time); ok {
 				switch tCtx.GetDataPoint().(type) {
 				case pmetric.NumberDataPoint:
@@ -388,7 +426,7 @@ func accessStartTime() ottl.StandardGetSetter[TransformContext] {
 
 func accessTimeUnixNano() ottl.StandardGetSetter[TransformContext] {
 	return ottl.StandardGetSetter[TransformContext]{
-		Getter: func(ctx context.Context, tCtx TransformContext) (any, error) {
+		Getter: func(_ context.Context, tCtx TransformContext) (any, error) {
 			switch tCtx.GetDataPoint().(type) {
 			case pmetric.NumberDataPoint:
 				return tCtx.GetDataPoint().(pmetric.NumberDataPoint).Timestamp().AsTime().UnixNano(), nil
@@ -401,7 +439,7 @@ func accessTimeUnixNano() ottl.StandardGetSetter[TransformContext] {
 			}
 			return nil, nil
 		},
-		Setter: func(ctx context.Context, tCtx TransformContext, val any) error {
+		Setter: func(_ context.Context, tCtx TransformContext, val any) error {
 			if newTime, ok := val.(int64); ok {
 				switch tCtx.GetDataPoint().(type) {
 				case pmetric.NumberDataPoint:
@@ -421,7 +459,7 @@ func accessTimeUnixNano() ottl.StandardGetSetter[TransformContext] {
 
 func accessTime() ottl.StandardGetSetter[TransformContext] {
 	return ottl.StandardGetSetter[TransformContext]{
-		Getter: func(ctx context.Context, tCtx TransformContext) (any, error) {
+		Getter: func(_ context.Context, tCtx TransformContext) (any, error) {
 			switch tCtx.GetDataPoint().(type) {
 			case pmetric.NumberDataPoint:
 				return tCtx.GetDataPoint().(pmetric.NumberDataPoint).Timestamp().AsTime(), nil
@@ -434,7 +472,7 @@ func accessTime() ottl.StandardGetSetter[TransformContext] {
 			}
 			return nil, nil
 		},
-		Setter: func(ctx context.Context, tCtx TransformContext, val any) error {
+		Setter: func(_ context.Context, tCtx TransformContext, val any) error {
 			if newTime, ok := val.(time.Time); ok {
 				switch tCtx.GetDataPoint().(type) {
 				case pmetric.NumberDataPoint:
@@ -454,13 +492,13 @@ func accessTime() ottl.StandardGetSetter[TransformContext] {
 
 func accessDoubleValue() ottl.StandardGetSetter[TransformContext] {
 	return ottl.StandardGetSetter[TransformContext]{
-		Getter: func(ctx context.Context, tCtx TransformContext) (any, error) {
+		Getter: func(_ context.Context, tCtx TransformContext) (any, error) {
 			if numberDataPoint, ok := tCtx.GetDataPoint().(pmetric.NumberDataPoint); ok {
 				return numberDataPoint.DoubleValue(), nil
 			}
 			return nil, nil
 		},
-		Setter: func(ctx context.Context, tCtx TransformContext, val any) error {
+		Setter: func(_ context.Context, tCtx TransformContext, val any) error {
 			if newDouble, ok := val.(float64); ok {
 				if numberDataPoint, ok := tCtx.GetDataPoint().(pmetric.NumberDataPoint); ok {
 					numberDataPoint.SetDoubleValue(newDouble)
@@ -473,13 +511,13 @@ func accessDoubleValue() ottl.StandardGetSetter[TransformContext] {
 
 func accessIntValue() ottl.StandardGetSetter[TransformContext] {
 	return ottl.StandardGetSetter[TransformContext]{
-		Getter: func(ctx context.Context, tCtx TransformContext) (any, error) {
+		Getter: func(_ context.Context, tCtx TransformContext) (any, error) {
 			if numberDataPoint, ok := tCtx.GetDataPoint().(pmetric.NumberDataPoint); ok {
 				return numberDataPoint.IntValue(), nil
 			}
 			return nil, nil
 		},
-		Setter: func(ctx context.Context, tCtx TransformContext, val any) error {
+		Setter: func(_ context.Context, tCtx TransformContext, val any) error {
 			if newInt, ok := val.(int64); ok {
 				if numberDataPoint, ok := tCtx.GetDataPoint().(pmetric.NumberDataPoint); ok {
 					numberDataPoint.SetIntValue(newInt)
@@ -492,7 +530,7 @@ func accessIntValue() ottl.StandardGetSetter[TransformContext] {
 
 func accessExemplars() ottl.StandardGetSetter[TransformContext] {
 	return ottl.StandardGetSetter[TransformContext]{
-		Getter: func(ctx context.Context, tCtx TransformContext) (any, error) {
+		Getter: func(_ context.Context, tCtx TransformContext) (any, error) {
 			switch tCtx.GetDataPoint().(type) {
 			case pmetric.NumberDataPoint:
 				return tCtx.GetDataPoint().(pmetric.NumberDataPoint).Exemplars(), nil
@@ -503,7 +541,7 @@ func accessExemplars() ottl.StandardGetSetter[TransformContext] {
 			}
 			return nil, nil
 		},
-		Setter: func(ctx context.Context, tCtx TransformContext, val any) error {
+		Setter: func(_ context.Context, tCtx TransformContext, val any) error {
 			if newExemplars, ok := val.(pmetric.ExemplarSlice); ok {
 				switch tCtx.GetDataPoint().(type) {
 				case pmetric.NumberDataPoint:
@@ -521,7 +559,7 @@ func accessExemplars() ottl.StandardGetSetter[TransformContext] {
 
 func accessFlags() ottl.StandardGetSetter[TransformContext] {
 	return ottl.StandardGetSetter[TransformContext]{
-		Getter: func(ctx context.Context, tCtx TransformContext) (any, error) {
+		Getter: func(_ context.Context, tCtx TransformContext) (any, error) {
 			switch tCtx.GetDataPoint().(type) {
 			case pmetric.NumberDataPoint:
 				return int64(tCtx.GetDataPoint().(pmetric.NumberDataPoint).Flags()), nil
@@ -534,7 +572,7 @@ func accessFlags() ottl.StandardGetSetter[TransformContext] {
 			}
 			return nil, nil
 		},
-		Setter: func(ctx context.Context, tCtx TransformContext, val any) error {
+		Setter: func(_ context.Context, tCtx TransformContext, val any) error {
 			if newFlags, ok := val.(int64); ok {
 				switch tCtx.GetDataPoint().(type) {
 				case pmetric.NumberDataPoint:
@@ -554,7 +592,7 @@ func accessFlags() ottl.StandardGetSetter[TransformContext] {
 
 func accessCount() ottl.StandardGetSetter[TransformContext] {
 	return ottl.StandardGetSetter[TransformContext]{
-		Getter: func(ctx context.Context, tCtx TransformContext) (any, error) {
+		Getter: func(_ context.Context, tCtx TransformContext) (any, error) {
 			switch tCtx.GetDataPoint().(type) {
 			case pmetric.HistogramDataPoint:
 				return int64(tCtx.GetDataPoint().(pmetric.HistogramDataPoint).Count()), nil
@@ -565,7 +603,7 @@ func accessCount() ottl.StandardGetSetter[TransformContext] {
 			}
 			return nil, nil
 		},
-		Setter: func(ctx context.Context, tCtx TransformContext, val any) error {
+		Setter: func(_ context.Context, tCtx TransformContext, val any) error {
 			if newCount, ok := val.(int64); ok {
 				switch tCtx.GetDataPoint().(type) {
 				case pmetric.HistogramDataPoint:
@@ -583,7 +621,7 @@ func accessCount() ottl.StandardGetSetter[TransformContext] {
 
 func accessSum() ottl.StandardGetSetter[TransformContext] {
 	return ottl.StandardGetSetter[TransformContext]{
-		Getter: func(ctx context.Context, tCtx TransformContext) (any, error) {
+		Getter: func(_ context.Context, tCtx TransformContext) (any, error) {
 			switch tCtx.GetDataPoint().(type) {
 			case pmetric.HistogramDataPoint:
 				return tCtx.GetDataPoint().(pmetric.HistogramDataPoint).Sum(), nil
@@ -594,7 +632,7 @@ func accessSum() ottl.StandardGetSetter[TransformContext] {
 			}
 			return nil, nil
 		},
-		Setter: func(ctx context.Context, tCtx TransformContext, val any) error {
+		Setter: func(_ context.Context, tCtx TransformContext, val any) error {
 			if newSum, ok := val.(float64); ok {
 				switch tCtx.GetDataPoint().(type) {
 				case pmetric.HistogramDataPoint:
@@ -612,13 +650,13 @@ func accessSum() ottl.StandardGetSetter[TransformContext] {
 
 func accessExplicitBounds() ottl.StandardGetSetter[TransformContext] {
 	return ottl.StandardGetSetter[TransformContext]{
-		Getter: func(ctx context.Context, tCtx TransformContext) (any, error) {
+		Getter: func(_ context.Context, tCtx TransformContext) (any, error) {
 			if histogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.HistogramDataPoint); ok {
 				return histogramDataPoint.ExplicitBounds().AsRaw(), nil
 			}
 			return nil, nil
 		},
-		Setter: func(ctx context.Context, tCtx TransformContext, val any) error {
+		Setter: func(_ context.Context, tCtx TransformContext, val any) error {
 			if newExplicitBounds, ok := val.([]float64); ok {
 				if histogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.HistogramDataPoint); ok {
 					histogramDataPoint.ExplicitBounds().FromRaw(newExplicitBounds)
@@ -631,13 +669,13 @@ func accessExplicitBounds() ottl.StandardGetSetter[TransformContext] {
 
 func accessBucketCounts() ottl.StandardGetSetter[TransformContext] {
 	return ottl.StandardGetSetter[TransformContext]{
-		Getter: func(ctx context.Context, tCtx TransformContext) (any, error) {
+		Getter: func(_ context.Context, tCtx TransformContext) (any, error) {
 			if histogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.HistogramDataPoint); ok {
 				return histogramDataPoint.BucketCounts().AsRaw(), nil
 			}
 			return nil, nil
 		},
-		Setter: func(ctx context.Context, tCtx TransformContext, val any) error {
+		Setter: func(_ context.Context, tCtx TransformContext, val any) error {
 			if newBucketCount, ok := val.([]uint64); ok {
 				if histogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.HistogramDataPoint); ok {
 					histogramDataPoint.BucketCounts().FromRaw(newBucketCount)
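Reviewer note: the NewTransformContext hunk earlier in this file added two trailing arguments, which is the breaking change Tempo's own call sites in this PR adapt to. A hedged wiring sketch (variable names are illustrative, not taken from Tempo):

```go
package main

import (
	"go.opentelemetry.io/collector/pdata/pmetric"

	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottldatapoint"
)

func main() {
	metrics := pmetric.NewMetrics()
	rm := metrics.ResourceMetrics().AppendEmpty()
	sm := rm.ScopeMetrics().AppendEmpty()
	metric := sm.Metrics().AppendEmpty()
	dp := metric.SetEmptyGauge().DataPoints().AppendEmpty()

	// The final two arguments (sm, rm) are the 0.116.0 additions; they back
	// the new scope.schema_url and resource.schema_url OTTL paths.
	tCtx := ottldatapoint.NewTransformContext(
		dp, metric, sm.Metrics(), sm.Scope(), rm.Resource(), sm, rm,
	)
	_ = tCtx
}
```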
@@ -650,13 +688,13 @@ func accessBucketCounts() ottl.StandardGetSetter[TransformContext] {
 
 func accessScale() ottl.StandardGetSetter[TransformContext] {
 	return ottl.StandardGetSetter[TransformContext]{
-		Getter: func(ctx context.Context, tCtx TransformContext) (any, error) {
+		Getter: func(_ context.Context, tCtx TransformContext) (any, error) {
 			if expoHistogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint); ok {
 				return int64(expoHistogramDataPoint.Scale()), nil
 			}
 			return nil, nil
 		},
-		Setter: func(ctx context.Context, tCtx TransformContext, val any) error {
+		Setter: func(_ context.Context, tCtx TransformContext, val any) error {
 			if newScale, ok := val.(int64); ok {
 				if expoHistogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint); ok {
 					expoHistogramDataPoint.SetScale(int32(newScale))
@@ -669,13 +707,13 @@ func accessScale() ottl.StandardGetSetter[TransformContext] {
 
 func accessZeroCount() ottl.StandardGetSetter[TransformContext] {
 	return ottl.StandardGetSetter[TransformContext]{
-		Getter: func(ctx context.Context, tCtx TransformContext) (any, error) {
+		Getter: func(_ context.Context, tCtx TransformContext) (any, error) {
 			if expoHistogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint); ok {
 				return int64(expoHistogramDataPoint.ZeroCount()), nil
 			}
 			return nil, nil
 		},
-		Setter: func(ctx context.Context, tCtx TransformContext, val any) error {
+		Setter: func(_ context.Context, tCtx TransformContext, val any) error {
 			if newZeroCount, ok := val.(int64); ok {
 				if expoHistogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint); ok {
 					expoHistogramDataPoint.SetZeroCount(uint64(newZeroCount))
@@ -688,13 +726,13 @@ func accessZeroCount() ottl.StandardGetSetter[TransformContext] {
 
 func accessPositive() ottl.StandardGetSetter[TransformContext] {
 	return ottl.StandardGetSetter[TransformContext]{
-		Getter: func(ctx context.Context, tCtx TransformContext) (any, error) {
+		Getter: func(_ context.Context, tCtx TransformContext) (any, error) {
 			if expoHistogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint); ok {
 				return expoHistogramDataPoint.Positive(), nil
 			}
 			return nil, nil
 		},
-		Setter: func(ctx context.Context, tCtx TransformContext, val any) error {
+		Setter: func(_ context.Context, tCtx TransformContext, val any) error {
 			if newPositive, ok := val.(pmetric.ExponentialHistogramDataPointBuckets); ok {
 				if expoHistogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint); ok {
 					newPositive.CopyTo(expoHistogramDataPoint.Positive())
@@ -707,13 +745,13 @@ func accessPositive() ottl.StandardGetSetter[TransformContext] {
 
 func accessPositiveOffset() ottl.StandardGetSetter[TransformContext] {
 	return ottl.StandardGetSetter[TransformContext]{
-		Getter: func(ctx context.Context, tCtx TransformContext) (any, error) {
+		Getter: func(_ context.Context, tCtx TransformContext) (any, error) {
 			if expoHistogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint); ok {
 				return int64(expoHistogramDataPoint.Positive().Offset()), nil
 			}
 			return nil, nil
 		},
-		Setter: func(ctx context.Context, tCtx TransformContext, val any) error {
+		Setter: func(_ context.Context, tCtx TransformContext, val any) error {
 			if newPositiveOffset, ok := val.(int64); ok {
 				if expoHistogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint); ok {
 					expoHistogramDataPoint.Positive().SetOffset(int32(newPositiveOffset))
@@ -726,13 +764,13 @@ func accessPositiveOffset() ottl.StandardGetSetter[TransformContext] {
 
 func accessPositiveBucketCounts() ottl.StandardGetSetter[TransformContext] {
 	return ottl.StandardGetSetter[TransformContext]{
-		Getter: func(ctx context.Context, tCtx TransformContext) (any, error) {
+		Getter: func(_ context.Context, tCtx TransformContext) (any, error) {
 			if expoHistogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint); ok {
 				return expoHistogramDataPoint.Positive().BucketCounts().AsRaw(), nil
 			}
 			return nil, nil
 		},
-		Setter: func(ctx context.Context, tCtx TransformContext, val any) error {
+		Setter: func(_ context.Context, tCtx TransformContext, val any) error {
 			if newPositiveBucketCounts, ok := val.([]uint64); ok {
 				if expoHistogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint); ok {
 					expoHistogramDataPoint.Positive().BucketCounts().FromRaw(newPositiveBucketCounts)
@@ -745,13 +783,13 @@ func accessPositiveBucketCounts() ottl.StandardGetSetter[TransformContext] {
 
 func accessNegative() ottl.StandardGetSetter[TransformContext] {
 	return ottl.StandardGetSetter[TransformContext]{
-		Getter: func(ctx context.Context, tCtx TransformContext) (any, error) {
+		Getter: func(_ context.Context, tCtx TransformContext) (any, error) {
 			if expoHistogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint); ok {
 				return expoHistogramDataPoint.Negative(), nil
 			}
 			return nil, nil
 		},
-		Setter: func(ctx context.Context, tCtx TransformContext, val any) error {
+		Setter: func(_ context.Context, tCtx TransformContext, val any) error {
 			if newNegative, ok := val.(pmetric.ExponentialHistogramDataPointBuckets); ok {
 				if expoHistogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint); ok {
 					newNegative.CopyTo(expoHistogramDataPoint.Negative())
@@ -764,13 +802,13 @@ func accessNegative() ottl.StandardGetSetter[TransformContext] {
 
 func accessNegativeOffset() ottl.StandardGetSetter[TransformContext] {
 	return ottl.StandardGetSetter[TransformContext]{
-		Getter: func(ctx context.Context, tCtx TransformContext) (any, error) {
+		Getter: func(_ context.Context, tCtx TransformContext) (any, error) {
 			if expoHistogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint); ok {
 				return int64(expoHistogramDataPoint.Negative().Offset()), nil
 			}
 			return nil, nil
 		},
-		Setter: func(ctx context.Context, tCtx TransformContext, val any) error {
+		Setter: func(_ context.Context, tCtx TransformContext, val any) error {
 			if newNegativeOffset, ok := val.(int64); ok {
 				if expoHistogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint); ok {
 					expoHistogramDataPoint.Negative().SetOffset(int32(newNegativeOffset))
@@ -783,13 +821,13 @@ func accessNegativeOffset() ottl.StandardGetSetter[TransformContext] {
 
 func accessNegativeBucketCounts() ottl.StandardGetSetter[TransformContext] {
 	return ottl.StandardGetSetter[TransformContext]{
-		Getter: func(ctx context.Context, tCtx TransformContext) (any, error) {
+		Getter: func(_ context.Context, tCtx TransformContext) (any, error) {
 			if expoHistogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint); ok {
 				return expoHistogramDataPoint.Negative().BucketCounts().AsRaw(), nil
 			}
 			return nil, nil
 		},
-		Setter: func(ctx context.Context, tCtx TransformContext, val any) error {
+		Setter: func(_ context.Context, tCtx TransformContext, val any) error {
 			if newNegativeBucketCounts, ok := val.([]uint64); ok {
 				if expoHistogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint); ok {
 					expoHistogramDataPoint.Negative().BucketCounts().FromRaw(newNegativeBucketCounts)
@@ -802,13 +840,13 @@ func accessNegativeBucketCounts() ottl.StandardGetSetter[TransformContext] {
 
 func accessQuantileValues() ottl.StandardGetSetter[TransformContext] {
 	return ottl.StandardGetSetter[TransformContext]{
-		Getter: func(ctx context.Context, tCtx TransformContext) (any, error) {
+		Getter: func(_ context.Context, tCtx TransformContext) (any, error) {
 			if summaryDataPoint, ok := tCtx.GetDataPoint().(pmetric.SummaryDataPoint); ok {
 				return summaryDataPoint.QuantileValues(), nil
 			}
 			return nil, nil
 		},
-		Setter: func(ctx context.Context, tCtx TransformContext, val any) error {
+		Setter: func(_ context.Context, tCtx TransformContext, val any) error {
 			if newQuantileValues, ok := val.(pmetric.SummaryDataPointValueAtQuantileSlice); ok {
 				if summaryDataPoint, ok := tCtx.GetDataPoint().(pmetric.SummaryDataPoint); ok {
 					newQuantileValues.CopyTo(summaryDataPoint.QuantileValues())
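Reviewer note: the exponential-histogram accessors above are thin wrappers over pdata's bucket API; a quick sketch of the underlying calls (plain pdata, for orientation only):

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	dp := pmetric.NewExponentialHistogramDataPoint()
	dp.SetScale(2)                                     // what accessScale sets
	dp.Positive().SetOffset(-1)                        // what accessPositiveOffset sets
	dp.Positive().BucketCounts().FromRaw([]uint64{3, 1, 4}) // accessPositiveBucketCounts
	fmt.Println(dp.Positive().BucketCounts().AsRaw())  // [3 1 4]
}
```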
ok := val.([]uint64); ok { if expoHistogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint); ok { expoHistogramDataPoint.Negative().BucketCounts().FromRaw(newNegativeBucketCounts) @@ -802,13 +840,13 @@ func accessNegativeBucketCounts() ottl.StandardGetSetter[TransformContext] { func accessQuantileValues() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { if summaryDataPoint, ok := tCtx.GetDataPoint().(pmetric.SummaryDataPoint); ok { return summaryDataPoint.QuantileValues(), nil } return nil, nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if newQuantileValues, ok := val.(pmetric.SummaryDataPointValueAtQuantileSlice); ok { if summaryDataPoint, ok := tCtx.GetDataPoint().(pmetric.SummaryDataPoint); ok { newQuantileValues.CopyTo(summaryDataPoint.QuantileValues()) diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottllog/log.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottllog/log.go index 7e73c2dbde3..7ca056730cc 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottllog/log.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottllog/log.go @@ -6,40 +6,77 @@ package ottllog // import "github.com/open-telemetry/opentelemetry-collector-con import ( "context" "encoding/hex" + "errors" "fmt" "time" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/plog" + "go.uber.org/zap/zapcore" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal" - "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/internal/ottlcommon" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/logging" + common "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/internal/ottlcommon" ) const ( contextName = "Log" ) -var _ internal.ResourceContext = TransformContext{} -var _ internal.InstrumentationScopeContext = TransformContext{} +var ( + _ internal.ResourceContext = (*TransformContext)(nil) + _ internal.InstrumentationScopeContext = (*TransformContext)(nil) + _ zapcore.ObjectMarshaler = (*TransformContext)(nil) +) type TransformContext struct { logRecord plog.LogRecord instrumentationScope pcommon.InstrumentationScope resource pcommon.Resource cache pcommon.Map + scopeLogs plog.ScopeLogs + resourceLogs plog.ResourceLogs +} + +type logRecord plog.LogRecord + +func (l logRecord) MarshalLogObject(encoder zapcore.ObjectEncoder) error { + lr := plog.LogRecord(l) + spanID := lr.SpanID() + traceID := lr.TraceID() + err := encoder.AddObject("attributes", logging.Map(lr.Attributes())) + encoder.AddString("body", lr.Body().AsString()) + encoder.AddUint32("dropped_attribute_count", lr.DroppedAttributesCount()) + encoder.AddUint32("flags", uint32(lr.Flags())) + encoder.AddUint64("observed_time_unix_nano", uint64(lr.ObservedTimestamp())) + encoder.AddInt32("severity_number", int32(lr.SeverityNumber())) + encoder.AddString("severity_text", lr.SeverityText()) + encoder.AddString("span_id", 
hex.EncodeToString(spanID[:])) + encoder.AddUint64("time_unix_nano", uint64(lr.Timestamp())) + encoder.AddString("trace_id", hex.EncodeToString(traceID[:])) + return err +} + +func (tCtx TransformContext) MarshalLogObject(encoder zapcore.ObjectEncoder) error { + err := encoder.AddObject("resource", logging.Resource(tCtx.resource)) + err = errors.Join(err, encoder.AddObject("scope", logging.InstrumentationScope(tCtx.instrumentationScope))) + err = errors.Join(err, encoder.AddObject("log_record", logRecord(tCtx.logRecord))) + err = errors.Join(err, encoder.AddObject("cache", logging.Map(tCtx.cache))) + return err } type Option func(*ottl.Parser[TransformContext]) -func NewTransformContext(logRecord plog.LogRecord, instrumentationScope pcommon.InstrumentationScope, resource pcommon.Resource) TransformContext { +func NewTransformContext(logRecord plog.LogRecord, instrumentationScope pcommon.InstrumentationScope, resource pcommon.Resource, scopeLogs plog.ScopeLogs, resourceLogs plog.ResourceLogs) TransformContext { return TransformContext{ logRecord: logRecord, instrumentationScope: instrumentationScope, resource: resource, cache: pcommon.NewMap(), + scopeLogs: scopeLogs, + resourceLogs: resourceLogs, } } @@ -59,6 +96,14 @@ func (tCtx TransformContext) getCache() pcommon.Map { return tCtx.cache } +func (tCtx TransformContext) GetScopeSchemaURLItem() internal.SchemaURLItem { + return tCtx.scopeLogs +} + +func (tCtx TransformContext) GetResourceSchemaURLItem() internal.SchemaURLItem { + return tCtx.resourceLogs +} + func NewParser(functions map[string]ottl.Factory[TransformContext], telemetrySettings component.TelemetrySettings, options ...Option) (ottl.Parser[TransformContext], error) { pep := pathExpressionParser{telemetrySettings} p, err := ottl.NewParser[TransformContext]( @@ -222,10 +267,10 @@ func (pep *pathExpressionParser) parsePath(path ottl.Path[TransformContext]) (ot func accessCache() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { return tCtx.getCache(), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if m, ok := val.(pcommon.Map); ok { m.CopyTo(tCtx.getCache()) } @@ -247,10 +292,10 @@ func accessCacheKey(key []ottl.Key[TransformContext]) ottl.StandardGetSetter[Tra func accessTimeUnixNano() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { return tCtx.GetLogRecord().Timestamp().AsTime().UnixNano(), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if i, ok := val.(int64); ok { tCtx.GetLogRecord().SetTimestamp(pcommon.NewTimestampFromTime(time.Unix(0, i))) } @@ -261,10 +306,10 @@ func accessTimeUnixNano() ottl.StandardGetSetter[TransformContext] { func accessObservedTimeUnixNano() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { return tCtx.GetLogRecord().ObservedTimestamp().AsTime().UnixNano(), nil }, - Setter: 
func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if i, ok := val.(int64); ok { tCtx.GetLogRecord().SetObservedTimestamp(pcommon.NewTimestampFromTime(time.Unix(0, i))) } @@ -275,10 +320,10 @@ func accessObservedTimeUnixNano() ottl.StandardGetSetter[TransformContext] { func accessTime() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { return tCtx.GetLogRecord().Timestamp().AsTime(), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if i, ok := val.(time.Time); ok { tCtx.GetLogRecord().SetTimestamp(pcommon.NewTimestampFromTime(i)) } @@ -289,10 +334,10 @@ func accessTime() ottl.StandardGetSetter[TransformContext] { func accessObservedTime() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { return tCtx.GetLogRecord().ObservedTimestamp().AsTime(), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if i, ok := val.(time.Time); ok { tCtx.GetLogRecord().SetObservedTimestamp(pcommon.NewTimestampFromTime(i)) } @@ -303,10 +348,10 @@ func accessObservedTime() ottl.StandardGetSetter[TransformContext] { func accessSeverityNumber() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { return int64(tCtx.GetLogRecord().SeverityNumber()), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if i, ok := val.(int64); ok { tCtx.GetLogRecord().SetSeverityNumber(plog.SeverityNumber(i)) } @@ -317,10 +362,10 @@ func accessSeverityNumber() ottl.StandardGetSetter[TransformContext] { func accessSeverityText() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { return tCtx.GetLogRecord().SeverityText(), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if s, ok := val.(string); ok { tCtx.GetLogRecord().SetSeverityText(s) } @@ -331,10 +376,10 @@ func accessSeverityText() ottl.StandardGetSetter[TransformContext] { func accessBody() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { - return ottlcommon.GetValue(tCtx.GetLogRecord().Body()), nil + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { + return common.GetValue(tCtx.GetLogRecord().Body()), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { return 
internal.SetValue(tCtx.GetLogRecord().Body(), val) }, } @@ -369,10 +414,10 @@ func accessBodyKey(key []ottl.Key[TransformContext]) ottl.StandardGetSetter[Tran func accessStringBody() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { return tCtx.GetLogRecord().Body().AsString(), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if str, ok := val.(string); ok { tCtx.GetLogRecord().Body().SetStr(str) } @@ -383,10 +428,10 @@ func accessStringBody() ottl.StandardGetSetter[TransformContext] { func accessAttributes() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { return tCtx.GetLogRecord().Attributes(), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if attrs, ok := val.(pcommon.Map); ok { attrs.CopyTo(tCtx.GetLogRecord().Attributes()) } @@ -408,10 +453,10 @@ func accessAttributesKey(key []ottl.Key[TransformContext]) ottl.StandardGetSette func accessDroppedAttributesCount() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { return int64(tCtx.GetLogRecord().DroppedAttributesCount()), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if i, ok := val.(int64); ok { tCtx.GetLogRecord().SetDroppedAttributesCount(uint32(i)) } @@ -422,10 +467,10 @@ func accessDroppedAttributesCount() ottl.StandardGetSetter[TransformContext] { func accessFlags() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { return int64(tCtx.GetLogRecord().Flags()), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if i, ok := val.(int64); ok { tCtx.GetLogRecord().SetFlags(plog.LogRecordFlags(i)) } @@ -436,10 +481,10 @@ func accessFlags() ottl.StandardGetSetter[TransformContext] { func accessTraceID() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { return tCtx.GetLogRecord().TraceID(), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if newTraceID, ok := val.(pcommon.TraceID); ok { tCtx.GetLogRecord().SetTraceID(newTraceID) } @@ -450,11 +495,11 @@ func accessTraceID() ottl.StandardGetSetter[TransformContext] { func accessStringTraceID() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx 
TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { id := tCtx.GetLogRecord().TraceID() return hex.EncodeToString(id[:]), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if str, ok := val.(string); ok { id, err := internal.ParseTraceID(str) if err != nil { @@ -469,10 +514,10 @@ func accessStringTraceID() ottl.StandardGetSetter[TransformContext] { func accessSpanID() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { return tCtx.GetLogRecord().SpanID(), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if newSpanID, ok := val.(pcommon.SpanID); ok { tCtx.GetLogRecord().SetSpanID(newSpanID) } @@ -483,11 +528,11 @@ func accessSpanID() ottl.StandardGetSetter[TransformContext] { func accessStringSpanID() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { id := tCtx.GetLogRecord().SpanID() return hex.EncodeToString(id[:]), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if str, ok := val.(string); ok { id, err := internal.ParseSpanID(str) if err != nil { diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlmetric/metrics.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlmetric/metrics.go index 0cdc916fd14..eba931c7440 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlmetric/metrics.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlmetric/metrics.go @@ -15,9 +15,11 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal" ) -var _ internal.ResourceContext = TransformContext{} -var _ internal.InstrumentationScopeContext = TransformContext{} -var _ internal.MetricContext = TransformContext{} +var ( + _ internal.ResourceContext = TransformContext{} + _ internal.InstrumentationScopeContext = TransformContext{} + _ internal.MetricContext = TransformContext{} +) type TransformContext struct { metric pmetric.Metric @@ -25,17 +27,21 @@ type TransformContext struct { instrumentationScope pcommon.InstrumentationScope resource pcommon.Resource cache pcommon.Map + scopeMetrics pmetric.ScopeMetrics + resourceMetrics pmetric.ResourceMetrics } type Option func(*ottl.Parser[TransformContext]) -func NewTransformContext(metric pmetric.Metric, metrics pmetric.MetricSlice, instrumentationScope pcommon.InstrumentationScope, resource pcommon.Resource) TransformContext { +func NewTransformContext(metric pmetric.Metric, metrics pmetric.MetricSlice, instrumentationScope pcommon.InstrumentationScope, resource pcommon.Resource, scopeMetrics pmetric.ScopeMetrics, resourceMetrics pmetric.ResourceMetrics) TransformContext { return TransformContext{ metric: metric, metrics: metrics, instrumentationScope: instrumentationScope, resource: resource, cache: 
pcommon.NewMap(), + scopeMetrics: scopeMetrics, + resourceMetrics: resourceMetrics, } } @@ -59,6 +65,14 @@ func (tCtx TransformContext) getCache() pcommon.Map { return tCtx.cache } +func (tCtx TransformContext) GetScopeSchemaURLItem() internal.SchemaURLItem { + return tCtx.scopeMetrics +} + +func (tCtx TransformContext) GetResourceSchemaURLItem() internal.SchemaURLItem { + return tCtx.resourceMetrics +} + func NewParser(functions map[string]ottl.Factory[TransformContext], telemetrySettings component.TelemetrySettings, options ...Option) (ottl.Parser[TransformContext], error) { pep := pathExpressionParser{telemetrySettings} p, err := ottl.NewParser[TransformContext]( @@ -145,10 +159,10 @@ func (pep *pathExpressionParser) parsePath(path ottl.Path[TransformContext]) (ot func accessCache() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { return tCtx.getCache(), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if m, ok := val.(pcommon.Map); ok { m.CopyTo(tCtx.getCache()) } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlresource/resource.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlresource/resource.go index a7bd956129d..da3a8ceea1b 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlresource/resource.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlresource/resource.go @@ -5,28 +5,42 @@ package ottlresource // import "github.com/open-telemetry/opentelemetry-collecto import ( "context" + "errors" "fmt" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/pdata/pcommon" + "go.uber.org/zap/zapcore" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/logging" ) -var _ internal.ResourceContext = TransformContext{} +var ( + _ internal.ResourceContext = (*TransformContext)(nil) + _ zapcore.ObjectMarshaler = (*TransformContext)(nil) +) type TransformContext struct { - resource pcommon.Resource - cache pcommon.Map + resource pcommon.Resource + cache pcommon.Map + schemaURLItem internal.SchemaURLItem +} + +func (tCtx TransformContext) MarshalLogObject(encoder zapcore.ObjectEncoder) error { + err := encoder.AddObject("resource", logging.Resource(tCtx.resource)) + err = errors.Join(err, encoder.AddObject("cache", logging.Map(tCtx.cache))) + return err } type Option func(*ottl.Parser[TransformContext]) -func NewTransformContext(resource pcommon.Resource) TransformContext { +func NewTransformContext(resource pcommon.Resource, schemaURLItem internal.SchemaURLItem) TransformContext { return TransformContext{ - resource: resource, - cache: pcommon.NewMap(), + resource: resource, + cache: pcommon.NewMap(), + schemaURLItem: schemaURLItem, } } @@ -38,6 +52,10 @@ func (tCtx TransformContext) getCache() pcommon.Map { return tCtx.cache } +func (tCtx TransformContext) GetResourceSchemaURLItem() internal.SchemaURLItem { + return tCtx.schemaURLItem +} + func NewParser(functions map[string]ottl.Factory[TransformContext], 
telemetrySettings component.TelemetrySettings, options ...Option) (ottl.Parser[TransformContext], error) {
 	pep := pathExpressionParser{telemetrySettings}
 	p, err := ottl.NewParser[TransformContext](
@@ -112,10 +130,10 @@ func (pep *pathExpressionParser) parsePath(path ottl.Path[TransformContext]) (ot
 func accessCache() ottl.StandardGetSetter[TransformContext] {
 	return ottl.StandardGetSetter[TransformContext]{
-		Getter: func(ctx context.Context, tCtx TransformContext) (any, error) {
+		Getter: func(_ context.Context, tCtx TransformContext) (any, error) {
 			return tCtx.getCache(), nil
 		},
-		Setter: func(ctx context.Context, tCtx TransformContext, val any) error {
+		Setter: func(_ context.Context, tCtx TransformContext, val any) error {
 			if m, ok := val.(pcommon.Map); ok {
 				m.CopyTo(tCtx.getCache())
 			}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlscope/README.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlscope/README.md
new file mode 100644
index 00000000000..4df2ae36160
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlscope/README.md
@@ -0,0 +1,27 @@
+# Instrumentation Scope Context
+
+The Instrumentation Scope Context is a Context implementation for [pdata Instrumentation Scope](https://github.com/open-telemetry/opentelemetry-collector/blob/main/pdata/pcommon/generated_instrumentationscope.go), the Collector's internal representation for OTLP instrumentation scope data. This Context should be used when interacting only with OTLP instrumentation scope.
+
+## Paths
+In general, the Instrumentation Scope Context supports accessing pdata using the field names from the instrumentation section in the [common proto](https://github.com/open-telemetry/opentelemetry-proto/blob/main/opentelemetry/proto/common/v1/common.proto). All integers are returned and set via `int64`. All doubles are returned and set via `float64`.
+
+The following paths are supported.
+
+| path | field accessed | type |
+|------|----------------|------|
+| cache | the value of the current transform context's temporary cache. cache can be used as a temporary placeholder for data during complex transformations | pcommon.Map |
+| cache\[""\] | the value of an item in cache. Supports multiple indexes to access nested fields. | string, bool, int64, float64, pcommon.Map, pcommon.Slice, []byte or nil |
+| resource | resource of the instrumentation scope being processed | pcommon.Resource |
+| resource.attributes | resource attributes of the instrumentation scope being processed | pcommon.Map |
+| resource.attributes\[""\] | the value of the resource attribute of the instrumentation scope being processed. Supports multiple indexes to access nested fields. | string, bool, int64, float64, pcommon.Map, pcommon.Slice, []byte or nil |
+| resource.dropped_attributes_count | number of dropped attributes of the resource of the instrumentation scope being processed | int64 |
+| name | name of the instrumentation scope of the scope being processed | string |
+| version | version of the instrumentation scope of the scope being processed | string |
+| dropped_attributes_count | number of dropped attributes of the instrumentation scope of the scope being processed | int64 |
+| attributes | instrumentation scope attributes of the scope being processed | pcommon.Map |
+| attributes\[""\] | the value of the instrumentation scope attribute of the scope being processed. Supports multiple indexes to access nested fields. | string, bool, int64, float64, pcommon.Map, pcommon.Slice, []byte or nil |
+
+
+## Enums
+
+The Instrumentation Scope Context does not define any Enums at this time.
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlscope/scope.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlscope/scope.go
new file mode 100644
index 00000000000..3ae5e097644
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlscope/scope.go
@@ -0,0 +1,168 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package ottlscope // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlscope"
+
+import (
+	"context"
+	"errors"
+	"fmt"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/pdata/pcommon"
+	"go.uber.org/zap/zapcore"
+
+	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"
+	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal"
+	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/logging"
+)
+
+var (
+	_ internal.ResourceContext             = (*TransformContext)(nil)
+	_ internal.InstrumentationScopeContext = (*TransformContext)(nil)
+	_ zapcore.ObjectMarshaler              = (*TransformContext)(nil)
+)
+
+type TransformContext struct {
+	instrumentationScope pcommon.InstrumentationScope
+	resource             pcommon.Resource
+	cache                pcommon.Map
+	schemaURLItem        internal.SchemaURLItem
+}
+
+func (tCtx TransformContext) MarshalLogObject(encoder zapcore.ObjectEncoder) error {
+	err := encoder.AddObject("resource", logging.Resource(tCtx.resource))
+	err = errors.Join(err, encoder.AddObject("scope", logging.InstrumentationScope(tCtx.instrumentationScope)))
+	err = errors.Join(err, encoder.AddObject("cache", logging.Map(tCtx.cache)))
+	return err
+}
+
+type Option func(*ottl.Parser[TransformContext])
+
+func NewTransformContext(instrumentationScope pcommon.InstrumentationScope, resource pcommon.Resource, schemaURLItem internal.SchemaURLItem) TransformContext {
+	return TransformContext{
+		instrumentationScope: instrumentationScope,
+		resource:             resource,
+		cache:                pcommon.NewMap(),
+		schemaURLItem:        schemaURLItem,
+	}
+}
+
+func (tCtx TransformContext) GetInstrumentationScope() pcommon.InstrumentationScope {
+	return tCtx.instrumentationScope
+}
+
+func (tCtx TransformContext) GetResource() pcommon.Resource {
+	return tCtx.resource
+}
+
+func (tCtx TransformContext) getCache() pcommon.Map {
+	return tCtx.cache
+}
+
+func (tCtx TransformContext) GetScopeSchemaURLItem() internal.SchemaURLItem {
+	return tCtx.schemaURLItem
+}
+
+func (tCtx TransformContext) GetResourceSchemaURLItem() 
internal.SchemaURLItem { + return tCtx.schemaURLItem +} + +func NewParser(functions map[string]ottl.Factory[TransformContext], telemetrySettings component.TelemetrySettings, options ...Option) (ottl.Parser[TransformContext], error) { + pep := pathExpressionParser{telemetrySettings} + p, err := ottl.NewParser[TransformContext]( + functions, + pep.parsePath, + telemetrySettings, + ottl.WithEnumParser[TransformContext](parseEnum), + ) + if err != nil { + return ottl.Parser[TransformContext]{}, err + } + for _, opt := range options { + opt(&p) + } + return p, nil +} + +type StatementSequenceOption func(*ottl.StatementSequence[TransformContext]) + +func WithStatementSequenceErrorMode(errorMode ottl.ErrorMode) StatementSequenceOption { + return func(s *ottl.StatementSequence[TransformContext]) { + ottl.WithStatementSequenceErrorMode[TransformContext](errorMode)(s) + } +} + +func NewStatementSequence(statements []*ottl.Statement[TransformContext], telemetrySettings component.TelemetrySettings, options ...StatementSequenceOption) ottl.StatementSequence[TransformContext] { + s := ottl.NewStatementSequence(statements, telemetrySettings) + for _, op := range options { + op(&s) + } + return s +} + +type ConditionSequenceOption func(*ottl.ConditionSequence[TransformContext]) + +func WithConditionSequenceErrorMode(errorMode ottl.ErrorMode) ConditionSequenceOption { + return func(c *ottl.ConditionSequence[TransformContext]) { + ottl.WithConditionSequenceErrorMode[TransformContext](errorMode)(c) + } +} + +func NewConditionSequence(conditions []*ottl.Condition[TransformContext], telemetrySettings component.TelemetrySettings, options ...ConditionSequenceOption) ottl.ConditionSequence[TransformContext] { + c := ottl.NewConditionSequence(conditions, telemetrySettings) + for _, op := range options { + op(&c) + } + return c +} + +func parseEnum(_ *ottl.EnumSymbol) (*ottl.Enum, error) { + return nil, fmt.Errorf("instrumentation scope context does not provide Enum support") +} + +type pathExpressionParser struct { + telemetrySettings component.TelemetrySettings +} + +func (pep *pathExpressionParser) parsePath(path ottl.Path[TransformContext]) (ottl.GetSetter[TransformContext], error) { + if path == nil { + return nil, fmt.Errorf("path cannot be nil") + } + switch path.Name() { + case "cache": + if path.Keys() == nil { + return accessCache(), nil + } + return accessCacheKey(path.Keys()), nil + case "resource": + return internal.ResourcePathGetSetter[TransformContext](path.Next()) + default: + return internal.ScopePathGetSetter[TransformContext](path) + } +} + +func accessCache() ottl.StandardGetSetter[TransformContext] { + return ottl.StandardGetSetter[TransformContext]{ + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { + return tCtx.getCache(), nil + }, + Setter: func(_ context.Context, tCtx TransformContext, val any) error { + if m, ok := val.(pcommon.Map); ok { + m.CopyTo(tCtx.getCache()) + } + return nil + }, + } +} + +func accessCacheKey(key []ottl.Key[TransformContext]) ottl.StandardGetSetter[TransformContext] { + return ottl.StandardGetSetter[TransformContext]{ + Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + return internal.GetMapValue[TransformContext](ctx, tCtx, tCtx.getCache(), key) + }, + Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + return internal.SetMapValue[TransformContext](ctx, tCtx, tCtx.getCache(), key, val) + }, + } +} diff --git 
a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspan/span.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspan/span.go index 8f400540ba6..aa3283124bb 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspan/span.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspan/span.go @@ -5,34 +5,52 @@ package ottlspan // import "github.com/open-telemetry/opentelemetry-collector-co import ( "context" + "errors" "fmt" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/ptrace" + "go.uber.org/zap/zapcore" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/logging" ) -var _ internal.ResourceContext = TransformContext{} -var _ internal.InstrumentationScopeContext = TransformContext{} +var ( + _ internal.ResourceContext = (*TransformContext)(nil) + _ internal.InstrumentationScopeContext = (*TransformContext)(nil) + _ zapcore.ObjectMarshaler = (*TransformContext)(nil) +) type TransformContext struct { span ptrace.Span instrumentationScope pcommon.InstrumentationScope resource pcommon.Resource cache pcommon.Map + scopeSpans ptrace.ScopeSpans + resourceSpans ptrace.ResourceSpans +} + +func (tCtx TransformContext) MarshalLogObject(encoder zapcore.ObjectEncoder) error { + err := encoder.AddObject("resource", logging.Resource(tCtx.resource)) + err = errors.Join(err, encoder.AddObject("scope", logging.InstrumentationScope(tCtx.instrumentationScope))) + err = errors.Join(err, encoder.AddObject("span", logging.Span(tCtx.span))) + err = errors.Join(err, encoder.AddObject("cache", logging.Map(tCtx.cache))) + return err } type Option func(*ottl.Parser[TransformContext]) -func NewTransformContext(span ptrace.Span, instrumentationScope pcommon.InstrumentationScope, resource pcommon.Resource) TransformContext { +func NewTransformContext(span ptrace.Span, instrumentationScope pcommon.InstrumentationScope, resource pcommon.Resource, scopeSpans ptrace.ScopeSpans, resourceSpans ptrace.ResourceSpans) TransformContext { return TransformContext{ span: span, instrumentationScope: instrumentationScope, resource: resource, cache: pcommon.NewMap(), + scopeSpans: scopeSpans, + resourceSpans: resourceSpans, } } @@ -52,6 +70,14 @@ func (tCtx TransformContext) getCache() pcommon.Map { return tCtx.cache } +func (tCtx TransformContext) GetResourceSchemaURLItem() internal.SchemaURLItem { + return tCtx.resourceSpans +} + +func (tCtx TransformContext) GetScopeSchemaURLItem() internal.SchemaURLItem { + return tCtx.scopeSpans +} + func NewParser(functions map[string]ottl.Factory[TransformContext], telemetrySettings component.TelemetrySettings, options ...Option) (ottl.Parser[TransformContext], error) { pep := pathExpressionParser{telemetrySettings} p, err := ottl.NewParser[TransformContext]( @@ -136,10 +162,10 @@ func (pep *pathExpressionParser) parsePath(path ottl.Path[TransformContext]) (ot func accessCache() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { return tCtx.getCache(), nil }, - Setter: func(ctx 
context.Context, tCtx TransformContext, val any) error {
+		Setter: func(_ context.Context, tCtx TransformContext, val any) error {
 			if m, ok := val.(pcommon.Map); ok {
 				m.CopyTo(tCtx.getCache())
 			}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspanevent/span_events.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspanevent/span_events.go
index d8182f4ec88..b3826f690d2 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspanevent/span_events.go
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspanevent/span_events.go
@@ -5,20 +5,25 @@ package ottlspanevent // import "github.com/open-telemetry/opentelemetry-collect
 import (
 	"context"
+	"errors"
 	"fmt"
 	"time"
 
 	"go.opentelemetry.io/collector/component"
 	"go.opentelemetry.io/collector/pdata/pcommon"
 	"go.opentelemetry.io/collector/pdata/ptrace"
+	"go.uber.org/zap/zapcore"
 
 	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"
 	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal"
+	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/logging"
 )
 
-var _ internal.ResourceContext = TransformContext{}
-var _ internal.InstrumentationScopeContext = TransformContext{}
-var _ internal.SpanContext = TransformContext{}
+var (
+	_ internal.ResourceContext             = (*TransformContext)(nil)
+	_ internal.InstrumentationScopeContext = (*TransformContext)(nil)
+	_ zapcore.ObjectMarshaler              = (*TransformContext)(nil)
+)
 
 type TransformContext struct {
 	spanEvent            ptrace.SpanEvent
@@ -26,17 +31,30 @@ type TransformContext struct {
 	instrumentationScope pcommon.InstrumentationScope
 	resource             pcommon.Resource
 	cache                pcommon.Map
+	scopeSpans           ptrace.ScopeSpans
+	resourceSpans        ptrace.ResourceSpans
+}
+
+func (tCtx TransformContext) MarshalLogObject(encoder zapcore.ObjectEncoder) error {
+	err := encoder.AddObject("resource", logging.Resource(tCtx.resource))
+	err = errors.Join(err, encoder.AddObject("scope", logging.InstrumentationScope(tCtx.instrumentationScope)))
+	err = errors.Join(err, encoder.AddObject("span", logging.Span(tCtx.span)))
+	err = errors.Join(err, encoder.AddObject("spanevent", logging.SpanEvent(tCtx.spanEvent)))
+	err = errors.Join(err, encoder.AddObject("cache", logging.Map(tCtx.cache)))
+	return err
 }
 
 type Option func(*ottl.Parser[TransformContext])
 
-func NewTransformContext(spanEvent ptrace.SpanEvent, span ptrace.Span, instrumentationScope pcommon.InstrumentationScope, resource pcommon.Resource) TransformContext {
+func NewTransformContext(spanEvent ptrace.SpanEvent, span ptrace.Span, instrumentationScope pcommon.InstrumentationScope, resource pcommon.Resource, scopeSpans ptrace.ScopeSpans, resourceSpans ptrace.ResourceSpans) TransformContext {
 	return TransformContext{
 		spanEvent:            spanEvent,
 		span:                 span,
 		instrumentationScope: instrumentationScope,
 		resource:             resource,
 		cache:                pcommon.NewMap(),
+		scopeSpans:           scopeSpans,
+		resourceSpans:        resourceSpans,
 	}
 }
@@ -60,6 +78,14 @@ func (tCtx TransformContext) getCache() pcommon.Map {
 	return tCtx.cache
 }
 
+func (tCtx TransformContext) GetScopeSchemaURLItem() internal.SchemaURLItem {
+	return tCtx.scopeSpans
+}
+
+func (tCtx TransformContext) GetResourceSchemaURLItem() internal.SchemaURLItem {
+	return tCtx.resourceSpans
+}
+
 func NewParser(functions map[string]ottl.Factory[TransformContext], telemetrySettings component.TelemetrySettings, options ...Option) 
(ottl.Parser[TransformContext], error) { pep := pathExpressionParser{telemetrySettings} p, err := ottl.NewParser[TransformContext]( @@ -155,14 +181,14 @@ func (pep *pathExpressionParser) parsePath(path ottl.Path[TransformContext]) (ot default: return nil, internal.FormatDefaultErrorMessage(path.Name(), path.String(), "Span Event", internal.SpanEventRef) } - } + func accessCache() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { return tCtx.getCache(), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if m, ok := val.(pcommon.Map); ok { m.CopyTo(tCtx.getCache()) } @@ -184,10 +210,10 @@ func accessCacheKey(key []ottl.Key[TransformContext]) ottl.StandardGetSetter[Tra func accessSpanEventTimeUnixNano() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { return tCtx.GetSpanEvent().Timestamp().AsTime().UnixNano(), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if newTimestamp, ok := val.(int64); ok { tCtx.GetSpanEvent().SetTimestamp(pcommon.NewTimestampFromTime(time.Unix(0, newTimestamp))) } @@ -198,10 +224,10 @@ func accessSpanEventTimeUnixNano() ottl.StandardGetSetter[TransformContext] { func accessSpanEventTime() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { return tCtx.GetSpanEvent().Timestamp().AsTime(), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if newTimestamp, ok := val.(time.Time); ok { tCtx.GetSpanEvent().SetTimestamp(pcommon.NewTimestampFromTime(newTimestamp)) } @@ -212,10 +238,10 @@ func accessSpanEventTime() ottl.StandardGetSetter[TransformContext] { func accessSpanEventName() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { return tCtx.GetSpanEvent().Name(), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if newName, ok := val.(string); ok { tCtx.GetSpanEvent().SetName(newName) } @@ -226,10 +252,10 @@ func accessSpanEventName() ottl.StandardGetSetter[TransformContext] { func accessSpanEventAttributes() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { return tCtx.GetSpanEvent().Attributes(), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if attrs, ok := val.(pcommon.Map); ok { 
attrs.CopyTo(tCtx.GetSpanEvent().Attributes()) } @@ -251,10 +277,10 @@ func accessSpanEventAttributesKey(key []ottl.Key[TransformContext]) ottl.Standar func accessSpanEventDroppedAttributeCount() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ - Getter: func(ctx context.Context, tCtx TransformContext) (any, error) { + Getter: func(_ context.Context, tCtx TransformContext) (any, error) { return int64(tCtx.GetSpanEvent().DroppedAttributesCount()), nil }, - Setter: func(ctx context.Context, tCtx TransformContext, val any) error { + Setter: func(_ context.Context, tCtx TransformContext, val any) error { if newCount, ok := val.(int64); ok { tCtx.GetSpanEvent().SetDroppedAttributesCount(uint32(newCount)) } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/expression.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/expression.go index 6f6cc18b81c..a2e8d29e63c 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/expression.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/expression.go @@ -4,14 +4,16 @@ package ottl // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" import ( + "bytes" "context" + "encoding/binary" "encoding/hex" "fmt" "reflect" "strconv" "time" - jsoniter "github.com/json-iterator/go" + "github.com/goccy/go-json" "go.opentelemetry.io/collector/pdata/pcommon" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/internal/ottlcommon" @@ -109,10 +111,35 @@ func (g exprGetter[K]) Get(ctx context.Context, tCtx K) (any, error) { } result = ottlcommon.GetValue(r.At(int(*k.Int))) case []any: - if int(*k.Int) >= len(r) || int(*k.Int) < 0 { - return nil, fmt.Errorf("index %v out of bounds", *k.Int) + result, err = getElementByIndex(r, k.Int) + if err != nil { + return nil, err + } + case []string: + result, err = getElementByIndex(r, k.Int) + if err != nil { + return nil, err + } + case []bool: + result, err = getElementByIndex(r, k.Int) + if err != nil { + return nil, err + } + case []float64: + result, err = getElementByIndex(r, k.Int) + if err != nil { + return nil, err + } + case []int64: + result, err = getElementByIndex(r, k.Int) + if err != nil { + return nil, err + } + case []byte: + result, err = getElementByIndex(r, k.Int) + if err != nil { + return nil, err } - result = r[*k.Int] default: return nil, fmt.Errorf("type, %T, does not support int indexing", result) } @@ -123,6 +150,13 @@ func (g exprGetter[K]) Get(ctx context.Context, tCtx K) (any, error) { return result, nil } +func getElementByIndex[T any](r []T, idx *int64) (any, error) { + if int(*idx) >= len(r) || int(*idx) < 0 { + return nil, fmt.Errorf("index %v out of bounds", *idx) + } + return r[*idx], nil +} + type listGetter[K any] struct { slice []Getter[K] } @@ -141,6 +175,31 @@ func (l *listGetter[K]) Get(ctx context.Context, tCtx K) (any, error) { return evaluated, nil } +type mapGetter[K any] struct { + mapValues map[string]Getter[K] +} + +func (m *mapGetter[K]) Get(ctx context.Context, tCtx K) (any, error) { + evaluated := map[string]any{} + for k, v := range m.mapValues { + val, err := v.Get(ctx, tCtx) + if err != nil { + return nil, err + } + switch t := val.(type) { + case pcommon.Map: + evaluated[k] = t.AsRaw() + default: + evaluated[k] = t + } + } + result := pcommon.NewMap() + if err := result.FromRaw(evaluated); err != nil { + return nil, err + } + return result, nil +} + // TypeError 
represents that a value was not an expected type. type TypeError string @@ -401,22 +460,25 @@ func (g StandardStringLikeGetter[K]) Get(ctx context.Context, tCtx K) (*string, case []byte: result = hex.EncodeToString(v) case pcommon.Map: - result, err = jsoniter.MarshalToString(v.AsRaw()) + resultBytes, err := json.Marshal(v.AsRaw()) if err != nil { return nil, err } + result = string(resultBytes) case pcommon.Slice: - result, err = jsoniter.MarshalToString(v.AsRaw()) + resultBytes, err := json.Marshal(v.AsRaw()) if err != nil { return nil, err } + result = string(resultBytes) case pcommon.Value: result = v.AsString() default: - result, err = jsoniter.MarshalToString(v) + resultBytes, err := json.Marshal(v) if err != nil { return nil, TypeError(fmt.Sprintf("unsupported type: %T", v)) } + result = string(resultBytes) } return &result, nil } @@ -549,6 +611,81 @@ func (g StandardIntLikeGetter[K]) Get(ctx context.Context, tCtx K) (*int64, erro return &result, nil } +// ByteSliceLikeGetter is a Getter that returns []byte by converting the underlying value to an []byte if necessary +type ByteSliceLikeGetter[K any] interface { + // Get retrieves []byte value. + // The expectation is that the underlying value is converted to []byte if possible. + // If the value cannot be converted to []byte, nil and an error are returned. + // If the value is nil, nil is returned without an error. + Get(ctx context.Context, tCtx K) ([]byte, error) +} + +type StandardByteSliceLikeGetter[K any] struct { + Getter func(ctx context.Context, tCtx K) (any, error) +} + +func (g StandardByteSliceLikeGetter[K]) Get(ctx context.Context, tCtx K) ([]byte, error) { + val, err := g.Getter(ctx, tCtx) + if err != nil { + return nil, fmt.Errorf("error getting value in %T: %w", g, err) + } + if val == nil { + return nil, nil + } + var result []byte + switch v := val.(type) { + case []byte: + result = v + case string: + result = []byte(v) + case float64, int64, bool: + result, err = valueToBytes(v) + if err != nil { + return nil, fmt.Errorf("error converting value %f of %T: %w", v, g, err) + } + case pcommon.Value: + switch v.Type() { + case pcommon.ValueTypeBytes: + result = v.Bytes().AsRaw() + case pcommon.ValueTypeInt: + result, err = valueToBytes(v.Int()) + if err != nil { + return nil, fmt.Errorf("error converting value %d of int64: %w", v.Int(), err) + } + case pcommon.ValueTypeDouble: + result, err = valueToBytes(v.Double()) + if err != nil { + return nil, fmt.Errorf("error converting value %f of float64: %w", v.Double(), err) + } + case pcommon.ValueTypeStr: + result = []byte(v.Str()) + case pcommon.ValueTypeBool: + result, err = valueToBytes(v.Bool()) + if err != nil { + return nil, fmt.Errorf("error converting value %s of bool: %w", v.Str(), err) + } + default: + return nil, TypeError(fmt.Sprintf("unsupported value type: %v", v.Type())) + } + default: + return nil, TypeError(fmt.Sprintf("unsupported type: %T", v)) + } + return result, nil +} + +// valueToBytes converts a value to a byte slice of length 8. +func valueToBytes(n any) ([]byte, error) { + // Create a buffer to hold the bytes + buf := new(bytes.Buffer) + // Write the value to the buffer using binary.Write + err := binary.Write(buf, binary.BigEndian, n) + if err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + // BoolLikeGetter is a Getter that returns a bool by converting the underlying value to a bool if necessary. type BoolLikeGetter[K any] interface { // Get retrieves a bool value. 
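The StandardByteSliceLikeGetter added above funnels every fixed-size scalar through valueToBytes, which encodes the value big-endian with binary.Write. A minimal standalone sketch of what that helper yields (the main function and sample values are illustrative, not part of the patch; note that bool encodes to a single byte even though the helper's comment mentions length 8):

    package main

    import (
    	"bytes"
    	"encoding/binary"
    	"fmt"
    )

    // valueToBytes mirrors the helper above: binary.Write encodes fixed-size
    // values big-endian, yielding 8 bytes for int64 and float64.
    func valueToBytes(n any) ([]byte, error) {
    	buf := new(bytes.Buffer)
    	if err := binary.Write(buf, binary.BigEndian, n); err != nil {
    		return nil, err
    	}
    	return buf.Bytes(), nil
    }

    func main() {
    	i, _ := valueToBytes(int64(1))
    	f, _ := valueToBytes(3.5)
    	b, _ := valueToBytes(true)
    	fmt.Printf("%x %x %x\n", i, f, b) // 0000000000000001 400c000000000000 01
    }
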
@@ -638,7 +775,7 @@ func (p *Parser[K]) newGetter(val value) (Getter[K], error) { return &literal[K]{value: *i}, nil } if eL.Path != nil { - np, err := newPath[K](eL.Path.Fields) + np, err := p.newPath(eL.Path) if err != nil { return nil, err } @@ -661,6 +798,18 @@ func (p *Parser[K]) newGetter(val value) (Getter[K], error) { return &lg, nil } + if val.Map != nil { + mg := mapGetter[K]{mapValues: map[string]Getter[K]{}} + for _, kvp := range val.Map.Values { + getter, err := p.newGetter(*kvp.Value) + if err != nil { + return nil, err + } + mg.mapValues[*kvp.Key] = getter + } + return &mg, nil + } + if val.MathExpression == nil { // In practice, can't happen since the DSL grammar guarantees one is set return nil, fmt.Errorf("no value field set. This is a bug in the OpenTelemetry Transformation Language") diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/functions.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/functions.go index 86782be3c41..4ff92123c7e 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/functions.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/functions.go @@ -22,37 +22,58 @@ type Enum int64 type EnumSymbol string -func buildOriginalText(fields []field) string { +func buildOriginalText(path *path) string { var builder strings.Builder - for i, f := range fields { + if path.Context != "" { + builder.WriteString(path.Context) + if len(path.Fields) > 0 { + builder.WriteString(".") + } + } + for i, f := range path.Fields { builder.WriteString(f.Name) if len(f.Keys) > 0 { - for _, k := range f.Keys { - builder.WriteString("[") - if k.Int != nil { - builder.WriteString(strconv.FormatInt(*k.Int, 10)) - } - if k.String != nil { - builder.WriteString(*k.String) - } - builder.WriteString("]") - } + builder.WriteString(buildOriginalKeysText(f.Keys)) } - if i != len(fields)-1 { + if i != len(path.Fields)-1 { builder.WriteString(".") } } return builder.String() } -func newPath[K any](fields []field) (*basePath[K], error) { - if len(fields) == 0 { +func buildOriginalKeysText(keys []key) string { + var builder strings.Builder + if len(keys) > 0 { + for _, k := range keys { + builder.WriteString("[") + if k.Int != nil { + builder.WriteString(strconv.FormatInt(*k.Int, 10)) + } + if k.String != nil { + builder.WriteString(*k.String) + } + builder.WriteString("]") + } + } + return builder.String() +} + +func (p *Parser[K]) newPath(path *path) (*basePath[K], error) { + if len(path.Fields) == 0 { return nil, fmt.Errorf("cannot make a path from zero fields") } - originalText := buildOriginalText(fields) + + pathContext, fields, err := p.parsePathContext(path) + if err != nil { + return nil, err + } + + originalText := buildOriginalText(path) var current *basePath[K] for i := len(fields) - 1; i >= 0; i-- { current = &basePath[K]{ + context: pathContext, name: fields[i].Name, keys: newKeys[K](fields[i].Keys), nextPath: current, @@ -64,10 +85,56 @@ func newPath[K any](fields []field) (*basePath[K], error) { return current, nil } +func (p *Parser[K]) parsePathContext(path *path) (string, []field, error) { + hasPathContextNames := len(p.pathContextNames) > 0 + if path.Context != "" { + // no pathContextNames means the Parser isn't handling the grammar path's context yet, + // so it falls back to the previous behavior with the path.Context value as the first + // path's segment. 
+ if !hasPathContextNames { + return "", append([]field{{Name: path.Context}}, path.Fields...), nil + } + + if _, ok := p.pathContextNames[path.Context]; !ok { + return "", path.Fields, fmt.Errorf(`context "%s" from path "%s" is not valid, it must be replaced by one of: %s`, path.Context, buildOriginalText(path), p.buildPathContextNamesText("")) + } + + return path.Context, path.Fields, nil + } + + if hasPathContextNames { + originalText := buildOriginalText(path) + return "", nil, fmt.Errorf(`missing context name for path "%s", possibly valid options are: %s`, originalText, p.buildPathContextNamesText(originalText)) + } + + return "", path.Fields, nil +} + +func (p *Parser[K]) buildPathContextNamesText(path string) string { + var builder strings.Builder + var suffix string + if path != "" { + suffix = "." + path + } + + i := 0 + for ctx := range p.pathContextNames { + builder.WriteString(fmt.Sprintf(`"%s%s"`, ctx, suffix)) + if i != len(p.pathContextNames)-1 { + builder.WriteString(", ") + } + i++ + } + return builder.String() +} + // Path represents a chain of path parts in an OTTL statement, such as `body.string`. // A Path has a name, and potentially a set of keys. // If the path in the OTTL statement contains multiple parts (separated by a dot (`.`)), then the Path will have a pointer to the next Path. type Path[K any] interface { + // Context is the OTTL context name of this Path. + Context() string + // Name is the name of this segment of the path. Name() string @@ -86,6 +153,7 @@ type Path[K any] interface { var _ Path[any] = &basePath[any]{} type basePath[K any] struct { + context string name string keys []Key[K] nextPath *basePath[K] @@ -94,6 +162,10 @@ type basePath[K any] struct { originalText string } +func (p *basePath[K]) Context() string { + return p.context +} + func (p *basePath[K]) Name() string { return p.name } @@ -283,8 +355,8 @@ func (p *Parser[K]) buildArgs(ed editor, argsVal reflect.Value) error { switch { case arg.Value.Enum != nil: name = string(*arg.Value.Enum) - case arg.Value.FunctionName != nil: - name = *arg.Value.FunctionName + case arg.FunctionName != nil: + name = *arg.FunctionName default: return fmt.Errorf("invalid function name given") } @@ -412,7 +484,7 @@ func (p *Parser[K]) buildArg(argVal value, argType reflect.Type) (any, error) { if argVal.Literal == nil || argVal.Literal.Path == nil { return nil, fmt.Errorf("must be a path") } - np, err := newPath[K](argVal.Literal.Path.Fields) + np, err := p.newPath(argVal.Literal.Path) if err != nil { return nil, err } @@ -493,6 +565,12 @@ func (p *Parser[K]) buildArg(argVal value, argType reflect.Type) (any, error) { return nil, err } return StandardBoolLikeGetter[K]{Getter: arg.Get}, nil + case strings.HasPrefix(name, "ByteSliceLikeGetter"): + arg, err := p.newGetter(argVal) + if err != nil { + return nil, err + } + return StandardByteSliceLikeGetter[K]{Getter: arg.Get}, nil case name == "Enum": arg, err := p.enumParser((*EnumSymbol)(argVal.Enum)) if err != nil { diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/grammar.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/grammar.go index ebd8e58ec7c..a1e5eb53a81 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/grammar.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/grammar.go @@ -6,6 +6,7 @@ package ottl // import "github.com/open-telemetry/opentelemetry-collector-contri import ( "encoding/hex" "fmt" + "strings" 
"github.com/alecthomas/participle/v2/lexer" ) @@ -19,17 +20,17 @@ type parsedStatement struct { } func (p *parsedStatement) checkForCustomError() error { + validator := &grammarCustomErrorsVisitor{} if p.Converter != nil { - return fmt.Errorf("editor names must start with a lowercase letter but got '%v'", p.Converter.Function) - } - err := p.Editor.checkForCustomError() - if err != nil { - return err + validator.add(fmt.Errorf("editor names must start with a lowercase letter but got '%v'", p.Converter.Function)) } + + p.Editor.accept(validator) if p.WhereClause != nil { - return p.WhereClause.checkForCustomError() + p.WhereClause.accept(validator) } - return nil + + return validator.join() } type constExpr struct { @@ -47,14 +48,16 @@ type booleanValue struct { SubExpr *booleanExpression `parser:"| '(' @@ ')' )"` } -func (b *booleanValue) checkForCustomError() error { +func (b *booleanValue) accept(v grammarVisitor) { if b.Comparison != nil { - return b.Comparison.checkForCustomError() + b.Comparison.accept(v) + } + if b.ConstExpr != nil && b.ConstExpr.Converter != nil { + b.ConstExpr.Converter.accept(v) } if b.SubExpr != nil { - return b.SubExpr.checkForCustomError() + b.SubExpr.accept(v) } - return nil } // opAndBooleanValue represents the right side of an AND boolean expression. @@ -63,8 +66,10 @@ type opAndBooleanValue struct { Value *booleanValue `parser:"@@"` } -func (b *opAndBooleanValue) checkForCustomError() error { - return b.Value.checkForCustomError() +func (b *opAndBooleanValue) accept(v grammarVisitor) { + if b.Value != nil { + b.Value.accept(v) + } } // term represents an arbitrary number of boolean values joined by AND. @@ -73,18 +78,15 @@ type term struct { Right []*opAndBooleanValue `parser:"@@*"` } -func (b *term) checkForCustomError() error { - err := b.Left.checkForCustomError() - if err != nil { - return err +func (b *term) accept(v grammarVisitor) { + if b.Left != nil { + b.Left.accept(v) } for _, r := range b.Right { - err = r.checkForCustomError() - if err != nil { - return err + if r != nil { + r.accept(v) } } - return nil } // opOrTerm represents the right side of an OR boolean expression. @@ -93,8 +95,10 @@ type opOrTerm struct { Term *term `parser:"@@"` } -func (b *opOrTerm) checkForCustomError() error { - return b.Term.checkForCustomError() +func (b *opOrTerm) accept(v grammarVisitor) { + if b.Term != nil { + b.Term.accept(v) + } } // booleanExpression represents a true/false decision expressed @@ -105,17 +109,20 @@ type booleanExpression struct { } func (b *booleanExpression) checkForCustomError() error { - err := b.Left.checkForCustomError() - if err != nil { - return err + validator := &grammarCustomErrorsVisitor{} + b.accept(validator) + return validator.join() +} + +func (b *booleanExpression) accept(v grammarVisitor) { + if b.Left != nil { + b.Left.accept(v) } for _, r := range b.Right { - err = r.checkForCustomError() - if err != nil { - return err + if r != nil { + r.accept(v) } } - return nil } // compareOp is the type of a comparison operator. @@ -178,13 +185,9 @@ type comparison struct { Right value `parser:"@@"` } -func (c *comparison) checkForCustomError() error { - err := c.Left.checkForCustomError() - if err != nil { - return err - } - err = c.Right.checkForCustomError() - return err +func (c *comparison) accept(v grammarVisitor) { + c.Left.accept(v) + c.Right.accept(v) } // editor represents the function call of a statement. 
@@ -195,19 +198,11 @@ type editor struct { Keys []key `parser:"( @@ )*"` } -func (i *editor) checkForCustomError() error { - var err error - +func (i *editor) accept(v grammarVisitor) { + v.visitEditor(i) for _, arg := range i.Arguments { - err = arg.checkForCustomError() - if err != nil { - return err - } + arg.accept(v) } - if i.Keys != nil { - return fmt.Errorf("only paths and converters may be indexed, not editors, but got %v %v", i.Function, i.Keys) - } - return nil } // converter represents a converter function call. @@ -217,13 +212,22 @@ type converter struct { Keys []key `parser:"( @@ )*"` } +func (c *converter) accept(v grammarVisitor) { + if c.Arguments != nil { + for _, a := range c.Arguments { + a.accept(v) + } + } +} + type argument struct { - Name string `parser:"(@(Lowercase(Uppercase | Lowercase)*) Equal)?"` - Value value `parser:"@@"` + Name string `parser:"(@(Lowercase(Uppercase | Lowercase)*) Equal)?"` + Value value `parser:"( @@"` + FunctionName *string `parser:"| @(Uppercase(Uppercase | Lowercase)*) )"` } -func (a *argument) checkForCustomError() error { - return a.Value.checkForCustomError() +func (a *argument) accept(v grammarVisitor) { + a.Value.accept(v) } // value represents a part of a parsed statement which is resolved to a value of some sort. This can be a telemetry path @@ -236,23 +240,33 @@ type value struct { String *string `parser:"| @String"` Bool *boolean `parser:"| @Boolean"` Enum *enumSymbol `parser:"| @Uppercase (?! Lowercase)"` - FunctionName *string `parser:"| @(Uppercase(Uppercase | Lowercase)*)"` + Map *mapValue `parser:"| @@"` List *list `parser:"| @@)"` } -func (v *value) checkForCustomError() error { +func (v *value) accept(vis grammarVisitor) { + vis.visitValue(v) if v.Literal != nil { - return v.Literal.checkForCustomError() + v.Literal.accept(vis) } if v.MathExpression != nil { - return v.MathExpression.checkForCustomError() + v.MathExpression.accept(vis) + } + if v.Map != nil { + v.Map.accept(vis) + } + if v.List != nil { + for _, i := range v.List.Values { + i.accept(vis) + } } - return nil } // path represents a telemetry path mathExpression. type path struct { - Fields []field `parser:"@@ ( '.' @@ )*"` + Pos lexer.Position + Context string `parser:"(@Lowercase '.')?"` + Fields []field `parser:"@@ ( '.' @@ )*"` } // field is an item within a path. 
@@ -270,6 +284,23 @@ type list struct { Values []value `parser:"'[' (@@)* (',' @@)* ']'"` } +type mapValue struct { + Values []mapItem `parser:"'{' (@@ ','?)* '}'"` +} + +func (m *mapValue) accept(v grammarVisitor) { + for _, i := range m.Values { + if i.Value != nil { + i.Value.accept(v) + } + } +} + +type mapItem struct { + Key *string `parser:"@String ':'"` + Value *value `parser:"@@"` +} + // byteSlice type for capturing byte slices type byteSlice []byte @@ -308,11 +339,17 @@ type mathExprLiteral struct { Path *path `parser:"| @@ )"` } -func (m *mathExprLiteral) checkForCustomError() error { +func (m *mathExprLiteral) accept(v grammarVisitor) { + v.visitMathExprLiteral(m) + if m.Path != nil { + v.visitPath(m.Path) + } if m.Editor != nil { - return fmt.Errorf("converter names must start with an uppercase letter but got '%v'", m.Editor.Function) + m.Editor.accept(v) + } + if m.Converter != nil { + m.Converter.accept(v) } - return nil } type mathValue struct { @@ -320,11 +357,13 @@ type mathValue struct { SubExpression *mathExpression `parser:"| '(' @@ ')' )"` } -func (m *mathValue) checkForCustomError() error { +func (m *mathValue) accept(v grammarVisitor) { if m.Literal != nil { - return m.Literal.checkForCustomError() + m.Literal.accept(v) + } + if m.SubExpression != nil { + m.SubExpression.accept(v) } - return m.SubExpression.checkForCustomError() } type opMultDivValue struct { @@ -332,8 +371,10 @@ type opMultDivValue struct { Value *mathValue `parser:"@@"` } -func (m *opMultDivValue) checkForCustomError() error { - return m.Value.checkForCustomError() +func (m *opMultDivValue) accept(v grammarVisitor) { + if m.Value != nil { + m.Value.accept(v) + } } type addSubTerm struct { @@ -341,18 +382,15 @@ type addSubTerm struct { Right []*opMultDivValue `parser:"@@*"` } -func (m *addSubTerm) checkForCustomError() error { - err := m.Left.checkForCustomError() - if err != nil { - return err +func (m *addSubTerm) accept(v grammarVisitor) { + if m.Left != nil { + m.Left.accept(v) } for _, r := range m.Right { - err = r.checkForCustomError() - if err != nil { - return err + if r != nil { + r.accept(v) } } - return nil } type opAddSubTerm struct { @@ -360,8 +398,10 @@ type opAddSubTerm struct { Term *addSubTerm `parser:"@@"` } -func (m *opAddSubTerm) checkForCustomError() error { - return m.Term.checkForCustomError() +func (r *opAddSubTerm) accept(v grammarVisitor) { + if r.Term != nil { + r.Term.accept(v) + } } type mathExpression struct { @@ -369,18 +409,17 @@ type mathExpression struct { Right []*opAddSubTerm `parser:"@@*"` } -func (m *mathExpression) checkForCustomError() error { - err := m.Left.checkForCustomError() - if err != nil { - return err +func (m *mathExpression) accept(v grammarVisitor) { + if m.Left != nil { + m.Left.accept(v) } - for _, r := range m.Right { - err = r.checkForCustomError() - if err != nil { - return err + if m.Right != nil { + for _, r := range m.Right { + if r != nil { + r.accept(v) + } } } - return nil } type mathOp int @@ -444,9 +483,80 @@ func buildLexer() *lexer.StatefulDefinition { {Name: `Equal`, Pattern: `=`}, {Name: `LParen`, Pattern: `\(`}, {Name: `RParen`, Pattern: `\)`}, + {Name: `LBrace`, Pattern: `\{`}, + {Name: `RBrace`, Pattern: `\}`}, + {Name: `Colon`, Pattern: `\:`}, {Name: `Punct`, Pattern: `[,.\[\]]`}, {Name: `Uppercase`, Pattern: `[A-Z][A-Z0-9_]*`}, {Name: `Lowercase`, Pattern: `[a-z][a-z0-9_]*`}, {Name: "whitespace", Pattern: `\s+`}, }) } + +// grammarCustomError represents a grammar error in which the statement has a valid syntax +// according 
to the grammar's definition, but is still logically invalid.
+type grammarCustomError struct {
+	errs []error
+}
+
+// Error returns all error messages separated by semicolons.
+func (e *grammarCustomError) Error() string {
+	switch len(e.errs) {
+	case 0:
+		return ""
+	case 1:
+		return e.errs[0].Error()
+	default:
+		var b strings.Builder
+		b.WriteString(e.errs[0].Error())
+		for _, err := range e.errs[1:] {
+			b.WriteString("; ")
+			b.WriteString(err.Error())
+		}
+		return b.String()
+	}
+}
+
+func (e *grammarCustomError) Unwrap() []error {
+	return e.errs
+}
+
+// grammarVisitor allows accessing the grammar AST nodes using the visitor pattern.
+type grammarVisitor interface {
+	visitPath(v *path)
+	visitEditor(v *editor)
+	visitValue(v *value)
+	visitMathExprLiteral(v *mathExprLiteral)
+}
+
+// grammarCustomErrorsVisitor is used to execute custom validations on the grammar AST.
+type grammarCustomErrorsVisitor struct {
+	errs []error
+}
+
+func (g *grammarCustomErrorsVisitor) add(err error) {
+	g.errs = append(g.errs, err)
+}
+
+func (g *grammarCustomErrorsVisitor) join() error {
+	if len(g.errs) == 0 {
+		return nil
+	}
+	return &grammarCustomError{errs: g.errs}
+}
+
+func (g *grammarCustomErrorsVisitor) visitPath(_ *path) {}
+
+func (g *grammarCustomErrorsVisitor) visitValue(_ *value) {}
+
+func (g *grammarCustomErrorsVisitor) visitEditor(v *editor) {
+	if v.Keys != nil {
+		g.add(fmt.Errorf("only paths and converters may be indexed, not editors, but got %s%s", v.Function, buildOriginalKeysText(v.Keys)))
+	}
+}
+
+func (g *grammarCustomErrorsVisitor) visitMathExprLiteral(v *mathExprLiteral) {
+	if v.Editor != nil {
+		g.add(fmt.Errorf("converter names must start with an uppercase letter but got '%v'", v.Editor.Function))
+	}
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/metadata.yaml b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/metadata.yaml
index e7333a2c5fd..b326d06fb18 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/metadata.yaml
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/metadata.yaml
@@ -5,4 +5,5 @@ status:
     stability:
       alpha: [ traces, metrics, logs ]
 codeowners:
-  active: [TylerHelmuth, kentquirk, bogdandrutu, evan-bradley]
\ No newline at end of file
+  active: [TylerHelmuth, kentquirk, bogdandrutu, evan-bradley]
+  seeking_new: true
\ No newline at end of file
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/README.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/README.md
index 94712ca3074..8a4aec76ca5 100644
--- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/README.md
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/README.md
@@ -29,9 +29,9 @@ In these situations the function will error if it does not know how to do the co
 Use `ErrorMode` to determine how the `Statement` handles these errors.
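As a minimal illustrative sketch (not part of the upstream docs), a transformprocessor configuration that ignores statement errors instead of failing the pipeline might look like the following; the component-specific guides below describe exactly how each component applies its error mode:

```yaml
processors:
  transform:
    error_mode: ignore
    trace_statements:
      - context: span
        statements:
          - set(attributes["environment"], "prod") where attributes["environment"] == nil
```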
See the component-specific guides for how each uses error mode:
-- [filterprocessor](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/filterprocessor#ottl)
-- [routingprocessor](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/routingprocessor#tech-preview-opentelemetry-transformation-language-statements-as-routing-conditions)
-- [transformprocessor](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/transformprocessor#config)
+- [filterprocessor](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/filterprocessor/README.md#configuration)
+- [routingconnector](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/connector/routingconnector/README.md#configuration)
+- [transformprocessor](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/transformprocessor/README.md#config)
 
 ## Editors
 
@@ -39,14 +39,16 @@ Editors are what OTTL uses to transform telemetry.
 
 Editors:
 
-- Are allowed to transform telemetry. When a Function is invoked the expectation is that the underlying telemetry is modified in some way.
-- May have side effects. Some Functions may generate telemetry and add it to the telemetry payload to be processed in this batch.
-- May return values. Although not common and not required, Functions may return values.
+- Are allowed to transform telemetry. When an Editor is invoked the expectation is that the underlying telemetry is modified in some way.
+- May have side effects. Some Editors may generate telemetry and add it to the telemetry payload to be processed in this batch.
+- May return values. Although not common and not required, Editors may return values.
 
 Available Editors:
 
+- [append](#append)
 - [delete_key](#delete_key)
 - [delete_matching_keys](#delete_matching_keys)
+- [keep_matching_keys](#keep_matching_keys)
 - [flatten](#flatten)
 - [keep_keys](#keep_keys)
 - [limit](#limit)
@@ -58,6 +60,19 @@ Available Editors:
 - [set](#set)
 - [truncate_all](#truncate_all)
 
+### append
+
+`append(target, Optional[value], Optional[values])`
+
+The `append` editor appends one or more values to `target`.
+`append` converts scalar values into an array if the field exists but is not an array, and creates an array containing the provided values if the field doesn’t exist.
+
+The resulting field is always of type `pcommon.Slice`; the types of existing and new items in the slice are not converted. This means it is possible to create a slice whose elements have different types. Be careful when using `append` to set attribute values, as this can produce values that are not possible to create through OpenTelemetry APIs, [according to](https://opentelemetry.io/docs/specs/otel/common/#attribute) the OpenTelemetry specification.
+
+- `append(attributes["tags"], "prod")`
+- `append(attributes["tags"], values = ["staging", "staging:east"])`
+- `append(attributes["tags_copy"], attributes["tags"])`
+
 ### delete_key
 
 `delete_key(target, key)`
 
@@ -92,6 +107,23 @@ Examples:
 
 - `delete_matching_keys(resource.attributes, "(?i).*password.*")`
 
+### keep_matching_keys
+
+`keep_matching_keys(target, pattern)`
+
+The `keep_matching_keys` function keeps all keys from a `pcommon.Map` that match a regex pattern.
+
+`target` is a path expression to a `pcommon.Map` type field. `pattern` is a regex string.
+
+All keys that match the pattern will remain in the map, while non-matching keys will be removed.
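+
+For instance (an illustrative sketch, not an upstream example): given attributes `{"service.version": "1.2.3", "os.version": "14.1", "secret": "abc"}`, `keep_matching_keys(attributes, "(?i).*version.*")` keeps `service.version` and `os.version` and removes `secret`; see the examples below.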
+ +Examples: + + +- `keep_matching_keys(attributes, "(?i).*version.*")` + +- `keep_matching_keys(resource.attributes, "(?i).*version.*")` + ### flatten `flatten(target, Optional[prefix], Optional[depth])` @@ -378,48 +410,74 @@ Unlike functions, they do not modify any input telemetry and always return a val Available Converters: - [Base64Decode](#base64decode) +- [Decode](#decode) - [Concat](#concat) - [ConvertCase](#convertcase) +- [ConvertAttributesToElementsXML](#convertattributestoelementsxml) +- [ConvertTextToElementsXML](#converttexttoelementsxml) +- [Day](#day) +- [Double](#double) +- [Duration](#duration) - [ExtractPatterns](#extractpatterns) +- [ExtractGrokPatterns](#extractgrokpatterns) - [FNV](#fnv) +- [Format](#format) +- [GetXML](#getxml) +- [Hex](#hex) - [Hour](#hour) - [Hours](#hours) -- [Double](#double) -- [Duration](#duration) +- [InsertXML](#insertxml) - [Int](#int) - [IsBool](#isbool) - [IsDouble](#isdouble) - [IsInt](#isint) +- [IsRootSpan](#isrootspan) - [IsMap](#ismap) - [IsMatch](#ismatch) +- [IsList](#islist) - [IsString](#isstring) - [Len](#len) - [Log](#log) +- [MD5](#md5) - [Microseconds](#microseconds) - [Milliseconds](#milliseconds) +- [Minute](#minute) - [Minutes](#minutes) +- [Month](#month) - [Nanoseconds](#nanoseconds) - [Now](#now) - [ParseCSV](#parsecsv) - [ParseJSON](#parsejson) - [ParseKeyValue](#parsekeyvalue) +- [ParseSimplifiedXML](#parsesimplifiedxml) - [ParseXML](#parsexml) +- [RemoveXML](#removexml) - [Seconds](#seconds) - [SHA1](#sha1) - [SHA256](#sha256) +- [SHA512](#sha512) +- [SliceToMap](#slicetomap) +- [Sort](#sort) - [SpanID](#spanid) - [Split](#split) +- [String](#string) - [Substring](#substring) - [Time](#time) +- [ToKeyValueString](#tokeyvaluestring) - [TraceID](#traceid) - [TruncateTime](#truncatetime) +- [Unix](#unix) - [UnixMicro](#unixmicro) - [UnixMilli](#unixmilli) - [UnixNano](#unixnano) - [UnixSeconds](#unixseconds) +- [UserAgent](#useragent) - [UUID](#UUID) +- [Year](#year) -### Base64Decode +### Base64Decode (Deprecated) + +*This function has been deprecated. Please use the [Decode](#decode) function instead.* `Base64Decode(value)` @@ -434,13 +492,29 @@ Examples: - `Base64Decode(attributes["encoded field"])` +### Decode + +`Decode(value, encoding)` + +The `Decode` Converter takes a string or byte array encoded with the specified encoding and returns the decoded string. + +`value` is a valid encoded string or byte array. +`encoding` is a valid encoding name included in the [IANA encoding index](https://www.iana.org/assignments/character-sets/character-sets.xhtml). + +Examples: + +- `Decode("aGVsbG8gd29ybGQ=", "base64")` + + +- `Decode(attributes["encoded field"], "us-ascii")` + ### Concat `Concat(values[], delimiter)` -The `Concat` Converter takes a delimiter and a sequence of values and concatenates their string representation. Unsupported values, such as lists or maps that may substantially increase payload size, are not added to the resulting string. +The `Concat` Converter takes a sequence of values and a delimiter and concatenates their string representation. Unsupported values, such as lists or maps that may substantially increase payload size, are not added to the resulting string. -`values` is a list of values passed as arguments. It supports paths, primitive values, and byte slices (such as trace IDs or span IDs). +`values` is a list of values. It supports paths, primitive values, and byte slices (such as trace IDs or span IDs). `delimiter` is a string value that is placed between strings during concatenation. 
If no delimiter is desired, then simply pass an empty string.
 
@@ -477,6 +551,75 @@ Examples:
 
 - `ConvertCase(metric.name, "snake")`
 
+### ConvertAttributesToElementsXML
+
+`ConvertAttributesToElementsXML(target, Optional[xpath])`
+
+The `ConvertAttributesToElementsXML` Converter returns an edited version of an XML string where attributes are converted into child elements.
+
+`target` is a Getter that returns a string. This string should be in XML format.
+If `target` is not a string, nil, or cannot be parsed as XML, `ConvertAttributesToElementsXML` will return an error.
+
+`xpath` (optional) is a string that specifies an [XPath](https://www.w3.org/TR/1999/REC-xpath-19991116/) expression that
+selects one or more elements. Attributes will only be converted within the result(s) of the xpath.
+
+For example, `<a foo="bar">baz</a>` will be converted to `<a>baz<foo>bar</foo></a>`.
+
+Examples:
+
+Convert all attributes in a document
+
+- `ConvertAttributesToElementsXML(body)`
+
+Convert only attributes within "Record" elements
+
+- `ConvertAttributesToElementsXML(body, "/Log/Record")`
+
+### ConvertTextToElementsXML
+
+`ConvertTextToElementsXML(target, Optional[xpath], Optional[elementName])`
+
+The `ConvertTextToElementsXML` Converter returns an edited version of an XML string where all text belongs to a dedicated element.
+
+`target` is a Getter that returns a string. This string should be in XML format.
+If `target` is not a string, nil, or cannot be parsed as XML, `ConvertTextToElementsXML` will return an error.
+
+`xpath` (optional) is a string that specifies an [XPath](https://www.w3.org/TR/1999/REC-xpath-19991116/) expression that
+selects one or more elements. Content will only be converted within the result(s) of the xpath. The default is `/`.
+
+`elementName` (optional) is a string that is used for any element tags that are created to wrap content.
+The default is `"value"`.
+
+For example, `<a><b>foo</b>bar</a>` will be converted to `<a><b>foo</b><value>bar</value></a>`.
+
+Examples:
+
+Ensure all text content in a document is wrapped in a dedicated element
+
+- `ConvertTextToElementsXML(body)`
+
+Use a custom name for any new elements
+
+- `ConvertTextToElementsXML(body, elementName = "custom")`
+
+Convert only part of the document
+
+- `ConvertTextToElementsXML(body, "/some/part/", "value")`
+
+### Day
+
+`Day(value)`
+
+The `Day` Converter returns the day component from the specified time using the Go stdlib [`time.Day` function](https://pkg.go.dev/time#Time.Day).
+
+`value` is a `time.Time`. If `value` is another type, an error is returned.
+
+The returned type is `int64`.
+
+Examples:
+
+- `Day(Now())`
+
 ### Double
 
 The `Double` Converter converts an inputted `value` into a double.
 
@@ -532,6 +675,97 @@ Examples:
 
 - `ExtractPatterns(body, "^(?P<timestamp>\\w+ \\w+ [0-9]+:[0-9]+:[0-9]+) (?P<hostname>([A-Za-z0-9-_]+)) (?P<process>\\w+)(\\[(?P<pid>\\d+)\\])?: (?P<message>.*)$")`
 
+### ExtractGrokPatterns
+
+`ExtractGrokPatterns(target, pattern, Optional[namedCapturesOnly], Optional[patternDefinitions])`
+
+The `ExtractGrokPatterns` Converter parses unstructured data into a format that is structured and queryable.
+It returns a `pcommon.Map` struct that is a result of extracting named capture groups from the target string. If no matches are found then an empty `pcommon.Map` is returned.
+
+- `target` is a Getter that returns a string.
+- `pattern` is a grok pattern string.
+- `namedCapturesOnly` (optional) specifies if non-named captures should be returned.
+- `patternDefinitions` (optional) is a list of custom pattern definition strings used inside `pattern` in the form of `PATTERN_NAME=PATTERN`.
+This parameter lets you define your own custom patterns to improve readability when the extracted `pattern` is not part of the default set or when you need custom naming.
+
+If `target` is not a string or nil, `ExtractGrokPatterns` returns an error. If `pattern` does not contain at least 1 named capture group and `namedCapturesOnly` is set to `true`, then `ExtractGrokPatterns` errors on startup.
+
+Parsing is done using the [Elastic Go-Grok](https://github.com/elastic/go-grok?tab=readme-ov-file) library.
+Grok is a regular expression dialect that supports reusable aliased expressions. It sits on top of the `re2` regex library, so any valid `re2` expression is valid in grok.
+Grok uses this regular expression language to allow naming existing patterns and combining them into more complex patterns that match your fields.
+
+A pattern can be specified in any of these forms:
+ - `%{SYNTAX}` - e.g. `%{NUMBER}`
+ - `%{SYNTAX:ID}` - e.g. `%{NUMBER:MY_AGE}`
+ - `%{SYNTAX:ID:TYPE}` - e.g. `%{NUMBER:MY_AGE:INT}`
+
+Where `SYNTAX` is a pattern that will match your text, `ID` is the identifier you give to the piece of text being matched, and `TYPE` is the data type you want to cast the named field to.
+Supported types are `int`, `long`, `double`, `float` and boolean.
+
+The [Elastic Go-Grok](https://github.com/elastic/go-grok) ships with numerous predefined grok patterns that simplify working with grok.
+In the collector, the complete set is included, consisting of a default set and all additional sets adding product- or tool-specific capabilities (like [aws](https://github.com/elastic/go-grok/blob/main/patterns/aws.go) or [java](https://github.com/elastic/go-grok/blob/main/patterns/java.go) patterns).
+
+The default set consists of:
+
+| Name | Example |
+|-----|-----|
+| WORD | "hello", "world123", "test_data" |
+| NOTSPACE | "example", "text-with-dashes", "12345" |
+| SPACE | " ", "\t", " " |
+| INT | "123", "-456", "+789" |
+| NUMBER | "123", "456.789", "-0.123" |
+| BOOL | "true", "false", "true" |
+| BASE10NUM | "123", "-123.456", "0.789" |
+| BASE16NUM | "1a2b", "0x1A2B", "-0x1a2b3c" |
+| BASE16FLOAT | "0x1.a2b3", "-0x1A2B3C.D" |
+| POSINT | "123", "456", "789" |
+| NONNEGINT | "0", "123", "456" |
+| GREEDYDATA | "anything goes", "literally anything", "123 #@!" |
+| QUOTEDSTRING | "\"This is a quote\"", "'single quoted'" |
+| UUID | "123e4567-e89b-12d3-a456-426614174000" |
+| URN | "urn:isbn:0451450523", "urn:ietf:rfc:2648" |
+
+and many more. The complete list can be found [here](https://github.com/elastic/go-grok/blob/main/patterns/default.go).
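+
+As an illustrative sketch (not an upstream example), applying the pattern `%{IP:client.ip} %{WORD:http.method}` to the input `127.0.0.1 GET` would yield a map containing `client.ip: 127.0.0.1` and `http.method: GET`; the examples below show more complete usage.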
+
+Examples:
+
+- _Uses regex pattern with named captures to extract_:
+
+  `ExtractGrokPatterns(attributes["k8s.change_cause"], "GIT_SHA=(?P<git_sha>\w+)")`
+
+- _Uses regex pattern with named captures to extract_:
+
+  `ExtractGrokPatterns(body, "^(?P<timestamp>\\w+ \\w+ [0-9]+:[0-9]+:[0-9]+) (?P<hostname>([A-Za-z0-9-_]+)) (?P<process>\\w+)(\\[(?P<pid>\\d+)\\])?: (?P<message>.*)$")`
+
+- _Uses `URI` from the default set to extract the URI and includes only named captures_:
+
+  `ExtractGrokPatterns(body, "%{URI}", true)`
+
+- _Uses a more complex pattern consisting of elements from the default set and includes only named captures_:
+
+  `ExtractGrokPatterns(body, "%{DATESTAMP:timestamp} %{TZ:event.timezone} %{DATA:user.name} %{GREEDYDATA:postgresql.log.connection_id} %{POSINT:process.pid:int}", true)`
+
+- _Uses the `LOGLINE` pattern defined in `patternDefinitions` passed as the last argument_:
+
+  `ExtractGrokPatterns(body, "%{LOGLINE}", true, ["LOGLINE=%{DATESTAMP:timestamp} %{TZ:event.timezone} %{DATA:user.name} %{GREEDYDATA:postgresql.log.connection_id} %{POSINT:process.pid:int}"])`
+
+- Add custom patterns to parse the password from `/etc/passwd` and make `pattern` readable:
+
+  - `pattern`: `%{USERNAME:user.name}:%{PASSWORD:user.password}:%{USERINFO}`
+  - `patternDefinitions`:
+    - `PASSWORD=%{WORD}`
+    - `USERINFO=%{GREEDYDATA}`
+
+  Note that `USERNAME` is in the default pattern set and does not need to be redefined.
+
+  - Target: `smith:pass123:1001:1000:J Smith,1234,(234)567-8910,(234)567-1098,email:/home/smith:/bin/sh`
+
+  - Return values:
+    - `user.name`: smith
+    - `user.password`: pass123
+
 ### FNV
 
 `FNV(value)`
 
@@ -551,6 +785,95 @@ Examples:
 
 - `FNV("name")`
 
+### Format
+
+```Format(formatString, []formatArguments)```
+
+The `Format` Converter takes the given format string and formats it using `fmt.Sprintf` and the given arguments.
+
+`formatString` is a string. `formatArguments` is an array of values.
+
+If the `formatString` is not a string or does not exist, the `Format` Converter will return an error.
+If any of the `formatArguments` are incorrect (e.g. missing, or an incorrect type for the corresponding format specifier), then a string will still be returned, but with Go's default error handling for `fmt.Sprintf`.
+
+Format specifiers that can be used in `formatString` are documented in Go's [fmt package documentation](https://pkg.go.dev/fmt#hdr-Printing).
+
+Examples:
+
+- `Format("%02d", [attributes["priority"]])`
+- `Format("%04d-%02d-%02d", [Year(Now()), Month(Now()), Day(Now())])`
+- `Format("%s/%s/%04d-%02d-%02d.log", [attributes["hostname"], body["program"], Year(Now()), Month(Now()), Day(Now())])`
+
+### GetXML
+
+`GetXML(target, xpath)`
+
+The `GetXML` Converter returns an XML string with selected elements.
+
+`target` is a Getter that returns a string. This string should be in XML format.
+If `target` is not a string, nil, or is not valid XML, `GetXML` will return an error.
+
+`xpath` is a string that specifies an [XPath](https://www.w3.org/TR/1999/REC-xpath-19991116/) expression that
+selects one or more elements. Currently, this converter only supports selecting elements.
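+
+For instance (an illustrative sketch, not an upstream example), given a body of `<a><b>1</b><b>2</b></a>`, `GetXML(body, "//b")` would return the selected elements as a string, `<b>1</b><b>2</b>`; see the examples below.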
+
+Examples:
+
+Get all elements at the root of the document with tag "a"
+
+- `GetXML(body, "/a")`
+
+Get all elements anywhere in the document with tag "a"
+
+- `GetXML(body, "//a")`
+
+Get the first element at the root of the document with tag "a"
+
+- `GetXML(body, "/a[1]")`
+
+Get all elements in the document with tag "a" that have an attribute "b" with value "c"
+
+- `GetXML(body, "//a[@b='c']")`
+
+Get `foo` from `<a>foo</a>`
+
+- `GetXML(body, "/a/text()")`
+
+Get `hello` from `<a><![CDATA[hello]]></a>`
+
+- `GetXML(body, "/a/text()")`
+
+Get `bar` from `<a foo="bar"></a>`
+
+- `GetXML(body, "/a/@foo")`
+
+### Hex
+
+`Hex(value)`
+
+The `Hex` converter converts the `value` to its hexadecimal representation.
+
+The returned type is a string representation of the hexadecimal value.
+
+The input `value` types:
+
+- float64 (`1.1` will result in `0x3ff199999999999a`)
+- string (`"1"` will result in `0x31`)
+- bool (`true` will result in `0x01`; `false` in `0x00`)
+- int64 (`12` will result in `0xC`)
+- []byte (without any changes - `0x02` will result in `0x02`)
+
+If `value` is another type or parsing fails, nil is always returned.
+
+The `value` is either a path expression to a telemetry field to retrieve or a literal.
+
+Examples:
+
+- `Hex(attributes["http.status_code"])`
+
+- `Hex(2.0)`
+
 ### Hour
 
 `Hour(value)`
 
@@ -579,6 +902,35 @@ Examples:
 
 - `Hours(Duration("1h"))`
 
+### InsertXML
+
+`InsertXML(target, xpath, value)`
+
+The `InsertXML` Converter returns an edited version of an XML string with child elements added to selected elements.
+
+`target` is a Getter that returns a string. This string should be in XML format and represents the document which will
+be modified. If `target` is not a string, nil, or is not valid XML, `InsertXML` will return an error.
+
+`xpath` is a string that specifies an [XPath](https://www.w3.org/TR/1999/REC-xpath-19991116/) expression that
+selects one or more elements.
+
+`value` is a Getter that returns a string. This string should be in XML format and represents the document which will
+be inserted into `target`. If `value` is not a string, nil, or is not valid XML, `InsertXML` will return an error.
+
+Examples:
+
+Add an element "foo" to the root of the document
+
+- `InsertXML(body, "/", "<foo/>")`
+
+Add an element "bar" to any element called "foo"
+
+- `InsertXML(body, "//foo", "<bar/>")`
+
+Fetch and insert an XML document into another
+
+- `InsertXML(body, "/subdoc", attributes["subdoc"])`
+
 ### Int
 
 `Int(value)`
 
@@ -663,6 +1015,23 @@ Examples:
 
 - `IsInt(attributes["maybe a int"])`
 
+### IsRootSpan
+
+`IsRootSpan()`
+
+The `IsRootSpan` Converter returns `true` if the span in the corresponding context is root, which means
+its `parent_span_id` is equal to the hexadecimal representation of zero.
+
+This function is supported with [OTTL span context](../contexts/ottlspan/README.md). In any other context it is not supported.
+
+The function returns `false` in all other scenarios, including `parent_span_id == ""` or `parent_span_id == nil`.
+
+Examples:
+
+- `IsRootSpan()`
+
+- `set(attributes["isRoot"], "true") where IsRootSpan()`
+
 ### IsMap
 
 `IsMap(value)`
 
@@ -706,6 +1075,22 @@ Examples:
 
 - `IsMatch("string", ".*ring")`
 
+### IsList
+
+`IsList(value)`
+
+The `IsList` Converter returns true if the given value is a list.
+
+The `value` is either a path expression to a telemetry field to retrieve or a literal.
+
+If `value` is a `list`, `pcommon.ValueTypeSlice`, `pcommon.Slice`, or any other list type, then `IsList` returns `true`; otherwise it returns `false`.
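+
+For instance (illustrative), after `set(attributes["parsed"], ParseJSON("[1,2,3]"))`, `IsList(attributes["parsed"])` returns `true`, while calling it on a plain string attribute returns `false`; see the examples below.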
+ +Examples: + +- `IsList(body)` + +- `IsList(attributes["maybe a slice"])` + ### IsString `IsString(value)` @@ -762,6 +1147,26 @@ Examples: - `Int(Log(attributes["duration_ms"])` +### MD5 + +`MD5(value)` + +The `MD5` Converter converts the `value` to a md5 hash/digest. + +The returned type is string. + +`value` is either a path expression to a string telemetry field or a literal string. If `value` is another type an error is returned. + +If an error occurs during hashing it will be returned. + +Examples: + +- `MD5(attributes["device.name"])` + +- `MD5("name")` + +**Note:** According to the National Institute of Standards and Technology (NIST), MD5 is no longer a recommended hash function. It should be avoided except when required for compatibility. New uses should prefer a SHA-2 family function (e.g. SHA-256, SHA-512) whenever possible. + ### Microseconds `Microseconds(value)` @@ -790,6 +1195,20 @@ Examples: - `Milliseconds(Duration("1h"))` +### Minute + +`Minute(value)` + +The `Minute` Converter returns the minute component from the specified time using the Go stdlib [`time.Minute` function](https://pkg.go.dev/time#Time.Minute). + +`value` is a `time.Time`. If `value` is another type, an error is returned. + +The returned type is `int64`. + +Examples: + +- `Minute(Now())` + ### Minutes `Minutes(value)` @@ -804,6 +1223,20 @@ Examples: - `Minutes(Duration("1h"))` +### Month + +`Month(value)` + +The `Month` Converter returns the month component from the specified time using the Go stdlib [`time.Month` function](https://pkg.go.dev/time#Time.Month). + +`value` is a `time.Time`. If `value` is another type, an error is returned. + +The returned type is `int64`. + +Examples: + +- `Month(Now())` + ### Nanoseconds `Nanoseconds(value)` @@ -867,12 +1300,12 @@ Examples: `ParseJSON(target)` -The `ParseJSON` Converter returns a `pcommon.Map` struct that is a result of parsing the target string as JSON +The `ParseJSON` Converter returns a `pcommon.Map` or `pcommon.Slice` struct that is a result of parsing the target string as JSON `target` is a Getter that returns a string. This string should be in json format. If `target` is not a string, nil, or cannot be parsed as JSON, `ParseJSON` will return an error. -Unmarshalling is done using [jsoniter](https://github.com/json-iterator/go). +Unmarshalling is done using [goccy/go-json](https://github.com/goccy/go-json). Each JSON type is converted into a `pdata.Value` using the following map: ``` @@ -889,6 +1322,9 @@ Examples: - `ParseJSON("{\"attr\":true}")` +- `ParseJSON("[\"attr1\",\"attr2\"]")` + + - `ParseJSON(attributes["kubernetes"])` @@ -913,6 +1349,132 @@ Examples: - `ParseKeyValue("k1!v1_k2!v2_k3!v3", "!", "_")` - `ParseKeyValue(attributes["pairs"])` +### ParseSimplifiedXML + +`ParseSimplifiedXML(target)` + +The `ParseSimplifiedXML` Converter returns a `pcommon.Map` struct that is the result of parsing the target string without preservation of attributes or extraneous text content. + +The goal of this Converter is to produce a more user-friendly representation of XML data than the [`ParseXML`](#parsexml) Converter. +This Converter should be preferred over `ParseXML` when minor semantic details (e.g. order of elements) are not critically important, when subsequent processing or querying of the result is expected, or when human-readability is a concern. + +This Converter disregards certain aspects of XML, specifically attributes and extraneous text content, in order to produce +a direct representation of XML data. 
Users are encouraged to simplify their XML documents prior to using `ParseSimplifiedXML`.
+
+See other functions which may be useful for preparing XML documents:
+
+- [`ConvertAttributesToElementsXML`](#convertattributestoelementsxml)
+- [`ConvertTextToElementsXML`](#converttexttoelementsxml)
+- [`RemoveXML`](#removexml)
+- [`InsertXML`](#insertxml)
+- [`GetXML`](#getxml)
+
+#### Formal Definitions
+
+A "Simplified XML" document contains no attributes and no extraneous text content.
+
+An element has "extraneous text content" when it contains both text and element content. e.g.
+
+```xml
+<a>
+  <b>bar</b>
+  world
+</a>
+```
+
+#### Parsing logic
+
+1. Declaration elements, attributes, comments, and extraneous text content are ignored.
+2. Elements which contain a value are converted into key/value pairs.
+   e.g. `<foo>bar</foo>` becomes `"foo": "bar"`
+3. Elements which contain child elements are converted into a key/value pair where the value is a map.
+   e.g. `<foo><bar>baz</bar></foo>` becomes `"foo": { "bar": "baz" }`
+4. Sibling elements that share the same tag will be combined into a slice.
+   e.g. `<a><b>1</b><c>2</c><c>3</c></a>` becomes `"a": { "b": "1", "c": [ "2", "3" ] }`.
+5. Empty elements are dropped, but they can determine whether a value should be a slice or map.
+   e.g. `<a><b>1</b><b/></a>` becomes `"a": { "b": [ "1" ] }` instead of `"a": { "b": "1" }`
+
+#### Examples
+
+Parse a Simplified XML document from the body:
+
+```xml
+<event>
+  <id>1</id>
+  <user>jane</user>
+  <details>
+    <time>2021-10-01T12:00:00Z</time>
+    <description>Something happened</description>
+    <cause>unknown</cause>
+  </details>
+</event>
+```
+
+```json
+{
+  "event": {
+    "id": 1,
+    "user": "jane",
+    "details": {
+      "time": "2021-10-01T12:00:00Z",
+      "description": "Something happened",
+      "cause": "unknown"
+    }
+  }
+}
+```
+
+Parse a Simplified XML document with unique child elements:
+
+```xml
+<x>
+  <y>1</y>
+  <z>2</z>
+</x>
+```
+
+```json
+{
+  "x": {
+    "y": "1",
+    "z": "2"
+  }
+}
+```
+
+Parse a Simplified XML document with multiple elements of the same tag:
+
+```xml
+<a>
+  <b>1</b>
+  <b>2</b>
+</a>
+```
+
+```json
+{
+  "a": {
+    "b": ["1", "2"]
+  }
+}
+```
+
+Parse a Simplified XML document with CDATA element:
+
+```xml
+<a>
+  <b>1</b>
+  <b><![CDATA[2]]></b>
+</a>
+```
+
+```json
+{
+  "a": {
+    "b": ["1", "2"]
+  }
+}
+```
 
 ### ParseXML
 
@@ -984,7 +1546,70 @@ Examples:
 
 - `ParseXML("")`
 
+### RemoveXML
+
+`RemoveXML(target, xpath)`
+
+The `RemoveXML` Converter returns an edited version of an XML string with selected elements removed.
+
+`target` is a Getter that returns a string. This string should be in XML format.
+If `target` is not a string, nil, or is not valid XML, `RemoveXML` will return an error.
+
+`xpath` is a string that specifies an [XPath](https://www.w3.org/TR/1999/REC-xpath-19991116/) expression that
+selects one or more elements to remove from the XML document.
+
+For example, the XPath `/Log/Record[./Name/@type="archive"]` applied to the following XML document:
+
+```xml
+<Log>
+  <Record>
+    <ID>00001</ID>
+    <Name type="archive"></Name>
+    <Data>Some data</Data>
+  </Record>
+  <Record>
+    <ID>00002</ID>
+    <Name type="user"></Name>
+    <Data>Some data</Data>
+  </Record>
+</Log>
+```
+
+will return:
+
+```xml
+<Log>
+  <Record>
+    <ID>00002</ID>
+    <Name type="user"></Name>
+    <Data>Some data</Data>
+  </Record>
+</Log>
+```
+
+Examples:
+
+Delete the attribute "foo" from the elements with tag "a"
+
+- `RemoveXML(body, "/a/@foo")`
+
+Delete all elements with tag "b" that are children of elements with tag "a"
+
+- `RemoveXML(body, "/a/b")`
+
+Delete all elements with tag "b" that are children of elements with tag "a" and have the attribute "foo" with value "bar"
+
+- `RemoveXML(body, "/a/b[@foo='bar']")`
+
+Delete all comments
+
+- `RemoveXML(body, "//comment()")`
+
+Delete text from nodes that contain the word "sensitive"
+
+- `RemoveXML(body, "//*[contains(text(), 'sensitive')]")`
 
 ### Seconds
 
@@ -1019,7 +1644,7 @@ Examples:
 
 - `SHA1("name")`
 
-**Note:** According to the National Institute of Standards and Technology (NIST), SHA1 is no longer a recommended hash function. It should be avoided except when required for compatibility. New uses should prefer FNV whenever possible.
+**Note:** [According to the National Institute of Standards and Technology (NIST)](https://csrc.nist.gov/projects/hash-functions), SHA1 is no longer a recommended hash function. It should be avoided except when required for compatibility. New uses should prefer a SHA-2 family function (such as SHA-256 or SHA-512) whenever possible.
 
 ### SHA256
 
@@ -1037,10 +1662,113 @@ Examples:
 
 - `SHA256(attributes["device.name"])`
-
 - `SHA256("name")`
 
-**Note:** According to the National Institute of Standards and Technology (NIST), SHA256 is no longer a recommended hash function. It should be avoided except when required for compatibility. New uses should prefer FNV whenever possible.
+### SHA512
+
+`SHA512(input)`
+
+The `SHA512` converter calculates the sha512 hash value/digest of the `input`.
+
+The returned type is string.
+
+`input` is either a path expression to a string telemetry field or a literal string. If `input` is another type, an error is returned.
+If an error occurs during hashing, the error will be returned.
+
+Examples:
+
+- `SHA512(attributes["device.name"])`
+
+- `SHA512("name")`
+
+### SliceToMap
+
+`SliceToMap(target, keyPath, Optional[valuePath])`
+
+The `SliceToMap` converter converts a slice of objects to a map.
The arguments are as follows:
+
+- `target`: A list of maps containing the entries to be converted.
+- `keyPath`: A string array that determines the name of the keys for the map entries by pointing to the value of an attribute within each slice item. Note that
+the `keyPath` must resolve to a string value, otherwise the converter will not be able to convert the item
+to a map entry.
+- `valuePath`: This optional string array determines which attribute should be used as the value for the map entry. If no
+`valuePath` is defined, the value of the map entry will be the same as the original slice item.
+
+Examples:
+
+The examples below will convert the following input:
+
+```yaml
+attributes:
+  hello: world
+  things:
+    - name: foo
+      value: 2
+    - name: bar
+      value: 5
+```
+
+- `SliceToMap(attributes["things"], ["name"])`:
+
+This converts the input above to the following:
+
+```yaml
+attributes:
+  hello: world
+  things:
+    foo:
+      name: foo
+      value: 2
+    bar:
+      name: bar
+      value: 5
+```
+
+- `SliceToMap(attributes["things"], ["name"], ["value"])`:
+
+This converts the input above to the following:
+
+```yaml
+attributes:
+  hello: world
+  things:
+    foo: 2
+    bar: 5
+```
+
+Once the `SliceToMap` function has been applied to a value, the converted entries are addressable via their keys:
+
+- `set(attributes["thingsMap"], SliceToMap(attributes["things"], ["name"]))`
+- `set(attributes["element_1"], attributes["thingsMap"]["foo"])`
+- `set(attributes["element_2"], attributes["thingsMap"]["bar"])`
+
+### Sort
+
+`Sort(target, Optional[order])`
+
+The `Sort` Converter sorts the `target` array in either ascending or descending order.
+
+`target` is an array or `pcommon.Slice` typed field containing the elements to be sorted.
+
+`order` is a string specifying the sort order. Must be either `asc` or `desc`. The default value is `asc`.
+
+The Sort Converter preserves the data type of the original elements while sorting.
+The behavior varies based on the types of elements in the target slice:
+
+| Element Types | Sorting Behavior | Return Value |
+|---------------|-------------------------------------|--------------|
+| Integers | Sorts as integers | Sorted array of integers |
+| Doubles | Sorts as doubles | Sorted array of doubles |
+| Integers and doubles | Converts all to doubles, then sorts | Sorted array of integers and doubles |
+| Strings | Sorts as strings | Sorted array of strings |
+| Booleans | Converts all to strings, then sorts | Sorted array of booleans |
+| Mix of integers, doubles, booleans, and strings | Converts all to strings, then sorts | Sorted array of mixed types |
+| Any other types | N/A | Returns an error |
+
+Examples:
+
+- `Sort(attributes["device.tags"])`
+- `Sort(attributes["device.tags"], "desc")`
 
 ### SpanID
 
@@ -1068,6 +1796,33 @@ Examples:
 
 - `Split("A|B|C", "|")`
 
+### String
+
+`String(value)`
+
+The `String` Converter converts the `value` to string type.
+
+The returned type is `string`.
+
+- string. The function returns the `value` without changes.
+- []byte. The function returns the `value` as a string encoded in hexadecimal.
+- map. The function returns the `value` as a key-value-pair string.
+- slice. The function returns the `value` as a list-formatted string.
+- pcommon.Value. The function returns the `value` as a string type.
+
+If `value` is of another type it gets marshalled to string type.
+If `value` is empty, or parsing fails, nil is always returned.
+
+The `value` is either a path expression to a telemetry field to retrieve, or a literal.
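+
+For instance (an illustrative sketch, not an upstream example), an integer attribute with value `200` would be converted to the string `"200"`, and a map attribute would be marshalled to its string form; see the examples below.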
+
+Examples:
+
+- `String("test")`
+- `String(attributes["http.method"])`
+- `String(span_id)`
+- `String([1,2,3])`
+- `String(false)`
+
 ### Substring
 
 `Substring(target, start, length)`
 
@@ -1085,15 +1840,122 @@ Examples:
 
 ### Time
 
-The `Time` Converter takes a string representation of a time and converts it to a Golang `time.Time`.
+`Time(target, format, Optional[location], Optional[locale])`
 
-`time` is a string. `format` is a string.
+The `Time` Converter takes a string representation of a time and converts it to a Golang `time.Time`.
 
-If either `time` or `format` are nil, an error is returned. The parser used is the parser at [internal/coreinternal/parser](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/internal/coreinternal/timeutils). If the time and format do not follow the parsing rules used by this parser, an error is returned.
+`target` is a string. `format` is a string, `location` is an optional string, `locale` is an optional string.
+
+If either `target` or `format` is nil, an error is returned. The parser used is the parser at [internal/coreinternal/parser](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/internal/coreinternal/timeutils). If the `target` and `format` do not follow the parsing rules used by this parser, an error is returned.
+
+`format` denotes a textual representation of the time value formatted according to a ctime-like format string. It follows [standard Go Layout formatting](https://pkg.go.dev/time#pkg-constants) with a few additional substitutes:
+| substitution | description | examples |
+|-----|-----|-----|
+|`%Y` | Year as a zero-padded number | 0001, 0002, ..., 2019, 2020, ..., 9999 |
+|`%y` | Year, last two digits as a zero-padded number | 01, ..., 99 |
+|`%m` | Month as a zero-padded number | 01, 02, ..., 12 |
+|`%o` | Month as a space-padded number | 1, 2, ..., 12 |
+|`%q` | Month as an unpadded number | 1,2,...,12 |
+|`%b`, `%h` | Abbreviated month name | Jan, Feb, ... |
+|`%B` | Full month name | January, February, ... |
+|`%d` | Day of the month as a zero-padded number | 01, 02, ..., 31 |
+|`%e` | Day of the month as a space-padded number | 1, 2, ..., 31 |
+|`%g` | Day of the month as an unpadded number | 1,2,...,31 |
+|`%a` | Abbreviated weekday name | Sun, Mon, ... |
+|`%A` | Full weekday name | Sunday, Monday, ... |
+|`%H` | Hour (24-hour clock) as a zero-padded number | 00, ..., 23 |
+|`%I` | Hour (12-hour clock) as a zero-padded number | 00, ..., 12 |
+|`%l` | Hour 12-hour clock | 0, ..., 24 |
+|`%p` | Locale’s equivalent of either AM or PM | AM, PM |
+|`%P` | Locale’s equivalent of either am or pm | am, pm |
+|`%M` | Minute as a zero-padded number | 00, 01, ..., 59 |
+|`%S` | Second as a zero-padded number | 00, 01, ..., 59 |
+|`%L` | Millisecond as a zero-padded number | 000, 001, ..., 999 |
+|`%f` | Microsecond as a zero-padded number | 000000, ..., 999999 |
+|`%s` | Nanosecond as a zero-padded number | 00000000, ..., 99999999 |
+|`%z` | UTC offset in the form ±HHMM[SS[.ffffff]] or empty | +0000, -0400 |
+|`%Z` | Timezone name or abbreviation or empty | UTC, EST, CST |
+|`%i` | Timezone as +/-HH | -07 |
+|`%j` | Timezone as +/-HH:MM | -07:00 |
+|`%k` | Timezone as +/-HH:MM:SS | -07:00:00 |
+|`%w` | Timezone as +/-HHMMSS | -070000 |
+|`%D`, `%x` | Short MM/DD/YYYY date, equivalent to %m/%d/%y | 01/21/2031 |
+|`%F` | Short YYYY-MM-DD date, equivalent to %Y-%m-%d | 2031-01-21 |
+|`%T`,`%X` | ISO 8601 time format (HH:MM:SS), equivalent to %H:%M:%S | 02:55:02 |
+|`%r` | 12-hour clock time | 02:55:02 pm |
+|`%R` | 24-hour HH:MM time, equivalent to %H:%M | 13:55 |
+|`%n` | New-line character ('\n') | |
+|`%t` | Horizontal-tab character ('\t') | |
+|`%%` | A % sign | |
+|`%c` | Date and time representation | Mon Jan 02 15:04:05 2006 |
+
+`location` specifies a default time zone canonical ID to be used for date parsing in case it is not part of `format`.
+
+When loading `location`, this function will look for the IANA Time Zone database in the following locations in order:
+- a directory or uncompressed zip file named by the ZONEINFO environment variable
+- on a Unix system, the system standard installation location
+- $GOROOT/lib/time/zoneinfo.zip
+- the `time/tzdata` package, if it was imported.
+
+When building a Collector binary, importing `time/tzdata` in any Go source file will bundle the database into the binary, which guarantees the lookups will work regardless of the setup on the host. Note this will add roughly 500kB to the binary size.
 
 Examples:
 
 - `Time("02/04/2023", "%m/%d/%Y")`
+- `Time("Feb 15, 2023", "%b %d, %Y")`
+- `Time("2023-05-26 12:34:56 HST", "%Y-%m-%d %H:%M:%S %Z")`
+- `Time("1986-10-01T00:17:33 MST", "%Y-%m-%dT%H:%M:%S %Z")`
+- `Time("2012-11-01T22:08:41+0000 EST", "%Y-%m-%dT%H:%M:%S%z %Z")`
+- `Time("2023-05-26 12:34:56", "%Y-%m-%d %H:%M:%S", "America/New_York")`
+
+`locale` specifies the input language of the `target` value. It is used to interpret timestamp values written in a specific language,
+ensuring that the function can correctly parse the localized month names, day names, and periods of the day based on the provided language.
+
+The value must be a well-formed BCP 47 language tag, and a known [CLDR](https://cldr.unicode.org) v45 locale.
+If not supplied, English (`en`) is used.
+
+Examples:
+
+- `Time("mercoledì set 4 2024", "%A %h %e %Y", "", "it")`
+- `Time("Febrero 25 lunes, 2002, 02:03:04 p.m.", "%B %d %A, %Y, %r", "America/New_York", "es-ES")`
+
+### ToKeyValueString
+
+`ToKeyValueString(target, Optional[delimiter], Optional[pair_delimiter], Optional[sort_output])`
+
+The `ToKeyValueString` Converter takes a `pcommon.Map` and converts it to a `string` of key value pairs.
+
+- `target` is a Getter that returns a `pcommon.Map`.
+- `delimiter` is an optional string that is used to join keys and values, the default is `=`.
+- `pair_delimiter` is an optional string that is used to join key-value pairs, the default is a single space (` `).
+- `sort_output` is an optional bool that is used to deterministically sort the keys of the output string. It should only be used if the output is required to be in the same order each time, as it introduces some performance overhead.
+
+For example, the following map `{"k1":"v1","k2":"v2","k3":"v3"}` will use default delimiters and be converted into the following string:
+
+```
+`k1=v1 k2=v2 k3=v3`
+```
+
+**Note:** Any nested arrays or maps will be represented as a JSON string. It is recommended to [flatten](#flatten) `target` before using this function.
+
+For example, `{"k1":"v1","k2":{"k3":"v3","k4":["v4","v5"]}}` will be converted to:
+
+```
+`k1=v1 k2={\"k3\":\"v3\",\"k4\":[\"v4\",\"v5\"]}`
+```
+
+**Note:** If any keys or values contain either delimiter, they will be double quoted. If any double quotes are present in the quoted value, they will be escaped.
+
+For example, `{"k1":"v1","k2":"v=2","k3":"\"v=3\""}` will be converted to:
+
+```
+`k1=v1 k2="v=2" k3="\"v=3\""`
+```
+
+Examples:
+
+- `ToKeyValueString(body)`
+- `ToKeyValueString(body, ":", ",", true)`
 
 ### TraceID
 
@@ -1121,6 +1983,21 @@ Examples:
 
 - `TruncateTime(start_time, Duration("1s"))`
 
+### Unix
+
+`Unix(seconds, Optional[nanoseconds])`
+
+The `Unix` Converter returns an epoch timestamp as a Unix time. Similar to [Golang's Unix function](https://pkg.go.dev/time#Unix).
+
+`seconds` is `int64`. If `seconds` is another type an error is returned.
+`nanoseconds` is `int64`. It is optional and its default value is 0. If `nanoseconds` is another type an error is returned.
+
+The returned type is `time.Time`.
+
+Examples:
+
+- `Unix(1672527600)`
+
 ### UnixMicro
 
 `UnixMicro(value)`
 
@@ -1177,19 +2054,88 @@ Examples:
 
 - `UnixSeconds(Time("02/04/2023", "%m/%d/%Y"))`
 
+### UserAgent
+
+`UserAgent(value)`
+
+The `UserAgent` Converter parses the string argument trying to match it against well-known user-agent strings.
+
+`value` is a string or a path to a string. If `value` is not a string an error is returned.
+
+The results of the parsing are returned as a map containing `user_agent.name`, `user_agent.version` and `user_agent.original`
+as defined in semconv v1.25.0.
+
+Parsing is done using the [uap-go package](https://github.com/ua-parser/uap-go). The specific formats it recognizes can be found [here](https://github.com/ua-parser/uap-core/blob/master/regexes.yaml).
+
+Examples:
+
+- `UserAgent("curl/7.81.0")`
+  ```yaml
+  "user_agent.name": "curl"
+  "user_agent.version": "7.81.0"
+  "user_agent.original": "curl/7.81.0"
+  ```
+- `UserAgent("Mozilla/5.0 (X11; Linux x86_64; rv:126.0) Gecko/20100101 Firefox/126.0")`
+  ```yaml
+  "user_agent.name": "Firefox"
+  "user_agent.version": "126.0"
+  "user_agent.original": "Mozilla/5.0 (X11; Linux x86_64; rv:126.0) Gecko/20100101 Firefox/126.0"
+  ```
+
+### URL
+
+`URL(url_string)`
+
+Parses a Uniform Resource Locator (URL) string and extracts its components as an object.
+This URL object includes properties for the URL’s domain, path, fragment, port, query, scheme, user info, username, and password.
+
+`original`, `domain`, `scheme`, and `path` are always present. Other properties are present only if they have corresponding values.
+
+`url_string` is a `string`.
+ +- `URL("http://www.example.com")` + +results in +``` + "url.original": "http://www.example.com", + "url.scheme": "http", + "url.domain": "www.example.com", + "url.path": "", +``` + +- `URL("http://myusername:mypassword@www.example.com:80/foo.gif?key1=val1&key2=val2#fragment")` + +results in +``` + "url.path": "/foo.gif", + "url.fragment": "fragment", + "url.extension": "gif", + "url.password": "mypassword", + "url.original": "http://myusername:mypassword@www.example.com:80/foo.gif?key1=val1&key2=val2#fragment", + "url.scheme": "http", + "url.port": 80, + "url.user_info": "myusername:mypassword", + "url.domain": "www.example.com", + "url.query": "key1=val1&key2=val2", + "url.username": "myusername", +``` + ### UUID `UUID()` The `UUID` function generates a v4 uuid string. -## Function syntax +### Year -Functions should be named and formatted according to the following standards. +`Year(value)` + +The `Year` Converter returns the year component from the specified time using the Go stdlib [`time.Year` function](https://pkg.go.dev/time#Time.Year). + +`value` is a `time.Time`. If `value` is another type, an error is returned. + +The returned type is `int64`. + +Examples: -- Function names MUST start with a verb unless it is a Factory that creates a new type. -- Converters MUST be UpperCamelCase. -- Function names that contain multiple words MUST separate those words with `_`. -- Functions that interact with multiple items MUST have plurality in the name. Ex: `truncate_all`, `keep_keys`, `replace_all_matches`. -- Functions that interact with a single item MUST NOT have plurality in the name. If a function would interact with multiple items due to a condition, like `where`, it is still considered singular. Ex: `set`, `delete`, `replace_match`. -- Functions that change a specific target MUST set the target as the first parameter. 
+- `Year(Now())` diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_append.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_append.go new file mode 100644 index 00000000000..3bf6cb2a902 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_append.go @@ -0,0 +1,135 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs" + +import ( + "context" + "fmt" + + "go.opentelemetry.io/collector/pdata/pcommon" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" +) + +type AppendArguments[K any] struct { + Target ottl.GetSetter[K] + Value ottl.Optional[ottl.Getter[K]] + Values ottl.Optional[[]ottl.Getter[K]] +} + +func NewAppendFactory[K any]() ottl.Factory[K] { + return ottl.NewFactory("append", &AppendArguments[K]{}, createAppendFunction[K]) +} + +func createAppendFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) { + args, ok := oArgs.(*AppendArguments[K]) + if !ok { + return nil, fmt.Errorf("AppendFactory args must be of type *Appendrguments[K]") + } + + return appendTo(args.Target, args.Value, args.Values) +} + +func appendTo[K any](target ottl.GetSetter[K], value ottl.Optional[ottl.Getter[K]], values ottl.Optional[[]ottl.Getter[K]]) (ottl.ExprFunc[K], error) { + if value.IsEmpty() && values.IsEmpty() { + return nil, fmt.Errorf("at least one of the optional arguments ('value' or 'values') must be provided") + } + + return func(ctx context.Context, tCtx K) (any, error) { + t, err := target.Get(ctx, tCtx) + if err != nil { + return nil, err + } + + // init res with target values + var res []any + + if t != nil { + switch targetType := t.(type) { + case pcommon.Slice: + res = append(res, targetType.AsRaw()...) + case pcommon.Value: + switch targetType.Type() { + case pcommon.ValueTypeEmpty: + res = append(res, targetType.Str()) + case pcommon.ValueTypeStr: + res = append(res, targetType.Str()) + case pcommon.ValueTypeInt: + res = append(res, targetType.Int()) + case pcommon.ValueTypeDouble: + res = append(res, targetType.Double()) + case pcommon.ValueTypeBool: + res = append(res, targetType.Bool()) + case pcommon.ValueTypeSlice: + res = append(res, targetType.Slice().AsRaw()...) + default: + return nil, fmt.Errorf("unsupported type of target field: %q", targetType.Type()) + } + + case []string: + res = appendMultiple(res, targetType) + case []any: + res = append(res, targetType...) 
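+			// The remaining typed slices are flattened element-wise into the generic result via appendMultiple; scalars below are appended as single values.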
+ case []int64: + res = appendMultiple(res, targetType) + case []bool: + res = appendMultiple(res, targetType) + case []float64: + res = appendMultiple(res, targetType) + + case string: + res = append(res, targetType) + case int64: + res = append(res, targetType) + case bool: + res = append(res, targetType) + case float64: + res = append(res, targetType) + case any: + res = append(res, targetType) + default: + return nil, fmt.Errorf("unsupported type of target field: '%T'", t) + } + } + + appendGetterFn := func(g ottl.Getter[K]) error { + v, err := g.Get(ctx, tCtx) + if err != nil { + return err + } + res = append(res, v) + return nil + } + + if !value.IsEmpty() { + getter := value.Get() + if err := appendGetterFn(getter); err != nil { + return nil, err + } + } + if !values.IsEmpty() { + getters := values.Get() + for _, g := range getters { + if err := appendGetterFn(g); err != nil { + return nil, err + } + } + } + + // retype []any to Slice, having []any sometimes misbehaves and nils pcommon.Value + resSlice := pcommon.NewSlice() + if err := resSlice.FromRaw(res); err != nil { + return nil, err + } + + return nil, target.Set(ctx, tCtx, resSlice) + }, nil +} + +func appendMultiple[K any](target []any, values []K) []any { + for _, v := range values { + target = append(target, v) + } + return target +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_base64decode.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_base64decode.go index 42e401d7150..f626adc3b55 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_base64decode.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_base64decode.go @@ -30,7 +30,6 @@ func createBase64DecodeFunction[K any](_ ottl.FunctionContext, oArgs ottl.Argume } func Base64Decode[K any](target ottl.StringGetter[K]) (ottl.ExprFunc[K], error) { - return func(ctx context.Context, tCtx K) (any, error) { val, err := target.Get(ctx, tCtx) if err != nil { diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_convert_attributes_to_elements_xml.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_convert_attributes_to_elements_xml.go new file mode 100644 index 00000000000..64d4ecc5fde --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_convert_attributes_to_elements_xml.go @@ -0,0 +1,69 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs" + +import ( + "context" + "fmt" + + "github.com/antchfx/xmlquery" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" +) + +type ConvertAttributesToElementsXMLArguments[K any] struct { + Target ottl.StringGetter[K] + XPath ottl.Optional[string] +} + +func NewConvertAttributesToElementsXMLFactory[K any]() ottl.Factory[K] { + return ottl.NewFactory("ConvertAttributesToElementsXML", &ConvertAttributesToElementsXMLArguments[K]{}, createConvertAttributesToElementsXMLFunction[K]) +} + +func createConvertAttributesToElementsXMLFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) { + args, ok := oArgs.(*ConvertAttributesToElementsXMLArguments[K]) + + if !ok { + return nil, fmt.Errorf("ConvertAttributesToElementsXML args 
must be of type *ConvertAttributesToElementsXMLArguments[K]") + } + + xPath := args.XPath.Get() + if xPath == "" { + xPath = "//@*" // All attributes in the document + } + if err := validateXPath(xPath); err != nil { + return nil, err + } + + return convertAttributesToElementsXML(args.Target, xPath), nil +} + +// convertAttributesToElementsXML returns a string that is a result of converting all attributes of the +// target XML into child elements. These new elements are added as the last child elements of the parent. +// e.g. <a foo="bar" hello="world"/> -> <a><hello>world</hello><foo>bar</foo></a> +func convertAttributesToElementsXML[K any](target ottl.StringGetter[K], xPath string) ottl.ExprFunc[K] { + return func(ctx context.Context, tCtx K) (any, error) { + var doc *xmlquery.Node + if targetVal, err := target.Get(ctx, tCtx); err != nil { + return nil, err + } else if doc, err = parseNodesXML(targetVal); err != nil { + return nil, err + } + for _, n := range xmlquery.Find(doc, xPath) { + if n.Type != xmlquery.AttributeNode { + continue + } + xmlquery.AddChild(n.Parent, &xmlquery.Node{ + Type: xmlquery.ElementNode, + Data: n.Data, + FirstChild: &xmlquery.Node{ + Type: xmlquery.TextNode, + Data: n.InnerText(), + }, + }) + n.Parent.RemoveAttr(n.Data) + } + return doc.OutputXML(false), nil + } +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_convert_text_to_elements_xml.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_convert_text_to_elements_xml.go new file mode 100644 index 00000000000..a0fb108c406 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_convert_text_to_elements_xml.go @@ -0,0 +1,107 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs" + +import ( + "context" + "fmt" + + "github.com/antchfx/xmlquery" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" +) + +type ConvertTextToElementsXMLArguments[K any] struct { + Target ottl.StringGetter[K] + XPath ottl.Optional[string] + ElementName ottl.Optional[string] +} + +func NewConvertTextToElementsXMLFactory[K any]() ottl.Factory[K] { + return ottl.NewFactory("ConvertTextToElementsXML", &ConvertTextToElementsXMLArguments[K]{}, createConvertTextToElementsXMLFunction[K]) +} + +func createConvertTextToElementsXMLFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) { + args, ok := oArgs.(*ConvertTextToElementsXMLArguments[K]) + + if !ok { + return nil, fmt.Errorf("ConvertTextToElementsXML args must be of type *ConvertTextToElementsXMLArguments[K]") + } + + xPath := args.XPath.Get() + if xPath == "" { + xPath = "/" + } else if err := validateXPath(xPath); err != nil { + return nil, err + } + + elementName := args.ElementName.Get() + if elementName == "" { + elementName = "value" + } + + return convertTextToElementsXML(args.Target, xPath, elementName), nil +} + +// convertTextToElementsXML returns a string that is a result of wrapping any extraneous text nodes with a dedicated element.
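+// An assumed example (illustrative, not from the upstream docs): with the
+// default element name "value", <a>one<b/>two</a> would become
+// <a><value>one</value><b/><value>two</value></a>.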
+func convertTextToElementsXML[K any](target ottl.StringGetter[K], xPath string, elementName string) ottl.ExprFunc[K] { + return func(ctx context.Context, tCtx K) (any, error) { + var doc *xmlquery.Node + if targetVal, err := target.Get(ctx, tCtx); err != nil { + return nil, err + } else if doc, err = parseNodesXML(targetVal); err != nil { + return nil, err + } + for _, n := range xmlquery.Find(doc, xPath) { + convertTextToElementsForNode(n, elementName) + } + return doc.OutputXML(false), nil + } +} + +func convertTextToElementsForNode(parent *xmlquery.Node, elementName string) { + switch parent.Type { + case xmlquery.ElementNode: // ok + case xmlquery.DocumentNode: // ok + default: + return + } + + if parent.FirstChild == nil { + return + } + + // Convert any child nodes and count text and element nodes. + var valueCount, elementCount int + for child := parent.FirstChild; child != nil; child = child.NextSibling { + if child.Type == xmlquery.ElementNode { + convertTextToElementsForNode(child, elementName) + elementCount++ + } else if child.Type == xmlquery.TextNode { + valueCount++ + } + } + + // If there are no values to wrap, or if there is exactly one value OR one element, this node is all set. + if valueCount == 0 || elementCount+valueCount <= 1 { + return + } + + // At this point, we either have multiple values, or a mix of values and elements. + // Either way, we need to wrap the values. + for child := parent.FirstChild; child != nil; child = child.NextSibling { + if child.Type != xmlquery.TextNode { + continue + } + newTextNode := &xmlquery.Node{ + Type: xmlquery.TextNode, + Data: child.Data, + } + // Change this node into an element + child.Type = xmlquery.ElementNode + child.Data = elementName + child.FirstChild = newTextNode + child.LastChild = newTextNode + } +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_day.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_day.go new file mode 100644 index 00000000000..ecdd0ceb2d4 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_day.go @@ -0,0 +1,39 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs" + +import ( + "context" + "fmt" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" +) + +type DayArguments[K any] struct { + Time ottl.TimeGetter[K] +} + +func NewDayFactory[K any]() ottl.Factory[K] { + return ottl.NewFactory("Day", &DayArguments[K]{}, createDayFunction[K]) +} + +func createDayFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) { + args, ok := oArgs.(*DayArguments[K]) + + if !ok { + return nil, fmt.Errorf("DayFactory args must be of type *DayArguments[K]") + } + + return Day(args.Time) +} + +func Day[K any](time ottl.TimeGetter[K]) (ottl.ExprFunc[K], error) { + return func(ctx context.Context, tCtx K) (any, error) { + t, err := time.Get(ctx, tCtx) + if err != nil { + return nil, err + } + return int64(t.Day()), nil + }, nil +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_decode.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_decode.go new file mode 100644 index 00000000000..d6dc5efc036 --- /dev/null +++ 
b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_decode.go @@ -0,0 +1,103 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs" + +import ( + "context" + "encoding/base64" + "fmt" + "strings" + + "go.opentelemetry.io/collector/pdata/pcommon" + "golang.org/x/text/encoding" + "golang.org/x/text/encoding/ianaindex" + + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/textutils" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" +) + +type DecodeArguments[K any] struct { + Target ottl.Getter[K] + Encoding string +} + +func NewDecodeFactory[K any]() ottl.Factory[K] { + return ottl.NewFactory("Decode", &DecodeArguments[K]{}, createDecodeFunction[K]) +} + +func createDecodeFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) { + args, ok := oArgs.(*DecodeArguments[K]) + if !ok { + return nil, fmt.Errorf("DecodeFactory args must be of type *DecodeArguments[K]") + } + + return Decode(args.Target, args.Encoding) +} + +func Decode[K any](target ottl.Getter[K], encoding string) (ottl.ExprFunc[K], error) { + return func(ctx context.Context, tCtx K) (any, error) { + val, err := target.Get(ctx, tCtx) + if err != nil { + return nil, err + } + var stringValue string + + switch v := val.(type) { + case []byte: + stringValue = string(v) + case *string: + stringValue = *v + case string: + stringValue = v + case pcommon.ByteSlice: + stringValue = string(v.AsRaw()) + case *pcommon.ByteSlice: + stringValue = string(v.AsRaw()) + case pcommon.Value: + stringValue = v.AsString() + case *pcommon.Value: + stringValue = v.AsString() + default: + return nil, fmt.Errorf("unsupported type provided to Decode function: %T", v) + } + + switch encoding { + case "base64": + // base64 is not in IANA index, so we have to deal with this encoding separately + decodedBytes, err := base64.StdEncoding.DecodeString(stringValue) + if err != nil { + return nil, fmt.Errorf("could not decode: %w", err) + } + return string(decodedBytes), nil + default: + e, err := getEncoding(encoding) + if err != nil { + return nil, err + } + + decodedString, err := e.NewDecoder().String(stringValue) + if err != nil { + return nil, fmt.Errorf("could not decode: %w", err) + } + + return decodedString, nil + } + }, nil +} + +func getEncoding(encoding string) (encoding.Encoding, error) { + if e, ok := textutils.EncodingOverridesMap.Get(strings.ToLower(encoding)); ok { + return e, nil + } + e, err := ianaindex.IANA.Encoding(encoding) + if err != nil { + return nil, fmt.Errorf("could not get encoding for %s: %w", encoding, err) + } + if e == nil { + // for some encodings a nil error and a nil encoding is returned, so we need to double check + // if the encoding is actually set here + return nil, fmt.Errorf("no decoder available for encoding: %s", encoding) + } + return e, nil +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_duration.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_duration.go index e125ff2b90d..27138583fa3 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_duration.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_duration.go @@ -18,6 +18,7 @@ type DurationArguments[K any] struct { 
func NewDurationFactory[K any]() ottl.Factory[K] { return ottl.NewFactory("Duration", &DurationArguments[K]{}, createDurationFunction[K]) } + func createDurationFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) { args, ok := oArgs.(*DurationArguments[K]) diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_extract_grok_patterns.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_extract_grok_patterns.go new file mode 100644 index 00000000000..78f50866a0c --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_extract_grok_patterns.go @@ -0,0 +1,103 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs" + +import ( + "context" + "fmt" + "strings" + + "github.com/elastic/go-grok" + "go.opentelemetry.io/collector/pdata/pcommon" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" +) + +type ExtractGrokPatternsArguments[K any] struct { + Target ottl.StringGetter[K] + Pattern string + NamedCapturesOnly ottl.Optional[bool] + PatternDefinitions ottl.Optional[[]string] +} + +func NewExtractGrokPatternsFactory[K any]() ottl.Factory[K] { + return ottl.NewFactory("ExtractGrokPatterns", &ExtractGrokPatternsArguments[K]{}, createExtractGrokPatternsFunction[K]) +} + +func createExtractGrokPatternsFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) { + args, ok := oArgs.(*ExtractGrokPatternsArguments[K]) + + if !ok { + return nil, fmt.Errorf("ExtractGrokPatternsFactory args must be of type *ExtractGrokPatternsArguments[K]") + } + + return extractGrokPatterns(args.Target, args.Pattern, args.NamedCapturesOnly, args.PatternDefinitions) +} + +func extractGrokPatterns[K any](target ottl.StringGetter[K], pattern string, nco ottl.Optional[bool], patternDefinitions ottl.Optional[[]string]) (ottl.ExprFunc[K], error) { + g, err := grok.NewComplete() + if err != nil { + return nil, fmt.Errorf("failed to initialize grok parser: %w", err) + } + namedCapturesOnly := !nco.IsEmpty() && nco.Get() + + if !patternDefinitions.IsEmpty() { + for i, patternDefinition := range patternDefinitions.Get() { + // split pattern in format key=val + parts := strings.SplitN(patternDefinition, "=", 2) + if len(parts) == 1 { + trimmedPattern := patternDefinition + if len(patternDefinition) > 20 { + trimmedPattern = fmt.Sprintf("%s...", patternDefinition[:17]) // keep whole string 20 characters long including ... 
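+ // (Only the quoted error message below is shortened; the definition
+ // itself is never registered, since a definition without '=' is
+ // rejected outright.)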
+ } + return nil, fmt.Errorf("pattern %q supplied to ExtractGrokPatterns at index %d has incorrect format, expecting PATTERNNAME=pattern definition", trimmedPattern, i) + } + + if strings.ContainsRune(parts[0], ':') { + return nil, fmt.Errorf("pattern ID %q should not contain ':'", parts[0]) + } + + err = g.AddPattern(parts[0], parts[1]) + if err != nil { + return nil, fmt.Errorf("failed to add pattern %q=%q: %w", parts[0], parts[1], err) + } + } + } + err = g.Compile(pattern, namedCapturesOnly) + if err != nil { + return nil, fmt.Errorf("the pattern supplied to ExtractGrokPatterns is not a valid pattern: %w", err) + } + + if namedCapturesOnly && !g.HasCaptureGroups() { + return nil, fmt.Errorf("at least 1 named capture group must be supplied in the given regex") + } + + return func(ctx context.Context, tCtx K) (any, error) { + val, err := target.Get(ctx, tCtx) + if err != nil { + return nil, err + } + + matches, err := g.ParseTypedString(val) + if err != nil { + return nil, err + } + + result := pcommon.NewMap() + for k, v := range matches { + switch val := v.(type) { + case bool: + result.PutBool(k, val) + case float64: + result.PutDouble(k, val) + case int: + result.PutInt(k, int64(val)) + case string: + result.PutStr(k, val) + } + } + + return result, err + }, nil +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_flatten.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_flatten.go index 44a97b8095b..709eb1bef74 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_flatten.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_flatten.go @@ -37,8 +37,8 @@ func flatten[K any](target ottl.PMapGetter[K], p ottl.Optional[string], d ottl.O depth := int64(math.MaxInt64) if !d.IsEmpty() { depth = d.Get() - if depth < 0 { - return nil, fmt.Errorf("invalid depth for flatten function, %d cannot be negative", depth) + if depth < 1 { + return nil, fmt.Errorf("invalid depth '%d' for flatten function, must be greater than 0", depth) } } @@ -55,7 +55,7 @@ func flatten[K any](target ottl.PMapGetter[K], p ottl.Optional[string], d ottl.O result := pcommon.NewMap() flattenHelper(m, result, prefix, 0, depth) - result.CopyTo(m) + result.MoveTo(m) return nil, nil }, nil @@ -69,7 +69,7 @@ func flattenHelper(m pcommon.Map, result pcommon.Map, prefix string, currentDept switch { case v.Type() == pcommon.ValueTypeMap && currentDepth < maxDepth: flattenHelper(v.Map(), result, prefix+k, currentDepth+1, maxDepth) - case v.Type() == pcommon.ValueTypeSlice: + case v.Type() == pcommon.ValueTypeSlice && currentDepth < maxDepth: for i := 0; i < v.Slice().Len(); i++ { v.Slice().At(i).CopyTo(result.PutEmpty(fmt.Sprintf("%v.%v", prefix+k, i))) } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_fnv.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_fnv.go index 5df53a4737e..c9a29ca5739 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_fnv.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_fnv.go @@ -30,7 +30,6 @@ func createFnvFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ott } func FNVHashString[K any](target ottl.StringGetter[K]) (ottl.ExprFunc[K], error) { - return func(ctx context.Context, tCtx K) (any, error) { val, err := 
target.Get(ctx, tCtx) if err != nil { diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_format.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_format.go new file mode 100644 index 00000000000..86f3dc83046 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_format.go @@ -0,0 +1,45 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs" + +import ( + "context" + "fmt" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" +) + +type FormatArguments[K any] struct { + Format string + Vals []ottl.Getter[K] +} + +func NewFormatFactory[K any]() ottl.Factory[K] { + return ottl.NewFactory("Format", &FormatArguments[K]{}, createFormatFunction[K]) +} + +func createFormatFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) { + args, ok := oArgs.(*FormatArguments[K]) + if !ok { + return nil, fmt.Errorf("FormatFactory args must be of type *FormatArguments[K]") + } + + return format(args.Format, args.Vals), nil +} + +func format[K any](formatString string, vals []ottl.Getter[K]) ottl.ExprFunc[K] { + return func(ctx context.Context, tCtx K) (any, error) { + formatArgs := make([]any, 0, len(vals)) + for _, arg := range vals { + formatArg, err := arg.Get(ctx, tCtx) + if err != nil { + return nil, err + } + + formatArgs = append(formatArgs, formatArg) + } + + return fmt.Sprintf(formatString, formatArgs...), nil + } +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_get_xml.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_get_xml.go new file mode 100644 index 00000000000..c344dd5b8ef --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_get_xml.go @@ -0,0 +1,70 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs" + +import ( + "context" + "fmt" + + "github.com/antchfx/xmlquery" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" +) + +type GetXMLArguments[K any] struct { + Target ottl.StringGetter[K] + XPath string +} + +func NewGetXMLFactory[K any]() ottl.Factory[K] { + return ottl.NewFactory("GetXML", &GetXMLArguments[K]{}, createGetXMLFunction[K]) +} + +func createGetXMLFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) { + args, ok := oArgs.(*GetXMLArguments[K]) + + if !ok { + return nil, fmt.Errorf("GetXML args must be of type *GetXMLArguments[K]") + } + + if err := validateXPath(args.XPath); err != nil { + return nil, err + } + + return getXML(args.Target, args.XPath), nil +} + +// getXML returns an XML formatted string that is a result of matching elements from the target XML.
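+// An assumed example (illustrative only): GetXML(target, "/a/b") applied to
+// <a><b>1</b><c/><b>2</b></a> would return <b>1</b><b>2</b>, since each
+// matched element node is re-homed into a fresh document node and serialized.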
+func getXML[K any](target ottl.StringGetter[K], xPath string) ottl.ExprFunc[K] { + return func(ctx context.Context, tCtx K) (any, error) { + var doc *xmlquery.Node + if targetVal, err := target.Get(ctx, tCtx); err != nil { + return nil, err + } else if doc, err = parseNodesXML(targetVal); err != nil { + return nil, err + } + + nodes, err := xmlquery.QueryAll(doc, xPath) + if err != nil { + return nil, err + } + + result := &xmlquery.Node{Type: xmlquery.DocumentNode} + for _, n := range nodes { + switch n.Type { + case xmlquery.ElementNode, xmlquery.TextNode: + xmlquery.AddChild(result, n) + case xmlquery.AttributeNode, xmlquery.CharDataNode: + // get the value + xmlquery.AddChild(result, &xmlquery.Node{ + Type: xmlquery.TextNode, + Data: n.InnerText(), + }) + default: + continue + } + } + return result.OutputXML(false), nil + } +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_hex.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_hex.go new file mode 100644 index 00000000000..dd87e188779 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_hex.go @@ -0,0 +1,40 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs" + +import ( + "context" + "encoding/hex" + "fmt" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" +) + +type HexArguments[K any] struct { + Target ottl.ByteSliceLikeGetter[K] +} + +func NewHexFactory[K any]() ottl.Factory[K] { + return ottl.NewFactory("Hex", &HexArguments[K]{}, createHexFunction[K]) +} + +func createHexFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) { + args, ok := oArgs.(*HexArguments[K]) + + if !ok { + return nil, fmt.Errorf("HexFactory args must be of type *HexArguments[K]") + } + + return Hex(args.Target) +} + +func Hex[K any](target ottl.ByteSliceLikeGetter[K]) (ottl.ExprFunc[K], error) { + return func(ctx context.Context, tCtx K) (any, error) { + value, err := target.Get(ctx, tCtx) + if err != nil { + return nil, err + } + return hex.EncodeToString(value), nil + }, nil +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_hour.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_hour.go index 3e30fbd1087..5bc3e89600a 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_hour.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_hour.go @@ -17,6 +17,7 @@ type HourArguments[K any] struct { func NewHourFactory[K any]() ottl.Factory[K] { return ottl.NewFactory("Hour", &HourArguments[K]{}, createHourFunction[K]) } + func createHourFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) { args, ok := oArgs.(*HourArguments[K]) diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_hours.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_hours.go index 66ab2dbcad1..580091ae4be 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_hours.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_hours.go @@ -17,6 +17,7 @@ type 
HoursArguments[K any] struct { func NewHoursFactory[K any]() ottl.Factory[K] { return ottl.NewFactory("Hours", &HoursArguments[K]{}, createHoursFunction[K]) } + func createHoursFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) { args, ok := oArgs.(*HoursArguments[K]) diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_insert_xml.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_insert_xml.go new file mode 100644 index 00000000000..778b16938a0 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_insert_xml.go @@ -0,0 +1,75 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs" + +import ( + "context" + "errors" + "fmt" + + "github.com/antchfx/xmlquery" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" +) + +type InsertXMLArguments[K any] struct { + Target ottl.StringGetter[K] + XPath string + SubDocument ottl.StringGetter[K] +} + +func NewInsertXMLFactory[K any]() ottl.Factory[K] { + return ottl.NewFactory("InsertXML", &InsertXMLArguments[K]{}, createInsertXMLFunction[K]) +} + +func createInsertXMLFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) { + args, ok := oArgs.(*InsertXMLArguments[K]) + + if !ok { + return nil, fmt.Errorf("InsertXML args must be of type *InsertXMLArguments[K]") + } + + if err := validateXPath(args.XPath); err != nil { + return nil, err + } + + return insertXML(args.Target, args.XPath, args.SubDocument), nil +} + +// insertXML returns an XML formatted string that is a result of inserting another XML document into +// the content of each selected target element. +func insertXML[K any](target ottl.StringGetter[K], xPath string, subGetter ottl.StringGetter[K]) ottl.ExprFunc[K] { + return func(ctx context.Context, tCtx K) (any, error) { + var doc *xmlquery.Node + if targetVal, err := target.Get(ctx, tCtx); err != nil { + return nil, err + } else if doc, err = parseNodesXML(targetVal); err != nil { + return nil, err + } + + var subDoc *xmlquery.Node + if subDocVal, err := subGetter.Get(ctx, tCtx); err != nil { + return nil, err + } else if subDoc, err = parseNodesXML(subDocVal); err != nil { + return nil, err + } + + nodes, errs := xmlquery.QueryAll(doc, xPath) + for _, n := range nodes { + switch n.Type { + case xmlquery.ElementNode, xmlquery.DocumentNode: + var nextSibling *xmlquery.Node + for c := subDoc.FirstChild; c != nil; c = nextSibling { + // AddChild updates c.NextSibling but not subDoc.FirstChild + // so we need to get the handle to it prior to the update.
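+ // (Without the saved handle, the loop would read c.NextSibling
+ // after AddChild has already rewritten it, ending the walk over
+ // subDoc's children prematurely.)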
+ nextSibling = c.NextSibling + xmlquery.AddChild(n, c) + } + default: + errs = errors.Join(errs, fmt.Errorf("InsertXML XPath selected non-element: %q", n.Data)) + } + } + return doc.OutputXML(false), errs + } +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_is_list.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_is_list.go new file mode 100644 index 00000000000..137ccb47070 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_is_list.go @@ -0,0 +1,53 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs" + +import ( + "context" + "fmt" + + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" +) + +type IsListArguments[K any] struct { + Target ottl.Getter[K] +} + +func NewIsListFactory[K any]() ottl.Factory[K] { + return ottl.NewFactory("IsList", &IsListArguments[K]{}, createIsListFunction[K]) +} + +func createIsListFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) { + args, ok := oArgs.(*IsListArguments[K]) + + if !ok { + return nil, fmt.Errorf("IsListFactory args must be of type *IsListArguments[K]") + } + + return isList(args.Target), nil +} + +func isList[K any](target ottl.Getter[K]) ottl.ExprFunc[K] { + return func(ctx context.Context, tCtx K) (any, error) { + val, err := target.Get(ctx, tCtx) + if err != nil { + return false, err + } + + switch valType := val.(type) { + case pcommon.Value: + return valType.Type() == pcommon.ValueTypeSlice, nil + + case pcommon.Slice, plog.LogRecordSlice, plog.ResourceLogsSlice, plog.ScopeLogsSlice, pmetric.ExemplarSlice, pmetric.ExponentialHistogramDataPointSlice, pmetric.HistogramDataPointSlice, pmetric.MetricSlice, pmetric.NumberDataPointSlice, pmetric.ResourceMetricsSlice, pmetric.ScopeMetricsSlice, pmetric.SummaryDataPointSlice, pmetric.SummaryDataPointValueAtQuantileSlice, ptrace.ResourceSpansSlice, ptrace.ScopeSpansSlice, ptrace.SpanEventSlice, ptrace.SpanLinkSlice, ptrace.SpanSlice, []string, []bool, []int64, []float64, [][]byte, []any: + return true, nil + } + + return false, nil + } +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_is_root_span.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_is_root_span.go new file mode 100644 index 00000000000..1aaba5a7bb4 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_is_root_span.go @@ -0,0 +1,25 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs" + +import ( + "context" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspan" +) + +func NewIsRootSpanFactory() ottl.Factory[ottlspan.TransformContext] { + return ottl.NewFactory("IsRootSpan", nil, createIsRootSpanFunction) +} + +func createIsRootSpanFunction(_ ottl.FunctionContext, _ ottl.Arguments) 
(ottl.ExprFunc[ottlspan.TransformContext], error) { + return isRootSpan() +} + +func isRootSpan() (ottl.ExprFunc[ottlspan.TransformContext], error) { + return func(_ context.Context, tCtx ottlspan.TransformContext) (any, error) { + return tCtx.GetSpan().ParentSpanID().IsEmpty(), nil + }, nil +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_keep_keys.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_keep_keys.go index 6cae0c4c379..8370414c6de 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_keep_keys.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_keep_keys.go @@ -42,7 +42,7 @@ func keepKeys[K any](target ottl.PMapGetter[K], keys []string) ottl.ExprFunc[K] if err != nil { return nil, err } - val.RemoveIf(func(key string, value pcommon.Value) bool { + val.RemoveIf(func(key string, _ pcommon.Value) bool { _, ok := keySet[key] return !ok }) diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_keep_matching_keys.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_keep_matching_keys.go new file mode 100644 index 00000000000..c5878d46eff --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_keep_matching_keys.go @@ -0,0 +1,51 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs" +import ( + "fmt" + "regexp" + + "go.opentelemetry.io/collector/pdata/pcommon" + "golang.org/x/net/context" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" +) + +type KeepMatchingKeysArguments[K any] struct { + Target ottl.PMapGetter[K] + Pattern string +} + +func NewKeepMatchingKeysFactory[K any]() ottl.Factory[K] { + return ottl.NewFactory("keep_matching_keys", &KeepMatchingKeysArguments[K]{}, createKeepMatchingKeysFunction[K]) +} + +func createKeepMatchingKeysFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) { + args, ok := oArgs.(*KeepMatchingKeysArguments[K]) + + if !ok { + return nil, fmt.Errorf("KeepMatchingKeysFactory args must be of type *KeepMatchingKeysArguments[K]") + } + + return keepMatchingKeys(args.Target, args.Pattern) +} + +func keepMatchingKeys[K any](target ottl.PMapGetter[K], pattern string) (ottl.ExprFunc[K], error) { + compiledPattern, err := regexp.Compile(pattern) + if err != nil { + return nil, fmt.Errorf("the regex pattern provided to keep_matching_keys is not a valid pattern: %w", err) + } + + return func(ctx context.Context, tCtx K) (any, error) { + val, err := target.Get(ctx, tCtx) + if err != nil { + return nil, err + } + + val.RemoveIf(func(key string, _ pcommon.Value) bool { + return !compiledPattern.MatchString(key) + }) + return nil, nil + }, nil +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_limit.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_limit.go index 81c3a4a7f4b..0010c0e3d5e 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_limit.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_limit.go @@ -64,7 +64,7 @@ func limit[K any](target
ottl.PMapGetter[K], limit int64, priorityKeys []string) } } - val.RemoveIf(func(key string, value pcommon.Value) bool { + val.RemoveIf(func(key string, _ pcommon.Value) bool { if _, ok := keep[key]; ok { return false } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_md5.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_md5.go new file mode 100644 index 00000000000..280ac5c8bcb --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_md5.go @@ -0,0 +1,46 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs" + +import ( + "context" + "crypto/md5" // #nosec + "encoding/hex" + "fmt" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" +) + +type MD5Arguments[K any] struct { + Target ottl.StringGetter[K] +} + +func NewMD5Factory[K any]() ottl.Factory[K] { + return ottl.NewFactory("MD5", &MD5Arguments[K]{}, createMD5Function[K]) +} + +func createMD5Function[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) { + args, ok := oArgs.(*MD5Arguments[K]) + + if !ok { + return nil, fmt.Errorf("MD5Factory args must be of type *MD5Arguments[K]") + } + + return MD5HashString(args.Target) +} + +func MD5HashString[K any](target ottl.StringGetter[K]) (ottl.ExprFunc[K], error) { + return func(ctx context.Context, tCtx K) (any, error) { + val, err := target.Get(ctx, tCtx) + if err != nil { + return nil, err + } + hash := md5.New() // #nosec + _, err = hash.Write([]byte(val)) + if err != nil { + return nil, err + } + return hex.EncodeToString(hash.Sum(nil)), nil + }, nil +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_microseconds.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_microseconds.go index 0855efaca05..f76616782b9 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_microseconds.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_microseconds.go @@ -17,6 +17,7 @@ type MicrosecondsArguments[K any] struct { func NewMicrosecondsFactory[K any]() ottl.Factory[K] { return ottl.NewFactory("Microseconds", &MicrosecondsArguments[K]{}, createMicrosecondsFunction[K]) } + func createMicrosecondsFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) { args, ok := oArgs.(*MicrosecondsArguments[K]) diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_milliseconds.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_milliseconds.go index da787e0e157..f0d605d5ee5 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_milliseconds.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_milliseconds.go @@ -17,6 +17,7 @@ type MillisecondsArguments[K any] struct { func NewMillisecondsFactory[K any]() ottl.Factory[K] { return ottl.NewFactory("Milliseconds", &MillisecondsArguments[K]{}, createMillisecondsFunction[K]) } + func createMillisecondsFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) { args, ok := oArgs.(*MillisecondsArguments[K]) diff --git 
a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_minute.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_minute.go new file mode 100644 index 00000000000..f2ce05a179d --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_minute.go @@ -0,0 +1,39 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs" + +import ( + "context" + "fmt" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" +) + +type MinuteArguments[K any] struct { + Time ottl.TimeGetter[K] +} + +func NewMinuteFactory[K any]() ottl.Factory[K] { + return ottl.NewFactory("Minute", &MinuteArguments[K]{}, createMinuteFunction[K]) +} + +func createMinuteFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) { + args, ok := oArgs.(*MinuteArguments[K]) + + if !ok { + return nil, fmt.Errorf("MinuteFactory args must be of type *MinuteArguments[K]") + } + + return Minute(args.Time) +} + +func Minute[K any](time ottl.TimeGetter[K]) (ottl.ExprFunc[K], error) { + return func(ctx context.Context, tCtx K) (any, error) { + t, err := time.Get(ctx, tCtx) + if err != nil { + return nil, err + } + return int64(t.Minute()), nil + }, nil +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_minutes.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_minutes.go index 5048befc572..557fa972ed4 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_minutes.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_minutes.go @@ -17,6 +17,7 @@ type MinutesArguments[K any] struct { func NewMinutesFactory[K any]() ottl.Factory[K] { return ottl.NewFactory("Minutes", &MinutesArguments[K]{}, createMinutesFunction[K]) } + func createMinutesFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) { args, ok := oArgs.(*MinutesArguments[K]) diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_month.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_month.go new file mode 100644 index 00000000000..a4ab013d111 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_month.go @@ -0,0 +1,39 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs" + +import ( + "context" + "fmt" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" +) + +type MonthArguments[K any] struct { + Time ottl.TimeGetter[K] +} + +func NewMonthFactory[K any]() ottl.Factory[K] { + return ottl.NewFactory("Month", &MonthArguments[K]{}, createMonthFunction[K]) +} + +func createMonthFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) { + args, ok := oArgs.(*MonthArguments[K]) + + if !ok { + return nil, fmt.Errorf("MonthFactory args must be of type *MonthArguments[K]") + } + + return Month(args.Time) +} + +func Month[K any](time ottl.TimeGetter[K]) (ottl.ExprFunc[K], error) { + return func(ctx context.Context, tCtx K) (any, error) { + t, err 
:= time.Get(ctx, tCtx) + if err != nil { + return nil, err + } + return int64(t.Month()), nil + }, nil +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_nanoseconds.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_nanoseconds.go index 2bb2cd7d1dd..32769626750 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_nanoseconds.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_nanoseconds.go @@ -17,6 +17,7 @@ type NanosecondsArguments[K any] struct { func NewNanosecondsFactory[K any]() ottl.Factory[K] { return ottl.NewFactory("Nanoseconds", &NanosecondsArguments[K]{}, createNanosecondsFunction[K]) } + func createNanosecondsFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) { args, ok := oArgs.(*NanosecondsArguments[K]) diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_now.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_now.go index 51815d3f686..698a274a373 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_now.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_now.go @@ -11,7 +11,7 @@ import ( ) func now[K any]() (ottl.ExprFunc[K], error) { - return func(ctx context.Context, tCtx K) (any, error) { + return func(_ context.Context, _ K) (any, error) { return time.Now(), nil }, nil } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_parse_json.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_parse_json.go index 8cca75f7160..011437424c1 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_parse_json.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_parse_json.go @@ -7,7 +7,7 @@ import ( "context" "fmt" - jsoniter "github.com/json-iterator/go" + "github.com/goccy/go-json" "go.opentelemetry.io/collector/pdata/pcommon" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" @@ -31,7 +31,7 @@ func createParseJSONFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments return parseJSON(args.Target), nil } -// parseJSON returns a `pcommon.Map` struct that is a result of parsing the target string as JSON +// parseJSON returns a `pcommon.Map` or `pcommon.Slice` struct that is a result of parsing the target string as JSON // Each JSON type is converted into a `pdata.Value` using the following map: // // JSON boolean -> bool @@ -46,13 +46,22 @@ func parseJSON[K any](target ottl.StringGetter[K]) ottl.ExprFunc[K] { if err != nil { return nil, err } - var parsedValue map[string]any - err = jsoniter.UnmarshalFromString(targetVal, &parsedValue) + var parsedValue any + err = json.Unmarshal([]byte(targetVal), &parsedValue) if err != nil { return nil, err } - result := pcommon.NewMap() - err = result.FromRaw(parsedValue) - return result, err + switch v := parsedValue.(type) { + case []any: + result := pcommon.NewSlice() + err = result.FromRaw(v) + return result, err + case map[string]any: + result := pcommon.NewMap() + err = result.FromRaw(v) + return result, err + default: + return nil, fmt.Errorf("could not convert parsed value of type %T to JSON object", v) + } } } diff --git 
a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_parse_simplified_xml.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_parse_simplified_xml.go new file mode 100644 index 00000000000..7e4f1e2753f --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_parse_simplified_xml.go @@ -0,0 +1,134 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs" + +import ( + "context" + "fmt" + + "github.com/antchfx/xmlquery" + "go.opentelemetry.io/collector/pdata/pcommon" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" +) + +type ParseSimplifiedXMLArguments[K any] struct { + Target ottl.StringGetter[K] +} + +func NewParseSimplifiedXMLFactory[K any]() ottl.Factory[K] { + return ottl.NewFactory("ParseSimplifiedXML", &ParseSimplifiedXMLArguments[K]{}, createParseSimplifiedXMLFunction[K]) +} + +func createParseSimplifiedXMLFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) { + args, ok := oArgs.(*ParseSimplifiedXMLArguments[K]) + + if !ok { + return nil, fmt.Errorf("ParseSimplifiedXML args must be of type *ParseSimplifiedXMLArguments[K]") + } + + return parseSimplifiedXML(args.Target), nil +} + +// The `ParseSimplifiedXML` Converter returns a `pcommon.Map` struct that is the result of parsing the target +// string without preservation of attributes or extraneous text content. +func parseSimplifiedXML[K any](target ottl.StringGetter[K]) ottl.ExprFunc[K] { + return func(ctx context.Context, tCtx K) (any, error) { + var doc *xmlquery.Node + if targetVal, err := target.Get(ctx, tCtx); err != nil { + return nil, err + } else if doc, err = parseNodesXML(targetVal); err != nil { + return nil, err + } + + docMap := pcommon.NewMap() + parseElement(doc, &docMap) + return docMap, nil + } +} + +func parseElement(parent *xmlquery.Node, parentMap *pcommon.Map) { + // Count the number of each element tag so we know whether it will be a member of a slice or not + childTags := make(map[string]int) + for child := parent.FirstChild; child != nil; child = child.NextSibling { + if child.Type != xmlquery.ElementNode { + continue + } + childTags[child.Data]++ + } + if len(childTags) == 0 { + return + } + + // Convert the children, now knowing whether they will be a member of a slice or not + for child := parent.FirstChild; child != nil; child = child.NextSibling { + if child.Type != xmlquery.ElementNode || child.FirstChild == nil { + continue + } + + leafValue := leafValueFromElement(child) + + // Slice of the same element + if childTags[child.Data] > 1 { + // Get or create the slice of children + var childrenSlice pcommon.Slice + childrenValue, ok := parentMap.Get(child.Data) + if ok { + childrenSlice = childrenValue.Slice() + } else { + childrenSlice = parentMap.PutEmptySlice(child.Data) + } + + // Add the child's text content to the slice + if leafValue != "" { + childrenSlice.AppendEmpty().SetStr(leafValue) + continue + } + + // Parse the child to make sure there's something to add + childMap := pcommon.NewMap() + parseElement(child, &childMap) + if childMap.Len() == 0 { + continue + } + + sliceValue := childrenSlice.AppendEmpty() + sliceMap := sliceValue.SetEmptyMap() + childMap.CopyTo(sliceMap) + continue + } + + if leafValue != "" { + parentMap.PutStr(child.Data, leafValue) + continue + } + 
// Child will be a map + childMap := pcommon.NewMap() + parseElement(child, &childMap) + if childMap.Len() == 0 { + continue + } + + childMap.CopyTo(parentMap.PutEmptyMap(child.Data)) + } +} + +func leafValueFromElement(node *xmlquery.Node) string { + // First check if there are any child elements. If there are, ignore any extraneous text. + for child := node.FirstChild; child != nil; child = child.NextSibling { + if child.Type == xmlquery.ElementNode { + return "" + } + } + + // No child elements, so return the first text or CDATA content + for child := node.FirstChild; child != nil; child = child.NextSibling { + switch child.Type { + case xmlquery.TextNode, xmlquery.CharDataNode: + return child.Data + } + } + return "" +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_remove_xml.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_remove_xml.go new file mode 100644 index 00000000000..b45ee74fcd1 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_remove_xml.go @@ -0,0 +1,95 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs" + +import ( + "context" + "fmt" + "strings" + + "github.com/antchfx/xmlquery" + "github.com/antchfx/xpath" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" +) + +type RemoveXMLArguments[K any] struct { + Target ottl.StringGetter[K] + XPath string +} + +func NewRemoveXMLFactory[K any]() ottl.Factory[K] { + return ottl.NewFactory("RemoveXML", &RemoveXMLArguments[K]{}, createRemoveXMLFunction[K]) +} + +func createRemoveXMLFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) { + args, ok := oArgs.(*RemoveXMLArguments[K]) + + if !ok { + return nil, fmt.Errorf("RemoveXML args must be of type *RemoveXMLArguments[K]") + } + + if err := validateXPath(args.XPath); err != nil { + return nil, err + } + + return removeXML(args.Target, args.XPath), nil +} + +// removeXML returns an XML formatted string that is a result of removing all matching nodes from the target XML. +// This currently supports removal of elements, attributes, text values, comments, and CharData. +func removeXML[K any](target ottl.StringGetter[K], xPath string) ottl.ExprFunc[K] { + return func(ctx context.Context, tCtx K) (any, error) { + var doc *xmlquery.Node + if targetVal, err := target.Get(ctx, tCtx); err != nil { + return nil, err + } else if doc, err = parseNodesXML(targetVal); err != nil { + return nil, err + } + + nodes, err := xmlquery.QueryAll(doc, xPath) + if err != nil { + return nil, err + } + + for _, n := range nodes { + switch n.Type { + case xmlquery.ElementNode: + xmlquery.RemoveFromTree(n) + case xmlquery.AttributeNode: + n.Parent.RemoveAttr(n.Data) + case xmlquery.TextNode: + n.Data = "" + case xmlquery.CommentNode: + xmlquery.RemoveFromTree(n) + case xmlquery.CharDataNode: + xmlquery.RemoveFromTree(n) + } + } + return doc.OutputXML(false), nil + } +} + +func validateXPath(xPath string) error { + _, err := xpath.Compile(xPath) + if err != nil { + return fmt.Errorf("invalid xpath: %w", err) + } + return nil + } + +// Aside from parsing the XML document, this function also ensures that
// the XML declaration is included in the result only if it was present in
// the original document.
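+// An assumed example (illustrative only): parsing <a/> and serializing the
+// result yields <a/> without a leading <?xml ...?> declaration, while an
+// input that begins with <?xml keeps its declaration in the output.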
+func parseNodesXML(targetVal string) (*xmlquery.Node, error) { + preserveDeclaration := strings.HasPrefix(targetVal, "<?xml") + top, err := xmlquery.Parse(strings.NewReader(targetVal)) + if err != nil { + return nil, fmt.Errorf("parse XML: %w", err) + } + if !preserveDeclaration && top.FirstChild != nil && top.FirstChild.Type == xmlquery.DeclarationNode { + xmlquery.RemoveFromTree(top.FirstChild) + } + return top, nil +} […] > limit { value.SetStr(stringVal[:limit]) diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_truncate_time.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_truncate_time.go index 3d122dfb874..16705353813 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_truncate_time.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_truncate_time.go @@ -18,6 +18,7 @@ type TruncateTimeArguments[K any] struct { func NewTruncateTimeFactory[K any]() ottl.Factory[K] { return ottl.NewFactory("TruncateTime", &TruncateTimeArguments[K]{}, createTruncateTimeFunction[K]) } + func createTruncateTimeFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) { args, ok := oArgs.(*TruncateTimeArguments[K]) diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_unix.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_unix.go new file mode 100644 index 00000000000..47c5065ff0e --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_unix.go @@ -0,0 +1,51 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs" + +import ( + "context" + "fmt" + "time" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" +) + +type UnixArguments[K any] struct { + Seconds ottl.IntGetter[K] + Nanoseconds ottl.Optional[ottl.IntGetter[K]] +} + +func NewUnixFactory[K any]() ottl.Factory[K] { + return ottl.NewFactory("Unix", &UnixArguments[K]{}, createUnixFunction[K]) +} + +func createUnixFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) { + args, ok := oArgs.(*UnixArguments[K]) + + if !ok { + return nil, fmt.Errorf("UnixFactory args must be of type *UnixArguments[K]") + } + + return Unix(args.Seconds, args.Nanoseconds) +} + +func Unix[K any](seconds ottl.IntGetter[K], nanoseconds ottl.Optional[ottl.IntGetter[K]]) (ottl.ExprFunc[K], error) { + return func(ctx context.Context, tCtx K) (any, error) { + sec, err := seconds.Get(ctx, tCtx) + if err != nil { + return nil, err + } + + var nsec int64 + + if !nanoseconds.IsEmpty() { + nsec, err = nanoseconds.Get().Get(ctx, tCtx) + if err != nil { + return nil, err + } + } + + return time.Unix(sec, nsec), nil + }, nil +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_unix_micro.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_unix_micro.go index d55f717c750..939941b12fe 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_unix_micro.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_unix_micro.go @@ -17,6 +17,7 @@ type UnixMicroArguments[K any] struct { func NewUnixMicroFactory[K any]() ottl.Factory[K] { return ottl.NewFactory("UnixMicro", &UnixMicroArguments[K]{}, createUnixMicroFunction[K]) } + func createUnixMicroFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) { args, ok := oArgs.(*UnixMicroArguments[K])
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_unix_milli.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_unix_milli.go index 8f5e587c73b..7aabfedb47a 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_unix_milli.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_unix_milli.go @@ -17,6 +17,7 @@ type UnixMilliArguments[K any] struct { func NewUnixMilliFactory[K any]() ottl.Factory[K] { return ottl.NewFactory("UnixMilli", &UnixMilliArguments[K]{}, createUnixMilliFunction[K]) } + func createUnixMilliFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) { args, ok := oArgs.(*UnixMilliArguments[K]) diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_unix_nano.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_unix_nano.go index 4c027d855fc..5ba82f5d607 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_unix_nano.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_unix_nano.go @@ -17,6 +17,7 @@ type UnixNanoArguments[K any] struct { func NewUnixNanoFactory[K any]() ottl.Factory[K] { return ottl.NewFactory("UnixNano", &UnixNanoArguments[K]{}, createUnixNanoFunction[K]) } + func createUnixNanoFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) { args, ok := oArgs.(*UnixNanoArguments[K]) diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_unix_seconds.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_unix_seconds.go index 612a87f8c50..55b7f0f130e 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_unix_seconds.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_unix_seconds.go @@ -17,6 +17,7 @@ type UnixSecondsArguments[K any] struct { func NewUnixSecondsFactory[K any]() ottl.Factory[K] { return ottl.NewFactory("UnixSeconds", &UnixSecondsArguments[K]{}, createUnixSecondsFunction[K]) } + func createUnixSecondsFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) { args, ok := oArgs.(*UnixSecondsArguments[K]) diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_url.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_url.go new file mode 100644 index 00000000000..371576bb024 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_url.go @@ -0,0 +1,44 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs" + +import ( + "context" + "fmt" + + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/parseutils" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" +) + +type URLArguments[K any] struct { + URI ottl.StringGetter[K] +} + +func NewURLFactory[K any]() ottl.Factory[K] { + return ottl.NewFactory("URL", &URLArguments[K]{}, createURIFunction[K]) +} + +func createURIFunction[K any](_ ottl.FunctionContext, oArgs 
ottl.Arguments) (ottl.ExprFunc[K], error) { + args, ok := oArgs.(*URLArguments[K]) + if !ok { + return nil, fmt.Errorf("URLFactory args must be of type *URLArguments[K]") + } + + return url(args.URI), nil //revive:disable-line:var-naming +} + +func url[K any](uriSource ottl.StringGetter[K]) ottl.ExprFunc[K] { //revive:disable-line:var-naming + return func(ctx context.Context, tCtx K) (any, error) { + urlString, err := uriSource.Get(ctx, tCtx) + if err != nil { + return nil, err + } + + if urlString == "" { + return nil, fmt.Errorf("url cannot be empty") + } + + return parseutils.ParseURI(urlString, true) + } +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_useragent.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_useragent.go new file mode 100644 index 00000000000..0ba1d1d9f34 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_useragent.go @@ -0,0 +1,47 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs" +import ( + "context" + "fmt" + + "github.com/ua-parser/uap-go/uaparser" + semconv "go.opentelemetry.io/collector/semconv/v1.25.0" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" +) + +type UserAgentArguments[K any] struct { + UserAgent ottl.StringGetter[K] +} + +func NewUserAgentFactory[K any]() ottl.Factory[K] { + return ottl.NewFactory("UserAgent", &UserAgentArguments[K]{}, createUserAgentFunction[K]) +} + +func createUserAgentFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) { + args, ok := oArgs.(*UserAgentArguments[K]) + if !ok { + return nil, fmt.Errorf("UserAgentFactory args must be of type *UserAgentArguments[K]") + } + + return userAgent[K](args.UserAgent), nil +} + +func userAgent[K any](userAgentSource ottl.StringGetter[K]) ottl.ExprFunc[K] { //revive:disable-line:var-naming + parser := uaparser.NewFromSaved() + + return func(ctx context.Context, tCtx K) (any, error) { + userAgentString, err := userAgentSource.Get(ctx, tCtx) + if err != nil { + return nil, err + } + parsedUserAgent := parser.ParseUserAgent(userAgentString) + return map[string]any{ + semconv.AttributeUserAgentName: parsedUserAgent.Family, + semconv.AttributeUserAgentOriginal: userAgentString, + semconv.AttributeUserAgentVersion: parsedUserAgent.ToVersionString(), + }, nil + } +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_uuid.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_uuid.go index 9c03835f503..fec985989d2 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_uuid.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_uuid.go @@ -12,7 +12,7 @@ import ( ) func uuid[K any]() (ottl.ExprFunc[K], error) { - return func(ctx context.Context, tCtx K) (any, error) { + return func(_ context.Context, _ K) (any, error) { u := guuid.New() return u.String(), nil }, nil diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_year.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_year.go new file mode 100644 index 00000000000..b64b35bcd8d --- /dev/null +++
b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_year.go @@ -0,0 +1,39 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs" + +import ( + "context" + "fmt" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" +) + +type YearArguments[K any] struct { + Time ottl.TimeGetter[K] +} + +func NewYearFactory[K any]() ottl.Factory[K] { + return ottl.NewFactory("Year", &YearArguments[K]{}, createYearFunction[K]) +} + +func createYearFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) { + args, ok := oArgs.(*YearArguments[K]) + + if !ok { + return nil, fmt.Errorf("YearFactory args must be of type *YearArguments[K]") + } + + return Year(args.Time) +} + +func Year[K any](time ottl.TimeGetter[K]) (ottl.ExprFunc[K], error) { + return func(ctx context.Context, tCtx K) (any, error) { + t, err := time.Get(ctx, tCtx) + if err != nil { + return nil, err + } + return int64(t.Year()), nil + }, nil +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/functions.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/functions.go index 9bb33ff3230..6fae06eb6b0 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/functions.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/functions.go @@ -12,6 +12,7 @@ func StandardFuncs[K any]() map[string]ottl.Factory[K] { // Editors NewDeleteKeyFactory[K](), NewDeleteMatchingKeysFactory[K](), + NewKeepMatchingKeysFactory[K](), NewFlattenFactory[K](), NewKeepKeysFactory[K](), NewLimitFactory[K](), @@ -36,45 +37,70 @@ func converters[K any]() []ottl.Factory[K] { return []ottl.Factory[K]{ // Converters NewBase64DecodeFactory[K](), + NewDecodeFactory[K](), NewConcatFactory[K](), NewConvertCaseFactory[K](), + NewConvertAttributesToElementsXMLFactory[K](), + NewConvertTextToElementsXMLFactory[K](), + NewDayFactory[K](), NewDoubleFactory[K](), NewDurationFactory[K](), NewExtractPatternsFactory[K](), + NewExtractGrokPatternsFactory[K](), NewFnvFactory[K](), + NewGetXMLFactory[K](), NewHourFactory[K](), NewHoursFactory[K](), + NewInsertXMLFactory[K](), NewIntFactory[K](), NewIsBoolFactory[K](), NewIsDoubleFactory[K](), + NewIsListFactory[K](), NewIsIntFactory[K](), NewIsMapFactory[K](), NewIsMatchFactory[K](), NewIsStringFactory[K](), NewLenFactory[K](), NewLogFactory[K](), + NewMD5Factory[K](), NewMicrosecondsFactory[K](), NewMillisecondsFactory[K](), + NewMinuteFactory[K](), NewMinutesFactory[K](), + NewMonthFactory[K](), NewNanosecondsFactory[K](), NewNowFactory[K](), NewParseCSVFactory[K](), NewParseJSONFactory[K](), NewParseKeyValueFactory[K](), + NewParseSimplifiedXMLFactory[K](), NewParseXMLFactory[K](), + NewRemoveXMLFactory[K](), NewSecondsFactory[K](), NewSHA1Factory[K](), NewSHA256Factory[K](), + NewSHA512Factory[K](), + NewSortFactory[K](), NewSpanIDFactory[K](), NewSplitFactory[K](), + NewFormatFactory[K](), + NewStringFactory[K](), NewSubstringFactory[K](), NewTimeFactory[K](), + NewToKeyValueStringFactory[K](), NewTruncateTimeFactory[K](), NewTraceIDFactory[K](), + NewUnixFactory[K](), NewUnixMicroFactory[K](), NewUnixMilliFactory[K](), NewUnixNanoFactory[K](), NewUnixSecondsFactory[K](), NewUUIDFactory[K](), + NewURLFactory[K](), + NewUserAgentFactory[K](), + 
NewAppendFactory[K](), + NewYearFactory[K](), + NewHexFactory[K](), + NewSliceToMapFactory[K](), } } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/parser.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/parser.go index f5b0bf632be..fade87d2982 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/parser.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/parser.go @@ -7,6 +7,8 @@ import ( "context" "errors" "fmt" + "sort" + "strings" "github.com/alecthomas/participle/v2" "go.opentelemetry.io/collector/component" @@ -16,9 +18,10 @@ import ( // Statement holds a top level Statement for processing telemetry data. A Statement is a combination of a function // invocation and the boolean expression to match telemetry for invoking the function. type Statement[K any] struct { - function Expr[K] - condition BoolExpr[K] - origText string + function Expr[K] + condition BoolExpr[K] + origText string + telemetrySettings component.TelemetrySettings } // Execute is a function that will execute the statement's function if the statement's condition is met. @@ -27,6 +30,11 @@ type Statement[K any] struct { // In addition, the function's return value is always returned. func (s *Statement[K]) Execute(ctx context.Context, tCtx K) (any, bool, error) { condition, err := s.condition.Eval(ctx, tCtx) + defer func() { + if s.telemetrySettings.Logger != nil { + s.telemetrySettings.Logger.Debug("TransformContext after statement execution", zap.String("statement", s.origText), zap.Bool("condition matched", condition), zap.Any("TransformContext", tCtx)) + } + }() if err != nil { return nil, false, err } @@ -58,6 +66,7 @@ type Parser[K any] struct { pathParser PathExpressionParser[K] enumParser EnumParser telemetrySettings component.TelemetrySettings + pathContextNames map[string]struct{} } func NewParser[K any]( @@ -91,6 +100,22 @@ func WithEnumParser[K any](parser EnumParser) Option[K] { } } +// WithPathContextNames sets the context names to be considered when parsing a Path value. +// When this option is empty or nil, all Path segments are considered fields, and the +// Path.Context value is always empty. +// When this option is configured, and the path's context is empty or is not present in +// this context names list, it results in an error. +func WithPathContextNames[K any](contexts []string) Option[K] { + return func(p *Parser[K]) { + pathContextNames := make(map[string]struct{}, len(contexts)) + for _, ctx := range contexts { + pathContextNames[ctx] = struct{}{} + } + + p.pathContextNames = pathContextNames + } +} + // ParseStatements parses string statements into ottl.Statement objects ready for execution. // Returns a slice of statements and a nil error on successful parsing. // If parsing fails, returns nil and a joined error containing each error per failed statement.
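For orientation, a hedged sketch of how a downstream component could opt into the path-context validation introduced above; the transform context `K`, `funcs`, and `pathParser` are assumed placeholders, not names from this patch:

```go
// Build a parser that only accepts paths qualified with a known context name.
parser, err := ottl.NewParser[K](
	funcs,      // map[string]ottl.Factory[K], e.g. ottlfuncs.StandardFuncs[K]()
	pathParser, // ottl.PathExpressionParser[K] for the chosen context
	component.TelemetrySettings{},
	ottl.WithPathContextNames[K]([]string{"span", "resource"}),
)
if err != nil {
	panic(err)
}
// "span.name" parses; a context-less path such as "name" is rejected.
_, err = parser.ParseStatement(`set(span.name, "renamed")`)
```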
@@ -131,9 +156,10 @@ func (p *Parser[K]) ParseStatement(statement string) (*Statement[K], error) { return nil, err } return &Statement[K]{ - function: function, - condition: expression, - origText: statement, + function: function, + condition: expression, + origText: statement, + telemetrySettings: p.telemetrySettings, }, nil } @@ -178,12 +204,40 @@ func (p *Parser[K]) ParseCondition(condition string) (*Condition[K], error) { }, nil } -var parser = newParser[parsedStatement]() -var conditionParser = newParser[booleanExpression]() +// prependContextToStatementPaths changes the given OTTL statement, adding the context name prefix +// to all context-less paths. No modifications are performed for paths whose [Path.Context] +// value matches any WithPathContextNames value. +// The context argument must be a valid WithPathContextNames value, otherwise an error is returned. +func (p *Parser[K]) prependContextToStatementPaths(context string, statement string) (string, error) { + if _, ok := p.pathContextNames[context]; !ok { + return statement, fmt.Errorf(`unknown context "%s" for parser %T, valid options are: %s`, context, p, p.buildPathContextNamesText("")) + } + parsed, err := parseStatement(statement) + if err != nil { + return "", err + } + paths := getParsedStatementPaths(parsed) + if len(paths) == 0 { + return statement, nil + } + + var missingContextOffsets []int + for _, it := range paths { + if _, ok := p.pathContextNames[it.Context]; !ok { + missingContextOffsets = append(missingContextOffsets, it.Pos.Offset) + } + } + + return insertContextIntoStatementOffsets(context, statement, missingContextOffsets) +} + +var ( + parser = newParser[parsedStatement]() + conditionParser = newParser[booleanExpression]() +) func parseStatement(raw string) (*parsedStatement, error) { parsed, err := parser.ParseString("", raw) - if err != nil { return nil, fmt.Errorf("statement has invalid syntax: %w", err) } @@ -197,7 +251,6 @@ func parseStatement(raw string) (*parsedStatement, error) { func parseCondition(raw string) (*booleanExpression, error) { parsed, err := conditionParser.ParseString("", raw) - if err != nil { return nil, fmt.Errorf("condition has invalid syntax: %w", err) } @@ -209,6 +262,30 @@ func parseCondition(raw string) (*booleanExpression, error) { return parsed, nil } +func insertContextIntoStatementOffsets(context string, statement string, offsets []int) (string, error) { + if len(offsets) == 0 { + return statement, nil + } + + contextPrefix := context + "." + var sb strings.Builder + sb.Grow(len(statement) + (len(contextPrefix) * len(offsets))) + + sort.Ints(offsets) + left := 0 + for _, offset := range offsets { + if offset < 0 || offset > len(statement) { + return statement, fmt.Errorf(`failed to insert context "%s" into statement "%s": offset %d is out of range`, context, statement, offset) + } + sb.WriteString(statement[left:offset]) + sb.WriteString(contextPrefix) + left = offset + } + sb.WriteString(statement[left:]) + + return sb.String(), nil +} + // newParser returns a parser that can be used to read a string into a parsedStatement. An error will be returned if the string // is not formatted for the DSL. func newParser[G any]() *participle.Parser[G] { @@ -262,6 +339,7 @@ func NewStatementSequence[K any](statements []*Statement[K], telemetrySettings c // When the ErrorMode of the StatementSequence is `ignore`, errors are logged and execution continues to the next statement.
// When the ErrorMode of the StatementSequence is `silent`, errors are not logged and execution continues to the next statement. func (s *StatementSequence[K]) Execute(ctx context.Context, tCtx K) error { + s.telemetrySettings.Logger.Debug("initial TransformContext before executing StatementSequence", zap.Any("TransformContext", tCtx)) for _, statement := range s.statements { _, _, err := statement.Execute(ctx, tCtx) if err != nil { @@ -333,6 +411,7 @@ func (c *ConditionSequence[K]) Eval(ctx context.Context, tCtx K) (bool, error) { var atLeastOneMatch bool for _, condition := range c.conditions { match, err := condition.Eval(ctx, tCtx) + c.telemetrySettings.Logger.Debug("condition evaluation result", zap.String("condition", condition.origText), zap.Bool("match", match), zap.Any("TransformContext", tCtx)) if err != nil { if c.errorMode == PropagateError { err = fmt.Errorf("failed to eval condition: %v, %w", condition.origText, err) diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/parser_collection.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/parser_collection.go new file mode 100644 index 00000000000..72d0d6abb3f --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/parser_collection.go @@ -0,0 +1,334 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package ottl // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" + +import ( + "fmt" + "reflect" + + "go.opentelemetry.io/collector/component" + "go.uber.org/zap" +) + +// Safeguard to statically ensure the Parser.ParseStatements method can be reflectively +// invoked by the ottlParserWrapper.parseStatements +var _ interface { + ParseStatements(statements []string) ([]*Statement[any], error) +} = (*Parser[any])(nil) + +// Safeguard to statically ensure any ParsedStatementConverter method can be reflectively +// invoked by the statementsConverterWrapper.call +var _ ParsedStatementConverter[any, any] = func( + _ *ParserCollection[any], + _ *Parser[any], + _ string, + _ StatementsGetter, + _ []*Statement[any], +) (any, error) { + return nil, nil +} + +// StatementsGetter represents a set of statements to be parsed. +// +// Experimental: *NOTE* this API is subject to change or removal in the future. +type StatementsGetter interface { + // GetStatements retrieves the OTTL statements to be parsed + GetStatements() []string +} + +type defaultStatementsGetter []string + +func (d defaultStatementsGetter) GetStatements() []string { + return d +} + +// NewStatementsGetter creates a new StatementsGetter. +// +// Experimental: *NOTE* this API is subject to change or removal in the future. +func NewStatementsGetter(statements []string) StatementsGetter { + return defaultStatementsGetter(statements) +} + +// ottlParserWrapper wraps an ottl.Parser using reflection, so it can invoke exported +// methods without knowing its generic type (transform context). 
+type ottlParserWrapper struct { + parser reflect.Value + prependContextToStatementPaths func(context string, statement string) (string, error) +} + +func newParserWrapper[K any](parser *Parser[K]) *ottlParserWrapper { + return &ottlParserWrapper{ + parser: reflect.ValueOf(parser), + prependContextToStatementPaths: parser.prependContextToStatementPaths, + } +} + +func (g *ottlParserWrapper) parseStatements(statements []string) (reflect.Value, error) { + method := g.parser.MethodByName("ParseStatements") + parseStatementsRes := method.Call([]reflect.Value{reflect.ValueOf(statements)}) + err := parseStatementsRes[1] + if !err.IsNil() { + return reflect.Value{}, err.Interface().(error) + } + return parseStatementsRes[0], nil +} + +func (g *ottlParserWrapper) prependContextToStatementsPaths(context string, statements []string) ([]string, error) { + result := make([]string, 0, len(statements)) + for _, s := range statements { + prependedStatement, err := g.prependContextToStatementPaths(context, s) + if err != nil { + return nil, err + } + result = append(result, prependedStatement) + } + return result, nil +} + +// statementsConverterWrapper is a reflection-based wrapper to the ParsedStatementConverter function, +// which does not require knowing all generic parameters to be called. +type statementsConverterWrapper reflect.Value + +func newStatementsConverterWrapper[K any, R any](converter ParsedStatementConverter[K, R]) statementsConverterWrapper { + return statementsConverterWrapper(reflect.ValueOf(converter)) +} + +func (s statementsConverterWrapper) call( + parserCollection reflect.Value, + ottlParser *ottlParserWrapper, + context string, + statements StatementsGetter, + parsedStatements reflect.Value, +) (reflect.Value, error) { + result := reflect.Value(s).Call([]reflect.Value{ + parserCollection, + ottlParser.parser, + reflect.ValueOf(context), + reflect.ValueOf(statements), + parsedStatements, + }) + + resultValue := result[0] + resultError := result[1] + if !resultError.IsNil() { + return reflect.Value{}, resultError.Interface().(error) + } + + return resultValue, nil +} + +// parserCollectionParser holds an ottlParserWrapper and its respective +// statementsConverter function. +type parserCollectionParser struct { + ottlParser *ottlParserWrapper + statementsConverter statementsConverterWrapper +} + +// ParserCollection is a configurable set of ottl.Parser that can handle multiple OTTL context +// parsings, inferring the context, choosing the right parser for the given statements, and +// transforming the parsed ottl.Statement[K] slice into a common result of type R. +// +// Experimental: *NOTE* this API is subject to change or removal in the future. +type ParserCollection[R any] struct { + contextParsers map[string]*parserCollectionParser + contextInferrer contextInferrer + modifiedStatementLogging bool + Settings component.TelemetrySettings + ErrorMode ErrorMode +} + +// ParserCollectionOption is a configurable ParserCollection option. +// +// Experimental: *NOTE* this API is subject to change or removal in the future. +type ParserCollectionOption[R any] func(*ParserCollection[R]) error + +// NewParserCollection creates a new ParserCollection. +// +// Experimental: *NOTE* this API is subject to change or removal in the future.
+func NewParserCollection[R any]( + settings component.TelemetrySettings, + options ...ParserCollectionOption[R], +) (*ParserCollection[R], error) { + pc := &ParserCollection[R]{ + Settings: settings, + contextParsers: map[string]*parserCollectionParser{}, + contextInferrer: defaultPriorityContextInferrer(), + } + + for _, op := range options { + err := op(pc) + if err != nil { + return nil, err + } + } + + return pc, nil +} + +// ParsedStatementConverter is a function that converts the parsed ottl.Statement[K] into +// a common representation to all parser collection contexts passed through WithParserCollectionContext. +// Given each parser has its own transform context type, they must agree on a common type [R] +// so it can be returned by the ParserCollection.ParseStatements and ParserCollection.ParseStatementsWithContext +// functions. +// +// Experimental: *NOTE* this API is subject to change or removal in the future. +type ParsedStatementConverter[K any, R any] func( + collection *ParserCollection[R], + parser *Parser[K], + context string, + statements StatementsGetter, + parsedStatements []*Statement[K], +) (R, error) + +func newNopParsedStatementConverter[K any]() ParsedStatementConverter[K, any] { + return func( + _ *ParserCollection[any], + _ *Parser[K], + _ string, + _ StatementsGetter, + parsedStatements []*Statement[K], + ) (any, error) { + return parsedStatements, nil + } +} + +// WithParserCollectionContext configures an ottl.Parser for the given context. +// The provided ottl.Parser must be configured to support the provided context using +// the ottl.WithPathContextNames option. +// +// Experimental: *NOTE* this API is subject to change or removal in the future. +func WithParserCollectionContext[K any, R any]( + context string, + parser *Parser[K], + converter ParsedStatementConverter[K, R], +) ParserCollectionOption[R] { + return func(mp *ParserCollection[R]) error { + if _, ok := parser.pathContextNames[context]; !ok { + return fmt.Errorf(`context "%s" must be a valid "%T" path context name`, context, parser) + } + mp.contextParsers[context] = &parserCollectionParser{ + ottlParser: newParserWrapper[K](parser), + statementsConverter: newStatementsConverterWrapper(converter), + } + return nil + } +} + +// WithParserCollectionErrorMode has no effect on the ParserCollection, but might be used +// by the ParsedStatementConverter functions to handle/create StatementSequence. +// +// Experimental: *NOTE* this API is subject to change or removal in the future. +func WithParserCollectionErrorMode[R any](errorMode ErrorMode) ParserCollectionOption[R] { + return func(tp *ParserCollection[R]) error { + tp.ErrorMode = errorMode + return nil + } +} + +// EnableParserCollectionModifiedStatementLogging controls the statements modification logs. +// When enabled, it logs any statements modifications performed by the parsing operations, +// instructing users to rewrite the statements accordingly. +// +// Experimental: *NOTE* this API is subject to change or removal in the future. +func EnableParserCollectionModifiedStatementLogging[R any](enabled bool) ParserCollectionOption[R] { + return func(tp *ParserCollection[R]) error { + tp.modifiedStatementLogging = enabled + return nil + } +} + +// ParseStatements parses the given statements into [R] using the configured context's ottl.Parser +// and subsequently calling the ParsedStatementConverter function. +// The statement's context is automatically inferred from the [Path.Context] values, choosing the +// highest priority context found. 
+// If no contexts are present in the statements, or if the inferred value is not supported by +// the [ParserCollection], it returns an error. +// If parsing the statements fails, it returns the underlying [ottl.Parser.ParseStatements] error. +// +// Experimental: *NOTE* this API is subject to change or removal in the future. +func (pc *ParserCollection[R]) ParseStatements(statements StatementsGetter) (R, error) { + statementsValues := statements.GetStatements() + inferredContext, err := pc.contextInferrer.infer(statementsValues) + if err != nil { + return *new(R), err + } + + if inferredContext == "" { + return *new(R), fmt.Errorf("unable to infer context from statements [%v], path's first segment must be a valid context name", statementsValues) + } + + return pc.ParseStatementsWithContext(inferredContext, statements, false) +} + +// ParseStatementsWithContext parses the given statements into [R] using the configured +// context's ottl.Parser and subsequently calling the ParsedStatementConverter function. +// Unlike ParseStatements, it uses the provided context and does not infer it +// automatically. The context value must be supported by the [ParserCollection], +// otherwise an error is returned. +// If the statements' paths do not provide their Path.Context value, the prependPathsContext +// argument should be set to true, so the statements are rewritten with the missing path +// contexts prepended. +// If parsing the statements fails, it returns the underlying [ottl.Parser.ParseStatements] error. +// +// Experimental: *NOTE* this API is subject to change or removal in the future. +func (pc *ParserCollection[R]) ParseStatementsWithContext(context string, statements StatementsGetter, prependPathsContext bool) (R, error) { + contextParser, ok := pc.contextParsers[context] + if !ok { + return *new(R), fmt.Errorf(`unknown context "%s" for statements: %v`, context, statements.GetStatements()) + } + + var err error + var parsingStatements []string + if prependPathsContext { + originalStatements := statements.GetStatements() + parsingStatements, err = contextParser.ottlParser.prependContextToStatementsPaths(context, originalStatements) + if err != nil { + return *new(R), err + } + if pc.modifiedStatementLogging { + pc.logModifiedStatements(originalStatements, parsingStatements) + } + } else { + parsingStatements = statements.GetStatements() + } + + parsedStatements, err := contextParser.ottlParser.parseStatements(parsingStatements) + if err != nil { + return *new(R), err + } + + convertedStatements, err := contextParser.statementsConverter.call( + reflect.ValueOf(pc), + contextParser.ottlParser, + context, + statements, + parsedStatements, + ) + if err != nil { + return *new(R), err + } + + if convertedStatements.IsNil() { + return *new(R), nil + } + + return convertedStatements.Interface().(R), nil +} + +func (pc *ParserCollection[R]) logModifiedStatements(originalStatements, modifiedStatements []string) { + var fields []zap.Field + for i, original := range originalStatements { + if modifiedStatements[i] != original { + statementKey := fmt.Sprintf("[%v]", i) + fields = append(fields, zap.Dict( + statementKey, + zap.String("original", original), + zap.String("modified", modifiedStatements[i])), + ) + } + } + if len(fields) > 0 { + pc.Settings.Logger.Info("one or more statements were modified to include their paths context, please rewrite them accordingly", zap.Dict("statements", fields...)) + } +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/paths.go
b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/paths.go new file mode 100644 index 00000000000..dbb66ee7c99 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/paths.go @@ -0,0 +1,32 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package ottl // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" + +// grammarPathVisitor is used to extract all paths from a parsedStatement or booleanExpression +type grammarPathVisitor struct { + paths []path +} + +func (v *grammarPathVisitor) visitEditor(_ *editor) {} +func (v *grammarPathVisitor) visitValue(_ *value) {} +func (v *grammarPathVisitor) visitMathExprLiteral(_ *mathExprLiteral) {} + +func (v *grammarPathVisitor) visitPath(value *path) { + v.paths = append(v.paths, *value) +} + +func getParsedStatementPaths(ps *parsedStatement) []path { + visitor := &grammarPathVisitor{} + ps.Editor.accept(visitor) + if ps.WhereClause != nil { + ps.WhereClause.accept(visitor) + } + return visitor.paths +} + +func getBooleanExpressionPaths(be *booleanExpression) []path { + visitor := &grammarPathVisitor{} + be.accept(visitor) + return visitor.paths +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil/hash.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil/hash.go index 6826de769b8..172789c607b 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil/hash.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil/hash.go @@ -31,6 +31,31 @@ var ( emptyHash = [16]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} ) +// HashOption is a function that sets an option on the hash calculation. +type HashOption func(*hashWriter) + +// WithMap adds a map to the hash calculation. +func WithMap(m pcommon.Map) HashOption { + return func(hw *hashWriter) { + hw.writeMapHash(m) + } +} + +// WithValue adds a value to the hash calculation. +func WithValue(v pcommon.Value) HashOption { + return func(hw *hashWriter) { + hw.writeValueHash(v) + } +} + +// WithString adds a string to the hash calculation. +func WithString(s string) HashOption { + return func(hw *hashWriter) { + hw.byteBuf = append(hw.byteBuf, valStrPrefix...) + hw.byteBuf = append(hw.byteBuf, s...) + } +} + type hashWriter struct { byteBuf []byte keysBuf []string @@ -47,6 +72,29 @@ var hashWriterPool = &sync.Pool{ New: func() any { return newHashWriter() }, } +// Hash generates a hash for the provided options and returns the computed hash as a [16]byte. +func Hash(opts ...HashOption) [16]byte { + if len(opts) == 0 { + return emptyHash + } + + hw := hashWriterPool.Get().(*hashWriter) + defer hashWriterPool.Put(hw) + hw.byteBuf = hw.byteBuf[:0] + + for _, o := range opts { + o(hw) + } + + return hw.hashSum128() +} + +// Hash64 generates a hash for the provided options and returns the computed hash as a uint64. +func Hash64(opts ...HashOption) uint64 { + hash := Hash(opts...) + return xxhash.Sum64(hash[:]) +} + // MapHash returns a hash for the provided map. // Maps with the same underlying key/value pairs in different order produce the same deterministic hash value.
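As a usage note on the option-based hashing API added here, a minimal sketch; the map contents are illustrative only:

```go
// Combine a string and a map into a single 64-bit hash. Options are
// applied in order to one pooled writer, so their ordering matters.
attrs := pcommon.NewMap()
attrs.PutStr("service.name", "checkout")
key := pdatautil.Hash64(
	pdatautil.WithString("scope"),
	pdatautil.WithMap(attrs),
)
_ = key // e.g. a map key for de-duplication or aggregation state
```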
func MapHash(m pcommon.Map) [16]byte { @@ -105,8 +153,7 @@ func (hw *hashWriter) writeMapHash(m pcommon.Map) { func (hw *hashWriter) writeValueHash(v pcommon.Value) { switch v.Type() { case pcommon.ValueTypeStr: - hw.byteBuf = append(hw.byteBuf, valStrPrefix...) - hw.byteBuf = append(hw.byteBuf, v.Str()...) + hw.writeString(v.Str()) case pcommon.ValueTypeBool: if v.Bool() { hw.byteBuf = append(hw.byteBuf, valBoolTrue...) @@ -138,6 +185,11 @@ func (hw *hashWriter) writeValueHash(v pcommon.Value) { } } +func (hw *hashWriter) writeString(s string) { + hw.byteBuf = append(hw.byteBuf, valStrPrefix...) + hw.byteBuf = append(hw.byteBuf, s...) +} + // hashSum128 returns a [16]byte hash sum. func (hw *hashWriter) hashSum128() [16]byte { r := [16]byte{} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/azure/resourcelogs_to_logs.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/azure/resourcelogs_to_logs.go index b2c96d28f56..5b608570612 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/azure/resourcelogs_to_logs.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/azure/resourcelogs_to_logs.go @@ -8,6 +8,7 @@ import ( "encoding/json" "errors" "strconv" + "time" jsoniter "github.com/json-iterator/go" "github.com/relvacode/iso8601" @@ -20,7 +21,7 @@ import ( const ( // Constants for OpenTelemetry Specs - scopeName = "otelcol/azureresourcelogs" + scopeName = "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/azure" // Constants for Azure Log Records azureCategory = "azure.category" @@ -37,9 +38,7 @@ const ( azureTenantID = "azure.tenant.id" ) -var ( - errMissingTimestamp = errors.New("missing timestamp") -) +var errMissingTimestamp = errors.New("missing timestamp") // azureRecords represents an array of Azure log records // as exported via an Azure Event Hub @@ -73,8 +72,9 @@ type azureLogRecord struct { var _ plog.Unmarshaler = (*ResourceLogsUnmarshaler)(nil) type ResourceLogsUnmarshaler struct { - Version string - Logger *zap.Logger + Version string + Logger *zap.Logger + TimeFormats []string } func (r ResourceLogsUnmarshaler) UnmarshalLogs(buf []byte) (plog.Logs, error) { @@ -107,7 +107,7 @@ func (r ResourceLogsUnmarshaler) UnmarshalLogs(buf []byte) (plog.Logs, error) { for i := 0; i < len(logs); i++ { log := logs[i] - nanos, err := getTimestamp(log) + nanos, err := getTimestamp(log, r.TimeFormats...) if err != nil { r.Logger.Warn("Unable to convert timestamp from log", zap.String("timestamp", log.Time)) continue @@ -131,11 +131,11 @@ func (r ResourceLogsUnmarshaler) UnmarshalLogs(buf []byte) (plog.Logs, error) { return l, nil } -func getTimestamp(record azureLogRecord) (pcommon.Timestamp, error) { +func getTimestamp(record azureLogRecord, formats ...string) (pcommon.Timestamp, error) { if record.Time != "" { - return asTimestamp(record.Time) + return asTimestamp(record.Time, formats...) } else if record.Timestamp != "" { - return asTimestamp(record.Timestamp) + return asTimestamp(record.Timestamp, formats...) } return 0, errMissingTimestamp @@ -144,13 +144,21 @@ func getTimestamp(record azureLogRecord) (pcommon.Timestamp, error) { // asTimestamp will parse an ISO8601 string into an OpenTelemetry // nanosecond timestamp. If the string cannot be parsed, it will // return zero and the error. 
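Before the rewritten parser below, a hedged sketch of how a caller might set the new TimeFormats field; the layout strings are illustrative Go reference layouts, not defaults shipped by this change:

```go
// Custom layouts are tried in order before falling back to ISO 8601.
u := azure.ResourceLogsUnmarshaler{
	Version:     "0.116.0",
	Logger:      zap.NewNop(),
	TimeFormats: []string{"01/02/2006 15:04:05", time.RFC3339Nano},
}
logs, err := u.UnmarshalLogs(payload) // payload: Event Hub JSON batch
```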
-func asTimestamp(s string) (pcommon.Timestamp, error) { - t, err := iso8601.ParseString(s) - if err != nil { - return 0, err +func asTimestamp(s string, formats ...string) (pcommon.Timestamp, error) { + var err error + var t time.Time + // Try parsing with provided formats first + for _, format := range formats { + if t, err = time.Parse(format, s); err == nil { + return pcommon.Timestamp(t.UnixNano()), nil + } } - return pcommon.Timestamp(t.UnixNano()), nil + // Fallback to ISO 8601 parsing if no format matches + if t, err = iso8601.ParseString(s); err == nil { + return pcommon.Timestamp(t.UnixNano()), nil + } + return 0, err } // asSeverity converts the Azure log level to equivalent @@ -167,7 +175,7 @@ func asSeverity(number json.Number) plog.SeverityNumber { case "Critical": return plog.SeverityNumberFatal default: - var levelNumber, _ = number.Int64() + levelNumber, _ := number.Int64() if levelNumber > 0 { return plog.SeverityNumber(levelNumber) } @@ -177,7 +185,7 @@ func asSeverity(number json.Number) plog.SeverityNumber { } func extractRawAttributes(log azureLogRecord) map[string]any { - var attrs = map[string]any{} + attrs := map[string]any{} attrs[azureCategory] = log.Category setIf(attrs, azureCorrelationID, log.CorrelationID) diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/azure/resources_to_traces.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/azure/resources_to_traces.go new file mode 100644 index 00000000000..2f92d013248 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/azure/resources_to_traces.go @@ -0,0 +1,183 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package azure // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/azure" + +import ( + "bytes" + "encoding/hex" + "net/url" + + jsoniter "github.com/json-iterator/go" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" + conventions "go.opentelemetry.io/collector/semconv/v1.13.0" + "go.uber.org/zap" +) + +const ( + // Constants for OpenTelemetry Specs + traceAzureResourceID = "azure.resource.id" +) + +type azureTracesRecords struct { + Records []azureTracesRecord `json:"records"` +} + +// Azure Trace Records based on Azure AppRequests & AppDependencies table data +// the common record schema reference: +// https://learn.microsoft.com/en-us/azure/azure-monitor/reference/tables/apprequests +// https://learn.microsoft.com/en-us/azure/azure-monitor/reference/tables/appdependencies +type azureTracesRecord struct { + Time string `json:"time"` + ResourceID string `json:"resourceId"` + ResourceGUID string `json:"ResourceGUID"` + Type string `json:"Type"` + AppRoleInstance string `json:"AppRoleInstance"` + AppRoleName string `json:"AppRoleName"` + AppVersion string `json:"AppVersion"` + ClientCity string `json:"ClientCity"` + ClientCountryOrRegion string `json:"ClientCountryOrRegion"` + ClientIP string `json:"ClientIP"` + ClientStateOrProvince string `json:"ClientStateOrProvince"` + ClientType string `json:"ClientType"` + IKey string `json:"IKey"` + OperationName string `json:"OperationName"` + OperationID string `json:"OperationId"` + ParentID string `json:"ParentId"` + SDKVersion string `json:"SDKVersion"` + Properties map[string]string `json:"Properties"` + Measurements map[string]float64 `json:"Measurements"` + SpanID string `json:"Id"` + Name string `json:"Name"` + URL string 
`json:"Url"` + Source string `json:"Source"` + Success bool `json:"Success"` + ResultCode string `json:"ResultCode"` + DurationMs float64 `json:"DurationMs"` + PerformanceBucket string `json:"PerformanceBucket"` + ItemCount float64 `json:"ItemCount"` +} + +var _ ptrace.Unmarshaler = (*TracesUnmarshaler)(nil) + +type TracesUnmarshaler struct { + Version string + Logger *zap.Logger + TimeFormats []string +} + +func (r TracesUnmarshaler) UnmarshalTraces(buf []byte) (ptrace.Traces, error) { + t := ptrace.NewTraces() + + var azureTraces azureTracesRecords + decoder := jsoniter.NewDecoder(bytes.NewReader(buf)) + err := decoder.Decode(&azureTraces) + if err != nil { + return t, err + } + + resourceTraces := t.ResourceSpans().AppendEmpty() + resource := resourceTraces.Resource() + resource.Attributes().PutStr(conventions.AttributeTelemetrySDKName, scopeName) + resource.Attributes().PutStr(conventions.AttributeTelemetrySDKLanguage, conventions.AttributeTelemetrySDKLanguageGo) + resource.Attributes().PutStr(conventions.AttributeTelemetrySDKVersion, r.Version) + resource.Attributes().PutStr(conventions.AttributeCloudProvider, conventions.AttributeCloudProviderAzure) + + scopeSpans := resourceTraces.ScopeSpans().AppendEmpty() + + spans := scopeSpans.Spans() + + resourceID := "" + for _, azureTrace := range azureTraces.Records { + if resourceID == "" && azureTrace.ResourceID != "" { + resourceID = azureTrace.ResourceID + } + + resource.Attributes().PutStr("service.name", azureTrace.AppRoleName) + + nanos, err := asTimestamp(azureTrace.Time, r.TimeFormats...) + if err != nil { + r.Logger.Warn("Invalid Timestamp", zap.String("time", azureTrace.Time)) + continue + } + + traceID, traceErr := TraceIDFromHex(azureTrace.OperationID) + if traceErr != nil { + r.Logger.Warn("Invalid TraceID", zap.String("traceID", azureTrace.OperationID)) + return t, traceErr + } + spanID, spanErr := SpanIDFromHex(azureTrace.SpanID) + if spanErr != nil { + r.Logger.Warn("Invalid SpanID", zap.String("spanID", azureTrace.SpanID)) + return t, spanErr + } + parentID, parentErr := SpanIDFromHex(azureTrace.ParentID) + if parentErr != nil { + r.Logger.Warn("Invalid ParentID", zap.String("parentID", azureTrace.ParentID)) + return t, parentErr + } + + span := spans.AppendEmpty() + span.SetTraceID(traceID) + span.SetSpanID(spanID) + span.SetParentSpanID(parentID) + + span.Attributes().PutStr("OperationName", azureTrace.OperationName) + span.Attributes().PutStr("AppRoleName", azureTrace.AppRoleName) + span.Attributes().PutStr("AppRoleInstance", azureTrace.AppRoleInstance) + span.Attributes().PutStr("Type", azureTrace.Type) + + span.Attributes().PutStr("http.url", azureTrace.URL) + + urlObj, _ := url.Parse(azureTrace.URL) + hostname := urlObj.Host + hostpath := urlObj.Path + scheme := urlObj.Scheme + + span.Attributes().PutStr("http.host", hostname) + span.Attributes().PutStr("http.path", hostpath) + span.Attributes().PutStr("http.response.status_code", azureTrace.ResultCode) + span.Attributes().PutStr("http.client_ip", azureTrace.ClientIP) + span.Attributes().PutStr("http.client_city", azureTrace.ClientCity) + span.Attributes().PutStr("http.client_type", azureTrace.ClientType) + span.Attributes().PutStr("http.client_state", azureTrace.ClientStateOrProvince) + span.Attributes().PutStr("http.client_country", azureTrace.ClientCountryOrRegion) + span.Attributes().PutStr("http.scheme", scheme) + span.Attributes().PutStr("http.method", azureTrace.Properties["HTTP Method"]) +
span.SetKind(ptrace.SpanKindServer) + span.SetName(azureTrace.Name) + span.SetStartTimestamp(nanos) + span.SetEndTimestamp(nanos + pcommon.Timestamp(azureTrace.DurationMs*1e6)) + } + + if resourceID != "" { + resourceTraces.Resource().Attributes().PutStr(traceAzureResourceID, resourceID) + } else { + r.Logger.Warn("No ResourceID Set on Traces!") + } + + return t, nil +} + +func TraceIDFromHex(hexStr string) (pcommon.TraceID, error) { + bytes, err := hex.DecodeString(hexStr) + if err != nil { + return pcommon.TraceID{}, err + } + var id pcommon.TraceID + copy(id[:], bytes) + return id, nil +} + +func SpanIDFromHex(hexStr string) (pcommon.SpanID, error) { + bytes, err := hex.DecodeString(hexStr) + if err != nil { + return pcommon.SpanID{}, err + } + var id pcommon.SpanID + copy(id[:], bytes) + return id, nil +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger/constants.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger/constants.go index 31b8caed453..42881b0e19b 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger/constants.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger/constants.go @@ -18,7 +18,5 @@ const ( // https://github.com/open-telemetry/opentelemetry-specification/blob/34b907207f3dfe1635a35c4cdac6b6ab3a495e18/specification/trace/sdk_exporters/jaeger.md#events const eventNameAttr = "event" -var ( - // errType indicates that a value is not convertible to the target type. - errType = errors.New("invalid type") -) +// errType indicates that a value is not convertible to the target type. +var errType = errors.New("invalid type") diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger/jaegerproto_to_traces.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger/jaegerproto_to_traces.go index 099e13e5ec8..a0864b24cf1 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger/jaegerproto_to_traces.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger/jaegerproto_to_traces.go @@ -14,7 +14,7 @@ import ( "github.com/jaegertracing/jaeger/model" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/ptrace" - conventions "go.opentelemetry.io/collector/semconv/v1.9.0" + conventions "go.opentelemetry.io/collector/semconv/v1.16.0" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/idutils" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/occonventions" @@ -441,9 +441,9 @@ func getTraceStateFromAttrs(attrs pcommon.Map) string { func getScope(span *model.Span) scope { il := scope{} - if libraryName, ok := getAndDeleteTag(span, conventions.OtelLibraryName); ok { + if libraryName, ok := getAndDeleteTag(span, conventions.AttributeOtelScopeName); ok { il.name = libraryName - if libraryVersion, ok := getAndDeleteTag(span, conventions.OtelLibraryVersion); ok { + if libraryVersion, ok := getAndDeleteTag(span, conventions.AttributeOtelScopeVersion); ok { il.version = libraryVersion } } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger/traces_to_jaegerproto.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger/traces_to_jaegerproto.go index ecbd780f02b..2bfff448dcb 100644 --- 
a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger/traces_to_jaegerproto.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger/traces_to_jaegerproto.go @@ -7,7 +7,7 @@ import ( "github.com/jaegertracing/jaeger/model" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/ptrace" - conventions "go.opentelemetry.io/collector/semconv/v1.9.0" + conventions "go.opentelemetry.io/collector/semconv/v1.16.0" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/idutils" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/tracetranslator" @@ -15,11 +15,11 @@ import ( // ProtoFromTraces translates internal trace data into the Jaeger Proto for GRPC. // Returns slice of translated Jaeger batches and error if translation failed. -func ProtoFromTraces(td ptrace.Traces) ([]*model.Batch, error) { +func ProtoFromTraces(td ptrace.Traces) []*model.Batch { resourceSpans := td.ResourceSpans() if resourceSpans.Len() == 0 { - return nil, nil + return nil } batches := make([]*model.Batch, 0, resourceSpans.Len()) @@ -31,7 +31,7 @@ func ProtoFromTraces(td ptrace.Traces) ([]*model.Batch, error) { } } - return batches, nil + return batches } func resourceSpansToJaegerProto(rs ptrace.ResourceSpans) *model.Batch { @@ -90,7 +90,6 @@ func resourceToJaegerProtoProcess(resource pcommon.Resource) *model.Process { tags := make([]model.KeyValue, 0, attrsCount) process.Tags = appendTagsFromResourceAttributes(tags, attrs) return process - } func appendTagsFromResourceAttributes(dest []model.KeyValue, attrs pcommon.Map) []model.KeyValue { @@ -355,7 +354,6 @@ func getErrorTagFromStatusCode(statusCode ptrace.StatusCode) (model.KeyValue, bo }, true } return model.KeyValue{}, false - } func getTagFromStatusMsg(statusMsg string) (model.KeyValue, bool) { @@ -388,7 +386,7 @@ func getTagsFromInstrumentationLibrary(il pcommon.InstrumentationScope) ([]model var keyValues []model.KeyValue if ilName := il.Name(); ilName != "" { kv := model.KeyValue{ - Key: conventions.OtelLibraryName, + Key: conventions.AttributeOtelScopeName, VStr: ilName, VType: model.ValueType_STRING, } @@ -396,7 +394,7 @@ func getTagsFromInstrumentationLibrary(il pcommon.InstrumentationScope) ([]model } if ilVersion := il.Version(); ilVersion != "" { kv := model.KeyValue{ - Key: conventions.OtelLibraryVersion, + Key: conventions.AttributeOtelScopeVersion, VStr: ilVersion, VType: model.ValueType_STRING, } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus/oc_to_traces.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus/oc_to_traces.go index 92c230e6f8b..188dd2a0df9 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus/oc_to_traces.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus/oc_to_traces.go @@ -12,7 +12,7 @@ import ( "go.opencensus.io/trace" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/ptrace" - conventions "go.opentelemetry.io/collector/semconv/v1.6.1" + conventions "go.opentelemetry.io/collector/semconv/v1.12.0" "google.golang.org/protobuf/types/known/wrapperspb" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/occonventions" diff --git 
a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus/traces_to_oc.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus/traces_to_oc.go index 9d6dae3841d..0cea312203c 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus/traces_to_oc.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus/traces_to_oc.go @@ -13,7 +13,7 @@ import ( "go.opencensus.io/trace" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/ptrace" - conventions "go.opentelemetry.io/collector/semconv/v1.6.1" + conventions "go.opentelemetry.io/collector/semconv/v1.12.0" "google.golang.org/protobuf/types/known/wrapperspb" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/occonventions" @@ -159,7 +159,6 @@ func spanKindToOCAttribute(kind ptrace.SpanKind) *octrace.AttributeValue { case ptrace.SpanKindServer: // explicitly handled as SpanKind case ptrace.SpanKindClient: // explicitly handled as SpanKind default: - } if string(ocKind) == "" { diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin/zipkinv1/json.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin/zipkinv1/json.go index ee072550991..b45e7b18878 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin/zipkinv1/json.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin/zipkinv1/json.go @@ -167,7 +167,6 @@ func jsonBinAnnotationsToSpanAttributes(span ptrace.Span, binAnnotations []*bina sMapper := &statusMapper{} var localComponent string for _, binAnnotation := range binAnnotations { - if binAnnotation.Endpoint != nil && binAnnotation.Endpoint.ServiceName != "" { fallbackServiceName = binAnnotation.Endpoint.ServiceName } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin/zipkinv2/from_translator.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin/zipkinv2/from_translator.go index 5358aee8c38..29d3ca6fa80 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin/zipkinv2/from_translator.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin/zipkinv2/from_translator.go @@ -27,9 +27,7 @@ const ( spanLinkDataFormat = "%s|%s|%s|%s|%d" ) -var ( - sampled = true -) +var sampled = true // FromTranslator converts from pdata to Zipkin data model. 
type FromTranslator struct{} @@ -98,7 +96,6 @@ func spanToZipkinSpan( localServiceName string, zTags map[string]string, ) (*zipkinmodel.SpanModel, error) { - tags := aggregateSpanTags(span, zTags) zs := &zipkinmodel.SpanModel{} @@ -318,7 +315,6 @@ func zipkinEndpointFromTags( remoteEndpoint bool, redundantKeys map[string]bool, ) (endpoint *zipkinmodel.Endpoint) { - serviceName := localServiceName if peerSvc, ok := zTags[conventions.AttributePeerService]; ok && remoteEndpoint { serviceName = peerSvc diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/README.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/README.md index 0ccf62e5e79..ba1c9b575e1 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/README.md +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/README.md @@ -3,20 +3,15 @@ | Status | | | ------------- |-----------| | Stability | [alpha]: traces, metrics, logs | -| Distributions | [core], [contrib], [aws], [liatrio], [observiq], [redhat], [splunk], [sumo] | +| Distributions | [core], [contrib], [k8s] | | Warnings | [Orphaned Telemetry, Other](#warnings) | | Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Aprocessor%2Ffilter%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Aprocessor%2Ffilter) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Aprocessor%2Ffilter%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Aprocessor%2Ffilter) | | [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@TylerHelmuth](https://www.github.com/TylerHelmuth), [@boostchicken](https://www.github.com/boostchicken) | -[alpha]: https://github.com/open-telemetry/opentelemetry-collector#alpha +[alpha]: https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-stability.md#alpha [core]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol [contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib -[aws]: https://github.com/aws-observability/aws-otel-collector -[liatrio]: https://github.com/liatrio/liatrio-otel-collector -[observiq]: https://github.com/observIQ/observiq-otel-collector -[redhat]: https://github.com/os-observability/redhat-opentelemetry-collector -[splunk]: https://github.com/signalfx/splunk-otel-collector -[sumo]: https://github.com/SumoLogic/sumologic-otel-collector +[k8s]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-k8s The filterprocessor allows dropping spans, span events, metrics, datapoints, and logs from the collector. @@ -38,7 +33,7 @@ See the table below for details on each context and the fields it exposes. | `logs.log_record` | [Log](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/ottl/contexts/ottllog/README.md) | The OTTL allows the use of `and`, `or`, and `()` in conditions. 
-See [OTTL Boolean Expressions](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/ottl/README.md#boolean-expressions) for more details. +See [OTTL Boolean Expressions](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/ottl/LANGUAGE.md#boolean-expressions) for more details. For conditions that apply to the same signal, such as spans and span events, if the "higher" level telemetry matches a condition and is dropped, the "lower" level condition will not be checked. This means that if a span is dropped but a span event condition was defined, the span event condition will not be checked for that span. @@ -111,7 +106,7 @@ processors: error_mode: ignore metrics: datapoint: - - metric.name == "k8s.pod.phase" && value_int == 4 + - metric.name == "k8s.pod.phase" and value_int == 4 ``` #### Dropping non-HTTP spans @@ -149,23 +144,83 @@ In addition, the processor defines a few of its own functions: `HasAttrKeyOnDatapoint(key)` Returns `true` if the given key appears in the attribute map of any datapoint on a metric. -`key` must be a string. +`key` must be a string. You must use the `metrics.metric` context. Examples: - `HasAttrKeyOnDatapoint("http.method")` +```yaml +# Drops metrics containing the 'bad.metric' attribute key +filter/keep_good_metrics: + error_mode: ignore + metrics: + metric: + - 'HasAttrKeyOnDatapoint("bad.metric")' +``` + #### HasAttrOnDatapoint `HasAttrOnDatapoint(key, value)` Returns `true` if the given key and value appear in the attribute map of any datapoint on a metric. -`key` and `value` must both be strings. +`key` and `value` must both be strings. If the value of the attribute on the datapoint is not a string, `value` will be compared to `""`. You must use the `metrics.metric` context. Examples: - `HasAttrOnDatapoint("http.method", "GET")` +```yaml +# Drops metrics containing the 'bad.metric' attribute key and 'true' value +filter/keep_good_metrics: + error_mode: ignore + metrics: + metric: + - 'HasAttrOnDatapoint("bad.metric", "true")' +``` + +## Troubleshooting + +When using OTTL, you can enable debug logging in the collector to print out useful information, +such as whether the condition matched and the TransformContext used in the condition, to help you troubleshoot +why a condition is not behaving as you expect. This feature is very verbose, but provides an accurate +view into how OTTL views the underlying data.
+ +```yaml +receivers: + filelog: + start_at: beginning + include: [ /Users/tylerhelmuth/projects/opentelemetry-collector-contrib/local/test.log ] + + +processors: + filter: + error_mode: ignore + logs: + log_record: + - body == "test" + +exporters: + debug: + +service: + telemetry: + logs: + level: debug + pipelines: + logs: + receivers: + - filelog + processors: + - filter + exporters: + - debug +``` + +``` +2024-05-29T16:47:04.362-0600 debug ottl@v0.101.0/parser.go:338 condition evaluation result {"kind": "processor", "name": "filter", "pipeline": "logs", "condition": "body == \"test\"", "match": true, "TransformContext": {"resource": {"attributes": {}, "dropped_attribute_count": 0}, "scope": {"attributes": {}, "dropped_attribute_count": 0, "name": "", "version": ""}, "log_record": {"attributes": {"log.file.name": "test.log"}, "body": "test", "dropped_attribute_count": 0, "flags": 0, "observed_time_unix_nano": 1717022824262063000, "severity_number": 0, "severity_text": "", "span_id": "", "time_unix_nano": 0, "trace_id": ""}, "cache": {}}} +``` + ## Warnings In general, understand your data before using the filter processor. diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/documentation.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/documentation.md new file mode 100644 index 00000000000..d82c6d106bc --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/documentation.md @@ -0,0 +1,31 @@ +[comment]: <> (Code generated by mdatagen. DO NOT EDIT.) + +# filter + +## Internal Telemetry + +The following telemetry is emitted by this component. + +### otelcol_processor_filter_datapoints.filtered + +Number of metric data points dropped by the filter processor + +| Unit | Metric Type | Value Type | Monotonic | +| ---- | ----------- | ---------- | --------- | +| 1 | Sum | Int | true | + +### otelcol_processor_filter_logs.filtered + +Number of logs dropped by the filter processor + +| Unit | Metric Type | Value Type | Monotonic | +| ---- | ----------- | ---------- | --------- | +| 1 | Sum | Int | true | + +### otelcol_processor_filter_spans.filtered + +Number of spans dropped by the filter processor + +| Unit | Metric Type | Value Type | Monotonic | +| ---- | ----------- | ---------- | --------- | +| 1 | Sum | Int | true | diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/factory.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/factory.go index 894a190b2c5..22c3a443f4b 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/factory.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/factory.go @@ -36,7 +36,7 @@ func createDefaultConfig() component.Config { func createMetricsProcessor( ctx context.Context, - set processor.CreateSettings, + set processor.Settings, cfg component.Config, nextConsumer consumer.Metrics, ) (processor.Metrics, error) { @@ -44,7 +44,7 @@ func createMetricsProcessor( if err != nil { return nil, err } - return processorhelper.NewMetricsProcessor( + return processorhelper.NewMetrics( ctx, set, cfg, @@ -55,7 +55,7 @@ func createMetricsProcessor( func createLogsProcessor( ctx context.Context, - set processor.CreateSettings, + set processor.Settings, cfg component.Config, nextConsumer consumer.Logs, ) (processor.Logs, error) { @@ 
-63,7 +63,7 @@ func createLogsProcessor( if err != nil { return nil, err } - return processorhelper.NewLogsProcessor( + return processorhelper.NewLogs( ctx, set, cfg, @@ -74,7 +74,7 @@ func createLogsProcessor( func createTracesProcessor( ctx context.Context, - set processor.CreateSettings, + set processor.Settings, cfg component.Config, nextConsumer consumer.Traces, ) (processor.Traces, error) { @@ -82,7 +82,7 @@ func createTracesProcessor( if err != nil { return nil, err } - return processorhelper.NewTracesProcessor( + return processorhelper.NewTraces( ctx, set, cfg, diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/internal/metadata/generated_status.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/internal/metadata/generated_status.go index 93587e1c6d0..87b7737b797 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/internal/metadata/generated_status.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/internal/metadata/generated_status.go @@ -4,12 +4,11 @@ package metadata import ( "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/trace" ) var ( - Type = component.MustNewType("filter") + Type = component.MustNewType("filter") + ScopeName = "github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor" ) const ( @@ -17,11 +16,3 @@ const ( MetricsStability = component.StabilityLevelAlpha LogsStability = component.StabilityLevelAlpha ) - -func Meter(settings component.TelemetrySettings) metric.Meter { - return settings.MeterProvider.Meter("otelcol/filter") -} - -func Tracer(settings component.TelemetrySettings) trace.Tracer { - return settings.TracerProvider.Tracer("otelcol/filter") -} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/internal/metadata/generated_telemetry.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/internal/metadata/generated_telemetry.go new file mode 100644 index 00000000000..ea918acc418 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/internal/metadata/generated_telemetry.go @@ -0,0 +1,79 @@ +// Code generated by mdatagen. DO NOT EDIT. + +package metadata + +import ( + "errors" + + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/noop" + "go.opentelemetry.io/otel/trace" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configtelemetry" +) + +func Meter(settings component.TelemetrySettings) metric.Meter { + return settings.MeterProvider.Meter("github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor") +} + +func Tracer(settings component.TelemetrySettings) trace.Tracer { + return settings.TracerProvider.Tracer("github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor") +} + +// TelemetryBuilder provides an interface for components to report telemetry +// as defined in metadata and user config. +type TelemetryBuilder struct { + meter metric.Meter + ProcessorFilterDatapointsFiltered metric.Int64Counter + ProcessorFilterLogsFiltered metric.Int64Counter + ProcessorFilterSpansFiltered metric.Int64Counter +} + +// TelemetryBuilderOption applies changes to default builder. 
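The generated `TelemetryBuilder` shown here replaces the hand-rolled counter plumbing that the old `telemetry.go` carried. A minimal sketch of the intended call pattern, under two assumptions: the code lives inside the filterprocessor module (`internal/metadata` is not importable from outside it), and `recordDroppedLogs` is a hypothetical helper, not part of this patch:

```go
package filterprocessor

import (
	"context"

	"go.opentelemetry.io/collector/processor"

	"github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/internal/metadata"
)

// recordDroppedLogs illustrates the builder pattern: construct the builder
// once from the component's TelemetrySettings, then add to the pre-created
// instruments instead of registering counters by hand.
func recordDroppedLogs(ctx context.Context, set processor.Settings, dropped int64) error {
	tb, err := metadata.NewTelemetryBuilder(set.TelemetrySettings)
	if err != nil {
		return err
	}
	tb.ProcessorFilterLogsFiltered.Add(ctx, dropped)
	return nil
}
```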
+type TelemetryBuilderOption interface { + apply(*TelemetryBuilder) +} + +type telemetryBuilderOptionFunc func(mb *TelemetryBuilder) + +func (tbof telemetryBuilderOptionFunc) apply(mb *TelemetryBuilder) { + tbof(mb) +} + +// NewTelemetryBuilder provides a struct with methods to update all internal telemetry +// for a component +func NewTelemetryBuilder(settings component.TelemetrySettings, options ...TelemetryBuilderOption) (*TelemetryBuilder, error) { + builder := TelemetryBuilder{} + for _, op := range options { + op.apply(&builder) + } + builder.meter = Meter(settings) + var err, errs error + builder.ProcessorFilterDatapointsFiltered, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Counter( + "otelcol_processor_filter_datapoints.filtered", + metric.WithDescription("Number of metric data points dropped by the filter processor"), + metric.WithUnit("1"), + ) + errs = errors.Join(errs, err) + builder.ProcessorFilterLogsFiltered, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Counter( + "otelcol_processor_filter_logs.filtered", + metric.WithDescription("Number of logs dropped by the filter processor"), + metric.WithUnit("1"), + ) + errs = errors.Join(errs, err) + builder.ProcessorFilterSpansFiltered, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Counter( + "otelcol_processor_filter_spans.filtered", + metric.WithDescription("Number of spans dropped by the filter processor"), + metric.WithUnit("1"), + ) + errs = errors.Join(errs, err) + return &builder, errs +} + +func getLeveledMeter(meter metric.Meter, cfgLevel, srvLevel configtelemetry.Level) metric.Meter { + if cfgLevel <= srvLevel { + return meter + } + return noop.Meter{} +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/logs.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/logs.go index 421eea9c8b6..7cd8be05ec1 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/logs.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/logs.go @@ -8,6 +8,7 @@ import ( "fmt" "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pipeline" "go.opentelemetry.io/collector/processor" "go.opentelemetry.io/collector/processor/processorhelper" "go.uber.org/multierr" @@ -22,16 +23,16 @@ import ( type filterLogProcessor struct { skipExpr expr.BoolExpr[ottllog.TransformContext] - telemetry *filterProcessorTelemetry + telemetry *filterTelemetry logger *zap.Logger } -func newFilterLogsProcessor(set processor.CreateSettings, cfg *Config) (*filterLogProcessor, error) { +func newFilterLogsProcessor(set processor.Settings, cfg *Config) (*filterLogProcessor, error) { flp := &filterLogProcessor{ logger: set.Logger, } - fpt, err := newfilterProcessorTelemetry(set) + fpt, err := newFilterTelemetry(set, pipeline.SignalLogs) if err != nil { return nil, fmt.Errorf("error creating filter processor telemetry: %w", err) } @@ -78,7 +79,7 @@ func (flp *filterLogProcessor) processLogs(ctx context.Context, ld plog.Logs) (p scope := sl.Scope() lrs := sl.LogRecords() lrs.RemoveIf(func(lr plog.LogRecord) bool { - skip, err := flp.skipExpr.Eval(ctx, ottllog.NewTransformContext(lr, scope, resource)) + skip, err := flp.skipExpr.Eval(ctx, ottllog.NewTransformContext(lr, scope, resource, sl, rl)) if err != nil { errors = 
multierr.Append(errors, err) return false @@ -92,7 +93,7 @@ func (flp *filterLogProcessor) processLogs(ctx context.Context, ld plog.Logs) (p }) logCountAfterFilters := ld.LogRecordCount() - flp.telemetry.record(triggerLogsDropped, int64(logCountBeforeFilters-logCountAfterFilters)) + flp.telemetry.record(ctx, int64(logCountBeforeFilters-logCountAfterFilters)) if errors != nil { flp.logger.Error("failed processing logs", zap.Error(errors)) diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/metadata.yaml b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/metadata.yaml index 29e2e7f792c..bfccf18b937 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/metadata.yaml +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/metadata.yaml @@ -1,13 +1,36 @@ type: filter -scope_name: otelcol/filter status: class: processor stability: alpha: [traces, metrics, logs] - distributions: [core, contrib, splunk, observiq, sumo, aws, redhat, liatrio] + distributions: [core, contrib, k8s] warnings: [Orphaned Telemetry, Other] codeowners: active: [TylerHelmuth, boostchicken] tests: config: + +telemetry: + metrics: + processor_filter_datapoints.filtered: + enabled: true + description: Number of metric data points dropped by the filter processor + unit: "1" + sum: + value_type: int + monotonic: true + processor_filter_logs.filtered: + enabled: true + description: Number of logs dropped by the filter processor + unit: "1" + sum: + value_type: int + monotonic: true + processor_filter_spans.filtered: + enabled: true + description: Number of spans dropped by the filter processor + unit: "1" + sum: + value_type: int + monotonic: true diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/metrics.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/metrics.go index 10551d3dd11..63beb811e2d 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/metrics.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/metrics.go @@ -9,6 +9,7 @@ import ( "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pipeline" "go.opentelemetry.io/collector/processor" "go.opentelemetry.io/collector/processor/processorhelper" "go.uber.org/multierr" @@ -29,17 +30,17 @@ type filterMetricProcessor struct { skipResourceExpr expr.BoolExpr[ottlresource.TransformContext] skipMetricExpr expr.BoolExpr[ottlmetric.TransformContext] skipDataPointExpr expr.BoolExpr[ottldatapoint.TransformContext] - telemetry *filterProcessorTelemetry + telemetry *filterTelemetry logger *zap.Logger } -func newFilterMetricProcessor(set processor.CreateSettings, cfg *Config) (*filterMetricProcessor, error) { +func newFilterMetricProcessor(set processor.Settings, cfg *Config) (*filterMetricProcessor, error) { var err error fsp := &filterMetricProcessor{ logger: set.Logger, } - fpt, err := newfilterProcessorTelemetry(set) + fpt, err := newFilterTelemetry(set, pipeline.SignalMetrics) if err != nil { return nil, fmt.Errorf("error creating filter processor telemetry: %w", err) } @@ -122,7 +123,7 @@ func (fmp *filterMetricProcessor) processMetrics(ctx context.Context, md pmetric md.ResourceMetrics().RemoveIf(func(rmetrics pmetric.ResourceMetrics) 
bool { resource := rmetrics.Resource() if fmp.skipResourceExpr != nil { - skip, err := fmp.skipResourceExpr.Eval(ctx, ottlresource.NewTransformContext(resource)) + skip, err := fmp.skipResourceExpr.Eval(ctx, ottlresource.NewTransformContext(resource, rmetrics)) if err != nil { errors = multierr.Append(errors, err) return false @@ -135,7 +136,7 @@ func (fmp *filterMetricProcessor) processMetrics(ctx context.Context, md pmetric scope := smetrics.Scope() smetrics.Metrics().RemoveIf(func(metric pmetric.Metric) bool { if fmp.skipMetricExpr != nil { - skip, err := fmp.skipMetricExpr.Eval(ctx, ottlmetric.NewTransformContext(metric, smetrics.Metrics(), scope, resource)) + skip, err := fmp.skipMetricExpr.Eval(ctx, ottlmetric.NewTransformContext(metric, smetrics.Metrics(), scope, resource, smetrics, rmetrics)) if err != nil { errors = multierr.Append(errors, err) } @@ -173,7 +174,7 @@ func (fmp *filterMetricProcessor) processMetrics(ctx context.Context, md pmetric }) metricDataPointCountAfterFilters := md.DataPointCount() - fmp.telemetry.record(triggerMetricDataPointsDropped, int64(metricDataPointCountBeforeFilters-metricDataPointCountAfterFilters)) + fmp.telemetry.record(ctx, int64(metricDataPointCountBeforeFilters-metricDataPointCountAfterFilters)) if errors != nil { fmp.logger.Error("failed processing metrics", zap.Error(errors)) @@ -259,7 +260,7 @@ func newResExpr(mp *filterconfig.MetricMatchProperties) (expr.BoolExpr[ottlresou func (fmp *filterMetricProcessor) handleNumberDataPoints(ctx context.Context, dps pmetric.NumberDataPointSlice, metric pmetric.Metric, metrics pmetric.MetricSlice, is pcommon.InstrumentationScope, resource pcommon.Resource) error { var errors error dps.RemoveIf(func(datapoint pmetric.NumberDataPoint) bool { - skip, err := fmp.skipDataPointExpr.Eval(ctx, ottldatapoint.NewTransformContext(datapoint, metric, metrics, is, resource)) + skip, err := fmp.skipDataPointExpr.Eval(ctx, ottldatapoint.NewTransformContext(datapoint, metric, metrics, is, resource, pmetric.NewScopeMetrics(), pmetric.NewResourceMetrics())) if err != nil { errors = multierr.Append(errors, err) return false @@ -272,7 +273,7 @@ func (fmp *filterMetricProcessor) handleNumberDataPoints(ctx context.Context, dp func (fmp *filterMetricProcessor) handleHistogramDataPoints(ctx context.Context, dps pmetric.HistogramDataPointSlice, metric pmetric.Metric, metrics pmetric.MetricSlice, is pcommon.InstrumentationScope, resource pcommon.Resource) error { var errors error dps.RemoveIf(func(datapoint pmetric.HistogramDataPoint) bool { - skip, err := fmp.skipDataPointExpr.Eval(ctx, ottldatapoint.NewTransformContext(datapoint, metric, metrics, is, resource)) + skip, err := fmp.skipDataPointExpr.Eval(ctx, ottldatapoint.NewTransformContext(datapoint, metric, metrics, is, resource, pmetric.NewScopeMetrics(), pmetric.NewResourceMetrics())) if err != nil { errors = multierr.Append(errors, err) return false @@ -285,7 +286,7 @@ func (fmp *filterMetricProcessor) handleHistogramDataPoints(ctx context.Context, func (fmp *filterMetricProcessor) handleExponetialHistogramDataPoints(ctx context.Context, dps pmetric.ExponentialHistogramDataPointSlice, metric pmetric.Metric, metrics pmetric.MetricSlice, is pcommon.InstrumentationScope, resource pcommon.Resource) error { var errors error dps.RemoveIf(func(datapoint pmetric.ExponentialHistogramDataPoint) bool { - skip, err := fmp.skipDataPointExpr.Eval(ctx, ottldatapoint.NewTransformContext(datapoint, metric, metrics, is, resource)) + skip, err := fmp.skipDataPointExpr.Eval(ctx, 
ottldatapoint.NewTransformContext(datapoint, metric, metrics, is, resource, pmetric.NewScopeMetrics(), pmetric.NewResourceMetrics())) if err != nil { errors = multierr.Append(errors, err) return false @@ -298,7 +299,7 @@ func (fmp *filterMetricProcessor) handleExponetialHistogramDataPoints(ctx contex func (fmp *filterMetricProcessor) handleSummaryDataPoints(ctx context.Context, dps pmetric.SummaryDataPointSlice, metric pmetric.Metric, metrics pmetric.MetricSlice, is pcommon.InstrumentationScope, resource pcommon.Resource) error { var errors error dps.RemoveIf(func(datapoint pmetric.SummaryDataPoint) bool { - skip, err := fmp.skipDataPointExpr.Eval(ctx, ottldatapoint.NewTransformContext(datapoint, metric, metrics, is, resource)) + skip, err := fmp.skipDataPointExpr.Eval(ctx, ottldatapoint.NewTransformContext(datapoint, metric, metrics, is, resource, pmetric.NewScopeMetrics(), pmetric.NewResourceMetrics())) if err != nil { errors = multierr.Append(errors, err) return false diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/telemetry.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/telemetry.go index 9267a142475..dd21cf783ce 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/telemetry.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/telemetry.go @@ -5,84 +5,45 @@ package filterprocessor // import "github.com/open-telemetry/opentelemetry-colle import ( "context" + "fmt" + "go.opentelemetry.io/collector/pipeline" "go.opentelemetry.io/collector/processor" - "go.opentelemetry.io/collector/processor/processorhelper" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/internal/metadata" ) -type trigger int - -const ( - triggerMetricDataPointsDropped trigger = iota - triggerLogsDropped - triggerSpansDropped -) - -type filterProcessorTelemetry struct { - exportCtx context.Context - - processorAttr []attribute.KeyValue - - datapointsFiltered metric.Int64Counter - logsFiltered metric.Int64Counter - spansFiltered metric.Int64Counter +type filterTelemetry struct { + attr metric.MeasurementOption + counter metric.Int64Counter } -func newfilterProcessorTelemetry(set processor.CreateSettings) (*filterProcessorTelemetry, error) { - processorID := set.ID.String() - - fpt := &filterProcessorTelemetry{ - processorAttr: []attribute.KeyValue{attribute.String(metadata.Type.String(), processorID)}, - exportCtx: context.Background(), - } - - counter, err := metadata.Meter(set.TelemetrySettings).Int64Counter( - processorhelper.BuildCustomMetricName(metadata.Type.String(), "datapoints.filtered"), - metric.WithDescription("Number of metric data points dropped by the filter processor"), - metric.WithUnit("1"), - ) +func newFilterTelemetry(set processor.Settings, signal pipeline.Signal) (*filterTelemetry, error) { + telemetryBuilder, err := metadata.NewTelemetryBuilder(set.TelemetrySettings) if err != nil { return nil, err } - fpt.datapointsFiltered = counter - counter, err = metadata.Meter(set.TelemetrySettings).Int64Counter( - processorhelper.BuildCustomMetricName(metadata.Type.String(), "logs.filtered"), - metric.WithDescription("Number of logs dropped by the filter processor"), - metric.WithUnit("1"), - ) - if err != nil { - return nil, err - } - fpt.logsFiltered = counter - - counter, err = 
metadata.Meter(set.TelemetrySettings).Int64Counter( - processorhelper.BuildCustomMetricName(metadata.Type.String(), "spans.filtered"), - metric.WithDescription("Number of spans dropped by the filter processor"), - metric.WithUnit("1"), - ) - if err != nil { - return nil, err + var counter metric.Int64Counter + switch signal { + case pipeline.SignalMetrics: + counter = telemetryBuilder.ProcessorFilterDatapointsFiltered + case pipeline.SignalLogs: + counter = telemetryBuilder.ProcessorFilterLogsFiltered + case pipeline.SignalTraces: + counter = telemetryBuilder.ProcessorFilterSpansFiltered + default: + return nil, fmt.Errorf("unsupported signal type: %v", signal) } - fpt.spansFiltered = counter - return fpt, nil + return &filterTelemetry{ + attr: metric.WithAttributeSet(attribute.NewSet(attribute.String(metadata.Type.String(), set.ID.String()))), + counter: counter, + }, nil } -func (fpt *filterProcessorTelemetry) record(trigger trigger, dropped int64) { - var triggerMeasure metric.Int64Counter - switch trigger { - case triggerMetricDataPointsDropped: - triggerMeasure = fpt.datapointsFiltered - case triggerLogsDropped: - triggerMeasure = fpt.logsFiltered - case triggerSpansDropped: - triggerMeasure = fpt.spansFiltered - } - - triggerMeasure.Add(fpt.exportCtx, dropped, metric.WithAttributes(fpt.processorAttr...)) +func (fpt *filterTelemetry) record(ctx context.Context, dropped int64) { + fpt.counter.Add(ctx, dropped, fpt.attr) } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/traces.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/traces.go index ff64e85470d..3b444cd5897 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/traces.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/traces.go @@ -8,6 +8,7 @@ import ( "fmt" "go.opentelemetry.io/collector/pdata/ptrace" + "go.opentelemetry.io/collector/pipeline" "go.opentelemetry.io/collector/processor" "go.opentelemetry.io/collector/processor/processorhelper" "go.uber.org/multierr" @@ -23,17 +24,17 @@ import ( type filterSpanProcessor struct { skipSpanExpr expr.BoolExpr[ottlspan.TransformContext] skipSpanEventExpr expr.BoolExpr[ottlspanevent.TransformContext] - telemetry *filterProcessorTelemetry + telemetry *filterTelemetry logger *zap.Logger } -func newFilterSpansProcessor(set processor.CreateSettings, cfg *Config) (*filterSpanProcessor, error) { +func newFilterSpansProcessor(set processor.Settings, cfg *Config) (*filterSpanProcessor, error) { var err error fsp := &filterSpanProcessor{ logger: set.Logger, } - fpt, err := newfilterProcessorTelemetry(set) + fpt, err := newFilterTelemetry(set, pipeline.SignalTraces) if err != nil { return nil, fmt.Errorf("error creating filter processor telemetry: %w", err) } @@ -93,7 +94,7 @@ func (fsp *filterSpanProcessor) processTraces(ctx context.Context, td ptrace.Tra scope := ss.Scope() ss.Spans().RemoveIf(func(span ptrace.Span) bool { if fsp.skipSpanExpr != nil { - skip, err := fsp.skipSpanExpr.Eval(ctx, ottlspan.NewTransformContext(span, scope, resource)) + skip, err := fsp.skipSpanExpr.Eval(ctx, ottlspan.NewTransformContext(span, scope, resource, ss, rs)) if err != nil { errors = multierr.Append(errors, err) return false @@ -104,7 +105,7 @@ func (fsp *filterSpanProcessor) processTraces(ctx context.Context, td ptrace.Tra } if fsp.skipSpanEventExpr != nil { span.Events().RemoveIf(func(spanEvent 
ptrace.SpanEvent) bool { - skip, err := fsp.skipSpanEventExpr.Eval(ctx, ottlspanevent.NewTransformContext(spanEvent, span, scope, resource)) + skip, err := fsp.skipSpanEventExpr.Eval(ctx, ottlspanevent.NewTransformContext(spanEvent, span, scope, resource, ss, rs)) if err != nil { errors = multierr.Append(errors, err) return false @@ -120,7 +121,7 @@ func (fsp *filterSpanProcessor) processTraces(ctx context.Context, td ptrace.Tra }) spanCountAfterFilters := td.SpanCount() - fsp.telemetry.record(triggerSpansDropped, int64(spanCountBeforeFilters-spanCountAfterFilters)) + fsp.telemetry.record(ctx, int64(spanCountBeforeFilters-spanCountAfterFilters)) if errors != nil { fsp.logger.Error("failed processing traces", zap.Error(errors)) diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/README.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/README.md index f94995852db..6039fefa590 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/README.md +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/README.md @@ -4,13 +4,14 @@ | Status | | | ------------- |-----------| | Stability | [beta]: traces | -| Distributions | [core], [contrib] | +| Distributions | [core], [contrib], [k8s] | | Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Areceiver%2Fjaeger%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Areceiver%2Fjaeger) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Areceiver%2Fjaeger%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Areceiver%2Fjaeger) | | [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@yurishkuro](https://www.github.com/yurishkuro) | -[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta +[beta]: https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-stability.md#beta [core]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol [contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib +[k8s]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-k8s Receives trace data in [Jaeger](https://www.jaegertracing.io/) format. @@ -22,13 +23,12 @@ named under the `protocols` object for the jaeger receiver to start. The below protocols are supported, each supports an optional `endpoint` object configuration parameter. 
-- `grpc` (default `endpoint` = 0.0.0.0:14250) -- `thrift_binary` (default `endpoint` = 0.0.0.0:6832) -- `thrift_compact` (default `endpoint` = 0.0.0.0:6831) -- `thrift_http` (default `endpoint` = 0.0.0.0:14268) +- `grpc` (default `endpoint` = localhost:14250) +- `thrift_binary` (default `endpoint` = localhost:6832) +- `thrift_compact` (default `endpoint` = localhost:6831) +- `thrift_http` (default `endpoint` = localhost:14268) -The `component.UseLocalHostAsDefaultHost` feature gate changes these endpoints to localhost:14250, localhost:6832, -localhost:6831 and localhost:14268. This will become the default in a future release. +See our [security best practices doc](https://opentelemetry.io/docs/security/config-best-practices/#protect-against-denial-of-service-attacks) to understand how to set the endpoint in different environments. Examples: diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/config.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/config.go index 4ed9dda665c..bfa43c09e3c 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/config.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/config.go @@ -4,6 +4,7 @@ package jaegerreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver" import ( + "errors" "fmt" "net" "strconv" @@ -72,8 +73,10 @@ type Config struct { RemoteSampling *RemoteSamplingConfig `mapstructure:"remote_sampling"` } -var _ component.Config = (*Config)(nil) -var _ confmap.Unmarshaler = (*Config)(nil) +var ( + _ component.Config = (*Config)(nil) + _ confmap.Unmarshaler = (*Config)(nil) +) // Validate checks the receiver configuration is valid func (cfg *Config) Validate() error { @@ -81,7 +84,7 @@ func (cfg *Config) Validate() error { cfg.ThriftHTTP == nil && cfg.ThriftBinary == nil && cfg.ThriftCompact == nil { - return fmt.Errorf("must specify at least one protocol when using the Jaeger receiver") + return errors.New("must specify at least one protocol when using the Jaeger receiver") } if cfg.GRPC != nil { @@ -110,7 +113,7 @@ func (cfg *Config) Validate() error { if cfg.RemoteSampling != nil { if disableJaegerReceiverRemoteSampling.IsEnabled() { - return fmt.Errorf("remote sampling config detected in the Jaeger receiver; use the `jaegerremotesampling` extension instead") + return errors.New("remote sampling config detected in the Jaeger receiver; use the `jaegerremotesampling` extension instead") } } @@ -120,7 +123,7 @@ func (cfg *Config) Validate() error { // Unmarshal a config.Parser into the config struct. 
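With `Validate` now returning static errors, a quick way to see the "at least one protocol" rule in action is to strip the default config down; a sketch assuming the exported `Config` and `Protocols` types behave as in the hunks above:

```go
package main

import (
	"fmt"

	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver"
)

func main() {
	factory := jaegerreceiver.NewFactory()
	cfg := factory.CreateDefaultConfig().(*jaegerreceiver.Config)

	// The default enables all four protocols on localhost, so this prints nil.
	fmt.Println("default config:", cfg.Validate())

	// With every protocol removed, Validate returns the error shown above.
	cfg.Protocols = jaegerreceiver.Protocols{}
	fmt.Println("no protocols:", cfg.Validate())
}
```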
func (cfg *Config) Unmarshal(componentParser *confmap.Conf) error { if componentParser == nil || len(componentParser.AllKeys()) == 0 { - return fmt.Errorf("empty config for Jaeger receiver") + return errors.New("empty config for Jaeger receiver") } // UnmarshalExact will not set struct properties to nil even if no key is provided, @@ -163,7 +166,7 @@ func checkPortFromEndpoint(endpoint string) error { return fmt.Errorf("endpoint port is not a number: %w", err) } if port < 1 || port > 65535 { - return fmt.Errorf("port number must be between 1 and 65535") + return errors.New("port number must be between 1 and 65535") } return nil } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/factory.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/factory.go index 8009ed1b2d9..7519e9148fb 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/factory.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/factory.go @@ -7,7 +7,6 @@ package jaegerreceiver // import "github.com/open-telemetry/opentelemetry-collec import ( "context" - "sync" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config/configgrpc" @@ -16,9 +15,8 @@ import ( "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/featuregate" "go.opentelemetry.io/collector/receiver" - "go.uber.org/zap" - "github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/localhostgate" + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/testutil" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/internal/metadata" ) @@ -42,15 +40,6 @@ var disableJaegerReceiverRemoteSampling = featuregate.GlobalRegistry().MustRegis featuregate.WithRegisterDescription("When enabled, the Jaeger Receiver will fail to start when it is configured with remote_sampling config. When disabled, the receiver will start and the remote_sampling config will be no-op."), ) -var once sync.Once - -func logDeprecation(logger *zap.Logger) { - once.Do(func() { - logger.Warn("jaeger receiver will deprecate Thrift-gen and replace it with Proto-gen to be compatbible to jaeger 1.42.0 and higher. See https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/18485 for more details.") - - }) -} - // NewFactory creates a new Jaeger receiver factory. 
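For clarity, `checkPortFromEndpoint`'s rules restated as a self-contained snippet (a re-derivation for illustration, not the vendored function itself):

```go
package main

import (
	"errors"
	"fmt"
	"net"
	"strconv"
)

// checkPort mirrors the validation above: the endpoint must be host:port,
// the port must be numeric, and it must fall in 1..65535.
func checkPort(endpoint string) error {
	_, portStr, err := net.SplitHostPort(endpoint)
	if err != nil {
		return fmt.Errorf("endpoint is not formatted correctly: %w", err)
	}
	port, err := strconv.Atoi(portStr)
	if err != nil {
		return fmt.Errorf("endpoint port is not a number: %w", err)
	}
	if port < 1 || port > 65535 {
		return errors.New("port number must be between 1 and 65535")
	}
	return nil
}

func main() {
	fmt.Println(checkPort("localhost:14250")) // <nil>
	fmt.Println(checkPort("localhost:0"))     // port number must be between 1 and 65535
}
```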
func NewFactory() receiver.Factory { return receiver.NewFactory( @@ -65,19 +54,19 @@ func createDefaultConfig() component.Config { Protocols: Protocols{ GRPC: &configgrpc.ServerConfig{ NetAddr: confignet.AddrConfig{ - Endpoint: localhostgate.EndpointForPort(defaultGRPCPort), + Endpoint: testutil.EndpointForPort(defaultGRPCPort), Transport: confignet.TransportTypeTCP, }, }, ThriftHTTP: &confighttp.ServerConfig{ - Endpoint: localhostgate.EndpointForPort(defaultHTTPPort), + Endpoint: testutil.EndpointForPort(defaultHTTPPort), }, ThriftBinary: &ProtocolUDP{ - Endpoint: localhostgate.EndpointForPort(defaultThriftBinaryPort), + Endpoint: testutil.EndpointForPort(defaultThriftBinaryPort), ServerConfigUDP: defaultServerConfigUDP(), }, ThriftCompact: &ProtocolUDP{ - Endpoint: localhostgate.EndpointForPort(defaultThriftCompactPort), + Endpoint: testutil.EndpointForPort(defaultThriftCompactPort), ServerConfigUDP: defaultServerConfigUDP(), }, }, @@ -87,12 +76,10 @@ func createDefaultConfig() component.Config { // createTracesReceiver creates a trace receiver based on provided config. func createTracesReceiver( _ context.Context, - set receiver.CreateSettings, + set receiver.Settings, cfg component.Config, nextConsumer consumer.Traces, ) (receiver.Traces, error) { - logDeprecation(set.Logger) - // Convert settings in the source config to configuration struct // that Jaeger receiver understands. // Error handling for the conversion is done in the Validate function from the Config object itself. diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/internal/metadata/generated_status.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/internal/metadata/generated_status.go index b9e247584ad..0d5b282e3d8 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/internal/metadata/generated_status.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/internal/metadata/generated_status.go @@ -7,7 +7,8 @@ import ( ) var ( - Type = component.MustNewType("jaeger") + Type = component.MustNewType("jaeger") + ScopeName = "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver" ) const ( diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/internal/metadata/generated_telemetry.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/internal/metadata/generated_telemetry.go deleted file mode 100644 index ec58059c781..00000000000 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/internal/metadata/generated_telemetry.go +++ /dev/null @@ -1,17 +0,0 @@ -// Code generated by mdatagen. DO NOT EDIT. 
- -package metadata - -import ( - "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/trace" -) - -func Meter(settings component.TelemetrySettings) metric.Meter { - return settings.MeterProvider.Meter("otelcol/jaegerreceiver") -} - -func Tracer(settings component.TelemetrySettings) trace.Tracer { - return settings.TracerProvider.Tracer("otelcol/jaegerreceiver") -} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/metadata.yaml b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/metadata.yaml index 0fd95be9d3a..8ad64a435fd 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/metadata.yaml +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/metadata.yaml @@ -1,13 +1,10 @@ type: jaeger -scope_name: otelcol/jaegerreceiver status: class: receiver stability: beta: [traces] - distributions: - - core - - contrib + distributions: [core, contrib, k8s] codeowners: active: [yurishkuro] diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/trace_receiver.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/trace_receiver.go index 764b55312dd..3e3b44c83e7 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/trace_receiver.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/trace_receiver.go @@ -28,6 +28,7 @@ import ( "github.com/jaegertracing/jaeger/thrift-gen/jaeger" "github.com/jaegertracing/jaeger/thrift-gen/zipkincore" "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componentstatus" "go.opentelemetry.io/collector/config/configgrpc" "go.opentelemetry.io/collector/config/confighttp" "go.opentelemetry.io/collector/consumer" @@ -66,7 +67,7 @@ type jReceiver struct { goroutines sync.WaitGroup - settings receiver.CreateSettings + settings receiver.Settings grpcObsrecv *receiverhelper.ObsReport httpObsrecv *receiverhelper.ObsReport @@ -82,12 +83,10 @@ const ( protobufFormat = "protobuf" ) -var ( - acceptedThriftFormats = map[string]struct{}{ - "application/x-thrift": {}, - "application/vnd.apache.thrift.binary": {}, - } -) +var acceptedThriftFormats = map[string]struct{}{ + "application/x-thrift": {}, + "application/vnd.apache.thrift.binary": {}, +} // newJaegerReceiver creates a TracesReceiver that receives traffic as a Jaeger collector, and // also as a Jaeger agent. 
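The `componentstatus` import added above is how fatal errors escape server goroutines now that the settings struct no longer carries `ReportStatus`; the call sites appear in the hunks below. A compressed sketch of the pattern, with the component scaffolding (the `server` type and its field) assumed purely for illustration:

```go
package main

import (
	"context"
	"errors"
	"net/http"

	"go.opentelemetry.io/collector/component"
	"go.opentelemetry.io/collector/component/componentstatus"
)

// server stands in for a receiver that owns a long-running HTTP listener.
type server struct {
	srv *http.Server
}

// Start launches the listener; Shutdown and the rest of the component
// interface are omitted to keep the sketch short.
func (s *server) Start(_ context.Context, host component.Host) error {
	go func() {
		// ListenAndServe blocks; anything but a clean shutdown is fatal,
		// and the host (not the settings) is now the reporting channel.
		if err := s.srv.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) {
			componentstatus.ReportStatus(host, componentstatus.NewFatalErrorEvent(err))
		}
	}()
	return nil
}
```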
@@ -95,7 +94,7 @@ func newJaegerReceiver( id component.ID, config *configuration, nextConsumer consumer.Traces, - set receiver.CreateSettings, + set receiver.Settings, ) (*jReceiver, error) { grpcObsrecv, err := receiverhelper.NewObsReport(receiverhelper.ObsReportSettings{ ReceiverID: id, @@ -125,7 +124,7 @@ func newJaegerReceiver( } func (jr *jReceiver) Start(ctx context.Context, host component.Host) error { - if err := jr.startAgent(); err != nil { + if err := jr.startAgent(host); err != nil { return err } @@ -168,11 +167,13 @@ func consumeTraces(ctx context.Context, batch *jaeger.Batch, consumer consumer.T return len(batch.Spans), consumer.ConsumeTraces(ctx, td) } -var _ agent.Agent = (*agentHandler)(nil) -var _ api_v2.CollectorServiceServer = (*jReceiver)(nil) -var _ configmanager.ClientConfigManager = (*notImplementedConfigManager)(nil) +var ( + _ agent.Agent = (*agentHandler)(nil) + _ api_v2.CollectorServiceServer = (*jReceiver)(nil) + _ configmanager.ClientConfigManager = (*notImplementedConfigManager)(nil) +) -var errNotImplemented = fmt.Errorf("not implemented") +var errNotImplemented = errors.New("not implemented") type notImplementedConfigManager struct{} @@ -222,7 +223,7 @@ func (jr *jReceiver) PostSpans(ctx context.Context, r *api_v2.PostSpansRequest) return &api_v2.PostSpansResponse{}, nil } -func (jr *jReceiver) startAgent() error { +func (jr *jReceiver) startAgent(host component.Host) error { if jr.config == nil { return nil } @@ -283,7 +284,7 @@ func (jr *jReceiver) startAgent() error { go func() { defer jr.goroutines.Done() if err := jr.agentServer.ListenAndServe(); !errors.Is(err, http.ErrServerClosed) && err != nil { - jr.settings.ReportStatus(component.NewFatalErrorEvent(fmt.Errorf("jaeger agent server error: %w", err))) + componentstatus.ReportStatus(host, componentstatus.NewFatalErrorEvent(fmt.Errorf("jaeger agent server error: %w", err))) } }() } @@ -391,7 +392,7 @@ func (jr *jReceiver) startCollector(ctx context.Context, host component.Host) er go func() { defer jr.goroutines.Done() if errHTTP := jr.collectorServer.Serve(cln); !errors.Is(errHTTP, http.ErrServerClosed) && errHTTP != nil { - jr.settings.ReportStatus(component.NewFatalErrorEvent(errHTTP)) + componentstatus.ReportStatus(host, componentstatus.NewFatalErrorEvent(errHTTP)) } }() } @@ -414,7 +415,7 @@ func (jr *jReceiver) startCollector(ctx context.Context, host component.Host) er go func() { defer jr.goroutines.Done() if errGrpc := jr.grpc.Serve(ln); !errors.Is(errGrpc, grpc.ErrServerStopped) && errGrpc != nil { - jr.settings.ReportStatus(component.NewFatalErrorEvent(errGrpc)) + componentstatus.ReportStatus(host, componentstatus.NewFatalErrorEvent(errGrpc)) } }() } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/README.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/README.md index 6432ee25658..b7b25c4eadb 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/README.md +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/README.md @@ -8,7 +8,7 @@ | Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Areceiver%2Fkafka%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Areceiver%2Fkafka) [![Closed 
issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Areceiver%2Fkafka%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Areceiver%2Fkafka) | | [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@pavolloffay](https://www.github.com/pavolloffay), [@MovieStoreGuy](https://www.github.com/MovieStoreGuy) | -[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta +[beta]: https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-stability.md#beta [core]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol [contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib @@ -27,9 +27,11 @@ The following settings can be optionally configured: - `brokers` (default = localhost:9092): The list of kafka brokers - `resolve_canonical_bootstrap_servers_only` (default = false): Whether to resolve then reverse-lookup broker IPs during startup -- `topic` (default = otlp_spans for traces, otlp_metrics for metrics, otlp_logs for logs): The name of the kafka topic to read from -- `encoding` (default = otlp_proto): The encoding of the payload received from kafka. Available encodings: +- `topic` (default = otlp_spans for traces, otlp_metrics for metrics, otlp_logs for logs): The name of the kafka topic to read from. + Only one telemetry type may be used for a given topic. +- `encoding` (default = otlp_proto): The encoding of the payload received from kafka. Supports encoding extensions. Tries to load an encoding extension and falls back to internal encodings if no extension was loaded. Available internal encodings: - `otlp_proto`: the payload is deserialized to `ExportTraceServiceRequest`, `ExportLogsServiceRequest` or `ExportMetricsServiceRequest` respectively. + - `otlp_json`: the payload is deserialized to `ExportTraceServiceRequest` `ExportLogsServiceRequest` or `ExportMetricsServiceRequest` respectively using JSON encoding. - `jaeger_proto`: the payload is deserialized to a single Jaeger proto `Span`. - `jaeger_json`: the payload is deserialized to a single Jaeger JSON Span using `jsonpb`. - `zipkin_proto`: the payload is deserialized into a list of Zipkin proto spans. @@ -42,6 +44,11 @@ The following settings can be optionally configured: - `group_id` (default = otel-collector): The consumer group that receiver will be consuming messages from - `client_id` (default = otel-collector): The consumer client ID that receiver will use - `initial_offset` (default = latest): The initial offset to use if no offset was previously committed. Must be `latest` or `earliest`. +- `session_timeout` (default = `10s`): The request timeout for detecting client failures when using Kafka’s group management facilities. +- `heartbeat_interval` (default = `3s`): The expected time between heartbeats to the consumer coordinator when using Kafka’s group management facilities. +- `min_fetch_size` (default = `1`): The minimum number of message bytes to fetch in a request, defaults to 1 byte. +- `default_fetch_size` (default = `1048576`): The default number of message bytes to fetch in a request, defaults to 1MB. +- `max_fetch_size` (default = `0`): The maximum number of message bytes to fetch in a request, defaults to unlimited. 
- `auth` - `plain_text` - `username`: The username to use. @@ -49,8 +56,8 @@ The following settings can be optionally configured: - `sasl` - `username`: The username to use. - `password`: The password to use - - `mechanism`: The sasl mechanism to use (SCRAM-SHA-256, SCRAM-SHA-512, AWS_MSK_IAM or PLAIN) - - `aws_msk.region`: AWS Region in case of AWS_MSK_IAM mechanism + - `mechanism`: The sasl mechanism to use (SCRAM-SHA-256, SCRAM-SHA-512, AWS_MSK_IAM, AWS_MSK_IAM_OAUTHBEARER or PLAIN) + - `aws_msk.region`: AWS Region in case of AWS_MSK_IAM or AWS_MSK_IAM_OAUTHBEARER mechanism - `aws_msk.broker_addr`: MSK Broker address in case of AWS_MSK_IAM mechanism - `tls` - `ca_file`: path to the CA cert. For a client this verifies the server certificate. Should @@ -71,6 +78,7 @@ The following settings can be optionally configured: - `password`: The Kerberos password used for authenticate with KDC - `config_file`: Path to Kerberos configuration. i.e /etc/krb5.conf - `keytab_file`: Path to keytab file. i.e /etc/security/kafka.keytab + - `disable_fast_negotiation`: Disable PA-FX-FAST negotiation (Pre-Authentication Framework - Fast). Some common Kerberos implementations do not support PA-FX-FAST negotiation. This is set to `false` by default. - `metadata` - `full` (default = true): Whether to maintain a full set of metadata. When disabled, the client does not make the initial request to broker at the diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/config.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/config.go index 2f2eff8e6d5..5b25c7c7c5f 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/config.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/config.go @@ -48,6 +48,10 @@ type Config struct { ResolveCanonicalBootstrapServersOnly bool `mapstructure:"resolve_canonical_bootstrap_servers_only"` // Kafka protocol version ProtocolVersion string `mapstructure:"protocol_version"` + // Session interval for the Kafka consumer + SessionTimeout time.Duration `mapstructure:"session_timeout"` + // Heartbeat interval for the Kafka consumer + HeartbeatInterval time.Duration `mapstructure:"heartbeat_interval"` // The name of the kafka topic to consume from (default "otlp_spans" for traces, "otlp_metrics" for metrics, "otlp_logs" for logs) Topic string `mapstructure:"topic"` // Encoding of the messages (default "otlp_proto") @@ -74,6 +78,13 @@ type Config struct { // Extract headers from kafka records HeaderExtraction HeaderExtraction `mapstructure:"header_extraction"` + + // The minimum bytes per fetch from Kafka (default "1") + MinFetchSize int32 `mapstructure:"min_fetch_size"` + // The default bytes per fetch from Kafka (default "1048576") + DefaultFetchSize int32 `mapstructure:"default_fetch_size"` + // The maximum bytes per fetch from Kafka (default "0", no limit) + MaxFetchSize int32 `mapstructure:"max_fetch_size"` } const ( diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/documentation.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/documentation.md new file mode 100644 index 00000000000..7f65b8ef691 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/documentation.md @@ -0,0 +1,71 @@ +[comment]: <> (Code generated by mdatagen. DO NOT EDIT.) 
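The new consumer-tuning fields documented in the README list above land in `Config` here. A sketch of setting them programmatically; the chosen values are illustrative only:

```go
package main

import (
	"time"

	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver"
)

func main() {
	factory := kafkareceiver.NewFactory()
	cfg := factory.CreateDefaultConfig().(*kafkareceiver.Config)

	// Group-management tuning (defaults: 10s session, 3s heartbeat).
	cfg.SessionTimeout = 30 * time.Second
	cfg.HeartbeatInterval = 10 * time.Second

	// Fetch sizing (defaults mirror sarama: 1 byte min, 1 MiB default, 0 = unlimited).
	cfg.MinFetchSize = 1
	cfg.DefaultFetchSize = 1 << 20
	cfg.MaxFetchSize = 0

	_ = cfg // hand cfg to the factory's create*Receiver functions in real use
}
```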
+ +# kafka + +## Internal Telemetry + +The following telemetry is emitted by this component. + +### otelcol_kafka_receiver_current_offset + +Current message offset + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Int | + +### otelcol_kafka_receiver_messages + +Number of received messages + +| Unit | Metric Type | Value Type | Monotonic | +| ---- | ----------- | ---------- | --------- | +| 1 | Sum | Int | true | + +### otelcol_kafka_receiver_offset_lag + +Current offset lag + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Int | + +### otelcol_kafka_receiver_partition_close + +Number of finished partitions + +| Unit | Metric Type | Value Type | Monotonic | +| ---- | ----------- | ---------- | --------- | +| 1 | Sum | Int | true | + +### otelcol_kafka_receiver_partition_start + +Number of started partitions + +| Unit | Metric Type | Value Type | Monotonic | +| ---- | ----------- | ---------- | --------- | +| 1 | Sum | Int | true | + +### otelcol_kafka_receiver_unmarshal_failed_log_records + +Number of log records failed to be unmarshaled + +| Unit | Metric Type | Value Type | Monotonic | +| ---- | ----------- | ---------- | --------- | +| 1 | Sum | Int | true | + +### otelcol_kafka_receiver_unmarshal_failed_metric_points + +Number of metric points failed to be unmarshaled + +| Unit | Metric Type | Value Type | Monotonic | +| ---- | ----------- | ---------- | --------- | +| 1 | Sum | Int | true | + +### otelcol_kafka_receiver_unmarshal_failed_spans + +Number of spans failed to be unmarshaled + +| Unit | Metric Type | Value Type | Monotonic | +| ---- | ----------- | ---------- | --------- | +| 1 | Sum | Int | true | diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/factory.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/factory.go index 7552f0c4653..be413ec507c 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/factory.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/factory.go @@ -5,11 +5,10 @@ package kafkareceiver // import "github.com/open-telemetry/opentelemetry-collect import ( "context" - "fmt" + "errors" "strings" "time" - "go.opencensus.io/stats/view" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/receiver" @@ -19,14 +18,16 @@ import ( ) const ( - defaultTracesTopic = "otlp_spans" - defaultMetricsTopic = "otlp_metrics" - defaultLogsTopic = "otlp_logs" - defaultEncoding = "otlp_proto" - defaultBroker = "localhost:9092" - defaultClientID = "otel-collector" - defaultGroupID = defaultClientID - defaultInitialOffset = offsetLatest + defaultTracesTopic = "otlp_spans" + defaultMetricsTopic = "otlp_metrics" + defaultLogsTopic = "otlp_logs" + defaultEncoding = "otlp_proto" + defaultBroker = "localhost:9092" + defaultClientID = "otel-collector" + defaultGroupID = defaultClientID + defaultInitialOffset = offsetLatest + defaultSessionTimeout = 10 * time.Second + defaultHeartbeatInterval = 3 * time.Second // default from sarama.NewConfig() defaultMetadataRetryMax = 3 @@ -39,49 +40,23 @@ const ( defaultAutoCommitEnable = true // default from sarama.NewConfig() defaultAutoCommitInterval = 1 * time.Second + + // default from sarama.NewConfig() + defaultMinFetchSize = int32(1) + // default from sarama.NewConfig() + defaultDefaultFetchSize = int32(1048576) + // 
default from sarama.NewConfig() + defaultMaxFetchSize = int32(0) ) -var errUnrecognizedEncoding = fmt.Errorf("unrecognized encoding") +var errUnrecognizedEncoding = errors.New("unrecognized encoding") // FactoryOption applies changes to kafkaExporterFactory. type FactoryOption func(factory *kafkaReceiverFactory) -// withTracesUnmarshalers adds Unmarshalers. -func withTracesUnmarshalers(tracesUnmarshalers ...TracesUnmarshaler) FactoryOption { - return func(factory *kafkaReceiverFactory) { - for _, unmarshaler := range tracesUnmarshalers { - factory.tracesUnmarshalers[unmarshaler.Encoding()] = unmarshaler - } - } -} - -// withMetricsUnmarshalers adds MetricsUnmarshalers. -func withMetricsUnmarshalers(metricsUnmarshalers ...MetricsUnmarshaler) FactoryOption { - return func(factory *kafkaReceiverFactory) { - for _, unmarshaler := range metricsUnmarshalers { - factory.metricsUnmarshalers[unmarshaler.Encoding()] = unmarshaler - } - } -} - -// withLogsUnmarshalers adds LogsUnmarshalers. -func withLogsUnmarshalers(logsUnmarshalers ...LogsUnmarshaler) FactoryOption { - return func(factory *kafkaReceiverFactory) { - for _, unmarshaler := range logsUnmarshalers { - factory.logsUnmarshalers[unmarshaler.Encoding()] = unmarshaler - } - } -} - // NewFactory creates Kafka receiver factory. func NewFactory(options ...FactoryOption) receiver.Factory { - _ = view.Register(metricViews()...) - - f := &kafkaReceiverFactory{ - tracesUnmarshalers: map[string]TracesUnmarshaler{}, - metricsUnmarshalers: map[string]MetricsUnmarshaler{}, - logsUnmarshalers: map[string]LogsUnmarshaler{}, - } + f := &kafkaReceiverFactory{} for _, o := range options { o(f) } @@ -96,11 +71,13 @@ func NewFactory(options ...FactoryOption) receiver.Factory { func createDefaultConfig() component.Config { return &Config{ - Encoding: defaultEncoding, - Brokers: []string{defaultBroker}, - ClientID: defaultClientID, - GroupID: defaultGroupID, - InitialOffset: defaultInitialOffset, + Encoding: defaultEncoding, + Brokers: []string{defaultBroker}, + ClientID: defaultClientID, + GroupID: defaultGroupID, + InitialOffset: defaultInitialOffset, + SessionTimeout: defaultSessionTimeout, + HeartbeatInterval: defaultHeartbeatInterval, Metadata: kafkaexporter.Metadata{ Full: defaultMetadataFull, Retry: kafkaexporter.MetadataRetry{ @@ -119,35 +96,26 @@ func createDefaultConfig() component.Config { HeaderExtraction: HeaderExtraction{ ExtractHeaders: false, }, + MinFetchSize: defaultMinFetchSize, + DefaultFetchSize: defaultDefaultFetchSize, + MaxFetchSize: defaultMaxFetchSize, } } -type kafkaReceiverFactory struct { - tracesUnmarshalers map[string]TracesUnmarshaler - metricsUnmarshalers map[string]MetricsUnmarshaler - logsUnmarshalers map[string]LogsUnmarshaler -} +type kafkaReceiverFactory struct{} func (f *kafkaReceiverFactory) createTracesReceiver( _ context.Context, - set receiver.CreateSettings, + set receiver.Settings, cfg component.Config, nextConsumer consumer.Traces, ) (receiver.Traces, error) { - for encoding, unmarshal := range defaultTracesUnmarshalers() { - f.tracesUnmarshalers[encoding] = unmarshal - } - oCfg := *(cfg.(*Config)) if oCfg.Topic == "" { oCfg.Topic = defaultTracesTopic } - unmarshaler := f.tracesUnmarshalers[oCfg.Encoding] - if unmarshaler == nil { - return nil, errUnrecognizedEncoding - } - r, err := newTracesReceiver(oCfg, set, unmarshaler, nextConsumer) + r, err := newTracesReceiver(oCfg, set, nextConsumer) if err != nil { return nil, err } @@ -156,24 +124,16 @@ func (f *kafkaReceiverFactory) createTracesReceiver( func (f 
*kafkaReceiverFactory) createMetricsReceiver( _ context.Context, - set receiver.CreateSettings, + set receiver.Settings, cfg component.Config, nextConsumer consumer.Metrics, ) (receiver.Metrics, error) { - for encoding, unmarshal := range defaultMetricsUnmarshalers() { - f.metricsUnmarshalers[encoding] = unmarshal - } - oCfg := *(cfg.(*Config)) if oCfg.Topic == "" { oCfg.Topic = defaultMetricsTopic } - unmarshaler := f.metricsUnmarshalers[oCfg.Encoding] - if unmarshaler == nil { - return nil, errUnrecognizedEncoding - } - r, err := newMetricsReceiver(oCfg, set, unmarshaler, nextConsumer) + r, err := newMetricsReceiver(oCfg, set, nextConsumer) if err != nil { return nil, err } @@ -182,24 +142,16 @@ func (f *kafkaReceiverFactory) createMetricsReceiver( func (f *kafkaReceiverFactory) createLogsReceiver( _ context.Context, - set receiver.CreateSettings, + set receiver.Settings, cfg component.Config, nextConsumer consumer.Logs, ) (receiver.Logs, error) { - for encoding, unmarshaler := range defaultLogsUnmarshalers(set.BuildInfo.Version, set.Logger) { - f.logsUnmarshalers[encoding] = unmarshaler - } - oCfg := *(cfg.(*Config)) if oCfg.Topic == "" { oCfg.Topic = defaultLogsTopic } - unmarshaler, err := getLogsUnmarshaler(oCfg.Encoding, f.logsUnmarshalers) - if err != nil { - return nil, err - } - r, err := newLogsReceiver(oCfg, set, unmarshaler, nextConsumer) + r, err := newLogsReceiver(oCfg, set, nextConsumer) if err != nil { return nil, err } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/header_extraction.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/header_extraction.go index 265c84fb33d..efae723c201 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/header_extraction.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/header_extraction.go @@ -4,8 +4,6 @@ package kafkareceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver" import ( - "fmt" - "github.com/IBM/sarama" "go.opentelemetry.io/collector/pdata/plog" "go.opentelemetry.io/collector/pdata/pmetric" @@ -14,7 +12,7 @@ import ( ) func getAttribute(key string) string { - return fmt.Sprintf("kafka.header.%s", key) + return "kafka.header." 
+ key } type HeaderExtractor interface { diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/internal/metadata/generated_status.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/internal/metadata/generated_status.go index 39587e27617..2ed23395790 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/internal/metadata/generated_status.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/internal/metadata/generated_status.go @@ -7,7 +7,8 @@ import ( ) var ( - Type = component.MustNewType("kafka") + Type = component.MustNewType("kafka") + ScopeName = "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver" ) const ( diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/internal/metadata/generated_telemetry.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/internal/metadata/generated_telemetry.go index d60cbac9b5b..87c954834ef 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/internal/metadata/generated_telemetry.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/internal/metadata/generated_telemetry.go @@ -3,15 +3,112 @@ package metadata import ( - "go.opentelemetry.io/collector/component" + "errors" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/noop" "go.opentelemetry.io/otel/trace" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configtelemetry" ) func Meter(settings component.TelemetrySettings) metric.Meter { - return settings.MeterProvider.Meter("otelcol/kafkareceiver") + return settings.MeterProvider.Meter("github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver") } func Tracer(settings component.TelemetrySettings) trace.Tracer { - return settings.TracerProvider.Tracer("otelcol/kafkareceiver") + return settings.TracerProvider.Tracer("github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver") +} + +// TelemetryBuilder provides an interface for components to report telemetry +// as defined in metadata and user config. +type TelemetryBuilder struct { + meter metric.Meter + KafkaReceiverCurrentOffset metric.Int64Gauge + KafkaReceiverMessages metric.Int64Counter + KafkaReceiverOffsetLag metric.Int64Gauge + KafkaReceiverPartitionClose metric.Int64Counter + KafkaReceiverPartitionStart metric.Int64Counter + KafkaReceiverUnmarshalFailedLogRecords metric.Int64Counter + KafkaReceiverUnmarshalFailedMetricPoints metric.Int64Counter + KafkaReceiverUnmarshalFailedSpans metric.Int64Counter +} + +// TelemetryBuilderOption applies changes to default builder. 
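Both generated builders gate their instruments through `getLeveledMeter`, which compares the metric's declared level against the service's configured level; a self-contained illustration of that comparison:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/config/configtelemetry"
)

// enabled reproduces getLeveledMeter's test: the instrument is backed by a
// real meter only when its declared level does not exceed the service level.
func enabled(cfgLevel, srvLevel configtelemetry.Level) bool {
	return cfgLevel <= srvLevel
}

func main() {
	fmt.Println(enabled(configtelemetry.LevelBasic, configtelemetry.LevelNone))     // false: noop meter
	fmt.Println(enabled(configtelemetry.LevelBasic, configtelemetry.LevelBasic))    // true
	fmt.Println(enabled(configtelemetry.LevelBasic, configtelemetry.LevelDetailed)) // true
}
```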
+type TelemetryBuilderOption interface { + apply(*TelemetryBuilder) +} + +type telemetryBuilderOptionFunc func(mb *TelemetryBuilder) + +func (tbof telemetryBuilderOptionFunc) apply(mb *TelemetryBuilder) { + tbof(mb) +} + +// NewTelemetryBuilder provides a struct with methods to update all internal telemetry +// for a component +func NewTelemetryBuilder(settings component.TelemetrySettings, options ...TelemetryBuilderOption) (*TelemetryBuilder, error) { + builder := TelemetryBuilder{} + for _, op := range options { + op.apply(&builder) + } + builder.meter = Meter(settings) + var err, errs error + builder.KafkaReceiverCurrentOffset, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Gauge( + "otelcol_kafka_receiver_current_offset", + metric.WithDescription("Current message offset"), + metric.WithUnit("1"), + ) + errs = errors.Join(errs, err) + builder.KafkaReceiverMessages, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Counter( + "otelcol_kafka_receiver_messages", + metric.WithDescription("Number of received messages"), + metric.WithUnit("1"), + ) + errs = errors.Join(errs, err) + builder.KafkaReceiverOffsetLag, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Gauge( + "otelcol_kafka_receiver_offset_lag", + metric.WithDescription("Current offset lag"), + metric.WithUnit("1"), + ) + errs = errors.Join(errs, err) + builder.KafkaReceiverPartitionClose, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Counter( + "otelcol_kafka_receiver_partition_close", + metric.WithDescription("Number of finished partitions"), + metric.WithUnit("1"), + ) + errs = errors.Join(errs, err) + builder.KafkaReceiverPartitionStart, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Counter( + "otelcol_kafka_receiver_partition_start", + metric.WithDescription("Number of started partitions"), + metric.WithUnit("1"), + ) + errs = errors.Join(errs, err) + builder.KafkaReceiverUnmarshalFailedLogRecords, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Counter( + "otelcol_kafka_receiver_unmarshal_failed_log_records", + metric.WithDescription("Number of log records failed to be unmarshaled"), + metric.WithUnit("1"), + ) + errs = errors.Join(errs, err) + builder.KafkaReceiverUnmarshalFailedMetricPoints, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Counter( + "otelcol_kafka_receiver_unmarshal_failed_metric_points", + metric.WithDescription("Number of metric points failed to be unmarshaled"), + metric.WithUnit("1"), + ) + errs = errors.Join(errs, err) + builder.KafkaReceiverUnmarshalFailedSpans, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Counter( + "otelcol_kafka_receiver_unmarshal_failed_spans", + metric.WithDescription("Number of spans failed to be unmarshaled"), + metric.WithUnit("1"), + ) + errs = errors.Join(errs, err) + return &builder, errs +} + +func getLeveledMeter(meter metric.Meter, cfgLevel, srvLevel configtelemetry.Level) metric.Meter { + if cfgLevel <= srvLevel { + return meter + } + return noop.Meter{} } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/jaeger_unmarshaler.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/jaeger_unmarshaler.go index 
48fad72042c..ed001e9d11f 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/jaeger_unmarshaler.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/jaeger_unmarshaler.go @@ -13,8 +13,7 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger" ) -type jaegerProtoSpanUnmarshaler struct { -} +type jaegerProtoSpanUnmarshaler struct{} var _ TracesUnmarshaler = (*jaegerProtoSpanUnmarshaler)(nil) @@ -31,8 +30,7 @@ func (j jaegerProtoSpanUnmarshaler) Encoding() string { return "jaeger_proto" } -type jaegerJSONSpanUnmarshaler struct { -} +type jaegerJSONSpanUnmarshaler struct{} var _ TracesUnmarshaler = (*jaegerJSONSpanUnmarshaler)(nil) diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/json_unmarshaler.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/json_unmarshaler.go index 9ff02d03a6e..33750457e71 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/json_unmarshaler.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/json_unmarshaler.go @@ -10,8 +10,7 @@ import ( "go.opentelemetry.io/collector/pdata/plog" ) -type jsonLogsUnmarshaler struct { -} +type jsonLogsUnmarshaler struct{} func newJSONLogsUnmarshaler() LogsUnmarshaler { return &jsonLogsUnmarshaler{} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/kafka_receiver.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/kafka_receiver.go index 2b5b2954918..1ec2d5aca6e 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/kafka_receiver.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/kafka_receiver.go @@ -11,22 +11,29 @@ import ( "sync" "github.com/IBM/sarama" - "go.opencensus.io/stats" - "go.opencensus.io/tag" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" "go.opentelemetry.io/collector/receiver" "go.opentelemetry.io/collector/receiver/receiverhelper" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka" + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/internal/metadata" ) const ( transport = "kafka" + // TODO: update the following attributes to reflect semconv + attrInstanceName = "name" + attrPartition = "partition" ) -var errInvalidInitialOffset = fmt.Errorf("invalid initial offset") +var errInvalidInitialOffset = errors.New("invalid initial offset") // kafkaTracesConsumer uses sarama to consume and handle messages from kafka. 
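// Editor's note: illustrative sketch of the convention tightened in the
// unmarshaler hunks above: stateless unmarshalers are empty structs (now
// written as "struct{}" on one line) pinned to their interface with a
// compile-time assertion. All names here are hypothetical.
type exampleSpanUnmarshaler struct{}

// The blank assignment fails to compile if the methods below ever drift from
// the TracesUnmarshaler interface.
var _ TracesUnmarshaler = (*exampleSpanUnmarshaler)(nil)

func (exampleSpanUnmarshaler) Unmarshal(b []byte) (ptrace.Traces, error) {
	return (&ptrace.ProtoUnmarshaler{}).UnmarshalTraces(b)
}

func (exampleSpanUnmarshaler) Encoding() string { return "example_proto" }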
type kafkaTracesConsumer struct { @@ -36,13 +43,18 @@ type kafkaTracesConsumer struct { topics []string cancelConsumeLoop context.CancelFunc unmarshaler TracesUnmarshaler + consumeLoopWG *sync.WaitGroup - settings receiver.CreateSettings + settings receiver.Settings + telemetryBuilder *metadata.TelemetryBuilder autocommitEnabled bool messageMarking MessageMarking headerExtraction bool headers []string + minFetchSize int32 + defaultFetchSize int32 + maxFetchSize int32 } // kafkaMetricsConsumer uses sarama to consume and handle messages from kafka. @@ -53,13 +65,18 @@ type kafkaMetricsConsumer struct { topics []string cancelConsumeLoop context.CancelFunc unmarshaler MetricsUnmarshaler + consumeLoopWG *sync.WaitGroup - settings receiver.CreateSettings + settings receiver.Settings + telemetryBuilder *metadata.TelemetryBuilder autocommitEnabled bool messageMarking MessageMarking headerExtraction bool headers []string + minFetchSize int32 + defaultFetchSize int32 + maxFetchSize int32 } // kafkaLogsConsumer uses sarama to consume and handle messages from kafka. @@ -70,38 +87,50 @@ type kafkaLogsConsumer struct { topics []string cancelConsumeLoop context.CancelFunc unmarshaler LogsUnmarshaler + consumeLoopWG *sync.WaitGroup - settings receiver.CreateSettings + settings receiver.Settings + telemetryBuilder *metadata.TelemetryBuilder autocommitEnabled bool messageMarking MessageMarking headerExtraction bool headers []string + minFetchSize int32 + defaultFetchSize int32 + maxFetchSize int32 } -var _ receiver.Traces = (*kafkaTracesConsumer)(nil) -var _ receiver.Metrics = (*kafkaMetricsConsumer)(nil) -var _ receiver.Logs = (*kafkaLogsConsumer)(nil) +var ( + _ receiver.Traces = (*kafkaTracesConsumer)(nil) + _ receiver.Metrics = (*kafkaMetricsConsumer)(nil) + _ receiver.Logs = (*kafkaLogsConsumer)(nil) +) -func newTracesReceiver(config Config, set receiver.CreateSettings, unmarshaler TracesUnmarshaler, nextConsumer consumer.Traces) (*kafkaTracesConsumer, error) { - if unmarshaler == nil { - return nil, errUnrecognizedEncoding +func newTracesReceiver(config Config, set receiver.Settings, nextConsumer consumer.Traces) (*kafkaTracesConsumer, error) { + telemetryBuilder, err := metadata.NewTelemetryBuilder(set.TelemetrySettings) + if err != nil { + return nil, err } return &kafkaTracesConsumer{ config: config, topics: []string{config.Topic}, nextConsumer: nextConsumer, - unmarshaler: unmarshaler, + consumeLoopWG: &sync.WaitGroup{}, settings: set, autocommitEnabled: config.AutoCommit.Enable, messageMarking: config.MessageMarking, headerExtraction: config.HeaderExtraction.ExtractHeaders, headers: config.HeaderExtraction.Headers, + telemetryBuilder: telemetryBuilder, + minFetchSize: config.MinFetchSize, + defaultFetchSize: config.DefaultFetchSize, + maxFetchSize: config.MaxFetchSize, }, nil } -func createKafkaClient(config Config) (sarama.ConsumerGroup, error) { +func createKafkaClient(ctx context.Context, config Config) (sarama.ConsumerGroup, error) { saramaConfig := sarama.NewConfig() saramaConfig.ClientID = config.ClientID saramaConfig.Metadata.Full = config.Metadata.Full @@ -109,6 +138,12 @@ func createKafkaClient(config Config) (sarama.ConsumerGroup, error) { saramaConfig.Metadata.Retry.Backoff = config.Metadata.Retry.Backoff saramaConfig.Consumer.Offsets.AutoCommit.Enable = config.AutoCommit.Enable saramaConfig.Consumer.Offsets.AutoCommit.Interval = config.AutoCommit.Interval + saramaConfig.Consumer.Group.Session.Timeout = config.SessionTimeout + saramaConfig.Consumer.Group.Heartbeat.Interval = 
config.HeartbeatInterval + saramaConfig.Consumer.Fetch.Min = config.MinFetchSize + saramaConfig.Consumer.Fetch.Default = config.DefaultFetchSize + saramaConfig.Consumer.Fetch.Max = config.MaxFetchSize + var err error if saramaConfig.Consumer.Offsets.Initial, err = toSaramaInitialOffset(config.InitialOffset); err != nil { return nil, err @@ -121,13 +156,13 @@ func createKafkaClient(config Config) (sarama.ConsumerGroup, error) { return nil, err } } - if err := kafka.ConfigureAuthentication(config.Authentication, saramaConfig); err != nil { + if err := kafka.ConfigureAuthentication(ctx, config.Authentication, saramaConfig); err != nil { return nil, err } return sarama.NewConsumerGroup(config.Brokers, config.GroupID, saramaConfig) } -func (c *kafkaTracesConsumer) Start(_ context.Context, _ component.Host) error { +func (c *kafkaTracesConsumer) Start(_ context.Context, host component.Host) error { ctx, cancel := context.WithCancel(context.Background()) c.cancelConsumeLoop = cancel obsrecv, err := receiverhelper.NewObsReport(receiverhelper.ObsReportSettings{ @@ -138,9 +173,25 @@ func (c *kafkaTracesConsumer) Start(_ context.Context, _ component.Host) error { if err != nil { return err } + // extensions take precedence over internal encodings + if unmarshaler, errExt := loadEncodingExtension[ptrace.Unmarshaler]( + host, + c.config.Encoding, + ); errExt == nil { + c.unmarshaler = &tracesEncodingUnmarshaler{ + unmarshaler: *unmarshaler, + encoding: c.config.Encoding, + } + } + if unmarshaler, ok := defaultTracesUnmarshalers()[c.config.Encoding]; c.unmarshaler == nil && ok { + c.unmarshaler = unmarshaler + } + if c.unmarshaler == nil { + return errUnrecognizedEncoding + } // consumerGroup may be set in tests to inject fake implementation. if c.consumerGroup == nil { - if c.consumerGroup, err = createKafkaClient(c.config); err != nil { + if c.consumerGroup, err = createKafkaClient(ctx, c.config); err != nil { return err } } @@ -153,6 +204,7 @@ func (c *kafkaTracesConsumer) Start(_ context.Context, _ component.Host) error { autocommitEnabled: c.autocommitEnabled, messageMarking: c.messageMarking, headerExtractor: &nopHeaderExtractor{}, + telemetryBuilder: c.telemetryBuilder, } if c.headerExtraction { consumerGroup.headerExtractor = &headerExtractor{ @@ -160,16 +212,14 @@ func (c *kafkaTracesConsumer) Start(_ context.Context, _ component.Host) error { headers: c.headers, } } - go func() { - if err := c.consumeLoop(ctx, consumerGroup); !errors.Is(err, context.Canceled) { - c.settings.ReportStatus(component.NewFatalErrorEvent(err)) - } - }() + c.consumeLoopWG.Add(1) + go c.consumeLoop(ctx, consumerGroup) <-consumerGroup.ready return nil } -func (c *kafkaTracesConsumer) consumeLoop(ctx context.Context, handler sarama.ConsumerGroupHandler) error { +func (c *kafkaTracesConsumer) consumeLoop(ctx context.Context, handler sarama.ConsumerGroupHandler) { + defer c.consumeLoopWG.Done() for { // `Consume` should be called inside an infinite loop, when a // server-side rebalance happens, the consumer session will need to be @@ -180,7 +230,7 @@ func (c *kafkaTracesConsumer) consumeLoop(ctx context.Context, handler sarama.Co // check if context was cancelled, signaling that the consumer should stop if ctx.Err() != nil { c.settings.Logger.Info("Consumer stopped", zap.Error(ctx.Err())) - return ctx.Err() + return } } } @@ -190,31 +240,37 @@ func (c *kafkaTracesConsumer) Shutdown(context.Context) error { return nil } c.cancelConsumeLoop() + c.consumeLoopWG.Wait() if c.consumerGroup == nil { return nil } return 
c.consumerGroup.Close() } -func newMetricsReceiver(config Config, set receiver.CreateSettings, unmarshaler MetricsUnmarshaler, nextConsumer consumer.Metrics) (*kafkaMetricsConsumer, error) { - if unmarshaler == nil { - return nil, errUnrecognizedEncoding +func newMetricsReceiver(config Config, set receiver.Settings, nextConsumer consumer.Metrics) (*kafkaMetricsConsumer, error) { + telemetryBuilder, err := metadata.NewTelemetryBuilder(set.TelemetrySettings) + if err != nil { + return nil, err } return &kafkaMetricsConsumer{ config: config, topics: []string{config.Topic}, nextConsumer: nextConsumer, - unmarshaler: unmarshaler, + consumeLoopWG: &sync.WaitGroup{}, settings: set, autocommitEnabled: config.AutoCommit.Enable, messageMarking: config.MessageMarking, headerExtraction: config.HeaderExtraction.ExtractHeaders, headers: config.HeaderExtraction.Headers, + telemetryBuilder: telemetryBuilder, + minFetchSize: config.MinFetchSize, + defaultFetchSize: config.DefaultFetchSize, + maxFetchSize: config.MaxFetchSize, }, nil } -func (c *kafkaMetricsConsumer) Start(_ context.Context, _ component.Host) error { +func (c *kafkaMetricsConsumer) Start(_ context.Context, host component.Host) error { ctx, cancel := context.WithCancel(context.Background()) c.cancelConsumeLoop = cancel obsrecv, err := receiverhelper.NewObsReport(receiverhelper.ObsReportSettings{ @@ -225,9 +281,25 @@ func (c *kafkaMetricsConsumer) Start(_ context.Context, _ component.Host) error if err != nil { return err } + // extensions take precedence over internal encodings + if unmarshaler, errExt := loadEncodingExtension[pmetric.Unmarshaler]( + host, + c.config.Encoding, + ); errExt == nil { + c.unmarshaler = &metricsEncodingUnmarshaler{ + unmarshaler: *unmarshaler, + encoding: c.config.Encoding, + } + } + if unmarshaler, ok := defaultMetricsUnmarshalers()[c.config.Encoding]; c.unmarshaler == nil && ok { + c.unmarshaler = unmarshaler + } + if c.unmarshaler == nil { + return errUnrecognizedEncoding + } // consumerGroup may be set in tests to inject fake implementation. 
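// Editor's note: a standalone restatement, as a sketch, of the shutdown
// ordering these hunks introduce: cancel the consume-loop context, wait on the
// new consumeLoopWG so the goroutine has fully exited, and only then close the
// sarama consumer group. This ordering is what lets consumeLoop drop its error
// return and stop reporting fatal status from inside the goroutine.
func shutdownSketch(c *kafkaMetricsConsumer) error {
	if c.cancelConsumeLoop == nil {
		return nil // Start was never called
	}
	c.cancelConsumeLoop()  // 1. signal the consume loop to stop
	c.consumeLoopWG.Wait() // 2. wait for the goroutine to drain
	if c.consumerGroup == nil {
		return nil
	}
	return c.consumerGroup.Close() // 3. release broker connections last
}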
if c.consumerGroup == nil { - if c.consumerGroup, err = createKafkaClient(c.config); err != nil { + if c.consumerGroup, err = createKafkaClient(ctx, c.config); err != nil { return err } } @@ -240,6 +312,7 @@ func (c *kafkaMetricsConsumer) Start(_ context.Context, _ component.Host) error autocommitEnabled: c.autocommitEnabled, messageMarking: c.messageMarking, headerExtractor: &nopHeaderExtractor{}, + telemetryBuilder: c.telemetryBuilder, } if c.headerExtraction { metricsConsumerGroup.headerExtractor = &headerExtractor{ @@ -247,16 +320,14 @@ func (c *kafkaMetricsConsumer) Start(_ context.Context, _ component.Host) error headers: c.headers, } } - go func() { - if err := c.consumeLoop(ctx, metricsConsumerGroup); err != nil { - c.settings.ReportStatus(component.NewFatalErrorEvent(err)) - } - }() + c.consumeLoopWG.Add(1) + go c.consumeLoop(ctx, metricsConsumerGroup) <-metricsConsumerGroup.ready return nil } -func (c *kafkaMetricsConsumer) consumeLoop(ctx context.Context, handler sarama.ConsumerGroupHandler) error { +func (c *kafkaMetricsConsumer) consumeLoop(ctx context.Context, handler sarama.ConsumerGroupHandler) { + defer c.consumeLoopWG.Done() for { // `Consume` should be called inside an infinite loop, when a // server-side rebalance happens, the consumer session will need to be @@ -267,7 +338,7 @@ func (c *kafkaMetricsConsumer) consumeLoop(ctx context.Context, handler sarama.C // check if context was cancelled, signaling that the consumer should stop if ctx.Err() != nil { c.settings.Logger.Info("Consumer stopped", zap.Error(ctx.Err())) - return ctx.Err() + return } } } @@ -277,31 +348,37 @@ func (c *kafkaMetricsConsumer) Shutdown(context.Context) error { return nil } c.cancelConsumeLoop() + c.consumeLoopWG.Wait() if c.consumerGroup == nil { return nil } return c.consumerGroup.Close() } -func newLogsReceiver(config Config, set receiver.CreateSettings, unmarshaler LogsUnmarshaler, nextConsumer consumer.Logs) (*kafkaLogsConsumer, error) { - if unmarshaler == nil { - return nil, errUnrecognizedEncoding +func newLogsReceiver(config Config, set receiver.Settings, nextConsumer consumer.Logs) (*kafkaLogsConsumer, error) { + telemetryBuilder, err := metadata.NewTelemetryBuilder(set.TelemetrySettings) + if err != nil { + return nil, err } return &kafkaLogsConsumer{ config: config, topics: []string{config.Topic}, nextConsumer: nextConsumer, - unmarshaler: unmarshaler, + consumeLoopWG: &sync.WaitGroup{}, settings: set, autocommitEnabled: config.AutoCommit.Enable, messageMarking: config.MessageMarking, headerExtraction: config.HeaderExtraction.ExtractHeaders, headers: config.HeaderExtraction.Headers, + telemetryBuilder: telemetryBuilder, + minFetchSize: config.MinFetchSize, + defaultFetchSize: config.DefaultFetchSize, + maxFetchSize: config.MaxFetchSize, }, nil } -func (c *kafkaLogsConsumer) Start(_ context.Context, _ component.Host) error { +func (c *kafkaLogsConsumer) Start(_ context.Context, host component.Host) error { ctx, cancel := context.WithCancel(context.Background()) c.cancelConsumeLoop = cancel obsrecv, err := receiverhelper.NewObsReport(receiverhelper.ObsReportSettings{ @@ -312,9 +389,28 @@ func (c *kafkaLogsConsumer) Start(_ context.Context, _ component.Host) error { if err != nil { return err } + // extensions take precedence over internal encodings + if unmarshaler, errExt := loadEncodingExtension[plog.Unmarshaler]( + host, + c.config.Encoding, + ); errExt == nil { + c.unmarshaler = &logsEncodingUnmarshaler{ + unmarshaler: *unmarshaler, + encoding: c.config.Encoding, + } + } + if 
unmarshaler, errInt := getLogsUnmarshaler( + c.config.Encoding, + defaultLogsUnmarshalers(c.settings.BuildInfo.Version, c.settings.Logger), + ); c.unmarshaler == nil && errInt == nil { + c.unmarshaler = unmarshaler + } + if c.unmarshaler == nil { + return errUnrecognizedEncoding + } // consumerGroup may be set in tests to inject fake implementation. if c.consumerGroup == nil { - if c.consumerGroup, err = createKafkaClient(c.config); err != nil { + if c.consumerGroup, err = createKafkaClient(ctx, c.config); err != nil { return err } } @@ -327,6 +423,7 @@ func (c *kafkaLogsConsumer) Start(_ context.Context, _ component.Host) error { autocommitEnabled: c.autocommitEnabled, messageMarking: c.messageMarking, headerExtractor: &nopHeaderExtractor{}, + telemetryBuilder: c.telemetryBuilder, } if c.headerExtraction { logsConsumerGroup.headerExtractor = &headerExtractor{ @@ -334,16 +431,14 @@ func (c *kafkaLogsConsumer) Start(_ context.Context, _ component.Host) error { headers: c.headers, } } - go func() { - if err := c.consumeLoop(ctx, logsConsumerGroup); err != nil { - c.settings.ReportStatus(component.NewFatalErrorEvent(err)) - } - }() + c.consumeLoopWG.Add(1) + go c.consumeLoop(ctx, logsConsumerGroup) <-logsConsumerGroup.ready return nil } -func (c *kafkaLogsConsumer) consumeLoop(ctx context.Context, handler sarama.ConsumerGroupHandler) error { +func (c *kafkaLogsConsumer) consumeLoop(ctx context.Context, handler sarama.ConsumerGroupHandler) { + defer c.consumeLoopWG.Done() for { // `Consume` should be called inside an infinite loop, when a // server-side rebalance happens, the consumer session will need to be @@ -354,7 +449,7 @@ func (c *kafkaLogsConsumer) consumeLoop(ctx context.Context, handler sarama.Cons // check if context was cancelled, signaling that the consumer should stop if ctx.Err() != nil { c.settings.Logger.Info("Consumer stopped", zap.Error(ctx.Err())) - return ctx.Err() + return } } } @@ -364,6 +459,7 @@ func (c *kafkaLogsConsumer) Shutdown(context.Context) error { return nil } c.cancelConsumeLoop() + c.consumeLoopWG.Wait() if c.consumerGroup == nil { return nil } @@ -379,7 +475,8 @@ type tracesConsumerGroupHandler struct { logger *zap.Logger - obsrecv *receiverhelper.ObsReport + obsrecv *receiverhelper.ObsReport + telemetryBuilder *metadata.TelemetryBuilder autocommitEnabled bool messageMarking MessageMarking @@ -395,7 +492,8 @@ type metricsConsumerGroupHandler struct { logger *zap.Logger - obsrecv *receiverhelper.ObsReport + obsrecv *receiverhelper.ObsReport + telemetryBuilder *metadata.TelemetryBuilder autocommitEnabled bool messageMarking MessageMarking @@ -411,29 +509,30 @@ type logsConsumerGroupHandler struct { logger *zap.Logger - obsrecv *receiverhelper.ObsReport + obsrecv *receiverhelper.ObsReport + telemetryBuilder *metadata.TelemetryBuilder autocommitEnabled bool messageMarking MessageMarking headerExtractor HeaderExtractor } -var _ sarama.ConsumerGroupHandler = (*tracesConsumerGroupHandler)(nil) -var _ sarama.ConsumerGroupHandler = (*metricsConsumerGroupHandler)(nil) -var _ sarama.ConsumerGroupHandler = (*logsConsumerGroupHandler)(nil) +var ( + _ sarama.ConsumerGroupHandler = (*tracesConsumerGroupHandler)(nil) + _ sarama.ConsumerGroupHandler = (*metricsConsumerGroupHandler)(nil) + _ sarama.ConsumerGroupHandler = (*logsConsumerGroupHandler)(nil) +) func (c *tracesConsumerGroupHandler) Setup(session sarama.ConsumerGroupSession) error { c.readyCloser.Do(func() { close(c.ready) }) - statsTags := []tag.Mutator{tag.Upsert(tagInstanceName, c.id.Name())} - _ = 
stats.RecordWithTags(session.Context(), statsTags, statPartitionStart.M(1)) + c.telemetryBuilder.KafkaReceiverPartitionStart.Add(session.Context(), 1, metric.WithAttributes(attribute.String(attrInstanceName, c.id.Name()))) return nil } func (c *tracesConsumerGroupHandler) Cleanup(session sarama.ConsumerGroupSession) error { - statsTags := []tag.Mutator{tag.Upsert(tagInstanceName, c.id.Name())} - _ = stats.RecordWithTags(session.Context(), statsTags, statPartitionClose.M(1)) + c.telemetryBuilder.KafkaReceiverPartitionClose.Add(session.Context(), 1, metric.WithAttributes(attribute.String(attrInstanceName, c.id.Name()))) return nil } @@ -457,22 +556,18 @@ func (c *tracesConsumerGroupHandler) ConsumeClaim(session sarama.ConsumerGroupSe } ctx := c.obsrecv.StartTracesOp(session.Context()) - statsTags := []tag.Mutator{ - tag.Upsert(tagInstanceName, c.id.String()), - tag.Upsert(tagPartition, strconv.Itoa(int(claim.Partition()))), - } - _ = stats.RecordWithTags(ctx, statsTags, - statMessageCount.M(1), - statMessageOffset.M(message.Offset), - statMessageOffsetLag.M(claim.HighWaterMarkOffset()-message.Offset-1)) + attrs := attribute.NewSet( + attribute.String(attrInstanceName, c.id.String()), + attribute.String(attrPartition, strconv.Itoa(int(claim.Partition()))), + ) + c.telemetryBuilder.KafkaReceiverMessages.Add(ctx, 1, metric.WithAttributeSet(attrs)) + c.telemetryBuilder.KafkaReceiverCurrentOffset.Record(ctx, message.Offset, metric.WithAttributeSet(attrs)) + c.telemetryBuilder.KafkaReceiverOffsetLag.Record(ctx, claim.HighWaterMarkOffset()-message.Offset-1, metric.WithAttributeSet(attrs)) traces, err := c.unmarshaler.Unmarshal(message.Value) if err != nil { c.logger.Error("failed to unmarshal message", zap.Error(err)) - _ = stats.RecordWithTags( - ctx, - []tag.Mutator{tag.Upsert(tagInstanceName, c.id.String())}, - statUnmarshalFailedSpans.M(1)) + c.telemetryBuilder.KafkaReceiverUnmarshalFailedSpans.Add(session.Context(), 1, metric.WithAttributes(attribute.String(attrInstanceName, c.id.String()))) if c.messageMarking.After && c.messageMarking.OnError { session.MarkMessage(message, "") } @@ -509,14 +604,12 @@ func (c *metricsConsumerGroupHandler) Setup(session sarama.ConsumerGroupSession) c.readyCloser.Do(func() { close(c.ready) }) - statsTags := []tag.Mutator{tag.Upsert(tagInstanceName, c.id.Name())} - _ = stats.RecordWithTags(session.Context(), statsTags, statPartitionStart.M(1)) + c.telemetryBuilder.KafkaReceiverPartitionStart.Add(session.Context(), 1, metric.WithAttributes(attribute.String(attrInstanceName, c.id.Name()))) return nil } func (c *metricsConsumerGroupHandler) Cleanup(session sarama.ConsumerGroupSession) error { - statsTags := []tag.Mutator{tag.Upsert(tagInstanceName, c.id.Name())} - _ = stats.RecordWithTags(session.Context(), statsTags, statPartitionClose.M(1)) + c.telemetryBuilder.KafkaReceiverPartitionClose.Add(session.Context(), 1, metric.WithAttributes(attribute.String(attrInstanceName, c.id.Name()))) return nil } @@ -540,22 +633,18 @@ func (c *metricsConsumerGroupHandler) ConsumeClaim(session sarama.ConsumerGroupS } ctx := c.obsrecv.StartMetricsOp(session.Context()) - statsTags := []tag.Mutator{ - tag.Upsert(tagInstanceName, c.id.String()), - tag.Upsert(tagPartition, strconv.Itoa(int(claim.Partition()))), - } - _ = stats.RecordWithTags(ctx, statsTags, - statMessageCount.M(1), - statMessageOffset.M(message.Offset), - statMessageOffsetLag.M(claim.HighWaterMarkOffset()-message.Offset-1)) + attrs := attribute.NewSet( + attribute.String(attrInstanceName, c.id.String()), + 
attribute.String(attrPartition, strconv.Itoa(int(claim.Partition()))), + ) + c.telemetryBuilder.KafkaReceiverMessages.Add(ctx, 1, metric.WithAttributeSet(attrs)) + c.telemetryBuilder.KafkaReceiverCurrentOffset.Record(ctx, message.Offset, metric.WithAttributeSet(attrs)) + c.telemetryBuilder.KafkaReceiverOffsetLag.Record(ctx, claim.HighWaterMarkOffset()-message.Offset-1, metric.WithAttributeSet(attrs)) metrics, err := c.unmarshaler.Unmarshal(message.Value) if err != nil { c.logger.Error("failed to unmarshal message", zap.Error(err)) - _ = stats.RecordWithTags( - ctx, - []tag.Mutator{tag.Upsert(tagInstanceName, c.id.String())}, - statUnmarshalFailedMetricPoints.M(1)) + c.telemetryBuilder.KafkaReceiverUnmarshalFailedMetricPoints.Add(session.Context(), 1, metric.WithAttributes(attribute.String(attrInstanceName, c.id.String()))) if c.messageMarking.After && c.messageMarking.OnError { session.MarkMessage(message, "") } @@ -592,18 +681,12 @@ func (c *logsConsumerGroupHandler) Setup(session sarama.ConsumerGroupSession) er c.readyCloser.Do(func() { close(c.ready) }) - _ = stats.RecordWithTags( - session.Context(), - []tag.Mutator{tag.Upsert(tagInstanceName, c.id.String())}, - statPartitionStart.M(1)) + c.telemetryBuilder.KafkaReceiverPartitionStart.Add(session.Context(), 1, metric.WithAttributes(attribute.String(attrInstanceName, c.id.String()))) return nil } func (c *logsConsumerGroupHandler) Cleanup(session sarama.ConsumerGroupSession) error { - _ = stats.RecordWithTags( - session.Context(), - []tag.Mutator{tag.Upsert(tagInstanceName, c.id.String())}, - statPartitionClose.M(1)) + c.telemetryBuilder.KafkaReceiverPartitionClose.Add(session.Context(), 1, metric.WithAttributes(attribute.String(attrInstanceName, c.id.String()))) return nil } @@ -627,24 +710,18 @@ func (c *logsConsumerGroupHandler) ConsumeClaim(session sarama.ConsumerGroupSess } ctx := c.obsrecv.StartLogsOp(session.Context()) - statsTags := []tag.Mutator{ - tag.Upsert(tagInstanceName, c.id.String()), - tag.Upsert(tagPartition, strconv.Itoa(int(claim.Partition()))), - } - _ = stats.RecordWithTags( - ctx, - statsTags, - statMessageCount.M(1), - statMessageOffset.M(message.Offset), - statMessageOffsetLag.M(claim.HighWaterMarkOffset()-message.Offset-1)) + attrs := attribute.NewSet( + attribute.String(attrInstanceName, c.id.String()), + attribute.String(attrPartition, strconv.Itoa(int(claim.Partition()))), + ) + c.telemetryBuilder.KafkaReceiverMessages.Add(ctx, 1, metric.WithAttributeSet(attrs)) + c.telemetryBuilder.KafkaReceiverCurrentOffset.Record(ctx, message.Offset, metric.WithAttributeSet(attrs)) + c.telemetryBuilder.KafkaReceiverOffsetLag.Record(ctx, claim.HighWaterMarkOffset()-message.Offset-1, metric.WithAttributeSet(attrs)) logs, err := c.unmarshaler.Unmarshal(message.Value) if err != nil { c.logger.Error("failed to unmarshal message", zap.Error(err)) - _ = stats.RecordWithTags( - ctx, - []tag.Mutator{tag.Upsert(tagInstanceName, c.id.String())}, - statUnmarshalFailedLogRecords.M(1)) + c.telemetryBuilder.KafkaReceiverUnmarshalFailedLogRecords.Add(ctx, 1, metric.WithAttributes(attribute.String(attrInstanceName, c.id.String()))) if c.messageMarking.After && c.messageMarking.OnError { session.MarkMessage(message, "") } @@ -688,3 +765,30 @@ func toSaramaInitialOffset(initialOffset string) (int64, error) { return 0, errInvalidInitialOffset } } + +// loadEncodingExtension tries to load an available extension for the given encoding. 
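// Editor's note: hedged sketch of the encoding-resolution order implemented in
// the three Start methods above, using the generic helper defined just below:
// an encoding extension registered on the host wins, the built-in unmarshaler
// map is the fallback, and an unknown encoding is now a Start-time error
// rather than a factory-time one.
func resolveTracesUnmarshalerSketch(host component.Host, encoding string) (TracesUnmarshaler, error) {
	if ext, err := loadEncodingExtension[ptrace.Unmarshaler](host, encoding); err == nil {
		return &tracesEncodingUnmarshaler{unmarshaler: *ext, encoding: encoding}, nil
	}
	if u, ok := defaultTracesUnmarshalers()[encoding]; ok {
		return u, nil
	}
	return nil, errUnrecognizedEncoding
}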
+func loadEncodingExtension[T any](host component.Host, encoding string) (*T, error) { + extensionID, err := encodingToComponentID(encoding) + if err != nil { + return nil, err + } + encodingExtension, ok := host.GetExtensions()[*extensionID] + if !ok { + return nil, fmt.Errorf("unknown encoding extension %q", encoding) + } + unmarshaler, ok := encodingExtension.(T) + if !ok { + return nil, fmt.Errorf("extension %q is not an unmarshaler", encoding) + } + return &unmarshaler, nil +} + +// encodingToComponentID converts an encoding string to a component ID using the given encoding as type. +func encodingToComponentID(encoding string) (*component.ID, error) { + componentType, err := component.NewType(encoding) + if err != nil { + return nil, fmt.Errorf("invalid component type: %w", err) + } + id := component.NewID(componentType) + return &id, nil +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/metadata.yaml b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/metadata.yaml index 8ccc15c2814..8e02192b17c 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/metadata.yaml +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/metadata.yaml @@ -1,5 +1,4 @@ type: kafka -scope_name: otelcol/kafkareceiver status: class: receiver @@ -14,8 +13,60 @@ status: # TODO: Update the receiver to pass the tests tests: skip_lifecycle: true - goleak: - ignore: - top: - # See https://github.com/census-instrumentation/opencensus-go/issues/1191 for more information. - - "go.opencensus.io/stats/view.(*worker).start" + +telemetry: + metrics: + kafka_receiver_messages: + enabled: true + description: Number of received messages + unit: "1" + sum: + value_type: int + monotonic: true + kafka_receiver_current_offset: + enabled: true + description: Current message offset + unit: "1" + gauge: + value_type: int + kafka_receiver_offset_lag: + enabled: true + description: Current offset lag + unit: "1" + gauge: + value_type: int + kafka_receiver_partition_start: + enabled: true + description: Number of started partitions + unit: "1" + sum: + value_type: int + monotonic: true + kafka_receiver_partition_close: + enabled: true + description: Number of finished partitions + unit: "1" + sum: + value_type: int + monotonic: true + kafka_receiver_unmarshal_failed_metric_points: + enabled: true + description: Number of metric points failed to be unmarshaled + unit: "1" + sum: + value_type: int + monotonic: true + kafka_receiver_unmarshal_failed_log_records: + enabled: true + description: Number of log records failed to be unmarshaled + unit: "1" + sum: + value_type: int + monotonic: true + kafka_receiver_unmarshal_failed_spans: + enabled: true + description: Number of spans failed to be unmarshaled + unit: "1" + sum: + value_type: int + monotonic: true diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/metrics.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/metrics.go deleted file mode 100644 index c1dc6d3e7c3..00000000000 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/metrics.go +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package kafkareceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver" - -import ( - 
"go.opencensus.io/stats" - "go.opencensus.io/stats/view" - "go.opencensus.io/tag" -) - -var ( - tagInstanceName, _ = tag.NewKey("name") - tagPartition, _ = tag.NewKey("partition") - - statMessageCount = stats.Int64("kafka_receiver_messages", "Number of received messages", stats.UnitDimensionless) - statMessageOffset = stats.Int64("kafka_receiver_current_offset", "Current message offset", stats.UnitDimensionless) - statMessageOffsetLag = stats.Int64("kafka_receiver_offset_lag", "Current offset lag", stats.UnitDimensionless) - - statPartitionStart = stats.Int64("kafka_receiver_partition_start", "Number of started partitions", stats.UnitDimensionless) - statPartitionClose = stats.Int64("kafka_receiver_partition_close", "Number of finished partitions", stats.UnitDimensionless) - - statUnmarshalFailedMetricPoints = stats.Int64("kafka_receiver_unmarshal_failed_metric_points", "Number of metric points failed to be unmarshaled", stats.UnitDimensionless) - statUnmarshalFailedLogRecords = stats.Int64("kafka_receiver_unmarshal_failed_log_records", "Number of log records failed to be unmarshaled", stats.UnitDimensionless) - statUnmarshalFailedSpans = stats.Int64("kafka_receiver_unmarshal_failed_spans", "Number of spans failed to be unmarshaled", stats.UnitDimensionless) -) - -// metricViews return metric views for Kafka receiver. -func metricViews() []*view.View { - partitionAgnosticTagKeys := []tag.Key{tagInstanceName} - partitionSpecificTagKeys := []tag.Key{tagInstanceName, tagPartition} - - countMessages := &view.View{ - Name: statMessageCount.Name(), - Measure: statMessageCount, - Description: statMessageCount.Description(), - TagKeys: partitionSpecificTagKeys, - Aggregation: view.Sum(), - } - - lastValueOffset := &view.View{ - Name: statMessageOffset.Name(), - Measure: statMessageOffset, - Description: statMessageOffset.Description(), - TagKeys: partitionSpecificTagKeys, - Aggregation: view.LastValue(), - } - - lastValueOffsetLag := &view.View{ - Name: statMessageOffsetLag.Name(), - Measure: statMessageOffsetLag, - Description: statMessageOffsetLag.Description(), - TagKeys: partitionSpecificTagKeys, - Aggregation: view.LastValue(), - } - - countPartitionStart := &view.View{ - Name: statPartitionStart.Name(), - Measure: statPartitionStart, - Description: statPartitionStart.Description(), - TagKeys: partitionAgnosticTagKeys, - Aggregation: view.Sum(), - } - - countPartitionClose := &view.View{ - Name: statPartitionClose.Name(), - Measure: statPartitionClose, - Description: statPartitionClose.Description(), - TagKeys: partitionAgnosticTagKeys, - Aggregation: view.Sum(), - } - - countUnmarshalFailedMetricPoints := &view.View{ - Name: statUnmarshalFailedMetricPoints.Name(), - Measure: statUnmarshalFailedMetricPoints, - Description: statUnmarshalFailedMetricPoints.Description(), - TagKeys: partitionAgnosticTagKeys, - Aggregation: view.Sum(), - } - - countUnmarshalFailedLogRecords := &view.View{ - Name: statUnmarshalFailedLogRecords.Name(), - Measure: statUnmarshalFailedLogRecords, - Description: statUnmarshalFailedLogRecords.Description(), - TagKeys: partitionAgnosticTagKeys, - Aggregation: view.Sum(), - } - - countUnmarshalFailedSpans := &view.View{ - Name: statUnmarshalFailedSpans.Name(), - Measure: statUnmarshalFailedSpans, - Description: statUnmarshalFailedSpans.Description(), - TagKeys: partitionAgnosticTagKeys, - Aggregation: view.Sum(), - } - - return []*view.View{ - countMessages, - lastValueOffset, - lastValueOffsetLag, - countPartitionStart, - countPartitionClose, - 
countUnmarshalFailedMetricPoints, - countUnmarshalFailedLogRecords, - countUnmarshalFailedSpans, - } -} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/unmarshaler.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/unmarshaler.go index bf44be7b496..793848d94c8 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/unmarshaler.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/unmarshaler.go @@ -17,7 +17,6 @@ import ( type TracesUnmarshaler interface { // Unmarshal deserializes the message body into traces. Unmarshal([]byte) (ptrace.Traces, error) - // Encoding of the serialized messages. Encoding() string } @@ -26,7 +25,6 @@ type TracesUnmarshaler interface { type MetricsUnmarshaler interface { // Unmarshal deserializes the message body into traces Unmarshal([]byte) (pmetric.Metrics, error) - // Encoding of the serialized messages Encoding() string } @@ -35,14 +33,12 @@ type MetricsUnmarshaler interface { type LogsUnmarshaler interface { // Unmarshal deserializes the message body into traces. Unmarshal([]byte) (plog.Logs, error) - // Encoding of the serialized messages. Encoding() string } type LogsUnmarshalerWithEnc interface { LogsUnmarshaler - // WithEnc sets the character encoding (UTF-8, GBK, etc.) of the unmarshaler. WithEnc(string) (LogsUnmarshalerWithEnc, error) } @@ -50,6 +46,7 @@ type LogsUnmarshalerWithEnc interface { // defaultTracesUnmarshalers returns map of supported encodings with TracesUnmarshaler. func defaultTracesUnmarshalers() map[string]TracesUnmarshaler { otlpPb := newPdataTracesUnmarshaler(&ptrace.ProtoUnmarshaler{}, defaultEncoding) + otlpJSON := newPdataTracesUnmarshaler(&ptrace.JSONUnmarshaler{}, "otlp_json") jaegerProto := jaegerProtoSpanUnmarshaler{} jaegerJSON := jaegerJSONSpanUnmarshaler{} zipkinProto := newPdataTracesUnmarshaler(zipkinv2.NewProtobufTracesUnmarshaler(false, false), "zipkin_proto") @@ -57,6 +54,7 @@ func defaultTracesUnmarshalers() map[string]TracesUnmarshaler { zipkinThrift := newPdataTracesUnmarshaler(zipkinv1.NewThriftTracesUnmarshaler(), "zipkin_thrift") return map[string]TracesUnmarshaler{ otlpPb.Encoding(): otlpPb, + otlpJSON.Encoding(): otlpJSON, jaegerProto.Encoding(): jaegerProto, jaegerJSON.Encoding(): jaegerJSON, zipkinProto.Encoding(): zipkinProto, @@ -67,22 +65,68 @@ func defaultTracesUnmarshalers() map[string]TracesUnmarshaler { func defaultMetricsUnmarshalers() map[string]MetricsUnmarshaler { otlpPb := newPdataMetricsUnmarshaler(&pmetric.ProtoUnmarshaler{}, defaultEncoding) + otlpJSON := newPdataMetricsUnmarshaler(&pmetric.JSONUnmarshaler{}, "otlp_json") return map[string]MetricsUnmarshaler{ - otlpPb.Encoding(): otlpPb, + otlpPb.Encoding(): otlpPb, + otlpJSON.Encoding(): otlpJSON, } } func defaultLogsUnmarshalers(version string, logger *zap.Logger) map[string]LogsUnmarshaler { azureResourceLogs := newAzureResourceLogsUnmarshaler(version, logger) otlpPb := newPdataLogsUnmarshaler(&plog.ProtoUnmarshaler{}, defaultEncoding) + otlpJSON := newPdataLogsUnmarshaler(&plog.JSONUnmarshaler{}, "otlp_json") raw := newRawLogsUnmarshaler() text := newTextLogsUnmarshaler() json := newJSONLogsUnmarshaler() return map[string]LogsUnmarshaler{ azureResourceLogs.Encoding(): azureResourceLogs, otlpPb.Encoding(): otlpPb, + otlpJSON.Encoding(): otlpJSON, raw.Encoding(): raw, text.Encoding(): text, json.Encoding(): json, } } + +// tracesEncodingUnmarshaler is a wrapper 
around ptrace.Unmarshaler that implements TracesUnmarshaler. +type tracesEncodingUnmarshaler struct { + unmarshaler ptrace.Unmarshaler + encoding string +} + +func (t *tracesEncodingUnmarshaler) Unmarshal(data []byte) (ptrace.Traces, error) { + return t.unmarshaler.UnmarshalTraces(data) +} + +func (t *tracesEncodingUnmarshaler) Encoding() string { + return t.encoding +} + +// metricsEncodingUnmarshaler is a wrapper around pmetric.Unmarshaler that implements MetricsUnmarshaler. +type metricsEncodingUnmarshaler struct { + unmarshaler pmetric.Unmarshaler + encoding string +} + +func (m *metricsEncodingUnmarshaler) Unmarshal(data []byte) (pmetric.Metrics, error) { + return m.unmarshaler.UnmarshalMetrics(data) +} + +func (m *metricsEncodingUnmarshaler) Encoding() string { + return m.encoding +} + +// logsEncodingUnmarshaler is a wrapper around plog.Unmarshaler that implements LogsUnmarshaler. +type logsEncodingUnmarshaler struct { + unmarshaler plog.Unmarshaler + encoding string +} + +func (l *logsEncodingUnmarshaler) Unmarshal(data []byte) (plog.Logs, error) { + return l.unmarshaler.UnmarshalLogs(data) +} + +func (l *logsEncodingUnmarshaler) Encoding() string { + return l.encoding +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/README.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/README.md index c53d206fb0c..65aba34c54e 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/README.md +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/README.md @@ -4,13 +4,14 @@ | Status | | | ------------- |-----------| | Stability | [beta]: metrics, traces | -| Distributions | [core], [contrib] | +| Distributions | [core], [contrib], [k8s] | | Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Areceiver%2Fopencensus%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Areceiver%2Fopencensus) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Areceiver%2Fopencensus%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Areceiver%2Fopencensus) | | [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@open-telemetry/collector-approvers](https://github.com/orgs/open-telemetry/teams/collector-approvers) | -[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta +[beta]: https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-stability.md#beta [core]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol [contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib +[k8s]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-k8s Receives data via gRPC or HTTP using [OpenCensus]( https://opencensus.io/) @@ -28,10 +29,9 @@ receivers: The following settings are configurable: -- `endpoint` (default = 0.0.0.0:55678): host:port to which the receiver is +- 
`endpoint` (default = localhost:55678): host:port to which the receiver is going to receive data. The valid syntax is described at - https://github.com/grpc/grpc/blob/master/doc/naming.md. The - `component.UseLocalHostAsDefaultHost` feature gate changes this to localhost:55678. This will become the default in a future release. + https://github.com/grpc/grpc/blob/master/doc/naming.md. See our [security best practices doc](https://opentelemetry.io/docs/security/config-best-practices/#protect-against-denial-of-service-attacks) to understand how to set the endpoint in different environments. ## Advanced Configuration diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/factory.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/factory.go index f45c84eeb47..d406b1d6e91 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/factory.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/factory.go @@ -12,7 +12,7 @@ import ( "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/receiver" - "github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/localhostgate" + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/testutil" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/internal/metadata" ) @@ -32,7 +32,7 @@ func createDefaultConfig() component.Config { return &Config{ ServerConfig: configgrpc.ServerConfig{ NetAddr: confignet.AddrConfig{ - Endpoint: localhostgate.EndpointForPort(grpcPort), + Endpoint: testutil.EndpointForPort(grpcPort), Transport: confignet.TransportTypeTCP, }, // We almost write 0 bytes, so no need to tune WriteBufferSize. @@ -43,7 +43,7 @@ func createDefaultConfig() component.Config { func createTracesReceiver( _ context.Context, - set receiver.CreateSettings, + set receiver.Settings, cfg component.Config, nextConsumer consumer.Traces, ) (receiver.Traces, error) { @@ -58,7 +58,7 @@ func createTracesReceiver( func createMetricsReceiver( _ context.Context, - set receiver.CreateSettings, + set receiver.Settings, cfg component.Config, nextConsumer consumer.Metrics, ) (receiver.Metrics, error) { @@ -73,6 +73,6 @@ func createMetricsReceiver( // This is the map of already created OpenCensus receivers for particular configurations. // We maintain this map because the Factory is asked trace and metric receivers separately -// when it gets CreateTracesReceiver() and CreateMetricsReceiver() but they must not +// when it gets CreateTraces() and CreateMetrics() but they must not // create separate objects, they must use one ocReceiver object per configuration. 
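// Editor's note: hypothetical sketch of the sharing pattern the comment above
// describes; GetOrAdd and Unwrap are assumptions about the internal
// sharedcomponent package, based on how other contrib factories use it, and
// the real call sites are not shown in this hunk.
func getOrCreateOcReceiverSketch(set receiver.Settings, cfg *Config) *ocReceiver {
	r := receivers.GetOrAdd(cfg, func() component.Component {
		// One ocReceiver per *Config: the trace and metric factories both
		// land here, so the gRPC/HTTP listeners are created exactly once.
		return newOpenCensusReceiver(cfg, nil, nil, set)
	})
	return r.Unwrap().(*ocReceiver)
}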
var receivers = sharedcomponent.NewSharedComponents() diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/internal/metadata/generated_status.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/internal/metadata/generated_status.go index 0e972cfaae3..931dac2ca32 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/internal/metadata/generated_status.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/internal/metadata/generated_status.go @@ -7,7 +7,8 @@ import ( ) var ( - Type = component.MustNewType("opencensus") + Type = component.MustNewType("opencensus") + ScopeName = "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver" ) const ( diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/internal/metadata/generated_telemetry.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/internal/metadata/generated_telemetry.go deleted file mode 100644 index 66aba2295e9..00000000000 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/internal/metadata/generated_telemetry.go +++ /dev/null @@ -1,17 +0,0 @@ -// Code generated by mdatagen. DO NOT EDIT. - -package metadata - -import ( - "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/trace" -) - -func Meter(settings component.TelemetrySettings) metric.Meter { - return settings.MeterProvider.Meter("otelcol/opencensusreceiver") -} - -func Tracer(settings component.TelemetrySettings) trace.Tracer { - return settings.TracerProvider.Tracer("otelcol/opencensusreceiver") -} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/internal/ocmetrics/opencensus.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/internal/ocmetrics/opencensus.go index a99bf7b865d..a07bee4a797 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/internal/ocmetrics/opencensus.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/internal/ocmetrics/opencensus.go @@ -27,7 +27,7 @@ type Receiver struct { } // New creates a new ocmetrics.Receiver reference. -func New(nextConsumer consumer.Metrics, set receiver.CreateSettings) (*Receiver, error) { +func New(nextConsumer consumer.Metrics, set receiver.Settings) (*Receiver, error) { obsrecv, err := receiverhelper.NewObsReport(receiverhelper.ObsReportSettings{ ReceiverID: set.ID, Transport: receiverTransport, diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/internal/octrace/opencensus.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/internal/octrace/opencensus.go index 8edf43e8628..9977d9fa670 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/internal/octrace/opencensus.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/internal/octrace/opencensus.go @@ -32,8 +32,7 @@ type Receiver struct { } // New creates a new opencensus.Receiver reference. 
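// Editor's note: minimal sketch of the obs-report construction shared by the
// two New constructors in these hunks; only the settings type changed
// (receiver.CreateSettings -> receiver.Settings). The ReceiverCreateSettings
// field name is an assumption carried over from receiverhelper.
func newObsReportSketch(set receiver.Settings) (*receiverhelper.ObsReport, error) {
	return receiverhelper.NewObsReport(receiverhelper.ObsReportSettings{
		ReceiverID:             set.ID,
		Transport:              receiverTransport,
		ReceiverCreateSettings: set,
	})
}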
-func New(nextConsumer consumer.Traces, set receiver.CreateSettings) (*Receiver, error) { - +func New(nextConsumer consumer.Traces, set receiver.Settings) (*Receiver, error) { obsrecv, err := receiverhelper.NewObsReport(receiverhelper.ObsReportSettings{ ReceiverID: set.ID, Transport: receiverTransport, diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/metadata.yaml b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/metadata.yaml index 7d996ec7ce0..9971ee24253 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/metadata.yaml +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/metadata.yaml @@ -1,12 +1,9 @@ type: opencensus -scope_name: otelcol/opencensusreceiver status: class: receiver stability: beta: [metrics, traces] - distributions: - - core - - contrib + distributions: [core, contrib, k8s] codeowners: active: [open-telemetry/collector-approvers] \ No newline at end of file diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/opencensus.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/opencensus.go index fa37d0b6cf4..e5047c45cd9 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/opencensus.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/opencensus.go @@ -18,6 +18,7 @@ import ( "github.com/rs/cors" "github.com/soheilhy/cmux" "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componentstatus" "go.opentelemetry.io/collector/config/configgrpc" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/receiver" @@ -47,7 +48,7 @@ type ocReceiver struct { stopWG sync.WaitGroup - settings receiver.CreateSettings + settings receiver.Settings multiplexer cmux.CMux } @@ -58,7 +59,7 @@ func newOpenCensusReceiver( cfg *Config, tc consumer.Traces, mc consumer.Metrics, - settings receiver.CreateSettings, + settings receiver.Settings, opts ...ocOption, ) *ocReceiver { ocr := &ocReceiver{ @@ -113,10 +114,12 @@ func (ocr *ocReceiver) Start(ctx context.Context, host component.Host) error { if !hasConsumer { return errors.New("cannot start receiver: no consumers were specified") } + ocr.ln, err = net.Listen(string(ocr.cfg.NetAddr.Transport), ocr.cfg.NetAddr.Endpoint) if err != nil { return fmt.Errorf("failed to bind to address %q: %w", ocr.cfg.NetAddr.Endpoint, err) } + // Register the grpc-gateway on the HTTP server mux var c context.Context c, ocr.cancel = context.WithCancel(context.Background()) @@ -143,20 +146,20 @@ func (ocr *ocReceiver) Start(ctx context.Context, host component.Host) error { defer ocr.stopWG.Done() startWG.Done() // Check for cmux.ErrServerClosed, because during the shutdown this is not properly close before closing the cmux, - if err := ocr.serverGRPC.Serve(grpcL); !errors.Is(err, grpc.ErrServerStopped) && !errors.Is(err, cmux.ErrServerClosed) && err != nil { - ocr.settings.TelemetrySettings.ReportStatus(component.NewFatalErrorEvent(err)) + if err := ocr.serverGRPC.Serve(grpcL); err != nil && !errors.Is(err, grpc.ErrServerStopped) && !errors.Is(err, cmux.ErrServerClosed) { + componentstatus.ReportStatus(host, componentstatus.NewFatalErrorEvent(err)) } }() go func() { startWG.Done() - if err := ocr.serverHTTP.Serve(httpL); 
!errors.Is(err, http.ErrServerClosed) && !errors.Is(err, cmux.ErrServerClosed) && err != nil { - ocr.settings.TelemetrySettings.ReportStatus(component.NewFatalErrorEvent(err)) + if err := ocr.serverHTTP.Serve(httpL); err != nil && !errors.Is(err, http.ErrServerClosed) && !errors.Is(err, cmux.ErrServerClosed) { + componentstatus.ReportStatus(host, componentstatus.NewFatalErrorEvent(err)) } }() go func() { startWG.Done() - if err := ocr.multiplexer.Serve(); !errors.Is(err, cmux.ErrServerClosed) && err != nil { - ocr.settings.TelemetrySettings.ReportStatus(component.NewFatalErrorEvent(err)) + if err := ocr.multiplexer.Serve(); err != nil && !errors.Is(err, net.ErrClosed) { + componentstatus.ReportStatus(host, componentstatus.NewFatalErrorEvent(err)) } }() @@ -186,7 +189,6 @@ func (ocr *ocReceiver) Start(ctx context.Context, host component.Host) error { // Shutdown is a method to turn off receiving. func (ocr *ocReceiver) Shutdown(context.Context) error { - if ocr.cancel != nil { ocr.cancel() } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/options.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/options.go index 3851fca059f..893eee4b676 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/options.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/options.go @@ -36,6 +36,7 @@ func withGRPCServerSettings(settings configgrpc.ServerConfig) ocOption { gsvOpts := grpcServerSettings(settings) return gsvOpts } + func (gsvo grpcServerSettings) withReceiver(ocr *ocReceiver) { ocr.grpcServerSettings = configgrpc.ServerConfig(gsvo) } diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver/README.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver/README.md index 7cbd83afb62..0fc0939be84 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver/README.md +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver/README.md @@ -4,13 +4,14 @@ | Status | | | ------------- |-----------| | Stability | [beta]: traces | -| Distributions | [core], [contrib] | +| Distributions | [core], [contrib], [k8s] | | Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Areceiver%2Fzipkin%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Areceiver%2Fzipkin) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Areceiver%2Fzipkin%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Areceiver%2Fzipkin) | | [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@MovieStoreGuy](https://www.github.com/MovieStoreGuy), [@andrzej-stencel](https://www.github.com/andrzej-stencel), [@crobert-1](https://www.github.com/crobert-1) | -[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta +[beta]: 
https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-stability.md#beta [core]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol [contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib +[k8s]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-k8s This receiver receives spans from [Zipkin](https://zipkin.io/) (V1 and V2). @@ -27,7 +28,7 @@ receivers: The following settings are configurable: -- `endpoint` (default = 0.0.0.0:9411): host:port on which the receiver is going to receive data. The `component.UseLocalHostAsDefaultHost` feature gate changes this to localhost:9411. This will become the default in a future release. For full list of `ServerConfig` refer [here](https://github.com/open-telemetry/opentelemetry-collector/tree/main/config/confighttp). +- `endpoint` (default = localhost:9411): host:port on which the receiver is going to receive data. See our [security best practices doc](https://opentelemetry.io/docs/security/config-best-practices/#protect-against-denial-of-service-attacks) to understand how to set the endpoint in different environments. For full list of `ServerConfig` refer [here](https://github.com/open-telemetry/opentelemetry-collector/tree/main/config/confighttp). - `parse_string_tags` (default = false): if enabled, the receiver will attempt to parse string tags/binary annotations into int/bool/float. ## Advanced Configuration diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver/factory.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver/factory.go index 0675a1829e7..dd86d7d9c47 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver/factory.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver/factory.go @@ -11,7 +11,7 @@ import ( "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/receiver" - "github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/localhostgate" + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/testutil" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver/internal/metadata" ) @@ -19,7 +19,7 @@ import ( const ( defaultHTTPPort = 9411 - defaultBindEndpoint = "0.0.0.0:9411" + defaultBindEndpoint = "localhost:9411" ) // NewFactory creates a new Zipkin receiver factory @@ -35,7 +35,7 @@ func NewFactory() receiver.Factory { func createDefaultConfig() component.Config { return &Config{ ServerConfig: confighttp.ServerConfig{ - Endpoint: localhostgate.EndpointForPort(defaultHTTPPort), + Endpoint: testutil.EndpointForPort(defaultHTTPPort), }, ParseStringTags: false, } @@ -44,7 +44,7 @@ func createDefaultConfig() component.Config { // createTracesReceiver creates a trace receiver based on provided config.
func createTracesReceiver( _ context.Context, - set receiver.CreateSettings, + set receiver.Settings, cfg component.Config, nextConsumer consumer.Traces, ) (receiver.Traces, error) { diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver/internal/metadata/generated_status.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver/internal/metadata/generated_status.go index 9c236a37f60..d7bffd7a8f8 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver/internal/metadata/generated_status.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver/internal/metadata/generated_status.go @@ -7,7 +7,8 @@ import ( ) var ( - Type = component.MustNewType("zipkin") + Type = component.MustNewType("zipkin") + ScopeName = "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver" ) const ( diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver/internal/metadata/generated_telemetry.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver/internal/metadata/generated_telemetry.go deleted file mode 100644 index e3dd94b3129..00000000000 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver/internal/metadata/generated_telemetry.go +++ /dev/null @@ -1,17 +0,0 @@ -// Code generated by mdatagen. DO NOT EDIT. - -package metadata - -import ( - "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/trace" -) - -func Meter(settings component.TelemetrySettings) metric.Meter { - return settings.MeterProvider.Meter("otelcol/zipkinreceiver") -} - -func Tracer(settings component.TelemetrySettings) trace.Tracer { - return settings.TracerProvider.Tracer("otelcol/zipkinreceiver") -} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver/metadata.yaml b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver/metadata.yaml index 2ebc6c5df30..779d41098bb 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver/metadata.yaml +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver/metadata.yaml @@ -1,12 +1,9 @@ type: zipkin -scope_name: otelcol/zipkinreceiver status: class: receiver stability: beta: [traces] - distributions: - - core - - contrib + distributions: [core, contrib, k8s] codeowners: active: [MovieStoreGuy, andrzej-stencel, crobert-1] diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver/trace_receiver.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver/trace_receiver.go index 9a5a897a70f..c3f5ceffc54 100644 --- a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver/trace_receiver.go +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver/trace_receiver.go @@ -15,6 +15,7 @@ import ( "sync" "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componentstatus" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/consumererror" "go.opentelemetry.io/collector/pdata/ptrace" @@ -32,8 +33,10 @@ const ( receiverTransportV2PROTO = "http_v2_proto" ) -var errNextConsumerRespBody = 
[]byte(`"Internal Server Error"`) -var errBadRequestRespBody = []byte(`"Bad Request"`) +var ( + errNextConsumerRespBody = []byte(`"Internal Server Error"`) + errBadRequestRespBody = []byte(`"Bad Request"`) +) // zipkinReceiver type is used to handle spans received in the Zipkin format. type zipkinReceiver struct { @@ -49,14 +52,14 @@ type zipkinReceiver struct { protobufUnmarshaler ptrace.Unmarshaler protobufDebugUnmarshaler ptrace.Unmarshaler - settings receiver.CreateSettings + settings receiver.Settings obsrecvrs map[string]*receiverhelper.ObsReport } var _ http.Handler = (*zipkinReceiver)(nil) // newReceiver creates a new zipkinReceiver reference. -func newReceiver(config *Config, nextConsumer consumer.Traces, settings receiver.CreateSettings) (*zipkinReceiver, error) { +func newReceiver(config *Config, nextConsumer consumer.Traces, settings receiver.Settings) (*zipkinReceiver, error) { transports := []string{receiverTransportV1Thrift, receiverTransportV1JSON, receiverTransportV2JSON, receiverTransportV2PROTO} obsrecvrs := make(map[string]*receiverhelper.ObsReport) for _, transport := range transports { @@ -107,7 +110,7 @@ func (zr *zipkinReceiver) Start(ctx context.Context, host component.Host) error defer zr.shutdownWG.Done() if errHTTP := zr.server.Serve(listener); !errors.Is(errHTTP, http.ErrServerClosed) && errHTTP != nil { - zr.settings.TelemetrySettings.ReportStatus(component.NewFatalErrorEvent(errHTTP)) + componentstatus.ReportStatus(host, componentstatus.NewFatalErrorEvent(errHTTP)) } }() @@ -253,7 +256,6 @@ func (zr *zipkinReceiver) ServeHTTP(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusInternalServerError) _, _ = w.Write(errNextConsumerRespBody) } - } func transportType(r *http.Request, asZipkinv1 bool) string { diff --git a/vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml b/vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml index 1d8b69e65e0..ec52857a3ee 100644 --- a/vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml +++ b/vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml @@ -1,3 +1,4 @@ +version: 2 before: hooks: - go mod tidy diff --git a/vendor/github.com/pelletier/go-toml/v2/README.md b/vendor/github.com/pelletier/go-toml/v2/README.md index d964b25fe19..0755e55642d 100644 --- a/vendor/github.com/pelletier/go-toml/v2/README.md +++ b/vendor/github.com/pelletier/go-toml/v2/README.md @@ -565,7 +565,7 @@ complete solutions exist out there. ## Versioning -Expect for parts explicitely marked otherwise, go-toml follows [Semantic +Expect for parts explicitly marked otherwise, go-toml follows [Semantic Versioning](https://semver.org). The supported version of [TOML](https://github.com/toml-lang/toml) is indicated at the beginning of this document. 
The last two major versions of Go are supported (see [Go Release diff --git a/vendor/github.com/pelletier/go-toml/v2/marshaler.go b/vendor/github.com/pelletier/go-toml/v2/marshaler.go index 7f4e20c1285..161acd93439 100644 --- a/vendor/github.com/pelletier/go-toml/v2/marshaler.go +++ b/vendor/github.com/pelletier/go-toml/v2/marshaler.go @@ -8,7 +8,7 @@ import ( "io" "math" "reflect" - "sort" + "slices" "strconv" "strings" "time" @@ -280,7 +280,7 @@ func (enc *Encoder) encode(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, e } hasTextMarshaler := v.Type().Implements(textMarshalerType) - if hasTextMarshaler || (v.CanAddr() && reflect.PtrTo(v.Type()).Implements(textMarshalerType)) { + if hasTextMarshaler || (v.CanAddr() && reflect.PointerTo(v.Type()).Implements(textMarshalerType)) { if !hasTextMarshaler { v = v.Addr() } @@ -631,6 +631,18 @@ func (enc *Encoder) keyToString(k reflect.Value) (string, error) { return "", fmt.Errorf("toml: error marshalling key %v from text: %w", k, err) } return string(keyB), nil + + case keyType.Kind() == reflect.Int || keyType.Kind() == reflect.Int8 || keyType.Kind() == reflect.Int16 || keyType.Kind() == reflect.Int32 || keyType.Kind() == reflect.Int64: + return strconv.FormatInt(k.Int(), 10), nil + + case keyType.Kind() == reflect.Uint || keyType.Kind() == reflect.Uint8 || keyType.Kind() == reflect.Uint16 || keyType.Kind() == reflect.Uint32 || keyType.Kind() == reflect.Uint64: + return strconv.FormatUint(k.Uint(), 10), nil + + case keyType.Kind() == reflect.Float32: + return strconv.FormatFloat(k.Float(), 'f', -1, 32), nil + + case keyType.Kind() == reflect.Float64: + return strconv.FormatFloat(k.Float(), 'f', -1, 64), nil } return "", fmt.Errorf("toml: type %s is not supported as a map key", keyType.Kind()) } @@ -668,8 +680,8 @@ func (enc *Encoder) encodeMap(b []byte, ctx encoderCtx, v reflect.Value) ([]byte } func sortEntriesByKey(e []entry) { - sort.Slice(e, func(i, j int) bool { - return e[i].Key < e[j].Key + slices.SortFunc(e, func(a, b entry) int { + return strings.Compare(a.Key, b.Key) }) } @@ -732,7 +744,7 @@ func walkStruct(ctx encoderCtx, t *table, v reflect.Value) { if fieldType.Anonymous { if fieldType.Type.Kind() == reflect.Struct { walkStruct(ctx, t, f) - } else if fieldType.Type.Kind() == reflect.Pointer && !f.IsNil() && f.Elem().Kind() == reflect.Struct { + } else if fieldType.Type.Kind() == reflect.Ptr && !f.IsNil() && f.Elem().Kind() == reflect.Struct { walkStruct(ctx, t, f.Elem()) } continue @@ -951,7 +963,7 @@ func willConvertToTable(ctx encoderCtx, v reflect.Value) bool { if !v.IsValid() { return false } - if v.Type() == timeType || v.Type().Implements(textMarshalerType) || (v.Kind() != reflect.Ptr && v.CanAddr() && reflect.PtrTo(v.Type()).Implements(textMarshalerType)) { + if v.Type() == timeType || v.Type().Implements(textMarshalerType) || (v.Kind() != reflect.Ptr && v.CanAddr() && reflect.PointerTo(v.Type()).Implements(textMarshalerType)) { return false } diff --git a/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go b/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go index 98231bae65b..c3df8bee1cf 100644 --- a/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go +++ b/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go @@ -5,9 +5,9 @@ import ( "errors" "fmt" "io" - "io/ioutil" "math" "reflect" + "strconv" "strings" "sync/atomic" "time" @@ -21,10 +21,8 @@ import ( // // It is a shortcut for Decoder.Decode() with the default options. 
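The marshaler hunk above teaches keyToString to format integer and float map keys via strconv instead of rejecting them, and the unmarshaler hunks below add the matching ParseInt/ParseUint/ParseFloat paths. A minimal round-trip sketch of that new behavior against the patched go-toml/v2 (the map contents are invented for illustration):

```go
package main

import (
	"fmt"

	"github.com/pelletier/go-toml/v2"
)

func main() {
	// With the new keyToString cases, integer map keys are emitted via
	// strconv.FormatInt instead of hitting the "type int is not supported
	// as a map key" error after the switch.
	b, err := toml.Marshal(map[int]string{1: "one", 2: "two"})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(b)) // keys appear as bare TOML keys "1" and "2"

	// The unmarshaler side parses those keys back through strconv.ParseInt
	// and converts them to the map's key type.
	var m map[int]string
	if err := toml.Unmarshal(b, &m); err != nil {
		panic(err)
	}
	fmt.Println(m[1], m[2])
}
```

Before this change both directions fell through to the "cannot convert map key" / "not supported as a map key" errors visible at the end of each switch.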
func Unmarshal(data []byte, v interface{}) error { - p := unstable.Parser{} - p.Reset(data) - d := decoder{p: &p} - + d := decoder{} + d.p.Reset(data) return d.FromParser(v) } @@ -117,27 +115,25 @@ func (d *Decoder) EnableUnmarshalerInterface() *Decoder { // Inline Table -> same as Table // Array of Tables -> same as Array and Table func (d *Decoder) Decode(v interface{}) error { - b, err := ioutil.ReadAll(d.r) + b, err := io.ReadAll(d.r) if err != nil { return fmt.Errorf("toml: %w", err) } - p := unstable.Parser{} - p.Reset(b) dec := decoder{ - p: &p, strict: strict{ Enabled: d.strict, }, unmarshalerInterface: d.unmarshalerInterface, } + dec.p.Reset(b) return dec.FromParser(v) } type decoder struct { // Which parser instance in use for this decoding session. - p *unstable.Parser + p unstable.Parser // Flag indicating that the current expression is stashed. // If set to true, calling nextExpr will not actually pull a new expression @@ -1078,12 +1074,39 @@ func (d *decoder) keyFromData(keyType reflect.Type, data []byte) (reflect.Value, } return mk, nil - case reflect.PtrTo(keyType).Implements(textUnmarshalerType): + case reflect.PointerTo(keyType).Implements(textUnmarshalerType): mk := reflect.New(keyType) if err := mk.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil { return reflect.Value{}, fmt.Errorf("toml: error unmarshalling key type %s from text: %w", stringType, err) } return mk.Elem(), nil + + case keyType.Kind() == reflect.Int || keyType.Kind() == reflect.Int8 || keyType.Kind() == reflect.Int16 || keyType.Kind() == reflect.Int32 || keyType.Kind() == reflect.Int64: + key, err := strconv.ParseInt(string(data), 10, 64) + if err != nil { + return reflect.Value{}, fmt.Errorf("toml: error parsing key of type %s from integer: %w", stringType, err) + } + return reflect.ValueOf(key).Convert(keyType), nil + case keyType.Kind() == reflect.Uint || keyType.Kind() == reflect.Uint8 || keyType.Kind() == reflect.Uint16 || keyType.Kind() == reflect.Uint32 || keyType.Kind() == reflect.Uint64: + key, err := strconv.ParseUint(string(data), 10, 64) + if err != nil { + return reflect.Value{}, fmt.Errorf("toml: error parsing key of type %s from unsigned integer: %w", stringType, err) + } + return reflect.ValueOf(key).Convert(keyType), nil + + case keyType.Kind() == reflect.Float32: + key, err := strconv.ParseFloat(string(data), 32) + if err != nil { + return reflect.Value{}, fmt.Errorf("toml: error parsing key of type %s from float: %w", stringType, err) + } + return reflect.ValueOf(float32(key)), nil + + case keyType.Kind() == reflect.Float64: + key, err := strconv.ParseFloat(string(data), 64) + if err != nil { + return reflect.Value{}, fmt.Errorf("toml: error parsing key of type %s from float: %w", stringType, err) + } + return reflect.ValueOf(float64(key)), nil } return reflect.Value{}, fmt.Errorf("toml: cannot convert map key of type %s to expected type %s", stringType, keyType) } diff --git a/vendor/github.com/power-devops/perfstat/config.go b/vendor/github.com/power-devops/perfstat/config.go index de7230d28c0..a6df39c6720 100644 --- a/vendor/github.com/power-devops/perfstat/config.go +++ b/vendor/github.com/power-devops/perfstat/config.go @@ -1,3 +1,4 @@ +//go:build aix // +build aix package perfstat diff --git a/vendor/github.com/power-devops/perfstat/cpustat.go b/vendor/github.com/power-devops/perfstat/cpustat.go index 846daafba94..10f543fa4d5 100644 --- a/vendor/github.com/power-devops/perfstat/cpustat.go +++ b/vendor/github.com/power-devops/perfstat/cpustat.go @@ -1,3 +1,4 
@@ +//go:build aix // +build aix package perfstat @@ -135,4 +136,3 @@ func CpuUtilTotalStat() (*CPUUtil, error) { u := perfstatcpuutil2cpuutil(cpuutil) return &u, nil } - diff --git a/vendor/github.com/power-devops/perfstat/diskstat.go b/vendor/github.com/power-devops/perfstat/diskstat.go index fc70dfaa4e1..06763b4bc9f 100644 --- a/vendor/github.com/power-devops/perfstat/diskstat.go +++ b/vendor/github.com/power-devops/perfstat/diskstat.go @@ -1,3 +1,4 @@ +//go:build aix // +build aix package perfstat diff --git a/vendor/github.com/power-devops/perfstat/doc.go b/vendor/github.com/power-devops/perfstat/doc.go index 85eaf3e7eda..9730a61c2c0 100644 --- a/vendor/github.com/power-devops/perfstat/doc.go +++ b/vendor/github.com/power-devops/perfstat/doc.go @@ -1,3 +1,4 @@ +//go:build !aix // +build !aix // Copyright 2020 Power-Devops.com. All rights reserved. @@ -36,24 +37,24 @@ func DisableLVMStat() {} // CpuStat() returns array of CPU structures with information about // logical CPUs on the system. // IBM documentation: -// * https://www.ibm.com/support/knowledgecenter/ssw_aix_72/performancetools/idprftools_perfstat_int_cpu.html -// * https://www.ibm.com/support/knowledgecenter/en/ssw_aix_72/p_bostechref/perfstat_cpu.html +// - https://www.ibm.com/support/knowledgecenter/ssw_aix_72/performancetools/idprftools_perfstat_int_cpu.html +// - https://www.ibm.com/support/knowledgecenter/en/ssw_aix_72/p_bostechref/perfstat_cpu.html func CpuStat() ([]CPU, error) { return nil, fmt.Errorf("not implemented") } // CpuTotalStat() returns general information about CPUs on the system. // IBM documentation: -// * https://www.ibm.com/support/knowledgecenter/ssw_aix_72/performancetools/idprftools_perfstat_glob_cpu.html -// * https://www.ibm.com/support/knowledgecenter/en/ssw_aix_72/p_bostechref/perfstat_cputot.html +// - https://www.ibm.com/support/knowledgecenter/ssw_aix_72/performancetools/idprftools_perfstat_glob_cpu.html +// - https://www.ibm.com/support/knowledgecenter/en/ssw_aix_72/p_bostechref/perfstat_cputot.html func CpuTotalStat() (*CPUTotal, error) { return nil, fmt.Errorf("not implemented") } // CpuUtilStat() calculates CPU utilization. 
// IBM documentation: -// * https://www.ibm.com/support/knowledgecenter/ssw_aix_72/performancetools/idprftools_perfstat_cpu_util.html -// * https://www.ibm.com/support/knowledgecenter/en/ssw_aix_72/p_bostechref/perfstat_cpu_util.html +// - https://www.ibm.com/support/knowledgecenter/ssw_aix_72/performancetools/idprftools_perfstat_cpu_util.html +// - https://www.ibm.com/support/knowledgecenter/en/ssw_aix_72/p_bostechref/perfstat_cpu_util.html func CpuUtilStat(intvl time.Duration) (*CPUUtil, error) { return nil, fmt.Errorf("not implemented") } diff --git a/vendor/github.com/power-devops/perfstat/fsstat.go b/vendor/github.com/power-devops/perfstat/fsstat.go index 27f4c06c158..d3913197acb 100644 --- a/vendor/github.com/power-devops/perfstat/fsstat.go +++ b/vendor/github.com/power-devops/perfstat/fsstat.go @@ -1,3 +1,4 @@ +//go:build aix // +build aix package perfstat diff --git a/vendor/github.com/power-devops/perfstat/helpers.go b/vendor/github.com/power-devops/perfstat/helpers.go index 1b13eb561e2..d5268ab53ac 100644 --- a/vendor/github.com/power-devops/perfstat/helpers.go +++ b/vendor/github.com/power-devops/perfstat/helpers.go @@ -1,3 +1,4 @@ +//go:build aix // +build aix package perfstat @@ -7,6 +8,7 @@ package perfstat #include #include +#include #include "c_helpers.h" */ @@ -762,3 +764,56 @@ func fsinfo2filesystem(n *C.struct_fsinfo) FileSystem { return i } + +func lparinfo2partinfo(n C.lpar_info_format2_t) PartitionInfo { + var i PartitionInfo + + i.Version = int(n.version) + i.OnlineMemory = uint64(n.online_memory) + i.TotalDispatchTime = uint64(n.tot_dispatch_time) + i.PoolIdleTime = uint64(n.pool_idle_time) + i.DispatchLatency = uint64(n.dispatch_latency) + i.LparFlags = uint(n.lpar_flags) + i.PCpusInSys = uint(n.pcpus_in_sys) + i.OnlineVCpus = uint(n.online_vcpus) + i.OnlineLCpus = uint(n.online_lcpus) + i.PCpusInPool = uint(n.pcpus_in_pool) + i.UnallocCapacity = uint(n.unalloc_capacity) + i.EntitledCapacity = uint(n.entitled_capacity) + i.VariableWeight = uint(n.variable_weight) + i.UnallocWeight = uint(n.unalloc_weight) + i.MinReqVCpuCapacity = uint(n.min_req_vcpu_capacity) + i.GroupId = uint8(n.group_id) + i.PoolId = uint8(n.pool_id) + i.ShCpusInSys = uint(n.shcpus_in_sys) + i.MaxPoolCapacity = uint(n.max_pool_capacity) + i.EntitledPoolCapacity = uint(n.entitled_pool_capacity) + i.PoolMaxTime = uint64(n.pool_max_time) + i.PoolBusyTime = uint64(n.pool_busy_time) + i.PoolScaledBusyTime = uint64(n.pool_scaled_busy_time) + i.ShCpuTotalTime = uint64(n.shcpu_tot_time) + i.ShCpuBusyTime = uint64(n.shcpu_busy_time) + i.ShCpuScaledBusyTime = uint64(n.shcpu_scaled_busy_time) + i.EntMemCapacity = uint64(n.ent_mem_capacity) + i.PhysMem = uint64(n.phys_mem) + i.VrmPoolPhysMem = uint64(n.vrm_pool_physmem) + i.HypPageSize = uint(n.hyp_pagesize) + i.VrmPoolId = int(n.vrm_pool_id) + i.VrmGroupId = int(n.vrm_group_id) + i.VarMemWeight = int(n.var_mem_weight) + i.UnallocVarMemWeight = int(n.unalloc_var_mem_weight) + i.UnallocEntMemCapacity = uint64(n.unalloc_ent_mem_capacity) + i.TrueOnlineMemory = uint64(n.true_online_memory) + i.AmeOnlineMemory = uint64(n.ame_online_memory) + i.AmeType = uint8(n.ame_type) + i.SpecExecMode = uint8(n.spec_exec_mode) + i.AmeFactor = uint(n.ame_factor) + i.EmPartMajorCode = uint(n.em_part_major_code) + i.EmPartMinorCode = uint(n.em_part_minor_code) + i.BytesCoalesced = uint64(n.bytes_coalesced) + i.BytesCoalescedMemPool = uint64(n.bytes_coalesced_mempool) + i.PurrCoalescing = uint64(n.purr_coalescing) + i.SpurrCoalescing = uint64(n.spurr_coalescing) + + return i +} 
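The helpers.go hunk above converts the raw C lpar_info_format2_t into the new PartitionInfo struct, and the lparstat.go hunk just below exposes it as LparInfo(). A minimal usage sketch, assuming an AIX host with libperfstat (the build tag mirrors the vendored files; the printed fields are picked arbitrarily from PartitionInfo):

```go
//go:build aix

package main

import (
	"fmt"

	"github.com/power-devops/perfstat"
)

func main() {
	// LparInfo wraps lpar_get_info(LPAR_INFO_FORMAT2, ...) and returns the
	// converted PartitionInfo defined in types_lpar.go.
	pinfo, err := perfstat.LparInfo()
	if err != nil {
		fmt.Println("lpar_get_info failed:", err)
		return
	}
	fmt.Printf("online memory: %d MB, online vCPUs: %d, SMT enabled: %v\n",
		pinfo.OnlineMemory,
		pinfo.OnlineVCpus,
		pinfo.LparFlags&perfstat.LPAR_INFO_SMT_ENABLED != 0)
}
```

Like the PartitionStat accessor next to it, LparInfo surfaces a plain error when the syscall reports a nonzero status rather than exposing the raw return code.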
diff --git a/vendor/github.com/power-devops/perfstat/lparstat.go b/vendor/github.com/power-devops/perfstat/lparstat.go index 0ce35e3c562..470a1af2f91 100644 --- a/vendor/github.com/power-devops/perfstat/lparstat.go +++ b/vendor/github.com/power-devops/perfstat/lparstat.go @@ -1,3 +1,4 @@ +//go:build aix // +build aix package perfstat @@ -6,11 +7,13 @@ package perfstat #cgo LDFLAGS: -lperfstat #include +#include */ import "C" import ( "fmt" + "unsafe" ) func PartitionStat() (*PartitionConfig, error) { @@ -24,3 +27,14 @@ func PartitionStat() (*PartitionConfig, error) { return &p, nil } + +func LparInfo() (*PartitionInfo, error) { + var pinfo C.lpar_info_format2_t + + rc := C.lpar_get_info(C.LPAR_INFO_FORMAT2, unsafe.Pointer(&pinfo), C.sizeof_lpar_info_format2_t) + if rc != 0 { + return nil, fmt.Errorf("lpar_get_info() error") + } + p := lparinfo2partinfo(pinfo) + return &p, nil +} diff --git a/vendor/github.com/power-devops/perfstat/lvmstat.go b/vendor/github.com/power-devops/perfstat/lvmstat.go index eb2064c8046..2ce99086ad0 100644 --- a/vendor/github.com/power-devops/perfstat/lvmstat.go +++ b/vendor/github.com/power-devops/perfstat/lvmstat.go @@ -1,3 +1,4 @@ +//go:build aix // +build aix package perfstat diff --git a/vendor/github.com/power-devops/perfstat/memstat.go b/vendor/github.com/power-devops/perfstat/memstat.go index d211a73aac8..52133f0a848 100644 --- a/vendor/github.com/power-devops/perfstat/memstat.go +++ b/vendor/github.com/power-devops/perfstat/memstat.go @@ -1,3 +1,4 @@ +//go:build aix // +build aix package perfstat diff --git a/vendor/github.com/power-devops/perfstat/netstat.go b/vendor/github.com/power-devops/perfstat/netstat.go index 4070da211bc..847d2946eec 100644 --- a/vendor/github.com/power-devops/perfstat/netstat.go +++ b/vendor/github.com/power-devops/perfstat/netstat.go @@ -1,3 +1,4 @@ +//go:build aix // +build aix package perfstat diff --git a/vendor/github.com/power-devops/perfstat/procstat.go b/vendor/github.com/power-devops/perfstat/procstat.go index ecafebd8db2..957ec2b33ac 100644 --- a/vendor/github.com/power-devops/perfstat/procstat.go +++ b/vendor/github.com/power-devops/perfstat/procstat.go @@ -1,3 +1,4 @@ +//go:build aix // +build aix package perfstat diff --git a/vendor/github.com/power-devops/perfstat/sysconf.go b/vendor/github.com/power-devops/perfstat/sysconf.go index c7454d03d49..b557da0deaa 100644 --- a/vendor/github.com/power-devops/perfstat/sysconf.go +++ b/vendor/github.com/power-devops/perfstat/sysconf.go @@ -1,3 +1,4 @@ +//go:build aix // +build aix package perfstat diff --git a/vendor/github.com/power-devops/perfstat/systemcfg.go b/vendor/github.com/power-devops/perfstat/systemcfg.go index 6287eb46ab8..b7c7b72592e 100644 --- a/vendor/github.com/power-devops/perfstat/systemcfg.go +++ b/vendor/github.com/power-devops/perfstat/systemcfg.go @@ -1,3 +1,4 @@ +//go:build aix // +build aix package perfstat @@ -70,6 +71,7 @@ const ( SC_TM_VER = 59 /* Transaction Memory version, 0 - not capable */ SC_NX_CAP = 60 /* NX GZIP capable */ SC_PKS_STATE = 61 /* Platform KeyStore */ + SC_MMA_VER = 62 ) /* kernel attributes */ @@ -119,6 +121,7 @@ const ( IMPL_POWER7 = 0x8000 /* 7 class CPU */ IMPL_POWER8 = 0x10000 /* 8 class CPU */ IMPL_POWER9 = 0x20000 /* 9 class CPU */ + IMPL_POWER10 = 0x20000 /* 10 class CPU */ ) // Values for implementation field for IA64 Architectures @@ -151,11 +154,13 @@ const ( PV_7 = 0x200000 /* Power PC 7 */ PV_8 = 0x300000 /* Power PC 8 */ PV_9 = 0x400000 /* Power PC 9 */ + PV_10 = 0x500000 /* Power PC 10 */ PV_5_Compat = 0x0F8000 /* 
Power PC 5 */ PV_6_Compat = 0x108000 /* Power PC 6 */ PV_7_Compat = 0x208000 /* Power PC 7 */ PV_8_Compat = 0x308000 /* Power PC 8 */ PV_9_Compat = 0x408000 /* Power PC 9 */ + PV_10_Compat = 0x508000 /* Power PC 10 */ PV_RESERVED_2 = 0x0A0000 /* source compatability */ PV_RESERVED_3 = 0x0B0000 /* source compatability */ PV_RS2 = 0x040000 /* Power RS2 */ @@ -181,19 +186,21 @@ const ( // Macros for identifying physical processor const ( - PPI4_1 = 0x35 - PPI4_2 = 0x38 - PPI4_3 = 0x39 - PPI4_4 = 0x3C - PPI4_5 = 0x44 - PPI5_1 = 0x3A - PPI5_2 = 0x3B - PPI6_1 = 0x3E - PPI7_1 = 0x3F - PPI7_2 = 0x4A - PPI8_1 = 0x4B - PPI8_2 = 0x4D - PPI9 = 0x4E + PPI4_1 = 0x35 + PPI4_2 = 0x38 + PPI4_3 = 0x39 + PPI4_4 = 0x3C + PPI4_5 = 0x44 + PPI5_1 = 0x3A + PPI5_2 = 0x3B + PPI6_1 = 0x3E + PPI7_1 = 0x3F + PPI7_2 = 0x4A + PPI8_1 = 0x4B + PPI8_2 = 0x4D + PPI9 = 0x4E + PPI9_1 = 0x4E + PPI10_1 = 0x80 ) // Macros for kernel attributes @@ -291,14 +298,32 @@ func GetCPUImplementation() string { return "POWER8" case impl&IMPL_POWER9 != 0: return "POWER9" + case impl&IMPL_POWER10 != 0: + return "Power10" default: return "Unknown" } } +func POWER10OrNewer() bool { + impl := unix.Getsystemcfg(SC_IMPL) + if impl&IMPL_POWER10 != 0 { + return true + } + return false +} + +func POWER10() bool { + impl := unix.Getsystemcfg(SC_IMPL) + if impl&IMPL_POWER10 != 0 { + return true + } + return false +} + func POWER9OrNewer() bool { impl := unix.Getsystemcfg(SC_IMPL) - if impl&IMPL_POWER9 != 0 { + if impl&IMPL_POWER10 != 0 || impl&IMPL_POWER9 != 0 { return true } return false @@ -314,7 +339,7 @@ func POWER9() bool { func POWER8OrNewer() bool { impl := unix.Getsystemcfg(SC_IMPL) - if impl&IMPL_POWER9 != 0 || impl&IMPL_POWER8 != 0 { + if impl&IMPL_POWER10 != 0 || impl&IMPL_POWER9 != 0 || impl&IMPL_POWER8 != 0 { return true } return false @@ -330,7 +355,7 @@ func POWER8() bool { func POWER7OrNewer() bool { impl := unix.Getsystemcfg(SC_IMPL) - if impl&IMPL_POWER9 != 0 || impl&IMPL_POWER8 != 0 || impl&IMPL_POWER7 != 0 { + if impl&IMPL_POWER10 != 0 || impl&IMPL_POWER9 != 0 || impl&IMPL_POWER8 != 0 || impl&IMPL_POWER7 != 0 { return true } return false @@ -419,6 +444,8 @@ func PksEnabled() bool { func CPUMode() string { impl := unix.Getsystemcfg(SC_VERS) switch impl { + case PV_10, PV_10_Compat: + return "Power10" case PV_9, PV_9_Compat: return "POWER9" case PV_8, PV_8_Compat: diff --git a/vendor/github.com/power-devops/perfstat/types_disk.go b/vendor/github.com/power-devops/perfstat/types_disk.go index ca1493d8726..50e323dbe0b 100644 --- a/vendor/github.com/power-devops/perfstat/types_disk.go +++ b/vendor/github.com/power-devops/perfstat/types_disk.go @@ -29,8 +29,8 @@ type DiskTotal struct { // Disk Adapter Types const ( DA_SCSI = 0 /* 0 ==> SCSI, SAS, other legacy adapter types */ - DA_VSCSI /* 1 ==> Virtual SCSI/SAS Adapter */ - DA_FCA /* 2 ==> Fiber Channel Adapter */ + DA_VSCSI = 1 /* 1 ==> Virtual SCSI/SAS Adapter */ + DA_FCA = 2 /* 2 ==> Fiber Channel Adapter */ ) type DiskAdapter struct { diff --git a/vendor/github.com/power-devops/perfstat/types_lpar.go b/vendor/github.com/power-devops/perfstat/types_lpar.go index 2d3c32fa8cb..f95f8c300c8 100644 --- a/vendor/github.com/power-devops/perfstat/types_lpar.go +++ b/vendor/github.com/power-devops/perfstat/types_lpar.go @@ -66,3 +66,64 @@ type PartitionConfig struct { TargetMemExpSize int64 /* Expanded Memory Size in MB */ SubProcessorMode int32 /* Split core mode, its value can be 0,1,2 or 4. 
0 for unsupported, 1 for capable but not enabled, 2 or 4 for enabled*/ } + +const ( + AME_TYPE_V1 = 0x1 + AME_TYPE_V2 = 0x2 + LPAR_INFO_CAPPED = 0x01 /* Parition Capped */ + LPAR_INFO_AUTH_PIC = 0x02 /* Authority granted for poolidle*/ + LPAR_INFO_SMT_ENABLED = 0x04 /* SMT Enabled */ + LPAR_INFO_WPAR_ACTIVE = 0x08 /* Process Running Within a WPAR */ + LPAR_INFO_EXTENDED = 0x10 /* Extended shared processor pool information */ + LPAR_INFO_AME_ENABLED = 0x20 /* Active Mem. Expansion (AME) enabled*/ + LPAR_INFO_SEM_ENABLED = 0x40 /* Speculative Execution Mode enabled */ +) + +type PartitionInfo struct { + Version int /* version for this structure */ + OnlineMemory uint64 /* MB of currently online memory */ + TotalDispatchTime uint64 /* Total lpar dispatch time in nsecs */ + PoolIdleTime uint64 /* Idle time of shared CPU pool nsecs*/ + DispatchLatency uint64 /* Max latency inbetween dispatches of this LPAR on physCPUS in nsecs */ + LparFlags uint /* LPAR flags */ + PCpusInSys uint /* # of active licensed physical CPUs in system */ + OnlineVCpus uint /* # of current online virtual CPUs */ + OnlineLCpus uint /* # of current online logical CPUs */ + PCpusInPool uint /* # physical CPUs in shared pool */ + UnallocCapacity uint /* Unallocated Capacity available in shared pool */ + EntitledCapacity uint /* Entitled Processor Capacity for this partition */ + VariableWeight uint /* Variable Processor Capacity Weight */ + UnallocWeight uint /* Unallocated Variable Weight available for this partition */ + MinReqVCpuCapacity uint /* OS minimum required virtual processor capacity. */ + GroupId uint8 /* ID of a LPAR group/aggregation */ + PoolId uint8 /* ID of a shared pool */ + ShCpusInSys uint /* # of physical processors allocated for shared processor use */ + MaxPoolCapacity uint /* Maximum processor capacity of partition's pool */ + EntitledPoolCapacity uint /* Entitled processor capacity of partition's pool */ + PoolMaxTime uint64 /* Summation of maximum time that could be consumed by the pool, in nanoseconds */ + PoolBusyTime uint64 /* Summation of busy time accumulated across all partitions in the pool, in nanoseconds */ + PoolScaledBusyTime uint64 /* Scaled summation of busy time accumulated across all partitions in the pool, in nanoseconds */ + ShCpuTotalTime uint64 /* Summation of total time across all physical processors allocated for shared processor use, in nanoseconds */ + ShCpuBusyTime uint64 /* Summation of busy time accumulated across all shared processor partitions, in nanoseconds */ + ShCpuScaledBusyTime uint64 /* Scaled summation of busy time accumulated across all shared processor partitions, in nanoseconds */ + EntMemCapacity uint64 /* Partition's current entitlement memory capacity setting */ + PhysMem uint64 /* Amount of physical memory, in bytes, currently backing the partition's logical memory */ + VrmPoolPhysMem uint64 /* Total amount of physical memory in the VRM pool */ + HypPageSize uint /* Page size hypervisor is using to virtualize partition's memory */ + VrmPoolId int /* ID of VRM pool */ + VrmGroupId int /* eWLM VRM group to which partition belongs */ + VarMemWeight int /* Partition's current variable memory capacity weighting setting */ + UnallocVarMemWeight int /* Amount of unallocated variable memory capacity weight available to LPAR's group */ + UnallocEntMemCapacity uint64 /* Amount of unallocated I/O memory entitlement available to LPAR's group */ + TrueOnlineMemory uint64 /* true MB of currently online memory */ + AmeOnlineMemory uint64 /* AME MB of currently online 
memory */ + AmeType uint8 + SpecExecMode uint8 /* Speculative Execution Mode */ + AmeFactor uint /* memory expansion factor for LPAR */ + EmPartMajorCode uint /* Major and minor codes for our */ + EmPartMinorCode uint /* current energy management mode */ + BytesCoalesced uint64 /* The number of bytes of the calling partition.s logical real memory coalesced because they contained duplicated data */ + BytesCoalescedMemPool uint64 /* If the calling partition is authorized to see pool wide statistics then the number of bytes of logical real memory coalesced because they contained duplicated data in the calling partition.s memory pool else set to zero.*/ + PurrCoalescing uint64 /* If the calling partition is authorized to see pool wide statistics then PURR cycles consumed to coalesce data else set to zero.*/ + SpurrCoalescing uint64 /* If the calling partition is authorized to see pool wide statistics then SPURR cycles consumed to coalesce data else set to zero.*/ +} diff --git a/vendor/github.com/power-devops/perfstat/uptime.go b/vendor/github.com/power-devops/perfstat/uptime.go index 2bd3e568d2d..86087874796 100644 --- a/vendor/github.com/power-devops/perfstat/uptime.go +++ b/vendor/github.com/power-devops/perfstat/uptime.go @@ -1,3 +1,4 @@ +//go:build aix // +build aix package perfstat diff --git a/vendor/github.com/prometheus/client_golang/NOTICE b/vendor/github.com/prometheus/client_golang/NOTICE index dd878a30ee9..b9cc55abbb0 100644 --- a/vendor/github.com/prometheus/client_golang/NOTICE +++ b/vendor/github.com/prometheus/client_golang/NOTICE @@ -16,8 +16,3 @@ Go support for Protocol Buffers - Google's data interchange format http://github.com/golang/protobuf/ Copyright 2010 The Go Authors See source code for license details. - -Support for streaming Protocol Buffer messages for the Go language (golang). -https://github.com/matttproud/golang_protobuf_extensions -Copyright 2013 Matt T. Proud -Licensed under the Apache License, Version 2.0 diff --git a/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/LICENSE b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/LICENSE new file mode 100644 index 00000000000..65d761bc9f2 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2013 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header/header.go b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header/header.go new file mode 100644 index 00000000000..8547c8dfd18 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header/header.go @@ -0,0 +1,145 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file or at +// https://developers.google.com/open-source/licenses/bsd. + +// Package header provides functions for parsing HTTP headers. +package header + +import ( + "net/http" + "strings" +) + +// Octet types from RFC 2616. +var octetTypes [256]octetType + +type octetType byte + +const ( + isToken octetType = 1 << iota + isSpace +) + +func init() { + // OCTET = + // CHAR = + // CTL = + // CR = + // LF = + // SP = + // HT = + // <"> = + // CRLF = CR LF + // LWS = [CRLF] 1*( SP | HT ) + // TEXT = + // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <"> + // | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT + // token = 1* + // qdtext = > + + for c := 0; c < 256; c++ { + var t octetType + isCtl := c <= 31 || c == 127 + isChar := 0 <= c && c <= 127 + isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) + if strings.ContainsRune(" \t\r\n", rune(c)) { + t |= isSpace + } + if isChar && !isCtl && !isSeparator { + t |= isToken + } + octetTypes[c] = t + } +} + +// AcceptSpec describes an Accept* header. +type AcceptSpec struct { + Value string + Q float64 +} + +// ParseAccept parses Accept* headers. 
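ParseAccept (below) and NegotiateContentEncoding in the next file are what client_golang now uses to pick a response encoding for scrapes. A rough sketch of the negotiation; note that this gddo copy sits under client_golang's internal/ tree, so the import below only resolves from within that module and is shown purely for illustration:

```go
package main

import (
	"fmt"
	"net/http"

	// Internal to client_golang; importable only inside that module.
	"github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil"
)

func main() {
	r, _ := http.NewRequest(http.MethodGet, "http://localhost/metrics", nil)
	r.Header.Set("Accept-Encoding", "gzip;q=0.9, identity;q=0.5, br;q=0")

	// NegotiateContentEncoding picks the offer with the highest q-value;
	// a q of 0 rejects an encoding, and "" means nothing was acceptable.
	fmt.Println(httputil.NegotiateContentEncoding(r, []string{"gzip", "identity"})) // gzip
}
```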
+func ParseAccept(header http.Header, key string) (specs []AcceptSpec) { +loop: + for _, s := range header[key] { + for { + var spec AcceptSpec + spec.Value, s = expectTokenSlash(s) + if spec.Value == "" { + continue loop + } + spec.Q = 1.0 + s = skipSpace(s) + if strings.HasPrefix(s, ";") { + s = skipSpace(s[1:]) + if !strings.HasPrefix(s, "q=") { + continue loop + } + spec.Q, s = expectQuality(s[2:]) + if spec.Q < 0.0 { + continue loop + } + } + specs = append(specs, spec) + s = skipSpace(s) + if !strings.HasPrefix(s, ",") { + continue loop + } + s = skipSpace(s[1:]) + } + } + return +} + +func skipSpace(s string) (rest string) { + i := 0 + for ; i < len(s); i++ { + if octetTypes[s[i]]&isSpace == 0 { + break + } + } + return s[i:] +} + +func expectTokenSlash(s string) (token, rest string) { + i := 0 + for ; i < len(s); i++ { + b := s[i] + if (octetTypes[b]&isToken == 0) && b != '/' { + break + } + } + return s[:i], s[i:] +} + +func expectQuality(s string) (q float64, rest string) { + switch { + case len(s) == 0: + return -1, "" + case s[0] == '0': + q = 0 + case s[0] == '1': + q = 1 + default: + return -1, "" + } + s = s[1:] + if !strings.HasPrefix(s, ".") { + return q, s + } + s = s[1:] + i := 0 + n := 0 + d := 1 + for ; i < len(s); i++ { + b := s[i] + if b < '0' || b > '9' { + break + } + n = n*10 + int(b) - '0' + d *= 10 + } + return q + float64(n)/float64(d), s[i:] +} diff --git a/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/negotiate.go b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/negotiate.go new file mode 100644 index 00000000000..2e45780b74b --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/negotiate.go @@ -0,0 +1,36 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file or at +// https://developers.google.com/open-source/licenses/bsd. + +package httputil + +import ( + "net/http" + + "github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header" +) + +// NegotiateContentEncoding returns the best offered content encoding for the +// request's Accept-Encoding header. If two offers match with equal weight and +// then the offer earlier in the list is preferred. If no offers are +// acceptable, then "" is returned. +func NegotiateContentEncoding(r *http.Request, offers []string) string { + bestOffer := "identity" + bestQ := -1.0 + specs := header.ParseAccept(r.Header, "Accept-Encoding") + for _, offer := range offers { + for _, spec := range specs { + if spec.Q > bestQ && + (spec.Value == "*" || spec.Value == offer) { + bestQ = spec.Q + bestOffer = offer + } + } + } + if bestQ == 0 { + bestOffer = "" + } + return bestOffer +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go b/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go index bcfa4fa10e0..cc4ef1077e8 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go @@ -37,6 +37,9 @@ var ( // MetricsScheduler allows only scheduler metrics to be collected from Go runtime. // e.g. 
go_sched_goroutines_goroutines MetricsScheduler = GoRuntimeMetricsRule{regexp.MustCompile(`^/sched/.*`)} + // MetricsDebug allows only debug metrics to be collected from Go runtime. + // e.g. go_godebug_non_default_behavior_gocachetest_events_total + MetricsDebug = GoRuntimeMetricsRule{regexp.MustCompile(`^/godebug/.*`)} ) // WithGoCollectorMemStatsMetricsDisabled disables metrics that is gathered in runtime.MemStats structure such as: @@ -44,7 +47,6 @@ var ( // go_memstats_alloc_bytes // go_memstats_alloc_bytes_total // go_memstats_sys_bytes -// go_memstats_lookups_total // go_memstats_mallocs_total // go_memstats_frees_total // go_memstats_heap_alloc_bytes diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go index ad9a71a5e0d..520cbd7d418 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go @@ -22,13 +22,13 @@ import ( // goRuntimeMemStats provides the metrics initially provided by runtime.ReadMemStats. // From Go 1.17 those similar (and better) statistics are provided by runtime/metrics, so // while eval closure works on runtime.MemStats, the struct from Go 1.17+ is -// populated using runtime/metrics. +// populated using runtime/metrics. Those are the defaults we can't alter. func goRuntimeMemStats() memStatsMetrics { return memStatsMetrics{ { desc: NewDesc( memstatNamespace("alloc_bytes"), - "Number of bytes allocated and still in use.", + "Number of bytes allocated in heap and currently in use. Equals to /memory/classes/heap/objects:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) }, @@ -36,7 +36,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("alloc_bytes_total"), - "Total number of bytes allocated, even if freed.", + "Total number of bytes allocated in heap until now, even if released already. Equals to /gc/heap/allocs:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) }, @@ -44,23 +44,16 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("sys_bytes"), - "Number of bytes obtained from system.", + "Number of bytes obtained from system. Equals to /memory/classes/total:byte.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) }, valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("lookups_total"), - "Total number of pointer lookups.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) }, - valType: CounterValue, }, { desc: NewDesc( memstatNamespace("mallocs_total"), - "Total number of mallocs.", + // TODO(bwplotka): We could add go_memstats_heap_objects, probably useful for discovery. Let's gather more feedback, kind of a waste of bytes for everybody for compatibility reasons to keep both, and we can't really rename/remove useful metric. + "Total number of heap objects allocated, both live and gc-ed. Semantically a counter version for go_memstats_heap_objects gauge. Equals to /gc/heap/allocs:objects + /gc/heap/tiny/allocs:objects.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) }, @@ -68,7 +61,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("frees_total"), - "Total number of frees.", + "Total number of heap objects frees. 
Equals to /gc/heap/frees:objects + /gc/heap/tiny/allocs:objects.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) }, @@ -76,7 +69,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_alloc_bytes"), - "Number of heap bytes allocated and still in use.", + "Number of heap bytes allocated and currently in use, same as go_memstats_alloc_bytes. Equals to /memory/classes/heap/objects:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) }, @@ -84,7 +77,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_sys_bytes"), - "Number of heap bytes obtained from system.", + "Number of heap bytes obtained from system. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes + /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) }, @@ -92,7 +85,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_idle_bytes"), - "Number of heap bytes waiting to be used.", + "Number of heap bytes waiting to be used. Equals to /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) }, @@ -100,7 +93,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_inuse_bytes"), - "Number of heap bytes that are in use.", + "Number of heap bytes that are in use. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) }, @@ -108,7 +101,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_released_bytes"), - "Number of heap bytes released to OS.", + "Number of heap bytes released to OS. Equals to /memory/classes/heap/released:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) }, @@ -116,7 +109,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_objects"), - "Number of allocated objects.", + "Number of currently allocated objects. Equals to /gc/heap/objects:objects.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) }, @@ -124,7 +117,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("stack_inuse_bytes"), - "Number of bytes in use by the stack allocator.", + "Number of bytes obtained from system for stack allocator in non-CGO environments. Equals to /memory/classes/heap/stacks:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) }, @@ -132,7 +125,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("stack_sys_bytes"), - "Number of bytes obtained from system for stack allocator.", + "Number of bytes obtained from system for stack allocator. Equals to /memory/classes/heap/stacks:bytes + /memory/classes/os-stacks:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) }, @@ -140,7 +133,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("mspan_inuse_bytes"), - "Number of bytes in use by mspan structures.", + "Number of bytes in use by mspan structures. 
Equals to /memory/classes/metadata/mspan/inuse:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) }, @@ -148,7 +141,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("mspan_sys_bytes"), - "Number of bytes used for mspan structures obtained from system.", + "Number of bytes used for mspan structures obtained from system. Equals to /memory/classes/metadata/mspan/inuse:bytes + /memory/classes/metadata/mspan/free:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) }, @@ -156,7 +149,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("mcache_inuse_bytes"), - "Number of bytes in use by mcache structures.", + "Number of bytes in use by mcache structures. Equals to /memory/classes/metadata/mcache/inuse:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) }, @@ -164,7 +157,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("mcache_sys_bytes"), - "Number of bytes used for mcache structures obtained from system.", + "Number of bytes used for mcache structures obtained from system. Equals to /memory/classes/metadata/mcache/inuse:bytes + /memory/classes/metadata/mcache/free:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) }, @@ -172,7 +165,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("buck_hash_sys_bytes"), - "Number of bytes used by the profiling bucket hash table.", + "Number of bytes used by the profiling bucket hash table. Equals to /memory/classes/profiling/buckets:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) }, @@ -180,7 +173,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("gc_sys_bytes"), - "Number of bytes used for garbage collection system metadata.", + "Number of bytes used for garbage collection system metadata. Equals to /memory/classes/metadata/other:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) }, @@ -188,7 +181,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("other_sys_bytes"), - "Number of bytes used for other system allocations.", + "Number of bytes used for other system allocations. Equals to /memory/classes/other:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) }, @@ -196,7 +189,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("next_gc_bytes"), - "Number of heap bytes when next garbage collection will take place.", + "Number of heap bytes when next garbage collection will take place. 
Equals to /gc/heap/goal:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) }, @@ -225,7 +218,7 @@ func newBaseGoCollector() baseGoCollector { nil, nil), gcDesc: NewDesc( "go_gc_duration_seconds", - "A summary of the pause duration of garbage collection cycles.", + "A summary of the wall-time pause (stop-the-world) duration in garbage collection cycles.", nil, nil), gcLastTimeDesc: NewDesc( "go_memstats_last_gc_time_seconds", diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go index 2d8d9f64f43..51174641729 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go @@ -17,6 +17,7 @@ package prometheus import ( + "fmt" "math" "runtime" "runtime/metrics" @@ -153,7 +154,8 @@ func defaultGoCollectorOptions() internal.GoCollectorOptions { "/gc/heap/frees-by-size:bytes": goGCHeapFreesBytes, }, RuntimeMetricRules: []internal.GoCollectorRule{ - //{Matcher: regexp.MustCompile("")}, + // Recommended metrics we want by default from runtime/metrics. + {Matcher: internal.GoCollectorDefaultRuntimeMetrics}, }, } } @@ -203,6 +205,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector { // to fail here. This condition is tested in TestExpectedRuntimeMetrics. continue } + help := attachOriginalName(d.Description.Description, d.Name) sampleBuf = append(sampleBuf, metrics.Sample{Name: d.Name}) sampleMap[d.Name] = &sampleBuf[len(sampleBuf)-1] @@ -214,7 +217,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector { m = newBatchHistogram( NewDesc( BuildFQName(namespace, subsystem, name), - d.Description.Description, + help, nil, nil, ), @@ -226,7 +229,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector { Namespace: namespace, Subsystem: subsystem, Name: name, - Help: d.Description.Description, + Help: help, }, ) } else { @@ -234,7 +237,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector { Namespace: namespace, Subsystem: subsystem, Name: name, - Help: d.Description.Description, + Help: help, }) } metricSet = append(metricSet, m) @@ -284,6 +287,10 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector { } } +func attachOriginalName(desc, origName string) string { + return fmt.Sprintf("%s Sourced from %s", desc, origName) +} + // Describe returns all descriptions of the collector. func (c *goCollector) Describe(ch chan<- *Desc) { c.base.Describe(ch) @@ -376,13 +383,13 @@ func unwrapScalarRMValue(v metrics.Value) float64 { // // This should never happen because we always populate our metric // set from the runtime/metrics package. - panic("unexpected unsupported metric") + panic("unexpected bad kind metric") default: // Unsupported metric kind. // // This should never happen because we check for this during initialization // and flag and filter metrics whose kinds we don't understand. 
- panic("unexpected unsupported metric kind") + panic(fmt.Sprintf("unexpected unsupported metric: %v", v.Kind())) } } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go index b5c8bcb395a..519db348a74 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -440,7 +440,7 @@ type HistogramOpts struct { // constant (or any negative float value). NativeHistogramZeroThreshold float64 - // The remaining fields define a strategy to limit the number of + // The next three fields define a strategy to limit the number of // populated sparse buckets. If NativeHistogramMaxBucketNumber is left // at zero, the number of buckets is not limited. (Note that this might // lead to unbounded memory consumption if the values observed by the @@ -473,6 +473,22 @@ type HistogramOpts struct { NativeHistogramMinResetDuration time.Duration NativeHistogramMaxZeroThreshold float64 + // NativeHistogramMaxExemplars limits the number of exemplars + // that are kept in memory for each native histogram. If you leave it at + // zero, a default value of 10 is used. If no exemplars should be kept specifically + // for native histograms, set it to a negative value. (Scrapers can + // still use the exemplars exposed for classic buckets, which are managed + // independently.) + NativeHistogramMaxExemplars int + // NativeHistogramExemplarTTL is only checked once + // NativeHistogramMaxExemplars is exceeded. In that case, the + // oldest exemplar is removed if it is older than NativeHistogramExemplarTTL. + // Otherwise, the older exemplar in the pair of exemplars that are closest + // together (on an exponential scale) is removed. + // If NativeHistogramExemplarTTL is left at its zero value, a default value of + // 5m is used. To always delete the oldest exemplar, set it to a negative value. + NativeHistogramExemplarTTL time.Duration + // now is for testing purposes, by default it's time.Now. now func() time.Time @@ -532,6 +548,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr if opts.afterFunc == nil { opts.afterFunc = time.AfterFunc } + h := &histogram{ desc: desc, upperBounds: opts.Buckets, @@ -556,6 +573,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr h.nativeHistogramZeroThreshold = DefNativeHistogramZeroThreshold } // Leave h.nativeHistogramZeroThreshold at 0 otherwise. h.nativeHistogramSchema = pickSchema(opts.NativeHistogramBucketFactor) + h.nativeExemplars = makeNativeExemplars(opts.NativeHistogramExemplarTTL, opts.NativeHistogramMaxExemplars) } for i, upperBound := range h.upperBounds { if i < len(h.upperBounds)-1 { @@ -725,7 +743,8 @@ type histogram struct { // resetScheduled is protected by mtx. It is true if a reset is // scheduled for a later time (when nativeHistogramMinResetDuration has // passed). - resetScheduled bool + resetScheduled bool + nativeExemplars nativeExemplars // now is for testing purposes, by default it's time.Now. now func() time.Time @@ -742,6 +761,9 @@ func (h *histogram) Observe(v float64) { h.observe(v, h.findBucket(v)) } +// ObserveWithExemplar should not be called in a high-frequency setting +// for a native histogram with configured exemplars. For this case, +// the implementation isn't lock-free and might suffer from lock contention. 
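The two HistogramOpts fields introduced above, NativeHistogramMaxExemplars and NativeHistogramExemplarTTL, drive the nativeExemplars store added at the bottom of this file. A minimal configuration sketch (metric name, label, and values are invented for illustration):

```go
package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	h := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name: "request_duration_seconds",
		Help: "Request latency.",
		// A factor > 1 switches on the native (sparse) histogram.
		NativeHistogramBucketFactor: 1.1,
		// Keep at most 5 exemplars; 0 means the default of 10, and a
		// negative value disables native-histogram exemplars entirely.
		NativeHistogramMaxExemplars: 5,
		// Once the limit is hit, evict an exemplar older than 2m before
		// falling back to the closest-pair replacement strategy.
		NativeHistogramExemplarTTL: 2 * time.Minute,
	})

	// Exemplars are attached through the ExemplarObserver interface.
	if eo, ok := h.(prometheus.ExemplarObserver); ok {
		eo.ObserveWithExemplar(0.42, prometheus.Labels{"trace_id": "abc123"})
	}
}
```

Per the option docs above, the TTL is only consulted once the exemplar limit is exceeded; otherwise the store evicts the older exemplar of the closest pair on an exponential scale, keeping the retained exemplars spread out by value.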
func (h *histogram) ObserveWithExemplar(v float64, e Labels) { i := h.findBucket(v) h.observe(v, i) @@ -821,6 +843,13 @@ func (h *histogram) Write(out *dto.Metric) error { Length: proto.Uint32(0), }} } + + if h.nativeExemplars.isEnabled() { + h.nativeExemplars.Lock() + his.Exemplars = append(his.Exemplars, h.nativeExemplars.exemplars...) + h.nativeExemplars.Unlock() + } + } addAndResetCounts(hotCounts, coldCounts) return nil @@ -1091,8 +1120,10 @@ func (h *histogram) resetCounts(counts *histogramCounts) { deleteSyncMap(&counts.nativeHistogramBucketsPositive) } -// updateExemplar replaces the exemplar for the provided bucket. With empty -// labels, it's a no-op. It panics if any of the labels is invalid. +// updateExemplar replaces the exemplar for the provided classic bucket. +// With empty labels, it's a no-op. It panics if any of the labels is invalid. +// If histogram is native, the exemplar will be cached into nativeExemplars, +// which has a limit, and will remove one exemplar when limit is reached. func (h *histogram) updateExemplar(v float64, bucket int, l Labels) { if l == nil { return @@ -1102,6 +1133,10 @@ func (h *histogram) updateExemplar(v float64, bucket int, l Labels) { panic(err) } h.exemplars[bucket].Store(e) + doSparse := h.nativeHistogramSchema > math.MinInt32 && !math.IsNaN(v) + if doSparse { + h.nativeExemplars.addExemplar(e) + } } // HistogramVec is a Collector that bundles a set of Histograms that all share the @@ -1336,6 +1371,48 @@ func MustNewConstHistogram( return m } +// NewConstHistogramWithCreatedTimestamp does the same thing as NewConstHistogram but sets the created timestamp. +func NewConstHistogramWithCreatedTimestamp( + desc *Desc, + count uint64, + sum float64, + buckets map[float64]uint64, + ct time.Time, + labelValues ...string, +) (Metric, error) { + if desc.err != nil { + return nil, desc.err + } + if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil { + return nil, err + } + return &constHistogram{ + desc: desc, + count: count, + sum: sum, + buckets: buckets, + labelPairs: MakeLabelPairs(desc, labelValues), + createdTs: timestamppb.New(ct), + }, nil +} + +// MustNewConstHistogramWithCreatedTimestamp is a version of NewConstHistogramWithCreatedTimestamp that panics where +// NewConstHistogramWithCreatedTimestamp would have returned an error. +func MustNewConstHistogramWithCreatedTimestamp( + desc *Desc, + count uint64, + sum float64, + buckets map[float64]uint64, + ct time.Time, + labelValues ...string, +) Metric { + m, err := NewConstHistogramWithCreatedTimestamp(desc, count, sum, buckets, ct, labelValues...) + if err != nil { + panic(err) + } + return m +} + type buckSort []*dto.Bucket func (s buckSort) Len() int { @@ -1575,3 +1652,186 @@ func addAndResetCounts(hot, cold *histogramCounts) { atomic.AddUint64(&hot.nativeHistogramZeroBucket, atomic.LoadUint64(&cold.nativeHistogramZeroBucket)) atomic.StoreUint64(&cold.nativeHistogramZeroBucket, 0) } + +type nativeExemplars struct { + sync.Mutex + + // Time-to-live for exemplars, it is set to -1 if exemplars are disabled, that is NativeHistogramMaxExemplars is below 0. + // The ttl is used on insertion to remove an exemplar that is older than ttl, if present. 
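The two HistogramOpts fields introduced above are the whole user-facing surface of the native-exemplar work; everything else in this hunk is bookkeeping behind ObserveWithExemplar. A minimal sketch, with the metric name and threshold values chosen arbitrarily:

package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	h := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:                        "request_duration_seconds",
		Help:                        "Request duration.",
		NativeHistogramBucketFactor: 1.1,             // enables native (sparse) buckets
		NativeHistogramMaxExemplars: 5,               // 0 means the default of 10, negative disables
		NativeHistogramExemplarTTL:  2 * time.Minute, // 0 means the default of 5m
	})
	prometheus.MustRegister(h)

	// The usual exemplar call now also feeds the nativeExemplars cache added
	// in this patch; note the lock-contention caveat quoted above.
	h.(prometheus.ExemplarObserver).ObserveWithExemplar(0.42, prometheus.Labels{"trace_id": "abc123"})
}
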
+ ttl time.Duration + + exemplars []*dto.Exemplar +} + +func (n *nativeExemplars) isEnabled() bool { + return n.ttl != -1 +} + +func makeNativeExemplars(ttl time.Duration, maxCount int) nativeExemplars { + if ttl == 0 { + ttl = 5 * time.Minute + } + + if maxCount == 0 { + maxCount = 10 + } + + if maxCount < 0 { + maxCount = 0 + ttl = -1 + } + + return nativeExemplars{ + ttl: ttl, + exemplars: make([]*dto.Exemplar, 0, maxCount), + } +} + +func (n *nativeExemplars) addExemplar(e *dto.Exemplar) { + if !n.isEnabled() { + return + } + + n.Lock() + defer n.Unlock() + + // When the number of exemplars has not yet exceeded or + // is equal to cap(n.exemplars), then + // insert the new exemplar directly. + if len(n.exemplars) < cap(n.exemplars) { + var nIdx int + for nIdx = 0; nIdx < len(n.exemplars); nIdx++ { + if *e.Value < *n.exemplars[nIdx].Value { + break + } + } + n.exemplars = append(n.exemplars[:nIdx], append([]*dto.Exemplar{e}, n.exemplars[nIdx:]...)...) + return + } + + if len(n.exemplars) == 1 { + // When the number of exemplars is 1, then + // replace the existing exemplar with the new exemplar. + n.exemplars[0] = e + return + } + // From this point on, the number of exemplars is greater than 1. + + // When the number of exemplars exceeds the limit, remove one exemplar. + var ( + ot = time.Time{} // Oldest timestamp seen. Initial value doesn't matter as we replace it due to otIdx == -1 in the loop. + otIdx = -1 // Index of the exemplar with the oldest timestamp. + + md = -1.0 // Logarithm of the delta of the closest pair of exemplars. + + // The insertion point of the new exemplar in the exemplars slice after insertion. + // This is calculated purely based on the order of the exemplars by value. + // nIdx == len(n.exemplars) means the new exemplar is to be inserted after the end. + nIdx = -1 + + // rIdx is ultimately the index for the exemplar that we are replacing with the new exemplar. + // The aim is to keep a good spread of exemplars by value and not let them bunch up too much. + // It is calculated in 3 steps: + // 1. First we set rIdx to the index of the older exemplar within the closest pair by value. + // That is the following will be true (on log scale): + // either the exemplar pair on index (rIdx-1, rIdx) or (rIdx, rIdx+1) will have + // the closest values to each other from all pairs. + // For example, suppose the values are distributed like this: + // |-----------x-------------x----------------x----x-----| + // ^--rIdx as this is older. + // Or like this: + // |-----------x-------------x----------------x----x-----| + // ^--rIdx as this is older. + // 2. If there is an exemplar that expired, then we simple reset rIdx to that index. + // 3. We check if by inserting the new exemplar we would create a closer pair at + // (nIdx-1, nIdx) or (nIdx, nIdx+1) and set rIdx to nIdx-1 or nIdx accordingly to + // keep the spread of exemplars by value; otherwise we keep rIdx as it is. + rIdx = -1 + cLog float64 // Logarithm of the current exemplar. + pLog float64 // Logarithm of the previous exemplar. + ) + + for i, exemplar := range n.exemplars { + // Find the exemplar with the oldest timestamp. + if otIdx == -1 || exemplar.Timestamp.AsTime().Before(ot) { + ot = exemplar.Timestamp.AsTime() + otIdx = i + } + + // Find the index at which to insert new the exemplar. + if nIdx == -1 && *e.Value <= *exemplar.Value { + nIdx = i + } + + // Find the two closest exemplars and pick the one the with older timestamp. 
+ pLog = cLog + cLog = math.Log(exemplar.GetValue()) + if i == 0 { + continue + } + diff := math.Abs(cLog - pLog) + if md == -1 || diff < md { + // The closest exemplar pair is at index: i-1, i. + // Choose the exemplar with the older timestamp for replacement. + md = diff + if n.exemplars[i].Timestamp.AsTime().Before(n.exemplars[i-1].Timestamp.AsTime()) { + rIdx = i + } else { + rIdx = i - 1 + } + } + + } + + // If all existing exemplar are smaller than new exemplar, + // then the exemplar should be inserted at the end. + if nIdx == -1 { + nIdx = len(n.exemplars) + } + // Here, we have the following relationships: + // n.exemplars[nIdx-1].Value < e.Value (if nIdx > 0) + // e.Value <= n.exemplars[nIdx].Value (if nIdx < len(n.exemplars)) + + if otIdx != -1 && e.Timestamp.AsTime().Sub(ot) > n.ttl { + // If the oldest exemplar has expired, then replace it with the new exemplar. + rIdx = otIdx + } else { + // In the previous for loop, when calculating the closest pair of exemplars, + // we did not take into account the newly inserted exemplar. + // So we need to calculate with the newly inserted exemplar again. + elog := math.Log(e.GetValue()) + if nIdx > 0 { + diff := math.Abs(elog - math.Log(n.exemplars[nIdx-1].GetValue())) + if diff < md { + // The value we are about to insert is closer to the previous exemplar at the insertion point than what we calculated before in rIdx. + // v--rIdx + // |-----------x-n-----------x----------------x----x-----| + // nIdx-1--^ ^--new exemplar value + // Do not make the spread worse, replace nIdx-1 and not rIdx. + md = diff + rIdx = nIdx - 1 + } + } + if nIdx < len(n.exemplars) { + diff := math.Abs(math.Log(n.exemplars[nIdx].GetValue()) - elog) + if diff < md { + // The value we are about to insert is closer to the next exemplar at the insertion point than what we calculated before in rIdx. + // v--rIdx + // |-----------x-----------n-x----------------x----x-----| + // new exemplar value--^ ^--nIdx + // Do not make the spread worse, replace nIdx-1 and not rIdx. + rIdx = nIdx + } + } + } + + // Adjust the slice according to rIdx and nIdx. + switch { + case rIdx == nIdx: + n.exemplars[nIdx] = e + case rIdx < nIdx: + n.exemplars = append(n.exemplars[:rIdx], append(n.exemplars[rIdx+1:nIdx], append([]*dto.Exemplar{e}, n.exemplars[nIdx:]...)...)...) + case rIdx > nIdx: + n.exemplars = append(n.exemplars[:nIdx], append([]*dto.Exemplar{e}, append(n.exemplars[nIdx:rIdx], n.exemplars[rIdx+1:]...)...)...) 
+ } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go index 723b45d6444..a4fa6eabd78 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go @@ -30,3 +30,5 @@ type GoCollectorOptions struct { RuntimeMetricSumForHist map[string]string RuntimeMetricRules []GoCollectorRule } + +var GoCollectorDefaultRuntimeMetrics = regexp.MustCompile(`/gc/gogc:percent|/gc/gomemlimit:bytes|/sched/gomaxprocs:threads`) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go index f018e57237d..9d9b81ab448 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go @@ -234,7 +234,7 @@ func NewMetricWithExemplars(m Metric, exemplars ...Exemplar) (Metric, error) { ) for i, e := range exemplars { ts := e.Timestamp - if ts == (time.Time{}) { + if ts.IsZero() { ts = now } exs[i], err = newExemplar(e.Value, ts, e.Labels) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go index 8548dd18ed5..62a4e7ad9a0 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go @@ -22,14 +22,15 @@ import ( ) type processCollector struct { - collectFn func(chan<- Metric) - pidFn func() (int, error) - reportErrors bool - cpuTotal *Desc - openFDs, maxFDs *Desc - vsize, maxVsize *Desc - rss *Desc - startTime *Desc + collectFn func(chan<- Metric) + pidFn func() (int, error) + reportErrors bool + cpuTotal *Desc + openFDs, maxFDs *Desc + vsize, maxVsize *Desc + rss *Desc + startTime *Desc + inBytes, outBytes *Desc } // ProcessCollectorOpts defines the behavior of a process metrics collector @@ -100,6 +101,16 @@ func NewProcessCollector(opts ProcessCollectorOpts) Collector { "Start time of the process since unix epoch in seconds.", nil, nil, ), + inBytes: NewDesc( + ns+"process_network_receive_bytes_total", + "Number of bytes received by the process over the network.", + nil, nil, + ), + outBytes: NewDesc( + ns+"process_network_transmit_bytes_total", + "Number of bytes sent by the process over the network.", + nil, nil, + ), } if opts.PidFn == nil { @@ -129,6 +140,8 @@ func (c *processCollector) Describe(ch chan<- *Desc) { ch <- c.maxVsize ch <- c.rss ch <- c.startTime + ch <- c.inBytes + ch <- c.outBytes } // Collect returns the current state of all metrics of the collector. 
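With the two descriptors added above, the stock process collector now reports network I/O as well; registering it is unchanged. A sketch, assuming the collectors wrapper around prometheus.NewProcessCollector:

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/collectors"
)

func newRegistry() *prometheus.Registry {
	reg := prometheus.NewRegistry()
	// On Linux this now also exposes process_network_receive_bytes_total and
	// process_network_transmit_bytes_total, fed by the procfs Netstat call in
	// the next hunk, alongside the existing CPU, FD and memory series.
	reg.MustRegister(collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}))
	return reg
}
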
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go index 8c1136ceea3..14d56d2d068 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go @@ -63,4 +63,18 @@ func (c *processCollector) processCollect(ch chan<- Metric) { } else { c.reportError(ch, nil, err) } + + if netstat, err := p.Netstat(); err == nil { + var inOctets, outOctets float64 + if netstat.IpExt.InOctets != nil { + inOctets = *netstat.IpExt.InOctets + } + if netstat.IpExt.OutOctets != nil { + outOctets = *netstat.IpExt.OutOctets + } + ch <- MustNewConstMetric(c.inBytes, CounterValue, inOctets) + ch <- MustNewConstMetric(c.outBytes, CounterValue, outOctets) + } else { + c.reportError(ch, nil, err) + } } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go index 9819917b83b..315eab5f179 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go @@ -76,6 +76,12 @@ func (r *responseWriterDelegator) Write(b []byte) (int, error) { return n, err } +// Unwrap lets http.ResponseController get the underlying http.ResponseWriter, +// by implementing the [rwUnwrapper](https://cs.opensource.google/go/go/+/refs/tags/go1.21.4:src/net/http/responsecontroller.go;l=42-44) interface. +func (r *responseWriterDelegator) Unwrap() http.ResponseWriter { + return r.ResponseWriter +} + type ( closeNotifierDelegator struct{ *responseWriterDelegator } flusherDelegator struct{ *responseWriterDelegator } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go index 09b8d2fbead..e598e66e688 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go @@ -38,12 +38,13 @@ import ( "io" "net/http" "strconv" - "strings" "sync" "time" + "github.com/klauspost/compress/zstd" "github.com/prometheus/common/expfmt" + "github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil" "github.com/prometheus/client_golang/prometheus" ) @@ -54,6 +55,18 @@ const ( processStartTimeHeader = "Process-Start-Time-Unix" ) +// Compression represents the content encodings handlers support for the HTTP +// responses. +type Compression string + +const ( + Identity Compression = "identity" + Gzip Compression = "gzip" + Zstd Compression = "zstd" +) + +var defaultCompressionFormats = []Compression{Identity, Gzip, Zstd} + var gzipPool = sync.Pool{ New: func() interface{} { return gzip.NewWriter(nil) @@ -122,6 +135,18 @@ func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerO } } + // Select compression formats to offer based on default or user choice. 
+ var compressions []string + if !opts.DisableCompression { + offers := defaultCompressionFormats + if len(opts.OfferedCompressions) > 0 { + offers = opts.OfferedCompressions + } + for _, comp := range offers { + compressions = append(compressions, string(comp)) + } + } + h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) { if !opts.ProcessStartTime.IsZero() { rsp.Header().Set(processStartTimeHeader, strconv.FormatInt(opts.ProcessStartTime.Unix(), 10)) @@ -165,21 +190,23 @@ func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerO } else { contentType = expfmt.Negotiate(req.Header) } - header := rsp.Header() - header.Set(contentTypeHeader, string(contentType)) + rsp.Header().Set(contentTypeHeader, string(contentType)) - w := io.Writer(rsp) - if !opts.DisableCompression && gzipAccepted(req.Header) { - header.Set(contentEncodingHeader, "gzip") - gz := gzipPool.Get().(*gzip.Writer) - defer gzipPool.Put(gz) + w, encodingHeader, closeWriter, err := negotiateEncodingWriter(req, rsp, compressions) + if err != nil { + if opts.ErrorLog != nil { + opts.ErrorLog.Println("error getting writer", err) + } + w = io.Writer(rsp) + encodingHeader = string(Identity) + } - gz.Reset(w) - defer gz.Close() + defer closeWriter() - w = gz + // Set Content-Encoding only when data is compressed + if encodingHeader != string(Identity) { + rsp.Header().Set(contentEncodingHeader, encodingHeader) } - enc := expfmt.NewEncoder(w, contentType) // handleError handles the error according to opts.ErrorHandling @@ -343,9 +370,19 @@ type HandlerOpts struct { // no effect on the HTTP status code because ErrorHandling is set to // ContinueOnError. Registry prometheus.Registerer - // If DisableCompression is true, the handler will never compress the - // response, even if requested by the client. + // DisableCompression disables the response encoding (compression) and + // encoding negotiation. If true, the handler will + // never compress the response, even if requested + // by the client and the OfferedCompressions field is set. DisableCompression bool + // OfferedCompressions is a set of encodings (compressions) handler will + // try to offer when negotiating with the client. This defaults to identity, gzip + // and zstd. + // NOTE: If handler can't agree with the client on the encodings or + // unsupported or empty encodings are set in OfferedCompressions, + // handler always fallbacks to no compression (identity), for + // compatibility reasons. In such cases ErrorLog will be used if set. + OfferedCompressions []Compression // The number of concurrent HTTP requests is limited to // MaxRequestsInFlight. Additional requests are responded to with 503 // Service Unavailable and a suitable message in the body. If @@ -381,19 +418,6 @@ type HandlerOpts struct { ProcessStartTime time.Time } -// gzipAccepted returns whether the client will accept gzip-encoded content. -func gzipAccepted(header http.Header) bool { - a := header.Get(acceptEncodingHeader) - parts := strings.Split(a, ",") - for _, part := range parts { - part = strings.TrimSpace(part) - if part == "gzip" || strings.HasPrefix(part, "gzip;") { - return true - } - } - return false -} - // httpError removes any content-encoding header and then calls http.Error with // the provided error and http.StatusInternalServerError. Error contents is // supposed to be uncompressed plain text. 
Same as with a plain http.Error, this @@ -406,3 +430,38 @@ func httpError(rsp http.ResponseWriter, err error) { http.StatusInternalServerError, ) } + +// negotiateEncodingWriter reads the Accept-Encoding header from a request and +// selects the right compression based on an allow-list of supported +// compressions. It returns a writer implementing the compression and an the +// correct value that the caller can set in the response header. +func negotiateEncodingWriter(r *http.Request, rw io.Writer, compressions []string) (_ io.Writer, encodingHeaderValue string, closeWriter func(), _ error) { + if len(compressions) == 0 { + return rw, string(Identity), func() {}, nil + } + + // TODO(mrueg): Replace internal/github.com/gddo once https://github.com/golang/go/issues/19307 is implemented. + selected := httputil.NegotiateContentEncoding(r, compressions) + + switch selected { + case "zstd": + // TODO(mrueg): Replace klauspost/compress with stdlib implementation once https://github.com/golang/go/issues/62513 is implemented. + z, err := zstd.NewWriter(rw, zstd.WithEncoderLevel(zstd.SpeedFastest)) + if err != nil { + return nil, "", func() {}, err + } + + z.Reset(rw) + return z, selected, func() { _ = z.Close() }, nil + case "gzip": + gz := gzipPool.Get().(*gzip.Writer) + gz.Reset(rw) + return gz, selected, func() { _ = gz.Close(); gzipPool.Put(gz) }, nil + case "identity": + // This means the content is not compressed. + return rw, selected, func() {}, nil + default: + // The content encoding was not implemented yet. + return nil, "", func() {}, fmt.Errorf("content compression format not recognized: %s. Valid formats are: %s", selected, defaultCompressionFormats) + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go index 5e2ced25a02..c6fd2f58b74 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go @@ -314,16 +314,17 @@ func (r *Registry) Register(c Collector) error { if dimHash != desc.dimHash { return fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc) } - } else { - // ...then check the new descriptors already seen. - if dimHash, exists := newDimHashesByName[desc.fqName]; exists { - if dimHash != desc.dimHash { - return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc) - } - } else { - newDimHashesByName[desc.fqName] = desc.dimHash + continue + } + + // ...then check the new descriptors already seen. + if dimHash, exists := newDimHashesByName[desc.fqName]; exists { + if dimHash != desc.dimHash { + return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc) } + continue } + newDimHashesByName[desc.fqName] = desc.dimHash } // A Collector yielding no Desc at all is considered unchecked. 
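The negotiation rewrite above replaces the old gzip-only path; zstd support leans on klauspost/compress until the stdlib grows an encoder (see the TODOs), and failed negotiation falls back to identity rather than erroring. Separately, the Unwrap method added to the delegator lets http.ResponseController reach the underlying ResponseWriter through instrumented handlers. A sketch of pinning the offered encodings through the new HandlerOpts field:

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func metricsHandler(reg *prometheus.Registry) http.Handler {
	return promhttp.HandlerFor(reg, promhttp.HandlerOpts{
		// Offer only identity and gzip, leaving zstd out; a nil or empty
		// slice keeps the default of identity, gzip and zstd.
		OfferedCompressions: []promhttp.Compression{promhttp.Identity, promhttp.Gzip},
	})
}
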
if len(newDescIDs) == 0 { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go index 1462704446c..1ab0e479655 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go @@ -783,3 +783,45 @@ func MustNewConstSummary( } return m } + +// NewConstSummaryWithCreatedTimestamp does the same thing as NewConstSummary but sets the created timestamp. +func NewConstSummaryWithCreatedTimestamp( + desc *Desc, + count uint64, + sum float64, + quantiles map[float64]float64, + ct time.Time, + labelValues ...string, +) (Metric, error) { + if desc.err != nil { + return nil, desc.err + } + if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil { + return nil, err + } + return &constSummary{ + desc: desc, + count: count, + sum: sum, + quantiles: quantiles, + labelPairs: MakeLabelPairs(desc, labelValues), + createdTs: timestamppb.New(ct), + }, nil +} + +// MustNewConstSummaryWithCreatedTimestamp is a version of NewConstSummaryWithCreatedTimestamp that panics where +// NewConstSummaryWithCreatedTimestamp would have returned an error. +func MustNewConstSummaryWithCreatedTimestamp( + desc *Desc, + count uint64, + sum float64, + quantiles map[float64]float64, + ct time.Time, + labelValues ...string, +) Metric { + m, err := NewConstSummaryWithCreatedTimestamp(desc, count, sum, quantiles, ct, labelValues...) + if err != nil { + panic(err) + } + return m +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validation.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validation.go index f52ad9eab67..e1441598da8 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validation.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validation.go @@ -30,4 +30,5 @@ var defaultValidations = []Validation{ validations.LintReservedChars, validations.LintCamelCase, validations.LintUnitAbbreviations, + validations.LintDuplicateMetric, } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/duplicate_validations.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/duplicate_validations.go new file mode 100644 index 00000000000..fdc1e623948 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/duplicate_validations.go @@ -0,0 +1,37 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package validations + +import ( + "fmt" + "reflect" + + dto "github.com/prometheus/client_model/go" +) + +// LintDuplicateMetric detects duplicate metric. 
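NewConstSummaryWithCreatedTimestamp and its Must variant below mirror the const-histogram pair added earlier in this patch. A sketch of the call site inside a custom collector; the type, label and values are hypothetical:

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

type shardSummary struct {
	desc    *prometheus.Desc // one variable label, say "shard"
	started time.Time
}

func (s *shardSummary) Describe(ch chan<- *prometheus.Desc) { ch <- s.desc }

func (s *shardSummary) Collect(ch chan<- prometheus.Metric) {
	ch <- prometheus.MustNewConstSummaryWithCreatedTimestamp(
		s.desc,
		1234,  // observation count
		987.6, // observation sum
		map[float64]float64{0.5: 0.2, 0.99: 1.5}, // quantile -> value
		s.started, // created timestamp, e.g. recorded at construction
		"shard-0", // label values matching s.desc's variable labels
	)
}
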
+func LintDuplicateMetric(mf *dto.MetricFamily) []error { + var problems []error + + for i, m := range mf.Metric { + for _, k := range mf.Metric[i+1:] { + if reflect.DeepEqual(m.Label, k.Label) { + problems = append(problems, fmt.Errorf("metric not unique")) + break + } + } + } + + return problems +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/generic_name_validations.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/generic_name_validations.go index bc8dbd1e16b..de52cfee443 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/generic_name_validations.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/generic_name_validations.go @@ -44,21 +44,21 @@ func LintMetricUnits(mf *dto.MetricFamily) []error { return problems } -// LintMetricTypeInName detects when metric types are included in the metric name. +// LintMetricTypeInName detects when the metric type is included in the metric name. func LintMetricTypeInName(mf *dto.MetricFamily) []error { + if mf.GetType() == dto.MetricType_UNTYPED { + return nil + } + var problems []error - n := strings.ToLower(mf.GetName()) - for i, t := range dto.MetricType_name { - if i == int32(dto.MetricType_UNTYPED) { - continue - } + n := strings.ToLower(mf.GetName()) + typename := strings.ToLower(mf.GetType().String()) - typename := strings.ToLower(t) - if strings.Contains(n, "_"+typename+"_") || strings.HasSuffix(n, "_"+typename) { - problems = append(problems, fmt.Errorf(`metric name should not include type '%s'`, typename)) - } + if strings.Contains(n, "_"+typename+"_") || strings.HasSuffix(n, "_"+typename) { + problems = append(problems, fmt.Errorf(`metric name should not include type '%s'`, typename)) } + return problems } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go index 9dce15eafa2..6f1200180a7 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go @@ -42,9 +42,8 @@ import ( "fmt" "io" "net/http" - "reflect" - "github.com/davecgh/go-spew/spew" + "github.com/kylelemons/godebug/diff" dto "github.com/prometheus/client_model/go" "github.com/prometheus/common/expfmt" "google.golang.org/protobuf/proto" @@ -159,6 +158,9 @@ func GatherAndCount(g prometheus.Gatherer, metricNames ...string) (int, error) { // ScrapeAndCompare calls a remote exporter's endpoint which is expected to return some metrics in // plain text format. Then it compares it with the results that the `expected` would return. // If the `metricNames` is not empty it would filter the comparison only to the given metric names. +// +// NOTE: Be mindful of accidental discrepancies between expected and metricNames; metricNames filter +// both expected and scraped metrics. See https://github.com/prometheus/client_golang/issues/1351. func ScrapeAndCompare(url string, expected io.Reader, metricNames ...string) error { resp, err := http.Get(url) if err != nil { @@ -184,9 +186,11 @@ func ScrapeAndCompare(url string, expected io.Reader, metricNames ...string) err return compareMetricFamilies(scraped, wanted, metricNames...) } -// CollectAndCompare registers the provided Collector with a newly created -// pedantic Registry. 
It then calls GatherAndCompare with that Registry and with -// the provided metricNames. +// CollectAndCompare collects the metrics identified by `metricNames` and compares them in the Prometheus text +// exposition format to the data read from expected. +// +// NOTE: Be mindful of accidental discrepancies between expected and metricNames; metricNames filter +// both expected and collected metrics. See https://github.com/prometheus/client_golang/issues/1351. func CollectAndCompare(c prometheus.Collector, expected io.Reader, metricNames ...string) error { reg := prometheus.NewPedanticRegistry() if err := reg.Register(c); err != nil { @@ -199,6 +203,9 @@ func CollectAndCompare(c prometheus.Collector, expected io.Reader, metricNames . // it to an expected output read from the provided Reader in the Prometheus text // exposition format. If any metricNames are provided, only metrics with those // names are compared. +// +// NOTE: Be mindful of accidental discrepancies between expected and metricNames; metricNames filter +// both expected and gathered metrics. See https://github.com/prometheus/client_golang/issues/1351. func GatherAndCompare(g prometheus.Gatherer, expected io.Reader, metricNames ...string) error { return TransactionalGatherAndCompare(prometheus.ToTransactionalGatherer(g), expected, metricNames...) } @@ -207,6 +214,9 @@ func GatherAndCompare(g prometheus.Gatherer, expected io.Reader, metricNames ... // it to an expected output read from the provided Reader in the Prometheus text // exposition format. If any metricNames are provided, only metrics with those // names are compared. +// +// NOTE: Be mindful of accidental discrepancies between expected and metricNames; metricNames filter +// both expected and gathered metrics. See https://github.com/prometheus/client_golang/issues/1351. func TransactionalGatherAndCompare(g prometheus.TransactionalGatherer, expected io.Reader, metricNames ...string) error { got, done, err := g.Gather() defer done() @@ -222,6 +232,31 @@ func TransactionalGatherAndCompare(g prometheus.TransactionalGatherer, expected return compareMetricFamilies(got, wanted, metricNames...) } +// CollectAndFormat collects the metrics identified by `metricNames` and returns them in the given format. +func CollectAndFormat(c prometheus.Collector, format expfmt.FormatType, metricNames ...string) ([]byte, error) { + reg := prometheus.NewPedanticRegistry() + if err := reg.Register(c); err != nil { + return nil, fmt.Errorf("registering collector failed: %w", err) + } + + gotFiltered, err := reg.Gather() + if err != nil { + return nil, fmt.Errorf("gathering metrics failed: %w", err) + } + + gotFiltered = filterMetrics(gotFiltered, metricNames) + + var gotFormatted bytes.Buffer + enc := expfmt.NewEncoder(&gotFormatted, expfmt.NewFormat(format)) + for _, mf := range gotFiltered { + if err := enc.Encode(mf); err != nil { + return nil, fmt.Errorf("encoding gathered metrics failed: %w", err) + } + } + + return gotFormatted.Bytes(), nil +} + // convertReaderToMetricFamily would read from a io.Reader object and convert it to a slice of // dto.MetricFamily. 
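CollectAndFormat above gives tests a way to get gathered output back in a chosen exposition format instead of only comparing it. A sketch using the FormatType constants from expfmt:

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/testutil"
	"github.com/prometheus/common/expfmt"
)

func dumpCounter() error {
	c := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "events_total",
		Help: "Processed events.",
	})
	c.Add(3)

	// Render just events_total in OpenMetrics text format.
	out, err := testutil.CollectAndFormat(c, expfmt.TypeOpenMetrics, "events_total")
	if err != nil {
		return err
	}
	fmt.Printf("%s", out)
	return nil
}
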
func convertReaderToMetricFamily(reader io.Reader) ([]*dto.MetricFamily, error) { @@ -277,73 +312,12 @@ func compare(got, want []*dto.MetricFamily) error { return fmt.Errorf("encoding expected metrics failed: %w", err) } } - if diffErr := diff(wantBuf, gotBuf); diffErr != "" { + if diffErr := diff.Diff(gotBuf.String(), wantBuf.String()); diffErr != "" { return fmt.Errorf(diffErr) } return nil } -// diff returns a diff of both values as long as both are of the same type and -// are a struct, map, slice, array or string. Otherwise it returns an empty string. -func diff(expected, actual interface{}) string { - if expected == nil || actual == nil { - return "" - } - - et, ek := typeAndKind(expected) - at, _ := typeAndKind(actual) - if et != at { - return "" - } - - if ek != reflect.Struct && ek != reflect.Map && ek != reflect.Slice && ek != reflect.Array && ek != reflect.String { - return "" - } - - var e, a string - c := spew.ConfigState{ - Indent: " ", - DisablePointerAddresses: true, - DisableCapacities: true, - SortKeys: true, - } - if et != reflect.TypeOf("") { - e = c.Sdump(expected) - a = c.Sdump(actual) - } else { - e = reflect.ValueOf(expected).String() - a = reflect.ValueOf(actual).String() - } - - diff, _ := internal.GetUnifiedDiffString(internal.UnifiedDiff{ - A: internal.SplitLines(e), - B: internal.SplitLines(a), - FromFile: "metric output does not match expectation; want", - FromDate: "", - ToFile: "got:", - ToDate: "", - Context: 1, - }) - - if diff == "" { - return "" - } - - return "\n\nDiff:\n" + diff -} - -// typeAndKind returns the type and kind of the given interface{} -func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) { - t := reflect.TypeOf(v) - k := t.Kind() - - if k == reflect.Ptr { - t = t.Elem() - k = t.Kind() - } - return t, k -} - func filterMetrics(metrics []*dto.MetricFamily, names []string) []*dto.MetricFamily { var filtered []*dto.MetricFamily for _, m := range metrics { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go index 955cfd59f83..2c808eece0a 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/vec.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go @@ -507,7 +507,7 @@ func (m *metricMap) getOrCreateMetricWithLabelValues( return metric } -// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value +// getOrCreateMetricWithLabels retrieves the metric by hash and label value // or creates it and returns the new one. // // This function holds the mutex. diff --git a/vendor/github.com/prometheus/common/config/headers.go b/vendor/github.com/prometheus/common/config/headers.go index 4a0be4a10e9..7276742ec90 100644 --- a/vendor/github.com/prometheus/common/config/headers.go +++ b/vendor/github.com/prometheus/common/config/headers.go @@ -52,14 +52,6 @@ var reservedHeaders = map[string]struct{}{ // Headers represents the configuration for HTTP headers. type Headers struct { Headers map[string]Header `yaml:",inline"` - dir string -} - -// Header represents the configuration for a single HTTP header. 
-type Header struct { - Values []string `yaml:"values,omitempty" json:"values,omitempty"` - Secrets []Secret `yaml:"secrets,omitempty" json:"secrets,omitempty"` - Files []string `yaml:"files,omitempty" json:"files,omitempty"` } func (h Headers) MarshalJSON() ([]byte, error) { @@ -67,32 +59,40 @@ func (h Headers) MarshalJSON() ([]byte, error) { return json.Marshal(h.Headers) } -// SetDirectory records the directory to make headers file relative to the -// configuration file. +// SetDirectory make headers file relative to the configuration file. func (h *Headers) SetDirectory(dir string) { if h == nil { return } - h.dir = dir + for _, h := range h.Headers { + h.SetDirectory(dir) + } } // Validate validates the Headers config. func (h *Headers) Validate() error { - for n, header := range h.Headers { + for n := range h.Headers { if _, ok := reservedHeaders[http.CanonicalHeaderKey(n)]; ok { return fmt.Errorf("setting header %q is not allowed", http.CanonicalHeaderKey(n)) } - for _, v := range header.Files { - f := JoinDir(h.dir, v) - _, err := os.ReadFile(f) - if err != nil { - return fmt.Errorf("unable to read header %q from file %s: %w", http.CanonicalHeaderKey(n), f, err) - } - } } return nil } +// Header represents the configuration for a single HTTP header. +type Header struct { + Values []string `yaml:"values,omitempty" json:"values,omitempty"` + Secrets []Secret `yaml:"secrets,omitempty" json:"secrets,omitempty"` + Files []string `yaml:"files,omitempty" json:"files,omitempty"` +} + +// SetDirectory makes headers file relative to the configuration file. +func (h *Header) SetDirectory(dir string) { + for i := range h.Files { + h.Files[i] = JoinDir(dir, h.Files[i]) + } +} + // NewHeadersRoundTripper returns a RoundTripper that sets HTTP headers on // requests as configured. func NewHeadersRoundTripper(config *Headers, next http.RoundTripper) http.RoundTripper { @@ -121,10 +121,9 @@ func (rt *headersRoundTripper) RoundTrip(req *http.Request) (*http.Response, err req.Header.Add(n, string(v)) } for _, v := range h.Files { - f := JoinDir(rt.config.dir, v) - b, err := os.ReadFile(f) + b, err := os.ReadFile(v) if err != nil { - return nil, fmt.Errorf("unable to read headers file %s: %w", f, err) + return nil, fmt.Errorf("unable to read headers file %s: %w", v, err) } req.Header.Add(n, strings.TrimSpace(string(b))) } diff --git a/vendor/github.com/prometheus/common/config/http_config.go b/vendor/github.com/prometheus/common/config/http_config.go index 3e320134776..57ec252adff 100644 --- a/vendor/github.com/prometheus/common/config/http_config.go +++ b/vendor/github.com/prometheus/common/config/http_config.go @@ -357,33 +357,33 @@ func nonZeroCount[T comparable](values ...T) int { func (c *HTTPClientConfig) Validate() error { // Backwards compatibility with the bearer_token field. 
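Moving file resolution into Header.SetDirectory above means relative paths are rewritten once, up front, and RoundTrip simply reads them. A sketch of the consumer side; the header names and paths are purely illustrative:

import (
	"net/http"

	"github.com/prometheus/common/config"
)

func newTenantClient() *http.Client {
	hdrs := &config.Headers{
		Headers: map[string]config.Header{
			"X-Scope-OrgID": {Values: []string{"tenant-1"}},
			"Authorization": {Files: []string{"bearer.token"}},
		},
	}
	// Relative file entries such as bearer.token are rewritten in place,
	// here to /etc/myapp/bearer.token, instead of being joined lazily on
	// every request as before.
	hdrs.SetDirectory("/etc/myapp")
	return &http.Client{Transport: config.NewHeadersRoundTripper(hdrs, http.DefaultTransport)}
}
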
if len(c.BearerToken) > 0 && len(c.BearerTokenFile) > 0 { - return fmt.Errorf("at most one of bearer_token & bearer_token_file must be configured") + return errors.New("at most one of bearer_token & bearer_token_file must be configured") } if (c.BasicAuth != nil || c.OAuth2 != nil) && (len(c.BearerToken) > 0 || len(c.BearerTokenFile) > 0) { - return fmt.Errorf("at most one of basic_auth, oauth2, bearer_token & bearer_token_file must be configured") + return errors.New("at most one of basic_auth, oauth2, bearer_token & bearer_token_file must be configured") } if c.BasicAuth != nil && nonZeroCount(string(c.BasicAuth.Username) != "", c.BasicAuth.UsernameFile != "", c.BasicAuth.UsernameRef != "") > 1 { - return fmt.Errorf("at most one of basic_auth username, username_file & username_ref must be configured") + return errors.New("at most one of basic_auth username, username_file & username_ref must be configured") } if c.BasicAuth != nil && nonZeroCount(string(c.BasicAuth.Password) != "", c.BasicAuth.PasswordFile != "", c.BasicAuth.PasswordRef != "") > 1 { - return fmt.Errorf("at most one of basic_auth password, password_file & password_ref must be configured") + return errors.New("at most one of basic_auth password, password_file & password_ref must be configured") } if c.Authorization != nil { if len(c.BearerToken) > 0 || len(c.BearerTokenFile) > 0 { - return fmt.Errorf("authorization is not compatible with bearer_token & bearer_token_file") + return errors.New("authorization is not compatible with bearer_token & bearer_token_file") } if nonZeroCount(string(c.Authorization.Credentials) != "", c.Authorization.CredentialsFile != "", c.Authorization.CredentialsRef != "") > 1 { - return fmt.Errorf("at most one of authorization credentials & credentials_file must be configured") + return errors.New("at most one of authorization credentials & credentials_file must be configured") } c.Authorization.Type = strings.TrimSpace(c.Authorization.Type) if len(c.Authorization.Type) == 0 { c.Authorization.Type = "Bearer" } if strings.ToLower(c.Authorization.Type) == "basic" { - return fmt.Errorf(`authorization type cannot be set to "basic", use "basic_auth" instead`) + return errors.New(`authorization type cannot be set to "basic", use "basic_auth" instead`) } if c.BasicAuth != nil || c.OAuth2 != nil { - return fmt.Errorf("at most one of basic_auth, oauth2 & authorization must be configured") + return errors.New("at most one of basic_auth, oauth2 & authorization must be configured") } } else { if len(c.BearerToken) > 0 { @@ -399,16 +399,16 @@ func (c *HTTPClientConfig) Validate() error { } if c.OAuth2 != nil { if c.BasicAuth != nil { - return fmt.Errorf("at most one of basic_auth, oauth2 & authorization must be configured") + return errors.New("at most one of basic_auth, oauth2 & authorization must be configured") } if len(c.OAuth2.ClientID) == 0 { - return fmt.Errorf("oauth2 client_id must be configured") + return errors.New("oauth2 client_id must be configured") } if len(c.OAuth2.TokenURL) == 0 { - return fmt.Errorf("oauth2 token_url must be configured") + return errors.New("oauth2 token_url must be configured") } if nonZeroCount(len(c.OAuth2.ClientSecret) > 0, len(c.OAuth2.ClientSecretFile) > 0, len(c.OAuth2.ClientSecretRef) > 0) > 1 { - return fmt.Errorf("at most one of oauth2 client_secret, client_secret_file & client_secret_ref must be configured") + return errors.New("at most one of oauth2 client_secret, client_secret_file & client_secret_ref must be configured") } } if err := c.ProxyConfig.Validate(); 
err != nil { @@ -679,8 +679,8 @@ func NewRoundTripperFromConfigWithContext(ctx context.Context, cfg HTTPClientCon if err != nil { return nil, err } - if tlsSettings.CA == nil || tlsSettings.CA.Immutable() { - // No need for a RoundTripper that reloads the CA file automatically. + if tlsSettings.immutable() { + // No need for a RoundTripper that reloads the files automatically. return newRT(tlsConfig) } return NewTLSRoundTripperWithContext(ctx, tlsConfig, tlsSettings, newRT) @@ -735,7 +735,7 @@ func (s *FileSecret) Fetch(ctx context.Context) (string, error) { } func (s *FileSecret) Description() string { - return fmt.Sprintf("file %s", s.file) + return "file " + s.file } func (s *FileSecret) Immutable() bool { @@ -753,7 +753,7 @@ func (s *refSecret) Fetch(ctx context.Context) (string, error) { } func (s *refSecret) Description() string { - return fmt.Sprintf("ref %s", s.ref) + return "ref " + s.ref } func (s *refSecret) Immutable() bool { @@ -828,7 +828,7 @@ type basicAuthRoundTripper struct { // NewBasicAuthRoundTripper will apply a BASIC auth authorization header to a request unless it has // already been set. -func NewBasicAuthRoundTripper(username SecretReader, password SecretReader, rt http.RoundTripper) http.RoundTripper { +func NewBasicAuthRoundTripper(username, password SecretReader, rt http.RoundTripper) http.RoundTripper { return &basicAuthRoundTripper{username, password, rt} } @@ -914,7 +914,7 @@ func (rt *oauth2RoundTripper) newOauth2TokenSource(req *http.Request, secret str if err != nil { return nil, nil, err } - if tlsSettings.CA == nil || tlsSettings.CA.Immutable() { + if tlsSettings.immutable() { t, _ = tlsTransport(tlsConfig) } else { t, err = NewTLSRoundTripperWithContext(req.Context(), tlsConfig, tlsSettings, tlsTransport) @@ -964,7 +964,7 @@ func (rt *oauth2RoundTripper) RoundTrip(req *http.Request) (*http.Response, erro } rt.mtx.Lock() - rt.lastSecret = secret + rt.lastSecret = newSecret rt.lastRT.Source = source if rt.client != nil { rt.client.CloseIdleConnections() @@ -1045,7 +1045,7 @@ func NewTLSConfigWithContext(ctx context.Context, cfg *TLSConfig, optFuncs ...TL if cfg.MaxVersion != 0 && cfg.MinVersion != 0 { if cfg.MaxVersion < cfg.MinVersion { - return nil, fmt.Errorf("tls_config.max_version must be greater than or equal to tls_config.min_version if both are specified") + return nil, errors.New("tls_config.max_version must be greater than or equal to tls_config.min_version if both are specified") } } @@ -1144,19 +1144,19 @@ func (c *TLSConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { // used. 
func (c *TLSConfig) Validate() error { if nonZeroCount(len(c.CA) > 0, len(c.CAFile) > 0, len(c.CARef) > 0) > 1 { - return fmt.Errorf("at most one of ca, ca_file & ca_ref must be configured") + return errors.New("at most one of ca, ca_file & ca_ref must be configured") } if nonZeroCount(len(c.Cert) > 0, len(c.CertFile) > 0, len(c.CertRef) > 0) > 1 { - return fmt.Errorf("at most one of cert, cert_file & cert_ref must be configured") + return errors.New("at most one of cert, cert_file & cert_ref must be configured") } if nonZeroCount(len(c.Key) > 0, len(c.KeyFile) > 0, len(c.KeyRef) > 0) > 1 { - return fmt.Errorf("at most one of key and key_file must be configured") + return errors.New("at most one of key and key_file must be configured") } if c.usingClientCert() && !c.usingClientKey() { - return fmt.Errorf("exactly one of key or key_file must be configured when a client certificate is configured") + return errors.New("exactly one of key or key_file must be configured when a client certificate is configured") } else if c.usingClientKey() && !c.usingClientCert() { - return fmt.Errorf("exactly one of cert or cert_file must be configured when a client key is configured") + return errors.New("exactly one of cert or cert_file must be configured when a client key is configured") } return nil @@ -1259,6 +1259,10 @@ type TLSRoundTripperSettings struct { Key SecretReader } +func (t *TLSRoundTripperSettings) immutable() bool { + return (t.CA == nil || t.CA.Immutable()) && (t.Cert == nil || t.Cert.Immutable()) && (t.Key == nil || t.Key.Immutable()) +} + func NewTLSRoundTripper( cfg *tls.Config, settings TLSRoundTripperSettings, @@ -1456,16 +1460,16 @@ type ProxyConfig struct { // UnmarshalYAML implements the yaml.Unmarshaler interface. func (c *ProxyConfig) Validate() error { if len(c.ProxyConnectHeader) > 0 && (!c.ProxyFromEnvironment && (c.ProxyURL.URL == nil || c.ProxyURL.String() == "")) { - return fmt.Errorf("if proxy_connect_header is configured, proxy_url or proxy_from_environment must also be configured") + return errors.New("if proxy_connect_header is configured, proxy_url or proxy_from_environment must also be configured") } if c.ProxyFromEnvironment && c.ProxyURL.URL != nil && c.ProxyURL.String() != "" { - return fmt.Errorf("if proxy_from_environment is configured, proxy_url must not be configured") + return errors.New("if proxy_from_environment is configured, proxy_url must not be configured") } if c.ProxyFromEnvironment && c.NoProxy != "" { - return fmt.Errorf("if proxy_from_environment is configured, no_proxy must not be configured") + return errors.New("if proxy_from_environment is configured, no_proxy must not be configured") } if c.ProxyURL.URL == nil && c.NoProxy != "" { - return fmt.Errorf("if no_proxy is configured, proxy_url must also be configured") + return errors.New("if no_proxy is configured, proxy_url must also be configured") } return nil } diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go index 25cfaa21643..1448439b7f7 100644 --- a/vendor/github.com/prometheus/common/expfmt/decode.go +++ b/vendor/github.com/prometheus/common/expfmt/decode.go @@ -45,7 +45,7 @@ func ResponseFormat(h http.Header) Format { mediatype, params, err := mime.ParseMediaType(ct) if err != nil { - return fmtUnknown + return FmtUnknown } const textType = "text/plain" @@ -53,21 +53,21 @@ func ResponseFormat(h http.Header) Format { switch mediatype { case ProtoType: if p, ok := params["proto"]; ok && p != ProtoProtocol { - return 
fmtUnknown + return FmtUnknown } if e, ok := params["encoding"]; ok && e != "delimited" { - return fmtUnknown + return FmtUnknown } - return fmtProtoDelim + return FmtProtoDelim case textType: if v, ok := params["version"]; ok && v != TextVersion { - return fmtUnknown + return FmtUnknown } - return fmtText + return FmtText } - return fmtUnknown + return FmtUnknown } // NewDecoder returns a new decoder based on the given input format. diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go index ff5ef7a9d92..d7f3d76f55d 100644 --- a/vendor/github.com/prometheus/common/expfmt/encode.go +++ b/vendor/github.com/prometheus/common/expfmt/encode.go @@ -68,7 +68,7 @@ func Negotiate(h http.Header) Format { if escapeParam := ac.Params[model.EscapingKey]; escapeParam != "" { switch Format(escapeParam) { case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues: - escapingScheme = Format(fmt.Sprintf("; escaping=%s", escapeParam)) + escapingScheme = Format("; escaping=" + escapeParam) default: // If the escaping parameter is unknown, ignore it. } @@ -77,18 +77,18 @@ func Negotiate(h http.Header) Format { if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { switch ac.Params["encoding"] { case "delimited": - return fmtProtoDelim + escapingScheme + return FmtProtoDelim + escapingScheme case "text": - return fmtProtoText + escapingScheme + return FmtProtoText + escapingScheme case "compact-text": - return fmtProtoCompact + escapingScheme + return FmtProtoCompact + escapingScheme } } if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { - return fmtText + escapingScheme + return FmtText + escapingScheme } } - return fmtText + escapingScheme + return FmtText + escapingScheme } // NegotiateIncludingOpenMetrics works like Negotiate but includes @@ -101,7 +101,7 @@ func NegotiateIncludingOpenMetrics(h http.Header) Format { if escapeParam := ac.Params[model.EscapingKey]; escapeParam != "" { switch Format(escapeParam) { case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues: - escapingScheme = Format(fmt.Sprintf("; escaping=%s", escapeParam)) + escapingScheme = Format("; escaping=" + escapeParam) default: // If the escaping parameter is unknown, ignore it. } @@ -110,26 +110,26 @@ func NegotiateIncludingOpenMetrics(h http.Header) Format { if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { switch ac.Params["encoding"] { case "delimited": - return fmtProtoDelim + escapingScheme + return FmtProtoDelim + escapingScheme case "text": - return fmtProtoText + escapingScheme + return FmtProtoText + escapingScheme case "compact-text": - return fmtProtoCompact + escapingScheme + return FmtProtoCompact + escapingScheme } } if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { - return fmtText + escapingScheme + return FmtText + escapingScheme } if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion_0_0_1 || ver == OpenMetricsVersion_1_0_0 || ver == "") { switch ver { case OpenMetricsVersion_1_0_0: - return fmtOpenMetrics_1_0_0 + escapingScheme + return FmtOpenMetrics_1_0_0 + escapingScheme default: - return fmtOpenMetrics_0_0_1 + escapingScheme + return FmtOpenMetrics_0_0_1 + escapingScheme } } } - return fmtText + escapingScheme + return FmtText + escapingScheme } // NewEncoder returns a new encoder based on content type negotiation. 
All diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go index 051b38cd178..b26886560d7 100644 --- a/vendor/github.com/prometheus/common/expfmt/expfmt.go +++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go @@ -15,7 +15,7 @@ package expfmt import ( - "fmt" + "errors" "strings" "github.com/prometheus/common/model" @@ -32,24 +32,31 @@ type Format string // it on the wire, new content-type strings will have to be agreed upon and // added here. const ( - TextVersion = "0.0.4" - ProtoType = `application/vnd.google.protobuf` - ProtoProtocol = `io.prometheus.client.MetricFamily` - protoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" + TextVersion = "0.0.4" + ProtoType = `application/vnd.google.protobuf` + ProtoProtocol = `io.prometheus.client.MetricFamily` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead. + ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" OpenMetricsType = `application/openmetrics-text` OpenMetricsVersion_0_0_1 = "0.0.1" OpenMetricsVersion_1_0_0 = "1.0.0" - // The Content-Type values for the different wire protocols. Note that these - // values are now unexported. If code was relying on comparisons to these - // constants, instead use FormatType(). - fmtUnknown Format = `` - fmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8` - fmtProtoDelim Format = protoFmt + ` encoding=delimited` - fmtProtoText Format = protoFmt + ` encoding=text` - fmtProtoCompact Format = protoFmt + ` encoding=compact-text` - fmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8` - fmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8` + // The Content-Type values for the different wire protocols. Do not do direct + // comparisons to these constants, instead use the comparison functions. + // Deprecated: Use expfmt.NewFormat(expfmt.TypeUnknown) instead. + FmtUnknown Format = `` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeTextPlain) instead. + FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoDelim) instead. + FmtProtoDelim Format = ProtoFmt + ` encoding=delimited` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoText) instead. + FmtProtoText Format = ProtoFmt + ` encoding=text` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead. + FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead. + FmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead. + FmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8` ) const ( @@ -79,17 +86,17 @@ const ( func NewFormat(t FormatType) Format { switch t { case TypeProtoCompact: - return fmtProtoCompact + return FmtProtoCompact case TypeProtoDelim: - return fmtProtoDelim + return FmtProtoDelim case TypeProtoText: - return fmtProtoText + return FmtProtoText case TypeTextPlain: - return fmtText + return FmtText case TypeOpenMetrics: - return fmtOpenMetrics_1_0_0 + return FmtOpenMetrics_1_0_0 default: - return fmtUnknown + return FmtUnknown } } @@ -97,12 +104,35 @@ func NewFormat(t FormatType) Format { // specified version number. 
func NewOpenMetricsFormat(version string) (Format, error) { if version == OpenMetricsVersion_0_0_1 { - return fmtOpenMetrics_0_0_1, nil + return FmtOpenMetrics_0_0_1, nil } if version == OpenMetricsVersion_1_0_0 { - return fmtOpenMetrics_1_0_0, nil + return FmtOpenMetrics_1_0_0, nil } - return fmtUnknown, fmt.Errorf("unknown open metrics version string") + return FmtUnknown, errors.New("unknown open metrics version string") +} + +// WithEscapingScheme returns a copy of Format with the specified escaping +// scheme appended to the end. If an escaping scheme already exists it is +// removed. +func (f Format) WithEscapingScheme(s model.EscapingScheme) Format { + var terms []string + for _, p := range strings.Split(string(f), ";") { + toks := strings.Split(p, "=") + if len(toks) != 2 { + trimmed := strings.TrimSpace(p) + if len(trimmed) > 0 { + terms = append(terms, trimmed) + } + continue + } + key := strings.TrimSpace(toks[0]) + if key != model.EscapingKey { + terms = append(terms, strings.TrimSpace(p)) + } + } + terms = append(terms, model.EscapingKey+"="+s.String()) + return Format(strings.Join(terms, "; ")) } // FormatType deduces an overall FormatType for the given format. diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go index 353c5e93f92..f1c495dd606 100644 --- a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go +++ b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go @@ -152,8 +152,8 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...E if metricType == dto.MetricType_COUNTER && strings.HasSuffix(compliantName, "_total") { compliantName = name[:len(name)-6] } - if toOM.withUnit && in.Unit != nil && !strings.HasSuffix(compliantName, fmt.Sprintf("_%s", *in.Unit)) { - compliantName = compliantName + fmt.Sprintf("_%s", *in.Unit) + if toOM.withUnit && in.Unit != nil && !strings.HasSuffix(compliantName, "_"+*in.Unit) { + compliantName = compliantName + "_" + *in.Unit } // Comments, first HELP, then TYPE. @@ -477,7 +477,7 @@ func writeOpenMetricsNameAndLabelPairs( if name != "" { // If the name does not pass the legacy validity check, we must put the // metric name inside the braces, quoted. - if !model.IsValidLegacyMetricName(model.LabelValue(name)) { + if !model.IsValidLegacyMetricName(name) { metricInsideBraces = true err := w.WriteByte(separator) written++ diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go index f9b8265a9ec..4b86434b332 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_create.go +++ b/vendor/github.com/prometheus/common/expfmt/text_create.go @@ -354,7 +354,7 @@ func writeNameAndLabelPairs( if name != "" { // If the name does not pass the legacy validity check, we must put the // metric name inside the braces. - if !model.IsValidLegacyMetricName(model.LabelValue(name)) { + if !model.IsValidLegacyMetricName(name) { metricInsideBraces = true err := w.WriteByte(separator) written++ @@ -498,7 +498,7 @@ func writeInt(w enhancedWriter, i int64) (int, error) { // writeName writes a string as-is if it complies with the legacy naming // scheme, or escapes it in double quotes if not. 
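WithEscapingScheme above composes with NewFormat, so callers no longer need to assemble content-type strings by hand. A sketch, assuming the escaping-scheme constants exported by prometheus/common/model:

import (
	"bytes"

	"github.com/prometheus/common/expfmt"
	"github.com/prometheus/common/model"
)

func newUnderscoredEncoder(buf *bytes.Buffer) expfmt.Encoder {
	// Yields `text/plain; version=0.0.4; charset=utf-8; escaping=underscores`;
	// any escaping term already present in the Format is replaced.
	f := expfmt.NewFormat(expfmt.TypeTextPlain).WithEscapingScheme(model.UnderscoreEscaping)
	return expfmt.NewEncoder(buf, f)
}
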
func writeName(w enhancedWriter, name string) (int, error) { - if model.IsValidLegacyMetricName(model.LabelValue(name)) { + if model.IsValidLegacyMetricName(name) { return w.WriteString(name) } var written int diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go index 26490211af2..b4607fe4d27 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -22,9 +22,9 @@ import ( "math" "strconv" "strings" + "unicode/utf8" dto "github.com/prometheus/client_model/go" - "google.golang.org/protobuf/proto" "github.com/prometheus/common/model" @@ -60,6 +60,7 @@ type TextParser struct { currentMF *dto.MetricFamily currentMetric *dto.Metric currentLabelPair *dto.LabelPair + currentLabelPairs []*dto.LabelPair // Temporarily stores label pairs while parsing a metric line. // The remaining member variables are only used for summaries/histograms. currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le' @@ -74,6 +75,9 @@ type TextParser struct { // count and sum of that summary/histogram. currentIsSummaryCount, currentIsSummarySum bool currentIsHistogramCount, currentIsHistogramSum bool + // These indicate if the metric name from the current line being parsed is inside + // braces and if that metric name was found respectively. + currentMetricIsInsideBraces, currentMetricInsideBracesIsPresent bool } // TextToMetricFamilies reads 'in' as the simple and flat text-based exchange @@ -137,12 +141,15 @@ func (p *TextParser) reset(in io.Reader) { } p.currentQuantile = math.NaN() p.currentBucket = math.NaN() + p.currentMF = nil } // startOfLine represents the state where the next byte read from p.buf is the // start of a line (or whitespace leading up to it). func (p *TextParser) startOfLine() stateFn { p.lineCount++ + p.currentMetricIsInsideBraces = false + p.currentMetricInsideBracesIsPresent = false if p.skipBlankTab(); p.err != nil { // This is the only place that we expect to see io.EOF, // which is not an error but the signal that we are done. @@ -158,6 +165,9 @@ func (p *TextParser) startOfLine() stateFn { return p.startComment case '\n': return p.startOfLine // Empty line, start the next one. + case '{': + p.currentMetricIsInsideBraces = true + return p.readingLabels } return p.readingMetricName } @@ -275,6 +285,8 @@ func (p *TextParser) startLabelName() stateFn { return nil // Unexpected end of input. } if p.currentByte == '}' { + p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...) + p.currentLabelPairs = nil if p.skipBlankTab(); p.err != nil { return nil // Unexpected end of input. } @@ -287,6 +299,45 @@ func (p *TextParser) startLabelName() stateFn { p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName())) return nil } + if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { + return nil // Unexpected end of input. 
+ } + if p.currentByte != '=' { + if p.currentMetricIsInsideBraces { + if p.currentMetricInsideBracesIsPresent { + p.parseError(fmt.Sprintf("multiple metric names for metric %q", p.currentMF.GetName())) + return nil + } + switch p.currentByte { + case ',': + p.setOrCreateCurrentMF() + if p.currentMF.Type == nil { + p.currentMF.Type = dto.MetricType_UNTYPED.Enum() + } + p.currentMetric = &dto.Metric{} + p.currentMetricInsideBracesIsPresent = true + return p.startLabelName + case '}': + p.setOrCreateCurrentMF() + if p.currentMF.Type == nil { + p.currentMF.Type = dto.MetricType_UNTYPED.Enum() + } + p.currentMetric = &dto.Metric{} + p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...) + p.currentLabelPairs = nil + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + return p.readingValue + default: + p.parseError(fmt.Sprintf("unexpected end of metric name %q", p.currentByte)) + return nil + } + } + p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte)) + p.currentLabelPairs = nil + return nil + } p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())} if p.currentLabelPair.GetName() == string(model.MetricNameLabel) { p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel)) @@ -296,23 +347,17 @@ func (p *TextParser) startLabelName() stateFn { // labels to 'real' labels. if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) && !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) { - p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair) - } - if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentByte != '=' { - p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte)) - return nil + p.currentLabelPairs = append(p.currentLabelPairs, p.currentLabelPair) } // Check for duplicate label names. labels := make(map[string]struct{}) - for _, l := range p.currentMetric.Label { + for _, l := range p.currentLabelPairs { lName := l.GetName() if _, exists := labels[lName]; !exists { labels[lName] = struct{}{} } else { p.parseError(fmt.Sprintf("duplicate label names for metric %q", p.currentMF.GetName())) + p.currentLabelPairs = nil return nil } } @@ -345,6 +390,7 @@ func (p *TextParser) startLabelValue() stateFn { if p.currentQuantile, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil { // Create a more helpful error message. p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue())) + p.currentLabelPairs = nil return nil } } else { @@ -371,12 +417,19 @@ func (p *TextParser) startLabelValue() stateFn { return p.startLabelName case '}': + if p.currentMF == nil { + p.parseError("invalid metric name") + return nil + } + p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...) + p.currentLabelPairs = nil if p.skipBlankTab(); p.err != nil { return nil // Unexpected end of input. 
} return p.readingValue default: p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.GetValue())) + p.currentLabelPairs = nil return nil } } @@ -585,6 +638,8 @@ func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) { p.currentToken.WriteByte(p.currentByte) case 'n': p.currentToken.WriteByte('\n') + case '"': + p.currentToken.WriteByte('"') default: p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) return @@ -610,13 +665,45 @@ func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) { // but not into p.currentToken. func (p *TextParser) readTokenAsMetricName() { p.currentToken.Reset() + // A UTF-8 metric name must be quoted and may have escaped characters. + quoted := false + escaped := false if !isValidMetricNameStart(p.currentByte) { return } - for { - p.currentToken.WriteByte(p.currentByte) + for p.err == nil { + if escaped { + switch p.currentByte { + case '\\': + p.currentToken.WriteByte(p.currentByte) + case 'n': + p.currentToken.WriteByte('\n') + case '"': + p.currentToken.WriteByte('"') + default: + p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) + return + } + escaped = false + } else { + switch p.currentByte { + case '"': + quoted = !quoted + if !quoted { + p.currentByte, p.err = p.buf.ReadByte() + return + } + case '\n': + p.parseError(fmt.Sprintf("metric name %q contains unescaped new-line", p.currentToken.String())) + return + case '\\': + escaped = true + default: + p.currentToken.WriteByte(p.currentByte) + } + } p.currentByte, p.err = p.buf.ReadByte() - if p.err != nil || !isValidMetricNameContinuation(p.currentByte) { + if !isValidMetricNameContinuation(p.currentByte, quoted) || (!quoted && p.currentByte == ' ') { return } } @@ -628,13 +715,45 @@ func (p *TextParser) readTokenAsMetricName() { // but not into p.currentToken. func (p *TextParser) readTokenAsLabelName() { p.currentToken.Reset() + // A UTF-8 label name must be quoted and may have escaped characters. 
+ quoted := false + escaped := false if !isValidLabelNameStart(p.currentByte) { return } - for { - p.currentToken.WriteByte(p.currentByte) + for p.err == nil { + if escaped { + switch p.currentByte { + case '\\': + p.currentToken.WriteByte(p.currentByte) + case 'n': + p.currentToken.WriteByte('\n') + case '"': + p.currentToken.WriteByte('"') + default: + p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) + return + } + escaped = false + } else { + switch p.currentByte { + case '"': + quoted = !quoted + if !quoted { + p.currentByte, p.err = p.buf.ReadByte() + return + } + case '\n': + p.parseError(fmt.Sprintf("label name %q contains unescaped new-line", p.currentToken.String())) + return + case '\\': + escaped = true + default: + p.currentToken.WriteByte(p.currentByte) + } + } p.currentByte, p.err = p.buf.ReadByte() - if p.err != nil || !isValidLabelNameContinuation(p.currentByte) { + if !isValidLabelNameContinuation(p.currentByte, quoted) || (!quoted && p.currentByte == '=') { return } } @@ -660,6 +779,7 @@ func (p *TextParser) readTokenAsLabelValue() { p.currentToken.WriteByte('\n') default: p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) + p.currentLabelPairs = nil return } escaped = false @@ -718,19 +838,19 @@ func (p *TextParser) setOrCreateCurrentMF() { } func isValidLabelNameStart(b byte) bool { - return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' + return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == '"' } -func isValidLabelNameContinuation(b byte) bool { - return isValidLabelNameStart(b) || (b >= '0' && b <= '9') +func isValidLabelNameContinuation(b byte, quoted bool) bool { + return isValidLabelNameStart(b) || (b >= '0' && b <= '9') || (quoted && utf8.ValidString(string(b))) } func isValidMetricNameStart(b byte) bool { return isValidLabelNameStart(b) || b == ':' } -func isValidMetricNameContinuation(b byte) bool { - return isValidLabelNameContinuation(b) || b == ':' +func isValidMetricNameContinuation(b byte, quoted bool) bool { + return isValidLabelNameContinuation(b, quoted) || b == ':' } func isBlankOrTab(b byte) bool { @@ -775,7 +895,7 @@ func histogramMetricName(name string) string { func parseFloat(s string) (float64, error) { if strings.ContainsAny(s, "pP_") { - return 0, fmt.Errorf("unsupported character in float") + return 0, errors.New("unsupported character in float") } return strconv.ParseFloat(s, 64) } diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go index 80d1fe944ea..bd3a39e3e14 100644 --- a/vendor/github.com/prometheus/common/model/alert.go +++ b/vendor/github.com/prometheus/common/model/alert.go @@ -14,6 +14,7 @@ package model import ( + "errors" "fmt" "time" ) @@ -89,16 +90,16 @@ func (a *Alert) StatusAt(ts time.Time) AlertStatus { // Validate checks whether the alert data is inconsistent. 
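// It reports an error if the start time is missing, the end time precedes
// the start time, the label set is empty or invalid, or the annotations
// fail validation.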
func (a *Alert) Validate() error { if a.StartsAt.IsZero() { - return fmt.Errorf("start time missing") + return errors.New("start time missing") } if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) { - return fmt.Errorf("start time must be before end time") + return errors.New("start time must be before end time") } if err := a.Labels.Validate(); err != nil { return fmt.Errorf("invalid label set: %w", err) } if len(a.Labels) == 0 { - return fmt.Errorf("at least one label pair required") + return errors.New("at least one label pair required") } if err := a.Annotations.Validate(); err != nil { return fmt.Errorf("invalid annotations: %w", err) diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go index 3317ce22ff7..73b7aa3e60b 100644 --- a/vendor/github.com/prometheus/common/model/labels.go +++ b/vendor/github.com/prometheus/common/model/labels.go @@ -97,26 +97,35 @@ var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$") // therewith. type LabelName string -// IsValid returns true iff name matches the pattern of LabelNameRE for legacy -// names, and iff it's valid UTF-8 if NameValidationScheme is set to -// UTF8Validation. For the legacy matching, it does not use LabelNameRE for the -// check but a much faster hardcoded implementation. +// IsValid returns true iff the name matches the pattern of LabelNameRE when +// NameValidationScheme is set to LegacyValidation, or valid UTF-8 if +// NameValidationScheme is set to UTF8Validation. func (ln LabelName) IsValid() bool { if len(ln) == 0 { return false } switch NameValidationScheme { case LegacyValidation: - for i, b := range ln { - if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { - return false - } - } + return ln.IsValidLegacy() case UTF8Validation: return utf8.ValidString(string(ln)) default: panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme)) } +} + +// IsValidLegacy returns true iff name matches the pattern of LabelNameRE for +// legacy names. It does not use LabelNameRE for the check but a much faster +// hardcoded implementation. +func (ln LabelName) IsValidLegacy() bool { + if len(ln) == 0 { + return false + } + for i, b := range ln { + if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { + return false + } + } return true } diff --git a/vendor/github.com/prometheus/common/model/labelset_string.go b/vendor/github.com/prometheus/common/model/labelset_string.go index 481c47b46e5..abb2c900183 100644 --- a/vendor/github.com/prometheus/common/model/labelset_string.go +++ b/vendor/github.com/prometheus/common/model/labelset_string.go @@ -11,8 +11,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build go1.21 - package model import ( diff --git a/vendor/github.com/prometheus/common/model/labelset_string_go120.go b/vendor/github.com/prometheus/common/model/labelset_string_go120.go deleted file mode 100644 index c4212685e71..00000000000 --- a/vendor/github.com/prometheus/common/model/labelset_string_go120.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2024 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !go1.21 - -package model - -import ( - "fmt" - "sort" - "strings" -) - -// String was optimized using functions not available for go 1.20 -// or lower. We keep the old implementation for compatibility with client_golang. -// Once client golang drops support for go 1.20 (scheduled for August 2024), this -// file can be removed. -func (l LabelSet) String() string { - labelNames := make([]string, 0, len(l)) - for name := range l { - labelNames = append(labelNames, string(name)) - } - sort.Strings(labelNames) - lstrs := make([]string, 0, len(l)) - for _, name := range labelNames { - lstrs = append(lstrs, fmt.Sprintf("%s=%q", name, l[LabelName(name)])) - } - return fmt.Sprintf("{%s}", strings.Join(lstrs, ", ")) -} diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go index eb865e5a59c..0daca836afa 100644 --- a/vendor/github.com/prometheus/common/model/metric.go +++ b/vendor/github.com/prometheus/common/model/metric.go @@ -14,9 +14,11 @@ package model import ( + "errors" "fmt" "regexp" "sort" + "strconv" "strings" "unicode/utf8" @@ -34,10 +36,13 @@ var ( // goroutines are started. NameValidationScheme = LegacyValidation - // NameEscapingScheme defines the default way that names will be - // escaped when presented to systems that do not support UTF-8 names. If the - // Content-Type "escaping" term is specified, that will override this value. - NameEscapingScheme = ValueEncodingEscaping + // NameEscapingScheme defines the default way that names will be escaped when + // presented to systems that do not support UTF-8 names. If the Content-Type + // "escaping" term is specified, that will override this value. + // NameEscapingScheme should not be set to the NoEscaping value. That string + // is used in content negotiation to indicate that a system supports UTF-8 and + // has that feature enabled. + NameEscapingScheme = UnderscoreEscaping ) // ValidationScheme is a Go enum for determining how metric and label names will @@ -161,7 +166,7 @@ func (m Metric) FastFingerprint() Fingerprint { func IsValidMetricName(n LabelValue) bool { switch NameValidationScheme { case LegacyValidation: - return IsValidLegacyMetricName(n) + return IsValidLegacyMetricName(string(n)) case UTF8Validation: if len(n) == 0 { return false @@ -176,7 +181,7 @@ func IsValidMetricName(n LabelValue) bool { // legacy validation scheme regardless of the value of NameValidationScheme. // This function, however, does not use MetricNameRE for the check but a much // faster hardcoded implementation. -func IsValidLegacyMetricName(n LabelValue) bool { +func IsValidLegacyMetricName(n string) bool { if len(n) == 0 { return false } @@ -208,7 +213,7 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF } // If the name is nil, copy as-is, don't try to escape. 
- if v.Name == nil || IsValidLegacyMetricName(LabelValue(v.GetName())) { + if v.Name == nil || IsValidLegacyMetricName(v.GetName()) { out.Name = v.Name } else { out.Name = proto.String(EscapeName(v.GetName(), scheme)) @@ -230,7 +235,7 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF for _, l := range m.Label { if l.GetName() == MetricNameLabel { - if l.Value == nil || IsValidLegacyMetricName(LabelValue(l.GetValue())) { + if l.Value == nil || IsValidLegacyMetricName(l.GetValue()) { escaped.Label = append(escaped.Label, l) continue } @@ -240,7 +245,7 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF }) continue } - if l.Name == nil || IsValidLegacyMetricName(LabelValue(l.GetName())) { + if l.Name == nil || IsValidLegacyMetricName(l.GetName()) { escaped.Label = append(escaped.Label, l) continue } @@ -256,20 +261,16 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF func metricNeedsEscaping(m *dto.Metric) bool { for _, l := range m.Label { - if l.GetName() == MetricNameLabel && !IsValidLegacyMetricName(LabelValue(l.GetValue())) { + if l.GetName() == MetricNameLabel && !IsValidLegacyMetricName(l.GetValue()) { return true } - if !IsValidLegacyMetricName(LabelValue(l.GetName())) { + if !IsValidLegacyMetricName(l.GetName()) { return true } } return false } -const ( - lowerhex = "0123456789abcdef" -) - // EscapeName escapes the incoming name according to the provided escaping // scheme. Depending on the rules of escaping, this may cause no change in the // string that is returned. (Especially NoEscaping, which by definition is a @@ -283,7 +284,7 @@ func EscapeName(name string, scheme EscapingScheme) string { case NoEscaping: return name case UnderscoreEscaping: - if IsValidLegacyMetricName(LabelValue(name)) { + if IsValidLegacyMetricName(name) { return name } for i, b := range name { @@ -304,31 +305,25 @@ func EscapeName(name string, scheme EscapingScheme) string { } else if isValidLegacyRune(b, i) { escaped.WriteRune(b) } else { - escaped.WriteRune('_') + escaped.WriteString("__") } } return escaped.String() case ValueEncodingEscaping: - if IsValidLegacyMetricName(LabelValue(name)) { + if IsValidLegacyMetricName(name) { return name } escaped.WriteString("U__") for i, b := range name { - if isValidLegacyRune(b, i) { + if b == '_' { + escaped.WriteString("__") + } else if isValidLegacyRune(b, i) { escaped.WriteRune(b) } else if !utf8.ValidRune(b) { escaped.WriteString("_FFFD_") - } else if b < 0x100 { - escaped.WriteRune('_') - for s := 4; s >= 0; s -= 4 { - escaped.WriteByte(lowerhex[b>>uint(s)&0xF]) - } - escaped.WriteRune('_') - } else if b < 0x10000 { + } else { escaped.WriteRune('_') - for s := 12; s >= 0; s -= 4 { - escaped.WriteByte(lowerhex[b>>uint(s)&0xF]) - } + escaped.WriteString(strconv.FormatInt(int64(b), 16)) escaped.WriteRune('_') } } @@ -386,8 +381,9 @@ func UnescapeName(name string, scheme EscapingScheme) string { // We think we are in a UTF-8 code, process it. var utf8Val uint for j := 0; i < len(escapedName); j++ { - // This is too many characters for a utf8 value. - if j > 4 { + // This is too many characters for a utf8 value based on the MaxRune + // value of '\U0010FFFF'. + if j >= 6 { return name } // Found a closing underscore, convert to a rune, check validity, and append. 
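// A minimal, self-contained sketch (illustrative, separate from the patch)
// of the ValueEncodingEscaping behavior changed above: '_' now escapes to
// "__" and any other rune outside the legacy charset to "_<hex>_", so
// UTF-8 names round-trip through EscapeName/UnescapeName.
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	name := "http.requests.total" // '.' (U+002E) is not valid in legacy names
	esc := model.EscapeName(name, model.ValueEncodingEscaping)
	fmt.Println(esc) // U__http_2e_requests_2e_total
	fmt.Println(model.UnescapeName(esc, model.ValueEncodingEscaping) == name) // true
}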
@@ -440,7 +436,7 @@ func (e EscapingScheme) String() string { func ToEscapingScheme(s string) (EscapingScheme, error) { if s == "" { - return NoEscaping, fmt.Errorf("got empty string instead of escaping scheme") + return NoEscaping, errors.New("got empty string instead of escaping scheme") } switch s { case AllowUTF8: @@ -452,6 +448,6 @@ func ToEscapingScheme(s string) (EscapingScheme, error) { case EscapeValues: return ValueEncodingEscaping, nil default: - return NoEscaping, fmt.Errorf("unknown format scheme " + s) + return NoEscaping, fmt.Errorf("unknown format scheme %s", s) } } diff --git a/vendor/github.com/prometheus/common/model/silence.go b/vendor/github.com/prometheus/common/model/silence.go index 910b0b71fcc..8f91a9702e0 100644 --- a/vendor/github.com/prometheus/common/model/silence.go +++ b/vendor/github.com/prometheus/common/model/silence.go @@ -15,6 +15,7 @@ package model import ( "encoding/json" + "errors" "fmt" "regexp" "time" @@ -34,7 +35,7 @@ func (m *Matcher) UnmarshalJSON(b []byte) error { } if len(m.Name) == 0 { - return fmt.Errorf("label name in matcher must not be empty") + return errors.New("label name in matcher must not be empty") } if m.IsRegex { if _, err := regexp.Compile(m.Value); err != nil { @@ -77,7 +78,7 @@ type Silence struct { // Validate returns true iff all fields of the silence have valid values. func (s *Silence) Validate() error { if len(s.Matchers) == 0 { - return fmt.Errorf("at least one matcher required") + return errors.New("at least one matcher required") } for _, m := range s.Matchers { if err := m.Validate(); err != nil { @@ -85,22 +86,22 @@ func (s *Silence) Validate() error { } } if s.StartsAt.IsZero() { - return fmt.Errorf("start time missing") + return errors.New("start time missing") } if s.EndsAt.IsZero() { - return fmt.Errorf("end time missing") + return errors.New("end time missing") } if s.EndsAt.Before(s.StartsAt) { - return fmt.Errorf("start time must be before end time") + return errors.New("start time must be before end time") } if s.CreatedBy == "" { - return fmt.Errorf("creator information missing") + return errors.New("creator information missing") } if s.Comment == "" { - return fmt.Errorf("comment missing") + return errors.New("comment missing") } if s.CreatedAt.IsZero() { - return fmt.Errorf("creation timestamp missing") + return errors.New("creation timestamp missing") } return nil } diff --git a/vendor/github.com/prometheus/common/model/value_float.go b/vendor/github.com/prometheus/common/model/value_float.go index ae35cc2ab4b..6bfc757d18b 100644 --- a/vendor/github.com/prometheus/common/model/value_float.go +++ b/vendor/github.com/prometheus/common/model/value_float.go @@ -15,6 +15,7 @@ package model import ( "encoding/json" + "errors" "fmt" "math" "strconv" @@ -39,7 +40,7 @@ func (v SampleValue) MarshalJSON() ([]byte, error) { // UnmarshalJSON implements json.Unmarshaler. 
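//
// For example (illustrative), []byte(`"3.14"`) decodes to SampleValue(3.14),
// while the unquoted form 3.14 is rejected with an error.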
func (v *SampleValue) UnmarshalJSON(b []byte) error { if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { - return fmt.Errorf("sample value must be a quoted string") + return errors.New("sample value must be a quoted string") } f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64) if err != nil { diff --git a/vendor/github.com/prometheus/common/model/value_histogram.go b/vendor/github.com/prometheus/common/model/value_histogram.go index 54bb038cfff..895e6a3e839 100644 --- a/vendor/github.com/prometheus/common/model/value_histogram.go +++ b/vendor/github.com/prometheus/common/model/value_histogram.go @@ -15,6 +15,7 @@ package model import ( "encoding/json" + "errors" "fmt" "strconv" "strings" @@ -32,7 +33,7 @@ func (v FloatString) MarshalJSON() ([]byte, error) { func (v *FloatString) UnmarshalJSON(b []byte) error { if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { - return fmt.Errorf("float value must be a quoted string") + return errors.New("float value must be a quoted string") } f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64) if err != nil { @@ -141,7 +142,7 @@ type SampleHistogramPair struct { func (s SampleHistogramPair) MarshalJSON() ([]byte, error) { if s.Histogram == nil { - return nil, fmt.Errorf("histogram is nil") + return nil, errors.New("histogram is nil") } t, err := json.Marshal(s.Timestamp) if err != nil { @@ -164,7 +165,7 @@ func (s *SampleHistogramPair) UnmarshalJSON(buf []byte) error { return fmt.Errorf("wrong number of fields: %d != %d", gotLen, wantLen) } if s.Histogram == nil { - return fmt.Errorf("histogram is null") + return errors.New("histogram is null") } return nil } diff --git a/vendor/github.com/prometheus/common/version/info.go b/vendor/github.com/prometheus/common/version/info.go index 197d95e5c8b..61ed1ba314b 100644 --- a/vendor/github.com/prometheus/common/version/info.go +++ b/vendor/github.com/prometheus/common/version/info.go @@ -90,6 +90,14 @@ func GetTags() string { return computedTags } +func PrometheusUserAgent() string { + return ComponentUserAgent("Prometheus") +} + +func ComponentUserAgent(component string) string { + return component + "/" + Version +} + func init() { computedRevision, computedTags = computeRevision() } diff --git a/vendor/github.com/prometheus/prometheus/promql/engine.go b/vendor/github.com/prometheus/prometheus/promql/engine.go index 25e67db6330..f6b79f3a466 100644 --- a/vendor/github.com/prometheus/prometheus/promql/engine.go +++ b/vendor/github.com/prometheus/prometheus/promql/engine.go @@ -2356,6 +2356,11 @@ loop: } else { histograms = append(histograms, HPoint{H: &histogram.FloatHistogram{}}) } + if histograms[n].H == nil { + // Make sure to pass non zero H to AtFloatHistogram so that it does a deep-copy. + // Not an issue in the loop above since that uses an intermediate buffer. 
+ histograms[n].H = &histogram.FloatHistogram{} + } histograms[n].T, histograms[n].H = it.AtFloatHistogram(histograms[n].H) if value.IsStaleNaN(histograms[n].H.Sum) { histograms = histograms[:n] diff --git a/vendor/github.com/prometheus/prometheus/promql/histogram_stats_iterator.go b/vendor/github.com/prometheus/prometheus/promql/histogram_stats_iterator.go index dfafea5f8ca..459d5924aec 100644 --- a/vendor/github.com/prometheus/prometheus/promql/histogram_stats_iterator.go +++ b/vendor/github.com/prometheus/prometheus/promql/histogram_stats_iterator.go @@ -48,7 +48,6 @@ func (f *histogramStatsIterator) AtHistogram(h *histogram.Histogram) (int64, *hi var t int64 t, f.currentH = f.Iterator.AtHistogram(f.currentH) if value.IsStaleNaN(f.currentH.Sum) { - f.setLastH(f.currentH) h = &histogram.Histogram{Sum: f.currentH.Sum} return t, h } @@ -63,9 +62,13 @@ func (f *histogramStatsIterator) AtHistogram(h *histogram.Histogram) (int64, *hi return t, h } - h.CounterResetHint = f.getResetHint(f.currentH) - h.Count = f.currentH.Count - h.Sum = f.currentH.Sum + returnValue := histogram.Histogram{ + CounterResetHint: f.getResetHint(f.currentH), + Count: f.currentH.Count, + Sum: f.currentH.Sum, + } + returnValue.CopyTo(h) + f.setLastH(f.currentH) return t, h } @@ -77,7 +80,6 @@ func (f *histogramStatsIterator) AtFloatHistogram(fh *histogram.FloatHistogram) var t int64 t, f.currentFH = f.Iterator.AtFloatHistogram(f.currentFH) if value.IsStaleNaN(f.currentFH.Sum) { - f.setLastFH(f.currentFH) return t, &histogram.FloatHistogram{Sum: f.currentFH.Sum} } @@ -91,9 +93,13 @@ func (f *histogramStatsIterator) AtFloatHistogram(fh *histogram.FloatHistogram) return t, fh } - fh.CounterResetHint = f.getFloatResetHint(f.currentFH.CounterResetHint) - fh.Count = f.currentFH.Count - fh.Sum = f.currentFH.Sum + returnValue := histogram.FloatHistogram{ + CounterResetHint: f.getFloatResetHint(f.currentFH.CounterResetHint), + Count: f.currentFH.Count, + Sum: f.currentFH.Sum, + } + returnValue.CopyTo(fh) + f.setLastFH(f.currentFH) return t, fh } diff --git a/vendor/github.com/prometheus/prometheus/scrape/scrape.go b/vendor/github.com/prometheus/prometheus/scrape/scrape.go index 68411a62e01..ccb068b6805 100644 --- a/vendor/github.com/prometheus/prometheus/scrape/scrape.go +++ b/vendor/github.com/prometheus/prometheus/scrape/scrape.go @@ -1631,7 +1631,7 @@ loop: updateMetadata(lset, true) } - if seriesAlreadyScraped { + if seriesAlreadyScraped && parsedTimestamp == nil { err = storage.ErrDuplicateSampleForTimestamp } else { if ctMs := p.CreatedTimestamp(); sl.enableCTZeroIngestion && ctMs != nil { diff --git a/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/histogram.go b/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/histogram.go index 3c7349cf729..ce934a638d9 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/histogram.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/histogram.go @@ -30,12 +30,10 @@ func GenerateTestHistograms(n int) (r []*histogram.Histogram) { return r } -func GenerateTestHistogramsWithUnknownResetHint(n int) []*histogram.Histogram { - hs := GenerateTestHistograms(n) - for i := range hs { - hs[i].CounterResetHint = histogram.UnknownCounterReset - } - return hs +func GenerateTestHistogramWithHint(n int, hint histogram.CounterResetHint) *histogram.Histogram { + h := GenerateTestHistogram(n) + h.CounterResetHint = hint + return h } // GenerateTestHistogram but it is up to the user to set any known counter reset hint. 
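// A minimal sketch (illustrative) tying together the expfmt changes above:
// build formats with NewFormat, annotate them with WithEscapingScheme, and
// compare them via FormatType() rather than against the deprecated Fmt*
// constants.
package main

import (
	"fmt"

	"github.com/prometheus/common/expfmt"
	"github.com/prometheus/common/model"
)

func main() {
	f := expfmt.NewFormat(expfmt.TypeOpenMetrics).WithEscapingScheme(model.UnderscoreEscaping)
	fmt.Println(f)
	// application/openmetrics-text; version=1.0.0; charset=utf-8; escaping=underscores
	if f.FormatType() == expfmt.TypeOpenMetrics {
		fmt.Println("negotiated OpenMetrics")
	}
}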
diff --git a/vendor/github.com/relvacode/iso8601/.gitignore b/vendor/github.com/relvacode/iso8601/.gitignore index daf913b1b34..11b90db8d96 100644 --- a/vendor/github.com/relvacode/iso8601/.gitignore +++ b/vendor/github.com/relvacode/iso8601/.gitignore @@ -22,3 +22,5 @@ _testmain.go *.exe *.test *.prof + +.idea diff --git a/vendor/github.com/relvacode/iso8601/README.md b/vendor/github.com/relvacode/iso8601/README.md index f52d571352d..5c2c1df7772 100644 --- a/vendor/github.com/relvacode/iso8601/README.md +++ b/vendor/github.com/relvacode/iso8601/README.md @@ -33,21 +33,9 @@ func main() { ## Benchmark ``` -BenchmarkParse-16 13364954 77.7 ns/op 0 B/op 0 allocs/op +goos: linux +goarch: amd64 +pkg: github.com/relvacode/iso8601 +cpu: AMD Ryzen 7 7840U w/ Radeon 780M Graphics +BenchmarkParse-16 35880919 30.89 ns/op 0 B/op 0 allocs/op ``` - -## Release History - - - `1.3.0` - - Allow a leading `+` sign in the year component [#11](https://github.com/relvacode/iso8601/issues/11) - - `1.2.0` - - Time range validity checking equivalent to the standard library. - Note that previous versions would not validate that a given date string was in the expected range. Additionally, this version no longer accepts `0000-00-00T00:00:00` as a valid input which can be the zero time representation in other languages nor does it support leap seconds (such that the seconds field is `60`) as is the case in the [standard library](https://github.com/golang/go/issues/15247) - - `1.1.0` - - Check for `-0` time zone - - `1.0.0` - - Initial release diff --git a/vendor/github.com/relvacode/iso8601/iso8601.go b/vendor/github.com/relvacode/iso8601/iso8601.go index fd4c57756d6..266d871c73b 100644 --- a/vendor/github.com/relvacode/iso8601/iso8601.go +++ b/vendor/github.com/relvacode/iso8601/iso8601.go @@ -27,6 +27,7 @@ const ( // ParseISOZone parses the 5 character zone information in an ISO8601 date string. // This function expects input that matches: // +// Z, z (UTC) // -0100 // +0100 // +01:00 @@ -35,11 +36,13 @@ const ( // +01:45 // +0145 func ParseISOZone(inp []byte) (*time.Location, error) { - if len(inp) < 3 || len(inp) > 6 { + if len(inp) != 1 && (len(inp) < 3 || len(inp) > 6) { return nil, ErrZoneCharacters } var neg bool switch inp[0] { + case 'Z', 'z': + return time.UTC, nil case '+': case '-': neg = true @@ -87,6 +90,12 @@ func ParseISOZone(inp []byte) (*time.Location, error) { // Parse parses an ISO8601 compliant date-time byte slice into a time.Time object. // If any component of an input date-time is not within the expected range then an *iso8601.RangeError is returned. func Parse(inp []byte) (time.Time, error) { + return ParseInLocation(inp, time.UTC) +} + +// ParseInLocation parses an ISO8601 compliant date-time byte slice into a time.Time object. +// If the input does not have timezone information, it will use the given location. +func ParseInLocation(inp []byte, loc *time.Location) (time.Time, error) { var ( Y uint M uint @@ -98,9 +107,6 @@ func Parse(inp []byte) (time.Time, error) { nfraction = 1 //counts amount of precision for the second fraction ) - // Always assume UTC by default - var loc = time.UTC - var c uint var p = year @@ -131,7 +137,7 @@ parse: continue } fallthrough - case '+': + case '+', 'Z': if i == 0 { // The ISO8601 technically allows signed year components. // Go does not allow negative years, but let's allow a positive sign to be more compatible with the spec. 
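// A minimal sketch (illustrative; the zone name is an example) of the
// ParseInLocation API added above: input without zone information falls
// back to the supplied location, while an explicit 'Z'/'z' or numeric
// offset in the input still takes precedence.
package main

import (
	"fmt"
	"time"

	"github.com/relvacode/iso8601"
)

func main() {
	loc, err := time.LoadLocation("Europe/Brussels")
	if err != nil {
		panic(err)
	}
	t1, _ := iso8601.ParseInLocation([]byte("2024-12-20T17:02:16"), loc)
	t2, _ := iso8601.Parse([]byte("2024-12-20T17:02:16Z")) // 'Z' is now resolved by ParseISOZone
	fmt.Println(t1.Location() == loc)      // true
	fmt.Println(t2.Location() == time.UTC) // true
}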
@@ -185,23 +191,6 @@ parse: s = c c = 0 p++ - case 'Z': - switch p { - case hour: - h = c - case minute: - m = c - case second: - s = c - case millisecond: - fraction = int(c) - default: - return time.Time{}, newUnexpectedCharacterError(inp[i]) - } - c = 0 - if len(inp) != i+1 { - return time.Time{}, ErrRemainingData - } default: return time.Time{}, newUnexpectedCharacterError(inp[i]) } @@ -215,6 +204,9 @@ parse: Y = c M = 1 d = 1 + case month: + M = c + d = 1 case day: d = c case hour: @@ -287,3 +279,9 @@ parse: func ParseString(inp string) (time.Time, error) { return Parse([]byte(inp)) } + +// ParseStringInLocation parses an ISO8601 compliant date-time string into a time.Time object. +// If the input does not have timezone information, it will use the given location. +func ParseStringInLocation(inp string, loc *time.Location) (time.Time, error) { + return ParseInLocation([]byte(inp), loc) +} diff --git a/vendor/github.com/rs/cors/cors.go b/vendor/github.com/rs/cors/cors.go index da80d343b3d..724f242ac6f 100644 --- a/vendor/github.com/rs/cors/cors.go +++ b/vendor/github.com/rs/cors/cors.go @@ -364,9 +364,11 @@ func (c *Cors) handlePreflight(w http.ResponseWriter, r *http.Request) { // Note: the Fetch standard guarantees that at most one // Access-Control-Request-Headers header is present in the preflight request; // see step 5.2 in https://fetch.spec.whatwg.org/#cors-preflight-fetch-0. - reqHeaders, found := first(r.Header, "Access-Control-Request-Headers") - if found && !c.allowedHeadersAll && !c.allowedHeaders.Subsumes(reqHeaders[0]) { - c.logf(" Preflight aborted: headers '%v' not allowed", reqHeaders[0]) + // However, some gateways split that header into multiple headers of the same name; + // see https://github.com/rs/cors/issues/184. + reqHeaders, found := r.Header["Access-Control-Request-Headers"] + if found && !c.allowedHeadersAll && !c.allowedHeaders.Accepts(reqHeaders) { + c.logf(" Preflight aborted: headers '%v' not allowed", reqHeaders) return } if c.allowedOriginsAll { @@ -391,9 +393,7 @@ func (c *Cors) handlePreflight(w http.ResponseWriter, r *http.Request) { if len(c.maxAge) > 0 { headers["Access-Control-Max-Age"] = c.maxAge } - if c.Log != nil { - c.logf(" Preflight response headers: %v", headers) - } + c.logf(" Preflight response headers: %v", headers) } // handleActualRequest handles simple cross-origin requests, actual request or redirects @@ -440,9 +440,7 @@ func (c *Cors) handleActualRequest(w http.ResponseWriter, r *http.Request) { if c.allowCredentials { headers["Access-Control-Allow-Credentials"] = headerTrue } - if c.Log != nil { - c.logf(" Actual response added headers: %v", headers) - } + c.logf(" Actual response added headers: %v", headers) } // convenience method. checks if a logger is set. diff --git a/vendor/github.com/rs/cors/internal/sortedset.go b/vendor/github.com/rs/cors/internal/sortedset.go index 513da20f7d0..844f3f9e035 100644 --- a/vendor/github.com/rs/cors/internal/sortedset.go +++ b/vendor/github.com/rs/cors/internal/sortedset.go @@ -52,46 +52,134 @@ func (set SortedSet) String() string { return strings.Join(elems, ",") } -// Subsumes reports whether csv is a sequence of comma-separated names that are -// - all elements of set, -// - sorted in lexicographically order, +// Accepts reports whether values is a sequence of list-based field values +// whose elements are +// - all members of set, +// - sorted in lexicographical order, // - unique. 
-func (set SortedSet) Subsumes(csv string) bool { - if csv == "" { - return true +func (set SortedSet) Accepts(values []string) bool { + var ( // effectively constant + maxLen = maxOWSBytes + set.maxLen + maxOWSBytes + 1 // +1 for comma + ) + var ( + posOfLastNameSeen = -1 + name string + commaFound bool + emptyElements int + ok bool + ) + for _, s := range values { + for { + // As a defense against maliciously long names in s, + // we process only a small number of s's leading bytes per iteration. + name, s, commaFound = cutAtComma(s, maxLen) + name, ok = trimOWS(name, maxOWSBytes) + if !ok { + return false + } + if name == "" { + // RFC 9110 requires recipients to tolerate + // "a reasonable number of empty list elements"; see + // https://httpwg.org/specs/rfc9110.html#abnf.extension.recipient. + emptyElements++ + if emptyElements > maxEmptyElements { + return false + } + if !commaFound { // We have now exhausted the names in s. + break + } + continue + } + pos, ok := set.m[name] + if !ok { + return false + } + // The names in s are expected to be sorted in lexicographical order + // and to each appear at most once. + // Therefore, the positions (in set) of the names that + // appear in s should form a strictly increasing sequence. + // If that's not actually the case, bail out. + if pos <= posOfLastNameSeen { + return false + } + posOfLastNameSeen = pos + if !commaFound { // We have now exhausted the names in s. + break + } + } + } + return true +} + +const ( + maxOWSBytes = 1 // number of leading/trailing OWS bytes tolerated + maxEmptyElements = 16 // number of empty list elements tolerated +) + +func cutAtComma(s string, n int) (before, after string, found bool) { + // Note: this implementation draws inspiration from strings.Cut's. + end := min(len(s), n) + if i := strings.IndexByte(s[:end], ','); i >= 0 { + after = s[i+1:] // deal with this first to save one bounds check + return s[:i], after, true + } + return s, "", false +} + +// TrimOWS trims up to n bytes of [optional whitespace (OWS)] +// from the start of and/or the end of s. +// If no more than n bytes of OWS are found at the start of s +// and no more than n bytes of OWS are found at the end of s, +// it returns the trimmed result and true. +// Otherwise, it returns the original string and false. +// +// [optional whitespace (OWS)]: https://httpwg.org/specs/rfc9110.html#whitespace +func trimOWS(s string, n int) (trimmed string, ok bool) { + if s == "" { + return s, true + } + trimmed, ok = trimRightOWS(s, n) + if !ok { + return s, false } - posOfLastNameSeen := -1 - chunkSize := set.maxLen + 1 // (to accommodate for at least one comma) - for { - // As a defense against maliciously long names in csv, - // we only process at most chunkSize bytes per iteration. - end := min(len(csv), chunkSize) - comma := strings.IndexByte(csv[:end], ',') - var name string - if comma == -1 { - name = csv - } else { - name = csv[:comma] + trimmed, ok = trimLeftOWS(trimmed, n) + if !ok { + return s, false + } + return trimmed, true +} + +func trimLeftOWS(s string, n int) (string, bool) { + sCopy := s + var i int + for len(s) > 0 { + if i > n { + return sCopy, false } - pos, found := set.m[name] - if !found { - return false + if !(s[0] == ' ' || s[0] == '\t') { + break } - // The names in csv are expected to be sorted in lexicographical order - // and appear at most once in csv. - // Therefore, the positions (in set) of the names that - // appear in csv should form a strictly increasing sequence. - // If that's not actually the case, bail out. 
- if pos <= posOfLastNameSeen { - return false + s = s[1:] + i++ + } + return s, true +} + +func trimRightOWS(s string, n int) (string, bool) { + sCopy := s + var i int + for len(s) > 0 { + if i > n { + return sCopy, false } - posOfLastNameSeen = pos - if comma < 0 { // We've now processed all the names in csv. + last := len(s) - 1 + if !(s[last] == ' ' || s[last] == '\t') { break } - csv = csv[comma+1:] + s = s[:last] + i++ } - return true + return s, true } // TODO: when updating go directive to 1.21 or later, diff --git a/vendor/github.com/rs/cors/utils.go b/vendor/github.com/rs/cors/utils.go index 7019f45cd9c..41b0c2836a3 100644 --- a/vendor/github.com/rs/cors/utils.go +++ b/vendor/github.com/rs/cors/utils.go @@ -1,7 +1,6 @@ package cors import ( - "net/http" "strings" ) @@ -24,11 +23,3 @@ func convert(s []string, f func(string) string) []string { } return out } - -func first(hdrs http.Header, k string) ([]string, bool) { - v, found := hdrs[k] - if !found || len(v) == 0 { - return nil, false - } - return v[:1], true -} diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix_nocgo.go b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix_nocgo.go deleted file mode 100644 index a77b4dbb762..00000000000 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix_nocgo.go +++ /dev/null @@ -1,92 +0,0 @@ -//go:build aix && !cgo -// +build aix,!cgo - -package cpu - -import ( - "context" - "strconv" - "strings" - - "github.com/shirou/gopsutil/v3/internal/common" -) - -func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) { - if percpu { - return []TimesStat{}, common.ErrNotImplementedError - } else { - out, err := invoke.CommandWithContext(ctx, "sar", "-u", "10", "1") - if err != nil { - return nil, err - } - lines := strings.Split(string(out), "\n") - if len(lines) < 5 { - return []TimesStat{}, common.ErrNotImplementedError - } - - ret := TimesStat{CPU: "cpu-total"} - h := strings.Fields(lines[len(lines)-3]) // headers - v := strings.Fields(lines[len(lines)-2]) // values - for i, header := range h { - if t, err := strconv.ParseFloat(v[i], 64); err == nil { - switch header { - case `%usr`: - ret.User = t - case `%sys`: - ret.System = t - case `%wio`: - ret.Iowait = t - case `%idle`: - ret.Idle = t - } - } - } - - return []TimesStat{ret}, nil - } -} - -func InfoWithContext(ctx context.Context) ([]InfoStat, error) { - out, err := invoke.CommandWithContext(ctx, "prtconf") - if err != nil { - return nil, err - } - - ret := InfoStat{} - for _, line := range strings.Split(string(out), "\n") { - if strings.HasPrefix(line, "Number Of Processors:") { - p := strings.Fields(line) - if len(p) > 3 { - if t, err := strconv.ParseUint(p[3], 10, 64); err == nil { - ret.Cores = int32(t) - } - } - } else if strings.HasPrefix(line, "Processor Clock Speed:") { - p := strings.Fields(line) - if len(p) > 4 { - if t, err := strconv.ParseFloat(p[3], 64); err == nil { - switch strings.ToUpper(p[4]) { - case "MHZ": - ret.Mhz = t - case "GHZ": - ret.Mhz = t * 1000.0 - case "KHZ": - ret.Mhz = t / 1000.0 - default: - ret.Mhz = t - } - } - } - break - } - } - return []InfoStat{ret}, nil -} - -func CountsWithContext(ctx context.Context, logical bool) (int, error) { - info, err := InfoWithContext(ctx) - if err == nil { - return int(info[0].Cores), nil - } - return 0, err -} diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_darwin.go b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_darwin.go deleted file mode 100644 index 41f395e5e05..00000000000 --- 
a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_darwin.go +++ /dev/null @@ -1,117 +0,0 @@ -//go:build darwin -// +build darwin - -package cpu - -import ( - "context" - "strconv" - "strings" - - "github.com/shoenig/go-m1cpu" - "github.com/tklauser/go-sysconf" - "golang.org/x/sys/unix" -) - -// sys/resource.h -const ( - CPUser = 0 - cpNice = 1 - cpSys = 2 - cpIntr = 3 - cpIdle = 4 - cpUStates = 5 -) - -// default value. from time.h -var ClocksPerSec = float64(128) - -func init() { - clkTck, err := sysconf.Sysconf(sysconf.SC_CLK_TCK) - // ignore errors - if err == nil { - ClocksPerSec = float64(clkTck) - } -} - -func Times(percpu bool) ([]TimesStat, error) { - return TimesWithContext(context.Background(), percpu) -} - -func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) { - if percpu { - return perCPUTimes() - } - - return allCPUTimes() -} - -// Returns only one CPUInfoStat on FreeBSD -func Info() ([]InfoStat, error) { - return InfoWithContext(context.Background()) -} - -func InfoWithContext(ctx context.Context) ([]InfoStat, error) { - var ret []InfoStat - - c := InfoStat{} - c.ModelName, _ = unix.Sysctl("machdep.cpu.brand_string") - family, _ := unix.SysctlUint32("machdep.cpu.family") - c.Family = strconv.FormatUint(uint64(family), 10) - model, _ := unix.SysctlUint32("machdep.cpu.model") - c.Model = strconv.FormatUint(uint64(model), 10) - stepping, _ := unix.SysctlUint32("machdep.cpu.stepping") - c.Stepping = int32(stepping) - features, err := unix.Sysctl("machdep.cpu.features") - if err == nil { - for _, v := range strings.Fields(features) { - c.Flags = append(c.Flags, strings.ToLower(v)) - } - } - leaf7Features, err := unix.Sysctl("machdep.cpu.leaf7_features") - if err == nil { - for _, v := range strings.Fields(leaf7Features) { - c.Flags = append(c.Flags, strings.ToLower(v)) - } - } - extfeatures, err := unix.Sysctl("machdep.cpu.extfeatures") - if err == nil { - for _, v := range strings.Fields(extfeatures) { - c.Flags = append(c.Flags, strings.ToLower(v)) - } - } - cores, _ := unix.SysctlUint32("machdep.cpu.core_count") - c.Cores = int32(cores) - cacheSize, _ := unix.SysctlUint32("machdep.cpu.cache.size") - c.CacheSize = int32(cacheSize) - c.VendorID, _ = unix.Sysctl("machdep.cpu.vendor") - - if m1cpu.IsAppleSilicon() { - c.Mhz = float64(m1cpu.PCoreHz() / 1_000_000) - } else { - // Use the rated frequency of the CPU. This is a static value and does not - // account for low power or Turbo Boost modes. 
- cpuFrequency, err := unix.SysctlUint64("hw.cpufrequency") - if err == nil { - c.Mhz = float64(cpuFrequency) / 1000000.0 - } - } - - return append(ret, c), nil -} - -func CountsWithContext(ctx context.Context, logical bool) (int, error) { - var cpuArgument string - if logical { - cpuArgument = "hw.logicalcpu" - } else { - cpuArgument = "hw.physicalcpu" - } - - count, err := unix.SysctlUint32(cpuArgument) - if err != nil { - return 0, err - } - - return int(count), nil -} diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_darwin_cgo.go b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_darwin_cgo.go deleted file mode 100644 index 1d5f0772ed7..00000000000 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_darwin_cgo.go +++ /dev/null @@ -1,111 +0,0 @@ -//go:build darwin && cgo -// +build darwin,cgo - -package cpu - -/* -#include -#include -#include -#include -#include -#include -#include -#if TARGET_OS_MAC -#include -#endif -#include -#include -*/ -import "C" - -import ( - "bytes" - "encoding/binary" - "fmt" - "unsafe" -) - -// these CPU times for darwin is borrowed from influxdb/telegraf. - -func perCPUTimes() ([]TimesStat, error) { - var ( - count C.mach_msg_type_number_t - cpuload *C.processor_cpu_load_info_data_t - ncpu C.natural_t - ) - - status := C.host_processor_info(C.host_t(C.mach_host_self()), - C.PROCESSOR_CPU_LOAD_INFO, - &ncpu, - (*C.processor_info_array_t)(unsafe.Pointer(&cpuload)), - &count) - - if status != C.KERN_SUCCESS { - return nil, fmt.Errorf("host_processor_info error=%d", status) - } - - // jump through some cgo casting hoops and ensure we properly free - // the memory that cpuload points to - target := C.vm_map_t(C.mach_task_self_) - address := C.vm_address_t(uintptr(unsafe.Pointer(cpuload))) - defer C.vm_deallocate(target, address, C.vm_size_t(ncpu)) - - // the body of struct processor_cpu_load_info - // aka processor_cpu_load_info_data_t - var cpu_ticks [C.CPU_STATE_MAX]uint32 - - // copy the cpuload array to a []byte buffer - // where we can binary.Read the data - size := int(ncpu) * binary.Size(cpu_ticks) - buf := (*[1 << 30]byte)(unsafe.Pointer(cpuload))[:size:size] - - bbuf := bytes.NewBuffer(buf) - - var ret []TimesStat - - for i := 0; i < int(ncpu); i++ { - err := binary.Read(bbuf, binary.LittleEndian, &cpu_ticks) - if err != nil { - return nil, err - } - - c := TimesStat{ - CPU: fmt.Sprintf("cpu%d", i), - User: float64(cpu_ticks[C.CPU_STATE_USER]) / ClocksPerSec, - System: float64(cpu_ticks[C.CPU_STATE_SYSTEM]) / ClocksPerSec, - Nice: float64(cpu_ticks[C.CPU_STATE_NICE]) / ClocksPerSec, - Idle: float64(cpu_ticks[C.CPU_STATE_IDLE]) / ClocksPerSec, - } - - ret = append(ret, c) - } - - return ret, nil -} - -func allCPUTimes() ([]TimesStat, error) { - var count C.mach_msg_type_number_t - var cpuload C.host_cpu_load_info_data_t - - count = C.HOST_CPU_LOAD_INFO_COUNT - - status := C.host_statistics(C.host_t(C.mach_host_self()), - C.HOST_CPU_LOAD_INFO, - C.host_info_t(unsafe.Pointer(&cpuload)), - &count) - - if status != C.KERN_SUCCESS { - return nil, fmt.Errorf("host_statistics error=%d", status) - } - - c := TimesStat{ - CPU: "cpu-total", - User: float64(cpuload.cpu_ticks[C.CPU_STATE_USER]) / ClocksPerSec, - System: float64(cpuload.cpu_ticks[C.CPU_STATE_SYSTEM]) / ClocksPerSec, - Nice: float64(cpuload.cpu_ticks[C.CPU_STATE_NICE]) / ClocksPerSec, - Idle: float64(cpuload.cpu_ticks[C.CPU_STATE_IDLE]) / ClocksPerSec, - } - - return []TimesStat{c}, nil -} diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_darwin_nocgo.go 
b/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_darwin_nocgo.go deleted file mode 100644 index e067e99f980..00000000000 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_darwin_nocgo.go +++ /dev/null @@ -1,14 +0,0 @@ -//go:build darwin && !cgo -// +build darwin,!cgo - -package cpu - -import "github.com/shirou/gopsutil/v3/internal/common" - -func perCPUTimes() ([]TimesStat, error) { - return []TimesStat{}, common.ErrNotImplementedError -} - -func allCPUTimes() ([]TimesStat, error) { - return []TimesStat{}, common.ErrNotImplementedError -} diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_darwin.go b/vendor/github.com/shirou/gopsutil/v3/internal/common/common_darwin.go deleted file mode 100644 index f1a78459777..00000000000 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_darwin.go +++ /dev/null @@ -1,66 +0,0 @@ -//go:build darwin -// +build darwin - -package common - -import ( - "context" - "os" - "os/exec" - "strings" - "unsafe" - - "golang.org/x/sys/unix" -) - -func DoSysctrlWithContext(ctx context.Context, mib string) ([]string, error) { - cmd := exec.CommandContext(ctx, "sysctl", "-n", mib) - cmd.Env = getSysctrlEnv(os.Environ()) - out, err := cmd.Output() - if err != nil { - return []string{}, err - } - v := strings.Replace(string(out), "{ ", "", 1) - v = strings.Replace(string(v), " }", "", 1) - values := strings.Fields(string(v)) - - return values, nil -} - -func CallSyscall(mib []int32) ([]byte, uint64, error) { - miblen := uint64(len(mib)) - - // get required buffer size - length := uint64(0) - _, _, err := unix.Syscall6( - 202, // unix.SYS___SYSCTL https://github.com/golang/sys/blob/76b94024e4b621e672466e8db3d7f084e7ddcad2/unix/zsysnum_darwin_amd64.go#L146 - uintptr(unsafe.Pointer(&mib[0])), - uintptr(miblen), - 0, - uintptr(unsafe.Pointer(&length)), - 0, - 0) - if err != 0 { - var b []byte - return b, length, err - } - if length == 0 { - var b []byte - return b, length, err - } - // get proc info itself - buf := make([]byte, length) - _, _, err = unix.Syscall6( - 202, // unix.SYS___SYSCTL https://github.com/golang/sys/blob/76b94024e4b621e672466e8db3d7f084e7ddcad2/unix/zsysnum_darwin_amd64.go#L146 - uintptr(unsafe.Pointer(&mib[0])), - uintptr(miblen), - uintptr(unsafe.Pointer(&buf[0])), - uintptr(unsafe.Pointer(&length)), - 0, - 0) - if err != 0 { - return buf, length, err - } - - return buf, length, nil -} diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin.go b/vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin.go deleted file mode 100644 index a05a0faba0b..00000000000 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin.go +++ /dev/null @@ -1,72 +0,0 @@ -//go:build darwin -// +build darwin - -package mem - -import ( - "context" - "fmt" - "unsafe" - - "golang.org/x/sys/unix" - - "github.com/shirou/gopsutil/v3/internal/common" -) - -func getHwMemsize() (uint64, error) { - total, err := unix.SysctlUint64("hw.memsize") - if err != nil { - return 0, err - } - return total, nil -} - -// xsw_usage in sys/sysctl.h -type swapUsage struct { - Total uint64 - Avail uint64 - Used uint64 - Pagesize int32 - Encrypted bool -} - -// SwapMemory returns swapinfo. 
-func SwapMemory() (*SwapMemoryStat, error) { - return SwapMemoryWithContext(context.Background()) -} - -func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) { - // https://github.com/yanllearnn/go-osstat/blob/ae8a279d26f52ec946a03698c7f50a26cfb427e3/memory/memory_darwin.go - var ret *SwapMemoryStat - - value, err := unix.SysctlRaw("vm.swapusage") - if err != nil { - return ret, err - } - if len(value) != 32 { - return ret, fmt.Errorf("unexpected output of sysctl vm.swapusage: %v (len: %d)", value, len(value)) - } - swap := (*swapUsage)(unsafe.Pointer(&value[0])) - - u := float64(0) - if swap.Total != 0 { - u = ((float64(swap.Total) - float64(swap.Avail)) / float64(swap.Total)) * 100.0 - } - - ret = &SwapMemoryStat{ - Total: swap.Total, - Used: swap.Used, - Free: swap.Avail, - UsedPercent: u, - } - - return ret, nil -} - -func SwapDevices() ([]*SwapDevice, error) { - return SwapDevicesWithContext(context.Background()) -} - -func SwapDevicesWithContext(ctx context.Context) ([]*SwapDevice, error) { - return nil, common.ErrNotImplementedError -} diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin_cgo.go b/vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin_cgo.go deleted file mode 100644 index e5da7dcdb23..00000000000 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin_cgo.go +++ /dev/null @@ -1,58 +0,0 @@ -//go:build darwin && cgo -// +build darwin,cgo - -package mem - -/* -#include -#include -*/ -import "C" - -import ( - "context" - "fmt" - "unsafe" -) - -// VirtualMemory returns VirtualmemoryStat. -func VirtualMemory() (*VirtualMemoryStat, error) { - return VirtualMemoryWithContext(context.Background()) -} - -func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) { - count := C.mach_msg_type_number_t(C.HOST_VM_INFO_COUNT) - var vmstat C.vm_statistics_data_t - - status := C.host_statistics(C.host_t(C.mach_host_self()), - C.HOST_VM_INFO, - C.host_info_t(unsafe.Pointer(&vmstat)), - &count) - - if status != C.KERN_SUCCESS { - return nil, fmt.Errorf("host_statistics error=%d", status) - } - - pageSize := uint64(C.vm_kernel_page_size) - total, err := getHwMemsize() - if err != nil { - return nil, err - } - totalCount := C.natural_t(total / pageSize) - - availableCount := vmstat.inactive_count + vmstat.free_count - usedPercent := 100 * float64(totalCount-availableCount) / float64(totalCount) - - usedCount := totalCount - availableCount - - return &VirtualMemoryStat{ - Total: total, - Available: pageSize * uint64(availableCount), - Used: pageSize * uint64(usedCount), - UsedPercent: usedPercent, - Free: pageSize * uint64(vmstat.free_count), - Active: pageSize * uint64(vmstat.active_count), - Inactive: pageSize * uint64(vmstat.inactive_count), - Wired: pageSize * uint64(vmstat.wire_count), - }, nil -} diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin_nocgo.go b/vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin_nocgo.go deleted file mode 100644 index c9393168032..00000000000 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin_nocgo.go +++ /dev/null @@ -1,89 +0,0 @@ -//go:build darwin && !cgo -// +build darwin,!cgo - -package mem - -import ( - "context" - "strconv" - "strings" - - "golang.org/x/sys/unix" -) - -// Runs vm_stat and returns Free and inactive pages -func getVMStat(vms *VirtualMemoryStat) error { - out, err := invoke.Command("vm_stat") - if err != nil { - return err - } - return parseVMStat(string(out), vms) -} - -func parseVMStat(out string, vms *VirtualMemoryStat) error { - var err error - - lines := 
strings.Split(out, "\n") - pagesize := uint64(unix.Getpagesize()) - for _, line := range lines { - fields := strings.Split(line, ":") - if len(fields) < 2 { - continue - } - key := strings.TrimSpace(fields[0]) - value := strings.Trim(fields[1], " .") - switch key { - case "Pages free": - free, e := strconv.ParseUint(value, 10, 64) - if e != nil { - err = e - } - vms.Free = free * pagesize - case "Pages inactive": - inactive, e := strconv.ParseUint(value, 10, 64) - if e != nil { - err = e - } - vms.Inactive = inactive * pagesize - case "Pages active": - active, e := strconv.ParseUint(value, 10, 64) - if e != nil { - err = e - } - vms.Active = active * pagesize - case "Pages wired down": - wired, e := strconv.ParseUint(value, 10, 64) - if e != nil { - err = e - } - vms.Wired = wired * pagesize - } - } - return err -} - -// VirtualMemory returns VirtualmemoryStat. -func VirtualMemory() (*VirtualMemoryStat, error) { - return VirtualMemoryWithContext(context.Background()) -} - -func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) { - ret := &VirtualMemoryStat{} - - total, err := getHwMemsize() - if err != nil { - return nil, err - } - err = getVMStat(ret) - if err != nil { - return nil, err - } - - ret.Available = ret.Free + ret.Inactive - ret.Total = total - - ret.Used = ret.Total - ret.Available - ret.UsedPercent = 100 * float64(ret.Used) / float64(ret.Total) - - return ret, nil -} diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_linux_111.go b/vendor/github.com/shirou/gopsutil/v3/net/net_linux_111.go deleted file mode 100644 index bd5c9587137..00000000000 --- a/vendor/github.com/shirou/gopsutil/v3/net/net_linux_111.go +++ /dev/null @@ -1,12 +0,0 @@ -//go:build !go1.16 -// +build !go1.16 - -package net - -import ( - "os" -) - -func readDir(f *os.File, max int) ([]os.FileInfo, error) { - return f.Readdir(max) -} diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_linux_116.go b/vendor/github.com/shirou/gopsutil/v3/net/net_linux_116.go deleted file mode 100644 index a45072e924a..00000000000 --- a/vendor/github.com/shirou/gopsutil/v3/net/net_linux_116.go +++ /dev/null @@ -1,12 +0,0 @@ -//go:build go1.16 -// +build go1.16 - -package net - -import ( - "os" -) - -func readDir(f *os.File, max int) ([]os.DirEntry, error) { - return f.ReadDir(max) -} diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_darwin.go b/vendor/github.com/shirou/gopsutil/v3/process/process_darwin.go deleted file mode 100644 index 176661cbd6b..00000000000 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_darwin.go +++ /dev/null @@ -1,325 +0,0 @@ -//go:build darwin -// +build darwin - -package process - -import ( - "context" - "fmt" - "path/filepath" - "strconv" - "strings" - - "github.com/tklauser/go-sysconf" - "golang.org/x/sys/unix" - - "github.com/shirou/gopsutil/v3/internal/common" - "github.com/shirou/gopsutil/v3/net" -) - -// copied from sys/sysctl.h -const ( - CTLKern = 1 // "high kernel": proc, limits - KernProc = 14 // struct: process entries - KernProcPID = 1 // by process id - KernProcProc = 8 // only return procs - KernProcAll = 0 // everything - KernProcPathname = 12 // path to executable -) - -var clockTicks = 100 // default value - -func init() { - clkTck, err := sysconf.Sysconf(sysconf.SC_CLK_TCK) - // ignore errors - if err == nil { - clockTicks = int(clkTck) - } -} - -type _Ctype_struct___0 struct { - Pad uint64 -} - -func pidsWithContext(ctx context.Context) ([]int32, error) { - var ret []int32 - - kprocs, err := 
unix.SysctlKinfoProcSlice("kern.proc.all") - if err != nil { - return ret, err - } - - for _, proc := range kprocs { - ret = append(ret, int32(proc.Proc.P_pid)) - } - - return ret, nil -} - -func (p *Process) PpidWithContext(ctx context.Context) (int32, error) { - k, err := p.getKProc() - if err != nil { - return 0, err - } - - return k.Eproc.Ppid, nil -} - -func (p *Process) NameWithContext(ctx context.Context) (string, error) { - k, err := p.getKProc() - if err != nil { - return "", err - } - - name := common.ByteToString(k.Proc.P_comm[:]) - - if len(name) >= 15 { - cmdName, err := p.cmdNameWithContext(ctx) - if err != nil { - return "", err - } - if len(cmdName) > 0 { - extendedName := filepath.Base(cmdName) - if strings.HasPrefix(extendedName, p.name) { - name = extendedName - } - } - } - - return name, nil -} - -func (p *Process) createTimeWithContext(ctx context.Context) (int64, error) { - k, err := p.getKProc() - if err != nil { - return 0, err - } - - return k.Proc.P_starttime.Sec*1000 + int64(k.Proc.P_starttime.Usec)/1000, nil -} - -func (p *Process) StatusWithContext(ctx context.Context) ([]string, error) { - r, err := callPsWithContext(ctx, "state", p.Pid, false, false) - if err != nil { - return []string{""}, err - } - status := convertStatusChar(r[0][0][0:1]) - return []string{status}, err -} - -func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) { - // see https://github.com/shirou/gopsutil/issues/596#issuecomment-432707831 for implementation details - pid := p.Pid - out, err := invoke.CommandWithContext(ctx, "ps", "-o", "stat=", "-p", strconv.Itoa(int(pid))) - if err != nil { - return false, err - } - return strings.IndexByte(string(out), '+') != -1, nil -} - -func (p *Process) UidsWithContext(ctx context.Context) ([]int32, error) { - k, err := p.getKProc() - if err != nil { - return nil, err - } - - // See: http://unix.superglobalmegacorp.com/Net2/newsrc/sys/ucred.h.html - userEffectiveUID := int32(k.Eproc.Ucred.Uid) - - return []int32{userEffectiveUID}, nil -} - -func (p *Process) GidsWithContext(ctx context.Context) ([]int32, error) { - k, err := p.getKProc() - if err != nil { - return nil, err - } - - gids := make([]int32, 0, 3) - gids = append(gids, int32(k.Eproc.Pcred.P_rgid), int32(k.Eproc.Pcred.P_rgid), int32(k.Eproc.Pcred.P_svgid)) - - return gids, nil -} - -func (p *Process) GroupsWithContext(ctx context.Context) ([]int32, error) { - return nil, common.ErrNotImplementedError - // k, err := p.getKProc() - // if err != nil { - // return nil, err - // } - - // groups := make([]int32, k.Eproc.Ucred.Ngroups) - // for i := int16(0); i < k.Eproc.Ucred.Ngroups; i++ { - // groups[i] = int32(k.Eproc.Ucred.Groups[i]) - // } - - // return groups, nil -} - -func (p *Process) TerminalWithContext(ctx context.Context) (string, error) { - return "", common.ErrNotImplementedError - /* - k, err := p.getKProc() - if err != nil { - return "", err - } - - ttyNr := uint64(k.Eproc.Tdev) - termmap, err := getTerminalMap() - if err != nil { - return "", err - } - - return termmap[ttyNr], nil - */ -} - -func (p *Process) NiceWithContext(ctx context.Context) (int32, error) { - k, err := p.getKProc() - if err != nil { - return 0, err - } - return int32(k.Proc.P_nice), nil -} - -func (p *Process) IOCountersWithContext(ctx context.Context) (*IOCountersStat, error) { - return nil, common.ErrNotImplementedError -} - -func convertCPUTimes(s string) (ret float64, err error) { - var t int - var _tmp string - if strings.Contains(s, ":") { - _t := strings.Split(s, ":") - switch 
len(_t) { - case 3: - hour, err := strconv.Atoi(_t[0]) - if err != nil { - return ret, err - } - t += hour * 60 * 60 * clockTicks - - mins, err := strconv.Atoi(_t[1]) - if err != nil { - return ret, err - } - t += mins * 60 * clockTicks - _tmp = _t[2] - case 2: - mins, err := strconv.Atoi(_t[0]) - if err != nil { - return ret, err - } - t += mins * 60 * clockTicks - _tmp = _t[1] - case 1, 0: - _tmp = s - default: - return ret, fmt.Errorf("wrong cpu time string") - } - } else { - _tmp = s - } - - _t := strings.Split(_tmp, ".") - if err != nil { - return ret, err - } - h, err := strconv.Atoi(_t[0]) - t += h * clockTicks - h, err = strconv.Atoi(_t[1]) - t += h - return float64(t) / float64(clockTicks), nil -} - -func (p *Process) ChildrenWithContext(ctx context.Context) ([]*Process, error) { - pids, err := common.CallPgrepWithContext(ctx, invoke, p.Pid) - if err != nil { - return nil, err - } - ret := make([]*Process, 0, len(pids)) - for _, pid := range pids { - np, err := NewProcessWithContext(ctx, pid) - if err != nil { - return nil, err - } - ret = append(ret, np) - } - return ret, nil -} - -func (p *Process) ConnectionsWithContext(ctx context.Context) ([]net.ConnectionStat, error) { - return net.ConnectionsPidWithContext(ctx, "all", p.Pid) -} - -func (p *Process) ConnectionsMaxWithContext(ctx context.Context, max int) ([]net.ConnectionStat, error) { - return net.ConnectionsPidMaxWithContext(ctx, "all", p.Pid, max) -} - -func ProcessesWithContext(ctx context.Context) ([]*Process, error) { - out := []*Process{} - - pids, err := PidsWithContext(ctx) - if err != nil { - return out, err - } - - for _, pid := range pids { - p, err := NewProcessWithContext(ctx, pid) - if err != nil { - continue - } - out = append(out, p) - } - - return out, nil -} - -// Returns a proc as defined here: -// http://unix.superglobalmegacorp.com/Net2/newsrc/sys/kinfo_proc.h.html -func (p *Process) getKProc() (*unix.KinfoProc, error) { - return unix.SysctlKinfoProc("kern.proc.pid", int(p.Pid)) -} - -// call ps command. -// Return value deletes Header line(you must not input wrong arg). -// And splited by Space. Caller have responsibility to manage. -// If passed arg pid is 0, get information from all process. -func callPsWithContext(ctx context.Context, arg string, pid int32, threadOption bool, nameOption bool) ([][]string, error) { - var cmd []string - if pid == 0 { // will get from all processes. - cmd = []string{"-ax", "-o", arg} - } else if threadOption { - cmd = []string{"-x", "-o", arg, "-M", "-p", strconv.Itoa(int(pid))} - } else { - cmd = []string{"-x", "-o", arg, "-p", strconv.Itoa(int(pid))} - } - if nameOption { - cmd = append(cmd, "-c") - } - out, err := invoke.CommandWithContext(ctx, "ps", cmd...) 
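A caveat in the deleted convertCPUTimes above: the err check right after strings.Split tests a still-nil named return and can never fire, the strconv.Atoi errors in the seconds branch are never checked, and _t[1] is indexed even when the string has no fractional part. A compact alternative sketch (not the vendored implementation) that treats the ps cputime string as base-60 fields and propagates every parse error:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseCPUTime converts ps(1) cputime strings ("ss.cc", "mm:ss.cc",
// "hh:mm:ss.cc") to seconds. Each colon-separated field rolls into the
// accumulator in base 60; ParseFloat handles the fractional seconds.
func parseCPUTime(s string) (float64, error) {
	var secs float64
	for _, f := range strings.Split(s, ":") {
		n, err := strconv.ParseFloat(f, 64)
		if err != nil {
			return 0, err
		}
		secs = secs*60 + n
	}
	return secs, nil
}

func main() {
	v, err := parseCPUTime("1:02.50")
	fmt.Println(v, err) // 62.5 <nil>
}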
- if err != nil { - return [][]string{}, err - } - lines := strings.Split(string(out), "\n") - - var ret [][]string - for _, l := range lines[1:] { - var lr []string - if nameOption { - lr = append(lr, l) - } else { - for _, r := range strings.Split(l, " ") { - if r == "" { - continue - } - lr = append(lr, strings.TrimSpace(r)) - } - } - if len(lr) != 0 { - ret = append(ret, lr) - } - } - - return ret, nil -} diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_cgo.go b/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_cgo.go deleted file mode 100644 index 858f08e7a41..00000000000 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_cgo.go +++ /dev/null @@ -1,222 +0,0 @@ -//go:build darwin && cgo -// +build darwin,cgo - -package process - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -import "C" - -import ( - "bytes" - "context" - "fmt" - "strings" - "syscall" - "unsafe" - - "github.com/shirou/gopsutil/v3/cpu" -) - -var ( - argMax int - timescaleToNanoSeconds float64 -) - -func init() { - argMax = getArgMax() - timescaleToNanoSeconds = getTimeScaleToNanoSeconds() -} - -func getArgMax() int { - var ( - mib = [...]C.int{C.CTL_KERN, C.KERN_ARGMAX} - argmax C.int - size C.size_t = C.ulong(unsafe.Sizeof(argmax)) - ) - retval := C.sysctl(&mib[0], 2, unsafe.Pointer(&argmax), &size, C.NULL, 0) - if retval == 0 { - return int(argmax) - } - return 0 -} - -func getTimeScaleToNanoSeconds() float64 { - var timeBaseInfo C.struct_mach_timebase_info - - C.mach_timebase_info(&timeBaseInfo) - - return float64(timeBaseInfo.numer) / float64(timeBaseInfo.denom) -} - -func (p *Process) ExeWithContext(ctx context.Context) (string, error) { - var c C.char // need a var for unsafe.Sizeof need a var - const bufsize = C.PROC_PIDPATHINFO_MAXSIZE * unsafe.Sizeof(c) - buffer := (*C.char)(C.malloc(C.size_t(bufsize))) - defer C.free(unsafe.Pointer(buffer)) - - ret, err := C.proc_pidpath(C.int(p.Pid), unsafe.Pointer(buffer), C.uint32_t(bufsize)) - if err != nil { - return "", err - } - if ret <= 0 { - return "", fmt.Errorf("unknown error: proc_pidpath returned %d", ret) - } - - return C.GoString(buffer), nil -} - -// CwdWithContext retrieves the Current Working Directory for the given process. -// It uses the proc_pidinfo from libproc and will only work for processes the -// EUID can access. Otherwise "operation not permitted" will be returned as the -// error. -// Note: This might also work for other *BSD OSs. 
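The cgo ExeWithContext above is the kind of call this update moves away from: the v4 code later in this patch loads libSystem with purego and resolves symbols such as proc_pidpath at runtime (see ProcPidPathSym in the new common_darwin.go). A standalone sketch of that approach, assuming only the public purego API; the 4096 buffer matches PROC_PIDPATHINFO_MAXSIZE:

//go:build darwin

package main

import (
	"fmt"
	"os"
	"unsafe"

	"github.com/ebitengine/purego"
)

func main() {
	// Load libSystem once and bind proc_pidpath to a typed Go function.
	lib, err := purego.Dlopen("/usr/lib/libSystem.B.dylib", purego.RTLD_LAZY|purego.RTLD_GLOBAL)
	if err != nil {
		panic(err)
	}
	var procPidPath func(pid int32, buf uintptr, size uint32) int32
	purego.RegisterLibFunc(&procPidPath, lib, "proc_pidpath")

	// proc_pidpath fills buf and returns the path length (<= 0 on failure).
	buf := make([]byte, 4096)
	n := procPidPath(int32(os.Getpid()), uintptr(unsafe.Pointer(&buf[0])), uint32(len(buf)))
	if n <= 0 {
		panic("proc_pidpath failed")
	}
	fmt.Println(string(buf[:n]))
}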
-func (p *Process) CwdWithContext(ctx context.Context) (string, error) { - const vpiSize = C.sizeof_struct_proc_vnodepathinfo - vpi := (*C.struct_proc_vnodepathinfo)(C.malloc(vpiSize)) - defer C.free(unsafe.Pointer(vpi)) - ret, err := C.proc_pidinfo(C.int(p.Pid), C.PROC_PIDVNODEPATHINFO, 0, unsafe.Pointer(vpi), vpiSize) - if err != nil { - // fmt.Printf("ret: %d %T\n", ret, err) - if err == syscall.EPERM { - return "", ErrorNotPermitted - } - return "", err - } - if ret <= 0 { - return "", fmt.Errorf("unknown error: proc_pidinfo returned %d", ret) - } - if ret != C.sizeof_struct_proc_vnodepathinfo { - return "", fmt.Errorf("too few bytes; expected %d, got %d", vpiSize, ret) - } - return C.GoString(&vpi.pvi_cdir.vip_path[0]), err -} - -func procArgs(pid int32) ([]byte, int, error) { - var ( - mib = [...]C.int{C.CTL_KERN, C.KERN_PROCARGS2, C.int(pid)} - size C.size_t = C.ulong(argMax) - nargs C.int - result []byte - ) - procargs := (*C.char)(C.malloc(C.ulong(argMax))) - defer C.free(unsafe.Pointer(procargs)) - retval, err := C.sysctl(&mib[0], 3, unsafe.Pointer(procargs), &size, C.NULL, 0) - if retval == 0 { - C.memcpy(unsafe.Pointer(&nargs), unsafe.Pointer(procargs), C.sizeof_int) - result = C.GoBytes(unsafe.Pointer(procargs), C.int(size)) - // fmt.Printf("size: %d %d\n%s\n", size, nargs, hex.Dump(result)) - return result, int(nargs), nil - } - return nil, 0, err -} - -func (p *Process) CmdlineSliceWithContext(ctx context.Context) ([]string, error) { - return p.cmdlineSliceWithContext(ctx, true) -} - -func (p *Process) cmdlineSliceWithContext(ctx context.Context, fallback bool) ([]string, error) { - pargs, nargs, err := procArgs(p.Pid) - if err != nil { - return nil, err - } - // The first bytes hold the nargs int, skip it. - args := bytes.Split((pargs)[C.sizeof_int:], []byte{0}) - var argStr string - // The first element is the actual binary/command path. - // command := args[0] - var argSlice []string - // var envSlice []string - // All other, non-zero elements are arguments. The first "nargs" elements - // are the arguments. Everything else in the slice is then the environment - // of the process. 
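As the comments above describe, the KERN_PROCARGS2 buffer begins with the argument count, followed by the executable path and then NUL-separated argv and environment strings. A cgo-free sketch of the same parse for the current process, assuming Go 1.21+ (for binary.NativeEndian) and that x/sys passes the extra SysctlRaw argument through as the pid:

//go:build darwin

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	raw, err := unix.SysctlRaw("kern.procargs2", os.Getpid())
	if err != nil {
		panic(err)
	}
	argc := int(binary.NativeEndian.Uint32(raw[:4])) // leading int is nargs
	fields := bytes.Split(raw[4:], []byte{0})
	exe := string(fields[0]) // the executable path precedes argv[0]

	var argv []string
	for _, f := range fields[1:] {
		if len(f) == 0 {
			continue // NUL padding that aligns argv after the path
		}
		argv = append(argv, string(f))
		if len(argv) == argc {
			break // everything past nargs entries is the environment
		}
	}
	fmt.Println(exe, argv)
}

The empty-field skip is the same compromise the vendored loop makes: an argument that is itself an empty string would be dropped, a limitation the original shares.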
- for _, arg := range args[1:] { - argStr = string(arg[:]) - if len(argStr) > 0 { - if nargs > 0 { - argSlice = append(argSlice, argStr) - nargs-- - continue - } - break - // envSlice = append(envSlice, argStr) - } - } - return argSlice, err -} - -// cmdNameWithContext returns the command name (including spaces) without any arguments -func (p *Process) cmdNameWithContext(ctx context.Context) (string, error) { - r, err := p.cmdlineSliceWithContext(ctx, false) - if err != nil { - return "", err - } - - if len(r) == 0 { - return "", nil - } - - return r[0], err -} - -func (p *Process) CmdlineWithContext(ctx context.Context) (string, error) { - r, err := p.CmdlineSliceWithContext(ctx) - if err != nil { - return "", err - } - return strings.Join(r, " "), err -} - -func (p *Process) NumThreadsWithContext(ctx context.Context) (int32, error) { - const tiSize = C.sizeof_struct_proc_taskinfo - ti := (*C.struct_proc_taskinfo)(C.malloc(tiSize)) - defer C.free(unsafe.Pointer(ti)) - - _, err := C.proc_pidinfo(C.int(p.Pid), C.PROC_PIDTASKINFO, 0, unsafe.Pointer(ti), tiSize) - if err != nil { - return 0, err - } - - return int32(ti.pti_threadnum), nil -} - -func (p *Process) TimesWithContext(ctx context.Context) (*cpu.TimesStat, error) { - const tiSize = C.sizeof_struct_proc_taskinfo - ti := (*C.struct_proc_taskinfo)(C.malloc(tiSize)) - defer C.free(unsafe.Pointer(ti)) - - _, err := C.proc_pidinfo(C.int(p.Pid), C.PROC_PIDTASKINFO, 0, unsafe.Pointer(ti), tiSize) - if err != nil { - return nil, err - } - - ret := &cpu.TimesStat{ - CPU: "cpu", - User: float64(ti.pti_total_user) * timescaleToNanoSeconds / 1e9, - System: float64(ti.pti_total_system) * timescaleToNanoSeconds / 1e9, - } - return ret, nil -} - -func (p *Process) MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, error) { - const tiSize = C.sizeof_struct_proc_taskinfo - ti := (*C.struct_proc_taskinfo)(C.malloc(tiSize)) - defer C.free(unsafe.Pointer(ti)) - - _, err := C.proc_pidinfo(C.int(p.Pid), C.PROC_PIDTASKINFO, 0, unsafe.Pointer(ti), tiSize) - if err != nil { - return nil, err - } - - ret := &MemoryInfoStat{ - RSS: uint64(ti.pti_resident_size), - VMS: uint64(ti.pti_virtual_size), - Swap: uint64(ti.pti_pageins), - } - return ret, nil -} diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_nocgo.go b/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_nocgo.go deleted file mode 100644 index bc1d357df8c..00000000000 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_nocgo.go +++ /dev/null @@ -1,127 +0,0 @@ -//go:build darwin && !cgo -// +build darwin,!cgo - -package process - -import ( - "context" - "fmt" - "strconv" - "strings" - - "github.com/shirou/gopsutil/v3/cpu" - "github.com/shirou/gopsutil/v3/internal/common" -) - -func (p *Process) CwdWithContext(ctx context.Context) (string, error) { - return "", common.ErrNotImplementedError -} - -func (p *Process) ExeWithContext(ctx context.Context) (string, error) { - out, err := invoke.CommandWithContext(ctx, "lsof", "-p", strconv.Itoa(int(p.Pid)), "-Fpfn") - if err != nil { - return "", fmt.Errorf("bad call to lsof: %s", err) - } - txtFound := 0 - lines := strings.Split(string(out), "\n") - for i := 1; i < len(lines); i++ { - if lines[i] == "ftxt" { - txtFound++ - if txtFound == 2 { - return lines[i-1][1:], nil - } - } - } - return "", fmt.Errorf("missing txt data returned by lsof") -} - -func (p *Process) CmdlineWithContext(ctx context.Context) (string, error) { - r, err := callPsWithContext(ctx, "command", p.Pid, false, false) - if err != nil { 
- return "", err - } - return strings.Join(r[0], " "), err -} - -func (p *Process) cmdNameWithContext(ctx context.Context) (string, error) { - r, err := callPsWithContext(ctx, "command", p.Pid, false, true) - if err != nil { - return "", err - } - if len(r) > 0 && len(r[0]) > 0 { - return r[0][0], err - } - - return "", err -} - -// CmdlineSliceWithContext returns the command line arguments of the process as a slice with each -// element being an argument. Because of current deficiencies in the way that the command -// line arguments are found, single arguments that have spaces in the will actually be -// reported as two separate items. In order to do something better CGO would be needed -// to use the native darwin functions. -func (p *Process) CmdlineSliceWithContext(ctx context.Context) ([]string, error) { - r, err := callPsWithContext(ctx, "command", p.Pid, false, false) - if err != nil { - return nil, err - } - return r[0], err -} - -func (p *Process) NumThreadsWithContext(ctx context.Context) (int32, error) { - r, err := callPsWithContext(ctx, "utime,stime", p.Pid, true, false) - if err != nil { - return 0, err - } - return int32(len(r)), nil -} - -func (p *Process) TimesWithContext(ctx context.Context) (*cpu.TimesStat, error) { - r, err := callPsWithContext(ctx, "utime,stime", p.Pid, false, false) - if err != nil { - return nil, err - } - - utime, err := convertCPUTimes(r[0][0]) - if err != nil { - return nil, err - } - stime, err := convertCPUTimes(r[0][1]) - if err != nil { - return nil, err - } - - ret := &cpu.TimesStat{ - CPU: "cpu", - User: utime, - System: stime, - } - return ret, nil -} - -func (p *Process) MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, error) { - r, err := callPsWithContext(ctx, "rss,vsize,pagein", p.Pid, false, false) - if err != nil { - return nil, err - } - rss, err := strconv.Atoi(r[0][0]) - if err != nil { - return nil, err - } - vms, err := strconv.Atoi(r[0][1]) - if err != nil { - return nil, err - } - pagein, err := strconv.Atoi(r[0][2]) - if err != nil { - return nil, err - } - - ret := &MemoryInfoStat{ - RSS: uint64(rss) * 1024, - VMS: uint64(vms) * 1024, - Swap: uint64(pagein), - } - - return ret, nil -} diff --git a/vendor/github.com/shirou/gopsutil/v3/LICENSE b/vendor/github.com/shirou/gopsutil/v4/LICENSE similarity index 100% rename from vendor/github.com/shirou/gopsutil/v3/LICENSE rename to vendor/github.com/shirou/gopsutil/v4/LICENSE diff --git a/vendor/github.com/shirou/gopsutil/v3/common/env.go b/vendor/github.com/shirou/gopsutil/v4/common/env.go similarity index 51% rename from vendor/github.com/shirou/gopsutil/v3/common/env.go rename to vendor/github.com/shirou/gopsutil/v4/common/env.go index 4b5f4980c21..47e471c402f 100644 --- a/vendor/github.com/shirou/gopsutil/v3/common/env.go +++ b/vendor/github.com/shirou/gopsutil/v4/common/env.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package common type EnvKeyType string @@ -11,13 +12,14 @@ type EnvKeyType string var EnvKey = EnvKeyType("env") const ( - HostProcEnvKey EnvKeyType = "HOST_PROC" - HostSysEnvKey EnvKeyType = "HOST_SYS" - HostEtcEnvKey EnvKeyType = "HOST_ETC" - HostVarEnvKey EnvKeyType = "HOST_VAR" - HostRunEnvKey EnvKeyType = "HOST_RUN" - HostDevEnvKey EnvKeyType = "HOST_DEV" - HostRootEnvKey EnvKeyType = "HOST_ROOT" + HostProcEnvKey EnvKeyType = "HOST_PROC" + HostSysEnvKey EnvKeyType = "HOST_SYS" + HostEtcEnvKey EnvKeyType = "HOST_ETC" + HostVarEnvKey EnvKeyType = "HOST_VAR" + HostRunEnvKey EnvKeyType = "HOST_RUN" + HostDevEnvKey EnvKeyType = 
"HOST_DEV" + HostRootEnvKey EnvKeyType = "HOST_ROOT" + HostProcMountinfo EnvKeyType = "HOST_PROC_MOUNTINFO" ) type EnvMap map[EnvKeyType]string diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu.go similarity index 98% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu.go index 83bc23d45ed..56f53c3a1ac 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package cpu import ( @@ -11,7 +12,7 @@ import ( "sync" "time" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) // TimesStat contains the amounts of time the CPU has spent performing different diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_aix.go similarity index 85% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_aix.go index 1439d1d7939..bc766bd4fe9 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_aix.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build aix -// +build aix package cpu diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix_cgo.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_aix_cgo.go similarity index 96% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix_cgo.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_aix_cgo.go index 9c1e70b1730..559dc5feafd 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_aix_cgo.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_aix_cgo.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build aix && cgo -// +build aix,cgo package cpu diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_aix_nocgo.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_aix_nocgo.go new file mode 100644 index 00000000000..329ef833666 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_aix_nocgo.go @@ -0,0 +1,157 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build aix && !cgo + +package cpu + +import ( + "context" + "strconv" + "strings" + + "github.com/shirou/gopsutil/v4/internal/common" +) + +func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) { + var ret []TimesStat + if percpu { + per_out, err := invoke.CommandWithContext(ctx, "sar", "-u", "-P", "ALL", "10", "1") + if err != nil { + return nil, err + } + lines := strings.Split(string(per_out), "\n") + if len(lines) < 6 { + return []TimesStat{}, common.ErrNotImplementedError + } + + hp := strings.Fields(lines[5]) // headers + for l := 6; l < len(lines)-1; l++ { + ct := &TimesStat{} + v := strings.Fields(lines[l]) // values + for i, header := range hp { + // We're done in any of these use cases + if i >= len(v) || v[0] == "-" { + break + } + + // Position variable for v + pos := i + // There is a missing field at the beginning of all but the first line + // so adjust the position + if l > 6 { + pos = i - 1 + } + // We don't want invalid positions + if pos < 0 { + continue + } + + if t, err := strconv.ParseFloat(v[pos], 64); err == nil { + switch header { + case `cpu`: + ct.CPU = strconv.FormatFloat(t, 'f', -1, 64) + case `%usr`: + ct.User = t + case `%sys`: + ct.System = t + case `%wio`: + ct.Iowait = t + case `%idle`: + ct.Idle = t + } + } + } + // Valid CPU data, 
so append it + ret = append(ret, *ct) + } + } else { + out, err := invoke.CommandWithContext(ctx, "sar", "-u", "10", "1") + if err != nil { + return nil, err + } + lines := strings.Split(string(out), "\n") + if len(lines) < 5 { + return []TimesStat{}, common.ErrNotImplementedError + } + + ct := &TimesStat{CPU: "cpu-total"} + h := strings.Fields(lines[len(lines)-3]) // headers + v := strings.Fields(lines[len(lines)-2]) // values + for i, header := range h { + if t, err := strconv.ParseFloat(v[i], 64); err == nil { + switch header { + case `%usr`: + ct.User = t + case `%sys`: + ct.System = t + case `%wio`: + ct.Iowait = t + case `%idle`: + ct.Idle = t + } + } + } + + ret = append(ret, *ct) + } + + return ret, nil +} + +func InfoWithContext(ctx context.Context) ([]InfoStat, error) { + out, err := invoke.CommandWithContext(ctx, "prtconf") + if err != nil { + return nil, err + } + + ret := InfoStat{} + for _, line := range strings.Split(string(out), "\n") { + if strings.HasPrefix(line, "Number Of Processors:") { + p := strings.Fields(line) + if len(p) > 3 { + if t, err := strconv.ParseUint(p[3], 10, 64); err == nil { + ret.Cores = int32(t) + } + } + } else if strings.HasPrefix(line, "Processor Clock Speed:") { + p := strings.Fields(line) + if len(p) > 4 { + if t, err := strconv.ParseFloat(p[3], 64); err == nil { + switch strings.ToUpper(p[4]) { + case "MHZ": + ret.Mhz = t + case "GHZ": + ret.Mhz = t * 1000.0 + case "KHZ": + ret.Mhz = t / 1000.0 + default: + ret.Mhz = t + } + } + } + break + } else if strings.HasPrefix(line, "System Model:") { + p := strings.Split(string(line), ":") + if p != nil { + ret.VendorID = strings.TrimSpace(p[1]) + } + } else if strings.HasPrefix(line, "Processor Type:") { + p := strings.Split(string(line), ":") + if p != nil { + c := strings.Split(string(p[1]), "_") + if c != nil { + ret.Family = strings.TrimSpace(c[0]) + ret.Model = strings.TrimSpace(c[1]) + } + } + } + } + return []InfoStat{ret}, nil +} + +func CountsWithContext(ctx context.Context, logical bool) (int, error) { + info, err := InfoWithContext(ctx) + if err == nil { + return int(info[0].Cores), nil + } + return 0, err +} diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin.go new file mode 100644 index 00000000000..b3e3a668de1 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin.go @@ -0,0 +1,198 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build darwin + +package cpu + +import ( + "context" + "fmt" + "strconv" + "strings" + "unsafe" + + "github.com/tklauser/go-sysconf" + "golang.org/x/sys/unix" + + "github.com/shirou/gopsutil/v4/internal/common" +) + +// sys/resource.h +const ( + CPUser = 0 + cpNice = 1 + cpSys = 2 + cpIntr = 3 + cpIdle = 4 + cpUStates = 5 +) + +// mach/machine.h +const ( + cpuStateUser = 0 + cpuStateSystem = 1 + cpuStateIdle = 2 + cpuStateNice = 3 + cpuStateMax = 4 +) + +// mach/processor_info.h +const ( + processorCpuLoadInfo = 2 +) + +type hostCpuLoadInfoData struct { + cpuTicks [cpuStateMax]uint32 +} + +// default value. 
from time.h +var ClocksPerSec = float64(128) + +func init() { + clkTck, err := sysconf.Sysconf(sysconf.SC_CLK_TCK) + // ignore errors + if err == nil { + ClocksPerSec = float64(clkTck) + } +} + +func Times(percpu bool) ([]TimesStat, error) { + return TimesWithContext(context.Background(), percpu) +} + +func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) { + lib, err := common.NewLibrary(common.System) + if err != nil { + return nil, err + } + defer lib.Close() + + if percpu { + return perCPUTimes(lib) + } + + return allCPUTimes(lib) +} + +// Returns only one InfoStat on Darwin +func Info() ([]InfoStat, error) { + return InfoWithContext(context.Background()) +} + +func InfoWithContext(ctx context.Context) ([]InfoStat, error) { + var ret []InfoStat + + c := InfoStat{} + c.ModelName, _ = unix.Sysctl("machdep.cpu.brand_string") + family, _ := unix.SysctlUint32("machdep.cpu.family") + c.Family = strconv.FormatUint(uint64(family), 10) + model, _ := unix.SysctlUint32("machdep.cpu.model") + c.Model = strconv.FormatUint(uint64(model), 10) + stepping, _ := unix.SysctlUint32("machdep.cpu.stepping") + c.Stepping = int32(stepping) + features, err := unix.Sysctl("machdep.cpu.features") + if err == nil { + for _, v := range strings.Fields(features) { + c.Flags = append(c.Flags, strings.ToLower(v)) + } + } + leaf7Features, err := unix.Sysctl("machdep.cpu.leaf7_features") + if err == nil { + for _, v := range strings.Fields(leaf7Features) { + c.Flags = append(c.Flags, strings.ToLower(v)) + } + } + extfeatures, err := unix.Sysctl("machdep.cpu.extfeatures") + if err == nil { + for _, v := range strings.Fields(extfeatures) { + c.Flags = append(c.Flags, strings.ToLower(v)) + } + } + cores, _ := unix.SysctlUint32("machdep.cpu.core_count") + c.Cores = int32(cores) + cacheSize, _ := unix.SysctlUint32("machdep.cpu.cache.size") + c.CacheSize = int32(cacheSize) + c.VendorID, _ = unix.Sysctl("machdep.cpu.vendor") + + v, err := getFrequency() + if err == nil { + c.Mhz = v + } + + return append(ret, c), nil +} + +func CountsWithContext(ctx context.Context, logical bool) (int, error) { + var cpuArgument string + if logical { + cpuArgument = "hw.logicalcpu" + } else { + cpuArgument = "hw.physicalcpu" + } + + count, err := unix.SysctlUint32(cpuArgument) + if err != nil { + return 0, err + } + + return int(count), nil +} + +func perCPUTimes(machLib *common.Library) ([]TimesStat, error) { + machHostSelf := common.GetFunc[common.MachHostSelfFunc](machLib, common.MachHostSelfSym) + machTaskSelf := common.GetFunc[common.MachTaskSelfFunc](machLib, common.MachTaskSelfSym) + hostProcessorInfo := common.GetFunc[common.HostProcessorInfoFunc](machLib, common.HostProcessorInfoSym) + vmDeallocate := common.GetFunc[common.VMDeallocateFunc](machLib, common.VMDeallocateSym) + + var count, ncpu uint32 + var cpuload *hostCpuLoadInfoData + + status := hostProcessorInfo(machHostSelf(), processorCpuLoadInfo, &ncpu, uintptr(unsafe.Pointer(&cpuload)), &count) + + if status != common.KERN_SUCCESS { + return nil, fmt.Errorf("host_processor_info error=%d", status) + } + + defer vmDeallocate(machTaskSelf(), uintptr(unsafe.Pointer(cpuload)), uintptr(ncpu)) + + ret := []TimesStat{} + loads := unsafe.Slice(cpuload, ncpu) + + for i := 0; i < int(ncpu); i++ { + c := TimesStat{ + CPU: fmt.Sprintf("cpu%d", i), + User: float64(loads[i].cpuTicks[cpuStateUser]) / ClocksPerSec, + System: float64(loads[i].cpuTicks[cpuStateSystem]) / ClocksPerSec, + Nice: float64(loads[i].cpuTicks[cpuStateNice]) / ClocksPerSec, + Idle: 
float64(loads[i].cpuTicks[cpuStateIdle]) / ClocksPerSec, + } + + ret = append(ret, c) + } + + return ret, nil +} + +func allCPUTimes(machLib *common.Library) ([]TimesStat, error) { + machHostSelf := common.GetFunc[common.MachHostSelfFunc](machLib, common.MachHostSelfSym) + hostStatistics := common.GetFunc[common.HostStatisticsFunc](machLib, common.HostStatisticsSym) + + var cpuload hostCpuLoadInfoData + count := uint32(cpuStateMax) + + status := hostStatistics(machHostSelf(), common.HOST_CPU_LOAD_INFO, + uintptr(unsafe.Pointer(&cpuload)), &count) + + if status != common.KERN_SUCCESS { + return nil, fmt.Errorf("host_statistics error=%d", status) + } + + c := TimesStat{ + CPU: "cpu-total", + User: float64(cpuload.cpuTicks[cpuStateUser]) / ClocksPerSec, + System: float64(cpuload.cpuTicks[cpuStateSystem]) / ClocksPerSec, + Nice: float64(cpuload.cpuTicks[cpuStateNice]) / ClocksPerSec, + Idle: float64(cpuload.cpuTicks[cpuStateIdle]) / ClocksPerSec, + } + + return []TimesStat{c}, nil +} diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin_arm64.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin_arm64.go new file mode 100644 index 00000000000..50318424390 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin_arm64.go @@ -0,0 +1,80 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build darwin && arm64 + +package cpu + +import ( + "encoding/binary" + "fmt" + "unsafe" + + "github.com/shirou/gopsutil/v4/internal/common" +) + +// https://github.com/shoenig/go-m1cpu/blob/v0.1.6/cpu.go +func getFrequency() (float64, error) { + ioKit, err := common.NewLibrary(common.IOKit) + if err != nil { + return 0, err + } + defer ioKit.Close() + + coreFoundation, err := common.NewLibrary(common.CoreFoundation) + if err != nil { + return 0, err + } + defer coreFoundation.Close() + + ioServiceMatching := common.GetFunc[common.IOServiceMatchingFunc](ioKit, common.IOServiceMatchingSym) + ioServiceGetMatchingServices := common.GetFunc[common.IOServiceGetMatchingServicesFunc](ioKit, common.IOServiceGetMatchingServicesSym) + ioIteratorNext := common.GetFunc[common.IOIteratorNextFunc](ioKit, common.IOIteratorNextSym) + ioRegistryEntryGetName := common.GetFunc[common.IORegistryEntryGetNameFunc](ioKit, common.IORegistryEntryGetNameSym) + ioRegistryEntryCreateCFProperty := common.GetFunc[common.IORegistryEntryCreateCFPropertyFunc](ioKit, common.IORegistryEntryCreateCFPropertySym) + ioObjectRelease := common.GetFunc[common.IOObjectReleaseFunc](ioKit, common.IOObjectReleaseSym) + + cfStringCreateWithCString := common.GetFunc[common.CFStringCreateWithCStringFunc](coreFoundation, common.CFStringCreateWithCStringSym) + cfDataGetLength := common.GetFunc[common.CFDataGetLengthFunc](coreFoundation, common.CFDataGetLengthSym) + cfDataGetBytePtr := common.GetFunc[common.CFDataGetBytePtrFunc](coreFoundation, common.CFDataGetBytePtrSym) + cfRelease := common.GetFunc[common.CFReleaseFunc](coreFoundation, common.CFReleaseSym) + + matching := ioServiceMatching("AppleARMIODevice") + + var iterator uint32 + if status := ioServiceGetMatchingServices(common.KIOMainPortDefault, uintptr(matching), &iterator); status != common.KERN_SUCCESS { + return 0.0, fmt.Errorf("IOServiceGetMatchingServices error=%d", status) + } + defer ioObjectRelease(iterator) + + pCorekey := cfStringCreateWithCString(common.KCFAllocatorDefault, "voltage-states5-sram", common.KCFStringEncodingUTF8) + defer cfRelease(uintptr(pCorekey)) + + var pCoreHz uint32 + for { + service := ioIteratorNext(iterator) + if !(service > 0) { + break + } + 
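The registry read just below pulls the voltage-states5-sram property as CFData; per the go-m1cpu reference linked above, the payload appears to be a list of 8-byte records, each a little-endian uint32 frequency in Hz followed by a uint32 voltage, with the last record holding the highest performance state. A sketch decoding a hypothetical payload of that shape:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// One fabricated record: 1,500,000,000 Hz with a voltage value of 800.
	data := []byte{0x00, 0x2F, 0x68, 0x59, 0x20, 0x03, 0x00, 0x00}
	for i := 0; i+8 <= len(data); i += 8 {
		hz := binary.LittleEndian.Uint32(data[i : i+4])
		mv := binary.LittleEndian.Uint32(data[i+4 : i+8])
		fmt.Printf("state %d: %d MHz at %d\n", i/8, hz/1_000_000, mv)
	}
}

Note that the vendored return value computes float64(pCoreHz / 1_000_000) with integer division first, so anything below a whole MHz is truncated.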
+ buf := make([]byte, 512) + ioRegistryEntryGetName(service, &buf[0]) + + if common.GoString(&buf[0]) == "pmgr" { + pCoreRef := ioRegistryEntryCreateCFProperty(service, uintptr(pCorekey), common.KCFAllocatorDefault, common.KNilOptions) + length := cfDataGetLength(uintptr(pCoreRef)) + data := cfDataGetBytePtr(uintptr(pCoreRef)) + + // composite uint32 from the byte array + buf := unsafe.Slice((*byte)(data), length) + + // combine the bytes into a uint32 value + b := buf[length-8 : length-4] + pCoreHz = binary.LittleEndian.Uint32(b) + ioObjectRelease(service) + break + } + + ioObjectRelease(service) + } + + return float64(pCoreHz / 1_000_000), nil +} diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin_fallback.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin_fallback.go new file mode 100644 index 00000000000..b9e52aba176 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin_fallback.go @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build darwin && !arm64 + +package cpu + +import "golang.org/x/sys/unix" + +func getFrequency() (float64, error) { + // Use the rated frequency of the CPU. This is a static value and does not + // account for low power or Turbo Boost modes. + cpuFrequency, err := unix.SysctlUint64("hw.cpufrequency") + return float64(cpuFrequency) / 1000000.0, err +} diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_dragonfly.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_dragonfly.go similarity index 97% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_dragonfly.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_dragonfly.go index fef53e5dcc1..19b1e9dd3ef 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_dragonfly.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_dragonfly.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package cpu import ( @@ -10,7 +11,7 @@ import ( "strings" "unsafe" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" "github.com/tklauser/go-sysconf" "golang.org/x/sys/unix" ) diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_dragonfly_amd64.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_dragonfly_amd64.go similarity index 71% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_dragonfly_amd64.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_dragonfly_amd64.go index 57e14528db1..25ececa680e 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_dragonfly_amd64.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_dragonfly_amd64.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package cpu type cpuTimes struct { diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_fallback.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_fallback.go similarity index 83% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_fallback.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_fallback.go index 089f603c8fc..245c1ec98b8 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_fallback.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_fallback.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build !darwin && !linux && !freebsd && !openbsd && !netbsd && !solaris && !windows && !dragonfly && !plan9 && !aix -// +build !darwin,!linux,!freebsd,!openbsd,!netbsd,!solaris,!windows,!dragonfly,!plan9,!aix package cpu @@ -7,7 +7,7 @@ import ( "context" "runtime" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" 
) func Times(percpu bool) ([]TimesStat, error) { diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd.go similarity index 96% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd.go index d3f47353cfc..5d17c7e977e 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package cpu import ( @@ -10,9 +11,10 @@ import ( "strings" "unsafe" - "github.com/shirou/gopsutil/v3/internal/common" "github.com/tklauser/go-sysconf" "golang.org/x/sys/unix" + + "github.com/shirou/gopsutil/v4/internal/common" ) var ( @@ -135,7 +137,7 @@ func parseDmesgBoot(fileName string) (InfoStat, int, error) { c.Model = matches[4] t, err := strconv.ParseInt(matches[5], 10, 32) if err != nil { - return c, 0, fmt.Errorf("unable to parse FreeBSD CPU stepping information from %q: %v", line, err) + return c, 0, fmt.Errorf("unable to parse FreeBSD CPU stepping information from %q: %w", line, err) } c.Stepping = int32(t) } else if matches := featuresMatch.FindStringSubmatch(line); matches != nil { @@ -149,12 +151,12 @@ func parseDmesgBoot(fileName string) (InfoStat, int, error) { } else if matches := cpuCores.FindStringSubmatch(line); matches != nil { t, err := strconv.ParseInt(matches[1], 10, 32) if err != nil { - return c, 0, fmt.Errorf("unable to parse FreeBSD CPU Nums from %q: %v", line, err) + return c, 0, fmt.Errorf("unable to parse FreeBSD CPU Nums from %q: %w", line, err) } cpuNum = int(t) t2, err := strconv.ParseInt(matches[2], 10, 32) if err != nil { - return c, 0, fmt.Errorf("unable to parse FreeBSD CPU cores from %q: %v", line, err) + return c, 0, fmt.Errorf("unable to parse FreeBSD CPU cores from %q: %w", line, err) } c.Cores = int32(t2) } diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_386.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_386.go similarity index 71% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_386.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_386.go index 8b7f4c321eb..e4799bcf5c4 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_386.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_386.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package cpu type cpuTimes struct { diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_amd64.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_amd64.go similarity index 71% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_amd64.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_amd64.go index 57e14528db1..25ececa680e 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_amd64.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_amd64.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package cpu type cpuTimes struct { diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_arm.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_arm.go similarity index 71% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_arm.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_arm.go index 8b7f4c321eb..e4799bcf5c4 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_arm.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_arm.go @@ -1,3 +1,4 @@ +// 
SPDX-License-Identifier: BSD-3-Clause package cpu type cpuTimes struct { diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_arm64.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_arm64.go similarity index 71% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_arm64.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_arm64.go index 57e14528db1..25ececa680e 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_freebsd_arm64.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_arm64.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package cpu type cpuTimes struct { diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_linux.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_linux.go similarity index 97% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_linux.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_linux.go index da467e2dd20..5f595e7b3ef 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_linux.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_linux.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build linux -// +build linux package cpu @@ -13,7 +13,7 @@ import ( "github.com/tklauser/go-sysconf" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) var ClocksPerSec = float64(100) @@ -395,7 +395,7 @@ func CountsWithContext(ctx context.Context, logical bool) (int, error) { for _, line := range lines { line = strings.ToLower(line) if strings.HasPrefix(line, "processor") { - _, err = strconv.Atoi(strings.TrimSpace(line[strings.IndexByte(line, ':')+1:])) + _, err = strconv.ParseInt(strings.TrimSpace(line[strings.IndexByte(line, ':')+1:]), 10, 32) if err == nil { ret++ } @@ -464,11 +464,11 @@ func CountsWithContext(ctx context.Context, logical bool) (int, error) { } fields[0] = strings.TrimSpace(fields[0]) if fields[0] == "physical id" || fields[0] == "cpu cores" { - val, err := strconv.Atoi(strings.TrimSpace(fields[1])) + val, err := strconv.ParseInt(strings.TrimSpace(fields[1]), 10, 32) if err != nil { continue } - currentInfo[fields[0]] = val + currentInfo[fields[0]] = int(val) } } ret := 0 diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_netbsd.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd.go similarity index 96% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_netbsd.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd.go index 1f66be34255..198be5e644d 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_netbsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build netbsd -// +build netbsd package cpu @@ -9,9 +9,10 @@ import ( "runtime" "unsafe" - "github.com/shirou/gopsutil/v3/internal/common" "github.com/tklauser/go-sysconf" "golang.org/x/sys/unix" + + "github.com/shirou/gopsutil/v4/internal/common" ) const ( diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_netbsd_amd64.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd_amd64.go similarity index 71% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_netbsd_amd64.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd_amd64.go index 57e14528db1..25ececa680e 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_netbsd_amd64.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd_amd64.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package cpu type cpuTimes struct { diff --git 
a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd_arm.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd_arm.go new file mode 100644 index 00000000000..e4799bcf5c4 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd_arm.go @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: BSD-3-Clause +package cpu + +type cpuTimes struct { + User uint32 + Nice uint32 + Sys uint32 + Intr uint32 + Idle uint32 +} diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_netbsd_arm64.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd_arm64.go similarity index 71% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_netbsd_arm64.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd_arm64.go index 57e14528db1..25ececa680e 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_netbsd_arm64.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd_arm64.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package cpu type cpuTimes struct { diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd.go similarity index 96% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd.go index fe33290300e..33233d3c74a 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build openbsd -// +build openbsd package cpu @@ -9,7 +9,7 @@ import ( "runtime" "unsafe" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" "github.com/tklauser/go-sysconf" "golang.org/x/sys/unix" ) diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_386.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_386.go similarity index 73% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_386.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_386.go index 5e878399a38..40a6f43e498 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_386.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_386.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package cpu type cpuTimes struct { diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_amd64.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_amd64.go similarity index 73% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_amd64.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_amd64.go index d659058cd5e..464156d5402 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_amd64.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_amd64.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package cpu type cpuTimes struct { diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_arm.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_arm.go similarity index 73% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_arm.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_arm.go index 5e878399a38..40a6f43e498 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_arm.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_arm.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package cpu type cpuTimes struct { diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_arm64.go 
b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_arm64.go similarity index 73% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_arm64.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_arm64.go index d659058cd5e..464156d5402 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_arm64.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_arm64.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package cpu type cpuTimes struct { diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_riscv64.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_riscv64.go similarity index 73% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_riscv64.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_riscv64.go index d659058cd5e..464156d5402 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_openbsd_riscv64.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_riscv64.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package cpu type cpuTimes struct { diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_plan9.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_plan9.go similarity index 91% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_plan9.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_plan9.go index a2e99d8c05e..bff2e0c7584 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_plan9.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_plan9.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build plan9 -// +build plan9 package cpu @@ -9,7 +9,7 @@ import ( "runtime" stats "github.com/lufia/plan9stats" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) func Times(percpu bool) ([]TimesStat, error) { diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_solaris.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_solaris.go similarity index 99% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_solaris.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_solaris.go index 4231ad16819..d8ba1d3242e 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_solaris.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_solaris.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package cpu import ( diff --git a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_windows.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_windows.go similarity index 97% rename from vendor/github.com/shirou/gopsutil/v3/cpu/cpu_windows.go rename to vendor/github.com/shirou/gopsutil/v4/cpu/cpu_windows.go index e10612fd19d..4476b91cb5f 100644 --- a/vendor/github.com/shirou/gopsutil/v3/cpu/cpu_windows.go +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_windows.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build windows -// +build windows package cpu @@ -8,14 +8,12 @@ import ( "fmt" "unsafe" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" "github.com/yusufpapurcu/wmi" "golang.org/x/sys/windows" ) -var ( - procGetNativeSystemInfo = common.Modkernel32.NewProc("GetNativeSystemInfo") -) +var procGetNativeSystemInfo = common.Modkernel32.NewProc("GetNativeSystemInfo") type win32_Processor struct { Family uint16 diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/binary.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/binary.go similarity index 99% rename from 
vendor/github.com/shirou/gopsutil/v3/internal/common/binary.go rename to vendor/github.com/shirou/gopsutil/v4/internal/common/binary.go index 5e8d43db835..6e75e74b018 100644 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/binary.go +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/binary.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package common // Copyright 2009 The Go Authors. All rights reserved. diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/common.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/common.go similarity index 95% rename from vendor/github.com/shirou/gopsutil/v3/internal/common/common.go rename to vendor/github.com/shirou/gopsutil/v4/internal/common/common.go index 5e25e507b48..868ea4daeed 100644 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/common.go +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/common.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package common // @@ -14,6 +15,7 @@ import ( "errors" "fmt" "io" + "math" "net/url" "os" "os/exec" @@ -25,7 +27,7 @@ import ( "strings" "time" - "github.com/shirou/gopsutil/v3/common" + "github.com/shirou/gopsutil/v4/common" ) var ( @@ -152,7 +154,7 @@ func ReadLinesOffsetN(filename string, offset uint, n int) ([]string, error) { var ret []string r := bufio.NewReader(f) - for i := 0; i < n+int(offset) || n < 0; i++ { + for i := uint(0); i < uint(n)+offset || n < 0; i++ { line, err := r.ReadString('\n') if err != nil { if err == io.EOF && len(line) > 0 { @@ -160,7 +162,7 @@ func ReadLinesOffsetN(filename string, offset uint, n int) ([]string, error) { } break } - if i < int(offset) { + if i < offset { continue } ret = append(ret, strings.Trim(line, "\n")) @@ -462,3 +464,11 @@ func getSysctrlEnv(env []string) []string { } return env } + +// Round places rounds the number 'val' to 'n' decimal places +func Round(val float64, n int) float64 { + // Calculate the power of 10 to the n + pow10 := math.Pow(10, float64(n)) + // Multiply the value by pow10, round it, then divide it by pow10 + return math.Round(val*pow10) / pow10 +} diff --git a/vendor/github.com/shirou/gopsutil/v4/internal/common/common_darwin.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_darwin.go new file mode 100644 index 00000000000..b473f88666e --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_darwin.go @@ -0,0 +1,364 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build darwin + +package common + +import ( + "context" + "fmt" + "os" + "os/exec" + "strings" + "unsafe" + + "github.com/ebitengine/purego" + "golang.org/x/sys/unix" +) + +func DoSysctrlWithContext(ctx context.Context, mib string) ([]string, error) { + cmd := exec.CommandContext(ctx, "sysctl", "-n", mib) + cmd.Env = getSysctrlEnv(os.Environ()) + out, err := cmd.Output() + if err != nil { + return []string{}, err + } + v := strings.Replace(string(out), "{ ", "", 1) + v = strings.Replace(string(v), " }", "", 1) + values := strings.Fields(string(v)) + + return values, nil +} + +func CallSyscall(mib []int32) ([]byte, uint64, error) { + miblen := uint64(len(mib)) + + // get required buffer size + length := uint64(0) + _, _, err := unix.Syscall6( + 202, // unix.SYS___SYSCTL https://github.com/golang/sys/blob/76b94024e4b621e672466e8db3d7f084e7ddcad2/unix/zsysnum_darwin_amd64.go#L146 + uintptr(unsafe.Pointer(&mib[0])), + uintptr(miblen), + 0, + uintptr(unsafe.Pointer(&length)), + 0, + 0) + if err != 0 { + var b []byte + return b, length, err + } + if length == 
0 { + var b []byte + return b, length, err + } + // get proc info itself + buf := make([]byte, length) + _, _, err = unix.Syscall6( + 202, // unix.SYS___SYSCTL https://github.com/golang/sys/blob/76b94024e4b621e672466e8db3d7f084e7ddcad2/unix/zsysnum_darwin_amd64.go#L146 + uintptr(unsafe.Pointer(&mib[0])), + uintptr(miblen), + uintptr(unsafe.Pointer(&buf[0])), + uintptr(unsafe.Pointer(&length)), + 0, + 0) + if err != 0 { + return buf, length, err + } + + return buf, length, nil +} + +// Library represents a dynamic library loaded by purego. +type Library struct { + addr uintptr + path string + close func() +} + +// library paths +const ( + IOKit = "/System/Library/Frameworks/IOKit.framework/IOKit" + CoreFoundation = "/System/Library/Frameworks/CoreFoundation.framework/CoreFoundation" + System = "/usr/lib/libSystem.B.dylib" +) + +func NewLibrary(path string) (*Library, error) { + lib, err := purego.Dlopen(path, purego.RTLD_LAZY|purego.RTLD_GLOBAL) + if err != nil { + return nil, err + } + + closeFunc := func() { + purego.Dlclose(lib) + } + + return &Library{ + addr: lib, + path: path, + close: closeFunc, + }, nil +} + +func (lib *Library) Dlsym(symbol string) (uintptr, error) { + return purego.Dlsym(lib.addr, symbol) +} + +func GetFunc[T any](lib *Library, symbol string) T { + var fptr T + purego.RegisterLibFunc(&fptr, lib.addr, symbol) + return fptr +} + +func (lib *Library) Close() { + lib.close() +} + +// status codes +const ( + KERN_SUCCESS = 0 +) + +// IOKit functions and symbols. +type ( + IOServiceGetMatchingServiceFunc func(mainPort uint32, matching uintptr) uint32 + IOServiceGetMatchingServicesFunc func(mainPort uint32, matching uintptr, existing *uint32) int + IOServiceMatchingFunc func(name string) unsafe.Pointer + IOServiceOpenFunc func(service, owningTask, connType uint32, connect *uint32) int + IOServiceCloseFunc func(connect uint32) int + IOIteratorNextFunc func(iterator uint32) uint32 + IORegistryEntryGetNameFunc func(entry uint32, name *byte) int + IORegistryEntryGetParentEntryFunc func(entry uint32, plane string, parent *uint32) int + IORegistryEntryCreateCFPropertyFunc func(entry uint32, key, allocator uintptr, options uint32) unsafe.Pointer + IORegistryEntryCreateCFPropertiesFunc func(entry uint32, properties unsafe.Pointer, allocator uintptr, options uint32) int + IOObjectConformsToFunc func(object uint32, className string) bool + IOObjectReleaseFunc func(object uint32) int + IOConnectCallStructMethodFunc func(connection, selector uint32, inputStruct, inputStructCnt, outputStruct uintptr, outputStructCnt *uintptr) int + + IOHIDEventSystemClientCreateFunc func(allocator uintptr) unsafe.Pointer + IOHIDEventSystemClientSetMatchingFunc func(client, match uintptr) int + IOHIDServiceClientCopyEventFunc func(service uintptr, eventType int64, + options int32, timeout int64) unsafe.Pointer + IOHIDServiceClientCopyPropertyFunc func(service, property uintptr) unsafe.Pointer + IOHIDEventGetFloatValueFunc func(event uintptr, field int32) float64 + IOHIDEventSystemClientCopyServicesFunc func(client uintptr) unsafe.Pointer +) + +const ( + IOServiceGetMatchingServiceSym = "IOServiceGetMatchingService" + IOServiceGetMatchingServicesSym = "IOServiceGetMatchingServices" + IOServiceMatchingSym = "IOServiceMatching" + IOServiceOpenSym = "IOServiceOpen" + IOServiceCloseSym = "IOServiceClose" + IOIteratorNextSym = "IOIteratorNext" + IORegistryEntryGetNameSym = "IORegistryEntryGetName" + IORegistryEntryGetParentEntrySym = "IORegistryEntryGetParentEntry" + IORegistryEntryCreateCFPropertySym = 
"IORegistryEntryCreateCFProperty" + IORegistryEntryCreateCFPropertiesSym = "IORegistryEntryCreateCFProperties" + IOObjectConformsToSym = "IOObjectConformsTo" + IOObjectReleaseSym = "IOObjectRelease" + IOConnectCallStructMethodSym = "IOConnectCallStructMethod" + + IOHIDEventSystemClientCreateSym = "IOHIDEventSystemClientCreate" + IOHIDEventSystemClientSetMatchingSym = "IOHIDEventSystemClientSetMatching" + IOHIDServiceClientCopyEventSym = "IOHIDServiceClientCopyEvent" + IOHIDServiceClientCopyPropertySym = "IOHIDServiceClientCopyProperty" + IOHIDEventGetFloatValueSym = "IOHIDEventGetFloatValue" + IOHIDEventSystemClientCopyServicesSym = "IOHIDEventSystemClientCopyServices" +) + +const ( + KIOMainPortDefault = 0 + + KIOHIDEventTypeTemperature = 15 + + KNilOptions = 0 +) + +const ( + KIOMediaWholeKey = "Media" + KIOServicePlane = "IOService" +) + +// CoreFoundation functions and symbols. +type ( + CFGetTypeIDFunc func(cf uintptr) int32 + CFNumberCreateFunc func(allocator uintptr, theType int32, valuePtr uintptr) unsafe.Pointer + CFNumberGetValueFunc func(num uintptr, theType int32, valuePtr uintptr) bool + CFDictionaryCreateFunc func(allocator uintptr, keys, values *unsafe.Pointer, numValues int32, + keyCallBacks, valueCallBacks uintptr) unsafe.Pointer + CFDictionaryAddValueFunc func(theDict, key, value uintptr) + CFDictionaryGetValueFunc func(theDict, key uintptr) unsafe.Pointer + CFArrayGetCountFunc func(theArray uintptr) int32 + CFArrayGetValueAtIndexFunc func(theArray uintptr, index int32) unsafe.Pointer + CFStringCreateMutableFunc func(alloc uintptr, maxLength int32) unsafe.Pointer + CFStringGetLengthFunc func(theString uintptr) int32 + CFStringGetCStringFunc func(theString uintptr, buffer *byte, bufferSize int32, encoding uint32) + CFStringCreateWithCStringFunc func(alloc uintptr, cStr string, encoding uint32) unsafe.Pointer + CFDataGetLengthFunc func(theData uintptr) int32 + CFDataGetBytePtrFunc func(theData uintptr) unsafe.Pointer + CFReleaseFunc func(cf uintptr) +) + +const ( + CFGetTypeIDSym = "CFGetTypeID" + CFNumberCreateSym = "CFNumberCreate" + CFNumberGetValueSym = "CFNumberGetValue" + CFDictionaryCreateSym = "CFDictionaryCreate" + CFDictionaryAddValueSym = "CFDictionaryAddValue" + CFDictionaryGetValueSym = "CFDictionaryGetValue" + CFArrayGetCountSym = "CFArrayGetCount" + CFArrayGetValueAtIndexSym = "CFArrayGetValueAtIndex" + CFStringCreateMutableSym = "CFStringCreateMutable" + CFStringGetLengthSym = "CFStringGetLength" + CFStringGetCStringSym = "CFStringGetCString" + CFStringCreateWithCStringSym = "CFStringCreateWithCString" + CFDataGetLengthSym = "CFDataGetLength" + CFDataGetBytePtrSym = "CFDataGetBytePtr" + CFReleaseSym = "CFRelease" +) + +const ( + KCFStringEncodingUTF8 = 0x08000100 + KCFNumberSInt64Type = 4 + KCFNumberIntType = 9 + KCFAllocatorDefault = 0 +) + +// Kernel functions and symbols. 
+type MachTimeBaseInfo struct { + Numer uint32 + Denom uint32 +} + +type ( + HostProcessorInfoFunc func(host uint32, flavor int32, outProcessorCount *uint32, outProcessorInfo uintptr, + outProcessorInfoCnt *uint32) int + HostStatisticsFunc func(host uint32, flavor int32, hostInfoOut uintptr, hostInfoOutCnt *uint32) int + MachHostSelfFunc func() uint32 + MachTaskSelfFunc func() uint32 + MachTimeBaseInfoFunc func(info uintptr) int + VMDeallocateFunc func(targetTask uint32, vmAddress, vmSize uintptr) int +) + +const ( + HostProcessorInfoSym = "host_processor_info" + HostStatisticsSym = "host_statistics" + MachHostSelfSym = "mach_host_self" + MachTaskSelfSym = "mach_task_self" + MachTimeBaseInfoSym = "mach_timebase_info" + VMDeallocateSym = "vm_deallocate" +) + +const ( + CTL_KERN = 1 + KERN_ARGMAX = 8 + KERN_PROCARGS2 = 49 + + HOST_VM_INFO = 2 + HOST_CPU_LOAD_INFO = 3 + + HOST_VM_INFO_COUNT = 0xf +) + +// System functions and symbols. +type ( + ProcPidPathFunc func(pid int32, buffer uintptr, bufferSize uint32) int32 + ProcPidInfoFunc func(pid, flavor int32, arg uint64, buffer uintptr, bufferSize int32) int32 +) + +const ( + SysctlSym = "sysctl" + ProcPidPathSym = "proc_pidpath" + ProcPidInfoSym = "proc_pidinfo" +) + +const ( + MAXPATHLEN = 1024 + PROC_PIDPATHINFO_MAXSIZE = 4 * MAXPATHLEN + PROC_PIDTASKINFO = 4 + PROC_PIDVNODEPATHINFO = 9 +) + +// SMC represents a SMC instance. +type SMC struct { + lib *Library + conn uint32 + callStruct IOConnectCallStructMethodFunc +} + +const ioServiceSMC = "AppleSMC" + +const ( + KSMCUserClientOpen = 0 + KSMCUserClientClose = 1 + KSMCHandleYPCEvent = 2 + KSMCReadKey = 5 + KSMCWriteKey = 6 + KSMCGetKeyCount = 7 + KSMCGetKeyFromIndex = 8 + KSMCGetKeyInfo = 9 +) + +const ( + KSMCSuccess = 0 + KSMCError = 1 + KSMCKeyNotFound = 132 +) + +func NewSMC(ioKit *Library) (*SMC, error) { + if ioKit.path != IOKit { + return nil, fmt.Errorf("library is not IOKit") + } + + ioServiceGetMatchingService := GetFunc[IOServiceGetMatchingServiceFunc](ioKit, IOServiceGetMatchingServiceSym) + ioServiceMatching := GetFunc[IOServiceMatchingFunc](ioKit, IOServiceMatchingSym) + ioServiceOpen := GetFunc[IOServiceOpenFunc](ioKit, IOServiceOpenSym) + ioObjectRelease := GetFunc[IOObjectReleaseFunc](ioKit, IOObjectReleaseSym) + machTaskSelf := GetFunc[MachTaskSelfFunc](ioKit, MachTaskSelfSym) + + ioConnectCallStructMethod := GetFunc[IOConnectCallStructMethodFunc](ioKit, IOConnectCallStructMethodSym) + + service := ioServiceGetMatchingService(0, uintptr(ioServiceMatching(ioServiceSMC))) + if service == 0 { + return nil, fmt.Errorf("ERROR: %s NOT FOUND", ioServiceSMC) + } + + var conn uint32 + if result := ioServiceOpen(service, machTaskSelf(), 0, &conn); result != 0 { + return nil, fmt.Errorf("ERROR: IOServiceOpen failed") + } + + ioObjectRelease(service) + return &SMC{ + lib: ioKit, + conn: conn, + callStruct: ioConnectCallStructMethod, + }, nil +} + +func (s *SMC) CallStruct(selector uint32, inputStruct, inputStructCnt, outputStruct uintptr, outputStructCnt *uintptr) int { + return s.callStruct(s.conn, selector, inputStruct, inputStructCnt, outputStruct, outputStructCnt) +} + +func (s *SMC) Close() error { + ioServiceClose := GetFunc[IOServiceCloseFunc](s.lib, IOServiceCloseSym) + + if result := ioServiceClose(s.conn); result != 0 { + return fmt.Errorf("ERROR: IOServiceClose failed") + } + return nil +} + +// https://github.com/ebitengine/purego/blob/main/internal/strings/strings.go#L26 +func GoString(cStr *byte) string { + if cStr == nil { + return "" + } + var length int + for { + if 
*(*byte)(unsafe.Add(unsafe.Pointer(cStr), uintptr(length))) == '\x00' { + break + } + length++ + } + return string(unsafe.Slice(cStr, length)) +} diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_freebsd.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_freebsd.go similarity index 97% rename from vendor/github.com/shirou/gopsutil/v3/internal/common/common_freebsd.go rename to vendor/github.com/shirou/gopsutil/v4/internal/common/common_freebsd.go index f590e2e67e6..53cdceeb6d4 100644 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_freebsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_freebsd.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build freebsd || openbsd -// +build freebsd openbsd package common diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_linux.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_linux.go similarity index 99% rename from vendor/github.com/shirou/gopsutil/v3/internal/common/common_linux.go rename to vendor/github.com/shirou/gopsutil/v4/internal/common/common_linux.go index a429e16a2cc..541de93d357 100644 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_linux.go +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_linux.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build linux -// +build linux package common @@ -90,6 +90,8 @@ func BootTimeWithContext(ctx context.Context, enableCache bool) (uint64, error) if enableCache { atomic.StoreUint64(&cachedBootTime, t) } + + return t, nil } filename := HostProcWithContext(ctx, "uptime") @@ -97,6 +99,8 @@ func BootTimeWithContext(ctx context.Context, enableCache bool) (uint64, error) if err != nil { return handleBootTimeFileReadErr(err) } + currentTime := float64(time.Now().UnixNano()) / float64(time.Second) + if len(lines) != 1 { return 0, fmt.Errorf("wrong uptime format") } @@ -105,7 +109,6 @@ func BootTimeWithContext(ctx context.Context, enableCache bool) (uint64, error) if err != nil { return 0, err } - currentTime := float64(time.Now().UnixNano()) / float64(time.Second) t := currentTime - b if enableCache { diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_netbsd.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_netbsd.go similarity index 96% rename from vendor/github.com/shirou/gopsutil/v3/internal/common/common_netbsd.go rename to vendor/github.com/shirou/gopsutil/v4/internal/common/common_netbsd.go index efbc710a5ef..206532126c9 100644 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_netbsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_netbsd.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build netbsd -// +build netbsd package common diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_openbsd.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_openbsd.go similarity index 96% rename from vendor/github.com/shirou/gopsutil/v3/internal/common/common_openbsd.go rename to vendor/github.com/shirou/gopsutil/v4/internal/common/common_openbsd.go index 58d76f334e2..00fa19a2fb4 100644 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_openbsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_openbsd.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build openbsd -// +build openbsd package common diff --git 
a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_unix.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_unix.go similarity index 62% rename from vendor/github.com/shirou/gopsutil/v3/internal/common/common_unix.go rename to vendor/github.com/shirou/gopsutil/v4/internal/common/common_unix.go index 4af7e5c2aa8..c9f91b1698a 100644 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_unix.go +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_unix.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build linux || freebsd || darwin || openbsd -// +build linux freebsd darwin openbsd package common @@ -40,23 +40,3 @@ func CallLsofWithContext(ctx context.Context, invoke Invoker, pid int32, args .. } return ret, nil } - -func CallPgrepWithContext(ctx context.Context, invoke Invoker, pid int32) ([]int32, error) { - out, err := invoke.CommandWithContext(ctx, "pgrep", "-P", strconv.Itoa(int(pid))) - if err != nil { - return []int32{}, err - } - lines := strings.Split(string(out), "\n") - ret := make([]int32, 0, len(lines)) - for _, l := range lines { - if len(l) == 0 { - continue - } - i, err := strconv.ParseInt(l, 10, 32) - if err != nil { - continue - } - ret = append(ret, int32(i)) - } - return ret, nil -} diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_windows.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_windows.go similarity index 99% rename from vendor/github.com/shirou/gopsutil/v3/internal/common/common_windows.go rename to vendor/github.com/shirou/gopsutil/v4/internal/common/common_windows.go index 301b2315bb4..766ed2fcba3 100644 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/common_windows.go +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_windows.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build windows -// +build windows package common diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/endian.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/endian.go similarity index 88% rename from vendor/github.com/shirou/gopsutil/v3/internal/common/endian.go rename to vendor/github.com/shirou/gopsutil/v4/internal/common/endian.go index 147cfdc4b71..113ff2e9f42 100644 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/endian.go +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/endian.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package common import "unsafe" diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/sleep.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/sleep.go similarity index 89% rename from vendor/github.com/shirou/gopsutil/v3/internal/common/sleep.go rename to vendor/github.com/shirou/gopsutil/v4/internal/common/sleep.go index 94cedfd3438..504f13ffd98 100644 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/sleep.go +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/sleep.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package common import ( diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/warnings.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/warnings.go similarity index 92% rename from vendor/github.com/shirou/gopsutil/v3/internal/common/warnings.go rename to vendor/github.com/shirou/gopsutil/v4/internal/common/warnings.go index a4aaadaf54d..888cc57faee 100644 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/warnings.go +++ 
b/vendor/github.com/shirou/gopsutil/v4/internal/common/warnings.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package common import "fmt" diff --git a/vendor/github.com/shirou/gopsutil/v4/mem/ex_linux.go b/vendor/github.com/shirou/gopsutil/v4/mem/ex_linux.go new file mode 100644 index 00000000000..0a12fe2fe34 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/mem/ex_linux.go @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build linux + +package mem + +import ( + "context" + "encoding/json" +) + +type ExVirtualMemory struct { + ActiveFile uint64 `json:"activefile"` + InactiveFile uint64 `json:"inactivefile"` + ActiveAnon uint64 `json:"activeanon"` + InactiveAnon uint64 `json:"inactiveanon"` + Unevictable uint64 `json:"unevictable"` +} + +func (v ExVirtualMemory) String() string { + s, _ := json.Marshal(v) + return string(s) +} + +type ExLinux struct{} + +func NewExLinux() *ExLinux { + return &ExLinux{} +} + +func (ex *ExLinux) VirtualMemory() (*ExVirtualMemory, error) { + return ex.VirtualMemoryWithContext(context.Background()) +} + +func (ex *ExLinux) VirtualMemoryWithContext(ctx context.Context) (*ExVirtualMemory, error) { + _, vmEx, err := fillFromMeminfoWithContext(ctx) + if err != nil { + return nil, err + } + return vmEx, nil +} diff --git a/vendor/github.com/shirou/gopsutil/v4/mem/ex_windows.go b/vendor/github.com/shirou/gopsutil/v4/mem/ex_windows.go new file mode 100644 index 00000000000..5c49a478ce5 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/mem/ex_windows.go @@ -0,0 +1,51 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build windows + +package mem + +import ( + "unsafe" + + "golang.org/x/sys/windows" +) + +// ExVirtualMemory represents Windows specific information +// https://learn.microsoft.com/en-us/windows/win32/api/sysinfoapi/ns-sysinfoapi-memorystatusex +// https://learn.microsoft.com/en-us/windows/win32/api/psapi/ns-psapi-performance_information +type ExVirtualMemory struct { + CommitLimit uint64 `json:"commitLimit"` + CommitTotal uint64 `json:"commitTotal"` + VirtualTotal uint64 `json:"virtualTotal"` + VirtualAvail uint64 `json:"virtualAvail"` +} + +type ExWindows struct{} + +func NewExWindows() *ExWindows { + return &ExWindows{} +} + +func (e *ExWindows) VirtualMemory() (*ExVirtualMemory, error) { + var memInfo memoryStatusEx + memInfo.cbSize = uint32(unsafe.Sizeof(memInfo)) + mem, _, _ := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(&memInfo))) + if mem == 0 { + return nil, windows.GetLastError() + } + + var perfInfo performanceInformation + perfInfo.cb = uint32(unsafe.Sizeof(perfInfo)) + perf, _, _ := procGetPerformanceInfo.Call(uintptr(unsafe.Pointer(&perfInfo)), uintptr(perfInfo.cb)) + if perf == 0 { + return nil, windows.GetLastError() + } + + ret := &ExVirtualMemory{ + CommitLimit: perfInfo.commitLimit * perfInfo.pageSize, + CommitTotal: perfInfo.commitTotal * perfInfo.pageSize, + VirtualTotal: memInfo.ullTotalVirtual, + VirtualAvail: memInfo.ullAvailVirtual, + } + + return ret, nil +} diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem.go similarity index 97% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem.go index edaf268bbf4..0da71a98863 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem.go @@ -1,9 +1,10 @@ +// SPDX-License-Identifier: BSD-3-Clause package mem import ( "encoding/json" - 
"github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) var invoke common.Invoker = common.Invoke{} diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_aix.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_aix.go similarity index 58% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_aix.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_aix.go index 22a6a4e9265..ac2c39dd382 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_aix.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_aix.go @@ -1,10 +1,12 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build aix -// +build aix package mem import ( "context" + + "github.com/shirou/gopsutil/v4/internal/common" ) func VirtualMemory() (*VirtualMemoryStat, error) { @@ -14,3 +16,7 @@ func VirtualMemory() (*VirtualMemoryStat, error) { func SwapMemory() (*SwapMemoryStat, error) { return SwapMemoryWithContext(context.Background()) } + +func SwapDevices() ([]*SwapDevice, error) { + return nil, common.ErrNotImplementedError +} diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_aix_cgo.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_aix_cgo.go similarity index 97% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_aix_cgo.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_aix_cgo.go index 67e11dff886..2d03dd0c3f7 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_aix_cgo.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_aix_cgo.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build aix && cgo -// +build aix,cgo package mem diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_aix_nocgo.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_aix_nocgo.go similarity index 83% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_aix_nocgo.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_aix_nocgo.go index cc6a76d2f0d..bc3c0ed3b4a 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_aix_nocgo.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_aix_nocgo.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build aix && !cgo -// +build aix,!cgo package mem @@ -8,11 +8,11 @@ import ( "strconv" "strings" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) { - vmem, swap, err := callSVMon(ctx) + vmem, swap, err := callSVMon(ctx, true) if err != nil { return nil, err } @@ -25,7 +25,7 @@ func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) { } func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) { - _, swap, err := callSVMon(ctx) + _, swap, err := callSVMon(ctx, false) if err != nil { return nil, err } @@ -35,7 +35,7 @@ func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) { return swap, nil } -func callSVMon(ctx context.Context) (*VirtualMemoryStat, *SwapMemoryStat, error) { +func callSVMon(ctx context.Context, virt bool) (*VirtualMemoryStat, *SwapMemoryStat, error) { out, err := invoke.CommandWithContext(ctx, "svmon", "-G") if err != nil { return nil, nil, err @@ -45,7 +45,7 @@ func callSVMon(ctx context.Context) (*VirtualMemoryStat, *SwapMemoryStat, error) vmem := &VirtualMemoryStat{} swap := &SwapMemoryStat{} for _, line := range strings.Split(string(out), "\n") { - if strings.HasPrefix(line, "memory") { + if virt && strings.HasPrefix(line, "memory") { p := strings.Fields(line) if len(p) > 2 { if t, err := 
strconv.ParseUint(p[1], 10, 64); err == nil { diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_bsd.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_bsd.go similarity index 98% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_bsd.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_bsd.go index ef867d74223..4f3e57c038a 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_bsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_bsd.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build freebsd || openbsd || netbsd -// +build freebsd openbsd netbsd package mem diff --git a/vendor/github.com/shirou/gopsutil/v4/mem/mem_darwin.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_darwin.go new file mode 100644 index 00000000000..a4c15f6915d --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_darwin.go @@ -0,0 +1,130 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build darwin + +package mem + +import ( + "context" + "fmt" + "unsafe" + + "golang.org/x/sys/unix" + + "github.com/shirou/gopsutil/v4/internal/common" +) + +func getHwMemsize() (uint64, error) { + total, err := unix.SysctlUint64("hw.memsize") + if err != nil { + return 0, err + } + return total, nil +} + +// xsw_usage in sys/sysctl.h +type swapUsage struct { + Total uint64 + Avail uint64 + Used uint64 + Pagesize int32 + Encrypted bool +} + +// SwapMemory returns swapinfo. +func SwapMemory() (*SwapMemoryStat, error) { + return SwapMemoryWithContext(context.Background()) +} + +func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) { + // https://github.com/yanllearnn/go-osstat/blob/ae8a279d26f52ec946a03698c7f50a26cfb427e3/memory/memory_darwin.go + var ret *SwapMemoryStat + + value, err := unix.SysctlRaw("vm.swapusage") + if err != nil { + return ret, err + } + if len(value) != 32 { + return ret, fmt.Errorf("unexpected output of sysctl vm.swapusage: %v (len: %d)", value, len(value)) + } + swap := (*swapUsage)(unsafe.Pointer(&value[0])) + + u := float64(0) + if swap.Total != 0 { + u = ((float64(swap.Total) - float64(swap.Avail)) / float64(swap.Total)) * 100.0 + } + + ret = &SwapMemoryStat{ + Total: swap.Total, + Used: swap.Used, + Free: swap.Avail, + UsedPercent: u, + } + + return ret, nil + } + +func SwapDevices() ([]*SwapDevice, error) { + return SwapDevicesWithContext(context.Background()) +} + +func SwapDevicesWithContext(ctx context.Context) ([]*SwapDevice, error) { + return nil, common.ErrNotImplementedError +} + +type vmStatisticsData struct { + freeCount uint32 + activeCount uint32 + inactiveCount uint32 + wireCount uint32 + _ [44]byte // Not used here +} + +// VirtualMemory returns VirtualMemoryStat. 
+func VirtualMemory() (*VirtualMemoryStat, error) { + return VirtualMemoryWithContext(context.Background()) +} + +func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) { + machLib, err := common.NewLibrary(common.System) + if err != nil { + return nil, err + } + defer machLib.Close() + + hostStatistics := common.GetFunc[common.HostStatisticsFunc](machLib, common.HostStatisticsSym) + machHostSelf := common.GetFunc[common.MachHostSelfFunc](machLib, common.MachHostSelfSym) + + count := uint32(common.HOST_VM_INFO_COUNT) + var vmstat vmStatisticsData + + status := hostStatistics(machHostSelf(), common.HOST_VM_INFO, + uintptr(unsafe.Pointer(&vmstat)), &count) + + if status != common.KERN_SUCCESS { + return nil, fmt.Errorf("host_statistics error=%d", status) + } + + pageSizeAddr, _ := machLib.Dlsym("vm_kernel_page_size") + pageSize := **(**uint64)(unsafe.Pointer(&pageSizeAddr)) + total, err := getHwMemsize() + if err != nil { + return nil, err + } + totalCount := uint32(total / pageSize) + + availableCount := vmstat.inactiveCount + vmstat.freeCount + usedPercent := 100 * float64(totalCount-availableCount) / float64(totalCount) + + usedCount := totalCount - availableCount + + return &VirtualMemoryStat{ + Total: total, + Available: pageSize * uint64(availableCount), + Used: pageSize * uint64(usedCount), + UsedPercent: usedPercent, + Free: pageSize * uint64(vmstat.freeCount), + Active: pageSize * uint64(vmstat.activeCount), + Inactive: pageSize * uint64(vmstat.inactiveCount), + Wired: pageSize * uint64(vmstat.wireCount), + }, nil +} diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_fallback.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_fallback.go similarity index 86% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_fallback.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_fallback.go index 697fd870963..ba882c8bee9 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_fallback.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_fallback.go @@ -1,12 +1,12 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build !darwin && !linux && !freebsd && !openbsd && !solaris && !windows && !plan9 && !aix && !netbsd -// +build !darwin,!linux,!freebsd,!openbsd,!solaris,!windows,!plan9,!aix,!netbsd package mem import ( "context" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) func VirtualMemory() (*VirtualMemoryStat, error) { diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_freebsd.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_freebsd.go similarity index 97% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_freebsd.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_freebsd.go index 9a56785b319..a6deddebdd8 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_freebsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_freebsd.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build freebsd -// +build freebsd package mem @@ -8,8 +8,9 @@ import ( "errors" "unsafe" - "github.com/shirou/gopsutil/v3/internal/common" "golang.org/x/sys/unix" + + "github.com/shirou/gopsutil/v4/internal/common" ) func VirtualMemory() (*VirtualMemoryStat, error) { @@ -85,7 +86,6 @@ func SwapMemory() (*SwapMemoryStat, error) { } // Constants from vm/vm_param.h -// nolint: golint const ( XSWDEV_VERSION11 = 1 XSWDEV_VERSION = 2 diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_linux.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_linux.go similarity index 93% rename from 
vendor/github.com/shirou/gopsutil/v3/mem/mem_linux.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_linux.go index 214a91e47f6..05bfdaf4e1a 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_linux.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_linux.go @@ -1,12 +1,11 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build linux -// +build linux package mem import ( "bufio" "context" - "encoding/json" "fmt" "io" "math" @@ -16,22 +15,9 @@ import ( "golang.org/x/sys/unix" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) -type VirtualMemoryExStat struct { - ActiveFile uint64 `json:"activefile"` - InactiveFile uint64 `json:"inactivefile"` - ActiveAnon uint64 `json:"activeanon"` - InactiveAnon uint64 `json:"inactiveanon"` - Unevictable uint64 `json:"unevictable"` -} - -func (v VirtualMemoryExStat) String() string { - s, _ := json.Marshal(v) - return string(s) -} - func VirtualMemory() (*VirtualMemoryStat, error) { return VirtualMemoryWithContext(context.Background()) } @@ -44,19 +30,7 @@ func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) { return vm, nil } -func VirtualMemoryEx() (*VirtualMemoryExStat, error) { - return VirtualMemoryExWithContext(context.Background()) -} - -func VirtualMemoryExWithContext(ctx context.Context) (*VirtualMemoryExStat, error) { - _, vmEx, err := fillFromMeminfoWithContext(ctx) - if err != nil { - return nil, err - } - return vmEx, nil -} - -func fillFromMeminfoWithContext(ctx context.Context) (*VirtualMemoryStat, *VirtualMemoryExStat, error) { +func fillFromMeminfoWithContext(ctx context.Context) (*VirtualMemoryStat, *ExVirtualMemory, error) { filename := common.HostProcWithContext(ctx, "meminfo") lines, _ := common.ReadLines(filename) @@ -67,7 +41,7 @@ func fillFromMeminfoWithContext(ctx context.Context) (*VirtualMemoryStat, *Virtu sReclaimable := false // "Sreclaimable:" not available: 2.6.19 / Nov 2006 ret := &VirtualMemoryStat{} - retEx := &VirtualMemoryExStat{} + retEx := &ExVirtualMemory{} for _, line := range lines { fields := strings.Split(line, ":") @@ -409,7 +383,7 @@ func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) { // calculateAvailVmem is a fallback under kernel 3.14 where /proc/meminfo does not provide // "MemAvailable:" column. 
It reimplements an algorithm from the link below // https://github.com/giampaolo/psutil/pull/890 -func calculateAvailVmem(ctx context.Context, ret *VirtualMemoryStat, retEx *VirtualMemoryExStat) uint64 { +func calculateAvailVmem(ctx context.Context, ret *VirtualMemoryStat, retEx *ExVirtualMemory) uint64 { var watermarkLow uint64 fn := common.HostProcWithContext(ctx, "zoneinfo") diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_netbsd.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_netbsd.go similarity index 98% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_netbsd.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_netbsd.go index d1f54ecaf3f..0a41b3e340e 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_netbsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_netbsd.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build netbsd -// +build netbsd package mem diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd.go similarity index 96% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd.go index e37d5abe0dd..2510bb0d3aa 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build openbsd -// +build openbsd package mem @@ -10,7 +10,7 @@ import ( "errors" "fmt" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" "golang.org/x/sys/unix" ) diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_386.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_386.go similarity index 93% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_386.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_386.go index de2b26ca40a..552e93f4a28 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_386.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_386.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build openbsd && 386 -// +build openbsd,386 // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs mem/types_openbsd.go diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_amd64.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_amd64.go similarity index 92% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_amd64.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_amd64.go index d187abf01fa..73e5b72aa67 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_amd64.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_amd64.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause // Created by cgo -godefs - DO NOT EDIT // cgo -godefs types_openbsd.go diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_arm.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_arm.go similarity index 93% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_arm.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_arm.go index 2488f18517c..57b5861de5b 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_arm.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_arm.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build openbsd && arm -// +build openbsd,arm // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs mem/types_openbsd.go diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_arm64.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_arm64.go similarity index 93% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_arm64.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_arm64.go index 3661b16fb1b..f39a6456b73 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_arm64.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_arm64.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build openbsd && arm64 -// +build openbsd,arm64 // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs mem/types_openbsd.go diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_riscv64.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_riscv64.go similarity index 94% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_riscv64.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_riscv64.go index 7a7b480384e..f9f838f54ed 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_riscv64.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_riscv64.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build openbsd && riscv64 -// +build openbsd,riscv64 // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs mem/types_openbsd.go diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_plan9.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_plan9.go similarity index 95% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_plan9.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_plan9.go index b5259f8446e..c17a102ee62 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_plan9.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_plan9.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build plan9 -// +build plan9 package mem @@ -8,7 +8,7 @@ import ( "os" stats "github.com/lufia/plan9stats" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) func SwapMemory() (*SwapMemoryStat, error) { diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_solaris.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_solaris.go similarity index 98% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_solaris.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_solaris.go index c911267e1ef..06d0d9a006b 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_solaris.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_solaris.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build solaris -// +build solaris package mem @@ -11,7 +11,7 @@ import ( "strconv" "strings" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" "github.com/tklauser/go-sysconf" ) diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_windows.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_windows.go similarity index 84% rename from vendor/github.com/shirou/gopsutil/v3/mem/mem_windows.go rename to vendor/github.com/shirou/gopsutil/v4/mem/mem_windows.go index 8c7fb1a1353..a94b61f4bb5 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_windows.go +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_windows.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build windows -// +build windows package mem @@ -9,7 +9,7 @@ import ( "syscall" "unsafe" - "github.com/shirou/gopsutil/v3/internal/common" + 
"github.com/shirou/gopsutil/v4/internal/common" "golang.org/x/sys/windows" ) @@ -77,26 +77,42 @@ func SwapMemory() (*SwapMemoryStat, error) { } func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) { + // Use the performance counter to get the swap usage percentage + counter, err := common.NewWin32PerformanceCounter("swap_percentage", `\Paging File(_Total)\% Usage`) + if err != nil { + return nil, err + } + defer common.PdhCloseQuery.Call(uintptr(counter.Query)) + + usedPercent, err := counter.GetValue() + if err != nil { + return nil, err + } + + // Get total memory from performance information var perfInfo performanceInformation perfInfo.cb = uint32(unsafe.Sizeof(perfInfo)) mem, _, _ := procGetPerformanceInfo.Call(uintptr(unsafe.Pointer(&perfInfo)), uintptr(perfInfo.cb)) if mem == 0 { return nil, windows.GetLastError() } - tot := perfInfo.commitLimit * perfInfo.pageSize - used := perfInfo.commitTotal * perfInfo.pageSize - free := tot - used - var usedPercent float64 - if tot == 0 { - usedPercent = 0 + totalPhys := perfInfo.physicalTotal * perfInfo.pageSize + totalSys := perfInfo.commitLimit * perfInfo.pageSize + total := totalSys - totalPhys + + var used uint64 + if total > 0 { + used = uint64(0.01 * usedPercent * float64(total)) } else { - usedPercent = float64(used) / float64(tot) * 100 + usedPercent = 0.0 + used = 0 } + ret := &SwapMemoryStat{ - Total: tot, + Total: total, Used: used, - Free: free, - UsedPercent: usedPercent, + Free: total - used, + UsedPercent: common.Round(usedPercent, 1), } return ret, nil diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net.go b/vendor/github.com/shirou/gopsutil/v4/net/net.go similarity index 98% rename from vendor/github.com/shirou/gopsutil/v3/net/net.go rename to vendor/github.com/shirou/gopsutil/v4/net/net.go index 0f3a62f39c5..3890eda5308 100644 --- a/vendor/github.com/shirou/gopsutil/v3/net/net.go +++ b/vendor/github.com/shirou/gopsutil/v4/net/net.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package net import ( @@ -5,7 +6,7 @@ import ( "encoding/json" "net" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) var invoke common.Invoker = common.Invoke{} diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_aix.go b/vendor/github.com/shirou/gopsutil/v4/net/net_aix.go similarity index 93% rename from vendor/github.com/shirou/gopsutil/v3/net/net_aix.go rename to vendor/github.com/shirou/gopsutil/v4/net/net_aix.go index 81feaa8d7a0..08a100d811a 100644 --- a/vendor/github.com/shirou/gopsutil/v3/net/net_aix.go +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_aix.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build aix -// +build aix package net @@ -11,7 +11,7 @@ import ( "strings" "syscall" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) func IOCounters(pernic bool) ([]IOCountersStat, error) { @@ -117,7 +117,7 @@ func parseNetstatAddr(local string, remote string, family uint32) (laddr Addr, r return Addr{}, fmt.Errorf("unknown family, %d", family) } } - lport, err := strconv.Atoi(port) + lport, err := strconv.ParseInt(port, 10, 32) if err != nil { return Addr{}, err } @@ -286,11 +286,11 @@ func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, return ret, nil } -func ConnectionsMax(kind string, max int) ([]ConnectionStat, error) { - return ConnectionsMaxWithContext(context.Background(), kind, max) +func ConnectionsMax(kind string, maxConn int) 
([]ConnectionStat, error) { + return ConnectionsMaxWithContext(context.Background(), kind, maxConn) } -func ConnectionsMaxWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) { +func ConnectionsMaxWithContext(ctx context.Context, kind string, maxConn int) ([]ConnectionStat, error) { return []ConnectionStat{}, common.ErrNotImplementedError } @@ -305,8 +305,8 @@ func ConnectionsWithoutUidsWithContext(ctx context.Context, kind string) ([]Conn return ConnectionsMaxWithoutUidsWithContext(ctx, kind, 0) } -func ConnectionsMaxWithoutUidsWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) { - return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, 0, max) +func ConnectionsMaxWithoutUidsWithContext(ctx context.Context, kind string, maxConn int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, 0, maxConn) } func ConnectionsPidWithoutUids(kind string, pid int32) ([]ConnectionStat, error) { @@ -317,14 +317,14 @@ func ConnectionsPidWithoutUidsWithContext(ctx context.Context, kind string, pid return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, 0) } -func ConnectionsPidMaxWithoutUids(kind string, pid int32, max int) ([]ConnectionStat, error) { - return ConnectionsPidMaxWithoutUidsWithContext(context.Background(), kind, pid, max) +func ConnectionsPidMaxWithoutUids(kind string, pid int32, maxConn int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithoutUidsWithContext(context.Background(), kind, pid, maxConn) } -func ConnectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) { - return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, max) +func ConnectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, maxConn int) ([]ConnectionStat, error) { + return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, maxConn) } -func connectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) { +func connectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, maxConn int) ([]ConnectionStat, error) { return []ConnectionStat{}, common.ErrNotImplementedError } diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_aix_cgo.go b/vendor/github.com/shirou/gopsutil/v4/net/net_aix_cgo.go similarity index 95% rename from vendor/github.com/shirou/gopsutil/v3/net/net_aix_cgo.go rename to vendor/github.com/shirou/gopsutil/v4/net/net_aix_cgo.go index 8c34f881c0b..a45a5b75cce 100644 --- a/vendor/github.com/shirou/gopsutil/v3/net/net_aix_cgo.go +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_aix_cgo.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build aix && cgo -// +build aix,cgo package net diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_aix_nocgo.go b/vendor/github.com/shirou/gopsutil/v4/net/net_aix_nocgo.go similarity index 95% rename from vendor/github.com/shirou/gopsutil/v3/net/net_aix_nocgo.go rename to vendor/github.com/shirou/gopsutil/v4/net/net_aix_nocgo.go index e3fce9021ce..f63a21e73bc 100644 --- a/vendor/github.com/shirou/gopsutil/v3/net/net_aix_nocgo.go +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_aix_nocgo.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build aix && !cgo -// +build aix,!cgo package net @@ -9,7 +9,7 @@ import ( "strconv" "strings" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) func 
parseNetstatI(output string) ([]IOCountersStat, error) { diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_darwin.go b/vendor/github.com/shirou/gopsutil/v4/net/net_darwin.go similarity index 96% rename from vendor/github.com/shirou/gopsutil/v3/net/net_darwin.go rename to vendor/github.com/shirou/gopsutil/v4/net/net_darwin.go index 8a7b6374438..8f3f4d386dd 100644 --- a/vendor/github.com/shirou/gopsutil/v3/net/net_darwin.go +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_darwin.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build darwin -// +build darwin package net @@ -12,7 +12,7 @@ import ( "strconv" "strings" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) var ( @@ -143,8 +143,8 @@ func newMapInterfaceNameUsage(ifaces []netstatInterface) mapInterfaceNameUsage { return output } -func (min mapInterfaceNameUsage) isTruncated() bool { - for _, usage := range min { +func (mapi mapInterfaceNameUsage) isTruncated() bool { + for _, usage := range mapi { if usage > 1 { return true } @@ -152,9 +152,9 @@ func (min mapInterfaceNameUsage) isTruncated() bool { return false } -func (min mapInterfaceNameUsage) notTruncated() []string { +func (mapi mapInterfaceNameUsage) notTruncated() []string { output := make([]string, 0) - for ifaceName, usage := range min { + for ifaceName, usage := range mapi { if usage == 1 { output = append(output, ifaceName) } @@ -247,7 +247,7 @@ func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, } } - if pernic == false { + if !pernic { return getIOCountersAll(ret) } return ret, nil diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_fallback.go b/vendor/github.com/shirou/gopsutil/v4/net/net_fallback.go similarity index 84% rename from vendor/github.com/shirou/gopsutil/v3/net/net_fallback.go rename to vendor/github.com/shirou/gopsutil/v4/net/net_fallback.go index e136be1bace..a765e216b88 100644 --- a/vendor/github.com/shirou/gopsutil/v3/net/net_fallback.go +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_fallback.go @@ -1,12 +1,12 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build !aix && !darwin && !linux && !freebsd && !openbsd && !windows && !solaris -// +build !aix,!darwin,!linux,!freebsd,!openbsd,!windows,!solaris package net import ( "context" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) func IOCounters(pernic bool) ([]IOCountersStat, error) { @@ -49,11 +49,11 @@ func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, return []ConnectionStat{}, common.ErrNotImplementedError } -func ConnectionsMax(kind string, max int) ([]ConnectionStat, error) { - return ConnectionsMaxWithContext(context.Background(), kind, max) +func ConnectionsMax(kind string, maxConn int) ([]ConnectionStat, error) { + return ConnectionsMaxWithContext(context.Background(), kind, maxConn) } -func ConnectionsMaxWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) { +func ConnectionsMaxWithContext(ctx context.Context, kind string, maxConn int) ([]ConnectionStat, error) { return []ConnectionStat{}, common.ErrNotImplementedError } @@ -68,8 +68,8 @@ func ConnectionsWithoutUidsWithContext(ctx context.Context, kind string) ([]Conn return ConnectionsMaxWithoutUidsWithContext(ctx, kind, 0) } -func ConnectionsMaxWithoutUidsWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) { - return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, 0, max) +func 
ConnectionsMaxWithoutUidsWithContext(ctx context.Context, kind string, maxConn int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, 0, maxConn) } func ConnectionsPidWithoutUids(kind string, pid int32) ([]ConnectionStat, error) { @@ -80,14 +80,14 @@ func ConnectionsPidWithoutUidsWithContext(ctx context.Context, kind string, pid return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, 0) } -func ConnectionsPidMaxWithoutUids(kind string, pid int32, max int) ([]ConnectionStat, error) { - return ConnectionsPidMaxWithoutUidsWithContext(context.Background(), kind, pid, max) +func ConnectionsPidMaxWithoutUids(kind string, pid int32, maxConn int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithoutUidsWithContext(context.Background(), kind, pid, maxConn) } -func ConnectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) { - return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, max) +func ConnectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, maxConn int) ([]ConnectionStat, error) { + return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, maxConn) } -func connectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) { +func connectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, maxConn int) ([]ConnectionStat, error) { return []ConnectionStat{}, common.ErrNotImplementedError } diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_freebsd.go b/vendor/github.com/shirou/gopsutil/v4/net/net_freebsd.go similarity index 95% rename from vendor/github.com/shirou/gopsutil/v3/net/net_freebsd.go rename to vendor/github.com/shirou/gopsutil/v4/net/net_freebsd.go index bf8baf09495..ccaab73e0b3 100644 --- a/vendor/github.com/shirou/gopsutil/v3/net/net_freebsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_freebsd.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build freebsd -// +build freebsd package net @@ -8,7 +8,7 @@ import ( "strconv" "strings" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) func IOCounters(pernic bool) ([]IOCountersStat, error) { @@ -83,7 +83,7 @@ func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, ret = append(ret, n) } - if pernic == false { + if !pernic { return getIOCountersAll(ret) } @@ -96,7 +96,7 @@ func IOCountersByFile(pernic bool, filename string) ([]IOCountersStat, error) { } func IOCountersByFileWithContext(ctx context.Context, pernic bool, filename string) ([]IOCountersStat, error) { - return IOCounters(pernic) + return IOCountersWithContext(ctx, pernic) } func FilterCounters() ([]FilterStat, error) { diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_linux.go b/vendor/github.com/shirou/gopsutil/v4/net/net_linux.go similarity index 94% rename from vendor/github.com/shirou/gopsutil/v3/net/net_linux.go rename to vendor/github.com/shirou/gopsutil/v4/net/net_linux.go index 20ca5470a49..2c79facb057 100644 --- a/vendor/github.com/shirou/gopsutil/v3/net/net_linux.go +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_linux.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build linux -// +build linux package net @@ -16,7 +16,7 @@ import ( "strings" "syscall" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) const ( // Conntrack Column numbers @@ -238,14 
+238,14 @@ func FilterCountersWithContext(ctx context.Context) ([]FilterStat, error) { } stats := make([]FilterStat, 0, 1) - max, err := common.ReadInts(maxfile) + maxConn, err := common.ReadInts(maxfile) if err != nil { return nil, err } payload := FilterStat{ ConnTrackCount: count[0], - ConnTrackMax: max[0], + ConnTrackMax: maxConn[0], } stats = append(stats, payload) @@ -396,12 +396,12 @@ func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, // Return a list of network connections opened returning at most `max` // connections for each running process. -func ConnectionsMax(kind string, max int) ([]ConnectionStat, error) { - return ConnectionsMaxWithContext(context.Background(), kind, max) +func ConnectionsMax(kind string, maxConn int) ([]ConnectionStat, error) { + return ConnectionsMaxWithContext(context.Background(), kind, maxConn) } -func ConnectionsMaxWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) { - return ConnectionsPidMaxWithContext(ctx, kind, 0, max) +func ConnectionsMaxWithContext(ctx context.Context, kind string, maxConn int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithContext(ctx, kind, 0, maxConn) } // Return a list of network connections opened, omitting `Uids`. @@ -415,8 +415,8 @@ func ConnectionsWithoutUidsWithContext(ctx context.Context, kind string) ([]Conn return ConnectionsMaxWithoutUidsWithContext(ctx, kind, 0) } -func ConnectionsMaxWithoutUidsWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) { - return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, 0, max) +func ConnectionsMaxWithoutUidsWithContext(ctx context.Context, kind string, maxConn int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, 0, maxConn) } // Return a list of network connections opened by a process. @@ -437,23 +437,23 @@ func ConnectionsPidWithoutUidsWithContext(ctx context.Context, kind string, pid } // Return up to `max` network connections opened by a process. 
-func ConnectionsPidMax(kind string, pid int32, max int) ([]ConnectionStat, error) { - return ConnectionsPidMaxWithContext(context.Background(), kind, pid, max) +func ConnectionsPidMax(kind string, pid int32, maxConn int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithContext(context.Background(), kind, pid, maxConn) } -func ConnectionsPidMaxWithoutUids(kind string, pid int32, max int) ([]ConnectionStat, error) { - return ConnectionsPidMaxWithoutUidsWithContext(context.Background(), kind, pid, max) +func ConnectionsPidMaxWithoutUids(kind string, pid int32, maxConn int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithoutUidsWithContext(context.Background(), kind, pid, maxConn) } -func ConnectionsPidMaxWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) { - return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, max, false) +func ConnectionsPidMaxWithContext(ctx context.Context, kind string, pid int32, maxConn int) ([]ConnectionStat, error) { + return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, maxConn, false) } -func ConnectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) { - return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, max, true) +func ConnectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, maxConn int) ([]ConnectionStat, error) { + return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, maxConn, true) } -func connectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, max int, skipUids bool) ([]ConnectionStat, error) { +func connectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, maxConn int, skipUids bool) ([]ConnectionStat, error) { tmap, ok := netConnectionKindMap[kind] if !ok { return nil, fmt.Errorf("invalid kind, %s", kind) @@ -462,9 +462,9 @@ func connectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, p var err error var inodes map[string][]inodeMap if pid == 0 { - inodes, err = getProcInodesAllWithContext(ctx, root, max) + inodes, err = getProcInodesAllWithContext(ctx, root, maxConn) } else { - inodes, err = getProcInodes(root, pid, max) + inodes, err = getProcInodes(root, pid, maxConn) if len(inodes) == 0 { // no connection for the pid return []ConnectionStat{}, nil @@ -543,7 +543,7 @@ func statsFromInodesWithContext(ctx context.Context, root string, pid int32, tma } // getProcInodes returns fd of the pid. 
-func getProcInodes(root string, pid int32, max int) (map[string][]inodeMap, error) { +func getProcInodes(root string, pid int32, maxConn int) (map[string][]inodeMap, error) { ret := make(map[string][]inodeMap) dir := fmt.Sprintf("%s/%d/fd", root, pid) @@ -552,7 +552,7 @@ func getProcInodes(root string, pid int32, max int) (map[string][]inodeMap, erro return ret, err } defer f.Close() - dirEntries, err := readDir(f, max) + dirEntries, err := f.ReadDir(maxConn) if err != nil { return ret, err } @@ -573,7 +573,7 @@ func getProcInodes(root string, pid int32, max int) (map[string][]inodeMap, erro if !ok { ret[inode] = make([]inodeMap, 0) } - fd, err := strconv.Atoi(dirEntry.Name()) + fd, err := strconv.ParseInt(dirEntry.Name(), 10, 32) if err != nil { continue } @@ -668,11 +668,11 @@ func (p *process) fillFromStatus(ctx context.Context) error { return nil } -func getProcInodesAll(root string, max int) (map[string][]inodeMap, error) { - return getProcInodesAllWithContext(context.Background(), root, max) +func getProcInodesAll(root string, maxConn int) (map[string][]inodeMap, error) { + return getProcInodesAllWithContext(context.Background(), root, maxConn) } -func getProcInodesAllWithContext(ctx context.Context, root string, max int) (map[string][]inodeMap, error) { +func getProcInodesAllWithContext(ctx context.Context, root string, maxConn int) (map[string][]inodeMap, error) { pids, err := PidsWithContext(ctx) if err != nil { return nil, err @@ -680,7 +680,7 @@ func getProcInodesAllWithContext(ctx context.Context, root string, max int) (map ret := make(map[string][]inodeMap) for _, pid := range pids { - t, err := getProcInodes(root, pid, max) + t, err := getProcInodes(root, pid, maxConn) if err != nil { // skip if permission error or no longer exists if os.IsPermission(err) || os.IsNotExist(err) || errors.Is(err, io.EOF) { @@ -858,7 +858,7 @@ func processUnix(file string, kind netConnectionKindType, inodes map[string][]in if len(tokens) < 6 { continue } - st, err := strconv.Atoi(tokens[4]) + st, err := strconv.ParseInt(tokens[4], 10, 32) if err != nil { return nil, err } diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_openbsd.go b/vendor/github.com/shirou/gopsutil/v4/net/net_openbsd.go similarity index 97% rename from vendor/github.com/shirou/gopsutil/v3/net/net_openbsd.go rename to vendor/github.com/shirou/gopsutil/v4/net/net_openbsd.go index 25bbe49ca1f..7fae18b936c 100644 --- a/vendor/github.com/shirou/gopsutil/v3/net/net_openbsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_openbsd.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build openbsd -// +build openbsd package net @@ -12,13 +12,14 @@ import ( "strings" "syscall" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) var portMatch = regexp.MustCompile(`(.*)\.(\d+)$`) func ParseNetstat(output string, mode string, - iocs map[string]IOCountersStat) error { + iocs map[string]IOCountersStat, +) error { lines := strings.Split(output, "\n") exists := make([]string, 0, len(lines)-1) @@ -96,7 +97,7 @@ func ParseNetstat(output string, mode string, n.PacketsSent = parsed[2] n.Dropout = parsed[3] case "ine": - n.Errin = parsed[0] + n.Errin = parsed[0] n.Errout = parsed[1] } @@ -254,7 +255,7 @@ func parseNetstatAddr(local string, remote string, family uint32) (laddr Addr, r return Addr{}, fmt.Errorf("unknown family, %d", family) } } - lport, err := strconv.Atoi(port) + lport, err := strconv.ParseInt(port, 10, 32) if err != nil { return Addr{}, err } diff --git 
a/vendor/github.com/shirou/gopsutil/v3/net/net_solaris.go b/vendor/github.com/shirou/gopsutil/v4/net/net_solaris.go similarity index 98% rename from vendor/github.com/shirou/gopsutil/v3/net/net_solaris.go rename to vendor/github.com/shirou/gopsutil/v4/net/net_solaris.go index 79d8ac30e24..b886066e82e 100644 --- a/vendor/github.com/shirou/gopsutil/v3/net/net_solaris.go +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_solaris.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build solaris -// +build solaris package net @@ -11,7 +11,7 @@ import ( "strconv" "strings" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) // NetIOCounters returnes network I/O statistics for every network diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_unix.go b/vendor/github.com/shirou/gopsutil/v4/net/net_unix.go similarity index 86% rename from vendor/github.com/shirou/gopsutil/v3/net/net_unix.go rename to vendor/github.com/shirou/gopsutil/v4/net/net_unix.go index cb846e28a6d..62f8907abf5 100644 --- a/vendor/github.com/shirou/gopsutil/v3/net/net_unix.go +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_unix.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build freebsd || darwin -// +build freebsd darwin package net @@ -11,7 +11,7 @@ import ( "strings" "syscall" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) // Return a list of network connections opened. @@ -25,11 +25,11 @@ func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, // Return a list of network connections opened returning at most `max` // connections for each running process. -func ConnectionsMax(kind string, max int) ([]ConnectionStat, error) { - return ConnectionsMaxWithContext(context.Background(), kind, max) +func ConnectionsMax(kind string, maxConn int) ([]ConnectionStat, error) { + return ConnectionsMaxWithContext(context.Background(), kind, maxConn) } -func ConnectionsMaxWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) { +func ConnectionsMaxWithContext(ctx context.Context, kind string, maxConn int) ([]ConnectionStat, error) { return []ConnectionStat{}, common.ErrNotImplementedError } @@ -109,11 +109,11 @@ func parseNetLine(line string) (ConnectionStat, error) { f[7] = "unix" } - pid, err := strconv.Atoi(f[1]) + pid, err := strconv.ParseInt(f[1], 10, 32) if err != nil { return ConnectionStat{}, err } - fd, err := strconv.Atoi(strings.Trim(f[3], "u")) + fd, err := strconv.ParseInt(strings.Trim(f[3], "u"), 10, 32) if err != nil { return ConnectionStat{}, fmt.Errorf("unknown fd, %s", f[3]) } @@ -157,7 +157,7 @@ func parseNetAddr(line string) (laddr Addr, raddr Addr, err error) { if err != nil { return Addr{}, fmt.Errorf("wrong addr, %s", l) } - lport, err := strconv.Atoi(port) + lport, err := strconv.ParseInt(port, 10, 32) if err != nil { return Addr{}, err } @@ -180,11 +180,11 @@ func parseNetAddr(line string) (laddr Addr, raddr Addr, err error) { } // Return up to `max` network connections opened by a process. 
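Throughout these renamed files the update also swaps strconv.Atoi for strconv.ParseInt(s, 10, 32), which range-checks a value before it is narrowed to a 32-bit type instead of truncating silently. A standalone sketch with a hypothetical out-of-range input:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	// On 64-bit platforms Atoi accepts values that overflow int32...
	big, _ := strconv.Atoi("4294967296") // 2^32
	fmt.Println(int32(big))              // ...which then truncates to 0.

	// ParseInt with bitSize 32 rejects the same input up front.
	_, err := strconv.ParseInt("4294967296", 10, 32)
	fmt.Println(err) // value out of range
}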
-func ConnectionsPidMax(kind string, pid int32, max int) ([]ConnectionStat, error) { - return ConnectionsPidMaxWithContext(context.Background(), kind, pid, max) +func ConnectionsPidMax(kind string, pid int32, maxConn int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithContext(context.Background(), kind, pid, maxConn) } -func ConnectionsPidMaxWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) { +func ConnectionsPidMaxWithContext(ctx context.Context, kind string, pid int32, maxConn int) ([]ConnectionStat, error) { return []ConnectionStat{}, common.ErrNotImplementedError } @@ -199,8 +199,8 @@ func ConnectionsWithoutUidsWithContext(ctx context.Context, kind string) ([]Conn return ConnectionsMaxWithoutUidsWithContext(ctx, kind, 0) } -func ConnectionsMaxWithoutUidsWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) { - return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, 0, max) +func ConnectionsMaxWithoutUidsWithContext(ctx context.Context, kind string, maxConn int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, 0, maxConn) } func ConnectionsPidWithoutUids(kind string, pid int32) ([]ConnectionStat, error) { @@ -211,14 +211,14 @@ func ConnectionsPidWithoutUidsWithContext(ctx context.Context, kind string, pid return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, 0) } -func ConnectionsPidMaxWithoutUids(kind string, pid int32, max int) ([]ConnectionStat, error) { - return ConnectionsPidMaxWithoutUidsWithContext(context.Background(), kind, pid, max) +func ConnectionsPidMaxWithoutUids(kind string, pid int32, maxConn int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithoutUidsWithContext(context.Background(), kind, pid, maxConn) } -func ConnectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) { - return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, max) +func ConnectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, maxConn int) ([]ConnectionStat, error) { + return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, maxConn) } -func connectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) { +func connectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, maxConn int) ([]ConnectionStat, error) { return []ConnectionStat{}, common.ErrNotImplementedError } diff --git a/vendor/github.com/shirou/gopsutil/v3/net/net_windows.go b/vendor/github.com/shirou/gopsutil/v4/net/net_windows.go similarity index 97% rename from vendor/github.com/shirou/gopsutil/v3/net/net_windows.go rename to vendor/github.com/shirou/gopsutil/v4/net/net_windows.go index 5d384342f87..f1145feab29 100644 --- a/vendor/github.com/shirou/gopsutil/v3/net/net_windows.go +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_windows.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build windows -// +build windows package net @@ -11,7 +11,7 @@ import ( "syscall" "unsafe" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" "golang.org/x/sys/windows" ) @@ -279,11 +279,11 @@ func getNetStatWithKind(kindType netConnectionKindType) ([]ConnectionStat, error // Return a list of network connections opened returning at most `max` // connections for each running process. 
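For callers, the max → maxConn rename is mechanical: the final parameter still caps how many connections are returned per process, with 0 meaning unlimited. A minimal sketch against the v4 module vendored in this PR (the kind "tcp" and pid 1 are arbitrary example values; only Linux implements the capped lookup, while the Unix and Windows variants in this section return ErrNotImplementedError):

package main

import (
	"fmt"

	"github.com/shirou/gopsutil/v4/net"
)

func main() {
	// At most 10 TCP connections for pid 1; a maxConn of 0 would mean no cap.
	conns, err := net.ConnectionsPidMax("tcp", 1, 10)
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	for _, c := range conns {
		fmt.Printf("%s:%d -> %s:%d (%s)\n",
			c.Laddr.IP, c.Laddr.Port, c.Raddr.IP, c.Raddr.Port, c.Status)
	}
}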
-func ConnectionsMax(kind string, max int) ([]ConnectionStat, error) { - return ConnectionsMaxWithContext(context.Background(), kind, max) +func ConnectionsMax(kind string, maxConn int) ([]ConnectionStat, error) { + return ConnectionsMaxWithContext(context.Background(), kind, maxConn) } -func ConnectionsMaxWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) { +func ConnectionsMaxWithContext(ctx context.Context, kind string, maxConn int) ([]ConnectionStat, error) { return []ConnectionStat{}, common.ErrNotImplementedError } @@ -298,8 +298,8 @@ func ConnectionsWithoutUidsWithContext(ctx context.Context, kind string) ([]Conn return ConnectionsMaxWithoutUidsWithContext(ctx, kind, 0) } -func ConnectionsMaxWithoutUidsWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) { - return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, 0, max) +func ConnectionsMaxWithoutUidsWithContext(ctx context.Context, kind string, maxConn int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, 0, maxConn) } func ConnectionsPidWithoutUids(kind string, pid int32) ([]ConnectionStat, error) { @@ -310,15 +310,15 @@ func ConnectionsPidWithoutUidsWithContext(ctx context.Context, kind string, pid return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, 0) } -func ConnectionsPidMaxWithoutUids(kind string, pid int32, max int) ([]ConnectionStat, error) { - return ConnectionsPidMaxWithoutUidsWithContext(context.Background(), kind, pid, max) +func ConnectionsPidMaxWithoutUids(kind string, pid int32, maxConn int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithoutUidsWithContext(context.Background(), kind, pid, maxConn) } -func ConnectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) { - return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, max) +func ConnectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, maxConn int) ([]ConnectionStat, error) { + return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, maxConn) } -func connectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) { +func connectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, maxConn int) ([]ConnectionStat, error) { return []ConnectionStat{}, common.ErrNotImplementedError } diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process.go b/vendor/github.com/shirou/gopsutil/v4/process/process.go similarity index 91% rename from vendor/github.com/shirou/gopsutil/v3/process/process.go rename to vendor/github.com/shirou/gopsutil/v4/process/process.go index 1bb27abf8e8..70411c61644 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package process import ( @@ -9,15 +10,15 @@ import ( "sync" "time" - "github.com/shirou/gopsutil/v3/cpu" - "github.com/shirou/gopsutil/v3/internal/common" - "github.com/shirou/gopsutil/v3/mem" - "github.com/shirou/gopsutil/v3/net" + "github.com/shirou/gopsutil/v4/cpu" + "github.com/shirou/gopsutil/v4/internal/common" + "github.com/shirou/gopsutil/v4/mem" + "github.com/shirou/gopsutil/v4/net" ) var ( invoke common.Invoker = common.Invoke{} - ErrorNoChildren = errors.New("process does not have children") + ErrorNoChildren = errors.New("process does not have children") // Deprecated: ErrorNoChildren is 
never returned by process.Children(); check the length of the returned []*Process slice instead ErrorProcessNotRunning = errors.New("process does not exist") ErrorNotPermitted = errors.New("operation not permitted") ) @@ -29,9 +30,9 @@ type Process struct { parent int32 parentMutex sync.RWMutex // for windows ppid cache numCtxSwitches *NumCtxSwitchesStat - uids []int32 - gids []int32 - groups []int32 + uids []uint32 + gids []uint32 + groups []uint32 numThreads int32 memInfo *MemoryInfoStat sigInfo *SignalInfoStat @@ -102,10 +103,18 @@ type RlimitStat struct { } type IOCountersStat struct { - ReadCount uint64 `json:"readCount"` + // ReadCount is a number of read I/O operations such as syscalls. + ReadCount uint64 `json:"readCount"` + // WriteCount is a number of write I/O operations such as syscalls. WriteCount uint64 `json:"writeCount"` - ReadBytes uint64 `json:"readBytes"` + // ReadBytes is a number of all I/O read in bytes. This includes disk I/O on Linux and Windows. + ReadBytes uint64 `json:"readBytes"` + // WriteBytes is a number of all I/O write in bytes. This includes disk I/O on Linux and Windows. WriteBytes uint64 `json:"writeBytes"` + // DiskReadBytes is a number of disk I/O read in bytes. Currently only Linux has this value. + DiskReadBytes uint64 `json:"diskReadBytes"` + // DiskWriteBytes is a number of disk I/O write in bytes. Currently only Linux has this value. + DiskWriteBytes uint64 `json:"diskWriteBytes"` } type NumCtxSwitchesStat struct { @@ -316,7 +325,11 @@ func calculatePercent(t1, t2 *cpu.TimesStat, delta float64, numcpu int) float64 if delta == 0 { return 0 } - delta_proc := t2.Total() - t1.Total() + // https://github.com/giampaolo/psutil/blob/c034e6692cf736b5e87d14418a8153bb03f6cf42/psutil/__init__.py#L1064 + delta_proc := (t2.User - t1.User) + (t2.System - t1.System) + if delta_proc <= 0 { + return 0 + } overall_percent := ((delta_proc / delta) * 100) * float64(numcpu) return overall_percent } @@ -368,7 +381,7 @@ func (p *Process) CPUPercentWithContext(ctx context.Context) (float64, error) { } // Groups returns all group IDs(include supplementary groups) of the process as a slice of the int -func (p *Process) Groups() ([]int32, error) { +func (p *Process) Groups() ([]uint32, error) { return p.GroupsWithContext(context.Background()) } @@ -433,12 +446,12 @@ func (p *Process) Foreground() (bool, error) { } // Uids returns user ids of the process as a slice of the int -func (p *Process) Uids() ([]int32, error) { +func (p *Process) Uids() ([]uint32, error) { return p.UidsWithContext(context.Background()) } // Gids returns group ids of the process as a slice of the int -func (p *Process) Gids() ([]int32, error) { +func (p *Process) Gids() ([]uint32, error) { return p.GidsWithContext(context.Background()) } @@ -538,8 +551,8 @@ func (p *Process) Connections() ([]net.ConnectionStat, error) { } // ConnectionsMax returns a slice of net.ConnectionStat used by the process at most `max`.
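The new Disk* fields change what the existing fields report on Linux: a later hunk in this patch maps rchar/wchar to ReadBytes/WriteBytes and moves read_bytes/write_bytes into DiskReadBytes/DiskWriteBytes, so consumers comparing v3 and v4 numbers will see a shift. A small sketch reading both pairs (inspecting the current process is just an example):

package main

import (
	"fmt"
	"os"

	"github.com/shirou/gopsutil/v4/process"
)

func main() {
	p, err := process.NewProcess(int32(os.Getpid()))
	if err != nil {
		fmt.Println(err)
		return
	}
	io, err := p.IOCounters()
	if err != nil {
		fmt.Println(err)
		return
	}
	// On Linux, ReadBytes/WriteBytes now come from rchar/wchar (all I/O),
	// while DiskReadBytes/DiskWriteBytes carry read_bytes/write_bytes.
	fmt.Println("all I/O bytes:", io.ReadBytes, io.WriteBytes)
	fmt.Println("disk I/O bytes:", io.DiskReadBytes, io.DiskWriteBytes)
}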
-func (p *Process) ConnectionsMax(max int) ([]net.ConnectionStat, error) { - return p.ConnectionsMaxWithContext(context.Background(), max) +func (p *Process) ConnectionsMax(maxConn int) ([]net.ConnectionStat, error) { + return p.ConnectionsMaxWithContext(context.Background(), maxConn) } // MemoryMaps get memory maps from /proc/(pid)/smaps diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_bsd.go b/vendor/github.com/shirou/gopsutil/v4/process/process_bsd.go similarity index 94% rename from vendor/github.com/shirou/gopsutil/v3/process/process_bsd.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_bsd.go index 263829ffae5..dcc056101a0 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_bsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_bsd.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build darwin || freebsd || openbsd -// +build darwin freebsd openbsd package process @@ -8,8 +8,8 @@ import ( "context" "encoding/binary" - "github.com/shirou/gopsutil/v3/cpu" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/cpu" + "github.com/shirou/gopsutil/v4/internal/common" ) type MemoryInfoExStat struct{} diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_darwin.go b/vendor/github.com/shirou/gopsutil/v4/process/process_darwin.go new file mode 100644 index 00000000000..05c7562b767 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_darwin.go @@ -0,0 +1,480 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build darwin + +package process + +import ( + "bytes" + "context" + "encoding/binary" + "fmt" + "path/filepath" + "runtime" + "sort" + "strconv" + "strings" + "unsafe" + + "golang.org/x/sys/unix" + + "github.com/shirou/gopsutil/v4/cpu" + "github.com/shirou/gopsutil/v4/internal/common" + "github.com/shirou/gopsutil/v4/net" +) + +// copied from sys/sysctl.h +const ( + CTLKern = 1 // "high kernel": proc, limits + KernProc = 14 // struct: process entries + KernProcPID = 1 // by process id + KernProcProc = 8 // only return procs + KernProcAll = 0 // everything + KernProcPathname = 12 // path to executable +) + +type _Ctype_struct___0 struct { + Pad uint64 +} + +func pidsWithContext(ctx context.Context) ([]int32, error) { + var ret []int32 + + kprocs, err := unix.SysctlKinfoProcSlice("kern.proc.all") + if err != nil { + return ret, err + } + + for _, proc := range kprocs { + ret = append(ret, int32(proc.Proc.P_pid)) + } + + return ret, nil +} + +func (p *Process) PpidWithContext(ctx context.Context) (int32, error) { + k, err := p.getKProc() + if err != nil { + return 0, err + } + + return k.Eproc.Ppid, nil +} + +func (p *Process) NameWithContext(ctx context.Context) (string, error) { + k, err := p.getKProc() + if err != nil { + return "", err + } + + name := common.ByteToString(k.Proc.P_comm[:]) + + if len(name) >= 15 { + cmdName, err := p.cmdNameWithContext(ctx) + if err != nil { + return "", err + } + if len(cmdName) > 0 { + extendedName := filepath.Base(cmdName) + if strings.HasPrefix(extendedName, p.name) { + name = extendedName + } + } + } + + return name, nil +} + +func (p *Process) createTimeWithContext(ctx context.Context) (int64, error) { + k, err := p.getKProc() + if err != nil { + return 0, err + } + + return k.Proc.P_starttime.Sec*1000 + int64(k.Proc.P_starttime.Usec)/1000, nil +} + +func (p *Process) StatusWithContext(ctx context.Context) ([]string, error) { + r, err := callPsWithContext(ctx, "state", p.Pid, false, false) + if err != nil { + return 
[]string{""}, err + } + status := convertStatusChar(r[0][0][0:1]) + return []string{status}, err +} + +func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) { + // see https://github.com/shirou/gopsutil/issues/596#issuecomment-432707831 for implementation details + pid := p.Pid + out, err := invoke.CommandWithContext(ctx, "ps", "-o", "stat=", "-p", strconv.Itoa(int(pid))) + if err != nil { + return false, err + } + return strings.IndexByte(string(out), '+') != -1, nil +} + +func (p *Process) UidsWithContext(ctx context.Context) ([]uint32, error) { + k, err := p.getKProc() + if err != nil { + return nil, err + } + + // See: http://unix.superglobalmegacorp.com/Net2/newsrc/sys/ucred.h.html + userEffectiveUID := uint32(k.Eproc.Ucred.Uid) + + return []uint32{userEffectiveUID}, nil +} + +func (p *Process) GidsWithContext(ctx context.Context) ([]uint32, error) { + k, err := p.getKProc() + if err != nil { + return nil, err + } + + gids := make([]uint32, 0, 3) + gids = append(gids, uint32(k.Eproc.Pcred.P_rgid), uint32(k.Eproc.Pcred.P_rgid), uint32(k.Eproc.Pcred.P_svgid)) + + return gids, nil +} + +func (p *Process) GroupsWithContext(ctx context.Context) ([]uint32, error) { + return nil, common.ErrNotImplementedError + // k, err := p.getKProc() + // if err != nil { + // return nil, err + // } + + // groups := make([]int32, k.Eproc.Ucred.Ngroups) + // for i := int16(0); i < k.Eproc.Ucred.Ngroups; i++ { + // groups[i] = int32(k.Eproc.Ucred.Groups[i]) + // } + + // return groups, nil +} + +func (p *Process) TerminalWithContext(ctx context.Context) (string, error) { + return "", common.ErrNotImplementedError + /* + k, err := p.getKProc() + if err != nil { + return "", err + } + + ttyNr := uint64(k.Eproc.Tdev) + termmap, err := getTerminalMap() + if err != nil { + return "", err + } + + return termmap[ttyNr], nil + */ +} + +func (p *Process) NiceWithContext(ctx context.Context) (int32, error) { + k, err := p.getKProc() + if err != nil { + return 0, err + } + return int32(k.Proc.P_nice), nil +} + +func (p *Process) IOCountersWithContext(ctx context.Context) (*IOCountersStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) ChildrenWithContext(ctx context.Context) ([]*Process, error) { + procs, err := ProcessesWithContext(ctx) + if err != nil { + return nil, nil + } + ret := make([]*Process, 0, len(procs)) + for _, proc := range procs { + ppid, err := proc.PpidWithContext(ctx) + if err != nil { + continue + } + if ppid == p.Pid { + ret = append(ret, proc) + } + } + sort.Slice(ret, func(i, j int) bool { return ret[i].Pid < ret[j].Pid }) + return ret, nil +} + +func (p *Process) ConnectionsWithContext(ctx context.Context) ([]net.ConnectionStat, error) { + return net.ConnectionsPidWithContext(ctx, "all", p.Pid) +} + +func (p *Process) ConnectionsMaxWithContext(ctx context.Context, maxConn int) ([]net.ConnectionStat, error) { + return net.ConnectionsPidMaxWithContext(ctx, "all", p.Pid, maxConn) +} + +func ProcessesWithContext(ctx context.Context) ([]*Process, error) { + out := []*Process{} + + pids, err := PidsWithContext(ctx) + if err != nil { + return out, err + } + + for _, pid := range pids { + p, err := NewProcessWithContext(ctx, pid) + if err != nil { + continue + } + out = append(out, p) + } + + return out, nil +} + +// Returns a proc as defined here: +// http://unix.superglobalmegacorp.com/Net2/newsrc/sys/kinfo_proc.h.html +func (p *Process) getKProc() (*unix.KinfoProc, error) { + return unix.SysctlKinfoProc("kern.proc.pid", int(p.Pid)) +} + +// call ps 
command. +// The returned rows exclude the header line; callers must pass valid ps arguments. +// Each row is split on spaces, and the caller is responsible for further parsing. +// If the pid argument is 0, information for all processes is returned. +func callPsWithContext(ctx context.Context, arg string, pid int32, threadOption bool, nameOption bool) ([][]string, error) { + var cmd []string + if pid == 0 { // will get from all processes. + cmd = []string{"-ax", "-o", arg} + } else if threadOption { + cmd = []string{"-x", "-o", arg, "-M", "-p", strconv.Itoa(int(pid))} + } else { + cmd = []string{"-x", "-o", arg, "-p", strconv.Itoa(int(pid))} + } + if nameOption { + cmd = append(cmd, "-c") + } + out, err := invoke.CommandWithContext(ctx, "ps", cmd...) + if err != nil { + return [][]string{}, err + } + lines := strings.Split(string(out), "\n") + + var ret [][]string + for _, l := range lines[1:] { + var lr []string + if nameOption { + lr = append(lr, l) + } else { + for _, r := range strings.Split(l, " ") { + if r == "" { + continue + } + lr = append(lr, strings.TrimSpace(r)) + } + } + if len(lr) != 0 { + ret = append(ret, lr) + } + } + + return ret, nil +} + +var ( + procPidPath common.ProcPidPathFunc + procPidInfo common.ProcPidInfoFunc + machTimeBaseInfo common.MachTimeBaseInfoFunc +) + +func registerFuncs() (*common.Library, error) { + lib, err := common.NewLibrary(common.System) + if err != nil { + return nil, err + } + + procPidPath = common.GetFunc[common.ProcPidPathFunc](lib, common.ProcPidPathSym) + procPidInfo = common.GetFunc[common.ProcPidInfoFunc](lib, common.ProcPidInfoSym) + machTimeBaseInfo = common.GetFunc[common.MachTimeBaseInfoFunc](lib, common.MachTimeBaseInfoSym) + + return lib, nil +} + +func getTimeScaleToNanoSeconds() float64 { + var timeBaseInfo common.MachTimeBaseInfo + + machTimeBaseInfo(uintptr(unsafe.Pointer(&timeBaseInfo))) + + return float64(timeBaseInfo.Numer) / float64(timeBaseInfo.Denom) +} + +func (p *Process) ExeWithContext(ctx context.Context) (string, error) { + lib, err := registerFuncs() + if err != nil { + return "", err + } + defer lib.Close() + + buf := make([]byte, common.PROC_PIDPATHINFO_MAXSIZE) + ret := procPidPath(p.Pid, uintptr(unsafe.Pointer(&buf[0])), common.PROC_PIDPATHINFO_MAXSIZE) + + if ret <= 0 { + return "", fmt.Errorf("unknown error: proc_pidpath returned %d", ret) + } + + return common.GoString(&buf[0]), nil +} + +// sys/proc_info.h +type vnodePathInfo struct { + _ [152]byte + vipPath [common.MAXPATHLEN]byte + _ [1176]byte +} + +// CwdWithContext retrieves the Current Working Directory for the given process. +// It uses proc_pidinfo from libproc and will only work for processes the +// EUID can access. Otherwise "operation not permitted" will be returned as the +// error. +// Note: This might also work for other *BSD OSs.
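The CwdWithContext implementation directly below surfaces EPERM as the exported ErrorNotPermitted, which callers can match with errors.Is. A short usage sketch (querying the current process is illustrative only):

package main

import (
	"errors"
	"fmt"
	"os"

	"github.com/shirou/gopsutil/v4/process"
)

func main() {
	p, err := process.NewProcess(int32(os.Getpid()))
	if err != nil {
		fmt.Println(err)
		return
	}
	cwd, err := p.Cwd()
	if errors.Is(err, process.ErrorNotPermitted) {
		fmt.Println("the EUID cannot inspect this process")
		return
	}
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("cwd:", cwd)
}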
+func (p *Process) CwdWithContext(ctx context.Context) (string, error) { + lib, err := registerFuncs() + if err != nil { + return "", err + } + defer lib.Close() + + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + var vpi vnodePathInfo + const vpiSize = int32(unsafe.Sizeof(vpi)) + ret := procPidInfo(p.Pid, common.PROC_PIDVNODEPATHINFO, 0, uintptr(unsafe.Pointer(&vpi)), vpiSize) + errno, _ := lib.Dlsym("errno") + err = *(**unix.Errno)(unsafe.Pointer(&errno)) + if err == unix.EPERM { + return "", ErrorNotPermitted + } + + if ret <= 0 { + return "", fmt.Errorf("unknown error: proc_pidinfo returned %d", ret) + } + + if ret != vpiSize { + return "", fmt.Errorf("too few bytes; expected %d, got %d", vpiSize, ret) + } + return common.GoString(&vpi.vipPath[0]), nil +} + +func procArgs(pid int32) ([]byte, int, error) { + procargs, _, err := common.CallSyscall([]int32{common.CTL_KERN, common.KERN_PROCARGS2, pid}) + if err != nil { + return nil, 0, err + } + nargs := procargs[:4] + return procargs, int(binary.LittleEndian.Uint32(nargs)), nil +} + +func (p *Process) CmdlineSliceWithContext(ctx context.Context) ([]string, error) { + return p.cmdlineSliceWithContext(ctx, true) +} + +func (p *Process) cmdlineSliceWithContext(ctx context.Context, fallback bool) ([]string, error) { + pargs, nargs, err := procArgs(p.Pid) + if err != nil { + return nil, err + } + // The first bytes hold the nargs int, skip it. + args := bytes.Split((pargs)[unsafe.Sizeof(int(0)):], []byte{0}) + var argStr string + // The first element is the actual binary/command path. + // command := args[0] + var argSlice []string + // var envSlice []string + // All other, non-zero elements are arguments. The first "nargs" elements + // are the arguments. Everything else in the slice is then the environment + // of the process. 
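+ // Assumed KERN_PROCARGS2 buffer layout, which the loop below relies on:
+ //   [int32 nargs][exec path]\0...\0[argv strings]\0[environ strings]
+ // The loop keeps the first nargs non-empty strings as arguments and stops
+ // at the first non-empty string after that, which begins the environment.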
+ for _, arg := range args[1:] { + argStr = string(arg[:]) + if len(argStr) > 0 { + if nargs > 0 { + argSlice = append(argSlice, argStr) + nargs-- + continue + } + break + // envSlice = append(envSlice, argStr) + } + } + return argSlice, err +} + +// cmdNameWithContext returns the command name (including spaces) without any arguments +func (p *Process) cmdNameWithContext(ctx context.Context) (string, error) { + r, err := p.cmdlineSliceWithContext(ctx, false) + if err != nil { + return "", err + } + + if len(r) == 0 { + return "", nil + } + + return r[0], err +} + +func (p *Process) CmdlineWithContext(ctx context.Context) (string, error) { + r, err := p.CmdlineSliceWithContext(ctx) + if err != nil { + return "", err + } + return strings.Join(r, " "), err +} + +func (p *Process) NumThreadsWithContext(ctx context.Context) (int32, error) { + lib, err := registerFuncs() + if err != nil { + return 0, err + } + defer lib.Close() + + var ti ProcTaskInfo + const tiSize = int32(unsafe.Sizeof(ti)) + procPidInfo(p.Pid, common.PROC_PIDTASKINFO, 0, uintptr(unsafe.Pointer(&ti)), tiSize) + + return int32(ti.Threadnum), nil +} + +func (p *Process) TimesWithContext(ctx context.Context) (*cpu.TimesStat, error) { + lib, err := registerFuncs() + if err != nil { + return nil, err + } + defer lib.Close() + + var ti ProcTaskInfo + const tiSize = int32(unsafe.Sizeof(ti)) + procPidInfo(p.Pid, common.PROC_PIDTASKINFO, 0, uintptr(unsafe.Pointer(&ti)), tiSize) + + timescaleToNanoSeconds := getTimeScaleToNanoSeconds() + ret := &cpu.TimesStat{ + CPU: "cpu", + User: float64(ti.Total_user) * timescaleToNanoSeconds / 1e9, + System: float64(ti.Total_system) * timescaleToNanoSeconds / 1e9, + } + return ret, nil +} + +func (p *Process) MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, error) { + lib, err := registerFuncs() + if err != nil { + return nil, err + } + defer lib.Close() + + var ti ProcTaskInfo + const tiSize = int32(unsafe.Sizeof(ti)) + procPidInfo(p.Pid, common.PROC_PIDTASKINFO, 0, uintptr(unsafe.Pointer(&ti)), tiSize) + + ret := &MemoryInfoStat{ + RSS: uint64(ti.Resident_size), + VMS: uint64(ti.Virtual_size), + Swap: uint64(ti.Pageins), + } + return ret, nil +} diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_amd64.go b/vendor/github.com/shirou/gopsutil/v4/process/process_darwin_amd64.go similarity index 87% rename from vendor/github.com/shirou/gopsutil/v3/process/process_darwin_amd64.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_darwin_amd64.go index b353e5eac7b..890a5d5331a 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_amd64.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_darwin_amd64.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause // Created by cgo -godefs - DO NOT EDIT // cgo -godefs types_darwin.go @@ -211,6 +212,27 @@ type Posix_cred struct { type Label struct{} +type ProcTaskInfo struct { + Virtual_size uint64 + Resident_size uint64 + Total_user uint64 + Total_system uint64 + Threads_user uint64 + Threads_system uint64 + Policy int32 + Faults int32 + Pageins int32 + Cow_faults int32 + Messages_sent int32 + Messages_received int32 + Syscalls_mach int32 + Syscalls_unix int32 + Csw int32 + Threadnum int32 + Numrunning int32 + Priority int32 +} + type AuditinfoAddr struct { Auid uint32 Mask AuMask diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_arm64.go b/vendor/github.com/shirou/gopsutil/v4/process/process_darwin_arm64.go similarity index 85% rename from 
vendor/github.com/shirou/gopsutil/v3/process/process_darwin_arm64.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_darwin_arm64.go index cbd6bdc793c..8075cf227d1 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_darwin_arm64.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_darwin_arm64.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build darwin && arm64 -// +build darwin,arm64 // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs process/types_darwin.go @@ -190,6 +190,27 @@ type Posix_cred struct{} type Label struct{} +type ProcTaskInfo struct { + Virtual_size uint64 + Resident_size uint64 + Total_user uint64 + Total_system uint64 + Threads_user uint64 + Threads_system uint64 + Policy int32 + Faults int32 + Pageins int32 + Cow_faults int32 + Messages_sent int32 + Messages_received int32 + Syscalls_mach int32 + Syscalls_unix int32 + Csw int32 + Threadnum int32 + Numrunning int32 + Priority int32 +} + type AuditinfoAddr struct { Auid uint32 Mask AuMask diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_fallback.go b/vendor/github.com/shirou/gopsutil/v4/process/process_fallback.go similarity index 92% rename from vendor/github.com/shirou/gopsutil/v3/process/process_fallback.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_fallback.go index 1a5d0c4b4a7..e5410ea049c 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_fallback.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_fallback.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build !darwin && !linux && !freebsd && !openbsd && !windows && !solaris && !plan9 -// +build !darwin,!linux,!freebsd,!openbsd,!windows,!solaris,!plan9 package process @@ -7,9 +7,9 @@ import ( "context" "syscall" - "github.com/shirou/gopsutil/v3/cpu" - "github.com/shirou/gopsutil/v3/internal/common" - "github.com/shirou/gopsutil/v3/net" + "github.com/shirou/gopsutil/v4/cpu" + "github.com/shirou/gopsutil/v4/internal/common" + "github.com/shirou/gopsutil/v4/net" ) type Signal = syscall.Signal @@ -82,15 +82,15 @@ func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) { return false, common.ErrNotImplementedError } -func (p *Process) UidsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) UidsWithContext(ctx context.Context) ([]uint32, error) { return nil, common.ErrNotImplementedError } -func (p *Process) GidsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) GidsWithContext(ctx context.Context) ([]uint32, error) { return nil, common.ErrNotImplementedError } -func (p *Process) GroupsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) GroupsWithContext(ctx context.Context) ([]uint32, error) { return nil, common.ErrNotImplementedError } @@ -166,7 +166,7 @@ func (p *Process) ConnectionsWithContext(ctx context.Context) ([]net.ConnectionS return nil, common.ErrNotImplementedError } -func (p *Process) ConnectionsMaxWithContext(ctx context.Context, max int) ([]net.ConnectionStat, error) { +func (p *Process) ConnectionsMaxWithContext(ctx context.Context, maxConn int) ([]net.ConnectionStat, error) { return nil, common.ErrNotImplementedError } diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd.go b/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd.go similarity index 84% rename from vendor/github.com/shirou/gopsutil/v3/process/process_freebsd.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_freebsd.go 
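The Uids/Gids/Groups signature change from []int32 to []uint32 recurs in every platform file renamed here, and it is a compile-time break for callers that stored the result in []int32 variables. A minimal caller-side sketch against the vendored v4 path (the current process is just an example target):

package main

import (
	"fmt"
	"os"

	"github.com/shirou/gopsutil/v4/process"
)

func main() {
	p, err := process.NewProcess(int32(os.Getpid()))
	if err != nil {
		fmt.Println(err)
		return
	}
	uids, err := p.Uids() // now []uint32; v3 returned []int32
	if err != nil {
		fmt.Println(err)
		return
	}
	for _, uid := range uids {
		fmt.Println("uid:", uid)
	}
}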
index 40b10e14fcd..76373736bfc 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd.go @@ -1,19 +1,22 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build freebsd -// +build freebsd package process import ( "bytes" "context" + "errors" "path/filepath" + "sort" "strconv" "strings" - cpu "github.com/shirou/gopsutil/v3/cpu" - "github.com/shirou/gopsutil/v3/internal/common" - net "github.com/shirou/gopsutil/v3/net" "golang.org/x/sys/unix" + + cpu "github.com/shirou/gopsutil/v4/cpu" + "github.com/shirou/gopsutil/v4/internal/common" + net "github.com/shirou/gopsutil/v4/net" ) func pidsWithContext(ctx context.Context) ([]int32, error) { @@ -83,10 +86,7 @@ func (p *Process) CmdlineWithContext(ctx context.Context) (string, error) { return "", err } ret := strings.FieldsFunc(string(buf), func(r rune) bool { - if r == '\u0000' { - return true - } - return false + return r == '\u0000' }) return strings.Join(ret, " "), nil @@ -157,40 +157,40 @@ func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) { return strings.IndexByte(string(out), '+') != -1, nil } -func (p *Process) UidsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) UidsWithContext(ctx context.Context) ([]uint32, error) { k, err := p.getKProc() if err != nil { return nil, err } - uids := make([]int32, 0, 3) + uids := make([]uint32, 0, 3) - uids = append(uids, int32(k.Ruid), int32(k.Uid), int32(k.Svuid)) + uids = append(uids, uint32(k.Ruid), uint32(k.Uid), uint32(k.Svuid)) return uids, nil } -func (p *Process) GidsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) GidsWithContext(ctx context.Context) ([]uint32, error) { k, err := p.getKProc() if err != nil { return nil, err } - gids := make([]int32, 0, 3) - gids = append(gids, int32(k.Rgid), int32(k.Ngroups), int32(k.Svgid)) + gids := make([]uint32, 0, 3) + gids = append(gids, uint32(k.Rgid), uint32(k.Ngroups), uint32(k.Svgid)) return gids, nil } -func (p *Process) GroupsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) GroupsWithContext(ctx context.Context) ([]uint32, error) { k, err := p.getKProc() if err != nil { return nil, err } - groups := make([]int32, k.Ngroups) + groups := make([]uint32, k.Ngroups) for i := int16(0); i < k.Ngroups; i++ { - groups[i] = int32(k.Groups[i]) + groups[i] = uint32(k.Groups[i]) } return groups, nil @@ -270,18 +270,21 @@ func (p *Process) MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, e } func (p *Process) ChildrenWithContext(ctx context.Context) ([]*Process, error) { - pids, err := common.CallPgrepWithContext(ctx, invoke, p.Pid) + procs, err := ProcessesWithContext(ctx) if err != nil { - return nil, err + return nil, nil } - ret := make([]*Process, 0, len(pids)) - for _, pid := range pids { - np, err := NewProcessWithContext(ctx, pid) + ret := make([]*Process, 0, len(procs)) + for _, proc := range procs { + ppid, err := proc.PpidWithContext(ctx) if err != nil { - return nil, err + continue + } + if ppid == p.Pid { + ret = append(ret, proc) } - ret = append(ret, np) } + sort.Slice(ret, func(i, j int) bool { return ret[i].Pid < ret[j].Pid }) return ret, nil } @@ -289,8 +292,8 @@ func (p *Process) ConnectionsWithContext(ctx context.Context) ([]net.ConnectionS return net.ConnectionsPidWithContext(ctx, "all", p.Pid) } -func (p *Process) ConnectionsMaxWithContext(ctx context.Context, max int) ([]net.ConnectionStat, error) { - return net.ConnectionsPidMaxWithContext(ctx, "all", 
p.Pid, max) +func (p *Process) ConnectionsMaxWithContext(ctx context.Context, maxConn int) ([]net.ConnectionStat, error) { + return net.ConnectionsPidMaxWithContext(ctx, "all", p.Pid, maxConn) } func ProcessesWithContext(ctx context.Context) ([]*Process, error) { @@ -331,7 +334,7 @@ func (p *Process) getKProc() (*KinfoProc, error) { return nil, err } if length != sizeOfKinfoProc { - return nil, err + return nil, errors.New("unexpected size of KinfoProc") } k, err := parseKinfoProc(buf) diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_386.go b/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_386.go similarity index 98% rename from vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_386.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_386.go index 08ab333b435..279ba9fbb40 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_386.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_386.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause // Created by cgo -godefs - DO NOT EDIT // cgo -godefs types_freebsd.go diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_amd64.go b/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_amd64.go similarity index 98% rename from vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_amd64.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_amd64.go index 560e627d249..f3b70ec1bec 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_amd64.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_amd64.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause // Created by cgo -godefs - DO NOT EDIT // cgo -godefs types_freebsd.go diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_arm.go b/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_arm.go similarity index 98% rename from vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_arm.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_arm.go index 81ae0b9a8d4..75ed3063050 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_arm.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_arm.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause // Created by cgo -godefs - DO NOT EDIT // cgo -godefs types_freebsd.go diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_arm64.go b/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_arm64.go similarity index 98% rename from vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_arm64.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_arm64.go index 73ac08201a0..3dc301c027f 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_freebsd_arm64.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_arm64.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build freebsd && arm64 // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
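Both the FreeBSD hunk above and the Linux hunk below replace the pgrep shell-out with an in-process scan by parent PID, so Children now returns an empty slice rather than ErrorNoChildren when nothing matches; per the deprecation notice earlier in this patch, callers should test the slice length. A brief sketch:

package main

import (
	"fmt"
	"os"

	"github.com/shirou/gopsutil/v4/process"
)

func main() {
	p, err := process.NewProcess(int32(os.Getpid()))
	if err != nil {
		fmt.Println(err)
		return
	}
	children, err := p.Children()
	if err != nil {
		fmt.Println(err)
		return
	}
	if len(children) == 0 { // replaces the old ErrorNoChildren check
		fmt.Println("no child processes")
		return
	}
	for _, c := range children {
		fmt.Println("child pid:", c.Pid)
	}
}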
diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_linux.go b/vendor/github.com/shirou/gopsutil/v4/process/process_linux.go similarity index 94% rename from vendor/github.com/shirou/gopsutil/v3/process/process_linux.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_linux.go index 557435b345d..68a8c88c4a9 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_linux.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_linux.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build linux -// +build linux package process @@ -12,15 +12,16 @@ import ( "math" "os" "path/filepath" + "sort" "strconv" "strings" "github.com/tklauser/go-sysconf" "golang.org/x/sys/unix" - "github.com/shirou/gopsutil/v3/cpu" - "github.com/shirou/gopsutil/v3/internal/common" - "github.com/shirou/gopsutil/v3/net" + "github.com/shirou/gopsutil/v4/cpu" + "github.com/shirou/gopsutil/v4/internal/common" + "github.com/shirou/gopsutil/v4/net" ) var pageSize = uint64(os.Getpagesize()) @@ -148,26 +149,26 @@ func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) { return pgid == tpgid, nil } -func (p *Process) UidsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) UidsWithContext(ctx context.Context) ([]uint32, error) { err := p.fillFromStatusWithContext(ctx) if err != nil { - return []int32{}, err + return []uint32{}, err } return p.uids, nil } -func (p *Process) GidsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) GidsWithContext(ctx context.Context) ([]uint32, error) { err := p.fillFromStatusWithContext(ctx) if err != nil { - return []int32{}, err + return []uint32{}, err } return p.gids, nil } -func (p *Process) GroupsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) GroupsWithContext(ctx context.Context) ([]uint32, error) { err := p.fillFromStatusWithContext(ctx) if err != nil { - return []int32{}, err + return []uint32{}, err } return p.groups, nil } @@ -338,21 +339,34 @@ func (p *Process) PageFaultsWithContext(ctx context.Context) (*PageFaultsStat, e } func (p *Process) ChildrenWithContext(ctx context.Context) ([]*Process, error) { - pids, err := common.CallPgrepWithContext(ctx, invoke, p.Pid) + statFiles, err := filepath.Glob(common.HostProcWithContext(ctx, "[0-9]*/stat")) if err != nil { return nil, err } - if len(pids) == 0 { - return nil, ErrorNoChildren - } - ret := make([]*Process, 0, len(pids)) - for _, pid := range pids { - np, err := NewProcessWithContext(ctx, pid) + ret := make([]*Process, 0, len(statFiles)) + for _, statFile := range statFiles { + statContents, err := os.ReadFile(statFile) if err != nil { - return nil, err + continue + } + fields := splitProcStat(statContents) + pid, err := strconv.ParseInt(fields[1], 10, 32) + if err != nil { + continue + } + ppid, err := strconv.ParseInt(fields[4], 10, 32) + if err != nil { + continue + } + if int32(ppid) == p.Pid { + np, err := NewProcessWithContext(ctx, int32(pid)) + if err != nil { + continue + } + ret = append(ret, np) } - ret = append(ret, np) } + sort.Slice(ret, func(i, j int) bool { return ret[i].Pid < ret[j].Pid }) return ret, nil } @@ -373,8 +387,8 @@ func (p *Process) ConnectionsWithContext(ctx context.Context) ([]net.ConnectionS return net.ConnectionsPidWithContext(ctx, "all", p.Pid) } -func (p *Process) ConnectionsMaxWithContext(ctx context.Context, max int) ([]net.ConnectionStat, error) { - return net.ConnectionsPidMaxWithContext(ctx, "all", p.Pid, max) +func (p *Process) ConnectionsMaxWithContext(ctx 
context.Context, maxConn int) ([]net.ConnectionStat, error) { + return net.ConnectionsPidMaxWithContext(ctx, "all", p.Pid, maxConn) } func (p *Process) MemoryMapsWithContext(ctx context.Context, grouped bool) (*[]MemoryMapsStat, error) { @@ -399,7 +413,9 @@ func (p *Process) MemoryMapsWithContext(ctx context.Context, grouped bool) (*[]M // function of parsing a block getBlock := func(firstLine []string, block []string) (MemoryMapsStat, error) { m := MemoryMapsStat{} - m.Path = firstLine[len(firstLine)-1] + if len(firstLine) >= 6 { + m.Path = strings.Join(firstLine[5:], " ") + } for _, line := range block { if strings.Contains(line, "VmFlags") { @@ -727,8 +743,12 @@ func (p *Process) fillFromIOWithContext(ctx context.Context) (*IOCountersStat, e case "syscw": ret.WriteCount = t case "read_bytes": - ret.ReadBytes = t + ret.DiskReadBytes = t case "write_bytes": + ret.DiskWriteBytes = t + case "rchar": + ret.ReadBytes = t + case "wchar": ret.WriteBytes = t } } @@ -866,32 +886,32 @@ func (p *Process) fillFromStatusWithContext(ctx context.Context) error { } p.tgid = int32(pval) case "Uid": - p.uids = make([]int32, 0, 4) + p.uids = make([]uint32, 0, 4) for _, i := range strings.Split(value, "\t") { v, err := strconv.ParseInt(i, 10, 32) if err != nil { return err } - p.uids = append(p.uids, int32(v)) + p.uids = append(p.uids, uint32(v)) } case "Gid": - p.gids = make([]int32, 0, 4) + p.gids = make([]uint32, 0, 4) for _, i := range strings.Split(value, "\t") { v, err := strconv.ParseInt(i, 10, 32) if err != nil { return err } - p.gids = append(p.gids, int32(v)) + p.gids = append(p.gids, uint32(v)) } case "Groups": groups := strings.Fields(value) - p.groups = make([]int32, 0, len(groups)) + p.groups = make([]uint32, 0, len(groups)) for _, i := range groups { - v, err := strconv.ParseInt(i, 10, 32) + v, err := strconv.ParseUint(i, 10, 32) if err != nil { return err } - p.groups = append(p.groups, int32(v)) + p.groups = append(p.groups, uint32(v)) } case "Threads": v, err := strconv.ParseInt(value, 10, 32) @@ -1076,8 +1096,7 @@ func (p *Process) fillFromTIDStatWithContext(ctx context.Context, tid int32) (ui if err != nil { return 0, 0, nil, 0, 0, 0, nil, err } - ctime := (t / uint64(clockTicks)) + uint64(bootTime) - createTime := int64(ctime * 1000) + createTime := int64((t * 1000 / uint64(clockTicks)) + uint64(bootTime*1000)) rtpriority, err := strconv.ParseInt(fields[18], 10, 32) if err != nil { diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd.go b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd.go similarity index 85% rename from vendor/github.com/shirou/gopsutil/v3/process/process_openbsd.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_openbsd.go index a58c5eb113c..5e8a9e0b45e 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build openbsd -// +build openbsd package process @@ -7,17 +7,19 @@ import ( "bytes" "context" "encoding/binary" + "errors" "fmt" "io" "path/filepath" + "sort" "strconv" "strings" "unsafe" - cpu "github.com/shirou/gopsutil/v3/cpu" - "github.com/shirou/gopsutil/v3/internal/common" - mem "github.com/shirou/gopsutil/v3/mem" - net "github.com/shirou/gopsutil/v3/net" + cpu "github.com/shirou/gopsutil/v4/cpu" + "github.com/shirou/gopsutil/v4/internal/common" + mem "github.com/shirou/gopsutil/v4/mem" + net "github.com/shirou/gopsutil/v4/net" "golang.org/x/sys/unix" ) @@ 
-68,7 +70,12 @@ func (p *Process) NameWithContext(ctx context.Context) (string, error) { } func (p *Process) CwdWithContext(ctx context.Context) (string, error) { - return "", common.ErrNotImplementedError + mib := []int32{CTLKern, KernProcCwd, p.Pid} + buf, _, err := common.CallSyscall(mib) + if err != nil { + return "", err + } + return common.ByteToString(buf), nil } func (p *Process) ExeWithContext(ctx context.Context) (string, error) { @@ -171,40 +178,40 @@ func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) { return strings.IndexByte(string(out), '+') != -1, nil } -func (p *Process) UidsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) UidsWithContext(ctx context.Context) ([]uint32, error) { k, err := p.getKProc() if err != nil { return nil, err } - uids := make([]int32, 0, 3) + uids := make([]uint32, 0, 3) - uids = append(uids, int32(k.Ruid), int32(k.Uid), int32(k.Svuid)) + uids = append(uids, uint32(k.Ruid), uint32(k.Uid), uint32(k.Svuid)) return uids, nil } -func (p *Process) GidsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) GidsWithContext(ctx context.Context) ([]uint32, error) { k, err := p.getKProc() if err != nil { return nil, err } - gids := make([]int32, 0, 3) - gids = append(gids, int32(k.Rgid), int32(k.Ngroups), int32(k.Svgid)) + gids := make([]uint32, 0, 3) + gids = append(gids, uint32(k.Rgid), uint32(k.Ngroups), uint32(k.Svgid)) return gids, nil } -func (p *Process) GroupsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) GroupsWithContext(ctx context.Context) ([]uint32, error) { k, err := p.getKProc() if err != nil { return nil, err } - groups := make([]int32, k.Ngroups) + groups := make([]uint32, k.Ngroups) for i := int16(0); i < k.Ngroups; i++ { - groups[i] = int32(k.Groups[i]) + groups[i] = uint32(k.Groups[i]) } return groups, nil @@ -280,18 +287,21 @@ func (p *Process) MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, e } func (p *Process) ChildrenWithContext(ctx context.Context) ([]*Process, error) { - pids, err := common.CallPgrepWithContext(ctx, invoke, p.Pid) + procs, err := ProcessesWithContext(ctx) if err != nil { - return nil, err + return nil, nil } - ret := make([]*Process, 0, len(pids)) - for _, pid := range pids { - np, err := NewProcessWithContext(ctx, pid) + ret := make([]*Process, 0, len(procs)) + for _, proc := range procs { + ppid, err := proc.PpidWithContext(ctx) if err != nil { - return nil, err + continue + } + if ppid == p.Pid { + ret = append(ret, proc) } - ret = append(ret, np) } + sort.Slice(ret, func(i, j int) bool { return ret[i].Pid < ret[j].Pid }) return ret, nil } @@ -299,7 +309,7 @@ func (p *Process) ConnectionsWithContext(ctx context.Context) ([]net.ConnectionS return nil, common.ErrNotImplementedError } -func (p *Process) ConnectionsMaxWithContext(ctx context.Context, max int) ([]net.ConnectionStat, error) { +func (p *Process) ConnectionsMaxWithContext(ctx context.Context, maxConn int) ([]net.ConnectionStat, error) { return nil, common.ErrNotImplementedError } @@ -338,7 +348,7 @@ func (p *Process) getKProc() (*KinfoProc, error) { return nil, err } if length != sizeOfKinfoProc { - return nil, err + return nil, errors.New("unexpected size of KinfoProc") } k, err := parseKinfoProc(buf) diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_386.go b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_386.go similarity index 98% rename from vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_386.go rename 
to vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_386.go index f4ed0249172..5b84706a7cf 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_386.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_386.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build openbsd && 386 -// +build openbsd,386 // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs process/types_openbsd.go @@ -14,6 +14,7 @@ const ( KernProcProc = 8 KernProcPathname = 12 KernProcArgs = 55 + KernProcCwd = 78 KernProcArgv = 1 KernProcEnv = 3 ) diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_amd64.go b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_amd64.go similarity index 98% rename from vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_amd64.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_amd64.go index 8607422b5f5..3229bb32c28 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_amd64.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_amd64.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause // Created by cgo -godefs - DO NOT EDIT // cgo -godefs types_openbsd.go @@ -11,6 +12,7 @@ const ( KernProcProc = 8 KernProcPathname = 12 KernProcArgs = 55 + KernProcCwd = 78 KernProcArgv = 1 KernProcEnv = 3 ) diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_arm.go b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_arm.go similarity index 98% rename from vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_arm.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_arm.go index b94429f2e96..6f74ce75637 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_arm.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_arm.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build openbsd && arm -// +build openbsd,arm // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs process/types_openbsd.go @@ -14,6 +14,7 @@ const ( KernProcProc = 8 KernProcPathname = 12 KernProcArgs = 55 + KernProcCwd = 78 KernProcArgv = 1 KernProcEnv = 3 ) diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_arm64.go b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_arm64.go similarity index 98% rename from vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_arm64.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_arm64.go index a3291b8caf1..91045456258 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_arm64.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_arm64.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build openbsd && arm64 -// +build openbsd,arm64 // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs process/types_openbsd.go @@ -14,6 +14,7 @@ const ( KernProcProc = 8 KernProcPathname = 12 KernProcArgs = 55 + KernProcCwd = 78 KernProcArgv = 1 KernProcEnv = 3 ) diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_riscv64.go b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_riscv64.go similarity index 98% rename from vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_riscv64.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_riscv64.go index 076f095eaae..e3e0d36a09e 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_openbsd_riscv64.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_riscv64.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build openbsd && riscv64 -// +build openbsd,riscv64 // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs process/types_openbsd.go @@ -14,6 +14,7 @@ const ( KernProcProc = 8 KernProcPathname = 12 KernProcArgs = 55 + KernProcCwd = 78 KernProcArgv = 1 KernProcEnv = 3 ) diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_plan9.go b/vendor/github.com/shirou/gopsutil/v4/process/process_plan9.go similarity index 92% rename from vendor/github.com/shirou/gopsutil/v3/process/process_plan9.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_plan9.go index bc4bc062a99..c82e54a75bc 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_plan9.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_plan9.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build plan9 -// +build plan9 package process @@ -7,9 +7,9 @@ import ( "context" "syscall" - "github.com/shirou/gopsutil/v3/cpu" - "github.com/shirou/gopsutil/v3/internal/common" - "github.com/shirou/gopsutil/v3/net" + "github.com/shirou/gopsutil/v4/cpu" + "github.com/shirou/gopsutil/v4/internal/common" + "github.com/shirou/gopsutil/v4/net" ) type Signal = syscall.Note @@ -82,15 +82,15 @@ func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) { return false, common.ErrNotImplementedError } -func (p *Process) UidsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) UidsWithContext(ctx context.Context) ([]uint32, error) { return nil, common.ErrNotImplementedError } -func (p *Process) GidsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) GidsWithContext(ctx context.Context) ([]uint32, error) { return nil, common.ErrNotImplementedError } -func (p *Process) GroupsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) GroupsWithContext(ctx context.Context) ([]uint32, error) { return nil, common.ErrNotImplementedError } @@ -166,7 +166,7 @@ func (p *Process) ConnectionsWithContext(ctx context.Context) ([]net.ConnectionS return nil, common.ErrNotImplementedError } -func (p *Process) ConnectionsMaxWithContext(ctx context.Context, max int) ([]net.ConnectionStat, error) { +func (p *Process) ConnectionsMaxWithContext(ctx context.Context, maxConn int) ([]net.ConnectionStat, error) { return nil, common.ErrNotImplementedError } diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_posix.go b/vendor/github.com/shirou/gopsutil/v4/process/process_posix.go similarity index 97% rename from vendor/github.com/shirou/gopsutil/v3/process/process_posix.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_posix.go index a01f9ecfc0d..caa9d3f7c03 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_posix.go +++ 
b/vendor/github.com/shirou/gopsutil/v4/process/process_posix.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build linux || freebsd || openbsd || darwin || solaris -// +build linux freebsd openbsd darwin solaris package process @@ -16,7 +16,7 @@ import ( "golang.org/x/sys/unix" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" ) type Signal = syscall.Signal diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_solaris.go b/vendor/github.com/shirou/gopsutil/v4/process/process_solaris.go similarity index 94% rename from vendor/github.com/shirou/gopsutil/v3/process/process_solaris.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_solaris.go index dd4bd4760bd..5c8d4d3b1ef 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_solaris.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_solaris.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: BSD-3-Clause package process import ( @@ -7,9 +8,9 @@ import ( "strconv" "strings" - "github.com/shirou/gopsutil/v3/cpu" - "github.com/shirou/gopsutil/v3/internal/common" - "github.com/shirou/gopsutil/v3/net" + "github.com/shirou/gopsutil/v4/cpu" + "github.com/shirou/gopsutil/v4/internal/common" + "github.com/shirou/gopsutil/v4/net" ) type MemoryMapsStat struct { @@ -95,15 +96,15 @@ func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) { return false, common.ErrNotImplementedError } -func (p *Process) UidsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) UidsWithContext(ctx context.Context) ([]uint32, error) { return nil, common.ErrNotImplementedError } -func (p *Process) GidsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) GidsWithContext(ctx context.Context) ([]uint32, error) { return nil, common.ErrNotImplementedError } -func (p *Process) GroupsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) GroupsWithContext(ctx context.Context) ([]uint32, error) { return nil, common.ErrNotImplementedError } @@ -180,7 +181,7 @@ func (p *Process) ConnectionsWithContext(ctx context.Context) ([]net.ConnectionS return nil, common.ErrNotImplementedError } -func (p *Process) ConnectionsMaxWithContext(ctx context.Context, max int) ([]net.ConnectionStat, error) { +func (p *Process) ConnectionsMaxWithContext(ctx context.Context, maxConn int) ([]net.ConnectionStat, error) { return nil, common.ErrNotImplementedError } diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_windows.go b/vendor/github.com/shirou/gopsutil/v4/process/process_windows.go similarity index 97% rename from vendor/github.com/shirou/gopsutil/v3/process/process_windows.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_windows.go index f2053d98553..b00c671e9f3 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_windows.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_windows.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build windows -// +build windows package process @@ -18,9 +18,9 @@ import ( "unicode/utf16" "unsafe" - "github.com/shirou/gopsutil/v3/cpu" - "github.com/shirou/gopsutil/v3/internal/common" - "github.com/shirou/gopsutil/v3/net" + "github.com/shirou/gopsutil/v4/cpu" + "github.com/shirou/gopsutil/v4/internal/common" + "github.com/shirou/gopsutil/v4/net" "golang.org/x/sys/windows" ) @@ -43,6 +43,7 @@ var ( procGetPriorityClass = common.Modkernel32.NewProc("GetPriorityClass") procGetProcessIoCounters = 
common.Modkernel32.NewProc("GetProcessIoCounters") procGetNativeSystemInfo = common.Modkernel32.NewProc("GetNativeSystemInfo") + procGetProcessHandleCount = common.Modkernel32.NewProc("GetProcessHandleCount") processorArchitecture uint ) @@ -466,15 +467,15 @@ func (p *Process) UsernameWithContext(ctx context.Context) (string, error) { return domain + "\\" + user, err } -func (p *Process) UidsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) UidsWithContext(ctx context.Context) ([]uint32, error) { return nil, common.ErrNotImplementedError } -func (p *Process) GidsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) GidsWithContext(ctx context.Context) ([]uint32, error) { return nil, common.ErrNotImplementedError } -func (p *Process) GroupsWithContext(ctx context.Context) ([]int32, error) { +func (p *Process) GroupsWithContext(ctx context.Context) ([]uint32, error) { return nil, common.ErrNotImplementedError } @@ -548,8 +549,21 @@ func (p *Process) NumCtxSwitchesWithContext(ctx context.Context) (*NumCtxSwitche return nil, common.ErrNotImplementedError } +// NumFDsWithContext returns the number of handles for a process on Windows, +// not the number of file descriptors (FDs). func (p *Process) NumFDsWithContext(ctx context.Context) (int32, error) { - return 0, common.ErrNotImplementedError + handle, err := windows.OpenProcess(processQueryInformation, false, uint32(p.Pid)) + if err != nil { + return 0, err + } + defer windows.CloseHandle(handle) + + var handleCount uint32 + ret, _, err := procGetProcessHandleCount.Call(uintptr(handle), uintptr(unsafe.Pointer(&handleCount))) + if ret == 0 { + return 0, err + } + return int32(handleCount), nil } func (p *Process) NumThreadsWithContext(ctx context.Context) (int32, error) { @@ -744,7 +758,7 @@ func (p *Process) ConnectionsWithContext(ctx context.Context) ([]net.ConnectionS return net.ConnectionsPidWithContext(ctx, "all", p.Pid) } -func (p *Process) ConnectionsMaxWithContext(ctx context.Context, max int) ([]net.ConnectionStat, error) { +func (p *Process) ConnectionsMaxWithContext(ctx context.Context, maxConn int) ([]net.ConnectionStat, error) { return nil, common.ErrNotImplementedError } diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_windows_32bit.go b/vendor/github.com/shirou/gopsutil/v4/process/process_windows_32bit.go similarity index 97% rename from vendor/github.com/shirou/gopsutil/v3/process/process_windows_32bit.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_windows_32bit.go index db4d453349c..2b231c79d04 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_windows_32bit.go +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_windows_32bit.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build (windows && 386) || (windows && arm) -// +build windows,386 windows,arm package process @@ -8,7 +8,7 @@ import ( "syscall" "unsafe" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" "golang.org/x/sys/windows" ) diff --git a/vendor/github.com/shirou/gopsutil/v3/process/process_windows_64bit.go b/vendor/github.com/shirou/gopsutil/v4/process/process_windows_64bit.go similarity index 95% rename from vendor/github.com/shirou/gopsutil/v3/process/process_windows_64bit.go rename to vendor/github.com/shirou/gopsutil/v4/process/process_windows_64bit.go index 74c6212cfde..befe5213900 100644 --- a/vendor/github.com/shirou/gopsutil/v3/process/process_windows_64bit.go +++ 
b/vendor/github.com/shirou/gopsutil/v4/process/process_windows_64bit.go @@ -1,5 +1,5 @@ +// SPDX-License-Identifier: BSD-3-Clause //go:build (windows && amd64) || (windows && arm64) -// +build windows,amd64 windows,arm64 package process @@ -7,7 +7,7 @@ import ( "syscall" "unsafe" - "github.com/shirou/gopsutil/v3/internal/common" + "github.com/shirou/gopsutil/v4/internal/common" "golang.org/x/sys/windows" ) diff --git a/vendor/github.com/shoenig/go-m1cpu/.golangci.yaml b/vendor/github.com/shoenig/go-m1cpu/.golangci.yaml deleted file mode 100644 index dc6fefb979e..00000000000 --- a/vendor/github.com/shoenig/go-m1cpu/.golangci.yaml +++ /dev/null @@ -1,12 +0,0 @@ -run: - timeout: 5m -linters: - enable: - - gofmt - - errcheck - - errname - - errorlint - - bodyclose - - durationcheck - - whitespace - diff --git a/vendor/github.com/shoenig/go-m1cpu/LICENSE b/vendor/github.com/shoenig/go-m1cpu/LICENSE deleted file mode 100644 index e87a115e462..00000000000 --- a/vendor/github.com/shoenig/go-m1cpu/LICENSE +++ /dev/null @@ -1,363 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. "Contributor" - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. "Contributor Version" - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor's Contribution. - -1.3. "Contribution" - - means Covered Software of a particular Contributor. - -1.4. "Covered Software" - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. "Incompatible With Secondary Licenses" - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the terms of - a Secondary License. - -1.6. "Executable Form" - - means any form of the work other than Source Code Form. - -1.7. "Larger Work" - - means a work that combines Covered Software with other material, in a - separate file or files, that is not Covered Software. - -1.8. "License" - - means this document. - -1.9. "Licensable" - - means having the right to grant, to the maximum extent possible, whether - at the time of the initial grant or subsequently, any and all of the - rights conveyed by this License. - -1.10. "Modifications" - - means any of the following: - - a. any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. "Patent Claims" of a Contributor - - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the License, - by the making, using, selling, offering for sale, having made, import, - or transfer of either its Contributions or its Contributor Version. - -1.12. "Secondary License" - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. 
"Source Code Form" - - means the form of the work preferred for making modifications. - -1.14. "You" (or "Your") - - means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, "control" means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution - become effective for each Contribution on the date the Contributor first - distributes such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under - this License. No additional rights or licenses will be implied from the - distribution or licensing of Covered Software under this License. - Notwithstanding Section 2.1(b) above, no patent license is granted by a - Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of - its Contributions. - - This License does not grant any rights in the trademarks, service marks, - or logos of any Contributor (except as may be necessary to comply with - the notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this - License (see Section 10.2) or under the terms of a Secondary License (if - permitted under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its - Contributions are its original creation(s) or it has sufficient rights to - grant the rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under - applicable copyright doctrines of fair use, fair dealing, or other - equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under - the terms of this License. 
You must inform recipients that the Source - Code Form of the Covered Software is governed by the terms of this - License, and how they can obtain a copy of this License. You may not - attempt to alter or restrict the recipients' rights in the Source Code - Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter the - recipients' rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for - the Covered Software. If the Larger Work is a combination of Covered - Software with a work governed by one or more Secondary Licenses, and the - Covered Software is not Incompatible With Secondary Licenses, this - License permits You to additionally distribute such Covered Software - under the terms of such Secondary License(s), so that the recipient of - the Larger Work may, at their option, further distribute the Covered - Software under the terms of either this License or such Secondary - License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices - (including copyright notices, patent notices, disclaimers of warranty, or - limitations of liability) contained within the Source Code Form of the - Covered Software, except that You may alter any license notices to the - extent required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on - behalf of any Contributor. You must make it absolutely clear that any - such warranty, support, indemnity, or liability obligation is offered by - You alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, - judicial order, or regulation then You must: (a) comply with the terms of - this License to the maximum extent possible; and (b) describe the - limitations and the code they affect. Such description must be placed in a - text file included with all distributions of the Covered Software under - this License. Except to the extent prohibited by statute or regulation, - such description must be sufficiently detailed for a recipient of ordinary - skill to be able to understand it. - -5. Termination - -5.1. 
The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing - basis, if such Contributor fails to notify You of the non-compliance by - some reasonable means prior to 60 days after You have come back into - compliance. Moreover, Your grants from a particular Contributor are - reinstated on an ongoing basis if such Contributor notifies You of the - non-compliance by some reasonable means, this is the first time You have - received notice of non-compliance with this License from such - Contributor, and You become compliant prior to 30 days after Your receipt - of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, - counter-claims, and cross-claims) alleging that a Contributor Version - directly or indirectly infringes any patent, then the rights granted to - You by any and all Contributors for the Covered Software under Section - 2.1 of this License shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an "as is" basis, - without warranty of any kind, either expressed, implied, or statutory, - including, without limitation, warranties that the Covered Software is free - of defects, merchantable, fit for a particular purpose or non-infringing. - The entire risk as to the quality and performance of the Covered Software - is with You. Should any Covered Software prove defective in any respect, - You (not any Contributor) assume the cost of any necessary servicing, - repair, or correction. This disclaimer of warranty constitutes an essential - part of this License. No use of any Covered Software is authorized under - this License except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from - such party's negligence to the extent applicable law prohibits such - limitation. Some jurisdictions do not allow the exclusion or limitation of - incidental or consequential damages, so this exclusion and limitation may - not apply to You. - -8. 
Litigation - - Any litigation relating to this License may be brought only in the courts - of a jurisdiction where the defendant maintains its principal place of - business and such litigation shall be governed by laws of that - jurisdiction, without reference to its conflict-of-law provisions. Nothing - in this Section shall prevent a party's ability to bring cross-claims or - counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. Any law or regulation which provides that - the language of a contract shall be construed against the drafter shall not - be used to construe this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version - of the License under which You originally received the Covered Software, - or under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a - modified version of this License if you rename the license and remove - any references to the name of the license steward (except to note that - such modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary - Licenses If You choose to distribute Source Code Form that is - Incompatible With Secondary Licenses under the terms of this version of - the License, the notice described in Exhibit B of this License must be - attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, -then You may include the notice in a location (such as a LICENSE file in a -relevant directory) where a recipient would be likely to look for such a -notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - "Incompatible With Secondary Licenses" Notice - - This Source Code Form is "Incompatible - With Secondary Licenses", as defined by - the Mozilla Public License, v. 2.0. - diff --git a/vendor/github.com/shoenig/go-m1cpu/Makefile b/vendor/github.com/shoenig/go-m1cpu/Makefile deleted file mode 100644 index 28d786397d4..00000000000 --- a/vendor/github.com/shoenig/go-m1cpu/Makefile +++ /dev/null @@ -1,12 +0,0 @@ -SHELL = bash - -default: test - -.PHONY: test -test: - @echo "--> Running Tests ..." - @go test -v -race ./... - -vet: - @echo "--> Vet Go sources ..." - @go vet ./... 
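The gopsutil hunks earlier in this patch are a mechanical v3-to-v4 migration: the import path gains `/v4`, `UidsWithContext`, `GidsWithContext`, and `GroupsWithContext` now return `[]uint32` instead of `[]int32`, and the `max` parameter of `ConnectionsMaxWithContext` is renamed to `maxConn`. The go-m1cpu deletions around this point follow from the same upgrade, since gopsutil v4 dropped that MPL-licensed dependency. A minimal caller-side sketch of what the signature changes mean downstream (the `NewProcess` constructor is assumed from the gopsutil API; it is not shown in these hunks):

```go
package main

import (
	"context"
	"fmt"
	"os"

	// v3 callers imported "github.com/shirou/gopsutil/v3/process".
	"github.com/shirou/gopsutil/v4/process"
)

func main() {
	p, err := process.NewProcess(int32(os.Getpid()))
	if err != nil {
		panic(err)
	}
	// v3 returned []int32 here; v4 returns []uint32, so variables and
	// struct fields holding the result must change type as well.
	uids, err := p.UidsWithContext(context.Background())
	if err != nil {
		// Still common.ErrNotImplementedError on several platforms,
		// as the hunks above show for plan9, solaris, and windows.
		fmt.Println("uids unavailable:", err)
		return
	}
	fmt.Println("uids:", uids)
}
```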
diff --git a/vendor/github.com/shoenig/go-m1cpu/README.md b/vendor/github.com/shoenig/go-m1cpu/README.md deleted file mode 100644 index 399657acf86..00000000000 --- a/vendor/github.com/shoenig/go-m1cpu/README.md +++ /dev/null @@ -1,66 +0,0 @@ -# m1cpu - -[![Go Reference](https://pkg.go.dev/badge/github.com/shoenig/go-m1cpu.svg)](https://pkg.go.dev/github.com/shoenig/go-m1cpu) -[![MPL License](https://img.shields.io/github/license/shoenig/go-m1cpu?color=g&style=flat-square)](https://github.com/shoenig/go-m1cpu/blob/main/LICENSE) -[![Run CI Tests](https://github.com/shoenig/go-m1cpu/actions/workflows/ci.yaml/badge.svg)](https://github.com/shoenig/go-m1cpu/actions/workflows/ci.yaml) - -The `go-m1cpu` module is a library for inspecting Apple Silicon CPUs in Go. - -Use the `m1cpu` Go package for looking up the CPU frequency for Apple M1 and M2 CPUs. - -# Install - -```shell -go get github.com/shoenig/go-m1cpu@latest -``` - -# CGO - -This package requires the use of [CGO](https://go.dev/blog/cgo). - -Extracting the CPU properties is done via Apple's [IOKit](https://developer.apple.com/documentation/iokit?language=objc) -framework, which is accessible only through system C libraries. - -# Example - -Simple Go program to print Apple Silicon M1/M2 CPU speeds. - -```go -package main - -import ( - "fmt" - - "github.com/shoenig/go-m1cpu" -) - -func main() { - fmt.Println("Apple Silicon", m1cpu.IsAppleSilicon()) - - fmt.Println("pCore GHz", m1cpu.PCoreGHz()) - fmt.Println("eCore GHz", m1cpu.ECoreGHz()) - - fmt.Println("pCore Hz", m1cpu.PCoreHz()) - fmt.Println("eCore Hz", m1cpu.ECoreHz()) -} -``` - -Using `go test` to print out available information. - -``` -➜ go test -v -run Show -=== RUN Test_Show - cpu_test.go:42: pCore Hz 3504000000 - cpu_test.go:43: eCore Hz 2424000000 - cpu_test.go:44: pCore GHz 3.504 - cpu_test.go:45: eCore GHz 2.424 - cpu_test.go:46: pCore count 8 - cpu_test.go:47: eCoreCount 4 - cpu_test.go:50: pCore Caches 196608 131072 16777216 - cpu_test.go:53: eCore Caches 131072 65536 4194304 ---- PASS: Test_Show (0.00s) -``` - -# License - -Open source under the [MPL](LICENSE) diff --git a/vendor/github.com/shoenig/go-m1cpu/cpu.go b/vendor/github.com/shoenig/go-m1cpu/cpu.go deleted file mode 100644 index 502a8cce92e..00000000000 --- a/vendor/github.com/shoenig/go-m1cpu/cpu.go +++ /dev/null @@ -1,213 +0,0 @@ -//go:build darwin && arm64 && cgo - -package m1cpu - -// #cgo LDFLAGS: -framework CoreFoundation -framework IOKit -// #include -// #include -// #include -// #include -// -// #if !defined(MAC_OS_VERSION_12_0) || MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_VERSION_12_0 -// #define kIOMainPortDefault kIOMasterPortDefault -// #endif -// -// #define HzToGHz(hz) ((hz) / 1000000000.0) -// -// UInt64 global_pCoreHz; -// UInt64 global_eCoreHz; -// int global_pCoreCount; -// int global_eCoreCount; -// int global_pCoreL1InstCacheSize; -// int global_eCoreL1InstCacheSize; -// int global_pCoreL1DataCacheSize; -// int global_eCoreL1DataCacheSize; -// int global_pCoreL2CacheSize; -// int global_eCoreL2CacheSize; -// char global_brand[32]; -// -// UInt64 getFrequency(CFTypeRef typeRef) { -// CFDataRef cfData = typeRef; -// -// CFIndex size = CFDataGetLength(cfData); -// UInt8 buf[size]; -// CFDataGetBytes(cfData, CFRangeMake(0, size), buf); -// -// UInt8 b1 = buf[size-5]; -// UInt8 b2 = buf[size-6]; -// UInt8 b3 = buf[size-7]; -// UInt8 b4 = buf[size-8]; -// -// UInt64 pCoreHz = 0x00000000FFFFFFFF & ((b1<<24) | (b2 << 16) | (b3 << 8) | (b4)); -// return pCoreHz; -// } -// -// int sysctl_int(const char * 
name) { -// int value = -1; -// size_t size = 8; -// sysctlbyname(name, &value, &size, NULL, 0); -// return value; -// } -// -// void sysctl_string(const char * name, char * dest) { -// size_t size = 32; -// sysctlbyname(name, dest, &size, NULL, 0); -// } -// -// void initialize() { -// global_pCoreCount = sysctl_int("hw.perflevel0.physicalcpu"); -// global_eCoreCount = sysctl_int("hw.perflevel1.physicalcpu"); -// global_pCoreL1InstCacheSize = sysctl_int("hw.perflevel0.l1icachesize"); -// global_eCoreL1InstCacheSize = sysctl_int("hw.perflevel1.l1icachesize"); -// global_pCoreL1DataCacheSize = sysctl_int("hw.perflevel0.l1dcachesize"); -// global_eCoreL1DataCacheSize = sysctl_int("hw.perflevel1.l1dcachesize"); -// global_pCoreL2CacheSize = sysctl_int("hw.perflevel0.l2cachesize"); -// global_eCoreL2CacheSize = sysctl_int("hw.perflevel1.l2cachesize"); -// sysctl_string("machdep.cpu.brand_string", global_brand); -// -// CFMutableDictionaryRef matching = IOServiceMatching("AppleARMIODevice"); -// io_iterator_t iter; -// IOServiceGetMatchingServices(kIOMainPortDefault, matching, &iter); -// -// const size_t bufsize = 512; -// io_object_t obj; -// while ((obj = IOIteratorNext(iter))) { -// char class[bufsize]; -// IOObjectGetClass(obj, class); -// char name[bufsize]; -// IORegistryEntryGetName(obj, name); -// -// if (strncmp(name, "pmgr", bufsize) == 0) { -// CFTypeRef pCoreRef = IORegistryEntryCreateCFProperty(obj, CFSTR("voltage-states5-sram"), kCFAllocatorDefault, 0); -// CFTypeRef eCoreRef = IORegistryEntryCreateCFProperty(obj, CFSTR("voltage-states1-sram"), kCFAllocatorDefault, 0); -// -// long long pCoreHz = getFrequency(pCoreRef); -// long long eCoreHz = getFrequency(eCoreRef); -// -// global_pCoreHz = pCoreHz; -// global_eCoreHz = eCoreHz; -// return; -// } -// } -// } -// -// UInt64 eCoreHz() { -// return global_eCoreHz; -// } -// -// UInt64 pCoreHz() { -// return global_pCoreHz; -// } -// -// Float64 eCoreGHz() { -// return HzToGHz(global_eCoreHz); -// } -// -// Float64 pCoreGHz() { -// return HzToGHz(global_pCoreHz); -// } -// -// int pCoreCount() { -// return global_pCoreCount; -// } -// -// int eCoreCount() { -// return global_eCoreCount; -// } -// -// int pCoreL1InstCacheSize() { -// return global_pCoreL1InstCacheSize; -// } -// -// int pCoreL1DataCacheSize() { -// return global_pCoreL1DataCacheSize; -// } -// -// int pCoreL2CacheSize() { -// return global_pCoreL2CacheSize; -// } -// -// int eCoreL1InstCacheSize() { -// return global_eCoreL1InstCacheSize; -// } -// -// int eCoreL1DataCacheSize() { -// return global_eCoreL1DataCacheSize; -// } -// -// int eCoreL2CacheSize() { -// return global_eCoreL2CacheSize; -// } -// -// char * modelName() { -// return global_brand; -// } -import "C" - -func init() { - C.initialize() -} - -// IsAppleSilicon returns true on this platform. -func IsAppleSilicon() bool { - return true -} - -// PCoreHZ returns the max frequency in Hertz of the P-Core of an Apple Silicon CPU. -func PCoreHz() uint64 { - return uint64(C.pCoreHz()) -} - -// ECoreHZ returns the max frequency in Hertz of the E-Core of an Apple Silicon CPU. -func ECoreHz() uint64 { - return uint64(C.eCoreHz()) -} - -// PCoreGHz returns the max frequency in Gigahertz of the P-Core of an Apple Silicon CPU. -func PCoreGHz() float64 { - return float64(C.pCoreGHz()) -} - -// ECoreGHz returns the max frequency in Gigahertz of the E-Core of an Apple Silicon CPU. -func ECoreGHz() float64 { - return float64(C.eCoreGHz()) -} - -// PCoreCount returns the number of physical P (performance) cores. 
-func PCoreCount() int { - return int(C.pCoreCount()) -} - -// ECoreCount returns the number of physical E (efficiency) cores. -func ECoreCount() int { - return int(C.eCoreCount()) -} - -// PCoreCacheSize returns the sizes of the P (performance) core cache sizes -// in the order of -// -// - L1 instruction cache -// - L1 data cache -// - L2 cache -func PCoreCache() (int, int, int) { - return int(C.pCoreL1InstCacheSize()), - int(C.pCoreL1DataCacheSize()), - int(C.pCoreL2CacheSize()) -} - -// ECoreCacheSize returns the sizes of the E (efficiency) core cache sizes -// in the order of -// -// - L1 instruction cache -// - L1 data cache -// - L2 cache -func ECoreCache() (int, int, int) { - return int(C.eCoreL1InstCacheSize()), - int(C.eCoreL1DataCacheSize()), - int(C.eCoreL2CacheSize()) -} - -// ModelName returns the model name of the CPU. -func ModelName() string { - return C.GoString(C.modelName()) -} diff --git a/vendor/github.com/shoenig/go-m1cpu/incompatible.go b/vendor/github.com/shoenig/go-m1cpu/incompatible.go deleted file mode 100644 index d425025aa84..00000000000 --- a/vendor/github.com/shoenig/go-m1cpu/incompatible.go +++ /dev/null @@ -1,53 +0,0 @@ -//go:build !darwin || !arm64 || !cgo - -package m1cpu - -// IsAppleSilicon return false on this platform. -func IsAppleSilicon() bool { - return false -} - -// PCoreHZ requires darwin/arm64 -func PCoreHz() uint64 { - panic("m1cpu: not a darwin/arm64 system") -} - -// ECoreHZ requires darwin/arm64 -func ECoreHz() uint64 { - panic("m1cpu: not a darwin/arm64 system") -} - -// PCoreGHz requires darwin/arm64 -func PCoreGHz() float64 { - panic("m1cpu: not a darwin/arm64 system") -} - -// ECoreGHz requires darwin/arm64 -func ECoreGHz() float64 { - panic("m1cpu: not a darwin/arm64 system") -} - -// PCoreCount requires darwin/arm64 -func PCoreCount() int { - panic("m1cpu: not a darwin/arm64 system") -} - -// ECoreCount requires darwin/arm64 -func ECoreCount() int { - panic("m1cpu: not a darwin/arm64 system") -} - -// PCoreCacheSize requires darwin/arm64 -func PCoreCache() (int, int, int) { - panic("m1cpu: not a darwin/arm64 system") -} - -// ECoreCacheSize requires darwin/arm64 -func ECoreCache() (int, int, int) { - panic("m1cpu: not a darwin/arm64 system") -} - -// ModelName requires darwin/arm64 -func ModelName() string { - panic("m1cpu: not a darwin/arm64 system") -} diff --git a/vendor/github.com/spf13/cobra/.golangci.yml b/vendor/github.com/spf13/cobra/.golangci.yml index a618ec24d84..2c8f4808c1a 100644 --- a/vendor/github.com/spf13/cobra/.golangci.yml +++ b/vendor/github.com/spf13/cobra/.golangci.yml @@ -26,33 +26,28 @@ linters: - errcheck #- exhaustive #- funlen - - gas #- gochecknoinits - goconst - #- gocritic + - gocritic #- gocyclo - #- gofmt + - gofmt - goimports - - golint #- gomnd #- goprintffuncname - #- gosec - #- gosimple + - gosec + - gosimple - govet - ineffassign - - interfacer #- lll - - maligned - - megacheck - #- misspell + - misspell #- nakedret #- noctx - #- nolintlint + - nolintlint #- rowserrcheck #- scopelint - #- staticcheck + - staticcheck #- structcheck ! 
deprecated since v1.49.0; replaced by 'unused' - #- stylecheck + - stylecheck #- typecheck - unconvert #- unparam diff --git a/vendor/github.com/spf13/cobra/active_help.go b/vendor/github.com/spf13/cobra/active_help.go index 5f965e057f2..25c30e3ccc3 100644 --- a/vendor/github.com/spf13/cobra/active_help.go +++ b/vendor/github.com/spf13/cobra/active_help.go @@ -17,21 +17,17 @@ package cobra import ( "fmt" "os" - "regexp" - "strings" ) const ( activeHelpMarker = "_activeHelp_ " // The below values should not be changed: programs will be using them explicitly // in their user documentation, and users will be using them explicitly. - activeHelpEnvVarSuffix = "_ACTIVE_HELP" - activeHelpGlobalEnvVar = "COBRA_ACTIVE_HELP" + activeHelpEnvVarSuffix = "ACTIVE_HELP" + activeHelpGlobalEnvVar = configEnvVarGlobalPrefix + "_" + activeHelpEnvVarSuffix activeHelpGlobalDisable = "0" ) -var activeHelpEnvVarPrefixSubstRegexp = regexp.MustCompile(`[^A-Z0-9_]`) - // AppendActiveHelp adds the specified string to the specified array to be used as ActiveHelp. // Such strings will be processed by the completion script and will be shown as ActiveHelp // to the user. @@ -60,8 +56,5 @@ func GetActiveHelpConfig(cmd *Command) string { // variable. It has the format <PROGRAM>_ACTIVE_HELP where <PROGRAM> is the name of the // root command in upper case, with all non-ASCII-alphanumeric characters replaced by `_`. func activeHelpEnvVar(name string) string { - // This format should not be changed: users will be using it explicitly. - activeHelpEnvVar := strings.ToUpper(fmt.Sprintf("%s%s", name, activeHelpEnvVarSuffix)) - activeHelpEnvVar = activeHelpEnvVarPrefixSubstRegexp.ReplaceAllString(activeHelpEnvVar, "_") - return activeHelpEnvVar + return configEnvVar(name, activeHelpEnvVarSuffix) } diff --git a/vendor/github.com/spf13/cobra/args.go b/vendor/github.com/spf13/cobra/args.go index e79ec33a81d..ed1e70ceaa4 100644 --- a/vendor/github.com/spf13/cobra/args.go +++ b/vendor/github.com/spf13/cobra/args.go @@ -52,9 +52,9 @@ func OnlyValidArgs(cmd *Command, args []string) error { if len(cmd.ValidArgs) > 0 { // Remove any description that may be included in ValidArgs. // A description is following a tab character.
- var validArgs []string + validArgs := make([]string, 0, len(cmd.ValidArgs)) for _, v := range cmd.ValidArgs { - validArgs = append(validArgs, strings.Split(v, "\t")[0]) + validArgs = append(validArgs, strings.SplitN(v, "\t", 2)[0]) } for _, v := range args { if !stringInSlice(v, validArgs) { diff --git a/vendor/github.com/spf13/cobra/bash_completions.go b/vendor/github.com/spf13/cobra/bash_completions.go index 8a531518409..f4d198cbcbd 100644 --- a/vendor/github.com/spf13/cobra/bash_completions.go +++ b/vendor/github.com/spf13/cobra/bash_completions.go @@ -597,19 +597,16 @@ func writeRequiredFlag(buf io.StringWriter, cmd *Command) { if nonCompletableFlag(flag) { return } - for key := range flag.Annotations { - switch key { - case BashCompOneRequiredFlag: - format := " must_have_one_flag+=(\"--%s" - if flag.Value.Type() != "bool" { - format += "=" - } - format += cbn - WriteStringAndCheck(buf, fmt.Sprintf(format, flag.Name)) - - if len(flag.Shorthand) > 0 { - WriteStringAndCheck(buf, fmt.Sprintf(" must_have_one_flag+=(\"-%s"+cbn, flag.Shorthand)) - } + if _, ok := flag.Annotations[BashCompOneRequiredFlag]; ok { + format := " must_have_one_flag+=(\"--%s" + if flag.Value.Type() != "bool" { + format += "=" + } + format += cbn + WriteStringAndCheck(buf, fmt.Sprintf(format, flag.Name)) + + if len(flag.Shorthand) > 0 { + WriteStringAndCheck(buf, fmt.Sprintf(" must_have_one_flag+=(\"-%s"+cbn, flag.Shorthand)) } } }) @@ -621,7 +618,7 @@ func writeRequiredNouns(buf io.StringWriter, cmd *Command) { for _, value := range cmd.ValidArgs { // Remove any description that may be included following a tab character. // Descriptions are not supported by bash completion. - value = strings.Split(value, "\t")[0] + value = strings.SplitN(value, "\t", 2)[0] WriteStringAndCheck(buf, fmt.Sprintf(" must_have_one_noun+=(%q)\n", value)) } if cmd.ValidArgsFunction != nil { diff --git a/vendor/github.com/spf13/cobra/cobra.go b/vendor/github.com/spf13/cobra/cobra.go index a6b160ce53c..e0b0947b04c 100644 --- a/vendor/github.com/spf13/cobra/cobra.go +++ b/vendor/github.com/spf13/cobra/cobra.go @@ -193,8 +193,6 @@ func ld(s, t string, ignoreCase bool) int { d := make([][]int, len(s)+1) for i := range d { d[i] = make([]int, len(t)+1) - } - for i := range d { d[i][0] = i } for j := range d[0] { diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go index 2fbe6c131a7..54748fc67eb 100644 --- a/vendor/github.com/spf13/cobra/command.go +++ b/vendor/github.com/spf13/cobra/command.go @@ -154,8 +154,10 @@ type Command struct { // pflags contains persistent flags. pflags *flag.FlagSet // lflags contains local flags. + // This field does not represent internal state, it's used as a cache to optimise LocalFlags function call lflags *flag.FlagSet // iflags contains inherited flags. + // This field does not represent internal state, it's used as a cache to optimise InheritedFlags function call iflags *flag.FlagSet // parentsPflags is all persistent flags of cmd's parents. parentsPflags *flag.FlagSet @@ -706,7 +708,7 @@ Loop: // This is not a flag or a flag value. Check to see if it matches what we're looking for, and if so, // return the args, excluding the one at this position. if s == x { - ret := []string{} + ret := make([]string, 0, len(args)-1) ret = append(ret, args[:pos]...) ret = append(ret, args[pos+1:]...) 
return ret @@ -754,14 +756,14 @@ func (c *Command) findSuggestions(arg string) string { if c.SuggestionsMinimumDistance <= 0 { c.SuggestionsMinimumDistance = 2 } - suggestionsString := "" + var sb strings.Builder if suggestions := c.SuggestionsFor(arg); len(suggestions) > 0 { - suggestionsString += "\n\nDid you mean this?\n" + sb.WriteString("\n\nDid you mean this?\n") for _, s := range suggestions { - suggestionsString += fmt.Sprintf("\t%v\n", s) + _, _ = fmt.Fprintf(&sb, "\t%v\n", s) } } - return suggestionsString + return sb.String() } func (c *Command) findNext(next string) *Command { @@ -873,7 +875,7 @@ func (c *Command) ArgsLenAtDash() int { func (c *Command) execute(a []string) (err error) { if c == nil { - return fmt.Errorf("Called Execute() on a nil Command") + return fmt.Errorf("called Execute() on a nil Command") } if len(c.Deprecated) > 0 { @@ -1187,10 +1189,11 @@ func (c *Command) InitDefaultHelpFlag() { c.mergePersistentFlags() if c.Flags().Lookup("help") == nil { usage := "help for " - if c.Name() == "" { + name := c.displayName() + if name == "" { usage += "this command" } else { - usage += c.Name() + usage += name } c.Flags().BoolP("help", "h", false, usage) _ = c.Flags().SetAnnotation("help", FlagSetByCobraAnnotation, []string{"true"}) @@ -1236,7 +1239,7 @@ func (c *Command) InitDefaultHelpCmd() { Use: "help [command]", Short: "Help about any command", Long: `Help provides help for any command in the application. -Simply type ` + c.Name() + ` help [path to command] for full details.`, +Simply type ` + c.displayName() + ` help [path to command] for full details.`, ValidArgsFunction: func(c *Command, args []string, toComplete string) ([]string, ShellCompDirective) { var completions []string cmd, _, e := c.Root().Find(args) @@ -1427,6 +1430,10 @@ func (c *Command) CommandPath() string { if c.HasParent() { return c.Parent().CommandPath() + " " + c.Name() } + return c.displayName() +} + +func (c *Command) displayName() string { if displayName, ok := c.Annotations[CommandDisplayNameAnnotation]; ok { return displayName } @@ -1436,10 +1443,11 @@ func (c *Command) CommandPath() string { // UseLine puts out the full usage for a given command (including parents). func (c *Command) UseLine() string { var useline string + use := strings.Replace(c.Use, c.Name(), c.displayName(), 1) if c.HasParent() { - useline = c.parent.CommandPath() + " " + c.Use + useline = c.parent.CommandPath() + " " + use } else { - useline = c.Use + useline = use } if c.DisableFlagsInUseLine { return useline @@ -1452,7 +1460,6 @@ func (c *Command) UseLine() string { // DebugFlags used to determine which flags have been assigned to which commands // and which persist. -// nolint:goconst func (c *Command) DebugFlags() { c.Println("DebugFlags called on", c.Name()) var debugflags func(*Command) @@ -1642,7 +1649,7 @@ func (c *Command) GlobalNormalizationFunc() func(f *flag.FlagSet, name string) f // to this command (local and persistent declared here and by all parents). func (c *Command) Flags() *flag.FlagSet { if c.flags == nil { - c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.flags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError) if c.flagErrorBuf == nil { c.flagErrorBuf = new(bytes.Buffer) } @@ -1653,10 +1660,11 @@ func (c *Command) Flags() *flag.FlagSet { } // LocalNonPersistentFlags are flags specific to this command which will NOT persist to subcommands. +// This function does not modify the flags of the current command, it's purpose is to return the current state. 
func (c *Command) LocalNonPersistentFlags() *flag.FlagSet { persistentFlags := c.PersistentFlags() - out := flag.NewFlagSet(c.Name(), flag.ContinueOnError) + out := flag.NewFlagSet(c.displayName(), flag.ContinueOnError) c.LocalFlags().VisitAll(func(f *flag.Flag) { if persistentFlags.Lookup(f.Name) == nil { out.AddFlag(f) @@ -1666,11 +1674,12 @@ func (c *Command) LocalNonPersistentFlags() *flag.FlagSet { } // LocalFlags returns the local FlagSet specifically set in the current command. +// This function does not modify the flags of the current command, it's purpose is to return the current state. func (c *Command) LocalFlags() *flag.FlagSet { c.mergePersistentFlags() if c.lflags == nil { - c.lflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.lflags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError) if c.flagErrorBuf == nil { c.flagErrorBuf = new(bytes.Buffer) } @@ -1693,11 +1702,12 @@ func (c *Command) LocalFlags() *flag.FlagSet { } // InheritedFlags returns all flags which were inherited from parent commands. +// This function does not modify the flags of the current command, it's purpose is to return the current state. func (c *Command) InheritedFlags() *flag.FlagSet { c.mergePersistentFlags() if c.iflags == nil { - c.iflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.iflags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError) if c.flagErrorBuf == nil { c.flagErrorBuf = new(bytes.Buffer) } @@ -1718,6 +1728,7 @@ func (c *Command) InheritedFlags() *flag.FlagSet { } // NonInheritedFlags returns all flags which were not inherited from parent commands. +// This function does not modify the flags of the current command, it's purpose is to return the current state. func (c *Command) NonInheritedFlags() *flag.FlagSet { return c.LocalFlags() } @@ -1725,7 +1736,7 @@ func (c *Command) NonInheritedFlags() *flag.FlagSet { // PersistentFlags returns the persistent FlagSet specifically set in the current command. func (c *Command) PersistentFlags() *flag.FlagSet { if c.pflags == nil { - c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.pflags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError) if c.flagErrorBuf == nil { c.flagErrorBuf = new(bytes.Buffer) } @@ -1738,9 +1749,9 @@ func (c *Command) PersistentFlags() *flag.FlagSet { func (c *Command) ResetFlags() { c.flagErrorBuf = new(bytes.Buffer) c.flagErrorBuf.Reset() - c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.flags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError) c.flags.SetOutput(c.flagErrorBuf) - c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.pflags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError) c.pflags.SetOutput(c.flagErrorBuf) c.lflags = nil @@ -1857,7 +1868,7 @@ func (c *Command) mergePersistentFlags() { // If c.parentsPflags == nil, it makes new. 
func (c *Command) updateParentsPflags() { if c.parentsPflags == nil { - c.parentsPflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.parentsPflags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError) c.parentsPflags.SetOutput(c.flagErrorBuf) c.parentsPflags.SortFlags = false } diff --git a/vendor/github.com/spf13/cobra/completions.go b/vendor/github.com/spf13/cobra/completions.go index b60f6b20007..c0c08b05721 100644 --- a/vendor/github.com/spf13/cobra/completions.go +++ b/vendor/github.com/spf13/cobra/completions.go @@ -17,6 +17,8 @@ package cobra import ( "fmt" "os" + "regexp" + "strconv" "strings" "sync" @@ -211,24 +213,29 @@ func (c *Command) initCompleteCmd(args []string) { // 2- Even without completions, we need to print the directive } - noDescriptions := (cmd.CalledAs() == ShellCompNoDescRequestCmd) + noDescriptions := cmd.CalledAs() == ShellCompNoDescRequestCmd + if !noDescriptions { + if doDescriptions, err := strconv.ParseBool(getEnvConfig(cmd, configEnvVarSuffixDescriptions)); err == nil { + noDescriptions = !doDescriptions + } + } + noActiveHelp := GetActiveHelpConfig(finalCmd) == activeHelpGlobalDisable + out := finalCmd.OutOrStdout() for _, comp := range completions { - if GetActiveHelpConfig(finalCmd) == activeHelpGlobalDisable { - // Remove all activeHelp entries in this case - if strings.HasPrefix(comp, activeHelpMarker) { - continue - } + if noActiveHelp && strings.HasPrefix(comp, activeHelpMarker) { + // Remove all activeHelp entries if it's disabled. + continue } if noDescriptions { // Remove any description that may be included following a tab character. - comp = strings.Split(comp, "\t")[0] + comp = strings.SplitN(comp, "\t", 2)[0] } // Make sure we only write the first line to the output. // This is needed if a description contains a linebreak. // Otherwise the shell scripts will interpret the other lines as new flags // and could therefore provide a wrong completion. - comp = strings.Split(comp, "\n")[0] + comp = strings.SplitN(comp, "\n", 2)[0] // Finally trim the completion. This is especially important to get rid // of a trailing tab when there are no description following it. @@ -237,14 +244,14 @@ func (c *Command) initCompleteCmd(args []string) { // although there is no description). comp = strings.TrimSpace(comp) - // Print each possible completion to stdout for the completion script to consume. - fmt.Fprintln(finalCmd.OutOrStdout(), comp) + // Print each possible completion to the output for the completion script to consume. + fmt.Fprintln(out, comp) } // As the last printout, print the completion directive for the completion script to parse. // The directive integer must be that last character following a single colon (:). // The completion script expects :<directive> - fmt.Fprintf(finalCmd.OutOrStdout(), ":%d\n", directive) + fmt.Fprintf(out, ":%d\n", directive) // Print some helpful info to stderr for the user to understand. // Output from stderr must be ignored by the completion script. @@ -291,7 +298,7 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi } if err != nil { // Unable to find the real command.
E.g., someInvalidCmd - return c, []string{}, ShellCompDirectiveDefault, fmt.Errorf("Unable to find a command for arguments: %v", trimmedArgs) + return c, []string{}, ShellCompDirectiveDefault, fmt.Errorf("unable to find a command for arguments: %v", trimmedArgs) } finalCmd.ctx = c.ctx @@ -899,3 +906,34 @@ func CompError(msg string) { func CompErrorln(msg string) { CompError(fmt.Sprintf("%s\n", msg)) } + +// These values should not be changed: users will be using them explicitly. +const ( + configEnvVarGlobalPrefix = "COBRA" + configEnvVarSuffixDescriptions = "COMPLETION_DESCRIPTIONS" +) + +var configEnvVarPrefixSubstRegexp = regexp.MustCompile(`[^A-Z0-9_]`) + +// configEnvVar returns the name of the program-specific configuration environment +// variable. It has the format <PROGRAM>_<SUFFIX> where <PROGRAM> is the name of the +// root command in upper case, with all non-ASCII-alphanumeric characters replaced by `_`. +func configEnvVar(name, suffix string) string { + // This format should not be changed: users will be using it explicitly. + v := strings.ToUpper(fmt.Sprintf("%s_%s", name, suffix)) + v = configEnvVarPrefixSubstRegexp.ReplaceAllString(v, "_") + return v +} + +// getEnvConfig returns the value of the configuration environment variable +// <PROGRAM>_<SUFFIX> where <PROGRAM> is the name of the root command in upper +// case, with all non-ASCII-alphanumeric characters replaced by `_`. +// If the value is empty or not set, the value of the environment variable +// COBRA_<SUFFIX> is returned instead. +func getEnvConfig(cmd *Command, suffix string) string { + v := os.Getenv(configEnvVar(cmd.Root().Name(), suffix)) + if v == "" { + v = os.Getenv(configEnvVar(configEnvVarGlobalPrefix, suffix)) + } + return v +} diff --git a/vendor/github.com/spf13/cobra/flag_groups.go b/vendor/github.com/spf13/cobra/flag_groups.go index 0671ec5f202..560612fd338 100644 --- a/vendor/github.com/spf13/cobra/flag_groups.go +++ b/vendor/github.com/spf13/cobra/flag_groups.go @@ -23,9 +23,9 @@ import ( ) const ( - requiredAsGroup = "cobra_annotation_required_if_others_set" - oneRequired = "cobra_annotation_one_required" - mutuallyExclusive = "cobra_annotation_mutually_exclusive" + requiredAsGroupAnnotation = "cobra_annotation_required_if_others_set" + oneRequiredAnnotation = "cobra_annotation_one_required" + mutuallyExclusiveAnnotation = "cobra_annotation_mutually_exclusive" ) // MarkFlagsRequiredTogether marks the given flags with annotations so that Cobra errors @@ -37,7 +37,7 @@ func (c *Command) MarkFlagsRequiredTogether(flagNames ...string) { if f == nil { panic(fmt.Sprintf("Failed to find flag %q and mark it as being required in a flag group", v)) } - if err := c.Flags().SetAnnotation(v, requiredAsGroup, append(f.Annotations[requiredAsGroup], strings.Join(flagNames, " "))); err != nil { + if err := c.Flags().SetAnnotation(v, requiredAsGroupAnnotation, append(f.Annotations[requiredAsGroupAnnotation], strings.Join(flagNames, " "))); err != nil { // Only errs if the flag isn't found. panic(err) } @@ -53,7 +53,7 @@ func (c *Command) MarkFlagsOneRequired(flagNames ...string) { if f == nil { panic(fmt.Sprintf("Failed to find flag %q and mark it as being in a one-required flag group", v)) } - if err := c.Flags().SetAnnotation(v, oneRequired, append(f.Annotations[oneRequired], strings.Join(flagNames, " "))); err != nil { + if err := c.Flags().SetAnnotation(v, oneRequiredAnnotation, append(f.Annotations[oneRequiredAnnotation], strings.Join(flagNames, " "))); err != nil { // Only errs if the flag isn't found.
panic(err) } @@ -70,7 +70,7 @@ func (c *Command) MarkFlagsMutuallyExclusive(flagNames ...string) { panic(fmt.Sprintf("Failed to find flag %q and mark it as being in a mutually exclusive flag group", v)) } // Each time this is called is a single new entry; this allows it to be a member of multiple groups if needed. - if err := c.Flags().SetAnnotation(v, mutuallyExclusive, append(f.Annotations[mutuallyExclusive], strings.Join(flagNames, " "))); err != nil { + if err := c.Flags().SetAnnotation(v, mutuallyExclusiveAnnotation, append(f.Annotations[mutuallyExclusiveAnnotation], strings.Join(flagNames, " "))); err != nil { panic(err) } } @@ -91,9 +91,9 @@ func (c *Command) ValidateFlagGroups() error { oneRequiredGroupStatus := map[string]map[string]bool{} mutuallyExclusiveGroupStatus := map[string]map[string]bool{} flags.VisitAll(func(pflag *flag.Flag) { - processFlagForGroupAnnotation(flags, pflag, requiredAsGroup, groupStatus) - processFlagForGroupAnnotation(flags, pflag, oneRequired, oneRequiredGroupStatus) - processFlagForGroupAnnotation(flags, pflag, mutuallyExclusive, mutuallyExclusiveGroupStatus) + processFlagForGroupAnnotation(flags, pflag, requiredAsGroupAnnotation, groupStatus) + processFlagForGroupAnnotation(flags, pflag, oneRequiredAnnotation, oneRequiredGroupStatus) + processFlagForGroupAnnotation(flags, pflag, mutuallyExclusiveAnnotation, mutuallyExclusiveGroupStatus) }) if err := validateRequiredFlagGroups(groupStatus); err != nil { @@ -130,7 +130,7 @@ func processFlagForGroupAnnotation(flags *flag.FlagSet, pflag *flag.Flag, annota continue } - groupStatus[group] = map[string]bool{} + groupStatus[group] = make(map[string]bool, len(flagnames)) for _, name := range flagnames { groupStatus[group][name] = false } @@ -232,9 +232,9 @@ func (c *Command) enforceFlagGroupsForCompletion() { oneRequiredGroupStatus := map[string]map[string]bool{} mutuallyExclusiveGroupStatus := map[string]map[string]bool{} c.Flags().VisitAll(func(pflag *flag.Flag) { - processFlagForGroupAnnotation(flags, pflag, requiredAsGroup, groupStatus) - processFlagForGroupAnnotation(flags, pflag, oneRequired, oneRequiredGroupStatus) - processFlagForGroupAnnotation(flags, pflag, mutuallyExclusive, mutuallyExclusiveGroupStatus) + processFlagForGroupAnnotation(flags, pflag, requiredAsGroupAnnotation, groupStatus) + processFlagForGroupAnnotation(flags, pflag, oneRequiredAnnotation, oneRequiredGroupStatus) + processFlagForGroupAnnotation(flags, pflag, mutuallyExclusiveAnnotation, mutuallyExclusiveGroupStatus) }) // If a flag that is part of a group is present, we make all the other flags @@ -253,17 +253,17 @@ func (c *Command) enforceFlagGroupsForCompletion() { // If none of the flags of a one-required group are present, we make all the flags // of that group required so that the shell completion suggests them automatically for flagList, flagnameAndStatus := range oneRequiredGroupStatus { - set := 0 + isSet := false - for _, isSet := range flagnameAndStatus { + for _, isSet = range flagnameAndStatus { if isSet { - set++ + break } } // None of the flags of the group are set, mark all flags in the group // as required - if set == 0 { + if !isSet { for _, fName := range strings.Split(flagList, " ") { _ = c.MarkFlagRequired(fName) } diff --git a/vendor/github.com/spf13/cobra/powershell_completions.go b/vendor/github.com/spf13/cobra/powershell_completions.go index 55195193944..a830b7bcad2 100644 --- a/vendor/github.com/spf13/cobra/powershell_completions.go +++ b/vendor/github.com/spf13/cobra/powershell_completions.go @@ -28,8 
+28,8 @@ import ( func genPowerShellComp(buf io.StringWriter, name string, includeDesc bool) { // Variables should not contain a '-' or ':' character nameForVar := name - nameForVar = strings.Replace(nameForVar, "-", "_", -1) - nameForVar = strings.Replace(nameForVar, ":", "_", -1) + nameForVar = strings.ReplaceAll(nameForVar, "-", "_") + nameForVar = strings.ReplaceAll(nameForVar, ":", "_") compCmd := ShellCompRequestCmd if !includeDesc { diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/vendor/github.com/stretchr/testify/assert/assertion_compare.go index 4d4b4aad6fe..7e19eba0904 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_compare.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_compare.go @@ -7,10 +7,13 @@ import ( "time" ) -type CompareType int +// Deprecated: CompareType has only ever been for internal use and has accidentally been published since v1.6.0. Do not use it. +type CompareType = compareResult + +type compareResult int const ( - compareLess CompareType = iota - 1 + compareLess compareResult = iota - 1 compareEqual compareGreater ) @@ -39,7 +42,7 @@ var ( bytesType = reflect.TypeOf([]byte{}) ) -func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { +func compare(obj1, obj2 interface{}, kind reflect.Kind) (compareResult, bool) { obj1Value := reflect.ValueOf(obj1) obj2Value := reflect.ValueOf(obj2) @@ -325,7 +328,13 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { timeObj2 = obj2Value.Convert(timeType).Interface().(time.Time) } - return compare(timeObj1.UnixNano(), timeObj2.UnixNano(), reflect.Int64) + if timeObj1.Before(timeObj2) { + return compareLess, true + } + if timeObj1.Equal(timeObj2) { + return compareEqual, true + } + return compareGreater, true } case reflect.Slice: { @@ -345,7 +354,7 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { bytesObj2 = obj2Value.Convert(bytesType).Interface().([]byte) } - return CompareType(bytes.Compare(bytesObj1, bytesObj2)), true + return compareResult(bytes.Compare(bytesObj1, bytesObj2)), true } case reflect.Uintptr: { @@ -381,7 +390,7 @@ func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface if h, ok := t.(tHelper); ok { h.Helper() } - return compareTwoValues(t, e1, e2, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) + return compareTwoValues(t, e1, e2, []compareResult{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) } // GreaterOrEqual asserts that the first element is greater than or equal to the second @@ -394,7 +403,7 @@ func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...in if h, ok := t.(tHelper); ok { h.Helper() } - return compareTwoValues(t, e1, e2, []CompareType{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) + return compareTwoValues(t, e1, e2, []compareResult{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) } // Less asserts that the first element is less than the second @@ -406,7 +415,7 @@ func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) if h, ok := t.(tHelper); ok { h.Helper() } - return compareTwoValues(t, e1, e2, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) + return compareTwoValues(t, e1, e2, []compareResult{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) 
} // LessOrEqual asserts that the first element is less than or equal to the second @@ -419,7 +428,7 @@ func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...inter if h, ok := t.(tHelper); ok { h.Helper() } - return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) + return compareTwoValues(t, e1, e2, []compareResult{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) } // Positive asserts that the specified element is positive @@ -431,7 +440,7 @@ func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { h.Helper() } zero := reflect.Zero(reflect.TypeOf(e)) - return compareTwoValues(t, e, zero.Interface(), []CompareType{compareGreater}, "\"%v\" is not positive", msgAndArgs...) + return compareTwoValues(t, e, zero.Interface(), []compareResult{compareGreater}, "\"%v\" is not positive", msgAndArgs...) } // Negative asserts that the specified element is negative @@ -443,10 +452,10 @@ func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { h.Helper() } zero := reflect.Zero(reflect.TypeOf(e)) - return compareTwoValues(t, e, zero.Interface(), []CompareType{compareLess}, "\"%v\" is not negative", msgAndArgs...) + return compareTwoValues(t, e, zero.Interface(), []compareResult{compareLess}, "\"%v\" is not negative", msgAndArgs...) } -func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool { +func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []compareResult, failMessage string, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() } @@ -469,7 +478,7 @@ func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedCompare return true } -func containsValue(values []CompareType, value CompareType) bool { +func containsValue(values []compareResult, value compareResult) bool { for _, v := range values { if v == value { return true diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go index 3ddab109ad9..19063416577 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_format.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go @@ -104,8 +104,8 @@ func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{}, return EqualExportedValues(t, expected, actual, append([]interface{}{msg}, args...)...) } -// EqualValuesf asserts that two objects are equal or convertible to the same types -// and equal. +// EqualValuesf asserts that two objects are equal or convertible to the larger +// type and equal. 
// // assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted") func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { @@ -186,7 +186,7 @@ func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick // assert.EventuallyWithTf(t, func(c *assert.CollectT, "error message %s", "formatted") { // // add assertions as needed; any assertion failure will fail the current tick // assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false") func EventuallyWithTf(t TestingT, condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -568,6 +568,23 @@ func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, a return NotContains(t, s, contains, append([]interface{}{msg}, args...)...) } +// NotElementsMatchf asserts that the specified listA(array, slice...) is NOT equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should not match. +// This is an inverse of ElementsMatch. +// +// assert.NotElementsMatchf(t, [1, 1, 2, 3], [1, 1, 2, 3], "error message %s", "formatted") -> false +// +// assert.NotElementsMatchf(t, [1, 1, 2, 3], [1, 2, 3], "error message %s", "formatted") -> true +// +// assert.NotElementsMatchf(t, [1, 2, 3], [1, 2, 4], "error message %s", "formatted") -> true +func NotElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NotElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...) +} + // NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either // a slice or a channel with len == 0. // @@ -604,7 +621,16 @@ func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg s return NotEqualValues(t, expected, actual, append([]interface{}{msg}, args...)...) } -// NotErrorIsf asserts that at none of the errors in err's chain matches target. +// NotErrorAsf asserts that none of the errors in err's chain matches target, +// but if so, sets target to that error value. +func NotErrorAsf(t TestingT, err error, target interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NotErrorAs(t, err, target, append([]interface{}{msg}, args...)...) +} + +// NotErrorIsf asserts that none of the errors in err's chain matches target. // This is a wrapper for errors.Is. func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go index a84e09bd409..21629087baf 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go @@ -186,8 +186,8 @@ func (a *Assertions) EqualExportedValuesf(expected interface{}, actual interface return EqualExportedValuesf(a.t, expected, actual, msg, args...) 
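// A hypothetical sketch matching the corrected doc examples above: the real
// signature takes waitFor before tick, so the longer duration comes first.
// ready is an assumed atomic flag:
//
//	var ready atomic.Bool
//	go func() { time.Sleep(50 * time.Millisecond); ready.Store(true) }()
//	assert.EventuallyWithT(t, func(c *assert.CollectT) {
//		assert.True(c, ready.Load(), "expected 'ready' to be true")
//	}, 10*time.Second, 100*time.Millisecond) // waitFor=10s, tick=100ms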
} -// EqualValues asserts that two objects are equal or convertible to the same types -// and equal. +// EqualValues asserts that two objects are equal or convertible to the larger +// type and equal. // // a.EqualValues(uint32(123), int32(123)) func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { @@ -197,8 +197,8 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn return EqualValues(a.t, expected, actual, msgAndArgs...) } -// EqualValuesf asserts that two objects are equal or convertible to the same types -// and equal. +// EqualValuesf asserts that two objects are equal or convertible to the larger +// type and equal. // // a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted") func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { @@ -336,7 +336,7 @@ func (a *Assertions) Eventually(condition func() bool, waitFor time.Duration, ti // a.EventuallyWithT(func(c *assert.CollectT) { // // add assertions as needed; any assertion failure will fail the current tick // assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false") func (a *Assertions) EventuallyWithT(condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -361,7 +361,7 @@ func (a *Assertions) EventuallyWithT(condition func(collect *CollectT), waitFor // a.EventuallyWithTf(func(c *assert.CollectT, "error message %s", "formatted") { // // add assertions as needed; any assertion failure will fail the current tick // assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false") func (a *Assertions) EventuallyWithTf(condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1128,6 +1128,40 @@ func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg strin return NotContainsf(a.t, s, contains, msg, args...) } +// NotElementsMatch asserts that the specified listA(array, slice...) is NOT equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should not match. +// This is an inverse of ElementsMatch. +// +// a.NotElementsMatch([1, 1, 2, 3], [1, 1, 2, 3]) -> false +// +// a.NotElementsMatch([1, 1, 2, 3], [1, 2, 3]) -> true +// +// a.NotElementsMatch([1, 2, 3], [1, 2, 4]) -> true +func (a *Assertions) NotElementsMatch(listA interface{}, listB interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotElementsMatch(a.t, listA, listB, msgAndArgs...) +} + +// NotElementsMatchf asserts that the specified listA(array, slice...) is NOT equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should not match. +// This is an inverse of ElementsMatch. 
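// A hypothetical sketch of the new inverse assertion, assuming a := assert.New(t):
//
//	a.NotElementsMatch([]int{1, 2, 3}, []int{1, 2, 4}) // passes: elements differ
//	a.NotElementsMatch([]int{1, 1, 2}, []int{1, 2})    // passes: duplicate counts differ
//	a.NotElementsMatch([]int{1, 2, 3}, []int{3, 2, 1}) // fails: same elements, order ignored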
+// +// a.NotElementsMatchf([1, 1, 2, 3], [1, 1, 2, 3], "error message %s", "formatted") -> false +// +// a.NotElementsMatchf([1, 1, 2, 3], [1, 2, 3], "error message %s", "formatted") -> true +// +// a.NotElementsMatchf([1, 2, 3], [1, 2, 4], "error message %s", "formatted") -> true +func (a *Assertions) NotElementsMatchf(listA interface{}, listB interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotElementsMatchf(a.t, listA, listB, msg, args...) +} + // NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either // a slice or a channel with len == 0. // @@ -1200,7 +1234,25 @@ func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg str return NotEqualf(a.t, expected, actual, msg, args...) } -// NotErrorIs asserts that at none of the errors in err's chain matches target. +// NotErrorAs asserts that none of the errors in err's chain matches target, +// but if so, sets target to that error value. +func (a *Assertions) NotErrorAs(err error, target interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotErrorAs(a.t, err, target, msgAndArgs...) +} + +// NotErrorAsf asserts that none of the errors in err's chain matches target, +// but if so, sets target to that error value. +func (a *Assertions) NotErrorAsf(err error, target interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotErrorAsf(a.t, err, target, msg, args...) +} + +// NotErrorIs asserts that none of the errors in err's chain matches target. // This is a wrapper for errors.Is. func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { @@ -1209,7 +1261,7 @@ func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface return NotErrorIs(a.t, err, target, msgAndArgs...) } -// NotErrorIsf asserts that at none of the errors in err's chain matches target. +// NotErrorIsf asserts that none of the errors in err's chain matches target. // This is a wrapper for errors.Is. func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { diff --git a/vendor/github.com/stretchr/testify/assert/assertion_order.go b/vendor/github.com/stretchr/testify/assert/assertion_order.go index 00df62a0599..1d2f71824aa 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_order.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_order.go @@ -6,7 +6,7 @@ import ( ) // isOrdered checks that collection contains orderable elements. -func isOrdered(t TestingT, object interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool { +func isOrdered(t TestingT, object interface{}, allowedComparesResults []compareResult, failMessage string, msgAndArgs ...interface{}) bool { objKind := reflect.TypeOf(object).Kind() if objKind != reflect.Slice && objKind != reflect.Array { return false @@ -50,7 +50,7 @@ func isOrdered(t TestingT, object interface{}, allowedComparesResults []CompareT // assert.IsIncreasing(t, []float{1, 2}) // assert.IsIncreasing(t, []string{"a", "b"}) func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) 
+ return isOrdered(t, object, []compareResult{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) } // IsNonIncreasing asserts that the collection is not increasing @@ -59,7 +59,7 @@ func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) boo // assert.IsNonIncreasing(t, []float{2, 1}) // assert.IsNonIncreasing(t, []string{"b", "a"}) func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) + return isOrdered(t, object, []compareResult{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) } // IsDecreasing asserts that the collection is decreasing @@ -68,7 +68,7 @@ func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) // assert.IsDecreasing(t, []float{2, 1}) // assert.IsDecreasing(t, []string{"b", "a"}) func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) + return isOrdered(t, object, []compareResult{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) } // IsNonDecreasing asserts that the collection is not decreasing @@ -77,5 +77,5 @@ func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) boo // assert.IsNonDecreasing(t, []float{1, 2}) // assert.IsNonDecreasing(t, []string{"a", "b"}) func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) + return isOrdered(t, object, []compareResult{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) } diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go index 0b7570f21c6..4e91332bb51 100644 --- a/vendor/github.com/stretchr/testify/assert/assertions.go +++ b/vendor/github.com/stretchr/testify/assert/assertions.go @@ -19,7 +19,9 @@ import ( "github.com/davecgh/go-spew/spew" "github.com/pmezard/go-difflib/difflib" - "gopkg.in/yaml.v3" + + // Wrapper around gopkg.in/yaml.v3 + "github.com/stretchr/testify/assert/yaml" ) //go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=assert -template=assertion_format.go.tmpl" @@ -45,6 +47,10 @@ type BoolAssertionFunc func(TestingT, bool, ...interface{}) bool // for table driven tests. type ErrorAssertionFunc func(TestingT, error, ...interface{}) bool +// PanicAssertionFunc is a common function prototype when validating a panic value. Can be useful +// for table driven tests. +type PanicAssertionFunc = func(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool + // Comparison is a custom function that returns true on success and false on failure type Comparison func() (success bool) @@ -496,7 +502,13 @@ func Same(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) b h.Helper() } - if !samePointers(expected, actual) { + same, ok := samePointers(expected, actual) + if !ok { + return Fail(t, "Both arguments must be pointers", msgAndArgs...) 
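// A hypothetical sketch of the reworked pointer checks above: Same and
// NotSame now fail outright when either argument is not a pointer, rather
// than treating non-pointers as merely "not same". Assumes a *testing.T t:
//
//	a, b := 1, 1
//	assert.Same(t, &a, &a)    // passes: identical pointer
//	assert.NotSame(t, &a, &b) // passes: distinct addresses
//	assert.Same(t, a, b)      // fails: "Both arguments must be pointers"
//	assert.NotSame(t, a, b)   // now also fails for the same reason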
+ } + + if !same { + // both are pointers but not the same type & pointing to the same address return Fail(t, fmt.Sprintf("Not same: \n"+ "expected: %p %#v\n"+ "actual : %p %#v", expected, expected, actual, actual), msgAndArgs...) @@ -516,7 +528,13 @@ func NotSame(t TestingT, expected, actual interface{}, msgAndArgs ...interface{} h.Helper() } - if samePointers(expected, actual) { + same, ok := samePointers(expected, actual) + if !ok { + //fails when the arguments are not pointers + return !(Fail(t, "Both arguments must be pointers", msgAndArgs...)) + } + + if same { return Fail(t, fmt.Sprintf( "Expected and actual point to the same object: %p %#v", expected, expected), msgAndArgs...) @@ -524,21 +542,23 @@ func NotSame(t TestingT, expected, actual interface{}, msgAndArgs ...interface{} return true } -// samePointers compares two generic interface objects and returns whether -// they point to the same object -func samePointers(first, second interface{}) bool { +// samePointers checks if two generic interface objects are pointers of the same +// type pointing to the same object. It returns two values: same indicating if +// they are the same type and point to the same object, and ok indicating that +// both inputs are pointers. +func samePointers(first, second interface{}) (same bool, ok bool) { firstPtr, secondPtr := reflect.ValueOf(first), reflect.ValueOf(second) if firstPtr.Kind() != reflect.Ptr || secondPtr.Kind() != reflect.Ptr { - return false + return false, false //not both are pointers } firstType, secondType := reflect.TypeOf(first), reflect.TypeOf(second) if firstType != secondType { - return false + return false, true // both are pointers, but of different types } // compare pointer addresses - return first == second + return first == second, true } // formatUnequalValues takes two values of arbitrary types and returns string @@ -572,8 +592,8 @@ func truncatingFormat(data interface{}) string { return value } -// EqualValues asserts that two objects are equal or convertible to the same types -// and equal. +// EqualValues asserts that two objects are equal or convertible to the larger +// type and equal. // // assert.EqualValues(t, uint32(123), int32(123)) func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { @@ -615,21 +635,6 @@ func EqualExportedValues(t TestingT, expected, actual interface{}, msgAndArgs .. return Fail(t, fmt.Sprintf("Types expected to match exactly\n\t%v != %v", aType, bType), msgAndArgs...) } - if aType.Kind() == reflect.Ptr { - aType = aType.Elem() - } - if bType.Kind() == reflect.Ptr { - bType = bType.Elem() - } - - if aType.Kind() != reflect.Struct { - return Fail(t, fmt.Sprintf("Types expected to both be struct or pointer to struct \n\t%v != %v", aType.Kind(), reflect.Struct), msgAndArgs...) - } - - if bType.Kind() != reflect.Struct { - return Fail(t, fmt.Sprintf("Types expected to both be struct or pointer to struct \n\t%v != %v", bType.Kind(), reflect.Struct), msgAndArgs...) - } - expected = copyExportedFields(expected) actual = copyExportedFields(actual) @@ -1170,6 +1175,39 @@ func formatListDiff(listA, listB interface{}, extraA, extraB []interface{}) stri return msg.String() } +// NotElementsMatch asserts that the specified listA(array, slice...) is NOT equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should not match. +// This is an inverse of ElementsMatch. 
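// A hypothetical sketch: with the struct-only guards deleted above,
// EqualExportedValues relies on copyExportedFields alone and no longer
// rejects inputs up front for not being structs. S is an assumed type:
//
//	type S struct {
//		Exported    int
//		notExported int
//	}
//	assert.EqualExportedValues(t, S{1, 2}, S{1, 3}) // passes: exported fields equal
//	assert.EqualExportedValues(t, S{1, 2}, S{2, 3}) // fails: Exported differs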
+// +// assert.NotElementsMatch(t, [1, 1, 2, 3], [1, 1, 2, 3]) -> false +// +// assert.NotElementsMatch(t, [1, 1, 2, 3], [1, 2, 3]) -> true +// +// assert.NotElementsMatch(t, [1, 2, 3], [1, 2, 4]) -> true +func NotElementsMatch(t TestingT, listA, listB interface{}, msgAndArgs ...interface{}) (ok bool) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if isEmpty(listA) && isEmpty(listB) { + return Fail(t, "listA and listB contain the same elements", msgAndArgs) + } + + if !isList(t, listA, msgAndArgs...) { + return Fail(t, "listA is not a list type", msgAndArgs...) + } + if !isList(t, listB, msgAndArgs...) { + return Fail(t, "listB is not a list type", msgAndArgs...) + } + + extraA, extraB := diffLists(listA, listB) + if len(extraA) == 0 && len(extraB) == 0 { + return Fail(t, "listA and listB contain the same elements", msgAndArgs) + } + + return true +} + // Condition uses a Comparison to assert a complex condition. func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { @@ -1488,6 +1526,9 @@ func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAnd if err != nil { return Fail(t, err.Error(), msgAndArgs...) } + if math.IsNaN(actualEpsilon) { + return Fail(t, "relative error is NaN", msgAndArgs...) + } if actualEpsilon > epsilon { return Fail(t, fmt.Sprintf("Relative error is too high: %#v (expected)\n"+ " < %#v (actual)", epsilon, actualEpsilon), msgAndArgs...) @@ -1611,7 +1652,6 @@ func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...in // matchRegexp return true if a specified regexp matches a string. func matchRegexp(rx interface{}, str interface{}) bool { - var r *regexp.Regexp if rr, ok := rx.(*regexp.Regexp); ok { r = rr @@ -1619,7 +1659,14 @@ func matchRegexp(rx interface{}, str interface{}) bool { r = regexp.MustCompile(fmt.Sprint(rx)) } - return (r.FindStringIndex(fmt.Sprint(str)) != nil) + switch v := str.(type) { + case []byte: + return r.Match(v) + case string: + return r.MatchString(v) + default: + return r.MatchString(fmt.Sprint(v)) + } } @@ -1872,7 +1919,7 @@ var spewConfigStringerEnabled = spew.ConfigState{ MaxDepth: 10, } -type tHelper interface { +type tHelper = interface { Helper() } @@ -1911,6 +1958,9 @@ func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick t // CollectT implements the TestingT interface and collects all errors. type CollectT struct { + // A slice of errors. Non-nil slice denotes a failure. + // If it's non-nil but len(c.errors) == 0, this is also a failure + // obtained by direct c.FailNow() call. errors []error } @@ -1919,9 +1969,10 @@ func (c *CollectT) Errorf(format string, args ...interface{}) { c.errors = append(c.errors, fmt.Errorf(format, args...)) } -// FailNow panics. -func (*CollectT) FailNow() { - panic("Assertion failed") +// FailNow stops execution by calling runtime.Goexit. +func (c *CollectT) FailNow() { + c.fail() + runtime.Goexit() } // Deprecated: That was a method for internal usage that should not have been published. Now just panics. @@ -1934,6 +1985,16 @@ func (*CollectT) Copy(TestingT) { panic("Copy() is deprecated") } +func (c *CollectT) fail() { + if !c.failed() { + c.errors = []error{} // Make it non-nil to mark a failure. + } +} + +func (c *CollectT) failed() bool { + return c.errors != nil +} + // EventuallyWithT asserts that given condition will be met in waitFor time, // periodically checking target function each tick. 
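// A hypothetical sketch of the widened regexp matching above: []byte input
// is now matched directly instead of being stringified via fmt.Sprint:
//
//	assert.Regexp(t, `^start`, []byte("start of the line"))
//	assert.Regexp(t, regexp.MustCompile("end$"), "the end")
//	assert.NotRegexp(t, `^end`, []byte("start of the line"))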
In contrast to Eventually, // it supplies a CollectT to the condition function, so that the condition @@ -1951,14 +2012,14 @@ func (*CollectT) Copy(TestingT) { // assert.EventuallyWithT(t, func(c *assert.CollectT) { // // add assertions as needed; any assertion failure will fail the current tick // assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false") func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() } var lastFinishedTickErrs []error - ch := make(chan []error, 1) + ch := make(chan *CollectT, 1) timer := time.NewTimer(waitFor) defer timer.Stop() @@ -1978,16 +2039,16 @@ func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time go func() { collect := new(CollectT) defer func() { - ch <- collect.errors + ch <- collect }() condition(collect) }() - case errs := <-ch: - if len(errs) == 0 { + case collect := <-ch: + if !collect.failed() { return true } // Keep the errors from the last ended condition, so that they can be copied to t if timeout is reached. - lastFinishedTickErrs = errs + lastFinishedTickErrs = collect.errors tick = ticker.C } } @@ -2049,7 +2110,7 @@ func ErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool { ), msgAndArgs...) } -// NotErrorIs asserts that at none of the errors in err's chain matches target. +// NotErrorIs asserts that none of the errors in err's chain matches target. // This is a wrapper for errors.Is. func NotErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { @@ -2090,6 +2151,24 @@ func ErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{ ), msgAndArgs...) } +// NotErrorAs asserts that none of the errors in err's chain matches target, +// but if so, sets target to that error value. +func NotErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if !errors.As(err, target) { + return true + } + + chain := buildErrorChainString(err) + + return Fail(t, fmt.Sprintf("Target error should not be in err chain:\n"+ + "found: %q\n"+ + "in chain: %s", target, chain, + ), msgAndArgs...) +} + func buildErrorChainString(err error) string { if err == nil { return "" diff --git a/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go b/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go new file mode 100644 index 00000000000..baa0cc7d7fc --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go @@ -0,0 +1,25 @@ +//go:build testify_yaml_custom && !testify_yaml_fail && !testify_yaml_default +// +build testify_yaml_custom,!testify_yaml_fail,!testify_yaml_default + +// Package yaml is an implementation of YAML functions that calls a pluggable implementation. +// +// This implementation is selected with the testify_yaml_custom build tag. +// +// go test -tags testify_yaml_custom +// +// This implementation can be used at build time to replace the default implementation +// to avoid linking with [gopkg.in/yaml.v3]. +// +// In your test package: +// +// import assertYaml "github.com/stretchr/testify/assert/yaml" +// +// func init() { +// assertYaml.Unmarshal = func (in []byte, out interface{}) error { +// // ... 
+// return nil +// } +// } +package yaml + +var Unmarshal func(in []byte, out interface{}) error diff --git a/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go b/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go new file mode 100644 index 00000000000..b83c6cf64c2 --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go @@ -0,0 +1,37 @@ +//go:build !testify_yaml_fail && !testify_yaml_custom +// +build !testify_yaml_fail,!testify_yaml_custom + +// Package yaml is just an indirection to handle YAML deserialization. +// +// This package is just an indirection that allows the builder to override the +// indirection with an alternative implementation of this package that uses +// another implementation of YAML deserialization. This allows to not either not +// use YAML deserialization at all, or to use another implementation than +// [gopkg.in/yaml.v3] (for example for license compatibility reasons, see [PR #1120]). +// +// Alternative implementations are selected using build tags: +// +// - testify_yaml_fail: [Unmarshal] always fails with an error +// - testify_yaml_custom: [Unmarshal] is a variable. Caller must initialize it +// before calling any of [github.com/stretchr/testify/assert.YAMLEq] or +// [github.com/stretchr/testify/assert.YAMLEqf]. +// +// Usage: +// +// go test -tags testify_yaml_fail +// +// You can check with "go list" which implementation is linked: +// +// go list -f '{{.Imports}}' github.com/stretchr/testify/assert/yaml +// go list -tags testify_yaml_fail -f '{{.Imports}}' github.com/stretchr/testify/assert/yaml +// go list -tags testify_yaml_custom -f '{{.Imports}}' github.com/stretchr/testify/assert/yaml +// +// [PR #1120]: https://github.com/stretchr/testify/pull/1120 +package yaml + +import goyaml "gopkg.in/yaml.v3" + +// Unmarshal is just a wrapper of [gopkg.in/yaml.v3.Unmarshal]. +func Unmarshal(in []byte, out interface{}) error { + return goyaml.Unmarshal(in, out) +} diff --git a/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go b/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go new file mode 100644 index 00000000000..e78f7dfe69a --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go @@ -0,0 +1,18 @@ +//go:build testify_yaml_fail && !testify_yaml_custom && !testify_yaml_default +// +build testify_yaml_fail,!testify_yaml_custom,!testify_yaml_default + +// Package yaml is an implementation of YAML functions that always fail. 
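// A hypothetical usage sketch of the build-tag selection described above:
//
//	go test -tags testify_yaml_fail ./...   # YAML assertions fail with errNotImplemented
//	go test -tags testify_yaml_custom ./... # caller must provide Unmarshal first
//
// and, under testify_yaml_custom, an assumed wiring in the test package
// (customYAML is a placeholder for any YAML library):
//
//	import assertYaml "github.com/stretchr/testify/assert/yaml"
//
//	func init() {
//		assertYaml.Unmarshal = func(in []byte, out interface{}) error {
//			return customYAML.Unmarshal(in, out)
//		}
//	}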
+// +// This implementation can be used at build time to replace the default implementation +// to avoid linking with [gopkg.in/yaml.v3]: +// +// go test -tags testify_yaml_fail +package yaml + +import "errors" + +var errNotImplemented = errors.New("YAML functions are not available (see https://pkg.go.dev/github.com/stretchr/testify/assert/yaml)") + +func Unmarshal([]byte, interface{}) error { + return errNotImplemented +} diff --git a/vendor/github.com/stretchr/testify/mock/mock.go b/vendor/github.com/stretchr/testify/mock/mock.go index 213bde2ea63..eb5682df978 100644 --- a/vendor/github.com/stretchr/testify/mock/mock.go +++ b/vendor/github.com/stretchr/testify/mock/mock.go @@ -80,12 +80,12 @@ type Call struct { requires []*Call } -func newCall(parent *Mock, methodName string, callerInfo []string, methodArguments ...interface{}) *Call { +func newCall(parent *Mock, methodName string, callerInfo []string, methodArguments Arguments, returnArguments Arguments) *Call { return &Call{ Parent: parent, Method: methodName, Arguments: methodArguments, - ReturnArguments: make([]interface{}, 0), + ReturnArguments: returnArguments, callerInfo: callerInfo, Repeatability: 0, WaitFor: nil, @@ -256,7 +256,7 @@ func (c *Call) Unset() *Call { // calls have been called as expected. The referenced calls may be from the // same mock instance and/or other mock instances. // -// Mock.On("Do").Return(nil).Notbefore( +// Mock.On("Do").Return(nil).NotBefore( // Mock.On("Init").Return(nil) // ) func (c *Call) NotBefore(calls ...*Call) *Call { @@ -273,6 +273,20 @@ func (c *Call) NotBefore(calls ...*Call) *Call { return c } +// InOrder defines the order in which the calls should be made +// +// For example: +// +// InOrder( +// Mock.On("init").Return(nil), +// Mock.On("Do").Return(nil), +// ) +func InOrder(calls ...*Call) { + for i := 1; i < len(calls); i++ { + calls[i].NotBefore(calls[i-1]) + } +} + // Mock is the workhorse used to track activity on another object. // For an example of its usage, refer to the "Example Usage" section at the top // of this document. @@ -351,7 +365,8 @@ func (m *Mock) On(methodName string, arguments ...interface{}) *Call { m.mutex.Lock() defer m.mutex.Unlock() - c := newCall(m, methodName, assert.CallerInfo(), arguments...) + + c := newCall(m, methodName, assert.CallerInfo(), arguments, make([]interface{}, 0)) m.ExpectedCalls = append(m.ExpectedCalls, c) return c } @@ -491,11 +506,12 @@ func (m *Mock) MethodCalled(methodName string, arguments ...interface{}) Argumen m.mutex.Unlock() if closestCall != nil { - m.fail("\n\nmock: Unexpected Method Call\n-----------------------------\n\n%s\n\nThe closest call I have is: \n\n%s\n\n%s\nDiff: %s", + m.fail("\n\nmock: Unexpected Method Call\n-----------------------------\n\n%s\n\nThe closest call I have is: \n\n%s\n\n%s\nDiff: %s\nat: %s\n", callString(methodName, arguments, true), callString(methodName, closestCall.Arguments, true), diffArguments(closestCall.Arguments, arguments), strings.TrimSpace(mismatch), + assert.CallerInfo(), ) } else { m.fail("\nassert: mock: I don't know what to return because the method call was unexpected.\n\tEither do Mock.On(\"%s\").Return(...) 
first, or remove the %s() call.\n\tThis method was unexpected:\n\t\t%s\n\tat: %s", methodName, methodName, callString(methodName, arguments, true), assert.CallerInfo()) @@ -529,7 +545,7 @@ func (m *Mock) MethodCalled(methodName string, arguments ...interface{}) Argumen call.totalCalls++ // add the call - m.Calls = append(m.Calls, *newCall(m, methodName, assert.CallerInfo(), arguments...)) + m.Calls = append(m.Calls, *newCall(m, methodName, assert.CallerInfo(), arguments, call.ReturnArguments)) m.mutex.Unlock() // block if specified @@ -764,9 +780,17 @@ const ( ) // AnythingOfTypeArgument contains the type of an argument -// for use when type checking. Used in Diff and Assert. +// for use when type checking. Used in [Arguments.Diff] and [Arguments.Assert]. // -// Deprecated: this is an implementation detail that must not be used. Use [AnythingOfType] instead. +// Deprecated: this is an implementation detail that must not be used. Use the [AnythingOfType] constructor instead, example: +// +// m.On("Do", mock.AnythingOfType("string")) +// +// All explicit type declarations can be replaced with interface{} as is expected by [Mock.On], example: +// +// func anyString interface{} { +// return mock.AnythingOfType("string") +// } type AnythingOfTypeArgument = anythingOfTypeArgument // anythingOfTypeArgument is a string that contains the type of an argument @@ -780,53 +804,54 @@ type anythingOfTypeArgument string // // For example: // -// Assert(t, AnythingOfType("string"), AnythingOfType("int")) +// args.Assert(t, AnythingOfType("string"), AnythingOfType("int")) func AnythingOfType(t string) AnythingOfTypeArgument { return anythingOfTypeArgument(t) } // IsTypeArgument is a struct that contains the type of an argument -// for use when type checking. This is an alternative to AnythingOfType. -// Used in Diff and Assert. +// for use when type checking. This is an alternative to [AnythingOfType]. +// Used in [Arguments.Diff] and [Arguments.Assert]. type IsTypeArgument struct { t reflect.Type } // IsType returns an IsTypeArgument object containing the type to check for. // You can provide a zero-value of the type to check. This is an -// alternative to AnythingOfType. Used in Diff and Assert. +// alternative to [AnythingOfType]. Used in [Arguments.Diff] and [Arguments.Assert]. // // For example: -// Assert(t, IsType(""), IsType(0)) +// +// args.Assert(t, IsType(""), IsType(0)) func IsType(t interface{}) *IsTypeArgument { return &IsTypeArgument{t: reflect.TypeOf(t)} } -// FunctionalOptionsArgument is a struct that contains the type and value of an functional option argument -// for use when type checking. +// FunctionalOptionsArgument contains a list of functional options arguments +// expected for use when matching a list of arguments. 
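// A hypothetical sketch of the new InOrder helper above, which chains
// NotBefore constraints so the expected calls must occur in sequence
// (m is an assumed *mock.Mock):
//
//	mock.InOrder(
//		m.On("Init").Return(nil),
//		m.On("Do").Return(nil),
//	)
//	// calling m.Do() before m.Init() now fails the test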
type FunctionalOptionsArgument struct { - value interface{} + values []interface{} } // String returns the string representation of FunctionalOptionsArgument func (f *FunctionalOptionsArgument) String() string { var name string - tValue := reflect.ValueOf(f.value) - if tValue.Len() > 0 { - name = "[]" + reflect.TypeOf(tValue.Index(0).Interface()).String() + if len(f.values) > 0 { + name = "[]" + reflect.TypeOf(f.values[0]).String() } - return strings.Replace(fmt.Sprintf("%#v", f.value), "[]interface {}", name, 1) + return strings.Replace(fmt.Sprintf("%#v", f.values), "[]interface {}", name, 1) } -// FunctionalOptions returns an FunctionalOptionsArgument object containing the functional option type -// and the values to check of +// FunctionalOptions returns an [FunctionalOptionsArgument] object containing +// the expected functional-options to check for. // // For example: -// Assert(t, FunctionalOptions("[]foo.FunctionalOption", foo.Opt1(), foo.Opt2())) -func FunctionalOptions(value ...interface{}) *FunctionalOptionsArgument { +// +// args.Assert(t, FunctionalOptions(foo.Opt1("strValue"), foo.Opt2(613))) +func FunctionalOptions(values ...interface{}) *FunctionalOptionsArgument { return &FunctionalOptionsArgument{ - value: value, + values: values, } } @@ -873,10 +898,11 @@ func (f argumentMatcher) String() string { // and false otherwise. // // Example: -// m.On("Do", MatchedBy(func(req *http.Request) bool { return req.Host == "example.com" })) // -// |fn|, must be a function accepting a single argument (of the expected type) -// which returns a bool. If |fn| doesn't match the required signature, +// m.On("Do", MatchedBy(func(req *http.Request) bool { return req.Host == "example.com" })) +// +// fn must be a function accepting a single argument (of the expected type) +// which returns a bool. If fn doesn't match the required signature, // MatchedBy() panics. 
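// A hypothetical sketch of the reworked functional-options matcher: expected
// options are now passed directly and compared by function identity plus the
// effects of applying them (foo.Opt1 and foo.Opt2 are assumed option
// constructors, m an assumed *mock.Mock):
//
//	m.On("Do", mock.FunctionalOptions(foo.Opt1("strValue"), foo.Opt2(613))).Return(nil)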
func MatchedBy(fn interface{}) argumentMatcher { fnType := reflect.TypeOf(fn) @@ -979,20 +1005,17 @@ func (args Arguments) Diff(objects []interface{}) (string, int) { output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, expected.t.Name(), actualT.Name(), actualFmt) } case *FunctionalOptionsArgument: - t := expected.value - var name string - tValue := reflect.ValueOf(t) - if tValue.Len() > 0 { - name = "[]" + reflect.TypeOf(tValue.Index(0).Interface()).String() + if len(expected.values) > 0 { + name = "[]" + reflect.TypeOf(expected.values[0]).String() } - tName := reflect.TypeOf(t).Name() - if name != reflect.TypeOf(actual).String() && tValue.Len() != 0 { + const tName = "[]interface{}" + if name != reflect.TypeOf(actual).String() && len(expected.values) != 0 { differences++ output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, tName, reflect.TypeOf(actual).Name(), actualFmt) } else { - if ef, af := assertOpts(t, actual); ef == "" && af == "" { + if ef, af := assertOpts(expected.values, actual); ef == "" && af == "" { // match output = fmt.Sprintf("%s\t%d: PASS: %s == %s\n", output, i, tName, tName) } else { @@ -1092,7 +1115,7 @@ func (args Arguments) Error(index int) error { return nil } if s, ok = obj.(error); !ok { - panic(fmt.Sprintf("assert: arguments: Error(%d) failed because object wasn't correct type: %v", index, args.Get(index))) + panic(fmt.Sprintf("assert: arguments: Error(%d) failed because object wasn't correct type: %v", index, obj)) } return s } @@ -1181,32 +1204,38 @@ type tHelper interface { func assertOpts(expected, actual interface{}) (expectedFmt, actualFmt string) { expectedOpts := reflect.ValueOf(expected) actualOpts := reflect.ValueOf(actual) + + var expectedFuncs []*runtime.Func var expectedNames []string for i := 0; i < expectedOpts.Len(); i++ { - expectedNames = append(expectedNames, funcName(expectedOpts.Index(i).Interface())) + f := runtimeFunc(expectedOpts.Index(i).Interface()) + expectedFuncs = append(expectedFuncs, f) + expectedNames = append(expectedNames, funcName(f)) } + var actualFuncs []*runtime.Func var actualNames []string for i := 0; i < actualOpts.Len(); i++ { - actualNames = append(actualNames, funcName(actualOpts.Index(i).Interface())) + f := runtimeFunc(actualOpts.Index(i).Interface()) + actualFuncs = append(actualFuncs, f) + actualNames = append(actualNames, funcName(f)) } - if !assert.ObjectsAreEqual(expectedNames, actualNames) { + + if expectedOpts.Len() != actualOpts.Len() { expectedFmt = fmt.Sprintf("%v", expectedNames) actualFmt = fmt.Sprintf("%v", actualNames) return } for i := 0; i < expectedOpts.Len(); i++ { - expectedOpt := expectedOpts.Index(i).Interface() - actualOpt := actualOpts.Index(i).Interface() - - expectedFunc := expectedNames[i] - actualFunc := actualNames[i] - if expectedFunc != actualFunc { - expectedFmt = expectedFunc - actualFmt = actualFunc + if !isFuncSame(expectedFuncs[i], actualFuncs[i]) { + expectedFmt = expectedNames[i] + actualFmt = actualNames[i] return } + expectedOpt := expectedOpts.Index(i).Interface() + actualOpt := actualOpts.Index(i).Interface() + ot := reflect.TypeOf(expectedOpt) var expectedValues []reflect.Value var actualValues []reflect.Value @@ -1224,9 +1253,9 @@ func assertOpts(expected, actual interface{}) (expectedFmt, actualFmt string) { reflect.ValueOf(actualOpt).Call(actualValues) for i := 0; i < ot.NumIn(); i++ { - if !assert.ObjectsAreEqual(expectedValues[i].Interface(), actualValues[i].Interface()) { - expectedFmt = fmt.Sprintf("%s %+v", 
expectedNames[i], expectedValues[i].Interface()) - actualFmt = fmt.Sprintf("%s %+v", expectedNames[i], actualValues[i].Interface()) + if expectedArg, actualArg := expectedValues[i].Interface(), actualValues[i].Interface(); !assert.ObjectsAreEqual(expectedArg, actualArg) { + expectedFmt = fmt.Sprintf("%s(%T) -> %#v", expectedNames[i], expectedArg, expectedArg) + actualFmt = fmt.Sprintf("%s(%T) -> %#v", expectedNames[i], actualArg, actualArg) return } } @@ -1235,7 +1264,25 @@ func assertOpts(expected, actual interface{}) (expectedFmt, actualFmt string) { return "", "" } -func funcName(opt interface{}) string { - n := runtime.FuncForPC(reflect.ValueOf(opt).Pointer()).Name() - return strings.TrimSuffix(path.Base(n), path.Ext(n)) +func runtimeFunc(opt interface{}) *runtime.Func { + return runtime.FuncForPC(reflect.ValueOf(opt).Pointer()) +} + +func funcName(f *runtime.Func) string { + name := f.Name() + trimmed := strings.TrimSuffix(path.Base(name), path.Ext(name)) + splitted := strings.Split(trimmed, ".") + + if len(splitted) == 0 { + return trimmed + } + + return splitted[len(splitted)-1] +} + +func isFuncSame(f1, f2 *runtime.Func) bool { + f1File, f1Loc := f1.FileLine(f1.Entry()) + f2File, f2Loc := f2.FileLine(f2.Entry()) + + return f1File == f2File && f1Loc == f2Loc } diff --git a/vendor/github.com/stretchr/testify/require/require.go b/vendor/github.com/stretchr/testify/require/require.go index 506a82f8077..d8921950d7b 100644 --- a/vendor/github.com/stretchr/testify/require/require.go +++ b/vendor/github.com/stretchr/testify/require/require.go @@ -34,9 +34,9 @@ func Conditionf(t TestingT, comp assert.Comparison, msg string, args ...interfac // Contains asserts that the specified string, list(array, slice...) or map contains the // specified substring or element. // -// assert.Contains(t, "Hello World", "World") -// assert.Contains(t, ["Hello", "World"], "World") -// assert.Contains(t, {"Hello": "World"}, "Hello") +// require.Contains(t, "Hello World", "World") +// require.Contains(t, ["Hello", "World"], "World") +// require.Contains(t, {"Hello": "World"}, "Hello") func Contains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -50,9 +50,9 @@ func Contains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...int // Containsf asserts that the specified string, list(array, slice...) or map contains the // specified substring or element. // -// assert.Containsf(t, "Hello World", "World", "error message %s", "formatted") -// assert.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted") -// assert.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted") +// require.Containsf(t, "Hello World", "World", "error message %s", "formatted") +// require.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted") +// require.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted") func Containsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -91,7 +91,7 @@ func DirExistsf(t TestingT, path string, msg string, args ...interface{}) { // listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, // the number of appearances of each of them in both lists should match. 
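// An illustrative reading of the sharper mismatch output above: when two
// same-named options are applied with different argument values, the diff
// now prints the argument type and value for each side, e.g.
//
//	Opt2(int) -> 613 // expected
//	Opt2(int) -> 614 // actual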
// -// assert.ElementsMatch(t, [1, 3, 2, 3], [1, 3, 3, 2]) +// require.ElementsMatch(t, [1, 3, 2, 3], [1, 3, 3, 2]) func ElementsMatch(t TestingT, listA interface{}, listB interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -106,7 +106,7 @@ func ElementsMatch(t TestingT, listA interface{}, listB interface{}, msgAndArgs // listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, // the number of appearances of each of them in both lists should match. // -// assert.ElementsMatchf(t, [1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted") +// require.ElementsMatchf(t, [1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted") func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -120,7 +120,7 @@ func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string // Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either // a slice or a channel with len == 0. // -// assert.Empty(t, obj) +// require.Empty(t, obj) func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -134,7 +134,7 @@ func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) { // Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either // a slice or a channel with len == 0. // -// assert.Emptyf(t, obj, "error message %s", "formatted") +// require.Emptyf(t, obj, "error message %s", "formatted") func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -147,7 +147,7 @@ func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) { // Equal asserts that two objects are equal. // -// assert.Equal(t, 123, 123) +// require.Equal(t, 123, 123) // // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). Function equality @@ -166,7 +166,7 @@ func Equal(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...i // and that it is equal to the provided error. // // actualObj, err := SomeFunction() -// assert.EqualError(t, err, expectedErrorString) +// require.EqualError(t, err, expectedErrorString) func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -181,7 +181,7 @@ func EqualError(t TestingT, theError error, errString string, msgAndArgs ...inte // and that it is equal to the provided error. 
// // actualObj, err := SomeFunction() -// assert.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted") +// require.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted") func EqualErrorf(t TestingT, theError error, errString string, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -200,8 +200,8 @@ func EqualErrorf(t TestingT, theError error, errString string, msg string, args // Exported int // notExported int // } -// assert.EqualExportedValues(t, S{1, 2}, S{1, 3}) => true -// assert.EqualExportedValues(t, S{1, 2}, S{2, 3}) => false +// require.EqualExportedValues(t, S{1, 2}, S{1, 3}) => true +// require.EqualExportedValues(t, S{1, 2}, S{2, 3}) => false func EqualExportedValues(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -220,8 +220,8 @@ func EqualExportedValues(t TestingT, expected interface{}, actual interface{}, m // Exported int // notExported int // } -// assert.EqualExportedValuesf(t, S{1, 2}, S{1, 3}, "error message %s", "formatted") => true -// assert.EqualExportedValuesf(t, S{1, 2}, S{2, 3}, "error message %s", "formatted") => false +// require.EqualExportedValuesf(t, S{1, 2}, S{1, 3}, "error message %s", "formatted") => true +// require.EqualExportedValuesf(t, S{1, 2}, S{2, 3}, "error message %s", "formatted") => false func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -232,10 +232,10 @@ func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{}, t.FailNow() } -// EqualValues asserts that two objects are equal or convertible to the same types -// and equal. +// EqualValues asserts that two objects are equal or convertible to the larger +// type and equal. // -// assert.EqualValues(t, uint32(123), int32(123)) +// require.EqualValues(t, uint32(123), int32(123)) func EqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -246,10 +246,10 @@ func EqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArg t.FailNow() } -// EqualValuesf asserts that two objects are equal or convertible to the same types -// and equal. +// EqualValuesf asserts that two objects are equal or convertible to the larger +// type and equal. // -// assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted") +// require.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted") func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -262,7 +262,7 @@ func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg stri // Equalf asserts that two objects are equal. // -// assert.Equalf(t, 123, 123, "error message %s", "formatted") +// require.Equalf(t, 123, 123, "error message %s", "formatted") // // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). Function equality @@ -280,8 +280,8 @@ func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, ar // Error asserts that a function returned an error (i.e. not `nil`). 
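// A hypothetical sketch in the require flavor used by the retargeted docs
// above; unlike assert, require stops the test on the first failure
// (SomeFunction and expectedErrorString are assumed):
//
//	actualObj, err := SomeFunction()
//	require.Error(t, err) // execution stops here if err is nil
//	require.EqualError(t, err, expectedErrorString)
//	_ = actualObj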
// // actualObj, err := SomeFunction() -// if assert.Error(t, err) { -// assert.Equal(t, expectedError, err) +// if require.Error(t, err) { +// require.Equal(t, expectedError, err) // } func Error(t TestingT, err error, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { @@ -321,7 +321,7 @@ func ErrorAsf(t TestingT, err error, target interface{}, msg string, args ...int // and that the error contains the specified substring. // // actualObj, err := SomeFunction() -// assert.ErrorContains(t, err, expectedErrorSubString) +// require.ErrorContains(t, err, expectedErrorSubString) func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -336,7 +336,7 @@ func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...in // and that the error contains the specified substring. // // actualObj, err := SomeFunction() -// assert.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted") +// require.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted") func ErrorContainsf(t TestingT, theError error, contains string, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -374,8 +374,8 @@ func ErrorIsf(t TestingT, err error, target error, msg string, args ...interface // Errorf asserts that a function returned an error (i.e. not `nil`). // // actualObj, err := SomeFunction() -// if assert.Errorf(t, err, "error message %s", "formatted") { -// assert.Equal(t, expectedErrorf, err) +// if require.Errorf(t, err, "error message %s", "formatted") { +// require.Equal(t, expectedErrorf, err) // } func Errorf(t TestingT, err error, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { @@ -390,7 +390,7 @@ func Errorf(t TestingT, err error, msg string, args ...interface{}) { // Eventually asserts that given condition will be met in waitFor time, // periodically checking target function each tick. 
// -// assert.Eventually(t, func() bool { return true; }, time.Second, 10*time.Millisecond) +// require.Eventually(t, func() bool { return true; }, time.Second, 10*time.Millisecond) func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -415,10 +415,10 @@ func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick t // time.Sleep(8*time.Second) // externalValue = true // }() -// assert.EventuallyWithT(t, func(c *assert.CollectT) { +// require.EventuallyWithT(t, func(c *require.CollectT) { // // add assertions as needed; any assertion failure will fail the current tick -// assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +// require.True(c, externalValue, "expected 'externalValue' to be true") +// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false") func EventuallyWithT(t TestingT, condition func(collect *assert.CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -443,10 +443,10 @@ func EventuallyWithT(t TestingT, condition func(collect *assert.CollectT), waitF // time.Sleep(8*time.Second) // externalValue = true // }() -// assert.EventuallyWithTf(t, func(c *assert.CollectT, "error message %s", "formatted") { +// require.EventuallyWithTf(t, func(c *require.CollectT, "error message %s", "formatted") { // // add assertions as needed; any assertion failure will fail the current tick -// assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +// require.True(c, externalValue, "expected 'externalValue' to be true") +// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false") func EventuallyWithTf(t TestingT, condition func(collect *assert.CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -460,7 +460,7 @@ func EventuallyWithTf(t TestingT, condition func(collect *assert.CollectT), wait // Eventuallyf asserts that given condition will be met in waitFor time, // periodically checking target function each tick. // -// assert.Eventuallyf(t, func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") +// require.Eventuallyf(t, func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -473,7 +473,7 @@ func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick // Exactly asserts that two objects are equal in value and type. // -// assert.Exactly(t, int32(123), int64(123)) +// require.Exactly(t, int32(123), int64(123)) func Exactly(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -486,7 +486,7 @@ func Exactly(t TestingT, expected interface{}, actual interface{}, msgAndArgs .. // Exactlyf asserts that two objects are equal in value and type. 
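// A hypothetical sketch of the polling assertion documented above
// (serverUp is an assumed readiness probe):
//
//	require.Eventually(t, func() bool {
//		return serverUp()
//	}, time.Second, 10*time.Millisecond)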
// -// assert.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted") +// require.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted") func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -543,7 +543,7 @@ func Failf(t TestingT, failureMessage string, msg string, args ...interface{}) { // False asserts that the specified value is false. // -// assert.False(t, myBool) +// require.False(t, myBool) func False(t TestingT, value bool, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -556,7 +556,7 @@ func False(t TestingT, value bool, msgAndArgs ...interface{}) { // Falsef asserts that the specified value is false. // -// assert.Falsef(t, myBool, "error message %s", "formatted") +// require.Falsef(t, myBool, "error message %s", "formatted") func Falsef(t TestingT, value bool, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -593,9 +593,9 @@ func FileExistsf(t TestingT, path string, msg string, args ...interface{}) { // Greater asserts that the first element is greater than the second // -// assert.Greater(t, 2, 1) -// assert.Greater(t, float64(2), float64(1)) -// assert.Greater(t, "b", "a") +// require.Greater(t, 2, 1) +// require.Greater(t, float64(2), float64(1)) +// require.Greater(t, "b", "a") func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -608,10 +608,10 @@ func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface // GreaterOrEqual asserts that the first element is greater than or equal to the second // -// assert.GreaterOrEqual(t, 2, 1) -// assert.GreaterOrEqual(t, 2, 2) -// assert.GreaterOrEqual(t, "b", "a") -// assert.GreaterOrEqual(t, "b", "b") +// require.GreaterOrEqual(t, 2, 1) +// require.GreaterOrEqual(t, 2, 2) +// require.GreaterOrEqual(t, "b", "a") +// require.GreaterOrEqual(t, "b", "b") func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -624,10 +624,10 @@ func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...in // GreaterOrEqualf asserts that the first element is greater than or equal to the second // -// assert.GreaterOrEqualf(t, 2, 1, "error message %s", "formatted") -// assert.GreaterOrEqualf(t, 2, 2, "error message %s", "formatted") -// assert.GreaterOrEqualf(t, "b", "a", "error message %s", "formatted") -// assert.GreaterOrEqualf(t, "b", "b", "error message %s", "formatted") +// require.GreaterOrEqualf(t, 2, 1, "error message %s", "formatted") +// require.GreaterOrEqualf(t, 2, 2, "error message %s", "formatted") +// require.GreaterOrEqualf(t, "b", "a", "error message %s", "formatted") +// require.GreaterOrEqualf(t, "b", "b", "error message %s", "formatted") func GreaterOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -640,9 +640,9 @@ func GreaterOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, arg // Greaterf asserts that the first element is greater than the second // -// assert.Greaterf(t, 2, 1, "error message %s", "formatted") -// assert.Greaterf(t, float64(2), float64(1), "error message %s", "formatted") -// assert.Greaterf(t, "b", "a", "error message %s", "formatted") +// require.Greaterf(t, 2, 1, "error message %s", "formatted") +// require.Greaterf(t, float64(2), float64(1), "error 
message %s", "formatted") +// require.Greaterf(t, "b", "a", "error message %s", "formatted") func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -656,7 +656,7 @@ func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...in // HTTPBodyContains asserts that a specified handler returns a // body that contains a string. // -// assert.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") +// require.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") // // Returns whether the assertion was successful (true) or not (false). func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) { @@ -672,7 +672,7 @@ func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method string, url s // HTTPBodyContainsf asserts that a specified handler returns a // body that contains a string. // -// assert.HTTPBodyContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// require.HTTPBodyContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") // // Returns whether the assertion was successful (true) or not (false). func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) { @@ -688,7 +688,7 @@ func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url // HTTPBodyNotContains asserts that a specified handler returns a // body that does not contain a string. // -// assert.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") +// require.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") // // Returns whether the assertion was successful (true) or not (false). func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) { @@ -704,7 +704,7 @@ func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method string, ur // HTTPBodyNotContainsf asserts that a specified handler returns a // body that does not contain a string. // -// assert.HTTPBodyNotContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// require.HTTPBodyNotContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") // // Returns whether the assertion was successful (true) or not (false). func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) { @@ -719,7 +719,7 @@ func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, u // HTTPError asserts that a specified handler returns an error status code. // -// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// require.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} // // Returns whether the assertion was successful (true) or not (false). 
func HTTPError(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) { @@ -734,7 +734,7 @@ func HTTPError(t TestingT, handler http.HandlerFunc, method string, url string, // HTTPErrorf asserts that a specified handler returns an error status code. // -// assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// require.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} // // Returns whether the assertion was successful (true) or not (false). func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { @@ -749,7 +749,7 @@ func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, // HTTPRedirect asserts that a specified handler returns a redirect status code. // -// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// require.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} // // Returns whether the assertion was successful (true) or not (false). func HTTPRedirect(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) { @@ -764,7 +764,7 @@ func HTTPRedirect(t TestingT, handler http.HandlerFunc, method string, url strin // HTTPRedirectf asserts that a specified handler returns a redirect status code. // -// assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// require.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} // // Returns whether the assertion was successful (true) or not (false). func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { @@ -779,7 +779,7 @@ func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url stri // HTTPStatusCode asserts that a specified handler returns a specified status code. // -// assert.HTTPStatusCode(t, myHandler, "GET", "/notImplemented", nil, 501) +// require.HTTPStatusCode(t, myHandler, "GET", "/notImplemented", nil, 501) // // Returns whether the assertion was successful (true) or not (false). func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msgAndArgs ...interface{}) { @@ -794,7 +794,7 @@ func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method string, url str // HTTPStatusCodef asserts that a specified handler returns a specified status code. // -// assert.HTTPStatusCodef(t, myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted") +// require.HTTPStatusCodef(t, myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted") // // Returns whether the assertion was successful (true) or not (false). func HTTPStatusCodef(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msg string, args ...interface{}) { @@ -809,7 +809,7 @@ func HTTPStatusCodef(t TestingT, handler http.HandlerFunc, method string, url st // HTTPSuccess asserts that a specified handler returns a success status code. // -// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil) +// require.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil) // // Returns whether the assertion was successful (true) or not (false). 
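For readers skimming this vendor sync: the require.HTTP* helpers above drive an http.HandlerFunc in-process with a synthetic request, so no server or listener is involved. A minimal sketch under assumed names (pingHandler and the test are illustrative, not part of the vendored code):

package example_test

import (
	"fmt"
	"net/http"
	"testing"

	"github.com/stretchr/testify/require"
)

// pingHandler is a hypothetical handler used only for illustration.
func pingHandler(w http.ResponseWriter, r *http.Request) {
	w.WriteHeader(http.StatusOK)
	fmt.Fprint(w, "pong")
}

func TestPingHandler(t *testing.T) {
	// Each helper invokes the handler directly and inspects the recorded response.
	require.HTTPSuccess(t, pingHandler, "GET", "/ping", nil)
	require.HTTPBodyContains(t, pingHandler, "GET", "/ping", nil, "pong")
	require.HTTPStatusCode(t, pingHandler, "GET", "/ping", nil, 200)
}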
func HTTPSuccess(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) {
@@ -824,7 +824,7 @@ func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url string
// HTTPSuccessf asserts that a specified handler returns a success status code.
//
-// assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted")
+// require.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted")
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) {
@@ -839,7 +839,7 @@ func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url strin
// Implements asserts that an object implements the specified interface.
//
-// assert.Implements(t, (*MyInterface)(nil), new(MyObject))
+// require.Implements(t, (*MyInterface)(nil), new(MyObject))
func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -852,7 +852,7 @@ func Implements(t TestingT, interfaceObject interface{}, object interface{}, msg
// Implementsf asserts that an object implements the specified interface.
//
-// assert.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
+// require.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -865,7 +865,7 @@ func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, ms
// InDelta asserts that the two numerals are within delta of each other.
//
-// assert.InDelta(t, math.Pi, 22/7.0, 0.01)
+// require.InDelta(t, math.Pi, 22/7.0, 0.01)
func InDelta(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -922,7 +922,7 @@ func InDeltaSlicef(t TestingT, expected interface{}, actual interface{}, delta f
// InDeltaf asserts that the two numerals are within delta of each other.
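A quick illustration of the two helpers documented above, using only standard-library types (the chosen values are examples, not from the patch): Implements wants a nil pointer to the interface type so it has something to reflect on, and InDelta takes an absolute tolerance.

package example_test

import (
	"io"
	"math"
	"strings"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestImplementsAndInDelta(t *testing.T) {
	// *strings.Reader implements io.Reader; the (*io.Reader)(nil) idiom
	// passes the interface type itself to the assertion.
	require.Implements(t, (*io.Reader)(nil), strings.NewReader("x"))

	// 22/7 approximates pi to within about 1.3e-3, so a 2e-3 delta passes.
	require.InDelta(t, math.Pi, 22.0/7.0, 2e-3)
}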
//
-// assert.InDeltaf(t, math.Pi, 22/7.0, 0.01, "error message %s", "formatted")
+// require.InDeltaf(t, math.Pi, 22/7.0, 0.01, "error message %s", "formatted")
func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -979,9 +979,9 @@ func InEpsilonf(t TestingT, expected interface{}, actual interface{}, epsilon fl
// IsDecreasing asserts that the collection is decreasing
//
-// assert.IsDecreasing(t, []int{2, 1, 0})
-// assert.IsDecreasing(t, []float{2, 1})
-// assert.IsDecreasing(t, []string{"b", "a"})
+// require.IsDecreasing(t, []int{2, 1, 0})
+// require.IsDecreasing(t, []float64{2, 1})
+// require.IsDecreasing(t, []string{"b", "a"})
func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -994,9 +994,9 @@ func IsDecreasingf(t TestingT, object interface{}, msg string, args ...interface
// IsDecreasingf asserts that the collection is decreasing
//
-// assert.IsDecreasingf(t, []int{2, 1, 0}, "error message %s", "formatted")
-// assert.IsDecreasingf(t, []float{2, 1}, "error message %s", "formatted")
-// assert.IsDecreasingf(t, []string{"b", "a"}, "error message %s", "formatted")
+// require.IsDecreasingf(t, []int{2, 1, 0}, "error message %s", "formatted")
+// require.IsDecreasingf(t, []float64{2, 1}, "error message %s", "formatted")
+// require.IsDecreasingf(t, []string{"b", "a"}, "error message %s", "formatted")
func IsDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1009,9 +1009,9 @@ func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) {
// IsIncreasing asserts that the collection is increasing
//
-// assert.IsIncreasing(t, []int{1, 2, 3})
-// assert.IsIncreasing(t, []float{1, 2})
-// assert.IsIncreasing(t, []string{"a", "b"})
+// require.IsIncreasing(t, []int{1, 2, 3})
+// require.IsIncreasing(t, []float64{1, 2})
+// require.IsIncreasing(t, []string{"a", "b"})
func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1024,9 +1024,9 @@ func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) {
// IsIncreasingf asserts that the collection is increasing
//
-// assert.IsIncreasingf(t, []int{1, 2, 3}, "error message %s", "formatted")
-// assert.IsIncreasingf(t, []float{1, 2}, "error message %s", "formatted")
-// assert.IsIncreasingf(t, []string{"a", "b"}, "error message %s", "formatted")
+// require.IsIncreasingf(t, []int{1, 2, 3}, "error message %s", "formatted")
+// require.IsIncreasingf(t, []float64{1, 2}, "error message %s", "formatted")
+// require.IsIncreasingf(t, []string{"a", "b"}, "error message %s", "formatted")
func IsIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1039,9 +1039,9 @@ func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{})
// IsNonDecreasing asserts that the collection is not decreasing
//
-// assert.IsNonDecreasing(t, []int{1, 1, 2})
-// assert.IsNonDecreasing(t, []float{1, 2})
-// assert.IsNonDecreasing(t, []string{"a", "b"})
+// require.IsNonDecreasing(t, []int{1, 1, 2})
+// require.IsNonDecreasing(t, []float64{1, 2})
+// require.IsNonDecreasing(t, []string{"a", "b"})
func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1054,9 +1054,9 @@ func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{})
// IsNonDecreasingf asserts that the collection is not decreasing
//
-// assert.IsNonDecreasingf(t, []int{1, 1, 2}, "error message %s", "formatted")
-// assert.IsNonDecreasingf(t, []float{1, 2}, "error message %s", "formatted")
-// assert.IsNonDecreasingf(t, []string{"a", "b"}, "error message %s", "formatted")
+// require.IsNonDecreasingf(t, []int{1, 1, 2}, "error message %s", "formatted")
+// require.IsNonDecreasingf(t, []float64{1, 2}, "error message %s", "formatted")
+// require.IsNonDecreasingf(t, []string{"a", "b"}, "error message %s", "formatted")
func IsNonDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1069,9 +1069,9 @@ func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{})
// IsNonIncreasing asserts that the collection is not increasing
//
-// assert.IsNonIncreasing(t, []int{2, 1, 1})
-// assert.IsNonIncreasing(t, []float{2, 1})
-// assert.IsNonIncreasing(t, []string{"b", "a"})
+// require.IsNonIncreasing(t, []int{2, 1, 1})
+// require.IsNonIncreasing(t, []float64{2, 1})
+// require.IsNonIncreasing(t, []string{"b", "a"})
func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1084,9 +1084,9 @@ func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{})
// IsNonIncreasingf asserts that the collection is not increasing
//
-// assert.IsNonIncreasingf(t, []int{2, 1, 1}, "error message %s", "formatted")
-// assert.IsNonIncreasingf(t, []float{2, 1}, "error message %s", "formatted")
-// assert.IsNonIncreasingf(t, []string{"b", "a"}, "error message %s", "formatted")
+// require.IsNonIncreasingf(t, []int{2, 1, 1}, "error message %s", "formatted")
+// require.IsNonIncreasingf(t, []float64{2, 1}, "error message %s", "formatted")
+// require.IsNonIncreasingf(t, []string{"b", "a"}, "error message %s", "formatted")
func IsNonIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1121,7 +1121,7 @@ func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg strin
// JSONEq asserts that two JSON strings are equivalent.
//
-// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
+// require.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1134,7 +1134,7 @@ func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{
// JSONEqf asserts that two JSON strings are equivalent.
//
-// assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted")
+// require.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted")
func JSONEqf(t TestingT, expected string, actual string, msg string, args ...int
// Len asserts that the specified object has a specific length.
// Len also fails if the object has a type that len() does not accept.
//
-// assert.Len(t, mySlice, 3)
+// require.Len(t, mySlice, 3)
func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1162,7 +1162,7 @@ func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{})
// Lenf asserts that the specified object has a specific length.
// Lenf also fails if the object has a type that len() does not accept.
//
-// assert.Lenf(t, mySlice, 3, "error message %s", "formatted")
+// require.Lenf(t, mySlice, 3, "error message %s", "formatted")
func Lenf(t TestingT, object interface{}, length int, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1175,9 +1175,9 @@ func Lenf(t TestingT, object interface{}, length int, msg string, args ...interf
// Less asserts that the first element is less than the second
//
-// assert.Less(t, 1, 2)
-// assert.Less(t, float64(1), float64(2))
-// assert.Less(t, "a", "b")
+// require.Less(t, 1, 2)
+// require.Less(t, float64(1), float64(2))
+// require.Less(t, "a", "b")
func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1190,10 +1190,10 @@ func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{})
// LessOrEqual asserts that the first element is less than or equal to the second
//
-// assert.LessOrEqual(t, 1, 2)
-// assert.LessOrEqual(t, 2, 2)
-// assert.LessOrEqual(t, "a", "b")
-// assert.LessOrEqual(t, "b", "b")
+// require.LessOrEqual(t, 1, 2)
+// require.LessOrEqual(t, 2, 2)
+// require.LessOrEqual(t, "a", "b")
+// require.LessOrEqual(t, "b", "b")
func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1206,10 +1206,10 @@ func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...inter
// LessOrEqualf asserts that the first element is less than or equal to the second
//
-// assert.LessOrEqualf(t, 1, 2, "error message %s", "formatted")
-// assert.LessOrEqualf(t, 2, 2, "error message %s", "formatted")
-// assert.LessOrEqualf(t, "a", "b", "error message %s", "formatted")
-// assert.LessOrEqualf(t, "b", "b", "error message %s", "formatted")
+// require.LessOrEqualf(t, 1, 2, "error message %s", "formatted")
+// require.LessOrEqualf(t, 2, 2, "error message %s", "formatted")
+// require.LessOrEqualf(t, "a", "b", "error message %s", "formatted")
+// require.LessOrEqualf(t, "b", "b", "error message %s", "formatted")
func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1222,9 +1222,9 @@ func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args .
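A usage note on the helpers above: JSONEq compares parsed JSON rather than raw strings, so key order and whitespace are irrelevant, while Len defers to len(). An illustrative test (values assumed, not from the patch):

package example_test

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestJSONEqAndLen(t *testing.T) {
	// Same object, different key order and spacing: still equal as JSON.
	require.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo":"bar","hello":"world"}`)

	// Len works on anything len() accepts: strings, slices, maps, channels.
	mySlice := []string{"a", "b", "c"}
	require.Len(t, mySlice, 3)
	require.Len(t, "abc", 3)
}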
// Lessf asserts that the first element is less than the second
//
-// assert.Lessf(t, 1, 2, "error message %s", "formatted")
-// assert.Lessf(t, float64(1), float64(2), "error message %s", "formatted")
-// assert.Lessf(t, "a", "b", "error message %s", "formatted")
+// require.Lessf(t, 1, 2, "error message %s", "formatted")
+// require.Lessf(t, float64(1), float64(2), "error message %s", "formatted")
+// require.Lessf(t, "a", "b", "error message %s", "formatted")
func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1237,8 +1237,8 @@ func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...inter
// Negative asserts that the specified element is negative
//
-// assert.Negative(t, -1)
-// assert.Negative(t, -1.23)
+// require.Negative(t, -1)
+// require.Negative(t, -1.23)
func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1251,8 +1251,8 @@ func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) {
// Negativef asserts that the specified element is negative
//
-// assert.Negativef(t, -1, "error message %s", "formatted")
-// assert.Negativef(t, -1.23, "error message %s", "formatted")
+// require.Negativef(t, -1, "error message %s", "formatted")
+// require.Negativef(t, -1.23, "error message %s", "formatted")
func Negativef(t TestingT, e interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1266,7 +1266,7 @@ func Negativef(t TestingT, e interface{}, msg string, args ...interface{}) {
// Never asserts that the given condition is never satisfied within the waitFor time,
// periodically checking the target function each tick.
//
-// assert.Never(t, func() bool { return false; }, time.Second, 10*time.Millisecond)
+// require.Never(t, func() bool { return false; }, time.Second, 10*time.Millisecond)
func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1280,7 +1280,7 @@ func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.D
// Neverf asserts that the given condition is never satisfied within the waitFor time,
// periodically checking the target function each tick.
//
-// assert.Neverf(t, func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
+// require.Neverf(t, func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
func Neverf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1293,7 +1293,7 @@ func Neverf(t TestingT, condition func() bool, waitFor time.Duration, tick time.
// Nil asserts that the specified object is nil.
//
-// assert.Nil(t, err)
+// require.Nil(t, err)
func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1306,7 +1306,7 @@ func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) {
// Nilf asserts that the specified object is nil.
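Illustrative usage of Negative and Never from above; note that Never keeps polling the condition every tick for the whole waitFor window, so the test takes at least that long by design:

package example_test

import (
	"testing"
	"time"

	"github.com/stretchr/testify/require"
)

func TestNeverAndNegative(t *testing.T) {
	require.Negative(t, -1.23)

	// Fails the test if the condition ever reports true during the window.
	require.Never(t, func() bool { return false }, 100*time.Millisecond, 10*time.Millisecond)
}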
// -// assert.Nilf(t, err, "error message %s", "formatted") +// require.Nilf(t, err, "error message %s", "formatted") func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1344,8 +1344,8 @@ func NoDirExistsf(t TestingT, path string, msg string, args ...interface{}) { // NoError asserts that a function returned no error (i.e. `nil`). // // actualObj, err := SomeFunction() -// if assert.NoError(t, err) { -// assert.Equal(t, expectedObj, actualObj) +// if require.NoError(t, err) { +// require.Equal(t, expectedObj, actualObj) // } func NoError(t TestingT, err error, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { @@ -1360,8 +1360,8 @@ func NoError(t TestingT, err error, msgAndArgs ...interface{}) { // NoErrorf asserts that a function returned no error (i.e. `nil`). // // actualObj, err := SomeFunction() -// if assert.NoErrorf(t, err, "error message %s", "formatted") { -// assert.Equal(t, expectedObj, actualObj) +// if require.NoErrorf(t, err, "error message %s", "formatted") { +// require.Equal(t, expectedObj, actualObj) // } func NoErrorf(t TestingT, err error, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { @@ -1400,9 +1400,9 @@ func NoFileExistsf(t TestingT, path string, msg string, args ...interface{}) { // NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the // specified substring or element. // -// assert.NotContains(t, "Hello World", "Earth") -// assert.NotContains(t, ["Hello", "World"], "Earth") -// assert.NotContains(t, {"Hello": "World"}, "Earth") +// require.NotContains(t, "Hello World", "Earth") +// require.NotContains(t, ["Hello", "World"], "Earth") +// require.NotContains(t, {"Hello": "World"}, "Earth") func NotContains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1416,9 +1416,9 @@ func NotContains(t TestingT, s interface{}, contains interface{}, msgAndArgs ... // NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the // specified substring or element. // -// assert.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted") -// assert.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted") -// assert.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted") +// require.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted") +// require.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted") +// require.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted") func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1429,11 +1429,51 @@ func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, a t.FailNow() } +// NotElementsMatch asserts that the specified listA(array, slice...) is NOT equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should not match. +// This is an inverse of ElementsMatch. 
+// +// require.NotElementsMatch(t, [1, 1, 2, 3], [1, 1, 2, 3]) -> false +// +// require.NotElementsMatch(t, [1, 1, 2, 3], [1, 2, 3]) -> true +// +// require.NotElementsMatch(t, [1, 2, 3], [1, 2, 4]) -> true +func NotElementsMatch(t TestingT, listA interface{}, listB interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.NotElementsMatch(t, listA, listB, msgAndArgs...) { + return + } + t.FailNow() +} + +// NotElementsMatchf asserts that the specified listA(array, slice...) is NOT equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should not match. +// This is an inverse of ElementsMatch. +// +// require.NotElementsMatchf(t, [1, 1, 2, 3], [1, 1, 2, 3], "error message %s", "formatted") -> false +// +// require.NotElementsMatchf(t, [1, 1, 2, 3], [1, 2, 3], "error message %s", "formatted") -> true +// +// require.NotElementsMatchf(t, [1, 2, 3], [1, 2, 4], "error message %s", "formatted") -> true +func NotElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.NotElementsMatchf(t, listA, listB, msg, args...) { + return + } + t.FailNow() +} + // NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either // a slice or a channel with len == 0. // -// if assert.NotEmpty(t, obj) { -// assert.Equal(t, "two", obj[1]) +// if require.NotEmpty(t, obj) { +// require.Equal(t, "two", obj[1]) // } func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { @@ -1448,8 +1488,8 @@ func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) { // NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either // a slice or a channel with len == 0. // -// if assert.NotEmptyf(t, obj, "error message %s", "formatted") { -// assert.Equal(t, "two", obj[1]) +// if require.NotEmptyf(t, obj, "error message %s", "formatted") { +// require.Equal(t, "two", obj[1]) // } func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { @@ -1463,7 +1503,7 @@ func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) // NotEqual asserts that the specified values are NOT equal. // -// assert.NotEqual(t, obj1, obj2) +// require.NotEqual(t, obj1, obj2) // // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). @@ -1479,7 +1519,7 @@ func NotEqual(t TestingT, expected interface{}, actual interface{}, msgAndArgs . 
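NotElementsMatch, added in this testify bump, passes only when the two lists are not permutations of each other, duplicate counts included. An illustrative test matching the doc examples above:

package example_test

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestNotElementsMatch(t *testing.T) {
	// Duplicate counts differ (two 1s vs one 1), so the lists do not match.
	require.NotElementsMatch(t, []int{1, 1, 2, 3}, []int{1, 2, 3})

	// Different elements entirely, so they do not match either.
	require.NotElementsMatch(t, []int{1, 2, 3}, []int{1, 2, 4})
}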
// NotEqualValues asserts that two objects are not equal even when converted to the same type
//
-// assert.NotEqualValues(t, obj1, obj2)
+// require.NotEqualValues(t, obj1, obj2)
func NotEqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1492,7 +1532,7 @@ func NotEqualValues(t TestingT, expected interface{}, actual interface{}, msgAnd
// NotEqualValuesf asserts that two objects are not equal even when converted to the same type
//
-// assert.NotEqualValuesf(t, obj1, obj2, "error message %s", "formatted")
+// require.NotEqualValuesf(t, obj1, obj2, "error message %s", "formatted")
func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1505,7 +1545,7 @@ func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg s
// NotEqualf asserts that the specified values are NOT equal.
//
-// assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted")
+// require.NotEqualf(t, obj1, obj2, "error message %s", "formatted")
//
// Pointer variable equality is determined based on the equality of the
// referenced values (as opposed to the memory addresses).
@@ -1519,7 +1559,31 @@ func NotEqualf(t TestingT, expected interface{}, actual interface{}, msg string,
t.FailNow()
}
-// NotErrorIs asserts that at none of the errors in err's chain matches target.
+// NotErrorAs asserts that none of the errors in err's chain matches target.
+// If one does match, the assertion fails and target is set to that error value.
+func NotErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.NotErrorAs(t, err, target, msgAndArgs...) {
+ return
+ }
+ t.FailNow()
+}
+
+// NotErrorAsf asserts that none of the errors in err's chain matches target.
+// If one does match, the assertion fails and target is set to that error value.
+func NotErrorAsf(t TestingT, err error, target interface{}, msg string, args ...interface{}) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if assert.NotErrorAsf(t, err, target, msg, args...) {
+ return
+ }
+ t.FailNow()
+}
+
+// NotErrorIs asserts that none of the errors in err's chain matches target.
// This is a wrapper for errors.Is.
func NotErrorIs(t TestingT, err error, target error, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
@@ -1531,7 +1595,7 @@ func NotErrorIs(t TestingT, err error, target error, msgAndArgs ...interface{})
t.FailNow()
}
-// NotErrorIsf asserts that at none of the errors in err's chain matches target.
+// NotErrorIsf asserts that none of the errors in err's chain matches target.
// This is a wrapper for errors.Is.
func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
@@ -1545,7 +1609,7 @@ func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interf
// NotImplements asserts that an object does not implement the specified interface.
//
-// assert.NotImplements(t, (*MyInterface)(nil), new(MyObject))
+// require.NotImplements(t, (*MyInterface)(nil), new(MyObject))
func NotImplements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1558,7 +1622,7 @@ func NotImplements(t TestingT, interfaceObject interface{}, object interface{},
// NotImplementsf asserts that an object does not implement the specified interface.
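NotErrorAs, also new in this bump, is the negation of errors.As-based matching: it passes when no error in the chain is assignable to the target's type. A sketch with a hypothetical error type (notFoundError is illustrative, not from the vendored code):

package example_test

import (
	"errors"
	"fmt"
	"testing"

	"github.com/stretchr/testify/require"
)

// notFoundError is a hypothetical error type used only for illustration.
type notFoundError struct{ key string }

func (e *notFoundError) Error() string { return "not found: " + e.key }

func TestNotErrorAs(t *testing.T) {
	err := fmt.Errorf("lookup failed: %w", errors.New("timeout"))

	// No *notFoundError anywhere in the chain, so the assertion passes.
	var target *notFoundError
	require.NotErrorAs(t, err, &target)
}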
// -// assert.NotImplementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted") +// require.NotImplementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted") func NotImplementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1571,7 +1635,7 @@ func NotImplementsf(t TestingT, interfaceObject interface{}, object interface{}, // NotNil asserts that the specified object is not nil. // -// assert.NotNil(t, err) +// require.NotNil(t, err) func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1584,7 +1648,7 @@ func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) { // NotNilf asserts that the specified object is not nil. // -// assert.NotNilf(t, err, "error message %s", "formatted") +// require.NotNilf(t, err, "error message %s", "formatted") func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1597,7 +1661,7 @@ func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) { // NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. // -// assert.NotPanics(t, func(){ RemainCalm() }) +// require.NotPanics(t, func(){ RemainCalm() }) func NotPanics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1610,7 +1674,7 @@ func NotPanics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { // NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic. // -// assert.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted") +// require.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted") func NotPanicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1623,8 +1687,8 @@ func NotPanicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interfac // NotRegexp asserts that a specified regexp does not match a string. // -// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") -// assert.NotRegexp(t, "^start", "it's not starting") +// require.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") +// require.NotRegexp(t, "^start", "it's not starting") func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1637,8 +1701,8 @@ func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interf // NotRegexpf asserts that a specified regexp does not match a string. // -// assert.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted") -// assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted") +// require.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted") +// require.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted") func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1651,7 +1715,7 @@ func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args .. // NotSame asserts that two pointers do not reference the same object. // -// assert.NotSame(t, ptr1, ptr2) +// require.NotSame(t, ptr1, ptr2) // // Both arguments must be pointer variables. 
Pointer variable sameness is // determined based on the equality of both type and value. @@ -1667,7 +1731,7 @@ func NotSame(t TestingT, expected interface{}, actual interface{}, msgAndArgs .. // NotSamef asserts that two pointers do not reference the same object. // -// assert.NotSamef(t, ptr1, ptr2, "error message %s", "formatted") +// require.NotSamef(t, ptr1, ptr2, "error message %s", "formatted") // // Both arguments must be pointer variables. Pointer variable sameness is // determined based on the equality of both type and value. @@ -1685,8 +1749,8 @@ func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, // contain all elements given in the specified subset list(array, slice...) or // map. // -// assert.NotSubset(t, [1, 3, 4], [1, 2]) -// assert.NotSubset(t, {"x": 1, "y": 2}, {"z": 3}) +// require.NotSubset(t, [1, 3, 4], [1, 2]) +// require.NotSubset(t, {"x": 1, "y": 2}, {"z": 3}) func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1701,8 +1765,8 @@ func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...i // contain all elements given in the specified subset list(array, slice...) or // map. // -// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted") -// assert.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") +// require.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted") +// require.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1737,7 +1801,7 @@ func NotZerof(t TestingT, i interface{}, msg string, args ...interface{}) { // Panics asserts that the code inside the specified PanicTestFunc panics. // -// assert.Panics(t, func(){ GoCrazy() }) +// require.Panics(t, func(){ GoCrazy() }) func Panics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1752,7 +1816,7 @@ func Panics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { // panics, and that the recovered panic value is an error that satisfies the // EqualError comparison. // -// assert.PanicsWithError(t, "crazy error", func(){ GoCrazy() }) +// require.PanicsWithError(t, "crazy error", func(){ GoCrazy() }) func PanicsWithError(t TestingT, errString string, f assert.PanicTestFunc, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1767,7 +1831,7 @@ func PanicsWithError(t TestingT, errString string, f assert.PanicTestFunc, msgAn // panics, and that the recovered panic value is an error that satisfies the // EqualError comparison. // -// assert.PanicsWithErrorf(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") +// require.PanicsWithErrorf(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") func PanicsWithErrorf(t TestingT, errString string, f assert.PanicTestFunc, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1781,7 +1845,7 @@ func PanicsWithErrorf(t TestingT, errString string, f assert.PanicTestFunc, msg // PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that // the recovered panic value equals the expected panic value. 
// -// assert.PanicsWithValue(t, "crazy error", func(){ GoCrazy() }) +// require.PanicsWithValue(t, "crazy error", func(){ GoCrazy() }) func PanicsWithValue(t TestingT, expected interface{}, f assert.PanicTestFunc, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1795,7 +1859,7 @@ func PanicsWithValue(t TestingT, expected interface{}, f assert.PanicTestFunc, m // PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that // the recovered panic value equals the expected panic value. // -// assert.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") +// require.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") func PanicsWithValuef(t TestingT, expected interface{}, f assert.PanicTestFunc, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1808,7 +1872,7 @@ func PanicsWithValuef(t TestingT, expected interface{}, f assert.PanicTestFunc, // Panicsf asserts that the code inside the specified PanicTestFunc panics. // -// assert.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted") +// require.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted") func Panicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1821,8 +1885,8 @@ func Panicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interface{} // Positive asserts that the specified element is positive // -// assert.Positive(t, 1) -// assert.Positive(t, 1.23) +// require.Positive(t, 1) +// require.Positive(t, 1.23) func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1835,8 +1899,8 @@ func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) { // Positivef asserts that the specified element is positive // -// assert.Positivef(t, 1, "error message %s", "formatted") -// assert.Positivef(t, 1.23, "error message %s", "formatted") +// require.Positivef(t, 1, "error message %s", "formatted") +// require.Positivef(t, 1.23, "error message %s", "formatted") func Positivef(t TestingT, e interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1849,8 +1913,8 @@ func Positivef(t TestingT, e interface{}, msg string, args ...interface{}) { // Regexp asserts that a specified regexp matches a string. // -// assert.Regexp(t, regexp.MustCompile("start"), "it's starting") -// assert.Regexp(t, "start...$", "it's not starting") +// require.Regexp(t, regexp.MustCompile("start"), "it's starting") +// require.Regexp(t, "start...$", "it's not starting") func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1863,8 +1927,8 @@ func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface // Regexpf asserts that a specified regexp matches a string. 
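Illustrative usage of the panic and regexp helpers documented above (the panic value and patterns are examples only): PanicsWithValue compares the recovered value for exact equality, and Regexp accepts either a compiled *regexp.Regexp or a pattern string.

package example_test

import (
	"regexp"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestPanicsAndRegexp(t *testing.T) {
	// The recovered panic value must equal the expected value exactly.
	require.PanicsWithValue(t, "crazy error", func() { panic("crazy error") })

	// Either form works; a string is compiled for you.
	require.Regexp(t, regexp.MustCompile("start"), "it's starting")
	require.Regexp(t, "start", "it's starting")
}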
// -// assert.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted") -// assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted") +// require.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted") +// require.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted") func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1877,7 +1941,7 @@ func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...in // Same asserts that two pointers reference the same object. // -// assert.Same(t, ptr1, ptr2) +// require.Same(t, ptr1, ptr2) // // Both arguments must be pointer variables. Pointer variable sameness is // determined based on the equality of both type and value. @@ -1893,7 +1957,7 @@ func Same(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...in // Samef asserts that two pointers reference the same object. // -// assert.Samef(t, ptr1, ptr2, "error message %s", "formatted") +// require.Samef(t, ptr1, ptr2, "error message %s", "formatted") // // Both arguments must be pointer variables. Pointer variable sameness is // determined based on the equality of both type and value. @@ -1910,8 +1974,8 @@ func Samef(t TestingT, expected interface{}, actual interface{}, msg string, arg // Subset asserts that the specified list(array, slice...) or map contains all // elements given in the specified subset list(array, slice...) or map. // -// assert.Subset(t, [1, 2, 3], [1, 2]) -// assert.Subset(t, {"x": 1, "y": 2}, {"x": 1}) +// require.Subset(t, [1, 2, 3], [1, 2]) +// require.Subset(t, {"x": 1, "y": 2}, {"x": 1}) func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1925,8 +1989,8 @@ func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...inte // Subsetf asserts that the specified list(array, slice...) or map contains all // elements given in the specified subset list(array, slice...) or map. // -// assert.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted") -// assert.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") +// require.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted") +// require.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1939,7 +2003,7 @@ func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args // True asserts that the specified value is true. // -// assert.True(t, myBool) +// require.True(t, myBool) func True(t TestingT, value bool, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1952,7 +2016,7 @@ func True(t TestingT, value bool, msgAndArgs ...interface{}) { // Truef asserts that the specified value is true. // -// assert.Truef(t, myBool, "error message %s", "formatted") +// require.Truef(t, myBool, "error message %s", "formatted") func Truef(t TestingT, value bool, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1965,7 +2029,7 @@ func Truef(t TestingT, value bool, msg string, args ...interface{}) { // WithinDuration asserts that the two times are within duration delta of each other. 
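Illustrative usage of Same and Subset from above: Same demands pointer identity, not merely equal pointed-to values, and Subset accepts maps as well as slices (values assumed, not from the patch):

package example_test

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestSameAndSubset(t *testing.T) {
	// Same requires the exact same pointer, with matching type.
	x := 1
	p := &x
	require.Same(t, p, p)

	// Subset works for slices and for maps (key/value pairs must match).
	require.Subset(t, []int{1, 2, 3}, []int{1, 2})
	require.Subset(t, map[string]int{"x": 1, "y": 2}, map[string]int{"x": 1})
}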
// -// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second) +// require.WithinDuration(t, time.Now(), time.Now(), 10*time.Second) func WithinDuration(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1978,7 +2042,7 @@ func WithinDuration(t TestingT, expected time.Time, actual time.Time, delta time // WithinDurationf asserts that the two times are within duration delta of each other. // -// assert.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") +// require.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1991,7 +2055,7 @@ func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta tim // WithinRange asserts that a time is within a time range (inclusive). // -// assert.WithinRange(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) +// require.WithinRange(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) func WithinRange(t TestingT, actual time.Time, start time.Time, end time.Time, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -2004,7 +2068,7 @@ func WithinRange(t TestingT, actual time.Time, start time.Time, end time.Time, m // WithinRangef asserts that a time is within a time range (inclusive). // -// assert.WithinRangef(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") +// require.WithinRangef(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") func WithinRangef(t TestingT, actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() diff --git a/vendor/github.com/stretchr/testify/require/require.go.tmpl b/vendor/github.com/stretchr/testify/require/require.go.tmpl index 55e42ddebdc..8b328368509 100644 --- a/vendor/github.com/stretchr/testify/require/require.go.tmpl +++ b/vendor/github.com/stretchr/testify/require/require.go.tmpl @@ -1,4 +1,4 @@ -{{.Comment}} +{{ replace .Comment "assert." "require."}} func {{.DocInfo.Name}}(t TestingT, {{.Params}}) { if h, ok := t.(tHelper); ok { h.Helper() } if assert.{{.DocInfo.Name}}(t, {{.ForwardedParams}}) { return } diff --git a/vendor/github.com/stretchr/testify/require/require_forward.go b/vendor/github.com/stretchr/testify/require/require_forward.go index eee8310a5fa..1bd87304f43 100644 --- a/vendor/github.com/stretchr/testify/require/require_forward.go +++ b/vendor/github.com/stretchr/testify/require/require_forward.go @@ -187,8 +187,8 @@ func (a *Assertions) EqualExportedValuesf(expected interface{}, actual interface EqualExportedValuesf(a.t, expected, actual, msg, args...) } -// EqualValues asserts that two objects are equal or convertible to the same types -// and equal. +// EqualValues asserts that two objects are equal or convertible to the larger +// type and equal. // // a.EqualValues(uint32(123), int32(123)) func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) { @@ -198,8 +198,8 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn EqualValues(a.t, expected, actual, msgAndArgs...) 
} -// EqualValuesf asserts that two objects are equal or convertible to the same types -// and equal. +// EqualValuesf asserts that two objects are equal or convertible to the larger +// type and equal. // // a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted") func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) { @@ -337,7 +337,7 @@ func (a *Assertions) Eventually(condition func() bool, waitFor time.Duration, ti // a.EventuallyWithT(func(c *assert.CollectT) { // // add assertions as needed; any assertion failure will fail the current tick // assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false") func (a *Assertions) EventuallyWithT(condition func(collect *assert.CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -362,7 +362,7 @@ func (a *Assertions) EventuallyWithT(condition func(collect *assert.CollectT), w // a.EventuallyWithTf(func(c *assert.CollectT, "error message %s", "formatted") { // // add assertions as needed; any assertion failure will fail the current tick // assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false") func (a *Assertions) EventuallyWithTf(condition func(collect *assert.CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1129,6 +1129,40 @@ func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg strin NotContainsf(a.t, s, contains, msg, args...) } +// NotElementsMatch asserts that the specified listA(array, slice...) is NOT equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should not match. +// This is an inverse of ElementsMatch. +// +// a.NotElementsMatch([1, 1, 2, 3], [1, 1, 2, 3]) -> false +// +// a.NotElementsMatch([1, 1, 2, 3], [1, 2, 3]) -> true +// +// a.NotElementsMatch([1, 2, 3], [1, 2, 4]) -> true +func (a *Assertions) NotElementsMatch(listA interface{}, listB interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + NotElementsMatch(a.t, listA, listB, msgAndArgs...) +} + +// NotElementsMatchf asserts that the specified listA(array, slice...) is NOT equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should not match. +// This is an inverse of ElementsMatch. +// +// a.NotElementsMatchf([1, 1, 2, 3], [1, 1, 2, 3], "error message %s", "formatted") -> false +// +// a.NotElementsMatchf([1, 1, 2, 3], [1, 2, 3], "error message %s", "formatted") -> true +// +// a.NotElementsMatchf([1, 2, 3], [1, 2, 4], "error message %s", "formatted") -> true +func (a *Assertions) NotElementsMatchf(listA interface{}, listB interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + NotElementsMatchf(a.t, listA, listB, msg, args...) +} + // NotEmpty asserts that the specified object is NOT empty. I.e. 
not nil, "", false, 0 or either // a slice or a channel with len == 0. // @@ -1201,7 +1235,25 @@ func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg str NotEqualf(a.t, expected, actual, msg, args...) } -// NotErrorIs asserts that at none of the errors in err's chain matches target. +// NotErrorAs asserts that none of the errors in err's chain matches target, +// but if so, sets target to that error value. +func (a *Assertions) NotErrorAs(err error, target interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + NotErrorAs(a.t, err, target, msgAndArgs...) +} + +// NotErrorAsf asserts that none of the errors in err's chain matches target, +// but if so, sets target to that error value. +func (a *Assertions) NotErrorAsf(err error, target interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + NotErrorAsf(a.t, err, target, msg, args...) +} + +// NotErrorIs asserts that none of the errors in err's chain matches target. // This is a wrapper for errors.Is. func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { @@ -1210,7 +1262,7 @@ func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface NotErrorIs(a.t, err, target, msgAndArgs...) } -// NotErrorIsf asserts that at none of the errors in err's chain matches target. +// NotErrorIsf asserts that none of the errors in err's chain matches target. // This is a wrapper for errors.Is. func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { diff --git a/vendor/github.com/stretchr/testify/require/requirements.go b/vendor/github.com/stretchr/testify/require/requirements.go index 91772dfeb91..6b7ce929eb1 100644 --- a/vendor/github.com/stretchr/testify/require/requirements.go +++ b/vendor/github.com/stretchr/testify/require/requirements.go @@ -6,7 +6,7 @@ type TestingT interface { FailNow() } -type tHelper interface { +type tHelper = interface { Helper() } diff --git a/vendor/github.com/tklauser/go-sysconf/.cirrus.yml b/vendor/github.com/tklauser/go-sysconf/.cirrus.yml index 1b27f196286..33e6595cca3 100644 --- a/vendor/github.com/tklauser/go-sysconf/.cirrus.yml +++ b/vendor/github.com/tklauser/go-sysconf/.cirrus.yml @@ -1,10 +1,10 @@ env: CIRRUS_CLONE_DEPTH: 1 - GO_VERSION: go1.20 + GO_VERSION: go1.22.2 -freebsd_12_task: +freebsd_13_task: freebsd_instance: - image_family: freebsd-12-3 + image_family: freebsd-13-2 install_script: | pkg install -y go GOBIN=$PWD/bin go install golang.org/dl/${GO_VERSION}@latest @@ -12,9 +12,9 @@ freebsd_12_task: build_script: bin/${GO_VERSION} build -v ./... test_script: bin/${GO_VERSION} test -race ./... -freebsd_13_task: +freebsd_14_task: freebsd_instance: - image_family: freebsd-13-0 + image_family: freebsd-14-0 install_script: | pkg install -y go GOBIN=$PWD/bin go install golang.org/dl/${GO_VERSION}@latest diff --git a/vendor/github.com/tklauser/go-sysconf/sysconf_bsd.go b/vendor/github.com/tklauser/go-sysconf/sysconf_bsd.go index 7c96157bb79..ec81c02ac88 100644 --- a/vendor/github.com/tklauser/go-sysconf/sysconf_bsd.go +++ b/vendor/github.com/tklauser/go-sysconf/sysconf_bsd.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build darwin || dragonfly || freebsd || netbsd || openbsd -// +build darwin dragonfly freebsd netbsd openbsd package sysconf diff --git a/vendor/github.com/tklauser/go-sysconf/sysconf_darwin.go b/vendor/github.com/tklauser/go-sysconf/sysconf_darwin.go index 3f5d83f6920..b471ec10443 100644 --- a/vendor/github.com/tklauser/go-sysconf/sysconf_darwin.go +++ b/vendor/github.com/tklauser/go-sysconf/sysconf_darwin.go @@ -16,6 +16,10 @@ const ( _HOST_NAME_MAX = _MAXHOSTNAMELEN - 1 _LOGIN_NAME_MAX = _MAXLOGNAME _SYMLOOP_MAX = _MAXSYMLINKS + + // _PTHREAD_STACK_MIN changed in macOS 14 + _PTHREAD_STACK_MIN_LT_MACOS14 = 0x2000 + _PTHREAD_STACK_MIN_GE_MACOS14 = 0x4000 ) var uname struct { @@ -23,6 +27,21 @@ var uname struct { macOSMajor int } +func getMacOSMajor() int { + uname.Once.Do(func() { + var u unix.Utsname + err := unix.Uname(&u) + if err != nil { + return + } + rel := unix.ByteSliceToString(u.Release[:]) + ver := strings.Split(rel, ".") + maj, _ := strconv.Atoi(ver[0]) + uname.macOSMajor = maj + }) + return uname.macOSMajor +} + // sysconf implements sysconf(4) as in the Darwin libc (derived from the FreeBSD // libc), version 1534.81.1. // See https://github.com/apple-oss-distributions/Libc/tree/Libc-1534.81.1. @@ -91,7 +110,10 @@ func sysconf(name int) (int64, error) { case SC_THREAD_PRIO_PROTECT: return _POSIX_THREAD_PRIO_PROTECT, nil case SC_THREAD_STACK_MIN: - return _PTHREAD_STACK_MIN, nil + if getMacOSMajor() < 23 { + return _PTHREAD_STACK_MIN_LT_MACOS14, nil + } + return _PTHREAD_STACK_MIN_GE_MACOS14, nil case SC_THREAD_THREADS_MAX: return -1, nil case SC_TIMER_MAX: @@ -140,18 +162,7 @@ func sysconf(name int) (int64, error) { } return _POSIX_SEMAPHORES, nil case SC_SPAWN: - uname.Once.Do(func() { - var u unix.Utsname - err := unix.Uname(&u) - if err != nil { - return - } - rel := unix.ByteSliceToString(u.Release[:]) - ver := strings.Split(rel, ".") - maj, _ := strconv.Atoi(ver[0]) - uname.macOSMajor = maj - }) - if uname.macOSMajor < 22 { + if getMacOSMajor() < 22 { return -1, nil } // macOS 13 (Ventura) and later diff --git a/vendor/github.com/tklauser/go-sysconf/sysconf_generic.go b/vendor/github.com/tklauser/go-sysconf/sysconf_generic.go index 248bdc99cda..7dcc6f4cabf 100644 --- a/vendor/github.com/tklauser/go-sysconf/sysconf_generic.go +++ b/vendor/github.com/tklauser/go-sysconf/sysconf_generic.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
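In the sysconf_darwin.go hunks above, the patch caches the parsed Darwin kernel major version behind sync.Once (Darwin 23 corresponds to macOS 14) and keys _PTHREAD_STACK_MIN off it. A simplified sketch of that memoization pattern, with the unix.Uname call replaced by a plain parameter so it stays portable (an assumption of this sketch, not how the vendored code reads it):

package example

import (
	"strconv"
	"strings"
	"sync"
)

// releaseMajor memoizes the parse of a Darwin kernel release string such
// as "23.1.0". The struct embeds sync.Once so the parse runs exactly once.
var uname struct {
	sync.Once
	major int
}

func releaseMajor(release string) int {
	uname.Do(func() {
		ver := strings.Split(release, ".")
		maj, _ := strconv.Atoi(ver[0]) // best-effort, as in the patch
		uname.major = maj
	})
	return uname.major
}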
//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd -// +build darwin dragonfly freebsd linux netbsd openbsd package sysconf diff --git a/vendor/github.com/tklauser/go-sysconf/sysconf_linux.go b/vendor/github.com/tklauser/go-sysconf/sysconf_linux.go index 5fb49ac7b6a..9af70070e2b 100644 --- a/vendor/github.com/tklauser/go-sysconf/sysconf_linux.go +++ b/vendor/github.com/tklauser/go-sysconf/sysconf_linux.go @@ -6,7 +6,6 @@ package sysconf import ( "bufio" - "io/ioutil" "os" "runtime" "strconv" @@ -26,7 +25,7 @@ const ( ) func readProcFsInt64(path string, fallback int64) int64 { - data, err := ioutil.ReadFile(path) + data, err := os.ReadFile(path) if err != nil { return fallback } @@ -86,10 +85,16 @@ func getNprocsProcStat() (int64, error) { s := bufio.NewScanner(f) for s.Scan() { if line := strings.TrimSpace(s.Text()); strings.HasPrefix(line, "cpu") { - l := strings.SplitN(line, " ", 2) - _, err := strconv.ParseInt(l[0][3:], 10, 64) - if err == nil { - count++ + cpu, _, found := strings.Cut(line, " ") + if found { + // skip first line with accumulated values + if cpu == "cpu" { + continue + } + _, err := strconv.ParseInt(cpu[len("cpu"):], 10, 64) + if err == nil { + count++ + } } } else { // The current format of /proc/stat has all the @@ -98,6 +103,9 @@ func getNprocsProcStat() (int64, error) { break } } + if err := s.Err(); err != nil { + return -1, err + } return count, nil } diff --git a/vendor/github.com/tklauser/go-sysconf/sysconf_netbsd.go b/vendor/github.com/tklauser/go-sysconf/sysconf_netbsd.go index 325d4a6a83f..40f6c345fcd 100644 --- a/vendor/github.com/tklauser/go-sysconf/sysconf_netbsd.go +++ b/vendor/github.com/tklauser/go-sysconf/sysconf_netbsd.go @@ -25,10 +25,10 @@ const ( _POSIX2_UPE = -1 ) -var ( - clktck int64 - clktckOnce sync.Once -) +var clktck struct { + sync.Once + v int64 +} func sysconfPOSIX(name int) (int64, error) { // NetBSD does not define all _POSIX_* values used in sysconf_posix.go @@ -42,7 +42,6 @@ func sysconf(name int) (int64, error) { // Duplicate the relevant values here. switch name { - // 1003.1 case SC_ARG_MAX: return sysctl32("kern.argmax"), nil @@ -55,13 +54,14 @@ func sysconf(name int) (int64, error) { } return -1, nil case SC_CLK_TCK: - clktckOnce.Do(func() { - clktck = -1 + // TODO: use sync.OnceValue once Go 1.21 is the minimal supported version + clktck.Do(func() { + clktck.v = -1 if ci, err := unix.SysctlClockinfo("kern.clockrate"); err == nil { - clktck = int64(ci.Hz) + clktck.v = int64(ci.Hz) } }) - return clktck, nil + return clktck.v, nil case SC_NGROUPS_MAX: return sysctl32("kern.ngroups"), nil case SC_JOB_CONTROL: diff --git a/vendor/github.com/tklauser/go-sysconf/sysconf_posix.go b/vendor/github.com/tklauser/go-sysconf/sysconf_posix.go index e61c0bc73e4..830d8220b51 100644 --- a/vendor/github.com/tklauser/go-sysconf/sysconf_posix.go +++ b/vendor/github.com/tklauser/go-sysconf/sysconf_posix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin || dragonfly || freebsd || linux || openbsd -// +build darwin dragonfly freebsd linux openbsd package sysconf diff --git a/vendor/github.com/tklauser/go-sysconf/sysconf_unsupported.go b/vendor/github.com/tklauser/go-sysconf/sysconf_unsupported.go index 478d692005e..5aa9119db73 100644 --- a/vendor/github.com/tklauser/go-sysconf/sysconf_unsupported.go +++ b/vendor/github.com/tklauser/go-sysconf/sysconf_unsupported.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
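The getNprocsProcStat rewrite above swaps strings.SplitN for strings.Cut and explicitly skips the aggregate "cpu" line of /proc/stat, since only the numbered "cpuN" lines represent individual CPUs. A self-contained sketch of the same parsing over a supplied string (the input parameter is an assumption of this sketch):

package example

import (
	"bufio"
	"strconv"
	"strings"
)

// countCPUs mirrors the patched loop: per-CPU lines look like "cpu0 ...",
// while the first "cpu ..." line aggregates all CPUs and must be skipped.
// strings.Cut splits off the first field without allocating a slice.
func countCPUs(procStat string) (int64, error) {
	var count int64
	s := bufio.NewScanner(strings.NewReader(procStat))
	for s.Scan() {
		line := strings.TrimSpace(s.Text())
		if !strings.HasPrefix(line, "cpu") {
			break // all cpu lines come first in /proc/stat
		}
		cpu, _, found := strings.Cut(line, " ")
		if found && cpu != "cpu" {
			if _, err := strconv.ParseInt(cpu[len("cpu"):], 10, 64); err == nil {
				count++
			}
		}
	}
	if err := s.Err(); err != nil {
		return -1, err
	}
	return count, nil
}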
//go:build !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris -// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris package sysconf diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_darwin.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_darwin.go index 6fadf3db1fa..80b64393bc6 100644 --- a/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_darwin.go +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_darwin.go @@ -2,7 +2,6 @@ // cgo -godefs sysconf_defs_darwin.go //go:build darwin -// +build darwin package sysconf @@ -235,7 +234,6 @@ const ( _PTHREAD_DESTRUCTOR_ITERATIONS = 0x4 _PTHREAD_KEYS_MAX = 0x200 - _PTHREAD_STACK_MIN = 0x2000 ) const ( diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_dragonfly.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_dragonfly.go index 0864cd44827..dae56570c02 100644 --- a/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_dragonfly.go +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_dragonfly.go @@ -2,7 +2,6 @@ // cgo -godefs sysconf_defs_dragonfly.go //go:build dragonfly -// +build dragonfly package sysconf diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_freebsd.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_freebsd.go index 9885411acbd..068f8a7edaf 100644 --- a/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_freebsd.go +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_freebsd.go @@ -2,7 +2,6 @@ // cgo -godefs sysconf_defs_freebsd.go //go:build freebsd -// +build freebsd package sysconf diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_linux.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_linux.go index 8545a342b90..12f289d76fa 100644 --- a/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_linux.go +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_linux.go @@ -2,7 +2,6 @@ // cgo -godefs sysconf_defs_linux.go //go:build linux -// +build linux package sysconf diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_netbsd.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_netbsd.go index d2aaf077704..772af475a46 100644 --- a/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_netbsd.go +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_netbsd.go @@ -2,7 +2,6 @@ // cgo -godefs sysconf_defs_netbsd.go //go:build netbsd -// +build netbsd package sysconf diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_openbsd.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_openbsd.go index badc66cbda8..625b098f913 100644 --- a/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_openbsd.go +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_openbsd.go @@ -2,7 +2,6 @@ // cgo -godefs sysconf_defs_openbsd.go //go:build openbsd -// +build openbsd package sysconf diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_solaris.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_solaris.go index 29b6f8746a1..c155cf57966 100644 --- a/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_solaris.go +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_solaris.go @@ -2,7 +2,6 @@ // cgo -godefs sysconf_defs_solaris.go //go:build solaris -// +build solaris package sysconf diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_freebsd_386.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_freebsd_386.go index 478fe63a98e..b5d4807482c 100644 --- a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_freebsd_386.go +++ 
b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_freebsd_386.go @@ -2,7 +2,6 @@ // cgo -godefs sysconf_values_freebsd.go //go:build freebsd && 386 -// +build freebsd,386 package sysconf diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_freebsd_amd64.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_freebsd_amd64.go index 7f58a4d8bac..89c880aae27 100644 --- a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_freebsd_amd64.go +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_freebsd_amd64.go @@ -2,7 +2,6 @@ // cgo -godefs sysconf_values_freebsd.go //go:build freebsd && amd64 -// +build freebsd,amd64 package sysconf diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_freebsd_arm.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_freebsd_arm.go index deb47595ba4..7b65fdd6fb3 100644 --- a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_freebsd_arm.go +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_freebsd_arm.go @@ -2,7 +2,6 @@ // cgo -godefs sysconf_values_freebsd.go //go:build freebsd && arm -// +build freebsd,arm package sysconf diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_freebsd_arm64.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_freebsd_arm64.go index 556ba3da212..a86cb32bdfa 100644 --- a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_freebsd_arm64.go +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_freebsd_arm64.go @@ -2,7 +2,6 @@ // cgo -godefs sysconf_values_freebsd.go //go:build freebsd && arm64 -// +build freebsd,arm64 package sysconf diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_freebsd_riscv64.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_freebsd_riscv64.go index b7cff760b16..6c847aeeaa7 100644 --- a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_freebsd_riscv64.go +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_freebsd_riscv64.go @@ -2,7 +2,6 @@ // cgo -godefs sysconf_values_freebsd.go //go:build freebsd && riscv64 -// +build freebsd,riscv64 package sysconf diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_386.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_386.go index 16ee7ea64cf..90963eb422c 100644 --- a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_386.go +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_386.go @@ -2,7 +2,6 @@ // cgo -godefs sysconf_values_linux.go //go:build linux && 386 -// +build linux,386 package sysconf diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_amd64.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_amd64.go index 39aee349f21..28ad6f18383 100644 --- a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_amd64.go +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_amd64.go @@ -2,7 +2,6 @@ // cgo -godefs sysconf_values_linux.go //go:build linux && amd64 -// +build linux,amd64 package sysconf diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_arm.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_arm.go index 2e401164e3f..ffbcf37d403 100644 --- a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_arm.go +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_arm.go @@ -2,7 +2,6 @@ // cgo -godefs sysconf_values_linux.go //go:build linux && arm -// +build linux,arm package sysconf diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_arm64.go 
b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_arm64.go index 362403abccd..cc9f4d88d43 100644 --- a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_arm64.go +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_arm64.go @@ -2,7 +2,6 @@ // cgo -godefs sysconf_values_linux.go //go:build linux && arm64 -// +build linux,arm64 package sysconf diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_loong64.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_loong64.go index 95a71f4a2c7..f62b15a6978 100644 --- a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_loong64.go +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_loong64.go @@ -2,7 +2,6 @@ // cgo -godefs sysconf_values_linux.go //go:build linux && loong64 -// +build linux,loong64 package sysconf diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_mips.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_mips.go index 868b0ffb336..37f492a81f1 100644 --- a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_mips.go +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_mips.go @@ -2,7 +2,6 @@ // cgo -godefs sysconf_values_linux.go //go:build linux && mips -// +build linux,mips package sysconf diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_mips64.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_mips64.go index 5949f3d71fd..ae7b7f9c238 100644 --- a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_mips64.go +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_mips64.go @@ -2,7 +2,6 @@ // cgo -godefs sysconf_values_linux.go //go:build linux && mips64 -// +build linux,mips64 package sysconf diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_mips64le.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_mips64le.go index 1853419a32d..fe14670f2ab 100644 --- a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_mips64le.go +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_mips64le.go @@ -2,7 +2,6 @@ // cgo -godefs sysconf_values_linux.go //go:build linux && mips64le -// +build linux,mips64le package sysconf diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_mipsle.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_mipsle.go index ff41b3469bb..d204585be95 100644 --- a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_mipsle.go +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_mipsle.go @@ -2,7 +2,6 @@ // cgo -godefs sysconf_values_linux.go //go:build linux && mipsle -// +build linux,mipsle package sysconf diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_ppc64.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_ppc64.go index 388743728ac..9ec78d335ee 100644 --- a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_ppc64.go +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_ppc64.go @@ -2,7 +2,6 @@ // cgo -godefs sysconf_values_linux.go //go:build linux && ppc64 -// +build linux,ppc64 package sysconf diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_ppc64le.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_ppc64le.go index 6d76929a64c..a5420672984 100644 --- a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_ppc64le.go +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_ppc64le.go @@ -2,7 +2,6 @@ // cgo 
-godefs sysconf_values_linux.go //go:build linux && ppc64le -// +build linux,ppc64le package sysconf diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_riscv64.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_riscv64.go index 3d7d71b3221..bfb923920a1 100644 --- a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_riscv64.go +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_riscv64.go @@ -2,7 +2,6 @@ // cgo -godefs sysconf_values_linux.go //go:build linux && riscv64 -// +build linux,riscv64 package sysconf diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_s390x.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_s390x.go index 9cf8529f53a..6e935c87359 100644 --- a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_s390x.go +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_s390x.go @@ -2,7 +2,6 @@ // cgo -godefs sysconf_values_linux.go //go:build linux && s390x -// +build linux,s390x package sysconf diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_netbsd_386.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_netbsd_386.go index 3cd64dd6626..ea0b24a822e 100644 --- a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_netbsd_386.go +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_netbsd_386.go @@ -2,7 +2,6 @@ // cgo -godefs sysconf_values_netbsd.go //go:build netbsd && 386 -// +build netbsd,386 package sysconf diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_netbsd_amd64.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_netbsd_amd64.go index 02fc1d0ef93..2d377e253ce 100644 --- a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_netbsd_amd64.go +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_netbsd_amd64.go @@ -2,7 +2,6 @@ // cgo -godefs sysconf_values_netbsd.go //go:build netbsd && amd64 -// +build netbsd,amd64 package sysconf diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_netbsd_arm.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_netbsd_arm.go index 16f9b6e71eb..4a6d83670a5 100644 --- a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_netbsd_arm.go +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_netbsd_arm.go @@ -2,7 +2,6 @@ // cgo -godefs sysconf_values_netbsd.go //go:build netbsd && arm -// +build netbsd,arm package sysconf diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_netbsd_arm64.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_netbsd_arm64.go index e530339ca7e..49fb6725ef4 100644 --- a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_netbsd_arm64.go +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_netbsd_arm64.go @@ -2,7 +2,6 @@ // cgo -godefs sysconf_values_netbsd.go //go:build netbsd && arm64 -// +build netbsd,arm64 package sysconf diff --git a/vendor/github.com/tklauser/numcpus/.cirrus.yml b/vendor/github.com/tklauser/numcpus/.cirrus.yml index 69c6ced5c75..33e6595cca3 100644 --- a/vendor/github.com/tklauser/numcpus/.cirrus.yml +++ b/vendor/github.com/tklauser/numcpus/.cirrus.yml @@ -1,13 +1,23 @@ env: CIRRUS_CLONE_DEPTH: 1 - GO_VERSION: go1.20 + GO_VERSION: go1.22.2 -freebsd_12_task: +freebsd_13_task: freebsd_instance: - image_family: freebsd-12-3 + image_family: freebsd-13-2 install_script: | pkg install -y go GOBIN=$PWD/bin go install golang.org/dl/${GO_VERSION}@latest bin/${GO_VERSION} download - build_script: bin/${GO_VERSION} build -buildvcs=false -v ./... 
- test_script: bin/${GO_VERSION} test -buildvcs=false -race ./... + build_script: bin/${GO_VERSION} build -v ./... + test_script: bin/${GO_VERSION} test -race ./... + +freebsd_14_task: + freebsd_instance: + image_family: freebsd-14-0 + install_script: | + pkg install -y go + GOBIN=$PWD/bin go install golang.org/dl/${GO_VERSION}@latest + bin/${GO_VERSION} download + build_script: bin/${GO_VERSION} build -v ./... + test_script: bin/${GO_VERSION} test -race ./... diff --git a/vendor/github.com/tklauser/numcpus/numcpus_bsd.go b/vendor/github.com/tklauser/numcpus/numcpus_bsd.go index 9e77e38e6e0..efd8db0f1c8 100644 --- a/vendor/github.com/tklauser/numcpus/numcpus_bsd.go +++ b/vendor/github.com/tklauser/numcpus/numcpus_bsd.go @@ -13,7 +13,6 @@ // limitations under the License. //go:build darwin || dragonfly || freebsd || netbsd || openbsd -// +build darwin dragonfly freebsd netbsd openbsd package numcpus diff --git a/vendor/github.com/tklauser/numcpus/numcpus_linux.go b/vendor/github.com/tklauser/numcpus/numcpus_linux.go index 1a30525b873..7e75cb06164 100644 --- a/vendor/github.com/tklauser/numcpus/numcpus_linux.go +++ b/vendor/github.com/tklauser/numcpus/numcpus_linux.go @@ -15,7 +15,6 @@ package numcpus import ( - "io/ioutil" "os" "path/filepath" "strconv" @@ -35,7 +34,7 @@ func getFromCPUAffinity() (int, error) { } func readCPURange(file string) (int, error) { - buf, err := ioutil.ReadFile(filepath.Join(sysfsCPUBasePath, file)) + buf, err := os.ReadFile(filepath.Join(sysfsCPUBasePath, file)) if err != nil { return 0, err } @@ -48,16 +47,16 @@ func parseCPURange(cpus string) (int, error) { if len(cpuRange) == 0 { continue } - rangeOp := strings.SplitN(cpuRange, "-", 2) - first, err := strconv.ParseUint(rangeOp[0], 10, 32) + from, to, found := strings.Cut(cpuRange, "-") + first, err := strconv.ParseUint(from, 10, 32) if err != nil { return 0, err } - if len(rangeOp) == 1 { + if !found { n++ continue } - last, err := strconv.ParseUint(rangeOp[1], 10, 32) + last, err := strconv.ParseUint(to, 10, 32) if err != nil { return 0, err } @@ -89,7 +88,7 @@ func getConfigured() (int, error) { } func getKernelMax() (int, error) { - buf, err := ioutil.ReadFile(filepath.Join(sysfsCPUBasePath, "kernel_max")) + buf, err := os.ReadFile(filepath.Join(sysfsCPUBasePath, "kernel_max")) if err != nil { return 0, err } diff --git a/vendor/github.com/tklauser/numcpus/numcpus_solaris.go b/vendor/github.com/tklauser/numcpus/numcpus_solaris.go index a264323781b..f3b632fe748 100644 --- a/vendor/github.com/tklauser/numcpus/numcpus_solaris.go +++ b/vendor/github.com/tklauser/numcpus/numcpus_solaris.go @@ -13,7 +13,6 @@ // limitations under the License. //go:build solaris -// +build solaris package numcpus diff --git a/vendor/github.com/tklauser/numcpus/numcpus_unsupported.go b/vendor/github.com/tklauser/numcpus/numcpus_unsupported.go index 4a0b7c43d21..e72355eca5f 100644 --- a/vendor/github.com/tklauser/numcpus/numcpus_unsupported.go +++ b/vendor/github.com/tklauser/numcpus/numcpus_unsupported.go @@ -13,7 +13,6 @@ // limitations under the License. 
//go:build !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows -// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows package numcpus diff --git a/vendor/github.com/ua-parser/uap-go/LICENSE b/vendor/github.com/ua-parser/uap-go/LICENSE new file mode 100644 index 00000000000..8ceb9a6c0c0 --- /dev/null +++ b/vendor/github.com/ua-parser/uap-go/LICENSE @@ -0,0 +1,16 @@ +Apache License, Version 2.0 +=========================== + +Copyright 2009 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. \ No newline at end of file diff --git a/vendor/github.com/ua-parser/uap-go/uaparser/.gitignore b/vendor/github.com/ua-parser/uap-go/uaparser/.gitignore new file mode 100644 index 00000000000..385d6a8d0c8 --- /dev/null +++ b/vendor/github.com/ua-parser/uap-go/uaparser/.gitignore @@ -0,0 +1,2 @@ +*.out +*.test diff --git a/vendor/github.com/ua-parser/uap-go/uaparser/LICENSE.md b/vendor/github.com/ua-parser/uap-go/uaparser/LICENSE.md new file mode 100644 index 00000000000..5c7c52930d6 --- /dev/null +++ b/vendor/github.com/ua-parser/uap-go/uaparser/LICENSE.md @@ -0,0 +1,8 @@ +The MIT License (MIT) +Copyright (c) 2013 Yihuan Zhou + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/ua-parser/uap-go/uaparser/cache.go b/vendor/github.com/ua-parser/uap-go/uaparser/cache.go new file mode 100644 index 00000000000..1838b81e1e2 --- /dev/null +++ b/vendor/github.com/ua-parser/uap-go/uaparser/cache.go @@ -0,0 +1,36 @@ +package uaparser + +import lru "github.com/hashicorp/golang-lru" + +// cache caches user-agent properties. +// Without the cache, the parser performs hundreds of expensive regex operations, +// taking 10+ ms. This can lead to significant performance degradation when UA parsing is +// done on a per-request basis. +type cache struct { + device *lru.ARCCache + os *lru.ARCCache + userAgent *lru.ARCCache +} + +func newCache() *cache { + var ( + c cache + err error + ) + const cacheSize = 1024 + // NewARC only fails when cacheSize <= 0. 
+ // Also, returning an error up the stack would break the API. + c.device, err = lru.NewARC(cacheSize) + if err != nil { + panic(err) + } + c.os, err = lru.NewARC(cacheSize) + if err != nil { + panic(err) + } + c.userAgent, err = lru.NewARC(cacheSize) + if err != nil { + panic(err) + } + return &c +} diff --git a/vendor/github.com/ua-parser/uap-go/uaparser/device.go b/vendor/github.com/ua-parser/uap-go/uaparser/device.go new file mode 100644 index 00000000000..7a115e7511c --- /dev/null +++ b/vendor/github.com/ua-parser/uap-go/uaparser/device.go @@ -0,0 +1,30 @@ +package uaparser + +import "strings" + +type Device struct { + Family string + Brand string + Model string +} + +func (parser *deviceParser) Match(line string, dvc *Device) { + matches := parser.Reg.FindStringSubmatchIndex(line) + + if len(matches) == 0 { + return + } + + dvc.Family = string(parser.Reg.ExpandString(nil, parser.DeviceReplacement, line, matches)) + dvc.Family = strings.TrimSpace(dvc.Family) + + dvc.Brand = string(parser.Reg.ExpandString(nil, parser.BrandReplacement, line, matches)) + dvc.Brand = strings.TrimSpace(dvc.Brand) + + dvc.Model = string(parser.Reg.ExpandString(nil, parser.ModelReplacement, line, matches)) + dvc.Model = strings.TrimSpace(dvc.Model) +} + +func (dvc *Device) ToString() string { + return dvc.Family +} diff --git a/vendor/github.com/ua-parser/uap-go/uaparser/os.go b/vendor/github.com/ua-parser/uap-go/uaparser/os.go new file mode 100644 index 00000000000..b30e8a193f3 --- /dev/null +++ b/vendor/github.com/ua-parser/uap-go/uaparser/os.go @@ -0,0 +1,49 @@ +package uaparser + +type Os struct { + Family string + Major string + Minor string + Patch string + PatchMinor string `yaml:"patch_minor"` +} + +func (parser *osParser) Match(line string, os *Os) { + matches := parser.Reg.FindStringSubmatchIndex(line) + if len(matches) > 0 { + os.Family = string(parser.Reg.ExpandString(nil, parser.OSReplacement, line, matches)) + os.Major = string(parser.Reg.ExpandString(nil, parser.V1Replacement, line, matches)) + os.Minor = string(parser.Reg.ExpandString(nil, parser.V2Replacement, line, matches)) + os.Patch = string(parser.Reg.ExpandString(nil, parser.V3Replacement, line, matches)) + os.PatchMinor = string(parser.Reg.ExpandString(nil, parser.V4Replacement, line, matches)) + } +} + +func (os *Os) ToString() string { + var str string + if os.Family != "" { + str += os.Family + } + version := os.ToVersionString() + if version != "" { + str += " " + version + } + return str +} + +func (os *Os) ToVersionString() string { + var version string + if os.Major != "" { + version += os.Major + } + if os.Minor != "" { + version += "." + os.Minor + } + if os.Patch != "" { + version += "." + os.Patch + } + if os.PatchMinor != "" { + version += "." 
+ os.PatchMinor + } + return version +} diff --git a/vendor/github.com/ua-parser/uap-go/uaparser/parser.go b/vendor/github.com/ua-parser/uap-go/uaparser/parser.go new file mode 100644 index 00000000000..350fb19635d --- /dev/null +++ b/vendor/github.com/ua-parser/uap-go/uaparser/parser.go @@ -0,0 +1,394 @@ +package uaparser + +import ( + "fmt" + "io/ioutil" + "regexp" + "sort" + "sync" + "sync/atomic" + "time" + + "gopkg.in/yaml.v2" +) + +type RegexesDefinitions struct { + UA []*uaParser `yaml:"user_agent_parsers"` + OS []*osParser `yaml:"os_parsers"` + Device []*deviceParser `yaml:"device_parsers"` + _ [4]byte // padding for alignment + sync.RWMutex +} + +type UserAgentSorter []*uaParser + +func (a UserAgentSorter) Len() int { return len(a) } +func (a UserAgentSorter) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a UserAgentSorter) Less(i, j int) bool { + return atomic.LoadUint64(&a[i].MatchesCount) > atomic.LoadUint64(&a[j].MatchesCount) +} + +type uaParser struct { + Reg *regexp.Regexp + Expr string `yaml:"regex"` + Flags string `yaml:"regex_flag"` + FamilyReplacement string `yaml:"family_replacement"` + V1Replacement string `yaml:"v1_replacement"` + V2Replacement string `yaml:"v2_replacement"` + V3Replacement string `yaml:"v3_replacement"` + _ [4]byte // padding for alignment + MatchesCount uint64 +} + +func (ua *uaParser) setDefaults() { + if ua.FamilyReplacement == "" { + ua.FamilyReplacement = "$1" + } + if ua.V1Replacement == "" { + ua.V1Replacement = "$2" + } + if ua.V2Replacement == "" { + ua.V2Replacement = "$3" + } + if ua.V3Replacement == "" { + ua.V3Replacement = "$4" + } +} + +type OsSorter []*osParser + +func (a OsSorter) Len() int { return len(a) } +func (a OsSorter) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a OsSorter) Less(i, j int) bool { + return atomic.LoadUint64(&a[i].MatchesCount) > atomic.LoadUint64(&a[j].MatchesCount) +} + +type osParser struct { + Reg *regexp.Regexp + Expr string `yaml:"regex"` + Flags string `yaml:"regex_flag"` + OSReplacement string `yaml:"os_replacement"` + V1Replacement string `yaml:"os_v1_replacement"` + V2Replacement string `yaml:"os_v2_replacement"` + V3Replacement string `yaml:"os_v3_replacement"` + V4Replacement string `yaml:"os_v4_replacement"` + _ [4]byte // padding for alignment + MatchesCount uint64 +} + +func (os *osParser) setDefaults() { + if os.OSReplacement == "" { + os.OSReplacement = "$1" + } + if os.V1Replacement == "" { + os.V1Replacement = "$2" + } + if os.V2Replacement == "" { + os.V2Replacement = "$3" + } + if os.V3Replacement == "" { + os.V3Replacement = "$4" + } + if os.V4Replacement == "" { + os.V4Replacement = "$5" + } +} + +type DeviceSorter []*deviceParser + +func (a DeviceSorter) Len() int { return len(a) } +func (a DeviceSorter) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a DeviceSorter) Less(i, j int) bool { + return atomic.LoadUint64(&a[i].MatchesCount) > atomic.LoadUint64(&a[j].MatchesCount) +} + +type deviceParser struct { + Reg *regexp.Regexp + Expr string `yaml:"regex"` + Flags string `yaml:"regex_flag"` + DeviceReplacement string `yaml:"device_replacement"` + BrandReplacement string `yaml:"brand_replacement"` + ModelReplacement string `yaml:"model_replacement"` + _ [4]byte // padding for alignment + MatchesCount uint64 +} + +func (device *deviceParser) setDefaults() { + if device.DeviceReplacement == "" { + device.DeviceReplacement = "$1" + } + if device.ModelReplacement == "" { + device.ModelReplacement = "$1" + } +} + +type Client struct { + UserAgent *UserAgent + Os *Os + Device *Device 
+} + +type Parser struct { + /* atomic operations are done on the following uint64. + * These must be 64bit aligned. On 32bit architectures + * this is only guaranteed to be on the beginning of a struct */ + UserAgentMisses uint64 + OsMisses uint64 + DeviceMisses uint64 + + cache *cache + + RegexesDefinitions + Mode int + UseSort bool + debugMode bool +} + +const ( + EOsLookUpMode = 1 /* 00000001 */ + EUserAgentLookUpMode = 2 /* 00000010 */ + EDeviceLookUpMode = 4 /* 00000100 */ + cMinMissesTreshold = 100000 + cDefaultMissesTreshold = 500000 + cDefaultMatchIdxNotOk = 20 + cDefaultSortOption = false +) + +var ( + missesTreshold = uint64(500000) + matchIdxNotOk = 20 +) + +func (parser *Parser) mustCompile() { // until we can use yaml.UnmarshalYAML with embedded pointer struct + for _, p := range parser.UA { + p.Reg = compileRegex(p.Flags, p.Expr) + p.setDefaults() + } + for _, p := range parser.OS { + p.Reg = compileRegex(p.Flags, p.Expr) + p.setDefaults() + } + for _, p := range parser.Device { + p.Reg = compileRegex(p.Flags, p.Expr) + p.setDefaults() + } +} + +func NewWithOptions(regexFile string, mode, treshold, topCnt int, useSort, debugMode bool) (*Parser, error) { + data, err := ioutil.ReadFile(regexFile) + if nil != err { + return nil, err + } + if topCnt >= 0 { + matchIdxNotOk = topCnt + } + if treshold > cMinMissesTreshold { + missesTreshold = uint64(treshold) + } + parser, err := NewFromBytes(data) + if err != nil { + return nil, err + } + parser.Mode = mode + parser.UseSort = useSort + parser.debugMode = debugMode + return parser, nil +} + +func New(regexFile string) (*Parser, error) { + data, err := ioutil.ReadFile(regexFile) + if nil != err { + return nil, err + } + matchIdxNotOk = cDefaultMatchIdxNotOk + missesTreshold = cDefaultMissesTreshold + parser, err := NewFromBytes(data) + if err != nil { + return nil, err + } + return parser, nil +} + +func NewFromSaved() *Parser { + parser, err := NewFromBytes(DefinitionYaml) + if err != nil { + // if the YAML is malformed, it's a programmatic error inside what + // we've statically-compiled in our binary. Panic!
+ panic(err.Error()) + } + return parser +} + +func NewFromBytes(data []byte) (*Parser, error) { + parser := &Parser{ + Mode: EOsLookUpMode | EUserAgentLookUpMode | EDeviceLookUpMode, + cache: newCache(), + } + if err := yaml.Unmarshal(data, &parser.RegexesDefinitions); err != nil { + return nil, err + } + + parser.mustCompile() + + return parser, nil +} + +func (parser *Parser) Parse(line string) *Client { + cli := new(Client) + var wg sync.WaitGroup + if EUserAgentLookUpMode&parser.Mode == EUserAgentLookUpMode { + wg.Add(1) + go func() { + defer wg.Done() + parser.RLock() + cli.UserAgent = parser.ParseUserAgent(line) + parser.RUnlock() + }() + } + if EOsLookUpMode&parser.Mode == EOsLookUpMode { + wg.Add(1) + go func() { + defer wg.Done() + parser.RLock() + cli.Os = parser.ParseOs(line) + parser.RUnlock() + }() + } + if EDeviceLookUpMode&parser.Mode == EDeviceLookUpMode { + wg.Add(1) + go func() { + defer wg.Done() + parser.RLock() + cli.Device = parser.ParseDevice(line) + parser.RUnlock() + }() + } + wg.Wait() + if parser.UseSort == true { + checkAndSort(parser) + } + return cli +} + +func (parser *Parser) ParseUserAgent(line string) *UserAgent { + cachedUA, ok := parser.cache.userAgent.Get(line) + if ok { + return cachedUA.(*UserAgent) + } + ua := new(UserAgent) + foundIdx := -1 + found := false + for i, uaPattern := range parser.UA { + uaPattern.Match(line, ua) + if len(ua.Family) > 0 { + found = true + foundIdx = i + atomic.AddUint64(&uaPattern.MatchesCount, 1) + break + } + } + if !found { + ua.Family = "Other" + } + if foundIdx > matchIdxNotOk { + atomic.AddUint64(&parser.UserAgentMisses, 1) + } + parser.cache.userAgent.Add(line, ua) + return ua +} + +func (parser *Parser) ParseOs(line string) *Os { + cachedOS, ok := parser.cache.os.Get(line) + if ok { + return cachedOS.(*Os) + } + + os := new(Os) + foundIdx := -1 + found := false + for i, osPattern := range parser.OS { + osPattern.Match(line, os) + if len(os.Family) > 0 { + found = true + foundIdx = i + atomic.AddUint64(&osPattern.MatchesCount, 1) + break + } + } + if !found { + os.Family = "Other" + } + if foundIdx > matchIdxNotOk { + atomic.AddUint64(&parser.OsMisses, 1) + } + + parser.cache.os.Add(line, os) + return os +} + +func (parser *Parser) ParseDevice(line string) *Device { + cachedDevice, ok := parser.cache.device.Get(line) + if ok { + return cachedDevice.(*Device) + } + + dvc := new(Device) + foundIdx := -1 + found := false + for i, dvcPattern := range parser.Device { + dvcPattern.Match(line, dvc) + if len(dvc.Family) > 0 { + found = true + foundIdx = i + atomic.AddUint64(&dvcPattern.MatchesCount, 1) + break + } + } + if !found { + dvc.Family = "Other" + } + if foundIdx > matchIdxNotOk { + atomic.AddUint64(&parser.DeviceMisses, 1) + } + + parser.cache.device.Add(line, dvc) + return dvc +} + +func checkAndSort(parser *Parser) { + parser.Lock() + if atomic.LoadUint64(&parser.UserAgentMisses) >= missesTreshold { + if parser.debugMode { + fmt.Printf("%s\tSorting UserAgents slice\n", time.Now()) + } + parser.UserAgentMisses = 0 + sort.Sort(UserAgentSorter(parser.UA)) + } + parser.Unlock() + parser.Lock() + if atomic.LoadUint64(&parser.OsMisses) >= missesTreshold { + if parser.debugMode { + fmt.Printf("%s\tSorting OS slice\n", time.Now()) + } + parser.OsMisses = 0 + sort.Sort(OsSorter(parser.OS)) + } + parser.Unlock() + parser.Lock() + if atomic.LoadUint64(&parser.DeviceMisses) >= missesTreshold { + if parser.debugMode { + fmt.Printf("%s\tSorting Device slice\n", time.Now()) + } + parser.DeviceMisses = 0 + 
sort.Sort(DeviceSorter(parser.Device)) + } + parser.Unlock() +} + +func compileRegex(flags, expr string) *regexp.Regexp { + if flags == "" { + return regexp.MustCompile(expr) + } else { + return regexp.MustCompile(fmt.Sprintf("(?%s)%s", flags, expr)) + } +} diff --git a/vendor/github.com/ua-parser/uap-go/uaparser/user_agent.go b/vendor/github.com/ua-parser/uap-go/uaparser/user_agent.go new file mode 100644 index 00000000000..ad4073a58fb --- /dev/null +++ b/vendor/github.com/ua-parser/uap-go/uaparser/user_agent.go @@ -0,0 +1,44 @@ +package uaparser + +type UserAgent struct { + Family string + Major string + Minor string + Patch string +} + +func (parser *uaParser) Match(line string, ua *UserAgent) { + matches := parser.Reg.FindStringSubmatchIndex(line) + if len(matches) > 0 { + ua.Family = string(parser.Reg.ExpandString(nil, parser.FamilyReplacement, line, matches)) + ua.Major = string(parser.Reg.ExpandString(nil, parser.V1Replacement, line, matches)) + ua.Minor = string(parser.Reg.ExpandString(nil, parser.V2Replacement, line, matches)) + ua.Patch = string(parser.Reg.ExpandString(nil, parser.V3Replacement, line, matches)) + } +} + +func (ua *UserAgent) ToString() string { + var str string + if ua.Family != "" { + str += ua.Family + } + version := ua.ToVersionString() + if version != "" { + str += " " + version + } + return str +} + +func (ua *UserAgent) ToVersionString() string { + var version string + if ua.Major != "" { + version += ua.Major + } + if ua.Minor != "" { + version += "." + ua.Minor + } + if ua.Patch != "" { + version += "." + ua.Patch + } + return version +} diff --git a/vendor/github.com/ua-parser/uap-go/uaparser/yaml.go b/vendor/github.com/ua-parser/uap-go/uaparser/yaml.go new file mode 100644 index 00000000000..c16729222a2 --- /dev/null +++ b/vendor/github.com/ua-parser/uap-go/uaparser/yaml.go @@ -0,0 +1,3736 @@ +package uaparser + +var DefinitionYaml = []byte(`user_agent_parsers: + - regex: '(GeoEvent Server) (\d+)(?:\.(\d+)(?:\.(\d+)|)|)' + - regex: '(ArcGIS Pro)(?: (\d+)\.(\d+)\.([^ ]+)|)' + - regex: 'ArcGIS Client Using WinInet' + family_replacement: 'ArcMap' + - regex: '(OperationsDashboard)-(?:Windows)-(\d+)\.(\d+)\.(\d+)' + family_replacement: 'Operations Dashboard for ArcGIS' + - regex: '(arcgisearth)/(\d+)\.(\d+)(?:\.(\d+)|)' + family_replacement: 'ArcGIS Earth' + - regex: 'com.esri.(earth).phone/(\d+)\.(\d+)(?:\.(\d+)|)' + family_replacement: 'ArcGIS Earth' + - regex: '(arcgis-explorer)/(\d+)\.(\d+)\.(\d+)' + family_replacement: 'Explorer for ArcGIS' + - regex: 'arcgis-(collector|aurora)/(\d+)\.(\d+)\.(\d+)' + family_replacement: 'Collector for ArcGIS' + - regex: '(arcgis-workforce)/(\d+)\.(\d+)\.(\d+)' + family_replacement: 'Workforce for ArcGIS' + - regex: '(Collector|Explorer|Workforce)-(?:Android|iOS)-(\d+)\.(\d+)(?:\.(\d+)|)' + family_replacement: '$1 for ArcGIS' + - regex: '(Explorer|Collector)/(\d+) CFNetwork' + family_replacement: '$1 for ArcGIS' + - regex: 'ArcGISRuntime-(Android|iOS|NET|Qt)/(\d+)\.(\d+)(?:\.(\d+)|)' + family_replacement: 'ArcGIS Runtime SDK for $1' + - regex: 'ArcGIS\.?(iOS|Android|NET|Qt)(?:-|\.)(\d+)\.(\d+)(?:\.(\d+)|)' + family_replacement: 'ArcGIS Runtime SDK for $1' + - regex: 'ArcGIS\.Runtime\.(Qt)\.(\d+)\.(\d+)(?:\.(\d+)|)' + family_replacement: 'ArcGIS Runtime SDK for $1' + - regex: '^(Luminary)[Stage]+/(\d+) CFNetwork' + - regex: '(ESPN)[%20| ]+Radio/(\d+)\.(\d+)\.(\d+) CFNetwork' + - regex: '(Antenna)/(\d+) CFNetwork' + family_replacement: 'AntennaPod' + - regex: '(TopPodcasts)Pro/(\d+) CFNetwork' + - regex: 
'(MusicDownloader)Lite/(\d+)\.(\d+)\.(\d+) CFNetwork' + - regex: '^(.{0,200})-iPad\/(\d+)(?:\.(\d+)|)(?:\.(\d+)|)(?:\.(\d+)|) CFNetwork' + - regex: '^(.{0,200})-iPhone/(\d+)(?:\.(\d+)|)(?:\.(\d+)|)(?:\.(\d+)|) CFNetwork' + - regex: '^(.{0,200})/(\d+)(?:\.(\d+)|)(?:\.(\d+)|)(?:\.(\d+)|) CFNetwork' + - regex: '^(Luminary)/(\d+)(?:\.(\d+)|)(?:\.(\d+)|)' + - regex: '(espn\.go)' + family_replacement: 'ESPN' + - regex: '(espnradio\.com)' + family_replacement: 'ESPN' + - regex: 'ESPN APP$' + family_replacement: 'ESPN' + - regex: '(audioboom\.com)' + family_replacement: 'AudioBoom' + - regex: ' (Rivo) RHYTHM' + - regex: '(CFNetwork)(?:/(\d+)\.(\d+)(?:\.(\d+)|)|)' + family_replacement: 'CFNetwork' + - regex: '(Pingdom\.com_bot_version_)(\d+)\.(\d+)' + family_replacement: 'PingdomBot' + - regex: '(PingdomTMS)/(\d+)\.(\d+)\.(\d+)' + family_replacement: 'PingdomBot' + - regex: '(PingdomPageSpeed)/(\d+)\.(\d+)' + family_replacement: 'PingdomBot' + - regex: ' (PTST)/(\d+)(?:\.(\d+)|)$' + family_replacement: 'WebPageTest.org bot' + - regex: 'X11; (Datanyze); Linux' + - regex: '(NewRelicPinger)/(\d+)\.(\d+)' + family_replacement: 'NewRelicPingerBot' + - regex: '(Tableau)/(\d+)\.(\d+)' + family_replacement: 'Tableau' + - regex: 'AppleWebKit/\d{1,10}\.\d{1,10}.{0,200} Safari.{0,200} (CreativeCloud)/(\d+)\.(\d+).(\d+)' + family_replacement: 'Adobe CreativeCloud' + - regex: '(Salesforce)(?:.)\/(\d+)\.(\d?)' + - regex: '(\(StatusCake\))' + family_replacement: 'StatusCakeBot' + - regex: '(facebookexternalhit)/(\d+)\.(\d+)' + family_replacement: 'FacebookBot' + - regex: 'Google.{0,50}/\+/web/snippet' + family_replacement: 'GooglePlusBot' + - regex: 'via ggpht\.com GoogleImageProxy' + family_replacement: 'GmailImageProxy' + - regex: 'YahooMailProxy; https://help\.yahoo\.com/kb/yahoo-mail-proxy-SLN28749\.html' + family_replacement: 'YahooMailProxy' + - regex: '(Twitterbot)/(\d+)\.(\d+)' + family_replacement: 'Twitterbot' + - regex: '/((?:Ant-|)Nutch|[A-z]+[Bb]ot|[A-z]+[Ss]pider|Axtaris|fetchurl|Isara|ShopSalad|Tailsweep)[ \-](\d+)(?:\.(\d+)|)(?:\.(\d+)|)' + - regex: '\b(008|Altresium|Argus|BaiduMobaider|BoardReader|DNSGroup|DataparkSearch|EDI|Goodzer|Grub|INGRID|Infohelfer|LinkedInBot|LOOQ|Nutch|OgScrper|Pandora|PathDefender|Peew|PostPost|Steeler|Twitterbot|VSE|WebCrunch|WebZIP|Y!J-BR[A-Z]|YahooSeeker|envolk|sproose|wminer)/(\d+)(?:\.(\d+)|)(?:\.(\d+)|)' + - regex: '(MSIE) (\d+)\.(\d+)([a-z]\d|[a-z]|);.{0,200} MSIECrawler' + family_replacement: 'MSIECrawler' + - regex: '(DAVdroid)/(\d+)\.(\d+)(?:\.(\d+)|)' + - regex: '(Google-HTTP-Java-Client|Apache-HttpClient|PostmanRuntime|Go-http-client|scalaj-http|http%20client|Python-urllib|HttpMonitor|TLSProber|WinHTTP|JNLP|okhttp|aihttp|reqwest|axios|unirest-(?:java|python|ruby|nodejs|php|net))(?:[ /](\d+)(?:\.(\d+)|)(?:\.(\d+)|)|)' + - regex: '(Pinterest(?:bot|))/(\d+)(?:\.(\d+)|)(?:\.(\d+)|)[;\s(]+\+https://www.pinterest.com/bot.html' + family_replacement: 'Pinterestbot' + - regex: '(CSimpleSpider|Cityreview Robot|CrawlDaddy|CrawlFire|Finderbots|Index crawler|Job Roboter|KiwiStatus Spider|Lijit Crawler|QuerySeekerSpider|ScollSpider|Trends Crawler|USyd-NLP-Spider|SiteCat Webbot|BotName\/\$BotVersion|123metaspider-Bot|1470\.net crawler|50\.nu|8bo Crawler Bot|Aboundex|Accoona-[A-z]{1,30}-Agent|AdsBot-Google(?:-[a-z]{1,30}|)|altavista|AppEngine-Google|archive.{0,30}\.org_bot|archiver|Ask Jeeves|[Bb]ai[Dd]u[Ss]pider(?:-[A-Za-z]{1,30})(?:-[A-Za-z]{1,30}|)|bingbot|BingPreview|blitzbot|BlogBridge|Bloglovin|BoardReader Blog Indexer|BoardReader Favicon 
Fetcher|boitho.com-dc|BotSeer|BUbiNG|\b\w{0,30}favicon\w{0,30}\b|\bYeti(?:-[a-z]{1,30}|)|Catchpoint(?: bot|)|[Cc]harlotte|Checklinks|clumboot|Comodo HTTP\(S\) Crawler|Comodo-Webinspector-Crawler|ConveraCrawler|CRAWL-E|CrawlConvera|Daumoa(?:-feedfetcher|)|Feed Seeker Bot|Feedbin|findlinks|Flamingo_SearchEngine|FollowSite Bot|furlbot|Genieo|gigabot|GomezAgent|gonzo1|(?:[a-zA-Z]{1,30}-|)Googlebot(?:-[a-zA-Z]{1,30}|)|Google SketchUp|grub-client|gsa-crawler|heritrix|HiddenMarket|holmes|HooWWWer|htdig|ia_archiver|ICC-Crawler|Icarus6j|ichiro(?:/mobile|)|IconSurf|IlTrovatore(?:-Setaccio|)|InfuzApp|Innovazion Crawler|InternetArchive|IP2[a-z]{1,30}Bot|jbot\b|KaloogaBot|Kraken|Kurzor|larbin|LEIA|LesnikBot|Linguee Bot|LinkAider|LinkedInBot|Lite Bot|Llaut|lycos|Mail\.RU_Bot|masscan|masidani_bot|Mediapartners-Google|Microsoft .{0,30} Bot|mogimogi|mozDex|MJ12bot|msnbot(?:-media {0,2}|)|msrbot|Mtps Feed Aggregation System|netresearch|Netvibes|NewsGator[^/]{0,30}|^NING|Nutch[^/]{0,30}|Nymesis|ObjectsSearch|OgScrper|Orbiter|OOZBOT|PagePeeker|PagesInventory|PaxleFramework|Peeplo Screenshot Bot|PHPCrawl|PlantyNet_WebRobot|Pompos|Qwantify|Read%20Later|Reaper|RedCarpet|Retreiver|Riddler|Rival IQ|scooter|Scrapy|Scrubby|searchsight|seekbot|semanticdiscovery|SemrushBot|Simpy|SimplePie|SEOstats|SimpleRSS|SiteCon|Slackbot-LinkExpanding|Slack-ImgProxy|Slurp|snappy|Speedy Spider|Squrl Java|Stringer|TheUsefulbot|ThumbShotsBot|Thumbshots\.ru|Tiny Tiny RSS|Twitterbot|WhatsApp|URL2PNG|Vagabondo|VoilaBot|^vortex|Votay bot|^voyager|WASALive.Bot|Web-sniffer|WebThumb|WeSEE:[A-z]{1,30}|WhatWeb|WIRE|WordPress|Wotbox|www\.almaden\.ibm\.com|Xenu(?:.s|) Link Sleuth|Xerka [A-z]{1,30}Bot|yacy(?:bot|)|YahooSeeker|Yahoo! Slurp|Yandex\w{1,30}|YodaoBot(?:-[A-z]{1,30}|)|YottaaMonitor|Yowedo|^Zao|^Zao-Crawler|ZeBot_www\.ze\.bz|ZooShot|ZyBorg|ArcGIS Hub Indexer)(?:[ /]v?(\d+)(?:\.(\d+)(?:\.(\d+)|)|)|)' + - regex: '\b(Boto3?|JetS3t|aws-(?:cli|sdk-(?:cpp|go|go-v\d|java|nodejs|ruby2?|dotnet-(?:\d{1,2}|core)))|s3fs)/(\d+)\.(\d+)(?:\.(\d+)|)' + - regex: '(FME)\/(\d+\.\d+)\.(\d+)\.(\d+)' + - regex: '(QGIS)\/(\d)\.?0?(\d{1,2})\.?0?(\d{1,2})' + - regex: '(JOSM)/(\d+)\.(\d+)' + - regex: '(Tygron Platform) \((\d+)\.(\d+)\.(\d+(?:\.\d+| RC \d+\.\d+))' + - regex: '\[(FBAN/MessengerForiOS|FB_IAB/MESSENGER);FBAV/(\d+)(?:\.(\d+)(?:\.(\d+)(?:\.(\d+)|)|)|)' + family_replacement: 'Facebook Messenger' + - regex: '\[FB.{0,300};(FBAV)/(\d+)(?:\.(\d+)|)(?:\.(\d+)|)' + family_replacement: 'Facebook' + - regex: '\[FB.{0,300};' + family_replacement: 'Facebook' + - regex: '(RecipeRadar)/(\d+)\.(\d+)(?:\.(\d+)|)' + - regex: '^.{0,200}?(?:\/[A-Za-z0-9\.]{0,50}|) {0,2}([A-Za-z0-9 \-_\!\[\]:]{0,50}(?:[Aa]rchiver|[Ii]ndexer|[Ss]craper|[Bb]ot|[Ss]pider|[Cc]rawl[a-z]{0,50}))[/ ](\d+)(?:\.(\d+)(?:\.(\d+)|)|)' + - regex: '^.{0,200}?((?:[A-Za-z][A-Za-z0-9 -]{0,50}|)[^C][^Uu][Bb]ot)\b(?:(?:[ /]| v)(\d+)(?:\.(\d+)|)(?:\.(\d+)|)|)' + - regex: '^.{0,200}?((?:[A-z0-9]{1,50}|[A-z\-]{1,50} ?|)(?: the |)(?:[Ss][Pp][Ii][Dd][Ee][Rr]|[Ss]crape|[Cc][Rr][Aa][Ww][Ll])[A-z0-9]{0,50})(?:(?:[ /]| v)(\d+)(?:\.(\d+)|)(?:\.(\d+)|)|)' + - regex: '(HbbTV)/(\d+)\.(\d+)\.(\d+) \(' + - regex: '(Chimera|SeaMonkey|Camino|Waterfox)/(\d+)\.(\d+)\.?([ab]?\d+[a-z]*|)' + - regex: '(SailfishBrowser)/(\d+)\.(\d+)(?:\.(\d+)|)' + family_replacement: 'Sailfish Browser' + - regex: '\[(Pinterest)/[^\]]{1,50}\]' + - regex: '(Pinterest)(?: for Android(?: Tablet|)|)/(\d+)(?:\.(\d+)|)(?:\.(\d+)|)' + - regex: 'Mozilla.{1,200}Mobile.{1,100}(Instagram).(\d+)\.(\d+)\.(\d+)' + - regex: 
'Mozilla.{1,200}Mobile.{1,100}(Flipboard).(\d+)\.(\d+)\.(\d+)' + - regex: 'Mozilla.{1,200}Mobile.{1,100}(Flipboard-Briefing).(\d+)\.(\d+)\.(\d+)' + - regex: 'Mozilla.{1,200}Mobile.{1,100}(Onefootball)\/Android.(\d+)\.(\d+)\.(\d+)' + - regex: '(Snapchat)\/(\d+)\.(\d+)\.(\d+)\.(\d+)' + - regex: '(Twitter for (?:iPhone|iPad)|TwitterAndroid)(?:\/(\d+)\.(\d+)|)' + family_replacement: 'Twitter' + - regex: 'Mozilla.{1,200}Mobile.{1,100}(Phantom\/ios|Phantom\/android).(\d+)\.(\d+)\.(\d+)' + family_replacement: 'Phantom' + - regex: 'Mozilla.{1,100}Mobile.{1,100}(AspiegelBot|PetalBot)' + family_replacement: 'Spider' + - regex: 'AspiegelBot|PetalBot' + family_replacement: 'Spider' + - regex: '(Firefox)/(\d+)\.(\d+) Basilisk/(\d+)' + family_replacement: 'Basilisk' + - regex: '(PaleMoon)/(\d+)\.(\d+)(?:\.(\d+)|)' + family_replacement: 'Pale Moon' + - regex: '(Fennec)/(\d+)\.(\d+)\.?([ab]?\d+[a-z]*)' + family_replacement: 'Firefox Mobile' + - regex: '(Fennec)/(\d+)\.(\d+)(pre)' + family_replacement: 'Firefox Mobile' + - regex: '(Fennec)/(\d+)\.(\d+)' + family_replacement: 'Firefox Mobile' + - regex: '(?:Mobile|Tablet);.{0,200}(Firefox)/(\d+)\.(\d+)' + family_replacement: 'Firefox Mobile' + - regex: '(Namoroka|Shiretoko|Minefield)/(\d+)\.(\d+)\.(\d+(?:pre|))' + family_replacement: 'Firefox ($1)' + - regex: '(Firefox)/(\d+)\.(\d+)(a\d+[a-z]*)' + family_replacement: 'Firefox Alpha' + - regex: '(Firefox)/(\d+)\.(\d+)(b\d+[a-z]*)' + family_replacement: 'Firefox Beta' + - regex: '(Firefox)-(?:\d+\.\d+|)/(\d+)\.(\d+)(a\d+[a-z]*)' + family_replacement: 'Firefox Alpha' + - regex: '(Firefox)-(?:\d+\.\d+|)/(\d+)\.(\d+)(b\d+[a-z]*)' + family_replacement: 'Firefox Beta' + - regex: '(Namoroka|Shiretoko|Minefield)/(\d+)\.(\d+)([ab]\d+[a-z]*|)' + family_replacement: 'Firefox ($1)' + - regex: '(Firefox).{0,200}Tablet browser (\d+)\.(\d+)\.(\d+)' + family_replacement: 'MicroB' + - regex: '(MozillaDeveloperPreview)/(\d+)\.(\d+)([ab]\d+[a-z]*|)' + - regex: '(FxiOS)/(\d+)\.(\d+)(\.(\d+)|)(\.(\d+)|)' + family_replacement: 'Firefox iOS' + - regex: '(Flock)/(\d+)\.(\d+)(b\d+?)' + - regex: '(RockMelt)/(\d+)\.(\d+)\.(\d+)' + - regex: '(Navigator)/(\d+)\.(\d+)\.(\d+)' + family_replacement: 'Netscape' + - regex: '(Navigator)/(\d+)\.(\d+)([ab]\d+)' + family_replacement: 'Netscape' + - regex: '(Netscape6)/(\d+)\.(\d+)\.?([ab]?\d+|)' + family_replacement: 'Netscape' + - regex: '(MyIBrow)/(\d+)\.(\d+)' + family_replacement: 'My Internet Browser' + - regex: '(UC? 
?Browser|UCWEB|U3)[ /]?(\d+)\.(\d+)\.(\d+)' + family_replacement: 'UC Browser' + - regex: '(Opera Tablet).{0,200}Version/(\d+)\.(\d+)(?:\.(\d+)|)' + - regex: '(Opera Mini)(?:/att|)/?(\d+|)(?:\.(\d+)|)(?:\.(\d+)|)' + - regex: '(Opera)/.{1,100}Opera Mobi.{1,100}Version/(\d+)\.(\d+)' + family_replacement: 'Opera Mobile' + - regex: '(Opera)/(\d+)\.(\d+).{1,100}Opera Mobi' + family_replacement: 'Opera Mobile' + - regex: 'Opera Mobi.{1,100}(Opera)(?:/|\s+)(\d+)\.(\d+)' + family_replacement: 'Opera Mobile' + - regex: 'Opera Mobi' + family_replacement: 'Opera Mobile' + - regex: '(Opera)/9.80.{0,200}Version/(\d+)\.(\d+)(?:\.(\d+)|)' + - regex: '(?:Mobile Safari).{1,300}(OPR)/(\d+)\.(\d+)\.(\d+)' + family_replacement: 'Opera Mobile' + - regex: '(?:Chrome).{1,300}(OPR)/(\d+)\.(\d+)\.(\d+)' + family_replacement: 'Opera' + - regex: '(Coast)/(\d+).(\d+).(\d+)' + family_replacement: 'Opera Coast' + - regex: '(OPiOS)/(\d+).(\d+).(\d+)' + family_replacement: 'Opera Mini' + - regex: 'Chrome/.{1,200}( MMS)/(\d+).(\d+).(\d+)' + family_replacement: 'Opera Neon' + - regex: '(hpw|web)OS/(\d+)\.(\d+)(?:\.(\d+)|)' + family_replacement: 'webOS Browser' + - regex: '(luakit)' + family_replacement: 'LuaKit' + - regex: '(Snowshoe)/(\d+)\.(\d+).(\d+)' + - regex: 'Gecko/\d+ (Lightning)/(\d+)\.(\d+)\.?((?:[ab]?\d+[a-z]*)|(?:\d*))' + - regex: '(Firefox)/(\d+)\.(\d+)\.(\d+(?:pre|)) \(Swiftfox\)' + family_replacement: 'Swiftfox' + - regex: '(Firefox)/(\d+)\.(\d+)([ab]\d+[a-z]*|) \(Swiftfox\)' + family_replacement: 'Swiftfox' + - regex: '(rekonq)/(\d+)\.(\d+)(?:\.(\d+)|) Safari' + family_replacement: 'Rekonq' + - regex: 'rekonq' + family_replacement: 'Rekonq' + - regex: '(conkeror|Conkeror)/(\d+)\.(\d+)(?:\.(\d+)|)' + family_replacement: 'Conkeror' + - regex: '(konqueror)/(\d+)\.(\d+)\.(\d+)' + family_replacement: 'Konqueror' + - regex: '(WeTab)-Browser' + - regex: '(Comodo_Dragon)/(\d+)\.(\d+)\.(\d+)' + family_replacement: 'Comodo Dragon' + - regex: '(Symphony) (\d+).(\d+)' + - regex: 'PLAYSTATION 3.{1,200}WebKit' + family_replacement: 'NetFront NX' + - regex: 'PLAYSTATION 3' + family_replacement: 'NetFront' + - regex: '(PlayStation Portable)' + family_replacement: 'NetFront' + - regex: '(PlayStation Vita)' + family_replacement: 'NetFront NX' + - regex: 'AppleWebKit.{1,200} (NX)/(\d+)\.(\d+)\.(\d+)' + family_replacement: 'NetFront NX' + - regex: '(Nintendo 3DS)' + family_replacement: 'NetFront NX' + - regex: '(HuaweiBrowser)/(\d+)\.(\d+)\.(\d+)\.\d+' + family_replacement: 'Huawei Browser' + - regex: '(AVG)/(\d+)\.(\d+)\.(\d+)\.\d+' + family_replacement: 'AVG' + - regex: '(AvastSecureBrowser|Avast)/(\d+)\.(\d+)\.(\d+)' + family_replacement: 'Avast Secure Browser' + - regex: '(Instabridge)/(\d+)(?:\.(\d+)|)(?:\.(\d+)|)' + - regex: '(AlohaBrowser)/(\d+)\.(\d+)\.(\d+)(?:\.(\d+)|)' + family_replacement: 'Aloha Browser' + - regex: '((?:B|b)rave(?:\sChrome)?)/(\d+)(?:\.(\d+)|)(?:\.(\d+)|)(?:\.(\d+)|)' + family_replacement: 'Brave' + - regex: '(Silk)/(\d+)\.(\d+)(?:\.([0-9\-]+)|)' + family_replacement: 'Amazon Silk' + - regex: '(Puffin)/(\d+)\.(\d+)(?:\.(\d+)|)' + - regex: 'Windows Phone .{0,200}(Edge)/(\d+)\.(\d+)' + family_replacement: 'Edge Mobile' + - regex: '(EdgiOS|EdgA)/(\d+)(?:\.(\d+)|)(?:\.(\d+)|)(?:\.(\d+)|)' + family_replacement: 'Edge Mobile' + - regex: '(OculusBrowser)/(\d+)\.(\d+).0.0(?:\.([0-9\-]+)|)' + family_replacement: 'Oculus Browser' + - regex: '(SamsungBrowser)/(\d+)\.(\d+)' + family_replacement: 'Samsung Internet' + - regex: '(SznProhlizec)/(\d+)\.(\d+)(?:\.(\d+)|)' + family_replacement: 'Seznam prohlížeč' + - 
regex: '(coc_coc_browser)/(\d+)\.(\d+)(?:\.(\d+)|)' + family_replacement: 'Coc Coc' + - regex: '(baidubrowser)[/\s](\d+)(?:\.(\d+)|)(?:\.(\d+)|)' + family_replacement: 'Baidu Browser' + - regex: '(FlyFlow)/(\d+)\.(\d+)' + family_replacement: 'Baidu Explorer' + - regex: '(MxBrowser)/(\d+)\.(\d+)(?:\.(\d+)|)' + family_replacement: 'Maxthon' + - regex: '(Crosswalk)/(\d+)\.(\d+)\.(\d+)\.(\d+)' + - regex: '(Line)/(\d+)\.(\d+)\.(\d+)' + family_replacement: 'LINE' + - regex: '(MiuiBrowser)/(\d+)\.(\d+)\.(\d+)' + family_replacement: 'MiuiBrowser' + - regex: '(Mint Browser)/(\d+)\.(\d+)\.(\d+)' + family_replacement: 'Mint Browser' + - regex: '(TopBuzz)/(\d+).(\d+).(\d+)' + family_replacement: 'TopBuzz' + - regex: 'Mozilla.{1,200}Android.{1,200}(GSA)/(\d+)\.(\d+)\.(\d+)' + family_replacement: 'Google' + - regex: '(MQQBrowser/Mini)(?:(\d+)(?:\.(\d+)|)(?:\.(\d+)|)|)' + family_replacement: 'QQ Browser Mini' + - regex: '(MQQBrowser)(?:/(\d+)(?:\.(\d+)|)(?:\.(\d+)|)|)' + family_replacement: 'QQ Browser Mobile' + - regex: '(QQBrowser)(?:/(\d+)(?:\.(\d+)\.(\d+)(?:\.(\d+)|)|)|)' + family_replacement: 'QQ Browser' + - regex: 'Mozilla.{1,200}Mobile.{1,100}(DuckDuckGo)/(\d+)' + family_replacement: 'DuckDuckGo Mobile' + - regex: 'Mozilla.{1,200}(DuckDuckGo)/(\d+)' + family_replacement: 'DuckDuckGo' + - regex: 'Mozilla.{1,200}Mobile.{1,100}(Ddg)/(\d+)(?:\.(\d+)|)' + family_replacement: 'DuckDuckGo Mobile' + - regex: 'Mozilla.{1,200}(Ddg)/(\d+)(?:\.(\d+)|)' + family_replacement: 'DuckDuckGo' + - regex: '(Tenta/)(\d+)\.(\d+)\.(\d+)' + family_replacement: 'Tenta Browser' + - regex: '(Ecosia) ios@(\d+)(?:\.(\d+)|)(?:\.(\d+)|)(?:\.(\d+)|)' + family_replacement: 'Ecosia iOS' + - regex: '(Ecosia) android@(\d+)(?:\.(\d+)|)(?:\.(\d+)|)(?:\.(\d+)|)' + family_replacement: 'Ecosia Android' + - regex: '(VivoBrowser)\/(\d+)\.(\d+)\.(\d+)\.(\d+)' + - regex: '(HiBrowser)\/v(\d+)\.(\d+)\.(\d+)\.(\d+)' + - regex: 'Version/.{1,300}(Chrome)/(\d+)\.(\d+)\.(\d+)\.(\d+)' + family_replacement: 'Chrome Mobile WebView' + - regex: '; wv\).{1,300}(Chrome)/(\d+)\.(\d+)\.(\d+)\.(\d+)' + family_replacement: 'Chrome Mobile WebView' + - regex: '(CrMo)/(\d+)\.(\d+)\.(\d+)\.(\d+)' + family_replacement: 'Chrome Mobile' + - regex: '(CriOS)/(\d+)(?:\.(\d+)|)(?:\.(\d+)|)(?:\.(\d+)|)' + family_replacement: 'Chrome Mobile iOS' + - regex: '(Chrome)/(\d+)\.(\d+)\.(\d+)\.(\d+) Mobile(?:[ /]|$)' + family_replacement: 'Chrome Mobile' + - regex: ' Mobile .{1,300}(Chrome)/(\d+)\.(\d+)\.(\d+)\.(\d+)' + family_replacement: 'Chrome Mobile' + - regex: '(chromeframe)/(\d+)\.(\d+)\.(\d+)' + family_replacement: 'Chrome Frame' + - regex: '(SLP Browser)/(\d+)\.(\d+)' + family_replacement: 'Tizen Browser' + - regex: '(SE 2\.X) MetaSr (\d+)\.(\d+)' + family_replacement: 'Sogou Explorer' + - regex: '(Rackspace Monitoring)/(\d+)\.(\d+)' + family_replacement: 'RackspaceBot' + - regex: '(PRTG Network Monitor)' + - regex: '(PyAMF)/(\d+)\.(\d+)\.(\d+)' + - regex: '(YaBrowser)/(\d+)\.(\d+)\.(\d+)' + family_replacement: 'Yandex Browser' + - regex: '(YaSearchBrowser)/(\d+)\.(\d+)\.(\d+)' + family_replacement: 'Yandex Browser' + - regex: '(Chrome)/(\d+)\.(\d+)\.(\d+).{0,100} MRCHROME' + family_replacement: 'Mail.ru Chromium Browser' + - regex: '(AOL) (\d+)\.(\d+); AOLBuild (\d+)' + - regex: '(PodCruncher|Downcast)[ /]?(\d+)(?:\.(\d+)|)(?:\.(\d+)|)(?:\.(\d+)|)' + - regex: ' (BoxNotes)/(\d+)\.(\d+)\.(\d+)' + - regex: '(Whale)/(\d+)\.(\d+)\.(\d+)\.(\d+) Mobile(?:[ /]|$)' + family_replacement: 'Whale' + - regex: '(Whale)/(\d+)\.(\d+)\.(\d+)' + family_replacement: 'Whale' + - regex: 
'(1Password)/(\d+)\.(\d+)\.(\d+)' + - regex: '(Ghost)/(\d+)\.(\d+)\.(\d+)' + - regex: 'PAN (GlobalProtect)/(\d+)\.(\d+)\.(\d+) .{1,100} \(X11; Linux x86_64\)' + - regex: '^(surveyon)/(\d+)\.(\d+)\.(\d+)' + family_replacement: 'Surveyon' + - regex: '(Slack_SSB)/(\d+)\.(\d+)\.(\d+)' + family_replacement: 'Slack Desktop Client' + - regex: '(HipChat)/?(\d+|)' + family_replacement: 'HipChat Desktop Client' + - regex: '\b(MobileIron|FireWeb|Jasmine|ANTGalio|Midori|Fresco|Lobo|PaleMoon|Maxthon|Lynx|OmniWeb|Dillo|Camino|Demeter|Fluid|Fennec|Epiphany|Shiira|Sunrise|Spotify|Flock|Netscape|Lunascape|WebPilot|NetFront|Netfront|Konqueror|SeaMonkey|Kazehakase|Vienna|Iceape|Iceweasel|IceWeasel|Iron|K-Meleon|Sleipnir|Galeon|GranParadiso|Opera Mini|iCab|NetNewsWire|ThunderBrowse|Iris|UP\.Browser|Bunjalloo|Google Earth|Raven for Mac|Openwave|MacOutlook|Electron|OktaMobile)/(\d+)\.(\d+)\.(\d+)' + - regex: 'Microsoft Office Outlook 12\.\d+\.\d+|MSOffice 12' + family_replacement: 'Outlook' + v1_replacement: '2007' + - regex: 'Microsoft Outlook 14\.\d+\.\d+|MSOffice 14' + family_replacement: 'Outlook' + v1_replacement: '2010' + - regex: 'Microsoft Outlook 15\.\d+\.\d+' + family_replacement: 'Outlook' + v1_replacement: '2013' + - regex: 'Microsoft Outlook (?:Mail )?16\.\d+\.\d+|MSOffice 16' + family_replacement: 'Outlook' + v1_replacement: '2016' + - regex: 'Microsoft Office (Word) 2014' + - regex: 'Outlook-Express\/7\.0' + family_replacement: 'Windows Live Mail' + - regex: '(Airmail) (\d+)\.(\d+)(?:\.(\d+)|)' + - regex: '(Thunderbird)/(\d+)\.(\d+)(?:\.(\d+(?:pre|))|)' + family_replacement: 'Thunderbird' + - regex: '(Postbox)/(\d+)\.(\d+)\.(\d+)' + family_replacement: 'Postbox' + - regex: '(Barca(?:Pro)?)/(\d+)\.(\d+)(?:\.(\d+)|)' + family_replacement: 'Barca' + - regex: '(Lotus-Notes)/(\d+)\.(\d+)(?:\.(\d+)|)' + family_replacement: 'Lotus Notes' + - regex: 'Superhuman' + family_replacement: 'Superhuman' + - regex: '(Vivaldi)/(\d+)(?:\.(\d+)|)(?:\.(\d+)|)' + - regex: '(Edge?)/(\d+)(?:\.(\d+)|)(?:\.(\d+)|)(?:\.(\d+)|)' + family_replacement: 'Edge' + - regex: '(Chrome)/(\d+)\.(\d+)\.(\d+)[\d.]{0,100} Iron[^/]' + family_replacement: 'Iron' + - regex: '\b(Dolphin)(?: |HDCN/|/INT\-)(\d+)\.(\d+)(?:\.(\d+)|)' + - regex: '(HeadlessChrome)(?:/(\d+)\.(\d+)\.(\d+)|)' + - regex: '(Evolution)/(\d+)\.(\d+)\.(\d+\.\d+)' + - regex: '(RCM CardDAV plugin)/(\d+)\.(\d+)\.(\d+(?:-dev|))' + - regex: '(bingbot|Bolt|AdobeAIR|Jasmine|IceCat|Skyfire|Midori|Maxthon|Lynx|Arora|IBrowse|Dillo|Camino|Shiira|Fennec|Phoenix|Flock|Netscape|Lunascape|Epiphany|WebPilot|Opera Mini|Opera|NetFront|Netfront|Konqueror|Googlebot|SeaMonkey|Kazehakase|Vienna|Iceape|Iceweasel|IceWeasel|Iron|K-Meleon|Sleipnir|Galeon|GranParadiso|iCab|iTunes|MacAppStore|NetNewsWire|Space Bison|Stainless|Orca|Dolfin|BOLT|Minimo|Tizen Browser|Polaris|Abrowser|Planetweb|ICE Browser|mDolphin|qutebrowser|Otter|QupZilla|MailBar|kmail2|YahooMobileMail|ExchangeWebServices|ExchangeServicesClient|Dragon|Outlook-iOS-Android)/(\d+)\.(\d+)(?:\.(\d+)|)' + - regex: '(Chromium|Chrome)/(\d+)\.(\d+)(?:\.(\d+)|)(?:\.(\d+)|)' + - regex: '(IEMobile)[ /](\d+)\.(\d+)' + family_replacement: 'IE Mobile' + - regex: '(BacaBerita App)\/(\d+)\.(\d+)\.(\d+)' + - regex: '^(bPod|Pocket Casts|Player FM)$' + - regex: '^(AlexaMediaPlayer|VLC)/(\d+)\.(\d+)\.([^.\s]+)' + - regex: 
'^(AntennaPod|WMPlayer|Zune|Podkicker|Radio|ExoPlayerDemo|Overcast|PocketTunes|NSPlayer|okhttp|DoggCatcher|QuickNews|QuickTime|Peapod|Podcasts|GoldenPod|VLC|Spotify|Miro|MediaGo|Juice|iPodder|gPodder|Banshee)/(\d+)\.(\d+)(?:\.(\d+)|)(?:\.(\d+)|)' + - regex: '^(Peapod|Liferea)/([^.\s]+)\.([^.\s]+|)\.?([^.\s]+|)' + - regex: '^(bPod|Player FM) BMID/(\S+)' + - regex: '^(Podcast ?Addict)/v(\d+) ' + - regex: '^(Podcast ?Addict) ' + family_replacement: 'PodcastAddict' + - regex: '(Replay) AV' + - regex: '(VOX) Music Player' + - regex: '(CITA) RSS Aggregator/(\d+)\.(\d+)' + - regex: '(Pocket Casts)$' + - regex: '(Player FM)$' + - regex: '(LG Player|Doppler|FancyMusic|MediaMonkey|Clementine) (\d+)\.(\d+)\.?([^.\s]+|)\.?([^.\s]+|)' + - regex: '(philpodder)/(\d+)\.(\d+)\.?([^.\s]+|)\.?([^.\s]+|)' + - regex: '(Player FM|Pocket Casts|DoggCatcher|Spotify|MediaMonkey|MediaGo|BashPodder)' + - regex: '(QuickTime)\.(\d+)\.(\d+)\.(\d+)' + - regex: '(Kinoma)(\d+)' + - regex: '(Fancy) Cloud Music (\d+)\.(\d+)' + family_replacement: 'FancyMusic' + - regex: 'EspnDownloadManager' + family_replacement: 'ESPN' + - regex: '(ESPN) Radio (\d+)\.(\d+)(?:\.(\d+)|) ?(?:rv:(\d+)|) ' + - regex: '(podracer|jPodder) v ?(\d+)\.(\d+)(?:\.(\d+)|)' + - regex: '(ZDM)/(\d+)\.(\d+)[; ]?' + - regex: '(Zune|BeyondPod) (\d+)(?:\.(\d+)|)[\);]' + - regex: '(WMPlayer)/(\d+)\.(\d+)\.(\d+)\.(\d+)' + - regex: '^(Lavf)' + family_replacement: 'WMPlayer' + - regex: '^(RSSRadio)[ /]?(\d+|)' + - regex: '(RSS_Radio) (\d+)\.(\d+)' + family_replacement: 'RSSRadio' + - regex: '(Podkicker) \S+/(\d+)\.(\d+)\.(\d+)' + family_replacement: 'Podkicker' + - regex: '^(HTC) Streaming Player \S+ / \S+ / \S+ / (\d+)\.(\d+)(?:\.(\d+)|)' + - regex: '^(Stitcher)/iOS' + - regex: '^(Stitcher)/Android' + - regex: '^(VLC) .{0,200}version (\d+)\.(\d+)\.(\d+)' + - regex: ' (VLC) for' + - regex: '(vlc)/(\d+)\.(\d+)\.(\d+)' + family_replacement: 'VLC' + - regex: '^(foobar)\S{1,10}/(\d+)\.(\d+|)\.?([\da-z]+|)' + - regex: '^(Clementine)\S{1,10} (\d+)\.(\d+|)\.?(\d+|)' + - regex: '(amarok)/(\d+)\.(\d+|)\.?(\d+|)' + family_replacement: 'Amarok' + - regex: '(Custom)-Feed Reader' + - regex: '(iRider|Crazy Browser|SkipStone|iCab|Lunascape|Sleipnir|Maemo Browser) (\d+)\.(\d+)\.(\d+)' + - regex: '(iCab|Lunascape|Opera|Android|Jasmine|Polaris|Microsoft SkyDriveSync|The Bat!) 
(\d+)(?:\.(\d+)|)(?:\.(\d+)|)' + - regex: '(Kindle)/(\d+)\.(\d+)' + - regex: '(Android) Donut' + v1_replacement: '1' + v2_replacement: '2' + - regex: '(Android) Eclair' + v1_replacement: '2' + v2_replacement: '1' + - regex: '(Android) Froyo' + v1_replacement: '2' + v2_replacement: '2' + - regex: '(Android) Gingerbread' + v1_replacement: '2' + v2_replacement: '3' + - regex: '(Android) Honeycomb' + v1_replacement: '3' + - regex: '(MSIE) (\d+)\.(\d+).{0,100}XBLWP7' + family_replacement: 'IE Large Screen' + - regex: '(Nextcloud)' + - regex: '(mirall)/(\d+)\.(\d+)\.(\d+)' + - regex: '(ownCloud-android)/(\d+)\.(\d+)\.(\d+)' + family_replacement: 'Owncloud' + - regex: '(OC)/(\d+)\.(\d+)\.(\d+)\.(\d+) \(Skype for Business\)' + family_replacement: 'Skype' + - regex: '(OpenVAS)(?:-VT)?(?:[ \/](\d+)(?:\.(\d+)|)(?:\.(\d+)|)|)' + family_replacement: 'OpenVAS Scanner' + - regex: '(AnyConnect)\/(\d+)(?:\.(\d+)(?:\.(\d+)|)|)' + - regex: 'compatible; monitis' + family_replacement: 'Monitis' + - regex: '(Obigo)InternetBrowser' + - regex: '(Obigo)\-Browser' + - regex: '(Obigo|OBIGO)[^\d]*(\d+)(?:.(\d+)|)' + family_replacement: 'Obigo' + - regex: '(MAXTHON|Maxthon) (\d+)\.(\d+)' + family_replacement: 'Maxthon' + - regex: '(Maxthon|MyIE2|Uzbl|Shiira)' + v1_replacement: '0' + - regex: '(BrowseX) \((\d+)\.(\d+)\.(\d+)' + - regex: '(NCSA_Mosaic)/(\d+)\.(\d+)' + family_replacement: 'NCSA Mosaic' + - regex: '(POLARIS)/(\d+)\.(\d+)' + family_replacement: 'Polaris' + - regex: '(Embider)/(\d+)\.(\d+)' + family_replacement: 'Polaris' + - regex: '(BonEcho)/(\d+)\.(\d+)\.?([ab]?\d+|)' + family_replacement: 'Bon Echo' + - regex: '(TopBuzz) com.alex.NewsMaster/(\d+).(\d+).(\d+)' + family_replacement: 'TopBuzz' + - regex: '(TopBuzz) com.mobilesrepublic.newsrepublic/(\d+).(\d+).(\d+)' + family_replacement: 'TopBuzz' + - regex: '(TopBuzz) com.topbuzz.videoen/(\d+).(\d+).(\d+)' + family_replacement: 'TopBuzz' + - regex: '(iPod|iPhone|iPad).{1,200}GSA/(\d+)\.(\d+)\.(\d+)(?:\.(\d+)|) Mobile' + family_replacement: 'Google' + - regex: '(iPod|iPhone|iPad).{1,200}Version/(\d+)\.(\d+)(?:\.(\d+)|).{1,200}[ +]Safari' + family_replacement: 'Mobile Safari' + - regex: '(iPod|iPod touch|iPhone|iPad);.{0,30}CPU.{0,30}OS[ +](\d+)_(\d+)(?:_(\d+)|).{0,30} AppleNews\/\d+\.\d+(?:\.\d+|)' + family_replacement: 'Mobile Safari UI/WKWebView' + - regex: '(iPod|iPhone|iPad).{1,200}Version/(\d+)\.(\d+)(?:\.(\d+)|)' + family_replacement: 'Mobile Safari UI/WKWebView' + - regex: '(iPod|iPod touch|iPhone|iPad).{0,200} Safari' + family_replacement: 'Mobile Safari' + - regex: '(iPod|iPod touch|iPhone|iPad)' + family_replacement: 'Mobile Safari UI/WKWebView' + - regex: '(Watch)(\d+),(\d+)' + family_replacement: 'Apple $1 App' + - regex: '(Outlook-iOS)/\d+\.\d+\.prod\.iphone \((\d+)\.(\d+)\.(\d+)\)' + - regex: '(AvantGo) (\d+).(\d+)' + - regex: '(OneBrowser)/(\d+).(\d+)' + family_replacement: 'ONE Browser' + - regex: '(Avant)' + v1_replacement: '1' + - regex: '(QtCarBrowser)' + v1_replacement: '1' + - regex: '^(iBrowser/Mini)(\d+).(\d+)' + family_replacement: 'iBrowser Mini' + - regex: '^(iBrowser|iRAPP)/(\d+).(\d+)' + - regex: '^(Nokia)' + family_replacement: 'Nokia Services (WAP) Browser' + - regex: '(NokiaBrowser)/(\d+)\.(\d+).(\d+)\.(\d+)' + family_replacement: 'Nokia Browser' + - regex: '(NokiaBrowser)/(\d+)\.(\d+).(\d+)' + family_replacement: 'Nokia Browser' + - regex: '(NokiaBrowser)/(\d+)\.(\d+)' + family_replacement: 'Nokia Browser' + - regex: '(BrowserNG)/(\d+)\.(\d+).(\d+)' + family_replacement: 'Nokia Browser' + - regex: '(Series60)/5\.0' + 
family_replacement: 'Nokia Browser' + v1_replacement: '7' + v2_replacement: '0' + - regex: '(Series60)/(\d+)\.(\d+)' + family_replacement: 'Nokia OSS Browser' + - regex: '(S40OviBrowser)/(\d+)\.(\d+)\.(\d+)\.(\d+)' + family_replacement: 'Ovi Browser' + - regex: '(Nokia)[EN]?(\d+)' + - regex: '(PlayBook).{1,200}RIM Tablet OS (\d+)\.(\d+)\.(\d+)' + family_replacement: 'BlackBerry WebKit' + - regex: '(Black[bB]erry|BB10).{1,200}Version/(\d+)\.(\d+)\.(\d+)' + family_replacement: 'BlackBerry WebKit' + - regex: '(Black[bB]erry)\s?(\d+)' + family_replacement: 'BlackBerry' + - regex: '(OmniWeb)/v(\d+)\.(\d+)' + - regex: '(Blazer)/(\d+)\.(\d+)' + family_replacement: 'Palm Blazer' + - regex: '(Pre)/(\d+)\.(\d+)' + family_replacement: 'Palm Pre' + - regex: '(ELinks)/(\d+)\.(\d+)' + - regex: '(ELinks) \((\d+)\.(\d+)' + - regex: '(Links) \((\d+)\.(\d+)' + - regex: '(QtWeb) Internet Browser/(\d+)\.(\d+)' + - regex: '(PhantomJS)/(\d+)\.(\d+)\.(\d+)' + - regex: '(AppleWebKit)/(\d+)(?:\.(\d+)|)\+ .{0,200} Safari' + family_replacement: 'WebKit Nightly' + - regex: '(Version)/(\d+)\.(\d+)(?:\.(\d+)|).{0,100}Safari/' + family_replacement: 'Safari' + - regex: '(Safari)/\d+' + - regex: '(OLPC)/Update(\d+)\.(\d+)' + - regex: '(OLPC)/Update()\.(\d+)' + v1_replacement: '0' + - regex: '(SEMC\-Browser)/(\d+)\.(\d+)' + - regex: '(Teleca)' + family_replacement: 'Teleca Browser' + - regex: '(Phantom)/V(\d+)\.(\d+)' + family_replacement: 'Phantom Browser' + - regex: '(Trident)/(7|8)\.(0)' + family_replacement: 'IE' + v1_replacement: '11' + - regex: '(Trident)/(6)\.(0)' + family_replacement: 'IE' + v1_replacement: '10' + - regex: '(Trident)/(5)\.(0)' + family_replacement: 'IE' + v1_replacement: '9' + - regex: '(Trident)/(4)\.(0)' + family_replacement: 'IE' + v1_replacement: '8' + - regex: '(Espial)/(\d+)(?:\.(\d+)|)(?:\.(\d+)|)' + - regex: '(AppleWebKit)/(\d+)\.(\d+)\.(\d+)' + family_replacement: 'Apple Mail' + - regex: '(Firefox)/(\d+)\.(\d+)(?:\.(\d+)|$)' + - regex: '(Firefox)/(\d+)\.(\d+)(pre|[ab]\d+[a-z]*|)' + - regex: '([MS]?IE) (\d+)\.(\d+)' + family_replacement: 'IE' + - regex: '(python-requests)/(\d+)\.(\d+)' + family_replacement: 'Python Requests' + - regex: '\b(Windows-Update-Agent|WindowsPowerShell|Microsoft-CryptoAPI|SophosUpdateManager|SophosAgent|Debian APT-HTTP|Ubuntu APT-HTTP|libcurl-agent|libwww-perl|urlgrabber|curl|PycURL|Wget|wget2|aria2|Axel|OpenBSD ftp|lftp|jupdate|insomnia|fetch libfetch|akka-http|got|CloudCockpitBackend|ReactorNetty|axios|Jersey|Vert.x-WebClient|Apache-CXF|Go-CF-client|go-resty|AHC|HTTPie)(?:[ /](\d+)(?:\.(\d+)|)(?:\.(\d+)|)|)' + - regex: '^(cf)\/(\d+)\.(\d+)\.(\S+)' + family_replacement: 'CloudFoundry' + - regex: '^(sap-leonardo-iot-sdk-nodejs) \/ (\d+)\.(\d+)\.(\d+)' + - regex: '^(SAP NetWeaver Application Server) \(1.0;(\d{1})(\d{2})\)' + - regex: '^(\w+-HTTPClient)\/(\d+)\.(\d+)-(\S+)' + family_replacement: 'HTTPClient' + - regex: '^(go-cli)\s(\d+)\.(\d+).(\S+)' + - regex: '^(Java-EurekaClient|Java-EurekaClient-Replication|HTTPClient|lua-resty-http)\/v?(\d+)\.(\d+)\.?(\d*)' + - regex: '^(ping-service|sap xsuaa|Node-oauth|Site24x7|SAP CPI|JAEGER_SECURITY)' + - regex: '(Python/3\.\d{1,3} aiohttp)/(\d+)\.(\d+)\.(\d+)' + family_replacement: 'Python aiohttp' + - regex: '(Java)[/ ]?\d{1}\.(\d+)\.(\d+)[_-]*([a-zA-Z0-9]+|)' + - regex: '(Java)[/ ]?(\d+)\.(\d+)\.(\d+)' + - regex: '(minio-go)/v(\d+)\.(\d+)\.(\d+)' + - regex: '^(ureq)[/ ](\d+)\.(\d+).(\d+)' + - regex: '^(http\.rb)/(\d+)\.(\d+).(\d+)' + - regex: '^(GuzzleHttp)/(\d+)\.(\d+).(\d+)' + - regex: '^(grab)\b' + - regex: 
'^(Cyberduck)/(\d+)\.(\d+)\.(\d+)(?:\.\d+|)' + - regex: '^(S3 Browser) (\d+)[.-](\d+)[.-](\d+)(?:\s*https?://s3browser\.com|)' + - regex: '(S3Gof3r)' + - regex: '\b(ibm-cos-sdk-(?:core|java|js|python))/(\d+)\.(\d+)(?:\.(\d+)|)' + - regex: '^(rusoto)/(\d+)\.(\d+)\.(\d+)' + - regex: '^(rclone)/v(\d+)\.(\d+)' + - regex: '^(Roku)/DVP-(\d+)\.(\d+)' + - regex: '(Kurio)\/(\d+)\.(\d+)\.(\d+)' + family_replacement: 'Kurio App' + - regex: '^(Box(?: Sync)?)/(\d+)\.(\d+)\.(\d+)' + - regex: '^(ViaFree|Viafree)-(?:tvOS-)?[A-Z]{2}/(\d+)\.(\d+)\.(\d+)' + family_replacement: 'ViaFree' + - regex: '(Transmit)/(\d+)\.(\d+)\.(\d+)' + - regex: '(Download Master)' + - regex: '\b(HTTrack) (\d+)\.(\d+)(?:[\.\-](\d+)|)' + - regex: '(Ladybird)\/(\d+)\.(\d+)' + - regex: '(MullvadBrowser)/(\d+)(?:\.(\d+)|)(?:\.(\d+)|)' +os_parsers: + - regex: 'HbbTV/\d+\.\d+\.\d+ \( ;(LG)E ;NetCast 4.0' + os_v1_replacement: '2013' + - regex: 'HbbTV/\d+\.\d+\.\d+ \( ;(LG)E ;NetCast 3.0' + os_v1_replacement: '2012' + - regex: 'HbbTV/1.1.1 \(;;;;;\) Maple_2011' + os_replacement: 'Samsung' + os_v1_replacement: '2011' + - regex: 'HbbTV/\d+\.\d+\.\d+ \(;(Samsung);SmartTV([0-9]{4});.{0,200}FXPDEUC' + os_v2_replacement: 'UE40F7000' + - regex: 'HbbTV/\d+\.\d+\.\d+ \(;(Samsung);SmartTV([0-9]{4});.{0,200}MST12DEUC' + os_v2_replacement: 'UE32F4500' + - regex: 'HbbTV/1\.1\.1 \(; (Philips);.{0,200}NETTV/4' + os_v1_replacement: '2013' + - regex: 'HbbTV/1\.1\.1 \(; (Philips);.{0,200}NETTV/3' + os_v1_replacement: '2012' + - regex: 'HbbTV/1\.1\.1 \(; (Philips);.{0,200}NETTV/2' + os_v1_replacement: '2011' + - regex: 'HbbTV/\d+\.\d+\.\d+.{0,100}(firetv)-firefox-plugin (\d+).(\d+).(\d+)' + os_replacement: 'FireHbbTV' + - regex: 'HbbTV/\d+\.\d+\.\d+ \(.{0,30}; ?([a-zA-Z]+) ?;.{0,30}(201[1-9]).{0,30}\)' + - regex: 'AspiegelBot|PetalBot' + os_replacement: 'Other' + - regex: '(Windows Phone) (?:OS[ /])?(\d+)\.(\d+)' + - regex: '(CPU[ +]OS|iPhone[ +]OS|CPU[ +]iPhone)[ +]+(\d+)[_\.](\d+)(?:[_\.](\d+)|).{0,100}Outlook-iOS-Android' + os_replacement: 'iOS' + - regex: 'ArcGIS\.?(iOS|Android)-\d+\.\d+(?:\.\d+|)(?:[^\/]{1,50}|)\/(\d+)(?:\.(\d+)(?:\.(\d+)|)|)' + - regex: 'ArcGISRuntime-(?:Android|iOS)\/\d+\.\d+(?:\.\d+|) \((Android|iOS) (\d+)(?:\.(\d+)(?:\.(\d+)|)|);' + - regex: '(Android) (\d+)(?:\.(\d+)).*CrKey' + os_replacement: 'Chromecast Android' + - regex: 'Fuchsia.*(CrKey)(?:[/](\d+)\.(\d+)(?:\.(\d+)|)|)' + os_replacement: 'Chromecast Fuchsia' + - regex: 'Linux.*(CrKey)(?:[/](\d+)\.(\d+)(?:\.(\d+)|)|).*DeviceType/SmartSpeaker' + os_replacement: 'Chromecast SmartSpeaker' + - regex: 'Linux.*(CrKey)(?:[/](\d+)\.(\d+)(?:\.(\d+)|)|)' + os_replacement: 'Chromecast Linux' + - regex: '(Android)[ \-/](\d+)(?:\.(\d+)|)(?:[.\-]([a-z0-9]+)|)' + - regex: '(Android) Donut' + os_v1_replacement: '1' + os_v2_replacement: '2' + - regex: '(Android) Eclair' + os_v1_replacement: '2' + os_v2_replacement: '1' + - regex: '(Android) Froyo' + os_v1_replacement: '2' + os_v2_replacement: '2' + - regex: '(Android) Gingerbread' + os_v1_replacement: '2' + os_v2_replacement: '3' + - regex: '(Android) Honeycomb' + os_v1_replacement: '3' + - regex: '(Android) (\d+);' + - regex: '(Android): (\d+)(?:\.(\d+)(?:\.(\d+)|)|);' + - regex: '^UCWEB.{0,200}; (Adr) (\d+)\.(\d+)(?:[.\-]([a-z0-9]{1,100})|);' + os_replacement: 'Android' + - regex: '^UCWEB.{0,200}; (iPad|iPh|iPd) OS (\d+)_(\d+)(?:_(\d+)|);' + os_replacement: 'iOS' + - regex: '^UCWEB.{0,200}; (wds) (\d+)\.(\d+)(?:\.(\d+)|);' + os_replacement: 'Windows Phone' + - regex: '^(JUC).{0,200}; ?U; 
?(?:Android|)(\d+)\.(\d+)(?:[\.\-]([a-z0-9]{1,100})|)' + os_replacement: 'Android' + - regex: '(android)\s(?:mobile\/)(\d+)(?:\.(\d+)(?:\.(\d+)|)|)' + os_replacement: 'Android' + - regex: 'Quest' + os_replacement: 'Android' + - regex: '(Silk-Accelerated=[a-z]{4,5})' + os_replacement: 'Android' + - regex: '(x86_64|aarch64)\ (\d+)\.(\d+)\.(\d+).{0,100}Chrome.{0,100}(?:CitrixChromeApp)$' + os_replacement: 'Chrome OS' + - regex: '(XBLWP7)' + os_replacement: 'Windows Phone' + - regex: '(Windows ?Mobile)' + os_replacement: 'Windows Mobile' + - regex: '(Windows 10)' + os_replacement: 'Windows' + os_v1_replacement: '10' + - regex: '(Windows (?:NT 5\.2|NT 5\.1))' + os_replacement: 'Windows' + os_v1_replacement: 'XP' + - regex: '(Win(?:dows NT |32NT\/)6\.1)' + os_replacement: 'Windows' + os_v1_replacement: '7' + - regex: '(Win(?:dows NT |32NT\/)6\.0)' + os_replacement: 'Windows' + os_v1_replacement: 'Vista' + - regex: '(Win 9x 4\.90)' + os_replacement: 'Windows' + os_v1_replacement: 'ME' + - regex: '(Windows NT 6\.2; ARM;)' + os_replacement: 'Windows' + os_v1_replacement: 'RT' + - regex: '(Win(?:dows NT |32NT\/)6\.2)' + os_replacement: 'Windows' + os_v1_replacement: '8' + - regex: '(Windows NT 6\.3; ARM;)' + os_replacement: 'Windows' + os_v1_replacement: 'RT 8' + os_v2_replacement: '1' + - regex: '(Win(?:dows NT |32NT\/)6\.3)' + os_replacement: 'Windows' + os_v1_replacement: '8' + os_v2_replacement: '1' + - regex: '(Win(?:dows NT |32NT\/)6\.4)' + os_replacement: 'Windows' + os_v1_replacement: '10' + - regex: '(Windows NT 10\.0)' + os_replacement: 'Windows' + os_v1_replacement: '10' + - regex: '(Windows NT 5\.0)' + os_replacement: 'Windows' + os_v1_replacement: '2000' + - regex: '(WinNT4.0)' + os_replacement: 'Windows' + os_v1_replacement: 'NT 4.0' + - regex: '(Windows ?CE)' + os_replacement: 'Windows' + os_v1_replacement: 'CE' + - regex: 'Win(?:dows)? 
?(95|98|3.1|NT|ME|2000|XP|Vista|7|CE)' + os_replacement: 'Windows' + os_v1_replacement: '$1' + - regex: 'Win16' + os_replacement: 'Windows' + os_v1_replacement: '3.1' + - regex: 'Win32' + os_replacement: 'Windows' + os_v1_replacement: '95' + - regex: '^Box.{0,200}Windows/([\d.]+);' + os_replacement: 'Windows' + os_v1_replacement: '$1' + - regex: '(Tizen)[/ ](\d+)\.(\d+)' + - regex: 'Intel Mac OS X.+(CriOS|EdgiOS)/\d+' + os_replacement: 'iOS' + - regex: '((?:Mac[ +]?|; )OS[ +]X)[\s+/](?:(\d+)[_.](\d+)(?:[_.](\d+)|)|Mach-O)' + os_replacement: 'Mac OS X' + - regex: 'Mac OS X\s.{1,50}\s(\d+).(\d+).(\d+)' + os_replacement: 'Mac OS X' + os_v1_replacement: '$1' + os_v2_replacement: '$2' + os_v3_replacement: '$3' + - regex: ' (Dar)(win)/(9).(\d+).{0,100}\((?:i386|x86_64|Power Macintosh)\)' + os_replacement: 'Mac OS X' + os_v1_replacement: '10' + os_v2_replacement: '5' + - regex: ' (Dar)(win)/(10).(\d+).{0,100}\((?:i386|x86_64)\)' + os_replacement: 'Mac OS X' + os_v1_replacement: '10' + os_v2_replacement: '6' + - regex: ' (Dar)(win)/(11).(\d+).{0,100}\((?:i386|x86_64)\)' + os_replacement: 'Mac OS X' + os_v1_replacement: '10' + os_v2_replacement: '7' + - regex: ' (Dar)(win)/(12).(\d+).{0,100}\((?:i386|x86_64)\)' + os_replacement: 'Mac OS X' + os_v1_replacement: '10' + os_v2_replacement: '8' + - regex: ' (Dar)(win)/(13).(\d+).{0,100}\((?:i386|x86_64)\)' + os_replacement: 'Mac OS X' + os_v1_replacement: '10' + os_v2_replacement: '9' + - regex: 'Mac_PowerPC' + os_replacement: 'Mac OS' + - regex: '(?:PPC|Intel) (Mac OS X)' + - regex: '^Box.{0,200};(Darwin)/(10)\.(1\d)(?:\.(\d+)|)' + os_replacement: 'Mac OS X' + - regex: 'darwin; arm64' + os_replacement: 'Mac OS X' + - regex: '(Apple\s?TV)(?:/(\d+)\.(\d+)|)' + os_replacement: 'ATV OS X' + - regex: '(CPU[ +]OS|iPhone[ +]OS|CPU[ +]iPhone|CPU IPhone OS|CPU iPad OS)[ +]+(\d+)[_\.](\d+)(?:[_\.](\d+)|)' + os_replacement: 'iOS' + - regex: '(iPhone|iPad|iPod); Opera' + os_replacement: 'iOS' + - regex: '(iPhone|iPad|iPod).{0,100}Mac OS X.{0,100}Version/(\d+)\.(\d+)' + os_replacement: 'iOS' + - regex: '(CFNetwork)/(5)48\.0\.3.{0,100} Darwin/11\.0\.0' + os_replacement: 'iOS' + - regex: '(CFNetwork)/(5)48\.(0)\.4.{0,100} Darwin/(1)1\.0\.0' + os_replacement: 'iOS' + - regex: '(CFNetwork)/(5)48\.(1)\.4' + os_replacement: 'iOS' + - regex: '(CFNetwork)/(4)85\.1(3)\.9' + os_replacement: 'iOS' + - regex: '(CFNetwork)/(6)09\.(1)\.4' + os_replacement: 'iOS' + - regex: '(CFNetwork)/(6)(0)9' + os_replacement: 'iOS' + - regex: '(CFNetwork)/6(7)2\.(1)\.13' + os_replacement: 'iOS' + - regex: '(CFNetwork)/6(7)2\.(1)\.(1)4' + os_replacement: 'iOS' + - regex: '(CF)(Network)/6(7)(2)\.1\.15' + os_replacement: 'iOS' + os_v1_replacement: '7' + os_v2_replacement: '1' + - regex: '(CFNetwork)/6(7)2\.(0)\.(?:2|8)' + os_replacement: 'iOS' + - regex: '(CFNetwork)/709\.1' + os_replacement: 'iOS' + os_v1_replacement: '8' + os_v2_replacement: '0.b5' + - regex: '(CF)(Network)/711\.(\d)' + os_replacement: 'iOS' + os_v1_replacement: '8' + - regex: '(CF)(Network)/(720)\.(\d)' + os_replacement: 'Mac OS X' + os_v1_replacement: '10' + os_v2_replacement: '10' + - regex: '(CF)(Network)/(760)\.(\d)' + os_replacement: 'Mac OS X' + os_v1_replacement: '10' + os_v2_replacement: '11' + - regex: 'CFNetwork/7.{0,100} Darwin/15\.4\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '9' + os_v2_replacement: '3' + os_v3_replacement: '1' + - regex: 'CFNetwork/7.{0,100} Darwin/15\.5\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '9' + os_v2_replacement: '3' + os_v3_replacement: '2' + - regex: 'CFNetwork/7.{0,100} 
Darwin/15\.6\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '9' + os_v2_replacement: '3' + os_v3_replacement: '5' + - regex: '(CF)(Network)/758\.(\d)' + os_replacement: 'iOS' + os_v1_replacement: '9' + - regex: 'CFNetwork/808\.3 Darwin/16\.3\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '10' + os_v2_replacement: '2' + os_v3_replacement: '1' + - regex: '(CF)(Network)/808\.(\d)' + os_replacement: 'iOS' + os_v1_replacement: '10' + - regex: 'CFNetwork/.{0,100} Darwin/17\.\d+.{0,100}\(x86_64\)' + os_replacement: 'Mac OS X' + os_v1_replacement: '10' + os_v2_replacement: '13' + - regex: 'CFNetwork/.{0,100} Darwin/16\.\d+.{0,100}\(x86_64\)' + os_replacement: 'Mac OS X' + os_v1_replacement: '10' + os_v2_replacement: '12' + - regex: 'CFNetwork/8.{0,100} Darwin/15\.\d+.{0,100}\(x86_64\)' + os_replacement: 'Mac OS X' + os_v1_replacement: '10' + os_v2_replacement: '11' + - regex: 'CFNetwork/.{0,100} Darwin/(9)\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '1' + - regex: 'CFNetwork/.{0,100} Darwin/(10)\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '4' + - regex: 'CFNetwork/.{0,100} Darwin/(11)\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '5' + - regex: 'CFNetwork/.{0,100} Darwin/(13)\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '6' + - regex: 'CFNetwork/6.{0,100} Darwin/(14)\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '7' + - regex: 'CFNetwork/7.{0,100} Darwin/(14)\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '8' + os_v2_replacement: '0' + - regex: 'CFNetwork/7.{0,100} Darwin/(15)\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '9' + os_v2_replacement: '0' + - regex: 'CFNetwork/8.{0,100} Darwin/16\.5\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '10' + os_v2_replacement: '3' + - regex: 'CFNetwork/8.{0,100} Darwin/16\.6\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '10' + os_v2_replacement: '3' + os_v3_replacement: '2' + - regex: 'CFNetwork/8.{0,100} Darwin/16\.7\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '10' + os_v2_replacement: '3' + os_v3_replacement: '3' + - regex: 'CFNetwork/8.{0,100} Darwin/(16)\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '10' + - regex: 'CFNetwork/8.{0,100} Darwin/17\.0\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '11' + os_v2_replacement: '0' + - regex: 'CFNetwork/8.{0,100} Darwin/17\.2\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '11' + os_v2_replacement: '1' + - regex: 'CFNetwork/8.{0,100} Darwin/17\.3\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '11' + os_v2_replacement: '2' + - regex: 'CFNetwork/8.{0,100} Darwin/17\.4\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '11' + os_v2_replacement: '2' + os_v3_replacement: '6' + - regex: 'CFNetwork/8.{0,100} Darwin/17\.5\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '11' + os_v2_replacement: '3' + - regex: 'CFNetwork/9.{0,100} Darwin/17\.6\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '11' + os_v2_replacement: '4' + - regex: 'CFNetwork/9.{0,100} Darwin/17\.7\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '11' + os_v2_replacement: '4' + os_v3_replacement: '1' + - regex: 'CFNetwork/8.{0,100} Darwin/(17)\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '11' + - regex: 'CFNetwork/9.{0,100} Darwin/18\.0\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '12' + os_v2_replacement: '0' + - regex: 'CFNetwork/9.{0,100} Darwin/18\.2\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '12' + os_v2_replacement: '1' + - regex: 'CFNetwork/9.{0,100} Darwin/18\.5\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '12' + os_v2_replacement: '2' 
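The CFNetwork/Darwin entries above and below map Apple networking-stack build numbers to OS versions using fixed os_replacement/os_vN_replacement values rather than captured groups, since the Darwin release encodes the OS version only indirectly. As a minimal sketch of how a parser generated from this file might apply one such entry — the osRule struct, field names, and sample user agent are illustrative assumptions, not the actual uap-go implementation:

    package main

    import (
        "fmt"
        "regexp"
    )

    // osRule mirrors the shape of one os_parsers entry: a pattern plus
    // fixed replacement strings for the OS family and version components.
    // (Hypothetical struct for illustration; real generated parsers also
    // support $1-style backreferences into the match.)
    type osRule struct {
        re            *regexp.Regexp
        osReplacement string
        v1, v2        string
    }

    func main() {
        // One of the CFNetwork/Darwin rules from this section: CFNetwork 12.x
        // paired with Darwin 20.6 identifies iOS 14.8.
        rule := osRule{
            re:            regexp.MustCompile(`CFNetwork/12.{0,100} Darwin/20\.6\.\d+`),
            osReplacement: "iOS",
            v1:            "14",
            v2:            "8",
        }

        // Assumed example user agent; rules are tried in file order and the
        // first match wins, which is why the catch-all Darwin entries come last.
        ua := "MyApp/1.0 CFNetwork/1240.0.4 Darwin/20.6.0"
        if rule.re.MatchString(ua) {
            fmt.Printf("os=%s version=%s.%s\n", rule.osReplacement, rule.v1, rule.v2)
            // Output: os=iOS version=14.8
        }
    }

Because each Darwin minor release corresponds to a known iOS point release, the version fields are hard-coded per rule instead of being extracted from the matched text.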
+ - regex: 'CFNetwork/9.{0,100} Darwin/18\.6\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '12' + os_v2_replacement: '3' + - regex: 'CFNetwork/9.{0,100} Darwin/18\.7\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '12' + os_v2_replacement: '4' + - regex: 'CFNetwork/9.{0,100} Darwin/(18)\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '12' + - regex: 'CFNetwork/11.{0,100} Darwin/19\.2\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '13' + os_v2_replacement: '3' + - regex: 'CFNetwork/11.{0,100} Darwin/19\.3\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '13' + os_v2_replacement: '3' + os_v3_replacement: '1' + - regex: 'CFNetwork/11.{0,100} Darwin/19\.4\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '13' + os_v2_replacement: '4' + - regex: 'CFNetwork/11.{0,100} Darwin/19\.5\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '13' + os_v2_replacement: '5' + - regex: 'CFNetwork/11.{0,100} Darwin/19\.6\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '13' + os_v2_replacement: '6' + - regex: 'CFNetwork/1[01].{0,100} Darwin/19\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '13' + - regex: 'CFNetwork/12.{0,100} Darwin/20\.1\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '14' + os_v2_replacement: '2' + - regex: 'CFNetwork/12.{0,100} Darwin/20\.2\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '14' + os_v2_replacement: '3' + - regex: 'CFNetwork/12.{0,100} Darwin/20\.3\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '14' + os_v2_replacement: '4' + - regex: 'CFNetwork/12.{0,100} Darwin/20\.4\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '14' + os_v2_replacement: '5' + - regex: 'CFNetwork/12.{0,100} Darwin/20\.5\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '14' + os_v2_replacement: '6' + - regex: 'CFNetwork/12.{0,100} Darwin/20\.6\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '14' + os_v2_replacement: '8' + - regex: 'CFNetwork/.{0,100} Darwin/(20)\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '14' + - regex: 'CFNetwork/13.{0,100} Darwin/21\.0\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '15' + os_v2_replacement: '0' + - regex: 'CFNetwork/13.{0,100} Darwin/21\.1\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '15' + os_v2_replacement: '1' + - regex: 'CFNetwork/13.{0,100} Darwin/21\.2\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '15' + os_v2_replacement: '2' + - regex: 'CFNetwork/13.{0,100} Darwin/21\.3\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '15' + os_v2_replacement: '3' + - regex: 'CFNetwork/13.{0,100} Darwin/21\.4\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '15' + os_v2_replacement: '4' + - regex: 'CFNetwork/13.{0,100} Darwin/21\.5\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '15' + os_v2_replacement: '5' + - regex: 'CFNetwork/13.{0,100} Darwin/21\.6\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '15' + os_v2_replacement: '6' + - regex: 'CFNetwork/.{0,100} Darwin/(21)\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '15' + - regex: 'CFNetwork/.{0,100} Darwin/22\.([0-5])\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '16' + os_v2_replacement: '$1' + - regex: 'CFNetwork/.{0,100} Darwin/(22)\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '16' + - regex: 'CFNetwork/.{0,100} Darwin/23\.([0-5])\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '17' + os_v2_replacement: '$1' + - regex: 'CFNetwork/.{0,100} Darwin/(23)\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '17' + - regex: 'CFNetwork/.{0,100} Darwin/24\.([0-5])\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '18' + os_v2_replacement: 
'$1' + - regex: 'CFNetwork/.{0,100} Darwin/(24)\.\d+' + os_replacement: 'iOS' + os_v1_replacement: '18' + - regex: 'CFNetwork/.{0,100} Darwin/' + os_replacement: 'iOS' + - regex: '\b(iOS[ /]|iOS; |iPhone(?:/| v|[ _]OS[/,]|; | OS : |\d,\d/|\d,\d; )|iPad/)(\d{1,2})[_\.](\d{1,2})(?:[_\.](\d+)|)' + os_replacement: 'iOS' + - regex: '\((iOS);' + - regex: '(watchOS)[/ ](\d+)\.(\d+)(?:\.(\d+)|)' + os_replacement: 'WatchOS' + - regex: 'Outlook-(iOS)/\d+\.\d+\.prod\.iphone' + - regex: '(iPod|iPhone|iPad)' + os_replacement: 'iOS' + - regex: '(tvOS)[/ ](\d+)\.(\d+)(?:\.(\d+)|)' + os_replacement: 'tvOS' + - regex: '(CrOS) [a-z0-9_]+ (\d+)\.(\d+)(?:\.(\d+)|)' + os_replacement: 'Chrome OS' + - regex: '([Dd]ebian)' + os_replacement: 'Debian' + - regex: '(Linux Mint)(?:/(\d+)|)' + - regex: '(Mandriva)(?: Linux|)/(?:[\d.-]+m[a-z]{2}(\d+).(\d)|)' + - regex: '(Symbian[Oo][Ss])[/ ](\d+)\.(\d+)' + os_replacement: 'Symbian OS' + - regex: '(Symbian/3).{1,200}NokiaBrowser/7\.3' + os_replacement: 'Symbian^3 Anna' + - regex: '(Symbian/3).{1,200}NokiaBrowser/7\.4' + os_replacement: 'Symbian^3 Belle' + - regex: '(Symbian/3)' + os_replacement: 'Symbian^3' + - regex: '\b(Series 60|SymbOS|S60Version|S60V\d|S60\b)' + os_replacement: 'Symbian OS' + - regex: '(MeeGo)' + - regex: 'Symbian [Oo][Ss]' + os_replacement: 'Symbian OS' + - regex: 'Series40;' + os_replacement: 'Nokia Series 40' + - regex: 'Series30Plus;' + os_replacement: 'Nokia Series 30 Plus' + - regex: '(BB10);.{1,200}Version/(\d+)\.(\d+)\.(\d+)' + os_replacement: 'BlackBerry OS' + - regex: '(Black[Bb]erry)[0-9a-z]+/(\d+)\.(\d+)\.(\d+)(?:\.(\d+)|)' + os_replacement: 'BlackBerry OS' + - regex: '(Black[Bb]erry).{1,200}Version/(\d+)\.(\d+)\.(\d+)(?:\.(\d+)|)' + os_replacement: 'BlackBerry OS' + - regex: '(RIM Tablet OS) (\d+)\.(\d+)\.(\d+)' + os_replacement: 'BlackBerry Tablet OS' + - regex: '(Play[Bb]ook)' + os_replacement: 'BlackBerry Tablet OS' + - regex: '(Black[Bb]erry)' + os_replacement: 'BlackBerry OS' + - regex: '(K[Aa][Ii]OS)\/(\d+)\.(\d+)(?:\.(\d+)|)' + os_replacement: 'KaiOS' + - regex: '\((?:Mobile|Tablet);.{1,200}Gecko/18.0 Firefox/\d+\.\d+' + os_replacement: 'Firefox OS' + os_v1_replacement: '1' + os_v2_replacement: '0' + os_v3_replacement: '1' + - regex: '\((?:Mobile|Tablet);.{1,200}Gecko/18.1 Firefox/\d+\.\d+' + os_replacement: 'Firefox OS' + os_v1_replacement: '1' + os_v2_replacement: '1' + - regex: '\((?:Mobile|Tablet);.{1,200}Gecko/26.0 Firefox/\d+\.\d+' + os_replacement: 'Firefox OS' + os_v1_replacement: '1' + os_v2_replacement: '2' + - regex: '\((?:Mobile|Tablet);.{1,200}Gecko/28.0 Firefox/\d+\.\d+' + os_replacement: 'Firefox OS' + os_v1_replacement: '1' + os_v2_replacement: '3' + - regex: '\((?:Mobile|Tablet);.{1,200}Gecko/30.0 Firefox/\d+\.\d+' + os_replacement: 'Firefox OS' + os_v1_replacement: '1' + os_v2_replacement: '4' + - regex: '\((?:Mobile|Tablet);.{1,200}Gecko/32.0 Firefox/\d+\.\d+' + os_replacement: 'Firefox OS' + os_v1_replacement: '2' + os_v2_replacement: '0' + - regex: '\((?:Mobile|Tablet);.{1,200}Gecko/34.0 Firefox/\d+\.\d+' + os_replacement: 'Firefox OS' + os_v1_replacement: '2' + os_v2_replacement: '1' + - regex: '\((?:Mobile|Tablet);.{1,200}Firefox/\d+\.\d+' + os_replacement: 'Firefox OS' + - regex: '(BREW)[ /](\d+)\.(\d+)\.(\d+)' + - regex: '(BREW);' + - regex: '(Brew MP|BMP)[ /](\d+)\.(\d+)\.(\d+)' + os_replacement: 'Brew MP' + - regex: 'BMP;' + os_replacement: 'Brew MP' + - regex: '(GoogleTV)(?: (\d+)\.(\d+)(?:\.(\d+)|)|/[\da-z]+)' + - regex: '(WebTV)/(\d+).(\d+)' + - regex: '(hpw|web)OS/(\d+)\.(\d+)(?:\.(\d+)|)' + 
os_replacement: 'webOS' + - regex: '(VRE);' + - regex: '(Fedora|Red Hat|PCLinuxOS|Puppy|Ubuntu|Kindle|Bada|Sailfish|Lubuntu|BackTrack|Slackware|(?:Free|Open|Net|\b)BSD)[/ ](\d+)\.(\d+)(?:\.(\d+)|)(?:\.(\d+)|)' + - regex: '(Linux)[ /](\d+)\.(\d+)(?:\.(\d+)|).{0,100}gentoo' + os_replacement: 'Gentoo' + - regex: '\((Bada);' + - regex: '(Windows|Android|WeTab|Maemo|Web0S)' + - regex: '(Ubuntu|Kubuntu|Arch Linux|CentOS|Slackware|Gentoo|openSUSE|SUSE|Red Hat|Fedora|PCLinuxOS|Mageia|SerenityOS|(?:Free|Open|Net|\b)BSD)' + - regex: '(Linux)(?:[ /](\d+)\.(\d+)(?:\.(\d+)|)|)' + - regex: 'SunOS' + os_replacement: 'Solaris' + - regex: '\(linux-gnu\)' + os_replacement: 'Linux' + - regex: '\(x86_64-redhat-linux-gnu\)' + os_replacement: 'Red Hat' + - regex: '\((freebsd)(\d+)\.(\d+)\)' + os_replacement: 'FreeBSD' + - regex: 'linux' + os_replacement: 'Linux' + - regex: '^(Roku)/DVP-(\d+)\.(\d+)' + os_replacement: 'Mac OS X' + os_v1_replacement: '$1' + os_v2_replacement: '$2' + os_v3_replacement: '$3' +device_parsers: + - regex: '^.{0,100}?(?:(?:iPhone|Windows CE|Windows Phone|Android).{0,300}(?:(?:Bot|Yeti)-Mobile|YRSpider|BingPreview|bots?/\d|(?:bot|spider)\.html)|AdsBot-Google-Mobile.{0,200}iPhone)' + regex_flag: 'i' + device_replacement: 'Spider' + brand_replacement: 'Spider' + model_replacement: 'Smartphone' + - regex: '^.{0,100}?(?:DoCoMo|\bMOT\b|\bLG\b|Nokia|Samsung|SonyEricsson).{0,200}(?:(?:Bot|Yeti)-Mobile|bots?/\d|(?:bot|crawler)\.html|(?:jump|google|Wukong)bot|ichiro/mobile|/spider|YahooSeeker)' + regex_flag: 'i' + device_replacement: 'Spider' + brand_replacement: 'Spider' + model_replacement: 'Feature Phone' + - regex: ' PTST/\d+(?:\.\d+|)$' + device_replacement: 'Spider' + brand_replacement: 'Spider' + - regex: 'X11; Datanyze; Linux' + device_replacement: 'Spider' + brand_replacement: 'Spider' + - regex: 'Mozilla.{1,100}Mobile.{1,100}(AspiegelBot|PetalBot)' + device_replacement: 'Spider' + brand_replacement: 'Spider' + model_replacement: 'Smartphone' + - regex: 'Mozilla.{0,200}(AspiegelBot|PetalBot)' + device_replacement: 'Spider' + brand_replacement: 'Spider' + model_replacement: 'Desktop' + - regex: '\bSmartWatch {0,2}\( {0,2}([^;]{1,200}) {0,2}; {0,2}([^;]{1,200}) {0,2};' + device_replacement: '$1 $2' + brand_replacement: '$1' + model_replacement: '$2' + - regex: 'Android Application[^\-]{1,300} - (Sony) ?(Ericsson|) (.{1,200}) \w{1,20} - ' + device_replacement: '$1 $2' + brand_replacement: '$1$2' + model_replacement: '$3' + - regex: 'Android Application[^\-]{1,300} - (?:HTC|HUAWEI|LGE|LENOVO|MEDION|TCT) (HTC|HUAWEI|LG|LENOVO|MEDION|ALCATEL)[ _\-](.{1,200}) \w{1,20} - ' + regex_flag: 'i' + device_replacement: '$1 $2' + brand_replacement: '$1' + model_replacement: '$2' + - regex: 'Android Application[^\-]{1,300} - ([^ ]+) (.{1,200}) \w{1,20} - ' + device_replacement: '$1 $2' + brand_replacement: '$1' + model_replacement: '$2' + - regex: '; {0,2}([BLRQ]C\d{4}[A-Z]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '3Q $1' + brand_replacement: '3Q' + model_replacement: '$1' + - regex: '; {0,2}(?:3Q_)([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '3Q $1' + brand_replacement: '3Q' + model_replacement: '$1' + - regex: 'Android [34].{0,200}; {0,2}(A100|A101|A110|A200|A210|A211|A500|A501|A510|A511|A700(?: Lite| 3G|)|A701|B1-A71|A1-\d{3}|B1-\d{3}|V360|V370|W500|W500P|W501|W501P|W510|W511|W700|Slider SL101|DA22[^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Acer' + model_replacement: '$1' + - regex: '; {0,2}Acer Iconia Tab 
([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Acer' + model_replacement: '$1' + - regex: '; {0,2}(Z1[1235]0|E320[^/]{0,10}|S500|S510|Liquid[^;/]{0,30}|Iconia A\d+)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Acer' + model_replacement: '$1' + - regex: '; {0,2}(Acer |ACER )([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1$2' + brand_replacement: 'Acer' + model_replacement: '$2' + - regex: '; {0,2}(Advent |)(Vega(?:Bean|Comb|)).{0,200}?(?: Build|\) AppleWebKit)' + device_replacement: '$1$2' + brand_replacement: 'Advent' + model_replacement: '$2' + - regex: '; {0,2}(Ainol |)((?:NOVO|[Nn]ovo)[^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1$2' + brand_replacement: 'Ainol' + model_replacement: '$2' + - regex: '; {0,2}AIRIS[ _\-]?([^/;\)]+) {0,2}(?:;|\)|Build)' + regex_flag: 'i' + device_replacement: '$1' + brand_replacement: 'Airis' + model_replacement: '$1' + - regex: '; {0,2}(OnePAD[^;/]{1,100}?)(?: Build|\) AppleWebKit)' + regex_flag: 'i' + device_replacement: '$1' + brand_replacement: 'Airis' + model_replacement: '$1' + - regex: '; {0,2}Airpad[ \-]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: 'Airpad $1' + brand_replacement: 'Airpad' + model_replacement: '$1' + - regex: '; {0,2}(one ?touch) (EVO7|T10|T20)(?: Build|\) AppleWebKit)' + device_replacement: 'Alcatel One Touch $2' + brand_replacement: 'Alcatel' + model_replacement: 'One Touch $2' + - regex: '; {0,2}(?:alcatel[ _]|)(?:(?:one[ _]?touch[ _])|ot[ \-])([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + regex_flag: 'i' + device_replacement: 'Alcatel One Touch $1' + brand_replacement: 'Alcatel' + model_replacement: 'One Touch $1' + - regex: '; {0,2}(TCL)[ _]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1 $2' + brand_replacement: '$1' + model_replacement: '$2' + - regex: '; {0,2}(Vodafone Smart II|Optimus_Madrid)(?: Build|\) AppleWebKit)' + device_replacement: 'Alcatel $1' + brand_replacement: 'Alcatel' + model_replacement: '$1' + - regex: '; {0,2}BASE_Lutea_3(?: Build|\) AppleWebKit)' + device_replacement: 'Alcatel One Touch 998' + brand_replacement: 'Alcatel' + model_replacement: 'One Touch 998' + - regex: '; {0,2}BASE_Varia(?: Build|\) AppleWebKit)' + device_replacement: 'Alcatel One Touch 918D' + brand_replacement: 'Alcatel' + model_replacement: 'One Touch 918D' + - regex: '; {0,2}((?:FINE|Fine)\d[^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Allfine' + model_replacement: '$1' + - regex: '; {0,2}(ALLVIEW[ _]?|Allview[ _]?)((?:Speed|SPEED).{0,200}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1$2' + brand_replacement: 'Allview' + model_replacement: '$2' + - regex: '; {0,2}(ALLVIEW[ _]?|Allview[ _]?|)(AX1_Shine|AX2_Frenzy)(?: Build|\) AppleWebKit)' + device_replacement: '$1$2' + brand_replacement: 'Allview' + model_replacement: '$2' + - regex: '; {0,2}(ALLVIEW[ _]?|Allview[ _]?)([^;/]*?)(?: Build|\) AppleWebKit)' + device_replacement: '$1$2' + brand_replacement: 'Allview' + model_replacement: '$2' + - regex: '; {0,2}(A13-MID)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Allwinner' + model_replacement: '$1' + - regex: '; {0,2}(Allwinner)[ _\-]?([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1 $2' + brand_replacement: 'Allwinner' + model_replacement: '$1' + - regex: '; 
{0,2}(A651|A701B?|A702|A703|A705|A706|A707|A711|A712|A713|A717|A722|A785|A801|A802|A803|A901|A902|A1002|A1003|A1006|A1007|A9701|A9703|Q710|Q80)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Amaway' + model_replacement: '$1' + - regex: '; {0,2}(?:AMOI|Amoi)[ _]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: 'Amoi $1' + brand_replacement: 'Amoi' + model_replacement: '$1' + - regex: '^(?:AMOI|Amoi)[ _]([^;/]{1,100}?) Linux' + device_replacement: 'Amoi $1' + brand_replacement: 'Amoi' + model_replacement: '$1' + - regex: '; {0,2}(MW(?:0[789]|10)[^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Aoc' + model_replacement: '$1' + - regex: '; {0,2}(G7|M1013|M1015G|M11[CG]?|M-?12[B]?|M15|M19[G]?|M30[ACQ]?|M31[GQ]|M32|M33[GQ]|M36|M37|M38|M701T|M710|M712B|M713|M715G|M716G|M71(?:G|GS|T|)|M72[T]?|M73[T]?|M75[GT]?|M77G|M79T|M7L|M7LN|M81|M810|M81T|M82|M92|M92KS|M92S|M717G|M721|M722G|M723|M725G|M739|M785|M791|M92SK|M93D)(?: Build|\) AppleWebKit)' + device_replacement: 'Aoson $1' + brand_replacement: 'Aoson' + model_replacement: '$1' + - regex: '; {0,2}Aoson ([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + regex_flag: 'i' + device_replacement: 'Aoson $1' + brand_replacement: 'Aoson' + model_replacement: '$1' + - regex: '; {0,2}[Aa]panda[ _\-]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: 'Apanda $1' + brand_replacement: 'Apanda' + model_replacement: '$1' + - regex: '; {0,2}(?:ARCHOS|Archos) ?(GAMEPAD.{0,200}?)(?: Build|\) AppleWebKit)' + device_replacement: 'Archos $1' + brand_replacement: 'Archos' + model_replacement: '$1' + - regex: 'ARCHOS; GOGI; ([^;]{1,200});' + device_replacement: 'Archos $1' + brand_replacement: 'Archos' + model_replacement: '$1' + - regex: '(?:ARCHOS|Archos)[ _]?(.{0,200}?)(?: Build|[;/\(\)\-]|$)' + device_replacement: 'Archos $1' + brand_replacement: 'Archos' + model_replacement: '$1' + - regex: '; {0,2}(AN(?:7|8|9|10|13)[A-Z0-9]{1,4})(?: Build|\) AppleWebKit)' + device_replacement: 'Archos $1' + brand_replacement: 'Archos' + model_replacement: '$1' + - regex: '; {0,2}(A28|A32|A43|A70(?:BHT|CHT|HB|S|X)|A101(?:B|C|IT)|A7EB|A7EB-WK|101G9|80G9)(?: Build|\) AppleWebKit)' + device_replacement: 'Archos $1' + brand_replacement: 'Archos' + model_replacement: '$1' + - regex: '; {0,2}(PAD-FMD[^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Arival' + model_replacement: '$1' + - regex: '; {0,2}(BioniQ) ?([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1 $2' + brand_replacement: 'Arival' + model_replacement: '$1 $2' + - regex: '; {0,2}(AN\d[^;/]{1,100}|ARCHM\d+)(?: Build|\) AppleWebKit)' + device_replacement: 'Arnova $1' + brand_replacement: 'Arnova' + model_replacement: '$1' + - regex: '; {0,2}(?:ARNOVA|Arnova) ?([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: 'Arnova $1' + brand_replacement: 'Arnova' + model_replacement: '$1' + - regex: '; {0,2}(?:ASSISTANT |)(AP)-?([1789]\d{2}[A-Z]{0,2}|80104)(?: Build|\) AppleWebKit)' + device_replacement: 'Assistant $1-$2' + brand_replacement: 'Assistant' + model_replacement: '$1-$2' + - regex: '; {0,2}(ME17\d[^;/]*|ME3\d{2}[^;/]{1,100}|K00[A-Z]|Nexus 10|Nexus 7(?: 2013|)|PadFone[^;/]*|Transformer[^;/]*|TF\d{3}[^;/]*|eeepc)(?: Build|\) AppleWebKit)' + device_replacement: 'Asus $1' + brand_replacement: 'Asus' + model_replacement: '$1' + - regex: '; {0,2}ASUS[ _]{0,10}([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: 'Asus $1' + brand_replacement: 'Asus' + model_replacement: 
'$1' + - regex: '; {0,2}Garmin-Asus ([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: 'Garmin-Asus $1' + brand_replacement: 'Garmin-Asus' + model_replacement: '$1' + - regex: '; {0,2}(Garminfone)(?: Build|\) AppleWebKit)' + device_replacement: 'Garmin $1' + brand_replacement: 'Garmin-Asus' + model_replacement: '$1' + - regex: '; (@TAB-[^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Attab' + model_replacement: '$1' + - regex: '; {0,2}(T-(?:07|[^0]\d)[^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Audiosonic' + model_replacement: '$1' + - regex: '; {0,2}(?:Axioo[ _\-]([^;/]{1,100}?)|(picopad)[ _\-]([^;/]{1,100}?))(?: Build|\) AppleWebKit)' + regex_flag: 'i' + device_replacement: 'Axioo $1$2 $3' + brand_replacement: 'Axioo' + model_replacement: '$1$2 $3' + - regex: '; {0,2}(V(?:100|700|800)[^;/]*)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Azend' + model_replacement: '$1' + - regex: '; {0,2}(IBAK\-[^;/]*)(?: Build|\) AppleWebKit)' + regex_flag: 'i' + device_replacement: '$1' + brand_replacement: 'Bak' + model_replacement: '$1' + - regex: '; {0,2}(HY5001|HY6501|X12|X21|I5)(?: Build|\) AppleWebKit)' + device_replacement: 'Bedove $1' + brand_replacement: 'Bedove' + model_replacement: '$1' + - regex: '; {0,2}(JC-[^;/]*)(?: Build|\) AppleWebKit)' + device_replacement: 'Benss $1' + brand_replacement: 'Benss' + model_replacement: '$1' + - regex: '; {0,2}(BB) ([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1 $2' + brand_replacement: 'Blackberry' + model_replacement: '$2' + - regex: '; {0,2}(BlackBird)[ _](I8.{0,200}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1 $2' + brand_replacement: '$1' + model_replacement: '$2' + - regex: '; {0,2}(BlackBird)[ _](.{0,200}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1 $2' + brand_replacement: '$1' + model_replacement: '$2' + - regex: '; {0,2}([0-9]+BP[EM][^;/]*|Endeavour[^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: 'Blaupunkt $1' + brand_replacement: 'Blaupunkt' + model_replacement: '$1' + - regex: '; {0,2}((?:BLU|Blu)[ _\-])([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1$2' + brand_replacement: 'Blu' + model_replacement: '$2' + - regex: '; {0,2}(?:BMOBILE )?(Blu|BLU|DASH [^;/]{1,100}|VIVO 4\.3|TANK 4\.5)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Blu' + model_replacement: '$1' + - regex: '; {0,2}(TOUCH\d[^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Blusens' + model_replacement: '$1' + - regex: '; {0,2}(AX5\d+)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Bmobile' + model_replacement: '$1' + - regex: '; {0,2}([Bb]q) ([^;/]{1,100}?);?(?: Build|\) AppleWebKit)' + device_replacement: '$1 $2' + brand_replacement: 'bq' + model_replacement: '$2' + - regex: '; {0,2}(Maxwell [^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'bq' + model_replacement: '$1' + - regex: '; {0,2}((?:B-Tab|B-TAB) ?\d[^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Braun' + model_replacement: '$1' + - regex: '; {0,2}(Broncho) ([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1 $2' + brand_replacement: '$1' + model_replacement: '$2' + - regex: '; {0,2}CAPTIVA ([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: 'Captiva $1' + brand_replacement: 'Captiva' + model_replacement: '$1' + - regex: 
'; {0,2}(C771|CAL21|IS11CA)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Casio' + model_replacement: '$1' + - regex: '; {0,2}(?:Cat|CAT) ([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: 'Cat $1' + brand_replacement: 'Cat' + model_replacement: '$1' + - regex: '; {0,2}(?:Cat)(Nova.{0,200}?)(?: Build|\) AppleWebKit)' + device_replacement: 'Cat $1' + brand_replacement: 'Cat' + model_replacement: '$1' + - regex: '; {0,2}(INM8002KP|ADM8000KP_[AB])(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Cat' + model_replacement: 'Tablet PHOENIX 8.1J0' + - regex: '; {0,2}(?:[Cc]elkon[ _\*]|CELKON[ _\*])([^;/\)]+) ?(?:Build|;|\))' + device_replacement: '$1' + brand_replacement: 'Celkon' + model_replacement: '$1' + - regex: 'Build/(?:[Cc]elkon)+_?([^;/_\)]+)' + device_replacement: '$1' + brand_replacement: 'Celkon' + model_replacement: '$1' + - regex: '; {0,2}(CT)-?(\d+)(?: Build|\) AppleWebKit)' + device_replacement: '$1$2' + brand_replacement: 'Celkon' + model_replacement: '$1$2' + - regex: '; {0,2}(A19|A19Q|A105|A107[^;/\)]*) ?(?:Build|;|\))' + device_replacement: '$1' + brand_replacement: 'Celkon' + model_replacement: '$1' + - regex: '; {0,2}(TPC[0-9]{4,5})(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'ChangJia' + model_replacement: '$1' + - regex: 'CrKey.*DeviceType/([^/]*)' + brand_replacement: 'Google' + device_replacement: 'Chromecast' + model_replacement: '$1' + - regex: 'Fuchsia.*CrKey' + brand_replacement: 'Google' + device_replacement: 'Chromecast' + model_replacement: 'Nest Hub' + - regex: 'Linux.*CrKey/1.36' + brand_replacement: 'Google' + device_replacement: 'Chromecast' + model_replacement: 'First Generation' + - regex: 'CrKey/' + brand_replacement: 'Google' + device_replacement: 'Chromecast' + model_replacement: 'Chromecast' + - regex: '; {0,2}(Cloudfone)[ _](Excite)([^ ][^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1 $2 $3' + brand_replacement: 'Cloudfone' + model_replacement: '$1 $2 $3' + - regex: '; {0,2}(Excite|ICE)[ _](\d+[^;/]{0,100}?)(?: Build|\) AppleWebKit)' + device_replacement: 'Cloudfone $1 $2' + brand_replacement: 'Cloudfone' + model_replacement: 'Cloudfone $1 $2' + - regex: '; {0,2}(Cloudfone|CloudPad)[ _]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1 $2' + brand_replacement: 'Cloudfone' + model_replacement: '$1 $2' + - regex: '; {0,2}((?:Aquila|Clanga|Rapax)[^;/]{1,100}?)(?: Build|\) AppleWebKit)' + regex_flag: 'i' + device_replacement: '$1' + brand_replacement: 'Cmx' + model_replacement: '$1' + - regex: '; {0,2}(?:CFW-|Kyros )?(MID[0-9]{4}(?:[ABC]|SR|TV)?)(\(3G\)-4G| GB 8K| 3G| 8K| GB)? 
{0,2}(?:Build|[;\)])' + device_replacement: 'CobyKyros $1$2' + brand_replacement: 'CobyKyros' + model_replacement: '$1$2' + - regex: '; {0,2}([^;/]{0,50})Coolpad[ _]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1$2' + brand_replacement: 'Coolpad' + model_replacement: '$1$2' + - regex: '; {0,2}(CUBE[ _])?([KU][0-9]+ ?GT.{0,200}?|A5300)(?: Build|\) AppleWebKit)' + regex_flag: 'i' + device_replacement: '$1$2' + brand_replacement: 'Cube' + model_replacement: '$2' + - regex: '; {0,2}CUBOT ([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + regex_flag: 'i' + device_replacement: '$1' + brand_replacement: 'Cubot' + model_replacement: '$1' + - regex: '; {0,2}(BOBBY)(?: Build|\) AppleWebKit)' + regex_flag: 'i' + device_replacement: '$1' + brand_replacement: 'Cubot' + model_replacement: '$1' + - regex: '; {0,2}(Dslide [^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Danew' + model_replacement: '$1' + - regex: '; {0,2}(XCD)[ _]?(28|35)(?: Build|\) AppleWebKit)' + device_replacement: 'Dell $1$2' + brand_replacement: 'Dell' + model_replacement: '$1$2' + - regex: '; {0,2}(001DL)(?: Build|\) AppleWebKit)' + device_replacement: 'Dell $1' + brand_replacement: 'Dell' + model_replacement: 'Streak' + - regex: '; {0,2}(?:Dell|DELL) (Streak)(?: Build|\) AppleWebKit)' + device_replacement: 'Dell $1' + brand_replacement: 'Dell' + model_replacement: 'Streak' + - regex: '; {0,2}(101DL|GS01|Streak Pro[^;/]{0,100})(?: Build|\) AppleWebKit)' + device_replacement: 'Dell $1' + brand_replacement: 'Dell' + model_replacement: 'Streak Pro' + - regex: '; {0,2}([Ss]treak ?7)(?: Build|\) AppleWebKit)' + device_replacement: 'Dell $1' + brand_replacement: 'Dell' + model_replacement: 'Streak 7' + - regex: '; {0,2}(Mini-3iX)(?: Build|\) AppleWebKit)' + device_replacement: 'Dell $1' + brand_replacement: 'Dell' + model_replacement: '$1' + - regex: '; {0,2}(?:Dell|DELL)[ _](Aero|Venue|Thunder|Mini.{0,200}?|Streak[ _]Pro)(?: Build|\) AppleWebKit)' + device_replacement: 'Dell $1' + brand_replacement: 'Dell' + model_replacement: '$1' + - regex: '; {0,2}Dell[ _]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: 'Dell $1' + brand_replacement: 'Dell' + model_replacement: '$1' + - regex: '; {0,2}(TA[CD]-\d+[^;/]{0,100})(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Denver' + model_replacement: '$1' + - regex: '; {0,2}(iP[789]\d{2}(?:-3G)?|IP10\d{2}(?:-8GB)?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Dex' + model_replacement: '$1' + - regex: '; {0,2}(AirTab)[ _\-]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1 $2' + brand_replacement: 'DNS' + model_replacement: '$1 $2' + - regex: '; {0,2}(F\-\d[^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Fujitsu' + model_replacement: '$1' + - regex: '; {0,2}(HT-03A)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'HTC' + model_replacement: 'Magic' + - regex: '; {0,2}(HT\-\d[^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'HTC' + model_replacement: '$1' + - regex: '; {0,2}(L\-\d[^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'LG' + model_replacement: '$1' + - regex: '; {0,2}(N\-\d[^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Nec' + model_replacement: '$1' + - regex: '; {0,2}(P\-\d[^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 
'Panasonic' + model_replacement: '$1' + - regex: '; {0,2}(SC\-\d[^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Samsung' + model_replacement: '$1' + - regex: '; {0,2}(SH\-\d[^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Sharp' + model_replacement: '$1' + - regex: '; {0,2}(SO\-\d[^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'SonyEricsson' + model_replacement: '$1' + - regex: '; {0,2}(T\-0[12][^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Toshiba' + model_replacement: '$1' + - regex: '; {0,2}(DOOV)[ _]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1 $2' + brand_replacement: 'DOOV' + model_replacement: '$2' + - regex: '; {0,2}(Enot|ENOT)[ -]?([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1 $2' + brand_replacement: 'Enot' + model_replacement: '$2' + - regex: '; {0,2}[^;/]{1,100} Build/(?:CROSS|Cross)+[ _\-]([^\)]+)' + device_replacement: 'CROSS $1' + brand_replacement: 'Evercoss' + model_replacement: 'Cross $1' + - regex: '; {0,2}(CROSS|Cross)[ _\-]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1 $2' + brand_replacement: 'Evercoss' + model_replacement: 'Cross $2' + - regex: '; {0,2}Explay[_ ](.{1,200}?)(?:[\)]| Build)' + device_replacement: '$1' + brand_replacement: 'Explay' + model_replacement: '$1' + - regex: '; {0,2}(IQ.{0,200}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Fly' + model_replacement: '$1' + - regex: '; {0,2}(Fly|FLY)[ _](IQ[^;]{1,100}?|F[34]\d+[^;]{0,100}?);?(?: Build|\) AppleWebKit)' + device_replacement: '$1 $2' + brand_replacement: 'Fly' + model_replacement: '$2' + - regex: '; {0,2}(M532|Q572|FJL21)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Fujitsu' + model_replacement: '$1' + - regex: '; {0,2}(G1)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Galapad' + model_replacement: '$1' + - regex: '; {0,2}(Geeksphone) ([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1 $2' + brand_replacement: '$1' + model_replacement: '$2' + - regex: '; {0,2}(G[^F]?FIVE) ([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1 $2' + brand_replacement: 'Gfive' + model_replacement: '$2' + - regex: '; {0,2}(Gionee)[ _\-]([^;/]{1,100}?)(?:/[^;/]{1,100}|)(?: Build|\) AppleWebKit)' + regex_flag: 'i' + device_replacement: '$1 $2' + brand_replacement: 'Gionee' + model_replacement: '$2' + - regex: '; {0,2}(GN\d+[A-Z]?|INFINITY_PASSION|Ctrl_V1)(?: Build|\) AppleWebKit)' + device_replacement: 'Gionee $1' + brand_replacement: 'Gionee' + model_replacement: '$1' + - regex: '; {0,2}(E3) Build/JOP40D' + device_replacement: 'Gionee $1' + brand_replacement: 'Gionee' + model_replacement: '$1' + - regex: '\sGIONEE[-\s_](\w*)' + regex_flag: 'i' + device_replacement: 'Gionee $1' + brand_replacement: 'Gionee' + model_replacement: '$1' + - regex: '; {0,2}((?:FONE|QUANTUM|INSIGNIA) \d+[^;/]{0,100}|PLAYTAB)(?: Build|\) AppleWebKit)' + device_replacement: 'GoClever $1' + brand_replacement: 'GoClever' + model_replacement: '$1' + - regex: '; {0,2}GOCLEVER ([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: 'GoClever $1' + brand_replacement: 'GoClever' + model_replacement: '$1' + - regex: '; {0,2}(Glass \d+)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Google' + model_replacement: '$1' + - regex: '; {0,2}([g|G]oogle)? 
(Pixel[ a-zA-z0-9]{1,100});(?: Build|.{0,50}\) AppleWebKit)' + device_replacement: '$2' + brand_replacement: 'Google' + model_replacement: '$2' + - regex: '; {0,2}([g|G]oogle)? (Pixel.{0,200}?)(?: Build|\) AppleWebKit)' + device_replacement: '$2' + brand_replacement: 'Google' + model_replacement: '$2' + - regex: '; {0,2}(GSmart)[ -]([^/]{1,50})(?: Build|\) AppleWebKit)' + device_replacement: '$1 $2' + brand_replacement: 'Gigabyte' + model_replacement: '$1 $2' + - regex: '; {0,2}(imx5[13]_[^/]{1,50})(?: Build|\) AppleWebKit)' + device_replacement: 'Freescale $1' + brand_replacement: 'Freescale' + model_replacement: '$1' + - regex: '; {0,2}Haier[ _\-]([^/]{1,50})(?: Build|\) AppleWebKit)' + device_replacement: 'Haier $1' + brand_replacement: 'Haier' + model_replacement: '$1' + - regex: '; {0,2}(PAD1016)(?: Build|\) AppleWebKit)' + device_replacement: 'Haipad $1' + brand_replacement: 'Haipad' + model_replacement: '$1' + - regex: '; {0,2}(M701|M7|M8|M9)(?: Build|\) AppleWebKit)' + device_replacement: 'Haipad $1' + brand_replacement: 'Haipad' + model_replacement: '$1' + - regex: '; {0,2}(SN\d+T[^;\)/]*)(?: Build|[;\)])' + device_replacement: 'Hannspree $1' + brand_replacement: 'Hannspree' + model_replacement: '$1' + - regex: 'Build/HCL ME Tablet ([^;\)]{1,3})[\);]' + device_replacement: 'HCLme $1' + brand_replacement: 'HCLme' + model_replacement: '$1' + - regex: '; {0,2}([^;\/]+) Build/HCL' + device_replacement: 'HCLme $1' + brand_replacement: 'HCLme' + model_replacement: '$1' + - regex: '; {0,2}(MID-?\d{4}C[EM])(?: Build|\) AppleWebKit)' + device_replacement: 'Hena $1' + brand_replacement: 'Hena' + model_replacement: '$1' + - regex: '; {0,2}(EG\d{2,}|HS-[^;/]{1,100}|MIRA[^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: 'Hisense $1' + brand_replacement: 'Hisense' + model_replacement: '$1' + - regex: '; {0,2}(andromax[^;/]{1,100}?)(?: Build|\) AppleWebKit)' + regex_flag: 'i' + device_replacement: 'Hisense $1' + brand_replacement: 'Hisense' + model_replacement: '$1' + - regex: '; {0,2}(?:AMAZE[ _](S\d+)|(S\d+)[ _]AMAZE)(?: Build|\) AppleWebKit)' + device_replacement: 'AMAZE $1$2' + brand_replacement: 'hitech' + model_replacement: 'AMAZE $1$2' + - regex: '; {0,2}(PlayBook)(?: Build|\) AppleWebKit)' + device_replacement: 'HP $1' + brand_replacement: 'HP' + model_replacement: '$1' + - regex: '; {0,2}HP ([^/]{1,50})(?: Build|\) AppleWebKit)' + device_replacement: 'HP $1' + brand_replacement: 'HP' + model_replacement: '$1' + - regex: '; {0,2}([^/]{1,30}_tenderloin)(?: Build|\) AppleWebKit)' + device_replacement: 'HP TouchPad' + brand_replacement: 'HP' + model_replacement: 'TouchPad' + - regex: '; {0,2}(HUAWEI |Huawei-|)([UY][^;/]{1,100}) Build/(?:Huawei|HUAWEI)([UY][^\);]+)\)' + device_replacement: '$1$2' + brand_replacement: 'Huawei' + model_replacement: '$2' + - regex: '; {0,2}([^;/]{1,100}) Build[/ ]Huawei(MT1-U06|[A-Z]{1,50}\d+[^\);]{1,50})\)' + device_replacement: '$1' + brand_replacement: 'Huawei' + model_replacement: '$2' + - regex: '; {0,2}(S7|M860) Build' + device_replacement: '$1' + brand_replacement: 'Huawei' + model_replacement: '$1' + - regex: '; {0,2}((?:HUAWEI|Huawei)[ \-]?)(MediaPad) Build' + device_replacement: '$1$2' + brand_replacement: 'Huawei' + model_replacement: '$2' + - regex: '; {0,2}((?:HUAWEI[ _]?|Huawei[ _]|)Ascend[ _])([^;/]{1,100}) Build' + device_replacement: '$1$2' + brand_replacement: 'Huawei' + model_replacement: '$2' + - regex: '; {0,2}((?:HUAWEI|Huawei)[ _\-]?)((?:G700-|MT-)[^;/]{1,100}) Build' + device_replacement: '$1$2' + brand_replacement: 'Huawei' 
+ model_replacement: '$2' + - regex: '; {0,2}((?:HUAWEI|Huawei)[ _\-]?)([^;/]{1,100}) Build' + device_replacement: '$1$2' + brand_replacement: 'Huawei' + model_replacement: '$2' + - regex: '; {0,2}(MediaPad[^;]{1,200}|SpringBoard) Build/Huawei' + device_replacement: '$1' + brand_replacement: 'Huawei' + model_replacement: '$1' + - regex: '; {0,2}([^;]{1,200}) Build/(?:Huawei|HUAWEI)' + device_replacement: '$1' + brand_replacement: 'Huawei' + model_replacement: '$1' + - regex: '; {0,2}([Uu])([89]\d{3}) Build' + device_replacement: '$1$2' + brand_replacement: 'Huawei' + model_replacement: 'U$2' + - regex: '; {0,2}(?:Ideos |IDEOS )(S7) Build' + device_replacement: 'Huawei Ideos$1' + brand_replacement: 'Huawei' + model_replacement: 'Ideos$1' + - regex: '; {0,2}(?:Ideos |IDEOS )([^;/]{1,50}\s{0,5}|\s{0,5})Build' + device_replacement: 'Huawei Ideos$1' + brand_replacement: 'Huawei' + model_replacement: 'Ideos$1' + - regex: '; {0,2}(Orange Daytona|Pulse|Pulse Mini|Vodafone 858|C8500|C8600|C8650|C8660|Nexus 6P|ATH-.{1,200}?) Build[/ ]' + device_replacement: 'Huawei $1' + brand_replacement: 'Huawei' + model_replacement: '$1' + - regex: '; {0,2}((?:[A-Z]{3})\-L[A-Za0-9]{2})[\)]' + device_replacement: 'Huawei $1' + brand_replacement: 'Huawei' + model_replacement: '$1' + - regex: '; {0,2}([^;]{1,200}) Build/(HONOR|Honor)' + device_replacement: 'Huawei Honor $1' + brand_replacement: 'Huawei' + model_replacement: 'Honor $1' + - regex: '; {0,2}HTC[ _]([^;]{1,200}); Windows Phone' + device_replacement: 'HTC $1' + brand_replacement: 'HTC' + model_replacement: '$1' + - regex: '; {0,2}(?:HTC[ _/])+([^ _/]+)(?:[/\\]1\.0 | V|/| +)\d+\.\d[\d\.]*(?: {0,2}Build|\))' + device_replacement: 'HTC $1' + brand_replacement: 'HTC' + model_replacement: '$1' + - regex: '; {0,2}(?:HTC[ _/])+([^ _/]+)(?:[ _/]([^ _/]+)|)(?:[/\\]1\.0 | V|/| +)\d+\.\d[\d\.]*(?: {0,2}Build|\))' + device_replacement: 'HTC $1 $2' + brand_replacement: 'HTC' + model_replacement: '$1 $2' + - regex: '; {0,2}(?:HTC[ _/])+([^ _/]+)(?:[ _/]([^ _/]+)(?:[ _/]([^ _/]+)|)|)(?:[/\\]1\.0 | V|/| +)\d+\.\d[\d\.]*(?: {0,2}Build|\))' + device_replacement: 'HTC $1 $2 $3' + brand_replacement: 'HTC' + model_replacement: '$1 $2 $3' + - regex: '; {0,2}(?:HTC[ _/])+([^ _/]+)(?:[ _/]([^ _/]+)(?:[ _/]([^ _/]+)(?:[ _/]([^ _/]+)|)|)|)(?:[/\\]1\.0 | V|/| +)\d+\.\d[\d\.]*(?: {0,2}Build|\))' + device_replacement: 'HTC $1 $2 $3 $4' + brand_replacement: 'HTC' + model_replacement: '$1 $2 $3 $4' + - regex: '; {0,2}(?:(?:HTC|htc)(?:_blocked|)[ _/])+([^ _/;]+)(?: {0,2}Build|[;\)]| - )' + device_replacement: 'HTC $1' + brand_replacement: 'HTC' + model_replacement: '$1' + - regex: '; {0,2}(?:(?:HTC|htc)(?:_blocked|)[ _/])+([^ _/]+)(?:[ _/]([^ _/;\)]+)|)(?: {0,2}Build|[;\)]| - )' + device_replacement: 'HTC $1 $2' + brand_replacement: 'HTC' + model_replacement: '$1 $2' + - regex: '; {0,2}(?:(?:HTC|htc)(?:_blocked|)[ _/])+([^ _/]+)(?:[ _/]([^ _/]+)(?:[ _/]([^ _/;\)]+)|)|)(?: {0,2}Build|[;\)]| - )' + device_replacement: 'HTC $1 $2 $3' + brand_replacement: 'HTC' + model_replacement: '$1 $2 $3' + - regex: '; {0,2}(?:(?:HTC|htc)(?:_blocked|)[ _/])+([^ _/]+)(?:[ _/]([^ _/]+)(?:[ _/]([^ _/]+)(?:[ _/]([^ /;]+)|)|)|)(?: {0,2}Build|[;\)]| - )' + device_replacement: 'HTC $1 $2 $3 $4' + brand_replacement: 'HTC' + model_replacement: '$1 $2 $3 $4' + - regex: 'HTC Streaming Player [^\/]{0,30}/[^\/]{0,10}/ htc_([^/]{1,10}) /' + device_replacement: 'HTC $1' + brand_replacement: 'HTC' + model_replacement: '$1' + - regex: '(?:[;,] {0,2}|^)(?:htccn_chs-|)HTC[ _-]?([^;]{1,200}?)(?: 
{0,2}Build|clay|Android|-?Mozilla| Opera| Profile| UNTRUSTED|[;/\(\)]|$)' + regex_flag: 'i' + device_replacement: 'HTC $1' + brand_replacement: 'HTC' + model_replacement: '$1' + - regex: '; {0,2}(A6277|ADR6200|ADR6300|ADR6350|ADR6400[A-Z]*|ADR6425[A-Z]*|APX515CKT|ARIA|Desire[^_ ]*|Dream|EndeavorU|Eris|Evo|Flyer|HD2|Hero|HERO200|Hero CDMA|HTL21|Incredible|Inspire[A-Z0-9]*|Legend|Liberty|Nexus ?(?:One|HD2)|One|One S C2|One[ _]?(?:S|V|X\+?)\w*|PC36100|PG06100|PG86100|S31HT|Sensation|Wildfire)(?: Build|[/;\(\)])' + regex_flag: 'i' + device_replacement: 'HTC $1' + brand_replacement: 'HTC' + model_replacement: '$1' + - regex: '; {0,2}(ADR6200|ADR6400L|ADR6425LVW|Amaze|DesireS?|EndeavorU|Eris|EVO|Evo\d[A-Z]+|HD2|IncredibleS?|Inspire[A-Z0-9]*|Inspire[A-Z0-9]*|Sensation[A-Z0-9]*|Wildfire)[ _-](.{1,200}?)(?:[/;\)]|Build|MIUI|1\.0)' + regex_flag: 'i' + device_replacement: 'HTC $1 $2' + brand_replacement: 'HTC' + model_replacement: '$1 $2' + - regex: '; {0,2}HYUNDAI (T\d[^/]{0,10})(?: Build|\) AppleWebKit)' + device_replacement: 'Hyundai $1' + brand_replacement: 'Hyundai' + model_replacement: '$1' + - regex: '; {0,2}HYUNDAI ([^;/]{1,10}?)(?: Build|\) AppleWebKit)' + device_replacement: 'Hyundai $1' + brand_replacement: 'Hyundai' + model_replacement: '$1' + - regex: '; {0,2}(X700|Hold X|MB-6900)(?: Build|\) AppleWebKit)' + device_replacement: 'Hyundai $1' + brand_replacement: 'Hyundai' + model_replacement: '$1' + - regex: '; {0,2}(?:iBall[ _\-]|)(Andi)[ _]?(\d[^;/]*)(?: Build|\) AppleWebKit)' + regex_flag: 'i' + device_replacement: '$1 $2' + brand_replacement: 'iBall' + model_replacement: '$1 $2' + - regex: '; {0,2}(IBall)(?:[ _]([^;/]{1,100}?)|)(?: Build|\) AppleWebKit)' + regex_flag: 'i' + device_replacement: '$1 $2' + brand_replacement: 'iBall' + model_replacement: '$2' + - regex: '; {0,2}(NT-\d+[^ ;/]{0,50}|Net[Tt]AB [^;/]{1,50}|Mercury [A-Z]{1,50}|iconBIT)(?: S/N:[^;/]{1,50}|)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'IconBIT' + model_replacement: '$1' + - regex: '; {0,2}(IMO)[ _]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + regex_flag: 'i' + device_replacement: '$1 $2' + brand_replacement: 'IMO' + model_replacement: '$2' + - regex: '; {0,2}i-?mobile[ _]([^/]{1,50})(?: Build|\) AppleWebKit)' + regex_flag: 'i' + device_replacement: 'i-mobile $1' + brand_replacement: 'imobile' + model_replacement: '$1' + - regex: '; {0,2}(i-(?:style|note)[^/]{0,10})(?: Build|\) AppleWebKit)' + regex_flag: 'i' + device_replacement: 'i-mobile $1' + brand_replacement: 'imobile' + model_replacement: '$1' + - regex: '; {0,2}(ImPAD) ?(\d+(?:.){0,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1 $2' + brand_replacement: 'Impression' + model_replacement: '$1 $2' + - regex: '; {0,2}(Infinix)[ _]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1 $2' + brand_replacement: 'Infinix' + model_replacement: '$2' + - regex: '; {0,2}(Informer)[ \-]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1 $2' + brand_replacement: 'Informer' + model_replacement: '$2' + - regex: '; {0,2}(TAB) ?([78][12]4)(?: Build|\) AppleWebKit)' + device_replacement: 'Intenso $1' + brand_replacement: 'Intenso' + model_replacement: '$1 $2' + - regex: '; {0,2}(?:Intex[ _]|)(AQUA|Aqua)([ _\.\-])([^;/]{1,100}?) 
{0,2}(?:Build|;)' + device_replacement: '$1$2$3' + brand_replacement: 'Intex' + model_replacement: '$1 $3' + - regex: '; {0,2}(?:INTEX|Intex)(?:[_ ]([^\ _;/]+))(?:[_ ]([^\ _;/]+)|) {0,2}(?:Build|;)' + device_replacement: '$1 $2' + brand_replacement: 'Intex' + model_replacement: '$1 $2' + - regex: '; {0,2}([iI]Buddy)[ _]?(Connect)(?:_|\?_| |)([^;/]{0,50}) {0,2}(?:Build|;)' + device_replacement: '$1 $2 $3' + brand_replacement: 'Intex' + model_replacement: 'iBuddy $2 $3' + - regex: '; {0,2}(I-Buddy)[ _]([^;/]{1,100}?) {0,2}(?:Build|;)' + device_replacement: '$1 $2' + brand_replacement: 'Intex' + model_replacement: 'iBuddy $2' + - regex: '; {0,2}(iOCEAN) ([^/]{1,50})(?: Build|\) AppleWebKit)' + regex_flag: 'i' + device_replacement: '$1 $2' + brand_replacement: 'iOCEAN' + model_replacement: '$2' + - regex: '; {0,2}(TP\d+(?:\.\d+|)\-\d[^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: 'ionik $1' + brand_replacement: 'ionik' + model_replacement: '$1' + - regex: '; {0,2}(M702pro)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Iru' + model_replacement: '$1' + - regex: '; {0,2}itel ([^;/]*)(?: Build|\) AppleWebKit)' + device_replacement: 'Itel $1' + brand_replacement: 'Itel' + model_replacement: '$1' + - regex: '; {0,2}(DE88Plus|MD70)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Ivio' + model_replacement: '$1' + - regex: '; {0,2}IVIO[_\-]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Ivio' + model_replacement: '$1' + - regex: '; {0,2}(TPC-\d+|JAY-TECH)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Jaytech' + model_replacement: '$1' + - regex: '; {0,2}(JY-[^;/]{1,100}|G[234]S?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Jiayu' + model_replacement: '$1' + - regex: '; {0,2}(JXD)[ _\-]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1 $2' + brand_replacement: 'JXD' + model_replacement: '$2' + - regex: '; {0,2}Karbonn[ _]?([^;/]{1,100}) {0,2}(?:Build|;)' + regex_flag: 'i' + device_replacement: '$1' + brand_replacement: 'Karbonn' + model_replacement: '$1' + - regex: '; {0,2}([^;]{1,200}) Build/Karbonn' + device_replacement: '$1' + brand_replacement: 'Karbonn' + model_replacement: '$1' + - regex: '; {0,2}(A11|A39|A37|A34|ST8|ST10|ST7|Smart Tab3|Smart Tab2|Titanium S\d) +Build' + device_replacement: '$1' + brand_replacement: 'Karbonn' + model_replacement: '$1' + - regex: '; {0,2}(IS01|IS03|IS05|IS\d{2}SH)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Sharp' + model_replacement: '$1' + - regex: '; {0,2}(IS04)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Regza' + model_replacement: '$1' + - regex: '; {0,2}(IS06|IS\d{2}PT)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Pantech' + model_replacement: '$1' + - regex: '; {0,2}(IS11S)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'SonyEricsson' + model_replacement: 'Xperia Acro' + - regex: '; {0,2}(IS11CA)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Casio' + model_replacement: 'GzOne $1' + - regex: '; {0,2}(IS11LG)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'LG' + model_replacement: 'Optimus X' + - regex: '; {0,2}(IS11N)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Medias' + model_replacement: '$1' + - regex: '; {0,2}(IS11PT)(?: Build|\) AppleWebKit)' + device_replacement: 
'$1' + brand_replacement: 'Pantech' + model_replacement: 'MIRACH' + - regex: '; {0,2}(IS12F)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Fujitsu' + model_replacement: 'Arrows ES' + - regex: '; {0,2}(IS12M)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Motorola' + model_replacement: 'XT909' + - regex: '; {0,2}(IS12S)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'SonyEricsson' + model_replacement: 'Xperia Acro HD' + - regex: '; {0,2}(ISW11F)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Fujitsu' + model_replacement: 'Arrowz Z' + - regex: '; {0,2}(ISW11HT)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'HTC' + model_replacement: 'EVO' + - regex: '; {0,2}(ISW11K)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Kyocera' + model_replacement: 'DIGNO' + - regex: '; {0,2}(ISW11M)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Motorola' + model_replacement: 'Photon' + - regex: '; {0,2}(ISW11SC)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Samsung' + model_replacement: 'GALAXY S II WiMAX' + - regex: '; {0,2}(ISW12HT)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'HTC' + model_replacement: 'EVO 3D' + - regex: '; {0,2}(ISW13HT)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'HTC' + model_replacement: 'J' + - regex: '; {0,2}(ISW?[0-9]{2}[A-Z]{0,2})(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'KDDI' + model_replacement: '$1' + - regex: '; {0,2}(INFOBAR [^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'KDDI' + model_replacement: '$1' + - regex: '; {0,2}(JOYPAD|Joypad)[ _]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1 $2' + brand_replacement: 'Kingcom' + model_replacement: '$1 $2' + - regex: '; {0,2}(Vox|VOX|Arc|K080)(?: Build|\) AppleWebKit)' + regex_flag: 'i' + device_replacement: '$1' + brand_replacement: 'Kobo' + model_replacement: '$1' + - regex: '\b(Kobo Touch)\b' + device_replacement: '$1' + brand_replacement: 'Kobo' + model_replacement: '$1' + - regex: '; {0,2}(K-Touch)[ _]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + regex_flag: 'i' + device_replacement: '$1 $2' + brand_replacement: 'Ktouch' + model_replacement: '$2' + - regex: '; {0,2}((?:EV|KM)-S\d+[A-Z]?)(?: Build|\) AppleWebKit)' + regex_flag: 'i' + device_replacement: '$1' + brand_replacement: 'KTtech' + model_replacement: '$1' + - regex: '; {0,2}(Zio|Hydro|Torque|Event|EVENT|Echo|Milano|Rise|URBANO PROGRESSO|WX04K|WX06K|WX10K|KYL21|101K|C5[12]\d{2})(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Kyocera' + model_replacement: '$1' + - regex: '; {0,2}(?:LAVA[ _]|)IRIS[ _\-]?([^/;\)]+) {0,2}(?:;|\)|Build)' + regex_flag: 'i' + device_replacement: 'Iris $1' + brand_replacement: 'Lava' + model_replacement: 'Iris $1' + - regex: '; {0,2}LAVA[ _]([^;/]{1,100}) Build' + device_replacement: '$1' + brand_replacement: 'Lava' + model_replacement: '$1' + - regex: '; {0,2}(?:(Aspire A1)|(?:LEMON|Lemon)[ _]([^;/]{1,100}))_?(?: Build|\) AppleWebKit)' + device_replacement: 'Lemon $1$2' + brand_replacement: 'Lemon' + model_replacement: '$1$2' + - regex: '; {0,2}(TAB-1012)(?: Build|\) AppleWebKit)' + device_replacement: 'Lenco $1' + brand_replacement: 'Lenco' + model_replacement: '$1' + - regex: '; Lenco ([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: 
'Lenco $1' + brand_replacement: 'Lenco' + model_replacement: '$1' + - regex: '; {0,2}(A1_07|A2107A-H|S2005A-H|S1-37AH0) Build' + device_replacement: '$1' + brand_replacement: 'Lenovo' + model_replacement: '$1' + - regex: '; {0,2}(Idea[Tp]ab)[ _]([^;/]{1,100});? Build' + device_replacement: 'Lenovo $1 $2' + brand_replacement: 'Lenovo' + model_replacement: '$1 $2' + - regex: '; {0,2}(Idea(?:Tab|pad)) ?([^;/]{1,100}) Build' + device_replacement: 'Lenovo $1 $2' + brand_replacement: 'Lenovo' + model_replacement: '$1 $2' + - regex: '; {0,2}(ThinkPad) ?(Tablet) Build/' + device_replacement: 'Lenovo $1 $2' + brand_replacement: 'Lenovo' + model_replacement: '$1 $2' + - regex: '; {0,2}(?:LNV-|)(?:=?[Ll]enovo[ _\-]?|LENOVO[ _])(.{1,200}?)(?:Build|[;/\)])' + device_replacement: 'Lenovo $1' + brand_replacement: 'Lenovo' + model_replacement: '$1' + - regex: '[;,] (?:Vodafone |)(SmartTab) ?(II) ?(\d+) Build/' + device_replacement: 'Lenovo $1 $2 $3' + brand_replacement: 'Lenovo' + model_replacement: '$1 $2 $3' + - regex: '; {0,2}(?:Ideapad |)K1 Build/' + device_replacement: 'Lenovo Ideapad K1' + brand_replacement: 'Lenovo' + model_replacement: 'Ideapad K1' + - regex: '; {0,2}(3GC101|3GW10[01]|A390) Build/' + device_replacement: '$1' + brand_replacement: 'Lenovo' + model_replacement: '$1' + - regex: '\b(?:Lenovo|LENOVO)+[ _\-]?([^,;:/ ]+)' + device_replacement: 'Lenovo $1' + brand_replacement: 'Lenovo' + model_replacement: '$1' + - regex: '; {0,2}(MFC\d+)[A-Z]{2}([^;,/]*),?(?: Build|\) AppleWebKit)' + device_replacement: '$1$2' + brand_replacement: 'Lexibook' + model_replacement: '$1$2' + - regex: '; {0,2}(E[34][0-9]{2}|LS[6-8][0-9]{2}|VS[6-9][0-9]+[^;/]{1,30}|Nexus 4|Nexus 5X?|GT540f?|Optimus (?:2X|G|4X HD)|OptimusX4HD) {0,2}(?:Build|;)' + device_replacement: '$1' + brand_replacement: 'LG' + model_replacement: '$1' + - regex: '[;:] {0,2}(L-\d+[A-Z]|LGL\d+[A-Z]?)(?:/V\d+|) {0,2}(?:Build|[;\)])' + device_replacement: '$1' + brand_replacement: 'LG' + model_replacement: '$1' + - regex: '; {0,2}(LG-)([A-Z]{1,2}\d{2,}[^,;/\)\(]*?)(?:Build| V\d+|[,;/\)\(]|$)' + device_replacement: '$1$2' + brand_replacement: 'LG' + model_replacement: '$2' + - regex: '; {0,2}(LG[ \-]|LG)([^;/]{1,100})[;/]? 
Build' + device_replacement: '$1$2' + brand_replacement: 'LG' + model_replacement: '$2' + - regex: '^(LG)-([^;/]{1,100})/ Mozilla/.{0,200}; Android' + device_replacement: '$1 $2' + brand_replacement: 'LG' + model_replacement: '$2' + - regex: '(Web0S); Linux/(SmartTV)' + device_replacement: 'LG $1 $2' + brand_replacement: 'LG' + model_replacement: '$1 $2' + - regex: '; {0,2}((?:SMB|smb)[^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Malata' + model_replacement: '$1' + - regex: '; {0,2}(?:Malata|MALATA) ([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Malata' + model_replacement: '$1' + - regex: '; {0,2}(MS[45][0-9]{3}|MID0[568][NS]?|MID[1-9]|MID[78]0[1-9]|MID970[1-9]|MID100[1-9])(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Manta' + model_replacement: '$1' + - regex: '; {0,2}(M1052|M806|M9000|M9100|M9701|MID100|MID120|MID125|MID130|MID135|MID140|MID701|MID710|MID713|MID727|MID728|MID731|MID732|MID733|MID735|MID736|MID737|MID760|MID800|MID810|MID820|MID830|MID833|MID835|MID860|MID900|MID930|MID933|MID960|MID980)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Match' + model_replacement: '$1' + - regex: '; {0,2}(GenxDroid7|MSD7.{0,200}?|AX\d.{0,200}?|Tab 701|Tab 722)(?: Build|\) AppleWebKit)' + device_replacement: 'Maxx $1' + brand_replacement: 'Maxx' + model_replacement: '$1' + - regex: '; {0,2}(M-PP[^;/]{1,30}|PhonePad ?\d{2,}[^;/]{1,30}?)(?: Build|\) AppleWebKit)' + device_replacement: 'Mediacom $1' + brand_replacement: 'Mediacom' + model_replacement: '$1' + - regex: '; {0,2}(M-MP[^;/]{1,30}|SmartPad ?\d{2,}[^;/]{1,30}?)(?: Build|\) AppleWebKit)' + device_replacement: 'Mediacom $1' + brand_replacement: 'Mediacom' + model_replacement: '$1' + - regex: '; {0,2}(?:MD_|)LIFETAB[ _]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + regex_flag: 'i' + device_replacement: 'Medion Lifetab $1' + brand_replacement: 'Medion' + model_replacement: 'Lifetab $1' + - regex: '; {0,2}MEDION ([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: 'Medion $1' + brand_replacement: 'Medion' + model_replacement: '$1' + - regex: '; {0,2}(M030|M031|M035|M040|M065|m9)(?: Build|\) AppleWebKit)' + device_replacement: 'Meizu $1' + brand_replacement: 'Meizu' + model_replacement: '$1' + - regex: '; {0,2}(?:meizu_|MEIZU )(.{1,200}?) 
{0,2}(?:Build|[;\)])' + device_replacement: 'Meizu $1' + brand_replacement: 'Meizu' + model_replacement: '$1' + - regex: 'Quest 3' + device_replacement: 'Quest' + brand_replacement: 'Meta' + model_replacement: 'Quest 3' + - regex: 'Quest 2' + device_replacement: 'Quest' + brand_replacement: 'Meta' + model_replacement: 'Quest 2' + - regex: 'Quest Pro' + device_replacement: 'Quest' + brand_replacement: 'Meta' + model_replacement: 'Quest Pro' + - regex: 'Quest' + device_replacement: 'Quest' + brand_replacement: 'Meta' + model_replacement: 'Quest' + - regex: '; {0,2}(?:Micromax[ _](A111|A240)|(A111|A240)) Build' + regex_flag: 'i' + device_replacement: 'Micromax $1$2' + brand_replacement: 'Micromax' + model_replacement: '$1$2' + - regex: '; {0,2}Micromax[ _](A\d{2,3}[^;/]*) Build' + regex_flag: 'i' + device_replacement: 'Micromax $1' + brand_replacement: 'Micromax' + model_replacement: '$1' + - regex: '; {0,2}(A\d{2}|A[12]\d{2}|A90S|A110Q) Build' + regex_flag: 'i' + device_replacement: 'Micromax $1' + brand_replacement: 'Micromax' + model_replacement: '$1' + - regex: '; {0,2}Micromax[ _](P\d{3}[^;/]*) Build' + regex_flag: 'i' + device_replacement: 'Micromax $1' + brand_replacement: 'Micromax' + model_replacement: '$1' + - regex: '; {0,2}(P\d{3}|P\d{3}\(Funbook\)) Build' + regex_flag: 'i' + device_replacement: 'Micromax $1' + brand_replacement: 'Micromax' + model_replacement: '$1' + - regex: '; {0,2}(MITO)[ _\-]?([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + regex_flag: 'i' + device_replacement: '$1 $2' + brand_replacement: 'Mito' + model_replacement: '$2' + - regex: '; {0,2}(Cynus)[ _](F5|T\d|.{1,200}?) {0,2}(?:Build|[;/\)])' + regex_flag: 'i' + device_replacement: '$1 $2' + brand_replacement: 'Mobistel' + model_replacement: '$1 $2' + - regex: '; {0,2}(MODECOM |)(FreeTab) ?([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + regex_flag: 'i' + device_replacement: '$1$2 $3' + brand_replacement: 'Modecom' + model_replacement: '$2 $3' + - regex: '; {0,2}(MODECOM )([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + regex_flag: 'i' + device_replacement: '$1 $2' + brand_replacement: 'Modecom' + model_replacement: '$2' + - regex: '; {0,2}(MZ\d{3}\+?|MZ\d{3} 4G|Xoom|XOOM[^;/]*) Build' + device_replacement: 'Motorola $1' + brand_replacement: 'Motorola' + model_replacement: '$1' + - regex: '; {0,2}(Milestone )(XT[^;/]*) Build' + device_replacement: 'Motorola $1$2' + brand_replacement: 'Motorola' + model_replacement: '$2' + - regex: '; {0,2}(Motoroi ?x|Droid X|DROIDX) Build' + regex_flag: 'i' + device_replacement: 'Motorola $1' + brand_replacement: 'Motorola' + model_replacement: 'DROID X' + - regex: '; {0,2}(Droid[^;/]*|DROID[^;/]*|Milestone[^;/]*|Photon|Triumph|Devour|Titanium) Build' + device_replacement: 'Motorola $1' + brand_replacement: 'Motorola' + model_replacement: '$1' + - regex: '; {0,2}(A555|A85[34][^;/]*|A95[356]|ME[58]\d{2}\+?|ME600|ME632|ME722|MB\d{3}\+?|MT680|MT710|MT870|MT887|MT917|WX435|WX453|WX44[25]|XT\d{3,4}[A-Z\+]*|CL[iI]Q|CL[iI]Q XT) Build' + device_replacement: '$1' + brand_replacement: 'Motorola' + model_replacement: '$1' + - regex: '; {0,2}(Motorola MOT-|Motorola[ _\-]|MOT\-?)([^;/]{1,100}) Build' + device_replacement: '$1$2' + brand_replacement: 'Motorola' + model_replacement: '$2' + - regex: '; {0,2}(Moto[_ ]?|MOT\-)([^;/]{1,100}) Build' + device_replacement: '$1$2' + brand_replacement: 'Motorola' + model_replacement: '$2' + - regex: '; {0,2}((?:MP[DQ]C|MPG\d{1,4}|MP\d{3,4}|MID(?:(?:10[234]|114|43|7[247]|8[24]|7)C|8[01]1))[^;/]*)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + 
brand_replacement: 'Mpman' + model_replacement: '$1' + - regex: '; {0,2}(?:MSI[ _]|)(Primo\d+|Enjoy[ _\-][^;/]{1,100}?)(?: Build|\) AppleWebKit)' + regex_flag: 'i' + device_replacement: '$1' + brand_replacement: 'Msi' + model_replacement: '$1' + - regex: '; {0,2}Multilaser[ _]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Multilaser' + model_replacement: '$1' + - regex: '; {0,2}(My)[_]?(Pad)[ _]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1$2 $3' + brand_replacement: 'MyPhone' + model_replacement: '$1$2 $3' + - regex: '; {0,2}(My)\|?(Phone)[ _]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1$2 $3' + brand_replacement: 'MyPhone' + model_replacement: '$3' + - regex: '; {0,2}(A\d+)[ _](Duo|)(?: Build|\) AppleWebKit)' + regex_flag: 'i' + device_replacement: '$1 $2' + brand_replacement: 'MyPhone' + model_replacement: '$1 $2' + - regex: '; {0,2}(myTab[^;/]*)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Mytab' + model_replacement: '$1' + - regex: '; {0,2}(NABI2?-)([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1$2' + brand_replacement: 'Nabi' + model_replacement: '$2' + - regex: '; {0,2}(N-\d+[CDE])(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Nec' + model_replacement: '$1' + - regex: '; ?(NEC-)(.{0,200}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1$2' + brand_replacement: 'Nec' + model_replacement: '$2' + - regex: '; {0,2}(LT-NA7)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Nec' + model_replacement: 'Lifetouch Note' + - regex: '; {0,2}(NXM\d+[A-Za-z0-9_]{0,50}|Next\d[A-Za-z0-9_ \-]{0,50}|NEXT\d[A-Za-z0-9_ \-]{0,50}|Nextbook [A-Za-z0-9_ ]{0,50}|DATAM803HC|M805)(?: Build|[\);])' + device_replacement: '$1' + brand_replacement: 'Nextbook' + model_replacement: '$1' + - regex: '; {0,2}(Nokia)([ _\-]{0,5})([^;/]{0,50}) Build' + regex_flag: 'i' + device_replacement: '$1$2$3' + brand_replacement: 'Nokia' + model_replacement: '$3' + - regex: '; {0,2}(TA\-\d{4})(?: Build|\) AppleWebKit)' + device_replacement: 'Nokia $1' + brand_replacement: 'Nokia' + model_replacement: '$1' + - regex: '; {0,2}(Nook ?|Barnes & Noble Nook |BN )([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1$2' + brand_replacement: 'Nook' + model_replacement: '$2' + - regex: '; {0,2}(NOOK |)(BNRV200|BNRV200A|BNTV250|BNTV250A|BNTV400|BNTV600|LogicPD Zoom2)(?: Build|\) AppleWebKit)' + device_replacement: '$1$2' + brand_replacement: 'Nook' + model_replacement: '$2' + - regex: '; Build/(Nook)' + device_replacement: '$1' + brand_replacement: 'Nook' + model_replacement: 'Tablet' + - regex: '; {0,2}(OP110|OliPad[^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: 'Olivetti $1' + brand_replacement: 'Olivetti' + model_replacement: '$1' + - regex: '; {0,2}OMEGA[ _\-](MID[^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: 'Omega $1' + brand_replacement: 'Omega' + model_replacement: '$1' + - regex: '^(MID7500|MID\d+) Mozilla/5\.0 \(iPad;' + device_replacement: 'Omega $1' + brand_replacement: 'Omega' + model_replacement: '$1' + - regex: '; {0,2}((?:CIUS|cius)[^;/]*)(?: Build|\) AppleWebKit)' + device_replacement: 'Openpeak $1' + brand_replacement: 'Openpeak' + model_replacement: '$1' + - regex: '; {0,2}(Find ?(?:5|7a)|R8[012]\d{1,2}|T703\d?|U70\d{1,2}T?|X90\d{1,2}|[AFR]\d{1,2}[a-z]{1,2})(?: Build|\) AppleWebKit)' + device_replacement: 'Oppo $1' + brand_replacement: 'Oppo' + model_replacement: '$1' + - regex: '; 
{0,2}OPPO ?([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: 'Oppo $1' + brand_replacement: 'Oppo' + model_replacement: '$1' + - regex: '; {0,2}(CPH\d{1,4}|RMX\d{1,4}|P[A-Z]{3}\d{2})(?: Build|\) AppleWebKit)' + device_replacement: 'Oppo $1' + brand_replacement: 'Oppo' + - regex: '; {0,2}(A1601)(?: Build|\) AppleWebKit)' + device_replacement: 'Oppo F1s' + brand_replacement: 'Oppo' + model_replacement: '$1' + - regex: '; {0,2}(?:Odys\-|ODYS\-|ODYS )([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: 'Odys $1' + brand_replacement: 'Odys' + model_replacement: '$1' + - regex: '; {0,2}(SELECT) ?(7)(?: Build|\) AppleWebKit)' + device_replacement: 'Odys $1 $2' + brand_replacement: 'Odys' + model_replacement: '$1 $2' + - regex: '; {0,2}(PEDI)_(PLUS)_(W)(?: Build|\) AppleWebKit)' + device_replacement: 'Odys $1 $2 $3' + brand_replacement: 'Odys' + model_replacement: '$1 $2 $3' + - regex: '; {0,2}(AEON|BRAVIO|FUSION|FUSION2IN1|Genio|EOS10|IEOS[^;/]*|IRON|Loox|LOOX|LOOX Plus|Motion|NOON|NOON_PRO|NEXT|OPOS|PEDI[^;/]*|PRIME[^;/]*|STUDYTAB|TABLO|Tablet-PC-4|UNO_X8|XELIO[^;/]*|Xelio ?\d+ ?[Pp]ro|XENO10|XPRESS PRO)(?: Build|\) AppleWebKit)' + device_replacement: 'Odys $1' + brand_replacement: 'Odys' + model_replacement: '$1' + - regex: '; (ONE [a-zA-Z]\d+)(?: Build|\) AppleWebKit)' + device_replacement: 'OnePlus $1' + brand_replacement: 'OnePlus' + model_replacement: '$1' + - regex: '; (ONEPLUS [a-zA-Z]\d+)(?: Build|\) AppleWebKit)' + device_replacement: 'OnePlus $1' + brand_replacement: 'OnePlus' + model_replacement: '$1' + - regex: '; {0,2}(HD1903|GM1917|IN2025|LE2115|LE2127|HD1907|BE2012|BE2025|BE2026|BE2028|BE2029|DE2117|DE2118|EB2101|GM1900|GM1910|GM1915|HD1905|HD1925|IN2015|IN2017|IN2019|KB2005|KB2007|LE2117|LE2125|BE2015|GM1903|HD1900|HD1901|HD1910|HD1913|IN2010|IN2013|IN2020|LE2111|LE2120|LE2121|LE2123|BE2011|IN2023|KB2003|LE2113|NE2215|DN2101)(?: Build|\) AppleWebKit)' + device_replacement: 'OnePlus $1' + brand_replacement: 'OnePlus' + model_replacement: 'OnePlus $1' + - regex: '; (OnePlus[ a-zA-z0-9]{0,50});((?: Build|.{0,50}\) AppleWebKit))' + device_replacement: '$1' + brand_replacement: 'OnePlus' + model_replacement: '$1' + - regex: '; (OnePlus[ a-zA-z0-9]{0,50})((?: Build|\) AppleWebKit))' + device_replacement: '$1' + brand_replacement: 'OnePlus' + model_replacement: '$1' + - regex: '; {0,2}(TP-\d+)(?: Build|\) AppleWebKit)' + device_replacement: 'Orion $1' + brand_replacement: 'Orion' + model_replacement: '$1' + - regex: '; {0,2}(G100W?)(?: Build|\) AppleWebKit)' + device_replacement: 'PackardBell $1' + brand_replacement: 'PackardBell' + model_replacement: '$1' + - regex: '; {0,2}(Panasonic)[_ ]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1 $2' + brand_replacement: '$1' + model_replacement: '$2' + - regex: '; {0,2}(FZ-A1B|JT-B1)(?: Build|\) AppleWebKit)' + device_replacement: 'Panasonic $1' + brand_replacement: 'Panasonic' + model_replacement: '$1' + - regex: '; {0,2}(dL1|DL1)(?: Build|\) AppleWebKit)' + device_replacement: 'Panasonic $1' + brand_replacement: 'Panasonic' + model_replacement: '$1' + - regex: '; {0,2}(SKY[ _]|)(IM\-[AT]\d{3}[^;/]{1,100}).{0,30} Build/' + device_replacement: 'Pantech $1$2' + brand_replacement: 'Pantech' + model_replacement: '$1$2' + - regex: '; {0,2}((?:ADR8995|ADR910L|ADR930L|ADR930VW|PTL21|P8000)(?: 4G|)) Build/' + device_replacement: '$1' + brand_replacement: 'Pantech' + model_replacement: '$1' + - regex: '; {0,2}Pantech([^;/]{1,30}).{0,200}? 
Build/' + device_replacement: 'Pantech $1' + brand_replacement: 'Pantech' + model_replacement: '$1' + - regex: '; {0,2}(papyre)[ _\-]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + regex_flag: 'i' + device_replacement: '$1 $2' + brand_replacement: 'Papyre' + model_replacement: '$2' + - regex: '; {0,2}(?:Touchlet )?(X10\.[^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: 'Pearl $1' + brand_replacement: 'Pearl' + model_replacement: '$1' + - regex: '; PHICOMM (i800)(?: Build|\) AppleWebKit)' + device_replacement: 'Phicomm $1' + brand_replacement: 'Phicomm' + model_replacement: '$1' + - regex: '; PHICOMM ([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: 'Phicomm $1' + brand_replacement: 'Phicomm' + model_replacement: '$1' + - regex: '; {0,2}(FWS\d{3}[^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: 'Phicomm $1' + brand_replacement: 'Phicomm' + model_replacement: '$1' + - regex: '; {0,2}(D633|D822|D833|T539|T939|V726|W335|W336|W337|W3568|W536|W5510|W626|W632|W6350|W6360|W6500|W732|W736|W737|W7376|W820|W832|W8355|W8500|W8510|W930)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Philips' + model_replacement: '$1' + - regex: '; {0,2}(?:Philips|PHILIPS)[ _]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: 'Philips $1' + brand_replacement: 'Philips' + model_replacement: '$1' + - regex: 'Android 4\..{0,200}; {0,2}(M[12356789]|U[12368]|S[123])\ ?(pro)?(?: Build|\) AppleWebKit)' + device_replacement: 'Pipo $1$2' + brand_replacement: 'Pipo' + model_replacement: '$1$2' + - regex: '; {0,2}(MOMO[^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Ployer' + model_replacement: '$1' + - regex: '; {0,2}(?:Polaroid[ _]|)((?:MIDC\d{3,}|PMID\d{2,}|PTAB\d{3,})[^;/]{0,30}?)(\/[^;/]{0,30}|)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Polaroid' + model_replacement: '$1' + - regex: '; {0,2}(?:Polaroid )(Tablet)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Polaroid' + model_replacement: '$1' + - regex: '; {0,2}(POMP)[ _\-](.{1,200}?) 
{0,2}(?:Build|[;/\)])' + device_replacement: '$1 $2' + brand_replacement: 'Pomp' + model_replacement: '$2' + - regex: '; {0,2}(TB07STA|TB10STA|TB07FTA|TB10FTA)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Positivo' + model_replacement: '$1' + - regex: '; {0,2}(?:Positivo |)((?:YPY|Ypy)[^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Positivo' + model_replacement: '$1' + - regex: '; {0,2}(MOB-[^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'POV' + model_replacement: '$1' + - regex: '; {0,2}POV[ _\-]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: 'POV $1' + brand_replacement: 'POV' + model_replacement: '$1' + - regex: '; {0,2}((?:TAB-PLAYTAB|TAB-PROTAB|PROTAB|PlayTabPro|Mobii[ _\-]|TAB-P)[^;/]*)(?: Build|\) AppleWebKit)' + device_replacement: 'POV $1' + brand_replacement: 'POV' + model_replacement: '$1' + - regex: '; {0,2}(?:Prestigio |)((?:PAP|PMP)\d[^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: 'Prestigio $1' + brand_replacement: 'Prestigio' + model_replacement: '$1' + - regex: '; {0,2}(PLT[0-9]{4}.{0,200}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Proscan' + model_replacement: '$1' + - regex: '; {0,2}(A2|A5|A8|A900)_?(Classic|)(?: Build|\) AppleWebKit)' + device_replacement: '$1 $2' + brand_replacement: 'Qmobile' + model_replacement: '$1 $2' + - regex: '; {0,2}(Q[Mm]obile)_([^_]+)_([^_]+?)(?: Build|\) AppleWebKit)' + device_replacement: 'Qmobile $2 $3' + brand_replacement: 'Qmobile' + model_replacement: '$2 $3' + - regex: '; {0,2}(Q\-?[Mm]obile)[_ ](A[^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: 'Qmobile $2' + brand_replacement: 'Qmobile' + model_replacement: '$2' + - regex: '; {0,2}(Q\-Smart)[ _]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1 $2' + brand_replacement: 'Qmobilevn' + model_replacement: '$2' + - regex: '; {0,2}(Q\-?[Mm]obile)[ _\-](S[^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1 $2' + brand_replacement: 'Qmobilevn' + model_replacement: '$2' + - regex: '; {0,2}(TA1013)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Quanta' + model_replacement: '$1' + - regex: '; (RCT\w+)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'RCA' + model_replacement: '$1' + - regex: '; RCA (\w+)(?: Build|\) AppleWebKit)' + device_replacement: 'RCA $1' + brand_replacement: 'RCA' + model_replacement: '$1' + - regex: '; {0,2}(RK\d+),?(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Rockchip' + model_replacement: '$1' + - regex: ' Build/(RK\d+)' + device_replacement: '$1' + brand_replacement: 'Rockchip' + model_replacement: '$1' + - regex: '; {0,2}(SAMSUNG |Samsung |)((?:Galaxy (?:Note II|S\d)|GT-I9082|GT-I9205|GT-N7\d{3}|SM-N9005)[^;/]{0,100})\/?[^;/]{0,50} Build/' + device_replacement: 'Samsung $1$2' + brand_replacement: 'Samsung' + model_replacement: '$2' + - regex: '; {0,2}(Google |)(Nexus [Ss](?: 4G|)) Build/' + device_replacement: 'Samsung $1$2' + brand_replacement: 'Samsung' + model_replacement: '$2' + - regex: '; {0,2}(SAMSUNG |Samsung )([^\/]{0,50})\/[^ ]{0,50} Build/' + device_replacement: 'Samsung $2' + brand_replacement: 'Samsung' + model_replacement: '$2' + - regex: '; {0,2}(Galaxy(?: Ace| Nexus| S ?II+|Nexus S| with MCR 1.2| Mini Plus 4G|)) Build/' + device_replacement: 'Samsung $1' + brand_replacement: 'Samsung' + model_replacement: '$1' + - regex: '; {0,2}(SAMSUNG[ 
_\-]|)(?:SAMSUNG[ _\-])([^;/]{1,100}) Build' + device_replacement: 'Samsung $2' + brand_replacement: 'Samsung' + model_replacement: '$2' + - regex: '; {0,2}(SAMSUNG-|)(GT\-[BINPS]\d{4}[^\/]{0,50})(\/[^ ]{0,50}) Build' + device_replacement: 'Samsung $1$2$3' + brand_replacement: 'Samsung' + model_replacement: '$2' + - regex: '(?:; {0,2}|^)((?:GT\-[BIiNPS]\d{4}|I9\d{2}0[A-Za-z\+]?\b)[^;/\)]*?)(?:Build|Linux|MIUI|[;/\)])' + device_replacement: 'Samsung $1' + brand_replacement: 'Samsung' + model_replacement: '$1' + - regex: '; (SAMSUNG-)([A-Za-z0-9\-]{0,50}).{0,200} Build/' + device_replacement: 'Samsung $1$2' + brand_replacement: 'Samsung' + model_replacement: '$2' + - regex: '; {0,2}((?:SCH|SGH|SHV|SHW|SPH|SC|SM)\-[A-Za-z0-9 ]{1,50})(/?[^ ]*|) Build' + device_replacement: 'Samsung $1' + brand_replacement: 'Samsung' + model_replacement: '$1' + - regex: '; {0,2}((?:SC)\-[A-Za-z0-9 ]{1,50})(/?[^ ]*|)\)' + device_replacement: 'Samsung $1' + brand_replacement: 'Samsung' + model_replacement: '$1' + - regex: ' ((?:SCH)\-[A-Za-z0-9 ]{1,50})(/?[^ ]*|) Build' + device_replacement: 'Samsung $1' + brand_replacement: 'Samsung' + model_replacement: '$1' + - regex: '; {0,2}(Behold ?(?:2|II)|YP\-G[^;/]{1,100}|EK-GC100|SCL21|I9300) Build' + device_replacement: 'Samsung $1' + brand_replacement: 'Samsung' + model_replacement: '$1' + - regex: '; {0,2}((?:SCH|SGH|SHV|SHW|SPH|SC|SM)\-[A-Za-z0-9]{5,6})[\)]' + device_replacement: 'Samsung $1' + brand_replacement: 'Samsung' + model_replacement: '$1' + - regex: '; {0,2}(SH\-?\d\d[^;/]{1,100}|SBM\d[^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Sharp' + model_replacement: '$1' + - regex: '; {0,2}(SHARP[ -])([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1$2' + brand_replacement: 'Sharp' + model_replacement: '$2' + - regex: '; {0,2}(SPX[_\-]\d[^;/]*)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Simvalley' + model_replacement: '$1' + - regex: '; {0,2}(SX7\-PEARL\.GmbH)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Simvalley' + model_replacement: '$1' + - regex: '; {0,2}(SP[T]?\-\d{2}[^;/]*)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Simvalley' + model_replacement: '$1' + - regex: '; {0,2}(SK\-.{0,200}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'SKtelesys' + model_replacement: '$1' + - regex: '; {0,2}(?:SKYTEX|SX)-([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Skytex' + model_replacement: '$1' + - regex: '; {0,2}(IMAGINE [^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Skytex' + model_replacement: '$1' + - regex: '; {0,2}(SmartQ) ?([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1 $2' + brand_replacement: '$1' + model_replacement: '$2' + - regex: '; {0,2}(WF7C|WF10C|SBT[^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Smartbitt' + model_replacement: '$1' + - regex: '; {0,2}(SBM(?:003SH|005SH|006SH|007SH|102SH)) Build' + device_replacement: '$1' + brand_replacement: 'Sharp' + model_replacement: '$1' + - regex: '; {0,2}(003P|101P|101P11C|102P) Build' + device_replacement: '$1' + brand_replacement: 'Panasonic' + model_replacement: '$1' + - regex: '; {0,2}(00\dZ) Build/' + device_replacement: '$1' + brand_replacement: 'ZTE' + model_replacement: '$1' + - regex: '; HTC(X06HT) Build' + device_replacement: '$1' + brand_replacement: 'HTC' + model_replacement: 
'$1' + - regex: '; {0,2}(001HT|X06HT) Build' + device_replacement: '$1' + brand_replacement: 'HTC' + model_replacement: '$1' + - regex: '; {0,2}(201M) Build' + device_replacement: '$1' + brand_replacement: 'Motorola' + model_replacement: 'XT902' + - regex: '; {0,2}(ST\d{4}.{0,200})Build/ST' + device_replacement: 'Trekstor $1' + brand_replacement: 'Trekstor' + model_replacement: '$1' + - regex: '; {0,2}(ST\d{4}.{0,200}?)(?: Build|\) AppleWebKit)' + device_replacement: 'Trekstor $1' + brand_replacement: 'Trekstor' + model_replacement: '$1' + - regex: '; {0,2}(Sony ?Ericsson ?)([^;/]{1,100}) Build' + device_replacement: '$1$2' + brand_replacement: 'SonyEricsson' + model_replacement: '$2' + - regex: '; {0,2}((?:SK|ST|E|X|LT|MK|MT|WT)\d{2}[a-z0-9]*(?:-o|)|R800i|U20i) Build' + device_replacement: '$1' + brand_replacement: 'SonyEricsson' + model_replacement: '$1' + - regex: '; {0,2}(Xperia (?:A8|Arc|Acro|Active|Live with Walkman|Mini|Neo|Play|Pro|Ray|X\d+)[^;/]{0,50}) Build' + regex_flag: 'i' + device_replacement: '$1' + brand_replacement: 'SonyEricsson' + model_replacement: '$1' + - regex: '; Sony (Tablet[^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: 'Sony $1' + brand_replacement: 'Sony' + model_replacement: '$1' + - regex: '; Sony ([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: 'Sony $1' + brand_replacement: 'Sony' + model_replacement: '$1' + - regex: '; {0,2}(Sony)([A-Za-z0-9\-]+)(?: Build|\) AppleWebKit)' + device_replacement: '$1 $2' + brand_replacement: '$1' + model_replacement: '$2' + - regex: '; {0,2}(Xperia [^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Sony' + model_replacement: '$1' + - regex: '; {0,2}(C(?:1[0-9]|2[0-9]|53|55|6[0-9])[0-9]{2}|D[25]\d{3}|D6[56]\d{2})(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Sony' + model_replacement: '$1' + - regex: '; {0,2}(SGP\d{3}|SGPT\d{2})(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Sony' + model_replacement: '$1' + - regex: '; {0,2}(NW-Z1000Series)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Sony' + model_replacement: '$1' + - regex: 'PLAYSTATION 3' + device_replacement: 'PlayStation 3' + brand_replacement: 'Sony' + model_replacement: 'PlayStation 3' + - regex: '(PlayStation (?:Portable|Vita|\d+))' + device_replacement: '$1' + brand_replacement: 'Sony' + model_replacement: '$1' + - regex: '; {0,2}((?:CSL_Spice|Spice|SPICE|CSL)[ _\-]?|)([Mm][Ii])([ _\-]|)(\d{3}[^;/]*)(?: Build|\) AppleWebKit)' + device_replacement: '$1$2$3$4' + brand_replacement: 'Spice' + model_replacement: 'Mi$4' + - regex: '; {0,2}(Sprint )(.{1,200}?) 
{0,2}(?:Build|[;/])' + device_replacement: '$1$2' + brand_replacement: 'Sprint' + model_replacement: '$2' + - regex: '\b(Sprint)[: ]([^;,/ ]+)' + device_replacement: '$1$2' + brand_replacement: 'Sprint' + model_replacement: '$2' + - regex: '; {0,2}(TAGI[ ]?)(MID) ?([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1$2$3' + brand_replacement: 'Tagi' + model_replacement: '$2$3' + - regex: '; {0,2}(Oyster500|Opal 800)(?: Build|\) AppleWebKit)' + device_replacement: 'Tecmobile $1' + brand_replacement: 'Tecmobile' + model_replacement: '$1' + - regex: '; {0,2}(TECNO[ _])([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1$2' + brand_replacement: 'Tecno' + model_replacement: '$2' + - regex: '; {0,2}Android for (Telechips|Techvision) ([^ ]+) ' + regex_flag: 'i' + device_replacement: '$1 $2' + brand_replacement: '$1' + model_replacement: '$2' + - regex: '; {0,2}(T-Hub2)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Telstra' + model_replacement: '$1' + - regex: '; {0,2}(PAD) ?(100[12])(?: Build|\) AppleWebKit)' + device_replacement: 'Terra $1$2' + brand_replacement: 'Terra' + model_replacement: '$1$2' + - regex: '; {0,2}(T[BM]-\d{3}[^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Texet' + model_replacement: '$1' + - regex: '; {0,2}(tolino [^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Thalia' + model_replacement: '$1' + - regex: '; {0,2}Build/.{0,200} (TOLINO_BROWSER)' + device_replacement: '$1' + brand_replacement: 'Thalia' + model_replacement: 'Tolino Shine' + - regex: '; {0,2}(?:CJ[ -])?(ThL|THL)[ -]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1 $2' + brand_replacement: 'Thl' + model_replacement: '$2' + - regex: '; {0,2}(T100|T200|T5|W100|W200|W8s)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Thl' + model_replacement: '$1' + - regex: '; {0,2}(T-Mobile[ _]G2[ _]Touch) Build' + device_replacement: '$1' + brand_replacement: 'HTC' + model_replacement: 'Hero' + - regex: '; {0,2}(T-Mobile[ _]G2) Build' + device_replacement: '$1' + brand_replacement: 'HTC' + model_replacement: 'Desire Z' + - regex: '; {0,2}(T-Mobile myTouch Q) Build' + device_replacement: '$1' + brand_replacement: 'Huawei' + model_replacement: 'U8730' + - regex: '; {0,2}(T-Mobile myTouch) Build' + device_replacement: '$1' + brand_replacement: 'Huawei' + model_replacement: 'U8680' + - regex: '; {0,2}(T-Mobile_Espresso) Build' + device_replacement: '$1' + brand_replacement: 'HTC' + model_replacement: 'Espresso' + - regex: '; {0,2}(T-Mobile G1) Build' + device_replacement: '$1' + brand_replacement: 'HTC' + model_replacement: 'Dream' + - regex: '\b(T-Mobile ?|)(myTouch)[ _]?([34]G)[ _]?([^\/]*) (?:Mozilla|Build)' + device_replacement: '$1$2 $3 $4' + brand_replacement: 'HTC' + model_replacement: '$2 $3 $4' + - regex: '\b(T-Mobile)_([^_]+)_(.{0,200}) Build' + device_replacement: '$1 $2 $3' + brand_replacement: 'Tmobile' + model_replacement: '$2 $3' + - regex: '\b(T-Mobile)[_ ]?(.{0,200}?)Build' + device_replacement: '$1 $2' + brand_replacement: 'Tmobile' + model_replacement: '$2' + - regex: ' (ATP[0-9]{4})(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Tomtec' + model_replacement: '$1' + - regex: ' ?(TOOKY)[ _\-]([^;/]{1,100}) ?(?:Build|;)' + regex_flag: 'i' + device_replacement: '$1 $2' + brand_replacement: 'Tooky' + model_replacement: '$2' + - regex: '\b(TOSHIBA_AC_AND_AZ|TOSHIBA_FOLIO_AND_A|FOLIO_AND_A)' + 
device_replacement: '$1' + brand_replacement: 'Toshiba' + model_replacement: 'Folio 100' + - regex: '; {0,2}([Ff]olio ?100)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Toshiba' + model_replacement: 'Folio 100' + - regex: '; {0,2}(AT[0-9]{2,3}(?:\-A|LE\-A|PE\-A|SE|a|)|AT7-A|AT1S0|Hikari-iFrame/WDPF-[^;/]{1,100}|THRiVE|Thrive)(?: Build|\) AppleWebKit)' + device_replacement: 'Toshiba $1' + brand_replacement: 'Toshiba' + model_replacement: '$1' + - regex: '; {0,2}(TM-MID\d+[^;/]{1,50}|TOUCHMATE|MID-750)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Touchmate' + model_replacement: '$1' + - regex: '; {0,2}(TM-SM\d+[^;/]{1,50}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Touchmate' + model_replacement: '$1' + - regex: '; {0,2}(A10 [Bb]asic2?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Treq' + model_replacement: '$1' + - regex: '; {0,2}(TREQ[ _\-])([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + regex_flag: 'i' + device_replacement: '$1$2' + brand_replacement: 'Treq' + model_replacement: '$2' + - regex: '; {0,2}(X-?5|X-?3)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Umeox' + model_replacement: '$1' + - regex: '; {0,2}(A502\+?|A936|A603|X1|X2)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Umeox' + model_replacement: '$1' + - regex: '; thor Build/' + device_replacement: 'Thor' + brand_replacement: 'Vernee' + model_replacement: 'Thor' + - regex: '; Thor (E)? Build/' + device_replacement: 'Thor $1' + brand_replacement: 'Vernee' + model_replacement: 'Thor' + - regex: '; Apollo Lite Build/' + device_replacement: 'Apollo Lite' + brand_replacement: 'Vernee' + model_replacement: 'Apollo' + - regex: '(TOUCH(?:TAB|PAD).{1,200}?)(?: Build|\) AppleWebKit)' + regex_flag: 'i' + device_replacement: 'Versus $1' + brand_replacement: 'Versus' + model_replacement: '$1' + - regex: '(VERTU) ([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1 $2' + brand_replacement: 'Vertu' + model_replacement: '$2' + - regex: '; {0,2}(Videocon)[ _\-]([^;/]{1,100}?) 
{0,2}(?:Build|;)' + device_replacement: '$1 $2' + brand_replacement: 'Videocon' + model_replacement: '$2' + - regex: ' (VT\d{2}[A-Za-z]*)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Videocon' + model_replacement: '$1' + - regex: '; {0,2}((?:ViewPad|ViewPhone|VSD)[^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Viewsonic' + model_replacement: '$1' + - regex: '; {0,2}(ViewSonic-)([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1$2' + brand_replacement: 'Viewsonic' + model_replacement: '$2' + - regex: '; {0,2}(GTablet.{0,200}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Viewsonic' + model_replacement: '$1' + - regex: '; {0,2}([Vv]ivo)[ _]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1 $2' + brand_replacement: 'vivo' + model_replacement: '$2' + - regex: '(Vodafone) (.{0,200}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1 $2' + brand_replacement: '$1' + model_replacement: '$2' + - regex: '; {0,2}(?:Walton[ _\-]|)(Primo[ _\-][^;/]{1,100}?)(?: Build|\) AppleWebKit)' + regex_flag: 'i' + device_replacement: 'Walton $1' + brand_replacement: 'Walton' + model_replacement: '$1' + - regex: '; {0,2}(?:WIKO[ \-]|)(CINK\+?|BARRY|BLOOM|DARKFULL|DARKMOON|DARKNIGHT|DARKSIDE|FIZZ|HIGHWAY|IGGY|OZZY|RAINBOW|STAIRWAY|SUBLIM|WAX|CINK [^;/]{1,100}?)(?: Build|\) AppleWebKit)' + regex_flag: 'i' + device_replacement: 'Wiko $1' + brand_replacement: 'Wiko' + model_replacement: '$1' + - regex: '; {0,2}WellcoM-([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: 'Wellcom $1' + brand_replacement: 'Wellcom' + model_replacement: '$1' + - regex: '(?:(WeTab)-Browser|; (wetab) Build)' + device_replacement: '$1' + brand_replacement: 'WeTab' + model_replacement: 'WeTab' + - regex: '; {0,2}(AT-AS[^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: 'Wolfgang $1' + brand_replacement: 'Wolfgang' + model_replacement: '$1' + - regex: '; {0,2}(?:Woxter|Wxt) ([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: 'Woxter $1' + brand_replacement: 'Woxter' + model_replacement: '$1' + - regex: '; {0,2}(?:Xenta |Luna |)(TAB[234][0-9]{2}|TAB0[78]-\d{3}|TAB0?9-\d{3}|TAB1[03]-\d{3}|SMP\d{2}-\d{3})(?: Build|\) AppleWebKit)' + device_replacement: 'Yarvik $1' + brand_replacement: 'Yarvik' + model_replacement: '$1' + - regex: '; {0,2}([A-Z]{2,4})(M\d{3,}[A-Z]{2})([^;\)\/]*)(?: Build|[;\)])' + device_replacement: 'Yifang $1$2$3' + brand_replacement: 'Yifang' + model_replacement: '$2' + - regex: '; {0,2}((Mi|MI|HM|MI-ONE|Redmi)[ -](NOTE |Note |)[^;/]*) (Build|MIUI)/' + device_replacement: 'XiaoMi $1' + brand_replacement: 'XiaoMi' + model_replacement: '$1' + - regex: '; {0,2}((Mi|MI|HM|MI-ONE|Redmi)[ -](NOTE |Note |)[^;/\)]*)' + device_replacement: 'XiaoMi $1' + brand_replacement: 'XiaoMi' + model_replacement: '$1' + - regex: '; {0,2}(MIX) (Build|MIUI)/' + device_replacement: 'XiaoMi $1' + brand_replacement: 'XiaoMi' + model_replacement: '$1' + - regex: '; {0,2}((MIX) ([^;/]*)) (Build|MIUI)/' + device_replacement: 'XiaoMi $1' + brand_replacement: 'XiaoMi' + model_replacement: '$1' + - regex: '; {0,2}XOLO[ _]([^;/]{0,30}tab.{0,30})(?: Build|\) AppleWebKit)' + regex_flag: 'i' + device_replacement: 'Xolo $1' + brand_replacement: 'Xolo' + model_replacement: '$1' + - regex: '; {0,2}XOLO[ _]([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + regex_flag: 'i' + device_replacement: 'Xolo $1' + brand_replacement: 'Xolo' + model_replacement: '$1' + - regex: '; {0,2}(q\d0{2,3}[a-z]?)(?: Build|\) 
AppleWebKit)' + regex_flag: 'i' + device_replacement: 'Xolo $1' + brand_replacement: 'Xolo' + model_replacement: '$1' + - regex: '; {0,2}(PAD ?[79]\d+[^;/]{0,50}|TelePAD\d+[^;/])(?: Build|\) AppleWebKit)' + device_replacement: 'Xoro $1' + brand_replacement: 'Xoro' + model_replacement: '$1' + - regex: '; {0,2}(?:(?:ZOPO|Zopo)[ _]([^;/]{1,100}?)|(ZP ?(?:\d{2}[^;/]{1,100}|C2))|(C[2379]))(?: Build|\) AppleWebKit)' + device_replacement: '$1$2$3' + brand_replacement: 'Zopo' + model_replacement: '$1$2$3' + - regex: '; {0,2}(ZiiLABS) (Zii[^;/]*)(?: Build|\) AppleWebKit)' + device_replacement: '$1 $2' + brand_replacement: 'ZiiLabs' + model_replacement: '$2' + - regex: '; {0,2}(Zii)_([^;/]*)(?: Build|\) AppleWebKit)' + device_replacement: '$1 $2' + brand_replacement: 'ZiiLabs' + model_replacement: '$2' + - regex: '; {0,2}(ARIZONA|(?:ATLAS|Atlas) W|D930|Grand (?:[SX][^;]{0,200}?|Era|Memo[^;]{0,200}?)|JOE|(?:Kis|KIS)\b[^;]{0,200}?|Libra|Light [^;]{0,200}?|N8[056][01]|N850L|N8000|N9[15]\d{2}|N9810|NX501|Optik|(?:Vip )Racer[^;]{0,200}?|RacerII|RACERII|San Francisco[^;]{0,200}?|V9[AC]|V55|V881|Z[679][0-9]{2}[A-z]?)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'ZTE' + model_replacement: '$1' + - regex: '; {0,2}([A-Z]\d+)_USA_[^;]{0,200}(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'ZTE' + model_replacement: '$1' + - regex: '; {0,2}(SmartTab\d+)[^;]{0,50}(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'ZTE' + model_replacement: '$1' + - regex: '; {0,2}(?:Blade|BLADE|ZTE-BLADE)([^;/]*)(?: Build|\) AppleWebKit)' + device_replacement: 'ZTE Blade$1' + brand_replacement: 'ZTE' + model_replacement: 'Blade$1' + - regex: '; {0,2}(?:Skate|SKATE|ZTE-SKATE)([^;/]*)(?: Build|\) AppleWebKit)' + device_replacement: 'ZTE Skate$1' + brand_replacement: 'ZTE' + model_replacement: 'Skate$1' + - regex: '; {0,2}(Orange |Optimus )(Monte Carlo|San Francisco)(?: Build|\) AppleWebKit)' + device_replacement: '$1$2' + brand_replacement: 'ZTE' + model_replacement: '$1$2' + - regex: '; {0,2}(?:ZXY-ZTE_|ZTE\-U |ZTE[\- _]|ZTE-C[_ ])([^;/]{1,100}?)(?: Build|\) AppleWebKit)' + device_replacement: 'ZTE $1' + brand_replacement: 'ZTE' + model_replacement: '$1' + - regex: '; (BASE) (lutea|Lutea 2|Tab[^;]{0,200}?)(?: Build|\) AppleWebKit)' + device_replacement: '$1 $2' + brand_replacement: 'ZTE' + model_replacement: '$1 $2' + - regex: '; (Avea inTouch 2|soft stone|tmn smart a7|Movistar[ _]Link)(?: Build|\) AppleWebKit)' + regex_flag: 'i' + device_replacement: '$1' + brand_replacement: 'ZTE' + model_replacement: '$1' + - regex: '; {0,2}(vp9plus)\)' + device_replacement: '$1' + brand_replacement: 'ZTE' + model_replacement: '$1' + - regex: '; ?(Cloud[ _]Z5|z1000|Z99 2G|z99|z930|z999|z990|z909|Z919|z900)(?: Build|\) AppleWebKit)' + device_replacement: '$1' + brand_replacement: 'Zync' + model_replacement: '$1' + - regex: '; ?(KFOT|Kindle Fire) Build\b' + device_replacement: 'Kindle Fire' + brand_replacement: 'Amazon' + model_replacement: 'Kindle Fire' + - regex: '; ?(KFOTE|Amazon Kindle Fire2) Build\b' + device_replacement: 'Kindle Fire 2' + brand_replacement: 'Amazon' + model_replacement: 'Kindle Fire 2' + - regex: '; ?(KFTT) Build\b' + device_replacement: 'Kindle Fire HD' + brand_replacement: 'Amazon' + model_replacement: 'Kindle Fire HD 7"' + - regex: '; ?(KFJWI) Build\b' + device_replacement: 'Kindle Fire HD 8.9" WiFi' + brand_replacement: 'Amazon' + model_replacement: 'Kindle Fire HD 8.9" WiFi' + - regex: '; ?(KFJWA) Build\b' + device_replacement: 'Kindle Fire 
HD 8.9" 4G' + brand_replacement: 'Amazon' + model_replacement: 'Kindle Fire HD 8.9" 4G' + - regex: '; ?(KFSOWI) Build\b' + device_replacement: 'Kindle Fire HD 7" WiFi' + brand_replacement: 'Amazon' + model_replacement: 'Kindle Fire HD 7" WiFi' + - regex: '; ?(KFTHWI) Build\b' + device_replacement: 'Kindle Fire HDX 7" WiFi' + brand_replacement: 'Amazon' + model_replacement: 'Kindle Fire HDX 7" WiFi' + - regex: '; ?(KFTHWA) Build\b' + device_replacement: 'Kindle Fire HDX 7" 4G' + brand_replacement: 'Amazon' + model_replacement: 'Kindle Fire HDX 7" 4G' + - regex: '; ?(KFAPWI) Build\b' + device_replacement: 'Kindle Fire HDX 8.9" WiFi' + brand_replacement: 'Amazon' + model_replacement: 'Kindle Fire HDX 8.9" WiFi' + - regex: '; ?(KFAPWA) Build\b' + device_replacement: 'Kindle Fire HDX 8.9" 4G' + brand_replacement: 'Amazon' + model_replacement: 'Kindle Fire HDX 8.9" 4G' + - regex: '; ?Amazon ([^;/]{1,100}) Build\b' + device_replacement: '$1' + brand_replacement: 'Amazon' + model_replacement: '$1' + - regex: '; ?(Kindle) Build\b' + device_replacement: 'Kindle' + brand_replacement: 'Amazon' + model_replacement: 'Kindle' + - regex: '; ?(Silk)/(\d+)\.(\d+)(?:\.([0-9\-]+)|) Build\b' + device_replacement: 'Kindle Fire' + brand_replacement: 'Amazon' + model_replacement: 'Kindle Fire$2' + - regex: ' (Kindle)/(\d+\.\d+)' + device_replacement: 'Kindle' + brand_replacement: 'Amazon' + model_replacement: '$1 $2' + - regex: ' (Silk|Kindle)/(\d+)\.' + device_replacement: 'Kindle' + brand_replacement: 'Amazon' + model_replacement: 'Kindle' + - regex: '(sprd)\-([^/]{1,50})/' + device_replacement: '$1 $2' + brand_replacement: '$1' + model_replacement: '$2' + - regex: '; {0,2}(H\d{2}00\+?) Build' + device_replacement: '$1' + brand_replacement: 'Hero' + model_replacement: '$1' + - regex: '; {0,2}(iphone|iPhone5) Build/' + device_replacement: 'Xianghe $1' + brand_replacement: 'Xianghe' + model_replacement: '$1' + - regex: '; {0,2}(e\d{4}[a-z]?_?v\d+|v89_[^;/]{1,100})[^;/]{1,30} Build/' + device_replacement: 'Xianghe $1' + brand_replacement: 'Xianghe' + model_replacement: '$1' + - regex: '\bUSCC[_\-]?([^ ;/\)]+)' + device_replacement: '$1' + brand_replacement: 'Cellular' + model_replacement: '$1' + - regex: 'Windows Phone [^;]{1,30}; .{0,100}?IEMobile/[^;\)]+[;\)] ?(?:ARM; ?Touch; ?|Touch; ?|)(?:ALCATEL)[^;]{0,200}; {0,2}([^;,\)]+)' + device_replacement: 'Alcatel $1' + brand_replacement: 'Alcatel' + model_replacement: '$1' + - regex: 'Windows Phone [^;]{1,30}; .{0,100}?IEMobile/[^;\)]+[;\)] ?(?:ARM; ?Touch; ?|Touch; ?|WpsLondonTest; ?|)(?:ASUS|Asus)[^;]{0,200}; {0,2}([^;,\)]+)' + device_replacement: 'Asus $1' + brand_replacement: 'Asus' + model_replacement: '$1' + - regex: 'Windows Phone [^;]{1,30}; .{0,100}?IEMobile/[^;\)]+[;\)] ?(?:ARM; ?Touch; ?|Touch; ?|)(?:DELL|Dell)[^;]{0,200}; {0,2}([^;,\)]+)' + device_replacement: 'Dell $1' + brand_replacement: 'Dell' + model_replacement: '$1' + - regex: 'Windows Phone [^;]{1,30}; .{0,100}?IEMobile/[^;\)]+[;\)] ?(?:ARM; ?Touch; ?|Touch; ?|WpsLondonTest; ?|)(?:HTC|Htc|HTC_blocked[^;]{0,200})[^;]{0,200}; {0,2}(?:HTC|)([^;,\)]+)' + device_replacement: 'HTC $1' + brand_replacement: 'HTC' + model_replacement: '$1' + - regex: 'Windows Phone [^;]{1,30}; .{0,100}?IEMobile/[^;\)]+[;\)] ?(?:ARM; ?Touch; ?|Touch; ?|)(?:HUAWEI)[^;]{0,200}; {0,2}(?:HUAWEI |)([^;,\)]+)' + device_replacement: 'Huawei $1' + brand_replacement: 'Huawei' + model_replacement: '$1' + - regex: 'Windows Phone [^;]{1,30}; .{0,100}?IEMobile/[^;\)]+[;\)] ?(?:ARM; ?Touch; ?|Touch; ?|)(?:LG|Lg)[^;]{0,200}; {0,2}(?:LG[ 
\-]|)([^;,\)]+)' + device_replacement: 'LG $1' + brand_replacement: 'LG' + model_replacement: '$1' + - regex: 'Windows Phone [^;]{1,30}; .{0,100}?IEMobile/[^;\)]+[;\)] ?(?:ARM; ?Touch; ?|Touch; ?|)(?:rv:11; |)(?:NOKIA|Nokia)[^;]{0,200}; {0,2}(?:NOKIA ?|Nokia ?|LUMIA ?|[Ll]umia ?|)(\d{3,10}[^;\)]*)' + device_replacement: 'Lumia $1' + brand_replacement: 'Nokia' + model_replacement: 'Lumia $1' + - regex: 'Windows Phone [^;]{1,30}; .{0,100}?IEMobile/[^;\)]+[;\)] ?(?:ARM; ?Touch; ?|Touch; ?|)(?:NOKIA|Nokia)[^;]{0,200}; {0,2}(RM-\d{3,})' + device_replacement: 'Nokia $1' + brand_replacement: 'Nokia' + model_replacement: '$1' + - regex: '(?:Windows Phone [^;]{1,30}; .{0,100}?IEMobile/[^;\)]+[;\)]|WPDesktop;) ?(?:ARM; ?Touch; ?|Touch; ?|)(?:NOKIA|Nokia)[^;]{0,200}; {0,2}(?:NOKIA ?|Nokia ?|LUMIA ?|[Ll]umia ?|)([^;\)]+)' + device_replacement: 'Nokia $1' + brand_replacement: 'Nokia' + model_replacement: '$1' + - regex: 'Windows Phone [^;]{1,30}; .{0,100}?IEMobile/[^;\)]+[;\)] ?(?:ARM; ?Touch; ?|Touch; ?|)(?:Microsoft(?: Corporation|))[^;]{0,200}; {0,2}([^;,\)]+)' + device_replacement: 'Microsoft $1' + brand_replacement: 'Microsoft' + model_replacement: '$1' + - regex: 'Windows Phone [^;]{1,30}; .{0,100}?IEMobile/[^;\)]+[;\)] ?(?:ARM; ?Touch; ?|Touch; ?|WpsLondonTest; ?|)(?:SAMSUNG)[^;]{0,200}; {0,2}(?:SAMSUNG |)([^;,\.\)]+)' + device_replacement: 'Samsung $1' + brand_replacement: 'Samsung' + model_replacement: '$1' + - regex: 'Windows Phone [^;]{1,30}; .{0,100}?IEMobile/[^;\)]+[;\)] ?(?:ARM; ?Touch; ?|Touch; ?|WpsLondonTest; ?|)(?:TOSHIBA|FujitsuToshibaMobileCommun)[^;]{0,200}; {0,2}([^;,\)]+)' + device_replacement: 'Toshiba $1' + brand_replacement: 'Toshiba' + model_replacement: '$1' + - regex: 'Windows Phone [^;]{1,30}; .{0,100}?IEMobile/[^;\)]+[;\)] ?(?:ARM; ?Touch; ?|Touch; ?|WpsLondonTest; ?|)([^;]{1,200}); {0,2}([^;,\)]+)' + device_replacement: '$1 $2' + brand_replacement: '$1' + model_replacement: '$2' + - regex: '(?:^|; )SAMSUNG\-([A-Za-z0-9\-]{1,50}).{0,200} Bada/' + device_replacement: 'Samsung $1' + brand_replacement: 'Samsung' + model_replacement: '$1' + - regex: '\(Mobile; ALCATEL ?(One|ONE) ?(Touch|TOUCH) ?([^;/]{1,100}?)(?:/[^;]{1,200}|); rv:[^\)]{1,200}\) Gecko/[^\/]{1,200} Firefox/' + device_replacement: 'Alcatel $1 $2 $3' + brand_replacement: 'Alcatel' + model_replacement: 'One Touch $3' + - regex: '\(Mobile; (?:ZTE([^;]{1,200})|(OpenC)); rv:[^\)]{1,200}\) Gecko/[^\/]{1,200} Firefox/' + device_replacement: 'ZTE $1$2' + brand_replacement: 'ZTE' + model_replacement: '$1$2' + - regex: '\(Mobile; ALCATEL([A-Za-z0-9\-]+); rv:[^\)]{1,200}\) Gecko/[^\/]{1,200} Firefox/[^\/]{1,200} KaiOS/' + device_replacement: 'Alcatel $1' + brand_replacement: 'Alcatel' + model_replacement: '$1' + - regex: '\(Mobile; LYF\/([A-Za-z0-9\-]{1,100})\/.{0,100};.{0,100}rv:[^\)]{1,100}\) Gecko/[^\/]{1,100} Firefox/[^\/]{1,100} KAIOS/' + device_replacement: 'LYF $1' + brand_replacement: 'LYF' + model_replacement: '$1' + - regex: '\(Mobile; Nokia_([A-Za-z0-9\-]{1,100})_.{1,100}; rv:[^\)]{1,100}\) Gecko/[^\/]{1,100} Firefox/[^\/]{1,100} KAIOS/' + device_replacement: 'Nokia $1' + brand_replacement: 'Nokia' + model_replacement: '$1' + - regex: 'Nokia(N[0-9]+)([A-Za-z_\-][A-Za-z0-9_\-]*)' + device_replacement: 'Nokia $1' + brand_replacement: 'Nokia' + model_replacement: '$1$2' + - regex: '(?:NOKIA|Nokia)(?:\-| {0,2})(?:([A-Za-z0-9]+)\-[0-9a-f]{32}|([A-Za-z0-9\-]+)(?:UCBrowser)|([A-Za-z0-9\-]+))' + device_replacement: 'Nokia $1$2$3' + brand_replacement: 'Nokia' + model_replacement: '$1$2$3' + - regex: 'Lumia 
([A-Za-z0-9\-]+)' + device_replacement: 'Lumia $1' + brand_replacement: 'Nokia' + model_replacement: 'Lumia $1' + - regex: '\(Symbian; U; S60 V5; [A-z]{2}\-[A-z]{2}; (SonyEricsson|Samsung|Nokia|LG)([^;/]{1,100}?)\)' + device_replacement: '$1 $2' + brand_replacement: '$1' + model_replacement: '$2' + - regex: '\(Symbian(?:/3|); U; ([^;]{1,200});' + device_replacement: 'Nokia $1' + brand_replacement: 'Nokia' + model_replacement: '$1' + - regex: 'BB10; ([A-Za-z0-9\- ]+)\)' + device_replacement: 'BlackBerry $1' + brand_replacement: 'BlackBerry' + model_replacement: '$1' + - regex: 'Play[Bb]ook.{1,200}RIM Tablet OS' + device_replacement: 'BlackBerry Playbook' + brand_replacement: 'BlackBerry' + model_replacement: 'Playbook' + - regex: 'Black[Bb]erry ([0-9]+);' + device_replacement: 'BlackBerry $1' + brand_replacement: 'BlackBerry' + model_replacement: '$1' + - regex: 'Black[Bb]erry([0-9]+)' + device_replacement: 'BlackBerry $1' + brand_replacement: 'BlackBerry' + model_replacement: '$1' + - regex: 'Black[Bb]erry;' + device_replacement: 'BlackBerry' + brand_replacement: 'BlackBerry' + - regex: '(Pre|Pixi)/\d+\.\d+' + device_replacement: 'Palm $1' + brand_replacement: 'Palm' + model_replacement: '$1' + - regex: 'Palm([0-9]+)' + device_replacement: 'Palm $1' + brand_replacement: 'Palm' + model_replacement: '$1' + - regex: 'Treo([A-Za-z0-9]+)' + device_replacement: 'Palm Treo $1' + brand_replacement: 'Palm' + model_replacement: 'Treo $1' + - regex: 'webOS.{0,200}(P160U(?:NA|))/(\d+).(\d+)' + device_replacement: 'HP Veer' + brand_replacement: 'HP' + model_replacement: 'Veer' + - regex: '(Touch[Pp]ad)/\d+\.\d+' + device_replacement: 'HP TouchPad' + brand_replacement: 'HP' + model_replacement: 'TouchPad' + - regex: 'HPiPAQ([A-Za-z0-9]{1,20})/\d+\.\d+' + device_replacement: 'HP iPAQ $1' + brand_replacement: 'HP' + model_replacement: 'iPAQ $1' + - regex: 'PDA; (PalmOS)/sony/model ([a-z]+)/Revision' + device_replacement: '$1' + brand_replacement: 'Sony' + model_replacement: '$1 $2' + - regex: '(Apple\s?TV)' + device_replacement: 'AppleTV' + brand_replacement: 'Apple' + model_replacement: 'AppleTV' + - regex: '(QtCarBrowser)' + device_replacement: 'Tesla Model S' + brand_replacement: 'Tesla' + model_replacement: 'Model S' + - regex: '(iPhone|iPad|iPod)(\d+,\d+)' + device_replacement: '$1' + brand_replacement: 'Apple' + model_replacement: '$1$2' + - regex: '(iPad)(?:;| Simulator;)' + device_replacement: '$1' + brand_replacement: 'Apple' + model_replacement: '$1' + - regex: '(iPod)(?:;| touch;| Simulator;)' + device_replacement: '$1' + brand_replacement: 'Apple' + model_replacement: '$1' + - regex: '(iPhone)(?:;| Simulator;)' + device_replacement: '$1' + brand_replacement: 'Apple' + model_replacement: '$1' + - regex: '(Watch)(\d+,\d+)' + device_replacement: 'Apple $1' + brand_replacement: 'Apple' + model_replacement: '$1$2' + - regex: '(Apple Watch)(?:;| Simulator;)' + device_replacement: '$1' + brand_replacement: 'Apple' + model_replacement: '$1' + - regex: '(HomePod)(?:;| Simulator;)' + device_replacement: '$1' + brand_replacement: 'Apple' + model_replacement: '$1' + - regex: 'iPhone' + device_replacement: 'iPhone' + brand_replacement: 'Apple' + model_replacement: 'iPhone' + - regex: 'CFNetwork/.{0,100} Darwin/\d.{0,100}\(((?:Mac|iMac|PowerMac|PowerBook)[^\d]*)(\d+)(?:,|%2C)(\d+)' + device_replacement: '$1$2,$3' + brand_replacement: 'Apple' + model_replacement: '$1$2,$3' + - regex: 'CFNetwork/.{0,100} Darwin/\d+\.\d+\.\d+ \(x86_64\)' + device_replacement: 'Mac' + brand_replacement: 'Apple' + 
model_replacement: 'Mac' + - regex: 'CFNetwork/.{0,100} Darwin/\d' + device_replacement: 'iOS-Device' + brand_replacement: 'Apple' + model_replacement: 'iOS-Device' + - regex: 'Outlook-(iOS)/\d+\.\d+\.prod\.iphone' + brand_replacement: 'Apple' + device_replacement: 'iPhone' + model_replacement: 'iPhone' + - regex: 'acer_([A-Za-z0-9]+)_' + device_replacement: 'Acer $1' + brand_replacement: 'Acer' + model_replacement: '$1' + - regex: '(?:ALCATEL|Alcatel)-([A-Za-z0-9\-]+)' + device_replacement: 'Alcatel $1' + brand_replacement: 'Alcatel' + model_replacement: '$1' + - regex: '(?:Amoi|AMOI)\-([A-Za-z0-9]+)' + device_replacement: 'Amoi $1' + brand_replacement: 'Amoi' + model_replacement: '$1' + - regex: '(?:; |\/|^)((?:Transformer (?:Pad|Prime) |Transformer |PadFone[ _]?)[A-Za-z0-9]*)' + device_replacement: 'Asus $1' + brand_replacement: 'Asus' + model_replacement: '$1' + - regex: '(?:asus.{0,200}?ASUS|Asus|ASUS|asus)[\- ;]*((?:Transformer (?:Pad|Prime) |Transformer |Padfone |Nexus[ _]|)[A-Za-z0-9]+)' + device_replacement: 'Asus $1' + brand_replacement: 'Asus' + model_replacement: '$1' + - regex: '(?:ASUS)_([A-Za-z0-9\-]+)' + device_replacement: 'Asus $1' + brand_replacement: 'Asus' + model_replacement: '$1' + - regex: '\bBIRD[ \-\.]([A-Za-z0-9]+)' + device_replacement: 'Bird $1' + brand_replacement: 'Bird' + model_replacement: '$1' + - regex: '\bDell ([A-Za-z0-9]+)' + device_replacement: 'Dell $1' + brand_replacement: 'Dell' + model_replacement: '$1' + - regex: 'DoCoMo/2\.0 ([A-Za-z0-9]+)' + device_replacement: 'DoCoMo $1' + brand_replacement: 'DoCoMo' + model_replacement: '$1' + - regex: '^.{0,50}?([A-Za-z0-9]{1,30})_W;FOMA' + device_replacement: 'DoCoMo $1' + brand_replacement: 'DoCoMo' + model_replacement: '$1' + - regex: '^.{0,50}?([A-Za-z0-9]{1,30});FOMA' + device_replacement: 'DoCoMo $1' + brand_replacement: 'DoCoMo' + model_replacement: '$1' + - regex: '\b(?:HTC/|HTC/[a-z0-9]{1,20}/|)HTC[ _\-;]? {0,2}(.{0,200}?)(?:-?Mozilla|fingerPrint|[;/\(\)]|$)' + device_replacement: 'HTC $1' + brand_replacement: 'HTC' + model_replacement: '$1' + - regex: 'Huawei([A-Za-z0-9]+)' + device_replacement: 'Huawei $1' + brand_replacement: 'Huawei' + model_replacement: '$1' + - regex: 'HUAWEI-([A-Za-z0-9]+)' + device_replacement: 'Huawei $1' + brand_replacement: 'Huawei' + model_replacement: '$1' + - regex: 'HUAWEI ([A-Za-z0-9\-]+)' + device_replacement: 'Huawei $1' + brand_replacement: 'Huawei' + model_replacement: '$1' + - regex: 'vodafone([A-Za-z0-9]+)' + device_replacement: 'Huawei Vodafone $1' + brand_replacement: 'Huawei' + model_replacement: 'Vodafone $1' + - regex: 'i\-mate ([A-Za-z0-9]+)' + device_replacement: 'i-mate $1' + brand_replacement: 'i-mate' + model_replacement: '$1' + - regex: 'Kyocera\-([A-Za-z0-9]+)' + device_replacement: 'Kyocera $1' + brand_replacement: 'Kyocera' + model_replacement: '$1' + - regex: 'KWC\-([A-Za-z0-9]+)' + device_replacement: 'Kyocera $1' + brand_replacement: 'Kyocera' + model_replacement: '$1' + - regex: 'Lenovo[_\-]([A-Za-z0-9]+)' + device_replacement: 'Lenovo $1' + brand_replacement: 'Lenovo' + model_replacement: '$1' + - regex: '(HbbTV)/[0-9]+\.[0-9]+\.[0-9]+ \( ?;(LG)E ?;([^;]{0,30})' + device_replacement: '$1' + brand_replacement: '$2' + model_replacement: '$3' + - regex: '(HbbTV)/1\.1\.1.{0,200}CE-HTML/1\.\d;(Vendor/|)(THOM[^;]{0,200}?)[;\s].{0,30}(LF[^;]{1,200});?' 
+ device_replacement: '$1' + brand_replacement: 'Thomson' + model_replacement: '$4' + - regex: '(HbbTV)(?:/1\.1\.1|) ?(?: \(;;;;;\)|); {0,2}CE-HTML(?:/1\.\d|); {0,2}([^ ]{1,30}) ([^;]{1,200});' + device_replacement: '$1' + brand_replacement: '$2' + model_replacement: '$3' + - regex: '(HbbTV)/1\.1\.1 \(;;;;;\) Maple_2011' + device_replacement: '$1' + brand_replacement: 'Samsung' + - regex: '(HbbTV)/[0-9]+\.[0-9]+\.[0-9]+ \([^;]{0,30}; ?(?:CUS:([^;]{0,200})|([^;]{1,200})) ?; ?([^;]{0,30})' + device_replacement: '$1' + brand_replacement: '$2$3' + model_replacement: '$4' + - regex: '(HbbTV)/[0-9]+\.[0-9]+\.[0-9]+' + device_replacement: '$1' + - regex: 'LGE; (?:Media\/|)([^;]{0,200});[^;]{0,200};[^;]{0,200};?\); "?LG NetCast(\.TV|\.Media|)-\d+' + device_replacement: 'NetCast$2' + brand_replacement: 'LG' + model_replacement: '$1' + - regex: 'InettvBrowser/[0-9]{1,30}\.[0-9A-Z]{1,30} \([^;]{0,200};(Sony)([^;]{0,200});[^;]{0,200};[^\)]{0,10}\)' + device_replacement: 'Inettv' + brand_replacement: '$1' + model_replacement: '$2' + - regex: 'InettvBrowser/[0-9]{1,30}\.[0-9A-Z]{1,30} \([^;]{0,200};([^;]{0,200});[^;]{0,200};[^\)]{0,10}\)' + device_replacement: 'Inettv' + brand_replacement: 'Generic_Inettv' + model_replacement: '$1' + - regex: '(?:InettvBrowser|TSBNetTV|NETTV|HBBTV)' + device_replacement: 'Inettv' + brand_replacement: 'Generic_Inettv' + - regex: 'Series60/\d\.\d (LG)[\-]?([A-Za-z0-9 \-]+)' + device_replacement: '$1 $2' + brand_replacement: '$1' + model_replacement: '$2' + - regex: '\b(?:LGE[ \-]LG\-(?:AX|)|LGE |LGE?-LG|LGE?[ \-]|LG[ /\-]|lg[\-])([A-Za-z0-9]+)\b' + device_replacement: 'LG $1' + brand_replacement: 'LG' + model_replacement: '$1' + - regex: '(?:^LG[\-]?|^LGE[\-/]?)([A-Za-z]+[0-9]+[A-Za-z]*)' + device_replacement: 'LG $1' + brand_replacement: 'LG' + model_replacement: '$1' + - regex: '^LG([0-9]+[A-Za-z]*)' + device_replacement: 'LG $1' + brand_replacement: 'LG' + model_replacement: '$1' + - regex: '(KIN\.[^ ]+) (\d+)\.(\d+)' + device_replacement: 'Microsoft $1' + brand_replacement: 'Microsoft' + model_replacement: '$1' + - regex: '(?:MSIE|XBMC).{0,200}\b(Xbox)\b' + device_replacement: '$1' + brand_replacement: 'Microsoft' + model_replacement: '$1' + - regex: '; ARM; Trident/6\.0; Touch[\);]' + device_replacement: 'Microsoft Surface RT' + brand_replacement: 'Microsoft' + model_replacement: 'Surface RT' + - regex: 'Motorola\-([A-Za-z0-9]+)' + device_replacement: 'Motorola $1' + brand_replacement: 'Motorola' + model_replacement: '$1' + - regex: 'MOTO\-([A-Za-z0-9]+)' + device_replacement: 'Motorola $1' + brand_replacement: 'Motorola' + model_replacement: '$1' + - regex: 'MOT\-([A-z0-9][A-z0-9\-]*)' + device_replacement: 'Motorola $1' + brand_replacement: 'Motorola' + model_replacement: '$1' + - regex: '; (moto[ a-zA-z0-9()]{0,50});((?: Build|.{0,50}\) AppleWebKit))' + device_replacement: '$1' + brand_replacement: 'Motorola' + model_replacement: '$1' + - regex: '; {0,2}(moto)(.{0,50})(?: Build|\) AppleWebKit)' + device_replacement: 'Motorola$2' + brand_replacement: 'Motorola' + model_replacement: '$2' + - regex: 'Nintendo WiiU' + device_replacement: 'Nintendo Wii U' + brand_replacement: 'Nintendo' + model_replacement: 'Wii U' + - regex: 'Nintendo (Switch|DS|3DS|DSi|Wii);' + device_replacement: 'Nintendo $1' + brand_replacement: 'Nintendo' + model_replacement: '$1' + - regex: '(?:Pantech|PANTECH)[ _-]?([A-Za-z0-9\-]+)' + device_replacement: 'Pantech $1' + brand_replacement: 'Pantech' + model_replacement: '$1' + - regex: 'Philips([A-Za-z0-9]+)' + device_replacement: 'Philips $1' + 
brand_replacement: 'Philips' + model_replacement: '$1' + - regex: 'Philips ([A-Za-z0-9]+)' + device_replacement: 'Philips $1' + brand_replacement: 'Philips' + model_replacement: '$1' + - regex: '(SMART-TV); .{0,200} Tizen ' + device_replacement: 'Samsung $1' + brand_replacement: 'Samsung' + model_replacement: '$1' + - regex: 'SymbianOS/9\.\d.{0,200} Samsung[/\-]([A-Za-z0-9 \-]+)' + device_replacement: 'Samsung $1' + brand_replacement: 'Samsung' + model_replacement: '$1' + - regex: '(Samsung)(SGH)(i[0-9]+)' + device_replacement: '$1 $2$3' + brand_replacement: '$1' + model_replacement: '$2-$3' + - regex: 'SAMSUNG-ANDROID-MMS/([^;/]{1,100})' + device_replacement: '$1' + brand_replacement: 'Samsung' + model_replacement: '$1' + - regex: 'SAMSUNG(?:; |[ -/])([A-Za-z0-9\-]+)' + regex_flag: 'i' + device_replacement: 'Samsung $1' + brand_replacement: 'Samsung' + model_replacement: '$1' + - regex: '(Dreamcast)' + device_replacement: 'Sega $1' + brand_replacement: 'Sega' + model_replacement: '$1' + - regex: '^SIE-([A-Za-z0-9]+)' + device_replacement: 'Siemens $1' + brand_replacement: 'Siemens' + model_replacement: '$1' + - regex: 'Softbank/[12]\.0/([A-Za-z0-9]+)' + device_replacement: 'Softbank $1' + brand_replacement: 'Softbank' + model_replacement: '$1' + - regex: 'SonyEricsson ?([A-Za-z0-9\-]+)' + device_replacement: 'Ericsson $1' + brand_replacement: 'SonyEricsson' + model_replacement: '$1' + - regex: 'Android [^;]{1,200}; ([^ ]+) (Sony)/' + device_replacement: '$2 $1' + brand_replacement: '$2' + model_replacement: '$1' + - regex: '(Sony)(?:BDP\/|\/|)([^ /;\)]+)[ /;\)]' + device_replacement: '$1 $2' + brand_replacement: '$1' + model_replacement: '$2' + - regex: 'Puffin/[\d\.]+IT' + device_replacement: 'iPad' + brand_replacement: 'Apple' + model_replacement: 'iPad' + - regex: 'Puffin/[\d\.]+IP' + device_replacement: 'iPhone' + brand_replacement: 'Apple' + model_replacement: 'iPhone' + - regex: 'Puffin/[\d\.]+AT' + device_replacement: 'Generic Tablet' + brand_replacement: 'Generic' + model_replacement: 'Tablet' + - regex: 'Puffin/[\d\.]+AP' + device_replacement: 'Generic Smartphone' + brand_replacement: 'Generic' + model_replacement: 'Smartphone' + - regex: 'Android[\- ][\d]+\.[\d]+; [A-Za-z]{2}\-[A-Za-z]{0,2}; WOWMobile (.{1,200})( Build[/ ]|\))' + brand_replacement: 'Generic_Android' + model_replacement: '$1' + - regex: 'Android[\- ][\d]+\.[\d]+\-update1; [A-Za-z]{2}\-[A-Za-z]{0,2} {0,2}; {0,2}(.{1,200}?)( Build[/ ]|\))' + brand_replacement: 'Generic_Android' + model_replacement: '$1' + - regex: 'Android[\- ][\d]+(?:\.[\d]+)(?:\.[\d]+|); {0,2}[A-Za-z]{2}[_\-][A-Za-z]{0,2}\-? {0,2}; {0,2}(.{1,200}?)( Build[/ ]|\))' + brand_replacement: 'Generic_Android' + model_replacement: '$1' + - regex: 'Android[\- ][\d]+(?:\.[\d]+)(?:\.[\d]+|); {0,2}[A-Za-z]{0,2}\- {0,2}; {0,2}(.{1,200}?)( Build[/ ]|\))' + brand_replacement: 'Generic_Android' + model_replacement: '$1' + - regex: 'Android[\- ][\d]+(?:\.[\d]+)(?:\.[\d]+|); {0,2}[a-z]{0,2}[_\-]?[A-Za-z]{0,2};?( Build[/ ]|\))' + device_replacement: 'Generic Smartphone' + brand_replacement: 'Generic' + model_replacement: 'Smartphone' + - regex: 'Android[\- ][\d]+(?:\.[\d]+)(?:\.[\d]+|); {0,3}\-?[A-Za-z]{2}; {0,2}(.{1,50}?)( Build[/ ]|\))' + brand_replacement: 'Generic_Android' + model_replacement: '$1' + - regex: 'Android \d+?(?:\.\d+|)(?:\.\d+|); ([^;]{1,100}?)(?: Build|\) AppleWebKit).{1,200}? 
Mobile Safari' + brand_replacement: 'Generic_Android' + model_replacement: '$1' + - regex: 'Android \d+?(?:\.\d+|)(?:\.\d+|); ([^;]{1,100}?)(?: Build|\) AppleWebKit).{1,200}? Safari' + brand_replacement: 'Generic_Android_Tablet' + model_replacement: '$1' + - regex: 'Android \d+?(?:\.\d+|)(?:\.\d+|); ([^;]{1,100}?)(?: Build|\))' + brand_replacement: 'Generic_Android' + model_replacement: '$1' + - regex: '(GoogleTV)' + brand_replacement: 'Generic_Inettv' + model_replacement: '$1' + - regex: '(WebTV)/\d+.\d+' + brand_replacement: 'Generic_Inettv' + model_replacement: '$1' + - regex: '^(Roku)/DVP-\d+\.\d+' + brand_replacement: 'Generic_Inettv' + model_replacement: '$1' + - regex: '(Android 3\.\d|Opera Tablet|Tablet; .{1,100}Firefox/|Android.{0,100}(?:Tab|Pad))' + regex_flag: 'i' + device_replacement: 'Generic Tablet' + brand_replacement: 'Generic' + model_replacement: 'Tablet' + - regex: '(Symbian|\bS60(Version|V\d)|\bS60\b|\((Series 60|Windows Mobile|Palm OS|Bada); Opera Mini|Windows CE|Opera Mobi|BREW|Brew|Mobile; .{1,200}Firefox/|iPhone OS|Android|MobileSafari|Windows {0,2}Phone|\(webOS/|PalmOS)' + device_replacement: 'Generic Smartphone' + brand_replacement: 'Generic' + model_replacement: 'Smartphone' + - regex: '(hiptop|avantgo|plucker|xiino|blazer|elaine)' + regex_flag: 'i' + device_replacement: 'Generic Smartphone' + brand_replacement: 'Generic' + model_replacement: 'Smartphone' + - regex: '^.{0,100}(bot|BUbiNG|zao|borg|DBot|oegp|silk|Xenu|zeal|^NING|CCBot|crawl|htdig|lycos|slurp|teoma|voila|yahoo|Sogou|CiBra|Nutch|^Java/|^JNLP/|Daumoa|Daum|Genieo|ichiro|larbin|pompos|Scrapy|snappy|speedy|spider|msnbot|msrbot|vortex|^vortex|crawler|favicon|indexer|Riddler|scooter|scraper|scrubby|WhatWeb|WinHTTP|bingbot|BingPreview|openbot|gigabot|furlbot|polybot|seekbot|^voyager|archiver|Icarus6j|mogimogi|Netvibes|blitzbot|altavista|charlotte|findlinks|Retreiver|TLSProber|WordPress|SeznamBot|ProoXiBot|wsr\-agent|Squrl Java|EtaoSpider|PaperLiBot|SputnikBot|A6\-Indexer|netresearch|searchsight|baiduspider|YisouSpider|ICC\-Crawler|http%20client|Python-urllib|dataparksearch|converacrawler|Screaming Frog|AppEngine-Google|YahooCacheSystem|fast\-webcrawler|Sogou Pic Spider|semanticdiscovery|Innovazion Crawler|facebookexternalhit|Google.{0,200}/\+/web/snippet|Google-HTTP-Java-Client|BlogBridge|IlTrovatore-Setaccio|InternetArchive|GomezAgent|WebThumbnail|heritrix|NewsGator|PagePeeker|Reaper|ZooShot|holmes|NL-Crawler|Pingdom|StatusCake|WhatsApp|masscan|Google Web Preview|Qwantify|Yeti|OgScrper|RecipeRadar)' + regex_flag: 'i' + device_replacement: 'Spider' + brand_replacement: 'Spider' + model_replacement: 'Desktop' + - regex: '^(1207|3gso|4thp|501i|502i|503i|504i|505i|506i|6310|6590|770s|802s|a wa|acer|acs\-|airn|alav|asus|attw|au\-m|aur |aus |abac|acoo|aiko|alco|alca|amoi|anex|anny|anyw|aptu|arch|argo|bmobile|bell|bird|bw\-n|bw\-u|beck|benq|bilb|blac|c55/|cdm\-|chtm|capi|comp|cond|dall|dbte|dc\-s|dica|ds\-d|ds12|dait|devi|dmob|doco|dopo|dorado|el(?:38|39|48|49|50|55|58|68)|el[3456]\d{2}dual|erk0|esl8|ex300|ez40|ez60|ez70|ezos|ezze|elai|emul|eric|ezwa|fake|fly\-|fly_|g\-mo|g1 u|g560|gf\-5|grun|gene|go.w|good|grad|hcit|hd\-m|hd\-p|hd\-t|hei\-|hp i|hpip|hs\-c|htc |htc\-|htca|htcg)' + regex_flag: 'i' + device_replacement: 'Generic Feature Phone' + brand_replacement: 'Generic' + model_replacement: 'Feature Phone' + - regex: '^(htcp|htcs|htct|htc_|haie|hita|huaw|hutc|i\-20|i\-go|i\-ma|i\-mobile|i230|iac|iac\-|iac/|ig01|im1k|inno|iris|jata|kddi|kgt|kgt/|kpt |kwc\-|klon|lexi|lg 
g|lg\-a|lg\-b|lg\-c|lg\-d|lg\-f|lg\-g|lg\-k|lg\-l|lg\-m|lg\-o|lg\-p|lg\-s|lg\-t|lg\-u|lg\-w|lg/k|lg/l|lg/u|lg50|lg54|lge\-|lge/|leno|m1\-w|m3ga|m50/|maui|mc01|mc21|mcca|medi|meri|mio8|mioa|mo01|mo02|mode|modo|mot |mot\-|mt50|mtp1|mtv |mate|maxo|merc|mits|mobi|motv|mozz|n100|n101|n102|n202|n203|n300|n302|n500|n502|n505|n700|n701|n710|nec\-|nem\-|newg|neon)' + regex_flag: 'i' + device_replacement: 'Generic Feature Phone' + brand_replacement: 'Generic' + model_replacement: 'Feature Phone' + - regex: '^(netf|noki|nzph|o2 x|o2\-x|opwv|owg1|opti|oran|ot\-s|p800|pand|pg\-1|pg\-2|pg\-3|pg\-6|pg\-8|pg\-c|pg13|phil|pn\-2|pt\-g|palm|pana|pire|pock|pose|psio|qa\-a|qc\-2|qc\-3|qc\-5|qc\-7|qc07|qc12|qc21|qc32|qc60|qci\-|qwap|qtek|r380|r600|raks|rim9|rove|s55/|sage|sams|sc01|sch\-|scp\-|sdk/|se47|sec\-|sec0|sec1|semc|sgh\-|shar|sie\-|sk\-0|sl45|slid|smb3|smt5|sp01|sph\-|spv |spv\-|sy01|samm|sany|sava|scoo|send|siem|smar|smit|soft|sony|t\-mo|t218|t250|t600|t610|t618|tcl\-|tdg\-|telm|tim\-|ts70|tsm\-|tsm3|tsm5|tx\-9|tagt)' + regex_flag: 'i' + device_replacement: 'Generic Feature Phone' + brand_replacement: 'Generic' + model_replacement: 'Feature Phone' + - regex: '^(talk|teli|topl|tosh|up.b|upg1|utst|v400|v750|veri|vk\-v|vk40|vk50|vk52|vk53|vm40|vx98|virg|vertu|vite|voda|vulc|w3c |w3c\-|wapj|wapp|wapu|wapm|wig |wapi|wapr|wapv|wapy|wapa|waps|wapt|winc|winw|wonu|x700|xda2|xdag|yas\-|your|zte\-|zeto|aste|audi|avan|blaz|brew|brvw|bumb|ccwa|cell|cldc|cmd\-|dang|eml2|fetc|hipt|http|ibro|idea|ikom|ipaq|jbro|jemu|jigs|keji|kyoc|kyok|libw|m\-cr|midp|mmef|moto|mwbp|mywa|newt|nok6|o2im|pant|pdxg|play|pluc|port|prox|rozo|sama|seri|smal|symb|treo|upsi|vx52|vx53|vx60|vx61|vx70|vx80|vx81|vx83|vx85|wap\-|webc|whit|wmlb|xda\-|xda_)' + regex_flag: 'i' + device_replacement: 'Generic Feature Phone' + brand_replacement: 'Generic' + model_replacement: 'Feature Phone' + - regex: '^(Ice)$' + device_replacement: 'Generic Feature Phone' + brand_replacement: 'Generic' + model_replacement: 'Feature Phone' + - regex: '(wap[\-\ ]browser|maui|netfront|obigo|teleca|up\.browser|midp|Opera Mini)' + regex_flag: 'i' + device_replacement: 'Generic Feature Phone' + brand_replacement: 'Generic' + model_replacement: 'Feature Phone' + - regex: 'Mac OS' + device_replacement: 'Mac' + brand_replacement: 'Apple' + model_replacement: 'Mac'`) diff --git a/vendor/go.opentelemetry.io/auto/sdk/CONTRIBUTING.md b/vendor/go.opentelemetry.io/auto/sdk/CONTRIBUTING.md new file mode 100644 index 00000000000..773c9b6431f --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/CONTRIBUTING.md @@ -0,0 +1,27 @@ +# Contributing to go.opentelemetry.io/auto/sdk + +The `go.opentelemetry.io/auto/sdk` module is a purpose built OpenTelemetry SDK. +It is designed to be: + +0. An OpenTelemetry compliant SDK +1. Instrumented by auto-instrumentation (serializable into OTLP JSON) +2. Lightweight +3. User-friendly + +These design choices are listed in the order of their importance. + +The primary design goal of this module is to be an OpenTelemetry SDK. +This means that it needs to implement the Go APIs found in `go.opentelemetry.io/otel`. + +Having met the requirement of SDK compliance, this module needs to provide code that the `go.opentelemetry.io/auto` module can instrument. +The chosen approach to meet this goal is to ensure the telemetry from the SDK is serializable into JSON encoded OTLP. +This ensures then that the serialized form is compatible with other OpenTelemetry systems, and the auto-instrumentation can use these systems to deserialize any telemetry it is sent. 
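To make the serialization goal above concrete, here is a minimal, hypothetical sketch of what "serializable into JSON encoded OTLP" looks like in practice. The `span` struct below is illustrative only: it merely mirrors the JSON field names used by the vendored `internal/telemetry` types added later in this patch, which live under an internal import path and cannot be imported directly.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// span is an illustrative stand-in mirroring the OTLP/JSON field names used
// by the vendored internal/telemetry.Span type; it is not the real type.
type span struct {
	TraceID   string `json:"traceId,omitempty"`
	SpanID    string `json:"spanId,omitempty"`
	Name      string `json:"name"`
	StartTime uint64 `json:"startTimeUnixNano,omitempty"`
	EndTime   uint64 `json:"endTimeUnixNano,omitempty"`
}

func main() {
	s := span{
		TraceID:   "0123456789abcdef0123456789abcdef", // 16-byte trace ID, hex encoded
		SpanID:    "0123456789abcdef",                 // 8-byte span ID, hex encoded
		Name:      "GET /users",
		StartTime: 1700000000000000000, // UNIX epoch nanoseconds
		EndTime:   1700000000500000000,
	}
	out, err := json.Marshal(s)
	if err != nil {
		panic(err)
	}
	// Prints OTLP/JSON-shaped output that OTLP-aware tooling can decode.
	fmt.Println(string(out))
}
```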
+ +Outside of these first two goals, the intended use becomes relevant. +This package is intended to be used in the `go.opentelemetry.io/otel` global API as a default when the auto-instrumentation is running. +Because of this, this package needs to not add unnecessary dependencies to that API. +Ideally, it adds none. +It also needs to operate efficiently. + +Finally, this module is designed to be user-friendly to Go development. +It hides complexity in order to provide simpler APIs when the previous goals can all still be met. diff --git a/vendor/go.opentelemetry.io/auto/sdk/LICENSE b/vendor/go.opentelemetry.io/auto/sdk/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/auto/sdk/VERSIONING.md b/vendor/go.opentelemetry.io/auto/sdk/VERSIONING.md new file mode 100644 index 00000000000..088d19a6ce7 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/VERSIONING.md @@ -0,0 +1,15 @@ +# Versioning + +This document describes the versioning policy for this module. +This policy is designed so the following goals can be achieved. + +**Users are provided a codebase of value that is stable and secure.** + +## Policy + +* Versioning of this module will be idiomatic of a Go project using [Go modules](https://github.com/golang/go/wiki/Modules). + * [Semantic import versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning) will be used. + * Versions will comply with [semver 2.0](https://semver.org/spec/v2.0.0.html). + * Any `v2` or higher version of this module will be included as a `/vN` at the end of the module path used in `go.mod` files and in the package import path. + +* GitHub releases will be made for all releases. diff --git a/vendor/go.opentelemetry.io/auto/sdk/doc.go b/vendor/go.opentelemetry.io/auto/sdk/doc.go new file mode 100644 index 00000000000..ad73d8cb9d2 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/doc.go @@ -0,0 +1,14 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +/* +Package sdk provides an auto-instrumentable OpenTelemetry SDK. + +An [go.opentelemetry.io/auto.Instrumentation] can be configured to target the +process running this SDK. In that case, all telemetry the SDK produces will be +processed and handled by that [go.opentelemetry.io/auto.Instrumentation]. + +By default, if there is no [go.opentelemetry.io/auto.Instrumentation] set to +auto-instrument the SDK, the SDK will not generate any telemetry. +*/ +package sdk diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/attr.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/attr.go new file mode 100644 index 00000000000..af6ef171f6a --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/attr.go @@ -0,0 +1,58 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry + +// Attr is a key-value pair. +type Attr struct { + Key string `json:"key,omitempty"` + Value Value `json:"value,omitempty"` +} + +// String returns an Attr for a string value. 
+func String(key, value string) Attr { + return Attr{key, StringValue(value)} +} + +// Int64 returns an Attr for an int64 value. +func Int64(key string, value int64) Attr { + return Attr{key, Int64Value(value)} +} + +// Int returns an Attr for an int value. +func Int(key string, value int) Attr { + return Int64(key, int64(value)) +} + +// Float64 returns an Attr for a float64 value. +func Float64(key string, value float64) Attr { + return Attr{key, Float64Value(value)} +} + +// Bool returns an Attr for a bool value. +func Bool(key string, value bool) Attr { + return Attr{key, BoolValue(value)} +} + +// Bytes returns an Attr for a []byte value. +// The passed slice must not be changed after it is passed. +func Bytes(key string, value []byte) Attr { + return Attr{key, BytesValue(value)} +} + +// Slice returns an Attr for a []Value value. +// The passed slice must not be changed after it is passed. +func Slice(key string, value ...Value) Attr { + return Attr{key, SliceValue(value...)} +} + +// Map returns an Attr for a map value. +// The passed slice must not be changed after it is passed. +func Map(key string, value ...Attr) Attr { + return Attr{key, MapValue(value...)} +} + +// Equal reports whether a is equal to b. +func (a Attr) Equal(b Attr) bool { + return a.Key == b.Key && a.Value.Equal(b.Value) +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/doc.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/doc.go new file mode 100644 index 00000000000..949e2165c05 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/doc.go @@ -0,0 +1,8 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +/* +Package telemetry provides lightweight representations of OpenTelemetry +telemetry that are compatible with the OTLP JSON protobuf encoding. +*/ +package telemetry diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go new file mode 100644 index 00000000000..e854d7e84e8 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go @@ -0,0 +1,103 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry + +import ( + "encoding/hex" + "errors" + "fmt" +) + +const ( + traceIDSize = 16 + spanIDSize = 8 +) + +// TraceID is a custom data type that is used for all trace IDs. +type TraceID [traceIDSize]byte + +// String returns the hex string representation form of a TraceID. +func (tid TraceID) String() string { + return hex.EncodeToString(tid[:]) +} + +// IsEmpty returns true if the trace ID is all zeroes. +func (tid TraceID) IsEmpty() bool { + return tid == [traceIDSize]byte{} +} + +// MarshalJSON converts the trace ID into a hex string enclosed in quotes. +func (tid TraceID) MarshalJSON() ([]byte, error) { + if tid.IsEmpty() { + return []byte(`""`), nil + } + return marshalJSON(tid[:]) +} + +// UnmarshalJSON inflates the trace ID from hex string, possibly enclosed in +// quotes. +func (tid *TraceID) UnmarshalJSON(data []byte) error { + *tid = [traceIDSize]byte{} + return unmarshalJSON(tid[:], data) +} + +// SpanID is a custom data type that is used for all span IDs. +type SpanID [spanIDSize]byte + +// String returns the hex string representation form of a SpanID. +func (sid SpanID) String() string { + return hex.EncodeToString(sid[:]) +} + +// IsEmpty returns true if the span ID is all zeroes. 
+func (sid SpanID) IsEmpty() bool { + return sid == [spanIDSize]byte{} +} + +// MarshalJSON converts span ID into a hex string enclosed in quotes. +func (sid SpanID) MarshalJSON() ([]byte, error) { + if sid.IsEmpty() { + return []byte(`""`), nil + } + return marshalJSON(sid[:]) +} + +// UnmarshalJSON decodes span ID from hex string, possibly enclosed in quotes. +func (sid *SpanID) UnmarshalJSON(data []byte) error { + *sid = [spanIDSize]byte{} + return unmarshalJSON(sid[:], data) +} + +// marshalJSON converts id into a hex string enclosed in quotes. +func marshalJSON(id []byte) ([]byte, error) { + // Plus 2 quote chars at the start and end. + hexLen := hex.EncodedLen(len(id)) + 2 + + b := make([]byte, hexLen) + hex.Encode(b[1:hexLen-1], id) + b[0], b[hexLen-1] = '"', '"' + + return b, nil +} + +// unmarshalJSON inflates trace id from hex string, possibly enclosed in quotes. +func unmarshalJSON(dst []byte, src []byte) error { + if l := len(src); l >= 2 && src[0] == '"' && src[l-1] == '"' { + src = src[1 : l-1] + } + nLen := len(src) + if nLen == 0 { + return nil + } + + if len(dst) != hex.DecodedLen(nLen) { + return errors.New("invalid length for ID") + } + + _, err := hex.Decode(dst, src) + if err != nil { + return fmt.Errorf("cannot unmarshal ID from string '%s': %w", string(src), err) + } + return nil +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go new file mode 100644 index 00000000000..29e629d6674 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go @@ -0,0 +1,67 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry + +import ( + "encoding/json" + "strconv" +) + +// protoInt64 represents the protobuf encoding of integers which can be either +// strings or integers. +type protoInt64 int64 + +// Int64 returns the protoInt64 as an int64. +func (i *protoInt64) Int64() int64 { return int64(*i) } + +// UnmarshalJSON decodes both strings and integers. +func (i *protoInt64) UnmarshalJSON(data []byte) error { + if data[0] == '"' { + var str string + if err := json.Unmarshal(data, &str); err != nil { + return err + } + parsedInt, err := strconv.ParseInt(str, 10, 64) + if err != nil { + return err + } + *i = protoInt64(parsedInt) + } else { + var parsedInt int64 + if err := json.Unmarshal(data, &parsedInt); err != nil { + return err + } + *i = protoInt64(parsedInt) + } + return nil +} + +// protoUint64 represents the protobuf encoding of integers which can be either +// strings or integers. +type protoUint64 uint64 + +// Uint64 returns the protoUint64 as a uint64. +func (i *protoUint64) Uint64() uint64 { return uint64(*i) } + +// UnmarshalJSON decodes both strings and integers. 
+func (i *protoUint64) UnmarshalJSON(data []byte) error { + if data[0] == '"' { + var str string + if err := json.Unmarshal(data, &str); err != nil { + return err + } + parsedUint, err := strconv.ParseUint(str, 10, 64) + if err != nil { + return err + } + *i = protoUint64(parsedUint) + } else { + var parsedUint uint64 + if err := json.Unmarshal(data, &parsedUint); err != nil { + return err + } + *i = protoUint64(parsedUint) + } + return nil +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/resource.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/resource.go new file mode 100644 index 00000000000..cecad8bae3c --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/resource.go @@ -0,0 +1,66 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// Resource information. +type Resource struct { + // Attrs are the set of attributes that describe the resource. Attribute + // keys MUST be unique (it is not allowed to have more than one attribute + // with the same key). + Attrs []Attr `json:"attributes,omitempty"` + // DroppedAttrs is the number of dropped attributes. If the value + // is 0, then no attributes were dropped. + DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into r. +func (r *Resource) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid Resource type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid Resource field: %#v", keyIface) + } + + switch key { + case "attributes": + err = decoder.Decode(&r.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&r.DroppedAttrs) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/scope.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/scope.go new file mode 100644 index 00000000000..b6f2e28d408 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/scope.go @@ -0,0 +1,67 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// Scope holds the identifying values of the instrumentation scope. +type Scope struct { + Name string `json:"name,omitempty"` + Version string `json:"version,omitempty"` + Attrs []Attr `json:"attributes,omitempty"` + DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into s. +func (s *Scope) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid Scope type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. 
+ return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid Scope field: %#v", keyIface) + } + + switch key { + case "name": + err = decoder.Decode(&s.Name) + case "version": + err = decoder.Decode(&s.Version) + case "attributes": + err = decoder.Decode(&s.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&s.DroppedAttrs) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go new file mode 100644 index 00000000000..a13a6b733da --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go @@ -0,0 +1,456 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry + +import ( + "bytes" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "io" + "time" +) + +// A Span represents a single operation performed by a single component of the +// system. +type Span struct { + // A unique identifier for a trace. All spans from the same trace share + // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR + // of length other than 16 bytes is considered invalid (empty string in OTLP/JSON + // is zero-length and thus is also invalid). + // + // This field is required. + TraceID TraceID `json:"traceId,omitempty"` + // A unique identifier for a span within a trace, assigned when the span + // is created. The ID is an 8-byte array. An ID with all zeroes OR of length + // other than 8 bytes is considered invalid (empty string in OTLP/JSON + // is zero-length and thus is also invalid). + // + // This field is required. + SpanID SpanID `json:"spanId,omitempty"` + // trace_state conveys information about request position in multiple distributed tracing graphs. + // It is a trace_state in w3c-trace-context format: https://www.w3.org/TR/trace-context/#tracestate-header + // See also https://github.com/w3c/distributed-tracing for more details about this field. + TraceState string `json:"traceState,omitempty"` + // The `span_id` of this span's parent span. If this is a root span, then this + // field must be empty. The ID is an 8-byte array. + ParentSpanID SpanID `json:"parentSpanId,omitempty"` + // Flags, a bit field. + // + // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace + // Context specification. To read the 8-bit W3C trace flag, use + // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. + // + // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. + // + // Bits 8 and 9 represent the 3 states of whether a span's parent + // is remote. The states are (unknown, is not remote, is remote). + // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`. + // To read whether the span is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`. + // + // When creating span messages, if the message is logically forwarded from another source + // with an equivalent flags fields (i.e., usually another OTLP span message), the field SHOULD + // be copied as-is. If creating from a source that does not have an equivalent flags field + // (such as a runtime representation of an OpenTelemetry span), the high 22 bits MUST + // be set to zero. + // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero. + // + // [Optional]. 
+ Flags uint32 `json:"flags,omitempty"` + // A description of the span's operation. + // + // For example, the name can be a qualified method name or a file name + // and a line number where the operation is called. A best practice is to use + // the same display name at the same call point in an application. + // This makes it easier to correlate spans in different traces. + // + // This field is semantically required to be set to non-empty string. + // Empty value is equivalent to an unknown span name. + // + // This field is required. + Name string `json:"name"` + // Distinguishes between spans generated in a particular context. For example, + // two spans with the same name may be distinguished using `CLIENT` (caller) + // and `SERVER` (callee) to identify queueing latency associated with the span. + Kind SpanKind `json:"kind,omitempty"` + // start_time_unix_nano is the start time of the span. On the client side, this is the time + // kept by the local machine where the span execution starts. On the server side, this + // is the time when the server's application handler starts running. + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + // + // This field is semantically required and it is expected that end_time >= start_time. + StartTime time.Time `json:"startTimeUnixNano,omitempty"` + // end_time_unix_nano is the end time of the span. On the client side, this is the time + // kept by the local machine where the span execution ends. On the server side, this + // is the time when the server application handler stops running. + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + // + // This field is semantically required and it is expected that end_time >= start_time. + EndTime time.Time `json:"endTimeUnixNano,omitempty"` + // attributes is a collection of key/value pairs. Note, global attributes + // like server name can be set using the resource API. Examples of attributes: + // + // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" + // "/http/server_latency": 300 + // "example.com/myattribute": true + // "example.com/score": 10.239 + // + // The OpenTelemetry API specification further restricts the allowed value types: + // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + Attrs []Attr `json:"attributes,omitempty"` + // dropped_attributes_count is the number of attributes that were discarded. Attributes + // can be discarded because their keys are too long or because there are too many + // attributes. If this value is 0, then no attributes were dropped. + DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` + // events is a collection of Event items. + Events []*SpanEvent `json:"events,omitempty"` + // dropped_events_count is the number of dropped events. If the value is 0, then no + // events were dropped. + DroppedEvents uint32 `json:"droppedEventsCount,omitempty"` + // links is a collection of Links, which are references from this span to a span + // in the same or different trace. + Links []*SpanLink `json:"links,omitempty"` + // dropped_links_count is the number of dropped links after the maximum size was + // enforced. If this value is 0, then no links were dropped. 
+ DroppedLinks uint32 `json:"droppedLinksCount,omitempty"` + // An optional final status for this span. Semantically when Status isn't set, it means + // span's status code is unset, i.e. assume STATUS_CODE_UNSET (code = 0). + Status *Status `json:"status,omitempty"` +} + +// MarshalJSON encodes s into OTLP formatted JSON. +func (s Span) MarshalJSON() ([]byte, error) { + startT := s.StartTime.UnixNano() + if s.StartTime.IsZero() || startT < 0 { + startT = 0 + } + + endT := s.EndTime.UnixNano() + if s.EndTime.IsZero() || endT < 0 { + endT = 0 + } + + // Override non-empty default SpanID marshal and omitempty. + var parentSpanId string + if !s.ParentSpanID.IsEmpty() { + b := make([]byte, hex.EncodedLen(spanIDSize)) + hex.Encode(b, s.ParentSpanID[:]) + parentSpanId = string(b) + } + + type Alias Span + return json.Marshal(struct { + Alias + ParentSpanID string `json:"parentSpanId,omitempty"` + StartTime uint64 `json:"startTimeUnixNano,omitempty"` + EndTime uint64 `json:"endTimeUnixNano,omitempty"` + }{ + Alias: Alias(s), + ParentSpanID: parentSpanId, + StartTime: uint64(startT), + EndTime: uint64(endT), + }) +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into s. +func (s *Span) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid Span type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid Span field: %#v", keyIface) + } + + switch key { + case "traceId", "trace_id": + err = decoder.Decode(&s.TraceID) + case "spanId", "span_id": + err = decoder.Decode(&s.SpanID) + case "traceState", "trace_state": + err = decoder.Decode(&s.TraceState) + case "parentSpanId", "parent_span_id": + err = decoder.Decode(&s.ParentSpanID) + case "flags": + err = decoder.Decode(&s.Flags) + case "name": + err = decoder.Decode(&s.Name) + case "kind": + err = decoder.Decode(&s.Kind) + case "startTimeUnixNano", "start_time_unix_nano": + var val protoUint64 + err = decoder.Decode(&val) + s.StartTime = time.Unix(0, int64(val.Uint64())) + case "endTimeUnixNano", "end_time_unix_nano": + var val protoUint64 + err = decoder.Decode(&val) + s.EndTime = time.Unix(0, int64(val.Uint64())) + case "attributes": + err = decoder.Decode(&s.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&s.DroppedAttrs) + case "events": + err = decoder.Decode(&s.Events) + case "droppedEventsCount", "dropped_events_count": + err = decoder.Decode(&s.DroppedEvents) + case "links": + err = decoder.Decode(&s.Links) + case "droppedLinksCount", "dropped_links_count": + err = decoder.Decode(&s.DroppedLinks) + case "status": + err = decoder.Decode(&s.Status) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} + +// SpanFlags represents constants used to interpret the +// Span.flags field, which is protobuf 'fixed32' type and is to +// be used as bit-fields. Each non-zero value defined in this enum is +// a bit-mask. To extract the bit-field, for example, use an +// expression like: +// +// (span.flags & SPAN_FLAGS_TRACE_FLAGS_MASK) +// +// See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. +// +// Note that Span flags were introduced in version 1.1 of the +// OpenTelemetry protocol. 
Older Span producers do not set this +// field, consequently consumers should not rely on the absence of a +// particular flag bit to indicate the presence of a particular feature. +type SpanFlags int32 + +const ( + // Bits 0-7 are used for trace flags. + SpanFlagsTraceFlagsMask SpanFlags = 255 + // Bits 8 and 9 are used to indicate that the parent span or link span is remote. + // Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known. + // Bit 9 (`IS_REMOTE`) indicates whether the span or link is remote. + SpanFlagsContextHasIsRemoteMask SpanFlags = 256 + // SpanFlagsContextIsRemoteMask indicates the Span is remote. + SpanFlagsContextIsRemoteMask SpanFlags = 512 +) + +// SpanKind is the type of span. Can be used to specify additional relationships between spans +// in addition to a parent/child relationship. +type SpanKind int32 + +const ( + // Indicates that the span represents an internal operation within an application, + // as opposed to an operation happening at the boundaries. Default value. + SpanKindInternal SpanKind = 1 + // Indicates that the span covers server-side handling of an RPC or other + // remote network request. + SpanKindServer SpanKind = 2 + // Indicates that the span describes a request to some remote service. + SpanKindClient SpanKind = 3 + // Indicates that the span describes a producer sending a message to a broker. + // Unlike CLIENT and SERVER, there is often no direct critical path latency relationship + // between producer and consumer spans. A PRODUCER span ends when the message was accepted + // by the broker while the logical processing of the message might span a much longer time. + SpanKindProducer SpanKind = 4 + // Indicates that the span describes a consumer receiving a message from a broker. + // Like the PRODUCER kind, there is often no direct critical path latency relationship + // between producer and consumer spans. + SpanKindConsumer SpanKind = 5 +) + +// A SpanEvent is a time-stamped annotation of the span, consisting of user-supplied +// text description and key-value pairs. +type SpanEvent struct { + // time_unix_nano is the time the event occurred. + Time time.Time `json:"timeUnixNano,omitempty"` + // name of the event. + // This field is semantically required to be set to non-empty string. + Name string `json:"name,omitempty"` + // attributes is a collection of attribute key/value pairs on the event. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + Attrs []Attr `json:"attributes,omitempty"` + // dropped_attributes_count is the number of dropped attributes. If the value is 0, + // then no attributes were dropped. + DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` +} + +// MarshalJSON encodes e into OTLP formatted JSON. +func (e SpanEvent) MarshalJSON() ([]byte, error) { + t := e.Time.UnixNano() + if e.Time.IsZero() || t < 0 { + t = 0 + } + + type Alias SpanEvent + return json.Marshal(struct { + Alias + Time uint64 `json:"timeUnixNano,omitempty"` + }{ + Alias: Alias(e), + Time: uint64(t), + }) +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into se. +func (se *SpanEvent) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid SpanEvent type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. 
+ return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid SpanEvent field: %#v", keyIface) + } + + switch key { + case "timeUnixNano", "time_unix_nano": + var val protoUint64 + err = decoder.Decode(&val) + se.Time = time.Unix(0, int64(val.Uint64())) + case "name": + err = decoder.Decode(&se.Name) + case "attributes": + err = decoder.Decode(&se.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&se.DroppedAttrs) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} + +// A pointer from the current span to another span in the same trace or in a +// different trace. For example, this can be used in batching operations, +// where a single batch handler processes multiple requests from different +// traces or when the handler receives a request from a different project. +type SpanLink struct { + // A unique identifier of a trace that this linked span is part of. The ID is a + // 16-byte array. + TraceID TraceID `json:"traceId,omitempty"` + // A unique identifier for the linked span. The ID is an 8-byte array. + SpanID SpanID `json:"spanId,omitempty"` + // The trace_state associated with the link. + TraceState string `json:"traceState,omitempty"` + // attributes is a collection of attribute key/value pairs on the link. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + Attrs []Attr `json:"attributes,omitempty"` + // dropped_attributes_count is the number of dropped attributes. If the value is 0, + // then no attributes were dropped. + DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` + // Flags, a bit field. + // + // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace + // Context specification. To read the 8-bit W3C trace flag, use + // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. + // + // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. + // + // Bits 8 and 9 represent the 3 states of whether the link is remote. + // The states are (unknown, is not remote, is remote). + // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`. + // To read whether the link is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`. + // + // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero. + // When creating new spans, bits 10-31 (most-significant 22-bits) MUST be zero. + // + // [Optional]. + Flags uint32 `json:"flags,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into sl. +func (sl *SpanLink) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid SpanLink type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. 
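+				// As a sketch of consuming the decoded link flags with the
+				// SpanFlags masks defined above (both lines assume a decoded
+				// SpanLink sl):
+				//
+				//	known := sl.Flags&uint32(SpanFlagsContextHasIsRemoteMask) != 0
+				//	remote := sl.Flags&uint32(SpanFlagsContextIsRemoteMask) != 0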
+ return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid SpanLink field: %#v", keyIface) + } + + switch key { + case "traceId", "trace_id": + err = decoder.Decode(&sl.TraceID) + case "spanId", "span_id": + err = decoder.Decode(&sl.SpanID) + case "traceState", "trace_state": + err = decoder.Decode(&sl.TraceState) + case "attributes": + err = decoder.Decode(&sl.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&sl.DroppedAttrs) + case "flags": + err = decoder.Decode(&sl.Flags) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go new file mode 100644 index 00000000000..1217776ead1 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go @@ -0,0 +1,40 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry + +// For the semantics of status codes see +// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status +type StatusCode int32 + +const ( + // The default status. + StatusCodeUnset StatusCode = 0 + // The Span has been validated by an Application developer or Operator to + // have completed successfully. + StatusCodeOK StatusCode = 1 + // The Span contains an error. + StatusCodeError StatusCode = 2 +) + +var statusCodeStrings = []string{ + "Unset", + "OK", + "Error", +} + +func (s StatusCode) String() string { + if s >= 0 && int(s) < len(statusCodeStrings) { + return statusCodeStrings[s] + } + return "" +} + +// The Status type defines a logical error model that is suitable for different +// programming environments, including REST APIs and RPC APIs. +type Status struct { + // A developer-facing human readable error message. + Message string `json:"message,omitempty"` + // The status code. + Code StatusCode `json:"code,omitempty"` +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go new file mode 100644 index 00000000000..69a348f0f06 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go @@ -0,0 +1,189 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// Traces represents the traces data that can be stored in a persistent storage, +// OR can be embedded by other protocols that transfer OTLP traces data but do +// not implement the OTLP protocol. +// +// The main difference between this message and collector protocol is that +// in this message there will not be any "control" or "metadata" specific to +// OTLP protocol. +// +// When new fields are added into this message, the OTLP request MUST be updated +// as well. +type Traces struct { + // An array of ResourceSpans. + // For data coming from a single resource this array will typically contain + // one element. Intermediary nodes that receive data from multiple origins + // typically batch the data before forwarding further and in that case this + // array will contain multiple elements. + ResourceSpans []*ResourceSpans `json:"resourceSpans,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into td. 
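+//
+// A minimal sketch of the accepted input, which may use either the protojson
+// lowerCamelCase key or its snake_case equivalent:
+//
+//	var td Traces
+//	err := json.Unmarshal([]byte(`{"resourceSpans":[]}`), &td)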
+func (td *Traces) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid TracesData type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid TracesData field: %#v", keyIface) + } + + switch key { + case "resourceSpans", "resource_spans": + err = decoder.Decode(&td.ResourceSpans) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} + +// A collection of ScopeSpans from a Resource. +type ResourceSpans struct { + // The resource for the spans in this message. + // If this field is not set then no resource info is known. + Resource Resource `json:"resource"` + // A list of ScopeSpans that originate from a resource. + ScopeSpans []*ScopeSpans `json:"scopeSpans,omitempty"` + // This schema_url applies to the data in the "resource" field. It does not apply + // to the data in the "scope_spans" field which have their own schema_url field. + SchemaURL string `json:"schemaUrl,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into rs. +func (rs *ResourceSpans) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid ResourceSpans type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid ResourceSpans field: %#v", keyIface) + } + + switch key { + case "resource": + err = decoder.Decode(&rs.Resource) + case "scopeSpans", "scope_spans": + err = decoder.Decode(&rs.ScopeSpans) + case "schemaUrl", "schema_url": + err = decoder.Decode(&rs.SchemaURL) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} + +// A collection of Spans produced by an InstrumentationScope. +type ScopeSpans struct { + // The instrumentation scope information for the spans in this message. + // Semantically when InstrumentationScope isn't set, it is equivalent with + // an empty instrumentation scope name (unknown). + Scope *Scope `json:"scope"` + // A list of Spans that originate from an instrumentation scope. + Spans []*Span `json:"spans,omitempty"` + // The Schema URL, if known. This is the identifier of the Schema that the span data + // is recorded in. To learn more about Schema URL see + // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url + // This schema_url applies to all spans and span events in the "spans" field. + SchemaURL string `json:"schemaUrl,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into ss. +func (ss *ScopeSpans) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid ScopeSpans type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. 
+				return nil
+			}
+			return err
+		}
+
+		key, ok := keyIface.(string)
+		if !ok {
+			return fmt.Errorf("invalid ScopeSpans field: %#v", keyIface)
+		}
+
+		switch key {
+		case "scope":
+			err = decoder.Decode(&ss.Scope)
+		case "spans":
+			err = decoder.Decode(&ss.Spans)
+		case "schemaUrl", "schema_url":
+			err = decoder.Decode(&ss.SchemaURL)
+		default:
+			// Skip unknown.
+		}
+
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go
new file mode 100644
index 00000000000..0dd01b063a3
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go
@@ -0,0 +1,452 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+//go:generate stringer -type=ValueKind -trimprefix=ValueKind
+
+package telemetry
+
+import (
+	"bytes"
+	"cmp"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"slices"
+	"strconv"
+	"unsafe"
+)
+
+// A Value represents a structured value.
+// A zero value is valid and represents an empty value.
+type Value struct {
+	// Ensure forward compatibility by explicitly making this not comparable.
+	noCmp [0]func() //nolint: unused  // This is indeed used.
+
+	// num holds the value for Int64, Float64, and Bool. It holds the length
+	// for String, Bytes, Slice, Map.
+	num uint64
+	// any holds either the ValueKindBool, ValueKindInt64, ValueKindFloat64,
+	// stringptr, bytesptr, sliceptr, or mapptr. If ValueKindBool,
+	// ValueKindInt64, or ValueKindFloat64, then the value of Value is in num
+	// as described above. Otherwise, it contains the value wrapped in the
+	// appropriate type.
+	any any
+}
+
+type (
+	// stringptr represents a value in Value.any for ValueKindString Values.
+	stringptr *byte
+	// bytesptr represents a value in Value.any for ValueKindBytes Values.
+	bytesptr *byte
+	// sliceptr represents a value in Value.any for ValueKindSlice Values.
+	sliceptr *Value
+	// mapptr represents a value in Value.any for ValueKindMap Values.
+	mapptr *Attr
+)
+
+// ValueKind is the kind of a [Value].
+type ValueKind int
+
+// ValueKind values.
+const (
+	ValueKindEmpty ValueKind = iota
+	ValueKindBool
+	ValueKindFloat64
+	ValueKindInt64
+	ValueKindString
+	ValueKindBytes
+	ValueKindSlice
+	ValueKindMap
+)
+
+var valueKindStrings = []string{
+	"Empty",
+	"Bool",
+	"Float64",
+	"Int64",
+	"String",
+	"Bytes",
+	"Slice",
+	"Map",
+}
+
+func (k ValueKind) String() string {
+	if k >= 0 && int(k) < len(valueKindStrings) {
+		return valueKindStrings[k]
+	}
+	return ""
+}
+
+// StringValue returns a new [Value] for a string.
+func StringValue(v string) Value {
+	return Value{
+		num: uint64(len(v)),
+		any: stringptr(unsafe.StringData(v)),
+	}
+}
+
+// IntValue returns a [Value] for an int.
+func IntValue(v int) Value { return Int64Value(int64(v)) }
+
+// Int64Value returns a [Value] for an int64.
+func Int64Value(v int64) Value {
+	return Value{num: uint64(v), any: ValueKindInt64}
+}
+
+// Float64Value returns a [Value] for a float64.
+func Float64Value(v float64) Value {
+	return Value{num: math.Float64bits(v), any: ValueKindFloat64}
+}
+
+// BoolValue returns a [Value] for a bool.
+func BoolValue(v bool) Value { //nolint:revive // Not a control flag.
+	var n uint64
+	if v {
+		n = 1
+	}
+	return Value{num: n, any: ValueKindBool}
+}
+
+// BytesValue returns a [Value] for a byte slice. The passed slice must not be
+// changed after it is passed.
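+//
+// As a sketch of why: the returned Value aliases v, storing only its length
+// and a pointer to its first byte, so any later mutation of v is visible
+// through the Value.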
+func BytesValue(v []byte) Value { + return Value{ + num: uint64(len(v)), + any: bytesptr(unsafe.SliceData(v)), + } +} + +// SliceValue returns a [Value] for a slice of [Value]. The passed slice must +// not be changed after it is passed. +func SliceValue(vs ...Value) Value { + return Value{ + num: uint64(len(vs)), + any: sliceptr(unsafe.SliceData(vs)), + } +} + +// MapValue returns a new [Value] for a slice of key-value pairs. The passed +// slice must not be changed after it is passed. +func MapValue(kvs ...Attr) Value { + return Value{ + num: uint64(len(kvs)), + any: mapptr(unsafe.SliceData(kvs)), + } +} + +// AsString returns the value held by v as a string. +func (v Value) AsString() string { + if sp, ok := v.any.(stringptr); ok { + return unsafe.String(sp, v.num) + } + // TODO: error handle + return "" +} + +// asString returns the value held by v as a string. It will panic if the Value +// is not KindString. +func (v Value) asString() string { + return unsafe.String(v.any.(stringptr), v.num) +} + +// AsInt64 returns the value held by v as an int64. +func (v Value) AsInt64() int64 { + if v.Kind() != ValueKindInt64 { + // TODO: error handle + return 0 + } + return v.asInt64() +} + +// asInt64 returns the value held by v as an int64. If v is not of KindInt64, +// this will return garbage. +func (v Value) asInt64() int64 { + // Assumes v.num was a valid int64 (overflow not checked). + return int64(v.num) // nolint: gosec +} + +// AsBool returns the value held by v as a bool. +func (v Value) AsBool() bool { + if v.Kind() != ValueKindBool { + // TODO: error handle + return false + } + return v.asBool() +} + +// asBool returns the value held by v as a bool. If v is not of KindBool, this +// will return garbage. +func (v Value) asBool() bool { return v.num == 1 } + +// AsFloat64 returns the value held by v as a float64. +func (v Value) AsFloat64() float64 { + if v.Kind() != ValueKindFloat64 { + // TODO: error handle + return 0 + } + return v.asFloat64() +} + +// asFloat64 returns the value held by v as a float64. If v is not of +// KindFloat64, this will return garbage. +func (v Value) asFloat64() float64 { return math.Float64frombits(v.num) } + +// AsBytes returns the value held by v as a []byte. +func (v Value) AsBytes() []byte { + if sp, ok := v.any.(bytesptr); ok { + return unsafe.Slice((*byte)(sp), v.num) + } + // TODO: error handle + return nil +} + +// asBytes returns the value held by v as a []byte. It will panic if the Value +// is not KindBytes. +func (v Value) asBytes() []byte { + return unsafe.Slice((*byte)(v.any.(bytesptr)), v.num) +} + +// AsSlice returns the value held by v as a []Value. +func (v Value) AsSlice() []Value { + if sp, ok := v.any.(sliceptr); ok { + return unsafe.Slice((*Value)(sp), v.num) + } + // TODO: error handle + return nil +} + +// asSlice returns the value held by v as a []Value. It will panic if the Value +// is not KindSlice. +func (v Value) asSlice() []Value { + return unsafe.Slice((*Value)(v.any.(sliceptr)), v.num) +} + +// AsMap returns the value held by v as a []Attr. +func (v Value) AsMap() []Attr { + if sp, ok := v.any.(mapptr); ok { + return unsafe.Slice((*Attr)(sp), v.num) + } + // TODO: error handle + return nil +} + +// asMap returns the value held by v as a []Attr. It will panic if the +// Value is not KindMap. +func (v Value) asMap() []Attr { + return unsafe.Slice((*Attr)(v.any.(mapptr)), v.num) +} + +// Kind returns the Kind of v. 
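+//
+// For example (a non-exhaustive sketch):
+//
+//	StringValue("x").Kind() // ValueKindString
+//	Int64Value(1).Kind()    // ValueKindInt64
+//	Value{}.Kind()          // ValueKindEmpty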
+func (v Value) Kind() ValueKind {
+	switch x := v.any.(type) {
+	case ValueKind:
+		return x
+	case stringptr:
+		return ValueKindString
+	case bytesptr:
+		return ValueKindBytes
+	case sliceptr:
+		return ValueKindSlice
+	case mapptr:
+		return ValueKindMap
+	default:
+		return ValueKindEmpty
+	}
+}
+
+// Empty returns if v does not hold any value.
+func (v Value) Empty() bool { return v.Kind() == ValueKindEmpty }
+
+// Equal returns if v is equal to w.
+func (v Value) Equal(w Value) bool {
+	k1 := v.Kind()
+	k2 := w.Kind()
+	if k1 != k2 {
+		return false
+	}
+	switch k1 {
+	case ValueKindInt64, ValueKindBool:
+		return v.num == w.num
+	case ValueKindString:
+		return v.asString() == w.asString()
+	case ValueKindFloat64:
+		return v.asFloat64() == w.asFloat64()
+	case ValueKindSlice:
+		return slices.EqualFunc(v.asSlice(), w.asSlice(), Value.Equal)
+	case ValueKindMap:
+		sv := sortMap(v.asMap())
+		sw := sortMap(w.asMap())
+		return slices.EqualFunc(sv, sw, Attr.Equal)
+	case ValueKindBytes:
+		return bytes.Equal(v.asBytes(), w.asBytes())
+	case ValueKindEmpty:
+		return true
+	default:
+		// TODO: error handle
+		return false
+	}
+}
+
+func sortMap(m []Attr) []Attr {
+	sm := make([]Attr, len(m))
+	copy(sm, m)
+	slices.SortFunc(sm, func(a, b Attr) int {
+		return cmp.Compare(a.Key, b.Key)
+	})
+
+	return sm
+}
+
+// String returns Value's value as a string, formatted like [fmt.Sprint].
+//
+// The returned string is meant for debugging;
+// the string representation is not stable.
+func (v Value) String() string {
+	switch v.Kind() {
+	case ValueKindString:
+		return v.asString()
+	case ValueKindInt64:
+		// Assumes v.num was a valid int64 (overflow not checked).
+		return strconv.FormatInt(int64(v.num), 10) // nolint: gosec
+	case ValueKindFloat64:
+		return strconv.FormatFloat(v.asFloat64(), 'g', -1, 64)
+	case ValueKindBool:
+		return strconv.FormatBool(v.asBool())
+	case ValueKindBytes:
+		return fmt.Sprint(v.asBytes())
+	case ValueKindMap:
+		return fmt.Sprint(v.asMap())
+	case ValueKindSlice:
+		return fmt.Sprint(v.asSlice())
+	case ValueKindEmpty:
+		return ""
+	default:
+		// Try to handle this as gracefully as possible.
+		//
+		// Don't panic here. The goal is to have developers find this first
+		// if a Kind is not handled. It is preferable to have users open an
+		// issue asking why their attributes have an "unhandled: " prefix
+		// than to have their code panic.
+		return fmt.Sprintf("<unhandled: %s>", v.Kind())
+	}
+}
+
+// MarshalJSON encodes v into OTLP formatted JSON.
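+//
+// OTLP JSON wraps every value in a single-field object keyed by its kind;
+// as a sketch of the expected output:
+//
+//	v := StringValue("x")
+//	b, _ := v.MarshalJSON() // {"stringValue":"x"}
+//	i := Int64Value(1)
+//	b, _ = i.MarshalJSON() // {"intValue":"1"}, int64 is encoded as a string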
+func (v *Value) MarshalJSON() ([]byte, error) { + switch v.Kind() { + case ValueKindString: + return json.Marshal(struct { + Value string `json:"stringValue"` + }{v.asString()}) + case ValueKindInt64: + return json.Marshal(struct { + Value string `json:"intValue"` + }{strconv.FormatInt(int64(v.num), 10)}) + case ValueKindFloat64: + return json.Marshal(struct { + Value float64 `json:"doubleValue"` + }{v.asFloat64()}) + case ValueKindBool: + return json.Marshal(struct { + Value bool `json:"boolValue"` + }{v.asBool()}) + case ValueKindBytes: + return json.Marshal(struct { + Value []byte `json:"bytesValue"` + }{v.asBytes()}) + case ValueKindMap: + return json.Marshal(struct { + Value struct { + Values []Attr `json:"values"` + } `json:"kvlistValue"` + }{struct { + Values []Attr `json:"values"` + }{v.asMap()}}) + case ValueKindSlice: + return json.Marshal(struct { + Value struct { + Values []Value `json:"values"` + } `json:"arrayValue"` + }{struct { + Values []Value `json:"values"` + }{v.asSlice()}}) + case ValueKindEmpty: + return nil, nil + default: + return nil, fmt.Errorf("unknown Value kind: %s", v.Kind().String()) + } +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into v. +func (v *Value) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid Value type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid Value key: %#v", keyIface) + } + + switch key { + case "stringValue", "string_value": + var val string + err = decoder.Decode(&val) + *v = StringValue(val) + case "boolValue", "bool_value": + var val bool + err = decoder.Decode(&val) + *v = BoolValue(val) + case "intValue", "int_value": + var val protoInt64 + err = decoder.Decode(&val) + *v = Int64Value(val.Int64()) + case "doubleValue", "double_value": + var val float64 + err = decoder.Decode(&val) + *v = Float64Value(val) + case "bytesValue", "bytes_value": + var val64 string + if err := decoder.Decode(&val64); err != nil { + return err + } + var val []byte + val, err = base64.StdEncoding.DecodeString(val64) + *v = BytesValue(val) + case "arrayValue", "array_value": + var val struct{ Values []Value } + err = decoder.Decode(&val) + *v = SliceValue(val.Values...) + case "kvlistValue", "kvlist_value": + var val struct{ Values []Attr } + err = decoder.Decode(&val) + *v = MapValue(val.Values...) + default: + // Skip unknown. + continue + } + // Use first valid. Ignore the rest. + return err + } + + // Only unknown fields. Return nil without unmarshaling any value. + return nil +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/limit.go b/vendor/go.opentelemetry.io/auto/sdk/limit.go new file mode 100644 index 00000000000..86babf1a885 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/limit.go @@ -0,0 +1,94 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package sdk + +import ( + "log/slog" + "os" + "strconv" +) + +// maxSpan are the span limits resolved during startup. +var maxSpan = newSpanLimits() + +type spanLimits struct { + // Attrs is the number of allowed attributes for a span. + // + // This is resolved from the environment variable value for the + // OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT key if it exists. 
Otherwise, the
+	// environment variable value for OTEL_ATTRIBUTE_COUNT_LIMIT, or 128 if
+	// that is not set, is used.
+	Attrs int
+	// AttrValueLen is the maximum attribute value length allowed for a span.
+	//
+	// This is resolved from the environment variable value for the
+	// OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT key if it exists. Otherwise, the
+	// environment variable value for OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT, or -1
+	// if that is not set, is used.
+	AttrValueLen int
+	// Events is the number of allowed events for a span.
+	//
+	// This is resolved from the environment variable value for the
+	// OTEL_SPAN_EVENT_COUNT_LIMIT key, or 128 is used if that is not set.
+	Events int
+	// EventAttrs is the number of allowed attributes for a span event.
+	//
+	// This is resolved from the environment variable value for the
+	// OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT key, or 128 is used if that is not set.
+	EventAttrs int
+	// Links is the number of allowed Links for a span.
+	//
+	// This is resolved from the environment variable value for the
+	// OTEL_SPAN_LINK_COUNT_LIMIT, or 128 is used if that is not set.
+	Links int
+	// LinkAttrs is the number of allowed attributes for a span link.
+	//
+	// This is resolved from the environment variable value for the
+	// OTEL_LINK_ATTRIBUTE_COUNT_LIMIT, or 128 is used if that is not set.
+	LinkAttrs int
+}
+
+func newSpanLimits() spanLimits {
+	return spanLimits{
+		Attrs: firstEnv(
+			128,
+			"OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT",
+			"OTEL_ATTRIBUTE_COUNT_LIMIT",
+		),
+		AttrValueLen: firstEnv(
+			-1, // Unlimited.
+			"OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT",
+			"OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT",
+		),
+		Events:     firstEnv(128, "OTEL_SPAN_EVENT_COUNT_LIMIT"),
+		EventAttrs: firstEnv(128, "OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT"),
+		Links:      firstEnv(128, "OTEL_SPAN_LINK_COUNT_LIMIT"),
+		LinkAttrs:  firstEnv(128, "OTEL_LINK_ATTRIBUTE_COUNT_LIMIT"),
+	}
+}
+
+// firstEnv returns the parsed integer value of the first matching environment
+// variable from keys. The defaultVal is returned if none of the keys holds a
+// valid integer value.
+func firstEnv(defaultVal int, keys ...string) int {
+	for _, key := range keys {
+		strV := os.Getenv(key)
+		if strV == "" {
+			continue
+		}
+
+		v, err := strconv.Atoi(strV)
+		if err == nil {
+			return v
+		}
+		slog.Warn(
+			"invalid limit environment variable",
+			"error", err,
+			"key", key,
+			"value", strV,
+		)
+	}
+
+	return defaultVal
+}
diff --git a/vendor/go.opentelemetry.io/auto/sdk/span.go b/vendor/go.opentelemetry.io/auto/sdk/span.go
new file mode 100644
index 00000000000..6ebea12a9e9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/span.go
@@ -0,0 +1,432 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package sdk
+
+import (
+	"encoding/json"
+	"fmt"
+	"reflect"
+	"runtime"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+	"unicode/utf8"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/codes"
+	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
+	"go.opentelemetry.io/otel/trace"
+	"go.opentelemetry.io/otel/trace/noop"
+
+	"go.opentelemetry.io/auto/sdk/internal/telemetry"
+)
+
+type span struct {
+	noop.Span
+
+	spanContext trace.SpanContext
+	sampled     atomic.Bool
+
+	mu     sync.Mutex
+	traces *telemetry.Traces
+	span   *telemetry.Span
+}
+
+func (s *span) SpanContext() trace.SpanContext {
+	if s == nil {
+		return trace.SpanContext{}
+	}
+	// s.spanContext is immutable, do not acquire lock s.mu.
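+	// Reading it without synchronization is therefore safe even while
+	// other goroutines mutate the span's guarded fields under s.mu.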
+ return s.spanContext +} + +func (s *span) IsRecording() bool { + if s == nil { + return false + } + + return s.sampled.Load() +} + +func (s *span) SetStatus(c codes.Code, msg string) { + if s == nil || !s.sampled.Load() { + return + } + + s.mu.Lock() + defer s.mu.Unlock() + + if s.span.Status == nil { + s.span.Status = new(telemetry.Status) + } + + s.span.Status.Message = msg + + switch c { + case codes.Unset: + s.span.Status.Code = telemetry.StatusCodeUnset + case codes.Error: + s.span.Status.Code = telemetry.StatusCodeError + case codes.Ok: + s.span.Status.Code = telemetry.StatusCodeOK + } +} + +func (s *span) SetAttributes(attrs ...attribute.KeyValue) { + if s == nil || !s.sampled.Load() { + return + } + + s.mu.Lock() + defer s.mu.Unlock() + + limit := maxSpan.Attrs + if limit == 0 { + // No attributes allowed. + s.span.DroppedAttrs += uint32(len(attrs)) + return + } + + m := make(map[string]int) + for i, a := range s.span.Attrs { + m[a.Key] = i + } + + for _, a := range attrs { + val := convAttrValue(a.Value) + if val.Empty() { + s.span.DroppedAttrs++ + continue + } + + if idx, ok := m[string(a.Key)]; ok { + s.span.Attrs[idx] = telemetry.Attr{ + Key: string(a.Key), + Value: val, + } + } else if limit < 0 || len(s.span.Attrs) < limit { + s.span.Attrs = append(s.span.Attrs, telemetry.Attr{ + Key: string(a.Key), + Value: val, + }) + m[string(a.Key)] = len(s.span.Attrs) - 1 + } else { + s.span.DroppedAttrs++ + } + } +} + +// convCappedAttrs converts up to limit attrs into a []telemetry.Attr. The +// number of dropped attributes is also returned. +func convCappedAttrs(limit int, attrs []attribute.KeyValue) ([]telemetry.Attr, uint32) { + if limit == 0 { + return nil, uint32(len(attrs)) + } + + if limit < 0 { + // Unlimited. + return convAttrs(attrs), 0 + } + + limit = min(len(attrs), limit) + return convAttrs(attrs[:limit]), uint32(len(attrs) - limit) +} + +func convAttrs(attrs []attribute.KeyValue) []telemetry.Attr { + if len(attrs) == 0 { + // Avoid allocations if not necessary. + return nil + } + + out := make([]telemetry.Attr, 0, len(attrs)) + for _, attr := range attrs { + key := string(attr.Key) + val := convAttrValue(attr.Value) + if val.Empty() { + continue + } + out = append(out, telemetry.Attr{Key: key, Value: val}) + } + return out +} + +func convAttrValue(value attribute.Value) telemetry.Value { + switch value.Type() { + case attribute.BOOL: + return telemetry.BoolValue(value.AsBool()) + case attribute.INT64: + return telemetry.Int64Value(value.AsInt64()) + case attribute.FLOAT64: + return telemetry.Float64Value(value.AsFloat64()) + case attribute.STRING: + v := truncate(maxSpan.AttrValueLen, value.AsString()) + return telemetry.StringValue(v) + case attribute.BOOLSLICE: + slice := value.AsBoolSlice() + out := make([]telemetry.Value, 0, len(slice)) + for _, v := range slice { + out = append(out, telemetry.BoolValue(v)) + } + return telemetry.SliceValue(out...) + case attribute.INT64SLICE: + slice := value.AsInt64Slice() + out := make([]telemetry.Value, 0, len(slice)) + for _, v := range slice { + out = append(out, telemetry.Int64Value(v)) + } + return telemetry.SliceValue(out...) + case attribute.FLOAT64SLICE: + slice := value.AsFloat64Slice() + out := make([]telemetry.Value, 0, len(slice)) + for _, v := range slice { + out = append(out, telemetry.Float64Value(v)) + } + return telemetry.SliceValue(out...) 
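+	// Each attribute.Value variant maps onto the telemetry.Value union;
+	// as a sketch, attribute.Bool("k", true).Value becomes BoolValue(true),
+	// and the slice variants become SliceValue wrappers over per-element
+	// conversions, as below for strings.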
+	case attribute.STRINGSLICE:
+		slice := value.AsStringSlice()
+		out := make([]telemetry.Value, 0, len(slice))
+		for _, v := range slice {
+			v = truncate(maxSpan.AttrValueLen, v)
+			out = append(out, telemetry.StringValue(v))
+		}
+		return telemetry.SliceValue(out...)
+	}
+	return telemetry.Value{}
+}
+
+// truncate returns a truncated version of s such that it contains at most
+// the limit number of characters. Truncation is applied by returning the
+// first limit number of valid characters contained in s.
+//
+// If limit is negative, it returns the original string.
+//
+// UTF-8 is supported. When truncating, all invalid characters are dropped
+// before applying truncation.
+//
+// If s already contains no more than the limit number of bytes, it is
+// returned unchanged. No invalid characters are removed.
+func truncate(limit int, s string) string {
+	// This prioritizes performance in the following order based on the most
+	// common expected use-cases.
+	//
+	//  - Short values less than the default limit (128).
+	//  - Strings with valid encodings that exceed the limit.
+	//  - No limit.
+	//  - Strings with invalid encodings that exceed the limit.
+	if limit < 0 || len(s) <= limit {
+		return s
+	}
+
+	// Optimistically, assume all valid UTF-8.
+	var b strings.Builder
+	count := 0
+	for i, c := range s {
+		if c != utf8.RuneError {
+			count++
+			if count > limit {
+				return s[:i]
+			}
+			continue
+		}
+
+		_, size := utf8.DecodeRuneInString(s[i:])
+		if size == 1 {
+			// Invalid encoding.
+			b.Grow(len(s) - 1)
+			_, _ = b.WriteString(s[:i])
+			s = s[i:]
+			break
+		}
+	}
+
+	// Fast-path, no invalid input.
+	if b.Cap() == 0 {
+		return s
+	}
+
+	// Truncate while validating UTF-8.
+	for i := 0; i < len(s) && count < limit; {
+		c := s[i]
+		if c < utf8.RuneSelf {
+			// Optimization for single byte runes (common case).
+			_ = b.WriteByte(c)
+			i++
+			count++
+			continue
+		}
+
+		_, size := utf8.DecodeRuneInString(s[i:])
+		if size == 1 {
+			// We checked for all 1-byte runes above, this is a RuneError.
+			i++
+			continue
+		}
+
+		_, _ = b.WriteString(s[i : i+size])
+		i += size
+		count++
+	}
+
+	return b.String()
+}
+
+func (s *span) End(opts ...trace.SpanEndOption) {
+	if s == nil || !s.sampled.Swap(false) {
+		return
+	}
+
+	// s.end exists so the lock (s.mu) is not held while s.ended is called.
+	s.ended(s.end(opts))
+}
+
+func (s *span) end(opts []trace.SpanEndOption) []byte {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	cfg := trace.NewSpanEndConfig(opts...)
+	if t := cfg.Timestamp(); !t.IsZero() {
+		s.span.EndTime = cfg.Timestamp()
+	} else {
+		s.span.EndTime = time.Now()
+	}
+
+	b, _ := json.Marshal(s.traces) // TODO: do not ignore this error.
+	return b
+}
+
+// Expected to be implemented in eBPF.
+//
+//go:noinline
+func (*span) ended(buf []byte) { ended(buf) }
+
+// ended is used for testing.
+var ended = func([]byte) {}
+
+func (s *span) RecordError(err error, opts ...trace.EventOption) {
+	if s == nil || err == nil || !s.sampled.Load() {
+		return
+	}
+
+	cfg := trace.NewEventConfig(opts...)
+
+	attrs := cfg.Attributes()
+	attrs = append(attrs,
+		semconv.ExceptionType(typeStr(err)),
+		semconv.ExceptionMessage(err.Error()),
+	)
+	if cfg.StackTrace() {
+		buf := make([]byte, 2048)
+		n := runtime.Stack(buf, false)
+		attrs = append(attrs, semconv.ExceptionStacktrace(string(buf[0:n])))
+	}
+
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	s.addEvent(semconv.ExceptionEventName, cfg.Timestamp(), attrs)
+}
+
+func typeStr(i any) string {
+	t := reflect.TypeOf(i)
+	if t.PkgPath() == "" && t.Name() == "" {
+		// Likely a builtin type.
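+		// (or an unnamed one, e.g. the *errors.errorString returned by
+		// errors.New, for which t.String() yields "*errors.errorString").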
+ return t.String() + } + return fmt.Sprintf("%s.%s", t.PkgPath(), t.Name()) +} + +func (s *span) AddEvent(name string, opts ...trace.EventOption) { + if s == nil || !s.sampled.Load() { + return + } + + cfg := trace.NewEventConfig(opts...) + + s.mu.Lock() + defer s.mu.Unlock() + + s.addEvent(name, cfg.Timestamp(), cfg.Attributes()) +} + +// addEvent adds an event with name and attrs at tStamp to the span. The span +// lock (s.mu) needs to be held by the caller. +func (s *span) addEvent(name string, tStamp time.Time, attrs []attribute.KeyValue) { + limit := maxSpan.Events + + if limit == 0 { + s.span.DroppedEvents++ + return + } + + if limit > 0 && len(s.span.Events) == limit { + // Drop head while avoiding allocation of more capacity. + copy(s.span.Events[:limit-1], s.span.Events[1:]) + s.span.Events = s.span.Events[:limit-1] + s.span.DroppedEvents++ + } + + e := &telemetry.SpanEvent{Time: tStamp, Name: name} + e.Attrs, e.DroppedAttrs = convCappedAttrs(maxSpan.EventAttrs, attrs) + + s.span.Events = append(s.span.Events, e) +} + +func (s *span) AddLink(link trace.Link) { + if s == nil || !s.sampled.Load() { + return + } + + l := maxSpan.Links + + s.mu.Lock() + defer s.mu.Unlock() + + if l == 0 { + s.span.DroppedLinks++ + return + } + + if l > 0 && len(s.span.Links) == l { + // Drop head while avoiding allocation of more capacity. + copy(s.span.Links[:l-1], s.span.Links[1:]) + s.span.Links = s.span.Links[:l-1] + s.span.DroppedLinks++ + } + + s.span.Links = append(s.span.Links, convLink(link)) +} + +func convLinks(links []trace.Link) []*telemetry.SpanLink { + out := make([]*telemetry.SpanLink, 0, len(links)) + for _, link := range links { + out = append(out, convLink(link)) + } + return out +} + +func convLink(link trace.Link) *telemetry.SpanLink { + l := &telemetry.SpanLink{ + TraceID: telemetry.TraceID(link.SpanContext.TraceID()), + SpanID: telemetry.SpanID(link.SpanContext.SpanID()), + TraceState: link.SpanContext.TraceState().String(), + Flags: uint32(link.SpanContext.TraceFlags()), + } + l.Attrs, l.DroppedAttrs = convCappedAttrs(maxSpan.LinkAttrs, link.Attributes) + + return l +} + +func (s *span) SetName(name string) { + if s == nil || !s.sampled.Load() { + return + } + + s.mu.Lock() + defer s.mu.Unlock() + + s.span.Name = name +} + +func (*span) TracerProvider() trace.TracerProvider { return TracerProvider() } diff --git a/vendor/go.opentelemetry.io/auto/sdk/tracer.go b/vendor/go.opentelemetry.io/auto/sdk/tracer.go new file mode 100644 index 00000000000..cbcfabde3b1 --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/tracer.go @@ -0,0 +1,124 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package sdk + +import ( + "context" + "time" + + "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/noop" + + "go.opentelemetry.io/auto/sdk/internal/telemetry" +) + +type tracer struct { + noop.Tracer + + name, schemaURL, version string +} + +var _ trace.Tracer = tracer{} + +func (t tracer) Start(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { + var psc trace.SpanContext + sampled := true + span := new(span) + + // Ask eBPF for sampling decision and span context info. + t.start(ctx, span, &psc, &sampled, &span.spanContext) + + span.sampled.Store(sampled) + + ctx = trace.ContextWithSpan(ctx, span) + + if sampled { + // Only build traces if sampled. + cfg := trace.NewSpanStartConfig(opts...) 
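+		// The telemetry payload is only materialized for sampled spans;
+		// unsampled spans remain cheap no-ops. The traces method below
+		// builds the Traces -> ResourceSpans -> ScopeSpans -> Span chain
+		// carrying this tracer's name, version, and schema URL.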
+ span.traces, span.span = t.traces(name, cfg, span.spanContext, psc) + } + + return ctx, span +} + +// Expected to be implemented in eBPF. +// +//go:noinline +func (t *tracer) start( + ctx context.Context, + spanPtr *span, + psc *trace.SpanContext, + sampled *bool, + sc *trace.SpanContext, +) { + start(ctx, spanPtr, psc, sampled, sc) +} + +// start is used for testing. +var start = func(context.Context, *span, *trace.SpanContext, *bool, *trace.SpanContext) {} + +func (t tracer) traces(name string, cfg trace.SpanConfig, sc, psc trace.SpanContext) (*telemetry.Traces, *telemetry.Span) { + span := &telemetry.Span{ + TraceID: telemetry.TraceID(sc.TraceID()), + SpanID: telemetry.SpanID(sc.SpanID()), + Flags: uint32(sc.TraceFlags()), + TraceState: sc.TraceState().String(), + ParentSpanID: telemetry.SpanID(psc.SpanID()), + Name: name, + Kind: spanKind(cfg.SpanKind()), + } + + span.Attrs, span.DroppedAttrs = convCappedAttrs(maxSpan.Attrs, cfg.Attributes()) + + links := cfg.Links() + if limit := maxSpan.Links; limit == 0 { + span.DroppedLinks = uint32(len(links)) + } else { + if limit > 0 { + n := max(len(links)-limit, 0) + span.DroppedLinks = uint32(n) + links = links[n:] + } + span.Links = convLinks(links) + } + + if t := cfg.Timestamp(); !t.IsZero() { + span.StartTime = cfg.Timestamp() + } else { + span.StartTime = time.Now() + } + + return &telemetry.Traces{ + ResourceSpans: []*telemetry.ResourceSpans{ + { + ScopeSpans: []*telemetry.ScopeSpans{ + { + Scope: &telemetry.Scope{ + Name: t.name, + Version: t.version, + }, + Spans: []*telemetry.Span{span}, + SchemaURL: t.schemaURL, + }, + }, + }, + }, + }, span +} + +func spanKind(kind trace.SpanKind) telemetry.SpanKind { + switch kind { + case trace.SpanKindInternal: + return telemetry.SpanKindInternal + case trace.SpanKindServer: + return telemetry.SpanKindServer + case trace.SpanKindClient: + return telemetry.SpanKindClient + case trace.SpanKindProducer: + return telemetry.SpanKindProducer + case trace.SpanKindConsumer: + return telemetry.SpanKindConsumer + } + return telemetry.SpanKind(0) // undefined. +} diff --git a/vendor/go.opentelemetry.io/auto/sdk/tracer_provider.go b/vendor/go.opentelemetry.io/auto/sdk/tracer_provider.go new file mode 100644 index 00000000000..dbc477a59ad --- /dev/null +++ b/vendor/go.opentelemetry.io/auto/sdk/tracer_provider.go @@ -0,0 +1,33 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package sdk + +import ( + "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/noop" +) + +// TracerProvider returns an auto-instrumentable [trace.TracerProvider]. +// +// If an [go.opentelemetry.io/auto.Instrumentation] is configured to instrument +// the process using the returned TracerProvider, all of the telemetry it +// produces will be processed and handled by that Instrumentation. By default, +// if no Instrumentation instruments the TracerProvider it will not generate +// any trace telemetry. +func TracerProvider() trace.TracerProvider { return tracerProviderInstance } + +var tracerProviderInstance = new(tracerProvider) + +type tracerProvider struct{ noop.TracerProvider } + +var _ trace.TracerProvider = tracerProvider{} + +func (p tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer { + cfg := trace.NewTracerConfig(opts...) 
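+	// As a usage sketch (names are illustrative):
+	//
+	//	tp := sdk.TracerProvider()
+	//	tr := tp.Tracer("example", trace.WithInstrumentationVersion("v0.1.0"))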
+ return tracer{ + name: name, + version: cfg.InstrumentationVersion(), + schemaURL: cfg.SchemaURL(), + } +} diff --git a/vendor/go.opentelemetry.io/collector/client/LICENSE b/vendor/go.opentelemetry.io/collector/client/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/client/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/collector/client/Makefile b/vendor/go.opentelemetry.io/collector/client/Makefile new file mode 100644 index 00000000000..39734bfaebb --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/client/Makefile @@ -0,0 +1 @@ +include ../Makefile.Common diff --git a/vendor/go.opentelemetry.io/collector/component/component.go b/vendor/go.opentelemetry.io/collector/component/component.go index 794fc9235a9..0a0c160fc59 100644 --- a/vendor/go.opentelemetry.io/collector/component/component.go +++ b/vendor/go.opentelemetry.io/collector/component/component.go @@ -1,22 +1,17 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -// Package component outlines the abstraction of components within the OpenTelemetry Collector. It provides details on the component +// Package component outlines the abstraction of components within the OpenTelemetry Collector. It provides details on the component // lifecycle as well as defining the interface that components must fulfill. package component // import "go.opentelemetry.io/collector/component" import ( "context" - "errors" + "fmt" + "strings" ) -var ( - // ErrDataTypeIsNotSupported can be returned by receiver, exporter or processor factory funcs that create the - // Component if the particular telemetry data type is not supported by the receiver, exporter or processor. - ErrDataTypeIsNotSupported = errors.New("telemetry type is not supported") -) - -// Component is either a receiver, exporter, processor, or an extension. +// Component is either a receiver, exporter, processor, connector, or an extension. // // A component's lifecycle has the following phases: // @@ -113,7 +108,7 @@ func (k Kind) String() string { // StabilityLevel represents the stability level of the component created by the factory. // The stability level is used to determine if the component should be used in production // or not. 
For more details see: -// https://github.com/open-telemetry/opentelemetry-collector#stability-levels +// https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-stability.md#stability-levels type StabilityLevel int const ( @@ -126,6 +121,29 @@ const ( StabilityLevelStable ) +func (sl *StabilityLevel) UnmarshalText(in []byte) error { + str := strings.ToLower(string(in)) + switch str { + case "undefined": + *sl = StabilityLevelUndefined + case "unmaintained": + *sl = StabilityLevelUnmaintained + case "deprecated": + *sl = StabilityLevelDeprecated + case "development": + *sl = StabilityLevelDevelopment + case "alpha": + *sl = StabilityLevelAlpha + case "beta": + *sl = StabilityLevelBeta + case "stable": + *sl = StabilityLevelStable + default: + return fmt.Errorf("unsupported stability level: %q", string(in)) + } + return nil +} + func (sl StabilityLevel) String() string { switch sl { case StabilityLevelUndefined: @@ -149,7 +167,7 @@ func (sl StabilityLevel) String() string { func (sl StabilityLevel) LogMessage() string { switch sl { case StabilityLevelUnmaintained: - return "Unmaintained component. Actively looking for contributors. Component will become deprecated after 6 months of remaining unmaintained." + return "Unmaintained component. Actively looking for contributors. Component will become deprecated after 3 months of remaining unmaintained." case StabilityLevelDeprecated: return "Deprecated component. Will be removed in future releases." case StabilityLevelDevelopment: @@ -160,21 +178,19 @@ func (sl StabilityLevel) LogMessage() string { return "Beta component. May change in the future." case StabilityLevelStable: return "Stable component." + default: + return "Stability level of component is undefined" } - return "Stability level of component is undefined" } // Factory is implemented by all Component factories. -// -// This interface cannot be directly implemented. Implementations must -// use the factory helpers for the appropriate component type. type Factory interface { // Type gets the type of the component created by this factory. Type() Type // CreateDefaultConfig creates the default configuration for the Component. // This method can be called multiple times depending on the pipeline - // configuration and should not cause side-effects that prevent the creation + // configuration and should not cause side effects that prevent the creation // of multiple instances of the Component. // The object returned by this method needs to pass the checks implemented by // 'componenttest.CheckConfigStruct'. It is recommended to have these checks in the @@ -189,10 +205,3 @@ type CreateDefaultConfigFunc func() Config func (f CreateDefaultConfigFunc) CreateDefaultConfig() Config { return f() } - -// InstanceID uniquely identifies a component instance -type InstanceID struct { - ID ID - Kind Kind - PipelineIDs map[ID]struct{} -} diff --git a/vendor/go.opentelemetry.io/collector/component/componentstatus/LICENSE b/vendor/go.opentelemetry.io/collector/component/componentstatus/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/component/componentstatus/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
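Stepping back to the component/factory.go hunk above (before this license text): the newly added StabilityLevel.UnmarshalText makes stability levels parseable from text, lowercasing the input and rejecting unknown values. A small round-trip sketch, illustrative rather than part of the vendored code:

    // Illustrative sketch only, not part of this patch.
    package main

    import (
        "fmt"

        "go.opentelemetry.io/collector/component"
    )

    func main() {
        var sl component.StabilityLevel
        // Matching is case-insensitive; unsupported values return an error.
        if err := sl.UnmarshalText([]byte("Beta")); err != nil {
            panic(err)
        }
        fmt.Println(sl.LogMessage()) // "Beta component. May change in the future."
    }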
diff --git a/vendor/go.opentelemetry.io/collector/component/componentstatus/Makefile b/vendor/go.opentelemetry.io/collector/component/componentstatus/Makefile
new file mode 100644
index 00000000000..ded7a36092d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/component/componentstatus/Makefile
@@ -0,0 +1 @@
+include ../../Makefile.Common
diff --git a/vendor/go.opentelemetry.io/collector/component/componentstatus/instance.go b/vendor/go.opentelemetry.io/collector/component/componentstatus/instance.go
new file mode 100644
index 00000000000..a654b7f7a87
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/component/componentstatus/instance.go
@@ -0,0 +1,91 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package componentstatus // import "go.opentelemetry.io/collector/component/componentstatus"
+
+import (
+	"slices"
+	"sort"
+	"strings"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/pipeline"
+)
+
+// pipelineDelim is the delimiter for internal representation of pipeline
+// component IDs.
+const pipelineDelim = byte(0x20)
+
+// InstanceID uniquely identifies a component instance
+//
+// TODO: consider moving this struct to a new package/module like `extension/statuswatcher`
+// https://github.com/open-telemetry/opentelemetry-collector/issues/10764
+type InstanceID struct {
+	componentID component.ID
+	kind        component.Kind
+	pipelineIDs string // IDs encoded as a string so InstanceID is Comparable.
+}
+
+// NewInstanceID returns an ID that uniquely identifies a component.
+func NewInstanceID(componentID component.ID, kind component.Kind, pipelineIDs ...pipeline.ID) *InstanceID {
+	instanceID := &InstanceID{
+		componentID: componentID,
+		kind:        kind,
+	}
+	instanceID.addPipelines(pipelineIDs)
+	return instanceID
+}
+
+// ComponentID returns the ComponentID associated with this instance.
+func (id *InstanceID) ComponentID() component.ID {
+	return id.componentID
+}
+
+// Kind returns the component Kind associated with this instance.
+func (id *InstanceID) Kind() component.Kind {
+	return id.kind
+}
+
+// AllPipelineIDs calls f for each pipeline this instance is associated with. If
+// f returns false it will stop iteration.
+func (id *InstanceID) AllPipelineIDs(f func(pipeline.ID) bool) {
+	var bs []byte
+	for _, b := range []byte(id.pipelineIDs) {
+		if b != pipelineDelim {
+			bs = append(bs, b)
+			continue
+		}
+		pipelineID := pipeline.ID{}
+		err := pipelineID.UnmarshalText(bs)
+		bs = bs[:0]
+		if err != nil {
+			continue
+		}
+		if !f(pipelineID) {
+			break
+		}
+	}
+}
+
+// WithPipelines returns a new InstanceID updated to include the given
+// pipelineIDs.
+func (id *InstanceID) WithPipelines(pipelineIDs ...pipeline.ID) *InstanceID {
+	instanceID := &InstanceID{
+		componentID: id.componentID,
+		kind:        id.kind,
+		pipelineIDs: id.pipelineIDs,
+	}
+	instanceID.addPipelines(pipelineIDs)
+	return instanceID
+}
+
+func (id *InstanceID) addPipelines(pipelineIDs []pipeline.ID) {
+	delim := string(pipelineDelim)
+	strIDs := strings.Split(id.pipelineIDs, delim)
+	for _, pID := range pipelineIDs {
+		strIDs = append(strIDs, pID.String())
+	}
+	sort.Strings(strIDs)
+	strIDs = slices.Compact(strIDs)
+	id.pipelineIDs = strings.Join(strIDs, delim) + delim
+}
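The string-encoded pipelineIDs field above is what keeps InstanceID comparable (and therefore usable as a map key) while still carrying a set of pipeline IDs; addPipelines stores the IDs sorted and de-duplicated. An illustrative sketch of the new API, not part of the vendored code; it assumes component.MustNewID exists in this collector version, and parses the pipeline ID literal the same way AllPipelineIDs parses the stored string:

    // Illustrative sketch only, not part of this patch.
    var traces pipeline.ID
    _ = traces.UnmarshalText([]byte("traces")) // same parsing AllPipelineIDs relies on

    instance := componentstatus.NewInstanceID(
        component.MustNewID("otlp"), // assumed helper from the component package
        component.KindReceiver,
        traces,
    )
    instance.AllPipelineIDs(func(p pipeline.ID) bool {
        fmt.Println(p.String()) // IDs come back sorted and de-duplicated
        return true             // returning false would stop the iteration
    })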
diff --git a/vendor/go.opentelemetry.io/collector/component/componentstatus/status.go b/vendor/go.opentelemetry.io/collector/component/componentstatus/status.go
new file mode 100644
index 00000000000..c55fff3ffa3
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/component/componentstatus/status.go
@@ -0,0 +1,160 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package componentstatus is an experimental module that defines how components should
+// report health statuses, how collector hosts should facilitate component status reporting,
+// and how extensions should watch for new component statuses.
+//
+// This package is currently under development and is exempt from the Collector SIG's
+// breaking change policy.
+package componentstatus // import "go.opentelemetry.io/collector/component/componentstatus"
+
+import (
+	"time"
+
+	"go.opentelemetry.io/collector/component"
+)
+
+// Reporter is an extra interface for `component.Host` implementations.
+// A Reporter defines how to report a `componentstatus.Event`.
+type Reporter interface {
+	// Report allows a component to report runtime changes in status. The service
+	// will automatically report status for a component during startup and shutdown. Components can
+	// use this method to report status after start and before shutdown. For more details about
+	// component status reporting see: https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-status.md
+	Report(*Event)
+}
+
+// Watcher is an extra interface for Extensions hosted by the OpenTelemetry
+// Collector, to be implemented by extensions interested in changes to component
+// status.
+//
+// TODO: consider moving this interface to a new package/module like `extension/statuswatcher`
+// https://github.com/open-telemetry/opentelemetry-collector/issues/10764
+type Watcher interface {
+	// ComponentStatusChanged notifies about a change in the source component status.
+	// Extensions that implement this interface must be prepared for ComponentStatusChanged
+	// to be called before, after, or concurrently with calls to Component.Start() and Component.Shutdown().
+	// The function may be called concurrently with itself.
+	ComponentStatusChanged(source *InstanceID, event *Event)
+}
+
+type Status int32
+
+// Enumeration of possible component statuses
+const (
+	// StatusNone indicates the absence of a component status.
+	StatusNone Status = iota
+	// StatusStarting indicates the component is starting.
+	StatusStarting
+	// StatusOK indicates the component is running without issues.
+	StatusOK
+	// StatusRecoverableError indicates that the component has experienced a transient error and may recover.
+	StatusRecoverableError
+	// StatusPermanentError indicates that the component has detected a condition at runtime that will need human intervention to fix. The collector will continue to run in a degraded mode.
+	StatusPermanentError
+	// StatusFatalError indicates that the collector has experienced a fatal runtime error and will shut down.
+	StatusFatalError
+	// StatusStopping indicates that the component is in the process of shutting down.
+	StatusStopping
+	// StatusStopped indicates that the component has completed shutdown.
+	StatusStopped
+)
+
+// String returns a string representation of a Status
+func (s Status) String() string {
+	switch s {
+	case StatusStarting:
+		return "StatusStarting"
+	case StatusOK:
+		return "StatusOK"
+	case StatusRecoverableError:
+		return "StatusRecoverableError"
+	case StatusPermanentError:
+		return "StatusPermanentError"
+	case StatusFatalError:
+		return "StatusFatalError"
+	case StatusStopping:
+		return "StatusStopping"
+	case StatusStopped:
+		return "StatusStopped"
+	}
+	return "StatusNone"
+}
+
+// Event contains a status and timestamp, and can contain an error
+type Event struct {
+	status Status
+	err    error
+	// TODO: consider if a timestamp is necessary in the default Event struct or is needed only for the healthcheckv2 extension
+	// https://github.com/open-telemetry/opentelemetry-collector/issues/10763
+	timestamp time.Time
+}
+
+// Status returns the Status (enum) associated with the Event
+func (ev *Event) Status() Status {
+	return ev.status
+}
+
+// Err returns the error associated with the Event.
+func (ev *Event) Err() error {
+	return ev.err
+}
+
+// Timestamp returns the timestamp associated with the Event
+func (ev *Event) Timestamp() time.Time {
+	return ev.timestamp
+}
+
+// NewEvent creates and returns an Event with the specified status and sets the timestamp to
+// time.Now(). To set an error on the event for an error status, use one of the dedicated
+// constructors (e.g. NewRecoverableErrorEvent, NewPermanentErrorEvent, NewFatalErrorEvent)
+func NewEvent(status Status) *Event {
+	return &Event{
+		status:    status,
+		timestamp: time.Now(),
+	}
+}
+
+// NewRecoverableErrorEvent wraps a transient error
+// passed as an argument as an Event with status StatusRecoverableError
+// and a timestamp set to time.Now().
+func NewRecoverableErrorEvent(err error) *Event {
+	ev := NewEvent(StatusRecoverableError)
+	ev.err = err
+	return ev
+}
+
+// NewPermanentErrorEvent wraps an error requiring human intervention to fix
+// passed as an argument as an Event with status StatusPermanentError
+// and a timestamp set to time.Now().
+func NewPermanentErrorEvent(err error) *Event {
+	ev := NewEvent(StatusPermanentError)
+	ev.err = err
+	return ev
+}
+
+// NewFatalErrorEvent wraps the fatal runtime error passed as an argument as an Event
+// with status StatusFatalError and a timestamp set to time.Now().
+func NewFatalErrorEvent(err error) *Event {
+	ev := NewEvent(StatusFatalError)
+	ev.err = err
+	return ev
+}
+
+// StatusIsError returns true for error statuses (e.g. StatusRecoverableError,
+// StatusPermanentError, or StatusFatalError)
+func StatusIsError(status Status) bool {
+	return status == StatusRecoverableError ||
+		status == StatusPermanentError ||
+		status == StatusFatalError
+}
+
+// ReportStatus is a helper function that handles checking whether the component.Host has implemented Reporter.
+// If it has, the Event is reported. Otherwise, nothing happens.
+func ReportStatus(host component.Host, e *Event) {
+	statusReporter, ok := host.(Reporter)
+	if ok {
+		statusReporter.Report(e)
+	}
+}
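Taken together, the new package replaces the StatusEvent plumbing removed from the component package elsewhere in this patch: hosts opt in by implementing Reporter, extensions opt in by implementing Watcher, and components fire events through the ReportStatus helper above. An illustrative sketch, not part of the vendored code (myReceiver and myExtension are made-up types; the componentstatus and component identifiers are taken from this patch):

    // Illustrative sketch only, not part of this patch.
    func (r *myReceiver) Start(_ context.Context, host component.Host) error {
        // Safe to call unconditionally: ReportStatus is a no-op when the
        // host does not implement componentstatus.Reporter.
        componentstatus.ReportStatus(host, componentstatus.NewEvent(componentstatus.StatusOK))
        return nil
    }

    func (e *myExtension) ComponentStatusChanged(src *componentstatus.InstanceID, ev *componentstatus.Event) {
        if componentstatus.StatusIsError(ev.Status()) {
            log.Printf("%s reported %s: %v", src.ComponentID(), ev.Status(), ev.Err())
        }
    }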
diff --git a/vendor/go.opentelemetry.io/collector/component/componenttest/LICENSE b/vendor/go.opentelemetry.io/collector/component/componenttest/LICENSE
new file mode 100644
index 00000000000..d6456956733
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/component/componenttest/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/collector/component/componenttest/Makefile b/vendor/go.opentelemetry.io/collector/component/componenttest/Makefile new file mode 100644 index 00000000000..ded7a36092d --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/component/componenttest/Makefile @@ -0,0 +1 @@ +include ../../Makefile.Common diff --git a/vendor/go.opentelemetry.io/collector/component/componenttest/configtest.go b/vendor/go.opentelemetry.io/collector/component/componenttest/configtest.go index 3750dfd0295..8d1f1d157da 100644 --- a/vendor/go.opentelemetry.io/collector/component/componenttest/configtest.go +++ b/vendor/go.opentelemetry.io/collector/component/componenttest/configtest.go @@ -65,10 +65,8 @@ func validateConfigDataType(t reflect.Type) error { // checkStructFieldTags inspects the tags of a struct field. func checkStructFieldTags(f reflect.StructField) error { - tagValue := f.Tag.Get("mapstructure") if tagValue == "" { - // Ignore special types. switch f.Type.Kind() { case reflect.Interface, reflect.Chan, reflect.Func, reflect.Uintptr, reflect.UnsafePointer: diff --git a/vendor/go.opentelemetry.io/collector/component/componenttest/nop_host.go b/vendor/go.opentelemetry.io/collector/component/componenttest/nop_host.go index ea85ccc8bbe..3c9206456a3 100644 --- a/vendor/go.opentelemetry.io/collector/component/componenttest/nop_host.go +++ b/vendor/go.opentelemetry.io/collector/component/componenttest/nop_host.go @@ -22,7 +22,3 @@ func (nh *nopHost) GetFactory(component.Kind, component.Type) component.Factory func (nh *nopHost) GetExtensions() map[component.ID]component.Component { return nil } - -func (nh *nopHost) GetExporters() map[component.DataType]map[component.ID]component.Component { - return nil -} diff --git a/vendor/go.opentelemetry.io/collector/component/componenttest/nop_telemetry.go b/vendor/go.opentelemetry.io/collector/component/componenttest/nop_telemetry.go index 171e9daa47e..0324f65c980 100644 --- a/vendor/go.opentelemetry.io/collector/component/componenttest/nop_telemetry.go +++ b/vendor/go.opentelemetry.io/collector/component/componenttest/nop_telemetry.go @@ -21,7 +21,5 @@ func NewNopTelemetrySettings() component.TelemetrySettings { MeterProvider: noopmetric.NewMeterProvider(), MetricsLevel: configtelemetry.LevelNone, Resource: pcommon.NewResource(), - ReportStatus: func(*component.StatusEvent) { - }, } } diff --git a/vendor/go.opentelemetry.io/collector/component/componenttest/obsreporttest.go b/vendor/go.opentelemetry.io/collector/component/componenttest/obsreporttest.go index f342879a533..7c2875aa0dc 100644 --- a/vendor/go.opentelemetry.io/collector/component/componenttest/obsreporttest.go +++ b/vendor/go.opentelemetry.io/collector/component/componenttest/obsreporttest.go @@ 
-5,18 +5,18 @@ package componenttest // import "go.opentelemetry.io/collector/component/compone

 import (
 	"context"
+	"errors"

-	"github.com/prometheus/client_golang/prometheus"
-	"github.com/prometheus/client_golang/prometheus/promhttp"
-	otelprom "go.opentelemetry.io/otel/exporters/prometheus"
+	"go.opentelemetry.io/otel/attribute"
 	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
 	"go.opentelemetry.io/otel/sdk/resource"
 	sdktrace "go.opentelemetry.io/otel/sdk/trace"
 	"go.opentelemetry.io/otel/sdk/trace/tracetest"
-	"go.uber.org/multierr"
+	"go.uber.org/zap"

 	"go.opentelemetry.io/collector/component"
 	"go.opentelemetry.io/collector/config/configtelemetry"
+	"go.opentelemetry.io/collector/pdata/pcommon"
 )

 const (
@@ -30,102 +30,79 @@ const (
 	scraperTag   = "scraper"
 	transportTag = "transport"
 	exporterTag  = "exporter"
-	processorTag = "processor"
 )

 type TestTelemetry struct {
-	ts component.TelemetrySettings
 	id component.ID
+	ts component.TelemetrySettings

 	SpanRecorder *tracetest.SpanRecorder
-
-	prometheusChecker *prometheusChecker
-	meterProvider     *sdkmetric.MeterProvider
+	reader       *sdkmetric.ManualReader
 }

 // CheckExporterTraces checks that the current exported values for trace exporter metrics match the given values.
-// When this function is called it is required to also call SetupTelemetry as first thing.
+// Note: SetupTelemetry must be called before this function.
 func (tts *TestTelemetry) CheckExporterTraces(sentSpans, sendFailedSpans int64) error {
-	return tts.prometheusChecker.checkExporterTraces(tts.id, sentSpans, sendFailedSpans)
+	return checkExporterTraces(tts.reader, tts.id, sentSpans, sendFailedSpans)
 }

 // CheckExporterMetrics checks that the current exported values for metrics exporter metrics match the given values.
-// When this function is called it is required to also call SetupTelemetry as first thing.
+// Note: SetupTelemetry must be called before this function.
 func (tts *TestTelemetry) CheckExporterMetrics(sentMetricsPoints, sendFailedMetricsPoints int64) error {
-	return tts.prometheusChecker.checkExporterMetrics(tts.id, sentMetricsPoints, sendFailedMetricsPoints)
+	return checkExporterMetrics(tts.reader, tts.id, sentMetricsPoints, sendFailedMetricsPoints)
 }

 func (tts *TestTelemetry) CheckExporterEnqueueFailedMetrics(enqueueFailed int64) error {
-	return tts.prometheusChecker.checkExporterEnqueueFailed(tts.id, "metric_points", enqueueFailed)
+	return checkExporterEnqueueFailed(tts.reader, tts.id, "metric_points", enqueueFailed)
 }

 func (tts *TestTelemetry) CheckExporterEnqueueFailedTraces(enqueueFailed int64) error {
-	return tts.prometheusChecker.checkExporterEnqueueFailed(tts.id, "spans", enqueueFailed)
+	return checkExporterEnqueueFailed(tts.reader, tts.id, "spans", enqueueFailed)
 }

 func (tts *TestTelemetry) CheckExporterEnqueueFailedLogs(enqueueFailed int64) error {
-	return tts.prometheusChecker.checkExporterEnqueueFailed(tts.id, "log_records", enqueueFailed)
+	return checkExporterEnqueueFailed(tts.reader, tts.id, "log_records", enqueueFailed)
 }

 // CheckExporterLogs checks that the current exported values for logs exporter metrics match the given values.
-// When this function is called it is required to also call SetupTelemetry as first thing.
+// Note: SetupTelemetry must be called before this function.
 func (tts *TestTelemetry) CheckExporterLogs(sentLogRecords, sendFailedLogRecords int64) error {
-	return tts.prometheusChecker.checkExporterLogs(tts.id, sentLogRecords, sendFailedLogRecords)
-}
-
-func (tts *TestTelemetry) CheckExporterMetricGauge(metric string, val int64) error {
-	return tts.prometheusChecker.checkExporterMetricGauge(tts.id, metric, val)
-}
-
-// CheckProcessorTraces checks that for the current exported values for trace exporter metrics match given values.
-// When this function is called it is required to also call SetupTelemetry as first thing.
-func (tts *TestTelemetry) CheckProcessorTraces(acceptedSpans, refusedSpans, droppedSpans int64) error {
-	return tts.prometheusChecker.checkProcessorTraces(tts.id, acceptedSpans, refusedSpans, droppedSpans)
+	return checkExporterLogs(tts.reader, tts.id, sentLogRecords, sendFailedLogRecords)
 }

-// CheckProcessorMetrics checks that for the current exported values for metrics exporter metrics match given values.
-// When this function is called it is required to also call SetupTelemetry as first thing.
-func (tts *TestTelemetry) CheckProcessorMetrics(acceptedMetricPoints, refusedMetricPoints, droppedMetricPoints int64) error {
-	return tts.prometheusChecker.checkProcessorMetrics(tts.id, acceptedMetricPoints, refusedMetricPoints, droppedMetricPoints)
-}
-
-// CheckProcessorLogs checks that for the current exported values for logs exporter metrics match given values.
-// When this function is called it is required to also call SetupTelemetry as first thing.
-func (tts *TestTelemetry) CheckProcessorLogs(acceptedLogRecords, refusedLogRecords, droppedLogRecords int64) error {
-	return tts.prometheusChecker.checkProcessorLogs(tts.id, acceptedLogRecords, refusedLogRecords, droppedLogRecords)
+func (tts *TestTelemetry) CheckExporterMetricGauge(metric string, val int64, extraAttrs ...attribute.KeyValue) error {
+	attrs := attributesForExporterMetrics(tts.id, extraAttrs...)
+	return checkIntGauge(tts.reader, metric, val, attrs)
 }

 // CheckReceiverTraces checks that the current exported values for trace receiver metrics match the given values.
-// When this function is called it is required to also call SetupTelemetry as first thing.
+// Note: SetupTelemetry must be called before this function.
 func (tts *TestTelemetry) CheckReceiverTraces(protocol string, acceptedSpans, droppedSpans int64) error {
-	return tts.prometheusChecker.checkReceiverTraces(tts.id, protocol, acceptedSpans, droppedSpans)
+	return checkReceiverTraces(tts.reader, tts.id, protocol, acceptedSpans, droppedSpans)
 }

 // CheckReceiverLogs checks that the current exported values for logs receiver metrics match the given values.
-// When this function is called it is required to also call SetupTelemetry as first thing.
+// Note: SetupTelemetry must be called before this function.
 func (tts *TestTelemetry) CheckReceiverLogs(protocol string, acceptedLogRecords, droppedLogRecords int64) error {
-	return tts.prometheusChecker.checkReceiverLogs(tts.id, protocol, acceptedLogRecords, droppedLogRecords)
+	return checkReceiverLogs(tts.reader, tts.id, protocol, acceptedLogRecords, droppedLogRecords)
 }

 // CheckReceiverMetrics checks that the current exported values for metrics receiver metrics match the given values.
-// When this function is called it is required to also call SetupTelemetry as first thing.
+// Note: SetupTelemetry must be called before this function.
 func (tts *TestTelemetry) CheckReceiverMetrics(protocol string, acceptedMetricPoints, droppedMetricPoints int64) error {
-	return tts.prometheusChecker.checkReceiverMetrics(tts.id, protocol, acceptedMetricPoints, droppedMetricPoints)
+	return checkReceiverMetrics(tts.reader, tts.id, protocol, acceptedMetricPoints, droppedMetricPoints)
 }

 // CheckScraperMetrics checks that the current exported values for metrics scraper metrics match the given values.
-// When this function is called it is required to also call SetupTelemetry as first thing.
+// Note: SetupTelemetry must be called before this function.
 func (tts *TestTelemetry) CheckScraperMetrics(receiver component.ID, scraper component.ID, scrapedMetricPoints, erroredMetricPoints int64) error {
-	return tts.prometheusChecker.checkScraperMetrics(receiver, scraper, scrapedMetricPoints, erroredMetricPoints)
+	return checkScraperMetrics(tts.reader, receiver, scraper, scrapedMetricPoints, erroredMetricPoints)
 }

 // Shutdown unregisters any views and shuts down the SpanRecorder
 func (tts *TestTelemetry) Shutdown(ctx context.Context) error {
-	var errs error
-	errs = multierr.Append(errs, tts.SpanRecorder.Shutdown(ctx))
-	if tts.meterProvider != nil {
-		errs = multierr.Append(errs, tts.meterProvider.Shutdown(ctx))
-	}
-	return errs
+	return errors.Join(
+		tts.ts.TracerProvider.(*sdktrace.TracerProvider).Shutdown(ctx),
+		tts.ts.MeterProvider.(*sdkmetric.MeterProvider).Shutdown(ctx))
 }

 // TelemetrySettings returns the TestTelemetry's TelemetrySettings
@@ -133,36 +110,27 @@ func (tts *TestTelemetry) TelemetrySettings() component.TelemetrySettings {
 	return tts.ts
 }

-// SetupTelemetry does setup the testing environment to check the metrics recorded by receivers, producers or exporters.
-// The caller must pass the ID of the component that intends to test, so the CreateSettings and Check methods will use.
-// The caller should defer a call to Shutdown the returned TestTelemetry.
+// SetupTelemetry sets up the testing environment to check the metrics recorded by receivers, producers, or exporters.
+// The caller must pass the ID of the component being tested. The ID will be used by the CreateSettings and Check methods.
+// The caller must defer a call to `Shutdown` on the returned TestTelemetry.
 func SetupTelemetry(id component.ID) (TestTelemetry, error) {
-	sr := new(tracetest.SpanRecorder)
-	tp := sdktrace.NewTracerProvider(sdktrace.WithSpanProcessor(sr))
-
 	settings := TestTelemetry{
-		ts:           NewNopTelemetrySettings(),
 		id:           id,
-		SpanRecorder: sr,
-	}
-	settings.ts.TracerProvider = tp
-	settings.ts.MetricsLevel = configtelemetry.LevelNormal
-
-	promRegOtel := prometheus.NewRegistry()
-
-	exp, err := otelprom.New(otelprom.WithRegisterer(promRegOtel), otelprom.WithoutUnits(), otelprom.WithoutScopeInfo(), otelprom.WithoutCounterSuffixes())
-	if err != nil {
-		return settings, err
+		reader:       sdkmetric.NewManualReader(),
+		SpanRecorder: new(tracetest.SpanRecorder),
 	}
-	settings.meterProvider = sdkmetric.NewMeterProvider(
+	mp := sdkmetric.NewMeterProvider(
 		sdkmetric.WithResource(resource.Empty()),
-		sdkmetric.WithReader(exp),
+		sdkmetric.WithReader(settings.reader),
 	)
-	settings.ts.MeterProvider = settings.meterProvider
-	settings.prometheusChecker = &prometheusChecker{
-		otelHandler: promhttp.HandlerFor(promRegOtel, promhttp.HandlerOpts{}),
+	settings.ts = component.TelemetrySettings{
+		Logger:         zap.NewNop(),
+		TracerProvider: sdktrace.NewTracerProvider(sdktrace.WithSpanProcessor(settings.SpanRecorder)),
+		MeterProvider:  mp,
+		MetricsLevel:   configtelemetry.LevelDetailed,
+		Resource:       pcommon.NewResource(),
 	}

 	return settings, nil
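With this rework the assertions read from an in-memory sdkmetric.ManualReader rather than scraping a Prometheus handler, so tests no longer need any Prometheus registry plumbing. A rough usage sketch, not part of the vendored code; it assumes testify for assertions and component.MustNewID, and the component name is made up:

    // Illustrative sketch only, not part of this patch.
    func TestMyExporterTelemetry(t *testing.T) {
        tel, err := componenttest.SetupTelemetry(component.MustNewID("myexporter"))
        require.NoError(t, err)
        t.Cleanup(func() { require.NoError(t, tel.Shutdown(context.Background())) })

        // Build the exporter under test with tel.TelemetrySettings() and send 10 spans...

        // Each Check* call collects from the ManualReader under the hood.
        require.NoError(t, tel.CheckExporterTraces(10, 0))
    }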
diff --git a/vendor/go.opentelemetry.io/collector/component/componenttest/otelchecker.go b/vendor/go.opentelemetry.io/collector/component/componenttest/otelchecker.go
new file mode 100644
index 00000000000..c3196f7d66e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/component/componenttest/otelchecker.go
@@ -0,0 +1,173 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package componenttest // import "go.opentelemetry.io/collector/component/componenttest"
+
+import (
+	"context"
+	"fmt"
+
+	"go.opentelemetry.io/otel/attribute"
+	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
+	"go.opentelemetry.io/otel/sdk/metric/metricdata"
+	"go.uber.org/multierr"
+
+	"go.opentelemetry.io/collector/component"
+)
+
+func checkScraperMetrics(reader *sdkmetric.ManualReader, receiver component.ID, scraper component.ID, scrapedMetricPoints, erroredMetricPoints int64) error {
+	scraperAttrs := attributesForScraperMetrics(receiver, scraper)
+	return multierr.Combine(
+		checkIntSum(reader, "otelcol_scraper_scraped_metric_points", scrapedMetricPoints, scraperAttrs),
+		checkIntSum(reader, "otelcol_scraper_errored_metric_points", erroredMetricPoints, scraperAttrs))
+}
+
+func checkReceiverTraces(reader *sdkmetric.ManualReader, receiver component.ID, protocol string, accepted, dropped int64) error {
+	return checkReceiver(reader, receiver, "spans", protocol, accepted, dropped)
+}
+
+func checkReceiverLogs(reader *sdkmetric.ManualReader, receiver component.ID, protocol string, accepted, dropped int64) error {
+	return checkReceiver(reader, receiver, "log_records", protocol, accepted, dropped)
+}
+
+func checkReceiverMetrics(reader *sdkmetric.ManualReader, receiver component.ID, protocol string, accepted, dropped int64) error {
+	return checkReceiver(reader, receiver, "metric_points", protocol, accepted, dropped)
+}
+
+func checkReceiver(reader *sdkmetric.ManualReader, receiver component.ID, datatype, protocol string, acceptedMetricPoints, droppedMetricPoints int64) error {
+	receiverAttrs := attributesForReceiverMetrics(receiver, protocol)
+	return multierr.Combine(
+		checkIntSum(reader, "otelcol_receiver_accepted_"+datatype, acceptedMetricPoints,
receiverAttrs), + checkIntSum(reader, "otelcol_receiver_refused_"+datatype, droppedMetricPoints, receiverAttrs)) +} + +func checkExporterTraces(reader *sdkmetric.ManualReader, exporter component.ID, sent, sendFailed int64) error { + return checkExporter(reader, exporter, "spans", sent, sendFailed) +} + +func checkExporterLogs(reader *sdkmetric.ManualReader, exporter component.ID, sent, sendFailed int64) error { + return checkExporter(reader, exporter, "log_records", sent, sendFailed) +} + +func checkExporterMetrics(reader *sdkmetric.ManualReader, exporter component.ID, sent, sendFailed int64) error { + return checkExporter(reader, exporter, "metric_points", sent, sendFailed) +} + +func checkExporter(reader *sdkmetric.ManualReader, exporter component.ID, datatype string, sent, sendFailed int64) error { + exporterAttrs := attributesForExporterMetrics(exporter) + errs := checkIntSum(reader, "otelcol_exporter_sent_"+datatype, sent, exporterAttrs) + if sendFailed > 0 { + errs = multierr.Append(errs, + checkIntSum(reader, "otelcol_exporter_send_failed_"+datatype, sendFailed, exporterAttrs)) + } + return errs +} + +func checkExporterEnqueueFailed(reader *sdkmetric.ManualReader, exporter component.ID, datatype string, enqueueFailed int64) error { + if enqueueFailed == 0 { + return nil + } + exporterAttrs := attributesForExporterMetrics(exporter) + return checkIntSum(reader, "otelcol_exporter_enqueue_failed_"+datatype, enqueueFailed, exporterAttrs) +} + +func checkIntGauge(reader *sdkmetric.ManualReader, metric string, expected int64, expectedAttrs attribute.Set) error { + dp, err := getGaugeDataPoint[int64](reader, metric, expectedAttrs) + if err != nil { + return err + } + + if dp.Value != expected { + return fmt.Errorf("values for metric '%s' did not match, expected '%d' got '%d'", metric, expected, dp.Value) + } + + return nil +} + +func checkIntSum(reader *sdkmetric.ManualReader, expectedMetric string, expected int64, expectedAttrs attribute.Set) error { + dp, err := getSumDataPoint[int64](reader, expectedMetric, expectedAttrs) + if err != nil { + return err + } + + if dp.Value != expected { + return fmt.Errorf("values for metric '%s' did not match, expected '%d' got '%d'", expectedMetric, expected, dp.Value) + } + + return nil +} + +func getSumDataPoint[N int64 | float64](reader *sdkmetric.ManualReader, expectedName string, expectedAttrs attribute.Set) (metricdata.DataPoint[N], error) { + m, err := getMetric(reader, expectedName) + if err != nil { + return metricdata.DataPoint[N]{}, err + } + + switch a := m.Data.(type) { + case metricdata.Sum[N]: + return getDataPoint(a.DataPoints, expectedName, expectedAttrs) + default: + return metricdata.DataPoint[N]{}, fmt.Errorf("unknown metric type: %T", a) + } +} + +func getGaugeDataPoint[N int64 | float64](reader *sdkmetric.ManualReader, expectedName string, expectedAttrs attribute.Set) (metricdata.DataPoint[N], error) { + m, err := getMetric(reader, expectedName) + if err != nil { + return metricdata.DataPoint[N]{}, err + } + + switch a := m.Data.(type) { + case metricdata.Gauge[N]: + return getDataPoint(a.DataPoints, expectedName, expectedAttrs) + default: + return metricdata.DataPoint[N]{}, fmt.Errorf("unknown metric type: %T", a) + } +} + +func getDataPoint[N int64 | float64](dps []metricdata.DataPoint[N], expectedName string, expectedAttrs attribute.Set) (metricdata.DataPoint[N], error) { + for _, dp := range dps { + if expectedAttrs.Equals(&dp.Attributes) { + return dp, nil + } + } + return metricdata.DataPoint[N]{}, fmt.Errorf("metric '%s' 
doesn't have a data point with the given attributes: %s", expectedName, expectedAttrs.Encoded(attribute.DefaultEncoder()))
+}
+
+func getMetric(reader *sdkmetric.ManualReader, expectedName string) (metricdata.Metrics, error) {
+	var rm metricdata.ResourceMetrics
+	if err := reader.Collect(context.Background(), &rm); err != nil {
+		return metricdata.Metrics{}, err
+	}
+
+	for _, sm := range rm.ScopeMetrics {
+		for _, m := range sm.Metrics {
+			if m.Name == expectedName {
+				return m, nil
+			}
+		}
+	}
+	return metricdata.Metrics{}, fmt.Errorf("metric '%s' not found", expectedName)
+}
+
+func attributesForScraperMetrics(receiver component.ID, scraper component.ID) attribute.Set {
+	return attribute.NewSet(
+		attribute.String(receiverTag, receiver.String()),
+		attribute.String(scraperTag, scraper.String()),
+	)
+}
+
+// attributesForReceiverMetrics returns the attributes that are needed for the receiver metrics.
+func attributesForReceiverMetrics(receiver component.ID, transport string) attribute.Set {
+	return attribute.NewSet(
+		attribute.String(receiverTag, receiver.String()),
+		attribute.String(transportTag, transport),
+	)
+}
+
+// attributesForExporterMetrics returns the attributes that are needed for the exporter metrics.
+func attributesForExporterMetrics(exporter component.ID, extraAttrs ...attribute.KeyValue) attribute.Set {
+	attrs := []attribute.KeyValue{attribute.String(exporterTag, exporter.String())}
+	attrs = append(attrs, extraAttrs...)
+	return attribute.NewSet(attrs...)
+}
diff --git a/vendor/go.opentelemetry.io/collector/component/componenttest/otelprometheuschecker.go b/vendor/go.opentelemetry.io/collector/component/componenttest/otelprometheuschecker.go
deleted file mode 100644
index 5beef52373a..00000000000
--- a/vendor/go.opentelemetry.io/collector/component/componenttest/otelprometheuschecker.go
+++ /dev/null
@@ -1,202 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package componenttest // import "go.opentelemetry.io/collector/component/componenttest"
-
-import (
-	"fmt"
-	"math"
-	"net/http"
-	"net/http/httptest"
-
-	io_prometheus_client "github.com/prometheus/client_model/go"
-	"github.com/prometheus/common/expfmt"
-	"go.opentelemetry.io/otel/attribute"
-	"go.uber.org/multierr"
-
-	"go.opentelemetry.io/collector/component"
-)
-
-// prometheusChecker is used to assert exported metrics from a prometheus handler.
-type prometheusChecker struct { - otelHandler http.Handler -} - -func (pc *prometheusChecker) checkScraperMetrics(receiver component.ID, scraper component.ID, scrapedMetricPoints, erroredMetricPoints int64) error { - scraperAttrs := attributesForScraperMetrics(receiver, scraper) - return multierr.Combine( - pc.checkCounter("scraper_scraped_metric_points", scrapedMetricPoints, scraperAttrs), - pc.checkCounter("scraper_errored_metric_points", erroredMetricPoints, scraperAttrs)) -} - -func (pc *prometheusChecker) checkReceiverTraces(receiver component.ID, protocol string, accepted, dropped int64) error { - return pc.checkReceiver(receiver, "spans", protocol, accepted, dropped) -} - -func (pc *prometheusChecker) checkReceiverLogs(receiver component.ID, protocol string, accepted, dropped int64) error { - return pc.checkReceiver(receiver, "log_records", protocol, accepted, dropped) -} - -func (pc *prometheusChecker) checkReceiverMetrics(receiver component.ID, protocol string, accepted, dropped int64) error { - return pc.checkReceiver(receiver, "metric_points", protocol, accepted, dropped) -} - -func (pc *prometheusChecker) checkReceiver(receiver component.ID, datatype, protocol string, acceptedMetricPoints, droppedMetricPoints int64) error { - receiverAttrs := attributesForReceiverMetrics(receiver, protocol) - return multierr.Combine( - pc.checkCounter(fmt.Sprintf("receiver_accepted_%s", datatype), acceptedMetricPoints, receiverAttrs), - pc.checkCounter(fmt.Sprintf("receiver_refused_%s", datatype), droppedMetricPoints, receiverAttrs)) -} - -func (pc *prometheusChecker) checkProcessorTraces(processor component.ID, accepted, refused, dropped int64) error { - return pc.checkProcessor(processor, "spans", accepted, refused, dropped) -} - -func (pc *prometheusChecker) checkProcessorMetrics(processor component.ID, accepted, refused, dropped int64) error { - return pc.checkProcessor(processor, "metric_points", accepted, refused, dropped) -} - -func (pc *prometheusChecker) checkProcessorLogs(processor component.ID, accepted, refused, dropped int64) error { - return pc.checkProcessor(processor, "log_records", accepted, refused, dropped) -} - -func (pc *prometheusChecker) checkProcessor(processor component.ID, datatype string, accepted, refused, dropped int64) error { - processorAttrs := attributesForProcessorMetrics(processor) - return multierr.Combine( - pc.checkCounter(fmt.Sprintf("processor_accepted_%s", datatype), accepted, processorAttrs), - pc.checkCounter(fmt.Sprintf("processor_refused_%s", datatype), refused, processorAttrs), - pc.checkCounter(fmt.Sprintf("processor_dropped_%s", datatype), dropped, processorAttrs)) -} - -func (pc *prometheusChecker) checkExporterTraces(exporter component.ID, sent, sendFailed int64) error { - return pc.checkExporter(exporter, "spans", sent, sendFailed) -} - -func (pc *prometheusChecker) checkExporterLogs(exporter component.ID, sent, sendFailed int64) error { - return pc.checkExporter(exporter, "log_records", sent, sendFailed) -} - -func (pc *prometheusChecker) checkExporterMetrics(exporter component.ID, sent, sendFailed int64) error { - return pc.checkExporter(exporter, "metric_points", sent, sendFailed) -} - -func (pc *prometheusChecker) checkExporter(exporter component.ID, datatype string, sent, sendFailed int64) error { - exporterAttrs := attributesForExporterMetrics(exporter) - errs := pc.checkCounter(fmt.Sprintf("exporter_sent_%s", datatype), sent, exporterAttrs) - if sendFailed > 0 { - errs = multierr.Append(errs, - 
pc.checkCounter(fmt.Sprintf("exporter_send_failed_%s", datatype), sendFailed, exporterAttrs)) - } - return errs -} - -func (pc *prometheusChecker) checkExporterEnqueueFailed(exporter component.ID, datatype string, enqueueFailed int64) error { - if enqueueFailed == 0 { - return nil - } - exporterAttrs := attributesForExporterMetrics(exporter) - return pc.checkCounter(fmt.Sprintf("exporter_enqueue_failed_%s", datatype), enqueueFailed, exporterAttrs) -} - -func (pc *prometheusChecker) checkExporterMetricGauge(exporter component.ID, metric string, val int64) error { - exporterAttrs := attributesForExporterMetrics(exporter) - - ts, err := pc.getMetric(metric, io_prometheus_client.MetricType_GAUGE, exporterAttrs) - if err != nil { - return err - } - - expected := float64(val) - if math.Abs(ts.GetGauge().GetValue()-expected) > 0.0001 { - return fmt.Errorf("values for metric '%s' did not match, expected '%f' got '%f'", metric, expected, ts.GetGauge().GetValue()) - } - - return nil -} - -func (pc *prometheusChecker) checkCounter(expectedMetric string, value int64, attrs []attribute.KeyValue) error { - - ts, err := pc.getMetric(expectedMetric, io_prometheus_client.MetricType_COUNTER, attrs) - if err != nil { - return err - } - - expected := float64(value) - if math.Abs(expected-ts.GetCounter().GetValue()) > 0.0001 { - return fmt.Errorf("values for metric '%s' did not match, expected '%f' got '%f'", expectedMetric, expected, ts.GetCounter().GetValue()) - } - - return nil -} - -// getMetric returns the metric time series that matches the given name, type and set of attributes -// it fetches data from the prometheus endpoint and parse them, ideally OTel Go should provide a MeterRecorder of some kind. -func (pc *prometheusChecker) getMetric(expectedName string, expectedType io_prometheus_client.MetricType, expectedAttrs []attribute.KeyValue) (*io_prometheus_client.Metric, error) { - parsed, err := fetchPrometheusMetrics(pc.otelHandler) - if err != nil { - return nil, err - } - - metricFamily, ok := parsed[expectedName] - if !ok { - return nil, fmt.Errorf("metric '%s' not found", expectedName) - } - - if metricFamily.Type.String() != expectedType.String() { - return nil, fmt.Errorf("metric '%v' has type '%s' instead of '%s'", expectedName, metricFamily.Type.String(), expectedType.String()) - } - - expectedSet := attribute.NewSet(expectedAttrs...) - - for _, metric := range metricFamily.Metric { - var attrs []attribute.KeyValue - - for _, label := range metric.Label { - attrs = append(attrs, attribute.String(label.GetName(), label.GetValue())) - } - set := attribute.NewSet(attrs...) - - if expectedSet.Equals(&set) { - return metric, nil - } - } - - return nil, fmt.Errorf("metric '%s' doesn't have a timeseries with the given attributes: %s", expectedName, expectedSet.Encoded(attribute.DefaultEncoder())) -} - -func fetchPrometheusMetrics(handler http.Handler) (map[string]*io_prometheus_client.MetricFamily, error) { - req, err := http.NewRequest(http.MethodGet, "/metrics", nil) - if err != nil { - return nil, err - } - - rr := httptest.NewRecorder() - handler.ServeHTTP(rr, req) - - var parser expfmt.TextParser - return parser.TextToMetricFamilies(rr.Body) -} - -func attributesForScraperMetrics(receiver component.ID, scraper component.ID) []attribute.KeyValue { - return []attribute.KeyValue{ - attribute.String(receiverTag, receiver.String()), - attribute.String(scraperTag, scraper.String()), - } -} - -// attributesForReceiverMetrics returns the attributes that are needed for the receiver metrics. 
-func attributesForReceiverMetrics(receiver component.ID, transport string) []attribute.KeyValue { - return []attribute.KeyValue{ - attribute.String(receiverTag, receiver.String()), - attribute.String(transportTag, transport), - } -} - -func attributesForProcessorMetrics(processor component.ID) []attribute.KeyValue { - return []attribute.KeyValue{attribute.String(processorTag, processor.String())} -} - -// attributesForExporterMetrics returns the attributes that are needed for the receiver metrics. -func attributesForExporterMetrics(exporter component.ID) []attribute.KeyValue { - return []attribute.KeyValue{attribute.String(exporterTag, exporter.String())} -} diff --git a/vendor/go.opentelemetry.io/collector/component/config.go b/vendor/go.opentelemetry.io/collector/component/config.go index b53ff872fda..599b9be3236 100644 --- a/vendor/go.opentelemetry.io/collector/component/config.go +++ b/vendor/go.opentelemetry.io/collector/component/config.go @@ -4,13 +4,9 @@ package component // import "go.opentelemetry.io/collector/component" import ( - "fmt" "reflect" - "regexp" "go.uber.org/multierr" - - "go.opentelemetry.io/collector/confmap" ) // Config defines the configuration for a component.Component. @@ -26,12 +22,6 @@ type Config any // for an interface type Foo is to use a *Foo value. var configValidatorType = reflect.TypeOf((*ConfigValidator)(nil)).Elem() -// UnmarshalConfig helper function to UnmarshalConfig a Config. -// Deprecated: [v0.101.0] Use conf.Unmarshal(&intoCfg) -func UnmarshalConfig(conf *confmap.Conf, intoCfg Config) error { - return conf.Unmarshal(intoCfg) -} - // ConfigValidator defines an optional interface for configurations to implement to do validation. type ConfigValidator interface { // Validate the configuration and returns an error if invalid. @@ -91,10 +81,10 @@ func callValidateIfPossible(v reflect.Value) error { } // If the pointer type implements ConfigValidator call Validate on the pointer to the current value. - if reflect.PtrTo(v.Type()).Implements(configValidatorType) { + if reflect.PointerTo(v.Type()).Implements(configValidatorType) { // If not addressable, then create a new *V pointer and set the value to current v. if !v.CanAddr() { - pv := reflect.New(reflect.PtrTo(v.Type()).Elem()) + pv := reflect.New(reflect.PointerTo(v.Type()).Elem()) pv.Elem().Set(v) v = pv.Elem() } @@ -103,72 +93,3 @@ func callValidateIfPossible(v reflect.Value) error { return nil } - -// Type is the component type as it is used in the config. -type Type struct { - name string -} - -// String returns the string representation of the type. -func (t Type) String() string { - return t.name -} - -// MarshalText marshals returns the Type name. -func (t Type) MarshalText() ([]byte, error) { - return []byte(t.name), nil -} - -// typeRegexp is used to validate the type of a component. -// A type must start with an ASCII alphabetic character and -// can only contain ASCII alphanumeric characters and '_'. -// This must be kept in sync with the regex in cmd/mdatagen/validate.go. -var typeRegexp = regexp.MustCompile(`^[a-zA-Z][0-9a-zA-Z_]{0,62}$`) - -// NewType creates a type. It returns an error if the type is invalid. -// A type must -// - have at least one character, -// - start with an ASCII alphabetic character and -// - can only contain ASCII alphanumeric characters and '_'. 
-func NewType(ty string) (Type, error) { - if len(ty) == 0 { - return Type{}, fmt.Errorf("id must not be empty") - } - if !typeRegexp.MatchString(ty) { - return Type{}, fmt.Errorf("invalid character(s) in type %q", ty) - } - return Type{name: ty}, nil -} - -// MustNewType creates a type. It panics if the type is invalid. -// A type must -// - have at least one character, -// - start with an ASCII alphabetic character and -// - can only contain ASCII alphanumeric characters and '_'. -func MustNewType(strType string) Type { - ty, err := NewType(strType) - if err != nil { - panic(err) - } - return ty -} - -// DataType is a special Type that represents the data types supported by the collector. We currently support -// collecting metrics, traces and logs, this can expand in the future. -type DataType = Type - -func mustNewDataType(strType string) DataType { - return MustNewType(strType) -} - -// Currently supported data types. Add new data types here when new types are supported in the future. -var ( - // DataTypeTraces is the data type tag for traces. - DataTypeTraces = mustNewDataType("traces") - - // DataTypeMetrics is the data type tag for metrics. - DataTypeMetrics = mustNewDataType("metrics") - - // DataTypeLogs is the data type tag for logs. - DataTypeLogs = mustNewDataType("logs") -) diff --git a/vendor/go.opentelemetry.io/collector/component/host.go b/vendor/go.opentelemetry.io/collector/component/host.go index 50472a6d88c..dc8210ffbe3 100644 --- a/vendor/go.opentelemetry.io/collector/component/host.go +++ b/vendor/go.opentelemetry.io/collector/component/host.go @@ -5,22 +5,13 @@ package component // import "go.opentelemetry.io/collector/component" // Host represents the entity that is hosting a Component. It is used to allow communication // between the Component and its host (normally the service.Collector is the host). +// +// Components may require `component.Host` to implement additional interfaces to properly function. +// The component is expected to cast the `component.Host` to the interface it needs and return +// an error if the type assertion fails. type Host interface { - // GetFactory of the specified kind. Returns the factory for a component type. - // This allows components to create other components. For example: - // func (r MyReceiver) Start(host component.Host) error { - // apacheFactory := host.GetFactory(KindReceiver,"apache").(receiver.Factory) - // receiver, err := apacheFactory.CreateMetrics(...) - // ... - // } - // - // GetFactory can be called by the component anytime after Component.Start() begins and - // until Component.Shutdown() ends. Note that the component is responsible for destroying - // other components that it creates. - GetFactory(kind Kind, componentType Type) Factory - // GetExtensions returns the map of extensions. Only enabled and created extensions will be returned. - // Typically is used to find an extension by type or by full config name. Both cases + // Typically, it is used to find an extension by type or by full config name. Both cases // can be done by iterating the returned map. There are typically very few extensions, // so there are no performance implications due to iteration. 
// diff --git a/vendor/go.opentelemetry.io/collector/component/identifiable.go b/vendor/go.opentelemetry.io/collector/component/identifiable.go index d2d65a5e24f..aebacf087e1 100644 --- a/vendor/go.opentelemetry.io/collector/component/identifiable.go +++ b/vendor/go.opentelemetry.io/collector/component/identifiable.go @@ -6,12 +6,69 @@ package component // import "go.opentelemetry.io/collector/component" import ( "errors" "fmt" + "regexp" "strings" ) // typeAndNameSeparator is the separator that is used between type and name in type/name composite keys. const typeAndNameSeparator = "/" +var ( + // typeRegexp is used to validate the type of a component. + // A type must start with an ASCII alphabetic character and + // can only contain ASCII alphanumeric characters and '_'. + // This must be kept in sync with the regex in cmd/mdatagen/validate.go. + typeRegexp = regexp.MustCompile(`^[a-zA-Z][0-9a-zA-Z_]{0,62}$`) + + // nameRegexp is used to validate the name of a component. A name can consist of + // 1 to 1024 Unicode characters excluding whitespace, control characters, and + // symbols. + nameRegexp = regexp.MustCompile(`^[^\pZ\pC\pS]+$`) +) + +// Type is the component type as it is used in the config. +type Type struct { + name string +} + +// String returns the string representation of the type. +func (t Type) String() string { + return t.name +} + +// MarshalText returns the Type name as text. +func (t Type) MarshalText() ([]byte, error) { + return []byte(t.name), nil +} + +// NewType creates a type. It returns an error if the type is invalid. +// A type must +// - have at least one character, +// - start with an ASCII alphabetic character and +// - can only contain ASCII alphanumeric characters and '_'. +func NewType(ty string) (Type, error) { + if len(ty) == 0 { + return Type{}, errors.New("id must not be empty") + } + if !typeRegexp.MatchString(ty) { + return Type{}, fmt.Errorf("invalid character(s) in type %q", ty) + } + return Type{name: ty}, nil +} + +// MustNewType creates a type. It panics if the type is invalid. +// A type must +// - have at least one character, +// - start with an ASCII alphabetic character and +// - can only contain ASCII alphanumeric characters and '_'. +func MustNewType(strType string) Type { + ty, err := NewType(strType) + if err != nil { + panic(err) + } + return ty +} + // ID represents the identity for a component. It combines two values: // * type - the Type of the component. // * name - the name of that component.
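For orientation, a minimal sketch of the relocated `Type` constructors in use; it assumes only the `NewType`/`MustNewType` API visible in this hunk, and the `main` wrapper is illustrative:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/component"
)

func main() {
	// Valid: starts with an ASCII letter, only alphanumerics and '_' follow.
	t, err := component.NewType("otlp_http")
	fmt.Println(t, err) // otlp_http <nil>

	// Invalid: '/' fails typeRegexp, so an error is returned.
	_, err = component.NewType("otlp/http")
	fmt.Println(err) // invalid character(s) in type "otlp/http"

	// MustNewType panics on the same invalid input; reserve it for constants.
	_ = component.MustNewType("otlp")
}
```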
@@ -82,6 +139,9 @@ func (id *ID) UnmarshalText(text []byte) error { if nameStr == "" { return fmt.Errorf("in %q id: the part after %s should not be empty", idStr, typeAndNameSeparator) } + if err := validateName(nameStr); err != nil { + return fmt.Errorf("in %q id: %w", nameStr, err) + } } var err error @@ -101,3 +161,13 @@ func (id ID) String() string { return id.typeVal.String() + typeAndNameSeparator + id.nameVal } + +func validateName(nameStr string) error { + if len(nameStr) > 1024 { + return fmt.Errorf("name %q is longer than 1024 characters (%d characters)", nameStr, len(nameStr)) + } + if !nameRegexp.MatchString(nameStr) { + return fmt.Errorf("invalid character(s) in name %q", nameStr) + } + return nil +} diff --git a/vendor/go.opentelemetry.io/collector/component/status.go b/vendor/go.opentelemetry.io/collector/component/status.go deleted file mode 100644 index 8cd4d802644..00000000000 --- a/vendor/go.opentelemetry.io/collector/component/status.go +++ /dev/null @@ -1,190 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package component // import "go.opentelemetry.io/collector/component" - -import ( - "time" -) - -type Status int32 - -// Enumeration of possible component statuses -const ( - StatusNone Status = iota - StatusStarting - StatusOK - StatusRecoverableError - StatusPermanentError - StatusFatalError - StatusStopping - StatusStopped -) - -// String returns a string representation of a Status -func (s Status) String() string { - switch s { - case StatusStarting: - return "StatusStarting" - case StatusOK: - return "StatusOK" - case StatusRecoverableError: - return "StatusRecoverableError" - case StatusPermanentError: - return "StatusPermanentError" - case StatusFatalError: - return "StatusFatalError" - case StatusStopping: - return "StatusStopping" - case StatusStopped: - return "StatusStopped" - } - return "StatusNone" -} - -// StatusEvent contains a status and timestamp, and can contain an error -type StatusEvent struct { - status Status - err error - timestamp time.Time -} - -// Status returns the Status (enum) associated with the StatusEvent -func (ev *StatusEvent) Status() Status { - return ev.status -} - -// Err returns the error associated with the StatusEvent. -func (ev *StatusEvent) Err() error { - return ev.err -} - -// Timestamp returns the timestamp associated with the StatusEvent -func (ev *StatusEvent) Timestamp() time.Time { - return ev.timestamp -} - -// NewStatusEvent creates and returns a StatusEvent with the specified status and sets the timestamp -// time.Now(). To set an error on the event for an error status use one of the dedicated -// constructors (e.g. NewRecoverableErrorEvent, NewPermanentErrorEvent, NewFatalErrorEvent) -func NewStatusEvent(status Status) *StatusEvent { - return &StatusEvent{ - status: status, - timestamp: time.Now(), - } -} - -// NewRecoverableErrorEvent creates and returns a StatusEvent with StatusRecoverableError, the -// specified error, and a timestamp set to time.Now(). -func NewRecoverableErrorEvent(err error) *StatusEvent { - ev := NewStatusEvent(StatusRecoverableError) - ev.err = err - return ev -} - -// NewPermanentErrorEvent creates and returns a StatusEvent with StatusPermanentError, the -// specified error, and a timestamp set to time.Now(). 
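A short sketch of the stricter `ID` parsing added above; `UnmarshalText` and `validateName` are as shown in the hunk, and the expected outputs in the comments follow from `nameRegexp`:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/component"
)

func main() {
	var id component.ID

	// The name after '/' is now validated: 1 to 1024 characters, with
	// whitespace, control characters, and symbols rejected by nameRegexp.
	if err := id.UnmarshalText([]byte("otlp/primary")); err != nil {
		panic(err)
	}
	fmt.Println(id) // otlp/primary

	// A name containing whitespace now fails where it previously round-tripped.
	err := id.UnmarshalText([]byte("otlp/bad name"))
	fmt.Println(err) // in "bad name" id: invalid character(s) in name "bad name"
}
```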
-func NewPermanentErrorEvent(err error) *StatusEvent { - ev := NewStatusEvent(StatusPermanentError) - ev.err = err - return ev -} - -// NewFatalErrorEvent creates and returns a StatusEvent with StatusFatalError, the -// specified error, and a timestamp set to time.Now(). -func NewFatalErrorEvent(err error) *StatusEvent { - ev := NewStatusEvent(StatusFatalError) - ev.err = err - return ev -} - -// AggregateStatus will derive a status for the given input using the following rules in order: -// 1. If all instances have the same status, there is nothing to aggregate, return it. -// 2. If any instance encounters a fatal error, the component is in a Fatal Error state. -// 3. If any instance is in a Permanent Error state, the component status is Permanent Error. -// 4. If any instance is Stopping, the component is in a Stopping state. -// 5. An instance is Stopped, but not all instances are Stopped, we must be in the process of Stopping the component. -// 6. If any instance is in a Recoverable Error state, the component status is Recoverable Error. -// 7. By process of elimination, the only remaining state is starting. -func AggregateStatus[K comparable](eventMap map[K]*StatusEvent) Status { - seen := make(map[Status]struct{}) - for _, ev := range eventMap { - seen[ev.Status()] = struct{}{} - } - - // All statuses are the same. Note, this will handle StatusOK and StatusStopped as these two - // cases require all components be in the same state. - if len(seen) == 1 { - for st := range seen { - return st - } - } - - // Handle mixed status cases - if _, isFatal := seen[StatusFatalError]; isFatal { - return StatusFatalError - } - - if _, isPermanent := seen[StatusPermanentError]; isPermanent { - return StatusPermanentError - } - - if _, isStopping := seen[StatusStopping]; isStopping { - return StatusStopping - } - - if _, isStopped := seen[StatusStopped]; isStopped { - return StatusStopping - } - - if _, isRecoverable := seen[StatusRecoverableError]; isRecoverable { - return StatusRecoverableError - } - - // By process of elimination, this is the last possible status; no check necessary. - return StatusStarting -} - -// StatusIsError returns true for error statuses (e.g. 
StatusRecoverableError, -// StatusPermanentError, or StatusFatalError) -func StatusIsError(status Status) bool { - return status == StatusRecoverableError || - status == StatusPermanentError || - status == StatusFatalError -} - -// AggregateStatusEvent returns a status event where: -// - The status is set to the aggregate status of the events in the eventMap -// - The timestamp is set to the latest timestamp of the events in the eventMap -// - For an error status, the event will have same error as the most current event of the same -// error type from the eventMap -func AggregateStatusEvent[K comparable](eventMap map[K]*StatusEvent) *StatusEvent { - var lastEvent, lastMatchingEvent *StatusEvent - aggregateStatus := AggregateStatus[K](eventMap) - - for _, ev := range eventMap { - if lastEvent == nil || lastEvent.timestamp.Before(ev.timestamp) { - lastEvent = ev - } - if aggregateStatus == ev.Status() && - (lastMatchingEvent == nil || lastMatchingEvent.timestamp.Before(ev.timestamp)) { - lastMatchingEvent = ev - } - } - - // the effective status matches an existing event - if lastEvent.Status() == aggregateStatus { - return lastEvent - } - - // the effective status requires a synthetic event - aggregateEvent := &StatusEvent{ - status: aggregateStatus, - timestamp: lastEvent.timestamp, - } - if StatusIsError(aggregateStatus) { - aggregateEvent.err = lastMatchingEvent.err - } - - return aggregateEvent -} diff --git a/vendor/go.opentelemetry.io/collector/component/telemetry.go b/vendor/go.opentelemetry.io/collector/component/telemetry.go index 17ca3dcab67..359562e5f92 100644 --- a/vendor/go.opentelemetry.io/collector/component/telemetry.go +++ b/vendor/go.opentelemetry.io/collector/component/telemetry.go @@ -13,10 +13,6 @@ import ( ) // TelemetrySettings provides components with APIs to report telemetry. -// -// Note: there is a service version of this struct, servicetelemetry.TelemetrySettings, that mirrors -// this struct with the exception of ReportStatus. When adding or removing anything from -// this struct consider whether or not the same should be done for the service version. type TelemetrySettings struct { // Logger that the factory can use during creation and can pass to the created // component to be used later as well. @@ -28,15 +24,11 @@ type TelemetrySettings struct { // MeterProvider that the factory can pass to other instrumented third-party libraries. MeterProvider metric.MeterProvider - // MetricsLevel controls the level of detail for metrics emitted by the collector. - // Experimental: *NOTE* this field is experimental and may be changed or removed. + // MetricsLevel represents the configuration value set when the collector + // is configured. Components may use this level to decide whether it is + // appropriate to avoid computationally expensive calculations. MetricsLevel configtelemetry.Level // Resource contains the resource attributes for the collector's telemetry. Resource pcommon.Resource - - // ReportStatus allows a component to report runtime changes in status. The service - // will automatically report status for a component during startup and shutdown. Components can - // use this method to report status after start and before shutdown. 
- ReportStatus func(*StatusEvent) } diff --git a/vendor/go.opentelemetry.io/collector/config/configauth/configauth.go b/vendor/go.opentelemetry.io/collector/config/configauth/configauth.go index aa7002c270b..a3501cd0bed 100644 --- a/vendor/go.opentelemetry.io/collector/config/configauth/configauth.go +++ b/vendor/go.opentelemetry.io/collector/config/configauth/configauth.go @@ -7,6 +7,7 @@ package configauth // import "go.opentelemetry.io/collector/config/configauth" import ( + "context" "errors" "fmt" @@ -33,7 +34,7 @@ func NewDefaultAuthentication() *Authentication { // GetServerAuthenticator attempts to select the appropriate auth.Server from the list of extensions, // based on the requested extension name. If an authenticator is not found, an error is returned. -func (a Authentication) GetServerAuthenticator(extensions map[component.ID]component.Component) (auth.Server, error) { +func (a Authentication) GetServerAuthenticator(_ context.Context, extensions map[component.ID]component.Component) (auth.Server, error) { if ext, found := extensions[a.AuthenticatorID]; found { if server, ok := ext.(auth.Server); ok { return server, nil @@ -47,7 +48,7 @@ func (a Authentication) GetServerAuthenticator(extensions map[component.ID]compo // GetClientAuthenticator attempts to select the appropriate auth.Client from the list of extensions, // based on the component id of the extension. If an authenticator is not found, an error is returned. // This should be only used by HTTP clients. -func (a Authentication) GetClientAuthenticator(extensions map[component.ID]component.Component) (auth.Client, error) { +func (a Authentication) GetClientAuthenticator(_ context.Context, extensions map[component.ID]component.Component) (auth.Client, error) { if ext, found := extensions[a.AuthenticatorID]; found { if client, ok := ext.(auth.Client); ok { return client, nil diff --git a/vendor/go.opentelemetry.io/collector/config/configcompression/compressiontype.go b/vendor/go.opentelemetry.io/collector/config/configcompression/compressiontype.go index 004e9558665..f5b47f9caa2 100644 --- a/vendor/go.opentelemetry.io/collector/config/configcompression/compressiontype.go +++ b/vendor/go.opentelemetry.io/collector/config/configcompression/compressiontype.go @@ -14,6 +14,7 @@ const ( TypeDeflate Type = "deflate" TypeSnappy Type = "snappy" TypeZstd Type = "zstd" + TypeLz4 Type = "lz4" typeNone Type = "none" typeEmpty Type = "" ) @@ -31,11 +32,11 @@ func (ct *Type) UnmarshalText(in []byte) error { typ == TypeDeflate || typ == TypeSnappy || typ == TypeZstd || + typ == TypeLz4 || typ == typeNone || typ == typeEmpty { *ct = typ return nil } return fmt.Errorf("unsupported compression type %q", typ) - } diff --git a/vendor/go.opentelemetry.io/collector/config/configgrpc/README.md b/vendor/go.opentelemetry.io/collector/config/configgrpc/README.md index b9cf2f01be2..e0db0421e7e 100644 --- a/vendor/go.opentelemetry.io/collector/config/configgrpc/README.md +++ b/vendor/go.opentelemetry.io/collector/config/configgrpc/README.md @@ -15,8 +15,8 @@ configuration parameters are also defined under `tls` like server configuration. For more information, see [configtls README](../configtls/README.md). -- [`balancer_name`](https://github.com/grpc/grpc-go/blob/master/examples/features/load_balancing/README.md) -- `compression` Compression type to use among `gzip`, `snappy`, `zstd`, and `none`. 
+- [`balancer_name`](https://github.com/grpc/grpc-go/blob/master/examples/features/load_balancing/README.md): Default before v0.103.0 is `pick_first`, default for v0.103.0 is `round_robin`. See [issue](https://github.com/open-telemetry/opentelemetry-collector/issues/10298). To restore the previous behavior, set `balancer_name` to `pick_first`. +- `compression`: Compression type to use among `gzip`, `snappy`, `zstd`, and `none`. - `endpoint`: Valid value syntax available [here](https://github.com/grpc/grpc/blob/master/doc/naming.md) - [`tls`](../configtls/README.md) - `headers`: name/value pairs added to the request diff --git a/vendor/go.opentelemetry.io/collector/config/configgrpc/configgrpc.go b/vendor/go.opentelemetry.io/collector/config/configgrpc/configgrpc.go index b57a199461c..5def5bbb0a1 100644 --- a/vendor/go.opentelemetry.io/collector/config/configgrpc/configgrpc.go +++ b/vendor/go.opentelemetry.io/collector/config/configgrpc/configgrpc.go @@ -8,26 +8,31 @@ import ( "crypto/tls" "errors" "fmt" + "math" "strings" "time" "github.com/mostynb/go-grpc-compression/nonclobbering/snappy" + "github.com/mostynb/go-grpc-compression/nonclobbering/zstd" "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/noop" "google.golang.org/grpc" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/encoding/gzip" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" + "google.golang.org/grpc/status" "go.opentelemetry.io/collector/client" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config/configauth" "go.opentelemetry.io/collector/config/configcompression" - grpcInternal "go.opentelemetry.io/collector/config/configgrpc/internal" "go.opentelemetry.io/collector/config/confignet" "go.opentelemetry.io/collector/config/configopaque" "go.opentelemetry.io/collector/config/configtelemetry" @@ -55,6 +60,11 @@ func NewDefaultKeepaliveClientConfig() *KeepaliveClientConfig { } } +// BalancerName returns a string with default load balancer value +func BalancerName() string { + return "round_robin" +} + // ClientConfig defines common settings for a gRPC client configuration. type ClientConfig struct { // The target to which the exporter is going to send traces or metrics, @@ -102,9 +112,10 @@ type ClientConfig struct { // NewDefaultClientConfig returns a new instance of ClientConfig with default values. func NewDefaultClientConfig() *ClientConfig { return &ClientConfig{ - TLSSetting: configtls.NewDefaultClientConfig(), - Keepalive: NewDefaultKeepaliveClientConfig(), - Auth: configauth.NewDefaultAuthentication(), + TLSSetting: configtls.NewDefaultClientConfig(), + Keepalive: NewDefaultKeepaliveClientConfig(), + Auth: configauth.NewDefaultAuthentication(), + BalancerName: BalancerName(), } } @@ -161,7 +172,7 @@ type ServerConfig struct { TLSSetting *configtls.ServerConfig `mapstructure:"tls"` // MaxRecvMsgSizeMiB sets the maximum size (in MiB) of messages accepted by the server. - MaxRecvMsgSizeMiB uint64 `mapstructure:"max_recv_msg_size_mib"` + MaxRecvMsgSizeMiB int `mapstructure:"max_recv_msg_size_mib"` // MaxConcurrentStreams sets the limit on the number of concurrent streams to each ServerTransport. // It has effect only for streaming RPCs. 
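To make the `balancer_name` note above concrete, a hedged sketch of the new client defaults; `NewDefaultClientConfig` is as in the hunk above, and `ClientConfig.Validate` appears in the next hunk:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/config/configgrpc"
)

func main() {
	cfg := configgrpc.NewDefaultClientConfig()
	fmt.Println(cfg.BalancerName) // "round_robin", the default as of v0.103.0

	// Restore the pre-v0.103.0 behavior if round_robin is undesirable.
	cfg.BalancerName = "pick_first"

	// Balancer names are now checked up front in Validate rather than at
	// dial time; names not registered with grpc-go are rejected here.
	if err := cfg.Validate(); err != nil {
		panic(err)
	}
}
```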
@@ -182,7 +193,6 @@ type ServerConfig struct { Auth *configauth.Authentication `mapstructure:"auth"` // Include propagates the incoming connection's metadata to downstream consumers. - // Experimental: *NOTE* this option is subject to change or removal in the future. IncludeMetadata bool `mapstructure:"include_metadata"` } @@ -194,6 +204,16 @@ func NewDefaultServerConfig() *ServerConfig { } } +func (gcs *ClientConfig) Validate() error { + if gcs.BalancerName != "" { + if balancer.Get(gcs.BalancerName) == nil { + return fmt.Errorf("invalid balancer_name: %s", gcs.BalancerName) + } + } + + return nil +} + // sanitizedEndpoint strips the prefix of either http:// or https:// from configgrpc.ClientConfig.Endpoint. func (gcs *ClientConfig) sanitizedEndpoint() string { switch { @@ -214,20 +234,45 @@ func (gcs *ClientConfig) isSchemeHTTPS() bool { return strings.HasPrefix(gcs.Endpoint, "https://") } +// ToClientConnOption is a sealed interface wrapping options for [ClientConfig.ToClientConn]. +type ToClientConnOption interface { + isToClientConnOption() +} + +type grpcDialOptionWrapper struct { + opt grpc.DialOption +} + +// WithGrpcDialOption wraps a [grpc.DialOption] into a [ToClientConnOption]. +func WithGrpcDialOption(opt grpc.DialOption) ToClientConnOption { + return grpcDialOptionWrapper{opt: opt} +} +func (grpcDialOptionWrapper) isToClientConnOption() {} + // ToClientConn creates a client connection to the given target. By default, it's // a non-blocking dial (the function won't wait for connections to be // established, and connecting happens in the background). To make it a blocking -// dial, use grpc.WithBlock() dial option. -func (gcs *ClientConfig) ToClientConn(_ context.Context, host component.Host, settings component.TelemetrySettings, extraOpts ...grpc.DialOption) (*grpc.ClientConn, error) { - opts, err := gcs.toDialOptions(host, settings) +// dial, use the WithGrpcDialOption(grpc.WithBlock()) option. +func (gcs *ClientConfig) ToClientConn( + ctx context.Context, + host component.Host, + settings component.TelemetrySettings, + extraOpts ...ToClientConnOption, +) (*grpc.ClientConn, error) { + grpcOpts, err := gcs.getGrpcDialOptions(ctx, host, settings, extraOpts) if err != nil { return nil, err } - opts = append(opts, extraOpts...) - return grpc.NewClient(gcs.sanitizedEndpoint(), opts...) + //nolint:staticcheck //SA1019 see https://github.com/open-telemetry/opentelemetry-collector/pull/11575 + return grpc.DialContext(ctx, gcs.sanitizedEndpoint(), grpcOpts...) 
} -func (gcs *ClientConfig) toDialOptions(host component.Host, settings component.TelemetrySettings) ([]grpc.DialOption, error) { +func (gcs *ClientConfig) getGrpcDialOptions( + ctx context.Context, + host component.Host, + settings component.TelemetrySettings, + extraOpts []ToClientConnOption, +) ([]grpc.DialOption, error) { var opts []grpc.DialOption if gcs.Compression.IsCompressed() { cp, err := getGRPCCompressionName(gcs.Compression) @@ -237,7 +282,7 @@ func (gcs *ClientConfig) toDialOptions(host component.Host, settings component.T opts = append(opts, grpc.WithDefaultCallOptions(grpc.UseCompressor(cp))) } - tlsCfg, err := gcs.TLSSetting.LoadTLSConfig(context.Background()) + tlsCfg, err := gcs.TLSSetting.LoadTLSConfig(ctx) if err != nil { return nil, err } @@ -271,7 +316,7 @@ func (gcs *ClientConfig) toDialOptions(host component.Host, settings component.T return nil, errors.New("no extensions configuration available") } - grpcAuthenticator, cerr := gcs.Auth.GetClientAuthenticator(host.GetExtensions()) + grpcAuthenticator, cerr := gcs.Auth.GetClientAuthenticator(ctx, host.GetExtensions()) if cerr != nil { return nil, cerr } @@ -284,10 +329,6 @@ func (gcs *ClientConfig) toDialOptions(host component.Host, settings component.T } if gcs.BalancerName != "" { - valid := validateBalancerName(gcs.BalancerName) - if !valid { - return nil, fmt.Errorf("invalid balancer_name: %s", gcs.BalancerName) - } opts = append(opts, grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"loadBalancingPolicy":"%s"}`, gcs.BalancerName))) } @@ -298,32 +339,71 @@ func (gcs *ClientConfig) toDialOptions(host component.Host, settings component.T otelOpts := []otelgrpc.Option{ otelgrpc.WithTracerProvider(settings.TracerProvider), otelgrpc.WithPropagators(otel.GetTextMapPropagator()), - } - if settings.MetricsLevel >= configtelemetry.LevelDetailed { - otelOpts = append(otelOpts, otelgrpc.WithMeterProvider(settings.MeterProvider)) + otelgrpc.WithMeterProvider(getLeveledMeterProvider(settings)), } // Enable OpenTelemetry observability plugin. opts = append(opts, grpc.WithStatsHandler(otelgrpc.NewClientHandler(otelOpts...))) + for _, opt := range extraOpts { + if wrapper, ok := opt.(grpcDialOptionWrapper); ok { + opts = append(opts, wrapper.opt) + } + } + return opts, nil } -func validateBalancerName(balancerName string) bool { - return balancer.Get(balancerName) != nil +func (gss *ServerConfig) Validate() error { + if gss.MaxRecvMsgSizeMiB*1024*1024 < 0 { + return fmt.Errorf("invalid max_recv_msg_size_mib value, must be between 1 and %d: %d", math.MaxInt/1024/1024, gss.MaxRecvMsgSizeMiB) + } + + if gss.ReadBufferSize < 0 { + return fmt.Errorf("invalid read_buffer_size value: %d", gss.ReadBufferSize) + } + + if gss.WriteBufferSize < 0 { + return fmt.Errorf("invalid write_buffer_size value: %d", gss.WriteBufferSize) + } + + return nil +} + +// ToServerOption is a sealed interface wrapping options for [ServerConfig.ToServer]. +type ToServerOption interface { + isToServerOption() +} + +type grpcServerOptionWrapper struct { + opt grpc.ServerOption } -// ToServer returns a grpc.Server for the configuration -func (gss *ServerConfig) ToServer(_ context.Context, host component.Host, settings component.TelemetrySettings, extraOpts ...grpc.ServerOption) (*grpc.Server, error) { - opts, err := gss.toServerOption(host, settings) +// WithGrpcServerOption wraps a [grpc.ServerOption] into a [ToServerOption]. 
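A sketch of the new sealed-option convention for `ToClientConn`; the `dialBackend` helper and the user-agent option are illustrative, not part of the API:

```go
package sketch

import (
	"context"

	"go.opentelemetry.io/collector/component"
	"go.opentelemetry.io/collector/config/configgrpc"
	"google.golang.org/grpc"
)

// dialBackend assumes cfg comes from the component configuration and that
// host/set are the arguments handed to the component's Start method.
func dialBackend(ctx context.Context, cfg *configgrpc.ClientConfig, host component.Host, set component.TelemetrySettings) (*grpc.ClientConn, error) {
	return cfg.ToClientConn(ctx, host, set,
		// Plain grpc.DialOptions must now be wrapped in the sealed
		// ToClientConnOption interface instead of being passed directly.
		configgrpc.WithGrpcDialOption(grpc.WithUserAgent("tempo-sketch")),
	)
}
```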
+func WithGrpcServerOption(opt grpc.ServerOption) ToServerOption { + return grpcServerOptionWrapper{opt: opt} +} +func (grpcServerOptionWrapper) isToServerOption() {} + +// ToServer returns a [grpc.Server] for the configuration. +func (gss *ServerConfig) ToServer( + _ context.Context, + host component.Host, + settings component.TelemetrySettings, + extraOpts ...ToServerOption, +) (*grpc.Server, error) { + grpcOpts, err := gss.getGrpcServerOptions(host, settings, extraOpts) if err != nil { return nil, err } - opts = append(opts, extraOpts...) - return grpc.NewServer(opts...), nil + return grpc.NewServer(grpcOpts...), nil } -func (gss *ServerConfig) toServerOption(host component.Host, settings component.TelemetrySettings) ([]grpc.ServerOption, error) { +func (gss *ServerConfig) getGrpcServerOptions( + host component.Host, + settings component.TelemetrySettings, + extraOpts []ToServerOption, +) ([]grpc.ServerOption, error) { switch gss.NetAddr.Transport { case confignet.TransportTypeTCP, confignet.TransportTypeTCP4, confignet.TransportTypeTCP6, confignet.TransportTypeUDP, confignet.TransportTypeUDP4, confignet.TransportTypeUDP6: internal.WarnOnUnspecifiedHost(settings.Logger, gss.NetAddr.Endpoint) @@ -339,8 +419,8 @@ func (gss *ServerConfig) toServerOption(host component.Host, settings component. opts = append(opts, grpc.Creds(credentials.NewTLS(tlsCfg))) } - if gss.MaxRecvMsgSizeMiB > 0 { - opts = append(opts, grpc.MaxRecvMsgSize(int(gss.MaxRecvMsgSizeMiB*1024*1024))) + if gss.MaxRecvMsgSizeMiB > 0 && gss.MaxRecvMsgSizeMiB*1024*1024 > 0 { + opts = append(opts, grpc.MaxRecvMsgSize(gss.MaxRecvMsgSizeMiB*1024*1024)) } if gss.MaxConcurrentStreams > 0 { @@ -387,7 +467,7 @@ func (gss *ServerConfig) toServerOption(host component.Host, settings component. var sInterceptors []grpc.StreamServerInterceptor if gss.Auth != nil { - authenticator, err := gss.Auth.GetServerAuthenticator(host.GetExtensions()) + authenticator, err := gss.Auth.GetServerAuthenticator(context.Background(), host.GetExtensions()) if err != nil { return nil, err } @@ -403,9 +483,7 @@ func (gss *ServerConfig) toServerOption(host component.Host, settings component. otelOpts := []otelgrpc.Option{ otelgrpc.WithTracerProvider(settings.TracerProvider), otelgrpc.WithPropagators(otel.GetTextMapPropagator()), - } - if settings.MetricsLevel >= configtelemetry.LevelDetailed { - otelOpts = append(otelOpts, otelgrpc.WithMeterProvider(settings.MeterProvider)) + otelgrpc.WithMeterProvider(getLeveledMeterProvider(settings)), } // Enable OpenTelemetry observability plugin. @@ -415,6 +493,12 @@ func (gss *ServerConfig) toServerOption(host component.Host, settings component. 
opts = append(opts, grpc.StatsHandler(otelgrpc.NewServerHandler(otelOpts...)), grpc.ChainUnaryInterceptor(uInterceptors...), grpc.ChainStreamInterceptor(sInterceptors...)) + for _, opt := range extraOpts { + if wrapper, ok := opt.(grpcServerOptionWrapper); ok { + opts = append(opts, wrapper.opt) + } + } + return opts, nil } @@ -426,7 +510,7 @@ func getGRPCCompressionName(compressionType configcompression.Type) (string, err case configcompression.TypeSnappy: return snappy.Name, nil case configcompression.TypeZstd: - return grpcInternal.ZstdName, nil + return zstd.Name, nil default: return "", fmt.Errorf("unsupported compression type %q", compressionType) } @@ -473,7 +557,7 @@ func authUnaryServerInterceptor(ctx context.Context, req any, _ *grpc.UnaryServe ctx, err := server.Authenticate(ctx, headers) if err != nil { - return nil, err + return nil, status.Error(codes.Unauthenticated, err.Error()) } return handler(ctx, req) @@ -488,8 +572,15 @@ func authStreamServerInterceptor(srv any, stream grpc.ServerStream, _ *grpc.Stre ctx, err := server.Authenticate(ctx, headers) if err != nil { - return err + return status.Error(codes.Unauthenticated, err.Error()) } return handler(srv, wrapServerStream(ctx, stream)) } + +func getLeveledMeterProvider(settings component.TelemetrySettings) metric.MeterProvider { + if configtelemetry.LevelDetailed <= settings.MetricsLevel { + return settings.MeterProvider + } + return noop.MeterProvider{} +} diff --git a/vendor/go.opentelemetry.io/collector/config/configgrpc/internal/zstd.go b/vendor/go.opentelemetry.io/collector/config/configgrpc/internal/zstd.go deleted file mode 100644 index 0718b73535f..00000000000 --- a/vendor/go.opentelemetry.io/collector/config/configgrpc/internal/zstd.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright The OpenTelemetry Authors -// Copyright 2017 gRPC authors -// SPDX-License-Identifier: Apache-2.0 - -package internal // import "go.opentelemetry.io/collector/config/configgrpc/internal" - -import ( - "errors" - "io" - "sync" - - "github.com/klauspost/compress/zstd" - "google.golang.org/grpc/encoding" -) - -const ZstdName = "zstd" - -func init() { - encoding.RegisterCompressor(NewZstdCodec()) -} - -type writer struct { - *zstd.Encoder - pool *sync.Pool -} - -func NewZstdCodec() encoding.Compressor { - c := &compressor{} - c.poolCompressor.New = func() any { - zw, _ := zstd.NewWriter(nil, zstd.WithEncoderConcurrency(1), zstd.WithWindowSize(512*1024)) - return &writer{Encoder: zw, pool: &c.poolCompressor} - } - return c -} - -func (c *compressor) Compress(w io.Writer) (io.WriteCloser, error) { - z := c.poolCompressor.Get().(*writer) - z.Encoder.Reset(w) - return z, nil -} - -func (z *writer) Close() error { - defer z.pool.Put(z) - return z.Encoder.Close() -} - -type reader struct { - *zstd.Decoder - pool *sync.Pool -} - -func (c *compressor) Decompress(r io.Reader) (io.Reader, error) { - z, inPool := c.poolDecompressor.Get().(*reader) - if !inPool { - newZ, err := zstd.NewReader(r) - if err != nil { - return nil, err - } - return &reader{Decoder: newZ, pool: &c.poolDecompressor}, nil - } - if err := z.Reset(r); err != nil { - c.poolDecompressor.Put(z) - return nil, err - } - return z, nil -} - -func (z *reader) Read(p []byte) (n int, err error) { - n, err = z.Decoder.Read(p) - if errors.Is(err, io.EOF) { - z.pool.Put(z) - } - return n, err -} - -func (c *compressor) Name() string { - return ZstdName -} - -type compressor struct { - poolCompressor sync.Pool - poolDecompressor sync.Pool -} diff --git 
a/vendor/go.opentelemetry.io/collector/config/confighttp/README.md b/vendor/go.opentelemetry.io/collector/config/confighttp/README.md index a0227c2402b..160041d8a5c 100644 --- a/vendor/go.opentelemetry.io/collector/config/confighttp/README.md +++ b/vendor/go.opentelemetry.io/collector/config/confighttp/README.md @@ -23,7 +23,7 @@ README](../configtls/README.md). - [`read_buffer_size`](https://golang.org/pkg/net/http/#Transport) - [`timeout`](https://golang.org/pkg/net/http/#Client) - [`write_buffer_size`](https://golang.org/pkg/net/http/#Transport) -- `compression`: Compression type to use among `gzip`, `zstd`, `snappy`, `zlib`, and `deflate`. +- `compression`: Compression type to use among `gzip`, `zstd`, `snappy`, `zlib`, `deflate`, and `lz4`. - look at the documentation for the server-side of the communication. - `none` will be treated as uncompressed, and any other inputs will cause an error. - [`max_idle_conns`](https://golang.org/pkg/net/http/#Transport) @@ -34,6 +34,8 @@ README](../configtls/README.md). - [`disable_keep_alives`](https://golang.org/pkg/net/http/#Transport) - [`http2_read_idle_timeout`](https://pkg.go.dev/golang.org/x/net/http2#Transport) - [`http2_ping_timeout`](https://pkg.go.dev/golang.org/x/net/http2#Transport) +- [`cookies`](https://pkg.go.dev/net/http#CookieJar) + - [`enabled`] if enabled, the client will store cookies from server responses and reuse them in subsequent requests. Example: @@ -51,6 +53,8 @@ exporter: test1: "value1" "test 2": "value 2" compression: zstd + cookies: + enabled: true ``` ## Server Configuration @@ -64,8 +68,8 @@ is hosted at a different [origin][origin]. If left blank or set to `null`, CORS will not be enabled. - `allowed_origins`: A list of [origins][origin] allowed to send requests to the receiver. An origin may contain a wildcard (`*`) to replace 0 or more - characters (e.g., `https://*.example.com`). To allow any origin, set to - `["*"]`. If no origins are listed, CORS will not be enabled. + characters (e.g., `https://*.example.com`). **Do not use** a plain wildcard + `["*"]`, as our CORS response includes `Access-Control-Allow-Credentials: true`, which makes browsers to **disallow a plain wildcard** (this is a security standard). To allow any origin, you can specify at least the protocol, for example `["https://*", "http://*"]`. If no origins are listed, CORS will not be enabled. - `allowed_headers`: Allow CORS requests to include headers outside the [default safelist][cors-headers]. By default, safelist headers and `X-Requested-With` will be allowed. To allow any request header, set to @@ -74,9 +78,11 @@ will not be enabled. header, allowing clients to cache the response to CORS preflight requests. If not set, browsers use a default of 5 seconds. - `endpoint`: Valid value syntax available [here](https://github.com/grpc/grpc/blob/master/doc/naming.md) -- `max_request_body_size`: configures the maximum allowed body size in bytes for a single request. Default: `0` (no restriction) +- `max_request_body_size`: configures the maximum allowed body size in bytes for a single request. Default: `20971520` (20MiB) +- `compression_algorithms`: configures the list of compression algorithms the server can accept. 
Default: ["", "gzip", "zstd", "zlib", "snappy", "deflate", "lz4"] - [`tls`](../configtls/README.md) - [`auth`](../configauth/README.md) + - `request_params`: a list of query parameter names to add to the auth context, along with the HTTP headers You can enable [`attribute processor`][attribute-processor] to append any http header to span's attribute using custom key. You also need to enable the "include_metadata" @@ -89,6 +95,8 @@ receivers: http: include_metadata: true auth: + request_params: + - token authenticator: some-authenticator-extension cors: allowed_origins: @@ -98,11 +106,12 @@ receivers: - Example-Header max_age: 7200 endpoint: 0.0.0.0:55690 + compression_algorithms: ["", "gzip"] processors: attributes: actions: - key: http.client_ip - from_context: X-Forwarded-For + from_context: metadata.x-forwarded-for action: upsert ``` diff --git a/vendor/go.opentelemetry.io/collector/config/confighttp/compress_readcloser.go b/vendor/go.opentelemetry.io/collector/config/confighttp/compress_readcloser.go new file mode 100644 index 00000000000..411a06a7f2b --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/config/confighttp/compress_readcloser.go @@ -0,0 +1,23 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package confighttp // import "go.opentelemetry.io/collector/config/confighttp" + +import "io" + +// compressReadCloser couples the original compressed reader +// and the compression reader to ensure that the original body +// is correctly closed to ensure resources are freed. +type compressReadCloser struct { + io.Reader + orig io.ReadCloser +} + +var ( + _ io.Reader = (*compressReadCloser)(nil) + _ io.Closer = (*compressReadCloser)(nil) +) + +func (crc *compressReadCloser) Close() error { + return crc.orig.Close() +} diff --git a/vendor/go.opentelemetry.io/collector/config/confighttp/compression.go b/vendor/go.opentelemetry.io/collector/config/confighttp/compression.go index a700bec845b..144b30affe9 100644 --- a/vendor/go.opentelemetry.io/collector/config/confighttp/compression.go +++ b/vendor/go.opentelemetry.io/collector/config/confighttp/compression.go @@ -15,6 +15,7 @@ import ( "github.com/golang/snappy" "github.com/klauspost/compress/zstd" + "github.com/pierrec/lz4/v4" "go.opentelemetry.io/collector/config/configcompression" ) @@ -25,6 +26,56 @@ type compressRoundTripper struct { compressor *compressor } +var availableDecoders = map[string]func(body io.ReadCloser) (io.ReadCloser, error){ + "": func(io.ReadCloser) (io.ReadCloser, error) { + // Not a compressed payload. Nothing to do. + return nil, nil + }, + "gzip": func(body io.ReadCloser) (io.ReadCloser, error) { + gr, err := gzip.NewReader(body) + if err != nil { + return nil, err + } + return gr, nil + }, + "zstd": func(body io.ReadCloser) (io.ReadCloser, error) { + zr, err := zstd.NewReader( + body, + // Concurrency 1 disables async decoding. We don't need async decoding, it is pointless + // for our use-case (a server accepting decoding http requests). + // Disabling async improves performance (I benchmarked it previously when working + // on https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/23257). 
+ zstd.WithDecoderConcurrency(1), + ) + if err != nil { + return nil, err + } + return zr.IOReadCloser(), nil + }, + "zlib": func(body io.ReadCloser) (io.ReadCloser, error) { + zr, err := zlib.NewReader(body) + if err != nil { + return nil, err + } + return zr, nil + }, + //nolint:unparam // Ignoring the linter request to remove error return since it needs to match the method signature + "snappy": func(body io.ReadCloser) (io.ReadCloser, error) { + // Lazy Reading content to improve memory efficiency + return &compressReadCloser{ + Reader: snappy.NewReader(body), + orig: body, + }, nil + }, + //nolint:unparam // Ignoring the linter request to remove error return since it needs to match the method signature + "lz4": func(body io.ReadCloser) (io.ReadCloser, error) { + return &compressReadCloser{ + Reader: lz4.NewReader(body), + orig: body, + }, nil + }, +} + func newCompressRoundTripper(rt http.RoundTripper, compressionType configcompression.Type) (*compressRoundTripper, error) { encoder, err := newCompressor(compressionType) if err != nil { @@ -76,65 +127,27 @@ type decompressor struct { // httpContentDecompressor offloads the task of handling compressed HTTP requests // by identifying the compression format in the "Content-Encoding" header and re-writing // request body so that the handlers further in the chain can work on decompressed data. -// It supports gzip and deflate/zlib compression. -func httpContentDecompressor(h http.Handler, maxRequestBodySize int64, eh func(w http.ResponseWriter, r *http.Request, errorMsg string, statusCode int), decoders map[string]func(body io.ReadCloser) (io.ReadCloser, error)) http.Handler { +func httpContentDecompressor(h http.Handler, maxRequestBodySize int64, eh func(w http.ResponseWriter, r *http.Request, errorMsg string, statusCode int), enableDecoders []string, decoders map[string]func(body io.ReadCloser) (io.ReadCloser, error)) http.Handler { errHandler := defaultErrorHandler if eh != nil { errHandler = eh } + enabled := map[string]func(body io.ReadCloser) (io.ReadCloser, error){} + for _, dec := range enableDecoders { + enabled[dec] = availableDecoders[dec] + + if dec == "deflate" { + enabled["deflate"] = availableDecoders["zlib"] + } + } + d := &decompressor{ maxRequestBodySize: maxRequestBodySize, errHandler: errHandler, base: h, - decoders: map[string]func(body io.ReadCloser) (io.ReadCloser, error){ - "": func(io.ReadCloser) (io.ReadCloser, error) { - // Not a compressed payload. Nothing to do. - return nil, nil - }, - "gzip": func(body io.ReadCloser) (io.ReadCloser, error) { - gr, err := gzip.NewReader(body) - if err != nil { - return nil, err - } - return gr, nil - }, - "zstd": func(body io.ReadCloser) (io.ReadCloser, error) { - zr, err := zstd.NewReader( - body, - // Concurrency 1 disables async decoding. We don't need async decoding, it is pointless - // for our use-case (a server accepting decoding http requests). - // Disabling async improves performance (I benchmarked it previously when working - // on https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/23257). 
- zstd.WithDecoderConcurrency(1), - ) - if err != nil { - return nil, err - } - return zr.IOReadCloser(), nil - }, - "zlib": func(body io.ReadCloser) (io.ReadCloser, error) { - zr, err := zlib.NewReader(body) - if err != nil { - return nil, err - } - return zr, nil - }, - "snappy": func(body io.ReadCloser) (io.ReadCloser, error) { - sr := snappy.NewReader(body) - sb := new(bytes.Buffer) - _, err := io.Copy(sb, sr) - if err != nil { - return nil, err - } - if err = body.Close(); err != nil { - return nil, err - } - return io.NopCloser(sb), nil - }, - }, + decoders: enabled, } - d.decoders["deflate"] = d.decoders["zlib"] for key, dec := range decoders { d.decoders[key] = dec diff --git a/vendor/go.opentelemetry.io/collector/config/confighttp/compressor.go b/vendor/go.opentelemetry.io/collector/config/confighttp/compressor.go index 660fa83ce51..5c8fefa92cc 100644 --- a/vendor/go.opentelemetry.io/collector/config/confighttp/compressor.go +++ b/vendor/go.opentelemetry.io/collector/config/confighttp/compressor.go @@ -13,6 +13,7 @@ import ( "github.com/golang/snappy" "github.com/klauspost/compress/zstd" + "github.com/pierrec/lz4/v4" "go.opentelemetry.io/collector/config/configcompression" ) @@ -32,6 +33,13 @@ var ( zStdPool = &compressor{pool: sync.Pool{New: func() any { zw, _ := zstd.NewWriter(nil, zstd.WithEncoderConcurrency(1)); return zw }}} _ writeCloserReset = (*zlib.Writer)(nil) zLibPool = &compressor{pool: sync.Pool{New: func() any { return zlib.NewWriter(nil) }}} + _ writeCloserReset = (*lz4.Writer)(nil) + lz4Pool = &compressor{pool: sync.Pool{New: func() any { + lz := lz4.NewWriter(nil) + // Setting concurrency to 1 to disable async decoding by goroutines. This will reduce the overall memory footprint and pool + _ = lz.Apply(lz4.ConcurrencyOption(1)) + return lz + }}} ) type compressor struct { @@ -50,6 +58,8 @@ func newCompressor(compressionType configcompression.Type) (*compressor, error) return zStdPool, nil case configcompression.TypeZlib, configcompression.TypeDeflate: return zLibPool, nil + case configcompression.TypeLz4: + return lz4Pool, nil } return nil, errors.New("unsupported compression type, ") } diff --git a/vendor/go.opentelemetry.io/collector/config/confighttp/confighttp.go b/vendor/go.opentelemetry.io/collector/config/confighttp/confighttp.go index 71b2f17ee2f..c8ab4544aeb 100644 --- a/vendor/go.opentelemetry.io/collector/config/confighttp/confighttp.go +++ b/vendor/go.opentelemetry.io/collector/config/confighttp/confighttp.go @@ -11,26 +11,35 @@ import ( "io" "net" "net/http" + "net/http/cookiejar" "net/url" "time" "github.com/rs/cors" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/noop" "golang.org/x/net/http2" + "golang.org/x/net/publicsuffix" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config/configauth" "go.opentelemetry.io/collector/config/configcompression" + "go.opentelemetry.io/collector/config/confighttp/internal" "go.opentelemetry.io/collector/config/configopaque" "go.opentelemetry.io/collector/config/configtelemetry" "go.opentelemetry.io/collector/config/configtls" - "go.opentelemetry.io/collector/config/internal" + configinternal "go.opentelemetry.io/collector/config/internal" "go.opentelemetry.io/collector/extension/auth" ) -const headerContentEncoding = "Content-Encoding" -const defaultMaxRequestBodySize = 20 * 1024 * 1024 // 20MiB +const ( + headerContentEncoding = "Content-Encoding" + defaultMaxRequestBodySize = 
20 * 1024 * 1024 // 20MiB +) + +var defaultCompressionAlgorithms = []string{"", "gzip", "zstd", "zlib", "snappy", "deflate", "lz4"} // ClientConfig defines settings for creating an HTTP client. type ClientConfig struct { @@ -44,12 +53,15 @@ type ClientConfig struct { TLSSetting configtls.ClientConfig `mapstructure:"tls"` // ReadBufferSize for HTTP client. See http.Transport.ReadBufferSize. + // Default is 0. ReadBufferSize int `mapstructure:"read_buffer_size"` // WriteBufferSize for HTTP client. See http.Transport.WriteBufferSize. + // Default is 0. WriteBufferSize int `mapstructure:"write_buffer_size"` // Timeout parameter configures `http.Client.Timeout`. + // Default is 0 (unlimited). Timeout time.Duration `mapstructure:"timeout"` // Additional headers attached to each HTTP request sent by the client. @@ -57,9 +69,6 @@ type ClientConfig struct { // Header values are opaque since they may be sensitive. Headers map[string]configopaque.String `mapstructure:"headers"` - // Custom Round Tripper to allow for individual components to intercept HTTP requests - CustomRoundTripper func(next http.RoundTripper) (http.RoundTripper, error) - // Auth configuration for outgoing HTTP calls. Auth *configauth.Authentication `mapstructure:"auth"` @@ -67,20 +76,20 @@ type ClientConfig struct { Compression configcompression.Type `mapstructure:"compression"` // MaxIdleConns is used to set a limit to the maximum idle HTTP connections the client can keep open. - // There's an already set value, and we want to override it only if an explicit value provided + // By default, it is set to 100. MaxIdleConns *int `mapstructure:"max_idle_conns"` // MaxIdleConnsPerHost is used to set a limit to the maximum idle HTTP connections the host can keep open. - // There's an already set value, and we want to override it only if an explicit value provided + // By default, it is set to [http.DefaultTransport.MaxIdleConnsPerHost]. MaxIdleConnsPerHost *int `mapstructure:"max_idle_conns_per_host"` // MaxConnsPerHost limits the total number of connections per host, including connections in the dialing, // active, and idle states. - // There's an already set value, and we want to override it only if an explicit value provided + // By default, it is set to [http.DefaultTransport.MaxConnsPerHost]. MaxConnsPerHost *int `mapstructure:"max_conns_per_host"` // IdleConnTimeout is the maximum amount of time a connection will remain open before closing itself. - // There's an already set value, and we want to override it only if an explicit value provided + // By default, it is set to [http.DefaultTransport.IdleConnTimeout] IdleConnTimeout *time.Duration `mapstructure:"idle_conn_timeout"` // DisableKeepAlives, if true, disables HTTP keep-alives and will only use the connection to the server @@ -100,20 +109,32 @@ type ClientConfig struct { // HTTP2PingTimeout if there's no response to the ping within the configured value, the connection will be closed. // If not set or set to 0, it defaults to 15s. HTTP2PingTimeout time.Duration `mapstructure:"http2_ping_timeout"` + // Cookies configures the cookie management of the HTTP client. + Cookies *CookiesConfig `mapstructure:"cookies"` +} + +// CookiesConfig defines the configuration of the HTTP client regarding cookies served by the server. +type CookiesConfig struct { + // Enabled if true, cookies from HTTP responses will be reused in further HTTP requests with the same server. 
+ Enabled bool `mapstructure:"enabled"` } // NewDefaultClientConfig returns ClientConfig type object with -// the default values of 'MaxIdleConns' and 'IdleConnTimeout'. +// the default values of 'MaxIdleConns' and 'IdleConnTimeout', as well as [http.DefaultTransport] values. // Other config options are not added as they are initialized with 'zero value' by GoLang as default. // We encourage to use this function to create an object of ClientConfig. func NewDefaultClientConfig() ClientConfig { // The default values are taken from the values of 'DefaultTransport' of 'http' package. - maxIdleConns := 100 - idleConnTimeout := 90 * time.Second + defaultTransport := http.DefaultTransport.(*http.Transport) return ClientConfig{ - MaxIdleConns: &maxIdleConns, - IdleConnTimeout: &idleConnTimeout, + ReadBufferSize: defaultTransport.ReadBufferSize, + WriteBufferSize: defaultTransport.WriteBufferSize, + Headers: map[string]configopaque.String{}, + MaxIdleConns: &defaultTransport.MaxIdleConns, + MaxIdleConnsPerHost: &defaultTransport.MaxIdleConnsPerHost, + MaxConnsPerHost: &defaultTransport.MaxConnsPerHost, + IdleConnTimeout: &defaultTransport.IdleConnTimeout, } } @@ -181,7 +202,7 @@ func (hcs *ClientConfig) ToClient(ctx context.Context, host component.Host, sett return nil, errors.New("extensions configuration not found") } - httpCustomAuthRoundTripper, aerr := hcs.Auth.GetClientAuthenticator(ext) + httpCustomAuthRoundTripper, aerr := hcs.Auth.GetClientAuthenticator(ctx, ext) if aerr != nil { return nil, aerr } @@ -211,18 +232,16 @@ func (hcs *ClientConfig) ToClient(ctx context.Context, host component.Host, sett otelOpts := []otelhttp.Option{ otelhttp.WithTracerProvider(settings.TracerProvider), otelhttp.WithPropagators(otel.GetTextMapPropagator()), + otelhttp.WithMeterProvider(getLeveledMeterProvider(settings)), } - if settings.MetricsLevel >= configtelemetry.LevelDetailed { - otelOpts = append(otelOpts, otelhttp.WithMeterProvider(settings.MeterProvider)) - } - // wrapping http transport with otelhttp transport to enable otel instrumentation if settings.TracerProvider != nil && settings.MeterProvider != nil { clientTransport = otelhttp.NewTransport(clientTransport, otelOpts...) } - if hcs.CustomRoundTripper != nil { - clientTransport, err = hcs.CustomRoundTripper(clientTransport) + var jar http.CookieJar + if hcs.Cookies != nil && hcs.Cookies.Enabled { + jar, err = cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List}) if err != nil { return nil, err } @@ -231,6 +250,7 @@ func (hcs *ClientConfig) ToClient(ctx context.Context, host component.Host, sett return &http.Client{ Transport: clientTransport, Timeout: hcs.Timeout, + Jar: jar, }, nil } @@ -268,18 +288,74 @@ type ServerConfig struct { CORS *CORSConfig `mapstructure:"cors"` // Auth for this receiver - Auth *configauth.Authentication `mapstructure:"auth"` + Auth *AuthConfig `mapstructure:"auth"` // MaxRequestBodySize sets the maximum request body size in bytes. Default: 20MiB. MaxRequestBodySize int64 `mapstructure:"max_request_body_size"` // IncludeMetadata propagates the client metadata from the incoming requests to the downstream consumers - // Experimental: *NOTE* this option is subject to change or removal in the future. IncludeMetadata bool `mapstructure:"include_metadata"` // Additional headers attached to each HTTP response sent to the client. // Header values are opaque since they may be sensitive. 
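A minimal sketch of the cookie jar support wired into `ToClient` above; `newHTTPClient` and the endpoint are illustrative:

```go
package sketch

import (
	"context"
	"net/http"

	"go.opentelemetry.io/collector/component"
	"go.opentelemetry.io/collector/config/confighttp"
)

// newHTTPClient assumes ctx, host, and set come from the component's Start().
func newHTTPClient(ctx context.Context, host component.Host, set component.TelemetrySettings) (*http.Client, error) {
	cfg := confighttp.NewDefaultClientConfig()
	cfg.Endpoint = "http://tempo.example:3200" // illustrative endpoint

	// With Cookies.Enabled, ToClient attaches a publicsuffix-aware cookie
	// jar to the returned client, so Set-Cookie responses are replayed on
	// subsequent requests, e.g. to keep sticky sessions behind a balancer.
	cfg.Cookies = &confighttp.CookiesConfig{Enabled: true}

	return cfg.ToClient(ctx, host, set)
}
```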
ResponseHeaders map[string]configopaque.String `mapstructure:"response_headers"` + + // CompressionAlgorithms configures the list of compression algorithms the server can accept. Default: ["", "gzip", "zstd", "zlib", "snappy", "deflate"] + CompressionAlgorithms []string `mapstructure:"compression_algorithms"` + + // ReadTimeout is the maximum duration for reading the entire + // request, including the body. A zero or negative value means + // there will be no timeout. + // + // Because ReadTimeout does not let Handlers make per-request + // decisions on each request body's acceptable deadline or + // upload rate, most users will prefer to use + // ReadHeaderTimeout. It is valid to use them both. + ReadTimeout time.Duration `mapstructure:"read_timeout"` + + // ReadHeaderTimeout is the amount of time allowed to read + // request headers. The connection's read deadline is reset + // after reading the headers and the Handler can decide what + // is considered too slow for the body. If ReadHeaderTimeout + // is zero, the value of ReadTimeout is used. If both are + // zero, there is no timeout. + ReadHeaderTimeout time.Duration `mapstructure:"read_header_timeout"` + + // WriteTimeout is the maximum duration before timing out + // writes of the response. It is reset whenever a new + // request's header is read. Like ReadTimeout, it does not + // let Handlers make decisions on a per-request basis. + // A zero or negative value means there will be no timeout. + WriteTimeout time.Duration `mapstructure:"write_timeout"` + + // IdleTimeout is the maximum amount of time to wait for the + // next request when keep-alives are enabled. If IdleTimeout + // is zero, the value of ReadTimeout is used. If both are + // zero, there is no timeout. + IdleTimeout time.Duration `mapstructure:"idle_timeout"` +} + +// NewDefaultServerConfig returns ServerConfig type object with default values. +// We encourage to use this function to create an object of ServerConfig. +func NewDefaultServerConfig() ServerConfig { + tlsDefaultServerConfig := configtls.NewDefaultServerConfig() + return ServerConfig{ + ResponseHeaders: map[string]configopaque.String{}, + TLSSetting: &tlsDefaultServerConfig, + CORS: NewDefaultCORSConfig(), + WriteTimeout: 30 * time.Second, + ReadHeaderTimeout: 1 * time.Minute, + IdleTimeout: 1 * time.Minute, + } +} + +type AuthConfig struct { + // Auth for this receiver. + configauth.Authentication `mapstructure:",squash"` + + // RequestParameters is a list of parameters that should be extracted from the request and added to the context. + // When a parameter is found in both the query string and the header, the value from the query string will be used. + RequestParameters []string `mapstructure:"request_params"` } // ToListener creates a net.Listener. @@ -304,60 +380,65 @@ func (hss *ServerConfig) ToListener(ctx context.Context) (net.Listener, error) { // toServerOptions has options that change the behavior of the HTTP server // returned by ServerConfig.ToServer(). -type toServerOptions struct { - errHandler func(w http.ResponseWriter, r *http.Request, errorMsg string, statusCode int) - decoders map[string]func(body io.ReadCloser) (io.ReadCloser, error) -} +type toServerOptions = internal.ToServerOptions // ToServerOption is an option to change the behavior of the HTTP server // returned by ServerConfig.ToServer(). 
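A hedged sketch of the new `AuthConfig` shape shown above; the `basicauth` extension ID and `token` parameter are assumptions for illustration, and `component.MustNewID` is assumed from the component package:

```go
package main

import (
	"go.opentelemetry.io/collector/component"
	"go.opentelemetry.io/collector/config/configauth"
	"go.opentelemetry.io/collector/config/confighttp"
)

func main() {
	sc := confighttp.NewDefaultServerConfig()
	sc.Endpoint = "0.0.0.0:4318" // illustrative endpoint

	// Auth is now an AuthConfig: the embedded Authentication selects the
	// authenticator extension, and RequestParameters lists query parameters
	// (such as "token") to expose to it alongside the request headers.
	sc.Auth = &confighttp.AuthConfig{
		Authentication:    configauth.Authentication{AuthenticatorID: component.MustNewID("basicauth")},
		RequestParameters: []string{"token"},
	}

	_ = sc // pass to ToServer(...) as before
}
```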
-type ToServerOption func(opts *toServerOptions) +type ToServerOption = internal.ToServerOption // WithErrorHandler overrides the HTTP error handler that gets invoked // when there is a failure inside httpContentDecompressor. func WithErrorHandler(e func(w http.ResponseWriter, r *http.Request, errorMsg string, statusCode int)) ToServerOption { - return func(opts *toServerOptions) { - opts.errHandler = e - } + return internal.ToServerOptionFunc(func(opts *toServerOptions) { + opts.ErrHandler = e + }) } // WithDecoder provides support for additional decoders to be configured // by the caller. func WithDecoder(key string, dec func(body io.ReadCloser) (io.ReadCloser, error)) ToServerOption { - return func(opts *toServerOptions) { - if opts.decoders == nil { - opts.decoders = map[string]func(body io.ReadCloser) (io.ReadCloser, error){} + return internal.ToServerOptionFunc(func(opts *toServerOptions) { + if opts.Decoders == nil { + opts.Decoders = map[string]func(body io.ReadCloser) (io.ReadCloser, error){} } - opts.decoders[key] = dec - } + opts.Decoders[key] = dec + }) } // ToServer creates an http.Server from settings object. func (hss *ServerConfig) ToServer(_ context.Context, host component.Host, settings component.TelemetrySettings, handler http.Handler, opts ...ToServerOption) (*http.Server, error) { - internal.WarnOnUnspecifiedHost(settings.Logger, hss.Endpoint) + configinternal.WarnOnUnspecifiedHost(settings.Logger, hss.Endpoint) serverOpts := &toServerOptions{} - for _, o := range opts { - o(serverOpts) - } + serverOpts.Apply(opts...) if hss.MaxRequestBodySize <= 0 { hss.MaxRequestBodySize = defaultMaxRequestBodySize } - handler = httpContentDecompressor(handler, hss.MaxRequestBodySize, serverOpts.errHandler, serverOpts.decoders) + if hss.CompressionAlgorithms == nil { + hss.CompressionAlgorithms = defaultCompressionAlgorithms + } + + handler = httpContentDecompressor( + handler, + hss.MaxRequestBodySize, + serverOpts.ErrHandler, + hss.CompressionAlgorithms, + serverOpts.Decoders, + ) if hss.MaxRequestBodySize > 0 { handler = maxRequestBodySizeInterceptor(handler, hss.MaxRequestBodySize) } if hss.Auth != nil { - server, err := hss.Auth.GetServerAuthenticator(host.GetExtensions()) + server, err := hss.Auth.GetServerAuthenticator(context.Background(), host.GetExtensions()) if err != nil { return nil, err } - handler = authInterceptor(handler, server) + handler = authInterceptor(handler, server, hss.Auth.RequestParameters) } if hss.CORS != nil && len(hss.CORS.AllowedOrigins) > 0 { @@ -377,16 +458,16 @@ func (hss *ServerConfig) ToServer(_ context.Context, host component.Host, settin handler = responseHeadersHandler(handler, hss.ResponseHeaders) } - otelOpts := []otelhttp.Option{ - otelhttp.WithTracerProvider(settings.TracerProvider), - otelhttp.WithPropagators(otel.GetTextMapPropagator()), - otelhttp.WithSpanNameFormatter(func(_ string, r *http.Request) string { - return r.URL.Path - }), - } - if settings.MetricsLevel >= configtelemetry.LevelDetailed { - otelOpts = append(otelOpts, otelhttp.WithMeterProvider(settings.MeterProvider)) - } + otelOpts := append( + []otelhttp.Option{ + otelhttp.WithTracerProvider(settings.TracerProvider), + otelhttp.WithPropagators(otel.GetTextMapPropagator()), + otelhttp.WithSpanNameFormatter(func(_ string, r *http.Request) string { + return r.URL.Path + }), + otelhttp.WithMeterProvider(getLeveledMeterProvider(settings)), + }, + serverOpts.OtelhttpOpts...) // Enable OpenTelemetry observability plugin. 
// TODO: Consider to use component ID string as prefix for all the operations. @@ -398,9 +479,15 @@ func (hss *ServerConfig) ToServer(_ context.Context, host component.Host, settin includeMetadata: hss.IncludeMetadata, } - return &http.Server{ - Handler: handler, - }, nil + server := &http.Server{ + Handler: handler, + ReadTimeout: hss.ReadTimeout, + ReadHeaderTimeout: hss.ReadHeaderTimeout, + WriteTimeout: hss.WriteTimeout, + IdleTimeout: hss.IdleTimeout, + } + + return server, nil } func responseHeadersHandler(handler http.Handler, headers map[string]configopaque.String) http.Handler { @@ -437,9 +524,21 @@ type CORSConfig struct { MaxAge int `mapstructure:"max_age"` } -func authInterceptor(next http.Handler, server auth.Server) http.Handler { +// NewDefaultCORSConfig creates a default cross-origin resource sharing (CORS) configuration. +func NewDefaultCORSConfig() *CORSConfig { + return &CORSConfig{} +} + +func authInterceptor(next http.Handler, server auth.Server, requestParams []string) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ctx, err := server.Authenticate(r.Context(), r.Header) + sources := r.Header + query := r.URL.Query() + for _, param := range requestParams { + if val, ok := query[param]; ok { + sources[param] = val + } + } + ctx, err := server.Authenticate(r.Context(), sources) if err != nil { http.Error(w, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized) return @@ -455,3 +554,10 @@ func maxRequestBodySizeInterceptor(next http.Handler, maxRecvSize int64) http.Ha next.ServeHTTP(w, r) }) } + +func getLeveledMeterProvider(settings component.TelemetrySettings) metric.MeterProvider { + if configtelemetry.LevelDetailed <= settings.MetricsLevel { + return settings.MeterProvider + } + return noop.MeterProvider{} +} diff --git a/vendor/go.opentelemetry.io/collector/config/confighttp/internal/options.go b/vendor/go.opentelemetry.io/collector/config/confighttp/internal/options.go new file mode 100644 index 00000000000..e8b9612f39a --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/config/confighttp/internal/options.go @@ -0,0 +1,38 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/collector/config/confighttp/internal" + +import ( + "io" + "net/http" + + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" +) + +// toServerOptions has options that change the behavior of the HTTP server +// returned by ServerConfig.ToServer(). +type ToServerOptions struct { + ErrHandler func(w http.ResponseWriter, r *http.Request, errorMsg string, statusCode int) + Decoders map[string]func(body io.ReadCloser) (io.ReadCloser, error) + OtelhttpOpts []otelhttp.Option +} + +func (tso *ToServerOptions) Apply(opts ...ToServerOption) { + for _, o := range opts { + o.apply(tso) + } +} + +// ToServerOption is an option to change the behavior of the HTTP server +// returned by ServerConfig.ToServer(). +type ToServerOption interface { + apply(*ToServerOptions) +} + +// ToServerOptionFunc converts a function into ToServerOption interface. 
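Stepping back to the authInterceptor change above: the query-parameter merge is easier to see in isolation. A standalone, standard-library-only sketch of just that step, assuming a hypothetical `request_params: [tenant]` setting and request URL:

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	r, _ := http.NewRequest(http.MethodGet, "http://localhost/v1/traces?tenant=acme", nil)
	requestParams := []string{"tenant"} // from the request_params setting
	sources := r.Header
	query := r.URL.Query()
	for _, param := range requestParams {
		if val, ok := query[param]; ok {
			sources[param] = val // query string values win over header values
		}
	}
	fmt.Println(sources["tenant"]) // [acme]
}
```

The merged map, rather than the bare headers, is what then gets handed to server.Authenticate.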
+type ToServerOptionFunc func(*ToServerOptions) + +func (of ToServerOptionFunc) apply(e *ToServerOptions) { + of(e) +} diff --git a/vendor/go.opentelemetry.io/collector/config/confignet/README.md b/vendor/go.opentelemetry.io/collector/config/confignet/README.md index cd8150e6303..02bc3d1b237 100644 --- a/vendor/go.opentelemetry.io/collector/config/confignet/README.md +++ b/vendor/go.opentelemetry.io/collector/config/confignet/README.md @@ -13,7 +13,8 @@ leverage network configuration to set connection and transport information. - `transport`: Known protocols are "tcp", "tcp4" (IPv4-only), "tcp6" (IPv6-only), "udp", "udp4" (IPv4-only), "udp6" (IPv6-only), "ip", "ip4" (IPv4-only), "ip6" (IPv6-only), "unix", "unixgram" and "unixpacket". -- `dialer_timeout`: DialerTimeout is the maximum amount of time a dial will wait for a connect to complete. The default is no timeout. +- `dialer`: Dialer configuration + - `timeout`: Dialer timeout is the maximum amount of time a dial will wait for a connect to complete. The default is no timeout. Note that for TCP receivers only the `endpoint` configuration setting is required. diff --git a/vendor/go.opentelemetry.io/collector/config/configretry/backoff.go b/vendor/go.opentelemetry.io/collector/config/configretry/backoff.go index 1fc3f8c5852..476c350b05f 100644 --- a/vendor/go.opentelemetry.io/collector/config/configretry/backoff.go +++ b/vendor/go.opentelemetry.io/collector/config/configretry/backoff.go @@ -68,7 +68,6 @@ func (bs *BackOffConfig) Validate() error { if bs.MaxElapsedTime < bs.MaxInterval { return errors.New("'max_elapsed_time' must not be less than 'max_interval'") } - } return nil } diff --git a/vendor/go.opentelemetry.io/collector/config/configtelemetry/doc.go b/vendor/go.opentelemetry.io/collector/config/configtelemetry/doc.go index e3a6bdfe321..646aeb2d7c7 100644 --- a/vendor/go.opentelemetry.io/collector/config/configtelemetry/doc.go +++ b/vendor/go.opentelemetry.io/collector/config/configtelemetry/doc.go @@ -4,4 +4,44 @@ // Package configtelemetry defines various telemetry level for configuration. // It enables every component to have access to telemetry level // to enable metrics only when necessary. +// +// This document provides guidance on which telemetry level to adopt for Collector metrics. +// When adopting a telemetry level, component authors are expected to rely on this guidance to +// justify their choice of telemetry level. +// +// 1. configtelemetry.None +// +// No telemetry data is recorded. +// +// 2. configtelemetry.Basic +// +// Telemetry associated with this level provides essential coverage of the collector telemetry. +// It should only be used for internal collector telemetry generated by the collector core API. Components outside of +// the core API MUST NOT record additional telemetry at this level. +// +// 3. configtelemetry.Normal +// +// Telemetry associated with this level provides complete coverage of the collector telemetry. +// It should be the default for component authors. +// +// Component authors using this telemetry level can use this guidance: +// +// - The signals associated with this level must control cardinality. +// It is acceptable at this level for cardinality to scale linearly with the monitored resources. +// +// - The signals associated with this level must represent a controlled data volume. Examples follow: +// +// a. A max cardinality (total possible combinations of dimension values) for a given metric of at most 100. +// +// b. 
At most 5 spans actively recording simultaneously per active request. +// +// This is the default level recommended when running the Collector. +// +// 4. configtelemetry.Detailed +// +// Telemetry associated with this level provides complete coverage of the collector telemetry. +// +// The signals associated with this level may exhibit high cardinality and/or high dimensionality. +// +// There is no limit on data volume. package configtelemetry // import "go.opentelemetry.io/collector/config/configtelemetry" diff --git a/vendor/go.opentelemetry.io/collector/config/configtls/clientcasfilereloader.go b/vendor/go.opentelemetry.io/collector/config/configtls/clientcasfilereloader.go index 1ee9c72ef9f..e6ca9b2962e 100644 --- a/vendor/go.opentelemetry.io/collector/config/configtls/clientcasfilereloader.go +++ b/vendor/go.opentelemetry.io/collector/config/configtls/clientcasfilereloader.go @@ -6,6 +6,7 @@ package configtls // import "go.opentelemetry.io/collector/config/configtls" import ( "crypto/tls" "crypto/x509" + "errors" "fmt" "sync" @@ -78,7 +79,7 @@ func (r *clientCAsFileReloader) getLastError() error { func (r *clientCAsFileReloader) startWatching() error { if r.shutdownCH != nil { - return fmt.Errorf("client CA file watcher already started") + return errors.New("client CA file watcher already started") } watcher, err := fsnotify.NewWatcher() @@ -132,7 +133,7 @@ func (r *clientCAsFileReloader) handleWatcherEvents() { func (r *clientCAsFileReloader) shutdown() error { if r.shutdownCH == nil { - return fmt.Errorf("client CAs file watcher is not running") + return errors.New("client CAs file watcher is not running") } r.shutdownCH <- true close(r.shutdownCH) diff --git a/vendor/go.opentelemetry.io/collector/config/configtls/configtls.go b/vendor/go.opentelemetry.io/collector/config/configtls/configtls.go index 2ce0490b16c..0cbcc5f4072 100644 --- a/vendor/go.opentelemetry.io/collector/config/configtls/configtls.go +++ b/vendor/go.opentelemetry.io/collector/config/configtls/configtls.go @@ -179,7 +179,7 @@ func (r *certReloader) GetCertificate() (*tls.Certificate, error) { func (c Config) Validate() error { if c.hasCAFile() && c.hasCAPem() { - return fmt.Errorf("provide either a CA file or the PEM-encoded string, but not both") + return errors.New("provide either a CA file or the PEM-encoded string, but not both") } minTLS, err := convertVersion(c.MinVersion, defaultMinTLSVersion) @@ -269,7 +269,7 @@ func (c Config) loadCACertPool() (*x509.CertPool, error) { switch { case c.hasCAFile() && c.hasCAPem(): - return nil, fmt.Errorf("failed to load CA CertPool: provide either a CA file or the PEM-encoded string, but not both") + return nil, errors.New("failed to load CA CertPool: provide either a CA file or the PEM-encoded string, but not both") case c.hasCAFile(): // Set up user specified truststore from file certPool, err = c.loadCertFile(c.CAFile) @@ -308,7 +308,7 @@ func (c Config) loadCertPem(certPem []byte) (*x509.CertPool, error) { } } if !certPool.AppendCertsFromPEM(certPem) { - return nil, fmt.Errorf("failed to parse cert") + return nil, errors.New("failed to parse cert") } return certPool, nil } @@ -316,13 +316,13 @@ func (c Config) loadCertPem(certPem []byte) (*x509.CertPool, error) { func (c Config) loadCertificate() (tls.Certificate, error) { switch { case c.hasCert() != c.hasKey(): - return tls.Certificate{}, fmt.Errorf("for auth via TLS, provide both certificate and key, or neither") + return tls.Certificate{}, errors.New("for auth via TLS, provide both certificate and key, or 
neither") case !c.hasCert() && !c.hasKey(): return tls.Certificate{}, nil case c.hasCertFile() && c.hasCertPem(): - return tls.Certificate{}, fmt.Errorf("for auth via TLS, provide either a certificate or the PEM-encoded string, but not both") + return tls.Certificate{}, errors.New("for auth via TLS, provide either a certificate or the PEM-encoded string, but not both") case c.hasKeyFile() && c.hasKeyPem(): - return tls.Certificate{}, fmt.Errorf("for auth via TLS, provide either a key or the PEM-encoded string, but not both") + return tls.Certificate{}, errors.New("for auth via TLS, provide either a key or the PEM-encoded string, but not both") } var certPem, keyPem []byte diff --git a/vendor/go.opentelemetry.io/collector/config/internal/warning.go b/vendor/go.opentelemetry.io/collector/config/internal/warning.go index f9e32fc1c64..0f5f6363431 100644 --- a/vendor/go.opentelemetry.io/collector/config/internal/warning.go +++ b/vendor/go.opentelemetry.io/collector/config/internal/warning.go @@ -9,8 +9,6 @@ import ( "strings" "go.uber.org/zap" - - "go.opentelemetry.io/collector/internal/localhostgate" ) func shouldWarn(endpoint string) bool { @@ -38,14 +36,13 @@ func shouldWarn(endpoint string) bool { // WarnOnUnspecifiedHost emits a warning if an endpoint has an unspecified host. func WarnOnUnspecifiedHost(logger *zap.Logger, endpoint string) { - if !localhostgate.UseLocalHostAsDefaultHostfeatureGate.IsEnabled() && shouldWarn(endpoint) { + if shouldWarn(endpoint) { logger.Warn( - "Using the 0.0.0.0 address exposes this server to every network interface, which may facilitate Denial of Service attacks. Enable the feature gate to change the default and remove this warning.", + "Using the 0.0.0.0 address exposes this server to every network interface, which may facilitate Denial of Service attacks.", zap.String( "documentation", "https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/security-best-practices.md#safeguards-against-denial-of-service-attacks", ), - zap.String("feature gate ID", localhostgate.UseLocalHostAsDefaultHostID), ) } } diff --git a/vendor/go.opentelemetry.io/collector/confmap/README.md b/vendor/go.opentelemetry.io/collector/confmap/README.md index 42dcf548d89..bfa24c8bcc0 100644 --- a/vendor/go.opentelemetry.io/collector/confmap/README.md +++ b/vendor/go.opentelemetry.io/collector/confmap/README.md @@ -102,3 +102,60 @@ configuration retrieved via the `Provider` used to retrieve the “initial” co ``` The `Resolver` does that by passing an `onChange` func to each `Provider.Retrieve` call and capturing all watch events. + +## Troubleshooting + +### Null Maps + +Due to how our underlying merge library, [koanf](https://github.com/knadh/koanf), behaves, configuration resolution +will treat configuration such as + +```yaml +processors: +``` + +as null, which is a valid value. 
As a result, if you have configuration `A`:
+
+```yaml
+receivers:
+  nop:
+
+processors:
+  nop:
+
+exporters:
+  nop:
+
+extensions:
+  nop:
+
+service:
+  extensions: [nop]
+  pipelines:
+    traces:
+      receivers: [nop]
+      processors: [nop]
+      exporters: [nop]
+```
+
+and configuration `B`:
+
+```yaml
+processors:
+```
+
+and run `./otelcorecol --config A.yaml --config B.yaml`.
+
+The result will be an error:
+
+```
+Error: invalid configuration: service::pipelines::traces: references processor "nop" which is not configured
+2024/06/10 14:37:14 collector server run finished with error: invalid configuration: service::pipelines::traces: references processor "nop" which is not configured
+```
+
+This happens because configuration `B` sets `processors` to null, removing the `nop` processor defined in configuration `A`,
+so the `nop` processor referenced in configuration `A`'s pipeline no longer exists.
+
+This situation can be remedied in two ways:
+1. Use `{}` when you want to represent an empty map, such as `processors: {}` instead of `processors:`.
+2. Omit configuration like `processors:` from your configuration.
diff --git a/vendor/go.opentelemetry.io/collector/confmap/confmap.go b/vendor/go.opentelemetry.io/collector/confmap/confmap.go
index 8a32216a2df..c21bcc61fc1 100644
--- a/vendor/go.opentelemetry.io/collector/confmap/confmap.go
+++ b/vendor/go.opentelemetry.io/collector/confmap/confmap.go
@@ -102,14 +102,54 @@ func (l *Conf) Marshal(rawVal any, _ ...MarshalOption) error {
	}
	out, ok := data.(map[string]any)
	if !ok {
-		return fmt.Errorf("invalid config encoding")
+		return errors.New("invalid config encoding")
	}
	return l.Merge(NewFromStringMap(out))
}

+func (l *Conf) unsanitizedGet(key string) any {
+	return l.k.Get(key)
+}
+
+// sanitize recursively removes expandedValue references from the given data.
+// It uses the expandedValue.Value field to replace the expandedValue references.
+func sanitize(a any) any {
+	return sanitizeExpanded(a, false)
+}
+
+// sanitizeToStr recursively removes expandedValue references from the given data.
+// It uses the expandedValue.Original field to replace the expandedValue references.
+func sanitizeToStr(a any) any {
+	return sanitizeExpanded(a, true)
+}
+
+func sanitizeExpanded(a any, useOriginal bool) any {
+	switch m := a.(type) {
+	case map[string]any:
+		c := maps.Copy(m)
+		for k, v := range m {
+			c[k] = sanitizeExpanded(v, useOriginal)
+		}
+		return c
+	case []any:
+		var newSlice []any
+		for _, e := range m {
+			newSlice = append(newSlice, sanitizeExpanded(e, useOriginal))
+		}
+		return newSlice
+	case expandedValue:
+		if useOriginal {
+			return m.Original
+		}
+		return m.Value
+	}
+	return a
+}
+
// Get can retrieve any value given the key to use.
func (l *Conf) Get(key string) any {
-	return l.k.Get(key)
+	val := l.unsanitizedGet(key)
+	return sanitizeExpanded(val, false)
}

// IsSet checks to see if the key has been set in any of the data locations.
@@ -127,21 +167,31 @@ func (l *Conf) Merge(in *Conf) error {
// It returns an error if the sub-config is not a map[string]any (use Get()), and an empty Map if none exists.
func (l *Conf) Sub(key string) (*Conf, error) {
	// Code inspired by the koanf "Cut" func, but returns an error instead of empty map for unsupported sub-config type.
-	data := l.Get(key)
+	data := l.unsanitizedGet(key)
	if data == nil {
		return New(), nil
	}

-	if v, ok := data.(map[string]any); ok {
+	switch v := data.(type) {
+	case map[string]any:
		return NewFromStringMap(v), nil
+	case expandedValue:
+		if m, ok := v.Value.(map[string]any); ok {
+			return NewFromStringMap(m), nil
+		}
	}

-	return nil, fmt.Errorf("unexpected sub-config value kind for key:%s value:%v kind:%v)", key, data, reflect.TypeOf(data).Kind())
+	return nil, fmt.Errorf("unexpected sub-config value kind for key:%s value:%v kind:%v", key, data, reflect.TypeOf(data).Kind())
+}
+
+func (l *Conf) toStringMapWithExpand() map[string]any {
+	m := maps.Unflatten(l.k.All(), KeyDelimiter)
+	return m
}

// ToStringMap creates a map[string]any from a Parser.
func (l *Conf) ToStringMap() map[string]any {
-	return maps.Unflatten(l.k.All(), KeyDelimiter)
+	return sanitize(l.toStringMapWithExpand()).(map[string]any)
}

// decodeConfig decodes the contents of the Conf into the result argument, using a
@@ -156,9 +206,10 @@ func decodeConfig(m *Conf, result any, errorUnused bool, skipTopLevelUnmarshaler
		ErrorUnused:      errorUnused,
		Result:           result,
		TagName:          "mapstructure",
-		WeaklyTypedInput: true,
+		WeaklyTypedInput: false,
		MatchName:        caseSensitiveMatchName,
		DecodeHook: mapstructure.ComposeDecodeHookFunc(
+			useExpandValue(),
			expandNilStructPointersHookFunc(),
			mapstructure.StringToSliceHookFunc(","),
			mapKeyStringToMapKeyTextUnmarshalerHookFunc(),
@@ -169,14 +220,13 @@ func decodeConfig(m *Conf, result any, errorUnused bool, skipTopLevelUnmarshaler
			// we unmarshal the embedded structs if present to merge with the result:
			unmarshalerEmbeddedStructsHookFunc(),
			zeroSliceHookFunc(),
-			negativeUintHookFunc(),
		),
	}
	decoder, err := mapstructure.NewDecoder(dc)
	if err != nil {
		return err
	}
-	if err = decoder.Decode(m.ToStringMap()); err != nil {
+	if err = decoder.Decode(m.toStringMapWithExpand()); err != nil {
		if strings.HasPrefix(err.Error(), "error decoding ''") {
			return errors.Unwrap(err)
		}
@@ -191,6 +241,7 @@ func decodeConfig(m *Conf, result any, errorUnused bool, skipTopLevelUnmarshaler
func encoderConfig(rawVal any) *encoder.EncoderConfig {
	return &encoder.EncoderConfig{
		EncodeHook: mapstructure.ComposeDecodeHookFunc(
+			encoder.YamlMarshalerHookFunc(),
			encoder.TextMarshalerHookFunc(),
			marshalerHookFunc(rawVal),
		),
@@ -204,6 +255,64 @@ func caseSensitiveMatchName(a, b string) bool {
	return a == b
}

+func castTo(exp expandedValue, useOriginal bool) any {
+	// If the target field is a string, use `exp.Original` or fail if not available.
+	if useOriginal {
+		return exp.Original
+	}
+	// Otherwise, use the parsed value (previous behavior).
+	return exp.Value
+}
+
+// Check if a reflect.Type is of the form T, where:
+// X is any type or interface
+// T = string | map[X]T | []T | [n]T
+func isStringyStructure(t reflect.Type) bool {
+	if t.Kind() == reflect.String {
+		return true
+	}
+	if t.Kind() == reflect.Map {
+		return isStringyStructure(t.Elem())
+	}
+	if t.Kind() == reflect.Slice || t.Kind() == reflect.Array {
+		return isStringyStructure(t.Elem())
+	}
+	return false
+}
+
+// When a value has been loaded from an external source via a provider, we keep both the
+// parsed value and the original string value. This allows us to expand the value to its
+// original string representation when decoding into a string field, and use the parsed value otherwise.
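To make this dual representation concrete, here is a self-contained sketch that re-declares a local stand-in for the struct; the environment variable name and value (PORT=8080) are assumed for illustration:

```go
package main

import "fmt"

// Local stand-in for the confmap expandedValue described above.
type expandedValue struct {
	Value    any    // YAML-parsed form of ${env:PORT}: the int 8080
	Original string // verbatim form: "8080"
}

func main() {
	v := expandedValue{Value: 8080, Original: "8080"}
	port := v.Value.(int)             // decoding into an int field uses Value
	addr := "localhost:" + v.Original // decoding into a string field uses Original
	fmt.Println(port, addr)           // 8080 localhost:8080
}
```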
+func useExpandValue() mapstructure.DecodeHookFuncType { + return func( + _ reflect.Type, + to reflect.Type, + data any, + ) (any, error) { + if exp, ok := data.(expandedValue); ok { + v := castTo(exp, to.Kind() == reflect.String) + // See https://github.com/open-telemetry/opentelemetry-collector/issues/10949 + // If the `to.Kind` is not a string, then expandValue's original value is useless and + // the casted-to value will be nil. In that scenario, we need to use the default value of `to`'s kind. + if v == nil { + return reflect.Zero(to).Interface(), nil + } + return v, nil + } + + switch to.Kind() { + case reflect.Array, reflect.Slice, reflect.Map: + if isStringyStructure(to) { + // If the target field is a stringy structure, sanitize to use the original string value everywhere. + return sanitizeToStr(data), nil + } + // Otherwise, sanitize to use the parsed value everywhere. + return sanitize(data), nil + } + return data, nil + } +} + // In cases where a config has a mapping of something to a struct pointers // we want nil values to resolve to a pointer to the zero value of the // underlying struct just as we want nil values of a mapping of something @@ -415,10 +524,10 @@ type Marshaler interface { // } // // The configuration provided by users may have following cases -// 1. configuration have `keys` field and have a non-nil values for this key, the output should be overrided +// 1. configuration have `keys` field and have a non-nil values for this key, the output should be overridden // - for example, input is {"keys", ["c"]}, then output is Config{ Keys: ["c"]} // -// 2. configuration have `keys` field and have an empty slice for this key, the output should be overrided by empty slics +// 2. configuration have `keys` field and have an empty slice for this key, the output should be overridden by empty slices // - for example, input is {"keys", []}, then output is Config{ Keys: []} // // 3. configuration have `keys` field and have nil value for this key, the output should be default config @@ -436,19 +545,6 @@ func zeroSliceHookFunc() mapstructure.DecodeHookFuncValue { } } -// This hook is used to solve the issue: https://github.com/open-telemetry/opentelemetry-collector/issues/9060 -// Decoding should fail when converting a negative integer to any type of unsigned integer. This prevents -// negative values being decoded as large uint values. 
-// TODO: This should be removed as a part of https://github.com/open-telemetry/opentelemetry-collector/issues/9532 -func negativeUintHookFunc() mapstructure.DecodeHookFuncValue { - return func(from reflect.Value, to reflect.Value) (interface{}, error) { - if from.CanInt() && from.Int() < 0 && to.CanUint() { - return nil, fmt.Errorf("cannot convert negative value %v to an unsigned integer", from.Int()) - } - return from.Interface(), nil - } -} - type moduleFactory[T any, S any] interface { Create(s S) T } diff --git a/vendor/go.opentelemetry.io/collector/confmap/converter/expandconverter/expand.go b/vendor/go.opentelemetry.io/collector/confmap/converter/expandconverter/expand.go deleted file mode 100644 index 029b48eaf17..00000000000 --- a/vendor/go.opentelemetry.io/collector/confmap/converter/expandconverter/expand.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package expandconverter // import "go.opentelemetry.io/collector/confmap/converter/expandconverter" - -import ( - "context" - "fmt" - "os" - "regexp" - - "go.uber.org/zap" - - "go.opentelemetry.io/collector/confmap" - "go.opentelemetry.io/collector/confmap/internal/envvar" -) - -type converter struct { - logger *zap.Logger - - // Record of which env vars we have logged a warning for - loggedDeprecations map[string]struct{} -} - -// NewFactory returns a factory for a confmap.Converter, -// which expands all environment variables for a given confmap.Conf. -func NewFactory() confmap.ConverterFactory { - return confmap.NewConverterFactory(newConverter) -} - -func newConverter(set confmap.ConverterSettings) confmap.Converter { - return converter{ - loggedDeprecations: make(map[string]struct{}), - logger: set.Logger, - } -} - -func (c converter) Convert(_ context.Context, conf *confmap.Conf) error { - var err error - out := make(map[string]any) - for _, k := range conf.AllKeys() { - out[k], err = c.expandStringValues(conf.Get(k)) - if err != nil { - return err - } - } - return conf.Merge(confmap.NewFromStringMap(out)) -} - -func (c converter) expandStringValues(value any) (any, error) { - var err error - switch v := value.(type) { - case string: - return c.expandEnv(v) - case []any: - nslice := make([]any, 0, len(v)) - for _, vint := range v { - var nv any - nv, err = c.expandStringValues(vint) - if err != nil { - return nil, err - } - nslice = append(nslice, nv) - } - return nslice, nil - case map[string]any: - nmap := map[string]any{} - for mk, mv := range v { - nmap[mk], err = c.expandStringValues(mv) - if err != nil { - return nil, err - } - } - return nmap, nil - default: - return v, nil - } -} - -func (c converter) expandEnv(s string) (string, error) { - var err error - res := os.Expand(s, func(str string) string { - // Matches on $VAR style environment variables - // in order to make sure we don't log a warning for ${VAR} - var regex = regexp.MustCompile(fmt.Sprintf(`\$%s`, regexp.QuoteMeta(str))) - if _, exists := c.loggedDeprecations[str]; !exists && regex.MatchString(s) { - msg := fmt.Sprintf("Variable substitution using $VAR will be deprecated in favor of ${VAR} and ${env:VAR}, please update $%s", str) - c.logger.Warn(msg, zap.String("variable", str)) - c.loggedDeprecations[str] = struct{}{} - } - // This allows escaping environment variable substitution via $$, e.g. 
- // - $FOO will be substituted with env var FOO - // - $$FOO will be replaced with $FOO - // - $$$FOO will be replaced with $ + substituted env var FOO - if str == "$" { - return "$" - } - // For $ENV style environment variables os.Expand returns once it hits a character that isn't an underscore or - // an alphanumeric character - so we cannot detect those malformed environment variables. - // For ${ENV} style variables we can detect those kinds of env var names! - if !envvar.ValidationRegexp.MatchString(str) { - err = fmt.Errorf("environment variable %q has invalid name: must match regex %s", str, envvar.ValidationPattern) - return "" - } - val, exists := os.LookupEnv(str) - if !exists { - c.logger.Warn("Configuration references unset environment variable", zap.String("name", str)) - } else if len(val) == 0 { - c.logger.Info("Configuration references empty environment variable", zap.String("name", str)) - } - return val - }) - return res, err -} diff --git a/vendor/go.opentelemetry.io/collector/confmap/expand.go b/vendor/go.opentelemetry.io/collector/confmap/expand.go index 768395f76fd..42f3b6296da 100644 --- a/vendor/go.opentelemetry.io/collector/confmap/expand.go +++ b/vendor/go.opentelemetry.io/collector/confmap/expand.go @@ -7,9 +7,7 @@ import ( "context" "errors" "fmt" - "reflect" "regexp" - "strconv" "strings" ) @@ -26,7 +24,7 @@ var ( ) func (mr *Resolver) expandValueRecursively(ctx context.Context, value any) (any, error) { - for i := 0; i < 100; i++ { + for i := 0; i < 1000; i++ { val, changed, err := mr.expandValue(ctx, value) if err != nil { return nil, err @@ -41,6 +39,34 @@ func (mr *Resolver) expandValueRecursively(ctx context.Context, value any) (any, func (mr *Resolver) expandValue(ctx context.Context, value any) (any, bool, error) { switch v := value.(type) { + case expandedValue: + expanded, changed, err := mr.expandValue(ctx, v.Value) + if err != nil { + return nil, false, err + } + + switch exp := expanded.(type) { + case expandedValue, string: + // Return expanded values or strings verbatim. + return exp, changed, nil + } + + // At this point we don't know the target field type, so we need to expand the original representation as well. + originalExpanded, originalChanged, err := mr.expandValue(ctx, v.Original) + if err != nil { + // The original representation is not valid, return the expanded value. + return expanded, changed, nil + } + + if originalExpanded, ok := originalExpanded.(string); ok { + // If the original representation is a string, return the expanded value with the original representation. + return expandedValue{ + Value: expanded, + Original: originalExpanded, + }, changed || originalChanged, nil + } + + return expanded, changed, nil case string: if !strings.Contains(v, "${") || !strings.Contains(v, "}") { // No URIs to expand. @@ -79,6 +105,7 @@ func (mr *Resolver) expandValue(ctx context.Context, value any) (any, bool, erro // findURI attempts to find the first potentially expandable URI in input. It returns a potentially expandable // URI, or an empty string if none are found. // Note: findURI is only called when input contains a closing bracket. +// We do not support escaping nested URIs (such as ${env:$${FOO}}, since that would result in an invalid outer URI (${env:${FOO}}). 
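The loop inside findURI below counts the '$' characters immediately preceding `${` and treats an odd count as an escape. A standalone sketch of that rule, with the position of `${` hard-coded for each input:

```go
package main

import "fmt"

// isEscaped reports whether the "${" starting at openIndex is escaped,
// i.e. preceded by an odd number of consecutive '$' characters.
func isEscaped(input string, openIndex int) bool {
	count := 0
	for i := openIndex - 1; i >= 0 && input[i] == '$'; i-- {
		count++
	}
	return count%2 == 1
}

func main() {
	fmt.Println(isEscaped("${env:FOO}", 0))   // false: expanded normally
	fmt.Println(isEscaped("$${env:FOO}", 1))  // true: resolves to the literal ${env:FOO}
	fmt.Println(isEscaped("$$${env:FOO}", 2)) // false: "$" plus the expanded value
}
```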
func (mr *Resolver) findURI(input string) string { closeIndex := strings.Index(input, "}") remaining := input[closeIndex+1:] @@ -96,9 +123,35 @@ func (mr *Resolver) findURI(input string) string { return mr.findURI(remaining) } + index := openIndex - 1 + currentRune := '$' + count := 0 + for index >= 0 && currentRune == '$' { + currentRune = rune(input[index]) + if currentRune == '$' { + count++ + } + index-- + } + // if we found an odd number of immediately $ preceding ${, then the expansion is escaped + if count%2 == 1 { + return "" + } + return input[openIndex : closeIndex+1] } +// expandedValue holds the YAML parsed value and original representation of a value. +// It keeps track of the original representation to be used by the 'useExpandValue' hook +// if the target field is a string. We need to keep both representations because we don't know +// what the target field type is until `Unmarshal` is called. +type expandedValue struct { + // Value is the expanded value. + Value any + // Original is the original representation of the value. + Original string +} + // findAndExpandURI attempts to find and expand the first occurrence of an expandable URI in input. If an expandable URI is found it // returns the input with the URI expanded, true and nil. Otherwise, it returns the unchanged input, false and the expanding error. // This method expects input to start with ${ and end with } @@ -111,38 +164,38 @@ func (mr *Resolver) findAndExpandURI(ctx context.Context, input string) (any, bo if uri == input { // If the value is a single URI, then the return value can be anything. // This is the case `foo: ${file:some_extra_config.yml}`. - return mr.expandURI(ctx, input) + ret, err := mr.expandURI(ctx, input) + if err != nil { + return input, false, err + } + + val, err := ret.AsRaw() + if err != nil { + return input, false, err + } + + if asStr, err2 := ret.AsString(); err2 == nil { + return expandedValue{ + Value: val, + Original: asStr, + }, true, nil + } + + return val, true, nil } - expanded, changed, err := mr.expandURI(ctx, uri) + expanded, err := mr.expandURI(ctx, uri) if err != nil { return input, false, err } - repl, err := toString(expanded) + + repl, err := expanded.AsString() if err != nil { return input, false, fmt.Errorf("expanding %v: %w", uri, err) } - return strings.ReplaceAll(input, uri, repl), changed, err -} - -// toString attempts to convert input to a string. -func toString(input any) (string, error) { - // This list must be kept in sync with checkRawConfType. 
- val := reflect.ValueOf(input) - switch val.Kind() { - case reflect.String: - return val.String(), nil - case reflect.Int, reflect.Int32, reflect.Int64: - return strconv.FormatInt(val.Int(), 10), nil - case reflect.Float32, reflect.Float64: - return strconv.FormatFloat(val.Float(), 'f', -1, 64), nil - case reflect.Bool: - return strconv.FormatBool(val.Bool()), nil - default: - return "", fmt.Errorf("expected convertable to string value type, got %q(%T)", input, input) - } + return strings.ReplaceAll(input, uri, repl), true, err } -func (mr *Resolver) expandURI(ctx context.Context, input string) (any, bool, error) { +func (mr *Resolver) expandURI(ctx context.Context, input string) (*Retrieved, error) { // strip ${ and } uri := input[2 : len(input)-1] @@ -152,19 +205,18 @@ func (mr *Resolver) expandURI(ctx context.Context, input string) (any, bool, err lURI, err := newLocation(uri) if err != nil { - return nil, false, err + return nil, err } if strings.Contains(lURI.opaqueValue, "$") { - return nil, false, fmt.Errorf("the uri %q contains unsupported characters ('$')", lURI.asString()) + return nil, fmt.Errorf("the uri %q contains unsupported characters ('$')", lURI.asString()) } ret, err := mr.retrieveValue(ctx, lURI) if err != nil { - return nil, false, err + return nil, err } mr.closers = append(mr.closers, ret.Close) - val, err := ret.AsRaw() - return val, true, err + return ret, nil } type location struct { diff --git a/vendor/go.opentelemetry.io/collector/confmap/internal/envvar/pattern.go b/vendor/go.opentelemetry.io/collector/confmap/internal/envvar/pattern.go deleted file mode 100644 index 6ce639ec275..00000000000 --- a/vendor/go.opentelemetry.io/collector/confmap/internal/envvar/pattern.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package envvar // import "go.opentelemetry.io/collector/confmap/internal/envvar" - -import "regexp" - -const ValidationPattern = `^[a-zA-Z_][a-zA-Z0-9_]*$` - -var ValidationRegexp = regexp.MustCompile(ValidationPattern) diff --git a/vendor/go.opentelemetry.io/collector/confmap/internal/mapstructure/encoder.go b/vendor/go.opentelemetry.io/collector/confmap/internal/mapstructure/encoder.go index d0e222e08b6..ffc0bdc2985 100644 --- a/vendor/go.opentelemetry.io/collector/confmap/internal/mapstructure/encoder.go +++ b/vendor/go.opentelemetry.io/collector/confmap/internal/mapstructure/encoder.go @@ -11,6 +11,7 @@ import ( "strings" "github.com/go-viper/mapstructure/v2" + "gopkg.in/yaml.v3" ) const ( @@ -22,9 +23,7 @@ const ( optionSkip = "-" ) -var ( - errNonStringEncodedKey = errors.New("non string-encoded key") -) +var errNonStringEncodedKey = errors.New("non string-encoded key") // tagInfo stores the mapstructure tag details. type tagInfo struct { @@ -228,3 +227,35 @@ func TextMarshalerHookFunc() mapstructure.DecodeHookFuncValue { return string(out), nil } } + +// YamlMarshalerHookFunc returns a DecodeHookFuncValue that checks for structs +// that have yaml tags but no mapstructure tags. If found, it will convert the struct +// to map[string]any using the yaml package, which respects the yaml tags. Ultimately, +// this allows mapstructure to later marshal the map[string]any in a generic way. 
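The conversion the hook performs is just a yaml.Marshal followed by a yaml.Unmarshal into a generic map. A small demonstration with a hypothetical yaml-tagged struct, assuming gopkg.in/yaml.v3 as imported above:

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

// Sampler has yaml tags but no mapstructure tags, so the hook above would apply to it.
type Sampler struct {
	Ratio float64 `yaml:"sampling_ratio"`
}

func main() {
	out, err := yaml.Marshal(Sampler{Ratio: 0.25})
	if err != nil {
		panic(err)
	}
	var m map[string]any
	if err := yaml.Unmarshal(out, &m); err != nil {
		panic(err)
	}
	fmt.Println(m) // map[sampling_ratio:0.25]
}
```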
+func YamlMarshalerHookFunc() mapstructure.DecodeHookFuncValue { + return func(from reflect.Value, _ reflect.Value) (any, error) { + if from.Kind() == reflect.Struct { + for i := 0; i < from.NumField(); i++ { + if _, ok := from.Type().Field(i).Tag.Lookup("mapstructure"); ok { + // The struct has at least one mapstructure tag so don't do anything. + return from.Interface(), nil + } + + if _, ok := from.Type().Field(i).Tag.Lookup("yaml"); ok { + // The struct has at least one yaml tag, so convert it to map[string]any using yaml. + yamlBytes, err := yaml.Marshal(from.Interface()) + if err != nil { + return nil, err + } + var m map[string]any + err = yaml.Unmarshal(yamlBytes, &m) + if err != nil { + return nil, err + } + return m, nil + } + } + } + return from.Interface(), nil + } +} diff --git a/vendor/go.opentelemetry.io/collector/confmap/provider.go b/vendor/go.opentelemetry.io/collector/confmap/provider.go index 192577ed4d8..c462b9bb6fe 100644 --- a/vendor/go.opentelemetry.io/collector/confmap/provider.go +++ b/vendor/go.opentelemetry.io/collector/confmap/provider.go @@ -6,8 +6,10 @@ package confmap // import "go.opentelemetry.io/collector/confmap" import ( "context" "fmt" + "time" "go.uber.org/zap" + "gopkg.in/yaml.v3" ) // ProviderSettings are the settings to initialize a Provider. @@ -99,21 +101,63 @@ type ChangeEvent struct { type Retrieved struct { rawConf any closeFunc CloseFunc + + stringRepresentation string + isSetString bool } type retrievedSettings struct { - closeFunc CloseFunc + stringRepresentation string + isSetString bool + closeFunc CloseFunc } // RetrievedOption options to customize Retrieved values. -type RetrievedOption func(*retrievedSettings) +type RetrievedOption interface { + apply(*retrievedSettings) +} + +type retrievedOptionFunc func(*retrievedSettings) + +func (of retrievedOptionFunc) apply(e *retrievedSettings) { + of(e) +} // WithRetrievedClose overrides the default Retrieved.Close function. // The default Retrieved.Close function does nothing and always returns nil. func WithRetrievedClose(closeFunc CloseFunc) RetrievedOption { - return func(settings *retrievedSettings) { + return retrievedOptionFunc(func(settings *retrievedSettings) { settings.closeFunc = closeFunc + }) +} + +func withStringRepresentation(stringRepresentation string) RetrievedOption { + return retrievedOptionFunc(func(settings *retrievedSettings) { + settings.stringRepresentation = stringRepresentation + settings.isSetString = true + }) +} + +// NewRetrievedFromYAML returns a new Retrieved instance that contains the deserialized data from the yaml bytes. +// * yamlBytes the yaml bytes that will be deserialized. +// * opts specifies options associated with this Retrieved value, such as CloseFunc. +func NewRetrievedFromYAML(yamlBytes []byte, opts ...RetrievedOption) (*Retrieved, error) { + var rawConf any + if err := yaml.Unmarshal(yamlBytes, &rawConf); err != nil { + // If the string is not valid YAML, we try to use it verbatim as a string. + strRep := string(yamlBytes) + return NewRetrieved(strRep, append(opts, withStringRepresentation(strRep))...) + } + + switch rawConf.(type) { + case string: + val := string(yamlBytes) + return NewRetrieved(val, append(opts, withStringRepresentation(val))...) + default: + opts = append(opts, withStringRepresentation(string(yamlBytes))) } + + return NewRetrieved(rawConf, opts...) } // NewRetrieved returns a new Retrieved instance that contains the data from the raw deserialized config. 
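Taken together, NewRetrievedFromYAML plus AsRaw and AsString (added further below) let the same bytes satisfy either a typed or a string target. A sketch of that behavior, assuming the go.opentelemetry.io/collector/confmap module as a dependency:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/confmap"
)

func main() {
	r, err := confmap.NewRetrievedFromYAML([]byte("8080"))
	if err != nil {
		panic(err)
	}
	raw, _ := r.AsRaw()    // YAML-parsed value: the int 8080
	str, _ := r.AsString() // retained representation: "8080"
	fmt.Printf("%T %v %q\n", raw, raw, str)
}
```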
@@ -127,9 +171,14 @@ func NewRetrieved(rawConf any, opts ...RetrievedOption) (*Retrieved, error) { } set := retrievedSettings{} for _, opt := range opts { - opt(&set) + opt.apply(&set) } - return &Retrieved{rawConf: rawConf, closeFunc: set.closeFunc}, nil + return &Retrieved{ + rawConf: rawConf, + closeFunc: set.closeFunc, + stringRepresentation: set.stringRepresentation, + isSetString: set.isSetString, + }, nil } // AsConf returns the retrieved configuration parsed as a Conf. @@ -152,6 +201,20 @@ func (r *Retrieved) AsRaw() (any, error) { return r.rawConf, nil } +// AsString returns the retrieved configuration as a string. +// If the retrieved configuration is not convertible to a string unambiguously, an error is returned. +// If the retrieved configuration is a string, the string is returned. +// This method is used to resolve ${} references in inline position. +func (r *Retrieved) AsString() (string, error) { + if !r.isSetString { + if str, ok := r.rawConf.(string); ok { + return str, nil + } + return "", fmt.Errorf("retrieved value does not have unambiguous string representation: %v", r.rawConf) + } + return r.stringRepresentation, nil +} + // Close and release any watchers that Provider.Retrieve may have created. // // Should block until all resources are closed, and guarantee that `onChange` is not @@ -173,7 +236,7 @@ func checkRawConfType(rawConf any) error { return nil } switch rawConf.(type) { - case int, int32, int64, float32, float64, bool, string, []any, map[string]any: + case int, int32, int64, float32, float64, bool, string, []any, map[string]any, time.Time: return nil default: return fmt.Errorf( diff --git a/vendor/go.opentelemetry.io/collector/confmap/provider/envprovider/provider.go b/vendor/go.opentelemetry.io/collector/confmap/provider/envprovider/provider.go deleted file mode 100644 index 50192b5d994..00000000000 --- a/vendor/go.opentelemetry.io/collector/confmap/provider/envprovider/provider.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package envprovider // import "go.opentelemetry.io/collector/confmap/provider/envprovider" - -import ( - "context" - "fmt" - "os" - "strings" - - "go.uber.org/zap" - - "go.opentelemetry.io/collector/confmap" - "go.opentelemetry.io/collector/confmap/internal/envvar" - "go.opentelemetry.io/collector/confmap/provider/internal" -) - -const ( - schemeName = "env" -) - -type provider struct { - logger *zap.Logger -} - -// NewFactory returns a factory for a confmap.Provider that reads the configuration from the given environment variable. 
-// -// This Provider supports "env" scheme, and can be called with a selector: -// `env:NAME_OF_ENVIRONMENT_VARIABLE` -func NewFactory() confmap.ProviderFactory { - return confmap.NewProviderFactory(newProvider) -} - -func newProvider(ps confmap.ProviderSettings) confmap.Provider { - return &provider{ - logger: ps.Logger, - } -} - -func (emp *provider) Retrieve(_ context.Context, uri string, _ confmap.WatcherFunc) (*confmap.Retrieved, error) { - if !strings.HasPrefix(uri, schemeName+":") { - return nil, fmt.Errorf("%q uri is not supported by %q provider", uri, schemeName) - } - envVarName := uri[len(schemeName)+1:] - if !envvar.ValidationRegexp.MatchString(envVarName) { - return nil, fmt.Errorf("environment variable %q has invalid name: must match regex %s", envVarName, envvar.ValidationPattern) - - } - val, exists := os.LookupEnv(envVarName) - if !exists { - emp.logger.Warn("Configuration references unset environment variable", zap.String("name", envVarName)) - } else if len(val) == 0 { - emp.logger.Info("Configuration references empty environment variable", zap.String("name", envVarName)) - } - - return internal.NewRetrievedFromYAML([]byte(val)) -} - -func (*provider) Scheme() string { - return schemeName -} - -func (*provider) Shutdown(context.Context) error { - return nil -} diff --git a/vendor/go.opentelemetry.io/collector/confmap/provider/fileprovider/provider.go b/vendor/go.opentelemetry.io/collector/confmap/provider/fileprovider/provider.go deleted file mode 100644 index fe958280cfb..00000000000 --- a/vendor/go.opentelemetry.io/collector/confmap/provider/fileprovider/provider.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package fileprovider // import "go.opentelemetry.io/collector/confmap/provider/fileprovider" - -import ( - "context" - "fmt" - "os" - "path/filepath" - "strings" - - "go.opentelemetry.io/collector/confmap" - "go.opentelemetry.io/collector/confmap/provider/internal" -) - -const schemeName = "file" - -type provider struct{} - -// NewFactory returns a factory for a confmap.Provider that reads the configuration from a file. -// -// This Provider supports "file" scheme, and can be called with a "uri" that follows: -// -// file-uri = "file:" local-path -// local-path = [ drive-letter ] file-path -// drive-letter = ALPHA ":" -// -// The "file-path" can be relative or absolute, and it can be any OS supported format. -// -// Examples: -// `file:path/to/file` - relative path (unix, windows) -// `file:/path/to/file` - absolute path (unix, windows) -// `file:c:/path/to/file` - absolute path including drive-letter (windows) -// `file:c:\path\to\file` - absolute path including drive-letter (windows) -func NewFactory() confmap.ProviderFactory { - return confmap.NewProviderFactory(newProvider) -} - -func newProvider(confmap.ProviderSettings) confmap.Provider { - return &provider{} -} - -func (fmp *provider) Retrieve(_ context.Context, uri string, _ confmap.WatcherFunc) (*confmap.Retrieved, error) { - if !strings.HasPrefix(uri, schemeName+":") { - return nil, fmt.Errorf("%q uri is not supported by %q provider", uri, schemeName) - } - - // Clean the path before using it. 
- content, err := os.ReadFile(filepath.Clean(uri[len(schemeName)+1:])) - if err != nil { - return nil, fmt.Errorf("unable to read the file %v: %w", uri, err) - } - - return internal.NewRetrievedFromYAML(content) -} - -func (*provider) Scheme() string { - return schemeName -} - -func (*provider) Shutdown(context.Context) error { - return nil -} diff --git a/vendor/go.opentelemetry.io/collector/confmap/provider/httpprovider/Makefile b/vendor/go.opentelemetry.io/collector/confmap/provider/httpprovider/Makefile deleted file mode 100644 index bdd863a203b..00000000000 --- a/vendor/go.opentelemetry.io/collector/confmap/provider/httpprovider/Makefile +++ /dev/null @@ -1 +0,0 @@ -include ../../../Makefile.Common diff --git a/vendor/go.opentelemetry.io/collector/confmap/provider/httpprovider/README.md b/vendor/go.opentelemetry.io/collector/confmap/provider/httpprovider/README.md deleted file mode 100644 index 673fd2ccd2e..00000000000 --- a/vendor/go.opentelemetry.io/collector/confmap/provider/httpprovider/README.md +++ /dev/null @@ -1,13 +0,0 @@ -What is this new component httpprovider? -- An implementation of `confmap.Provider` for HTTP (httpprovider) allows OTEL Collector the ability to load configuration for itself by fetching and reading config files stored in HTTP servers. - -How this new component httpprovider works? -- It will be called by `confmap.Resolver` to load configurations for OTEL Collector. -- By giving a config URI starting with prefix 'http://', this httpprovider will be used to download config files from given HTTP URIs, and then used the downloaded config files to deploy the OTEL Collector. -- In our code, we check the validity scheme and string pattern of HTTP URIs. And also check if there are any problems on config downloading and config deserialization. - -Expected URI format: -- http://... - -Prerequistes: -- Need to setup a HTTP server ahead, which returns with a config files according to the given URI \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/collector/confmap/provider/httpprovider/provider.go b/vendor/go.opentelemetry.io/collector/confmap/provider/httpprovider/provider.go deleted file mode 100644 index c47c1df5d99..00000000000 --- a/vendor/go.opentelemetry.io/collector/confmap/provider/httpprovider/provider.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package httpprovider // import "go.opentelemetry.io/collector/confmap/provider/httpprovider" - -import ( - "go.opentelemetry.io/collector/confmap" - "go.opentelemetry.io/collector/confmap/provider/internal/configurablehttpprovider" -) - -// NewFactory returns a factory for a confmap.Provider that reads the configuration from a http server. -// -// This Provider supports "http" scheme. 
-// -// One example for HTTP URI is: http://localhost:3333/getConfig -func NewFactory() confmap.ProviderFactory { - return confmap.NewProviderFactory(newProvider) -} - -func newProvider(set confmap.ProviderSettings) confmap.Provider { - return configurablehttpprovider.New(configurablehttpprovider.HTTPScheme, set) -} diff --git a/vendor/go.opentelemetry.io/collector/confmap/provider/httpsprovider/Makefile b/vendor/go.opentelemetry.io/collector/confmap/provider/httpsprovider/Makefile deleted file mode 100644 index bdd863a203b..00000000000 --- a/vendor/go.opentelemetry.io/collector/confmap/provider/httpsprovider/Makefile +++ /dev/null @@ -1 +0,0 @@ -include ../../../Makefile.Common diff --git a/vendor/go.opentelemetry.io/collector/confmap/provider/httpsprovider/README.md b/vendor/go.opentelemetry.io/collector/confmap/provider/httpsprovider/README.md deleted file mode 100644 index 270dd1b23dd..00000000000 --- a/vendor/go.opentelemetry.io/collector/confmap/provider/httpsprovider/README.md +++ /dev/null @@ -1,18 +0,0 @@ -### What is the httpsprovider? - -An implementation of `confmap.Provider` for HTTPS (httpsprovider) allows OTEL Collector to use the HTTPS protocol to -load configuration files stored in web servers. - -Expected URI format: -- https://... - -### Prerequistes - -You need to setup a HTTP server with support to HTTPS. The server must have a certificate that can be validated in the -host running the collector using system root certificates. - -### Configuration - -At this moment, this component only support communicating with servers whose certificate can be verified using the root -CA certificates installed in the system. The process of adding more root CA certificates to the system is operating -system dependent. For Linux, please refer to the `update-ca-trust` command. diff --git a/vendor/go.opentelemetry.io/collector/confmap/provider/httpsprovider/provider.go b/vendor/go.opentelemetry.io/collector/confmap/provider/httpsprovider/provider.go deleted file mode 100644 index 579c15babf2..00000000000 --- a/vendor/go.opentelemetry.io/collector/confmap/provider/httpsprovider/provider.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package httpsprovider // import "go.opentelemetry.io/collector/confmap/provider/httpsprovider" - -import ( - "go.opentelemetry.io/collector/confmap" - "go.opentelemetry.io/collector/confmap/provider/internal/configurablehttpprovider" -) - -// NewFactory returns a factory for a confmap.Provider that reads the configuration from a https server. -// -// This Provider supports "https" scheme. One example of an HTTPS URI is: https://localhost:3333/getConfig -// -// To add extra CA certificates you need to install certificates in the system pool. This procedure is operating system -// dependent. E.g.: on Linux please refer to the `update-ca-trust` command. 
-func NewFactory() confmap.ProviderFactory { - return confmap.NewProviderFactory(newProvider) -} - -func newProvider(set confmap.ProviderSettings) confmap.Provider { - return configurablehttpprovider.New(configurablehttpprovider.HTTPSScheme, set) -} diff --git a/vendor/go.opentelemetry.io/collector/confmap/provider/internal/configurablehttpprovider/provider.go b/vendor/go.opentelemetry.io/collector/confmap/provider/internal/configurablehttpprovider/provider.go deleted file mode 100644 index c683d3d9a4e..00000000000 --- a/vendor/go.opentelemetry.io/collector/confmap/provider/internal/configurablehttpprovider/provider.go +++ /dev/null @@ -1,121 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package configurablehttpprovider // import "go.opentelemetry.io/collector/confmap/provider/internal/configurablehttpprovider" - -import ( - "context" - "crypto/tls" - "crypto/x509" - "fmt" - "io" - "net/http" - "os" - "path/filepath" - "strings" - - "go.opentelemetry.io/collector/confmap" - "go.opentelemetry.io/collector/confmap/provider/internal" -) - -type SchemeType string - -const ( - HTTPScheme SchemeType = "http" - HTTPSScheme SchemeType = "https" -) - -type provider struct { - scheme SchemeType - caCertPath string // Used for tests - insecureSkipVerify bool // Used for tests -} - -// New returns a new provider that reads the configuration from http server using the configured transport mechanism -// depending on the selected scheme. -// There are two types of transport supported: PlainText (HTTPScheme) and TLS (HTTPSScheme). -// -// One example for http-uri: http://localhost:3333/getConfig -// One example for https-uri: https://localhost:3333/getConfig -// This is used by the http and https external implementations. -func New(scheme SchemeType, _ confmap.ProviderSettings) confmap.Provider { - return &provider{scheme: scheme} -} - -// Create the client based on the type of scheme that was selected. 
-func (fmp *provider) createClient() (*http.Client, error) { - switch fmp.scheme { - case HTTPScheme: - return &http.Client{}, nil - case HTTPSScheme: - pool, err := x509.SystemCertPool() - - if err != nil { - return nil, fmt.Errorf("unable to create a cert pool: %w", err) - } - - if fmp.caCertPath != "" { - cert, err := os.ReadFile(filepath.Clean(fmp.caCertPath)) - - if err != nil { - return nil, fmt.Errorf("unable to read CA from %q URI: %w", fmp.caCertPath, err) - } - - if ok := pool.AppendCertsFromPEM(cert); !ok { - return nil, fmt.Errorf("unable to add CA from uri: %s into the cert pool", fmp.caCertPath) - } - } - - return &http.Client{ - Transport: &http.Transport{ - TLSClientConfig: &tls.Config{ - InsecureSkipVerify: fmp.insecureSkipVerify, - RootCAs: pool, - }, - }, - }, nil - default: - return nil, fmt.Errorf("invalid scheme type: %s", fmp.scheme) - } -} - -func (fmp *provider) Retrieve(_ context.Context, uri string, _ confmap.WatcherFunc) (*confmap.Retrieved, error) { - - if !strings.HasPrefix(uri, string(fmp.scheme)+":") { - return nil, fmt.Errorf("%q uri is not supported by %q provider", uri, string(fmp.scheme)) - } - - client, err := fmp.createClient() - - if err != nil { - return nil, fmt.Errorf("unable to configure http transport layer: %w", err) - } - - // send a HTTP GET request - resp, err := client.Get(uri) - if err != nil { - return nil, fmt.Errorf("unable to download the file via HTTP GET for uri %q: %w ", uri, err) - } - defer resp.Body.Close() - - // check the HTTP status code - if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("failed to load resource from uri %q. status code: %d", uri, resp.StatusCode) - } - - // read the response body - body, err := io.ReadAll(resp.Body) - if err != nil { - return nil, fmt.Errorf("fail to read the response body from uri %q: %w", uri, err) - } - - return internal.NewRetrievedFromYAML(body) -} - -func (fmp *provider) Scheme() string { - return string(fmp.scheme) -} - -func (*provider) Shutdown(context.Context) error { - return nil -} diff --git a/vendor/go.opentelemetry.io/collector/confmap/provider/internal/provider.go b/vendor/go.opentelemetry.io/collector/confmap/provider/internal/provider.go deleted file mode 100644 index 5a378997529..00000000000 --- a/vendor/go.opentelemetry.io/collector/confmap/provider/internal/provider.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package internal // import "go.opentelemetry.io/collector/confmap/provider/internal" - -import ( - "gopkg.in/yaml.v3" - - "go.opentelemetry.io/collector/confmap" -) - -// NewRetrievedFromYAML returns a new Retrieved instance that contains the deserialized data from the yaml bytes. -// * yamlBytes the yaml bytes that will be deserialized. -// * opts specifies options associated with this Retrieved value, such as CloseFunc. -func NewRetrievedFromYAML(yamlBytes []byte, opts ...confmap.RetrievedOption) (*confmap.Retrieved, error) { - var rawConf any - if err := yaml.Unmarshal(yamlBytes, &rawConf); err != nil { - return nil, err - } - return confmap.NewRetrieved(rawConf, opts...) 
-} diff --git a/vendor/go.opentelemetry.io/collector/confmap/provider/yamlprovider/Makefile b/vendor/go.opentelemetry.io/collector/confmap/provider/yamlprovider/Makefile deleted file mode 100644 index bdd863a203b..00000000000 --- a/vendor/go.opentelemetry.io/collector/confmap/provider/yamlprovider/Makefile +++ /dev/null @@ -1 +0,0 @@ -include ../../../Makefile.Common diff --git a/vendor/go.opentelemetry.io/collector/confmap/provider/yamlprovider/provider.go b/vendor/go.opentelemetry.io/collector/confmap/provider/yamlprovider/provider.go deleted file mode 100644 index 949fc71bf7d..00000000000 --- a/vendor/go.opentelemetry.io/collector/confmap/provider/yamlprovider/provider.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package yamlprovider // import "go.opentelemetry.io/collector/confmap/provider/yamlprovider" - -import ( - "context" - "fmt" - "strings" - - "go.opentelemetry.io/collector/confmap" - "go.opentelemetry.io/collector/confmap/provider/internal" -) - -const schemeName = "yaml" - -type provider struct{} - -// NewFactory returns a factory for a confmap.Provider that allows to provide yaml bytes. -// -// This Provider supports "yaml" scheme, and can be called with a "uri" that follows: -// -// bytes-uri = "yaml:" yaml-bytes -// -// Examples: -// `yaml:processors::batch::timeout: 2s` -// `yaml:processors::batch/foo::timeout: 3s` -func NewFactory() confmap.ProviderFactory { - return confmap.NewProviderFactory(newProvider) -} - -func newProvider(confmap.ProviderSettings) confmap.Provider { - return &provider{} -} - -func (s *provider) Retrieve(_ context.Context, uri string, _ confmap.WatcherFunc) (*confmap.Retrieved, error) { - if !strings.HasPrefix(uri, schemeName+":") { - return nil, fmt.Errorf("%q uri is not supported by %q provider", uri, schemeName) - } - - return internal.NewRetrievedFromYAML([]byte(uri[len(schemeName)+1:])) -} - -func (*provider) Scheme() string { - return schemeName -} - -func (s *provider) Shutdown(context.Context) error { - return nil -} diff --git a/vendor/go.opentelemetry.io/collector/confmap/resolver.go b/vendor/go.opentelemetry.io/collector/confmap/resolver.go index 7c9ed303c73..e635ea99564 100644 --- a/vendor/go.opentelemetry.io/collector/confmap/resolver.go +++ b/vendor/go.opentelemetry.io/collector/confmap/resolver.go @@ -96,7 +96,17 @@ func NewResolver(set ResolverSettings) (*Resolver, error) { providers := make(map[string]Provider, len(set.ProviderFactories)) for _, factory := range set.ProviderFactories { provider := factory.Create(set.ProviderSettings) - providers[provider.Scheme()] = provider + scheme := provider.Scheme() + // Check that the scheme follows the pattern. + if !regexp.MustCompile(schemePattern).MatchString(scheme) { + return nil, fmt.Errorf("invalid 'confmap.Provider' scheme %q", scheme) + } + // Check that the scheme is unique. 
+ if _, ok := providers[scheme]; ok { + return nil, fmt.Errorf("duplicate 'confmap.Provider' scheme %q", scheme) + } + + providers[scheme] = provider } if set.DefaultScheme != "" { @@ -167,11 +177,11 @@ func (mr *Resolver) Resolve(ctx context.Context) (*Conf, error) { cfgMap := make(map[string]any) for _, k := range retMap.AllKeys() { - val, err := mr.expandValueRecursively(ctx, retMap.Get(k)) + val, err := mr.expandValueRecursively(ctx, retMap.unsanitizedGet(k)) if err != nil { return nil, err } - cfgMap[k] = val + cfgMap[k] = escapeDollarSigns(val) } retMap = NewFromStringMap(cfgMap) @@ -185,6 +195,31 @@ func (mr *Resolver) Resolve(ctx context.Context) (*Conf, error) { return retMap, nil } +func escapeDollarSigns(val any) any { + switch v := val.(type) { + case string: + return strings.ReplaceAll(v, "$$", "$") + case expandedValue: + v.Original = strings.ReplaceAll(v.Original, "$$", "$") + v.Value = escapeDollarSigns(v.Value) + return v + case []any: + nslice := make([]any, len(v)) + for i, x := range v { + nslice[i] = escapeDollarSigns(x) + } + return nslice + case map[string]any: + nmap := make(map[string]any, len(v)) + for k, x := range v { + nmap[k] = escapeDollarSigns(x) + } + return nmap + default: + return val + } +} + // Watch blocks until any configuration change was detected or an unrecoverable error // happened during monitoring the configuration changes. // diff --git a/vendor/go.opentelemetry.io/collector/connector/README.md b/vendor/go.opentelemetry.io/collector/connector/README.md index 51377534628..02477644c4d 100644 --- a/vendor/go.opentelemetry.io/collector/connector/README.md +++ b/vendor/go.opentelemetry.io/collector/connector/README.md @@ -1,7 +1,7 @@ # Connectors A connector is both an exporter and receiver. As the name suggests a Connector connects -two pipelines: it emits data as an exporter at the end of one pipeline and consumes data +two pipelines: it consumes data as an exporter at the end of one pipeline and emits data as a receiver at the start of another pipeline. It may consume and emit data of the same data type, or of different data types. A connector may generate and emit data to summarize the consumed data, or it may simply replicate or route data. diff --git a/vendor/go.opentelemetry.io/collector/connector/connector.go b/vendor/go.opentelemetry.io/collector/connector/connector.go index 94927092117..2ae78f26a6b 100644 --- a/vendor/go.opentelemetry.io/collector/connector/connector.go +++ b/vendor/go.opentelemetry.io/collector/connector/connector.go @@ -5,17 +5,12 @@ package connector // import "go.opentelemetry.io/collector/connector" import ( "context" - "errors" "fmt" - "go.uber.org/zap" - "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/connector/internal" "go.opentelemetry.io/collector/consumer" -) - -var ( - errNilNextConsumer = errors.New("nil next Consumer") + "go.opentelemetry.io/collector/pipeline" ) // A Traces connector acts as an exporter from a traces pipeline and a receiver @@ -66,8 +61,8 @@ type Logs interface { consumer.Logs } -// CreateSettings configures Connector creators. -type CreateSettings struct { +// Settings configures Connector creators. +type Settings struct { // ID returns the ID of the component that will be created. ID component.ID @@ -77,7 +72,7 @@ type CreateSettings struct { BuildInfo component.BuildInfo } -// Factory is factory interface for connectors. +// Factory is a factory interface for connectors. // // This interface cannot be directly implemented.
Implementations must // use the NewFactory to implement it. @@ -93,17 +88,17 @@ type Factory interface { // tests of any implementation of the Factory interface. CreateDefaultConfig() component.Config - CreateTracesToTraces(ctx context.Context, set CreateSettings, cfg component.Config, nextConsumer consumer.Traces) (Traces, error) - CreateTracesToMetrics(ctx context.Context, set CreateSettings, cfg component.Config, nextConsumer consumer.Metrics) (Traces, error) - CreateTracesToLogs(ctx context.Context, set CreateSettings, cfg component.Config, nextConsumer consumer.Logs) (Traces, error) + CreateTracesToTraces(ctx context.Context, set Settings, cfg component.Config, next consumer.Traces) (Traces, error) + CreateTracesToMetrics(ctx context.Context, set Settings, cfg component.Config, next consumer.Metrics) (Traces, error) + CreateTracesToLogs(ctx context.Context, set Settings, cfg component.Config, next consumer.Logs) (Traces, error) - CreateMetricsToTraces(ctx context.Context, set CreateSettings, cfg component.Config, nextConsumer consumer.Traces) (Metrics, error) - CreateMetricsToMetrics(ctx context.Context, set CreateSettings, cfg component.Config, nextConsumer consumer.Metrics) (Metrics, error) - CreateMetricsToLogs(ctx context.Context, set CreateSettings, cfg component.Config, nextConsumer consumer.Logs) (Metrics, error) + CreateMetricsToTraces(ctx context.Context, set Settings, cfg component.Config, next consumer.Traces) (Metrics, error) + CreateMetricsToMetrics(ctx context.Context, set Settings, cfg component.Config, next consumer.Metrics) (Metrics, error) + CreateMetricsToLogs(ctx context.Context, set Settings, cfg component.Config, next consumer.Logs) (Metrics, error) - CreateLogsToTraces(ctx context.Context, set CreateSettings, cfg component.Config, nextConsumer consumer.Traces) (Logs, error) - CreateLogsToMetrics(ctx context.Context, set CreateSettings, cfg component.Config, nextConsumer consumer.Metrics) (Logs, error) - CreateLogsToLogs(ctx context.Context, set CreateSettings, cfg component.Config, nextConsumer consumer.Logs) (Logs, error) + CreateLogsToTraces(ctx context.Context, set Settings, cfg component.Config, next consumer.Traces) (Logs, error) + CreateLogsToMetrics(ctx context.Context, set Settings, cfg component.Config, next consumer.Metrics) (Logs, error) + CreateLogsToLogs(ctx context.Context, set Settings, cfg component.Config, next consumer.Logs) (Logs, error) TracesToTracesStability() component.StabilityLevel TracesToMetricsStability() component.StabilityLevel @@ -136,185 +131,104 @@ func (f factoryOptionFunc) apply(o *factory) { } // CreateTracesToTracesFunc is the equivalent of Factory.CreateTracesToTraces(). -type CreateTracesToTracesFunc func(context.Context, CreateSettings, component.Config, consumer.Traces) (Traces, error) +type CreateTracesToTracesFunc func(context.Context, Settings, component.Config, consumer.Traces) (Traces, error) // CreateTracesToTraces implements Factory.CreateTracesToTraces(). 
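The creator hunks that follow flatten each function signature onto one line, rename CreateSettings to Settings, and route the nil-function guard through the new internal.ErrDataTypes helper keyed on pipeline signals. A sketch of the caller-visible effect, assuming connector.NewFactory keeps its usual (type, default config, options...) shape; the "example" type name is made up for illustration:

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/collector/component"
	"go.opentelemetry.io/collector/connector"
	"go.opentelemetry.io/collector/connector/connectortest"
	"go.opentelemetry.io/collector/consumer/consumertest"
)

func main() {
	// A factory declared with no With* options supports no translation:
	// every typed creator hits its nil-func guard.
	f := connector.NewFactory(
		component.MustNewType("example"),
		func() component.Config { return &struct{}{} },
	)
	_, err := f.CreateTracesToMetrics(
		context.Background(),
		connectortest.NewNopSettings(),
		f.CreateDefaultConfig(),
		consumertest.NewNop(),
	)
	// Expected shape: connector "example/..." cannot connect from traces
	// to metrics: <not-supported error from the pipeline package>
	fmt.Println(err)
}
```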
-func (f CreateTracesToTracesFunc) CreateTracesToTraces( - ctx context.Context, - set CreateSettings, - cfg component.Config, - nextConsumer consumer.Traces) (Traces, error) { +func (f CreateTracesToTracesFunc) CreateTracesToTraces(ctx context.Context, set Settings, cfg component.Config, next consumer.Traces) (Traces, error) { if f == nil { - return nil, errDataTypes(set.ID, component.DataTypeTraces, component.DataTypeTraces) + return nil, internal.ErrDataTypes(set.ID, pipeline.SignalTraces, pipeline.SignalTraces) } - return f(ctx, set, cfg, nextConsumer) + return f(ctx, set, cfg, next) } // CreateTracesToMetricsFunc is the equivalent of Factory.CreateTracesToMetrics(). -type CreateTracesToMetricsFunc func(context.Context, CreateSettings, component.Config, consumer.Metrics) (Traces, error) +type CreateTracesToMetricsFunc func(context.Context, Settings, component.Config, consumer.Metrics) (Traces, error) // CreateTracesToMetrics implements Factory.CreateTracesToMetrics(). -func (f CreateTracesToMetricsFunc) CreateTracesToMetrics( - ctx context.Context, - set CreateSettings, - cfg component.Config, - nextConsumer consumer.Metrics, -) (Traces, error) { +func (f CreateTracesToMetricsFunc) CreateTracesToMetrics(ctx context.Context, set Settings, cfg component.Config, next consumer.Metrics) (Traces, error) { if f == nil { - return nil, errDataTypes(set.ID, component.DataTypeTraces, component.DataTypeMetrics) + return nil, internal.ErrDataTypes(set.ID, pipeline.SignalTraces, pipeline.SignalMetrics) } - return f(ctx, set, cfg, nextConsumer) + return f(ctx, set, cfg, next) } // CreateTracesToLogsFunc is the equivalent of Factory.CreateTracesToLogs(). -type CreateTracesToLogsFunc func(context.Context, CreateSettings, component.Config, consumer.Logs) (Traces, error) +type CreateTracesToLogsFunc func(context.Context, Settings, component.Config, consumer.Logs) (Traces, error) // CreateTracesToLogs implements Factory.CreateTracesToLogs(). -func (f CreateTracesToLogsFunc) CreateTracesToLogs( - ctx context.Context, - set CreateSettings, - cfg component.Config, - nextConsumer consumer.Logs, -) (Traces, error) { +func (f CreateTracesToLogsFunc) CreateTracesToLogs(ctx context.Context, set Settings, cfg component.Config, next consumer.Logs) (Traces, error) { if f == nil { - return nil, errDataTypes(set.ID, component.DataTypeTraces, component.DataTypeLogs) + return nil, internal.ErrDataTypes(set.ID, pipeline.SignalTraces, pipeline.SignalLogs) } - return f(ctx, set, cfg, nextConsumer) + return f(ctx, set, cfg, next) } // CreateMetricsToTracesFunc is the equivalent of Factory.CreateMetricsToTraces(). -type CreateMetricsToTracesFunc func(context.Context, CreateSettings, component.Config, consumer.Traces) (Metrics, error) +type CreateMetricsToTracesFunc func(context.Context, Settings, component.Config, consumer.Traces) (Metrics, error) // CreateMetricsToTraces implements Factory.CreateMetricsToTraces(). 
-func (f CreateMetricsToTracesFunc) CreateMetricsToTraces( - ctx context.Context, - set CreateSettings, - cfg component.Config, - nextConsumer consumer.Traces, -) (Metrics, error) { +func (f CreateMetricsToTracesFunc) CreateMetricsToTraces(ctx context.Context, set Settings, cfg component.Config, next consumer.Traces) (Metrics, error) { if f == nil { - return nil, errDataTypes(set.ID, component.DataTypeMetrics, component.DataTypeTraces) + return nil, internal.ErrDataTypes(set.ID, pipeline.SignalMetrics, pipeline.SignalTraces) } - return f(ctx, set, cfg, nextConsumer) + return f(ctx, set, cfg, next) } // CreateMetricsToMetricsFunc is the equivalent of Factory.CreateMetricsToTraces(). -type CreateMetricsToMetricsFunc func(context.Context, CreateSettings, component.Config, consumer.Metrics) (Metrics, error) +type CreateMetricsToMetricsFunc func(context.Context, Settings, component.Config, consumer.Metrics) (Metrics, error) // CreateMetricsToMetrics implements Factory.CreateMetricsToTraces(). -func (f CreateMetricsToMetricsFunc) CreateMetricsToMetrics( - ctx context.Context, - set CreateSettings, - cfg component.Config, - nextConsumer consumer.Metrics, -) (Metrics, error) { +func (f CreateMetricsToMetricsFunc) CreateMetricsToMetrics(ctx context.Context, set Settings, cfg component.Config, next consumer.Metrics) (Metrics, error) { if f == nil { - return nil, errDataTypes(set.ID, component.DataTypeMetrics, component.DataTypeMetrics) + return nil, internal.ErrDataTypes(set.ID, pipeline.SignalMetrics, pipeline.SignalMetrics) } - return f(ctx, set, cfg, nextConsumer) + return f(ctx, set, cfg, next) } // CreateMetricsToLogsFunc is the equivalent of Factory.CreateMetricsToLogs(). -type CreateMetricsToLogsFunc func(context.Context, CreateSettings, component.Config, consumer.Logs) (Metrics, error) +type CreateMetricsToLogsFunc func(context.Context, Settings, component.Config, consumer.Logs) (Metrics, error) // CreateMetricsToLogs implements Factory.CreateMetricsToLogs(). -func (f CreateMetricsToLogsFunc) CreateMetricsToLogs( - ctx context.Context, - set CreateSettings, - cfg component.Config, - nextConsumer consumer.Logs, -) (Metrics, error) { +func (f CreateMetricsToLogsFunc) CreateMetricsToLogs(ctx context.Context, set Settings, cfg component.Config, next consumer.Logs) (Metrics, error) { if f == nil { - return nil, errDataTypes(set.ID, component.DataTypeMetrics, component.DataTypeLogs) + return nil, internal.ErrDataTypes(set.ID, pipeline.SignalMetrics, pipeline.SignalLogs) } - return f(ctx, set, cfg, nextConsumer) + return f(ctx, set, cfg, next) } // CreateLogsToTracesFunc is the equivalent of Factory.CreateLogsToTraces(). -type CreateLogsToTracesFunc func(context.Context, CreateSettings, component.Config, consumer.Traces) (Logs, error) +type CreateLogsToTracesFunc func(context.Context, Settings, component.Config, consumer.Traces) (Logs, error) // CreateLogsToTraces implements Factory.CreateLogsToTraces(). 
-func (f CreateLogsToTracesFunc) CreateLogsToTraces( - ctx context.Context, - set CreateSettings, - cfg component.Config, - nextConsumer consumer.Traces, -) (Logs, error) { +func (f CreateLogsToTracesFunc) CreateLogsToTraces(ctx context.Context, set Settings, cfg component.Config, next consumer.Traces) (Logs, error) { if f == nil { - return nil, errDataTypes(set.ID, component.DataTypeLogs, component.DataTypeTraces) + return nil, internal.ErrDataTypes(set.ID, pipeline.SignalLogs, pipeline.SignalTraces) } - return f(ctx, set, cfg, nextConsumer) + return f(ctx, set, cfg, next) } // CreateLogsToMetricsFunc is the equivalent of Factory.CreateLogsToMetrics(). -type CreateLogsToMetricsFunc func(context.Context, CreateSettings, component.Config, consumer.Metrics) (Logs, error) +type CreateLogsToMetricsFunc func(context.Context, Settings, component.Config, consumer.Metrics) (Logs, error) // CreateLogsToMetrics implements Factory.CreateLogsToMetrics(). -func (f CreateLogsToMetricsFunc) CreateLogsToMetrics( - ctx context.Context, - set CreateSettings, - cfg component.Config, - nextConsumer consumer.Metrics, -) (Logs, error) { +func (f CreateLogsToMetricsFunc) CreateLogsToMetrics(ctx context.Context, set Settings, cfg component.Config, next consumer.Metrics) (Logs, error) { if f == nil { - return nil, errDataTypes(set.ID, component.DataTypeLogs, component.DataTypeMetrics) + return nil, internal.ErrDataTypes(set.ID, pipeline.SignalLogs, pipeline.SignalMetrics) } - return f(ctx, set, cfg, nextConsumer) + return f(ctx, set, cfg, next) } // CreateLogsToLogsFunc is the equivalent of Factory.CreateLogsToLogs(). -type CreateLogsToLogsFunc func(context.Context, CreateSettings, component.Config, consumer.Logs) (Logs, error) +type CreateLogsToLogsFunc func(context.Context, Settings, component.Config, consumer.Logs) (Logs, error) // CreateLogsToLogs implements Factory.CreateLogsToLogs(). -func (f CreateLogsToLogsFunc) CreateLogsToLogs( - ctx context.Context, - set CreateSettings, - cfg component.Config, - nextConsumer consumer.Logs, -) (Logs, error) { +func (f CreateLogsToLogsFunc) CreateLogsToLogs(ctx context.Context, set Settings, cfg component.Config, next consumer.Logs) (Logs, error) { if f == nil { - return nil, errDataTypes(set.ID, component.DataTypeLogs, component.DataTypeLogs) + return nil, internal.ErrDataTypes(set.ID, pipeline.SignalLogs, pipeline.SignalLogs) } - return f(ctx, set, cfg, nextConsumer) -} - -// factory implements Factory. -type factory struct { - cfgType component.Type - component.CreateDefaultConfigFunc - - CreateTracesToTracesFunc - CreateTracesToMetricsFunc - CreateTracesToLogsFunc - - CreateMetricsToTracesFunc - CreateMetricsToMetricsFunc - CreateMetricsToLogsFunc - - CreateLogsToTracesFunc - CreateLogsToMetricsFunc - CreateLogsToLogsFunc - - tracesToTracesStabilityLevel component.StabilityLevel - tracesToMetricsStabilityLevel component.StabilityLevel - tracesToLogsStabilityLevel component.StabilityLevel - - metricsToTracesStabilityLevel component.StabilityLevel - metricsToMetricsStabilityLevel component.StabilityLevel - metricsToLogsStabilityLevel component.StabilityLevel - - logsToTracesStabilityLevel component.StabilityLevel - logsToMetricsStabilityLevel component.StabilityLevel - logsToLogsStabilityLevel component.StabilityLevel + return f(ctx, set, cfg, next) } -// Type returns the type of component. 
-func (f *factory) Type() component.Type { - return f.cfgType -} - -func (f *factory) unexportedFactoryFunc() {} - // WithTracesToTraces overrides the default "error not supported" implementation for WithTracesToTraces and the default "undefined" stability level. func WithTracesToTraces(createTracesToTraces CreateTracesToTracesFunc, sl component.StabilityLevel) FactoryOption { return factoryOptionFunc(func(o *factory) { @@ -387,39 +301,76 @@ func WithLogsToLogs(createLogsToLogs CreateLogsToLogsFunc, sl component.Stabilit }) } -func (f factory) TracesToTracesStability() component.StabilityLevel { +// factory implements the Factory interface. +type factory struct { + cfgType component.Type + component.CreateDefaultConfigFunc + + CreateTracesToTracesFunc + CreateTracesToMetricsFunc + CreateTracesToLogsFunc + + CreateMetricsToTracesFunc + CreateMetricsToMetricsFunc + CreateMetricsToLogsFunc + + CreateLogsToTracesFunc + CreateLogsToMetricsFunc + CreateLogsToLogsFunc + + tracesToTracesStabilityLevel component.StabilityLevel + tracesToMetricsStabilityLevel component.StabilityLevel + tracesToLogsStabilityLevel component.StabilityLevel + + metricsToTracesStabilityLevel component.StabilityLevel + metricsToMetricsStabilityLevel component.StabilityLevel + metricsToLogsStabilityLevel component.StabilityLevel + + logsToTracesStabilityLevel component.StabilityLevel + logsToMetricsStabilityLevel component.StabilityLevel + logsToLogsStabilityLevel component.StabilityLevel +} + +// Type returns the type of component. +func (f *factory) Type() component.Type { + return f.cfgType +} + +func (f *factory) unexportedFactoryFunc() {} + +func (f *factory) TracesToTracesStability() component.StabilityLevel { return f.tracesToTracesStabilityLevel } -func (f factory) TracesToMetricsStability() component.StabilityLevel { +func (f *factory) TracesToMetricsStability() component.StabilityLevel { return f.tracesToMetricsStabilityLevel } -func (f factory) TracesToLogsStability() component.StabilityLevel { +func (f *factory) TracesToLogsStability() component.StabilityLevel { return f.tracesToLogsStabilityLevel } -func (f factory) MetricsToTracesStability() component.StabilityLevel { +func (f *factory) MetricsToTracesStability() component.StabilityLevel { return f.metricsToTracesStabilityLevel } -func (f factory) MetricsToMetricsStability() component.StabilityLevel { +func (f *factory) MetricsToMetricsStability() component.StabilityLevel { return f.metricsToMetricsStabilityLevel } -func (f factory) MetricsToLogsStability() component.StabilityLevel { +func (f *factory) MetricsToLogsStability() component.StabilityLevel { return f.metricsToLogsStabilityLevel } -func (f factory) LogsToTracesStability() component.StabilityLevel { +func (f *factory) LogsToTracesStability() component.StabilityLevel { return f.logsToTracesStabilityLevel } -func (f factory) LogsToMetricsStability() component.StabilityLevel { +func (f *factory) LogsToMetricsStability() component.StabilityLevel { return f.logsToMetricsStabilityLevel } -func (f factory) LogsToLogsStability() component.StabilityLevel { +func (f *factory) LogsToLogsStability() component.StabilityLevel { return f.logsToLogsStabilityLevel } @@ -447,209 +398,3 @@ func MakeFactoryMap(factories ...Factory) (map[component.Type]Factory, error) { } return fMap, nil } - -// Builder processor is a helper struct that given a set of Configs and Factories helps with creating processors. 
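The Builder removed below resolved a connector in two steps, config by component ID and then factory by component type, before delegating to the matching Create* method. A simplified, self-contained sketch of that lookup pattern (plain strings stand in for the collector's component.ID and component.Type):

```go
package main

import "fmt"

// builder mirrors the removed Builder's shape: one map of configs keyed by
// component ID, one map of factories keyed by component type.
type builder struct {
	cfgs      map[string]any
	factories map[string]func(cfg any) (any, error)
}

func (b *builder) create(id, typ string) (any, error) {
	cfg, ok := b.cfgs[id]
	if !ok {
		return nil, fmt.Errorf("connector %q is not configured", id)
	}
	f, ok := b.factories[typ]
	if !ok {
		return nil, fmt.Errorf("connector factory not available for: %q", id)
	}
	return f(cfg)
}

func main() {
	b := &builder{
		cfgs: map[string]any{"count/spans": struct{}{}},
		factories: map[string]func(any) (any, error){
			"count": func(any) (any, error) { return "a connector", nil },
		},
	}
	fmt.Println(b.create("count/spans", "count")) // a connector <nil>
	fmt.Println(b.create("missing", "count"))     // error: not configured
}
```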
-type Builder struct { - cfgs map[component.ID]component.Config - factories map[component.Type]Factory -} - -// NewBuilder creates a new connector.Builder to help with creating components form a set of configs and factories. -func NewBuilder(cfgs map[component.ID]component.Config, factories map[component.Type]Factory) *Builder { - return &Builder{cfgs: cfgs, factories: factories} -} - -// CreateTracesToTraces creates a Traces connector based on the settings and config. -func (b *Builder) CreateTracesToTraces(ctx context.Context, set CreateSettings, next consumer.Traces) (Traces, error) { - if next == nil { - return nil, errNilNextConsumer - } - cfg, existsCfg := b.cfgs[set.ID] - if !existsCfg { - return nil, fmt.Errorf("connector %q is not configured", set.ID) - } - - f, existsFactory := b.factories[set.ID.Type()] - if !existsFactory { - return nil, fmt.Errorf("connector factory not available for: %q", set.ID) - } - - logStabilityLevel(set.Logger, f.TracesToTracesStability()) - return f.CreateTracesToTraces(ctx, set, cfg, next) -} - -// CreateTracesToMetrics creates a Traces connector based on the settings and config. -func (b *Builder) CreateTracesToMetrics(ctx context.Context, set CreateSettings, next consumer.Metrics) (Traces, error) { - if next == nil { - return nil, errNilNextConsumer - } - cfg, existsCfg := b.cfgs[set.ID] - if !existsCfg { - return nil, fmt.Errorf("connector %q is not configured", set.ID) - } - - f, existsFactory := b.factories[set.ID.Type()] - if !existsFactory { - return nil, fmt.Errorf("connector factory not available for: %q", set.ID) - } - - logStabilityLevel(set.Logger, f.TracesToMetricsStability()) - return f.CreateTracesToMetrics(ctx, set, cfg, next) -} - -// CreateTracesToLogs creates a Traces connector based on the settings and config. -func (b *Builder) CreateTracesToLogs(ctx context.Context, set CreateSettings, next consumer.Logs) (Traces, error) { - if next == nil { - return nil, errNilNextConsumer - } - cfg, existsCfg := b.cfgs[set.ID] - if !existsCfg { - return nil, fmt.Errorf("connector %q is not configured", set.ID) - } - - f, existsFactory := b.factories[set.ID.Type()] - if !existsFactory { - return nil, fmt.Errorf("connector factory not available for: %q", set.ID) - } - - logStabilityLevel(set.Logger, f.TracesToLogsStability()) - return f.CreateTracesToLogs(ctx, set, cfg, next) -} - -// CreateMetricsToTraces creates a Metrics connector based on the settings and config. -func (b *Builder) CreateMetricsToTraces(ctx context.Context, set CreateSettings, next consumer.Traces) (Metrics, error) { - if next == nil { - return nil, errNilNextConsumer - } - cfg, existsCfg := b.cfgs[set.ID] - if !existsCfg { - return nil, fmt.Errorf("connector %q is not configured", set.ID) - } - - f, existsFactory := b.factories[set.ID.Type()] - if !existsFactory { - return nil, fmt.Errorf("connector factory not available for: %q", set.ID) - } - - logStabilityLevel(set.Logger, f.MetricsToTracesStability()) - return f.CreateMetricsToTraces(ctx, set, cfg, next) -} - -// CreateMetricsToMetrics creates a Metrics connector based on the settings and config. 
-func (b *Builder) CreateMetricsToMetrics(ctx context.Context, set CreateSettings, next consumer.Metrics) (Metrics, error) { - if next == nil { - return nil, errNilNextConsumer - } - cfg, existsCfg := b.cfgs[set.ID] - if !existsCfg { - return nil, fmt.Errorf("connector %q is not configured", set.ID) - } - - f, existsFactory := b.factories[set.ID.Type()] - if !existsFactory { - return nil, fmt.Errorf("connector factory not available for: %q", set.ID) - } - - logStabilityLevel(set.Logger, f.MetricsToMetricsStability()) - return f.CreateMetricsToMetrics(ctx, set, cfg, next) -} - -// CreateMetricsToLogs creates a Metrics connector based on the settings and config. -func (b *Builder) CreateMetricsToLogs(ctx context.Context, set CreateSettings, next consumer.Logs) (Metrics, error) { - if next == nil { - return nil, errNilNextConsumer - } - cfg, existsCfg := b.cfgs[set.ID] - if !existsCfg { - return nil, fmt.Errorf("connector %q is not configured", set.ID) - } - - f, existsFactory := b.factories[set.ID.Type()] - if !existsFactory { - return nil, fmt.Errorf("connector factory not available for: %q", set.ID) - } - - logStabilityLevel(set.Logger, f.MetricsToLogsStability()) - return f.CreateMetricsToLogs(ctx, set, cfg, next) -} - -// CreateLogsToTraces creates a Logs connector based on the settings and config. -func (b *Builder) CreateLogsToTraces(ctx context.Context, set CreateSettings, next consumer.Traces) (Logs, error) { - if next == nil { - return nil, errNilNextConsumer - } - cfg, existsCfg := b.cfgs[set.ID] - if !existsCfg { - return nil, fmt.Errorf("connector %q is not configured", set.ID) - } - - f, existsFactory := b.factories[set.ID.Type()] - if !existsFactory { - return nil, fmt.Errorf("connector factory not available for: %q", set.ID) - } - - logStabilityLevel(set.Logger, f.LogsToTracesStability()) - return f.CreateLogsToTraces(ctx, set, cfg, next) -} - -// CreateLogsToMetrics creates a Logs connector based on the settings and config. -func (b *Builder) CreateLogsToMetrics(ctx context.Context, set CreateSettings, next consumer.Metrics) (Logs, error) { - if next == nil { - return nil, errNilNextConsumer - } - cfg, existsCfg := b.cfgs[set.ID] - if !existsCfg { - return nil, fmt.Errorf("connector %q is not configured", set.ID) - } - - f, existsFactory := b.factories[set.ID.Type()] - if !existsFactory { - return nil, fmt.Errorf("connector factory not available for: %q", set.ID) - } - - logStabilityLevel(set.Logger, f.LogsToMetricsStability()) - return f.CreateLogsToMetrics(ctx, set, cfg, next) -} - -// CreateLogsToLogs creates a Logs connector based on the settings and config. -func (b *Builder) CreateLogsToLogs(ctx context.Context, set CreateSettings, next consumer.Logs) (Logs, error) { - if next == nil { - return nil, errNilNextConsumer - } - cfg, existsCfg := b.cfgs[set.ID] - if !existsCfg { - return nil, fmt.Errorf("connector %q is not configured", set.ID) - } - - f, existsFactory := b.factories[set.ID.Type()] - if !existsFactory { - return nil, fmt.Errorf("connector factory not available for: %q", set.ID) - } - - logStabilityLevel(set.Logger, f.LogsToLogsStability()) - return f.CreateLogsToLogs(ctx, set, cfg, next) -} - -func (b *Builder) IsConfigured(componentID component.ID) bool { - _, ok := b.cfgs[componentID] - return ok -} - -func (b *Builder) Factory(componentType component.Type) component.Factory { - return b.factories[componentType] -} - -// logStabilityLevel logs the stability level of a component. 
The log level is set to info for -// undefined, unmaintained, deprecated and development. The log level is set to debug -// for alpha, beta and stable. -func logStabilityLevel(logger *zap.Logger, sl component.StabilityLevel) { - if sl >= component.StabilityLevelAlpha { - logger.Debug(sl.LogMessage()) - } else { - logger.Info(sl.LogMessage()) - } -} - -func errDataTypes(id component.ID, from, to component.DataType) error { - return fmt.Errorf("connector %q cannot connect from %s to %s: %w", id, from, to, component.ErrDataTypeIsNotSupported) -} diff --git a/vendor/go.opentelemetry.io/collector/connector/connectortest/LICENSE b/vendor/go.opentelemetry.io/collector/connector/connectortest/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/connector/connectortest/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/collector/connector/connectortest/Makefile b/vendor/go.opentelemetry.io/collector/connector/connectortest/Makefile new file mode 100644 index 00000000000..ded7a36092d --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/connector/connectortest/Makefile @@ -0,0 +1 @@ +include ../../Makefile.Common diff --git a/vendor/go.opentelemetry.io/collector/connector/connectortest/connector.go b/vendor/go.opentelemetry.io/collector/connector/connectortest/connector.go new file mode 100644 index 00000000000..fbc8febe94f --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/connector/connectortest/connector.go @@ -0,0 +1,128 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package connectortest // import "go.opentelemetry.io/collector/connector/connectortest" + +import ( + "context" + + "github.com/google/uuid" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/connector" + "go.opentelemetry.io/collector/connector/xconnector" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/consumer/xconsumer" +) + +var nopType = component.MustNewType("nop") + +// NewNopSettings returns a new nop settings for Create* functions. +func NewNopSettings() connector.Settings { + return connector.Settings{ + ID: component.NewIDWithName(nopType, uuid.NewString()), + TelemetrySettings: componenttest.NewNopTelemetrySettings(), + BuildInfo: component.NewDefaultBuildInfo(), + } +} + +type nopConfig struct{} + +// NewNopFactory returns a connector.Factory that constructs nop processors. 
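Taken together with NewNopSettings above, the nop factory defined just below gives tests a connector for every signal pair; every call in this sketch appears verbatim in this diff:

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/collector/connector/connectortest"
	"go.opentelemetry.io/collector/consumer/consumertest"
)

func main() {
	f := connectortest.NewNopFactory()
	// Any of the sixteen creators works the same way; traces-to-traces shown.
	conn, err := f.CreateTracesToTraces(
		context.Background(),
		connectortest.NewNopSettings(),
		f.CreateDefaultConfig(),
		consumertest.NewNop(),
	)
	fmt.Println(conn != nil, err) // true <nil>
}
```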
+func NewNopFactory() connector.Factory { + return xconnector.NewFactory( + nopType, + func() component.Config { + return &nopConfig{} + }, + xconnector.WithTracesToTraces(createTracesToTracesConnector, component.StabilityLevelDevelopment), + xconnector.WithTracesToMetrics(createTracesToMetricsConnector, component.StabilityLevelDevelopment), + xconnector.WithTracesToLogs(createTracesToLogsConnector, component.StabilityLevelDevelopment), + xconnector.WithTracesToProfiles(createTracesToProfilesConnector, component.StabilityLevelAlpha), + xconnector.WithMetricsToTraces(createMetricsToTracesConnector, component.StabilityLevelDevelopment), + xconnector.WithMetricsToMetrics(createMetricsToMetricsConnector, component.StabilityLevelDevelopment), + xconnector.WithMetricsToLogs(createMetricsToLogsConnector, component.StabilityLevelDevelopment), + xconnector.WithMetricsToProfiles(createMetricsToProfilesConnector, component.StabilityLevelAlpha), + xconnector.WithLogsToTraces(createLogsToTracesConnector, component.StabilityLevelDevelopment), + xconnector.WithLogsToMetrics(createLogsToMetricsConnector, component.StabilityLevelDevelopment), + xconnector.WithLogsToLogs(createLogsToLogsConnector, component.StabilityLevelDevelopment), + xconnector.WithLogsToProfiles(createLogsToProfilesConnector, component.StabilityLevelAlpha), + xconnector.WithProfilesToTraces(createProfilesToTracesConnector, component.StabilityLevelAlpha), + xconnector.WithProfilesToMetrics(createProfilesToMetricsConnector, component.StabilityLevelAlpha), + xconnector.WithProfilesToLogs(createProfilesToLogsConnector, component.StabilityLevelAlpha), + xconnector.WithProfilesToProfiles(createProfilesToProfilesConnector, component.StabilityLevelAlpha), + ) +} + +func createTracesToTracesConnector(context.Context, connector.Settings, component.Config, consumer.Traces) (connector.Traces, error) { + return &nopConnector{Consumer: consumertest.NewNop()}, nil +} + +func createTracesToMetricsConnector(context.Context, connector.Settings, component.Config, consumer.Metrics) (connector.Traces, error) { + return &nopConnector{Consumer: consumertest.NewNop()}, nil +} + +func createTracesToLogsConnector(context.Context, connector.Settings, component.Config, consumer.Logs) (connector.Traces, error) { + return &nopConnector{Consumer: consumertest.NewNop()}, nil +} + +func createTracesToProfilesConnector(context.Context, connector.Settings, component.Config, xconsumer.Profiles) (connector.Traces, error) { + return &nopConnector{Consumer: consumertest.NewNop()}, nil +} + +func createMetricsToTracesConnector(context.Context, connector.Settings, component.Config, consumer.Traces) (connector.Metrics, error) { + return &nopConnector{Consumer: consumertest.NewNop()}, nil +} + +func createMetricsToMetricsConnector(context.Context, connector.Settings, component.Config, consumer.Metrics) (connector.Metrics, error) { + return &nopConnector{Consumer: consumertest.NewNop()}, nil +} + +func createMetricsToLogsConnector(context.Context, connector.Settings, component.Config, consumer.Logs) (connector.Metrics, error) { + return &nopConnector{Consumer: consumertest.NewNop()}, nil +} + +func createMetricsToProfilesConnector(context.Context, connector.Settings, component.Config, xconsumer.Profiles) (connector.Metrics, error) { + return &nopConnector{Consumer: consumertest.NewNop()}, nil +} + +func createLogsToTracesConnector(context.Context, connector.Settings, component.Config, consumer.Traces) (connector.Logs, error) { + return &nopConnector{Consumer: 
consumertest.NewNop()}, nil +} + +func createLogsToMetricsConnector(context.Context, connector.Settings, component.Config, consumer.Metrics) (connector.Logs, error) { + return &nopConnector{Consumer: consumertest.NewNop()}, nil +} + +func createLogsToLogsConnector(context.Context, connector.Settings, component.Config, consumer.Logs) (connector.Logs, error) { + return &nopConnector{Consumer: consumertest.NewNop()}, nil +} + +func createLogsToProfilesConnector(context.Context, connector.Settings, component.Config, xconsumer.Profiles) (connector.Logs, error) { + return &nopConnector{Consumer: consumertest.NewNop()}, nil +} + +func createProfilesToTracesConnector(context.Context, connector.Settings, component.Config, consumer.Traces) (xconnector.Profiles, error) { + return &nopConnector{Consumer: consumertest.NewNop()}, nil +} + +func createProfilesToMetricsConnector(context.Context, connector.Settings, component.Config, consumer.Metrics) (xconnector.Profiles, error) { + return &nopConnector{Consumer: consumertest.NewNop()}, nil +} + +func createProfilesToLogsConnector(context.Context, connector.Settings, component.Config, consumer.Logs) (xconnector.Profiles, error) { + return &nopConnector{Consumer: consumertest.NewNop()}, nil +} + +func createProfilesToProfilesConnector(context.Context, connector.Settings, component.Config, xconsumer.Profiles) (xconnector.Profiles, error) { + return &nopConnector{Consumer: consumertest.NewNop()}, nil +} + +// nopConnector stores consumed traces and metrics for testing purposes. +type nopConnector struct { + component.StartFunc + component.ShutdownFunc + consumertest.Consumer +} diff --git a/vendor/go.opentelemetry.io/collector/connector/internal/factory.go b/vendor/go.opentelemetry.io/collector/connector/internal/factory.go new file mode 100644 index 00000000000..b865f6311cb --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/connector/internal/factory.go @@ -0,0 +1,15 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/collector/connector/internal" + +import ( + "fmt" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/pipeline" +) + +func ErrDataTypes(id component.ID, from, to pipeline.Signal) error { + return fmt.Errorf("connector %q cannot connect from %s to %s: %w", id, from, to, pipeline.ErrSignalNotSupported) +} diff --git a/vendor/go.opentelemetry.io/collector/connector/router.go b/vendor/go.opentelemetry.io/collector/connector/internal/router.go similarity index 50% rename from vendor/go.opentelemetry.io/collector/connector/router.go rename to vendor/go.opentelemetry.io/collector/connector/internal/router.go index bba7ee76bba..e951abd134f 100644 --- a/vendor/go.opentelemetry.io/collector/connector/router.go +++ b/vendor/go.opentelemetry.io/collector/connector/internal/router.go @@ -1,46 +1,47 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package connector // import "go.opentelemetry.io/collector/connector" +package internal // import "go.opentelemetry.io/collector/connector/internal" import ( + "errors" "fmt" "go.uber.org/multierr" - "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/pipeline" ) -type baseRouter[T any] struct { +type BaseRouter[T any] struct { fanout func([]T) T - consumers map[component.ID]T + Consumers map[pipeline.ID]T } -func newBaseRouter[T any](fanout func([]T) T, cm map[component.ID]T) baseRouter[T] { - consumers := make(map[component.ID]T, len(cm)) +func 
NewBaseRouter[T any](fanout func([]T) T, cm map[pipeline.ID]T) BaseRouter[T] { + consumers := make(map[pipeline.ID]T, len(cm)) for k, v := range cm { consumers[k] = v } - return baseRouter[T]{fanout: fanout, consumers: consumers} + return BaseRouter[T]{fanout: fanout, Consumers: consumers} } -func (r *baseRouter[T]) PipelineIDs() []component.ID { - ids := make([]component.ID, 0, len(r.consumers)) - for id := range r.consumers { +func (r *BaseRouter[T]) PipelineIDs() []pipeline.ID { + ids := make([]pipeline.ID, 0, len(r.Consumers)) + for id := range r.Consumers { ids = append(ids, id) } return ids } -func (r *baseRouter[T]) Consumer(pipelineIDs ...component.ID) (T, error) { +func (r *BaseRouter[T]) Consumer(pipelineIDs ...pipeline.ID) (T, error) { var ret T if len(pipelineIDs) == 0 { - return ret, fmt.Errorf("missing consumers") + return ret, errors.New("missing consumers") } consumers := make([]T, 0, len(pipelineIDs)) var errors error for _, pipelineID := range pipelineIDs { - c, ok := r.consumers[pipelineID] + c, ok := r.Consumers[pipelineID] if ok { consumers = append(consumers, c) } else { diff --git a/vendor/go.opentelemetry.io/collector/connector/logs_router.go b/vendor/go.opentelemetry.io/collector/connector/logs_router.go index 0db9ea7799d..a2d74de29a3 100644 --- a/vendor/go.opentelemetry.io/collector/connector/logs_router.go +++ b/vendor/go.opentelemetry.io/collector/connector/logs_router.go @@ -4,55 +4,57 @@ package connector // import "go.opentelemetry.io/collector/connector" import ( + "errors" "fmt" "go.uber.org/multierr" - "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/connector/internal" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/internal/fanoutconsumer" + "go.opentelemetry.io/collector/pipeline" ) // LogsRouterAndConsumer feeds the first consumer.Logs in each of the specified pipelines. 
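The router interfaces below are keyed on pipeline.ID rather than component.ID after this change. A usage sketch for the logs router; pipeline.NewIDWithName is assumed to be the pipeline package's ID constructor:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/connector"
	"go.opentelemetry.io/collector/consumer"
	"go.opentelemetry.io/collector/consumer/consumertest"
	"go.opentelemetry.io/collector/pipeline"
)

func main() {
	idA := pipeline.NewIDWithName(pipeline.SignalLogs, "a")
	idB := pipeline.NewIDWithName(pipeline.SignalLogs, "b")
	router := connector.NewLogsRouter(map[pipeline.ID]consumer.Logs{
		idA: consumertest.NewNop(),
		idB: consumertest.NewNop(),
	})

	fmt.Println(len(router.PipelineIDs())) // 2

	// Consumer fans out to the requested subset; asking for an unknown
	// pipeline ID returns an error instead.
	c, err := router.Consumer(idA, idB)
	fmt.Println(c != nil, err) // true <nil>
}
```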
type LogsRouterAndConsumer interface { consumer.Logs - Consumer(...component.ID) (consumer.Logs, error) - PipelineIDs() []component.ID + Consumer(...pipeline.ID) (consumer.Logs, error) + PipelineIDs() []pipeline.ID privateFunc() } type logsRouter struct { consumer.Logs - baseRouter[consumer.Logs] + internal.BaseRouter[consumer.Logs] } -func NewLogsRouter(cm map[component.ID]consumer.Logs) LogsRouterAndConsumer { +func NewLogsRouter(cm map[pipeline.ID]consumer.Logs) LogsRouterAndConsumer { consumers := make([]consumer.Logs, 0, len(cm)) for _, cons := range cm { consumers = append(consumers, cons) } return &logsRouter{ Logs: fanoutconsumer.NewLogs(consumers), - baseRouter: newBaseRouter(fanoutconsumer.NewLogs, cm), + BaseRouter: internal.NewBaseRouter(fanoutconsumer.NewLogs, cm), } } -func (r *logsRouter) PipelineIDs() []component.ID { - ids := make([]component.ID, 0, len(r.consumers)) - for id := range r.consumers { +func (r *logsRouter) PipelineIDs() []pipeline.ID { + ids := make([]pipeline.ID, 0, len(r.Consumers)) + for id := range r.Consumers { ids = append(ids, id) } return ids } -func (r *logsRouter) Consumer(pipelineIDs ...component.ID) (consumer.Logs, error) { +func (r *logsRouter) Consumer(pipelineIDs ...pipeline.ID) (consumer.Logs, error) { if len(pipelineIDs) == 0 { - return nil, fmt.Errorf("missing consumers") + return nil, errors.New("missing consumers") } consumers := make([]consumer.Logs, 0, len(pipelineIDs)) var errors error for _, pipelineID := range pipelineIDs { - c, ok := r.consumers[pipelineID] + c, ok := r.Consumers[pipelineID] if ok { consumers = append(consumers, c) } else { diff --git a/vendor/go.opentelemetry.io/collector/connector/metrics_router.go b/vendor/go.opentelemetry.io/collector/connector/metrics_router.go index 3e688261bfe..a45a6397fe0 100644 --- a/vendor/go.opentelemetry.io/collector/connector/metrics_router.go +++ b/vendor/go.opentelemetry.io/collector/connector/metrics_router.go @@ -4,32 +4,33 @@ package connector // import "go.opentelemetry.io/collector/connector" import ( - "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/connector/internal" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/internal/fanoutconsumer" + "go.opentelemetry.io/collector/pipeline" ) // MetricsRouterAndConsumer feeds the first consumer.Metrics in each of the specified pipelines. 
type MetricsRouterAndConsumer interface { consumer.Metrics - Consumer(...component.ID) (consumer.Metrics, error) - PipelineIDs() []component.ID + Consumer(...pipeline.ID) (consumer.Metrics, error) + PipelineIDs() []pipeline.ID privateFunc() } type metricsRouter struct { consumer.Metrics - baseRouter[consumer.Metrics] + internal.BaseRouter[consumer.Metrics] } -func NewMetricsRouter(cm map[component.ID]consumer.Metrics) MetricsRouterAndConsumer { +func NewMetricsRouter(cm map[pipeline.ID]consumer.Metrics) MetricsRouterAndConsumer { consumers := make([]consumer.Metrics, 0, len(cm)) for _, cons := range cm { consumers = append(consumers, cons) } return &metricsRouter{ Metrics: fanoutconsumer.NewMetrics(consumers), - baseRouter: newBaseRouter(fanoutconsumer.NewMetrics, cm), + BaseRouter: internal.NewBaseRouter(fanoutconsumer.NewMetrics, cm), } } diff --git a/vendor/go.opentelemetry.io/collector/connector/traces_router.go b/vendor/go.opentelemetry.io/collector/connector/traces_router.go index 84eb889c05a..5622b78bf94 100644 --- a/vendor/go.opentelemetry.io/collector/connector/traces_router.go +++ b/vendor/go.opentelemetry.io/collector/connector/traces_router.go @@ -4,32 +4,33 @@ package connector // import "go.opentelemetry.io/collector/connector" import ( - "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/connector/internal" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/internal/fanoutconsumer" + "go.opentelemetry.io/collector/pipeline" ) // TracesRouterAndConsumer feeds the first consumer.Traces in each of the specified pipelines. type TracesRouterAndConsumer interface { consumer.Traces - Consumer(...component.ID) (consumer.Traces, error) - PipelineIDs() []component.ID + Consumer(...pipeline.ID) (consumer.Traces, error) + PipelineIDs() []pipeline.ID privateFunc() } type tracesRouter struct { consumer.Traces - baseRouter[consumer.Traces] + internal.BaseRouter[consumer.Traces] } -func NewTracesRouter(cm map[component.ID]consumer.Traces) TracesRouterAndConsumer { +func NewTracesRouter(cm map[pipeline.ID]consumer.Traces) TracesRouterAndConsumer { consumers := make([]consumer.Traces, 0, len(cm)) for _, cons := range cm { consumers = append(consumers, cons) } return &tracesRouter{ Traces: fanoutconsumer.NewTraces(consumers), - baseRouter: newBaseRouter(fanoutconsumer.NewTraces, cm), + BaseRouter: internal.NewBaseRouter(fanoutconsumer.NewTraces, cm), } } diff --git a/vendor/go.opentelemetry.io/collector/connector/xconnector/LICENSE b/vendor/go.opentelemetry.io/collector/connector/xconnector/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/connector/xconnector/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/go.opentelemetry.io/collector/connector/xconnector/Makefile b/vendor/go.opentelemetry.io/collector/connector/xconnector/Makefile new file mode 100644 index 00000000000..ded7a36092d --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/connector/xconnector/Makefile @@ -0,0 +1 @@ +include ../../Makefile.Common diff --git a/vendor/go.opentelemetry.io/collector/connector/xconnector/connector.go b/vendor/go.opentelemetry.io/collector/connector/xconnector/connector.go new file mode 100644 index 00000000000..d697e06a8e4 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/connector/xconnector/connector.go @@ -0,0 +1,331 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package xconnector // import "go.opentelemetry.io/collector/connector/xconnector" + +import ( + "context" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/connector" + "go.opentelemetry.io/collector/connector/internal" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/xconsumer" + "go.opentelemetry.io/collector/pipeline" + "go.opentelemetry.io/collector/pipeline/xpipeline" +) + +type Factory interface { + connector.Factory + + CreateTracesToProfiles(ctx context.Context, set connector.Settings, cfg component.Config, next xconsumer.Profiles) (connector.Traces, error) + CreateMetricsToProfiles(ctx context.Context, set connector.Settings, cfg component.Config, next xconsumer.Profiles) (connector.Metrics, error) + CreateLogsToProfiles(ctx context.Context, set connector.Settings, cfg component.Config, next xconsumer.Profiles) (connector.Logs, error) + + TracesToProfilesStability() component.StabilityLevel + MetricsToProfilesStability() component.StabilityLevel + LogsToProfilesStability() component.StabilityLevel + + CreateProfilesToProfiles(ctx context.Context, set connector.Settings, cfg component.Config, next xconsumer.Profiles) (Profiles, error) + CreateProfilesToTraces(ctx context.Context, set connector.Settings, cfg component.Config, next consumer.Traces) (Profiles, error) + CreateProfilesToMetrics(ctx context.Context, set connector.Settings, cfg component.Config, next consumer.Metrics) (Profiles, error) + CreateProfilesToLogs(ctx context.Context, set connector.Settings, cfg component.Config, next consumer.Logs) (Profiles, error) + + ProfilesToProfilesStability() component.StabilityLevel + ProfilesToTracesStability() component.StabilityLevel + ProfilesToMetricsStability() component.StabilityLevel + ProfilesToLogsStability() component.StabilityLevel +} + +// A Profiles connector acts as an exporter from a profiles pipeline and a receiver +// to one or more traces, metrics, logs, or profiles pipelines. +// Profiles feeds a consumer.Traces, consumer.Metrics, consumer.Logs, or xconsumer.Profiles with data. +// +// Examples: +// - Profiles could be collected in one pipeline and routed to another profiles pipeline +// based on criteria such as attributes or other content of the profile. The second +// pipeline can then process and export the profile to the appropriate backend. +// - Profiles could be summarized by a metrics connector that emits statistics describing +// the number of profiles observed. +// - Profiles could be analyzed by a logs connector that emits events when particular +// criteria are met. +type Profiles interface { + component.Component + xconsumer.Profiles +} + +// CreateTracesToProfilesFunc is the equivalent of Factory.CreateTracesToProfiles(). 
+type CreateTracesToProfilesFunc func(context.Context, connector.Settings, component.Config, xconsumer.Profiles) (connector.Traces, error) + +// CreateTracesToProfiles implements Factory.CreateTracesToProfiles(). +func (f CreateTracesToProfilesFunc) CreateTracesToProfiles(ctx context.Context, set connector.Settings, cfg component.Config, next xconsumer.Profiles) (connector.Traces, error) { + if f == nil { + return nil, internal.ErrDataTypes(set.ID, pipeline.SignalTraces, xpipeline.SignalProfiles) + } + return f(ctx, set, cfg, next) +} + +// CreateMetricsToProfilesFunc is the equivalent of Factory.CreateMetricsToProfiles(). +type CreateMetricsToProfilesFunc func(context.Context, connector.Settings, component.Config, xconsumer.Profiles) (connector.Metrics, error) + +// CreateMetricsToProfiles implements Factory.CreateMetricsToProfiles(). +func (f CreateMetricsToProfilesFunc) CreateMetricsToProfiles(ctx context.Context, set connector.Settings, cfg component.Config, next xconsumer.Profiles) (connector.Metrics, error) { + if f == nil { + return nil, internal.ErrDataTypes(set.ID, pipeline.SignalMetrics, xpipeline.SignalProfiles) + } + return f(ctx, set, cfg, next) +} + +// CreateLogsToProfilesFunc is the equivalent of Factory.CreateLogsToProfiles(). +type CreateLogsToProfilesFunc func(context.Context, connector.Settings, component.Config, xconsumer.Profiles) (connector.Logs, error) + +// CreateLogsToProfiles implements Factory.CreateLogsToProfiles(). +func (f CreateLogsToProfilesFunc) CreateLogsToProfiles(ctx context.Context, set connector.Settings, cfg component.Config, next xconsumer.Profiles) (connector.Logs, error) { + if f == nil { + return nil, internal.ErrDataTypes(set.ID, pipeline.SignalLogs, xpipeline.SignalProfiles) + } + return f(ctx, set, cfg, next) +} + +// CreateProfilesToProfilesFunc is the equivalent of Factory.CreateProfilesToProfiles(). +type CreateProfilesToProfilesFunc func(context.Context, connector.Settings, component.Config, xconsumer.Profiles) (Profiles, error) + +// CreateProfilesToProfiles implements Factory.CreateProfilesToProfiles(). +func (f CreateProfilesToProfilesFunc) CreateProfilesToProfiles(ctx context.Context, set connector.Settings, cfg component.Config, next xconsumer.Profiles) (Profiles, error) { + if f == nil { + return nil, internal.ErrDataTypes(set.ID, xpipeline.SignalProfiles, xpipeline.SignalProfiles) + } + return f(ctx, set, cfg, next) +} + +// CreateProfilesToTracesFunc is the equivalent of Factory.CreateProfilesToTraces(). +type CreateProfilesToTracesFunc func(context.Context, connector.Settings, component.Config, consumer.Traces) (Profiles, error) + +// CreateProfilesToTraces implements Factory.CreateProfilesToTraces(). +func (f CreateProfilesToTracesFunc) CreateProfilesToTraces(ctx context.Context, set connector.Settings, cfg component.Config, next consumer.Traces) (Profiles, error) { + if f == nil { + return nil, internal.ErrDataTypes(set.ID, xpipeline.SignalProfiles, pipeline.SignalTraces) + } + return f(ctx, set, cfg, next) +} + +// CreateProfilesToMetricsFunc is the equivalent of Factory.CreateProfilesToMetrics(). +type CreateProfilesToMetricsFunc func(context.Context, connector.Settings, component.Config, consumer.Metrics) (Profiles, error) + +// CreateProfilesToMetrics implements Factory.CreateProfilesToMetrics(). 
+func (f CreateProfilesToMetricsFunc) CreateProfilesToMetrics(ctx context.Context, set connector.Settings, cfg component.Config, next consumer.Metrics) (Profiles, error) {
+	if f == nil {
+		return nil, internal.ErrDataTypes(set.ID, xpipeline.SignalProfiles, pipeline.SignalMetrics)
+	}
+	return f(ctx, set, cfg, next)
+}
+
+// CreateProfilesToLogsFunc is the equivalent of Factory.CreateProfilesToLogs().
+type CreateProfilesToLogsFunc func(context.Context, connector.Settings, component.Config, consumer.Logs) (Profiles, error)
+
+// CreateProfilesToLogs implements Factory.CreateProfilesToLogs().
+func (f CreateProfilesToLogsFunc) CreateProfilesToLogs(ctx context.Context, set connector.Settings, cfg component.Config, next consumer.Logs) (Profiles, error) {
+	if f == nil {
+		return nil, internal.ErrDataTypes(set.ID, xpipeline.SignalProfiles, pipeline.SignalLogs)
+	}
+	return f(ctx, set, cfg, next)
+}
+
+// FactoryOption applies changes to factory options.
+type FactoryOption interface {
+	// applyOption applies the option.
+	applyOption(o *factoryOpts)
+}
+
+// factoryOptionFunc is a FactoryOption created through a function.
+type factoryOptionFunc func(*factoryOpts)
+
+func (f factoryOptionFunc) applyOption(o *factoryOpts) {
+	f(o)
+}
+
+type factoryOpts struct {
+	opts []connector.FactoryOption
+
+	*factory
+}
+
+// WithTracesToTraces overrides the default "error not supported" implementation for WithTracesToTraces and the default "undefined" stability level.
+func WithTracesToTraces(createTracesToTraces connector.CreateTracesToTracesFunc, sl component.StabilityLevel) FactoryOption {
+	return factoryOptionFunc(func(o *factoryOpts) {
+		o.opts = append(o.opts, connector.WithTracesToTraces(createTracesToTraces, sl))
+	})
+}
+
+// WithTracesToMetrics overrides the default "error not supported" implementation for WithTracesToMetrics and the default "undefined" stability level.
+func WithTracesToMetrics(createTracesToMetrics connector.CreateTracesToMetricsFunc, sl component.StabilityLevel) FactoryOption {
+	return factoryOptionFunc(func(o *factoryOpts) {
+		o.opts = append(o.opts, connector.WithTracesToMetrics(createTracesToMetrics, sl))
+	})
+}
+
+// WithTracesToLogs overrides the default "error not supported" implementation for WithTracesToLogs and the default "undefined" stability level.
+func WithTracesToLogs(createTracesToLogs connector.CreateTracesToLogsFunc, sl component.StabilityLevel) FactoryOption {
+	return factoryOptionFunc(func(o *factoryOpts) {
+		o.opts = append(o.opts, connector.WithTracesToLogs(createTracesToLogs, sl))
+	})
+}
+
+// WithMetricsToTraces overrides the default "error not supported" implementation for WithMetricsToTraces and the default "undefined" stability level.
+func WithMetricsToTraces(createMetricsToTraces connector.CreateMetricsToTracesFunc, sl component.StabilityLevel) FactoryOption {
+	return factoryOptionFunc(func(o *factoryOpts) {
+		o.opts = append(o.opts, connector.WithMetricsToTraces(createMetricsToTraces, sl))
+	})
+}
+
+// WithMetricsToMetrics overrides the default "error not supported" implementation for WithMetricsToMetrics and the default "undefined" stability level.
+func WithMetricsToMetrics(createMetricsToMetrics connector.CreateMetricsToMetricsFunc, sl component.StabilityLevel) FactoryOption { + return factoryOptionFunc(func(o *factoryOpts) { + o.opts = append(o.opts, connector.WithMetricsToMetrics(createMetricsToMetrics, sl)) + }) +} + +// WithMetricsToLogs overrides the default "error not supported" implementation for WithMetricsToLogs and the default "undefined" stability level. +func WithMetricsToLogs(createMetricsToLogs connector.CreateMetricsToLogsFunc, sl component.StabilityLevel) FactoryOption { + return factoryOptionFunc(func(o *factoryOpts) { + o.opts = append(o.opts, connector.WithMetricsToLogs(createMetricsToLogs, sl)) + }) +} + +// WithLogsToTraces overrides the default "error not supported" implementation for WithLogsToTraces and the default "undefined" stability level. +func WithLogsToTraces(createLogsToTraces connector.CreateLogsToTracesFunc, sl component.StabilityLevel) FactoryOption { + return factoryOptionFunc(func(o *factoryOpts) { + o.opts = append(o.opts, connector.WithLogsToTraces(createLogsToTraces, sl)) + }) +} + +// WithLogsToMetrics overrides the default "error not supported" implementation for WithLogsToMetrics and the default "undefined" stability level. +func WithLogsToMetrics(createLogsToMetrics connector.CreateLogsToMetricsFunc, sl component.StabilityLevel) FactoryOption { + return factoryOptionFunc(func(o *factoryOpts) { + o.opts = append(o.opts, connector.WithLogsToMetrics(createLogsToMetrics, sl)) + }) +} + +// WithLogsToLogs overrides the default "error not supported" implementation for WithLogsToLogs and the default "undefined" stability level. +func WithLogsToLogs(createLogsToLogs connector.CreateLogsToLogsFunc, sl component.StabilityLevel) FactoryOption { + return factoryOptionFunc(func(o *factoryOpts) { + o.opts = append(o.opts, connector.WithLogsToLogs(createLogsToLogs, sl)) + }) +} + +// WithTracesToProfiles overrides the default "error not supported" implementation for WithTracesToProfiles and the default "undefined" stability level. +func WithTracesToProfiles(createTracesToProfiles CreateTracesToProfilesFunc, sl component.StabilityLevel) FactoryOption { + return factoryOptionFunc(func(o *factoryOpts) { + o.tracesToProfilesStabilityLevel = sl + o.CreateTracesToProfilesFunc = createTracesToProfiles + }) +} + +// WithMetricsToProfiles overrides the default "error not supported" implementation for WithMetricsToProfiles and the default "undefined" stability level. +func WithMetricsToProfiles(createMetricsToProfiles CreateMetricsToProfilesFunc, sl component.StabilityLevel) FactoryOption { + return factoryOptionFunc(func(o *factoryOpts) { + o.metricsToProfilesStabilityLevel = sl + o.CreateMetricsToProfilesFunc = createMetricsToProfiles + }) +} + +// WithLogsToProfiles overrides the default "error not supported" implementation for WithLogsToProfiles and the default "undefined" stability level. +func WithLogsToProfiles(createLogsToProfiles CreateLogsToProfilesFunc, sl component.StabilityLevel) FactoryOption { + return factoryOptionFunc(func(o *factoryOpts) { + o.logsToProfilesStabilityLevel = sl + o.CreateLogsToProfilesFunc = createLogsToProfiles + }) +} + +// WithProfilesToProfiles overrides the default "error not supported" implementation for WithProfilesToProfiles and the default "undefined" stability level. 
+func WithProfilesToProfiles(createProfilesToProfiles CreateProfilesToProfilesFunc, sl component.StabilityLevel) FactoryOption { + return factoryOptionFunc(func(o *factoryOpts) { + o.profilesToProfilesStabilityLevel = sl + o.CreateProfilesToProfilesFunc = createProfilesToProfiles + }) +} + +// WithProfilesToTraces overrides the default "error not supported" implementation for WithProfilesToTraces and the default "undefined" stability level. +func WithProfilesToTraces(createProfilesToTraces CreateProfilesToTracesFunc, sl component.StabilityLevel) FactoryOption { + return factoryOptionFunc(func(o *factoryOpts) { + o.profilesToTracesStabilityLevel = sl + o.CreateProfilesToTracesFunc = createProfilesToTraces + }) +} + +// WithProfilesToMetrics overrides the default "error not supported" implementation for WithProfilesToMetrics and the default "undefined" stability level. +func WithProfilesToMetrics(createProfilesToMetrics CreateProfilesToMetricsFunc, sl component.StabilityLevel) FactoryOption { + return factoryOptionFunc(func(o *factoryOpts) { + o.profilesToMetricsStabilityLevel = sl + o.CreateProfilesToMetricsFunc = createProfilesToMetrics + }) +} + +// WithProfilesToLogs overrides the default "error not supported" implementation for WithProfilesToLogs and the default "undefined" stability level. +func WithProfilesToLogs(createProfilesToLogs CreateProfilesToLogsFunc, sl component.StabilityLevel) FactoryOption { + return factoryOptionFunc(func(o *factoryOpts) { + o.profilesToLogsStabilityLevel = sl + o.CreateProfilesToLogsFunc = createProfilesToLogs + }) +} + +// factory implements the Factory interface. +type factory struct { + connector.Factory + + CreateTracesToProfilesFunc + CreateMetricsToProfilesFunc + CreateLogsToProfilesFunc + + CreateProfilesToProfilesFunc + CreateProfilesToTracesFunc + CreateProfilesToMetricsFunc + CreateProfilesToLogsFunc + + tracesToProfilesStabilityLevel component.StabilityLevel + metricsToProfilesStabilityLevel component.StabilityLevel + logsToProfilesStabilityLevel component.StabilityLevel + + profilesToProfilesStabilityLevel component.StabilityLevel + profilesToTracesStabilityLevel component.StabilityLevel + profilesToMetricsStabilityLevel component.StabilityLevel + profilesToLogsStabilityLevel component.StabilityLevel +} + +func (f *factory) TracesToProfilesStability() component.StabilityLevel { + return f.tracesToProfilesStabilityLevel +} + +func (f *factory) MetricsToProfilesStability() component.StabilityLevel { + return f.metricsToProfilesStabilityLevel +} + +func (f *factory) LogsToProfilesStability() component.StabilityLevel { + return f.logsToProfilesStabilityLevel +} + +func (f *factory) ProfilesToProfilesStability() component.StabilityLevel { + return f.profilesToProfilesStabilityLevel +} + +func (f *factory) ProfilesToTracesStability() component.StabilityLevel { + return f.profilesToTracesStabilityLevel +} + +func (f *factory) ProfilesToMetricsStability() component.StabilityLevel { + return f.profilesToMetricsStabilityLevel +} + +func (f *factory) ProfilesToLogsStability() component.StabilityLevel { + return f.profilesToLogsStabilityLevel +} + +// NewFactory returns a Factory. +func NewFactory(cfgType component.Type, createDefaultConfig component.CreateDefaultConfigFunc, options ...FactoryOption) Factory { + opts := factoryOpts{factory: &factory{}} + for _, opt := range options { + opt.applyOption(&opts) + } + opts.Factory = connector.NewFactory(cfgType, createDefaultConfig, opts.opts...) 
+	return opts.factory
+}
diff --git a/vendor/go.opentelemetry.io/collector/connector/xconnector/profiles_router.go b/vendor/go.opentelemetry.io/collector/connector/xconnector/profiles_router.go
new file mode 100644
index 00000000000..085305ee557
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/connector/xconnector/profiles_router.go
@@ -0,0 +1,36 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package xconnector // import "go.opentelemetry.io/collector/connector/xconnector"
+
+import (
+	"go.opentelemetry.io/collector/connector/internal"
+	"go.opentelemetry.io/collector/consumer/xconsumer"
+	"go.opentelemetry.io/collector/internal/fanoutconsumer"
+	"go.opentelemetry.io/collector/pipeline"
+)
+
+type ProfilesRouterAndConsumer interface {
+	xconsumer.Profiles
+	Consumer(...pipeline.ID) (xconsumer.Profiles, error)
+	PipelineIDs() []pipeline.ID
+	privateFunc()
+}
+
+type profilesRouter struct {
+	xconsumer.Profiles
+	internal.BaseRouter[xconsumer.Profiles]
+}
+
+func NewProfilesRouter(cm map[pipeline.ID]xconsumer.Profiles) ProfilesRouterAndConsumer {
+	consumers := make([]xconsumer.Profiles, 0, len(cm))
+	for _, cons := range cm {
+		consumers = append(consumers, cons)
+	}
+	return &profilesRouter{
+		Profiles:   fanoutconsumer.NewProfiles(consumers),
+		BaseRouter: internal.NewBaseRouter(fanoutconsumer.NewProfiles, cm),
+	}
+}
+
+func (r *profilesRouter) privateFunc() {}
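Editorial note, not part of the patch: the xconnector Factory and With* options above are how a component advertises which profile signal paths it supports. A minimal sketch of a hypothetical connector package built on this API follows; the exampleconnector name, config type, and stability level are invented for illustration, and only the go.opentelemetry.io/collector packages shown in the hunks above are assumed.

package exampleconnector

import (
	"context"

	"go.opentelemetry.io/collector/component"
	"go.opentelemetry.io/collector/connector"
	"go.opentelemetry.io/collector/connector/xconnector"
	"go.opentelemetry.io/collector/consumer"
	"go.opentelemetry.io/collector/consumer/xconsumer"
	"go.opentelemetry.io/collector/pdata/ptrace"
)

// config is a placeholder connector configuration.
type config struct{}

// tracesToProfiles acts as an exporter in a traces pipeline and feeds a
// profiles pipeline through the next consumer, mirroring the Profiles
// connector description above.
type tracesToProfiles struct {
	component.StartFunc    // nil StartFunc yields a no-op Start
	component.ShutdownFunc // nil ShutdownFunc yields a no-op Shutdown
	next xconsumer.Profiles
}

func (c *tracesToProfiles) Capabilities() consumer.Capabilities {
	return consumer.Capabilities{MutatesData: false}
}

func (c *tracesToProfiles) ConsumeTraces(context.Context, ptrace.Traces) error {
	// A real connector would derive profile data from the spans and
	// forward it to c.next; this illustrative stub drops everything.
	return nil
}

func createTracesToProfiles(_ context.Context, _ connector.Settings, _ component.Config, next xconsumer.Profiles) (connector.Traces, error) {
	return &tracesToProfiles{next: next}, nil
}

// NewFactory wires up only the traces-to-profiles path; every other
// signal combination keeps the default "error not supported" behavior
// that the With* options above exist to override.
func NewFactory() xconnector.Factory {
	return xconnector.NewFactory(
		component.MustNewType("exampleconnector"),
		func() component.Config { return &config{} },
		xconnector.WithTracesToProfiles(createTracesToProfiles, component.StabilityLevelDevelopment),
	)
}

The same pattern extends to the profiles-to-traces/metrics/logs directions via the CreateProfilesTo*Func options defined above.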
diff --git a/vendor/go.opentelemetry.io/collector/consumer/consumer.go b/vendor/go.opentelemetry.io/collector/consumer/consumer.go
index 503750ad7cb..b1b588a85c0 100644
--- a/vendor/go.opentelemetry.io/collector/consumer/consumer.go
+++ b/vendor/go.opentelemetry.io/collector/consumer/consumer.go
@@ -5,52 +5,22 @@ package consumer // import "go.opentelemetry.io/collector/consumer"
 
 import (
 	"errors"
+
+	"go.opentelemetry.io/collector/consumer/internal"
 )
 
 // Capabilities describes the capabilities of a Processor.
-type Capabilities struct {
-	// MutatesData is set to true if Consume* function of the
-	// processor modifies the input Traces, Logs or Metrics argument.
-	// Processors which modify the input data MUST set this flag to true. If the processor
-	// does not modify the data it MUST set this flag to false. If the processor creates
-	// a copy of the data before modifying then this flag can be safely set to false.
-	MutatesData bool
-}
-
-type baseConsumer interface {
-	Capabilities() Capabilities
-}
+type Capabilities = internal.Capabilities
 
 var errNilFunc = errors.New("nil consumer func")
 
-type baseImpl struct {
-	capabilities Capabilities
-}
-
 // Option to construct new consumers.
-type Option func(*baseImpl)
+type Option = internal.Option
 
 // WithCapabilities overrides the default GetCapabilities function for a processor.
 // The default GetCapabilities function returns mutable capabilities.
 func WithCapabilities(capabilities Capabilities) Option {
-	return func(o *baseImpl) {
-		o.capabilities = capabilities
-	}
-}
-
-// Capabilities returns the capabilities of the component
-func (bs baseImpl) Capabilities() Capabilities {
-	return bs.capabilities
-}
-
-func newBaseImpl(options ...Option) *baseImpl {
-	bs := &baseImpl{
-		capabilities: Capabilities{MutatesData: false},
-	}
-
-	for _, op := range options {
-		op(bs)
-	}
-
-	return bs
+	return internal.OptionFunc(func(o *internal.BaseImpl) {
+		o.Cap = capabilities
+	})
 }
diff --git a/vendor/go.opentelemetry.io/collector/consumer/consumererror/LICENSE b/vendor/go.opentelemetry.io/collector/consumer/consumererror/LICENSE
new file mode 100644
index 00000000000..d6456956733
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/consumer/consumererror/LICENSE
@@ -0,0 +1,202 @@
+ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner.
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/collector/consumer/consumererror/Makefile b/vendor/go.opentelemetry.io/collector/consumer/consumererror/Makefile new file mode 100644 index 00000000000..ded7a36092d --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/consumer/consumererror/Makefile @@ -0,0 +1 @@ +include ../../Makefile.Common diff --git a/vendor/go.opentelemetry.io/collector/consumer/consumererror/internal/retryable.go b/vendor/go.opentelemetry.io/collector/consumer/consumererror/internal/retryable.go new file mode 100644 index 00000000000..feed1bc5bc7 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/consumer/consumererror/internal/retryable.go @@ -0,0 +1,31 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/collector/consumer/consumererror/internal" + +import ( + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/pprofile" + "go.opentelemetry.io/collector/pdata/ptrace" +) + +type Retryable[V ptrace.Traces | pmetric.Metrics | plog.Logs | pprofile.Profiles] struct { + Err error + Value V +} + +// Error provides the error message +func (err Retryable[V]) Error() string { + return err.Err.Error() +} + +// Unwrap returns the wrapped error for functions Is and As in standard package errors. +func (err Retryable[V]) Unwrap() error { + return err.Err +} + +// Data returns the telemetry data that failed to be processed or sent. 
+func (err Retryable[V]) Data() V { + return err.Value +} diff --git a/vendor/go.opentelemetry.io/collector/consumer/consumererror/signalerrors.go b/vendor/go.opentelemetry.io/collector/consumer/consumererror/signalerrors.go index 1d7558ce1ca..69af253dae7 100644 --- a/vendor/go.opentelemetry.io/collector/consumer/consumererror/signalerrors.go +++ b/vendor/go.opentelemetry.io/collector/consumer/consumererror/signalerrors.go @@ -4,38 +4,24 @@ package consumererror // import "go.opentelemetry.io/collector/consumer/consumererror" import ( + "go.opentelemetry.io/collector/consumer/consumererror/internal" "go.opentelemetry.io/collector/pdata/plog" "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/pdata/ptrace" ) -type retryable[V ptrace.Traces | pmetric.Metrics | plog.Logs] struct { - error - data V -} - -// Unwrap returns the wrapped error for functions Is and As in standard package errors. -func (err retryable[V]) Unwrap() error { - return err.error -} - -// Data returns the telemetry data that failed to be processed or sent. -func (err retryable[V]) Data() V { - return err.data -} - // Traces is an error that may carry associated Trace data for a subset of received data // that failed to be processed or sent. type Traces struct { - retryable[ptrace.Traces] + internal.Retryable[ptrace.Traces] } // NewTraces creates a Traces that can encapsulate received data that failed to be processed or sent. func NewTraces(err error, data ptrace.Traces) error { return Traces{ - retryable: retryable[ptrace.Traces]{ - error: err, - data: data, + Retryable: internal.Retryable[ptrace.Traces]{ + Err: err, + Value: data, }, } } @@ -43,15 +29,15 @@ func NewTraces(err error, data ptrace.Traces) error { // Logs is an error that may carry associated Log data for a subset of received data // that failed to be processed or sent. type Logs struct { - retryable[plog.Logs] + internal.Retryable[plog.Logs] } // NewLogs creates a Logs that can encapsulate received data that failed to be processed or sent. func NewLogs(err error, data plog.Logs) error { return Logs{ - retryable: retryable[plog.Logs]{ - error: err, - data: data, + Retryable: internal.Retryable[plog.Logs]{ + Err: err, + Value: data, }, } } @@ -59,15 +45,15 @@ func NewLogs(err error, data plog.Logs) error { // Metrics is an error that may carry associated Metrics data for a subset of received data // that failed to be processed or sent. type Metrics struct { - retryable[pmetric.Metrics] + internal.Retryable[pmetric.Metrics] } // NewMetrics creates a Metrics that can encapsulate received data that failed to be processed or sent. func NewMetrics(err error, data pmetric.Metrics) error { return Metrics{ - retryable: retryable[pmetric.Metrics]{ - error: err, - data: data, + Retryable: internal.Retryable[pmetric.Metrics]{ + Err: err, + Value: data, }, } } diff --git a/vendor/go.opentelemetry.io/collector/consumer/consumererror/xconsumererror/LICENSE b/vendor/go.opentelemetry.io/collector/consumer/consumererror/xconsumererror/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/consumer/consumererror/xconsumererror/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
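Stepping back from the license boilerplate: the signalerrors.go refactor earlier in this patch moves the retryable wrapper into consumererror/internal without changing the exported behavior. A minimal sketch of that behavior, assuming nothing beyond the APIs visible in this patch plus pdata's ptrace constructors:

package main

import (
	"errors"
	"fmt"

	"go.opentelemetry.io/collector/consumer/consumererror"
	"go.opentelemetry.io/collector/pdata/ptrace"
)

// send stands in for an exporter that failed to deliver a batch.
func send(td ptrace.Traces) error {
	return consumererror.NewTraces(errors.New("backend unavailable"), td)
}

func main() {
	err := send(ptrace.NewTraces())

	// Callers recover the failed telemetry exactly as before the
	// refactor: errors.As plus the promoted Data() accessor.
	var traceErr consumererror.Traces
	if errors.As(err, &traceErr) {
		fmt.Println("spans to retry:", traceErr.Data().SpanCount())
	}
}

The xconsumererror.Profiles type in the next hunk gives profiles the same treatment through the shared internal.Retryable generic.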
diff --git a/vendor/go.opentelemetry.io/collector/confmap/provider/envprovider/Makefile b/vendor/go.opentelemetry.io/collector/consumer/consumererror/xconsumererror/Makefile similarity index 100% rename from vendor/go.opentelemetry.io/collector/confmap/provider/envprovider/Makefile rename to vendor/go.opentelemetry.io/collector/consumer/consumererror/xconsumererror/Makefile diff --git a/vendor/go.opentelemetry.io/collector/consumer/consumererror/xconsumererror/signalerrors.go b/vendor/go.opentelemetry.io/collector/consumer/consumererror/xconsumererror/signalerrors.go new file mode 100644 index 00000000000..6200c7fc503 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/consumer/consumererror/xconsumererror/signalerrors.go @@ -0,0 +1,25 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package xconsumererror // import "go.opentelemetry.io/collector/consumer/consumererror/xconsumererror" + +import ( + "go.opentelemetry.io/collector/consumer/consumererror/internal" + "go.opentelemetry.io/collector/pdata/pprofile" +) + +// Profiles is an error that may carry associated Profile data for a subset of received data +// that failed to be processed or sent. +type Profiles struct { + internal.Retryable[pprofile.Profiles] +} + +// NewProfiles creates a Profiles that can encapsulate received data that failed to be processed or sent. +func NewProfiles(err error, data pprofile.Profiles) error { + return Profiles{ + Retryable: internal.Retryable[pprofile.Profiles]{ + Err: err, + Value: data, + }, + } +} diff --git a/vendor/go.opentelemetry.io/collector/consumer/consumertest/LICENSE b/vendor/go.opentelemetry.io/collector/consumer/consumertest/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/consumer/consumertest/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/go.opentelemetry.io/collector/consumer/consumertest/Makefile b/vendor/go.opentelemetry.io/collector/consumer/consumertest/Makefile new file mode 100644 index 00000000000..ded7a36092d --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/consumer/consumertest/Makefile @@ -0,0 +1 @@ +include ../../Makefile.Common diff --git a/vendor/go.opentelemetry.io/collector/consumer/consumertest/consumer.go b/vendor/go.opentelemetry.io/collector/consumer/consumertest/consumer.go index 147ed55c7ed..ec7a3030631 100644 --- a/vendor/go.opentelemetry.io/collector/consumer/consumertest/consumer.go +++ b/vendor/go.opentelemetry.io/collector/consumer/consumertest/consumer.go @@ -7,8 +7,10 @@ import ( "context" "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/xconsumer" "go.opentelemetry.io/collector/pdata/plog" "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/pprofile" "go.opentelemetry.io/collector/pdata/ptrace" ) @@ -29,12 +31,18 @@ type Consumer interface { // ConsumeLogs to implement the consumer.Logs. ConsumeLogs(context.Context, plog.Logs) error + // ConsumeProfiles to implement the xconsumer.Profiles. + ConsumeProfiles(context.Context, pprofile.Profiles) error + unexported() } -var _ consumer.Logs = (Consumer)(nil) -var _ consumer.Metrics = (Consumer)(nil) -var _ consumer.Traces = (Consumer)(nil) +var ( + _ consumer.Logs = (Consumer)(nil) + _ consumer.Metrics = (Consumer)(nil) + _ consumer.Traces = (Consumer)(nil) + _ xconsumer.Profiles = (Consumer)(nil) +) type nonMutatingConsumer struct{} @@ -48,6 +56,7 @@ type baseConsumer struct { consumer.ConsumeTracesFunc consumer.ConsumeMetricsFunc consumer.ConsumeLogsFunc + xconsumer.ConsumeProfilesFunc } func (bc baseConsumer) unexported() {} diff --git a/vendor/go.opentelemetry.io/collector/consumer/consumertest/err.go b/vendor/go.opentelemetry.io/collector/consumer/consumertest/err.go index d147453aaf7..fdc54ae2452 100644 --- a/vendor/go.opentelemetry.io/collector/consumer/consumertest/err.go +++ b/vendor/go.opentelemetry.io/collector/consumer/consumertest/err.go @@ -7,14 +7,16 @@ import ( "go.opentelemetry.io/collector/pdata/plog" "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/pprofile" "go.opentelemetry.io/collector/pdata/ptrace" ) // NewErr returns a Consumer that just drops all received data and returns the specified error to Consume* callers. 
func NewErr(err error) Consumer { return &baseConsumer{ - ConsumeTracesFunc: func(context.Context, ptrace.Traces) error { return err }, - ConsumeMetricsFunc: func(context.Context, pmetric.Metrics) error { return err }, - ConsumeLogsFunc: func(context.Context, plog.Logs) error { return err }, + ConsumeTracesFunc: func(context.Context, ptrace.Traces) error { return err }, + ConsumeMetricsFunc: func(context.Context, pmetric.Metrics) error { return err }, + ConsumeLogsFunc: func(context.Context, plog.Logs) error { return err }, + ConsumeProfilesFunc: func(context.Context, pprofile.Profiles) error { return err }, } } diff --git a/vendor/go.opentelemetry.io/collector/consumer/consumertest/nop.go b/vendor/go.opentelemetry.io/collector/consumer/consumertest/nop.go index fbb01e3bb98..25b898a7751 100644 --- a/vendor/go.opentelemetry.io/collector/consumer/consumertest/nop.go +++ b/vendor/go.opentelemetry.io/collector/consumer/consumertest/nop.go @@ -8,14 +8,16 @@ import ( "go.opentelemetry.io/collector/pdata/plog" "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/pprofile" "go.opentelemetry.io/collector/pdata/ptrace" ) // NewNop returns a Consumer that just drops all received data and returns no error. func NewNop() Consumer { return &baseConsumer{ - ConsumeTracesFunc: func(context.Context, ptrace.Traces) error { return nil }, - ConsumeMetricsFunc: func(context.Context, pmetric.Metrics) error { return nil }, - ConsumeLogsFunc: func(context.Context, plog.Logs) error { return nil }, + ConsumeTracesFunc: func(context.Context, ptrace.Traces) error { return nil }, + ConsumeMetricsFunc: func(context.Context, pmetric.Metrics) error { return nil }, + ConsumeLogsFunc: func(context.Context, plog.Logs) error { return nil }, + ConsumeProfilesFunc: func(context.Context, pprofile.Profiles) error { return nil }, } } diff --git a/vendor/go.opentelemetry.io/collector/consumer/consumertest/sink.go b/vendor/go.opentelemetry.io/collector/consumer/consumertest/sink.go index be7195af18f..e05dfccff98 100644 --- a/vendor/go.opentelemetry.io/collector/consumer/consumertest/sink.go +++ b/vendor/go.opentelemetry.io/collector/consumer/consumertest/sink.go @@ -8,8 +8,10 @@ import ( "sync" "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/xconsumer" "go.opentelemetry.io/collector/pdata/plog" "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/pprofile" "go.opentelemetry.io/collector/pdata/ptrace" ) @@ -156,3 +158,51 @@ func (sle *LogsSink) Reset() { sle.logs = nil sle.logRecordCount = 0 } + +// ProfilesSink is a xconsumer.Profiles that acts like a sink that +// stores all profiles and allows querying them for testing. +type ProfilesSink struct { + nonMutatingConsumer + mu sync.Mutex + profiles []pprofile.Profiles + sampleCount int +} + +var _ xconsumer.Profiles = (*ProfilesSink)(nil) + +// ConsumeProfiles stores profiles to this sink. +func (ste *ProfilesSink) ConsumeProfiles(_ context.Context, td pprofile.Profiles) error { + ste.mu.Lock() + defer ste.mu.Unlock() + + ste.profiles = append(ste.profiles, td) + ste.sampleCount += td.SampleCount() + + return nil +} + +// AllProfiles returns the profiles stored by this sink since last Reset. 
+func (ste *ProfilesSink) AllProfiles() []pprofile.Profiles {
+	ste.mu.Lock()
+	defer ste.mu.Unlock()
+
+	copyProfiles := make([]pprofile.Profiles, len(ste.profiles))
+	copy(copyProfiles, ste.profiles)
+	return copyProfiles
+}
+
+// SampleCount returns the number of samples stored by this sink since last Reset.
+func (ste *ProfilesSink) SampleCount() int {
+	ste.mu.Lock()
+	defer ste.mu.Unlock()
+	return ste.sampleCount
+}
+
+// Reset deletes any stored data.
+func (ste *ProfilesSink) Reset() {
+	ste.mu.Lock()
+	defer ste.mu.Unlock()
+
+	ste.profiles = nil
+	ste.sampleCount = 0
+}
diff --git a/vendor/go.opentelemetry.io/collector/consumer/internal/consumer.go b/vendor/go.opentelemetry.io/collector/consumer/internal/consumer.go
new file mode 100644
index 00000000000..45c90dd4341
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/consumer/internal/consumer.go
@@ -0,0 +1,50 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package internal // import "go.opentelemetry.io/collector/consumer/internal"
+
+// Capabilities describes the capabilities of a Processor.
+type Capabilities struct {
+	// MutatesData is set to true if Consume* function of the
+	// processor modifies the input Traces, Logs or Metrics argument.
+	// Processors which modify the input data MUST set this flag to true. If the processor
+	// does not modify the data it MUST set this flag to false. If the processor creates
+	// a copy of the data before modifying then this flag can be safely set to false.
+	MutatesData bool
+}
+
+type BaseConsumer interface {
+	Capabilities() Capabilities
+}
+
+type BaseImpl struct {
+	Cap Capabilities
+}
+
+// Option to construct new consumers.
+type Option interface {
+	apply(*BaseImpl)
+}
+
+type OptionFunc func(*BaseImpl)
+
+func (of OptionFunc) apply(e *BaseImpl) {
+	of(e)
+}
+
+// Capabilities returns the capabilities of the component.
+func (bs BaseImpl) Capabilities() Capabilities {
+	return bs.Cap
+}
+
+func NewBaseImpl(options ...Option) *BaseImpl {
+	bs := &BaseImpl{
+		Cap: Capabilities{MutatesData: false},
+	}
+
+	for _, op := range options {
+		op.apply(bs)
+	}
+
+	return bs
+}
diff --git a/vendor/go.opentelemetry.io/collector/consumer/logs.go b/vendor/go.opentelemetry.io/collector/consumer/logs.go
index 5bf89a52f7a..15166ef1196 100644
--- a/vendor/go.opentelemetry.io/collector/consumer/logs.go
+++ b/vendor/go.opentelemetry.io/collector/consumer/logs.go
@@ -6,13 +6,14 @@ package consumer // import "go.opentelemetry.io/collector/consumer"
 import (
 	"context"
 
+	"go.opentelemetry.io/collector/consumer/internal"
 	"go.opentelemetry.io/collector/pdata/plog"
 )
 
 // Logs is an interface that receives plog.Logs, processes it
 // as needed, and sends it to the next processing node if any or to the destination.
 type Logs interface {
-	baseConsumer
+	internal.BaseConsumer
 	// ConsumeLogs receives plog.Logs for consumption.
ConsumeLogs(ctx context.Context, ld plog.Logs) error } @@ -26,7 +27,7 @@ func (f ConsumeLogsFunc) ConsumeLogs(ctx context.Context, ld plog.Logs) error { } type baseLogs struct { - *baseImpl + *internal.BaseImpl ConsumeLogsFunc } @@ -36,7 +37,7 @@ func NewLogs(consume ConsumeLogsFunc, options ...Option) (Logs, error) { return nil, errNilFunc } return &baseLogs{ - baseImpl: newBaseImpl(options...), + BaseImpl: internal.NewBaseImpl(options...), ConsumeLogsFunc: consume, }, nil } diff --git a/vendor/go.opentelemetry.io/collector/consumer/metrics.go b/vendor/go.opentelemetry.io/collector/consumer/metrics.go index 50df60f02d0..47897f9363a 100644 --- a/vendor/go.opentelemetry.io/collector/consumer/metrics.go +++ b/vendor/go.opentelemetry.io/collector/consumer/metrics.go @@ -6,13 +6,14 @@ package consumer // import "go.opentelemetry.io/collector/consumer" import ( "context" + "go.opentelemetry.io/collector/consumer/internal" "go.opentelemetry.io/collector/pdata/pmetric" ) // Metrics is an interface that receives pmetric.Metrics, processes it // as needed, and sends it to the next processing node if any or to the destination. type Metrics interface { - baseConsumer + internal.BaseConsumer // ConsumeMetrics receives pmetric.Metrics for consumption. ConsumeMetrics(ctx context.Context, md pmetric.Metrics) error } @@ -26,7 +27,7 @@ func (f ConsumeMetricsFunc) ConsumeMetrics(ctx context.Context, md pmetric.Metri } type baseMetrics struct { - *baseImpl + *internal.BaseImpl ConsumeMetricsFunc } @@ -36,7 +37,7 @@ func NewMetrics(consume ConsumeMetricsFunc, options ...Option) (Metrics, error) return nil, errNilFunc } return &baseMetrics{ - baseImpl: newBaseImpl(options...), + BaseImpl: internal.NewBaseImpl(options...), ConsumeMetricsFunc: consume, }, nil } diff --git a/vendor/go.opentelemetry.io/collector/consumer/traces.go b/vendor/go.opentelemetry.io/collector/consumer/traces.go index 56cebd53b37..60df2d04536 100644 --- a/vendor/go.opentelemetry.io/collector/consumer/traces.go +++ b/vendor/go.opentelemetry.io/collector/consumer/traces.go @@ -6,13 +6,14 @@ package consumer // import "go.opentelemetry.io/collector/consumer" import ( "context" + "go.opentelemetry.io/collector/consumer/internal" "go.opentelemetry.io/collector/pdata/ptrace" ) // Traces is an interface that receives ptrace.Traces, processes it // as needed, and sends it to the next processing node if any or to the destination. type Traces interface { - baseConsumer + internal.BaseConsumer // ConsumeTraces receives ptrace.Traces for consumption. ConsumeTraces(ctx context.Context, td ptrace.Traces) error } @@ -26,7 +27,7 @@ func (f ConsumeTracesFunc) ConsumeTraces(ctx context.Context, td ptrace.Traces) } type baseTraces struct { - *baseImpl + *internal.BaseImpl ConsumeTracesFunc } @@ -36,7 +37,7 @@ func NewTraces(consume ConsumeTracesFunc, options ...Option) (Traces, error) { return nil, errNilFunc } return &baseTraces{ - baseImpl: newBaseImpl(options...), + BaseImpl: internal.NewBaseImpl(options...), ConsumeTracesFunc: consume, }, nil } diff --git a/vendor/go.opentelemetry.io/collector/consumer/xconsumer/LICENSE b/vendor/go.opentelemetry.io/collector/consumer/xconsumer/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/consumer/xconsumer/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
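The hunks above extend consumertest with profiles support (ProfilesSink, plus profile-aware NewNop and NewErr), and the xconsumer module vendored next supplies the matching Profiles interface and NewProfiles constructor. As a minimal sketch of how these pieces compose, assuming the v0.116.0 import paths shown in this patch (the wiring itself is illustrative, not part of the diff):

```go
package main

import (
	"context"
	"log"

	"go.opentelemetry.io/collector/consumer/consumertest"
	"go.opentelemetry.io/collector/consumer/xconsumer"
	"go.opentelemetry.io/collector/pdata/pprofile"
)

func main() {
	// ProfilesSink (added in the sink.go hunk above) stores every batch it consumes.
	sink := new(consumertest.ProfilesSink)

	// xconsumer.NewProfiles wraps a ConsumeProfilesFunc, mirroring consumer.NewTraces.
	profiles, err := xconsumer.NewProfiles(func(ctx context.Context, pd pprofile.Profiles) error {
		return sink.ConsumeProfiles(ctx, pd)
	})
	if err != nil {
		log.Fatal(err)
	}

	// Feed an empty payload through the chain and inspect what the sink captured.
	if err := profiles.ConsumeProfiles(context.Background(), pprofile.NewProfiles()); err != nil {
		log.Fatal(err)
	}
	log.Printf("stored %d batches, %d samples", len(sink.AllProfiles()), sink.SampleCount())
}
```

Per the nop.go and err.go hunks, consumertest.NewNop and consumertest.NewErr now satisfy xconsumer.Profiles as well, so existing test doubles gain the new signal without further changes.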
diff --git a/vendor/go.opentelemetry.io/collector/consumer/xconsumer/Makefile b/vendor/go.opentelemetry.io/collector/consumer/xconsumer/Makefile new file mode 100644 index 00000000000..ded7a36092d --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/consumer/xconsumer/Makefile @@ -0,0 +1 @@ +include ../../Makefile.Common diff --git a/vendor/go.opentelemetry.io/collector/consumer/xconsumer/profiles.go b/vendor/go.opentelemetry.io/collector/consumer/xconsumer/profiles.go new file mode 100644 index 00000000000..88ba2eb5a38 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/consumer/xconsumer/profiles.go @@ -0,0 +1,47 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package xconsumer // import "go.opentelemetry.io/collector/consumer/xconsumer" + +import ( + "context" + "errors" + + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/internal" + "go.opentelemetry.io/collector/pdata/pprofile" +) + +var errNilFunc = errors.New("nil consumer func") + +// Profiles is an interface that receives pprofile.Profiles, processes it +// as needed, and sends it to the next processing node if any or to the destination. +type Profiles interface { + internal.BaseConsumer + // ConsumeProfiles receives pprofile.Profiles for consumption. + ConsumeProfiles(ctx context.Context, td pprofile.Profiles) error +} + +// ConsumeProfilesFunc is a helper function that is similar to ConsumeProfiles. +type ConsumeProfilesFunc func(ctx context.Context, td pprofile.Profiles) error + +// ConsumeProfiles calls f(ctx, td). +func (f ConsumeProfilesFunc) ConsumeProfiles(ctx context.Context, td pprofile.Profiles) error { + return f(ctx, td) +} + +type baseProfiles struct { + *internal.BaseImpl + ConsumeProfilesFunc +} + +// NewProfiles returns a Profiles configured with the provided options. +func NewProfiles(consume ConsumeProfilesFunc, options ...consumer.Option) (Profiles, error) { + if consume == nil { + return nil, errNilFunc + } + return &baseProfiles{ + BaseImpl: internal.NewBaseImpl(options...), + ConsumeProfilesFunc: consume, + }, nil +} diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporter.go b/vendor/go.opentelemetry.io/collector/exporter/exporter.go index 818fa3ecac3..0e54bfb1b3c 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/exporter.go +++ b/vendor/go.opentelemetry.io/collector/exporter/exporter.go @@ -7,10 +7,9 @@ import ( "context" "fmt" - "go.uber.org/zap" - "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/pipeline" ) // Traces is an exporter that can consume traces. @@ -31,8 +30,8 @@ type Logs interface { consumer.Logs } -// CreateSettings configures exporter creators. -type CreateSettings struct { +// Settings configures exporter creators. +type Settings struct { // ID returns the ID of the component that will be created. ID component.ID @@ -49,77 +48,77 @@ type CreateSettings struct { type Factory interface { component.Factory - // CreateTracesExporter creates a TracesExporter based on this config. - // If the exporter type does not support tracing or if the config is not valid, - // an error will be returned instead. - CreateTracesExporter(ctx context.Context, set CreateSettings, cfg component.Config) (Traces, error) + // CreateTraces creates a Traces exporter based on this config. + // If the exporter type does not support tracing, + // this function returns the error [pipeline.ErrSignalNotSupported]. 
+ CreateTraces(ctx context.Context, set Settings, cfg component.Config) (Traces, error) - // TracesExporterStability gets the stability level of the TracesExporter. - TracesExporterStability() component.StabilityLevel + // TracesStability gets the stability level of the Traces exporter. + TracesStability() component.StabilityLevel - // CreateMetricsExporter creates a MetricsExporter based on this config. - // If the exporter type does not support metrics or if the config is not valid, - // an error will be returned instead. - CreateMetricsExporter(ctx context.Context, set CreateSettings, cfg component.Config) (Metrics, error) + // CreateMetrics creates a Metrics exporter based on this config. + // If the exporter type does not support metrics, + // this function returns the error [pipeline.ErrSignalNotSupported]. + CreateMetrics(ctx context.Context, set Settings, cfg component.Config) (Metrics, error) - // MetricsExporterStability gets the stability level of the MetricsExporter. - MetricsExporterStability() component.StabilityLevel + // MetricsStability gets the stability level of the Metrics exporter. + MetricsStability() component.StabilityLevel - // CreateLogsExporter creates a LogsExporter based on the config. - // If the exporter type does not support logs or if the config is not valid, - // an error will be returned instead. - CreateLogsExporter(ctx context.Context, set CreateSettings, cfg component.Config) (Logs, error) + // CreateLogs creates a Logs exporter based on the config. + // If the exporter type does not support logs, + // this function returns the error [pipeline.ErrSignalNotSupported]. + CreateLogs(ctx context.Context, set Settings, cfg component.Config) (Logs, error) - // LogsExporterStability gets the stability level of the LogsExporter. - LogsExporterStability() component.StabilityLevel + // LogsStability gets the stability level of the Logs exporter. + LogsStability() component.StabilityLevel unexportedFactoryFunc() } // FactoryOption apply changes to Factory. type FactoryOption interface { - // applyExporterFactoryOption applies the option. - applyExporterFactoryOption(o *factory) + // applyOption applies the option. + applyOption(o *factory) } var _ FactoryOption = (*factoryOptionFunc)(nil) -// factoryOptionFunc is an ExporterFactoryOption created through a function. +// factoryOptionFunc is an FactoryOption created through a function. type factoryOptionFunc func(*factory) -func (f factoryOptionFunc) applyExporterFactoryOption(o *factory) { +func (f factoryOptionFunc) applyOption(o *factory) { f(o) } // CreateTracesFunc is the equivalent of Factory.CreateTraces. -type CreateTracesFunc func(context.Context, CreateSettings, component.Config) (Traces, error) +type CreateTracesFunc func(context.Context, Settings, component.Config) (Traces, error) -// CreateTracesExporter implements ExporterFactory.CreateTracesExporter(). -func (f CreateTracesFunc) CreateTracesExporter(ctx context.Context, set CreateSettings, cfg component.Config) (Traces, error) { +// CreateTraces implements Factory.CreateTraces. +func (f CreateTracesFunc) CreateTraces(ctx context.Context, set Settings, cfg component.Config) (Traces, error) { if f == nil { - return nil, component.ErrDataTypeIsNotSupported + return nil, pipeline.ErrSignalNotSupported } return f(ctx, set, cfg) } // CreateMetricsFunc is the equivalent of Factory.CreateMetrics. 
-type CreateMetricsFunc func(context.Context, CreateSettings, component.Config) (Metrics, error) +type CreateMetricsFunc func(context.Context, Settings, component.Config) (Metrics, error) -// CreateMetricsExporter implements ExporterFactory.CreateMetricsExporter(). -func (f CreateMetricsFunc) CreateMetricsExporter(ctx context.Context, set CreateSettings, cfg component.Config) (Metrics, error) { +// CreateMetrics implements Factory.CreateMetrics. +func (f CreateMetricsFunc) CreateMetrics(ctx context.Context, set Settings, cfg component.Config) (Metrics, error) { if f == nil { - return nil, component.ErrDataTypeIsNotSupported + return nil, pipeline.ErrSignalNotSupported } return f(ctx, set, cfg) } // CreateLogsFunc is the equivalent of Factory.CreateLogs. -type CreateLogsFunc func(context.Context, CreateSettings, component.Config) (Logs, error) +type CreateLogsFunc func(context.Context, Settings, component.Config) (Logs, error) -// CreateLogsExporter implements Factory.CreateLogsExporter(). -func (f CreateLogsFunc) CreateLogsExporter(ctx context.Context, set CreateSettings, cfg component.Config) (Logs, error) { +// CreateLogs implements Factory.CreateLogs. +func (f CreateLogsFunc) CreateLogs(ctx context.Context, set Settings, cfg component.Config) (Logs, error) { if f == nil { - return nil, component.ErrDataTypeIsNotSupported + return nil, pipeline.ErrSignalNotSupported } return f(ctx, set, cfg) } @@ -141,19 +140,19 @@ func (f *factory) Type() component.Type { func (f *factory) unexportedFactoryFunc() {} -func (f *factory) TracesExporterStability() component.StabilityLevel { +func (f *factory) TracesStability() component.StabilityLevel { return f.tracesStabilityLevel } -func (f *factory) MetricsExporterStability() component.StabilityLevel { +func (f *factory) MetricsStability() component.StabilityLevel { return f.metricsStabilityLevel } -func (f *factory) LogsExporterStability() component.StabilityLevel { +func (f *factory) LogsStability() component.StabilityLevel { return f.logsStabilityLevel } -// WithTraces overrides the default "error not supported" implementation for CreateTracesExporter and the default "undefined" stability level. +// WithTraces overrides the default "error not supported" implementation for Factory.CreateTraces and the default "undefined" stability level. func WithTraces(createTraces CreateTracesFunc, sl component.StabilityLevel) FactoryOption { return factoryOptionFunc(func(o *factory) { o.tracesStabilityLevel = sl @@ -161,7 +160,7 @@ func WithTraces(createTraces CreateTracesFunc, sl component.StabilityLevel) Fact }) } -// WithMetrics overrides the default "error not supported" implementation for CreateMetricsExporter and the default "undefined" stability level. +// WithMetrics overrides the default "error not supported" implementation for Factory.CreateMetrics and the default "undefined" stability level. func WithMetrics(createMetrics CreateMetricsFunc, sl component.StabilityLevel) FactoryOption { return factoryOptionFunc(func(o *factory) { o.metricsStabilityLevel = sl @@ -169,7 +168,7 @@ func WithMetrics(createMetrics CreateMetricsFunc, sl component.StabilityLevel) F }) } -// WithLogs overrides the default "error not supported" implementation for CreateLogsExporter and the default "undefined" stability level. +// WithLogs overrides the default "error not supported" implementation for Factory.CreateLogs and the default "undefined" stability level. 
func WithLogs(createLogs CreateLogsFunc, sl component.StabilityLevel) FactoryOption { return factoryOptionFunc(func(o *factory) { o.logsStabilityLevel = sl @@ -184,7 +183,7 @@ func NewFactory(cfgType component.Type, createDefaultConfig component.CreateDefa CreateDefaultConfigFunc: createDefaultConfig, } for _, opt := range options { - opt.applyExporterFactoryOption(f) + opt.applyOption(f) } return f } @@ -201,77 +200,3 @@ func MakeFactoryMap(factories ...Factory) (map[component.Type]Factory, error) { } return fMap, nil } - -// Builder exporter is a helper struct that given a set of Configs and Factories helps with creating exporters. -type Builder struct { - cfgs map[component.ID]component.Config - factories map[component.Type]Factory -} - -// NewBuilder creates a new exporter.Builder to help with creating components form a set of configs and factories. -func NewBuilder(cfgs map[component.ID]component.Config, factories map[component.Type]Factory) *Builder { - return &Builder{cfgs: cfgs, factories: factories} -} - -// CreateTraces creates a Traces exporter based on the settings and config. -func (b *Builder) CreateTraces(ctx context.Context, set CreateSettings) (Traces, error) { - cfg, existsCfg := b.cfgs[set.ID] - if !existsCfg { - return nil, fmt.Errorf("exporter %q is not configured", set.ID) - } - - f, existsFactory := b.factories[set.ID.Type()] - if !existsFactory { - return nil, fmt.Errorf("exporter factory not available for: %q", set.ID) - } - - logStabilityLevel(set.Logger, f.TracesExporterStability()) - return f.CreateTracesExporter(ctx, set, cfg) -} - -// CreateMetrics creates a Metrics exporter based on the settings and config. -func (b *Builder) CreateMetrics(ctx context.Context, set CreateSettings) (Metrics, error) { - cfg, existsCfg := b.cfgs[set.ID] - if !existsCfg { - return nil, fmt.Errorf("exporter %q is not configured", set.ID) - } - - f, existsFactory := b.factories[set.ID.Type()] - if !existsFactory { - return nil, fmt.Errorf("exporter factory not available for: %q", set.ID) - } - - logStabilityLevel(set.Logger, f.MetricsExporterStability()) - return f.CreateMetricsExporter(ctx, set, cfg) -} - -// CreateLogs creates a Logs exporter based on the settings and config. -func (b *Builder) CreateLogs(ctx context.Context, set CreateSettings) (Logs, error) { - cfg, existsCfg := b.cfgs[set.ID] - if !existsCfg { - return nil, fmt.Errorf("exporter %q is not configured", set.ID) - } - - f, existsFactory := b.factories[set.ID.Type()] - if !existsFactory { - return nil, fmt.Errorf("exporter factory not available for: %q", set.ID) - } - - logStabilityLevel(set.Logger, f.LogsExporterStability()) - return f.CreateLogsExporter(ctx, set, cfg) -} - -func (b *Builder) Factory(componentType component.Type) component.Factory { - return b.factories[componentType] -} - -// logStabilityLevel logs the stability level of a component. The log level is set to info for -// undefined, unmaintained, deprecated and development. The log level is set to debug -// for alpha, beta and stable. 
-func logStabilityLevel(logger *zap.Logger, sl component.StabilityLevel) { - if sl >= component.StabilityLevelAlpha { - logger.Debug(sl.LogMessage()) - } else { - logger.Info(sl.LogMessage()) - } -} diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterbatcher/batch_func.go b/vendor/go.opentelemetry.io/collector/exporter/exporterbatcher/batch_func.go deleted file mode 100644 index 0298276ba7b..00000000000 --- a/vendor/go.opentelemetry.io/collector/exporter/exporterbatcher/batch_func.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package exporterbatcher // import "go.opentelemetry.io/collector/exporter/exporterbatcher" - -import "context" - -// BatchMergeFunc is a function that merges two requests into a single request. -// Do not mutate the requests passed to the function if error can be returned after mutation or if the exporter is -// marked as not mutable. -// Experimental: This API is at the early stage of development and may change without backward compatibility -// until https://github.com/open-telemetry/opentelemetry-collector/issues/8122 is resolved. -type BatchMergeFunc[T any] func(context.Context, T, T) (T, error) - -// BatchMergeSplitFunc is a function that merge and/or splits one or two requests into multiple requests based on the -// configured limit provided in MaxSizeConfig. -// All the returned requests MUST have a number of items that does not exceed the maximum number of items. -// Size of the last returned request MUST be less or equal than the size of any other returned request. -// The original request MUST not be mutated if error is returned after mutation or if the exporter is -// marked as not mutable. The length of the returned slice MUST not be 0. The optionalReq argument can be nil, -// make sure to check it before using. -// Experimental: This API is at the early stage of development and may change without backward compatibility -// until https://github.com/open-telemetry/opentelemetry-collector/issues/8122 is resolved. 
-type BatchMergeSplitFunc[T any] func(ctx context.Context, cfg MaxSizeConfig, optionalReq T, req T) ([]T, error) diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/README.md b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/README.md index e32c948c924..0020c25c1d9 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/README.md +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/README.md @@ -109,4 +109,3 @@ service: ``` [filestorage]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/extension/storage/filestorage -[alpha]: https://github.com/open-telemetry/opentelemetry-collector#alpha diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/common.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/common.go index aa84e9019f2..ab1f0db4e0b 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/common.go +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/common.go @@ -4,115 +4,46 @@ package exporterhelper // import "go.opentelemetry.io/collector/exporter/exporterhelper" import ( - "context" - "fmt" - - "go.uber.org/multierr" - "go.uber.org/zap" - "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config/configretry" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/exporter" "go.opentelemetry.io/collector/exporter/exporterbatcher" + "go.opentelemetry.io/collector/exporter/exporterhelper/internal" "go.opentelemetry.io/collector/exporter/exporterqueue" ) -// requestSender is an abstraction of a sender for a request independent of the type of the data (traces, metrics, logs). -type requestSender interface { - component.Component - send(context.Context, Request) error - setNextSender(nextSender requestSender) -} - -type baseRequestSender struct { - component.StartFunc - component.ShutdownFunc - nextSender requestSender -} - -var _ requestSender = (*baseRequestSender)(nil) - -func (b *baseRequestSender) send(ctx context.Context, req Request) error { - return b.nextSender.send(ctx, req) -} - -func (b *baseRequestSender) setNextSender(nextSender requestSender) { - b.nextSender = nextSender -} - -type obsrepSenderFactory func(obsrep *ObsReport) requestSender - -// Option apply changes to baseExporter. -type Option func(*baseExporter) error +// Option apply changes to BaseExporter. +type Option = internal.Option // WithStart overrides the default Start function for an exporter. // The default start function does nothing and always returns nil. func WithStart(start component.StartFunc) Option { - return func(o *baseExporter) error { - o.StartFunc = start - return nil - } + return internal.WithStart(start) } // WithShutdown overrides the default Shutdown function for an exporter. // The default shutdown function does nothing and always returns nil. func WithShutdown(shutdown component.ShutdownFunc) Option { - return func(o *baseExporter) error { - o.ShutdownFunc = shutdown - return nil - } + return internal.WithShutdown(shutdown) } -// WithTimeout overrides the default TimeoutSettings for an exporter. -// The default TimeoutSettings is 5 seconds. -func WithTimeout(timeoutSettings TimeoutSettings) Option { - return func(o *baseExporter) error { - o.timeoutSender.cfg = timeoutSettings - return nil - } +// WithTimeout overrides the default TimeoutConfig for an exporter. +// The default TimeoutConfig is 5 seconds. 
+func WithTimeout(timeoutConfig TimeoutConfig) Option { + return internal.WithTimeout(timeoutConfig) } // WithRetry overrides the default configretry.BackOffConfig for an exporter. // The default configretry.BackOffConfig is to disable retries. func WithRetry(config configretry.BackOffConfig) Option { - return func(o *baseExporter) error { - if !config.Enabled { - o.exportFailureMessage += " Try enabling retry_on_failure config option to retry on retryable errors." - return nil - } - o.retrySender = newRetrySender(config, o.set) - return nil - } + return internal.WithRetry(config) } -// WithQueue overrides the default QueueSettings for an exporter. -// The default QueueSettings is to disable queueing. +// WithQueue overrides the default QueueConfig for an exporter. +// The default QueueConfig is to disable queueing. // This option cannot be used with the new exporter helpers New[Traces|Metrics|Logs]RequestExporter. -func WithQueue(config QueueSettings) Option { - return func(o *baseExporter) error { - if o.marshaler == nil || o.unmarshaler == nil { - return fmt.Errorf("WithQueue option is not available for the new request exporters, use WithRequestQueue instead") - } - if !config.Enabled { - o.exportFailureMessage += " Try enabling sending_queue to survive temporary failures." - return nil - } - qf := exporterqueue.NewPersistentQueueFactory[Request](config.StorageID, exporterqueue.PersistentQueueSettings[Request]{ - Marshaler: o.marshaler, - Unmarshaler: o.unmarshaler, - }) - q := qf(context.Background(), exporterqueue.Settings{ - DataType: o.signal, - ExporterSettings: o.set, - }, exporterqueue.Config{ - Enabled: config.Enabled, - NumConsumers: config.NumConsumers, - QueueSize: config.QueueSize, - }) - o.queueSender = newQueueSender(q, o.set, config.NumConsumers, o.exportFailureMessage) - return nil - } +func WithQueue(config internal.QueueConfig) Option { + return internal.WithQueue(config) } // WithRequestQueue enables queueing for an exporter. @@ -120,49 +51,14 @@ func WithQueue(config QueueSettings) Option { // Experimental: This API is at the early stage of development and may change without backward compatibility // until https://github.com/open-telemetry/opentelemetry-collector/issues/8122 is resolved. func WithRequestQueue(cfg exporterqueue.Config, queueFactory exporterqueue.Factory[Request]) Option { - return func(o *baseExporter) error { - if o.marshaler != nil || o.unmarshaler != nil { - return fmt.Errorf("WithRequestQueue option must be used with the new request exporters only, use WithQueue instead") - } - if !cfg.Enabled { - o.exportFailureMessage += " Try enabling sending_queue to survive temporary failures." - return nil - } - set := exporterqueue.Settings{ - DataType: o.signal, - ExporterSettings: o.set, - } - o.queueSender = newQueueSender(queueFactory(context.Background(), set, cfg), o.set, cfg.NumConsumers, o.exportFailureMessage) - return nil - } + return internal.WithRequestQueue(cfg, queueFactory) } // WithCapabilities overrides the default Capabilities() function for a Consumer. // The default is non-mutable data. // TODO: Verify if we can change the default to be mutable as we do for processors. func WithCapabilities(capabilities consumer.Capabilities) Option { - return func(o *baseExporter) error { - o.consumerOptions = append(o.consumerOptions, consumer.WithCapabilities(capabilities)) - return nil - } -} - -// BatcherOption apply changes to batcher sender. 
-type BatcherOption func(*batchSender) error - -// WithRequestBatchFuncs sets the functions for merging and splitting batches for an exporter built for custom request types. -func WithRequestBatchFuncs(mf exporterbatcher.BatchMergeFunc[Request], msf exporterbatcher.BatchMergeSplitFunc[Request]) BatcherOption { - return func(bs *batchSender) error { - if mf == nil || msf == nil { - return fmt.Errorf("WithRequestBatchFuncs must be provided with non-nil functions") - } - if bs.mergeFunc != nil || bs.mergeSplitFunc != nil { - return fmt.Errorf("WithRequestBatchFuncs can only be used once with request-based exporters") - } - bs.mergeFunc = mf - bs.mergeSplitFunc = msf - return nil - } + return internal.WithCapabilities(capabilities) } // WithBatcher enables batching for an exporter based on custom request types. @@ -170,166 +66,6 @@ func WithRequestBatchFuncs(mf exporterbatcher.BatchMergeFunc[Request], msf expor // WithRequestBatchFuncs provided. // This API is at the early stage of development and may change without backward compatibility // until https://github.com/open-telemetry/opentelemetry-collector/issues/8122 is resolved. -func WithBatcher(cfg exporterbatcher.Config, opts ...BatcherOption) Option { - return func(o *baseExporter) error { - if !cfg.Enabled { - return nil - } - - bs := newBatchSender(cfg, o.set, o.batchMergeFunc, o.batchMergeSplitfunc) - for _, opt := range opts { - if err := opt(bs); err != nil { - return err - } - } - if bs.mergeFunc == nil || bs.mergeSplitFunc == nil { - return fmt.Errorf("WithRequestBatchFuncs must be provided for the batcher applied to the request-based exporters") - } - o.batchSender = bs - return nil - } -} - -// withMarshaler is used to set the request marshaler for the new exporter helper. -// It must be provided as the first option when creating a new exporter helper. -func withMarshaler(marshaler exporterqueue.Marshaler[Request]) Option { - return func(o *baseExporter) error { - o.marshaler = marshaler - return nil - } -} - -// withUnmarshaler is used to set the request unmarshaler for the new exporter helper. -// It must be provided as the first option when creating a new exporter helper. -func withUnmarshaler(unmarshaler exporterqueue.Unmarshaler[Request]) Option { - return func(o *baseExporter) error { - o.unmarshaler = unmarshaler - return nil - } -} - -// withBatchFuncs is used to set the functions for merging and splitting batches for OLTP-based exporters. -// It must be provided as the first option when creating a new exporter helper. -func withBatchFuncs(mf exporterbatcher.BatchMergeFunc[Request], msf exporterbatcher.BatchMergeSplitFunc[Request]) Option { - return func(o *baseExporter) error { - o.batchMergeFunc = mf - o.batchMergeSplitfunc = msf - return nil - } -} - -// baseExporter contains common fields between different exporter types. -type baseExporter struct { - component.StartFunc - component.ShutdownFunc - - signal component.DataType - - batchMergeFunc exporterbatcher.BatchMergeFunc[Request] - batchMergeSplitfunc exporterbatcher.BatchMergeSplitFunc[Request] - - marshaler exporterqueue.Marshaler[Request] - unmarshaler exporterqueue.Unmarshaler[Request] - - set exporter.CreateSettings - obsrep *ObsReport - - // Message for the user to be added with an export failure message. - exportFailureMessage string - - // Chain of senders that the exporter helper applies before passing the data to the actual exporter. - // The data is handled by each sender in the respective order starting from the queueSender. 
- // Most of the senders are optional, and initialized with a no-op path-through sender. - batchSender requestSender - queueSender requestSender - obsrepSender requestSender - retrySender requestSender - timeoutSender *timeoutSender // timeoutSender is always initialized. - - consumerOptions []consumer.Option -} - -func newBaseExporter(set exporter.CreateSettings, signal component.DataType, osf obsrepSenderFactory, options ...Option) (*baseExporter, error) { - obsReport, err := NewObsReport(ObsReportSettings{ExporterID: set.ID, ExporterCreateSettings: set}) - if err != nil { - return nil, err - } - - be := &baseExporter{ - signal: signal, - - batchSender: &baseRequestSender{}, - queueSender: &baseRequestSender{}, - obsrepSender: osf(obsReport), - retrySender: &baseRequestSender{}, - timeoutSender: &timeoutSender{cfg: NewDefaultTimeoutSettings()}, - - set: set, - obsrep: obsReport, - } - - for _, op := range options { - err = multierr.Append(err, op(be)) - } - if err != nil { - return nil, err - } - - be.connectSenders() - - if bs, ok := be.batchSender.(*batchSender); ok { - // If queue sender is enabled assign to the batch sender the same number of workers. - if qs, ok := be.queueSender.(*queueSender); ok { - bs.concurrencyLimit = uint64(qs.numConsumers) - } - // Batcher sender mutates the data. - be.consumerOptions = append(be.consumerOptions, consumer.WithCapabilities(consumer.Capabilities{MutatesData: true})) - } - - return be, nil -} - -// send sends the request using the first sender in the chain. -func (be *baseExporter) send(ctx context.Context, req Request) error { - err := be.queueSender.send(ctx, req) - if err != nil { - be.set.Logger.Error("Exporting failed. Rejecting data."+be.exportFailureMessage, - zap.Error(err), zap.Int("rejected_items", req.ItemsCount())) - } - return err -} - -// connectSenders connects the senders in the predefined order. -func (be *baseExporter) connectSenders() { - be.queueSender.setNextSender(be.batchSender) - be.batchSender.setNextSender(be.obsrepSender) - be.obsrepSender.setNextSender(be.retrySender) - be.retrySender.setNextSender(be.timeoutSender) -} - -func (be *baseExporter) Start(ctx context.Context, host component.Host) error { - // First start the wrapped exporter. - if err := be.StartFunc.Start(ctx, host); err != nil { - return err - } - - // If no error then start the batchSender. - if err := be.batchSender.Start(ctx, host); err != nil { - return err - } - - // Last start the queueSender. - return be.queueSender.Start(ctx, host) -} - -func (be *baseExporter) Shutdown(ctx context.Context) error { - return multierr.Combine( - // First shutdown the retry sender, so the queue sender can flush the queue without retries. - be.retrySender.Shutdown(ctx), - // Then shutdown the batch sender - be.batchSender.Shutdown(ctx), - // Then shutdown the queue sender. - be.queueSender.Shutdown(ctx), - // Last shutdown the wrapped exporter itself. - be.ShutdownFunc.Shutdown(ctx)) +func WithBatcher(cfg exporterbatcher.Config) Option { + return internal.WithBatcher(cfg) } diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/documentation.md b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/documentation.md index ac974e01dd2..6a94947aeba 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/documentation.md +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/documentation.md @@ -6,74 +6,90 @@ The following telemetry is emitted by this component. 
-### exporter_enqueue_failed_log_records
+### otelcol_exporter_enqueue_failed_log_records
 
-Number of log records failed to be added to the sending queue.
+Number of log records failed to be added to the sending queue. [alpha]
 
 | Unit | Metric Type | Value Type | Monotonic |
 | ---- | ----------- | ---------- | --------- |
-| 1 | Sum | Int | true |
+| {records} | Sum | Int | true |
 
-### exporter_enqueue_failed_metric_points
+### otelcol_exporter_enqueue_failed_metric_points
 
-Number of metric points failed to be added to the sending queue.
+Number of metric points failed to be added to the sending queue. [alpha]
 
 | Unit | Metric Type | Value Type | Monotonic |
 | ---- | ----------- | ---------- | --------- |
-| 1 | Sum | Int | true |
+| {datapoints} | Sum | Int | true |
 
-### exporter_enqueue_failed_spans
+### otelcol_exporter_enqueue_failed_spans
 
-Number of spans failed to be added to the sending queue.
+Number of spans failed to be added to the sending queue. [alpha]
 
 | Unit | Metric Type | Value Type | Monotonic |
 | ---- | ----------- | ---------- | --------- |
-| 1 | Sum | Int | true |
+| {spans} | Sum | Int | true |
 
-### exporter_send_failed_log_records
+### otelcol_exporter_queue_capacity
 
-Number of log records in failed attempts to send to destination.
+Fixed capacity of the retry queue (in batches) [alpha]
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| {batches} | Gauge | Int |
+
+### otelcol_exporter_queue_size
+
+Current size of the retry queue (in batches) [alpha]
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| {batches} | Gauge | Int |
+
+### otelcol_exporter_send_failed_log_records
+
+Number of log records in failed attempts to send to destination. [alpha]
 
 | Unit | Metric Type | Value Type | Monotonic |
 | ---- | ----------- | ---------- | --------- |
-| 1 | Sum | Int | true |
+| {records} | Sum | Int | true |
 
-### exporter_send_failed_metric_points
+### otelcol_exporter_send_failed_metric_points
 
-Number of metric points in failed attempts to send to destination.
+Number of metric points in failed attempts to send to destination. [alpha]
 
 | Unit | Metric Type | Value Type | Monotonic |
 | ---- | ----------- | ---------- | --------- |
-| 1 | Sum | Int | true |
+| {datapoints} | Sum | Int | true |
 
-### exporter_send_failed_spans
+### otelcol_exporter_send_failed_spans
 
-Number of spans in failed attempts to send to destination.
+Number of spans in failed attempts to send to destination. [alpha]
 
 | Unit | Metric Type | Value Type | Monotonic |
 | ---- | ----------- | ---------- | --------- |
-| 1 | Sum | Int | true |
+| {spans} | Sum | Int | true |
 
-### exporter_sent_log_records
+### otelcol_exporter_sent_log_records
 
-Number of log record successfully sent to destination.
+Number of log records successfully sent to destination. [alpha]
 
 | Unit | Metric Type | Value Type | Monotonic |
 | ---- | ----------- | ---------- | --------- |
-| 1 | Sum | Int | true |
+| {records} | Sum | Int | true |
 
-### exporter_sent_metric_points
+### otelcol_exporter_sent_metric_points
 
-Number of metric points successfully sent to destination.
+Number of metric points successfully sent to destination. [alpha]
 
 | Unit | Metric Type | Value Type | Monotonic |
 | ---- | ----------- | ---------- | --------- |
-| 1 | Sum | Int | true |
+| {datapoints} | Sum | Int | true |
 
-### exporter_sent_spans
+### otelcol_exporter_sent_spans
 
-Number of spans successfully sent to destination.
+Number of spans successfully sent to destination. [alpha]
[alpha]

| Unit | Metric Type | Value Type | Monotonic |
| ---- | ----------- | ---------- | --------- |
-| 1 | Sum | Int | true |
+| {spans} | Sum | Int | true |
diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/exporterhelper.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/exporterhelper.go
new file mode 100644
index 00000000000..d9e90d821d9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/exporterhelper.go
@@ -0,0 +1,18 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package exporterhelper // import "go.opentelemetry.io/collector/exporter/exporterhelper"
+import "go.opentelemetry.io/collector/exporter/internal"
+
+// Request represents a single request that can be sent to an external endpoint.
+// Experimental: This API is at the early stage of development and may change without backward compatibility
+// until https://github.com/open-telemetry/opentelemetry-collector/issues/8122 is resolved.
+type Request = internal.Request
+
+// RequestErrorHandler is an optional interface that can be implemented by Request to provide a way to handle partial
+// temporary failures. For example, if some items failed to process and can be retried, this interface allows returning
+// a new Request that contains the items left to be sent. Otherwise, the original Request should be returned.
+// If not implemented, the original Request will be returned, assuming the error applies to the whole Request.
+// Experimental: This API is at the early stage of development and may change without backward compatibility
+// until https://github.com/open-telemetry/opentelemetry-collector/issues/8122 is resolved.
+type RequestErrorHandler = internal.RequestErrorHandler
diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/base_exporter.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/base_exporter.go
new file mode 100644
index 00000000000..a76a725981f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/base_exporter.go
@@ -0,0 +1,311 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package internal // import "go.opentelemetry.io/collector/exporter/exporterhelper/internal"
+
+import (
+	"context"
+	"errors"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+	"go.opentelemetry.io/otel/codes"
+	sdktrace "go.opentelemetry.io/otel/sdk/trace"
+	"go.uber.org/multierr"
+	"go.uber.org/zap"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/config/configretry"
+	"go.opentelemetry.io/collector/consumer"
+	"go.opentelemetry.io/collector/exporter"
+	"go.opentelemetry.io/collector/exporter/exporterbatcher"
+	"go.opentelemetry.io/collector/exporter/exporterqueue"
+	"go.opentelemetry.io/collector/exporter/internal"
+	"go.opentelemetry.io/collector/featuregate"
+	"go.opentelemetry.io/collector/pipeline"
+)
+
+var usePullingBasedExporterQueueBatcher = featuregate.GlobalRegistry().MustRegister(
+	"exporter.UsePullingBasedExporterQueueBatcher",
+	featuregate.StageAlpha,
+	featuregate.WithRegisterFromVersion("v0.115.0"),
+	featuregate.WithRegisterDescription("if set to true, turns on the pulling-based exporter queue batcher"),
+)
+
+type ObsrepSenderFactory = func(obsrep *ObsReport) RequestSender
+
+// Option applies changes to BaseExporter.
+type Option func(*BaseExporter) error
+
+// BaseExporter contains common fields between different exporter types.
+type BaseExporter struct {
+	component.StartFunc
+	component.ShutdownFunc
+
+	Signal pipeline.Signal
+
+	Marshaler   exporterqueue.Marshaler[internal.Request]
+	Unmarshaler exporterqueue.Unmarshaler[internal.Request]
+
+	Set    exporter.Settings
+	Obsrep *ObsReport
+
+	// Message for the user to be added with an export failure message.
+	ExportFailureMessage string
+
+	// Chain of senders that the exporter helper applies before passing the data to the actual exporter.
+	// The data is handled by each sender in the respective order starting from the queueSender.
+	// Most of the senders are optional, and initialized with a no-op pass-through sender.
+	BatchSender   RequestSender
+	QueueSender   RequestSender
+	ObsrepSender  RequestSender
+	RetrySender   RequestSender
+	TimeoutSender *TimeoutSender // TimeoutSender is always initialized.
+
+	ConsumerOptions []consumer.Option
+
+	queueCfg     exporterqueue.Config
+	queueFactory exporterqueue.Factory[internal.Request]
+	BatcherCfg   exporterbatcher.Config
+}
+
+func NewBaseExporter(set exporter.Settings, signal pipeline.Signal, osf ObsrepSenderFactory, options ...Option) (*BaseExporter, error) {
+	obsReport, err := NewExporter(ObsReportSettings{ExporterID: set.ID, ExporterCreateSettings: set, Signal: signal})
+	if err != nil {
+		return nil, err
+	}
+
+	be := &BaseExporter{
+		Signal: signal,
+
+		BatchSender:   &BaseRequestSender{},
+		QueueSender:   &BaseRequestSender{},
+		ObsrepSender:  osf(obsReport),
+		RetrySender:   &BaseRequestSender{},
+		TimeoutSender: &TimeoutSender{cfg: NewDefaultTimeoutConfig()},
+
+		Set:    set,
+		Obsrep: obsReport,
+	}
+
+	for _, op := range options {
+		err = multierr.Append(err, op(be))
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	if be.queueCfg.Enabled {
+		q := be.queueFactory(
+			context.Background(),
+			exporterqueue.Settings{
+				Signal:           be.Signal,
+				ExporterSettings: be.Set,
+			},
+			be.queueCfg)
+		be.QueueSender = NewQueueSender(q, be.Set, be.queueCfg.NumConsumers, be.ExportFailureMessage, be.Obsrep, be.BatcherCfg)
+		for _, op := range options {
+			err = multierr.Append(err, op(be))
+		}
+	}
+
+	if !usePullingBasedExporterQueueBatcher.IsEnabled() && be.BatcherCfg.Enabled ||
+		usePullingBasedExporterQueueBatcher.IsEnabled() && be.BatcherCfg.Enabled && !be.queueCfg.Enabled {
+		bs := NewBatchSender(be.BatcherCfg, be.Set)
+		be.BatchSender = bs
+	}
+
+	if err != nil {
+		return nil, err
+	}
+
+	be.connectSenders()
+
+	if bs, ok := be.BatchSender.(*BatchSender); ok {
+		// If the queue sender is enabled, assign the batch sender the same number of workers.
+		if qs, ok := be.QueueSender.(*QueueSender); ok {
+			bs.concurrencyLimit = int64(qs.numConsumers)
+		}
+		// The batch sender mutates the data.
+		be.ConsumerOptions = append(be.ConsumerOptions, consumer.WithCapabilities(consumer.Capabilities{MutatesData: true}))
+	}
+
+	return be, nil
+}
+
+// Send sends the request using the first sender in the chain.
+func (be *BaseExporter) Send(ctx context.Context, req internal.Request) error {
+	err := be.QueueSender.Send(ctx, req)
+	if err != nil {
+		be.Set.Logger.Error("Exporting failed. Rejecting data."+be.ExportFailureMessage,
+			zap.Error(err), zap.Int("rejected_items", req.ItemsCount()))
+	}
+	return err
+}
+
+// connectSenders connects the senders in the predefined order.
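The order wired just below is queue → batch → obsrep → retry → timeout. Every stage implements RequestSender (defined in request_sender.go later in this patch) and embeds BaseRequestSender, so a stage only has to override Send and forward to NextSender. A sketch of a hypothetical extra stage, using only names from this patch plus an illustrative logger field (this type is not part of the upstream source):

```go
// loggingSender is illustrative only: it counts items and forwards the
// request unchanged to the next sender in the chain.
type loggingSender struct {
	BaseRequestSender
	logger *zap.Logger
}

func (ls *loggingSender) Send(ctx context.Context, req internal.Request) error {
	ls.logger.Debug("forwarding request", zap.Int("items", req.ItemsCount()))
	return ls.NextSender.Send(ctx, req)
}
```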
+func (be *BaseExporter) connectSenders() { + be.QueueSender.SetNextSender(be.BatchSender) + be.BatchSender.SetNextSender(be.ObsrepSender) + be.ObsrepSender.SetNextSender(be.RetrySender) + be.RetrySender.SetNextSender(be.TimeoutSender) +} + +func (be *BaseExporter) Start(ctx context.Context, host component.Host) error { + // First start the wrapped exporter. + if err := be.StartFunc.Start(ctx, host); err != nil { + return err + } + + // If no error then start the BatchSender. + if err := be.BatchSender.Start(ctx, host); err != nil { + return err + } + + // Last start the queueSender. + return be.QueueSender.Start(ctx, host) +} + +func (be *BaseExporter) Shutdown(ctx context.Context) error { + return multierr.Combine( + // First shutdown the retry sender, so the queue sender can flush the queue without retries. + be.RetrySender.Shutdown(ctx), + // Then shutdown the batch sender + be.BatchSender.Shutdown(ctx), + // Then shutdown the queue sender. + be.QueueSender.Shutdown(ctx), + // Last shutdown the wrapped exporter itself. + be.ShutdownFunc.Shutdown(ctx)) +} + +// WithStart overrides the default Start function for an exporter. +// The default start function does nothing and always returns nil. +func WithStart(start component.StartFunc) Option { + return func(o *BaseExporter) error { + o.StartFunc = start + return nil + } +} + +// WithShutdown overrides the default Shutdown function for an exporter. +// The default shutdown function does nothing and always returns nil. +func WithShutdown(shutdown component.ShutdownFunc) Option { + return func(o *BaseExporter) error { + o.ShutdownFunc = shutdown + return nil + } +} + +// WithTimeout overrides the default TimeoutConfig for an exporter. +// The default TimeoutConfig is 5 seconds. +func WithTimeout(timeoutConfig TimeoutConfig) Option { + return func(o *BaseExporter) error { + o.TimeoutSender.cfg = timeoutConfig + return nil + } +} + +// WithRetry overrides the default configretry.BackOffConfig for an exporter. +// The default configretry.BackOffConfig is to disable retries. +func WithRetry(config configretry.BackOffConfig) Option { + return func(o *BaseExporter) error { + if !config.Enabled { + o.ExportFailureMessage += " Try enabling retry_on_failure config option to retry on retryable errors." + return nil + } + o.RetrySender = newRetrySender(config, o.Set) + return nil + } +} + +// WithQueue overrides the default QueueConfig for an exporter. +// The default QueueConfig is to disable queueing. +// This option cannot be used with the new exporter helpers New[Traces|Metrics|Logs]RequestExporter. +func WithQueue(config QueueConfig) Option { + return func(o *BaseExporter) error { + if o.Marshaler == nil || o.Unmarshaler == nil { + return errors.New("WithQueue option is not available for the new request exporters, use WithRequestQueue instead") + } + if !config.Enabled { + o.ExportFailureMessage += " Try enabling sending_queue to survive temporary failures." + return nil + } + o.queueCfg = exporterqueue.Config{ + Enabled: config.Enabled, + NumConsumers: config.NumConsumers, + QueueSize: config.QueueSize, + } + o.queueFactory = exporterqueue.NewPersistentQueueFactory[internal.Request](config.StorageID, exporterqueue.PersistentQueueSettings[internal.Request]{ + Marshaler: o.Marshaler, + Unmarshaler: o.Unmarshaler, + }) + return nil + } +} + +// WithRequestQueue enables queueing for an exporter. +// This option should be used with the new exporter helpers New[Traces|Metrics|Logs]RequestExporter. 
+// Experimental: This API is at the early stage of development and may change without backward compatibility +// until https://github.com/open-telemetry/opentelemetry-collector/issues/8122 is resolved. +func WithRequestQueue(cfg exporterqueue.Config, queueFactory exporterqueue.Factory[internal.Request]) Option { + return func(o *BaseExporter) error { + if o.Marshaler != nil || o.Unmarshaler != nil { + return errors.New("WithRequestQueue option must be used with the new request exporters only, use WithQueue instead") + } + if !cfg.Enabled { + o.ExportFailureMessage += " Try enabling sending_queue to survive temporary failures." + return nil + } + o.queueCfg = cfg + o.queueFactory = queueFactory + return nil + } +} + +// WithCapabilities overrides the default Capabilities() function for a Consumer. +// The default is non-mutable data. +// TODO: Verify if we can change the default to be mutable as we do for processors. +func WithCapabilities(capabilities consumer.Capabilities) Option { + return func(o *BaseExporter) error { + o.ConsumerOptions = append(o.ConsumerOptions, consumer.WithCapabilities(capabilities)) + return nil + } +} + +// WithBatcher enables batching for an exporter based on custom request types. +// For now, it can be used only with the New[Traces|Metrics|Logs]RequestExporter exporter helpers and +// WithRequestBatchFuncs provided. +// This API is at the early stage of development and may change without backward compatibility +// until https://github.com/open-telemetry/opentelemetry-collector/issues/8122 is resolved. +func WithBatcher(cfg exporterbatcher.Config) Option { + return func(o *BaseExporter) error { + o.BatcherCfg = cfg + return nil + } +} + +// WithMarshaler is used to set the request marshaler for the new exporter helper. +// It must be provided as the first option when creating a new exporter helper. +func WithMarshaler(marshaler exporterqueue.Marshaler[internal.Request]) Option { + return func(o *BaseExporter) error { + o.Marshaler = marshaler + return nil + } +} + +// withUnmarshaler is used to set the request unmarshaler for the new exporter helper. +// It must be provided as the first option when creating a new exporter helper. 
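One consequence of that ordering rule: for the classic (pdata-based) helpers, a persistent queue needs both codecs installed before WithQueue runs, since WithQueue rejects a nil Marshaler or Unmarshaler. A sketch under those assumptions (function name and all argument values are supplied by the caller; everything else comes from this file):

```go
// newPersistentQueueExporter is a sketch only: codecs first, then the queue.
func newPersistentQueueExporter(
	set exporter.Settings,
	osf ObsrepSenderFactory,
	m exporterqueue.Marshaler[internal.Request],
	u exporterqueue.Unmarshaler[internal.Request],
	qCfg QueueConfig, // qCfg.StorageID selects the storage extension
) (*BaseExporter, error) {
	return NewBaseExporter(set, pipeline.SignalTraces, osf,
		WithMarshaler(m),
		WithUnmarshaler(u),
		WithQueue(qCfg),
	)
}
```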
+func WithUnmarshaler(unmarshaler exporterqueue.Unmarshaler[internal.Request]) Option { + return func(o *BaseExporter) error { + o.Unmarshaler = unmarshaler + return nil + } +} + +func CheckStatus(t *testing.T, sd sdktrace.ReadOnlySpan, err error) { + if err != nil { + require.Equal(t, codes.Error, sd.Status().Code, "SpanData %v", sd) + require.EqualError(t, err, sd.Status().Description, "SpanData %v", sd) + } else { + require.Equal(t, codes.Unset, sd.Status().Code, "SpanData %v", sd) + } +} diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/batch_sender.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/batch_sender.go similarity index 64% rename from vendor/go.opentelemetry.io/collector/exporter/exporterhelper/batch_sender.go rename to vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/batch_sender.go index 086c9724aa3..21eed2c91d8 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/batch_sender.go +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/batch_sender.go @@ -1,7 +1,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package exporterhelper // import "go.opentelemetry.io/collector/exporter/exporterhelper" +package internal // import "go.opentelemetry.io/collector/exporter/exporterhelper/internal" import ( "context" @@ -14,29 +14,27 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/exporter" "go.opentelemetry.io/collector/exporter/exporterbatcher" + "go.opentelemetry.io/collector/exporter/internal" ) -// batchSender is a component that places requests into batches before passing them to the downstream senders. +// BatchSender is a component that places requests into batches before passing them to the downstream senders. // Batches are sent out with any of the following conditions: // - batch size reaches cfg.MinSizeItems // - cfg.FlushTimeout is elapsed since the timestamp when the previous batch was sent out. // - concurrencyLimit is reached. -type batchSender struct { - baseRequestSender - cfg exporterbatcher.Config - mergeFunc exporterbatcher.BatchMergeFunc[Request] - mergeSplitFunc exporterbatcher.BatchMergeSplitFunc[Request] +type BatchSender struct { + BaseRequestSender + cfg exporterbatcher.Config // concurrencyLimit is the maximum number of goroutines that can be blocked by the batcher. // If this number is reached and all the goroutines are busy, the batch will be sent right away. // Populated from the number of queue consumers if queue is enabled. - concurrencyLimit uint64 - activeRequests atomic.Uint64 - - resetTimerCh chan struct{} + concurrencyLimit int64 + activeRequests atomic.Int64 mu sync.Mutex activeBatch *batch + lastFlushed time.Time logger *zap.Logger @@ -46,23 +44,20 @@ type batchSender struct { } // newBatchSender returns a new batch consumer component. 
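The flush conditions listed on the BatchSender type above map one-to-one onto exporterbatcher.Config. A sketch with illustrative values (NewDefaultConfig and the promoted MinSizeItems/MaxSizeItems fields are assumed from the exporterbatcher package; `set` is an exporter.Settings):

```go
// Sketch only; the values are illustrative, not recommendations.
cfg := exporterbatcher.NewDefaultConfig()
cfg.Enabled = true
cfg.FlushTimeout = 200 * time.Millisecond // timer-based flush
cfg.MinSizeItems = 8192                   // size-based flush
cfg.MaxSizeItems = 16384                  // larger requests are split
bs := NewBatchSender(cfg, set)
```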
-func newBatchSender(cfg exporterbatcher.Config, set exporter.CreateSettings, - mf exporterbatcher.BatchMergeFunc[Request], msf exporterbatcher.BatchMergeSplitFunc[Request]) *batchSender { - bs := &batchSender{ +func NewBatchSender(cfg exporterbatcher.Config, set exporter.Settings) *BatchSender { + bs := &BatchSender{ activeBatch: newEmptyBatch(), cfg: cfg, logger: set.Logger, - mergeFunc: mf, - mergeSplitFunc: msf, - shutdownCh: make(chan struct{}), + shutdownCh: nil, shutdownCompleteCh: make(chan struct{}), stopped: &atomic.Bool{}, - resetTimerCh: make(chan struct{}), } return bs } -func (bs *batchSender) Start(_ context.Context, _ component.Host) error { +func (bs *BatchSender) Start(_ context.Context, _ component.Host) error { + bs.shutdownCh = make(chan struct{}) timer := time.NewTimer(bs.cfg.FlushTimeout) go func() { for { @@ -84,16 +79,17 @@ func (bs *batchSender) Start(_ context.Context, _ component.Host) error { return case <-timer.C: bs.mu.Lock() + nextFlush := bs.cfg.FlushTimeout if bs.activeBatch.request != nil { - bs.exportActiveBatch() + sinceLastFlush := time.Since(bs.lastFlushed) + if sinceLastFlush >= bs.cfg.FlushTimeout { + bs.exportActiveBatch() + } else { + nextFlush = bs.cfg.FlushTimeout - sinceLastFlush + } } bs.mu.Unlock() - timer.Reset(bs.cfg.FlushTimeout) - case <-bs.resetTimerCh: - if !timer.Stop() { - <-timer.C - } - timer.Reset(bs.cfg.FlushTimeout) + timer.Reset(nextFlush) } } }() @@ -103,9 +99,13 @@ func (bs *batchSender) Start(_ context.Context, _ component.Host) error { type batch struct { ctx context.Context - request Request + request internal.Request done chan struct{} err error + + // requestsBlocked is the number of requests blocked in this batch + // that can be immediately released from activeRequests when batch sending completes. + requestsBlocked int64 } func newEmptyBatch() *batch { @@ -117,32 +117,28 @@ func newEmptyBatch() *batch { // exportActiveBatch exports the active batch asynchronously and replaces it with a new one. // Caller must hold the lock. -func (bs *batchSender) exportActiveBatch() { +func (bs *BatchSender) exportActiveBatch() { go func(b *batch) { - b.err = b.request.Export(b.ctx) + b.err = bs.NextSender.Send(b.ctx, b.request) close(b.done) + bs.activeRequests.Add(-b.requestsBlocked) }(bs.activeBatch) + bs.lastFlushed = time.Now() bs.activeBatch = newEmptyBatch() } -func (bs *batchSender) resetTimer() { - if !bs.stopped.Load() { - bs.resetTimerCh <- struct{}{} - } -} - // isActiveBatchReady returns true if the active batch is ready to be exported. // The batch is ready if it has reached the minimum size or the concurrency limit is reached. // Caller must hold the lock. -func (bs *batchSender) isActiveBatchReady() bool { +func (bs *BatchSender) isActiveBatchReady() bool { return bs.activeBatch.request.ItemsCount() >= bs.cfg.MinSizeItems || (bs.concurrencyLimit > 0 && bs.activeRequests.Load() >= bs.concurrencyLimit) } -func (bs *batchSender) send(ctx context.Context, req Request) error { +func (bs *BatchSender) Send(ctx context.Context, req internal.Request) error { // Stopped batch sender should act as pass-through to allow the queue to be drained. if bs.stopped.Load() { - return bs.nextSender.send(ctx, req) + return bs.NextSender.Send(ctx, req) } if bs.cfg.MaxSizeItems > 0 { @@ -152,22 +148,34 @@ func (bs *batchSender) send(ctx context.Context, req Request) error { } // sendMergeSplitBatch sends the request to the batch which may be split into multiple requests. 
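Concretely: with MaxSizeItems = 100, an active batch of 60 items merged with an incoming 100-item request is topped up to 100, leaving a 60-item remainder, so two requests come back; a larger remainder would be chunked at 100 items apiece. The fakeRequest.MergeSplit helper later in this patch implements exactly this arithmetic for tests.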
-func (bs *batchSender) sendMergeSplitBatch(ctx context.Context, req Request) error { +func (bs *BatchSender) sendMergeSplitBatch(ctx context.Context, req internal.Request) error { bs.mu.Lock() - bs.activeRequests.Add(1) - defer bs.activeRequests.Add(^uint64(0)) - reqs, err := bs.mergeSplitFunc(ctx, bs.cfg.MaxSizeConfig, bs.activeBatch.request, req) - if err != nil || len(reqs) == 0 { + var reqs []internal.Request + var mergeSplitErr error + if bs.activeBatch.request == nil { + reqs, mergeSplitErr = req.MergeSplit(ctx, bs.cfg.MaxSizeConfig, nil) + } else { + reqs, mergeSplitErr = bs.activeBatch.request.MergeSplit(ctx, bs.cfg.MaxSizeConfig, req) + } + + if mergeSplitErr != nil || len(reqs) == 0 { bs.mu.Unlock() - return err + return mergeSplitErr + } + + bs.activeRequests.Add(1) + if len(reqs) == 1 { + bs.activeBatch.requestsBlocked++ + } else { + // if there was a split, we want to make sure that bs.activeRequests is released once all of the parts are sent instead of using batch.requestsBlocked + defer bs.activeRequests.Add(-1) } if len(reqs) == 1 || bs.activeBatch.request != nil { bs.updateActiveBatch(ctx, reqs[0]) batch := bs.activeBatch if bs.isActiveBatchReady() || len(reqs) > 1 { bs.exportActiveBatch() - bs.resetTimer() } bs.mu.Unlock() <-batch.done @@ -182,7 +190,7 @@ func (bs *batchSender) sendMergeSplitBatch(ctx context.Context, req Request) err // Intentionally do not put the last request in the active batch to not block it. // TODO: Consider including the partial request in the error to avoid double publishing. for _, r := range reqs { - if err := r.Export(ctx); err != nil { + if err := bs.NextSender.Send(ctx, r); err != nil { return err } } @@ -190,24 +198,24 @@ func (bs *batchSender) sendMergeSplitBatch(ctx context.Context, req Request) err } // sendMergeBatch sends the request to the batch and waits for the batch to be exported. -func (bs *batchSender) sendMergeBatch(ctx context.Context, req Request) error { +func (bs *BatchSender) sendMergeBatch(ctx context.Context, req internal.Request) error { bs.mu.Lock() - bs.activeRequests.Add(1) - defer bs.activeRequests.Add(^uint64(0)) if bs.activeBatch.request != nil { var err error - req, err = bs.mergeFunc(ctx, bs.activeBatch.request, req) + req, err = bs.activeBatch.request.Merge(ctx, req) if err != nil { bs.mu.Unlock() return err } } + + bs.activeRequests.Add(1) bs.updateActiveBatch(ctx, req) batch := bs.activeBatch + batch.requestsBlocked++ if bs.isActiveBatchReady() { bs.exportActiveBatch() - bs.resetTimer() } bs.mu.Unlock() <-batch.done @@ -218,16 +226,18 @@ func (bs *batchSender) sendMergeBatch(ctx context.Context, req Request) error { // The context is only set once and is not updated after the first call. // Merging the context would be complex and require an additional goroutine to handle the context cancellation. // We take the approach of using the context from the first request since it's likely to have the shortest timeout. 
-func (bs *batchSender) updateActiveBatch(ctx context.Context, req Request) { +func (bs *BatchSender) updateActiveBatch(ctx context.Context, req internal.Request) { if bs.activeBatch.request == nil { bs.activeBatch.ctx = ctx } bs.activeBatch.request = req } -func (bs *batchSender) Shutdown(context.Context) error { +func (bs *BatchSender) Shutdown(context.Context) error { bs.stopped.Store(true) - close(bs.shutdownCh) - <-bs.shutdownCompleteCh + if bs.shutdownCh != nil { + close(bs.shutdownCh) + <-bs.shutdownCompleteCh + } return nil } diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/metadata/generated_telemetry.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/metadata/generated_telemetry.go index 481cc2b27ec..41bea2f8ce1 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/metadata/generated_telemetry.go +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/metadata/generated_telemetry.go @@ -3,6 +3,7 @@ package metadata import ( + "context" "errors" "go.opentelemetry.io/otel/metric" @@ -24,97 +25,136 @@ func Tracer(settings component.TelemetrySettings) trace.Tracer { // TelemetryBuilder provides an interface for components to report telemetry // as defined in metadata and user config. type TelemetryBuilder struct { + meter metric.Meter ExporterEnqueueFailedLogRecords metric.Int64Counter ExporterEnqueueFailedMetricPoints metric.Int64Counter ExporterEnqueueFailedSpans metric.Int64Counter + ExporterQueueCapacity metric.Int64ObservableGauge + ExporterQueueSize metric.Int64ObservableGauge ExporterSendFailedLogRecords metric.Int64Counter ExporterSendFailedMetricPoints metric.Int64Counter ExporterSendFailedSpans metric.Int64Counter ExporterSentLogRecords metric.Int64Counter ExporterSentMetricPoints metric.Int64Counter ExporterSentSpans metric.Int64Counter - level configtelemetry.Level } -// telemetryBuilderOption applies changes to default builder. -type telemetryBuilderOption func(*TelemetryBuilder) +// TelemetryBuilderOption applies changes to default builder. +type TelemetryBuilderOption interface { + apply(*TelemetryBuilder) +} + +type telemetryBuilderOptionFunc func(mb *TelemetryBuilder) + +func (tbof telemetryBuilderOptionFunc) apply(mb *TelemetryBuilder) { + tbof(mb) +} + +// InitExporterQueueCapacity configures the ExporterQueueCapacity metric. +func (builder *TelemetryBuilder) InitExporterQueueCapacity(cb func() int64, opts ...metric.ObserveOption) (metric.Registration, error) { + var err error + builder.ExporterQueueCapacity, err = builder.meter.Int64ObservableGauge( + "otelcol_exporter_queue_capacity", + metric.WithDescription("Fixed capacity of the retry queue (in batches)"), + metric.WithUnit("{batches}"), + ) + if err != nil { + return nil, err + } + reg, err := builder.meter.RegisterCallback(func(_ context.Context, o metric.Observer) error { + o.ObserveInt64(builder.ExporterQueueCapacity, cb(), opts...) + return nil + }, builder.ExporterQueueCapacity) + return reg, err +} -// WithLevel sets the current telemetry level for the component. -func WithLevel(lvl configtelemetry.Level) telemetryBuilderOption { - return func(builder *TelemetryBuilder) { - builder.level = lvl +// InitExporterQueueSize configures the ExporterQueueSize metric. 
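Both Init helpers share the same shape: create the observable gauge once, register a callback that samples the live value, and hand back the Registration so the caller can unregister at shutdown. A sketch mirroring how queue_sender.go below uses it (`builder`, `q`, and `shutdownFns` are assumed to be in scope):

```go
// Sketch only: observe the live queue size and keep the registration so it
// can be unregistered on shutdown, as QueueSender.Start does.
reg, err := builder.InitExporterQueueSize(
	func() int64 { return int64(q.Size()) },
	metric.WithAttributeSet(attribute.NewSet(attribute.String("exporter", "otlp/example"))),
)
if err != nil {
	return err
}
shutdownFns = append(shutdownFns, func(context.Context) error { return reg.Unregister() })
```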
+func (builder *TelemetryBuilder) InitExporterQueueSize(cb func() int64, opts ...metric.ObserveOption) (metric.Registration, error) { + var err error + builder.ExporterQueueSize, err = builder.meter.Int64ObservableGauge( + "otelcol_exporter_queue_size", + metric.WithDescription("Current size of the retry queue (in batches)"), + metric.WithUnit("{batches}"), + ) + if err != nil { + return nil, err } + reg, err := builder.meter.RegisterCallback(func(_ context.Context, o metric.Observer) error { + o.ObserveInt64(builder.ExporterQueueSize, cb(), opts...) + return nil + }, builder.ExporterQueueSize) + return reg, err } // NewTelemetryBuilder provides a struct with methods to update all internal telemetry // for a component -func NewTelemetryBuilder(settings component.TelemetrySettings, options ...telemetryBuilderOption) (*TelemetryBuilder, error) { - builder := TelemetryBuilder{level: configtelemetry.LevelBasic} +func NewTelemetryBuilder(settings component.TelemetrySettings, options ...TelemetryBuilderOption) (*TelemetryBuilder, error) { + builder := TelemetryBuilder{} for _, op := range options { - op(&builder) - } - var ( - err, errs error - meter metric.Meter - ) - if builder.level >= configtelemetry.LevelBasic { - meter = Meter(settings) - } else { - meter = noop.Meter{} + op.apply(&builder) } - builder.ExporterEnqueueFailedLogRecords, err = meter.Int64Counter( - "exporter_enqueue_failed_log_records", - metric.WithDescription("Number of log records failed to be added to the sending queue."), - metric.WithUnit("1"), + builder.meter = Meter(settings) + var err, errs error + builder.ExporterEnqueueFailedLogRecords, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Counter( + "otelcol_exporter_enqueue_failed_log_records", + metric.WithDescription("Number of log records failed to be added to the sending queue. [alpha]"), + metric.WithUnit("{records}"), ) errs = errors.Join(errs, err) - builder.ExporterEnqueueFailedMetricPoints, err = meter.Int64Counter( - "exporter_enqueue_failed_metric_points", - metric.WithDescription("Number of metric points failed to be added to the sending queue."), - metric.WithUnit("1"), + builder.ExporterEnqueueFailedMetricPoints, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Counter( + "otelcol_exporter_enqueue_failed_metric_points", + metric.WithDescription("Number of metric points failed to be added to the sending queue. [alpha]"), + metric.WithUnit("{datapoints}"), ) errs = errors.Join(errs, err) - builder.ExporterEnqueueFailedSpans, err = meter.Int64Counter( - "exporter_enqueue_failed_spans", - metric.WithDescription("Number of spans failed to be added to the sending queue."), - metric.WithUnit("1"), + builder.ExporterEnqueueFailedSpans, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Counter( + "otelcol_exporter_enqueue_failed_spans", + metric.WithDescription("Number of spans failed to be added to the sending queue. 
[alpha]"), + metric.WithUnit("{spans}"), ) errs = errors.Join(errs, err) - builder.ExporterSendFailedLogRecords, err = meter.Int64Counter( - "exporter_send_failed_log_records", - metric.WithDescription("Number of log records in failed attempts to send to destination."), - metric.WithUnit("1"), + builder.ExporterSendFailedLogRecords, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Counter( + "otelcol_exporter_send_failed_log_records", + metric.WithDescription("Number of log records in failed attempts to send to destination. [alpha]"), + metric.WithUnit("{records}"), ) errs = errors.Join(errs, err) - builder.ExporterSendFailedMetricPoints, err = meter.Int64Counter( - "exporter_send_failed_metric_points", - metric.WithDescription("Number of metric points in failed attempts to send to destination."), - metric.WithUnit("1"), + builder.ExporterSendFailedMetricPoints, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Counter( + "otelcol_exporter_send_failed_metric_points", + metric.WithDescription("Number of metric points in failed attempts to send to destination. [alpha]"), + metric.WithUnit("{datapoints}"), ) errs = errors.Join(errs, err) - builder.ExporterSendFailedSpans, err = meter.Int64Counter( - "exporter_send_failed_spans", - metric.WithDescription("Number of spans in failed attempts to send to destination."), - metric.WithUnit("1"), + builder.ExporterSendFailedSpans, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Counter( + "otelcol_exporter_send_failed_spans", + metric.WithDescription("Number of spans in failed attempts to send to destination. [alpha]"), + metric.WithUnit("{spans}"), ) errs = errors.Join(errs, err) - builder.ExporterSentLogRecords, err = meter.Int64Counter( - "exporter_sent_log_records", - metric.WithDescription("Number of log record successfully sent to destination."), - metric.WithUnit("1"), + builder.ExporterSentLogRecords, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Counter( + "otelcol_exporter_sent_log_records", + metric.WithDescription("Number of log record successfully sent to destination. [alpha]"), + metric.WithUnit("{records}"), ) errs = errors.Join(errs, err) - builder.ExporterSentMetricPoints, err = meter.Int64Counter( - "exporter_sent_metric_points", - metric.WithDescription("Number of metric points successfully sent to destination."), - metric.WithUnit("1"), + builder.ExporterSentMetricPoints, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Counter( + "otelcol_exporter_sent_metric_points", + metric.WithDescription("Number of metric points successfully sent to destination. [alpha]"), + metric.WithUnit("{datapoints}"), ) errs = errors.Join(errs, err) - builder.ExporterSentSpans, err = meter.Int64Counter( - "exporter_sent_spans", - metric.WithDescription("Number of spans successfully sent to destination."), - metric.WithUnit("1"), + builder.ExporterSentSpans, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Counter( + "otelcol_exporter_sent_spans", + metric.WithDescription("Number of spans successfully sent to destination. 
[alpha]"), + metric.WithUnit("{spans}"), ) errs = errors.Join(errs, err) return &builder, errs } + +func getLeveledMeter(meter metric.Meter, cfgLevel, srvLevel configtelemetry.Level) metric.Meter { + if cfgLevel <= srvLevel { + return meter + } + return noop.Meter{} +} diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/obsexporter.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/obsexporter.go new file mode 100644 index 00000000000..004e5c48248 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/obsexporter.go @@ -0,0 +1,170 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/collector/exporter/exporterhelper/internal" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/trace" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/exporter" + "go.opentelemetry.io/collector/exporter/exporterhelper/internal/metadata" + "go.opentelemetry.io/collector/pipeline" +) + +// ObsReport is a helper to add observability to an exporter. +type ObsReport struct { + spanNamePrefix string + tracer trace.Tracer + Signal pipeline.Signal + + otelAttrs metric.MeasurementOption + TelemetryBuilder *metadata.TelemetryBuilder +} + +// ObsReportSettings are settings for creating an ObsReport. +type ObsReportSettings struct { + ExporterID component.ID + ExporterCreateSettings exporter.Settings + Signal pipeline.Signal +} + +func NewExporter(cfg ObsReportSettings) (*ObsReport, error) { + telemetryBuilder, err := metadata.NewTelemetryBuilder(cfg.ExporterCreateSettings.TelemetrySettings) + if err != nil { + return nil, err + } + + return &ObsReport{ + spanNamePrefix: ExporterPrefix + cfg.ExporterID.String(), + tracer: cfg.ExporterCreateSettings.TracerProvider.Tracer(cfg.ExporterID.String()), + Signal: cfg.Signal, + otelAttrs: metric.WithAttributeSet(attribute.NewSet(attribute.String(ExporterKey, cfg.ExporterID.String()))), + TelemetryBuilder: telemetryBuilder, + }, nil +} + +// StartTracesOp is called at the start of an Export operation. +// The returned context should be used in other calls to the Exporter functions +// dealing with the same export operation. +func (or *ObsReport) StartTracesOp(ctx context.Context) context.Context { + return or.startOp(ctx, ExportTraceDataOperationSuffix) +} + +// EndTracesOp completes the export operation that was started with startTracesOp. +func (or *ObsReport) EndTracesOp(ctx context.Context, numSpans int, err error) { + numSent, numFailedToSend := toNumItems(numSpans, err) + or.recordMetrics(context.WithoutCancel(ctx), pipeline.SignalTraces, numSent, numFailedToSend) + endSpan(ctx, err, numSent, numFailedToSend, SentSpansKey, FailedToSendSpansKey) +} + +// StartMetricsOp is called at the start of an Export operation. +// The returned context should be used in other calls to the Exporter functions +// dealing with the same export operation. +func (or *ObsReport) StartMetricsOp(ctx context.Context) context.Context { + return or.startOp(ctx, ExportMetricsOperationSuffix) +} + +// EndMetricsOp completes the export operation that was started with +// startMetricsOp. +// +// If needed, report your use case in https://github.com/open-telemetry/opentelemetry-collector/issues/10592. 
+func (or *ObsReport) EndMetricsOp(ctx context.Context, numMetricPoints int, err error) { + numSent, numFailedToSend := toNumItems(numMetricPoints, err) + or.recordMetrics(context.WithoutCancel(ctx), pipeline.SignalMetrics, numSent, numFailedToSend) + endSpan(ctx, err, numSent, numFailedToSend, SentMetricPointsKey, FailedToSendMetricPointsKey) +} + +// StartLogsOp is called at the start of an Export operation. +// The returned context should be used in other calls to the Exporter functions +// dealing with the same export operation. +func (or *ObsReport) StartLogsOp(ctx context.Context) context.Context { + return or.startOp(ctx, ExportLogsOperationSuffix) +} + +// EndLogsOp completes the export operation that was started with startLogsOp. +func (or *ObsReport) EndLogsOp(ctx context.Context, numLogRecords int, err error) { + numSent, numFailedToSend := toNumItems(numLogRecords, err) + or.recordMetrics(context.WithoutCancel(ctx), pipeline.SignalLogs, numSent, numFailedToSend) + endSpan(ctx, err, numSent, numFailedToSend, SentLogRecordsKey, FailedToSendLogRecordsKey) +} + +// StartProfilesOp is called at the start of an Export operation. +// The returned context should be used in other calls to the Exporter functions +// dealing with the same export operation. +func (or *ObsReport) StartProfilesOp(ctx context.Context) context.Context { + return or.startOp(ctx, ExportTraceDataOperationSuffix) +} + +// EndProfilesOp completes the export operation that was started with startProfilesOp. +func (or *ObsReport) EndProfilesOp(ctx context.Context, numSpans int, err error) { + numSent, numFailedToSend := toNumItems(numSpans, err) + endSpan(ctx, err, numSent, numFailedToSend, SentSamplesKey, FailedToSendSamplesKey) +} + +// startOp creates the span used to trace the operation. Returning +// the updated context and the created span. +func (or *ObsReport) startOp(ctx context.Context, operationSuffix string) context.Context { + spanName := or.spanNamePrefix + operationSuffix + ctx, _ = or.tracer.Start(ctx, spanName) + return ctx +} + +func (or *ObsReport) recordMetrics(ctx context.Context, signal pipeline.Signal, sent, failed int64) { + var sentMeasure, failedMeasure metric.Int64Counter + switch signal { + case pipeline.SignalTraces: + sentMeasure = or.TelemetryBuilder.ExporterSentSpans + failedMeasure = or.TelemetryBuilder.ExporterSendFailedSpans + case pipeline.SignalMetrics: + sentMeasure = or.TelemetryBuilder.ExporterSentMetricPoints + failedMeasure = or.TelemetryBuilder.ExporterSendFailedMetricPoints + case pipeline.SignalLogs: + sentMeasure = or.TelemetryBuilder.ExporterSentLogRecords + failedMeasure = or.TelemetryBuilder.ExporterSendFailedLogRecords + } + + sentMeasure.Add(ctx, sent, or.otelAttrs) + failedMeasure.Add(ctx, failed, or.otelAttrs) +} + +func endSpan(ctx context.Context, err error, numSent, numFailedToSend int64, sentItemsKey, failedToSendItemsKey string) { + span := trace.SpanFromContext(ctx) + // End the span according to errors. 
+ if span.IsRecording() { + span.SetAttributes( + attribute.Int64(sentItemsKey, numSent), + attribute.Int64(failedToSendItemsKey, numFailedToSend), + ) + if err != nil { + span.SetStatus(codes.Error, err.Error()) + } + } + span.End() +} + +func toNumItems(numExportedItems int, err error) (int64, int64) { + if err != nil { + return 0, int64(numExportedItems) + } + return int64(numExportedItems), 0 +} + +func (or *ObsReport) RecordEnqueueFailure(ctx context.Context, signal pipeline.Signal, failed int64) { + var enqueueFailedMeasure metric.Int64Counter + switch signal { + case pipeline.SignalTraces: + enqueueFailedMeasure = or.TelemetryBuilder.ExporterEnqueueFailedSpans + case pipeline.SignalMetrics: + enqueueFailedMeasure = or.TelemetryBuilder.ExporterEnqueueFailedMetricPoints + case pipeline.SignalLogs: + enqueueFailedMeasure = or.TelemetryBuilder.ExporterEnqueueFailedLogRecords + } + + enqueueFailedMeasure.Add(ctx, failed, or.otelAttrs) +} diff --git a/vendor/go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics/obs_exporter.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/obsmetrics.go similarity index 51% rename from vendor/go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics/obs_exporter.go rename to vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/obsmetrics.go index b87270f5e5a..ae9e89942b3 100644 --- a/vendor/go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics/obs_exporter.go +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/obsmetrics.go @@ -1,38 +1,40 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package obsmetrics // import "go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics" +package internal // import "go.opentelemetry.io/collector/exporter/exporterhelper/internal" const ( + // spanNameSep is duplicate between receiver and exporter. + spanNameSep = "/" + // ExporterKey used to identify exporters in metrics and traces. ExporterKey = "exporter" + // DataTypeKey used to identify the data type in the queue size metric. + DataTypeKey = "data_type" + // SentSpansKey used to track spans sent by exporters. SentSpansKey = "sent_spans" // FailedToSendSpansKey used to track spans that failed to be sent by exporters. FailedToSendSpansKey = "send_failed_spans" - // FailedToEnqueueSpansKey used to track spans that failed to be enqueued by exporters. - FailedToEnqueueSpansKey = "enqueue_failed_spans" // SentMetricPointsKey used to track metric points sent by exporters. SentMetricPointsKey = "sent_metric_points" // FailedToSendMetricPointsKey used to track metric points that failed to be sent by exporters. FailedToSendMetricPointsKey = "send_failed_metric_points" - // FailedToEnqueueMetricPointsKey used to track metric points that failed to be enqueued by exporters. - FailedToEnqueueMetricPointsKey = "enqueue_failed_metric_points" // SentLogRecordsKey used to track logs sent by exporters. SentLogRecordsKey = "sent_log_records" // FailedToSendLogRecordsKey used to track logs that failed to be sent by exporters. FailedToSendLogRecordsKey = "send_failed_log_records" - // FailedToEnqueueLogRecordsKey used to track logs that failed to be enqueued by exporters. 
- FailedToEnqueueLogRecordsKey = "enqueue_failed_log_records" -) -var ( - ExporterPrefix = ExporterKey + SpanNameSep - ExporterMetricPrefix = ExporterKey + MetricNameSep - ExportTraceDataOperationSuffix = SpanNameSep + "traces" - ExportMetricsOperationSuffix = SpanNameSep + "metrics" - ExportLogsOperationSuffix = SpanNameSep + "logs" + // SentSamplesKey used to track profiles samples sent by exporters. + SentSamplesKey = "sent_samples" + // FailedToSendSamplesKey used to track samples that failed to be sent by exporters. + FailedToSendSamplesKey = "send_failed_samples" + + ExporterPrefix = ExporterKey + spanNameSep + ExportTraceDataOperationSuffix = spanNameSep + "traces" + ExportMetricsOperationSuffix = spanNameSep + "metrics" + ExportLogsOperationSuffix = spanNameSep + "logs" ) diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/queue_sender.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/queue_sender.go new file mode 100644 index 00000000000..509c747115b --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/queue_sender.go @@ -0,0 +1,200 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/collector/exporter/exporterhelper/internal" + +import ( + "context" + "errors" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/trace" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/exporter" + "go.opentelemetry.io/collector/exporter/exporterbatcher" + "go.opentelemetry.io/collector/exporter/exporterqueue" + "go.opentelemetry.io/collector/exporter/internal" + "go.opentelemetry.io/collector/exporter/internal/queue" +) + +const defaultQueueSize = 1000 + +// QueueConfig defines configuration for queueing batches before sending to the consumerSender. +type QueueConfig struct { + // Enabled indicates whether to not enqueue batches before sending to the consumerSender. + Enabled bool `mapstructure:"enabled"` + // NumConsumers is the number of consumers from the queue. Defaults to 10. + // If batching is enabled, a combined batch cannot contain more requests than the number of consumers. + // So it's recommended to set higher number of consumers if batching is enabled. + NumConsumers int `mapstructure:"num_consumers"` + // QueueSize is the maximum number of batches allowed in queue at a given time. + QueueSize int `mapstructure:"queue_size"` + // StorageID if not empty, enables the persistent storage and uses the component specified + // as a storage extension for the persistent queue + StorageID *component.ID `mapstructure:"storage"` +} + +// NewDefaultQueueConfig returns the default config for QueueConfig. 
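The function below pairs the 1000-batch default with batches of up to 8192 spans, which is where the "up to 8 million spans" estimate in its comment comes from (1000 × 8192 = 8,192,000). A sketch of adjusting and validating it (values illustrative; Validate is defined just after):

```go
qCfg := NewDefaultQueueConfig()
qCfg.NumConsumers = 4 // also becomes the batch sender's concurrency limit
qCfg.QueueSize = 5000 // counted in batches, not items
if err := qCfg.Validate(); err != nil {
	return err
}
```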
+func NewDefaultQueueConfig() QueueConfig { + return QueueConfig{ + Enabled: true, + NumConsumers: 10, + // By default, batches are 8192 spans, for a total of up to 8 million spans in the queue + // This can be estimated at 1-4 GB worth of maximum memory usage + // This default is probably still too high, and may be adjusted further down in a future release + QueueSize: defaultQueueSize, + } +} + +// Validate checks if the QueueConfig configuration is valid +func (qCfg *QueueConfig) Validate() error { + if !qCfg.Enabled { + return nil + } + + if qCfg.QueueSize <= 0 { + return errors.New("queue size must be positive") + } + + if qCfg.NumConsumers <= 0 { + return errors.New("number of queue consumers must be positive") + } + + return nil +} + +type QueueSender struct { + BaseRequestSender + queue exporterqueue.Queue[internal.Request] + numConsumers int + traceAttribute attribute.KeyValue + batcher queue.Batcher + consumers *queue.Consumers[internal.Request] + + obsrep *ObsReport + exporterID component.ID + logger *zap.Logger + shutdownFns []component.ShutdownFunc +} + +func NewQueueSender( + q exporterqueue.Queue[internal.Request], + set exporter.Settings, + numConsumers int, + exportFailureMessage string, + obsrep *ObsReport, + batcherCfg exporterbatcher.Config, +) *QueueSender { + qs := &QueueSender{ + queue: q, + numConsumers: numConsumers, + traceAttribute: attribute.String(ExporterKey, set.ID.String()), + obsrep: obsrep, + exporterID: set.ID, + logger: set.Logger, + } + + exportFunc := func(ctx context.Context, req internal.Request) error { + err := qs.NextSender.Send(ctx, req) + if err != nil { + set.Logger.Error("Exporting failed. Dropping data."+exportFailureMessage, + zap.Error(err), zap.Int("dropped_items", req.ItemsCount())) + } + return err + } + if usePullingBasedExporterQueueBatcher.IsEnabled() { + qs.batcher, _ = queue.NewBatcher(batcherCfg, q, exportFunc, numConsumers) + } else { + qs.consumers = queue.NewQueueConsumers[internal.Request](q, numConsumers, exportFunc) + } + return qs +} + +// Start is invoked during service startup. +func (qs *QueueSender) Start(ctx context.Context, host component.Host) error { + if err := qs.queue.Start(ctx, host); err != nil { + return err + } + + if usePullingBasedExporterQueueBatcher.IsEnabled() { + if err := qs.batcher.Start(ctx, host); err != nil { + return err + } + } else { + if err := qs.consumers.Start(ctx, host); err != nil { + return err + } + } + + dataTypeAttr := attribute.String(DataTypeKey, qs.obsrep.Signal.String()) + + reg1, err1 := qs.obsrep.TelemetryBuilder.InitExporterQueueSize(func() int64 { return int64(qs.queue.Size()) }, + metric.WithAttributeSet(attribute.NewSet(qs.traceAttribute, dataTypeAttr))) + + if reg1 != nil { + qs.shutdownFns = append(qs.shutdownFns, func(context.Context) error { + return reg1.Unregister() + }) + } + + reg2, err2 := qs.obsrep.TelemetryBuilder.InitExporterQueueCapacity(func() int64 { return int64(qs.queue.Capacity()) }, + metric.WithAttributeSet(attribute.NewSet(qs.traceAttribute))) + + if reg2 != nil { + qs.shutdownFns = append(qs.shutdownFns, func(context.Context) error { + return reg2.Unregister() + }) + } + + return errors.Join(err1, err2) +} + +// Shutdown is invoked during service shutdown. +func (qs *QueueSender) Shutdown(ctx context.Context) error { + // Stop the queue and consumers, this will drain the queue and will call the retry (which is stopped) that will only + // try once every request. 
+ + for _, fn := range qs.shutdownFns { + err := fn(ctx) + if err != nil { + qs.logger.Warn("Error while shutting down QueueSender", zap.Error(err)) + } + } + qs.shutdownFns = nil + + if err := qs.queue.Shutdown(ctx); err != nil { + return err + } + if usePullingBasedExporterQueueBatcher.IsEnabled() { + return qs.batcher.Shutdown(ctx) + } + return qs.consumers.Shutdown(ctx) +} + +// send implements the requestSender interface. It puts the request in the queue. +func (qs *QueueSender) Send(ctx context.Context, req internal.Request) error { + // Prevent cancellation and deadline to propagate to the context stored in the queue. + // The grpc/http based receivers will cancel the request context after this function returns. + c := context.WithoutCancel(ctx) + + span := trace.SpanFromContext(c) + if err := qs.queue.Offer(c, req); err != nil { + span.AddEvent("Failed to enqueue item.", trace.WithAttributes(qs.traceAttribute)) + return err + } + + span.AddEvent("Enqueued item.", trace.WithAttributes(qs.traceAttribute)) + return nil +} + +type MockHost struct { + component.Host + Ext map[component.ID]component.Component +} + +func (nh *MockHost) GetExtensions() map[component.ID]component.Component { + return nh.Ext +} diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/request.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/request.go new file mode 100644 index 00000000000..85bbf3311b1 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/request.go @@ -0,0 +1,152 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/collector/exporter/exporterhelper/internal" + +import ( + "context" + "sync/atomic" + "time" + + "go.opentelemetry.io/collector/exporter/exporterbatcher" + "go.opentelemetry.io/collector/exporter/internal" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/pprofile" + "go.opentelemetry.io/collector/pdata/ptrace" +) + +type fakeRequestSink struct { + requestsCount *atomic.Int64 + itemsCount *atomic.Int64 +} + +func newFakeRequestSink() *fakeRequestSink { + return &fakeRequestSink{ + requestsCount: new(atomic.Int64), + itemsCount: new(atomic.Int64), + } +} + +type fakeRequest struct { + items int + exportErr error + mergeErr error + delay time.Duration + sink *fakeRequestSink +} + +func (r *fakeRequest) Export(ctx context.Context) error { + select { + case <-ctx.Done(): + return ctx.Err() + case <-time.After(r.delay): + } + if r.exportErr != nil { + return r.exportErr + } + if r.sink != nil { + r.sink.requestsCount.Add(1) + r.sink.itemsCount.Add(int64(r.items)) + } + return nil +} + +func (r *fakeRequest) ItemsCount() int { + return r.items +} + +func (r *fakeRequest) Merge(_ context.Context, + r2 internal.Request, +) (internal.Request, error) { + if r == nil { + return r2, nil + } + fr2 := r2.(*fakeRequest) + if fr2.mergeErr != nil { + return nil, fr2.mergeErr + } + return &fakeRequest{ + items: r.items + fr2.items, + sink: r.sink, + exportErr: fr2.exportErr, + delay: r.delay + fr2.delay, + }, nil +} + +func (r *fakeRequest) MergeSplit(ctx context.Context, cfg exporterbatcher.MaxSizeConfig, + r2 internal.Request, +) ([]internal.Request, error) { + if r.mergeErr != nil { + return nil, r.mergeErr + } + + maxItems := cfg.MaxSizeItems + if maxItems == 0 { + r, err := r.Merge(ctx, r2) + return []internal.Request{r}, err + } + + var fr2 
*fakeRequest
+	if r2 == nil {
+		fr2 = &fakeRequest{sink: r.sink, exportErr: r.exportErr, delay: r.delay}
+	} else {
+		if r2.(*fakeRequest).mergeErr != nil {
+			return nil, r2.(*fakeRequest).mergeErr
+		}
+		fr2 = r2.(*fakeRequest)
+		fr2 = &fakeRequest{items: fr2.items, sink: fr2.sink, exportErr: fr2.exportErr, delay: fr2.delay}
+	}
+	var res []internal.Request
+
+	// fill fr1 to maxItems if it's not nil
+	r = &fakeRequest{items: r.items, sink: r.sink, exportErr: r.exportErr, delay: r.delay}
+	if fr2.items <= maxItems-r.items {
+		r.items += fr2.items
+		if fr2.exportErr != nil {
+			r.exportErr = fr2.exportErr
+		}
+		return []internal.Request{r}, nil
+	}
+	// if split is needed, we don't propagate exportErr from fr2 to fr1 to test more cases
+	fr2.items -= maxItems - r.items
+	r.items = maxItems
+	res = append(res, r)
+
+	// split fr2 to maxItems
+	for {
+		if fr2.items <= maxItems {
+			res = append(res, &fakeRequest{items: fr2.items, sink: fr2.sink, exportErr: fr2.exportErr, delay: fr2.delay})
+			break
+		}
+		res = append(res, &fakeRequest{items: maxItems, sink: fr2.sink, exportErr: fr2.exportErr, delay: fr2.delay})
+		fr2.items -= maxItems
+	}
+
+	return res, nil
+}
+
+func RequestFromMetricsFunc(reqErr error) func(context.Context, pmetric.Metrics) (internal.Request, error) {
+	return func(_ context.Context, md pmetric.Metrics) (internal.Request, error) {
+		return &fakeRequest{items: md.DataPointCount(), exportErr: reqErr}, nil
+	}
+}
+
+func RequestFromTracesFunc(reqErr error) func(context.Context, ptrace.Traces) (internal.Request, error) {
+	return func(_ context.Context, td ptrace.Traces) (internal.Request, error) {
+		return &fakeRequest{items: td.SpanCount(), exportErr: reqErr}, nil
+	}
+}
+
+func RequestFromLogsFunc(reqErr error) func(context.Context, plog.Logs) (internal.Request, error) {
+	return func(_ context.Context, ld plog.Logs) (internal.Request, error) {
+		return &fakeRequest{items: ld.LogRecordCount(), exportErr: reqErr}, nil
+	}
+}
+
+func RequestFromProfilesFunc(reqErr error) func(context.Context, pprofile.Profiles) (internal.Request, error) {
+	return func(_ context.Context, pd pprofile.Profiles) (internal.Request, error) {
+		return &fakeRequest{items: pd.SampleCount(), exportErr: reqErr}, nil
+	}
+}
diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/request_sender.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/request_sender.go
new file mode 100644
index 00000000000..683aca40d79
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/request_sender.go
@@ -0,0 +1,33 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package internal // import "go.opentelemetry.io/collector/exporter/exporterhelper/internal"
+
+import (
+	"context"
+ + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/exporter/internal" +) + +type RequestSender interface { + component.Component + Send(context.Context, internal.Request) error + SetNextSender(nextSender RequestSender) +} + +type BaseRequestSender struct { + component.StartFunc + component.ShutdownFunc + NextSender RequestSender +} + +var _ RequestSender = (*BaseRequestSender)(nil) + +func (b *BaseRequestSender) Send(ctx context.Context, req internal.Request) error { + return b.NextSender.Send(ctx, req) +} + +func (b *BaseRequestSender) SetNextSender(nextSender RequestSender) { + b.NextSender = nextSender +} diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/retry_sender.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/retry_sender.go new file mode 100644 index 00000000000..db5d96d3fa5 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/retry_sender.go @@ -0,0 +1,142 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/collector/exporter/exporterhelper/internal" + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/cenkalti/backoff/v4" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/config/configretry" + "go.opentelemetry.io/collector/consumer/consumererror" + "go.opentelemetry.io/collector/exporter" + "go.opentelemetry.io/collector/exporter/internal" + "go.opentelemetry.io/collector/exporter/internal/experr" +) + +// TODO: Clean this by forcing all exporters to return an internal error type that always include the information about retries. +type throttleRetry struct { + err error + delay time.Duration +} + +func (t throttleRetry) Error() string { + return "Throttle (" + t.delay.String() + "), error: " + t.err.Error() +} + +func (t throttleRetry) Unwrap() error { + return t.err +} + +// NewThrottleRetry creates a new throttle retry error. +func NewThrottleRetry(err error, delay time.Duration) error { + return throttleRetry{ + err: err, + delay: delay, + } +} + +type retrySender struct { + BaseRequestSender + traceAttribute attribute.KeyValue + cfg configretry.BackOffConfig + stopCh chan struct{} + logger *zap.Logger +} + +func newRetrySender(config configretry.BackOffConfig, set exporter.Settings) *retrySender { + return &retrySender{ + traceAttribute: attribute.String(ExporterKey, set.ID.String()), + cfg: config, + stopCh: make(chan struct{}), + logger: set.Logger, + } +} + +func (rs *retrySender) Shutdown(context.Context) error { + close(rs.stopCh) + return nil +} + +// send implements the requestSender interface +func (rs *retrySender) Send(ctx context.Context, req internal.Request) error { + // Do not use NewExponentialBackOff since it calls Reset and the code here must + // call Reset after changing the InitialInterval (this saves an unnecessary call to Now). 
+ expBackoff := backoff.ExponentialBackOff{ + InitialInterval: rs.cfg.InitialInterval, + RandomizationFactor: rs.cfg.RandomizationFactor, + Multiplier: rs.cfg.Multiplier, + MaxInterval: rs.cfg.MaxInterval, + MaxElapsedTime: rs.cfg.MaxElapsedTime, + Stop: backoff.Stop, + Clock: backoff.SystemClock, + } + expBackoff.Reset() + span := trace.SpanFromContext(ctx) + retryNum := int64(0) + for { + span.AddEvent( + "Sending request.", + trace.WithAttributes(rs.traceAttribute, attribute.Int64("retry_num", retryNum))) + + err := rs.NextSender.Send(ctx, req) + if err == nil { + return nil + } + + // Immediately drop data on permanent errors. + if consumererror.IsPermanent(err) { + return fmt.Errorf("not retryable error: %w", err) + } + + if errReq, ok := req.(internal.RequestErrorHandler); ok { + req = errReq.OnError(err) + } + + backoffDelay := expBackoff.NextBackOff() + if backoffDelay == backoff.Stop { + return fmt.Errorf("no more retries left: %w", err) + } + + throttleErr := throttleRetry{} + if errors.As(err, &throttleErr) { + backoffDelay = max(backoffDelay, throttleErr.delay) + } + + if deadline, has := ctx.Deadline(); has && time.Until(deadline) < backoffDelay { + // The delay is longer than the deadline. There is no point in + // waiting for cancelation. + return fmt.Errorf("request will be cancelled before next retry: %w", err) + } + + backoffDelayStr := backoffDelay.String() + span.AddEvent( + "Exporting failed. Will retry the request after interval.", + trace.WithAttributes( + rs.traceAttribute, + attribute.String("interval", backoffDelayStr), + attribute.String("error", err.Error()))) + rs.logger.Info( + "Exporting failed. Will retry the request after interval.", + zap.Error(err), + zap.String("interval", backoffDelayStr), + ) + retryNum++ + + // back-off, but get interrupted when shutting down or request is cancelled or timed out. 
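+	// The throttle check above lets an exporter stretch this wait: returning
+	// NewThrottleRetry(err, d) from its push function guarantees a delay of
+	// at least d. A hedged sketch (resp, parseRetryAfter, and retryAfter are
+	// assumed names, not part of this package):
+	//
+	//	// e.g. honoring an HTTP 429 Retry-After header in a push function
+	//	if retryAfter, ok := parseRetryAfter(resp); ok {
+	//		return NewThrottleRetry(err, retryAfter)
+	//	}
+	//
+	// The select below then waits out the delay unless the caller's context
+	// ends first or the component is shut down.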
+	select {
+	case <-ctx.Done():
+		return fmt.Errorf("request is cancelled or timed out: %w", err)
+	case <-rs.stopCh:
+		return experr.NewShutdownErr(err)
+	case <-time.After(backoffDelay):
+	}
+}
diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/test_util.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/test_util.go
new file mode 100644
index 00000000000..6b94ccf2d65
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/test_util.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package internal // import "go.opentelemetry.io/collector/exporter/exporterhelper/internal"
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+
+	"go.opentelemetry.io/collector/featuregate"
+)
+
+func setFeatureGateForTest(t testing.TB, gate *featuregate.Gate, enabled bool) func() {
+	originalValue := gate.IsEnabled()
+	require.NoError(t, featuregate.GlobalRegistry().Set(gate.ID(), enabled))
+	return func() {
+		require.NoError(t, featuregate.GlobalRegistry().Set(gate.ID(), originalValue))
+	}
+}
diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/timeout_sender.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/timeout_sender.go
new file mode 100644
index 00000000000..5abae1b6746
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/internal/timeout_sender.go
@@ -0,0 +1,52 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package internal // import "go.opentelemetry.io/collector/exporter/exporterhelper/internal"
+
+import (
+	"context"
+	"errors"
+	"time"
+
+	"go.opentelemetry.io/collector/exporter/internal"
+)
+
+// TimeoutConfig for timeout. The timeout applies to individual attempts to send data to the backend.
+type TimeoutConfig struct {
+	// Timeout is the timeout for every attempt to send data to the backend.
+	// A zero timeout means no timeout.
+	Timeout time.Duration `mapstructure:"timeout"`
+}
+
+func (ts *TimeoutConfig) Validate() error {
+	// Negative timeouts are not acceptable, since all sends will fail.
+	if ts.Timeout < 0 {
+		return errors.New("'timeout' must be non-negative")
+	}
+	return nil
+}
+
+// NewDefaultTimeoutConfig returns the default config for TimeoutConfig.
+func NewDefaultTimeoutConfig() TimeoutConfig {
+	return TimeoutConfig{
+		Timeout: 5 * time.Second,
+	}
+}
+
+// TimeoutSender is a RequestSender that adds a `timeout` to every request that passes this sender.
+type TimeoutSender struct {
+	BaseRequestSender
+	cfg TimeoutConfig
+}
+
+func (ts *TimeoutSender) Send(ctx context.Context, req internal.Request) error {
+	// TODO: Remove this by not creating the timeout sender when the timeout is 0.
+	if ts.cfg.Timeout == 0 {
+		return req.Export(ctx)
+	}
+	// Intentionally don't overwrite the context stored inside the request: on retries
+	// the deadline would then never be refreshed, and this attempt's deadline most
+	// likely falls before the next one.
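+	// Concretely (hedged example): with the default 5s timeout, the first
+	// attempt gets deadline t0+5s; when the retry sender re-enters Send at
+	// time t1, the next attempt gets a fresh t1+5s deadline instead of
+	// inheriting the stale one. A configuration sketch:
+	//
+	//	cfg := NewDefaultTimeoutConfig() // Timeout: 5 * time.Second
+	//	cfg.Timeout = 10 * time.Second   // 0 disables the timeout; negative values fail Validate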
+ tCtx, cancelFunc := context.WithTimeout(ctx, ts.cfg.Timeout) + defer cancelFunc() + return req.Export(tCtx) +} diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/logs.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/logs.go index 2706ec7fe92..55652dd7b4a 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/logs.go +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/logs.go @@ -13,13 +13,17 @@ import ( "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/consumererror" "go.opentelemetry.io/collector/exporter" + "go.opentelemetry.io/collector/exporter/exporterhelper/internal" "go.opentelemetry.io/collector/exporter/exporterqueue" "go.opentelemetry.io/collector/exporter/internal/queue" "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pipeline" ) -var logsMarshaler = &plog.ProtoMarshaler{} -var logsUnmarshaler = &plog.ProtoUnmarshaler{} +var ( + logsMarshaler = &plog.ProtoMarshaler{} + logsUnmarshaler = &plog.ProtoUnmarshaler{} +) type logsRequest struct { ld plog.Logs @@ -64,14 +68,14 @@ func (req *logsRequest) ItemsCount() int { } type logsExporter struct { - *baseExporter + *internal.BaseExporter consumer.Logs } -// NewLogsExporter creates an exporter.Logs that records observability metrics and wraps every request with a Span. -func NewLogsExporter( +// NewLogs creates an exporter.Logs that records observability metrics and wraps every request with a Span. +func NewLogs( ctx context.Context, - set exporter.CreateSettings, + set exporter.Settings, cfg component.Config, pusher consumer.ConsumeLogsFunc, options ...Option, @@ -83,10 +87,9 @@ func NewLogsExporter( return nil, errNilPushLogsData } logsOpts := []Option{ - withMarshaler(logsRequestMarshaler), withUnmarshaler(newLogsRequestUnmarshalerFunc(pusher)), - withBatchFuncs(mergeLogs, mergeSplitLogs), + internal.WithMarshaler(logsRequestMarshaler), internal.WithUnmarshaler(newLogsRequestUnmarshalerFunc(pusher)), } - return NewLogsRequestExporter(ctx, set, requestFromLogs(pusher), append(logsOpts, options...)...) + return NewLogsRequest(ctx, set, requestFromLogs(pusher), append(logsOpts, options...)...) } // RequestFromLogsFunc converts plog.Logs data into a user-defined request. @@ -101,12 +104,12 @@ func requestFromLogs(pusher consumer.ConsumeLogsFunc) RequestFromLogsFunc { } } -// NewLogsRequestExporter creates new logs exporter based on custom LogsConverter and RequestSender. +// NewLogsRequest creates new logs exporter based on custom LogsConverter and RequestSender. // Experimental: This API is at the early stage of development and may change without backward compatibility // until https://github.com/open-telemetry/opentelemetry-collector/issues/8122 is resolved. -func NewLogsRequestExporter( +func NewLogsRequest( _ context.Context, - set exporter.CreateSettings, + set exporter.Settings, converter RequestFromLogsFunc, options ...Option, ) (exporter.Logs, error) { @@ -118,7 +121,7 @@ func NewLogsRequestExporter( return nil, errNilLogsConverter } - be, err := newBaseExporter(set, component.DataTypeLogs, newLogsExporterWithObservability, options...) + be, err := internal.NewBaseExporter(set, pipeline.SignalLogs, newLogsWithObservability, options...) 
if err != nil { return nil, err } @@ -131,31 +134,32 @@ func NewLogsRequestExporter( zap.Error(err)) return consumererror.NewPermanent(cErr) } - sErr := be.send(ctx, req) + sErr := be.Send(ctx, req) if errors.Is(sErr, queue.ErrQueueIsFull) { - be.obsrep.recordEnqueueFailure(ctx, component.DataTypeLogs, int64(req.ItemsCount())) + be.Obsrep.RecordEnqueueFailure(ctx, pipeline.SignalLogs, int64(req.ItemsCount())) } return sErr - }, be.consumerOptions...) + }, be.ConsumerOptions...) return &logsExporter{ - baseExporter: be, + BaseExporter: be, Logs: lc, }, err } type logsExporterWithObservability struct { - baseRequestSender - obsrep *ObsReport + internal.BaseRequestSender + obsrep *internal.ObsReport } -func newLogsExporterWithObservability(obsrep *ObsReport) requestSender { +func newLogsWithObservability(obsrep *internal.ObsReport) internal.RequestSender { return &logsExporterWithObservability{obsrep: obsrep} } -func (lewo *logsExporterWithObservability) send(ctx context.Context, req Request) error { +func (lewo *logsExporterWithObservability) Send(ctx context.Context, req Request) error { c := lewo.obsrep.StartLogsOp(ctx) - err := lewo.nextSender.send(c, req) - lewo.obsrep.EndLogsOp(c, req.ItemsCount(), err) + numLogRecords := req.ItemsCount() + err := lewo.NextSender.Send(c, req) + lewo.obsrep.EndLogsOp(c, numLogRecords, err) return err } diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/logs_batch.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/logs_batch.go index 296538bc0e0..3e5b1330de9 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/logs_batch.go +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/logs_batch.go @@ -11,25 +11,25 @@ import ( "go.opentelemetry.io/collector/pdata/plog" ) -// mergeLogs merges two logs requests into one. -func mergeLogs(_ context.Context, r1 Request, r2 Request) (Request, error) { - lr1, ok1 := r1.(*logsRequest) +// Merge merges the provided logs request into the current request and returns the merged request. +func (req *logsRequest) Merge(_ context.Context, r2 Request) (Request, error) { lr2, ok2 := r2.(*logsRequest) - if !ok1 || !ok2 { + if !ok2 { return nil, errors.New("invalid input type") } - lr2.ld.ResourceLogs().MoveAndAppendTo(lr1.ld.ResourceLogs()) - return lr1, nil + lr2.ld.ResourceLogs().MoveAndAppendTo(req.ld.ResourceLogs()) + return req, nil } -// mergeSplitLogs splits and/or merges the logs into multiple requests based on the MaxSizeConfig. -func mergeSplitLogs(_ context.Context, cfg exporterbatcher.MaxSizeConfig, r1 Request, r2 Request) ([]Request, error) { +// MergeSplit splits and/or merges the provided logs request and the current request into one or more requests +// conforming with the MaxSizeConfig. 
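+// For example (hedged sketch): with cfg.MaxSizeItems = 100, merging a request
+// holding 80 log records with one holding 50 yields two requests of 100 and
+// 30 records:
+//
+//	out, err := req1.MergeSplit(ctx, exporterbatcher.MaxSizeConfig{MaxSizeItems: 100}, req2)
+//	// err == nil, len(out) == 2 under the assumptions above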
+func (req *logsRequest) MergeSplit(_ context.Context, cfg exporterbatcher.MaxSizeConfig, r2 Request) ([]Request, error) { var ( res []Request destReq *logsRequest capacityLeft = cfg.MaxSizeItems ) - for _, req := range []Request{r1, r2} { + for _, req := range []Request{req, r2} { if req == nil { continue } diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/metadata.yaml b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/metadata.yaml index 26b06a37536..9ce34e0306a 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/metadata.yaml +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/metadata.yaml @@ -1,82 +1,122 @@ type: exporterhelper +github_project: open-telemetry/opentelemetry-collector status: class: exporter not_component: true stability: beta: [traces, metrics, logs] - distributions: [core, contrib] telemetry: metrics: exporter_sent_spans: enabled: true + stability: + level: alpha description: Number of spans successfully sent to destination. - unit: 1 + unit: "{spans}" sum: value_type: int monotonic: true exporter_send_failed_spans: enabled: true + stability: + level: alpha description: Number of spans in failed attempts to send to destination. - unit: 1 + unit: "{spans}" sum: value_type: int monotonic: true exporter_enqueue_failed_spans: enabled: true + stability: + level: alpha description: Number of spans failed to be added to the sending queue. - unit: 1 + unit: "{spans}" sum: value_type: int monotonic: true exporter_sent_metric_points: enabled: true + stability: + level: alpha description: Number of metric points successfully sent to destination. - unit: 1 + unit: "{datapoints}" sum: value_type: int monotonic: true exporter_send_failed_metric_points: enabled: true + stability: + level: alpha description: Number of metric points in failed attempts to send to destination. - unit: 1 + unit: "{datapoints}" sum: value_type: int monotonic: true exporter_enqueue_failed_metric_points: enabled: true + stability: + level: alpha description: Number of metric points failed to be added to the sending queue. - unit: 1 + unit: "{datapoints}" sum: value_type: int monotonic: true exporter_sent_log_records: enabled: true + stability: + level: alpha description: Number of log record successfully sent to destination. - unit: 1 + unit: "{records}" sum: value_type: int monotonic: true exporter_send_failed_log_records: enabled: true + stability: + level: alpha description: Number of log records in failed attempts to send to destination. - unit: 1 + unit: "{records}" sum: value_type: int monotonic: true exporter_enqueue_failed_log_records: enabled: true + stability: + level: alpha description: Number of log records failed to be added to the sending queue. 
- unit: 1 + unit: "{records}" sum: value_type: int monotonic: true + + exporter_queue_size: + enabled: true + stability: + level: alpha + description: Current size of the retry queue (in batches) + unit: "{batches}" + optional: true + gauge: + value_type: int + async: true + + exporter_queue_capacity: + enabled: true + stability: + level: alpha + description: Fixed capacity of the retry queue (in batches) + unit: "{batches}" + optional: true + gauge: + value_type: int + async: true diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/metrics.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/metrics.go index 83db18229dd..f84ed8c226e 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/metrics.go +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/metrics.go @@ -13,13 +13,17 @@ import ( "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/consumererror" "go.opentelemetry.io/collector/exporter" + "go.opentelemetry.io/collector/exporter/exporterhelper/internal" "go.opentelemetry.io/collector/exporter/exporterqueue" "go.opentelemetry.io/collector/exporter/internal/queue" "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pipeline" ) -var metricsMarshaler = &pmetric.ProtoMarshaler{} -var metricsUnmarshaler = &pmetric.ProtoUnmarshaler{} +var ( + metricsMarshaler = &pmetric.ProtoMarshaler{} + metricsUnmarshaler = &pmetric.ProtoUnmarshaler{} +) type metricsRequest struct { md pmetric.Metrics @@ -64,14 +68,14 @@ func (req *metricsRequest) ItemsCount() int { } type metricsExporter struct { - *baseExporter + *internal.BaseExporter consumer.Metrics } -// NewMetricsExporter creates an exporter.Metrics that records observability metrics and wraps every request with a Span. -func NewMetricsExporter( +// NewMetrics creates an exporter.Metrics that records observability metrics and wraps every request with a Span. +func NewMetrics( ctx context.Context, - set exporter.CreateSettings, + set exporter.Settings, cfg component.Config, pusher consumer.ConsumeMetricsFunc, options ...Option, @@ -83,10 +87,9 @@ func NewMetricsExporter( return nil, errNilPushMetricsData } metricsOpts := []Option{ - withMarshaler(metricsRequestMarshaler), withUnmarshaler(newMetricsRequestUnmarshalerFunc(pusher)), - withBatchFuncs(mergeMetrics, mergeSplitMetrics), + internal.WithMarshaler(metricsRequestMarshaler), internal.WithUnmarshaler(newMetricsRequestUnmarshalerFunc(pusher)), } - return NewMetricsRequestExporter(ctx, set, requestFromMetrics(pusher), append(metricsOpts, options...)...) + return NewMetricsRequest(ctx, set, requestFromMetrics(pusher), append(metricsOpts, options...)...) } // RequestFromMetricsFunc converts pdata.Metrics into a user-defined request. @@ -101,12 +104,12 @@ func requestFromMetrics(pusher consumer.ConsumeMetricsFunc) RequestFromMetricsFu } } -// NewMetricsRequestExporter creates a new metrics exporter based on a custom MetricsConverter and RequestSender. +// NewMetricsRequest creates a new metrics exporter based on a custom MetricsConverter and RequestSender. // Experimental: This API is at the early stage of development and may change without backward compatibility // until https://github.com/open-telemetry/opentelemetry-collector/issues/8122 is resolved. 
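+// Migration sketch for callers (hedged; the old identifiers are the ones
+// removed in this hunk):
+//
+//	// before this update
+//	exp, err := exporterhelper.NewMetricsExporter(ctx, set, cfg, pusher)
+//	// after: same arguments, but set is now exporter.Settings rather than
+//	// exporter.CreateSettings
+//	exp, err := exporterhelper.NewMetrics(ctx, set, cfg, pusher)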
-func NewMetricsRequestExporter( +func NewMetricsRequest( _ context.Context, - set exporter.CreateSettings, + set exporter.Settings, converter RequestFromMetricsFunc, options ...Option, ) (exporter.Metrics, error) { @@ -118,7 +121,7 @@ func NewMetricsRequestExporter( return nil, errNilMetricsConverter } - be, err := newBaseExporter(set, component.DataTypeMetrics, newMetricsSenderWithObservability, options...) + be, err := internal.NewBaseExporter(set, pipeline.SignalMetrics, newMetricsSenderWithObservability, options...) if err != nil { return nil, err } @@ -131,31 +134,32 @@ func NewMetricsRequestExporter( zap.Error(err)) return consumererror.NewPermanent(cErr) } - sErr := be.send(ctx, req) + sErr := be.Send(ctx, req) if errors.Is(sErr, queue.ErrQueueIsFull) { - be.obsrep.recordEnqueueFailure(ctx, component.DataTypeMetrics, int64(req.ItemsCount())) + be.Obsrep.RecordEnqueueFailure(ctx, pipeline.SignalMetrics, int64(req.ItemsCount())) } return sErr - }, be.consumerOptions...) + }, be.ConsumerOptions...) return &metricsExporter{ - baseExporter: be, + BaseExporter: be, Metrics: mc, }, err } type metricsSenderWithObservability struct { - baseRequestSender - obsrep *ObsReport + internal.BaseRequestSender + obsrep *internal.ObsReport } -func newMetricsSenderWithObservability(obsrep *ObsReport) requestSender { +func newMetricsSenderWithObservability(obsrep *internal.ObsReport) internal.RequestSender { return &metricsSenderWithObservability{obsrep: obsrep} } -func (mewo *metricsSenderWithObservability) send(ctx context.Context, req Request) error { +func (mewo *metricsSenderWithObservability) Send(ctx context.Context, req Request) error { c := mewo.obsrep.StartMetricsOp(ctx) - err := mewo.nextSender.send(c, req) - mewo.obsrep.EndMetricsOp(c, req.ItemsCount(), err) + numMetricDataPoints := req.ItemsCount() + err := mewo.NextSender.Send(c, req) + mewo.obsrep.EndMetricsOp(c, numMetricDataPoints, err) return err } diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/metrics_batch.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/metrics_batch.go index 1a6448c8496..6269576801f 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/metrics_batch.go +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/metrics_batch.go @@ -11,25 +11,25 @@ import ( "go.opentelemetry.io/collector/pdata/pmetric" ) -// mergeMetrics merges two metrics requests into one. -func mergeMetrics(_ context.Context, r1 Request, r2 Request) (Request, error) { - mr1, ok1 := r1.(*metricsRequest) +// Merge merges the provided metrics request into the current request and returns the merged request. +func (req *metricsRequest) Merge(_ context.Context, r2 Request) (Request, error) { mr2, ok2 := r2.(*metricsRequest) - if !ok1 || !ok2 { + if !ok2 { return nil, errors.New("invalid input type") } - mr2.md.ResourceMetrics().MoveAndAppendTo(mr1.md.ResourceMetrics()) - return mr1, nil + mr2.md.ResourceMetrics().MoveAndAppendTo(req.md.ResourceMetrics()) + return req, nil } -// mergeSplitMetrics splits and/or merges the metrics into multiple requests based on the MaxSizeConfig. -func mergeSplitMetrics(_ context.Context, cfg exporterbatcher.MaxSizeConfig, r1 Request, r2 Request) ([]Request, error) { +// MergeSplit splits and/or merges the provided metrics request and the current request into one or more requests +// conforming with the MaxSizeConfig. 
+func (req *metricsRequest) MergeSplit(_ context.Context, cfg exporterbatcher.MaxSizeConfig, r2 Request) ([]Request, error) { var ( res []Request destReq *metricsRequest capacityLeft = cfg.MaxSizeItems ) - for _, req := range []Request{r1, r2} { + for _, req := range []Request{req, r2} { if req == nil { continue } diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/obsexporter.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/obsexporter.go deleted file mode 100644 index e3a78c34b04..00000000000 --- a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/obsexporter.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package exporterhelper // import "go.opentelemetry.io/collector/exporter/exporterhelper" - -import ( - "context" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/trace" - "go.uber.org/zap" - - "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/config/configtelemetry" - "go.opentelemetry.io/collector/exporter" - "go.opentelemetry.io/collector/exporter/exporterhelper/internal/metadata" - "go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics" -) - -// ObsReport is a helper to add observability to an exporter. -type ObsReport struct { - level configtelemetry.Level - spanNamePrefix string - tracer trace.Tracer - logger *zap.Logger - - otelAttrs []attribute.KeyValue - telemetryBuilder *metadata.TelemetryBuilder -} - -// ObsReportSettings are settings for creating an ObsReport. -type ObsReportSettings struct { - ExporterID component.ID - ExporterCreateSettings exporter.CreateSettings -} - -// NewObsReport creates a new Exporter. -func NewObsReport(cfg ObsReportSettings) (*ObsReport, error) { - return newExporter(cfg) -} - -func newExporter(cfg ObsReportSettings) (*ObsReport, error) { - telemetryBuilder, err := metadata.NewTelemetryBuilder(cfg.ExporterCreateSettings.TelemetrySettings) - if err != nil { - return nil, err - } - - return &ObsReport{ - level: cfg.ExporterCreateSettings.TelemetrySettings.MetricsLevel, - spanNamePrefix: obsmetrics.ExporterPrefix + cfg.ExporterID.String(), - tracer: cfg.ExporterCreateSettings.TracerProvider.Tracer(cfg.ExporterID.String()), - logger: cfg.ExporterCreateSettings.Logger, - - otelAttrs: []attribute.KeyValue{ - attribute.String(obsmetrics.ExporterKey, cfg.ExporterID.String()), - }, - telemetryBuilder: telemetryBuilder, - }, nil -} - -// StartTracesOp is called at the start of an Export operation. -// The returned context should be used in other calls to the Exporter functions -// dealing with the same export operation. -func (or *ObsReport) StartTracesOp(ctx context.Context) context.Context { - return or.startOp(ctx, obsmetrics.ExportTraceDataOperationSuffix) -} - -// EndTracesOp completes the export operation that was started with StartTracesOp. -func (or *ObsReport) EndTracesOp(ctx context.Context, numSpans int, err error) { - numSent, numFailedToSend := toNumItems(numSpans, err) - or.recordMetrics(noCancellationContext{Context: ctx}, component.DataTypeTraces, numSent, numFailedToSend) - endSpan(ctx, err, numSent, numFailedToSend, obsmetrics.SentSpansKey, obsmetrics.FailedToSendSpansKey) -} - -// StartMetricsOp is called at the start of an Export operation. -// The returned context should be used in other calls to the Exporter functions -// dealing with the same export operation. 
-func (or *ObsReport) StartMetricsOp(ctx context.Context) context.Context { - return or.startOp(ctx, obsmetrics.ExportMetricsOperationSuffix) -} - -// EndMetricsOp completes the export operation that was started with -// StartMetricsOp. -func (or *ObsReport) EndMetricsOp(ctx context.Context, numMetricPoints int, err error) { - numSent, numFailedToSend := toNumItems(numMetricPoints, err) - or.recordMetrics(noCancellationContext{Context: ctx}, component.DataTypeMetrics, numSent, numFailedToSend) - endSpan(ctx, err, numSent, numFailedToSend, obsmetrics.SentMetricPointsKey, obsmetrics.FailedToSendMetricPointsKey) -} - -// StartLogsOp is called at the start of an Export operation. -// The returned context should be used in other calls to the Exporter functions -// dealing with the same export operation. -func (or *ObsReport) StartLogsOp(ctx context.Context) context.Context { - return or.startOp(ctx, obsmetrics.ExportLogsOperationSuffix) -} - -// EndLogsOp completes the export operation that was started with StartLogsOp. -func (or *ObsReport) EndLogsOp(ctx context.Context, numLogRecords int, err error) { - numSent, numFailedToSend := toNumItems(numLogRecords, err) - or.recordMetrics(noCancellationContext{Context: ctx}, component.DataTypeLogs, numSent, numFailedToSend) - endSpan(ctx, err, numSent, numFailedToSend, obsmetrics.SentLogRecordsKey, obsmetrics.FailedToSendLogRecordsKey) -} - -// startOp creates the span used to trace the operation. Returning -// the updated context and the created span. -func (or *ObsReport) startOp(ctx context.Context, operationSuffix string) context.Context { - spanName := or.spanNamePrefix + operationSuffix - ctx, _ = or.tracer.Start(ctx, spanName) - return ctx -} - -func (or *ObsReport) recordMetrics(ctx context.Context, dataType component.DataType, sent, failed int64) { - if or.level == configtelemetry.LevelNone { - return - } - var sentMeasure, failedMeasure metric.Int64Counter - switch dataType { - case component.DataTypeTraces: - sentMeasure = or.telemetryBuilder.ExporterSentSpans - failedMeasure = or.telemetryBuilder.ExporterSendFailedSpans - case component.DataTypeMetrics: - sentMeasure = or.telemetryBuilder.ExporterSentMetricPoints - failedMeasure = or.telemetryBuilder.ExporterSendFailedMetricPoints - case component.DataTypeLogs: - sentMeasure = or.telemetryBuilder.ExporterSentLogRecords - failedMeasure = or.telemetryBuilder.ExporterSendFailedLogRecords - } - - sentMeasure.Add(ctx, sent, metric.WithAttributes(or.otelAttrs...)) - failedMeasure.Add(ctx, failed, metric.WithAttributes(or.otelAttrs...)) -} - -func endSpan(ctx context.Context, err error, numSent, numFailedToSend int64, sentItemsKey, failedToSendItemsKey string) { - span := trace.SpanFromContext(ctx) - // End the span according to errors. 
- if span.IsRecording() { - span.SetAttributes( - attribute.Int64(sentItemsKey, numSent), - attribute.Int64(failedToSendItemsKey, numFailedToSend), - ) - if err != nil { - span.SetStatus(codes.Error, err.Error()) - } - } - span.End() -} - -func toNumItems(numExportedItems int, err error) (int64, int64) { - if err != nil { - return 0, int64(numExportedItems) - } - return int64(numExportedItems), 0 -} - -func (or *ObsReport) recordEnqueueFailure(ctx context.Context, dataType component.DataType, failed int64) { - var enqueueFailedMeasure metric.Int64Counter - switch dataType { - case component.DataTypeTraces: - enqueueFailedMeasure = or.telemetryBuilder.ExporterEnqueueFailedSpans - case component.DataTypeMetrics: - enqueueFailedMeasure = or.telemetryBuilder.ExporterEnqueueFailedMetricPoints - case component.DataTypeLogs: - enqueueFailedMeasure = or.telemetryBuilder.ExporterEnqueueFailedLogRecords - } - - enqueueFailedMeasure.Add(ctx, failed, metric.WithAttributes(or.otelAttrs...)) -} diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/queue_sender.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/queue_sender.go index 3e539d7f9a0..f09932c5b86 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/queue_sender.go +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/queue_sender.go @@ -3,179 +3,12 @@ package exporterhelper // import "go.opentelemetry.io/collector/exporter/exporterhelper" -import ( - "context" - "errors" - "time" +import "go.opentelemetry.io/collector/exporter/exporterhelper/internal" - "go.opentelemetry.io/otel/attribute" - otelmetric "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/trace" - "go.uber.org/multierr" - "go.uber.org/zap" +// QueueConfig defines configuration for queueing batches before sending to the consumerSender. +type QueueConfig = internal.QueueConfig - "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/exporter" - "go.opentelemetry.io/collector/exporter/exporterqueue" - "go.opentelemetry.io/collector/exporter/internal/queue" - "go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics" -) - -const defaultQueueSize = 1000 - -var ( - scopeName = "go.opentelemetry.io/collector/exporterhelper" -) - -// QueueSettings defines configuration for queueing batches before sending to the consumerSender. -type QueueSettings struct { - // Enabled indicates whether to not enqueue batches before sending to the consumerSender. - Enabled bool `mapstructure:"enabled"` - // NumConsumers is the number of consumers from the queue. Defaults to 10. - // If batching is enabled, a combined batch cannot contain more requests than the number of consumers. - // So it's recommended to set higher number of consumers if batching is enabled. - NumConsumers int `mapstructure:"num_consumers"` - // QueueSize is the maximum number of batches allowed in queue at a given time. - QueueSize int `mapstructure:"queue_size"` - // StorageID if not empty, enables the persistent storage and uses the component specified - // as a storage extension for the persistent queue - StorageID *component.ID `mapstructure:"storage"` -} - -// NewDefaultQueueSettings returns the default settings for QueueSettings. 
-func NewDefaultQueueSettings() QueueSettings { - return QueueSettings{ - Enabled: true, - NumConsumers: 10, - // By default, batches are 8192 spans, for a total of up to 8 million spans in the queue - // This can be estimated at 1-4 GB worth of maximum memory usage - // This default is probably still too high, and may be adjusted further down in a future release - QueueSize: defaultQueueSize, - } -} - -// Validate checks if the QueueSettings configuration is valid -func (qCfg *QueueSettings) Validate() error { - if !qCfg.Enabled { - return nil - } - - if qCfg.QueueSize <= 0 { - return errors.New("queue size must be positive") - } - - if qCfg.NumConsumers <= 0 { - return errors.New("number of queue consumers must be positive") - } - - return nil -} - -type queueSender struct { - baseRequestSender - fullName string - queue exporterqueue.Queue[Request] - numConsumers int - traceAttribute attribute.KeyValue - logger *zap.Logger - meter otelmetric.Meter - consumers *queue.Consumers[Request] - - metricCapacity otelmetric.Int64ObservableGauge - metricSize otelmetric.Int64ObservableGauge -} - -func newQueueSender(q exporterqueue.Queue[Request], set exporter.CreateSettings, numConsumers int, - exportFailureMessage string) *queueSender { - qs := &queueSender{ - fullName: set.ID.String(), - queue: q, - numConsumers: numConsumers, - traceAttribute: attribute.String(obsmetrics.ExporterKey, set.ID.String()), - logger: set.TelemetrySettings.Logger, - meter: set.TelemetrySettings.MeterProvider.Meter(scopeName), - } - consumeFunc := func(ctx context.Context, req Request) error { - err := qs.nextSender.send(ctx, req) - if err != nil { - set.Logger.Error("Exporting failed. Dropping data."+exportFailureMessage, - zap.Error(err), zap.Int("dropped_items", req.ItemsCount())) - } - return err - } - qs.consumers = queue.NewQueueConsumers[Request](q, numConsumers, consumeFunc) - return qs -} - -// Start is invoked during service startup. -func (qs *queueSender) Start(ctx context.Context, host component.Host) error { - if err := qs.consumers.Start(ctx, host); err != nil { - return err - } - - var err, errs error - - attrs := otelmetric.WithAttributeSet(attribute.NewSet(attribute.String(obsmetrics.ExporterKey, qs.fullName))) - - qs.metricSize, err = qs.meter.Int64ObservableGauge( - obsmetrics.ExporterKey+"/queue_size", - otelmetric.WithDescription("Current size of the retry queue (in batches)"), - otelmetric.WithUnit("1"), - otelmetric.WithInt64Callback(func(_ context.Context, o otelmetric.Int64Observer) error { - o.Observe(int64(qs.queue.Size()), attrs) - return nil - }), - ) - errs = multierr.Append(errs, err) - - qs.metricCapacity, err = qs.meter.Int64ObservableGauge( - obsmetrics.ExporterKey+"/queue_capacity", - otelmetric.WithDescription("Fixed capacity of the retry queue (in batches)"), - otelmetric.WithUnit("1"), - otelmetric.WithInt64Callback(func(_ context.Context, o otelmetric.Int64Observer) error { - o.Observe(int64(qs.queue.Capacity()), attrs) - return nil - })) - - errs = multierr.Append(errs, err) - return errs -} - -// Shutdown is invoked during service shutdown. -func (qs *queueSender) Shutdown(ctx context.Context) error { - // Stop the queue and consumers, this will drain the queue and will call the retry (which is stopped) that will only - // try once every request. - return qs.consumers.Shutdown(ctx) -} - -// send implements the requestSender interface. It puts the request in the queue. 
-func (qs *queueSender) send(ctx context.Context, req Request) error { - // Prevent cancellation and deadline to propagate to the context stored in the queue. - // The grpc/http based receivers will cancel the request context after this function returns. - c := noCancellationContext{Context: ctx} - - span := trace.SpanFromContext(c) - if err := qs.queue.Offer(c, req); err != nil { - span.AddEvent("Failed to enqueue item.", trace.WithAttributes(qs.traceAttribute)) - return err - } - - span.AddEvent("Enqueued item.", trace.WithAttributes(qs.traceAttribute)) - return nil -} - -type noCancellationContext struct { - context.Context -} - -func (noCancellationContext) Deadline() (deadline time.Time, ok bool) { - return -} - -func (noCancellationContext) Done() <-chan struct{} { - return nil -} - -func (noCancellationContext) Err() error { - return nil +// NewDefaultQueueConfig returns the default config for QueueConfig. +func NewDefaultQueueConfig() QueueConfig { + return internal.NewDefaultQueueConfig() } diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/retry_sender.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/retry_sender.go index 6e8a36f9ef4..5b4476bb1f6 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/retry_sender.go +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/retry_sender.go @@ -4,139 +4,12 @@ package exporterhelper // import "go.opentelemetry.io/collector/exporter/exporterhelper" import ( - "context" - "errors" - "fmt" "time" - "github.com/cenkalti/backoff/v4" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" - "go.uber.org/zap" - - "go.opentelemetry.io/collector/config/configretry" - "go.opentelemetry.io/collector/consumer/consumererror" - "go.opentelemetry.io/collector/exporter" - "go.opentelemetry.io/collector/exporter/internal/experr" - "go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics" + "go.opentelemetry.io/collector/exporter/exporterhelper/internal" ) -// TODO: Clean this by forcing all exporters to return an internal error type that always include the information about retries. -type throttleRetry struct { - err error - delay time.Duration -} - -func (t throttleRetry) Error() string { - return "Throttle (" + t.delay.String() + "), error: " + t.err.Error() -} - -func (t throttleRetry) Unwrap() error { - return t.err -} - // NewThrottleRetry creates a new throttle retry error. func NewThrottleRetry(err error, delay time.Duration) error { - return throttleRetry{ - err: err, - delay: delay, - } -} - -type retrySender struct { - baseRequestSender - traceAttribute attribute.KeyValue - cfg configretry.BackOffConfig - stopCh chan struct{} - logger *zap.Logger -} - -func newRetrySender(config configretry.BackOffConfig, set exporter.CreateSettings) *retrySender { - return &retrySender{ - traceAttribute: attribute.String(obsmetrics.ExporterKey, set.ID.String()), - cfg: config, - stopCh: make(chan struct{}), - logger: set.Logger, - } -} - -func (rs *retrySender) Shutdown(context.Context) error { - close(rs.stopCh) - return nil -} - -// send implements the requestSender interface -func (rs *retrySender) send(ctx context.Context, req Request) error { - // Do not use NewExponentialBackOff since it calls Reset and the code here must - // call Reset after changing the InitialInterval (this saves an unnecessary call to Now). 
- expBackoff := backoff.ExponentialBackOff{ - InitialInterval: rs.cfg.InitialInterval, - RandomizationFactor: rs.cfg.RandomizationFactor, - Multiplier: rs.cfg.Multiplier, - MaxInterval: rs.cfg.MaxInterval, - MaxElapsedTime: rs.cfg.MaxElapsedTime, - Stop: backoff.Stop, - Clock: backoff.SystemClock, - } - expBackoff.Reset() - span := trace.SpanFromContext(ctx) - retryNum := int64(0) - for { - span.AddEvent( - "Sending request.", - trace.WithAttributes(rs.traceAttribute, attribute.Int64("retry_num", retryNum))) - - err := rs.nextSender.send(ctx, req) - if err == nil { - return nil - } - - // Immediately drop data on permanent errors. - if consumererror.IsPermanent(err) { - return fmt.Errorf("not retryable error: %w", err) - } - - req = extractPartialRequest(req, err) - - backoffDelay := expBackoff.NextBackOff() - if backoffDelay == backoff.Stop { - return fmt.Errorf("no more retries left: %w", err) - } - - throttleErr := throttleRetry{} - if errors.As(err, &throttleErr) { - backoffDelay = max(backoffDelay, throttleErr.delay) - } - - backoffDelayStr := backoffDelay.String() - span.AddEvent( - "Exporting failed. Will retry the request after interval.", - trace.WithAttributes( - rs.traceAttribute, - attribute.String("interval", backoffDelayStr), - attribute.String("error", err.Error()))) - rs.logger.Info( - "Exporting failed. Will retry the request after interval.", - zap.Error(err), - zap.String("interval", backoffDelayStr), - ) - retryNum++ - - // back-off, but get interrupted when shutting down or request is cancelled or timed out. - select { - case <-ctx.Done(): - return fmt.Errorf("request is cancelled or timed out %w", err) - case <-rs.stopCh: - return experr.NewShutdownErr(err) - case <-time.After(backoffDelay): - } - } -} - -// max returns the larger of x or y. -func max(x, y time.Duration) time.Duration { - if x < y { - return y - } - return x + return internal.NewThrottleRetry(err, delay) } diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/timeout_sender.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/timeout_sender.go index 2a6364a2aaf..090caf2d7d9 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/timeout_sender.go +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/timeout_sender.go @@ -4,47 +4,12 @@ package exporterhelper // import "go.opentelemetry.io/collector/exporter/exporterhelper" import ( - "context" - "errors" - "time" + "go.opentelemetry.io/collector/exporter/exporterhelper/internal" ) -// TimeoutSettings for timeout. The timeout applies to individual attempts to send data to the backend. -type TimeoutSettings struct { - // Timeout is the timeout for every attempt to send data to the backend. - // A zero timeout means no timeout. - Timeout time.Duration `mapstructure:"timeout"` -} - -func (ts *TimeoutSettings) Validate() error { - // Negative timeouts are not acceptable, since all sends will fail. - if ts.Timeout < 0 { - return errors.New("'timeout' must be non-negative") - } - return nil -} - -// NewDefaultTimeoutSettings returns the default settings for TimeoutSettings. -func NewDefaultTimeoutSettings() TimeoutSettings { - return TimeoutSettings{ - Timeout: 5 * time.Second, - } -} - -// timeoutSender is a requestSender that adds a `timeout` to every request that passes this sender. 
-type timeoutSender struct { - baseRequestSender - cfg TimeoutSettings -} +type TimeoutConfig = internal.TimeoutConfig -func (ts *timeoutSender) send(ctx context.Context, req Request) error { - // TODO: Remove this by avoiding to create the timeout sender if timeout is 0. - if ts.cfg.Timeout == 0 { - return req.Export(ctx) - } - // Intentionally don't overwrite the context inside the request, because in case of retries deadline will not be - // updated because this deadline most likely is before the next one. - tCtx, cancelFunc := context.WithTimeout(ctx, ts.cfg.Timeout) - defer cancelFunc() - return req.Export(tCtx) +// NewDefaultTimeoutConfig returns the default config for TimeoutConfig. +func NewDefaultTimeoutConfig() TimeoutConfig { + return internal.NewDefaultTimeoutConfig() } diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/traces.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/traces.go index 6f5f39f3de5..2924eea1115 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/traces.go +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/traces.go @@ -13,13 +13,17 @@ import ( "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/consumererror" "go.opentelemetry.io/collector/exporter" + "go.opentelemetry.io/collector/exporter/exporterhelper/internal" "go.opentelemetry.io/collector/exporter/exporterqueue" "go.opentelemetry.io/collector/exporter/internal/queue" "go.opentelemetry.io/collector/pdata/ptrace" + "go.opentelemetry.io/collector/pipeline" ) -var tracesMarshaler = &ptrace.ProtoMarshaler{} -var tracesUnmarshaler = &ptrace.ProtoUnmarshaler{} +var ( + tracesMarshaler = &ptrace.ProtoMarshaler{} + tracesUnmarshaler = &ptrace.ProtoUnmarshaler{} +) type tracesRequest struct { td ptrace.Traces @@ -63,15 +67,15 @@ func (req *tracesRequest) ItemsCount() int { return req.td.SpanCount() } -type traceExporter struct { - *baseExporter +type tracesExporter struct { + *internal.BaseExporter consumer.Traces } -// NewTracesExporter creates an exporter.Traces that records observability metrics and wraps every request with a Span. -func NewTracesExporter( +// NewTraces creates an exporter.Traces that records observability metrics and wraps every request with a Span. +func NewTraces( ctx context.Context, - set exporter.CreateSettings, + set exporter.Settings, cfg component.Config, pusher consumer.ConsumeTracesFunc, options ...Option, @@ -83,10 +87,9 @@ func NewTracesExporter( return nil, errNilPushTraceData } tracesOpts := []Option{ - withMarshaler(tracesRequestMarshaler), withUnmarshaler(newTraceRequestUnmarshalerFunc(pusher)), - withBatchFuncs(mergeTraces, mergeSplitTraces), + internal.WithMarshaler(tracesRequestMarshaler), internal.WithUnmarshaler(newTraceRequestUnmarshalerFunc(pusher)), } - return NewTracesRequestExporter(ctx, set, requestFromTraces(pusher), append(tracesOpts, options...)...) + return NewTracesRequest(ctx, set, requestFromTraces(pusher), append(tracesOpts, options...)...) } // RequestFromTracesFunc converts ptrace.Traces into a user-defined Request. @@ -101,12 +104,12 @@ func requestFromTraces(pusher consumer.ConsumeTracesFunc) RequestFromTracesFunc } } -// NewTracesRequestExporter creates a new traces exporter based on a custom TracesConverter and RequestSender. +// NewTracesRequest creates a new traces exporter based on a custom TracesConverter and RequestSender. 
// Experimental: This API is at the early stage of development and may change without backward compatibility // until https://github.com/open-telemetry/opentelemetry-collector/issues/8122 is resolved. -func NewTracesRequestExporter( +func NewTracesRequest( _ context.Context, - set exporter.CreateSettings, + set exporter.Settings, converter RequestFromTracesFunc, options ...Option, ) (exporter.Traces, error) { @@ -118,7 +121,7 @@ func NewTracesRequestExporter( return nil, errNilTracesConverter } - be, err := newBaseExporter(set, component.DataTypeTraces, newTracesExporterWithObservability, options...) + be, err := internal.NewBaseExporter(set, pipeline.SignalTraces, newTracesWithObservability, options...) if err != nil { return nil, err } @@ -131,32 +134,33 @@ func NewTracesRequestExporter( zap.Error(err)) return consumererror.NewPermanent(cErr) } - sErr := be.send(ctx, req) + sErr := be.Send(ctx, req) if errors.Is(sErr, queue.ErrQueueIsFull) { - be.obsrep.recordEnqueueFailure(ctx, component.DataTypeTraces, int64(req.ItemsCount())) + be.Obsrep.RecordEnqueueFailure(ctx, pipeline.SignalTraces, int64(req.ItemsCount())) } return sErr - }, be.consumerOptions...) + }, be.ConsumerOptions...) - return &traceExporter{ - baseExporter: be, + return &tracesExporter{ + BaseExporter: be, Traces: tc, }, err } -type tracesExporterWithObservability struct { - baseRequestSender - obsrep *ObsReport +type tracesWithObservability struct { + internal.BaseRequestSender + obsrep *internal.ObsReport } -func newTracesExporterWithObservability(obsrep *ObsReport) requestSender { - return &tracesExporterWithObservability{obsrep: obsrep} +func newTracesWithObservability(obsrep *internal.ObsReport) internal.RequestSender { + return &tracesWithObservability{obsrep: obsrep} } -func (tewo *tracesExporterWithObservability) send(ctx context.Context, req Request) error { +func (tewo *tracesWithObservability) Send(ctx context.Context, req Request) error { c := tewo.obsrep.StartTracesOp(ctx) + numTraceSpans := req.ItemsCount() // Forward the data to the next consumer (this pusher is the next). - err := tewo.nextSender.send(c, req) - tewo.obsrep.EndTracesOp(c, req.ItemsCount(), err) + err := tewo.NextSender.Send(c, req) + tewo.obsrep.EndTracesOp(c, numTraceSpans, err) return err } diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/traces_batch.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/traces_batch.go index 1bdada95b7b..df7f7dd1274 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/traces_batch.go +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/traces_batch.go @@ -11,25 +11,25 @@ import ( "go.opentelemetry.io/collector/pdata/ptrace" ) -// mergeTraces merges two traces requests into one. -func mergeTraces(_ context.Context, r1 Request, r2 Request) (Request, error) { - tr1, ok1 := r1.(*tracesRequest) +// Merge merges the provided traces request into the current request and returns the merged request. +func (req *tracesRequest) Merge(_ context.Context, r2 Request) (Request, error) { tr2, ok2 := r2.(*tracesRequest) - if !ok1 || !ok2 { + if !ok2 { return nil, errors.New("invalid input type") } - tr2.td.ResourceSpans().MoveAndAppendTo(tr1.td.ResourceSpans()) - return tr1, nil + tr2.td.ResourceSpans().MoveAndAppendTo(req.td.ResourceSpans()) + return req, nil } -// mergeSplitTraces splits and/or merges the traces into multiple requests based on the MaxSizeConfig. 
-func mergeSplitTraces(_ context.Context, cfg exporterbatcher.MaxSizeConfig, r1 Request, r2 Request) ([]Request, error) { +// MergeSplit splits and/or merges the provided traces request and the current request into one or more requests +// conforming with the MaxSizeConfig. +func (req *tracesRequest) MergeSplit(_ context.Context, cfg exporterbatcher.MaxSizeConfig, r2 Request) ([]Request, error) { var ( res []Request destReq *tracesRequest capacityLeft = cfg.MaxSizeItems ) - for _, req := range []Request{r1, r2} { + for _, req := range []Request{req, r2} { if req == nil { continue } diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper/LICENSE b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/collector/confmap/provider/fileprovider/Makefile b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper/Makefile similarity index 100% rename from vendor/go.opentelemetry.io/collector/confmap/provider/fileprovider/Makefile rename to vendor/go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper/Makefile diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper/constants.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper/constants.go new file mode 100644 index 00000000000..ae681facd9a --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper/constants.go @@ -0,0 +1,19 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package xexporterhelper // import "go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper" + +import ( + "errors" +) + +var ( + // errNilConfig is returned when a nil config is given. + errNilConfig = errors.New("nil config") + // errNilLogger is returned when a nil logger is given. + errNilLogger = errors.New("nil logger") + // errNilPushProfileData is returned when a nil PushProfiles is given. + errNilPushProfileData = errors.New("nil PushProfiles") + // errNilProfilesConverter is returned when a nil RequestFromProfilesFunc is given.
+ errNilProfilesConverter = errors.New("nil RequestFromProfilesFunc") +) diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper/profiles.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper/profiles.go new file mode 100644 index 00000000000..d045dafbb81 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper/profiles.go @@ -0,0 +1,164 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package xexporterhelper // import "go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper" + +import ( + "context" + "errors" + + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/consumer/consumererror" + "go.opentelemetry.io/collector/consumer/consumererror/xconsumererror" + "go.opentelemetry.io/collector/consumer/xconsumer" + "go.opentelemetry.io/collector/exporter" + "go.opentelemetry.io/collector/exporter/exporterhelper" + "go.opentelemetry.io/collector/exporter/exporterhelper/internal" + "go.opentelemetry.io/collector/exporter/exporterqueue" + "go.opentelemetry.io/collector/exporter/xexporter" + "go.opentelemetry.io/collector/pdata/pprofile" + "go.opentelemetry.io/collector/pipeline/xpipeline" +) + +var ( + profilesMarshaler = &pprofile.ProtoMarshaler{} + profilesUnmarshaler = &pprofile.ProtoUnmarshaler{} +) + +type profilesRequest struct { + pd pprofile.Profiles + pusher xconsumer.ConsumeProfilesFunc +} + +func newProfilesRequest(pd pprofile.Profiles, pusher xconsumer.ConsumeProfilesFunc) exporterhelper.Request { + return &profilesRequest{ + pd: pd, + pusher: pusher, + } +} + +func newProfileRequestUnmarshalerFunc(pusher xconsumer.ConsumeProfilesFunc) exporterqueue.Unmarshaler[exporterhelper.Request] { + return func(bytes []byte) (exporterhelper.Request, error) { + profiles, err := profilesUnmarshaler.UnmarshalProfiles(bytes) + if err != nil { + return nil, err + } + return newProfilesRequest(profiles, pusher), nil + } +} + +func profilesRequestMarshaler(req exporterhelper.Request) ([]byte, error) { + return profilesMarshaler.MarshalProfiles(req.(*profilesRequest).pd) +} + +func (req *profilesRequest) OnError(err error) exporterhelper.Request { + var profileError xconsumererror.Profiles + if errors.As(err, &profileError) { + return newProfilesRequest(profileError.Data(), req.pusher) + } + return req +} + +func (req *profilesRequest) Export(ctx context.Context) error { + return req.pusher(ctx, req.pd) +} + +func (req *profilesRequest) ItemsCount() int { + return req.pd.SampleCount() +} + +type profileExporter struct { + *internal.BaseExporter + xconsumer.Profiles +} + +// NewProfilesExporter creates an xexporter.Profiles that records observability metrics and wraps every request with a Span. +func NewProfilesExporter( + ctx context.Context, + set exporter.Settings, + cfg component.Config, + pusher xconsumer.ConsumeProfilesFunc, + options ...exporterhelper.Option, +) (xexporter.Profiles, error) { + if cfg == nil { + return nil, errNilConfig + } + if pusher == nil { + return nil, errNilPushProfileData + } + profilesOpts := []exporterhelper.Option{ + internal.WithMarshaler(profilesRequestMarshaler), internal.WithUnmarshaler(newProfileRequestUnmarshalerFunc(pusher)), + } + return NewProfilesRequestExporter(ctx, set, requestFromProfiles(pusher), append(profilesOpts, options...)...) +} + +// RequestFromProfilesFunc converts pprofile.Profiles into a user-defined Request. 
+// Experimental: This API is at the early stage of development and may change without backward compatibility +// until https://github.com/open-telemetry/opentelemetry-collector/issues/8122 is resolved. +type RequestFromProfilesFunc func(context.Context, pprofile.Profiles) (exporterhelper.Request, error) + +// requestFromProfiles returns a RequestFromProfilesFunc that converts pprofile.Profiles into a Request. +func requestFromProfiles(pusher xconsumer.ConsumeProfilesFunc) RequestFromProfilesFunc { + return func(_ context.Context, profiles pprofile.Profiles) (exporterhelper.Request, error) { + return newProfilesRequest(profiles, pusher), nil + } +} + +// NewProfilesRequestExporter creates a new profiles exporter based on a custom ProfilesConverter and RequestSender. +// Experimental: This API is at the early stage of development and may change without backward compatibility +// until https://github.com/open-telemetry/opentelemetry-collector/issues/8122 is resolved. +func NewProfilesRequestExporter( + _ context.Context, + set exporter.Settings, + converter RequestFromProfilesFunc, + options ...exporterhelper.Option, +) (xexporter.Profiles, error) { + if set.Logger == nil { + return nil, errNilLogger + } + + if converter == nil { + return nil, errNilProfilesConverter + } + + be, err := internal.NewBaseExporter(set, xpipeline.SignalProfiles, newProfilesExporterWithObservability, options...) + if err != nil { + return nil, err + } + + tc, err := xconsumer.NewProfiles(func(ctx context.Context, pd pprofile.Profiles) error { + req, cErr := converter(ctx, pd) + if cErr != nil { + set.Logger.Error("Failed to convert profiles. Dropping data.", + zap.Int("dropped_samples", pd.SampleCount()), + zap.Error(cErr)) + return consumererror.NewPermanent(cErr) + } + return be.Send(ctx, req) + }, be.ConsumerOptions...) + + return &profileExporter{ + BaseExporter: be, + Profiles: tc, + }, err +} + +type profilesExporterWithObservability struct { + internal.BaseRequestSender + obsrep *internal.ObsReport +} + +func newProfilesExporterWithObservability(obsrep *internal.ObsReport) internal.RequestSender { + return &profilesExporterWithObservability{obsrep: obsrep} +} + +func (tewo *profilesExporterWithObservability) Send(ctx context.Context, req exporterhelper.Request) error { + c := tewo.obsrep.StartProfilesOp(ctx) + numSamples := req.ItemsCount() + // Forward the data to the next consumer (this pusher is the next). + err := tewo.NextSender.Send(c, req) + tewo.obsrep.EndProfilesOp(c, numSamples, err) + return err +} diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper/profiles_batch.go b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper/profiles_batch.go new file mode 100644 index 00000000000..ae7295e791b --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper/profiles_batch.go @@ -0,0 +1,141 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package xexporterhelper // import "go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper" + +import ( + "context" + "errors" + + "go.opentelemetry.io/collector/exporter/exporterbatcher" + "go.opentelemetry.io/collector/exporter/exporterhelper" + "go.opentelemetry.io/collector/pdata/pprofile" +) + +// Merge merges two profiles requests into one.
+func (req *profilesRequest) Merge(_ context.Context, r2 exporterhelper.Request) (exporterhelper.Request, error) { + tr2, ok2 := r2.(*profilesRequest) + if !ok2 { + return nil, errors.New("invalid input type") + } + tr2.pd.ResourceProfiles().MoveAndAppendTo(req.pd.ResourceProfiles()) + return req, nil +} + +// MergeSplit splits and/or merges the profiles into multiple requests based on the MaxSizeConfig. +func (req *profilesRequest) MergeSplit(_ context.Context, cfg exporterbatcher.MaxSizeConfig, r2 exporterhelper.Request) ([]exporterhelper.Request, error) { + var ( + res []exporterhelper.Request + destReq *profilesRequest + capacityLeft = cfg.MaxSizeItems + ) + for _, r := range []exporterhelper.Request{req, r2} { + if r == nil { + continue + } + srcReq, ok := r.(*profilesRequest) + if !ok { + return nil, errors.New("invalid input type") + } + if srcReq.pd.SampleCount() <= capacityLeft { + if destReq == nil { + destReq = srcReq + } else { + srcReq.pd.ResourceProfiles().MoveAndAppendTo(destReq.pd.ResourceProfiles()) + } + capacityLeft -= destReq.pd.SampleCount() + continue + } + + for { + extractedProfiles := extractProfiles(srcReq.pd, capacityLeft) + if extractedProfiles.SampleCount() == 0 { + break + } + capacityLeft -= extractedProfiles.SampleCount() + if destReq == nil { + destReq = &profilesRequest{pd: extractedProfiles, pusher: srcReq.pusher} + } else { + extractedProfiles.ResourceProfiles().MoveAndAppendTo(destReq.pd.ResourceProfiles()) + } + // Create new batch once capacity is reached. + if capacityLeft == 0 { + res = append(res, destReq) + destReq = nil + capacityLeft = cfg.MaxSizeItems + } + } + } + + if destReq != nil { + res = append(res, destReq) + } + return res, nil +} + +// extractProfiles extracts a new profiles with a maximum number of samples. +func extractProfiles(srcProfiles pprofile.Profiles, count int) pprofile.Profiles { + destProfiles := pprofile.NewProfiles() + srcProfiles.ResourceProfiles().RemoveIf(func(srcRS pprofile.ResourceProfiles) bool { + if count == 0 { + return false + } + needToExtract := samplesCount(srcRS) > count + if needToExtract { + srcRS = extractResourceProfiles(srcRS, count) + } + count -= samplesCount(srcRS) + srcRS.MoveTo(destProfiles.ResourceProfiles().AppendEmpty()) + return !needToExtract + }) + return destProfiles +} + +// extractResourceProfiles extracts profiles and returns a new resource profiles with the specified number of profiles. +func extractResourceProfiles(srcRS pprofile.ResourceProfiles, count int) pprofile.ResourceProfiles { + destRS := pprofile.NewResourceProfiles() + destRS.SetSchemaUrl(srcRS.SchemaUrl()) + srcRS.Resource().CopyTo(destRS.Resource()) + srcRS.ScopeProfiles().RemoveIf(func(srcSS pprofile.ScopeProfiles) bool { + if count == 0 { + return false + } + needToExtract := srcSS.Profiles().Len() > count + if needToExtract { + srcSS = extractScopeProfiles(srcSS, count) + } + count -= srcSS.Profiles().Len() + srcSS.MoveTo(destRS.ScopeProfiles().AppendEmpty()) + return !needToExtract + }) + return destRS +} + +// extractScopeProfiles extracts profiles and returns a new scope profiles with the specified number of profiles.
+func extractScopeProfiles(srcSS pprofile.ScopeProfiles, count int) pprofile.ScopeProfiles { + destSS := pprofile.NewScopeProfiles() + destSS.SetSchemaUrl(srcSS.SchemaUrl()) + srcSS.Scope().CopyTo(destSS.Scope()) + srcSS.Profiles().RemoveIf(func(srcProfile pprofile.Profile) bool { + if count == 0 { + return false + } + srcProfile.MoveTo(destSS.Profiles().AppendEmpty()) + count-- + return true + }) + return destSS +} + +// samplesCount calculates the total number of samples in the pprofile.ResourceProfiles. +func samplesCount(rs pprofile.ResourceProfiles) int { + count := 0 + rs.ScopeProfiles().RemoveIf(func(ss pprofile.ScopeProfiles) bool { + ss.Profiles().RemoveIf(func(sp pprofile.Profile) bool { + count += sp.Sample().Len() + return false + }) + return false + }) + return count +} diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterqueue/config.go b/vendor/go.opentelemetry.io/collector/exporter/exporterqueue/config.go index fcc0e0e57b1..b8a2c01e5bb 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/exporterqueue/config.go +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterqueue/config.go @@ -33,7 +33,7 @@ func NewDefaultConfig() Config { } } -// Validate checks if the QueueSettings configuration is valid +// Validate checks if the Config is valid func (qCfg *Config) Validate() error { if !qCfg.Enabled { return nil diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterqueue/queue.go b/vendor/go.opentelemetry.io/collector/exporter/exporterqueue/queue.go index 5b568851b66..724cc23e0ae 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/exporterqueue/queue.go +++ b/vendor/go.opentelemetry.io/collector/exporter/exporterqueue/queue.go @@ -9,6 +9,7 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/exporter" "go.opentelemetry.io/collector/exporter/internal/queue" + "go.opentelemetry.io/collector/pipeline" ) // ErrQueueIsFull is the error that Queue returns when full. @@ -24,8 +25,8 @@ type Queue[T any] queue.Queue[T] // Settings defines settings for creating a queue. type Settings struct { - DataType component.DataType - ExporterSettings exporter.CreateSettings + Signal pipeline.Signal + ExporterSettings exporter.Settings } // Marshaler is a function that can marshal a request into bytes. @@ -46,11 +47,11 @@ type Factory[T any] func(context.Context, Settings, Config) Queue[T] // NewMemoryQueueFactory returns a factory to create a new memory queue. // Experimental: This API is at the early stage of development and may change without backward compatibility // until https://github.com/open-telemetry/opentelemetry-collector/issues/8122 is resolved. -func NewMemoryQueueFactory[T itemsCounter]() Factory[T] { +func NewMemoryQueueFactory[T any]() Factory[T] { return func(_ context.Context, _ Settings, cfg Config) Queue[T] { return queue.NewBoundedMemoryQueue[T](queue.MemoryQueueSettings[T]{ - Sizer: sizerFromConfig[T](cfg), - Capacity: capacityFromConfig(cfg), + Sizer: &queue.RequestSizer[T]{}, + Capacity: int64(cfg.QueueSize), }) } } @@ -69,15 +70,15 @@ type PersistentQueueSettings[T any] struct { // If cfg.StorageID is nil then it falls back to memory queue. // Experimental: This API is at the early stage of development and may change without backward compatibility // until https://github.com/open-telemetry/opentelemetry-collector/issues/8122 is resolved.
-func NewPersistentQueueFactory[T itemsCounter](storageID *component.ID, factorySettings PersistentQueueSettings[T]) Factory[T] { +func NewPersistentQueueFactory[T any](storageID *component.ID, factorySettings PersistentQueueSettings[T]) Factory[T] { if storageID == nil { return NewMemoryQueueFactory[T]() } return func(_ context.Context, set Settings, cfg Config) Queue[T] { return queue.NewPersistentQueue[T](queue.PersistentQueueSettings[T]{ - Sizer: sizerFromConfig[T](cfg), - Capacity: capacityFromConfig(cfg), - DataType: set.DataType, + Sizer: &queue.RequestSizer[T]{}, + Capacity: int64(cfg.QueueSize), + Signal: set.Signal, StorageID: *storageID, Marshaler: factorySettings.Marshaler, Unmarshaler: factorySettings.Unmarshaler, @@ -85,17 +86,3 @@ func NewPersistentQueueFactory[T itemsCounter](storageID *component.ID, factoryS }) } } - -type itemsCounter interface { - ItemsCount() int -} - -func sizerFromConfig[T itemsCounter](Config) queue.Sizer[T] { - // TODO: Handle other ways to measure the queue size once they are added. - return &queue.RequestSizer[T]{} -} - -func capacityFromConfig(cfg Config) int64 { - // TODO: Handle other ways to measure the queue size once they are added. - return int64(cfg.QueueSize) -} diff --git a/vendor/go.opentelemetry.io/collector/exporter/exportertest/LICENSE b/vendor/go.opentelemetry.io/collector/exporter/exportertest/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/exporter/exportertest/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
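The exporterqueue hunks above relax the factory type parameter from the removed itemsCounter interface to any, and swap component.DataType for pipeline.Signal in Settings. A minimal sketch of building and using a memory queue against the new surface, assuming a hypothetical fakeRequest payload type (everything except the exporterqueue/exportertest/pipeline identifiers shown in the hunks is illustrative):

package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/collector/exporter/exporterqueue"
	"go.opentelemetry.io/collector/exporter/exportertest"
	"go.opentelemetry.io/collector/pipeline"
)

// fakeRequest is a hypothetical queue payload; with the relaxed [T any]
// constraint it no longer has to implement ItemsCount().
type fakeRequest struct{ spans int }

func main() {
	// Settings now carries a pipeline.Signal instead of the removed
	// component.DataType, and exporter.Settings instead of CreateSettings.
	factory := exporterqueue.NewMemoryQueueFactory[fakeRequest]()
	q := factory(context.Background(), exporterqueue.Settings{
		Signal:           pipeline.SignalTraces,
		ExporterSettings: exportertest.NewNopSettings(),
	}, exporterqueue.NewDefaultConfig())

	// Offer enqueues a request; it fails with ErrQueueIsFull once QueueSize is reached.
	if err := q.Offer(context.Background(), fakeRequest{spans: 1}); err != nil {
		fmt.Println("offer failed:", err)
	}
}

NewPersistentQueueFactory follows the same shape, additionally taking the storage ID and the Marshaler/Unmarshaler hooks shown in the hunk.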
diff --git a/vendor/go.opentelemetry.io/collector/exporter/exportertest/Makefile b/vendor/go.opentelemetry.io/collector/exporter/exportertest/Makefile new file mode 100644 index 00000000000..c1496226e59 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/exporter/exportertest/Makefile @@ -0,0 +1 @@ +include ../../Makefile.Common \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/collector/exporter/exportertest/contract_checker.go b/vendor/go.opentelemetry.io/collector/exporter/exportertest/contract_checker.go index 7b3f55c1902..d708c097a68 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/exportertest/contract_checker.go +++ b/vendor/go.opentelemetry.io/collector/exporter/exportertest/contract_checker.go @@ -19,6 +19,7 @@ import ( "go.opentelemetry.io/collector/pdata/plog" "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/pdata/ptrace" + "go.opentelemetry.io/collector/pipeline" "go.opentelemetry.io/collector/receiver" "go.opentelemetry.io/collector/receiver/receivertest" ) @@ -32,8 +33,7 @@ type uniqueIDAttrVal string type CheckConsumeContractParams struct { T *testing.T NumberOfTestElements int - // DataType to test for. - DataType component.DataType + Signal pipeline.Signal // ExporterFactory to create an exporter to be tested. ExporterFactory exporter.Factory ExporterConfig component.Config @@ -84,19 +84,19 @@ func CheckConsumeContract(params CheckConsumeContractParams) { func checkConsumeContractScenario(t *testing.T, params CheckConsumeContractParams, decisionFunc func() error, checkIfTestPassed func(*testing.T, int, requestCounter)) { mockConsumerInstance := newMockConsumer(decisionFunc) - switch params.DataType { - case component.DataTypeLogs: - r, err := params.ReceiverFactory.CreateLogsReceiver(context.Background(), receivertest.NewNopCreateSettings(), params.ReceiverConfig, &mockConsumerInstance) + switch params.Signal { + case pipeline.SignalLogs: + r, err := params.ReceiverFactory.CreateLogs(context.Background(), receivertest.NewNopSettings(), params.ReceiverConfig, &mockConsumerInstance) require.NoError(t, err) require.NoError(t, r.Start(context.Background(), componenttest.NewNopHost())) checkLogs(t, params, r, &mockConsumerInstance, checkIfTestPassed) - case component.DataTypeTraces: - r, err := params.ReceiverFactory.CreateTracesReceiver(context.Background(), receivertest.NewNopCreateSettings(), params.ReceiverConfig, &mockConsumerInstance) + case pipeline.SignalTraces: + r, err := params.ReceiverFactory.CreateTraces(context.Background(), receivertest.NewNopSettings(), params.ReceiverConfig, &mockConsumerInstance) require.NoError(t, err) require.NoError(t, r.Start(context.Background(), componenttest.NewNopHost())) checkTraces(t, params, r, &mockConsumerInstance, checkIfTestPassed) - case component.DataTypeMetrics: - r, err := params.ReceiverFactory.CreateMetricsReceiver(context.Background(), receivertest.NewNopCreateSettings(), params.ReceiverConfig, &mockConsumerInstance) + case pipeline.SignalMetrics: + r, err := params.ReceiverFactory.CreateMetrics(context.Background(), receivertest.NewNopSettings(), params.ReceiverConfig, &mockConsumerInstance) require.NoError(t, err) require.NoError(t, r.Start(context.Background(), componenttest.NewNopHost())) checkMetrics(t, params, r, &mockConsumerInstance, checkIfTestPassed) @@ -106,11 +106,12 @@ func checkConsumeContractScenario(t *testing.T, params CheckConsumeContractParam } func checkMetrics(t *testing.T, params CheckConsumeContractParams, mockReceiver component.Component, 
- mockConsumer *mockConsumer, checkIfTestPassed func(*testing.T, int, requestCounter)) { + mockConsumer *mockConsumer, checkIfTestPassed func(*testing.T, int, requestCounter), +) { ctx := context.Background() var exp exporter.Metrics var err error - exp, err = params.ExporterFactory.CreateMetricsExporter(ctx, NewNopCreateSettings(), params.ExporterConfig) + exp, err = params.ExporterFactory.CreateMetrics(ctx, NewNopSettings(), params.ExporterConfig) require.NoError(t, err) require.NotNil(t, exp) @@ -150,7 +151,7 @@ func checkTraces(t *testing.T, params CheckConsumeContractParams, mockReceiver c ctx := context.Background() var exp exporter.Traces var err error - exp, err = params.ExporterFactory.CreateTracesExporter(ctx, NewNopCreateSettings(), params.ExporterConfig) + exp, err = params.ExporterFactory.CreateTraces(ctx, NewNopSettings(), params.ExporterConfig) require.NoError(t, err) require.NotNil(t, exp) @@ -190,7 +191,7 @@ func checkLogs(t *testing.T, params CheckConsumeContractParams, mockReceiver com ctx := context.Background() var exp exporter.Logs var err error - exp, err = params.ExporterFactory.CreateLogsExporter(ctx, NewNopCreateSettings(), params.ExporterConfig) + exp, err = params.ExporterFactory.CreateLogs(ctx, NewNopSettings(), params.ExporterConfig) require.NoError(t, err) require.NotNil(t, exp) @@ -228,9 +229,9 @@ func checkLogs(t *testing.T, params CheckConsumeContractParams, mockReceiver com // Test is successful if all the elements were received successfully and no error was returned func alwaysSucceedsPassed(t *testing.T, allRecordsNumber int, reqCounter requestCounter) { require.Equal(t, allRecordsNumber, reqCounter.success) - require.Equal(t, reqCounter.total, allRecordsNumber) - require.Equal(t, reqCounter.error.nonpermanent, 0) - require.Equal(t, reqCounter.error.permanent, 0) + require.Equal(t, allRecordsNumber, reqCounter.total) + require.Equal(t, 0, reqCounter.error.nonpermanent) + require.Equal(t, 0, reqCounter.error.permanent) } // Test is successful if all the elements were retried on non-permanent errors diff --git a/vendor/go.opentelemetry.io/collector/exporter/exportertest/mock_consumer.go b/vendor/go.opentelemetry.io/collector/exporter/exportertest/mock_consumer.go index b513e524735..0290ed69510 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/exportertest/mock_consumer.go +++ b/vendor/go.opentelemetry.io/collector/exporter/exportertest/mock_consumer.go @@ -5,7 +5,7 @@ package exportertest // import "go.opentelemetry.io/collector/exporter/exportert import ( "context" "fmt" - "math/rand" + "math/rand/v2" "sync" "google.golang.org/grpc/codes" @@ -19,8 +19,10 @@ import ( "go.opentelemetry.io/collector/pdata/ptrace" ) -var errNonPermanent = status.Error(codes.DeadlineExceeded, "non Permanent error") -var errPermanent = status.Error(codes.Internal, "Permanent error") +var ( + errNonPermanent = status.Error(codes.DeadlineExceeded, "non Permanent error") + errPermanent = status.Error(codes.Internal, "Permanent error") +) // // randomNonPermanentErrorConsumeDecision is a decision function that succeeds approximately // // half of the time and fails with a non-permanent error the rest of the time. 
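For reference, a minimal sketch of a test driving the updated contract checker with the renamed Signal field and the shortened Create* factory methods. The nop factories are stand-ins that only exercise the new field names; a real test would wire a production exporter to a receiver that can actually observe its output:

package myexporter_test // hypothetical test package

import (
	"testing"

	"go.opentelemetry.io/collector/exporter/exportertest"
	"go.opentelemetry.io/collector/pipeline"
	"go.opentelemetry.io/collector/receiver/receivertest"
)

func TestConsumeContract(t *testing.T) {
	expFactory := exportertest.NewNopFactory()
	rcvFactory := receivertest.NewNopFactory()

	exportertest.CheckConsumeContract(exportertest.CheckConsumeContractParams{
		T:                    t,
		Signal:               pipeline.SignalLogs, // previously DataType: component.DataTypeLogs
		NumberOfTestElements: 10,
		ExporterFactory:      expFactory,
		ExporterConfig:       expFactory.CreateDefaultConfig(),
		ReceiverFactory:      rcvFactory,
		ReceiverConfig:       rcvFactory.CreateDefaultConfig(),
	})
}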
diff --git a/vendor/go.opentelemetry.io/collector/exporter/exportertest/nop_exporter.go b/vendor/go.opentelemetry.io/collector/exporter/exportertest/nop_exporter.go index 6589d867bec..19e062e45e9 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/exportertest/nop_exporter.go +++ b/vendor/go.opentelemetry.io/collector/exporter/exportertest/nop_exporter.go @@ -12,13 +12,14 @@ import ( "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/consumer/consumertest" "go.opentelemetry.io/collector/exporter" + "go.opentelemetry.io/collector/exporter/xexporter" ) var nopType = component.MustNewType("nop") -// NewNopCreateSettings returns a new nop settings for Create*Exporter functions. -func NewNopCreateSettings() exporter.CreateSettings { - return exporter.CreateSettings{ +// NewNopSettings returns a new nop settings for Create* functions. +func NewNopSettings() exporter.Settings { + return exporter.Settings{ ID: component.NewIDWithName(nopType, uuid.NewString()), TelemetrySettings: componenttest.NewNopTelemetrySettings(), BuildInfo: component.NewDefaultBuildInfo(), @@ -27,44 +28,41 @@ func NewNopCreateSettings() exporter.CreateSettings { // NewNopFactory returns an exporter.Factory that constructs nop exporters. func NewNopFactory() exporter.Factory { - return exporter.NewFactory( + return xexporter.NewFactory( nopType, func() component.Config { return &nopConfig{} }, - exporter.WithTraces(createTracesExporter, component.StabilityLevelStable), - exporter.WithMetrics(createMetricsExporter, component.StabilityLevelStable), - exporter.WithLogs(createLogsExporter, component.StabilityLevelStable), + xexporter.WithTraces(createTraces, component.StabilityLevelStable), + xexporter.WithMetrics(createMetrics, component.StabilityLevelStable), + xexporter.WithLogs(createLogs, component.StabilityLevelStable), + xexporter.WithProfiles(createProfiles, component.StabilityLevelAlpha), ) } -func createTracesExporter(context.Context, exporter.CreateSettings, component.Config) (exporter.Traces, error) { +func createTraces(context.Context, exporter.Settings, component.Config) (exporter.Traces, error) { return nopInstance, nil } -func createMetricsExporter(context.Context, exporter.CreateSettings, component.Config) (exporter.Metrics, error) { +func createMetrics(context.Context, exporter.Settings, component.Config) (exporter.Metrics, error) { return nopInstance, nil } -func createLogsExporter(context.Context, exporter.CreateSettings, component.Config) (exporter.Logs, error) { +func createLogs(context.Context, exporter.Settings, component.Config) (exporter.Logs, error) { + return nopInstance, nil +} + +func createProfiles(context.Context, exporter.Settings, component.Config) (xexporter.Profiles, error) { return nopInstance, nil } type nopConfig struct{} -var nopInstance = &nopExporter{ +var nopInstance = &nop{ Consumer: consumertest.NewNop(), } -// nopExporter stores consumed traces and metrics for testing purposes. -type nopExporter struct { +// nop stores consumed traces, metrics, logs and profiles for testing purposes. +type nop struct { component.StartFunc component.ShutdownFunc consumertest.Consumer } - -// NewNopBuilder returns an exporter.Builder that constructs nop receivers. 
-func NewNopBuilder() *exporter.Builder { - nopFactory := NewNopFactory() - return exporter.NewBuilder( - map[component.ID]component.Config{component.NewID(nopType): nopFactory.CreateDefaultConfig()}, - map[component.Type]exporter.Factory{nopType: nopFactory}) -} diff --git a/vendor/go.opentelemetry.io/collector/exporter/internal/queue/batcher.go b/vendor/go.opentelemetry.io/collector/exporter/internal/queue/batcher.go new file mode 100644 index 00000000000..2250b27d1b1 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/exporter/internal/queue/batcher.go @@ -0,0 +1,97 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package queue // import "go.opentelemetry.io/collector/exporter/internal/queue" + +import ( + "context" + "sync" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/exporter/exporterbatcher" + "go.opentelemetry.io/collector/exporter/internal" +) + +type batch struct { + ctx context.Context + req internal.Request + idxList []uint64 +} + +// Batcher is in charge of reading items from the queue and sending them out asynchronously. +type Batcher interface { + component.Component +} + +type BaseBatcher struct { + batchCfg exporterbatcher.Config + queue Queue[internal.Request] + maxWorkers int + workerPool chan bool + exportFunc func(ctx context.Context, req internal.Request) error + stopWG sync.WaitGroup +} + +func NewBatcher(batchCfg exporterbatcher.Config, + queue Queue[internal.Request], + exportFunc func(ctx context.Context, req internal.Request) error, + maxWorkers int, +) (Batcher, error) { + if !batchCfg.Enabled { + return &DisabledBatcher{ + BaseBatcher{ + batchCfg: batchCfg, + queue: queue, + maxWorkers: maxWorkers, + exportFunc: exportFunc, + stopWG: sync.WaitGroup{}, + }, + }, nil + } + + return &DefaultBatcher{ + BaseBatcher: BaseBatcher{ + batchCfg: batchCfg, + queue: queue, + maxWorkers: maxWorkers, + exportFunc: exportFunc, + stopWG: sync.WaitGroup{}, + }, + }, nil +} + +func (qb *BaseBatcher) startWorkerPool() { + if qb.maxWorkers == 0 { + return + } + qb.workerPool = make(chan bool, qb.maxWorkers) + for i := 0; i < qb.maxWorkers; i++ { + qb.workerPool <- true + } +} + +// flush exports the incoming batch synchronously. +func (qb *BaseBatcher) flush(batchToFlush batch) { + err := qb.exportFunc(batchToFlush.ctx, batchToFlush.req) + for _, idx := range batchToFlush.idxList { + qb.queue.OnProcessingFinished(idx, err) + } +} + +// flushAsync starts a goroutine that calls flush. It blocks until a worker is available. +func (qb *BaseBatcher) flushAsync(batchToFlush batch) { + qb.stopWG.Add(1) + if qb.maxWorkers == 0 { + go func() { + defer qb.stopWG.Done() + qb.flush(batchToFlush) + }() + return + } + <-qb.workerPool + go func() { + defer qb.stopWG.Done() + qb.flush(batchToFlush) + qb.workerPool <- true + }() +} diff --git a/vendor/go.opentelemetry.io/collector/exporter/internal/queue/bounded_memory_queue.go b/vendor/go.opentelemetry.io/collector/exporter/internal/queue/bounded_memory_queue.go index 98e1b281176..015c94473df 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/internal/queue/bounded_memory_queue.go +++ b/vendor/go.opentelemetry.io/collector/exporter/internal/queue/bounded_memory_queue.go @@ -40,17 +40,14 @@ func (q *boundedMemoryQueue[T]) Offer(ctx context.Context, req T) error { return q.sizedChannel.push(memQueueEl[T]{ctx: ctx, req: req}, q.sizer.Sizeof(req), nil) } -// Consume applies the provided function on the head of queue.
-// The call blocks until there is an item available or the queue is stopped. -// The function returns true when an item is consumed or false if the queue is stopped and emptied. -func (q *boundedMemoryQueue[T]) Consume(consumeFunc func(context.Context, T) error) bool { +func (q *boundedMemoryQueue[T]) Read(_ context.Context) (uint64, context.Context, T, bool) { item, ok := q.sizedChannel.pop(func(el memQueueEl[T]) int64 { return q.sizer.Sizeof(el.req) }) - if !ok { - return false - } - // the memory queue doesn't handle consume errors - _ = consumeFunc(item.ctx, item.req) - return true + return 0, item.ctx, item.req, ok +} + +// OnProcessingFinished should be called to remove the item at the given index from the queue once processing is finished. +// For the in-memory queue, this function is a no-op. +func (q *boundedMemoryQueue[T]) OnProcessingFinished(uint64, error) { } // Shutdown closes the queue channel to initiate draining of the queue. diff --git a/vendor/go.opentelemetry.io/collector/exporter/internal/queue/consumers.go b/vendor/go.opentelemetry.io/collector/exporter/internal/queue/consumers.go index 7c57fea9620..b8e07c3a79f 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/internal/queue/consumers.go +++ b/vendor/go.opentelemetry.io/collector/exporter/internal/queue/consumers.go @@ -27,11 +27,7 @@ func NewQueueConsumers[T any](q Queue[T], numConsumers int, consumeFunc func(con } // Start ensures that queue and all consumers are started. -func (qc *Consumers[T]) Start(ctx context.Context, host component.Host) error { - if err := qc.queue.Start(ctx, host); err != nil { - return err - } - +func (qc *Consumers[T]) Start(_ context.Context, _ component.Host) error { var startWG sync.WaitGroup for i := 0; i < qc.numConsumers; i++ { qc.stopWG.Add(1) @@ -40,9 +36,12 @@ func (qc *Consumers[T]) Start(ctx context.Context, host component.Host) error { startWG.Done() defer qc.stopWG.Done() for { - if !qc.queue.Consume(qc.consumeFunc) { + index, ctx, req, ok := qc.queue.Read(context.Background()) + if !ok { return } + consumeErr := qc.consumeFunc(ctx, req) + qc.queue.OnProcessingFinished(index, consumeErr) } }() } @@ -52,10 +51,7 @@ func (qc *Consumers[T]) Start(ctx context.Context, host component.Host) error { } // Shutdown ensures that queue and all consumers are stopped. -func (qc *Consumers[T]) Shutdown(ctx context.Context) error { - if err := qc.queue.Shutdown(ctx); err != nil { - return err - } +func (qc *Consumers[T]) Shutdown(_ context.Context) error { qc.stopWG.Wait() return nil } diff --git a/vendor/go.opentelemetry.io/collector/exporter/internal/queue/default_batcher.go b/vendor/go.opentelemetry.io/collector/exporter/internal/queue/default_batcher.go new file mode 100644 index 00000000000..3023fa4df46 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/exporter/internal/queue/default_batcher.go @@ -0,0 +1,180 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package queue // import "go.opentelemetry.io/collector/exporter/internal/queue" + +import ( + "context" + "math" + "sync" + "time" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/exporter/internal" +) + +// DefaultBatcher continuously reads from the queue and flushes asynchronously if size limit is met or on timeout.
+type DefaultBatcher struct { + BaseBatcher + currentBatchMu sync.Mutex + currentBatch *batch + timer *time.Timer + shutdownCh chan bool +} + +func (qb *DefaultBatcher) resetTimer() { + if qb.batchCfg.FlushTimeout != 0 { + qb.timer.Reset(qb.batchCfg.FlushTimeout) + } +} + +// startReadingFlushingGoroutine starts a goroutine that reads and then flushes. +func (qb *DefaultBatcher) startReadingFlushingGoroutine() { + qb.stopWG.Add(1) + go func() { + defer qb.stopWG.Done() + for { + // Read() blocks until the queue is non-empty or until the queue is stopped. + idx, ctx, req, ok := qb.queue.Read(context.Background()) + if !ok { + qb.shutdownCh <- true + return + } + + qb.currentBatchMu.Lock() + + if qb.batchCfg.MaxSizeItems > 0 { + var reqList []internal.Request + var mergeSplitErr error + if qb.currentBatch == nil || qb.currentBatch.req == nil { + qb.resetTimer() + reqList, mergeSplitErr = req.MergeSplit(ctx, qb.batchCfg.MaxSizeConfig, nil) + } else { + reqList, mergeSplitErr = qb.currentBatch.req.MergeSplit(ctx, qb.batchCfg.MaxSizeConfig, req) + } + + if mergeSplitErr != nil || reqList == nil { + qb.queue.OnProcessingFinished(idx, mergeSplitErr) + qb.currentBatchMu.Unlock() + continue + } + + // If there was a split, we flush everything immediately. + if reqList[0].ItemsCount() >= qb.batchCfg.MinSizeItems || len(reqList) > 1 { + qb.currentBatch = nil + qb.currentBatchMu.Unlock() + for i := 0; i < len(reqList); i++ { + qb.flushAsync(batch{ + req: reqList[i], + ctx: ctx, + idxList: []uint64{idx}, + }) + // TODO: handle partial failure + } + qb.resetTimer() + } else { + qb.currentBatch = &batch{ + req: reqList[0], + ctx: ctx, + idxList: []uint64{idx}, + } + qb.currentBatchMu.Unlock() + } + } else { + if qb.currentBatch == nil || qb.currentBatch.req == nil { + qb.resetTimer() + qb.currentBatch = &batch{ + req: req, + ctx: ctx, + idxList: []uint64{idx}, + } + } else { + mergedReq, mergeErr := qb.currentBatch.req.Merge(qb.currentBatch.ctx, req) + if mergeErr != nil { + qb.queue.OnProcessingFinished(idx, mergeErr) + qb.currentBatchMu.Unlock() + continue + } + qb.currentBatch = &batch{ + req: mergedReq, + ctx: qb.currentBatch.ctx, + idxList: append(qb.currentBatch.idxList, idx), + } + } + + if qb.currentBatch.req.ItemsCount() >= qb.batchCfg.MinSizeItems { + batchToFlush := *qb.currentBatch + qb.currentBatch = nil + qb.currentBatchMu.Unlock() + + // flushAsync() blocks until successfully started a goroutine for flushing. + qb.flushAsync(batchToFlush) + qb.resetTimer() + } else { + qb.currentBatchMu.Unlock() + } + } + } + }() +} + +// startTimeBasedFlushingGoroutine starts a goroutine that flushes on timeout. +func (qb *DefaultBatcher) startTimeBasedFlushingGoroutine() { + qb.stopWG.Add(1) + go func() { + defer qb.stopWG.Done() + for { + select { + case <-qb.shutdownCh: + return + case <-qb.timer.C: + qb.flushCurrentBatchIfNecessary() + } + } + }() +} + +// Start starts the goroutine that reads from the queue and flushes asynchronously. +func (qb *DefaultBatcher) Start(_ context.Context, _ component.Host) error { + // maxWorker being -1 means batcher is disabled. This is for testing queue sender metrics. 
+ if qb.maxWorkers == -1 { + return nil + } + + qb.startWorkerPool() + qb.shutdownCh = make(chan bool, 1) + + if qb.batchCfg.FlushTimeout == 0 { + qb.timer = time.NewTimer(math.MaxInt) + qb.timer.Stop() + } else { + qb.timer = time.NewTimer(qb.batchCfg.FlushTimeout) + } + + qb.startReadingFlushingGoroutine() + qb.startTimeBasedFlushingGoroutine() + return nil +} + +// flushCurrentBatchIfNecessary sends out the current request batch if it is not nil +func (qb *DefaultBatcher) flushCurrentBatchIfNecessary() { + qb.currentBatchMu.Lock() + if qb.currentBatch == nil || qb.currentBatch.req == nil { + qb.currentBatchMu.Unlock() + return + } + batchToFlush := *qb.currentBatch + qb.currentBatch = nil + qb.currentBatchMu.Unlock() + + // flushAsync() blocks until successfully started a goroutine for flushing. + qb.flushAsync(batchToFlush) + qb.resetTimer() +} + +// Shutdown ensures that queue and all Batcher are stopped. +func (qb *DefaultBatcher) Shutdown(_ context.Context) error { + qb.flushCurrentBatchIfNecessary() + qb.stopWG.Wait() + return nil +} diff --git a/vendor/go.opentelemetry.io/collector/exporter/internal/queue/disabled_batcher.go b/vendor/go.opentelemetry.io/collector/exporter/internal/queue/disabled_batcher.go new file mode 100644 index 00000000000..250b38e7640 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/exporter/internal/queue/disabled_batcher.go @@ -0,0 +1,52 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package queue // import "go.opentelemetry.io/collector/exporter/internal/queue" + +import ( + "context" + + "go.opentelemetry.io/collector/component" +) + +// DisabledBatcher is a special-case of Batcher that has no size limit for sending. Any items read from the queue will +// be sent out (asynchronously) immediately regardless of the size. +type DisabledBatcher struct { + BaseBatcher +} + +// Start starts the goroutine that reads from the queue and flushes asynchronously. +func (qb *DisabledBatcher) Start(_ context.Context, _ component.Host) error { + // maxWorker being -1 means batcher is disabled. This is for testing queue sender metrics. + if qb.maxWorkers == -1 { + return nil + } + + qb.startWorkerPool() + + // This goroutine reads and then flushes. + // 1. Reading from the queue is blocked until the queue is non-empty or until the queue is stopped. + // 2. flushAsync() blocks until there are idle workers in the worker pool. + qb.stopWG.Add(1) + go func() { + defer qb.stopWG.Done() + for { + idx, _, req, ok := qb.queue.Read(context.Background()) + if !ok { + return + } + qb.flushAsync(batch{ + req: req, + ctx: context.Background(), + idxList: []uint64{idx}, + }) + } + }() + return nil +} + +// Shutdown ensures that queue and all Batcher are stopped. 
+func (qb *DisabledBatcher) Shutdown(_ context.Context) error { + qb.stopWG.Wait() + return nil +} diff --git a/vendor/go.opentelemetry.io/collector/exporter/internal/queue/mock_storage.go b/vendor/go.opentelemetry.io/collector/exporter/internal/queue/mock_storage.go index 6ef9810529b..88d7c4438e0 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/internal/queue/mock_storage.go +++ b/vendor/go.opentelemetry.io/collector/exporter/internal/queue/mock_storage.go @@ -6,10 +6,9 @@ package queue // import "go.opentelemetry.io/collector/exporter/internal/queue" import ( "context" "errors" - "fmt" "sync" "sync/atomic" - "syscall" + "time" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/extension/experimental/storage" @@ -20,22 +19,31 @@ type mockStorageExtension struct { component.ShutdownFunc st sync.Map getClientError error + executionDelay time.Duration } func (m *mockStorageExtension) GetClient(context.Context, component.Kind, component.ID, string) (storage.Client, error) { if m.getClientError != nil { return nil, m.getClientError } - return &mockStorageClient{st: &m.st, closed: &atomic.Bool{}}, nil + return &mockStorageClient{st: &m.st, closed: &atomic.Bool{}, executionDelay: m.executionDelay}, nil } func NewMockStorageExtension(getClientError error) storage.Extension { - return &mockStorageExtension{getClientError: getClientError} + return NewMockStorageExtensionWithDelay(getClientError, 0) +} + +func NewMockStorageExtensionWithDelay(getClientError error, executionDelay time.Duration) storage.Extension { + return &mockStorageExtension{ + getClientError: getClientError, + executionDelay: executionDelay, + } } type mockStorageClient struct { - st *sync.Map - closed *atomic.Bool + st *sync.Map + closed *atomic.Bool + executionDelay time.Duration // simulate real storage client delay } func (m *mockStorageClient) Get(ctx context.Context, s string) ([]byte, error) { @@ -61,6 +69,9 @@ func (m *mockStorageClient) Batch(_ context.Context, ops ...storage.Operation) e if m.isClosed() { panic("client already closed") } + if m.executionDelay != 0 { + time.Sleep(m.executionDelay) + } for _, op := range ops { switch op.Type { case storage.Get: @@ -84,161 +95,3 @@ func (m *mockStorageClient) Batch(_ context.Context, ops ...storage.Operation) e func (m *mockStorageClient) isClosed() bool { return m.closed.Load() } - -func newFakeBoundedStorageClient(maxSizeInBytes int) *fakeBoundedStorageClient { - return &fakeBoundedStorageClient{ - st: map[string][]byte{}, - MaxSizeInBytes: maxSizeInBytes, - } -} - -// this storage client mimics the behavior of actual storage engines with limited storage space available -// in general, real storage engines often have a per-write-transaction storage overhead, needing to keep -// both the old and the new value stored until the transaction is committed -// this is useful for testing the persistent queue queue behavior with a full disk -type fakeBoundedStorageClient struct { - MaxSizeInBytes int - st map[string][]byte - sizeInBytes int - mux sync.Mutex -} - -func (m *fakeBoundedStorageClient) Get(ctx context.Context, key string) ([]byte, error) { - op := storage.GetOperation(key) - if err := m.Batch(ctx, op); err != nil { - return nil, err - } - - return op.Value, nil -} - -func (m *fakeBoundedStorageClient) Set(ctx context.Context, key string, value []byte) error { - return m.Batch(ctx, storage.SetOperation(key, value)) -} - -func (m *fakeBoundedStorageClient) Delete(ctx context.Context, key string) error { - return m.Batch(ctx, 
storage.DeleteOperation(key)) -} - -func (m *fakeBoundedStorageClient) Close(context.Context) error { - return nil -} - -func (m *fakeBoundedStorageClient) Batch(_ context.Context, ops ...storage.Operation) error { - m.mux.Lock() - defer m.mux.Unlock() - - totalAdded, totalRemoved := m.getTotalSizeChange(ops) - - // the assumption here is that the new data needs to coexist with the old data on disk - // for the transaction to succeed - // this seems to be true for the file storage extension at least - if m.sizeInBytes+totalAdded > m.MaxSizeInBytes { - return fmt.Errorf("insufficient space available: %w", syscall.ENOSPC) - } - - for _, op := range ops { - switch op.Type { - case storage.Get: - op.Value = m.st[op.Key] - case storage.Set: - m.st[op.Key] = op.Value - case storage.Delete: - delete(m.st, op.Key) - default: - return errors.New("wrong operation type") - } - } - - m.sizeInBytes += totalAdded - totalRemoved - - return nil -} - -func (m *fakeBoundedStorageClient) SetMaxSizeInBytes(newMaxSize int) { - m.mux.Lock() - defer m.mux.Unlock() - m.MaxSizeInBytes = newMaxSize -} - -func (m *fakeBoundedStorageClient) GetSizeInBytes() int { - m.mux.Lock() - defer m.mux.Unlock() - return m.sizeInBytes -} - -func (m *fakeBoundedStorageClient) getTotalSizeChange(ops []storage.Operation) (totalAdded int, totalRemoved int) { - totalAdded, totalRemoved = 0, 0 - for _, op := range ops { - switch op.Type { - case storage.Set: - if oldValue, ok := m.st[op.Key]; ok { - totalRemoved += len(oldValue) - } else { - totalAdded += len(op.Key) - } - totalAdded += len(op.Value) - case storage.Delete: - if value, ok := m.st[op.Key]; ok { - totalRemoved += len(op.Key) - totalRemoved += len(value) - } - default: - } - } - return totalAdded, totalRemoved -} - -func newFakeStorageClientWithErrors(errors []error) *fakeStorageClientWithErrors { - return &fakeStorageClientWithErrors{ - errors: errors, - } -} - -// this storage client just returns errors from a list in order -// used for testing error handling -type fakeStorageClientWithErrors struct { - errors []error - nextErrorIndex int - mux sync.Mutex -} - -func (m *fakeStorageClientWithErrors) Get(ctx context.Context, key string) ([]byte, error) { - op := storage.GetOperation(key) - err := m.Batch(ctx, op) - if err != nil { - return nil, err - } - - return op.Value, nil -} - -func (m *fakeStorageClientWithErrors) Set(ctx context.Context, key string, value []byte) error { - return m.Batch(ctx, storage.SetOperation(key, value)) -} - -func (m *fakeStorageClientWithErrors) Delete(ctx context.Context, key string) error { - return m.Batch(ctx, storage.DeleteOperation(key)) -} - -func (m *fakeStorageClientWithErrors) Close(context.Context) error { - return nil -} - -func (m *fakeStorageClientWithErrors) Batch(context.Context, ...storage.Operation) error { - m.mux.Lock() - defer m.mux.Unlock() - - if m.nextErrorIndex >= len(m.errors) { - return nil - } - - m.nextErrorIndex++ - return m.errors[m.nextErrorIndex-1] -} - -func (m *fakeStorageClientWithErrors) Reset() { - m.mux.Lock() - defer m.mux.Unlock() - m.nextErrorIndex = 0 -} diff --git a/vendor/go.opentelemetry.io/collector/exporter/internal/queue/persistent_queue.go b/vendor/go.opentelemetry.io/collector/exporter/internal/queue/persistent_queue.go index 07b0d1fb628..038cb09cc39 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/internal/queue/persistent_queue.go +++ b/vendor/go.opentelemetry.io/collector/exporter/internal/queue/persistent_queue.go @@ -18,6 +18,7 @@ import ( 
"go.opentelemetry.io/collector/exporter" "go.opentelemetry.io/collector/exporter/internal/experr" "go.opentelemetry.io/collector/extension/experimental/storage" + "go.opentelemetry.io/collector/pipeline" ) // persistentQueue provides a persistent queue implementation backed by file storage extension @@ -85,11 +86,11 @@ var ( type PersistentQueueSettings[T any] struct { Sizer Sizer[T] Capacity int64 - DataType component.DataType + Signal pipeline.Signal StorageID component.ID Marshaler func(req T) ([]byte, error) Unmarshaler func([]byte) (T, error) - ExporterSettings exporter.CreateSettings + ExporterSettings exporter.Settings } // NewPersistentQueue creates a new queue backed by file storage; name and signal must be a unique combination that identifies the queue storage @@ -104,7 +105,7 @@ func NewPersistentQueue[T any](set PersistentQueueSettings[T]) Queue[T] { // Start starts the persistentQueue with the given number of consumers. func (pq *persistentQueue[T]) Start(ctx context.Context, host component.Host) error { - storageClient, err := toStorageClient(ctx, pq.set.StorageID, host, pq.set.ExporterSettings.ID, pq.set.DataType) + storageClient, err := toStorageClient(ctx, pq.set.StorageID, host, pq.set.ExporterSettings.ID, pq.set.Signal) if err != nil { return err } @@ -165,6 +166,7 @@ func (pq *persistentQueue[T]) initPersistentContiguousStorage(ctx context.Contex initEls = make([]permanentQueueEl, initIndexSize) } + // nolint: gosec pq.sizedChannel = newSizedChannel[permanentQueueEl](pq.set.Capacity, initEls, int64(initQueueSize)) } @@ -188,36 +190,6 @@ func (pq *persistentQueue[T]) restoreQueueSizeFromStorage(ctx context.Context) ( return bytesToItemIndex(val) } -// Consume applies the provided function on the head of queue. -// The call blocks until there is an item available or the queue is stopped. -// The function returns true when an item is consumed or false if the queue is stopped. -func (pq *persistentQueue[T]) Consume(consumeFunc func(context.Context, T) error) bool { - for { - var ( - req T - onProcessingFinished func(error) - consumed bool - ) - - // If we are stopped we still process all the other events in the channel before, but we - // return fast in the `getNextItem`, so we will free the channel fast and get to the stop. - _, ok := pq.sizedChannel.pop(func(permanentQueueEl) int64 { - req, onProcessingFinished, consumed = pq.getNextItem(context.Background()) - if !consumed { - return 0 - } - return pq.set.Sizer.Sizeof(req) - }) - if !ok { - return false - } - if consumed { - onProcessingFinished(consumeFunc(context.Background(), req)) - return true - } - } -} - func (pq *persistentQueue[T]) Shutdown(ctx context.Context) error { // If the queue is not initialized, there is nothing to shut down. if pq.client == nil { @@ -242,6 +214,7 @@ func (pq *persistentQueue[T]) backupQueueSize(ctx context.Context) error { return nil } + // nolint: gosec return pq.client.Set(ctx, queueSizeKey, itemIndexToBytes(uint64(pq.Size()))) } @@ -302,20 +275,48 @@ func (pq *persistentQueue[T]) putInternal(ctx context.Context, req T) error { return nil } -// getNextItem pulls the next available item from the persistent storage along with a callback function that should be -// called after the item is processed to clean up the storage. If no new item is available, returns false. 
-func (pq *persistentQueue[T]) getNextItem(ctx context.Context) (T, func(error), bool) { +func (pq *persistentQueue[T]) Read(ctx context.Context) (uint64, context.Context, T, bool) { + for { + var ( + index uint64 + req T + consumed bool + ) + _, ok := pq.sizedChannel.pop(func(permanentQueueEl) int64 { + size := int64(0) + index, req, consumed = pq.getNextItem(ctx) + if consumed { + size = pq.set.Sizer.Sizeof(req) + } + return size + }) + if !ok { + return 0, nil, req, false + } + if consumed { + return index, context.TODO(), req, true + } + + // If ok && !consumed, it means we are stopped. In this case, we still process all the other events + // in the channel before, so we will free the channel fast and get to the stop. + } +} + +// getNextItem pulls the next available item from the persistent storage along with its index. Once processing is +// finished, the index should be called with OnProcessingFinished to clean up the storage. If no new item is available, +// returns false. +func (pq *persistentQueue[T]) getNextItem(ctx context.Context) (uint64, T, bool) { pq.mu.Lock() defer pq.mu.Unlock() var request T if pq.stopped { - return request, nil, false + return 0, request, false } if pq.readIndex == pq.writeIndex { - return request, nil, false + return 0, request, false } index := pq.readIndex @@ -339,45 +340,48 @@ func (pq *persistentQueue[T]) getNextItem(ctx context.Context) (T, func(error), pq.logger.Error("Error deleting item from queue", zap.Error(err)) } - return request, nil, false + return 0, request, false } // Increase the reference count, so the client is not closed while the request is being processed. // The client cannot be closed because we hold the lock since last we checked `stopped`. pq.refClient++ - return request, func(consumeErr error) { - // Delete the item from the persistent storage after it was processed. - pq.mu.Lock() - // Always unref client even if the consumer is shutdown because we always ref it for every valid request. - defer func() { - if err = pq.unrefClient(ctx); err != nil { - pq.logger.Error("Error closing the storage client", zap.Error(err)) - } - pq.mu.Unlock() - }() - if experr.IsShutdownErr(consumeErr) { - // The queue is shutting down, don't mark the item as dispatched, so it's picked up again after restart. - // TODO: Handle partially delivered requests by updating their values in the storage. - return - } + return index, request, true +} - if err = pq.itemDispatchingFinish(ctx, index); err != nil { - pq.logger.Error("Error deleting item from queue", zap.Error(err)) +// Should be called to remove the item of the given index from the queue once processing is finished. +func (pq *persistentQueue[T]) OnProcessingFinished(index uint64, consumeErr error) { + // Delete the item from the persistent storage after it was processed. + pq.mu.Lock() + // Always unref client even if the consumer is shutdown because we always ref it for every valid request. + defer func() { + if err := pq.unrefClient(context.Background()); err != nil { + pq.logger.Error("Error closing the storage client", zap.Error(err)) } + pq.mu.Unlock() + }() - // Back up the queue size to storage on every 10 reads. The stored value is used to recover the queue size - // in case if the collector is killed. The recovered queue size is allowed to be inaccurate. 
- if (pq.readIndex % 10) == 0 { - if qsErr := pq.backupQueueSize(ctx); qsErr != nil { - pq.logger.Error("Error writing queue size to storage", zap.Error(err)) - } - } + if experr.IsShutdownErr(consumeErr) { + // The queue is shutting down, don't mark the item as dispatched, so it's picked up again after restart. + // TODO: Handle partially delivered requests by updating their values in the storage. + return + } - // Ensure the used size and the channel size are in sync. - pq.sizedChannel.syncSize() + if err := pq.itemDispatchingFinish(context.Background(), index); err != nil { + pq.logger.Error("Error deleting item from queue", zap.Error(err)) + } + + // Back up the queue size to storage on every 10 reads. The stored value is used to recover the queue size + // in case if the collector is killed. The recovered queue size is allowed to be inaccurate. + if (pq.readIndex % 10) == 0 { + if qsErr := pq.backupQueueSize(context.Background()); qsErr != nil { + pq.logger.Error("Error writing queue size to storage", zap.Error(qsErr)) + } + } - }, true + // Ensure the used size and the channel size are in sync. + pq.sizedChannel.syncSize() } // retrieveAndEnqueueNotDispatchedReqs gets the items for which sending was not finished, cleans the storage @@ -485,7 +489,7 @@ func (pq *persistentQueue[T]) itemDispatchingFinish(ctx context.Context, index u return nil } -func toStorageClient(ctx context.Context, storageID component.ID, host component.Host, ownerID component.ID, signal component.DataType) (storage.Client, error) { +func toStorageClient(ctx context.Context, storageID component.ID, host component.Host, ownerID component.ID, signal pipeline.Signal) (storage.Client, error) { ext, found := host.GetExtensions()[storageID] if !found { return nil, errNoStorageClient @@ -521,6 +525,7 @@ func bytesToItemIndex(buf []byte) (uint64, error) { func itemIndexArrayToBytes(arr []uint64) []byte { size := len(arr) buf := make([]byte, 0, 4+size*8) + // nolint: gosec buf = binary.LittleEndian.AppendUint32(buf, uint32(size)) for _, item := range arr { buf = binary.LittleEndian.AppendUint64(buf, item) diff --git a/vendor/go.opentelemetry.io/collector/exporter/internal/queue/queue.go b/vendor/go.opentelemetry.io/collector/exporter/internal/queue/queue.go index 35bc504579e..77cac737f7e 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/internal/queue/queue.go +++ b/vendor/go.opentelemetry.io/collector/exporter/internal/queue/queue.go @@ -12,10 +12,8 @@ import ( "go.opentelemetry.io/collector/component" ) -var ( - // ErrQueueIsFull is the error returned when an item is offered to the Queue and the queue is full. - ErrQueueIsFull = errors.New("sending queue is full") -) +// ErrQueueIsFull is the error returned when an item is offered to the Queue and the queue is full. +var ErrQueueIsFull = errors.New("sending queue is full") // Queue defines a producer-consumer exchange which can be backed by e.g. the memory-based ring buffer queue // (boundedMemoryQueue) or via a disk-based queue (persistentQueue) @@ -25,18 +23,17 @@ type Queue[T any] interface { // without violating capacity restrictions. If success returns no error. // It returns ErrQueueIsFull if no space is currently available. Offer(ctx context.Context, item T) error - // Consume applies the provided function on the head of queue. - // The call blocks until there is an item available or the queue is stopped. - // The function returns true when an item is consumed or false if the queue is stopped. 
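With Consume removed, driving the queue becomes the caller's job: Read hands back an item plus its index, and OnProcessingFinished reports the outcome so the persistent implementation can delete the item, or keep it for redelivery when the error is a shutdown error. A rough sketch of the replacement loop, written as if it lived alongside the Queue interface (the consume callback is hypothetical; imports "context"):

func consumeLoop[T any](q Queue[T], consume func(context.Context, T) error) {
	for {
		// Blocks until an item is available or the queue is stopped.
		index, ctx, req, ok := q.Read(context.Background())
		if !ok {
			return // queue stopped
		}
		// Report the result; the persistent queue deletes the item on success
		// and re-enqueues it after restart when the error is a shutdown error.
		q.OnProcessingFinished(index, consume(ctx, req))
	}
}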
-	Consume(func(ctx context.Context, item T) error) bool
 	// Size returns the current Size of the queue
 	Size() int
 	// Capacity returns the capacity of the queue.
 	Capacity() int
-}
-
-type itemsCounter interface {
-	ItemsCount() int
+	// Read pulls the next available item from the queue along with its index. Once processing is
+	// finished, OnProcessingFinished should be called with the index to clean up the storage.
+	// The function blocks until an item is available or the queue is stopped.
+	// Returns false if reading failed or if the queue is stopped.
+	Read(context.Context) (uint64, context.Context, T, bool)
+	// OnProcessingFinished should be called to remove the item of the given index from the queue once processing is finished.
+	OnProcessingFinished(index uint64, consumeErr error)
 }
 
 // Sizer is an interface that returns the size of the given element.
@@ -44,13 +41,6 @@ type Sizer[T any] interface {
 	Sizeof(T) int64
 }
 
-// ItemsSizer is a Sizer implementation that returns the size of a queue element as the number of items it contains.
-type ItemsSizer[T itemsCounter] struct{}
-
-func (is *ItemsSizer[T]) Sizeof(el T) int64 {
-	return int64(el.ItemsCount())
-}
-
 // RequestSizer is a Sizer implementation that returns the size of a queue element as one request.
 type RequestSizer[T any] struct{}
diff --git a/vendor/go.opentelemetry.io/collector/exporter/internal/queue/sized_channel.go b/vendor/go.opentelemetry.io/collector/exporter/internal/queue/sized_channel.go
index 1702a38ac2f..f322e58c01c 100644
--- a/vendor/go.opentelemetry.io/collector/exporter/internal/queue/sized_channel.go
+++ b/vendor/go.opentelemetry.io/collector/exporter/internal/queue/sized_channel.go
@@ -55,8 +55,16 @@ func (vcq *sizedChannel[T]) push(el T, size int64, callback func() error) error
 			return err
 		}
 	}
-	vcq.ch <- el
-	return nil
+
+	select {
+	// for the persistent queue implementation, the channel length can be out of sync with the used size. Attempt to put
+	// the element into the channel. If it is full, simply return ErrQueueIsFull. This prevents potential deadlock issues.
+	case vcq.ch <- el:
+		return nil
+	default:
+		vcq.used.Add(-size)
+		return ErrQueueIsFull
+	}
 }
 
 // pop removes the element from the queue and returns it.
diff --git a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/request.go b/vendor/go.opentelemetry.io/collector/exporter/internal/request.go
similarity index 53%
rename from vendor/go.opentelemetry.io/collector/exporter/exporterhelper/request.go
rename to vendor/go.opentelemetry.io/collector/exporter/internal/request.go
index 74f0186541f..bd24d982de6 100644
--- a/vendor/go.opentelemetry.io/collector/exporter/exporterhelper/request.go
+++ b/vendor/go.opentelemetry.io/collector/exporter/internal/request.go
@@ -1,10 +1,12 @@
 // Copyright The OpenTelemetry Authors
 // SPDX-License-Identifier: Apache-2.0
 
-package exporterhelper // import "go.opentelemetry.io/collector/exporter/exporterhelper"
+package internal // import "go.opentelemetry.io/collector/exporter/internal"
 
 import (
 	"context"
+
+	"go.opentelemetry.io/collector/exporter/exporterbatcher"
 )
 
 // Request represents a single request that can be sent to an external endpoint.
@@ -17,6 +19,21 @@ type Request interface {
 	// sent. For example, for OTLP exporter, this value represents the number of spans,
 	// metric data points or log records.
 	ItemsCount() int
+	// Merge is a function that merges this request with another one into a single request.
+	// Do not mutate the requests passed to the function if an error can be returned after mutation or if the exporter is
+	// marked as not mutable.
+	// Experimental: This API is at the early stage of development and may change without backward compatibility
+	// until https://github.com/open-telemetry/opentelemetry-collector/issues/8122 is resolved.
+	Merge(context.Context, Request) (Request, error)
+	// MergeSplit is a function that merges and/or splits this request with another one into multiple requests based on the
+	// configured limit provided in MaxSizeConfig.
+	// All the returned requests MUST have a number of items that does not exceed the maximum number of items.
+	// The size of the last returned request MUST be less than or equal to the size of any other returned request.
+	// The original request MUST not be mutated if an error is returned after mutation or if the exporter is
+	// marked as not mutable. The length of the returned slice MUST not be 0.
+	// Experimental: This API is at the early stage of development and may change without backward compatibility
+	// until https://github.com/open-telemetry/opentelemetry-collector/issues/8122 is resolved.
+	MergeSplit(context.Context, exporterbatcher.MaxSizeConfig, Request) ([]Request, error)
 }
 
 // RequestErrorHandler is an optional interface that can be implemented by Request to provide a way to handle partial
@@ -31,12 +48,3 @@ type RequestErrorHandler interface {
 	// Otherwise, it should return the original Request.
 	OnError(error) Request
 }
-
-// extractPartialRequest returns a new Request that may contain the items left to be sent
-// if only some items failed to process and can be retried. Otherwise, it returns the original Request.
-func extractPartialRequest(req Request, err error) Request {
-	if errReq, ok := req.(RequestErrorHandler); ok {
-		return errReq.OnError(err)
-	}
-	return req
-}
diff --git a/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/README.md b/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/README.md
index 8926a14369b..bcee108135f 100644
--- a/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/README.md
+++ b/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/README.md
@@ -3,15 +3,19 @@
 | Status | |
 | ------------- |-----------|
-| Stability | [beta]: logs |
+| Stability | [development]: profiles |
+| | [beta]: logs |
 | | [stable]: traces, metrics |
-| Distributions | [core], [contrib] |
-| Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Aexporter%2Fotlp%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Aexporter%2Fotlp) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Aexporter%2Fotlp%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Aexporter%2Fotlp) |
+| Distributions | [core], [contrib], [k8s], [otlp] |
+| Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector?query=is%3Aissue%20is%3Aopen%20label%3Aexporter%2Fotlp%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector/issues?q=is%3Aopen+is%3Aissue+label%3Aexporter%2Fotlp) [![Closed 
issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector?query=is%3Aissue%20is%3Aclosed%20label%3Aexporter%2Fotlp%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector/issues?q=is%3Aclosed+is%3Aissue+label%3Aexporter%2Fotlp) | -[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta -[stable]: https://github.com/open-telemetry/opentelemetry-collector#stable +[development]: https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-stability.md#development +[beta]: https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-stability.md#beta +[stable]: https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-stability.md#stable [core]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol [contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib +[k8s]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-k8s +[otlp]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-otlp Export data via gRPC using [OTLP]( @@ -58,4 +62,4 @@ Several helper files are leveraged to provide additional capabilities automatica - [gRPC settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/configgrpc/README.md) - [TLS and mTLS settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/configtls/README.md) -- [Queuing, retry and timeout settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/exporterhelper/README.md) +- [Queuing, batching, retry and timeout settings](https://github.com/open-telemetry/opentelemetry-collector/blob/main/exporter/exporterhelper/README.md) diff --git a/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/cfg-schema.yaml b/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/cfg-schema.yaml index 073ec5707ee..a57fd2fd230 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/cfg-schema.yaml +++ b/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/cfg-schema.yaml @@ -7,7 +7,7 @@ fields: doc: | Timeout is the timeout for every attempt to send data to the backend. - name: sending_queue - type: exporterhelper.QueueSettings + type: exporterhelper.QueueConfig kind: struct fields: - name: enabled @@ -68,8 +68,8 @@ fields: - name: endpoint kind: string doc: | - The target to which the exporter is going to send traces or metrics, - using the gRPC protocol. The valid syntax is described at + The target to which the exporter is going to send traces, metrics, logs or + profiles using the gRPC protocol. The valid syntax is described at https://github.com/grpc/grpc/blob/master/doc/naming.md. 
- name: compression kind: string diff --git a/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/config.go b/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/config.go index 62956f293ed..b9697125377 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/config.go +++ b/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/config.go @@ -7,20 +7,26 @@ import ( "errors" "fmt" "net" + "regexp" "strconv" "strings" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config/configgrpc" "go.opentelemetry.io/collector/config/configretry" + "go.opentelemetry.io/collector/exporter/exporterbatcher" "go.opentelemetry.io/collector/exporter/exporterhelper" ) // Config defines configuration for OTLP exporter. type Config struct { - exporterhelper.TimeoutSettings `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct. - QueueConfig exporterhelper.QueueSettings `mapstructure:"sending_queue"` - RetryConfig configretry.BackOffConfig `mapstructure:"retry_on_failure"` + exporterhelper.TimeoutConfig `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct. + exporterhelper.QueueConfig `mapstructure:"sending_queue"` + RetryConfig configretry.BackOffConfig `mapstructure:"retry_on_failure"` + + // Experimental: This configuration is at the early stage of development and may change without backward compatibility + // until https://github.com/open-telemetry/opentelemetry-collector/issues/8122 is resolved + BatcherConfig exporterbatcher.Config `mapstructure:"batcher"` configgrpc.ClientConfig `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct. } @@ -49,8 +55,9 @@ func (c *Config) sanitizedEndpoint() string { return strings.TrimPrefix(c.Endpoint, "http://") case strings.HasPrefix(c.Endpoint, "https://"): return strings.TrimPrefix(c.Endpoint, "https://") - case strings.HasPrefix(c.Endpoint, "dns:///"): - return strings.TrimPrefix(c.Endpoint, "dns:///") + case strings.HasPrefix(c.Endpoint, "dns://"): + r := regexp.MustCompile("^dns://[/]?") + return r.ReplaceAllString(c.Endpoint, "") default: return c.Endpoint } diff --git a/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/doc.go b/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/doc.go index 3f09a753285..9f87b392974 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/doc.go +++ b/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/doc.go @@ -3,5 +3,5 @@ //go:generate mdatagen metadata.yaml -// Package otlpexporter exports data by using the OTLP format to a gPRC endpoint. +// Package otlpexporter exports data by using the OTLP format to a gRPC endpoint. 
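The sanitizedEndpoint change above swaps the exact "dns:///" prefix match for a regexp, so gRPC dns targets that carry an explicit resolver authority also lose their scheme. A standalone illustration using the same pattern (the endpoint values are made up):

package main

import (
	"fmt"
	"regexp"
)

// Same pattern as the new sanitizedEndpoint branch: "dns://" plus an optional
// third slash (the slash is present when no resolver authority is given).
var dnsScheme = regexp.MustCompile("^dns://[/]?")

func main() {
	fmt.Println(dnsScheme.ReplaceAllString("dns:///backend.example:4317", ""))
	// backend.example:4317 (authority-less form, handled before and after the change)
	fmt.Println(dnsScheme.ReplaceAllString("dns://resolver.example:53/backend.example:4317", ""))
	// resolver.example:53/backend.example:4317 (authority form, newly handled)
}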
package otlpexporter // import "go.opentelemetry.io/collector/exporter/otlpexporter" diff --git a/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/factory.go b/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/factory.go index 8a72aab8cdc..52133573de0 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/factory.go +++ b/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/factory.go @@ -13,26 +13,34 @@ import ( "go.opentelemetry.io/collector/config/configretry" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/exporter" + "go.opentelemetry.io/collector/exporter/exporterbatcher" "go.opentelemetry.io/collector/exporter/exporterhelper" + "go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper" "go.opentelemetry.io/collector/exporter/otlpexporter/internal/metadata" + "go.opentelemetry.io/collector/exporter/xexporter" ) // NewFactory creates a factory for OTLP exporter. func NewFactory() exporter.Factory { - return exporter.NewFactory( + return xexporter.NewFactory( metadata.Type, createDefaultConfig, - exporter.WithTraces(createTracesExporter, metadata.TracesStability), - exporter.WithMetrics(createMetricsExporter, metadata.MetricsStability), - exporter.WithLogs(createLogsExporter, metadata.LogsStability), + xexporter.WithTraces(createTraces, metadata.TracesStability), + xexporter.WithMetrics(createMetrics, metadata.MetricsStability), + xexporter.WithLogs(createLogs, metadata.LogsStability), + xexporter.WithProfiles(createProfilesExporter, metadata.ProfilesStability), ) } func createDefaultConfig() component.Config { + batcherCfg := exporterbatcher.NewDefaultConfig() + batcherCfg.Enabled = false + return &Config{ - TimeoutSettings: exporterhelper.NewDefaultTimeoutSettings(), - RetryConfig: configretry.NewDefaultBackOffConfig(), - QueueConfig: exporterhelper.NewDefaultQueueSettings(), + TimeoutConfig: exporterhelper.NewDefaultTimeoutConfig(), + RetryConfig: configretry.NewDefaultBackOffConfig(), + QueueConfig: exporterhelper.NewDefaultQueueConfig(), + BatcherConfig: batcherCfg, ClientConfig: configgrpc.ClientConfig{ Headers: map[string]configopaque.String{}, // Default to gzip compression @@ -43,54 +51,77 @@ func createDefaultConfig() component.Config { } } -func createTracesExporter( +func createTraces( ctx context.Context, - set exporter.CreateSettings, + set exporter.Settings, cfg component.Config, ) (exporter.Traces, error) { oce := newExporter(cfg, set) oCfg := cfg.(*Config) - return exporterhelper.NewTracesExporter(ctx, set, cfg, + return exporterhelper.NewTraces(ctx, set, cfg, oce.pushTraces, exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}), - exporterhelper.WithTimeout(oCfg.TimeoutSettings), + exporterhelper.WithTimeout(oCfg.TimeoutConfig), exporterhelper.WithRetry(oCfg.RetryConfig), exporterhelper.WithQueue(oCfg.QueueConfig), + exporterhelper.WithBatcher(oCfg.BatcherConfig), exporterhelper.WithStart(oce.start), - exporterhelper.WithShutdown(oce.shutdown)) + exporterhelper.WithShutdown(oce.shutdown), + ) } -func createMetricsExporter( +func createMetrics( ctx context.Context, - set exporter.CreateSettings, + set exporter.Settings, cfg component.Config, ) (exporter.Metrics, error) { oce := newExporter(cfg, set) oCfg := cfg.(*Config) - return exporterhelper.NewMetricsExporter(ctx, set, cfg, + return exporterhelper.NewMetrics(ctx, set, cfg, oce.pushMetrics, exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}), - exporterhelper.WithTimeout(oCfg.TimeoutSettings), + 
exporterhelper.WithTimeout(oCfg.TimeoutConfig), exporterhelper.WithRetry(oCfg.RetryConfig), exporterhelper.WithQueue(oCfg.QueueConfig), + exporterhelper.WithBatcher(oCfg.BatcherConfig), exporterhelper.WithStart(oce.start), exporterhelper.WithShutdown(oce.shutdown), ) } -func createLogsExporter( +func createLogs( ctx context.Context, - set exporter.CreateSettings, + set exporter.Settings, cfg component.Config, ) (exporter.Logs, error) { oce := newExporter(cfg, set) oCfg := cfg.(*Config) - return exporterhelper.NewLogsExporter(ctx, set, cfg, + return exporterhelper.NewLogs(ctx, set, cfg, oce.pushLogs, exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}), - exporterhelper.WithTimeout(oCfg.TimeoutSettings), + exporterhelper.WithTimeout(oCfg.TimeoutConfig), + exporterhelper.WithRetry(oCfg.RetryConfig), + exporterhelper.WithQueue(oCfg.QueueConfig), + exporterhelper.WithBatcher(oCfg.BatcherConfig), + exporterhelper.WithStart(oce.start), + exporterhelper.WithShutdown(oce.shutdown), + ) +} + +func createProfilesExporter( + ctx context.Context, + set exporter.Settings, + cfg component.Config, +) (xexporter.Profiles, error) { + oce := newExporter(cfg, set) + oCfg := cfg.(*Config) + return xexporterhelper.NewProfilesExporter(ctx, set, cfg, + oce.pushProfiles, + exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}), + exporterhelper.WithTimeout(oCfg.TimeoutConfig), exporterhelper.WithRetry(oCfg.RetryConfig), exporterhelper.WithQueue(oCfg.QueueConfig), + exporterhelper.WithBatcher(oCfg.BatcherConfig), exporterhelper.WithStart(oce.start), exporterhelper.WithShutdown(oce.shutdown), ) diff --git a/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/internal/metadata/generated_status.go b/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/internal/metadata/generated_status.go index c9cff844fa2..82f0f448a86 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/internal/metadata/generated_status.go +++ b/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/internal/metadata/generated_status.go @@ -7,11 +7,13 @@ import ( ) var ( - Type = component.MustNewType("otlp") + Type = component.MustNewType("otlp") + ScopeName = "go.opentelemetry.io/collector/exporter/otlpexporter" ) const ( - LogsStability = component.StabilityLevelBeta - TracesStability = component.StabilityLevelStable - MetricsStability = component.StabilityLevelStable + ProfilesStability = component.StabilityLevelDevelopment + LogsStability = component.StabilityLevelBeta + TracesStability = component.StabilityLevelStable + MetricsStability = component.StabilityLevelStable ) diff --git a/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/metadata.yaml b/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/metadata.yaml index f24e40e1f91..0c909a5496e 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/metadata.yaml +++ b/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/metadata.yaml @@ -1,12 +1,14 @@ type: otlp +github_project: open-telemetry/opentelemetry-collector status: class: exporter stability: stable: [traces, metrics] beta: [logs] - distributions: [core, contrib] + development: [profiles] + distributions: [core, contrib, k8s, otlp] tests: config: - endpoint: otelcol:4317 \ No newline at end of file + endpoint: otelcol:4317 diff --git a/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/otlp.go b/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/otlp.go index 21864d7723a..20bf32f8411 100644 --- 
a/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/otlp.go +++ b/vendor/go.opentelemetry.io/collector/exporter/otlpexporter/otlp.go @@ -17,6 +17,7 @@ import ( "google.golang.org/grpc/status" "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configgrpc" "go.opentelemetry.io/collector/consumer/consumererror" "go.opentelemetry.io/collector/exporter" "go.opentelemetry.io/collector/exporter/exporterhelper" @@ -24,6 +25,8 @@ import ( "go.opentelemetry.io/collector/pdata/plog/plogotlp" "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp" + "go.opentelemetry.io/collector/pdata/pprofile" + "go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp" "go.opentelemetry.io/collector/pdata/ptrace" "go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp" ) @@ -33,12 +36,13 @@ type baseExporter struct { config *Config // gRPC clients and connection. - traceExporter ptraceotlp.GRPCClient - metricExporter pmetricotlp.GRPCClient - logExporter plogotlp.GRPCClient - clientConn *grpc.ClientConn - metadata metadata.MD - callOptions []grpc.CallOption + traceExporter ptraceotlp.GRPCClient + metricExporter pmetricotlp.GRPCClient + logExporter plogotlp.GRPCClient + profileExporter pprofileotlp.GRPCClient + clientConn *grpc.ClientConn + metadata metadata.MD + callOptions []grpc.CallOption settings component.TelemetrySettings @@ -46,7 +50,7 @@ type baseExporter struct { userAgent string } -func newExporter(cfg component.Config, set exporter.CreateSettings) *baseExporter { +func newExporter(cfg component.Config, set exporter.Settings) *baseExporter { oCfg := cfg.(*Config) userAgent := fmt.Sprintf("%s/%s (%s/%s)", @@ -58,12 +62,14 @@ func newExporter(cfg component.Config, set exporter.CreateSettings) *baseExporte // start actually creates the gRPC connection. The client construction is deferred till this point as this // is the only place we get hold of Extensions which are required to construct auth round tripper. func (e *baseExporter) start(ctx context.Context, host component.Host) (err error) { - if e.clientConn, err = e.config.ClientConfig.ToClientConn(ctx, host, e.settings, grpc.WithUserAgent(e.userAgent)); err != nil { + agentOpt := configgrpc.WithGrpcDialOption(grpc.WithUserAgent(e.userAgent)) + if e.clientConn, err = e.config.ClientConfig.ToClientConn(ctx, host, e.settings, agentOpt); err != nil { return err } e.traceExporter = ptraceotlp.NewGRPCClient(e.clientConn) e.metricExporter = pmetricotlp.NewGRPCClient(e.clientConn) e.logExporter = plogotlp.NewGRPCClient(e.clientConn) + e.profileExporter = pprofileotlp.NewGRPCClient(e.clientConn) headers := map[string]string{} for k, v := range e.config.ClientConfig.Headers { headers[k] = string(v) @@ -131,6 +137,22 @@ func (e *baseExporter) pushLogs(ctx context.Context, ld plog.Logs) error { return nil } +func (e *baseExporter) pushProfiles(ctx context.Context, td pprofile.Profiles) error { + req := pprofileotlp.NewExportRequestFromProfiles(td) + resp, respErr := e.profileExporter.Export(e.enhanceContext(ctx), req, e.callOptions...) 
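// Note on the flow below: the RPC can succeed overall while the server rejects
// some profiles. processError filters transport-level failures first; a
// non-empty PartialSuccess is then only logged as a warning and nil is
// returned, so a partially accepted batch is not retried.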
+ if err := processError(respErr); err != nil { + return err + } + partialSuccess := resp.PartialSuccess() + if !(partialSuccess.ErrorMessage() == "" && partialSuccess.RejectedProfiles() == 0) { + e.settings.Logger.Warn("Partial success response", + zap.String("message", resp.PartialSuccess().ErrorMessage()), + zap.Int64("dropped_profiles", resp.PartialSuccess().RejectedProfiles()), + ) + } + return nil +} + func (e *baseExporter) enhanceContext(ctx context.Context) context.Context { if e.metadata.Len() > 0 { return metadata.NewOutgoingContext(ctx, e.metadata) @@ -151,8 +173,7 @@ func processError(err error) error { return nil } - // Now, this is this a real error. - + // Now, this is a real error. retryInfo := getRetryInfo(st) if !shouldRetry(st.Code(), retryInfo) { @@ -168,7 +189,6 @@ func processError(err error) error { } // Need to retry. - return err } diff --git a/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/README.md b/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/README.md index fb15c0dab7a..f4adf1731c7 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/README.md +++ b/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/README.md @@ -3,15 +3,19 @@ | Status | | | ------------- |-----------| -| Stability | [beta]: logs | +| Stability | [development]: profiles | +| | [beta]: logs | | | [stable]: traces, metrics | -| Distributions | [core], [contrib] | -| Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Aexporter%2Fotlphttp%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Aexporter%2Fotlphttp) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Aexporter%2Fotlphttp%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Aexporter%2Fotlphttp) | +| Distributions | [core], [contrib], [k8s], [otlp] | +| Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector?query=is%3Aissue%20is%3Aopen%20label%3Aexporter%2Fotlphttp%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector/issues?q=is%3Aopen+is%3Aissue+label%3Aexporter%2Fotlphttp) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector?query=is%3Aissue%20is%3Aclosed%20label%3Aexporter%2Fotlphttp%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector/issues?q=is%3Aclosed+is%3Aissue+label%3Aexporter%2Fotlphttp) | -[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta -[stable]: https://github.com/open-telemetry/opentelemetry-collector#stable +[development]: https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-stability.md#development +[beta]: https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-stability.md#beta +[stable]: https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-stability.md#stable [core]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol [contrib]: 
https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib +[k8s]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-k8s +[otlp]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-otlp Export traces and/or metrics via HTTP using [OTLP]( diff --git a/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/config.go b/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/config.go index ef59fc324a0..f3a70fa2337 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/config.go +++ b/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/config.go @@ -45,9 +45,9 @@ func (e *EncodingType) UnmarshalText(text []byte) error { // Config defines configuration for OTLP/HTTP exporter. type Config struct { - confighttp.ClientConfig `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct. - QueueConfig exporterhelper.QueueSettings `mapstructure:"sending_queue"` - RetryConfig configretry.BackOffConfig `mapstructure:"retry_on_failure"` + confighttp.ClientConfig `mapstructure:",squash"` // squash ensures fields are correctly decoded in embedded struct. + exporterhelper.QueueConfig `mapstructure:"sending_queue"` + RetryConfig configretry.BackOffConfig `mapstructure:"retry_on_failure"` // The URL to send traces to. If omitted the Endpoint + "/v1/traces" will be used. TracesEndpoint string `mapstructure:"traces_endpoint"` diff --git a/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/factory.go b/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/factory.go index 9ebcc01fba1..01fcc0436e4 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/factory.go +++ b/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/factory.go @@ -13,43 +13,49 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config/configcompression" "go.opentelemetry.io/collector/config/confighttp" - "go.opentelemetry.io/collector/config/configopaque" "go.opentelemetry.io/collector/config/configretry" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/exporter" "go.opentelemetry.io/collector/exporter/exporterhelper" + "go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper" "go.opentelemetry.io/collector/exporter/otlphttpexporter/internal/metadata" + "go.opentelemetry.io/collector/exporter/xexporter" ) // NewFactory creates a factory for OTLP exporter. func NewFactory() exporter.Factory { - return exporter.NewFactory( + return xexporter.NewFactory( metadata.Type, createDefaultConfig, - exporter.WithTraces(createTracesExporter, metadata.TracesStability), - exporter.WithMetrics(createMetricsExporter, metadata.MetricsStability), - exporter.WithLogs(createLogsExporter, metadata.LogsStability), + xexporter.WithTraces(createTraces, metadata.TracesStability), + xexporter.WithMetrics(createMetrics, metadata.MetricsStability), + xexporter.WithLogs(createLogs, metadata.LogsStability), + xexporter.WithProfiles(createProfiles, metadata.ProfilesStability), ) } func createDefaultConfig() component.Config { + clientConfig := confighttp.NewDefaultClientConfig() + clientConfig.Timeout = 30 * time.Second + // Default to gzip compression + clientConfig.Compression = configcompression.TypeGzip + // We almost read 0 bytes, so no need to tune ReadBufferSize. 
+ clientConfig.WriteBufferSize = 512 * 1024 + return &Config{ - RetryConfig: configretry.NewDefaultBackOffConfig(), - QueueConfig: exporterhelper.NewDefaultQueueSettings(), - Encoding: EncodingProto, - ClientConfig: confighttp.ClientConfig{ - Endpoint: "", - Timeout: 30 * time.Second, - Headers: map[string]configopaque.String{}, - // Default to gzip compression - Compression: configcompression.TypeGzip, - // We almost read 0 bytes, so no need to tune ReadBufferSize. - WriteBufferSize: 512 * 1024, - }, + RetryConfig: configretry.NewDefaultBackOffConfig(), + QueueConfig: exporterhelper.NewDefaultQueueConfig(), + Encoding: EncodingProto, + ClientConfig: clientConfig, } } -func composeSignalURL(oCfg *Config, signalOverrideURL string, signalName string) (string, error) { +// composeSignalURL composes the final URL for the signal (traces, metrics, logs) based on the configuration. +// oCfg is the configuration of the exporter. +// signalOverrideURL is the URL specified in the signal specific configuration (empty if not specified). +// signalName is the name of the signal, e.g. "traces", "metrics", "logs". +// signalVersion is the version of the signal, e.g. "v1" or "v1development". +func composeSignalURL(oCfg *Config, signalOverrideURL string, signalName string, signalVersion string) (string, error) { switch { case signalOverrideURL != "": _, err := url.Parse(signalOverrideURL) @@ -61,15 +67,15 @@ func composeSignalURL(oCfg *Config, signalOverrideURL string, signalName string) return "", fmt.Errorf("either endpoint or %s_endpoint must be specified", signalName) default: if strings.HasSuffix(oCfg.Endpoint, "/") { - return oCfg.Endpoint + "v1/" + signalName, nil + return oCfg.Endpoint + signalVersion + "/" + signalName, nil } - return oCfg.Endpoint + "/v1/" + signalName, nil + return oCfg.Endpoint + "/" + signalVersion + "/" + signalName, nil } } -func createTracesExporter( +func createTraces( ctx context.Context, - set exporter.CreateSettings, + set exporter.Settings, cfg component.Config, ) (exporter.Traces, error) { oce, err := newExporter(cfg, set) @@ -78,24 +84,24 @@ func createTracesExporter( } oCfg := cfg.(*Config) - oce.tracesURL, err = composeSignalURL(oCfg, oCfg.TracesEndpoint, "traces") + oce.tracesURL, err = composeSignalURL(oCfg, oCfg.TracesEndpoint, "traces", "v1") if err != nil { return nil, err } - return exporterhelper.NewTracesExporter(ctx, set, cfg, + return exporterhelper.NewTraces(ctx, set, cfg, oce.pushTraces, exporterhelper.WithStart(oce.start), exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}), // explicitly disable since we rely on http.Client timeout logic. 
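// Hedged examples of the new signalVersion parameter in action (the endpoint
// values are made up). With Endpoint set to "http://otelcol:4318":
//
//	composeSignalURL(cfg, "", "traces", "v1")              // "http://otelcol:4318/v1/traces"
//	composeSignalURL(cfg, "", "profiles", "v1development") // "http://otelcol:4318/v1development/profiles"
//
// A non-empty signal override URL is validated and returned unchanged.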
- exporterhelper.WithTimeout(exporterhelper.TimeoutSettings{Timeout: 0}), + exporterhelper.WithTimeout(exporterhelper.TimeoutConfig{Timeout: 0}), exporterhelper.WithRetry(oCfg.RetryConfig), exporterhelper.WithQueue(oCfg.QueueConfig)) } -func createMetricsExporter( +func createMetrics( ctx context.Context, - set exporter.CreateSettings, + set exporter.Settings, cfg component.Config, ) (exporter.Metrics, error) { oce, err := newExporter(cfg, set) @@ -104,24 +110,24 @@ func createMetricsExporter( } oCfg := cfg.(*Config) - oce.metricsURL, err = composeSignalURL(oCfg, oCfg.MetricsEndpoint, "metrics") + oce.metricsURL, err = composeSignalURL(oCfg, oCfg.MetricsEndpoint, "metrics", "v1") if err != nil { return nil, err } - return exporterhelper.NewMetricsExporter(ctx, set, cfg, + return exporterhelper.NewMetrics(ctx, set, cfg, oce.pushMetrics, exporterhelper.WithStart(oce.start), exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}), // explicitly disable since we rely on http.Client timeout logic. - exporterhelper.WithTimeout(exporterhelper.TimeoutSettings{Timeout: 0}), + exporterhelper.WithTimeout(exporterhelper.TimeoutConfig{Timeout: 0}), exporterhelper.WithRetry(oCfg.RetryConfig), exporterhelper.WithQueue(oCfg.QueueConfig)) } -func createLogsExporter( +func createLogs( ctx context.Context, - set exporter.CreateSettings, + set exporter.Settings, cfg component.Config, ) (exporter.Logs, error) { oce, err := newExporter(cfg, set) @@ -129,18 +135,43 @@ func createLogsExporter( return nil, err } oCfg := cfg.(*Config) - - oce.logsURL, err = composeSignalURL(oCfg, oCfg.LogsEndpoint, "logs") + oce.logsURL, err = composeSignalURL(oCfg, oCfg.LogsEndpoint, "logs", "v1") if err != nil { return nil, err } - return exporterhelper.NewLogsExporter(ctx, set, cfg, + return exporterhelper.NewLogs(ctx, set, cfg, oce.pushLogs, exporterhelper.WithStart(oce.start), exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}), // explicitly disable since we rely on http.Client timeout logic. - exporterhelper.WithTimeout(exporterhelper.TimeoutSettings{Timeout: 0}), + exporterhelper.WithTimeout(exporterhelper.TimeoutConfig{Timeout: 0}), + exporterhelper.WithRetry(oCfg.RetryConfig), + exporterhelper.WithQueue(oCfg.QueueConfig)) +} + +func createProfiles( + ctx context.Context, + set exporter.Settings, + cfg component.Config, +) (xexporter.Profiles, error) { + oce, err := newExporter(cfg, set) + if err != nil { + return nil, err + } + oCfg := cfg.(*Config) + + oce.profilesURL, err = composeSignalURL(oCfg, "", "profiles", "v1development") + if err != nil { + return nil, err + } + + return xexporterhelper.NewProfilesExporter(ctx, set, cfg, + oce.pushProfiles, + exporterhelper.WithStart(oce.start), + exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}), + // explicitly disable since we rely on http.Client timeout logic. 
+ exporterhelper.WithTimeout(exporterhelper.TimeoutConfig{Timeout: 0}), exporterhelper.WithRetry(oCfg.RetryConfig), exporterhelper.WithQueue(oCfg.QueueConfig)) } diff --git a/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/internal/metadata/generated_status.go b/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/internal/metadata/generated_status.go index 8af2905a456..07bd3d1d7a6 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/internal/metadata/generated_status.go +++ b/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/internal/metadata/generated_status.go @@ -7,11 +7,13 @@ import ( ) var ( - Type = component.MustNewType("otlphttp") + Type = component.MustNewType("otlphttp") + ScopeName = "go.opentelemetry.io/collector/exporter/otlphttpexporter" ) const ( - LogsStability = component.StabilityLevelBeta - TracesStability = component.StabilityLevelStable - MetricsStability = component.StabilityLevelStable + ProfilesStability = component.StabilityLevelDevelopment + LogsStability = component.StabilityLevelBeta + TracesStability = component.StabilityLevelStable + MetricsStability = component.StabilityLevelStable ) diff --git a/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/metadata.yaml b/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/metadata.yaml index 5e1c41d3243..5e3b73fd355 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/metadata.yaml +++ b/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/metadata.yaml @@ -1,13 +1,15 @@ type: otlphttp +github_project: open-telemetry/opentelemetry-collector status: class: exporter stability: stable: [traces, metrics] beta: [logs] - distributions: [core, contrib] + development: [profiles] + distributions: [core, contrib, k8s, otlp] tests: config: endpoint: "https://1.2.3.4:1234" - + diff --git a/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/otlp.go b/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/otlp.go index ea02be512f4..0decd18315a 100644 --- a/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/otlp.go +++ b/vendor/go.opentelemetry.io/collector/exporter/otlphttpexporter/otlp.go @@ -28,19 +28,22 @@ import ( "go.opentelemetry.io/collector/pdata/plog/plogotlp" "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp" + "go.opentelemetry.io/collector/pdata/pprofile" + "go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp" "go.opentelemetry.io/collector/pdata/ptrace" "go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp" ) type baseExporter struct { // Input configuration. - config *Config - client *http.Client - tracesURL string - metricsURL string - logsURL string - logger *zap.Logger - settings component.TelemetrySettings + config *Config + client *http.Client + tracesURL string + metricsURL string + logsURL string + profilesURL string + logger *zap.Logger + settings component.TelemetrySettings // Default user-agent header. userAgent string } @@ -54,7 +57,7 @@ const ( ) // Create new exporter. 
-func newExporter(cfg component.Config, set exporter.CreateSettings) (*baseExporter, error) { +func newExporter(cfg component.Config, set exporter.Settings) (*baseExporter, error) { oCfg := cfg.(*Config) if oCfg.Endpoint != "" { @@ -149,6 +152,27 @@ func (e *baseExporter) pushLogs(ctx context.Context, ld plog.Logs) error { return e.export(ctx, e.logsURL, request, e.logsPartialSuccessHandler) } +func (e *baseExporter) pushProfiles(ctx context.Context, td pprofile.Profiles) error { + tr := pprofileotlp.NewExportRequestFromProfiles(td) + + var err error + var request []byte + switch e.config.Encoding { + case EncodingJSON: + request, err = tr.MarshalJSON() + case EncodingProto: + request, err = tr.MarshalProto() + default: + err = fmt.Errorf("invalid encoding: %s", e.config.Encoding) + } + + if err != nil { + return consumererror.NewPermanent(err) + } + + return e.export(ctx, e.profilesURL, request, e.profilesPartialSuccessHandler) +} + func (e *baseExporter) export(ctx context.Context, url string, request []byte, partialSuccessHandler partialSuccessHandler) error { e.logger.Debug("Preparing to make HTTP request", zap.String("url", url)) req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewReader(request)) @@ -392,3 +416,33 @@ func (e *baseExporter) logsPartialSuccessHandler(protoBytes []byte, contentType } return nil } + +func (e *baseExporter) profilesPartialSuccessHandler(protoBytes []byte, contentType string) error { + if protoBytes == nil { + return nil + } + exportResponse := pprofileotlp.NewExportResponse() + switch contentType { + case protobufContentType: + err := exportResponse.UnmarshalProto(protoBytes) + if err != nil { + return fmt.Errorf("error parsing protobuf response: %w", err) + } + case jsonContentType: + err := exportResponse.UnmarshalJSON(protoBytes) + if err != nil { + return fmt.Errorf("error parsing json response: %w", err) + } + default: + return nil + } + + partialSuccess := exportResponse.PartialSuccess() + if !(partialSuccess.ErrorMessage() == "" && partialSuccess.RejectedProfiles() == 0) { + e.logger.Warn("Partial success response", + zap.String("message", exportResponse.PartialSuccess().ErrorMessage()), + zap.Int64("dropped_samples", exportResponse.PartialSuccess().RejectedProfiles()), + ) + } + return nil +} diff --git a/vendor/go.opentelemetry.io/collector/exporter/xexporter/LICENSE b/vendor/go.opentelemetry.io/collector/exporter/xexporter/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/exporter/xexporter/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/go.opentelemetry.io/collector/exporter/xexporter/Makefile b/vendor/go.opentelemetry.io/collector/exporter/xexporter/Makefile
new file mode 100644
index 00000000000..ded7a36092d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/exporter/xexporter/Makefile
@@ -0,0 +1 @@
+include ../../Makefile.Common
diff --git a/vendor/go.opentelemetry.io/collector/exporter/xexporter/exporter.go b/vendor/go.opentelemetry.io/collector/exporter/xexporter/exporter.go
new file mode 100644
index 00000000000..7d83e92c8a6
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/exporter/xexporter/exporter.go
@@ -0,0 +1,109 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package xexporter // import "go.opentelemetry.io/collector/exporter/xexporter"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/consumer/xconsumer"
+	"go.opentelemetry.io/collector/exporter"
+	"go.opentelemetry.io/collector/pipeline"
+)
+
+// Profiles is an exporter that can consume profiles.
+type Profiles interface {
+	component.Component
+	xconsumer.Profiles
+}
+
+type Factory interface {
+	exporter.Factory
+
+	// CreateProfiles creates a Profiles exporter based on this config.
+	// If the exporter type does not support profiles,
+	// this function returns the error [pipeline.ErrSignalNotSupported].
+	CreateProfiles(ctx context.Context, set exporter.Settings, cfg component.Config) (Profiles, error)
+
+	// ProfilesStability gets the stability level of the Profiles exporter.
+	ProfilesStability() component.StabilityLevel
+}
+
+// FactoryOption applies changes to the exporter factory options.
+type FactoryOption interface {
+	// applyOption applies the option.
+	applyOption(o *factoryOpts)
+}
+
+// factoryOptionFunc is a FactoryOption created through a function.
+type factoryOptionFunc func(*factoryOpts)
+
+func (f factoryOptionFunc) applyOption(o *factoryOpts) {
+	f(o)
+}
+
+type factoryOpts struct {
+	opts []exporter.FactoryOption
+	*factory
+}
+
+// CreateProfilesFunc is the equivalent of Factory.CreateProfiles.
+type CreateProfilesFunc func(context.Context, exporter.Settings, component.Config) (Profiles, error)
+
+// CreateProfiles implements Factory.CreateProfiles.
+func (f CreateProfilesFunc) CreateProfiles(ctx context.Context, set exporter.Settings, cfg component.Config) (Profiles, error) {
+	if f == nil {
+		return nil, pipeline.ErrSignalNotSupported
+	}
+	return f(ctx, set, cfg)
+}
+
+// WithTraces overrides the default "error not supported" implementation for CreateTraces and the default "undefined" stability level.
+func WithTraces(createTraces exporter.CreateTracesFunc, sl component.StabilityLevel) FactoryOption {
+	return factoryOptionFunc(func(o *factoryOpts) {
+		o.opts = append(o.opts, exporter.WithTraces(createTraces, sl))
+	})
+}
+
+// WithMetrics overrides the default "error not supported" implementation for CreateMetrics and the default "undefined" stability level.
+func WithMetrics(createMetrics exporter.CreateMetricsFunc, sl component.StabilityLevel) FactoryOption {
+	return factoryOptionFunc(func(o *factoryOpts) {
+		o.opts = append(o.opts, exporter.WithMetrics(createMetrics, sl))
+	})
+}
+
+// WithLogs overrides the default "error not supported" implementation for CreateLogs and the default "undefined" stability level.
+func WithLogs(createLogs exporter.CreateLogsFunc, sl component.StabilityLevel) FactoryOption {
+	return factoryOptionFunc(func(o *factoryOpts) {
+		o.opts = append(o.opts, exporter.WithLogs(createLogs, sl))
+	})
+}
+
+// WithProfiles overrides the default "error not supported" implementation for CreateProfiles and the default "undefined" stability level.
+func WithProfiles(createProfiles CreateProfilesFunc, sl component.StabilityLevel) FactoryOption {
+	return factoryOptionFunc(func(o *factoryOpts) {
+		o.profilesStabilityLevel = sl
+		o.CreateProfilesFunc = createProfiles
+	})
+}
+
+type factory struct {
+	exporter.Factory
+	CreateProfilesFunc
+	profilesStabilityLevel component.StabilityLevel
+}
+
+func (f *factory) ProfilesStability() component.StabilityLevel {
+	return f.profilesStabilityLevel
+}
+
+// NewFactory returns a Factory.
+func NewFactory(cfgType component.Type, createDefaultConfig component.CreateDefaultConfigFunc, options ...FactoryOption) Factory {
+	opts := factoryOpts{factory: &factory{}}
+	for _, opt := range options {
+		opt.applyOption(&opts)
+	}
+	opts.factory.Factory = exporter.NewFactory(cfgType, createDefaultConfig, opts.opts...)
+	return opts.factory
+}
diff --git a/vendor/go.opentelemetry.io/collector/extension/README.md b/vendor/go.opentelemetry.io/collector/extension/README.md
index 4f9661e1414..da453dac647 100644
--- a/vendor/go.opentelemetry.io/collector/extension/README.md
+++ b/vendor/go.opentelemetry.io/collector/extension/README.md
@@ -10,7 +10,6 @@ performance profile.
 
 Supported service extensions (sorted alphabetically):
 
-- [Memory Ballast](ballastextension/README.md)
 - [zPages](zpagesextension/README.md)
 
 The [contributors
@@ -28,5 +27,5 @@ will be shutdown. The ordering is determined in the `extensions` tag under the
 service:
   # Extensions specified below are going to be loaded by the service in the
   # order given below, and shutdown on reverse order.
-  extensions: [memory_ballast, zpages]
+  extensions: [extension1, extension2]
 ```
diff --git a/vendor/go.opentelemetry.io/collector/extension/auth/client.go b/vendor/go.opentelemetry.io/collector/extension/auth/client.go
index d1855d8aece..03eba4ed336 100644
--- a/vendor/go.opentelemetry.io/collector/extension/auth/client.go
+++ b/vendor/go.opentelemetry.io/collector/extension/auth/client.go
@@ -26,7 +26,15 @@ type Client interface {
 }
 
 // ClientOption represents the possible options for NewClient.
-type ClientOption func(*defaultClient)
+type ClientOption interface {
+	apply(*defaultClient)
+}
+
+type clientOptionFunc func(*defaultClient)
+
+func (of clientOptionFunc) apply(e *defaultClient) {
+	of(e)
+}
 
 // ClientRoundTripperFunc specifies the function that returns a RoundTripper that can be used to authenticate HTTP requests.
 type ClientRoundTripperFunc func(base http.RoundTripper) (http.RoundTripper, error)
@@ -58,33 +66,33 @@ type defaultClient struct {
 
 // WithClientStart overrides the default `Start` function for a component.Component.
 // The default always returns nil.
 func WithClientStart(startFunc component.StartFunc) ClientOption {
-	return func(o *defaultClient) {
+	return clientOptionFunc(func(o *defaultClient) {
 		o.StartFunc = startFunc
-	}
+	})
 }
 
 // WithClientShutdown overrides the default `Shutdown` function for a component.Component.
 // The default always returns nil.
func WithClientShutdown(shutdownFunc component.ShutdownFunc) ClientOption { - return func(o *defaultClient) { + return clientOptionFunc(func(o *defaultClient) { o.ShutdownFunc = shutdownFunc - } + }) } // WithClientRoundTripper provides a `RoundTripper` function for this client authenticator. // The default round tripper is no-op. func WithClientRoundTripper(roundTripperFunc ClientRoundTripperFunc) ClientOption { - return func(o *defaultClient) { + return clientOptionFunc(func(o *defaultClient) { o.ClientRoundTripperFunc = roundTripperFunc - } + }) } // WithClientPerRPCCredentials provides a `PerRPCCredentials` function for this client authenticator. // There's no default. func WithClientPerRPCCredentials(perRPCCredentialsFunc ClientPerRPCCredentialsFunc) ClientOption { - return func(o *defaultClient) { + return clientOptionFunc(func(o *defaultClient) { o.ClientPerRPCCredentialsFunc = perRPCCredentialsFunc - } + }) } // NewClient returns a Client configured with the provided options. @@ -92,7 +100,7 @@ func NewClient(options ...ClientOption) Client { bc := &defaultClient{} for _, op := range options { - op(bc) + op.apply(bc) } return bc diff --git a/vendor/go.opentelemetry.io/collector/extension/auth/server.go b/vendor/go.opentelemetry.io/collector/extension/auth/server.go index 6e552e4285d..256dc51ce9c 100644 --- a/vendor/go.opentelemetry.io/collector/extension/auth/server.go +++ b/vendor/go.opentelemetry.io/collector/extension/auth/server.go @@ -18,7 +18,7 @@ import ( type Server interface { extension.Extension - // Authenticate checks whether the given headers map contains valid auth data. Successfully authenticated calls will always return a nil error. + // Authenticate checks whether the given map contains valid auth data. Successfully authenticated calls will always return a nil error. // When the authentication fails, an error must be returned and the caller must not retry. This function is typically called from interceptors, // on behalf of receivers, but receivers can still call this directly if the usage of interceptors isn't suitable. // The deadline and cancellation given to this function must be respected, but note that authentication data has to be part of the map, not context. @@ -26,7 +26,7 @@ type Server interface { // authentication data (if possible). This will allow other components in the pipeline to make decisions based on that data, such as routing based // on tenancy as determined by the group membership, or passing through the authentication data to the next collector/backend. // The context keys to be used are not defined yet. - Authenticate(ctx context.Context, headers map[string][]string) (context.Context, error) + Authenticate(ctx context.Context, sources map[string][]string) (context.Context, error) } type defaultServer struct { @@ -36,40 +36,48 @@ type defaultServer struct { } // ServerOption represents the possible options for NewServer. -type ServerOption func(*defaultServer) +type ServerOption interface { + apply(*defaultServer) +} + +type serverOptionFunc func(*defaultServer) + +func (of serverOptionFunc) apply(e *defaultServer) { + of(e) +} // ServerAuthenticateFunc defines the signature for the function responsible for performing the authentication based -// on the given headers map. See Server.Authenticate. -type ServerAuthenticateFunc func(ctx context.Context, headers map[string][]string) (context.Context, error) +// on the given sources map. See Server.Authenticate. 
+type ServerAuthenticateFunc func(ctx context.Context, sources map[string][]string) (context.Context, error) -func (f ServerAuthenticateFunc) Authenticate(ctx context.Context, headers map[string][]string) (context.Context, error) { +func (f ServerAuthenticateFunc) Authenticate(ctx context.Context, sources map[string][]string) (context.Context, error) { if f == nil { return ctx, nil } - return f(ctx, headers) + return f(ctx, sources) } // WithServerAuthenticate specifies which function to use to perform the authentication. func WithServerAuthenticate(authFunc ServerAuthenticateFunc) ServerOption { - return func(o *defaultServer) { + return serverOptionFunc(func(o *defaultServer) { o.ServerAuthenticateFunc = authFunc - } + }) } // WithServerStart overrides the default `Start` function for a component.Component. // The default always returns nil. func WithServerStart(startFunc component.StartFunc) ServerOption { - return func(o *defaultServer) { + return serverOptionFunc(func(o *defaultServer) { o.StartFunc = startFunc - } + }) } // WithServerShutdown overrides the default `Shutdown` function for a component.Component. // The default always returns nil. func WithServerShutdown(shutdownFunc component.ShutdownFunc) ServerOption { - return func(o *defaultServer) { + return serverOptionFunc(func(o *defaultServer) { o.ShutdownFunc = shutdownFunc - } + }) } // NewServer returns a Server configured with the provided options. @@ -77,7 +85,7 @@ func NewServer(options ...ServerOption) Server { bc := &defaultServer{} for _, op := range options { - op(bc) + op.apply(bc) } return bc diff --git a/vendor/go.opentelemetry.io/collector/extension/experimental/storage/LICENSE b/vendor/go.opentelemetry.io/collector/extension/experimental/storage/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/extension/experimental/storage/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
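The two extension/auth diffs above change ClientOption and ServerOption from bare function types into small interfaces (wrapped by the unexported clientOptionFunc and serverOptionFunc), which keeps the exported With* constructors source-compatible for callers while letting the package add new option kinds later. A minimal caller-side sketch, using only the NewServer, WithServerAuthenticate, and Authenticate signatures shown in the diff; the "authorization" key check and error text are illustrative, not part of the upstream API:

package main

import (
	"context"
	"errors"
	"fmt"

	"go.opentelemetry.io/collector/extension/auth"
)

func main() {
	// WithServerAuthenticate now returns a ServerOption interface value
	// (a serverOptionFunc) rather than a raw func(*defaultServer).
	srv := auth.NewServer(
		auth.WithServerAuthenticate(func(ctx context.Context, sources map[string][]string) (context.Context, error) {
			// Hypothetical rule: require a non-empty "authorization" entry.
			if len(sources["authorization"]) == 0 {
				return ctx, errors.New("missing authorization data")
			}
			return ctx, nil
		}),
	)

	// An empty sources map fails the hypothetical check above.
	_, err := srv.Authenticate(context.Background(), map[string][]string{})
	fmt.Println(err)
}

Because NewClient and NewServer iterate options and call op.apply(bc), a func-typed option can no longer be passed directly; it must be wrapped in the option interface, which is exactly what the updated With* helpers do.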
diff --git a/vendor/go.opentelemetry.io/collector/extension/experimental/storage/Makefile b/vendor/go.opentelemetry.io/collector/extension/experimental/storage/Makefile index ded7a36092d..bdd863a203b 100644 --- a/vendor/go.opentelemetry.io/collector/extension/experimental/storage/Makefile +++ b/vendor/go.opentelemetry.io/collector/extension/experimental/storage/Makefile @@ -1 +1 @@ -include ../../Makefile.Common +include ../../../Makefile.Common diff --git a/vendor/go.opentelemetry.io/collector/extension/experimental/storage/storage.go b/vendor/go.opentelemetry.io/collector/extension/experimental/storage/storage.go index 184787db0f7..901b6fdd83f 100644 --- a/vendor/go.opentelemetry.io/collector/extension/experimental/storage/storage.go +++ b/vendor/go.opentelemetry.io/collector/extension/experimental/storage/storage.go @@ -35,7 +35,6 @@ type Extension interface { // // [overwrite | not-found | no-op] from "real" problems type Client interface { - // Get will retrieve data from storage that corresponds to the // specified key. It should return (nil, nil) if not found Get(ctx context.Context, key string) ([]byte, error) diff --git a/vendor/go.opentelemetry.io/collector/extension/extension.go b/vendor/go.opentelemetry.io/collector/extension/extension.go index 292358b33ba..5bc630a9e4d 100644 --- a/vendor/go.opentelemetry.io/collector/extension/extension.go +++ b/vendor/go.opentelemetry.io/collector/extension/extension.go @@ -8,7 +8,6 @@ import ( "fmt" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/confmap" ) // Extension is the interface for objects hosted by the OpenTelemetry Collector that @@ -16,54 +15,17 @@ import ( // to the service, examples: health check endpoint, z-pages, etc. type Extension = component.Component -// Dependent is an optional interface that can be implemented by extensions -// that depend on other extensions and must be started only after their dependencies. -// See https://github.com/open-telemetry/opentelemetry-collector/pull/8768 for examples. -type Dependent interface { - Extension - Dependencies() []component.ID +// ModuleInfo describes the go module for each component. +type ModuleInfo struct { + Receiver map[component.Type]string + Processor map[component.Type]string + Exporter map[component.Type]string + Extension map[component.Type]string + Connector map[component.Type]string } -// PipelineWatcher is an extra interface for Extension hosted by the OpenTelemetry -// Collector that is to be implemented by extensions interested in changes to pipeline -// states. Typically this will be used by extensions that change their behavior if data is -// being ingested or not, e.g.: a k8s readiness probe. -type PipelineWatcher interface { - // Ready notifies the Extension that all pipelines were built and the - // receivers were started, i.e.: the service is ready to receive data - // (note that it may already have received data when this method is called). - Ready() error - - // NotReady notifies the Extension that all receivers are about to be stopped, - // i.e.: pipeline receivers will not accept new data. - // This is sent before receivers are stopped, so the Extension can take any - // appropriate actions before that happens. - NotReady() error -} - -// ConfigWatcher is an interface that should be implemented by an extension that -// wishes to be notified of the Collector's effective configuration. -type ConfigWatcher interface { - // NotifyConfig notifies the extension of the Collector's current effective configuration. 
- // The extension owns the `confmap.Conf`. Callers must ensure that it's safe for - // extensions to store the `conf` pointer and use it concurrently with any other - // instances of `conf`. - NotifyConfig(ctx context.Context, conf *confmap.Conf) error -} - -// StatusWatcher is an extra interface for Extension hosted by the OpenTelemetry -// Collector that is to be implemented by extensions interested in changes to component -// status. -type StatusWatcher interface { - // ComponentStatusChanged notifies about a change in the source component status. - // Extensions that implement this interface must be ready that the ComponentStatusChanged - // may be called before, after or concurrently with calls to Component.Start() and Component.Shutdown(). - // The function may be called concurrently with itself. - ComponentStatusChanged(source *component.InstanceID, event *component.StatusEvent) -} - -// CreateSettings is passed to Factory.Create(...) function. -type CreateSettings struct { +// Settings is passed to Factory.Create(...) function. +type Settings struct { // ID returns the ID of the component that will be created. ID component.ID @@ -71,24 +33,27 @@ type CreateSettings struct { // BuildInfo can be used by components for informational purposes BuildInfo component.BuildInfo + + // ModuleInfo describes the go module for each component. + ModuleInfo ModuleInfo } // CreateFunc is the equivalent of Factory.Create(...) function. -type CreateFunc func(context.Context, CreateSettings, component.Config) (Extension, error) +type CreateFunc func(context.Context, Settings, component.Config) (Extension, error) -// CreateExtension implements Factory.Create. -func (f CreateFunc) CreateExtension(ctx context.Context, set CreateSettings, cfg component.Config) (Extension, error) { +// Create implements Factory.Create. +func (f CreateFunc) Create(ctx context.Context, set Settings, cfg component.Config) (Extension, error) { return f(ctx, set, cfg) } type Factory interface { component.Factory - // CreateExtension creates an extension based on the given config. - CreateExtension(ctx context.Context, set CreateSettings, cfg component.Config) (Extension, error) + // Create an extension based on the given config. + Create(ctx context.Context, set Settings, cfg component.Config) (Extension, error) - // ExtensionStability gets the stability level of the Extension. - ExtensionStability() component.StabilityLevel + // Stability gets the stability level of the Extension. + Stability() component.StabilityLevel unexportedFactoryFunc() } @@ -106,7 +71,7 @@ func (f *factory) Type() component.Type { func (f *factory) unexportedFactoryFunc() {} -func (f *factory) ExtensionStability() component.StabilityLevel { +func (f *factory) Stability() component.StabilityLevel { return f.extensionStability } @@ -115,7 +80,8 @@ func NewFactory( cfgType component.Type, createDefaultConfig component.CreateDefaultConfigFunc, createServiceExtension CreateFunc, - sl component.StabilityLevel) Factory { + sl component.StabilityLevel, +) Factory { return &factory{ cfgType: cfgType, CreateDefaultConfigFunc: createDefaultConfig, @@ -136,39 +102,3 @@ func MakeFactoryMap(factories ...Factory) (map[component.Type]Factory, error) { } return fMap, nil } - -// Builder extension is a helper struct that given a set of Configs and Factories helps with creating extensions. 
-type Builder struct { - cfgs map[component.ID]component.Config - factories map[component.Type]Factory -} - -// NewBuilder creates a new extension.Builder to help with creating components form a set of configs and factories. -func NewBuilder(cfgs map[component.ID]component.Config, factories map[component.Type]Factory) *Builder { - return &Builder{cfgs: cfgs, factories: factories} -} - -// Create creates an extension based on the settings and configs available. -func (b *Builder) Create(ctx context.Context, set CreateSettings) (Extension, error) { - cfg, existsCfg := b.cfgs[set.ID] - if !existsCfg { - return nil, fmt.Errorf("extension %q is not configured", set.ID) - } - - f, existsFactory := b.factories[set.ID.Type()] - if !existsFactory { - return nil, fmt.Errorf("extension factory not available for: %q", set.ID) - } - - sl := f.ExtensionStability() - if sl >= component.StabilityLevelAlpha { - set.Logger.Debug(sl.LogMessage()) - } else { - set.Logger.Info(sl.LogMessage()) - } - return f.CreateExtension(ctx, set, cfg) -} - -func (b *Builder) Factory(componentType component.Type) component.Factory { - return b.factories[componentType] -} diff --git a/vendor/go.opentelemetry.io/collector/extension/extensioncapabilities/LICENSE b/vendor/go.opentelemetry.io/collector/extension/extensioncapabilities/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/extension/extensioncapabilities/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/collector/extension/extensioncapabilities/Makefile b/vendor/go.opentelemetry.io/collector/extension/extensioncapabilities/Makefile new file mode 100644 index 00000000000..ded7a36092d --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/extension/extensioncapabilities/Makefile @@ -0,0 +1 @@ +include ../../Makefile.Common diff --git a/vendor/go.opentelemetry.io/collector/extension/extensioncapabilities/interfaces.go b/vendor/go.opentelemetry.io/collector/extension/extensioncapabilities/interfaces.go new file mode 100644 index 00000000000..370210970ed --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/extension/extensioncapabilities/interfaces.go @@ -0,0 +1,49 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package extensioncapabilities provides interfaces that can be implemented by extensions +// to provide additional capabilities. 
+package extensioncapabilities // import "go.opentelemetry.io/collector/extension/extensioncapabilities" + +import ( + "context" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/confmap" + "go.opentelemetry.io/collector/extension" +) + +// Dependent is an optional interface that can be implemented by extensions +// that depend on other extensions and must be started only after their dependencies. +// See https://github.com/open-telemetry/opentelemetry-collector/pull/8768 for examples. +type Dependent interface { + extension.Extension + Dependencies() []component.ID +} + +// PipelineWatcher is an extra interface for Extension hosted by the OpenTelemetry +// Collector that is to be implemented by extensions interested in changes to pipeline +// states. Typically this will be used by extensions that change their behavior if data is +// being ingested or not, e.g.: a k8s readiness probe. +type PipelineWatcher interface { + // Ready notifies the Extension that all pipelines were built and the + // receivers were started, i.e.: the service is ready to receive data + // (note that it may already have received data when this method is called). + Ready() error + + // NotReady notifies the Extension that all receivers are about to be stopped, + // i.e.: pipeline receivers will not accept new data. + // This is sent before receivers are stopped, so the Extension can take any + // appropriate actions before that happens. + NotReady() error +} + +// ConfigWatcher is an interface that should be implemented by an extension that +// wishes to be notified of the Collector's effective configuration. +type ConfigWatcher interface { + // NotifyConfig notifies the extension of the Collector's current effective configuration. + // The extension owns the `confmap.Conf`. Callers must ensure that it's safe for + // extensions to store the `conf` pointer and use it concurrently with any other + // instances of `conf`. + NotifyConfig(ctx context.Context, conf *confmap.Conf) error +} diff --git a/vendor/go.opentelemetry.io/collector/extension/extensiontest/LICENSE b/vendor/go.opentelemetry.io/collector/extension/extensiontest/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/extension/extensiontest/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
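The interfaces.go file above re-homes Dependent, PipelineWatcher, and ConfigWatcher into extensioncapabilities after the earlier extension/extension.go diff removed them from the extension package. A minimal sketch, assuming a hypothetical readinessExtension type, of how an extension would now opt into pipeline-state notifications:

package readiness

import (
	"sync/atomic"

	"go.opentelemetry.io/collector/component"
	"go.opentelemetry.io/collector/extension/extensioncapabilities"
)

// readinessExtension is an illustrative extension that flips a readiness
// flag as pipelines start and stop; the embedded StartFunc/ShutdownFunc
// supply no-op component.Component methods, as in the nop extension below.
type readinessExtension struct {
	component.StartFunc
	component.ShutdownFunc
	ready atomic.Bool
}

// Ready is invoked once all pipelines are built and receivers started.
func (r *readinessExtension) Ready() error {
	r.ready.Store(true)
	return nil
}

// NotReady is invoked just before receivers are stopped.
func (r *readinessExtension) NotReady() error {
	r.ready.Store(false)
	return nil
}

// Compile-time check against the interface at its new import path.
var _ extensioncapabilities.PipelineWatcher = (*readinessExtension)(nil)

Extensions that previously compiled against extension.PipelineWatcher or extension.ConfigWatcher only need to swap the import path; the method sets are unchanged.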
diff --git a/vendor/go.opentelemetry.io/collector/extension/extensiontest/Makefile b/vendor/go.opentelemetry.io/collector/extension/extensiontest/Makefile new file mode 100644 index 00000000000..ded7a36092d --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/extension/extensiontest/Makefile @@ -0,0 +1 @@ +include ../../Makefile.Common diff --git a/vendor/go.opentelemetry.io/collector/extension/extensiontest/nop_extension.go b/vendor/go.opentelemetry.io/collector/extension/extensiontest/nop_extension.go new file mode 100644 index 00000000000..6fef44d513b --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/extension/extensiontest/nop_extension.go @@ -0,0 +1,48 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package extensiontest // import "go.opentelemetry.io/collector/extension/extensiontest" + +import ( + "context" + + "github.com/google/uuid" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/extension" +) + +var nopType = component.MustNewType("nop") + +// NewNopSettings returns a new nop settings for extension.Factory Create* functions. +func NewNopSettings() extension.Settings { + return extension.Settings{ + ID: component.NewIDWithName(nopType, uuid.NewString()), + TelemetrySettings: componenttest.NewNopTelemetrySettings(), + BuildInfo: component.NewDefaultBuildInfo(), + } +} + +// NewNopFactory returns an extension.Factory that constructs nop extensions. +func NewNopFactory() extension.Factory { + return extension.NewFactory( + nopType, + func() component.Config { + return &nopConfig{} + }, + func(context.Context, extension.Settings, component.Config) (extension.Extension, error) { + return nopInstance, nil + }, + component.StabilityLevelStable) +} + +type nopConfig struct{} + +var nopInstance = &nopExtension{} + +// nopExtension acts as an extension for testing purposes. +type nopExtension struct { + component.StartFunc + component.ShutdownFunc +} diff --git a/vendor/go.opentelemetry.io/collector/featuregate/flag.go b/vendor/go.opentelemetry.io/collector/featuregate/flag.go index 95012968126..1c6f3a5e873 100644 --- a/vendor/go.opentelemetry.io/collector/featuregate/flag.go +++ b/vendor/go.opentelemetry.io/collector/featuregate/flag.go @@ -31,6 +31,12 @@ type flagValue struct { } func (f *flagValue) String() string { + // This function can be called by isZeroValue https://github.com/golang/go/blob/go1.23.3/src/flag/flag.go#L630 + // which creates an instance of flagValue using reflect.New. In this case, the field `reg` is nil. + if f.reg == nil { + return "" + } + var ids []string f.reg.VisitAll(func(g *Gate) { id := g.ID() diff --git a/vendor/go.opentelemetry.io/collector/featuregate/registry.go b/vendor/go.opentelemetry.io/collector/featuregate/registry.go index 69486d623ee..9309024c38b 100644 --- a/vendor/go.opentelemetry.io/collector/featuregate/registry.go +++ b/vendor/go.opentelemetry.io/collector/featuregate/registry.go @@ -23,10 +23,8 @@ var ( idRegexp = regexp.MustCompile(`^[0-9a-zA-Z\.]*$`) ) -var ( - // ErrAlreadyRegistered is returned when adding a Gate that is already registered. - ErrAlreadyRegistered = errors.New("gate is already registered") -) +// ErrAlreadyRegistered is returned when adding a Gate that is already registered. +var ErrAlreadyRegistered = errors.New("gate is already registered") // GlobalRegistry returns the global Registry. 
func GlobalRegistry() *Registry { @@ -118,11 +116,11 @@ func (r *Registry) MustRegister(id string, stage Stage, opts ...RegisterOption) func validateID(id string) error { if id == "" { - return fmt.Errorf("empty ID") + return errors.New("empty ID") } if !idRegexp.MatchString(id) { - return fmt.Errorf("invalid character(s) in ID") + return errors.New("invalid character(s) in ID") } return nil } diff --git a/vendor/go.opentelemetry.io/collector/internal/fanoutconsumer/LICENSE b/vendor/go.opentelemetry.io/collector/internal/fanoutconsumer/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/internal/fanoutconsumer/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/collector/internal/fanoutconsumer/Makefile b/vendor/go.opentelemetry.io/collector/internal/fanoutconsumer/Makefile new file mode 100644 index 00000000000..ded7a36092d --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/internal/fanoutconsumer/Makefile @@ -0,0 +1 @@ +include ../../Makefile.Common diff --git a/vendor/go.opentelemetry.io/collector/internal/fanoutconsumer/logs.go b/vendor/go.opentelemetry.io/collector/internal/fanoutconsumer/logs.go index bff5c1a897e..0574ee3194f 100644 --- a/vendor/go.opentelemetry.io/collector/internal/fanoutconsumer/logs.go +++ b/vendor/go.opentelemetry.io/collector/internal/fanoutconsumer/logs.go @@ -15,7 +15,7 @@ import ( ) // NewLogs wraps multiple log consumers in a single one. -// It fanouts the incoming data to all the consumers, and does smart routing: +// It fans out the incoming data to all the consumers, and does smart routing: // - Clones only to the consumer that needs to mutate the data. // - If all consumers needs to mutate the data one will get the original mutable data. func NewLogs(lcs []consumer.Logs) consumer.Logs { diff --git a/vendor/go.opentelemetry.io/collector/internal/fanoutconsumer/metrics.go b/vendor/go.opentelemetry.io/collector/internal/fanoutconsumer/metrics.go index 32d9514561d..57cf1066810 100644 --- a/vendor/go.opentelemetry.io/collector/internal/fanoutconsumer/metrics.go +++ b/vendor/go.opentelemetry.io/collector/internal/fanoutconsumer/metrics.go @@ -13,7 +13,7 @@ import ( ) // NewMetrics wraps multiple metrics consumers in a single one. -// It fanouts the incoming data to all the consumers, and does smart routing: +// It fans out the incoming data to all the consumers, and does smart routing: // - Clones only to the consumer that needs to mutate the data. // - If all consumers needs to mutate the data one will get the original mutable data. 
func NewMetrics(mcs []consumer.Metrics) consumer.Metrics {
diff --git a/vendor/go.opentelemetry.io/collector/internal/fanoutconsumer/profiles.go b/vendor/go.opentelemetry.io/collector/internal/fanoutconsumer/profiles.go
new file mode 100644
index 00000000000..17c6d1dce81
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/internal/fanoutconsumer/profiles.go
@@ -0,0 +1,82 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package fanoutconsumer // import "go.opentelemetry.io/collector/internal/fanoutconsumer"
+
+import (
+	"context"
+
+	"go.uber.org/multierr"
+
+	"go.opentelemetry.io/collector/consumer"
+	"go.opentelemetry.io/collector/consumer/xconsumer"
+	"go.opentelemetry.io/collector/pdata/pprofile"
+)
+
+// NewProfiles wraps multiple profile consumers in a single one.
+// It fans out the incoming data to all the consumers, and does smart routing:
+// - Clones only to the consumer that needs to mutate the data.
+// - If all consumers need to mutate the data one will get the original mutable data.
+func NewProfiles(tcs []xconsumer.Profiles) xconsumer.Profiles {
+	// Don't wrap if there is only one non-mutating consumer.
+	if len(tcs) == 1 && !tcs[0].Capabilities().MutatesData {
+		return tcs[0]
+	}
+
+	tc := &profilesConsumer{}
+	for i := 0; i < len(tcs); i++ {
+		if tcs[i].Capabilities().MutatesData {
+			tc.mutable = append(tc.mutable, tcs[i])
+		} else {
+			tc.readonly = append(tc.readonly, tcs[i])
+		}
+	}
+	return tc
+}
+
+type profilesConsumer struct {
+	mutable  []xconsumer.Profiles
+	readonly []xconsumer.Profiles
+}
+
+func (tsc *profilesConsumer) Capabilities() consumer.Capabilities {
+	// If all consumers are mutating, then the original data will be passed to one of them.
+	return consumer.Capabilities{MutatesData: len(tsc.mutable) > 0 && len(tsc.readonly) == 0}
+}
+
+// ConsumeProfiles exports the pprofile.Profiles to all consumers wrapped by the current one.
+func (tsc *profilesConsumer) ConsumeProfiles(ctx context.Context, td pprofile.Profiles) error {
+	var errs error
+
+	if len(tsc.mutable) > 0 {
+		// Clone the data before sending to all mutating consumers except the last one.
+		for i := 0; i < len(tsc.mutable)-1; i++ {
+			errs = multierr.Append(errs, tsc.mutable[i].ConsumeProfiles(ctx, cloneProfiles(td)))
+		}
+		// Send data as is to the last mutating consumer only if there are no other non-mutating consumers and the
+		// data is mutable. Never share the same data between a mutating and a non-mutating consumer since the
+		// non-mutating consumer may process data async and the mutating consumer may change the data before that.
+		lastConsumer := tsc.mutable[len(tsc.mutable)-1]
+		if len(tsc.readonly) == 0 && !td.IsReadOnly() {
+			errs = multierr.Append(errs, lastConsumer.ConsumeProfiles(ctx, td))
+		} else {
+			errs = multierr.Append(errs, lastConsumer.ConsumeProfiles(ctx, cloneProfiles(td)))
+		}
+	}
+
+	// Mark the data as read-only if it will be sent to more than one read-only consumer.
+ if len(tsc.readonly) > 1 && !td.IsReadOnly() { + td.MarkReadOnly() + } + for _, tc := range tsc.readonly { + errs = multierr.Append(errs, tc.ConsumeProfiles(ctx, td)) + } + + return errs +} + +func cloneProfiles(td pprofile.Profiles) pprofile.Profiles { + clonedProfiles := pprofile.NewProfiles() + td.CopyTo(clonedProfiles) + return clonedProfiles +} diff --git a/vendor/go.opentelemetry.io/collector/internal/fanoutconsumer/traces.go b/vendor/go.opentelemetry.io/collector/internal/fanoutconsumer/traces.go index f9a34027017..ff415e9db9b 100644 --- a/vendor/go.opentelemetry.io/collector/internal/fanoutconsumer/traces.go +++ b/vendor/go.opentelemetry.io/collector/internal/fanoutconsumer/traces.go @@ -13,7 +13,7 @@ import ( ) // NewTraces wraps multiple trace consumers in a single one. -// It fanouts the incoming data to all the consumers, and does smart routing: +// It fans out the incoming data to all the consumers, and does smart routing: // - Clones only to the consumer that needs to mutate the data. // - If all consumers needs to mutate the data one will get the original mutable data. func NewTraces(tcs []consumer.Traces) consumer.Traces { diff --git a/vendor/go.opentelemetry.io/collector/internal/localhostgate/featuregate.go b/vendor/go.opentelemetry.io/collector/internal/localhostgate/featuregate.go deleted file mode 100644 index 78c19622967..00000000000 --- a/vendor/go.opentelemetry.io/collector/internal/localhostgate/featuregate.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// package localhostgate defines a feature gate that controls whether server-like receivers and extensions use localhost as the default host for their endpoints. -// This package is duplicated across core and contrib to avoid exposing the feature gate as part of the public API. -// To do this we define a `registerOrLoad` helper and try to register the gate in both modules. -// IMPORTANT NOTE: ANY CHANGES TO THIS PACKAGE MUST BE MIRRORED IN THE CONTRIB COUNTERPART. -package localhostgate // import "go.opentelemetry.io/collector/internal/localhostgate" - -import ( - "errors" - "fmt" - - "go.uber.org/zap" - - "go.opentelemetry.io/collector/featuregate" -) - -const UseLocalHostAsDefaultHostID = "component.UseLocalHostAsDefaultHost" - -// UseLocalHostAsDefaultHostfeatureGate is the feature gate that controls whether -// server-like receivers and extensions such as the OTLP receiver use localhost as the default host for their endpoints. -var UseLocalHostAsDefaultHostfeatureGate = mustRegisterOrLoad( - featuregate.GlobalRegistry(), - UseLocalHostAsDefaultHostID, - featuregate.StageAlpha, - featuregate.WithRegisterDescription("controls whether server-like receivers and extensions such as the OTLP receiver use localhost as the default host for their endpoints"), -) - -// mustRegisterOrLoad tries to register the feature gate and loads it if it already exists. -// It panics on any other error. -func mustRegisterOrLoad(reg *featuregate.Registry, id string, stage featuregate.Stage, opts ...featuregate.RegisterOption) *featuregate.Gate { - gate, err := reg.Register(id, stage, opts...) - - if errors.Is(err, featuregate.ErrAlreadyRegistered) { - // Gate is already registered; find it. - // Only a handful of feature gates are registered, so it's fine to iterate over all of them. 
- reg.VisitAll(func(g *featuregate.Gate) { - if g.ID() == id { - gate = g - return - } - }) - } else if err != nil { - panic(err) - } - - return gate -} - -// EndpointForPort gets the endpoint for a given port using localhost or 0.0.0.0 depending on the feature gate. -func EndpointForPort(port int) string { - host := "localhost" - if !UseLocalHostAsDefaultHostfeatureGate.IsEnabled() { - host = "0.0.0.0" - } - return fmt.Sprintf("%s:%d", host, port) -} - -// LogAboutUseLocalHostAsDefault logs about the upcoming change from 0.0.0.0 to localhost on server-like components. -func LogAboutUseLocalHostAsDefault(logger *zap.Logger) { - if !UseLocalHostAsDefaultHostfeatureGate.IsEnabled() { - logger.Warn( - "The default endpoints for all servers in components will change to use localhost instead of 0.0.0.0 in a future version. Use the feature gate to preview the new default.", - zap.String("feature gate ID", UseLocalHostAsDefaultHostID), - ) - } -} diff --git a/vendor/go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics/obs_processor.go b/vendor/go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics/obs_processor.go deleted file mode 100644 index 3cbd8fc4a83..00000000000 --- a/vendor/go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics/obs_processor.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package obsmetrics // import "go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics" - -const ( - // ProcessorKey is the key used to identify processors in metrics and traces. - ProcessorKey = "processor" - - // DroppedSpansKey is the key used to identify spans dropped by the Collector. - DroppedSpansKey = "dropped_spans" - - // DroppedMetricPointsKey is the key used to identify metric points dropped by the Collector. - DroppedMetricPointsKey = "dropped_metric_points" - - // DroppedLogRecordsKey is the key used to identify log records dropped by the Collector. - DroppedLogRecordsKey = "dropped_log_records" -) - -var ( - ProcessorMetricPrefix = ProcessorKey + MetricNameSep -) diff --git a/vendor/go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics/obs_scraper.go b/vendor/go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics/obs_scraper.go deleted file mode 100644 index 4254891f33a..00000000000 --- a/vendor/go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics/obs_scraper.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package obsmetrics // import "go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics" - -const ( - // ScraperKey used to identify scrapers in metrics and traces. - ScraperKey = "scraper" - - // ScrapedMetricPointsKey used to identify metric points scraped by the - // Collector. - ScrapedMetricPointsKey = "scraped_metric_points" - // ErroredMetricPointsKey used to identify metric points errored (i.e. - // unable to be scraped) by the Collector. 
- ErroredMetricPointsKey = "errored_metric_points" -) - -const ( - ScraperPrefix = ScraperKey + SpanNameSep - ScraperMetricPrefix = ScraperKey + MetricNameSep - ScraperMetricsOperationSuffix = SpanNameSep + "MetricsScraped" -) diff --git a/vendor/go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics/obsmetrics.go b/vendor/go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics/obsmetrics.go deleted file mode 100644 index 02ce450d3a9..00000000000 --- a/vendor/go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics/obsmetrics.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Package obsmetrics defines the obsreport metrics for each components -// all the metrics is in OpenCensus format which will be replaced with OTEL Metrics -// in the future -package obsmetrics // import "go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics" - -const ( - SpanNameSep = "/" - MetricNameSep = "_" - Scope = "go.opentelemetry.io/collector/obsreport" -) diff --git a/vendor/go.opentelemetry.io/collector/internal/obsreportconfig/obsreportconfig.go b/vendor/go.opentelemetry.io/collector/internal/obsreportconfig/obsreportconfig.go deleted file mode 100644 index dc5d4915569..00000000000 --- a/vendor/go.opentelemetry.io/collector/internal/obsreportconfig/obsreportconfig.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package obsreportconfig // import "go.opentelemetry.io/collector/internal/obsreportconfig" - -import ( - "go.opentelemetry.io/collector/featuregate" -) - -// DisableHighCardinalityMetricsfeatureGate is the feature gate that controls whether the collector should enable -// potentially high cardinality metrics. The gate will be removed when the collector allows for view configuration. -var DisableHighCardinalityMetricsfeatureGate = featuregate.GlobalRegistry().MustRegister( - "telemetry.disableHighCardinalityMetrics", - featuregate.StageAlpha, - featuregate.WithRegisterDescription("controls whether the collector should enable potentially high"+ - "cardinality metrics. The gate will be removed when the collector allows for view configuration.")) - -// UseOtelWithSDKConfigurationForInternalTelemetryFeatureGate is the feature gate that controls whether the collector -// supports configuring the OpenTelemetry SDK via configuration -var UseOtelWithSDKConfigurationForInternalTelemetryFeatureGate = featuregate.GlobalRegistry().MustRegister( - "telemetry.useOtelWithSDKConfigurationForInternalTelemetry", - featuregate.StageAlpha, - featuregate.WithRegisterDescription("controls whether the collector supports extended OpenTelemetry"+ - "configuration for internal telemetry")) diff --git a/vendor/go.opentelemetry.io/collector/internal/sharedcomponent/LICENSE b/vendor/go.opentelemetry.io/collector/internal/sharedcomponent/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/internal/sharedcomponent/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
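A note on the fanoutconsumer changes above: fanoutconsumer is an internal collector package, so the sketch below does not import it; it only illustrates the public capabilities contract that NewProfiles routes on. This is a minimal, illustrative sketch, not part of this patch, assuming the xconsumer constructor at v0.116.0 mirrors consumer.NewTraces (the consumer bodies and variable names here are hypothetical).

// Sketch: how profile consumers declare the MutatesData capability that the
// fanout in profiles.go above uses to decide between sharing and cloning.
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/collector/consumer"
	"go.opentelemetry.io/collector/consumer/xconsumer"
	"go.opentelemetry.io/collector/pdata/pprofile"
)

func main() {
	// A read-only consumer: the fanout may hand it shared data, marked
	// read-only when more than one read-only consumer is wrapped.
	ro, err := xconsumer.NewProfiles(func(_ context.Context, td pprofile.Profiles) error {
		fmt.Println("shared data read-only:", td.IsReadOnly())
		return nil
	})
	if err != nil {
		panic(err)
	}

	// A mutating consumer: declaring MutatesData makes the fanout clone the
	// data for it, or pass the original through only when no read-only
	// consumer is present and the data is still mutable.
	mut, err := xconsumer.NewProfiles(
		func(_ context.Context, td pprofile.Profiles) error {
			return nil // free to modify td: this consumer owns its copy
		},
		consumer.WithCapabilities(consumer.Capabilities{MutatesData: true}))
	if err != nil {
		panic(err)
	}

	_ = ro.ConsumeProfiles(context.Background(), pprofile.NewProfiles())
	_ = mut.ConsumeProfiles(context.Background(), pprofile.NewProfiles())
}

This capability flag is what drives the choice in ConsumeProfiles above between cloneProfiles(td) and passing td through, and why td.MarkReadOnly() is called before sharing data with multiple read-only consumers.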
diff --git a/vendor/go.opentelemetry.io/collector/internal/sharedcomponent/Makefile b/vendor/go.opentelemetry.io/collector/internal/sharedcomponent/Makefile new file mode 100644 index 00000000000..ded7a36092d --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/internal/sharedcomponent/Makefile @@ -0,0 +1 @@ +include ../../Makefile.Common diff --git a/vendor/go.opentelemetry.io/collector/internal/sharedcomponent/sharedcomponent.go b/vendor/go.opentelemetry.io/collector/internal/sharedcomponent/sharedcomponent.go index 1a3e65878c2..397e978483e 100644 --- a/vendor/go.opentelemetry.io/collector/internal/sharedcomponent/sharedcomponent.go +++ b/vendor/go.opentelemetry.io/collector/internal/sharedcomponent/sharedcomponent.go @@ -8,9 +8,11 @@ package sharedcomponent // import "go.opentelemetry.io/collector/internal/shared import ( "context" + "slices" "sync" "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componentstatus" ) func NewMap[K comparable, V component.Component]() *Map[K, V] { @@ -27,21 +29,10 @@ type Map[K comparable, V component.Component] struct { // LoadOrStore returns the already created instance if exists, otherwise creates a new instance // and adds it to the map of references. -func (m *Map[K, V]) LoadOrStore(key K, create func() (V, error), telemetrySettings *component.TelemetrySettings) (*Component[V], error) { +func (m *Map[K, V]) LoadOrStore(key K, create func() (V, error)) (*Component[V], error) { m.lock.Lock() defer m.lock.Unlock() if c, ok := m.components[key]; ok { - // If we haven't already seen this telemetry settings, this shared component represents - // another instance. Wrap ReportStatus to report for all instances this shared - // component represents. - if _, ok := c.seenSettings[telemetrySettings]; !ok { - c.seenSettings[telemetrySettings] = struct{}{} - prev := c.telemetry.ReportStatus - c.telemetry.ReportStatus = func(ev *component.StatusEvent) { - telemetrySettings.ReportStatus(ev) - prev(ev) - } - } return c, nil } comp, err := create() @@ -56,10 +47,6 @@ func (m *Map[K, V]) LoadOrStore(key K, create func() (V, error), telemetrySettin defer m.lock.Unlock() delete(m.components, key) }, - telemetry: telemetrySettings, - seenSettings: map[*component.TelemetrySettings]struct{}{ - telemetrySettings: {}, - }, } m.components[key] = newComp return newComp, nil @@ -74,8 +61,7 @@ type Component[V component.Component] struct { stopOnce sync.Once removeFunc func() - telemetry *component.TelemetrySettings - seenSettings map[*component.TelemetrySettings]struct{} + hostWrapper *hostWrapper } // Unwrap returns the original component. @@ -85,18 +71,72 @@ func (c *Component[V]) Unwrap() V { // Start starts the underlying component if it never started before. func (c *Component[V]) Start(ctx context.Context, host component.Host) error { - var err error - c.startOnce.Do(func() { - // It's important that status for a shared component is reported through its - // telemetry settings to keep status in sync and avoid race conditions. This logic duplicates - // and takes priority over the automated status reporting that happens in graph, making the - // status reporting in graph a no-op. 
- c.telemetry.ReportStatus(component.NewStatusEvent(component.StatusStarting)) - if err = c.component.Start(ctx, host); err != nil { - c.telemetry.ReportStatus(component.NewPermanentErrorEvent(err)) - } - }) - return err + if c.hostWrapper == nil { + var err error + c.startOnce.Do(func() { + c.hostWrapper = &hostWrapper{ + host: host, + sources: make([]componentstatus.Reporter, 0), + previousEvents: make([]*componentstatus.Event, 0), + } + statusReporter, isStatusReporter := host.(componentstatus.Reporter) + if isStatusReporter { + c.hostWrapper.addSource(statusReporter) + } + + // It's important that status for a shared component is reported through its + // telemetry settings to keep status in sync and avoid race conditions. This logic duplicates + // and takes priority over the automated status reporting that happens in graph, making the + // status reporting in graph a no-op. + c.hostWrapper.Report(componentstatus.NewEvent(componentstatus.StatusStarting)) + if err = c.component.Start(ctx, c.hostWrapper); err != nil { + c.hostWrapper.Report(componentstatus.NewPermanentErrorEvent(err)) + } + }) + return err + } + statusReporter, isStatusReporter := host.(componentstatus.Reporter) + if isStatusReporter { + c.hostWrapper.addSource(statusReporter) + } + return nil +} + +var ( + _ component.Host = (*hostWrapper)(nil) + _ componentstatus.Reporter = (*hostWrapper)(nil) +) + +type hostWrapper struct { + host component.Host + sources []componentstatus.Reporter + previousEvents []*componentstatus.Event + lock sync.Mutex +} + +func (h *hostWrapper) GetExtensions() map[component.ID]component.Component { + return h.host.GetExtensions() +} + +func (h *hostWrapper) Report(e *componentstatus.Event) { + // Only remember an event if it will be emitted and it has not been sent already. + h.lock.Lock() + defer h.lock.Unlock() + if len(h.sources) > 0 && !slices.Contains(h.previousEvents, e) { + h.previousEvents = append(h.previousEvents, e) + } + for _, s := range h.sources { + s.Report(e) + } +} + +func (h *hostWrapper) addSource(s componentstatus.Reporter) { + h.lock.Lock() + defer h.lock.Unlock() + for _, e := range h.previousEvents { + s.Report(e) + } + h.sources = append(h.sources, s) } // Shutdown shuts down the underlying component. @@ -107,12 +147,16 @@ func (c *Component[V]) Shutdown(ctx context.Context) error { // telemetry settings to keep status in sync and avoid race conditions. This logic duplicates // and takes priority over the automated status reporting that happens in graph, making the // status reporting in graph a no-op. 
- c.telemetry.ReportStatus(component.NewStatusEvent(component.StatusStopping)) + if c.hostWrapper != nil { + c.hostWrapper.Report(componentstatus.NewEvent(componentstatus.StatusStopping)) + } err = c.component.Shutdown(ctx) - if err != nil { - c.telemetry.ReportStatus(component.NewPermanentErrorEvent(err)) - } else { - c.telemetry.ReportStatus(component.NewStatusEvent(component.StatusStopped)) + if c.hostWrapper != nil { + if err != nil { + c.hostWrapper.Report(componentstatus.NewPermanentErrorEvent(err)) + } else { + c.hostWrapper.Report(componentstatus.NewEvent(componentstatus.StatusStopped)) + } } c.removeFunc() }) diff --git a/vendor/go.opentelemetry.io/collector/otelcol/buffered_core.go b/vendor/go.opentelemetry.io/collector/otelcol/buffered_core.go index 447bf13cb52..dd334a8454e 100644 --- a/vendor/go.opentelemetry.io/collector/otelcol/buffered_core.go +++ b/vendor/go.opentelemetry.io/collector/otelcol/buffered_core.go @@ -6,7 +6,7 @@ package otelcol // import "go.opentelemetry.io/collector/otelcol" import ( - "fmt" + "errors" "sync" "go.uber.org/zap/zapcore" @@ -55,7 +55,7 @@ func (bc *bufferedCore) Write(ent zapcore.Entry, fields []zapcore.Field) error { bc.mu.Lock() defer bc.mu.Unlock() if bc.logsTaken { - return fmt.Errorf("the buffered logs have already been taken so writing is no longer supported") + return errors.New("the buffered logs have already been taken so writing is no longer supported") } all := make([]zapcore.Field, 0, len(fields)+len(bc.context)) all = append(all, bc.context...) diff --git a/vendor/go.opentelemetry.io/collector/otelcol/collector.go b/vendor/go.opentelemetry.io/collector/otelcol/collector.go index 8f9839af6c8..13ac5986ee5 100644 --- a/vendor/go.opentelemetry.io/collector/otelcol/collector.go +++ b/vendor/go.opentelemetry.io/collector/otelcol/collector.go @@ -21,12 +21,8 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/confmap" - "go.opentelemetry.io/collector/connector" - "go.opentelemetry.io/collector/exporter" "go.opentelemetry.io/collector/extension" "go.opentelemetry.io/collector/otelcol/internal/grpclog" - "go.opentelemetry.io/collector/processor" - "go.opentelemetry.io/collector/receiver" "go.opentelemetry.io/collector/service" ) @@ -99,7 +95,7 @@ type Collector struct { serviceConfig *service.Config service *service.Service - state *atomic.Int32 + state *atomic.Int64 // shutdownChan is used to terminate the collector. 
shutdownChan chan struct{} @@ -125,8 +121,8 @@ func NewCollector(set CollectorSettings) (*Collector, error) { return nil, err } - state := &atomic.Int32{} - state.Store(int32(StateStarting)) + state := new(atomic.Int64) + state.Store(int64(StateStarting)) return &Collector{ set: set, state: state, @@ -163,17 +159,6 @@ func (col *Collector) Shutdown() { func (col *Collector) setupConfigurationComponents(ctx context.Context) error { col.setCollectorState(StateStarting) - var conf *confmap.Conf - - if cp, ok := col.configProvider.(ConfmapProvider); ok { - var err error - conf, err = cp.GetConfmap(ctx) - - if err != nil { - return fmt.Errorf("failed to resolve config: %w", err) - } - } - factories, err := col.set.Factories() if err != nil { return fmt.Errorf("failed to initialize factories: %w", err) @@ -189,14 +174,34 @@ func (col *Collector) setupConfigurationComponents(ctx context.Context) error { col.serviceConfig = &cfg.Service + conf := confmap.New() + + if err = conf.Marshal(cfg); err != nil { + return fmt.Errorf("could not marshal configuration: %w", err) + } + col.service, err = service.New(ctx, service.Settings{ - BuildInfo: col.set.BuildInfo, - CollectorConf: conf, - Receivers: receiver.NewBuilder(cfg.Receivers, factories.Receivers), - Processors: processor.NewBuilder(cfg.Processors, factories.Processors), - Exporters: exporter.NewBuilder(cfg.Exporters, factories.Exporters), - Connectors: connector.NewBuilder(cfg.Connectors, factories.Connectors), - Extensions: extension.NewBuilder(cfg.Extensions, factories.Extensions), + BuildInfo: col.set.BuildInfo, + CollectorConf: conf, + + ReceiversConfigs: cfg.Receivers, + ReceiversFactories: factories.Receivers, + ProcessorsConfigs: cfg.Processors, + ProcessorsFactories: factories.Processors, + ExportersConfigs: cfg.Exporters, + ExportersFactories: factories.Exporters, + ConnectorsConfigs: cfg.Connectors, + ConnectorsFactories: factories.Connectors, + ExtensionsConfigs: cfg.Extensions, + ExtensionsFactories: factories.Extensions, + + ModuleInfo: extension.ModuleInfo{ + Receiver: factories.ReceiverModules, + Processor: factories.ProcessorModules, + Exporter: factories.ExporterModules, + Extension: factories.ExtensionModules, + Connector: factories.ConnectorModules, + }, AsyncErrorChannel: col.asyncErrorChannel, LoggingOptions: col.set.LoggingOptions, }, cfg.Service) @@ -361,5 +366,5 @@ func (col *Collector) shutdown(ctx context.Context) error { // setCollectorState provides current state of the collector func (col *Collector) setCollectorState(state State) { - col.state.Store(int32(state)) + col.state.Store(int64(state)) } diff --git a/vendor/go.opentelemetry.io/collector/otelcol/collector_windows.go b/vendor/go.opentelemetry.io/collector/otelcol/collector_windows.go index 3df08386bbf..ce52a4647d4 100644 --- a/vendor/go.opentelemetry.io/collector/otelcol/collector_windows.go +++ b/vendor/go.opentelemetry.io/collector/otelcol/collector_windows.go @@ -99,10 +99,7 @@ func (s *windowsService) start(elog *eventlog.Log, colErrorChannel chan error) e // only read at the time of the Run method call. To work around this, we pass the // serviceConfig as a pointer to the logging options, and then read its value // when the zap.Logger is created by the telemetry. 
- s.col.set.LoggingOptions = append( - s.col.set.LoggingOptions, - zap.WrapCore(withWindowsCore(elog, &s.col.serviceConfig)), - ) + s.col.set.LoggingOptions = loggingOptionsWithEventLogCore(elog, &s.col.serviceConfig, s.col.set.LoggingOptions) // col.Run blocks until receiving a SIGTERM signal, so needs to be started // asynchronously, but it will exit early if an error occurs on startup @@ -141,6 +138,20 @@ func openEventLog(serviceName string) (*eventlog.Log, error) { return elog, nil } +func loggingOptionsWithEventLogCore( + elog *eventlog.Log, + serviceConfig **service.Config, + userOptions []zap.Option, +) []zap.Option { + return append( + // The order below must be preserved - see PR #11051 + // The event log core must run *after* any user provided options, so it + // must be the first option in this list. + []zap.Option{zap.WrapCore(withWindowsCore(elog, serviceConfig))}, + userOptions..., + ) +} + var _ zapcore.Core = (*windowsEventLogCore)(nil) type windowsEventLogCore struct { @@ -214,6 +225,7 @@ func withWindowsCore(elog *eventlog.Log, serviceConfig **service.Config) func(za // Use the Windows Event Log encoderConfig := zap.NewProductionEncoderConfig() encoderConfig.LineEnding = "\r\n" + encoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder return windowsEventLogCore{core, elog, zapcore.NewConsoleEncoder(encoderConfig)} } } diff --git a/vendor/go.opentelemetry.io/collector/otelcol/command.go b/vendor/go.opentelemetry.io/collector/otelcol/command.go index 648b3a5ffda..6efd16df562 100644 --- a/vendor/go.opentelemetry.io/collector/otelcol/command.go +++ b/vendor/go.opentelemetry.io/collector/otelcol/command.go @@ -16,6 +16,7 @@ import ( // Any URIs specified in CollectorSettings.ConfigProviderSettings.ResolverSettings.URIs // are considered defaults and will be overwritten by config flags passed as // command-line arguments to the executable. +// At least one Provider must be set. func NewCommand(set CollectorSettings) *cobra.Command { flagSet := flags(featuregate.GlobalRegistry()) rootCmd := &cobra.Command{ @@ -52,11 +53,13 @@ func updateSettingsUsingFlags(set *CollectorSettings, flags *flag.FlagSet) error if len(resolverSet.URIs) == 0 { return errors.New("at least one config flag must be provided") } - // Provide a default set of providers and converters if none have been specified. - // TODO: Remove this after CollectorSettings.ConfigProvider is removed and instead - // do it in the builder. - if len(resolverSet.ProviderFactories) == 0 && len(resolverSet.ConverterFactories) == 0 { - set.ConfigProviderSettings = newDefaultConfigProviderSettings(resolverSet.URIs) + + if set.ConfigProviderSettings.ResolverSettings.DefaultScheme == "" { + set.ConfigProviderSettings.ResolverSettings.DefaultScheme = "env" + } + + if len(resolverSet.ProviderFactories) == 0 { + return errors.New("at least one Provider must be supplied") } return nil } diff --git a/vendor/go.opentelemetry.io/collector/otelcol/command_components.go b/vendor/go.opentelemetry.io/collector/otelcol/command_components.go index da13d6779a7..7b3f8e1a8b7 100644 --- a/vendor/go.opentelemetry.io/collector/otelcol/command_components.go +++ b/vendor/go.opentelemetry.io/collector/otelcol/command_components.go @@ -20,6 +20,7 @@ import ( type componentWithStability struct { Name component.Type + Module string Stability map[string]string } @@ -40,7 +41,6 @@ func newComponentsCommand(set CollectorSettings) *cobra.Command { Long: "Outputs available components in this collector distribution including their stability levels. 
The output format is not stable and can change between releases.", Args: cobra.ExactArgs(0), RunE: func(cmd *cobra.Command, _ []string) error { - factories, err := set.Factories() if err != nil { return fmt.Errorf("failed to initialize factories: %w", err) @@ -49,7 +49,8 @@ func newComponentsCommand(set CollectorSettings) *cobra.Command { components := componentsOutput{} for _, con := range sortFactoriesByType[connector.Factory](factories.Connectors) { components.Connectors = append(components.Connectors, componentWithStability{ - Name: con.Type(), + Name: con.Type(), + Module: factories.ConnectorModules[con.Type()], Stability: map[string]string{ "logs-to-logs": con.LogsToLogsStability().String(), "logs-to-metrics": con.LogsToMetricsStability().String(), @@ -67,39 +68,43 @@ func newComponentsCommand(set CollectorSettings) *cobra.Command { } for _, ext := range sortFactoriesByType[extension.Factory](factories.Extensions) { components.Extensions = append(components.Extensions, componentWithStability{ - Name: ext.Type(), + Name: ext.Type(), + Module: factories.ExtensionModules[ext.Type()], Stability: map[string]string{ - "extension": ext.ExtensionStability().String(), + "extension": ext.Stability().String(), }, }) } for _, prs := range sortFactoriesByType[processor.Factory](factories.Processors) { components.Processors = append(components.Processors, componentWithStability{ - Name: prs.Type(), + Name: prs.Type(), + Module: factories.ProcessorModules[prs.Type()], Stability: map[string]string{ - "logs": prs.LogsProcessorStability().String(), - "metrics": prs.MetricsProcessorStability().String(), - "traces": prs.TracesProcessorStability().String(), + "logs": prs.LogsStability().String(), + "metrics": prs.MetricsStability().String(), + "traces": prs.TracesStability().String(), }, }) } for _, rcv := range sortFactoriesByType[receiver.Factory](factories.Receivers) { components.Receivers = append(components.Receivers, componentWithStability{ - Name: rcv.Type(), + Name: rcv.Type(), + Module: factories.ReceiverModules[rcv.Type()], Stability: map[string]string{ - "logs": rcv.LogsReceiverStability().String(), - "metrics": rcv.MetricsReceiverStability().String(), - "traces": rcv.TracesReceiverStability().String(), + "logs": rcv.LogsStability().String(), + "metrics": rcv.MetricsStability().String(), + "traces": rcv.TracesStability().String(), }, }) } for _, exp := range sortFactoriesByType[exporter.Factory](factories.Exporters) { components.Exporters = append(components.Exporters, componentWithStability{ - Name: exp.Type(), + Name: exp.Type(), + Module: factories.ExporterModules[exp.Type()], Stability: map[string]string{ - "logs": exp.LogsExporterStability().String(), - "metrics": exp.MetricsExporterStability().String(), - "traces": exp.TracesExporterStability().String(), + "logs": exp.LogsStability().String(), + "metrics": exp.MetricsStability().String(), + "traces": exp.TracesStability().String(), }, }) } diff --git a/vendor/go.opentelemetry.io/collector/otelcol/config.go b/vendor/go.opentelemetry.io/collector/otelcol/config.go index e622617721b..33975957282 100644 --- a/vendor/go.opentelemetry.io/collector/otelcol/config.go +++ b/vendor/go.opentelemetry.io/collector/otelcol/config.go @@ -130,14 +130,14 @@ func (cfg *Config) Validate() error { if _, ok := cfg.Connectors[ref]; ok { continue } - return fmt.Errorf("service::pipelines::%s: references receiver %q which is not configured", pipelineID, ref) + return fmt.Errorf("service::pipelines::%s: references receiver %q which is not configured", 
pipelineID.String(), ref) } // Validate pipeline processor name references. for _, ref := range pipeline.Processors { // Check that the name referenced in the pipeline's processors exists in the top-level processors. if cfg.Processors[ref] == nil { - return fmt.Errorf("service::pipelines::%s: references processor %q which is not configured", pipelineID, ref) + return fmt.Errorf("service::pipelines::%s: references processor %q which is not configured", pipelineID.String(), ref) } } @@ -150,7 +150,7 @@ func (cfg *Config) Validate() error { if _, ok := cfg.Connectors[ref]; ok { continue } - return fmt.Errorf("service::pipelines::%s: references exporter %q which is not configured", pipelineID, ref) + return fmt.Errorf("service::pipelines::%s: references exporter %q which is not configured", pipelineID.String(), ref) } } return nil diff --git a/vendor/go.opentelemetry.io/collector/otelcol/configprovider.go b/vendor/go.opentelemetry.io/collector/otelcol/configprovider.go index 6360b536509..440ffad5254 100644 --- a/vendor/go.opentelemetry.io/collector/otelcol/configprovider.go +++ b/vendor/go.opentelemetry.io/collector/otelcol/configprovider.go @@ -8,12 +8,6 @@ import ( "fmt" "go.opentelemetry.io/collector/confmap" - "go.opentelemetry.io/collector/confmap/converter/expandconverter" - "go.opentelemetry.io/collector/confmap/provider/envprovider" - "go.opentelemetry.io/collector/confmap/provider/fileprovider" - "go.opentelemetry.io/collector/confmap/provider/httpprovider" - "go.opentelemetry.io/collector/confmap/provider/httpsprovider" - "go.opentelemetry.io/collector/confmap/provider/yamlprovider" ) // ConfigProvider provides the service configuration. @@ -50,25 +44,11 @@ type ConfigProvider interface { Shutdown(ctx context.Context) error } -// ConfmapProvider is an optional interface to be implemented by ConfigProviders -// to provide confmap.Conf objects representing a marshaled version of the -// Collector's configuration. -// -// The purpose of this interface is that otelcol.ConfigProvider structs do not -// necessarily need to use confmap.Conf as their underlying config structure. -type ConfmapProvider interface { - // GetConfmap resolves the Collector's configuration and provides it as a confmap.Conf object. - // - // Should never be called concurrently with itself or any ConfigProvider method. - GetConfmap(ctx context.Context) (*confmap.Conf, error) -} - type configProvider struct { mapResolver *confmap.Resolver } var _ ConfigProvider = (*configProvider)(nil) -var _ ConfmapProvider = (*configProvider)(nil) // ConfigProviderSettings are the settings to configure the behavior of the ConfigProvider. 
type ConfigProviderSettings struct { @@ -121,28 +101,3 @@ func (cm *configProvider) Watch() <-chan error { func (cm *configProvider) Shutdown(ctx context.Context) error { return cm.mapResolver.Shutdown(ctx) } - -func (cm *configProvider) GetConfmap(ctx context.Context) (*confmap.Conf, error) { - conf, err := cm.mapResolver.Resolve(ctx) - if err != nil { - return nil, fmt.Errorf("cannot resolve the configuration: %w", err) - } - - return conf, nil -} - -func newDefaultConfigProviderSettings(uris []string) ConfigProviderSettings { - return ConfigProviderSettings{ - ResolverSettings: confmap.ResolverSettings{ - URIs: uris, - ProviderFactories: []confmap.ProviderFactory{ - fileprovider.NewFactory(), - envprovider.NewFactory(), - yamlprovider.NewFactory(), - httpprovider.NewFactory(), - httpsprovider.NewFactory(), - }, - ConverterFactories: []confmap.ConverterFactory{expandconverter.NewFactory()}, - }, - } -} diff --git a/vendor/go.opentelemetry.io/collector/otelcol/factories.go b/vendor/go.opentelemetry.io/collector/otelcol/factories.go index a61bd05060b..7528fa00666 100644 --- a/vendor/go.opentelemetry.io/collector/otelcol/factories.go +++ b/vendor/go.opentelemetry.io/collector/otelcol/factories.go @@ -29,4 +29,19 @@ type Factories struct { // Connectors maps connector type names in the config to the respective factory. Connectors map[component.Type]connector.Factory + + // ReceiverModules maps receiver types to their respective go modules. + ReceiverModules map[component.Type]string + + // ProcessorModules maps processor types to their respective go modules. + ProcessorModules map[component.Type]string + + // ExporterModules maps exporter types to their respective go modules. + ExporterModules map[component.Type]string + + // ExtensionModules maps extension types to their respective go modules. + ExtensionModules map[component.Type]string + + // ConnectorModules maps connector types to their respective go modules. + ConnectorModules map[component.Type]string } diff --git a/vendor/go.opentelemetry.io/collector/otelcol/internal/configunmarshaler/configs.go b/vendor/go.opentelemetry.io/collector/otelcol/internal/configunmarshaler/configs.go index 7f864fdfd32..e60ab5d29b4 100644 --- a/vendor/go.opentelemetry.io/collector/otelcol/internal/configunmarshaler/configs.go +++ b/vendor/go.opentelemetry.io/collector/otelcol/internal/configunmarshaler/configs.go @@ -4,6 +4,7 @@ package configunmarshaler // import "go.opentelemetry.io/collector/otelcol/internal/configunmarshaler" import ( + "errors" "fmt" "golang.org/x/exp/maps" @@ -31,19 +32,25 @@ func (c *Configs[F]) Unmarshal(conf *confmap.Conf) error { // Prepare resulting map. c.cfgs = make(map[component.ID]component.Config) // Iterate over raw configs and create a config for each. - for id, value := range rawCfgs { + for id := range rawCfgs { // Find factory based on component kind and type that we read from config source. factory, ok := c.factories[id.Type()] if !ok { return errorUnknownType(id, maps.Keys(c.factories)) } + // Get the configuration from the confmap.Conf to preserve internal representation. + sub, err := conf.Sub(id.String()) + if err != nil { + return errorUnmarshalError(id, err) + } + // Create the default config for this component. cfg := factory.CreateDefaultConfig() // Now that the default config struct is created we can Unmarshal into it, // and it will apply user-defined config on top of the default. 
-		if err := confmap.NewFromStringMap(value).Unmarshal(&cfg); err != nil {
+		if err := sub.Unmarshal(&cfg); err != nil {
 			return errorUnmarshalError(id, err)
 		}
 
@@ -58,6 +65,9 @@ func (c *Configs[F]) Configs() map[component.ID]component.Config {
 }
 
 func errorUnknownType(id component.ID, factories []component.Type) error {
+	if id.Type().String() == "logging" {
+		return errors.New("the logging exporter has been deprecated, use the debug exporter instead")
+	}
 	return fmt.Errorf("unknown type: %q for id: %q (valid values: %v)", id.Type(), id, factories)
 }
 
diff --git a/vendor/go.opentelemetry.io/collector/otelcol/internal/grpclog/logger.go b/vendor/go.opentelemetry.io/collector/otelcol/internal/grpclog/logger.go
index 39b56f74890..8f48fe7207d 100644
--- a/vendor/go.opentelemetry.io/collector/otelcol/internal/grpclog/logger.go
+++ b/vendor/go.opentelemetry.io/collector/otelcol/internal/grpclog/logger.go
@@ -27,7 +27,7 @@ func SetLogger(baseLogger *zap.Logger, loglevel zapcore.Level) *zapgrpc.Logger {
 			c = core
 		}
 		return c.With([]zapcore.Field{zap.Bool("grpc_log", true)})
-	})))
+	}), zap.AddCallerSkip(5)))
 
 	grpclog.SetLoggerV2(logger)
 	return logger
diff --git a/vendor/go.opentelemetry.io/collector/otelcol/unmarshaler.go b/vendor/go.opentelemetry.io/collector/otelcol/unmarshaler.go
index 4967b0a731d..8f2b8e583a1 100644
--- a/vendor/go.opentelemetry.io/collector/otelcol/unmarshaler.go
+++ b/vendor/go.opentelemetry.io/collector/otelcol/unmarshaler.go
@@ -27,7 +27,6 @@ type configSettings struct {
 // unmarshal the configSettings from a confmap.Conf.
 // After the config is unmarshalled, `Validate()` must be called to validate.
 func unmarshal(v *confmap.Conf, factories Factories) (*configSettings, error) {
-
 	telFactory := telemetry.NewFactory()
 	defaultTelConfig := *telFactory.CreateDefaultConfig().(*telemetry.Config)
 
diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/data/profileid.go b/vendor/go.opentelemetry.io/collector/pdata/internal/data/profileid.go
new file mode 100644
index 00000000000..5b4e6f53ceb
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/internal/data/profileid.go
@@ -0,0 +1,79 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package data // import "go.opentelemetry.io/collector/pdata/internal/data"
+
+import (
+	"errors"
+
+	"github.com/gogo/protobuf/proto"
+)
+
+const profileIDSize = 16
+
+var (
+	errMarshalProfileID   = errors.New("marshal: invalid buffer length for ProfileID")
+	errUnmarshalProfileID = errors.New("unmarshal: invalid ProfileID length")
+)
+
+// ProfileID is a custom data type that is used for all profile_id fields in OTLP
+// Protobuf messages.
+type ProfileID [profileIDSize]byte
+
+var _ proto.Sizer = (*ProfileID)(nil)
+
+// Size returns the size of the data to serialize.
+func (tid ProfileID) Size() int {
+	if tid.IsEmpty() {
+		return 0
+	}
+	return profileIDSize
+}
+
+// IsEmpty returns true if the id contains no non-zero bytes.
+func (tid ProfileID) IsEmpty() bool {
+	return tid == [profileIDSize]byte{}
+}
+
+// MarshalTo converts profile ID into a binary representation. Called by Protobuf serialization.
+func (tid ProfileID) MarshalTo(data []byte) (n int, err error) {
+	if tid.IsEmpty() {
+		return 0, nil
+	}
+
+	if len(data) < profileIDSize {
+		return 0, errMarshalProfileID
+	}
+
+	return copy(data, tid[:]), nil
+}
+
+// Unmarshal inflates this profile ID from binary representation. Called by Protobuf serialization.
+func (tid *ProfileID) Unmarshal(data []byte) error { + if len(data) == 0 { + *tid = [profileIDSize]byte{} + return nil + } + + if len(data) != profileIDSize { + return errUnmarshalProfileID + } + + copy(tid[:], data) + return nil +} + +// MarshalJSON converts profile id into a hex string enclosed in quotes. +func (tid ProfileID) MarshalJSON() ([]byte, error) { + if tid.IsEmpty() { + return []byte(`""`), nil + } + return marshalJSON(tid[:]) +} + +// UnmarshalJSON inflates profile id from hex string, possibly enclosed in quotes. +// Called by Protobuf JSON deserialization. +func (tid *ProfileID) UnmarshalJSON(data []byte) error { + *tid = [profileIDSize]byte{} + return unmarshalJSON(tid[:], data) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1experimental/profiles_service.pb.go b/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1development/profiles_service.pb.go similarity index 84% rename from vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1experimental/profiles_service.pb.go rename to vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1development/profiles_service.pb.go index f998cd3f8c5..9e0475e6875 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1experimental/profiles_service.pb.go +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1development/profiles_service.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: opentelemetry/proto/collector/profiles/v1experimental/profiles_service.proto +// source: opentelemetry/proto/collector/profiles/v1development/profiles_service.proto -package v1experimental +package v1development import ( context "context" @@ -16,7 +16,7 @@ import ( codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" - v1experimental "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1experimental" + v1development "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" ) // Reference imports to suppress errors if they are not otherwise used. @@ -36,14 +36,14 @@ type ExportProfilesServiceRequest struct { // element. Intermediary nodes (such as OpenTelemetry Collector) that receive // data from multiple origins typically batch the data before forwarding further and // in that case this array will contain multiple elements. 
- ResourceProfiles []*v1experimental.ResourceProfiles `protobuf:"bytes,1,rep,name=resource_profiles,json=resourceProfiles,proto3" json:"resource_profiles,omitempty"` + ResourceProfiles []*v1development.ResourceProfiles `protobuf:"bytes,1,rep,name=resource_profiles,json=resourceProfiles,proto3" json:"resource_profiles,omitempty"` } func (m *ExportProfilesServiceRequest) Reset() { *m = ExportProfilesServiceRequest{} } func (m *ExportProfilesServiceRequest) String() string { return proto.CompactTextString(m) } func (*ExportProfilesServiceRequest) ProtoMessage() {} func (*ExportProfilesServiceRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_3d903b74e05b443d, []int{0} + return fileDescriptor_ad3943ce836e7720, []int{0} } func (m *ExportProfilesServiceRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -72,7 +72,7 @@ func (m *ExportProfilesServiceRequest) XXX_DiscardUnknown() { var xxx_messageInfo_ExportProfilesServiceRequest proto.InternalMessageInfo -func (m *ExportProfilesServiceRequest) GetResourceProfiles() []*v1experimental.ResourceProfiles { +func (m *ExportProfilesServiceRequest) GetResourceProfiles() []*v1development.ResourceProfiles { if m != nil { return m.ResourceProfiles } @@ -102,7 +102,7 @@ func (m *ExportProfilesServiceResponse) Reset() { *m = ExportProfilesSer func (m *ExportProfilesServiceResponse) String() string { return proto.CompactTextString(m) } func (*ExportProfilesServiceResponse) ProtoMessage() {} func (*ExportProfilesServiceResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_3d903b74e05b443d, []int{1} + return fileDescriptor_ad3943ce836e7720, []int{1} } func (m *ExportProfilesServiceResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -158,7 +158,7 @@ func (m *ExportProfilesPartialSuccess) Reset() { *m = ExportProfilesPart func (m *ExportProfilesPartialSuccess) String() string { return proto.CompactTextString(m) } func (*ExportProfilesPartialSuccess) ProtoMessage() {} func (*ExportProfilesPartialSuccess) Descriptor() ([]byte, []int) { - return fileDescriptor_3d903b74e05b443d, []int{2} + return fileDescriptor_ad3943ce836e7720, []int{2} } func (m *ExportProfilesPartialSuccess) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -202,45 +202,45 @@ func (m *ExportProfilesPartialSuccess) GetErrorMessage() string { } func init() { - proto.RegisterType((*ExportProfilesServiceRequest)(nil), "opentelemetry.proto.collector.profiles.v1experimental.ExportProfilesServiceRequest") - proto.RegisterType((*ExportProfilesServiceResponse)(nil), "opentelemetry.proto.collector.profiles.v1experimental.ExportProfilesServiceResponse") - proto.RegisterType((*ExportProfilesPartialSuccess)(nil), "opentelemetry.proto.collector.profiles.v1experimental.ExportProfilesPartialSuccess") + proto.RegisterType((*ExportProfilesServiceRequest)(nil), "opentelemetry.proto.collector.profiles.v1development.ExportProfilesServiceRequest") + proto.RegisterType((*ExportProfilesServiceResponse)(nil), "opentelemetry.proto.collector.profiles.v1development.ExportProfilesServiceResponse") + proto.RegisterType((*ExportProfilesPartialSuccess)(nil), "opentelemetry.proto.collector.profiles.v1development.ExportProfilesPartialSuccess") } func init() { - proto.RegisterFile("opentelemetry/proto/collector/profiles/v1experimental/profiles_service.proto", fileDescriptor_3d903b74e05b443d) -} - -var fileDescriptor_3d903b74e05b443d = []byte{ - // 437 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x53, 0x3f, 0xcb, 
0xd3, 0x40, - 0x18, 0xcf, 0xf5, 0x95, 0x17, 0xbc, 0xaa, 0xd5, 0xd0, 0xa1, 0x14, 0x8d, 0x25, 0x2e, 0x01, 0xe1, - 0x42, 0x2b, 0x05, 0x11, 0x5c, 0x2a, 0xdd, 0x14, 0x43, 0x5a, 0x1c, 0x44, 0x08, 0x31, 0x7d, 0x0c, - 0x29, 0x69, 0xee, 0xbc, 0xbb, 0x96, 0xba, 0x89, 0xa3, 0x93, 0x1f, 0xc2, 0xc9, 0xdd, 0xef, 0x50, - 0xb7, 0x8e, 0x4e, 0x22, 0xed, 0x17, 0x79, 0x49, 0xae, 0x09, 0x4d, 0x68, 0x29, 0x94, 0x6e, 0x77, - 0xbf, 0xe3, 0xf7, 0xe7, 0xf9, 0x1d, 0x0f, 0x7e, 0x4d, 0x19, 0x24, 0x12, 0x62, 0x98, 0x81, 0xe4, - 0x5f, 0x6c, 0xc6, 0xa9, 0xa4, 0x76, 0x40, 0xe3, 0x18, 0x02, 0x49, 0x79, 0x7a, 0xff, 0x14, 0xc5, - 0x20, 0xec, 0x45, 0x17, 0x96, 0x0c, 0x78, 0x34, 0x83, 0x44, 0xfa, 0x71, 0x81, 0x7b, 0x02, 0xf8, - 0x22, 0x0a, 0x80, 0x64, 0x44, 0xbd, 0x5f, 0x52, 0x53, 0x20, 0x29, 0xd4, 0x48, 0xce, 0x22, 0x65, - 0xb5, 0x76, 0x33, 0xa4, 0x21, 0x55, 0xd6, 0xe9, 0x49, 0xf1, 0xda, 0x2f, 0x0e, 0x45, 0x3b, 0x15, - 0x48, 0x71, 0xcd, 0xef, 0x08, 0x3f, 0x1c, 0x2e, 0x19, 0xe5, 0xd2, 0xd9, 0x3d, 0x8c, 0x54, 0x50, - 0x17, 0x3e, 0xcf, 0x41, 0x48, 0x7d, 0x8a, 0x1f, 0x70, 0x10, 0x74, 0xce, 0x03, 0xf0, 0x72, 0x6e, - 0x0b, 0x75, 0xae, 0xac, 0x7a, 0xef, 0x25, 0x39, 0x34, 0xc5, 0x91, 0xec, 0xc4, 0xdd, 0xa9, 0xe4, - 0x3e, 0xee, 0x7d, 0x5e, 0x41, 0xcc, 0x9f, 0x08, 0x3f, 0x3a, 0x12, 0x46, 0x30, 0x9a, 0x08, 0xd0, - 0xbf, 0x21, 0xdc, 0x60, 0x3e, 0x97, 0x91, 0x1f, 0x7b, 0x62, 0x1e, 0x04, 0x20, 0xd2, 0x30, 0xc8, - 0xaa, 0xf7, 0x46, 0xe4, 0xac, 0x4a, 0x49, 0xd9, 0xcf, 0x51, 0xda, 0x23, 0x25, 0x3d, 0xb8, 0xb5, - 0xfa, 0xf7, 0x58, 0x73, 0xef, 0xb1, 0x12, 0x6a, 0xb2, 0x6a, 0x65, 0x65, 0x96, 0xfe, 0x34, 0xad, - 0x6c, 0x0a, 0x81, 0x84, 0xc9, 0x7e, 0x65, 0xc8, 0xba, 0x4a, 0x67, 0x56, 0x0f, 0x39, 0x55, 0x7f, - 0x82, 0xef, 0x02, 0xe7, 0x94, 0x7b, 0x33, 0x10, 0xc2, 0x0f, 0xa1, 0x55, 0xeb, 0x20, 0xeb, 0xb6, - 0x7b, 0x27, 0x03, 0xdf, 0x28, 0xac, 0xf7, 0x07, 0xe1, 0x46, 0xa5, 0x12, 0xfd, 0x37, 0xc2, 0xd7, - 0x2a, 0x86, 0x7e, 0x99, 0xd9, 0xcb, 0x1f, 0xdf, 0x1e, 0x5f, 0x56, 0x54, 0x7d, 0xa0, 0xa9, 0x0d, - 0xbe, 0xd6, 0x56, 0x1b, 0x03, 0xad, 0x37, 0x06, 0xfa, 0xbf, 0x31, 0xd0, 0x8f, 0xad, 0xa1, 0xad, - 0xb7, 0x86, 0xf6, 0x77, 0x6b, 0x68, 0xf8, 0x79, 0x44, 0xcf, 0x33, 0x1d, 0x34, 0x2b, 0x7e, 0x4e, - 0xca, 0x73, 0xd0, 0xfb, 0x0f, 0x61, 0x55, 0x31, 0x2a, 0x6d, 0xed, 0xc4, 0x97, 0xbe, 0x1d, 0x25, - 0x12, 0x78, 0xe2, 0xc7, 0x76, 0x76, 0xcb, 0x2c, 0x43, 0x48, 0x4e, 0x2f, 0xf7, 0xaf, 0x5a, 0xff, - 0x2d, 0x83, 0x64, 0x5c, 0x68, 0x67, 0xae, 0xe4, 0x55, 0x91, 0x36, 0x0f, 0x45, 0xde, 0x75, 0x87, - 0x7b, 0xbc, 0x8f, 0xd7, 0x99, 0xc7, 0xb3, 0x9b, 0x00, 0x00, 0x00, 0xff, 0xff, 0x8e, 0x3d, 0x57, - 0xb0, 0x54, 0x04, 0x00, 0x00, + proto.RegisterFile("opentelemetry/proto/collector/profiles/v1development/profiles_service.proto", fileDescriptor_ad3943ce836e7720) +} + +var fileDescriptor_ad3943ce836e7720 = []byte{ + // 438 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x53, 0x4d, 0x8b, 0xd3, 0x40, + 0x18, 0xce, 0xb4, 0x52, 0x70, 0xaa, 0x56, 0x43, 0x0f, 0xa5, 0x68, 0x2c, 0xf1, 0x12, 0x10, 0x26, + 0xb4, 0x16, 0x44, 0xf0, 0x54, 0xf5, 0x24, 0x62, 0x48, 0xc5, 0x83, 0x1e, 0x42, 0x4c, 0x5f, 0x43, + 0x24, 0xcd, 0x8c, 0x33, 0xd3, 0xa2, 0x47, 0x8f, 0xde, 0xf6, 0x3f, 0xec, 0x6d, 0xaf, 0xfb, 0x23, + 0xb6, 0xc7, 0x1e, 0xf7, 0xb4, 0x2c, 0xed, 0xef, 0x58, 0x58, 0x92, 0x69, 0xb2, 0x9b, 0xd0, 0xa5, + 0x50, 0x7a, 0x9b, 0x79, 0x86, 0xe7, 0xe3, 0x7d, 0x86, 0x17, 0x7f, 0xa4, 0x0c, 0x12, 0x09, 0x31, + 0x4c, 0x41, 0xf2, 0xbf, 0x36, 0xe3, 0x54, 0x52, 0x3b, 0xa0, 0x71, 0x0c, 0x81, 0xa4, 0x3c, 0xbd, + 0xff, 0x8c, 0x62, 0x10, 0xf6, 0xbc, 0x3f, 
0x81, 0x39, 0xc4, 0x94, 0x4d, 0x21, 0x91, 0x05, 0xec, + 0x09, 0xe0, 0xf3, 0x28, 0x00, 0x92, 0xf1, 0xf4, 0x61, 0x49, 0x4c, 0x81, 0xa4, 0x10, 0x23, 0x39, + 0x8b, 0x94, 0xc4, 0xba, 0xed, 0x90, 0x86, 0x54, 0x19, 0xa7, 0x27, 0x45, 0xeb, 0xbe, 0xd9, 0x16, + 0x6c, 0x47, 0x1c, 0x45, 0x35, 0xff, 0x23, 0xfc, 0xf4, 0xc3, 0x1f, 0x46, 0xb9, 0x74, 0x36, 0x0f, + 0x63, 0x15, 0xd3, 0x85, 0xdf, 0x33, 0x10, 0x52, 0x8f, 0xf0, 0x13, 0x0e, 0x82, 0xce, 0x78, 0x00, + 0x5e, 0xce, 0xed, 0xa0, 0x5e, 0xdd, 0x6a, 0x0e, 0xde, 0x92, 0x6d, 0x33, 0x6c, 0x4f, 0x4e, 0xdc, + 0x8d, 0x48, 0x6e, 0xe3, 0x3e, 0xe6, 0x15, 0xc4, 0x3c, 0x46, 0xf8, 0xd9, 0x1d, 0x59, 0x04, 0xa3, + 0x89, 0x00, 0xfd, 0x1f, 0xc2, 0x2d, 0xe6, 0x73, 0x19, 0xf9, 0xb1, 0x27, 0x66, 0x41, 0x00, 0x22, + 0xcd, 0x82, 0xac, 0xe6, 0xc0, 0x25, 0xfb, 0xf4, 0x49, 0xca, 0x76, 0x8e, 0x92, 0x1e, 0x2b, 0xe5, + 0xd1, 0xbd, 0xc5, 0xc5, 0x73, 0xcd, 0x7d, 0xc4, 0x4a, 0xa8, 0xc9, 0xaa, 0x85, 0x95, 0x59, 0xfa, + 0xcb, 0xb4, 0xb0, 0x5f, 0x10, 0x48, 0x98, 0xdc, 0x2e, 0x0c, 0x59, 0xf5, 0x74, 0x64, 0xf5, 0x90, + 0x53, 0xf5, 0x17, 0xf8, 0x21, 0x70, 0x4e, 0xb9, 0x37, 0x05, 0x21, 0xfc, 0x10, 0x3a, 0xb5, 0x1e, + 0xb2, 0xee, 0xbb, 0x0f, 0x32, 0xf0, 0x93, 0xc2, 0x06, 0x67, 0x08, 0xb7, 0x2a, 0x8d, 0xe8, 0xa7, + 0x08, 0x37, 0x54, 0x0c, 0xfd, 0x20, 0xa3, 0x97, 0x7f, 0xbd, 0x3b, 0x3e, 0xa8, 0xa6, 0xfa, 0x3d, + 0x53, 0x1b, 0x5d, 0xa1, 0xc5, 0xca, 0x40, 0xcb, 0x95, 0x81, 0x2e, 0x57, 0x06, 0x3a, 0x5a, 0x1b, + 0xda, 0x72, 0x6d, 0x68, 0xe7, 0x6b, 0x43, 0xc3, 0xaf, 0x23, 0xba, 0x97, 0xe7, 0xa8, 0x5d, 0xb1, + 0x73, 0x52, 0x9a, 0x83, 0xbe, 0x7d, 0x0f, 0xab, 0x82, 0x51, 0x69, 0x5b, 0x27, 0xbe, 0xf4, 0xed, + 0x28, 0x91, 0xc0, 0x13, 0x3f, 0xb6, 0xb3, 0x5b, 0xe6, 0x18, 0x42, 0xb2, 0x73, 0xa9, 0x4f, 0x6a, + 0xc3, 0xcf, 0x0c, 0x92, 0x2f, 0x85, 0x74, 0x66, 0x4a, 0xde, 0x15, 0x59, 0xf3, 0x4c, 0xe4, 0x6b, + 0xff, 0xfd, 0x0d, 0xed, 0x47, 0x23, 0x73, 0x78, 0x75, 0x1d, 0x00, 0x00, 0xff, 0xff, 0x8f, 0x35, + 0x8c, 0xea, 0x4a, 0x04, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -270,7 +270,7 @@ func NewProfilesServiceClient(cc *grpc.ClientConn) ProfilesServiceClient { func (c *profilesServiceClient) Export(ctx context.Context, in *ExportProfilesServiceRequest, opts ...grpc.CallOption) (*ExportProfilesServiceResponse, error) { out := new(ExportProfilesServiceResponse) - err := c.cc.Invoke(ctx, "/opentelemetry.proto.collector.profiles.v1experimental.ProfilesService/Export", in, out, opts...) + err := c.cc.Invoke(ctx, "/opentelemetry.proto.collector.profiles.v1development.ProfilesService/Export", in, out, opts...) 
if err != nil { return nil, err } @@ -306,7 +306,7 @@ func _ProfilesService_Export_Handler(srv interface{}, ctx context.Context, dec f } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/opentelemetry.proto.collector.profiles.v1experimental.ProfilesService/Export", + FullMethod: "/opentelemetry.proto.collector.profiles.v1development.ProfilesService/Export", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ProfilesServiceServer).Export(ctx, req.(*ExportProfilesServiceRequest)) @@ -315,7 +315,7 @@ func _ProfilesService_Export_Handler(srv interface{}, ctx context.Context, dec f } var _ProfilesService_serviceDesc = grpc.ServiceDesc{ - ServiceName: "opentelemetry.proto.collector.profiles.v1experimental.ProfilesService", + ServiceName: "opentelemetry.proto.collector.profiles.v1development.ProfilesService", HandlerType: (*ProfilesServiceServer)(nil), Methods: []grpc.MethodDesc{ { @@ -324,7 +324,7 @@ var _ProfilesService_serviceDesc = grpc.ServiceDesc{ }, }, Streams: []grpc.StreamDesc{}, - Metadata: "opentelemetry/proto/collector/profiles/v1experimental/profiles_service.proto", + Metadata: "opentelemetry/proto/collector/profiles/v1development/profiles_service.proto", } func (m *ExportProfilesServiceRequest) Marshal() (dAtA []byte, err error) { @@ -549,7 +549,7 @@ func (m *ExportProfilesServiceRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ResourceProfiles = append(m.ResourceProfiles, &v1experimental.ResourceProfiles{}) + m.ResourceProfiles = append(m.ResourceProfiles, &v1development.ResourceProfiles{}) if err := m.ResourceProfiles[len(m.ResourceProfiles)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1/metrics.pb.go b/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1/metrics.pb.go index 3649bd83f81..66143337964 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1/metrics.pb.go +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1/metrics.pb.go @@ -160,6 +160,25 @@ func (DataPointFlags) EnumDescriptor() ([]byte, []int) { // storage, OR can be embedded by other protocols that transfer OTLP metrics // data but do not implement the OTLP protocol. // +// MetricsData +// └─── ResourceMetrics +// +// ├── Resource +// ├── SchemaURL +// └── ScopeMetrics +// ├── Scope +// ├── SchemaURL +// └── Metric +// ├── Name +// ├── Description +// ├── Unit +// └── data +// ├── Gauge +// ├── Sum +// ├── Histogram +// ├── ExponentialHistogram +// └── Summary +// // The main difference between this message and collector protocol is that // in this message there will not be any "control" or "metadata" specific to // OTLP protocol. @@ -855,6 +874,9 @@ func (m *ExponentialHistogram) GetAggregationTemporality() AggregationTemporalit // data type. These data points cannot always be merged in a meaningful way. // While they can be useful in some applications, histogram data points are // recommended for new applications. +// Summary metrics do not have an aggregation temporality field. This is +// because the count and sum fields of a SummaryDataPoint are assumed to be +// cumulative values. 
type Summary struct { DataPoints []*SummaryDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"` } @@ -1644,7 +1666,8 @@ func (m *ExponentialHistogramDataPoint_Buckets) GetBucketCounts() []uint64 { } // SummaryDataPoint is a single data point in a timeseries that describes the -// time-varying values of a Summary metric. +// time-varying values of a Summary metric. The count and sum fields represent +// cumulative values. type SummaryDataPoint struct { // The set of key/value pairs that uniquely identify the timeseries from // where this point belongs. The list may be empty (may contain 0 elements). diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1experimental/pprofextended.pb.go b/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development/profiles.pb.go similarity index 55% rename from vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1experimental/pprofextended.pb.go rename to vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development/profiles.pb.go index 64c91b3221e..faa8630469c 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1experimental/pprofextended.pb.go +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development/profiles.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: opentelemetry/proto/profiles/v1experimental/pprofextended.proto +// source: opentelemetry/proto/profiles/v1development/profiles.proto -package v1experimental +package v1development import ( fmt "fmt" @@ -13,7 +13,8 @@ import ( proto "github.com/gogo/protobuf/proto" go_opentelemetry_io_collector_pdata_internal_data "go.opentelemetry.io/collector/pdata/internal/data" - v1 "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1" + v11 "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1" + v1 "go.opentelemetry.io/collector/pdata/internal/data/protogen/resource/v1" ) // Reference imports to suppress errors if they are not otherwise used. @@ -88,7 +89,7 @@ const ( //11. A request is received, the system measures 1 request. //12. The 1 second collection cycle ends. A metric is exported for the //number of requests received over the interval of time t_1 to - //t_0+1 with a value of 1. + //t_1+1 with a value of 1. // //Note: Even though, when reporting changes since last report time, using //CUMULATIVE is valid, it is not recommended. @@ -112,42 +113,213 @@ func (x AggregationTemporality) String() string { } func (AggregationTemporality) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_05f9ce3fdbeb046f, []int{0} + return fileDescriptor_ddd0cf081a2fe76f, []int{0} +} + +// ProfilesData represents the profiles data that can be stored in persistent storage, +// OR can be embedded by other protocols that transfer OTLP profiles data but do not +// implement the OTLP protocol. +// +// The main difference between this message and collector protocol is that +// in this message there will not be any "control" or "metadata" specific to +// OTLP protocol. +// +// When new fields are added into this message, the OTLP request MUST be updated +// as well. +type ProfilesData struct { + // An array of ResourceProfiles. + // For data coming from a single resource this array will typically contain + // one element. 
Intermediary nodes that receive data from multiple origins + // typically batch the data before forwarding further and in that case this + // array will contain multiple elements. + ResourceProfiles []*ResourceProfiles `protobuf:"bytes,1,rep,name=resource_profiles,json=resourceProfiles,proto3" json:"resource_profiles,omitempty"` +} + +func (m *ProfilesData) Reset() { *m = ProfilesData{} } +func (m *ProfilesData) String() string { return proto.CompactTextString(m) } +func (*ProfilesData) ProtoMessage() {} +func (*ProfilesData) Descriptor() ([]byte, []int) { + return fileDescriptor_ddd0cf081a2fe76f, []int{0} +} +func (m *ProfilesData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ProfilesData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ProfilesData.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ProfilesData) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProfilesData.Merge(m, src) +} +func (m *ProfilesData) XXX_Size() int { + return m.Size() +} +func (m *ProfilesData) XXX_DiscardUnknown() { + xxx_messageInfo_ProfilesData.DiscardUnknown(m) } -// Indicates the semantics of the build_id field. -type BuildIdKind int32 +var xxx_messageInfo_ProfilesData proto.InternalMessageInfo -const ( - // Linker-generated build ID, stored in the ELF binary notes. - BuildIdKind_BUILD_ID_LINKER BuildIdKind = 0 - // Build ID based on the content hash of the binary. Currently no particular - // hashing approach is standardized, so a given producer needs to define it - // themselves and thus unlike BUILD_ID_LINKER this kind of hash is producer-specific. - // We may choose to provide a standardized stable hash recommendation later. - BuildIdKind_BUILD_ID_BINARY_HASH BuildIdKind = 1 -) +func (m *ProfilesData) GetResourceProfiles() []*ResourceProfiles { + if m != nil { + return m.ResourceProfiles + } + return nil +} + +// A collection of ScopeProfiles from a Resource. +type ResourceProfiles struct { + // The resource for the profiles in this message. + // If this field is not set then no resource info is known. + Resource v1.Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource"` + // A list of ScopeProfiles that originate from a resource. + ScopeProfiles []*ScopeProfiles `protobuf:"bytes,2,rep,name=scope_profiles,json=scopeProfiles,proto3" json:"scope_profiles,omitempty"` + // The Schema URL, if known. This is the identifier of the Schema that the resource data + // is recorded in. To learn more about Schema URL see + // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url + // This schema_url applies to the data in the "resource" field. It does not apply + // to the data in the "scope_profiles" field which have their own schema_url field. 
+ SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` +} + +func (m *ResourceProfiles) Reset() { *m = ResourceProfiles{} } +func (m *ResourceProfiles) String() string { return proto.CompactTextString(m) } +func (*ResourceProfiles) ProtoMessage() {} +func (*ResourceProfiles) Descriptor() ([]byte, []int) { + return fileDescriptor_ddd0cf081a2fe76f, []int{1} +} +func (m *ResourceProfiles) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceProfiles) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResourceProfiles.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResourceProfiles) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceProfiles.Merge(m, src) +} +func (m *ResourceProfiles) XXX_Size() int { + return m.Size() +} +func (m *ResourceProfiles) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceProfiles.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceProfiles proto.InternalMessageInfo + +func (m *ResourceProfiles) GetResource() v1.Resource { + if m != nil { + return m.Resource + } + return v1.Resource{} +} + +func (m *ResourceProfiles) GetScopeProfiles() []*ScopeProfiles { + if m != nil { + return m.ScopeProfiles + } + return nil +} -var BuildIdKind_name = map[int32]string{ - 0: "BUILD_ID_LINKER", - 1: "BUILD_ID_BINARY_HASH", +func (m *ResourceProfiles) GetSchemaUrl() string { + if m != nil { + return m.SchemaUrl + } + return "" +} + +// A collection of Profiles produced by an InstrumentationScope. +type ScopeProfiles struct { + // The instrumentation scope information for the profiles in this message. + // Semantically when InstrumentationScope isn't set, it is equivalent with + // an empty instrumentation scope name (unknown). + Scope v11.InstrumentationScope `protobuf:"bytes,1,opt,name=scope,proto3" json:"scope"` + // A list of Profiles that originate from an instrumentation scope. + Profiles []*Profile `protobuf:"bytes,2,rep,name=profiles,proto3" json:"profiles,omitempty"` + // The Schema URL, if known. This is the identifier of the Schema that the profile data + // is recorded in. To learn more about Schema URL see + // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url + // This schema_url applies to all profiles in the "profiles" field. 
+ SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` +} + +func (m *ScopeProfiles) Reset() { *m = ScopeProfiles{} } +func (m *ScopeProfiles) String() string { return proto.CompactTextString(m) } +func (*ScopeProfiles) ProtoMessage() {} +func (*ScopeProfiles) Descriptor() ([]byte, []int) { + return fileDescriptor_ddd0cf081a2fe76f, []int{2} +} +func (m *ScopeProfiles) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScopeProfiles) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ScopeProfiles.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ScopeProfiles) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScopeProfiles.Merge(m, src) +} +func (m *ScopeProfiles) XXX_Size() int { + return m.Size() +} +func (m *ScopeProfiles) XXX_DiscardUnknown() { + xxx_messageInfo_ScopeProfiles.DiscardUnknown(m) } -var BuildIdKind_value = map[string]int32{ - "BUILD_ID_LINKER": 0, - "BUILD_ID_BINARY_HASH": 1, +var xxx_messageInfo_ScopeProfiles proto.InternalMessageInfo + +func (m *ScopeProfiles) GetScope() v11.InstrumentationScope { + if m != nil { + return m.Scope + } + return v11.InstrumentationScope{} } -func (x BuildIdKind) String() string { - return proto.EnumName(BuildIdKind_name, int32(x)) +func (m *ScopeProfiles) GetProfiles() []*Profile { + if m != nil { + return m.Profiles + } + return nil } -func (BuildIdKind) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_05f9ce3fdbeb046f, []int{1} +func (m *ScopeProfiles) GetSchemaUrl() string { + if m != nil { + return m.SchemaUrl + } + return "" } // Represents a complete profile, including sample types, samples, // mappings to binaries, locations, functions, string table, and additional metadata. +// It modifies and annotates pprof Profile with OpenTelemetry specific fields. +// +// Note that whilst fields in this message retain the name and field id from pprof in most cases +// for ease of understanding data migration, it is not intended that pprof:Profile and +// OpenTelemetry:Profile encoding be wire compatible. type Profile struct { // A description of the samples associated with each Sample.value. // For a cpu profile this might be: @@ -157,58 +329,85 @@ type Profile struct { // If one of the values represents the number of events represented // by the sample, by convention it should be at index 0 and use // sample_type.unit == "count". - SampleType []ValueType `protobuf:"bytes,1,rep,name=sample_type,json=sampleType,proto3" json:"sample_type"` + SampleType []*ValueType `protobuf:"bytes,1,rep,name=sample_type,json=sampleType,proto3" json:"sample_type,omitempty"` // The set of samples recorded in this profile. - Sample []Sample `protobuf:"bytes,2,rep,name=sample,proto3" json:"sample"` + Sample []*Sample `protobuf:"bytes,2,rep,name=sample,proto3" json:"sample,omitempty"` // Mapping from address ranges to the image/binary/library mapped // into that address range. mapping[0] will be the main binary. - Mapping []Mapping `protobuf:"bytes,3,rep,name=mapping,proto3" json:"mapping"` + // If multiple binaries contribute to the Profile and no main + // binary can be identified, mapping[0] has no special meaning. + MappingTable []*Mapping `protobuf:"bytes,3,rep,name=mapping_table,json=mappingTable,proto3" json:"mapping_table,omitempty"` // Locations referenced by samples via location_indices. 
- Location []Location `protobuf:"bytes,4,rep,name=location,proto3" json:"location"` + LocationTable []*Location `protobuf:"bytes,4,rep,name=location_table,json=locationTable,proto3" json:"location_table,omitempty"` // Array of locations referenced by samples. - LocationIndices []int64 `protobuf:"varint,15,rep,packed,name=location_indices,json=locationIndices,proto3" json:"location_indices,omitempty"` + LocationIndices []int32 `protobuf:"varint,5,rep,packed,name=location_indices,json=locationIndices,proto3" json:"location_indices,omitempty"` // Functions referenced by locations. - Function []Function `protobuf:"bytes,5,rep,name=function,proto3" json:"function"` + FunctionTable []*Function `protobuf:"bytes,6,rep,name=function_table,json=functionTable,proto3" json:"function_table,omitempty"` // Lookup table for attributes. - AttributeTable []v1.KeyValue `protobuf:"bytes,16,rep,name=attribute_table,json=attributeTable,proto3" json:"attribute_table"` + AttributeTable []v11.KeyValue `protobuf:"bytes,7,rep,name=attribute_table,json=attributeTable,proto3" json:"attribute_table"` // Represents a mapping between Attribute Keys and Units. - AttributeUnits []AttributeUnit `protobuf:"bytes,17,rep,name=attribute_units,json=attributeUnits,proto3" json:"attribute_units"` + AttributeUnits []*AttributeUnit `protobuf:"bytes,8,rep,name=attribute_units,json=attributeUnits,proto3" json:"attribute_units,omitempty"` // Lookup table for links. - LinkTable []Link `protobuf:"bytes,18,rep,name=link_table,json=linkTable,proto3" json:"link_table"` + LinkTable []*Link `protobuf:"bytes,9,rep,name=link_table,json=linkTable,proto3" json:"link_table,omitempty"` // A common table for strings referenced by various messages. // string_table[0] must always be "". - StringTable []string `protobuf:"bytes,6,rep,name=string_table,json=stringTable,proto3" json:"string_table,omitempty"` - // frames with Function.function_name fully matching the following - // regexp will be dropped from the samples, along with their successors. - DropFrames int64 `protobuf:"varint,7,opt,name=drop_frames,json=dropFrames,proto3" json:"drop_frames,omitempty"` - // frames with Function.function_name fully matching the following - // regexp will be kept, even if it matches drop_frames. - KeepFrames int64 `protobuf:"varint,8,opt,name=keep_frames,json=keepFrames,proto3" json:"keep_frames,omitempty"` + StringTable []string `protobuf:"bytes,10,rep,name=string_table,json=stringTable,proto3" json:"string_table,omitempty"` // Time of collection (UTC) represented as nanoseconds past the epoch. - TimeNanos int64 `protobuf:"varint,9,opt,name=time_nanos,json=timeNanos,proto3" json:"time_nanos,omitempty"` + TimeNanos int64 `protobuf:"varint,11,opt,name=time_nanos,json=timeNanos,proto3" json:"time_nanos,omitempty"` // Duration of the profile, if a duration makes sense. - DurationNanos int64 `protobuf:"varint,10,opt,name=duration_nanos,json=durationNanos,proto3" json:"duration_nanos,omitempty"` + DurationNanos int64 `protobuf:"varint,12,opt,name=duration_nanos,json=durationNanos,proto3" json:"duration_nanos,omitempty"` // The kind of events between sampled occurrences. // e.g [ "cpu","cycles" ] or [ "heap","bytes" ] - PeriodType ValueType `protobuf:"bytes,11,opt,name=period_type,json=periodType,proto3" json:"period_type"` + PeriodType ValueType `protobuf:"bytes,13,opt,name=period_type,json=periodType,proto3" json:"period_type"` // The number of events between sampled occurrences. 
- Period int64 `protobuf:"varint,12,opt,name=period,proto3" json:"period,omitempty"` + Period int64 `protobuf:"varint,14,opt,name=period,proto3" json:"period,omitempty"` // Free-form text associated with the profile. The text is displayed as is // to the user by the tools that read profiles (e.g. by pprof). This field // should not be used to store any machine-readable information, it is only // for human-friendly content. The profile must stay functional if this field // is cleaned. - Comment []int64 `protobuf:"varint,13,rep,packed,name=comment,proto3" json:"comment,omitempty"` + CommentStrindices []int32 `protobuf:"varint,15,rep,packed,name=comment_strindices,json=commentStrindices,proto3" json:"comment_strindices,omitempty"` // Index into the string table of the type of the preferred sample // value. If unset, clients should default to the last sample value. - DefaultSampleType int64 `protobuf:"varint,14,opt,name=default_sample_type,json=defaultSampleType,proto3" json:"default_sample_type,omitempty"` + DefaultSampleTypeStrindex int32 `protobuf:"varint,16,opt,name=default_sample_type_strindex,json=defaultSampleTypeStrindex,proto3" json:"default_sample_type_strindex,omitempty"` + // A globally unique identifier for a profile. The ID is a 16-byte array. An ID with + // all zeroes is considered invalid. + // + // This field is required. + ProfileId go_opentelemetry_io_collector_pdata_internal_data.ProfileID `protobuf:"bytes,17,opt,name=profile_id,json=profileId,proto3,customtype=go.opentelemetry.io/collector/pdata/internal/data.ProfileID" json:"profile_id"` + // attributes is a collection of key/value pairs. Note, global attributes + // like server name can be set using the resource API. Examples of attributes: + // + // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" + // "/http/server_latency": 300 + // "abc.com/myattribute": true + // "abc.com/score": 10.239 + // + // The OpenTelemetry API specification further restricts the allowed value types: + // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + Attributes []v11.KeyValue `protobuf:"bytes,18,rep,name=attributes,proto3" json:"attributes"` + // dropped_attributes_count is the number of attributes that were discarded. Attributes + // can be discarded because their keys are too long or because there are too many + // attributes. If this value is 0, then no attributes were dropped. + DroppedAttributesCount uint32 `protobuf:"varint,19,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` + // Specifies format of the original payload. Common values are defined in semantic conventions. [required if original_payload is present] + OriginalPayloadFormat string `protobuf:"bytes,20,opt,name=original_payload_format,json=originalPayloadFormat,proto3" json:"original_payload_format,omitempty"` + // Original payload can be stored in this field. This can be useful for users who want to get the original payload. + // Formats such as JFR are highly extensible and can contain more information than what is defined in this spec. + // Inclusion of original payload should be configurable by the user. Default behavior should be to not include the original payload. 
+ // If the original payload is in pprof format, it SHOULD not be included in this field. + // The field is optional, however if it is present then equivalent converted data should be populated in other fields + // of this message as far as is practicable. + OriginalPayload []byte `protobuf:"bytes,21,opt,name=original_payload,json=originalPayload,proto3" json:"original_payload,omitempty"` } func (m *Profile) Reset() { *m = Profile{} } func (m *Profile) String() string { return proto.CompactTextString(m) } func (*Profile) ProtoMessage() {} func (*Profile) Descriptor() ([]byte, []int) { - return fileDescriptor_05f9ce3fdbeb046f, []int{0} + return fileDescriptor_ddd0cf081a2fe76f, []int{3} } func (m *Profile) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -237,63 +436,63 @@ func (m *Profile) XXX_DiscardUnknown() { var xxx_messageInfo_Profile proto.InternalMessageInfo -func (m *Profile) GetSampleType() []ValueType { +func (m *Profile) GetSampleType() []*ValueType { if m != nil { return m.SampleType } return nil } -func (m *Profile) GetSample() []Sample { +func (m *Profile) GetSample() []*Sample { if m != nil { return m.Sample } return nil } -func (m *Profile) GetMapping() []Mapping { +func (m *Profile) GetMappingTable() []*Mapping { if m != nil { - return m.Mapping + return m.MappingTable } return nil } -func (m *Profile) GetLocation() []Location { +func (m *Profile) GetLocationTable() []*Location { if m != nil { - return m.Location + return m.LocationTable } return nil } -func (m *Profile) GetLocationIndices() []int64 { +func (m *Profile) GetLocationIndices() []int32 { if m != nil { return m.LocationIndices } return nil } -func (m *Profile) GetFunction() []Function { +func (m *Profile) GetFunctionTable() []*Function { if m != nil { - return m.Function + return m.FunctionTable } return nil } -func (m *Profile) GetAttributeTable() []v1.KeyValue { +func (m *Profile) GetAttributeTable() []v11.KeyValue { if m != nil { return m.AttributeTable } return nil } -func (m *Profile) GetAttributeUnits() []AttributeUnit { +func (m *Profile) GetAttributeUnits() []*AttributeUnit { if m != nil { return m.AttributeUnits } return nil } -func (m *Profile) GetLinkTable() []Link { +func (m *Profile) GetLinkTable() []*Link { if m != nil { return m.LinkTable } @@ -307,20 +506,6 @@ func (m *Profile) GetStringTable() []string { return nil } -func (m *Profile) GetDropFrames() int64 { - if m != nil { - return m.DropFrames - } - return 0 -} - -func (m *Profile) GetKeepFrames() int64 { - if m != nil { - return m.KeepFrames - } - return 0 -} - func (m *Profile) GetTimeNanos() int64 { if m != nil { return m.TimeNanos @@ -349,33 +534,61 @@ func (m *Profile) GetPeriod() int64 { return 0 } -func (m *Profile) GetComment() []int64 { +func (m *Profile) GetCommentStrindices() []int32 { + if m != nil { + return m.CommentStrindices + } + return nil +} + +func (m *Profile) GetDefaultSampleTypeStrindex() int32 { + if m != nil { + return m.DefaultSampleTypeStrindex + } + return 0 +} + +func (m *Profile) GetAttributes() []v11.KeyValue { if m != nil { - return m.Comment + return m.Attributes } return nil } -func (m *Profile) GetDefaultSampleType() int64 { +func (m *Profile) GetDroppedAttributesCount() uint32 { if m != nil { - return m.DefaultSampleType + return m.DroppedAttributesCount } return 0 } +func (m *Profile) GetOriginalPayloadFormat() string { + if m != nil { + return m.OriginalPayloadFormat + } + return "" +} + +func (m *Profile) GetOriginalPayload() []byte { + if m != nil { + return m.OriginalPayload + } + return nil +} 
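The regenerated Profile message above replaces the old inline strings and int64 indices with int32 *_strindex fields that index into Profile.string_table, whose first entry must always be "". The following is a minimal, self-contained sketch of how a consumer might resolve such an index; resolveString is a hypothetical helper written for illustration and is not part of this patch or of the collector API.

package main

import "fmt"

// resolveString maps a *_strindex field (e.g. default_sample_type_strindex or
// filename_strindex) back to its string via Profile.string_table. Out-of-range
// indices fall back to "", which the format requires to be string_table[0].
func resolveString(stringTable []string, idx int32) string {
	if idx < 0 || int(idx) >= len(stringTable) {
		return ""
	}
	return stringTable[idx]
}

func main() {
	table := []string{"", "samples", "count", "cpu", "nanoseconds"}
	fmt.Println(resolveString(table, 3)) // cpu
	fmt.Println(resolveString(table, 9)) // "" (out of range)
}

With the generated getters this reads, for example, as resolveString(p.GetStringTable(), p.GetDefaultSampleTypeStrindex()).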
+ // Represents a mapping between Attribute Keys and Units. type AttributeUnit struct { // Index into string table. - AttributeKey int64 `protobuf:"varint,1,opt,name=attribute_key,json=attributeKey,proto3" json:"attribute_key,omitempty"` + AttributeKeyStrindex int32 `protobuf:"varint,1,opt,name=attribute_key_strindex,json=attributeKeyStrindex,proto3" json:"attribute_key_strindex,omitempty"` // Index into string table. - Unit int64 `protobuf:"varint,2,opt,name=unit,proto3" json:"unit,omitempty"` + UnitStrindex int32 `protobuf:"varint,2,opt,name=unit_strindex,json=unitStrindex,proto3" json:"unit_strindex,omitempty"` } func (m *AttributeUnit) Reset() { *m = AttributeUnit{} } func (m *AttributeUnit) String() string { return proto.CompactTextString(m) } func (*AttributeUnit) ProtoMessage() {} func (*AttributeUnit) Descriptor() ([]byte, []int) { - return fileDescriptor_05f9ce3fdbeb046f, []int{1} + return fileDescriptor_ddd0cf081a2fe76f, []int{4} } func (m *AttributeUnit) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -404,16 +617,16 @@ func (m *AttributeUnit) XXX_DiscardUnknown() { var xxx_messageInfo_AttributeUnit proto.InternalMessageInfo -func (m *AttributeUnit) GetAttributeKey() int64 { +func (m *AttributeUnit) GetAttributeKeyStrindex() int32 { if m != nil { - return m.AttributeKey + return m.AttributeKeyStrindex } return 0 } -func (m *AttributeUnit) GetUnit() int64 { +func (m *AttributeUnit) GetUnitStrindex() int32 { if m != nil { - return m.Unit + return m.UnitStrindex } return 0 } @@ -432,7 +645,7 @@ func (m *Link) Reset() { *m = Link{} } func (m *Link) String() string { return proto.CompactTextString(m) } func (*Link) ProtoMessage() {} func (*Link) Descriptor() ([]byte, []int) { - return fileDescriptor_05f9ce3fdbeb046f, []int{2} + return fileDescriptor_ddd0cf081a2fe76f, []int{5} } func (m *Link) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -463,16 +676,16 @@ var xxx_messageInfo_Link proto.InternalMessageInfo // ValueType describes the type and units of a value, with an optional aggregation temporality. 
 type ValueType struct {
-	Type                   int64                  `protobuf:"varint,1,opt,name=type,proto3" json:"type,omitempty"`
-	Unit                   int64                  `protobuf:"varint,2,opt,name=unit,proto3" json:"unit,omitempty"`
-	AggregationTemporality AggregationTemporality `protobuf:"varint,3,opt,name=aggregation_temporality,json=aggregationTemporality,proto3,enum=opentelemetry.proto.profiles.v1experimental.AggregationTemporality" json:"aggregation_temporality,omitempty"`
+	TypeStrindex           int32                  `protobuf:"varint,1,opt,name=type_strindex,json=typeStrindex,proto3" json:"type_strindex,omitempty"`
+	UnitStrindex           int32                  `protobuf:"varint,2,opt,name=unit_strindex,json=unitStrindex,proto3" json:"unit_strindex,omitempty"`
+	AggregationTemporality AggregationTemporality `protobuf:"varint,3,opt,name=aggregation_temporality,json=aggregationTemporality,proto3,enum=opentelemetry.proto.profiles.v1development.AggregationTemporality" json:"aggregation_temporality,omitempty"`
 }
 
 func (m *ValueType) Reset()         { *m = ValueType{} }
 func (m *ValueType) String() string { return proto.CompactTextString(m) }
 func (*ValueType) ProtoMessage()    {}
 func (*ValueType) Descriptor() ([]byte, []int) {
-	return fileDescriptor_05f9ce3fdbeb046f, []int{3}
+	return fileDescriptor_ddd0cf081a2fe76f, []int{6}
 }
 func (m *ValueType) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -501,16 +714,16 @@ var xxx_messageInfo_ValueType proto.InternalMessageInfo
 
-func (m *ValueType) GetType() int64 {
+func (m *ValueType) GetTypeStrindex() int32 {
 	if m != nil {
-		return m.Type
+		return m.TypeStrindex
 	}
 	return 0
 }
 
-func (m *ValueType) GetUnit() int64 {
+func (m *ValueType) GetUnitStrindex() int32 {
 	if m != nil {
-		return m.Unit
+		return m.UnitStrindex
 	}
 	return 0
 }
 
@@ -527,48 +740,35 @@ func (m *ValueType) GetAggregationTemporality() AggregationTemporality {
 // augmented with auxiliary information like the thread-id, some
 // indicator of a higher level request being handled etc.
 type Sample struct {
-	// The indices recorded here correspond to locations in Profile.location.
-	// The leaf is at location_index[0]. [deprecated, superseded by locations_start_index / locations_length]
-	LocationIndex []uint64 `protobuf:"varint,1,rep,packed,name=location_index,json=locationIndex,proto3" json:"location_index,omitempty"`
-	// locations_start_index along with locations_length refers to to a slice of locations in Profile.location.
-	// Supersedes location_index.
-	LocationsStartIndex uint64 `protobuf:"varint,7,opt,name=locations_start_index,json=locationsStartIndex,proto3" json:"locations_start_index,omitempty"`
-	// locations_length along with locations_start_index refers to a slice of locations in Profile.location.
+	// locations_start_index along with locations_length refers to a slice of locations in Profile.location_indices.
+	LocationsStartIndex int32 `protobuf:"varint,1,opt,name=locations_start_index,json=locationsStartIndex,proto3" json:"locations_start_index,omitempty"`
+	// locations_length along with locations_start_index refers to a slice of locations in Profile.location_indices.
 	// Supersedes location_index.
-	LocationsLength uint64 `protobuf:"varint,8,opt,name=locations_length,json=locationsLength,proto3" json:"locations_length,omitempty"`
-	// A 128bit id that uniquely identifies this stacktrace, globally. Index into string table.
[optional] - StacktraceIdIndex uint32 `protobuf:"varint,9,opt,name=stacktrace_id_index,json=stacktraceIdIndex,proto3" json:"stacktrace_id_index,omitempty"` + LocationsLength int32 `protobuf:"varint,2,opt,name=locations_length,json=locationsLength,proto3" json:"locations_length,omitempty"` // The type and unit of each value is defined by the corresponding // entry in Profile.sample_type. All samples must have the same // number of values, the same as the length of Profile.sample_type. // When aggregating multiple samples into a single sample, the // result has a list of values that is the element-wise sum of the // lists of the originals. - Value []int64 `protobuf:"varint,2,rep,packed,name=value,proto3" json:"value,omitempty"` - // label includes additional context for this sample. It can include - // things like a thread id, allocation size, etc. - // - // NOTE: While possible, having multiple values for the same label key is - // strongly discouraged and should never be used. Most tools (e.g. pprof) do - // not have good (or any) support for multi-value labels. And an even more - // discouraged case is having a string label and a numeric label of the same - // name on a sample. Again, possible to express, but should not be used. - // [deprecated, superseded by attributes] - Label []Label `protobuf:"bytes,3,rep,name=label,proto3" json:"label"` + Value []int64 `protobuf:"varint,3,rep,packed,name=value,proto3" json:"value,omitempty"` // References to attributes in Profile.attribute_table. [optional] - Attributes []uint64 `protobuf:"varint,10,rep,packed,name=attributes,proto3" json:"attributes,omitempty"` + AttributeIndices []int32 `protobuf:"varint,4,rep,packed,name=attribute_indices,json=attributeIndices,proto3" json:"attribute_indices,omitempty"` // Reference to link in Profile.link_table. [optional] - Link uint64 `protobuf:"varint,12,opt,name=link,proto3" json:"link,omitempty"` + // + // Types that are valid to be assigned to LinkIndex_: + // *Sample_LinkIndex + LinkIndex_ isSample_LinkIndex_ `protobuf_oneof:"link_index_"` // Timestamps associated with Sample represented in nanoseconds. These timestamps are expected // to fall within the Profile's time range. 
[optional] - TimestampsUnixNano []uint64 `protobuf:"varint,13,rep,packed,name=timestamps_unix_nano,json=timestampsUnixNano,proto3" json:"timestamps_unix_nano,omitempty"` + TimestampsUnixNano []uint64 `protobuf:"varint,6,rep,packed,name=timestamps_unix_nano,json=timestampsUnixNano,proto3" json:"timestamps_unix_nano,omitempty"` } func (m *Sample) Reset() { *m = Sample{} } func (m *Sample) String() string { return proto.CompactTextString(m) } func (*Sample) ProtoMessage() {} func (*Sample) Descriptor() ([]byte, []int) { - return fileDescriptor_05f9ce3fdbeb046f, []int{4} + return fileDescriptor_ddd0cf081a2fe76f, []int{7} } func (m *Sample) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -597,34 +797,39 @@ func (m *Sample) XXX_DiscardUnknown() { var xxx_messageInfo_Sample proto.InternalMessageInfo -func (m *Sample) GetLocationIndex() []uint64 { +type isSample_LinkIndex_ interface { + isSample_LinkIndex_() + MarshalTo([]byte) (int, error) + Size() int +} + +type Sample_LinkIndex struct { + LinkIndex int32 `protobuf:"varint,5,opt,name=link_index,json=linkIndex,proto3,oneof" json:"link_index,omitempty"` +} + +func (*Sample_LinkIndex) isSample_LinkIndex_() {} + +func (m *Sample) GetLinkIndex_() isSample_LinkIndex_ { if m != nil { - return m.LocationIndex + return m.LinkIndex_ } return nil } -func (m *Sample) GetLocationsStartIndex() uint64 { +func (m *Sample) GetLocationsStartIndex() int32 { if m != nil { return m.LocationsStartIndex } return 0 } -func (m *Sample) GetLocationsLength() uint64 { +func (m *Sample) GetLocationsLength() int32 { if m != nil { return m.LocationsLength } return 0 } -func (m *Sample) GetStacktraceIdIndex() uint32 { - if m != nil { - return m.StacktraceIdIndex - } - return 0 -} - func (m *Sample) GetValue() []int64 { if m != nil { return m.Value @@ -632,23 +837,16 @@ func (m *Sample) GetValue() []int64 { return nil } -func (m *Sample) GetLabel() []Label { - if m != nil { - return m.Label - } - return nil -} - -func (m *Sample) GetAttributes() []uint64 { +func (m *Sample) GetAttributeIndices() []int32 { if m != nil { - return m.Attributes + return m.AttributeIndices } return nil } -func (m *Sample) GetLink() uint64 { - if m != nil { - return m.Link +func (m *Sample) GetLinkIndex() int32 { + if x, ok := m.GetLinkIndex_().(*Sample_LinkIndex); ok { + return x.LinkIndex } return 0 } @@ -660,13 +858,20 @@ func (m *Sample) GetTimestampsUnixNano() []uint64 { return nil } +// XXX_OneofWrappers is for the internal use of the proto package. +func (*Sample) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Sample_LinkIndex)(nil), + } +} + // Provides additional context for a sample, // such as thread ID or allocation size, with optional units. [deprecated] type Label struct { - Key int64 `protobuf:"varint,1,opt,name=key,proto3" json:"key,omitempty"` + KeyStrindex int32 `protobuf:"varint,1,opt,name=key_strindex,json=keyStrindex,proto3" json:"key_strindex,omitempty"` // At most one of the following must be present - Str int64 `protobuf:"varint,2,opt,name=str,proto3" json:"str,omitempty"` - Num int64 `protobuf:"varint,3,opt,name=num,proto3" json:"num,omitempty"` + StrStrindex int32 `protobuf:"varint,2,opt,name=str_strindex,json=strStrindex,proto3" json:"str_strindex,omitempty"` + Num int64 `protobuf:"varint,3,opt,name=num,proto3" json:"num,omitempty"` // Should only be present when num is present. // Specifies the units of num. // Use arbitrary string (for example, "requests") as a custom count unit. 
@@ -674,14 +879,14 @@ type Label struct { // Consumers may also interpret units like "bytes" and "kilobytes" as memory // units and units like "seconds" and "nanoseconds" as time units, // and apply appropriate unit conversions to these. - NumUnit int64 `protobuf:"varint,4,opt,name=num_unit,json=numUnit,proto3" json:"num_unit,omitempty"` + NumUnitStrindex int32 `protobuf:"varint,4,opt,name=num_unit_strindex,json=numUnitStrindex,proto3" json:"num_unit_strindex,omitempty"` } func (m *Label) Reset() { *m = Label{} } func (m *Label) String() string { return proto.CompactTextString(m) } func (*Label) ProtoMessage() {} func (*Label) Descriptor() ([]byte, []int) { - return fileDescriptor_05f9ce3fdbeb046f, []int{5} + return fileDescriptor_ddd0cf081a2fe76f, []int{8} } func (m *Label) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -710,16 +915,16 @@ func (m *Label) XXX_DiscardUnknown() { var xxx_messageInfo_Label proto.InternalMessageInfo -func (m *Label) GetKey() int64 { +func (m *Label) GetKeyStrindex() int32 { if m != nil { - return m.Key + return m.KeyStrindex } return 0 } -func (m *Label) GetStr() int64 { +func (m *Label) GetStrStrindex() int32 { if m != nil { - return m.Str + return m.StrStrindex } return 0 } @@ -731,9 +936,9 @@ func (m *Label) GetNum() int64 { return 0 } -func (m *Label) GetNumUnit() int64 { +func (m *Label) GetNumUnitStrindex() int32 { if m != nil { - return m.NumUnit + return m.NumUnitStrindex } return 0 } @@ -741,38 +946,30 @@ func (m *Label) GetNumUnit() int64 { // Describes the mapping of a binary in memory, including its address range, // file offset, and metadata like build ID type Mapping struct { - // Unique nonzero id for the mapping. [deprecated] - Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` // Address at which the binary (or DLL) is loaded into memory. - MemoryStart uint64 `protobuf:"varint,2,opt,name=memory_start,json=memoryStart,proto3" json:"memory_start,omitempty"` + MemoryStart uint64 `protobuf:"varint,1,opt,name=memory_start,json=memoryStart,proto3" json:"memory_start,omitempty"` // The limit of the address range occupied by this mapping. - MemoryLimit uint64 `protobuf:"varint,3,opt,name=memory_limit,json=memoryLimit,proto3" json:"memory_limit,omitempty"` + MemoryLimit uint64 `protobuf:"varint,2,opt,name=memory_limit,json=memoryLimit,proto3" json:"memory_limit,omitempty"` // Offset in the binary that corresponds to the first mapped address. - FileOffset uint64 `protobuf:"varint,4,opt,name=file_offset,json=fileOffset,proto3" json:"file_offset,omitempty"` + FileOffset uint64 `protobuf:"varint,3,opt,name=file_offset,json=fileOffset,proto3" json:"file_offset,omitempty"` // The object this entry is loaded from. This can be a filename on // disk for the main binary and shared libraries, or virtual // abstractions like "[vdso]". - Filename int64 `protobuf:"varint,5,opt,name=filename,proto3" json:"filename,omitempty"` - // A string that uniquely identifies a particular program version - // with high probability. E.g., for binaries generated by GNU tools, - // it could be the contents of the .note.gnu.build-id field. - BuildId int64 `protobuf:"varint,6,opt,name=build_id,json=buildId,proto3" json:"build_id,omitempty"` - // Specifies the kind of build id. 
See BuildIdKind enum for more details [optional] - BuildIdKind BuildIdKind `protobuf:"varint,11,opt,name=build_id_kind,json=buildIdKind,proto3,enum=opentelemetry.proto.profiles.v1experimental.BuildIdKind" json:"build_id_kind,omitempty"` + FilenameStrindex int32 `protobuf:"varint,4,opt,name=filename_strindex,json=filenameStrindex,proto3" json:"filename_strindex,omitempty"` // References to attributes in Profile.attribute_table. [optional] - Attributes []uint64 `protobuf:"varint,12,rep,packed,name=attributes,proto3" json:"attributes,omitempty"` + AttributeIndices []int32 `protobuf:"varint,5,rep,packed,name=attribute_indices,json=attributeIndices,proto3" json:"attribute_indices,omitempty"` // The following fields indicate the resolution of symbolic info. - HasFunctions bool `protobuf:"varint,7,opt,name=has_functions,json=hasFunctions,proto3" json:"has_functions,omitempty"` - HasFilenames bool `protobuf:"varint,8,opt,name=has_filenames,json=hasFilenames,proto3" json:"has_filenames,omitempty"` - HasLineNumbers bool `protobuf:"varint,9,opt,name=has_line_numbers,json=hasLineNumbers,proto3" json:"has_line_numbers,omitempty"` - HasInlineFrames bool `protobuf:"varint,10,opt,name=has_inline_frames,json=hasInlineFrames,proto3" json:"has_inline_frames,omitempty"` + HasFunctions bool `protobuf:"varint,6,opt,name=has_functions,json=hasFunctions,proto3" json:"has_functions,omitempty"` + HasFilenames bool `protobuf:"varint,7,opt,name=has_filenames,json=hasFilenames,proto3" json:"has_filenames,omitempty"` + HasLineNumbers bool `protobuf:"varint,8,opt,name=has_line_numbers,json=hasLineNumbers,proto3" json:"has_line_numbers,omitempty"` + HasInlineFrames bool `protobuf:"varint,9,opt,name=has_inline_frames,json=hasInlineFrames,proto3" json:"has_inline_frames,omitempty"` } func (m *Mapping) Reset() { *m = Mapping{} } func (m *Mapping) String() string { return proto.CompactTextString(m) } func (*Mapping) ProtoMessage() {} func (*Mapping) Descriptor() ([]byte, []int) { - return fileDescriptor_05f9ce3fdbeb046f, []int{6} + return fileDescriptor_ddd0cf081a2fe76f, []int{9} } func (m *Mapping) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -801,13 +998,6 @@ func (m *Mapping) XXX_DiscardUnknown() { var xxx_messageInfo_Mapping proto.InternalMessageInfo -func (m *Mapping) GetId() uint64 { - if m != nil { - return m.Id - } - return 0 -} - func (m *Mapping) GetMemoryStart() uint64 { if m != nil { return m.MemoryStart @@ -829,30 +1019,16 @@ func (m *Mapping) GetFileOffset() uint64 { return 0 } -func (m *Mapping) GetFilename() int64 { - if m != nil { - return m.Filename - } - return 0 -} - -func (m *Mapping) GetBuildId() int64 { +func (m *Mapping) GetFilenameStrindex() int32 { if m != nil { - return m.BuildId + return m.FilenameStrindex } return 0 } -func (m *Mapping) GetBuildIdKind() BuildIdKind { +func (m *Mapping) GetAttributeIndices() []int32 { if m != nil { - return m.BuildIdKind - } - return BuildIdKind_BUILD_ID_LINKER -} - -func (m *Mapping) GetAttributes() []uint64 { - if m != nil { - return m.Attributes + return m.AttributeIndices } return nil } @@ -887,19 +1063,19 @@ func (m *Mapping) GetHasInlineFrames() bool { // Describes function and line table debug information. type Location struct { - // Unique nonzero id for the location. A profile could use - // instruction addresses or any integer sequence as ids. [deprecated] - Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` - // The index of the corresponding profile.Mapping for this location. 
+ // Reference to mapping in Profile.mapping_table. // It can be unset if the mapping is unknown or not applicable for // this profile type. - MappingIndex uint64 `protobuf:"varint,2,opt,name=mapping_index,json=mappingIndex,proto3" json:"mapping_index,omitempty"` + // + // Types that are valid to be assigned to MappingIndex_: + // *Location_MappingIndex + MappingIndex_ isLocation_MappingIndex_ `protobuf_oneof:"mapping_index_"` // The instruction address for this location, if available. It // should be within [Mapping.memory_start...Mapping.memory_limit] // for the corresponding mapping. A non-leaf address may be in the // middle of a call instruction. It is up to display tools to find // the beginning of the instruction if necessary. - Address uint64 `protobuf:"varint,3,opt,name=address,proto3" json:"address,omitempty"` + Address uint64 `protobuf:"varint,2,opt,name=address,proto3" json:"address,omitempty"` // Multiple line indicates this location has inlined functions, // where the last entry represents the caller into which the // preceding entries were inlined. @@ -907,24 +1083,22 @@ type Location struct { // E.g., if memcpy() is inlined into printf: // line[0].function_name == "memcpy" // line[1].function_name == "printf" - Line []Line `protobuf:"bytes,4,rep,name=line,proto3" json:"line"` + Line []*Line `protobuf:"bytes,3,rep,name=line,proto3" json:"line,omitempty"` // Provides an indication that multiple symbols map to this location's // address, for example due to identical code folding by the linker. In that // case the line information above represents one of the multiple // symbols. This field must be recomputed when the symbolization state of the // profile changes. - IsFolded bool `protobuf:"varint,5,opt,name=is_folded,json=isFolded,proto3" json:"is_folded,omitempty"` - // Type of frame (e.g. kernel, native, python, hotspot, php). Index into string table. - TypeIndex uint32 `protobuf:"varint,6,opt,name=type_index,json=typeIndex,proto3" json:"type_index,omitempty"` + IsFolded bool `protobuf:"varint,4,opt,name=is_folded,json=isFolded,proto3" json:"is_folded,omitempty"` // References to attributes in Profile.attribute_table. 
[optional] - Attributes []uint64 `protobuf:"varint,7,rep,packed,name=attributes,proto3" json:"attributes,omitempty"` + AttributeIndices []int32 `protobuf:"varint,5,rep,packed,name=attribute_indices,json=attributeIndices,proto3" json:"attribute_indices,omitempty"` } func (m *Location) Reset() { *m = Location{} } func (m *Location) String() string { return proto.CompactTextString(m) } func (*Location) ProtoMessage() {} func (*Location) Descriptor() ([]byte, []int) { - return fileDescriptor_05f9ce3fdbeb046f, []int{7} + return fileDescriptor_ddd0cf081a2fe76f, []int{10} } func (m *Location) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -953,16 +1127,28 @@ func (m *Location) XXX_DiscardUnknown() { var xxx_messageInfo_Location proto.InternalMessageInfo -func (m *Location) GetId() uint64 { +type isLocation_MappingIndex_ interface { + isLocation_MappingIndex_() + MarshalTo([]byte) (int, error) + Size() int +} + +type Location_MappingIndex struct { + MappingIndex int32 `protobuf:"varint,1,opt,name=mapping_index,json=mappingIndex,proto3,oneof" json:"mapping_index,omitempty"` +} + +func (*Location_MappingIndex) isLocation_MappingIndex_() {} + +func (m *Location) GetMappingIndex_() isLocation_MappingIndex_ { if m != nil { - return m.Id + return m.MappingIndex_ } - return 0 + return nil } -func (m *Location) GetMappingIndex() uint64 { - if m != nil { - return m.MappingIndex +func (m *Location) GetMappingIndex() int32 { + if x, ok := m.GetMappingIndex_().(*Location_MappingIndex); ok { + return x.MappingIndex } return 0 } @@ -974,7 +1160,7 @@ func (m *Location) GetAddress() uint64 { return 0 } -func (m *Location) GetLine() []Line { +func (m *Location) GetLine() []*Line { if m != nil { return m.Line } @@ -988,24 +1174,24 @@ func (m *Location) GetIsFolded() bool { return false } -func (m *Location) GetTypeIndex() uint32 { +func (m *Location) GetAttributeIndices() []int32 { if m != nil { - return m.TypeIndex + return m.AttributeIndices } - return 0 + return nil } -func (m *Location) GetAttributes() []uint64 { - if m != nil { - return m.Attributes +// XXX_OneofWrappers is for the internal use of the proto package. +func (*Location) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Location_MappingIndex)(nil), } - return nil } // Details a specific line in a source code, linked to a function. type Line struct { - // The index of the corresponding profile.Function for this line. - FunctionIndex uint64 `protobuf:"varint,1,opt,name=function_index,json=functionIndex,proto3" json:"function_index,omitempty"` + // Reference to function in Profile.function_table. + FunctionIndex int32 `protobuf:"varint,1,opt,name=function_index,json=functionIndex,proto3" json:"function_index,omitempty"` // Line number in source code. Line int64 `protobuf:"varint,2,opt,name=line,proto3" json:"line,omitempty"` // Column number in source code. 
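Editorial note between hunks (illustrative, not part of the patch): throughout this revision, fields renamed to *Strindex hold int32 offsets into Profile.StringTable, and AttributeIndices hold offsets into Profile.AttributeTable, replacing the inline values and uint64 table references of the v1experimental schema. A minimal sketch of the indirection, assuming in-range indices and a hypothetical helper name:

	// filenameOf resolves Mapping.FilenameStrindex against the profile's
	// string table; callers must bounds-check untrusted input themselves.
	func filenameOf(p *Profile, mp *Mapping) string {
		return p.StringTable[mp.FilenameStrindex]
	}
	// Location.AttributeIndices resolve the same way against p.AttributeTable.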
@@ -1016,7 +1202,7 @@ func (m *Line) Reset() { *m = Line{} } func (m *Line) String() string { return proto.CompactTextString(m) } func (*Line) ProtoMessage() {} func (*Line) Descriptor() ([]byte, []int) { - return fileDescriptor_05f9ce3fdbeb046f, []int{8} + return fileDescriptor_ddd0cf081a2fe76f, []int{11} } func (m *Line) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1045,7 +1231,7 @@ func (m *Line) XXX_DiscardUnknown() { var xxx_messageInfo_Line proto.InternalMessageInfo -func (m *Line) GetFunctionIndex() uint64 { +func (m *Line) GetFunctionIndex() int32 { if m != nil { return m.FunctionIndex } @@ -1069,24 +1255,22 @@ func (m *Line) GetColumn() int64 { // Describes a function, including its human-readable name, system name, // source file, and starting line number in the source. type Function struct { - // Unique nonzero id for the function. [deprecated] - Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` // Name of the function, in human-readable form if available. - Name int64 `protobuf:"varint,2,opt,name=name,proto3" json:"name,omitempty"` + NameStrindex int32 `protobuf:"varint,1,opt,name=name_strindex,json=nameStrindex,proto3" json:"name_strindex,omitempty"` // Name of the function, as identified by the system. // For instance, it can be a C++ mangled name. - SystemName int64 `protobuf:"varint,3,opt,name=system_name,json=systemName,proto3" json:"system_name,omitempty"` + SystemNameStrindex int32 `protobuf:"varint,2,opt,name=system_name_strindex,json=systemNameStrindex,proto3" json:"system_name_strindex,omitempty"` // Source file containing the function. - Filename int64 `protobuf:"varint,4,opt,name=filename,proto3" json:"filename,omitempty"` + FilenameStrindex int32 `protobuf:"varint,3,opt,name=filename_strindex,json=filenameStrindex,proto3" json:"filename_strindex,omitempty"` // Line number in source file. 
- StartLine int64 `protobuf:"varint,5,opt,name=start_line,json=startLine,proto3" json:"start_line,omitempty"` + StartLine int64 `protobuf:"varint,4,opt,name=start_line,json=startLine,proto3" json:"start_line,omitempty"` } func (m *Function) Reset() { *m = Function{} } func (m *Function) String() string { return proto.CompactTextString(m) } func (*Function) ProtoMessage() {} func (*Function) Descriptor() ([]byte, []int) { - return fileDescriptor_05f9ce3fdbeb046f, []int{9} + return fileDescriptor_ddd0cf081a2fe76f, []int{12} } func (m *Function) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1115,30 +1299,23 @@ func (m *Function) XXX_DiscardUnknown() { var xxx_messageInfo_Function proto.InternalMessageInfo -func (m *Function) GetId() uint64 { - if m != nil { - return m.Id - } - return 0 -} - -func (m *Function) GetName() int64 { +func (m *Function) GetNameStrindex() int32 { if m != nil { - return m.Name + return m.NameStrindex } return 0 } -func (m *Function) GetSystemName() int64 { +func (m *Function) GetSystemNameStrindex() int32 { if m != nil { - return m.SystemName + return m.SystemNameStrindex } return 0 } -func (m *Function) GetFilename() int64 { +func (m *Function) GetFilenameStrindex() int32 { if m != nil { - return m.Filename + return m.FilenameStrindex } return 0 } @@ -1151,122 +1328,134 @@ func (m *Function) GetStartLine() int64 { } func init() { - proto.RegisterEnum("opentelemetry.proto.profiles.v1experimental.AggregationTemporality", AggregationTemporality_name, AggregationTemporality_value) - proto.RegisterEnum("opentelemetry.proto.profiles.v1experimental.BuildIdKind", BuildIdKind_name, BuildIdKind_value) - proto.RegisterType((*Profile)(nil), "opentelemetry.proto.profiles.v1experimental.Profile") - proto.RegisterType((*AttributeUnit)(nil), "opentelemetry.proto.profiles.v1experimental.AttributeUnit") - proto.RegisterType((*Link)(nil), "opentelemetry.proto.profiles.v1experimental.Link") - proto.RegisterType((*ValueType)(nil), "opentelemetry.proto.profiles.v1experimental.ValueType") - proto.RegisterType((*Sample)(nil), "opentelemetry.proto.profiles.v1experimental.Sample") - proto.RegisterType((*Label)(nil), "opentelemetry.proto.profiles.v1experimental.Label") - proto.RegisterType((*Mapping)(nil), "opentelemetry.proto.profiles.v1experimental.Mapping") - proto.RegisterType((*Location)(nil), "opentelemetry.proto.profiles.v1experimental.Location") - proto.RegisterType((*Line)(nil), "opentelemetry.proto.profiles.v1experimental.Line") - proto.RegisterType((*Function)(nil), "opentelemetry.proto.profiles.v1experimental.Function") + proto.RegisterEnum("opentelemetry.proto.profiles.v1development.AggregationTemporality", AggregationTemporality_name, AggregationTemporality_value) + proto.RegisterType((*ProfilesData)(nil), "opentelemetry.proto.profiles.v1development.ProfilesData") + proto.RegisterType((*ResourceProfiles)(nil), "opentelemetry.proto.profiles.v1development.ResourceProfiles") + proto.RegisterType((*ScopeProfiles)(nil), "opentelemetry.proto.profiles.v1development.ScopeProfiles") + proto.RegisterType((*Profile)(nil), "opentelemetry.proto.profiles.v1development.Profile") + proto.RegisterType((*AttributeUnit)(nil), "opentelemetry.proto.profiles.v1development.AttributeUnit") + proto.RegisterType((*Link)(nil), "opentelemetry.proto.profiles.v1development.Link") + proto.RegisterType((*ValueType)(nil), "opentelemetry.proto.profiles.v1development.ValueType") + proto.RegisterType((*Sample)(nil), "opentelemetry.proto.profiles.v1development.Sample") + 
proto.RegisterType((*Label)(nil), "opentelemetry.proto.profiles.v1development.Label") + proto.RegisterType((*Mapping)(nil), "opentelemetry.proto.profiles.v1development.Mapping") + proto.RegisterType((*Location)(nil), "opentelemetry.proto.profiles.v1development.Location") + proto.RegisterType((*Line)(nil), "opentelemetry.proto.profiles.v1development.Line") + proto.RegisterType((*Function)(nil), "opentelemetry.proto.profiles.v1development.Function") } func init() { - proto.RegisterFile("opentelemetry/proto/profiles/v1experimental/pprofextended.proto", fileDescriptor_05f9ce3fdbeb046f) -} - -var fileDescriptor_05f9ce3fdbeb046f = []byte{ - // 1483 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x57, 0xcd, 0x4f, 0x1b, 0x47, - 0x1b, 0xc7, 0x1f, 0xf8, 0xe3, 0x31, 0x06, 0x33, 0xe1, 0xe5, 0xdd, 0x37, 0xaf, 0x02, 0xc4, 0xa8, - 0x0d, 0x25, 0x92, 0x29, 0xa4, 0xad, 0xd2, 0xaa, 0x52, 0x6b, 0x82, 0x49, 0x56, 0x38, 0x86, 0x2e, - 0x86, 0x96, 0x2a, 0xd1, 0x6a, 0xf1, 0x0e, 0x66, 0xc4, 0xee, 0xec, 0x6a, 0x77, 0x8c, 0xb0, 0xd4, - 0x53, 0x8f, 0x51, 0x0f, 0x3d, 0xf7, 0x4f, 0xe8, 0xad, 0x7f, 0x41, 0xaf, 0x39, 0xe6, 0x52, 0xa9, - 0xea, 0x21, 0xaa, 0x92, 0xbf, 0xa1, 0xf7, 0x6a, 0x9e, 0x99, 0xb5, 0xcd, 0x47, 0x0e, 0x6e, 0x2f, - 0x68, 0x9e, 0xdf, 0xfc, 0xe6, 0x37, 0xcf, 0xec, 0xf3, 0x65, 0xe0, 0x8b, 0x20, 0xa4, 0x5c, 0x50, - 0x8f, 0xfa, 0x54, 0x44, 0xfd, 0xb5, 0x30, 0x0a, 0x44, 0x20, 0xff, 0x9e, 0x30, 0x8f, 0xc6, 0x6b, - 0xe7, 0xeb, 0xf4, 0x22, 0xa4, 0x11, 0xf3, 0x29, 0x17, 0x8e, 0xb7, 0x16, 0xca, 0x0d, 0x7a, 0x21, - 0x28, 0x77, 0xa9, 0x5b, 0x43, 0x2e, 0xb9, 0x7f, 0x49, 0x40, 0x81, 0xb5, 0x44, 0xa0, 0x76, 0x59, - 0xe0, 0xf6, 0x5c, 0x37, 0xe8, 0x06, 0xea, 0x0e, 0xb9, 0x52, 0xec, 0xdb, 0xab, 0x37, 0xf9, 0xd0, - 0x09, 0x7c, 0x3f, 0xe0, 0x6b, 0xe7, 0xeb, 0x7a, 0xa5, 0xb8, 0xd5, 0xbf, 0x0a, 0x90, 0xdf, 0x53, - 0xea, 0xe4, 0x39, 0x94, 0x62, 0xc7, 0x0f, 0x3d, 0x6a, 0x8b, 0x7e, 0x48, 0x8d, 0xd4, 0x52, 0x66, - 0xa5, 0xb4, 0xf1, 0x49, 0x6d, 0x0c, 0x87, 0x6a, 0x87, 0x8e, 0xd7, 0xa3, 0xed, 0x7e, 0x48, 0x37, - 0xb3, 0x2f, 0x5f, 0x2f, 0x4e, 0x58, 0xa0, 0x04, 0x25, 0x42, 0xbe, 0x82, 0x9c, 0xb2, 0x8c, 0x34, - 0x2a, 0x3f, 0x18, 0x4b, 0x79, 0x1f, 0x8f, 0x6a, 0x59, 0x2d, 0x44, 0xda, 0x90, 0xf7, 0x9d, 0x30, - 0x64, 0xbc, 0x6b, 0x64, 0x50, 0xf3, 0xa3, 0xb1, 0x34, 0x9f, 0xaa, 0xb3, 0x5a, 0x34, 0x91, 0x22, - 0x5f, 0x43, 0xc1, 0x0b, 0x3a, 0x8e, 0x60, 0x01, 0x37, 0xb2, 0x28, 0xfb, 0xf1, 0x58, 0xb2, 0x4d, - 0x7d, 0x58, 0xeb, 0x0e, 0xc4, 0xc8, 0x07, 0x50, 0x49, 0xd6, 0x36, 0xe3, 0x2e, 0xeb, 0xd0, 0xd8, - 0x98, 0x59, 0xca, 0xac, 0x64, 0xac, 0x99, 0x04, 0x37, 0x15, 0x2c, 0x7d, 0x38, 0xe9, 0xf1, 0x0e, - 0xfa, 0x30, 0xf9, 0x0f, 0x7c, 0xd8, 0xd6, 0x87, 0x13, 0x1f, 0x12, 0x31, 0x72, 0x08, 0x33, 0x8e, - 0x10, 0x11, 0x3b, 0xee, 0x09, 0x6a, 0x0b, 0xe7, 0xd8, 0xa3, 0x46, 0x05, 0xf5, 0xef, 0xdd, 0xa8, - 0xaf, 0x93, 0xe5, 0x7c, 0xbd, 0xb6, 0x43, 0xfb, 0x18, 0x5d, 0xad, 0x38, 0x3d, 0x50, 0x69, 0x4b, - 0x11, 0xc2, 0x46, 0x75, 0x7b, 0x9c, 0x89, 0xd8, 0x98, 0x45, 0xdd, 0xcf, 0xc6, 0xf2, 0xbb, 0x9e, - 0x68, 0x1c, 0x70, 0x26, 0xae, 0x5d, 0x25, 0xc1, 0x98, 0x1c, 0x02, 0x78, 0x8c, 0x9f, 0x69, 0xef, - 0x09, 0xde, 0xb2, 0x3e, 0x5e, 0x84, 0x18, 0x3f, 0xd3, 0xe2, 0x45, 0x29, 0xa5, 0x9e, 0x70, 0x17, - 0xa6, 0x62, 0x11, 0x31, 0xde, 0xd5, 0xca, 0xb9, 0xa5, 0xcc, 0x4a, 0xd1, 0x2a, 0x29, 0x4c, 0x51, - 0x16, 0xa1, 0xe4, 0x46, 0x41, 0x68, 0x9f, 0x44, 0x8e, 0x4f, 0x63, 0x23, 0xbf, 0x94, 0x5a, 0xc9, - 0x58, 0x20, 0xa1, 0x6d, 0x44, 0x24, 0xe1, 0x8c, 0xd2, 0x01, 0xa1, 0xa0, 0x08, 0x12, 0xd2, 0x84, - 0x3b, 0x00, 0x82, 
0xf9, 0xd4, 0xe6, 0x0e, 0x0f, 0x62, 0xa3, 0x88, 0xfb, 0x45, 0x89, 0xb4, 0x24, - 0x40, 0xde, 0x83, 0x69, 0xb7, 0x17, 0xa9, 0x14, 0x51, 0x14, 0x40, 0x4a, 0x39, 0x41, 0x15, 0xed, - 0x39, 0x94, 0xe4, 0x73, 0x02, 0x57, 0x95, 0x6a, 0x69, 0x29, 0xf5, 0xef, 0x4b, 0x55, 0x09, 0x62, - 0xa9, 0xce, 0x43, 0x4e, 0x59, 0xc6, 0x14, 0xde, 0xae, 0x2d, 0x62, 0x40, 0x5e, 0x26, 0x04, 0xe5, - 0xc2, 0x28, 0x63, 0xde, 0x26, 0x26, 0xa9, 0xc1, 0x2d, 0x97, 0x9e, 0x38, 0x3d, 0x4f, 0xd8, 0xa3, - 0x3d, 0x64, 0x1a, 0x8f, 0xcf, 0xea, 0xad, 0xfd, 0x41, 0x33, 0xa8, 0x3e, 0x81, 0xf2, 0xa5, 0x50, - 0x93, 0x65, 0x28, 0x0f, 0xf3, 0xe7, 0x8c, 0xf6, 0x8d, 0x14, 0x1e, 0x9d, 0x1a, 0x80, 0x3b, 0xb4, - 0x4f, 0x08, 0x64, 0x65, 0x6a, 0x19, 0x69, 0xdc, 0xc3, 0x75, 0xf5, 0xd7, 0x14, 0x64, 0x65, 0x3c, - 0xc9, 0x33, 0x28, 0x88, 0xc8, 0xe9, 0x50, 0x9b, 0xb9, 0x78, 0x78, 0x6a, 0xb3, 0x2e, 0x1f, 0xf6, - 0xc7, 0xeb, 0xc5, 0x4f, 0xbb, 0xc1, 0x95, 0x4f, 0xc3, 0x64, 0x43, 0xf4, 0x3c, 0xda, 0x11, 0x41, - 0xb4, 0x16, 0xba, 0x8e, 0x70, 0xd6, 0x18, 0x17, 0x34, 0xe2, 0x8e, 0xb7, 0x26, 0xad, 0x5a, 0x5b, - 0x2a, 0x99, 0x5b, 0x56, 0x1e, 0x25, 0x4d, 0x97, 0x1c, 0x41, 0x3e, 0x0e, 0x1d, 0x2e, 0xc5, 0xd3, - 0x28, 0xfe, 0xa5, 0x16, 0x7f, 0x38, 0xbe, 0xf8, 0x7e, 0xe8, 0x70, 0x73, 0xcb, 0xca, 0x49, 0x41, - 0xd3, 0xad, 0xfe, 0x92, 0x82, 0xe2, 0x20, 0x1a, 0xf2, 0x8d, 0xba, 0xfd, 0xe2, 0x1b, 0x85, 0xc6, - 0xae, 0xbe, 0x9b, 0x7c, 0x07, 0xff, 0x75, 0xba, 0xdd, 0x88, 0x76, 0x55, 0xb2, 0x08, 0xea, 0x87, - 0x41, 0xe4, 0x78, 0x4c, 0xf4, 0x8d, 0xcc, 0x52, 0x6a, 0x65, 0x7a, 0xe3, 0xd1, 0x78, 0x85, 0x37, - 0xd4, 0x6a, 0x0f, 0xa5, 0xac, 0x79, 0xe7, 0x46, 0xbc, 0xfa, 0x22, 0x03, 0x39, 0x15, 0x4e, 0x99, - 0xb2, 0xa3, 0x5d, 0x8d, 0x5e, 0xe0, 0xe4, 0xc8, 0x5a, 0xe5, 0x91, 0x9e, 0x46, 0x2f, 0xc8, 0x06, - 0xfc, 0x27, 0x01, 0x62, 0x3b, 0x16, 0x4e, 0x24, 0x34, 0x5b, 0x16, 0x51, 0xd6, 0xba, 0x35, 0xd8, - 0xdc, 0x97, 0x7b, 0xea, 0xcc, 0x48, 0xc3, 0x8c, 0x6d, 0x8f, 0xf2, 0xae, 0x38, 0xc5, 0x92, 0xca, - 0x0e, 0x1b, 0x66, 0xdc, 0x44, 0x58, 0x26, 0x60, 0x2c, 0x9c, 0xce, 0x59, 0x92, 0x02, 0x5a, 0x5c, - 0x16, 0x58, 0xd9, 0x9a, 0x1d, 0x6e, 0x99, 0xae, 0x92, 0x9e, 0x83, 0xc9, 0x73, 0xf9, 0xcd, 0x71, - 0x18, 0x65, 0x2c, 0x65, 0x90, 0x16, 0x4c, 0x7a, 0xce, 0x31, 0xf5, 0xf4, 0x38, 0xd9, 0x18, 0xaf, - 0xab, 0xc8, 0x93, 0xba, 0x9a, 0x94, 0x0c, 0x59, 0x00, 0x18, 0x24, 0xb0, 0x2c, 0x65, 0xf9, 0x5d, - 0x46, 0x10, 0x19, 0x58, 0xd9, 0x7f, 0xb0, 0xcc, 0xb2, 0x16, 0xae, 0xc9, 0x87, 0x30, 0x27, 0xfb, - 0x41, 0x2c, 0x1c, 0x3f, 0x8c, 0x65, 0x2b, 0xbd, 0xc0, 0x4e, 0x80, 0x15, 0x97, 0xb5, 0xc8, 0x70, - 0xef, 0x80, 0xb3, 0x0b, 0xd9, 0x0e, 0xaa, 0xdf, 0xc0, 0x24, 0xde, 0x4d, 0x2a, 0x90, 0x19, 0x96, - 0x8e, 0x5c, 0x4a, 0x24, 0x16, 0x91, 0x4e, 0x1c, 0xb9, 0x94, 0x08, 0xef, 0xf9, 0x98, 0x23, 0x19, - 0x4b, 0x2e, 0xc9, 0xff, 0xa0, 0xc0, 0x7b, 0x3e, 0x36, 0x6d, 0x23, 0x8b, 0x70, 0x9e, 0xf7, 0x7c, - 0x59, 0x95, 0xd5, 0xdf, 0x32, 0x90, 0xd7, 0x53, 0x92, 0x4c, 0x43, 0x5a, 0x57, 0x56, 0xd6, 0x4a, - 0x33, 0x57, 0xb6, 0x4b, 0x9f, 0xfa, 0x41, 0xd4, 0x57, 0xd1, 0xc4, 0x3b, 0xb2, 0x56, 0x49, 0x61, - 0x18, 0xc4, 0x11, 0x8a, 0xc7, 0x7c, 0x26, 0xf0, 0xd2, 0x01, 0xa5, 0x29, 0x21, 0xd9, 0x30, 0xe5, - 0xc7, 0xb4, 0x83, 0x93, 0x93, 0x98, 0xaa, 0xfb, 0xb3, 0x16, 0x48, 0x68, 0x17, 0x11, 0x72, 0x1b, - 0x0a, 0xd2, 0xe2, 0x8e, 0x4f, 0x8d, 0x49, 0xf4, 0x6e, 0x60, 0x4b, 0xcf, 0x8f, 0x7b, 0xcc, 0x73, - 0x65, 0x55, 0xe6, 0x94, 0xe7, 0x68, 0x9b, 0x2e, 0x79, 0x06, 0xe5, 0x64, 0xcb, 0x3e, 0x63, 0xdc, - 0xc5, 0x1e, 0x39, 0xbd, 0xf1, 0x70, 0xac, 0x88, 0x6e, 0x2a, 0xb1, 0x1d, 0xc6, 0x5d, 0xab, 0x74, - 0x3c, 0x34, 0xae, 0xc4, 0x75, 0xea, 0x5a, 
0x5c, 0x97, 0xa1, 0x7c, 0xea, 0xc4, 0x76, 0x32, 0x75, - 0xd5, 0xa4, 0x28, 0x58, 0x53, 0xa7, 0x4e, 0x9c, 0x4c, 0xe6, 0x21, 0x49, 0xbf, 0x46, 0x4d, 0x0b, - 0x4d, 0x4a, 0x30, 0xb2, 0x02, 0x15, 0x49, 0xf2, 0x18, 0xa7, 0x36, 0xef, 0xf9, 0xc7, 0x34, 0x52, - 0x53, 0xa3, 0x60, 0x4d, 0x9f, 0x3a, 0x71, 0x93, 0x71, 0xda, 0x52, 0x28, 0x59, 0x85, 0x59, 0xc9, - 0x64, 0x1c, 0xb9, 0x7a, 0x00, 0x01, 0x52, 0x67, 0x4e, 0x9d, 0xd8, 0x44, 0x5c, 0x4d, 0xa1, 0xea, - 0xf7, 0x69, 0x28, 0x24, 0x3f, 0x53, 0xae, 0x05, 0x76, 0x19, 0xca, 0xfa, 0xa7, 0x90, 0x2e, 0x22, - 0x15, 0xd9, 0x29, 0x0d, 0xaa, 0xfa, 0x31, 0x20, 0xef, 0xb8, 0x6e, 0x44, 0xe3, 0x58, 0x47, 0x35, - 0x31, 0xc9, 0x0e, 0xe6, 0x34, 0xd5, 0x3f, 0x9d, 0xc6, 0x1e, 0xcc, 0xc9, 0x3c, 0x42, 0x11, 0xf2, - 0x7f, 0x28, 0xb2, 0xd8, 0x3e, 0x09, 0x3c, 0x97, 0xba, 0x18, 0xfe, 0x82, 0x55, 0x60, 0xf1, 0x36, - 0xda, 0x38, 0x4b, 0xfb, 0x21, 0xd5, 0x5e, 0xe6, 0xb0, 0xd4, 0x8b, 0x12, 0x51, 0x2e, 0x5e, 0x0e, - 0x52, 0xfe, 0x6a, 0x90, 0xaa, 0x47, 0x38, 0x38, 0xb0, 0x81, 0x25, 0x81, 0x1a, 0x34, 0x30, 0xf9, - 0xa2, 0x72, 0x82, 0x2a, 0x39, 0xa2, 0xdf, 0xa5, 0x9b, 0x30, 0xba, 0x37, 0x0f, 0xb9, 0x4e, 0xe0, - 0xf5, 0x7c, 0xae, 0xeb, 0x49, 0x5b, 0xd5, 0x17, 0x29, 0x28, 0x24, 0x81, 0xbe, 0xf6, 0x7d, 0x09, - 0x64, 0x31, 0x9b, 0xb5, 0x10, 0x66, 0xf2, 0x22, 0x94, 0xe2, 0x7e, 0x2c, 0xa8, 0x6f, 0xe3, 0x96, - 0x52, 0x03, 0x05, 0xb5, 0x24, 0x61, 0xb4, 0x0c, 0xb2, 0x57, 0xca, 0xe0, 0x0e, 0x80, 0x6a, 0xa8, - 0xe8, 0x9f, 0x2a, 0x92, 0x22, 0x22, 0xf2, 0x7d, 0xab, 0x3f, 0xa4, 0x60, 0xfe, 0xe6, 0xf6, 0x4e, - 0xee, 0xc1, 0x72, 0xfd, 0xf1, 0x63, 0xab, 0xf1, 0xb8, 0xde, 0x36, 0x77, 0x5b, 0x76, 0xbb, 0xf1, - 0x74, 0x6f, 0xd7, 0xaa, 0x37, 0xcd, 0xf6, 0x91, 0x7d, 0xd0, 0xda, 0xdf, 0x6b, 0x3c, 0x32, 0xb7, - 0xcd, 0xc6, 0x56, 0x65, 0x82, 0xdc, 0x85, 0x3b, 0xef, 0x22, 0x6e, 0x35, 0x9a, 0xed, 0x7a, 0x25, - 0x45, 0xde, 0x87, 0xea, 0xbb, 0x28, 0x8f, 0x0e, 0x9e, 0x1e, 0x34, 0xeb, 0x6d, 0xf3, 0xb0, 0x51, - 0x49, 0xaf, 0x7e, 0x0e, 0xa5, 0x91, 0xba, 0x22, 0xb7, 0x60, 0x66, 0xf3, 0xc0, 0x6c, 0x6e, 0xd9, - 0xe6, 0x96, 0xdd, 0x34, 0x5b, 0x3b, 0x0d, 0xab, 0x32, 0x41, 0x0c, 0x98, 0x1b, 0x80, 0x9b, 0x66, - 0xab, 0x6e, 0x1d, 0xd9, 0x4f, 0xea, 0xfb, 0x4f, 0x2a, 0xa9, 0xcd, 0x9f, 0x52, 0x2f, 0xdf, 0x2c, - 0xa4, 0x5e, 0xbd, 0x59, 0x48, 0xfd, 0xf9, 0x66, 0x21, 0xf5, 0xe3, 0xdb, 0x85, 0x89, 0x57, 0x6f, - 0x17, 0x26, 0x7e, 0x7f, 0xbb, 0x30, 0xf1, 0xad, 0x35, 0xf6, 0x24, 0x56, 0xff, 0x1b, 0x75, 0x29, - 0x7f, 0xd7, 0xbf, 0x68, 0x3f, 0xa7, 0xef, 0xef, 0x86, 0x94, 0xb7, 0x07, 0x8a, 0x7b, 0x98, 0xbe, - 0x7b, 0x49, 0xfa, 0x1e, 0xae, 0x37, 0x46, 0xd8, 0xc7, 0x39, 0xd4, 0x7b, 0xf0, 0x77, 0x00, 0x00, - 0x00, 0xff, 0xff, 0xef, 0x03, 0x47, 0x6d, 0x06, 0x0e, 0x00, 0x00, -} - -func (m *Profile) Marshal() (dAtA []byte, err error) { + proto.RegisterFile("opentelemetry/proto/profiles/v1development/profiles.proto", fileDescriptor_ddd0cf081a2fe76f) +} + +var fileDescriptor_ddd0cf081a2fe76f = []byte{ + // 1647 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x58, 0x5b, 0x6f, 0x1b, 0xc7, + 0x15, 0xd6, 0xf2, 0xce, 0xc3, 0x8b, 0xa8, 0x89, 0xac, 0x6c, 0xd3, 0x5a, 0x66, 0x68, 0xa4, 0x61, + 0x54, 0x44, 0xb2, 0xe4, 0xb4, 0x88, 0xd1, 0x02, 0xad, 0x64, 0x4a, 0x0e, 0x63, 0xea, 0xd2, 0x15, + 0x25, 0xd4, 0x6d, 0x80, 0xed, 0x88, 0x3b, 0xa4, 0xb6, 0xda, 0x9d, 0x5d, 0xec, 0x0c, 0x05, 0x13, + 0xfd, 0x09, 0xcd, 0x43, 0x1f, 0xfb, 0x1b, 0x0a, 0xf4, 0x37, 0xf4, 0x35, 0x8f, 0x46, 0x9f, 0x82, + 0x3e, 0x04, 0x85, 0xfd, 0x92, 0x16, 0xe8, 0x7f, 0x28, 0xe6, 0xb2, 0xcb, 0x4b, 0x29, 0x24, 0xeb, + 0x17, 0x61, 0xe7, 
0x9c, 0x6f, 0xbe, 0x39, 0x67, 0xce, 0x65, 0x8e, 0x08, 0x4f, 0x82, 0x90, 0x50, + 0x4e, 0x3c, 0xe2, 0x13, 0x1e, 0x4d, 0x76, 0xc2, 0x28, 0xe0, 0x81, 0xf8, 0x3b, 0x74, 0x3d, 0xc2, + 0x76, 0x6e, 0x77, 0x1d, 0x72, 0x4b, 0xbc, 0x20, 0xf4, 0x09, 0xe5, 0x89, 0x78, 0x5b, 0xa2, 0xd0, + 0xd6, 0xdc, 0x56, 0x25, 0xdc, 0x4e, 0x30, 0x73, 0x5b, 0xdf, 0x5b, 0x1f, 0x05, 0xa3, 0x40, 0x91, + 0x8b, 0x2f, 0x05, 0x7e, 0x6f, 0x6b, 0xd9, 0xe1, 0x83, 0xc0, 0xf7, 0x03, 0xba, 0x73, 0xbb, 0xab, + 0xbf, 0x34, 0x76, 0x7b, 0x19, 0x36, 0x22, 0x2c, 0x18, 0x47, 0x03, 0x22, 0xd0, 0xf1, 0xb7, 0xc2, + 0xb7, 0x26, 0x50, 0x3d, 0xd3, 0xb6, 0x74, 0x30, 0xc7, 0xc8, 0x85, 0xb5, 0x18, 0x61, 0xc7, 0x46, + 0x9a, 0x46, 0x33, 0xdb, 0xae, 0xec, 0xfd, 0x62, 0xfb, 0xfb, 0x7b, 0xb2, 0x6d, 0x69, 0x92, 0x98, + 0xdc, 0x6a, 0x44, 0x0b, 0x92, 0xd6, 0xb7, 0x06, 0x34, 0x16, 0x61, 0xe8, 0x39, 0x94, 0x62, 0xa0, + 0x69, 0x34, 0x8d, 0x76, 0x65, 0xef, 0xa3, 0xa5, 0xc7, 0x26, 0x6e, 0xdc, 0xee, 0x26, 0x67, 0x1d, + 0xe4, 0xbe, 0xfa, 0xe6, 0xc1, 0x8a, 0x95, 0x10, 0xa0, 0xdf, 0x43, 0x9d, 0x0d, 0x82, 0x70, 0xc6, + 0x93, 0x8c, 0xf4, 0xe4, 0x49, 0x1a, 0x4f, 0xce, 0x05, 0x43, 0xe2, 0x46, 0x8d, 0xcd, 0x2e, 0xd1, + 0x7d, 0x00, 0x36, 0xb8, 0x26, 0x3e, 0xb6, 0xc7, 0x91, 0x67, 0x66, 0x9b, 0x46, 0xbb, 0x6c, 0x95, + 0x95, 0xe4, 0x22, 0xf2, 0x3e, 0x2f, 0x94, 0xbe, 0x2d, 0x36, 0xfe, 0x5d, 0x6c, 0xbd, 0x32, 0xa0, + 0x36, 0xc7, 0x83, 0x4e, 0x21, 0x2f, 0x99, 0xb4, 0x93, 0x8f, 0x97, 0x5a, 0xa4, 0x23, 0x7b, 0xbb, + 0xbb, 0xdd, 0xa5, 0x8c, 0x47, 0x63, 0x61, 0x0f, 0xe6, 0x6e, 0x40, 0x25, 0x97, 0x76, 0x57, 0xf1, + 0xa0, 0x53, 0x28, 0x2d, 0x78, 0xf9, 0x38, 0x8d, 0x97, 0xda, 0x30, 0x2b, 0x21, 0xf9, 0x0e, 0xd7, + 0x5a, 0x7f, 0xa9, 0x40, 0x51, 0x6f, 0x42, 0x97, 0x50, 0x61, 0xd8, 0x0f, 0x3d, 0x62, 0xf3, 0x89, + 0x74, 0x49, 0x1c, 0xff, 0xd3, 0x34, 0xc7, 0x5f, 0x62, 0x6f, 0x4c, 0xfa, 0x93, 0x90, 0x58, 0xa0, + 0x98, 0xc4, 0x37, 0xfa, 0x1c, 0x0a, 0x6a, 0xa5, 0x3d, 0xda, 0x4b, 0x15, 0x37, 0xb9, 0xd3, 0xd2, + 0x0c, 0xe8, 0x37, 0x50, 0xf3, 0x71, 0x18, 0xba, 0x74, 0x64, 0x73, 0x7c, 0xe5, 0x11, 0x33, 0x9b, + 0xfe, 0x92, 0x8e, 0x15, 0x81, 0x55, 0xd5, 0x4c, 0x7d, 0x41, 0x84, 0x7e, 0x07, 0x75, 0x2f, 0x18, + 0xc8, 0xb8, 0x68, 0xea, 0x9c, 0xa4, 0xfe, 0x24, 0x0d, 0x75, 0x4f, 0x33, 0x58, 0xb5, 0x98, 0x4b, + 0x91, 0x7f, 0x04, 0x8d, 0x84, 0xdc, 0xa5, 0x8e, 0x3b, 0x20, 0xcc, 0xcc, 0x37, 0xb3, 0xed, 0xbc, + 0xb5, 0x1a, 0xcb, 0xbb, 0x4a, 0x2c, 0xec, 0x18, 0x8e, 0xe9, 0x60, 0xc6, 0x8e, 0x42, 0x7a, 0x3b, + 0x8e, 0x34, 0x83, 0x55, 0x8b, 0xb9, 0x94, 0x1d, 0x97, 0xb0, 0x8a, 0x39, 0x8f, 0xdc, 0xab, 0x31, + 0x27, 0x9a, 0xbd, 0x28, 0xd9, 0x3f, 0xfc, 0x8e, 0xcc, 0x7d, 0x4e, 0x26, 0x32, 0xb8, 0x3a, 0x5b, + 0xeb, 0x09, 0x8b, 0xe2, 0xbd, 0x9a, 0xe5, 0x1d, 0x53, 0x97, 0x33, 0xb3, 0x94, 0xbe, 0x46, 0xf7, + 0x63, 0x8a, 0x0b, 0xea, 0xf2, 0x99, 0x33, 0xc4, 0x52, 0xd4, 0x1a, 0x78, 0x2e, 0xbd, 0xd1, 0x66, + 0x97, 0x25, 0xfd, 0xa3, 0x54, 0xc1, 0x71, 0xe9, 0x8d, 0x55, 0x16, 0x1c, 0xca, 0xe8, 0xf7, 0xa1, + 0xca, 0x78, 0x34, 0x4d, 0x25, 0x68, 0x66, 0xdb, 0x65, 0xab, 0xa2, 0x64, 0x0a, 0x72, 0x1f, 0x80, + 0xbb, 0x3e, 0xb1, 0x29, 0xa6, 0x01, 0x33, 0x2b, 0x4d, 0xa3, 0x9d, 0xb5, 0xca, 0x42, 0x72, 0x22, + 0x04, 0xe8, 0x03, 0xa8, 0x3b, 0xe3, 0x48, 0x85, 0x55, 0x41, 0xaa, 0x12, 0x52, 0x8b, 0xa5, 0x0a, + 0xf6, 0x05, 0x54, 0x42, 0x12, 0xb9, 0x81, 0xa3, 0x0a, 0xab, 0x26, 0x7b, 0xc5, 0xdb, 0x15, 0x96, + 0xbe, 0x7f, 0x50, 0x7c, 0xb2, 0xbc, 0x36, 0xa0, 0xa0, 0x56, 0x66, 0x5d, 0x1e, 0xae, 0x57, 0xe8, + 0x63, 0x40, 0x22, 0x7e, 0x84, 0x72, 0x5b, 0xba, 0xa4, 0xb2, 0x6e, 0x55, 0x66, 0xdd, 0x9a, 0xd6, + 0x9c, 0x27, 0x0a, 0xf4, 0x4b, 0xf8, 0x91, 
0x43, 0x86, 0x78, 0xec, 0x71, 0x7b, 0xa6, 0x0b, 0xe8, + 0xad, 0xe4, 0xa5, 0xd9, 0x68, 0x1a, 0xed, 0xbc, 0xf5, 0x03, 0x8d, 0x39, 0x4f, 0xca, 0xfb, 0x5c, + 0x03, 0xd0, 0x15, 0x80, 0xb6, 0xde, 0x76, 0x1d, 0x73, 0xad, 0x69, 0xb4, 0xab, 0x07, 0x4f, 0x85, + 0xb5, 0xff, 0xfc, 0xe6, 0xc1, 0xcf, 0x47, 0xc1, 0x82, 0xbb, 0xae, 0x78, 0xfb, 0x3c, 0x8f, 0x0c, + 0x78, 0x10, 0xed, 0x84, 0x0e, 0xe6, 0x78, 0xc7, 0xa5, 0x9c, 0x44, 0x14, 0x7b, 0x3b, 0x62, 0x15, + 0xb7, 0xb2, 0x6e, 0xc7, 0x2a, 0x6b, 0xda, 0xae, 0x83, 0x8e, 0x01, 0x92, 0xac, 0x60, 0x26, 0x7a, + 0x9b, 0xd4, 0x9d, 0x21, 0x40, 0x9f, 0x82, 0xe9, 0x44, 0x41, 0x18, 0x12, 0xc7, 0x9e, 0x4a, 0xed, + 0x41, 0x30, 0xa6, 0xdc, 0x7c, 0xa7, 0x69, 0xb4, 0x6b, 0xd6, 0x86, 0xd6, 0x27, 0xa9, 0xc9, 0x9e, + 0x0a, 0x2d, 0xfa, 0x19, 0xbc, 0x1b, 0x44, 0xee, 0xc8, 0xa5, 0xd8, 0xb3, 0x43, 0x3c, 0xf1, 0x02, + 0xec, 0xd8, 0xc3, 0x20, 0xf2, 0x31, 0x37, 0xd7, 0x65, 0x8f, 0xbd, 0x17, 0xab, 0xcf, 0x94, 0xf6, + 0x48, 0x2a, 0x45, 0x23, 0x58, 0xdc, 0x67, 0xde, 0x13, 0x57, 0x65, 0xad, 0x2e, 0x6c, 0x68, 0xfd, + 0x01, 0x6a, 0x73, 0x05, 0x81, 0x3e, 0x81, 0x8d, 0x69, 0x91, 0xdd, 0x90, 0xc9, 0x34, 0x36, 0x86, + 0x8c, 0xcd, 0x7a, 0xa2, 0x7d, 0x4e, 0x26, 0x49, 0x58, 0x1e, 0x42, 0x4d, 0x14, 0xe4, 0x14, 0x9c, + 0x91, 0xe0, 0xaa, 0x10, 0xc6, 0xa0, 0xd6, 0xdf, 0x0d, 0xc8, 0x89, 0xf2, 0x40, 0x5f, 0x40, 0x89, + 0x47, 0x78, 0x20, 0x43, 0x68, 0xc8, 0x10, 0xee, 0xeb, 0x10, 0x3e, 0x49, 0x1f, 0xc2, 0xbe, 0x60, + 0xea, 0x76, 0xac, 0xa2, 0xa4, 0xec, 0x3a, 0xe8, 0x05, 0x14, 0x59, 0x88, 0xa9, 0x20, 0xcf, 0x48, + 0xf2, 0x5f, 0x69, 0xf2, 0x4f, 0xd3, 0x93, 0x9f, 0x87, 0x98, 0x76, 0x3b, 0x56, 0x41, 0x10, 0x76, + 0x9d, 0xd6, 0x3f, 0x0c, 0x28, 0x27, 0x55, 0x22, 0x9c, 0x9e, 0xcf, 0x5e, 0x75, 0x43, 0x55, 0x3e, + 0x9b, 0xb0, 0xdf, 0xe7, 0x66, 0xd0, 0x1f, 0xe1, 0x5d, 0x3c, 0x1a, 0x45, 0x64, 0xa4, 0x5f, 0x06, + 0xe2, 0x87, 0x41, 0x84, 0x3d, 0x97, 0x4f, 0xe4, 0x63, 0x5a, 0xdf, 0x3b, 0x48, 0xd5, 0xe1, 0xa6, + 0x54, 0xfd, 0x29, 0x93, 0xb5, 0x81, 0x97, 0xca, 0x5b, 0x5f, 0x66, 0xa0, 0xa0, 0x2a, 0x0d, 0xed, + 0xc1, 0xbd, 0xf8, 0xa5, 0x60, 0x36, 0xe3, 0x38, 0xe2, 0xf6, 0xac, 0x67, 0xef, 0x24, 0xca, 0x73, + 0xa1, 0xeb, 0x4a, 0xdb, 0x67, 0x5e, 0x1d, 0x66, 0x7b, 0x84, 0x8e, 0xf8, 0xb5, 0xf6, 0x31, 0x79, + 0x75, 0x58, 0x4f, 0x8a, 0xd1, 0x3a, 0xe4, 0x6f, 0xc5, 0xed, 0xc9, 0xf7, 0x34, 0x6b, 0xa9, 0x05, + 0xfa, 0x09, 0xac, 0x4d, 0x33, 0x2e, 0xee, 0x20, 0x39, 0xd9, 0x41, 0x1a, 0x89, 0x22, 0x7e, 0xb8, + 0x1e, 0xe8, 0xfe, 0xac, 0xcc, 0xca, 0x8b, 0x73, 0x3e, 0x5b, 0x51, 0xfd, 0x56, 0x99, 0xf3, 0x08, + 0xd6, 0x45, 0xeb, 0x64, 0x1c, 0xfb, 0x21, 0x13, 0xaf, 0xc4, 0x4b, 0xd9, 0x34, 0xe5, 0xfb, 0x96, + 0xb3, 0xd0, 0x54, 0x77, 0x41, 0xdd, 0x97, 0xa2, 0x73, 0x1e, 0xd4, 0xa0, 0x32, 0xa5, 0xb4, 0x5b, + 0x7f, 0x32, 0x20, 0xdf, 0xc3, 0x57, 0xc4, 0x13, 0xad, 0x7b, 0x49, 0x01, 0x54, 0x6e, 0x66, 0xf2, + 0x5e, 0x75, 0xf7, 0xc5, 0xe0, 0x8a, 0xee, 0x9e, 0x40, 0x1a, 0x90, 0xa5, 0x63, 0x5f, 0xc6, 0x31, + 0x6b, 0x89, 0x4f, 0xb4, 0x05, 0x6b, 0x74, 0xec, 0xdb, 0xf3, 0x69, 0x91, 0x53, 0x57, 0x46, 0xc7, + 0xfe, 0xc5, 0x6c, 0xcd, 0xfc, 0x27, 0x03, 0x45, 0x3d, 0x4a, 0x88, 0xc3, 0x7c, 0xe2, 0x07, 0xd1, + 0x44, 0x85, 0x46, 0xda, 0x93, 0xb3, 0x2a, 0x4a, 0x26, 0x23, 0x32, 0x03, 0xf1, 0x5c, 0xdf, 0xe5, + 0xd2, 0x9e, 0x04, 0xd2, 0x13, 0x22, 0xf4, 0x00, 0x2a, 0xb2, 0x7d, 0x06, 0xc3, 0x21, 0x23, 0x5c, + 0xda, 0x95, 0xb3, 0x40, 0x88, 0x4e, 0xa5, 0x44, 0xc4, 0x43, 0xac, 0x28, 0xf6, 0xc9, 0xa2, 0x79, + 0x8d, 0x58, 0x91, 0x78, 0xb7, 0x34, 0x78, 0xf9, 0x3b, 0x82, 0xf7, 0x10, 0x6a, 0xd7, 0x98, 0xd9, + 0xf1, 0xb4, 0xc0, 0xcc, 0x42, 0xd3, 0x68, 0x97, 0xac, 0xea, 0x35, 
0x66, 0xf1, 0x2c, 0x31, 0x05, + 0xe9, 0x93, 0x98, 0x59, 0x9c, 0x82, 0x62, 0x19, 0x6a, 0x43, 0x43, 0x80, 0x3c, 0x97, 0x12, 0x9b, + 0x8e, 0xfd, 0x2b, 0x12, 0x89, 0x59, 0x40, 0xe0, 0xea, 0xd7, 0x98, 0xf5, 0x5c, 0x4a, 0x4e, 0x94, + 0x54, 0x5c, 0xb6, 0x40, 0xba, 0x54, 0x62, 0x87, 0x91, 0xa4, 0x2c, 0x4b, 0xe8, 0xea, 0x35, 0x66, + 0x5d, 0x29, 0x3f, 0x92, 0xe2, 0xd6, 0x7f, 0x0d, 0x28, 0xc5, 0xc3, 0x15, 0xfa, 0x60, 0x3a, 0x04, + 0xce, 0x84, 0xff, 0xb3, 0x95, 0x64, 0xa2, 0x53, 0xf9, 0x66, 0x42, 0x11, 0x3b, 0x4e, 0x44, 0x18, + 0xd3, 0x97, 0x1d, 0x2f, 0x51, 0x07, 0x72, 0x82, 0x5b, 0x0f, 0x8f, 0x69, 0x87, 0x08, 0x62, 0xc9, + 0xdd, 0xe8, 0x87, 0x50, 0x76, 0x99, 0x3d, 0x0c, 0x3c, 0x87, 0x38, 0x32, 0x0a, 0x25, 0xab, 0xe4, + 0xb2, 0x23, 0xb9, 0x4e, 0x75, 0xfb, 0x07, 0x0d, 0xa8, 0xcf, 0x39, 0x64, 0xb7, 0x5e, 0xc8, 0x7e, + 0x4c, 0xc4, 0x84, 0x91, 0x4c, 0x83, 0xb3, 0xa9, 0x9e, 0xcc, 0x75, 0xca, 0x55, 0xa4, 0x1d, 0xca, + 0xc8, 0x54, 0x56, 0xe6, 0x6d, 0x40, 0x61, 0x10, 0x78, 0x63, 0x9f, 0xea, 0x04, 0xd7, 0xab, 0xd6, + 0xdf, 0x0c, 0x28, 0xc5, 0x31, 0x15, 0x21, 0x9d, 0xcf, 0x26, 0xdd, 0x28, 0xe7, 0x32, 0xe9, 0x11, + 0xac, 0xb3, 0x09, 0xe3, 0xc4, 0xb7, 0xe7, 0xb1, 0xaa, 0xa4, 0x90, 0xd2, 0x9d, 0x2c, 0xe4, 0xde, + 0xff, 0x27, 0x6a, 0xf6, 0x8e, 0x44, 0x15, 0xff, 0xa2, 0xc8, 0x86, 0x26, 0x5d, 0xc8, 0xa9, 0x21, + 0x4b, 0x4a, 0xc4, 0x15, 0x6c, 0x7d, 0x69, 0xc0, 0xc6, 0xf2, 0xbe, 0x89, 0x3e, 0x84, 0x87, 0xfb, + 0xcf, 0x9e, 0x59, 0x87, 0xcf, 0xf6, 0xfb, 0xdd, 0xd3, 0x13, 0xbb, 0x7f, 0x78, 0x7c, 0x76, 0x6a, + 0xed, 0xf7, 0xba, 0xfd, 0x17, 0xf6, 0xc5, 0xc9, 0xf9, 0xd9, 0xe1, 0xd3, 0xee, 0x51, 0xf7, 0xb0, + 0xd3, 0x58, 0x41, 0xef, 0xc3, 0xfd, 0xbb, 0x80, 0x9d, 0xc3, 0x5e, 0x7f, 0xbf, 0x61, 0xa0, 0x1f, + 0x43, 0xeb, 0x2e, 0xc8, 0xd3, 0x8b, 0xe3, 0x8b, 0xde, 0x7e, 0xbf, 0x7b, 0x79, 0xd8, 0xc8, 0x1c, + 0x7c, 0x6d, 0x7c, 0xf5, 0x7a, 0xd3, 0x78, 0xf5, 0x7a, 0xd3, 0xf8, 0xd7, 0xeb, 0x4d, 0xe3, 0xcf, + 0x6f, 0x36, 0x57, 0x5e, 0xbd, 0xd9, 0x5c, 0xf9, 0xfa, 0xcd, 0xe6, 0x0a, 0x7c, 0xec, 0x06, 0x29, + 0x52, 0xe9, 0xa0, 0x16, 0xff, 0x1b, 0x79, 0x26, 0x50, 0x67, 0xc6, 0x6f, 0x7f, 0x9d, 0xfa, 0x15, + 0x54, 0xbf, 0x0c, 0x8c, 0x08, 0xbd, 0xe3, 0x57, 0x8c, 0xbf, 0x66, 0xb6, 0x4e, 0x43, 0x42, 0xfb, + 0x09, 0xa1, 0x3c, 0x2a, 0x9e, 0xad, 0xd8, 0xf6, 0xe5, 0x6e, 0x67, 0x0a, 0xbe, 0x2a, 0x48, 0xb6, + 0xc7, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff, 0xbc, 0x31, 0xf3, 0xe8, 0x27, 0x11, 0x00, 0x00, +} + +func (m *ProfilesData) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1276,92 +1465,225 @@ func (m *Profile) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *Profile) MarshalTo(dAtA []byte) (int, error) { +func (m *ProfilesData) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Profile) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ProfilesData) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.LinkTable) > 0 { - for iNdEx := len(m.LinkTable) - 1; iNdEx >= 0; iNdEx-- { + if len(m.ResourceProfiles) > 0 { + for iNdEx := len(m.ResourceProfiles) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.LinkTable[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := m.ResourceProfiles[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size - i = encodeVarintPprofextended(dAtA, i, uint64(size)) + i = encodeVarintProfiles(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x92 + dAtA[i] = 0xa } } - if 
len(m.AttributeUnits) > 0 { - for iNdEx := len(m.AttributeUnits) - 1; iNdEx >= 0; iNdEx-- { + return len(dAtA) - i, nil +} + +func (m *ResourceProfiles) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceProfiles) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceProfiles) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.SchemaUrl) > 0 { + i -= len(m.SchemaUrl) + copy(dAtA[i:], m.SchemaUrl) + i = encodeVarintProfiles(dAtA, i, uint64(len(m.SchemaUrl))) + i-- + dAtA[i] = 0x1a + } + if len(m.ScopeProfiles) > 0 { + for iNdEx := len(m.ScopeProfiles) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.AttributeUnits[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := m.ScopeProfiles[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size - i = encodeVarintPprofextended(dAtA, i, uint64(size)) + i = encodeVarintProfiles(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x8a + dAtA[i] = 0x12 } } - if len(m.AttributeTable) > 0 { - for iNdEx := len(m.AttributeTable) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProfiles(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ScopeProfiles) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScopeProfiles) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ScopeProfiles) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.SchemaUrl) > 0 { + i -= len(m.SchemaUrl) + copy(dAtA[i:], m.SchemaUrl) + i = encodeVarintProfiles(dAtA, i, uint64(len(m.SchemaUrl))) + i-- + dAtA[i] = 0x1a + } + if len(m.Profiles) > 0 { + for iNdEx := len(m.Profiles) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.AttributeTable[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Profiles[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size - i = encodeVarintPprofextended(dAtA, i, uint64(size)) + i = encodeVarintProfiles(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x82 + dAtA[i] = 0x12 } } - if len(m.LocationIndices) > 0 { - dAtA2 := make([]byte, len(m.LocationIndices)*10) - var j1 int - for _, num1 := range m.LocationIndices { - num := uint64(num1) - for num >= 1<<7 { - dAtA2[j1] = uint8(uint64(num)&0x7f | 0x80) - num >>= 7 - j1++ - } - dAtA2[j1] = uint8(num) - j1++ + { + size, err := m.Scope.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - i -= j1 - copy(dAtA[i:], dAtA2[:j1]) - i = encodeVarintPprofextended(dAtA, i, uint64(j1)) - i-- - dAtA[i] = 0x7a + i -= size + i = encodeVarintProfiles(dAtA, i, uint64(size)) } - if m.DefaultSampleType != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.DefaultSampleType)) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Profile) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil 
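// Editorial note, kept in comment form so it remains valid Go at this point
// in the generated code (it is not part of the upstream file):
// MarshalToSizedBuffer fills dAtA from the end toward index 0, emitting
// fields in descending field-number order, so the exact-size buffer from
// m.Size() ends up fully populated and Marshal can return dAtA[:n].
// The literal tag bytes written before each field follow the protobuf
// varint rule tag = (field_number << 3) | wire_type: 0x0a is field 1
// length-delimited, 0x12 is field 2, and two-byte tags such as the
// 0x92, 0x01 pair emitted for Attributes below decode as (18 << 3) | 2,
// i.e. field 18. A sketch of the arithmetic:
//
//	tag := uint64(18)<<3 | 2 // == 146, varint-encoded as 0x92 0x01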
+} + +func (m *Profile) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Profile) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.OriginalPayload) > 0 { + i -= len(m.OriginalPayload) + copy(dAtA[i:], m.OriginalPayload) + i = encodeVarintProfiles(dAtA, i, uint64(len(m.OriginalPayload))) i-- - dAtA[i] = 0x70 + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa + } + if len(m.OriginalPayloadFormat) > 0 { + i -= len(m.OriginalPayloadFormat) + copy(dAtA[i:], m.OriginalPayloadFormat) + i = encodeVarintProfiles(dAtA, i, uint64(len(m.OriginalPayloadFormat))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 + } + if m.DroppedAttributesCount != 0 { + i = encodeVarintProfiles(dAtA, i, uint64(m.DroppedAttributesCount)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x98 + } + if len(m.Attributes) > 0 { + for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProfiles(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } + } + { + size := m.ProfileId.Size() + i -= size + if _, err := m.ProfileId.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintProfiles(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + if m.DefaultSampleTypeStrindex != 0 { + i = encodeVarintProfiles(dAtA, i, uint64(m.DefaultSampleTypeStrindex)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x80 } - if len(m.Comment) > 0 { - dAtA4 := make([]byte, len(m.Comment)*10) + if len(m.CommentStrindices) > 0 { + dAtA4 := make([]byte, len(m.CommentStrindices)*10) var j3 int - for _, num1 := range m.Comment { + for _, num1 := range m.CommentStrindices { num := uint64(num1) for num >= 1<<7 { dAtA4[j3] = uint8(uint64(num)&0x7f | 0x80) @@ -1373,14 +1695,14 @@ func (m *Profile) MarshalToSizedBuffer(dAtA []byte) (int, error) { } i -= j3 copy(dAtA[i:], dAtA4[:j3]) - i = encodeVarintPprofextended(dAtA, i, uint64(j3)) + i = encodeVarintProfiles(dAtA, i, uint64(j3)) i-- - dAtA[i] = 0x6a + dAtA[i] = 0x7a } if m.Period != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.Period)) + i = encodeVarintProfiles(dAtA, i, uint64(m.Period)) i-- - dAtA[i] = 0x60 + dAtA[i] = 0x70 } { size, err := m.PeriodType.MarshalToSizedBuffer(dAtA[:i]) @@ -1388,76 +1710,127 @@ func (m *Profile) MarshalToSizedBuffer(dAtA []byte) (int, error) { return 0, err } i -= size - i = encodeVarintPprofextended(dAtA, i, uint64(size)) + i = encodeVarintProfiles(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x5a + dAtA[i] = 0x6a if m.DurationNanos != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.DurationNanos)) + i = encodeVarintProfiles(dAtA, i, uint64(m.DurationNanos)) i-- - dAtA[i] = 0x50 + dAtA[i] = 0x60 } if m.TimeNanos != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.TimeNanos)) - i-- - dAtA[i] = 0x48 - } - if m.KeepFrames != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.KeepFrames)) - i-- - dAtA[i] = 0x40 - } - if m.DropFrames != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.DropFrames)) + i = encodeVarintProfiles(dAtA, i, uint64(m.TimeNanos)) i-- - dAtA[i] = 0x38 + dAtA[i] = 0x58 } if len(m.StringTable) > 0 { for iNdEx := len(m.StringTable) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.StringTable[iNdEx]) copy(dAtA[i:], m.StringTable[iNdEx]) - i = encodeVarintPprofextended(dAtA, i, uint64(len(m.StringTable[iNdEx]))) + i = encodeVarintProfiles(dAtA, i, 
uint64(len(m.StringTable[iNdEx]))) i-- - dAtA[i] = 0x32 + dAtA[i] = 0x52 + } + } + if len(m.LinkTable) > 0 { + for iNdEx := len(m.LinkTable) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.LinkTable[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProfiles(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + } + if len(m.AttributeUnits) > 0 { + for iNdEx := len(m.AttributeUnits) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AttributeUnits[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProfiles(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + } + if len(m.AttributeTable) > 0 { + for iNdEx := len(m.AttributeTable) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AttributeTable[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProfiles(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a } } - if len(m.Function) > 0 { - for iNdEx := len(m.Function) - 1; iNdEx >= 0; iNdEx-- { + if len(m.FunctionTable) > 0 { + for iNdEx := len(m.FunctionTable) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.Function[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := m.FunctionTable[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size - i = encodeVarintPprofextended(dAtA, i, uint64(size)) + i = encodeVarintProfiles(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x2a + dAtA[i] = 0x32 + } + } + if len(m.LocationIndices) > 0 { + dAtA7 := make([]byte, len(m.LocationIndices)*10) + var j6 int + for _, num1 := range m.LocationIndices { + num := uint64(num1) + for num >= 1<<7 { + dAtA7[j6] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j6++ + } + dAtA7[j6] = uint8(num) + j6++ } + i -= j6 + copy(dAtA[i:], dAtA7[:j6]) + i = encodeVarintProfiles(dAtA, i, uint64(j6)) + i-- + dAtA[i] = 0x2a } - if len(m.Location) > 0 { - for iNdEx := len(m.Location) - 1; iNdEx >= 0; iNdEx-- { + if len(m.LocationTable) > 0 { + for iNdEx := len(m.LocationTable) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.Location[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := m.LocationTable[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size - i = encodeVarintPprofextended(dAtA, i, uint64(size)) + i = encodeVarintProfiles(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x22 } } - if len(m.Mapping) > 0 { - for iNdEx := len(m.Mapping) - 1; iNdEx >= 0; iNdEx-- { + if len(m.MappingTable) > 0 { + for iNdEx := len(m.MappingTable) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.Mapping[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := m.MappingTable[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size - i = encodeVarintPprofextended(dAtA, i, uint64(size)) + i = encodeVarintProfiles(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x1a @@ -1471,7 +1844,7 @@ func (m *Profile) MarshalToSizedBuffer(dAtA []byte) (int, error) { return 0, err } i -= size - i = encodeVarintPprofextended(dAtA, i, uint64(size)) + i = encodeVarintProfiles(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x12 @@ -1485,7 +1858,7 @@ func (m *Profile) MarshalToSizedBuffer(dAtA []byte) (int, error) { return 0, err } i -= size - i = encodeVarintPprofextended(dAtA, i, uint64(size)) + i = encodeVarintProfiles(dAtA, i, uint64(size)) } i-- dAtA[i] = 0xa @@ -1514,13 +1887,13 @@ func (m *AttributeUnit) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.Unit != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.Unit)) + if m.UnitStrindex != 0 { 
+ i = encodeVarintProfiles(dAtA, i, uint64(m.UnitStrindex)) i-- dAtA[i] = 0x10 } - if m.AttributeKey != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.AttributeKey)) + if m.AttributeKeyStrindex != 0 { + i = encodeVarintProfiles(dAtA, i, uint64(m.AttributeKeyStrindex)) i-- dAtA[i] = 0x8 } @@ -1553,7 +1926,7 @@ func (m *Link) MarshalToSizedBuffer(dAtA []byte) (int, error) { if _, err := m.SpanId.MarshalTo(dAtA[i:]); err != nil { return 0, err } - i = encodeVarintPprofextended(dAtA, i, uint64(size)) + i = encodeVarintProfiles(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x12 @@ -1563,7 +1936,7 @@ func (m *Link) MarshalToSizedBuffer(dAtA []byte) (int, error) { if _, err := m.TraceId.MarshalTo(dAtA[i:]); err != nil { return 0, err } - i = encodeVarintPprofextended(dAtA, i, uint64(size)) + i = encodeVarintProfiles(dAtA, i, uint64(size)) } i-- dAtA[i] = 0xa @@ -1591,17 +1964,17 @@ func (m *ValueType) MarshalToSizedBuffer(dAtA []byte) (int, error) { var l int _ = l if m.AggregationTemporality != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.AggregationTemporality)) + i = encodeVarintProfiles(dAtA, i, uint64(m.AggregationTemporality)) i-- dAtA[i] = 0x18 } - if m.Unit != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.Unit)) + if m.UnitStrindex != 0 { + i = encodeVarintProfiles(dAtA, i, uint64(m.UnitStrindex)) i-- dAtA[i] = 0x10 } - if m.Type != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.Type)) + if m.TypeStrindex != 0 { + i = encodeVarintProfiles(dAtA, i, uint64(m.TypeStrindex)) i-- dAtA[i] = 0x8 } @@ -1629,32 +2002,9 @@ func (m *Sample) MarshalToSizedBuffer(dAtA []byte) (int, error) { var l int _ = l if len(m.TimestampsUnixNano) > 0 { - dAtA7 := make([]byte, len(m.TimestampsUnixNano)*10) - var j6 int - for _, num := range m.TimestampsUnixNano { - for num >= 1<<7 { - dAtA7[j6] = uint8(uint64(num)&0x7f | 0x80) - num >>= 7 - j6++ - } - dAtA7[j6] = uint8(num) - j6++ - } - i -= j6 - copy(dAtA[i:], dAtA7[:j6]) - i = encodeVarintPprofextended(dAtA, i, uint64(j6)) - i-- - dAtA[i] = 0x6a - } - if m.Link != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.Link)) - i-- - dAtA[i] = 0x60 - } - if len(m.Attributes) > 0 { - dAtA9 := make([]byte, len(m.Attributes)*10) + dAtA9 := make([]byte, len(m.TimestampsUnixNano)*10) var j8 int - for _, num := range m.Attributes { + for _, num := range m.TimestampsUnixNano { for num >= 1<<7 { dAtA9[j8] = uint8(uint64(num)&0x7f | 0x80) num >>= 7 @@ -1665,43 +2015,23 @@ func (m *Sample) MarshalToSizedBuffer(dAtA []byte) (int, error) { } i -= j8 copy(dAtA[i:], dAtA9[:j8]) - i = encodeVarintPprofextended(dAtA, i, uint64(j8)) - i-- - dAtA[i] = 0x52 - } - if m.StacktraceIdIndex != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.StacktraceIdIndex)) - i-- - dAtA[i] = 0x48 - } - if m.LocationsLength != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.LocationsLength)) - i-- - dAtA[i] = 0x40 - } - if m.LocationsStartIndex != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.LocationsStartIndex)) + i = encodeVarintProfiles(dAtA, i, uint64(j8)) i-- - dAtA[i] = 0x38 + dAtA[i] = 0x32 } - if len(m.Label) > 0 { - for iNdEx := len(m.Label) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Label[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintPprofextended(dAtA, i, uint64(size)) + if m.LinkIndex_ != nil { + { + size := m.LinkIndex_.Size() + i -= size + if _, err := m.LinkIndex_.MarshalTo(dAtA[i:]); err != nil { + return 0, err } - i-- - dAtA[i] = 0x1a } } - if len(m.Value) > 0 { - dAtA11 := 
make([]byte, len(m.Value)*10) + if len(m.AttributeIndices) > 0 { + dAtA11 := make([]byte, len(m.AttributeIndices)*10) var j10 int - for _, num1 := range m.Value { + for _, num1 := range m.AttributeIndices { num := uint64(num1) for num >= 1<<7 { dAtA11[j10] = uint8(uint64(num)&0x7f | 0x80) @@ -1713,14 +2043,15 @@ func (m *Sample) MarshalToSizedBuffer(dAtA []byte) (int, error) { } i -= j10 copy(dAtA[i:], dAtA11[:j10]) - i = encodeVarintPprofextended(dAtA, i, uint64(j10)) + i = encodeVarintProfiles(dAtA, i, uint64(j10)) i-- - dAtA[i] = 0x12 + dAtA[i] = 0x22 } - if len(m.LocationIndex) > 0 { - dAtA13 := make([]byte, len(m.LocationIndex)*10) + if len(m.Value) > 0 { + dAtA13 := make([]byte, len(m.Value)*10) var j12 int - for _, num := range m.LocationIndex { + for _, num1 := range m.Value { + num := uint64(num1) for num >= 1<<7 { dAtA13[j12] = uint8(uint64(num)&0x7f | 0x80) num >>= 7 @@ -1731,13 +2062,35 @@ func (m *Sample) MarshalToSizedBuffer(dAtA []byte) (int, error) { } i -= j12 copy(dAtA[i:], dAtA13[:j12]) - i = encodeVarintPprofextended(dAtA, i, uint64(j12)) + i = encodeVarintProfiles(dAtA, i, uint64(j12)) + i-- + dAtA[i] = 0x1a + } + if m.LocationsLength != 0 { + i = encodeVarintProfiles(dAtA, i, uint64(m.LocationsLength)) + i-- + dAtA[i] = 0x10 + } + if m.LocationsStartIndex != 0 { + i = encodeVarintProfiles(dAtA, i, uint64(m.LocationsStartIndex)) i-- - dAtA[i] = 0xa + dAtA[i] = 0x8 } return len(dAtA) - i, nil } +func (m *Sample_LinkIndex) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Sample_LinkIndex) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i = encodeVarintProfiles(dAtA, i, uint64(m.LinkIndex)) + i-- + dAtA[i] = 0x28 + return len(dAtA) - i, nil +} func (m *Label) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -1758,23 +2111,23 @@ func (m *Label) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.NumUnit != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.NumUnit)) + if m.NumUnitStrindex != 0 { + i = encodeVarintProfiles(dAtA, i, uint64(m.NumUnitStrindex)) i-- dAtA[i] = 0x20 } if m.Num != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.Num)) + i = encodeVarintProfiles(dAtA, i, uint64(m.Num)) i-- dAtA[i] = 0x18 } - if m.Str != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.Str)) + if m.StrStrindex != 0 { + i = encodeVarintProfiles(dAtA, i, uint64(m.StrStrindex)) i-- dAtA[i] = 0x10 } - if m.Key != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.Key)) + if m.KeyStrindex != 0 { + i = encodeVarintProfiles(dAtA, i, uint64(m.KeyStrindex)) i-- dAtA[i] = 0x8 } @@ -1801,29 +2154,6 @@ func (m *Mapping) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if len(m.Attributes) > 0 { - dAtA15 := make([]byte, len(m.Attributes)*10) - var j14 int - for _, num := range m.Attributes { - for num >= 1<<7 { - dAtA15[j14] = uint8(uint64(num)&0x7f | 0x80) - num >>= 7 - j14++ - } - dAtA15[j14] = uint8(num) - j14++ - } - i -= j14 - copy(dAtA[i:], dAtA15[:j14]) - i = encodeVarintPprofextended(dAtA, i, uint64(j14)) - i-- - dAtA[i] = 0x62 - } - if m.BuildIdKind != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.BuildIdKind)) - i-- - dAtA[i] = 0x58 - } if m.HasInlineFrames { i-- if m.HasInlineFrames { @@ -1832,7 +2162,7 @@ func (m *Mapping) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0 } i-- - dAtA[i] = 0x50 + dAtA[i] = 0x48 } if m.HasLineNumbers { i-- @@ -1842,7 +2172,7 @@ func (m *Mapping) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0 } i-- - dAtA[i] = 0x48 + dAtA[i] = 0x40 } if m.HasFilenames { i-- @@ -1852,7 +2182,7 @@ func (m *Mapping) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0 } i-- - dAtA[i] = 0x40 + dAtA[i] = 0x38 } if m.HasFunctions { i-- @@ -1862,35 +2192,44 @@ func (m *Mapping) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0 } i-- - dAtA[i] = 0x38 - } - if m.BuildId != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.BuildId)) - i-- dAtA[i] = 0x30 } - if m.Filename != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.Filename)) + if len(m.AttributeIndices) > 0 { + dAtA15 := make([]byte, len(m.AttributeIndices)*10) + var j14 int + for _, num1 := range m.AttributeIndices { + num := uint64(num1) + for num >= 1<<7 { + dAtA15[j14] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j14++ + } + dAtA15[j14] = uint8(num) + j14++ + } + i -= j14 + copy(dAtA[i:], dAtA15[:j14]) + i = encodeVarintProfiles(dAtA, i, uint64(j14)) i-- - dAtA[i] = 0x28 + dAtA[i] = 0x2a } - if m.FileOffset != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.FileOffset)) + if m.FilenameStrindex != 0 { + i = encodeVarintProfiles(dAtA, i, uint64(m.FilenameStrindex)) i-- dAtA[i] = 0x20 } - if m.MemoryLimit != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.MemoryLimit)) + if m.FileOffset != 0 { + i = encodeVarintProfiles(dAtA, i, uint64(m.FileOffset)) i-- dAtA[i] = 0x18 } - if m.MemoryStart != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.MemoryStart)) + if m.MemoryLimit != 0 { + i = encodeVarintProfiles(dAtA, i, uint64(m.MemoryLimit)) i-- dAtA[i] = 0x10 } - if m.Id != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.Id)) + if m.MemoryStart != 0 { + i = encodeVarintProfiles(dAtA, i, uint64(m.MemoryStart)) i-- dAtA[i] = 0x8 } @@ -1917,10 +2256,11 @@ func (m *Location) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if len(m.Attributes) > 0 { - dAtA17 := make([]byte, len(m.Attributes)*10) + if len(m.AttributeIndices) > 0 { + dAtA17 := make([]byte, len(m.AttributeIndices)*10) var j16 int - for _, num := range m.Attributes { + for _, num1 := range m.AttributeIndices { + num := uint64(num1) for num >= 1<<7 { dAtA17[j16] = uint8(uint64(num)&0x7f | 0x80) num >>= 7 @@ -1931,14 +2271,9 @@ func (m *Location) MarshalToSizedBuffer(dAtA []byte) (int, error) { } i -= j16 copy(dAtA[i:], dAtA17[:j16]) - i = encodeVarintPprofextended(dAtA, i, uint64(j16)) - i-- - dAtA[i] = 0x3a - } - if m.TypeIndex != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.TypeIndex)) + i = encodeVarintProfiles(dAtA, i, uint64(j16)) i-- - dAtA[i] = 0x30 + dAtA[i] = 0x2a } if m.IsFolded { i-- @@ -1948,7 +2283,7 @@ func (m *Location) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0 } i-- - dAtA[i] = 0x28 + dAtA[i] = 0x20 } if len(m.Line) > 0 { for iNdEx := len(m.Line) - 1; iNdEx >= 0; iNdEx-- { @@ -1958,30 +2293,41 @@ func (m *Location) MarshalToSizedBuffer(dAtA []byte) (int, error) { return 0, err } i -= size - i = encodeVarintPprofextended(dAtA, i, uint64(size)) + i = encodeVarintProfiles(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x22 + dAtA[i] = 0x1a } } if m.Address != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.Address)) - i-- - dAtA[i] = 0x18 - } - if m.MappingIndex != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.MappingIndex)) + i = encodeVarintProfiles(dAtA, i, uint64(m.Address)) i-- dAtA[i] = 0x10 } - if m.Id != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.Id)) - i-- - dAtA[i] = 0x8 + if 
m.MappingIndex_ != nil { + { + size := m.MappingIndex_.Size() + i -= size + if _, err := m.MappingIndex_.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } } return len(dAtA) - i, nil } +func (m *Location_MappingIndex) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Location_MappingIndex) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i = encodeVarintProfiles(dAtA, i, uint64(m.MappingIndex)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} func (m *Line) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -2003,17 +2349,17 @@ func (m *Line) MarshalToSizedBuffer(dAtA []byte) (int, error) { var l int _ = l if m.Column != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.Column)) + i = encodeVarintProfiles(dAtA, i, uint64(m.Column)) i-- dAtA[i] = 0x18 } if m.Line != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.Line)) + i = encodeVarintProfiles(dAtA, i, uint64(m.Line)) i-- dAtA[i] = 0x10 } if m.FunctionIndex != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.FunctionIndex)) + i = encodeVarintProfiles(dAtA, i, uint64(m.FunctionIndex)) i-- dAtA[i] = 0x8 } @@ -2041,35 +2387,30 @@ func (m *Function) MarshalToSizedBuffer(dAtA []byte) (int, error) { var l int _ = l if m.StartLine != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.StartLine)) - i-- - dAtA[i] = 0x28 - } - if m.Filename != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.Filename)) + i = encodeVarintProfiles(dAtA, i, uint64(m.StartLine)) i-- dAtA[i] = 0x20 } - if m.SystemName != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.SystemName)) + if m.FilenameStrindex != 0 { + i = encodeVarintProfiles(dAtA, i, uint64(m.FilenameStrindex)) i-- dAtA[i] = 0x18 } - if m.Name != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.Name)) + if m.SystemNameStrindex != 0 { + i = encodeVarintProfiles(dAtA, i, uint64(m.SystemNameStrindex)) i-- dAtA[i] = 0x10 } - if m.Id != 0 { - i = encodeVarintPprofextended(dAtA, i, uint64(m.Id)) + if m.NameStrindex != 0 { + i = encodeVarintProfiles(dAtA, i, uint64(m.NameStrindex)) i-- dAtA[i] = 0x8 } return len(dAtA) - i, nil } -func encodeVarintPprofextended(dAtA []byte, offset int, v uint64) int { - offset -= sovPprofextended(v) +func encodeVarintProfiles(dAtA []byte, offset int, v uint64) int { + offset -= sovProfiles(v) base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -2079,100 +2420,170 @@ func encodeVarintPprofextended(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return base } -func (m *Profile) Size() (n int) { +func (m *ProfilesData) Size() (n int) { if m == nil { return 0 } var l int _ = l - if len(m.SampleType) > 0 { - for _, e := range m.SampleType { - l = e.Size() - n += 1 + l + sovPprofextended(uint64(l)) - } - } - if len(m.Sample) > 0 { - for _, e := range m.Sample { + if len(m.ResourceProfiles) > 0 { + for _, e := range m.ResourceProfiles { l = e.Size() - n += 1 + l + sovPprofextended(uint64(l)) + n += 1 + l + sovProfiles(uint64(l)) } } - if len(m.Mapping) > 0 { - for _, e := range m.Mapping { - l = e.Size() - n += 1 + l + sovPprofextended(uint64(l)) - } + return n +} + +func (m *ResourceProfiles) Size() (n int) { + if m == nil { + return 0 } - if len(m.Location) > 0 { - for _, e := range m.Location { + var l int + _ = l + l = m.Resource.Size() + n += 1 + l + sovProfiles(uint64(l)) + if len(m.ScopeProfiles) > 0 { + for _, e := range m.ScopeProfiles { l = e.Size() - n += 1 + l + 
sovPprofextended(uint64(l)) + n += 1 + l + sovProfiles(uint64(l)) } } - if len(m.Function) > 0 { - for _, e := range m.Function { - l = e.Size() - n += 1 + l + sovPprofextended(uint64(l)) - } + l = len(m.SchemaUrl) + if l > 0 { + n += 1 + l + sovProfiles(uint64(l)) } - if len(m.StringTable) > 0 { - for _, s := range m.StringTable { - l = len(s) - n += 1 + l + sovPprofextended(uint64(l)) - } + return n +} + +func (m *ScopeProfiles) Size() (n int) { + if m == nil { + return 0 } - if m.DropFrames != 0 { - n += 1 + sovPprofextended(uint64(m.DropFrames)) + var l int + _ = l + l = m.Scope.Size() + n += 1 + l + sovProfiles(uint64(l)) + if len(m.Profiles) > 0 { + for _, e := range m.Profiles { + l = e.Size() + n += 1 + l + sovProfiles(uint64(l)) + } } - if m.KeepFrames != 0 { - n += 1 + sovPprofextended(uint64(m.KeepFrames)) + l = len(m.SchemaUrl) + if l > 0 { + n += 1 + l + sovProfiles(uint64(l)) } - if m.TimeNanos != 0 { - n += 1 + sovPprofextended(uint64(m.TimeNanos)) + return n +} + +func (m *Profile) Size() (n int) { + if m == nil { + return 0 } - if m.DurationNanos != 0 { - n += 1 + sovPprofextended(uint64(m.DurationNanos)) + var l int + _ = l + if len(m.SampleType) > 0 { + for _, e := range m.SampleType { + l = e.Size() + n += 1 + l + sovProfiles(uint64(l)) + } } - l = m.PeriodType.Size() - n += 1 + l + sovPprofextended(uint64(l)) - if m.Period != 0 { - n += 1 + sovPprofextended(uint64(m.Period)) + if len(m.Sample) > 0 { + for _, e := range m.Sample { + l = e.Size() + n += 1 + l + sovProfiles(uint64(l)) + } } - if len(m.Comment) > 0 { - l = 0 - for _, e := range m.Comment { - l += sovPprofextended(uint64(e)) + if len(m.MappingTable) > 0 { + for _, e := range m.MappingTable { + l = e.Size() + n += 1 + l + sovProfiles(uint64(l)) } - n += 1 + sovPprofextended(uint64(l)) + l } - if m.DefaultSampleType != 0 { - n += 1 + sovPprofextended(uint64(m.DefaultSampleType)) + if len(m.LocationTable) > 0 { + for _, e := range m.LocationTable { + l = e.Size() + n += 1 + l + sovProfiles(uint64(l)) + } } if len(m.LocationIndices) > 0 { l = 0 for _, e := range m.LocationIndices { - l += sovPprofextended(uint64(e)) + l += sovProfiles(uint64(e)) + } + n += 1 + sovProfiles(uint64(l)) + l + } + if len(m.FunctionTable) > 0 { + for _, e := range m.FunctionTable { + l = e.Size() + n += 1 + l + sovProfiles(uint64(l)) } - n += 1 + sovPprofextended(uint64(l)) + l } if len(m.AttributeTable) > 0 { for _, e := range m.AttributeTable { l = e.Size() - n += 2 + l + sovPprofextended(uint64(l)) + n += 1 + l + sovProfiles(uint64(l)) } } if len(m.AttributeUnits) > 0 { for _, e := range m.AttributeUnits { l = e.Size() - n += 2 + l + sovPprofextended(uint64(l)) + n += 1 + l + sovProfiles(uint64(l)) } } if len(m.LinkTable) > 0 { for _, e := range m.LinkTable { l = e.Size() - n += 2 + l + sovPprofextended(uint64(l)) + n += 1 + l + sovProfiles(uint64(l)) + } + } + if len(m.StringTable) > 0 { + for _, s := range m.StringTable { + l = len(s) + n += 1 + l + sovProfiles(uint64(l)) + } + } + if m.TimeNanos != 0 { + n += 1 + sovProfiles(uint64(m.TimeNanos)) + } + if m.DurationNanos != 0 { + n += 1 + sovProfiles(uint64(m.DurationNanos)) + } + l = m.PeriodType.Size() + n += 1 + l + sovProfiles(uint64(l)) + if m.Period != 0 { + n += 1 + sovProfiles(uint64(m.Period)) + } + if len(m.CommentStrindices) > 0 { + l = 0 + for _, e := range m.CommentStrindices { + l += sovProfiles(uint64(e)) + } + n += 1 + sovProfiles(uint64(l)) + l + } + if m.DefaultSampleTypeStrindex != 0 { + n += 2 + sovProfiles(uint64(m.DefaultSampleTypeStrindex)) + } + l = 
m.ProfileId.Size() + n += 2 + l + sovProfiles(uint64(l)) + if len(m.Attributes) > 0 { + for _, e := range m.Attributes { + l = e.Size() + n += 2 + l + sovProfiles(uint64(l)) } } + if m.DroppedAttributesCount != 0 { + n += 2 + sovProfiles(uint64(m.DroppedAttributesCount)) + } + l = len(m.OriginalPayloadFormat) + if l > 0 { + n += 2 + l + sovProfiles(uint64(l)) + } + l = len(m.OriginalPayload) + if l > 0 { + n += 2 + l + sovProfiles(uint64(l)) + } return n } @@ -2182,11 +2593,11 @@ func (m *AttributeUnit) Size() (n int) { } var l int _ = l - if m.AttributeKey != 0 { - n += 1 + sovPprofextended(uint64(m.AttributeKey)) + if m.AttributeKeyStrindex != 0 { + n += 1 + sovProfiles(uint64(m.AttributeKeyStrindex)) } - if m.Unit != 0 { - n += 1 + sovPprofextended(uint64(m.Unit)) + if m.UnitStrindex != 0 { + n += 1 + sovProfiles(uint64(m.UnitStrindex)) } return n } @@ -2198,9 +2609,9 @@ func (m *Link) Size() (n int) { var l int _ = l l = m.TraceId.Size() - n += 1 + l + sovPprofextended(uint64(l)) + n += 1 + l + sovProfiles(uint64(l)) l = m.SpanId.Size() - n += 1 + l + sovPprofextended(uint64(l)) + n += 1 + l + sovProfiles(uint64(l)) return n } @@ -2210,14 +2621,14 @@ func (m *ValueType) Size() (n int) { } var l int _ = l - if m.Type != 0 { - n += 1 + sovPprofextended(uint64(m.Type)) + if m.TypeStrindex != 0 { + n += 1 + sovProfiles(uint64(m.TypeStrindex)) } - if m.Unit != 0 { - n += 1 + sovPprofextended(uint64(m.Unit)) + if m.UnitStrindex != 0 { + n += 1 + sovProfiles(uint64(m.UnitStrindex)) } if m.AggregationTemporality != 0 { - n += 1 + sovPprofextended(uint64(m.AggregationTemporality)) + n += 1 + sovProfiles(uint64(m.AggregationTemporality)) } return n } @@ -2228,72 +2639,65 @@ func (m *Sample) Size() (n int) { } var l int _ = l - if len(m.LocationIndex) > 0 { - l = 0 - for _, e := range m.LocationIndex { - l += sovPprofextended(uint64(e)) - } - n += 1 + sovPprofextended(uint64(l)) + l + if m.LocationsStartIndex != 0 { + n += 1 + sovProfiles(uint64(m.LocationsStartIndex)) + } + if m.LocationsLength != 0 { + n += 1 + sovProfiles(uint64(m.LocationsLength)) } if len(m.Value) > 0 { l = 0 for _, e := range m.Value { - l += sovPprofextended(uint64(e)) + l += sovProfiles(uint64(e)) } - n += 1 + sovPprofextended(uint64(l)) + l + n += 1 + sovProfiles(uint64(l)) + l } - if len(m.Label) > 0 { - for _, e := range m.Label { - l = e.Size() - n += 1 + l + sovPprofextended(uint64(l)) - } - } - if m.LocationsStartIndex != 0 { - n += 1 + sovPprofextended(uint64(m.LocationsStartIndex)) - } - if m.LocationsLength != 0 { - n += 1 + sovPprofextended(uint64(m.LocationsLength)) - } - if m.StacktraceIdIndex != 0 { - n += 1 + sovPprofextended(uint64(m.StacktraceIdIndex)) - } - if len(m.Attributes) > 0 { + if len(m.AttributeIndices) > 0 { l = 0 - for _, e := range m.Attributes { - l += sovPprofextended(uint64(e)) + for _, e := range m.AttributeIndices { + l += sovProfiles(uint64(e)) } - n += 1 + sovPprofextended(uint64(l)) + l + n += 1 + sovProfiles(uint64(l)) + l } - if m.Link != 0 { - n += 1 + sovPprofextended(uint64(m.Link)) + if m.LinkIndex_ != nil { + n += m.LinkIndex_.Size() } if len(m.TimestampsUnixNano) > 0 { l = 0 for _, e := range m.TimestampsUnixNano { - l += sovPprofextended(uint64(e)) + l += sovProfiles(uint64(e)) } - n += 1 + sovPprofextended(uint64(l)) + l + n += 1 + sovProfiles(uint64(l)) + l } return n } +func (m *Sample_LinkIndex) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovProfiles(uint64(m.LinkIndex)) + return n +} func (m *Label) Size() (n int) { if m == nil { return 
0 } var l int _ = l - if m.Key != 0 { - n += 1 + sovPprofextended(uint64(m.Key)) + if m.KeyStrindex != 0 { + n += 1 + sovProfiles(uint64(m.KeyStrindex)) } - if m.Str != 0 { - n += 1 + sovPprofextended(uint64(m.Str)) + if m.StrStrindex != 0 { + n += 1 + sovProfiles(uint64(m.StrStrindex)) } if m.Num != 0 { - n += 1 + sovPprofextended(uint64(m.Num)) + n += 1 + sovProfiles(uint64(m.Num)) } - if m.NumUnit != 0 { - n += 1 + sovPprofextended(uint64(m.NumUnit)) + if m.NumUnitStrindex != 0 { + n += 1 + sovProfiles(uint64(m.NumUnitStrindex)) } return n } @@ -2304,23 +2708,24 @@ func (m *Mapping) Size() (n int) { } var l int _ = l - if m.Id != 0 { - n += 1 + sovPprofextended(uint64(m.Id)) - } if m.MemoryStart != 0 { - n += 1 + sovPprofextended(uint64(m.MemoryStart)) + n += 1 + sovProfiles(uint64(m.MemoryStart)) } if m.MemoryLimit != 0 { - n += 1 + sovPprofextended(uint64(m.MemoryLimit)) + n += 1 + sovProfiles(uint64(m.MemoryLimit)) } if m.FileOffset != 0 { - n += 1 + sovPprofextended(uint64(m.FileOffset)) + n += 1 + sovProfiles(uint64(m.FileOffset)) } - if m.Filename != 0 { - n += 1 + sovPprofextended(uint64(m.Filename)) + if m.FilenameStrindex != 0 { + n += 1 + sovProfiles(uint64(m.FilenameStrindex)) } - if m.BuildId != 0 { - n += 1 + sovPprofextended(uint64(m.BuildId)) + if len(m.AttributeIndices) > 0 { + l = 0 + for _, e := range m.AttributeIndices { + l += sovProfiles(uint64(e)) + } + n += 1 + sovProfiles(uint64(l)) + l } if m.HasFunctions { n += 2 @@ -2334,16 +2739,6 @@ func (m *Mapping) Size() (n int) { if m.HasInlineFrames { n += 2 } - if m.BuildIdKind != 0 { - n += 1 + sovPprofextended(uint64(m.BuildIdKind)) - } - if len(m.Attributes) > 0 { - l = 0 - for _, e := range m.Attributes { - l += sovPprofextended(uint64(e)) - } - n += 1 + sovPprofextended(uint64(l)) + l - } return n } @@ -2353,37 +2748,40 @@ func (m *Location) Size() (n int) { } var l int _ = l - if m.Id != 0 { - n += 1 + sovPprofextended(uint64(m.Id)) - } - if m.MappingIndex != 0 { - n += 1 + sovPprofextended(uint64(m.MappingIndex)) + if m.MappingIndex_ != nil { + n += m.MappingIndex_.Size() } if m.Address != 0 { - n += 1 + sovPprofextended(uint64(m.Address)) + n += 1 + sovProfiles(uint64(m.Address)) } if len(m.Line) > 0 { for _, e := range m.Line { l = e.Size() - n += 1 + l + sovPprofextended(uint64(l)) + n += 1 + l + sovProfiles(uint64(l)) } } if m.IsFolded { n += 2 } - if m.TypeIndex != 0 { - n += 1 + sovPprofextended(uint64(m.TypeIndex)) - } - if len(m.Attributes) > 0 { + if len(m.AttributeIndices) > 0 { l = 0 - for _, e := range m.Attributes { - l += sovPprofextended(uint64(e)) + for _, e := range m.AttributeIndices { + l += sovProfiles(uint64(e)) } - n += 1 + sovPprofextended(uint64(l)) + l + n += 1 + sovProfiles(uint64(l)) + l } return n } +func (m *Location_MappingIndex) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovProfiles(uint64(m.MappingIndex)) + return n +} func (m *Line) Size() (n int) { if m == nil { return 0 @@ -2391,13 +2789,13 @@ func (m *Line) Size() (n int) { var l int _ = l if m.FunctionIndex != 0 { - n += 1 + sovPprofextended(uint64(m.FunctionIndex)) + n += 1 + sovProfiles(uint64(m.FunctionIndex)) } if m.Line != 0 { - n += 1 + sovPprofextended(uint64(m.Line)) + n += 1 + sovProfiles(uint64(m.Line)) } if m.Column != 0 { - n += 1 + sovPprofextended(uint64(m.Column)) + n += 1 + sovProfiles(uint64(m.Column)) } return n } @@ -2408,31 +2806,28 @@ func (m *Function) Size() (n int) { } var l int _ = l - if m.Id != 0 { - n += 1 + sovPprofextended(uint64(m.Id)) - } - if m.Name != 0 { - 
n += 1 + sovPprofextended(uint64(m.Name)) + if m.NameStrindex != 0 { + n += 1 + sovProfiles(uint64(m.NameStrindex)) } - if m.SystemName != 0 { - n += 1 + sovPprofextended(uint64(m.SystemName)) + if m.SystemNameStrindex != 0 { + n += 1 + sovProfiles(uint64(m.SystemNameStrindex)) } - if m.Filename != 0 { - n += 1 + sovPprofextended(uint64(m.Filename)) + if m.FilenameStrindex != 0 { + n += 1 + sovProfiles(uint64(m.FilenameStrindex)) } if m.StartLine != 0 { - n += 1 + sovPprofextended(uint64(m.StartLine)) + n += 1 + sovProfiles(uint64(m.StartLine)) } return n } -func sovPprofextended(x uint64) (n int) { +func sovProfiles(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } -func sozPprofextended(x uint64) (n int) { - return sovPprofextended(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +func sozProfiles(x uint64) (n int) { + return sovProfiles(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } -func (m *Profile) Unmarshal(dAtA []byte) error { +func (m *ProfilesData) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2440,7 +2835,7 @@ func (m *Profile) Unmarshal(dAtA []byte) error { var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -2455,20 +2850,20 @@ func (m *Profile) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Profile: wiretype end group for non-group") + return fmt.Errorf("proto: ProfilesData: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Profile: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ProfilesData: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SampleType", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ResourceProfiles", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -2481,28 +2876,78 @@ func (m *Profile) Unmarshal(dAtA []byte) error { } } if msglen < 0 { - return ErrInvalidLengthPprofextended + return ErrInvalidLengthProfiles } postIndex := iNdEx + msglen if postIndex < 0 { - return ErrInvalidLengthPprofextended + return ErrInvalidLengthProfiles } if postIndex > l { return io.ErrUnexpectedEOF } - m.SampleType = append(m.SampleType, ValueType{}) - if err := m.SampleType[len(m.SampleType)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.ResourceProfiles = append(m.ResourceProfiles, &ResourceProfiles{}) + if err := m.ResourceProfiles[len(m.ResourceProfiles)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + default: + iNdEx = preIndex + skippy, err := skipProfiles(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthProfiles + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceProfiles) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceProfiles: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceProfiles: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Sample", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -2515,28 +2960,27 @@ func (m *Profile) Unmarshal(dAtA []byte) error { } } if msglen < 0 { - return ErrInvalidLengthPprofextended + return ErrInvalidLengthProfiles } postIndex := iNdEx + msglen if postIndex < 0 { - return ErrInvalidLengthPprofextended + return ErrInvalidLengthProfiles } if postIndex > l { return io.ErrUnexpectedEOF } - m.Sample = append(m.Sample, Sample{}) - if err := m.Sample[len(m.Sample)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Mapping", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ScopeProfiles", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -2549,62 +2993,110 @@ func (m *Profile) Unmarshal(dAtA []byte) error { } } if msglen < 0 { - return ErrInvalidLengthPprofextended + return ErrInvalidLengthProfiles } postIndex := iNdEx + msglen if postIndex < 0 { - return ErrInvalidLengthPprofextended + return ErrInvalidLengthProfiles } if postIndex > l { return io.ErrUnexpectedEOF } - m.Mapping = append(m.Mapping, Mapping{}) - if err := m.Mapping[len(m.Mapping)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.ScopeProfiles = append(m.ScopeProfiles, &ScopeProfiles{}) + if err := m.ScopeProfiles[len(m.ScopeProfiles)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Location", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthPprofextended + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthProfiles } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { - return ErrInvalidLengthPprofextended + return ErrInvalidLengthProfiles } if postIndex > l { return io.ErrUnexpectedEOF } - m.Location = append(m.Location, Location{}) - if err := m.Location[len(m.Location)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.SchemaUrl = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipProfiles(dAtA[iNdEx:]) + if err != nil { return err 
} - iNdEx = postIndex - case 5: + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthProfiles + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScopeProfiles) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScopeProfiles: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScopeProfiles: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Function", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -2617,136 +3109,211 @@ func (m *Profile) Unmarshal(dAtA []byte) error { } } if msglen < 0 { - return ErrInvalidLengthPprofextended + return ErrInvalidLengthProfiles } postIndex := iNdEx + msglen if postIndex < 0 { - return ErrInvalidLengthPprofextended + return ErrInvalidLengthProfiles } if postIndex > l { return io.ErrUnexpectedEOF } - m.Function = append(m.Function, Function{}) - if err := m.Function[len(m.Function)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Scope.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 6: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StringTable", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Profiles", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPprofextended + if msglen < 0 { + return ErrInvalidLengthProfiles } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { - return ErrInvalidLengthPprofextended + return ErrInvalidLengthProfiles } if postIndex > l { return io.ErrUnexpectedEOF } - m.StringTable = append(m.StringTable, string(dAtA[iNdEx:postIndex])) + m.Profiles = append(m.Profiles, &Profile{}) + if err := m.Profiles[len(m.Profiles)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DropFrames", wireType) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType) } - m.DropFrames = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ - m.DropFrames |= int64(b&0x7F) << shift + stringLen |= 
uint64(b&0x7F) << shift if b < 0x80 { break } } - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field KeepFrames", wireType) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthProfiles } - m.KeepFrames = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.KeepFrames |= int64(b&0x7F) << shift - if b < 0x80 { - break - } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthProfiles } - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TimeNanos", wireType) + if postIndex > l { + return io.ErrUnexpectedEOF } - m.TimeNanos = 0 + m.SchemaUrl = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipProfiles(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthProfiles + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Profile) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Profile: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Profile: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SampleType", wireType) + } + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ - m.TimeNanos |= int64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - case 10: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DurationNanos", wireType) + if msglen < 0 { + return ErrInvalidLengthProfiles } - m.DurationNanos = 0 + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProfiles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SampleType = append(m.SampleType, &ValueType{}) + if err := m.SampleType[len(m.SampleType)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sample", wireType) + } + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ - m.DurationNanos |= int64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - case 11: + if msglen < 0 { + return ErrInvalidLengthProfiles + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProfiles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Sample = append(m.Sample, &Sample{}) + if err := 
m.Sample[len(m.Sample)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PeriodType", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MappingTable", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -2759,61 +3326,77 @@ func (m *Profile) Unmarshal(dAtA []byte) error { } } if msglen < 0 { - return ErrInvalidLengthPprofextended + return ErrInvalidLengthProfiles } postIndex := iNdEx + msglen if postIndex < 0 { - return ErrInvalidLengthPprofextended + return ErrInvalidLengthProfiles } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.PeriodType.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.MappingTable = append(m.MappingTable, &Mapping{}) + if err := m.MappingTable[len(m.MappingTable)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 12: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Period", wireType) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LocationTable", wireType) } - m.Period = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ - m.Period |= int64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - case 13: + if msglen < 0 { + return ErrInvalidLengthProfiles + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProfiles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LocationTable = append(m.LocationTable, &Location{}) + if err := m.LocationTable[len(m.LocationTable)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: if wireType == 0 { - var v int64 + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ - v |= int64(b&0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } } - m.Comment = append(m.Comment, v) + m.LocationIndices = append(m.LocationIndices, v) } else if wireType == 2 { var packedLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -2826,11 +3409,11 @@ func (m *Profile) Unmarshal(dAtA []byte) error { } } if packedLen < 0 { - return ErrInvalidLengthPprofextended + return ErrInvalidLengthProfiles } postIndex := iNdEx + packedLen if postIndex < 0 { - return ErrInvalidLengthPprofextended + return ErrInvalidLengthProfiles } if postIndex > l { return io.ErrUnexpectedEOF @@ -2843,72 +3426,311 @@ func (m *Profile) Unmarshal(dAtA []byte) error { } } elementCount = count - if elementCount != 0 && len(m.Comment) == 0 { - m.Comment = make([]int64, 0, elementCount) + if elementCount != 0 && len(m.LocationIndices) == 0 { + m.LocationIndices = make([]int32, 0, elementCount) } for iNdEx < postIndex { - var v int64 + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] 
iNdEx++ - v |= int64(b&0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } } - m.Comment = append(m.Comment, v) + m.LocationIndices = append(m.LocationIndices, v) } } else { - return fmt.Errorf("proto: wrong wireType = %d for field Comment", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LocationIndices", wireType) + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FunctionTable", wireType) } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProfiles + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProfiles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FunctionTable = append(m.FunctionTable, &Function{}) + if err := m.FunctionTable[len(m.FunctionTable)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AttributeTable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProfiles + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProfiles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AttributeTable = append(m.AttributeTable, v11.KeyValue{}) + if err := m.AttributeTable[len(m.AttributeTable)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AttributeUnits", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProfiles + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProfiles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AttributeUnits = append(m.AttributeUnits, &AttributeUnit{}) + if err := m.AttributeUnits[len(m.AttributeUnits)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LinkTable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProfiles + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProfiles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LinkTable = append(m.LinkTable, &Link{}) + if err := m.LinkTable[len(m.LinkTable)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StringTable", wireType) + } + var stringLen 
uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthProfiles + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthProfiles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StringTable = append(m.StringTable, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeNanos", wireType) + } + m.TimeNanos = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TimeNanos |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DurationNanos", wireType) + } + m.DurationNanos = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DurationNanos |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PeriodType", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProfiles + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProfiles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PeriodType.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex case 14: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DefaultSampleType", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Period", wireType) } - m.DefaultSampleType = 0 + m.Period = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ - m.DefaultSampleType |= int64(b&0x7F) << shift + m.Period |= int64(b&0x7F) << shift if b < 0x80 { break } } case 15: if wireType == 0 { - var v int64 + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ - v |= int64(b&0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } } - m.LocationIndices = append(m.LocationIndices, v) + m.CommentStrindices = append(m.CommentStrindices, v) } else if wireType == 2 { var packedLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -2921,11 +3743,11 @@ func (m *Profile) Unmarshal(dAtA []byte) error { } } if packedLen < 0 { - return ErrInvalidLengthPprofextended + return ErrInvalidLengthProfiles } postIndex := iNdEx + packedLen if postIndex < 0 { - return ErrInvalidLengthPprofextended + return ErrInvalidLengthProfiles } if postIndex > l { return 
io.ErrUnexpectedEOF @@ -2938,72 +3760,90 @@ func (m *Profile) Unmarshal(dAtA []byte) error { } } elementCount = count - if elementCount != 0 && len(m.LocationIndices) == 0 { - m.LocationIndices = make([]int64, 0, elementCount) + if elementCount != 0 && len(m.CommentStrindices) == 0 { + m.CommentStrindices = make([]int32, 0, elementCount) } for iNdEx < postIndex { - var v int64 + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ - v |= int64(b&0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } } - m.LocationIndices = append(m.LocationIndices, v) + m.CommentStrindices = append(m.CommentStrindices, v) } } else { - return fmt.Errorf("proto: wrong wireType = %d for field LocationIndices", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CommentStrindices", wireType) } case 16: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultSampleTypeStrindex", wireType) + } + m.DefaultSampleTypeStrindex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DefaultSampleTypeStrindex |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 17: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AttributeTable", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ProfileId", wireType) } - var msglen int + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthPprofextended + if byteLen < 0 { + return ErrInvalidLengthProfiles } - postIndex := iNdEx + msglen + postIndex := iNdEx + byteLen if postIndex < 0 { - return ErrInvalidLengthPprofextended + return ErrInvalidLengthProfiles } if postIndex > l { return io.ErrUnexpectedEOF } - m.AttributeTable = append(m.AttributeTable, v1.KeyValue{}) - if err := m.AttributeTable[len(m.AttributeTable)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ProfileId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 17: + case 18: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AttributeUnits", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -3016,62 +3856,113 @@ func (m *Profile) Unmarshal(dAtA []byte) error { } } if msglen < 0 { - return ErrInvalidLengthPprofextended + return ErrInvalidLengthProfiles } postIndex := iNdEx + msglen if postIndex < 0 { - return ErrInvalidLengthPprofextended + return ErrInvalidLengthProfiles } if postIndex > l { return io.ErrUnexpectedEOF } - m.AttributeUnits = append(m.AttributeUnits, AttributeUnit{}) - if err := m.AttributeUnits[len(m.AttributeUnits)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Attributes = append(m.Attributes, v11.KeyValue{}) + if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = 
postIndex - case 18: + case 19: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType) + } + m.DroppedAttributesCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DroppedAttributesCount |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 20: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LinkTable", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field OriginalPayloadFormat", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthPprofextended + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthProfiles } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { - return ErrInvalidLengthPprofextended + return ErrInvalidLengthProfiles } if postIndex > l { return io.ErrUnexpectedEOF } - m.LinkTable = append(m.LinkTable, Link{}) - if err := m.LinkTable[len(m.LinkTable)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.OriginalPayloadFormat = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 21: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OriginalPayload", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthProfiles + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthProfiles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OriginalPayload = append(m.OriginalPayload[:0], dAtA[iNdEx:postIndex]...) 
+ if m.OriginalPayload == nil { + m.OriginalPayload = []byte{} } iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipPprofextended(dAtA[iNdEx:]) + skippy, err := skipProfiles(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthPprofextended + return ErrInvalidLengthProfiles } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF @@ -3093,7 +3984,7 @@ func (m *AttributeUnit) Unmarshal(dAtA []byte) error { var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -3116,50 +4007,50 @@ func (m *AttributeUnit) Unmarshal(dAtA []byte) error { switch fieldNum { case 1: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AttributeKey", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AttributeKeyStrindex", wireType) } - m.AttributeKey = 0 + m.AttributeKeyStrindex = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ - m.AttributeKey |= int64(b&0x7F) << shift + m.AttributeKeyStrindex |= int32(b&0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Unit", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field UnitStrindex", wireType) } - m.Unit = 0 + m.UnitStrindex = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ - m.Unit |= int64(b&0x7F) << shift + m.UnitStrindex |= int32(b&0x7F) << shift if b < 0x80 { break } } default: iNdEx = preIndex - skippy, err := skipPprofextended(dAtA[iNdEx:]) + skippy, err := skipProfiles(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthPprofextended + return ErrInvalidLengthProfiles } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF @@ -3181,7 +4072,7 @@ func (m *Link) Unmarshal(dAtA []byte) error { var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -3209,7 +4100,7 @@ func (m *Link) Unmarshal(dAtA []byte) error { var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -3222,11 +4113,11 @@ func (m *Link) Unmarshal(dAtA []byte) error { } } if byteLen < 0 { - return ErrInvalidLengthPprofextended + return ErrInvalidLengthProfiles } postIndex := iNdEx + byteLen if postIndex < 0 { - return ErrInvalidLengthPprofextended + return ErrInvalidLengthProfiles } if postIndex > l { return io.ErrUnexpectedEOF @@ -3242,7 +4133,7 @@ func (m *Link) Unmarshal(dAtA []byte) error { var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -3255,11 +4146,11 @@ func (m *Link) Unmarshal(dAtA []byte) error { } } if byteLen < 0 { - return ErrInvalidLengthPprofextended + return ErrInvalidLengthProfiles } postIndex := iNdEx + byteLen if postIndex < 0 { - return ErrInvalidLengthPprofextended + return ErrInvalidLengthProfiles } if postIndex > l 
{ return io.ErrUnexpectedEOF @@ -3270,12 +4161,12 @@ func (m *Link) Unmarshal(dAtA []byte) error { iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipPprofextended(dAtA[iNdEx:]) + skippy, err := skipProfiles(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthPprofextended + return ErrInvalidLengthProfiles } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF @@ -3297,7 +4188,7 @@ func (m *ValueType) Unmarshal(dAtA []byte) error { var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -3320,38 +4211,38 @@ func (m *ValueType) Unmarshal(dAtA []byte) error { switch fieldNum { case 1: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TypeStrindex", wireType) } - m.Type = 0 + m.TypeStrindex = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ - m.Type |= int64(b&0x7F) << shift + m.TypeStrindex |= int32(b&0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Unit", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field UnitStrindex", wireType) } - m.Unit = 0 + m.UnitStrindex = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ - m.Unit |= int64(b&0x7F) << shift + m.UnitStrindex |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3363,7 +4254,7 @@ func (m *ValueType) Unmarshal(dAtA []byte) error { m.AggregationTemporality = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -3377,12 +4268,12 @@ func (m *ValueType) Unmarshal(dAtA []byte) error { } default: iNdEx = preIndex - skippy, err := skipPprofextended(dAtA[iNdEx:]) + skippy, err := skipProfiles(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthPprofextended + return ErrInvalidLengthProfiles } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF @@ -3404,7 +4295,7 @@ func (m *Sample) Unmarshal(dAtA []byte) error { var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -3426,87 +4317,49 @@ func (m *Sample) Unmarshal(dAtA []byte) error { } switch fieldNum { case 1: - if wireType == 0 { - var v uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LocationsStartIndex", wireType) + } + m.LocationsStartIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles } - m.LocationIndex = append(m.LocationIndex, v) - } else if wireType == 2 { - var packedLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { 
- return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - packedLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } + if iNdEx >= l { + return io.ErrUnexpectedEOF } - if packedLen < 0 { - return ErrInvalidLengthPprofextended + b := dAtA[iNdEx] + iNdEx++ + m.LocationsStartIndex |= int32(b&0x7F) << shift + if b < 0x80 { + break } - postIndex := iNdEx + packedLen - if postIndex < 0 { - return ErrInvalidLengthPprofextended + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LocationsLength", wireType) + } + m.LocationsLength = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles } - if postIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - var elementCount int - var count int - for _, integer := range dAtA[iNdEx:postIndex] { - if integer < 128 { - count++ - } - } - elementCount = count - if elementCount != 0 && len(m.LocationIndex) == 0 { - m.LocationIndex = make([]uint64, 0, elementCount) - } - for iNdEx < postIndex { - var v uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.LocationIndex = append(m.LocationIndex, v) + b := dAtA[iNdEx] + iNdEx++ + m.LocationsLength |= int32(b&0x7F) << shift + if b < 0x80 { + break } - } else { - return fmt.Errorf("proto: wrong wireType = %d for field LocationIndex", wireType) } - case 2: + case 3: if wireType == 0 { var v int64 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -3523,7 +4376,7 @@ func (m *Sample) Unmarshal(dAtA []byte) error { var packedLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -3536,11 +4389,11 @@ func (m *Sample) Unmarshal(dAtA []byte) error { } } if packedLen < 0 { - return ErrInvalidLengthPprofextended + return ErrInvalidLengthProfiles } postIndex := iNdEx + packedLen if postIndex < 0 { - return ErrInvalidLengthPprofextended + return ErrInvalidLengthProfiles } if postIndex > l { return io.ErrUnexpectedEOF @@ -3560,137 +4413,46 @@ func (m *Sample) Unmarshal(dAtA []byte) error { var v int64 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] - iNdEx++ - v |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Value = append(m.Value, v) - } - } else { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Label", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthPprofextended - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthPprofextended - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Label = append(m.Label, Label{}) - if err := m.Label[len(m.Label)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex 
- case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field LocationsStartIndex", wireType) - } - m.LocationsStartIndex = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.LocationsStartIndex |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field LocationsLength", wireType) - } - m.LocationsLength = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.LocationsLength |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StacktraceIdIndex", wireType) - } - m.StacktraceIdIndex = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.StacktraceIdIndex |= uint32(b&0x7F) << shift - if b < 0x80 { - break + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Value = append(m.Value, v) } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) } - case 10: + case 4: if wireType == 0 { - var v uint64 + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ - v |= uint64(b&0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } } - m.Attributes = append(m.Attributes, v) + m.AttributeIndices = append(m.AttributeIndices, v) } else if wireType == 2 { var packedLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -3703,11 +4465,11 @@ func (m *Sample) Unmarshal(dAtA []byte) error { } } if packedLen < 0 { - return ErrInvalidLengthPprofextended + return ErrInvalidLengthProfiles } postIndex := iNdEx + packedLen if postIndex < 0 { - return ErrInvalidLengthPprofextended + return ErrInvalidLengthProfiles } if postIndex > l { return io.ErrUnexpectedEOF @@ -3720,55 +4482,56 @@ func (m *Sample) Unmarshal(dAtA []byte) error { } } elementCount = count - if elementCount != 0 && len(m.Attributes) == 0 { - m.Attributes = make([]uint64, 0, elementCount) + if elementCount != 0 && len(m.AttributeIndices) == 0 { + m.AttributeIndices = make([]int32, 0, elementCount) } for iNdEx < postIndex { - var v uint64 + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ - v |= uint64(b&0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } } - m.Attributes = append(m.Attributes, v) + m.AttributeIndices = append(m.AttributeIndices, v) } } else { - return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AttributeIndices", wireType) } - case 12: + case 5: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Link", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LinkIndex", wireType) } - m.Link = 0 + var v int32 for 
shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ - m.Link |= uint64(b&0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } } - case 13: + m.LinkIndex_ = &Sample_LinkIndex{v} + case 6: if wireType == 0 { var v uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -3785,7 +4548,7 @@ func (m *Sample) Unmarshal(dAtA []byte) error { var packedLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -3798,11 +4561,11 @@ func (m *Sample) Unmarshal(dAtA []byte) error { } } if packedLen < 0 { - return ErrInvalidLengthPprofextended + return ErrInvalidLengthProfiles } postIndex := iNdEx + packedLen if postIndex < 0 { - return ErrInvalidLengthPprofextended + return ErrInvalidLengthProfiles } if postIndex > l { return io.ErrUnexpectedEOF @@ -3822,7 +4585,7 @@ func (m *Sample) Unmarshal(dAtA []byte) error { var v uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -3841,12 +4604,12 @@ func (m *Sample) Unmarshal(dAtA []byte) error { } default: iNdEx = preIndex - skippy, err := skipPprofextended(dAtA[iNdEx:]) + skippy, err := skipProfiles(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthPprofextended + return ErrInvalidLengthProfiles } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF @@ -3868,7 +4631,7 @@ func (m *Label) Unmarshal(dAtA []byte) error { var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -3891,38 +4654,38 @@ func (m *Label) Unmarshal(dAtA []byte) error { switch fieldNum { case 1: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field KeyStrindex", wireType) } - m.Key = 0 + m.KeyStrindex = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ - m.Key |= int64(b&0x7F) << shift + m.KeyStrindex |= int32(b&0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Str", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field StrStrindex", wireType) } - m.Str = 0 + m.StrStrindex = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ - m.Str |= int64(b&0x7F) << shift + m.StrStrindex |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3934,7 +4697,7 @@ func (m *Label) Unmarshal(dAtA []byte) error { m.Num = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -3948,31 +4711,31 @@ func (m *Label) Unmarshal(dAtA []byte) error { } case 4: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field 
NumUnit", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NumUnitStrindex", wireType) } - m.NumUnit = 0 + m.NumUnitStrindex = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ - m.NumUnit |= int64(b&0x7F) << shift + m.NumUnitStrindex |= int32(b&0x7F) << shift if b < 0x80 { break } } default: iNdEx = preIndex - skippy, err := skipPprofextended(dAtA[iNdEx:]) + skippy, err := skipProfiles(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthPprofextended + return ErrInvalidLengthProfiles } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF @@ -3994,7 +4757,7 @@ func (m *Mapping) Unmarshal(dAtA []byte) error { var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -4016,32 +4779,13 @@ func (m *Mapping) Unmarshal(dAtA []byte) error { } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) - } - m.Id = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Id |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field MemoryStart", wireType) } m.MemoryStart = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -4053,14 +4797,14 @@ func (m *Mapping) Unmarshal(dAtA []byte) error { break } } - case 3: + case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field MemoryLimit", wireType) } m.MemoryLimit = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -4072,14 +4816,14 @@ func (m *Mapping) Unmarshal(dAtA []byte) error { break } } - case 4: + case 3: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field FileOffset", wireType) } m.FileOffset = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -4091,52 +4835,109 @@ func (m *Mapping) Unmarshal(dAtA []byte) error { break } } - case 5: + case 4: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Filename", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FilenameStrindex", wireType) } - m.Filename = 0 + m.FilenameStrindex = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ - m.Filename |= int64(b&0x7F) << shift + m.FilenameStrindex |= int32(b&0x7F) << shift if b < 0x80 { break } } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field BuildId", wireType) - } - m.BuildId = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended + case 5: + if wireType == 0 { + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + 
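The renames in these hunks (Key → KeyStrindex, Str → StrStrindex, NumUnit → NumUnitStrindex, Filename → FilenameStrindex) reflect the profiles data model, where strings are stored once in a per-profile string table and messages carry int32 indices into it, with index 0 conventionally the empty string. Resolving an index is a bounds-checked lookup; a hypothetical helper in that spirit (lookupString is not part of the generated code):

package main

import "fmt"

// lookupString resolves a *Strindex field against the profile's string
// table, rejecting indices that fall outside it.
func lookupString(table []string, strindex int32) (string, error) {
	if strindex < 0 || int(strindex) >= len(table) {
		return "", fmt.Errorf("string index %d out of range (table has %d entries)", strindex, len(table))
	}
	return table[strindex], nil
}

func main() {
	table := []string{"", "samples", "count"}
	s, _ := lookupString(table, 1)
	fmt.Println(s) // samples
}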
if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + m.AttributeIndices = append(m.AttributeIndices, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthProfiles + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthProfiles + } + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - m.BuildId |= int64(b&0x7F) << shift - if b < 0x80 { - break + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.AttributeIndices) == 0 { + m.AttributeIndices = make([]int32, 0, elementCount) + } + for iNdEx < postIndex { + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProfiles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AttributeIndices = append(m.AttributeIndices, v) } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field AttributeIndices", wireType) } - case 7: + case 6: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field HasFunctions", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -4149,14 +4950,14 @@ func (m *Mapping) Unmarshal(dAtA []byte) error { } } m.HasFunctions = bool(v != 0) - case 8: + case 7: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field HasFilenames", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -4169,14 +4970,14 @@ func (m *Mapping) Unmarshal(dAtA []byte) error { } } m.HasFilenames = bool(v != 0) - case 9: + case 8: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field HasLineNumbers", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -4189,14 +4990,14 @@ func (m *Mapping) Unmarshal(dAtA []byte) error { } } m.HasLineNumbers = bool(v != 0) - case 10: + case 9: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field HasInlineFrames", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -4209,109 +5010,14 @@ func (m *Mapping) Unmarshal(dAtA []byte) error { } } m.HasInlineFrames = bool(v != 0) - case 11: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field BuildIdKind", wireType) - } - m.BuildIdKind = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.BuildIdKind |= BuildIdKind(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 12: - if wireType == 0 
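The AttributeIndices cases above accept a repeated varint field in both wire forms: unpacked (one wire-type-0 value per tag) and packed (a single wire-type-2, length-delimited run of varints). For the packed form the generated code pre-counts elements — every byte with the continuation bit clear ends exactly one varint — so the destination slice is allocated once. A sketch of just that counting step:

package main

import "fmt"

// countPackedVarints returns how many varints a packed payload holds:
// each byte with the high (continuation) bit clear terminates one value.
func countPackedVarints(payload []byte) int {
	count := 0
	for _, b := range payload {
		if b < 0x80 {
			count++
		}
	}
	return count
}

func main() {
	// Two varints: 300 (0xAC 0x02) and 7 (0x07).
	fmt.Println(countPackedVarints([]byte{0xAC, 0x02, 0x07})) // 2
}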
{ - var v uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Attributes = append(m.Attributes, v) - } else if wireType == 2 { - var packedLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - packedLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if packedLen < 0 { - return ErrInvalidLengthPprofextended - } - postIndex := iNdEx + packedLen - if postIndex < 0 { - return ErrInvalidLengthPprofextended - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - var elementCount int - var count int - for _, integer := range dAtA[iNdEx:postIndex] { - if integer < 128 { - count++ - } - } - elementCount = count - if elementCount != 0 && len(m.Attributes) == 0 { - m.Attributes = make([]uint64, 0, elementCount) - } - for iNdEx < postIndex { - var v uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Attributes = append(m.Attributes, v) - } - } else { - return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) - } default: iNdEx = preIndex - skippy, err := skipPprofextended(dAtA[iNdEx:]) + skippy, err := skipProfiles(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthPprofextended + return ErrInvalidLengthProfiles } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF @@ -4333,7 +5039,7 @@ func (m *Location) Unmarshal(dAtA []byte) error { var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -4355,51 +5061,33 @@ func (m *Location) Unmarshal(dAtA []byte) error { } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) - } - m.Id = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Id |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field MappingIndex", wireType) } - m.MappingIndex = 0 + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ - m.MappingIndex |= uint64(b&0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } } - case 3: + m.MappingIndex_ = &Location_MappingIndex{v} + case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType) } m.Address = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -4411,14 +5099,14 @@ func (m *Location) Unmarshal(dAtA []byte) error { break } } - case 4: + case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Line", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if 
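Assignments such as m.LinkIndex_ = &Sample_LinkIndex{v} and m.MappingIndex_ = &Location_MappingIndex{v} store presence-tracked scalars behind small pointer wrappers, so an unset field (nil) stays distinguishable from an explicit zero. A hypothetical reduction of the shape gogoproto generates for such fields (the wrapper and getter definitions below are assumed, not taken from this patch):

package main

import "fmt"

// Sample_LinkIndex wraps the optional link_index scalar; a nil LinkIndex_
// on the parent means the field was absent on the wire.
type Sample_LinkIndex struct {
	LinkIndex int32
}

type Sample struct {
	LinkIndex_ *Sample_LinkIndex
}

// GetLinkIndex is nil-safe and returns the zero value when unset.
func (m *Sample) GetLinkIndex() int32 {
	if m != nil && m.LinkIndex_ != nil {
		return m.LinkIndex_.LinkIndex
	}
	return 0
}

func main() {
	var unset Sample
	set := Sample{LinkIndex_: &Sample_LinkIndex{42}}
	fmt.Println(unset.GetLinkIndex(), set.GetLinkIndex()) // 0 42
}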
shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -4431,28 +5119,28 @@ func (m *Location) Unmarshal(dAtA []byte) error { } } if msglen < 0 { - return ErrInvalidLengthPprofextended + return ErrInvalidLengthProfiles } postIndex := iNdEx + msglen if postIndex < 0 { - return ErrInvalidLengthPprofextended + return ErrInvalidLengthProfiles } if postIndex > l { return io.ErrUnexpectedEOF } - m.Line = append(m.Line, Line{}) + m.Line = append(m.Line, &Line{}) if err := m.Line[len(m.Line)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 5: + case 4: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field IsFolded", wireType) } var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -4465,48 +5153,29 @@ func (m *Location) Unmarshal(dAtA []byte) error { } } m.IsFolded = bool(v != 0) - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TypeIndex", wireType) - } - m.TypeIndex = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TypeIndex |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 7: + case 5: if wireType == 0 { - var v uint64 + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ - v |= uint64(b&0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } } - m.Attributes = append(m.Attributes, v) + m.AttributeIndices = append(m.AttributeIndices, v) } else if wireType == 2 { var packedLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -4519,11 +5188,11 @@ func (m *Location) Unmarshal(dAtA []byte) error { } } if packedLen < 0 { - return ErrInvalidLengthPprofextended + return ErrInvalidLengthProfiles } postIndex := iNdEx + packedLen if postIndex < 0 { - return ErrInvalidLengthPprofextended + return ErrInvalidLengthProfiles } if postIndex > l { return io.ErrUnexpectedEOF @@ -4536,38 +5205,38 @@ func (m *Location) Unmarshal(dAtA []byte) error { } } elementCount = count - if elementCount != 0 && len(m.Attributes) == 0 { - m.Attributes = make([]uint64, 0, elementCount) + if elementCount != 0 && len(m.AttributeIndices) == 0 { + m.AttributeIndices = make([]int32, 0, elementCount) } for iNdEx < postIndex { - var v uint64 + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ - v |= uint64(b&0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } } - m.Attributes = append(m.Attributes, v) + m.AttributeIndices = append(m.AttributeIndices, v) } } else { - return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AttributeIndices", wireType) } default: iNdEx = preIndex - skippy, err := skipPprofextended(dAtA[iNdEx:]) + skippy, err := skipProfiles(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { - return 
ErrInvalidLengthPprofextended + return ErrInvalidLengthProfiles } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF @@ -4589,7 +5258,7 @@ func (m *Line) Unmarshal(dAtA []byte) error { var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -4617,14 +5286,14 @@ func (m *Line) Unmarshal(dAtA []byte) error { m.FunctionIndex = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ - m.FunctionIndex |= uint64(b&0x7F) << shift + m.FunctionIndex |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -4636,7 +5305,7 @@ func (m *Line) Unmarshal(dAtA []byte) error { m.Line = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -4655,7 +5324,7 @@ func (m *Line) Unmarshal(dAtA []byte) error { m.Column = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -4669,12 +5338,12 @@ func (m *Line) Unmarshal(dAtA []byte) error { } default: iNdEx = preIndex - skippy, err := skipPprofextended(dAtA[iNdEx:]) + skippy, err := skipProfiles(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthPprofextended + return ErrInvalidLengthProfiles } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF @@ -4696,7 +5365,7 @@ func (m *Function) Unmarshal(dAtA []byte) error { var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -4719,88 +5388,69 @@ func (m *Function) Unmarshal(dAtA []byte) error { switch fieldNum { case 1: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NameStrindex", wireType) } - m.Id = 0 + m.NameStrindex = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ - m.Id |= uint64(b&0x7F) << shift + m.NameStrindex |= int32(b&0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SystemNameStrindex", wireType) } - m.Name = 0 + m.SystemNameStrindex = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ - m.Name |= int64(b&0x7F) << shift + m.SystemNameStrindex |= int32(b&0x7F) << shift if b < 0x80 { break } } case 3: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SystemName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FilenameStrindex", wireType) } - m.SystemName = 0 + m.FilenameStrindex = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ - m.SystemName |= int64(b&0x7F) << shift + m.FilenameStrindex |= 
int32(b&0x7F) << shift if b < 0x80 { break } } case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Filename", wireType) - } - m.Filename = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPprofextended - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Filename |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field StartLine", wireType) } m.StartLine = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowPprofextended + return ErrIntOverflowProfiles } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -4814,12 +5464,12 @@ func (m *Function) Unmarshal(dAtA []byte) error { } default: iNdEx = preIndex - skippy, err := skipPprofextended(dAtA[iNdEx:]) + skippy, err := skipProfiles(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthPprofextended + return ErrInvalidLengthProfiles } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF @@ -4833,7 +5483,7 @@ func (m *Function) Unmarshal(dAtA []byte) error { } return nil } -func skipPprofextended(dAtA []byte) (n int, err error) { +func skipProfiles(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 depth := 0 @@ -4841,7 +5491,7 @@ func skipPprofextended(dAtA []byte) (n int, err error) { var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return 0, ErrIntOverflowPprofextended + return 0, ErrIntOverflowProfiles } if iNdEx >= l { return 0, io.ErrUnexpectedEOF @@ -4858,7 +5508,7 @@ func skipPprofextended(dAtA []byte) (n int, err error) { case 0: for shift := uint(0); ; shift += 7 { if shift >= 64 { - return 0, ErrIntOverflowPprofextended + return 0, ErrIntOverflowProfiles } if iNdEx >= l { return 0, io.ErrUnexpectedEOF @@ -4874,7 +5524,7 @@ func skipPprofextended(dAtA []byte) (n int, err error) { var length int for shift := uint(0); ; shift += 7 { if shift >= 64 { - return 0, ErrIntOverflowPprofextended + return 0, ErrIntOverflowProfiles } if iNdEx >= l { return 0, io.ErrUnexpectedEOF @@ -4887,14 +5537,14 @@ func skipPprofextended(dAtA []byte) (n int, err error) { } } if length < 0 { - return 0, ErrInvalidLengthPprofextended + return 0, ErrInvalidLengthProfiles } iNdEx += length case 3: depth++ case 4: if depth == 0 { - return 0, ErrUnexpectedEndOfGroupPprofextended + return 0, ErrUnexpectedEndOfGroupProfiles } depth-- case 5: @@ -4903,7 +5553,7 @@ func skipPprofextended(dAtA []byte) (n int, err error) { return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } if iNdEx < 0 { - return 0, ErrInvalidLengthPprofextended + return 0, ErrInvalidLengthProfiles } if depth == 0 { return iNdEx, nil @@ -4913,7 +5563,7 @@ func skipPprofextended(dAtA []byte) (n int, err error) { } var ( - ErrInvalidLengthPprofextended = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowPprofextended = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupPprofextended = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthProfiles = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowProfiles = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupProfiles = fmt.Errorf("proto: unexpected end of group") ) diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1experimental/profiles.pb.go 
b/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1experimental/profiles.pb.go deleted file mode 100644 index 6e4662c248b..00000000000 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1experimental/profiles.pb.go +++ /dev/null @@ -1,1482 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: opentelemetry/proto/profiles/v1experimental/profiles.proto - -package v1experimental - -import ( - encoding_binary "encoding/binary" - fmt "fmt" - io "io" - math "math" - math_bits "math/bits" - - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - - v11 "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1" - v1 "go.opentelemetry.io/collector/pdata/internal/data/protogen/resource/v1" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// ProfilesData represents the profiles data that can be stored in persistent storage, -// OR can be embedded by other protocols that transfer OTLP profiles data but do not -// implement the OTLP protocol. -// -// The main difference between this message and collector protocol is that -// in this message there will not be any "control" or "metadata" specific to -// OTLP protocol. -// -// When new fields are added into this message, the OTLP request MUST be updated -// as well. -type ProfilesData struct { - // An array of ResourceProfiles. - // For data coming from a single resource this array will typically contain - // one element. Intermediary nodes that receive data from multiple origins - // typically batch the data before forwarding further and in that case this - // array will contain multiple elements. - ResourceProfiles []*ResourceProfiles `protobuf:"bytes,1,rep,name=resource_profiles,json=resourceProfiles,proto3" json:"resource_profiles,omitempty"` -} - -func (m *ProfilesData) Reset() { *m = ProfilesData{} } -func (m *ProfilesData) String() string { return proto.CompactTextString(m) } -func (*ProfilesData) ProtoMessage() {} -func (*ProfilesData) Descriptor() ([]byte, []int) { - return fileDescriptor_394731f2296acea3, []int{0} -} -func (m *ProfilesData) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ProfilesData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ProfilesData.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ProfilesData) XXX_Merge(src proto.Message) { - xxx_messageInfo_ProfilesData.Merge(m, src) -} -func (m *ProfilesData) XXX_Size() int { - return m.Size() -} -func (m *ProfilesData) XXX_DiscardUnknown() { - xxx_messageInfo_ProfilesData.DiscardUnknown(m) -} - -var xxx_messageInfo_ProfilesData proto.InternalMessageInfo - -func (m *ProfilesData) GetResourceProfiles() []*ResourceProfiles { - if m != nil { - return m.ResourceProfiles - } - return nil -} - -// A collection of ScopeProfiles from a Resource. -type ResourceProfiles struct { - // The resource for the profiles in this message. 
- // If this field is not set then no resource info is known. - Resource v1.Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource"` - // A list of ScopeProfiles that originate from a resource. - ScopeProfiles []*ScopeProfiles `protobuf:"bytes,2,rep,name=scope_profiles,json=scopeProfiles,proto3" json:"scope_profiles,omitempty"` - // The Schema URL, if known. This is the identifier of the Schema that the resource data - // is recorded in. To learn more about Schema URL see - // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url - // This schema_url applies to the data in the "resource" field. It does not apply - // to the data in the "scope_profiles" field which have their own schema_url field. - SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` -} - -func (m *ResourceProfiles) Reset() { *m = ResourceProfiles{} } -func (m *ResourceProfiles) String() string { return proto.CompactTextString(m) } -func (*ResourceProfiles) ProtoMessage() {} -func (*ResourceProfiles) Descriptor() ([]byte, []int) { - return fileDescriptor_394731f2296acea3, []int{1} -} -func (m *ResourceProfiles) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceProfiles) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ResourceProfiles.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ResourceProfiles) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceProfiles.Merge(m, src) -} -func (m *ResourceProfiles) XXX_Size() int { - return m.Size() -} -func (m *ResourceProfiles) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceProfiles.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceProfiles proto.InternalMessageInfo - -func (m *ResourceProfiles) GetResource() v1.Resource { - if m != nil { - return m.Resource - } - return v1.Resource{} -} - -func (m *ResourceProfiles) GetScopeProfiles() []*ScopeProfiles { - if m != nil { - return m.ScopeProfiles - } - return nil -} - -func (m *ResourceProfiles) GetSchemaUrl() string { - if m != nil { - return m.SchemaUrl - } - return "" -} - -// A collection of ProfileContainers produced by an InstrumentationScope. -type ScopeProfiles struct { - // The instrumentation scope information for the profiles in this message. - // Semantically when InstrumentationScope isn't set, it is equivalent with - // an empty instrumentation scope name (unknown). - Scope v11.InstrumentationScope `protobuf:"bytes,1,opt,name=scope,proto3" json:"scope"` - // A list of ProfileContainers that originate from an instrumentation scope. - Profiles []*ProfileContainer `protobuf:"bytes,2,rep,name=profiles,proto3" json:"profiles,omitempty"` - // The Schema URL, if known. This is the identifier of the Schema that the metric data - // is recorded in. To learn more about Schema URL see - // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url - // This schema_url applies to all profiles in the "profiles" field. 
- SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` -} - -func (m *ScopeProfiles) Reset() { *m = ScopeProfiles{} } -func (m *ScopeProfiles) String() string { return proto.CompactTextString(m) } -func (*ScopeProfiles) ProtoMessage() {} -func (*ScopeProfiles) Descriptor() ([]byte, []int) { - return fileDescriptor_394731f2296acea3, []int{2} -} -func (m *ScopeProfiles) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ScopeProfiles) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ScopeProfiles.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ScopeProfiles) XXX_Merge(src proto.Message) { - xxx_messageInfo_ScopeProfiles.Merge(m, src) -} -func (m *ScopeProfiles) XXX_Size() int { - return m.Size() -} -func (m *ScopeProfiles) XXX_DiscardUnknown() { - xxx_messageInfo_ScopeProfiles.DiscardUnknown(m) -} - -var xxx_messageInfo_ScopeProfiles proto.InternalMessageInfo - -func (m *ScopeProfiles) GetScope() v11.InstrumentationScope { - if m != nil { - return m.Scope - } - return v11.InstrumentationScope{} -} - -func (m *ScopeProfiles) GetProfiles() []*ProfileContainer { - if m != nil { - return m.Profiles - } - return nil -} - -func (m *ScopeProfiles) GetSchemaUrl() string { - if m != nil { - return m.SchemaUrl - } - return "" -} - -// A ProfileContainer represents a single profile. It wraps pprof profile with OpenTelemetry specific metadata. -type ProfileContainer struct { - // A globally unique identifier for a profile. The ID is a 16-byte array. An ID with - // all zeroes is considered invalid. - // - // This field is required. - ProfileId []byte `protobuf:"bytes,1,opt,name=profile_id,json=profileId,proto3" json:"profile_id,omitempty"` - // start_time_unix_nano is the start time of the profile. - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. - // - // This field is semantically required and it is expected that end_time >= start_time. - StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"` - // end_time_unix_nano is the end time of the profile. - // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. - // - // This field is semantically required and it is expected that end_time >= start_time. - EndTimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=end_time_unix_nano,json=endTimeUnixNano,proto3" json:"end_time_unix_nano,omitempty"` - // attributes is a collection of key/value pairs. Note, global attributes - // like server name can be set using the resource API. Examples of attributes: - // - // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" - // "/http/server_latency": 300 - // "abc.com/myattribute": true - // "abc.com/score": 10.239 - // - // The OpenTelemetry API specification further restricts the allowed value types: - // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute - // Attribute keys MUST be unique (it is not allowed to have more than one - // attribute with the same key). 
- Attributes []v11.KeyValue `protobuf:"bytes,4,rep,name=attributes,proto3" json:"attributes"` - // dropped_attributes_count is the number of attributes that were discarded. Attributes - // can be discarded because their keys are too long or because there are too many - // attributes. If this value is 0, then no attributes were dropped. - DroppedAttributesCount uint32 `protobuf:"varint,5,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` - // Specifies format of the original payload. Common values are defined in semantic conventions. [required if original_payload is present] - OriginalPayloadFormat string `protobuf:"bytes,6,opt,name=original_payload_format,json=originalPayloadFormat,proto3" json:"original_payload_format,omitempty"` - // Original payload can be stored in this field. This can be useful for users who want to get the original payload. - // Formats such as JFR are highly extensible and can contain more information than what is defined in this spec. - // Inclusion of original payload should be configurable by the user. Default behavior should be to not include the original payload. - // If the original payload is in pprof format, it SHOULD not be included in this field. - // The field is optional, however if it is present `profile` MUST be present and contain the same profiling information. - OriginalPayload []byte `protobuf:"bytes,7,opt,name=original_payload,json=originalPayload,proto3" json:"original_payload,omitempty"` - // This is a reference to a pprof profile. Required, even when original_payload is present. - Profile Profile `protobuf:"bytes,8,opt,name=profile,proto3" json:"profile"` -} - -func (m *ProfileContainer) Reset() { *m = ProfileContainer{} } -func (m *ProfileContainer) String() string { return proto.CompactTextString(m) } -func (*ProfileContainer) ProtoMessage() {} -func (*ProfileContainer) Descriptor() ([]byte, []int) { - return fileDescriptor_394731f2296acea3, []int{3} -} -func (m *ProfileContainer) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ProfileContainer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ProfileContainer.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ProfileContainer) XXX_Merge(src proto.Message) { - xxx_messageInfo_ProfileContainer.Merge(m, src) -} -func (m *ProfileContainer) XXX_Size() int { - return m.Size() -} -func (m *ProfileContainer) XXX_DiscardUnknown() { - xxx_messageInfo_ProfileContainer.DiscardUnknown(m) -} - -var xxx_messageInfo_ProfileContainer proto.InternalMessageInfo - -func (m *ProfileContainer) GetProfileId() []byte { - if m != nil { - return m.ProfileId - } - return nil -} - -func (m *ProfileContainer) GetStartTimeUnixNano() uint64 { - if m != nil { - return m.StartTimeUnixNano - } - return 0 -} - -func (m *ProfileContainer) GetEndTimeUnixNano() uint64 { - if m != nil { - return m.EndTimeUnixNano - } - return 0 -} - -func (m *ProfileContainer) GetAttributes() []v11.KeyValue { - if m != nil { - return m.Attributes - } - return nil -} - -func (m *ProfileContainer) GetDroppedAttributesCount() uint32 { - if m != nil { - return m.DroppedAttributesCount - } - return 0 -} - -func (m *ProfileContainer) GetOriginalPayloadFormat() string { - if m != nil { - return m.OriginalPayloadFormat - } - return "" -} - -func (m *ProfileContainer) GetOriginalPayload() []byte { - 
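The removed v1experimental ProfileContainer documented profile_id as a required 16-byte identifier, with an all-zero value considered invalid. A small check in the spirit of that contract (validProfileID is a hypothetical helper, not part of the generated code):

package main

import "fmt"

// validProfileID reports whether id satisfies the documented contract:
// exactly 16 bytes and not all zeroes.
func validProfileID(id []byte) bool {
	if len(id) != 16 {
		return false
	}
	for _, b := range id {
		if b != 0 {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(validProfileID(make([]byte, 16)))              // false: all zeroes
	fmt.Println(validProfileID(append(make([]byte, 15), 0x1))) // true
}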
if m != nil { - return m.OriginalPayload - } - return nil -} - -func (m *ProfileContainer) GetProfile() Profile { - if m != nil { - return m.Profile - } - return Profile{} -} - -func init() { - proto.RegisterType((*ProfilesData)(nil), "opentelemetry.proto.profiles.v1experimental.ProfilesData") - proto.RegisterType((*ResourceProfiles)(nil), "opentelemetry.proto.profiles.v1experimental.ResourceProfiles") - proto.RegisterType((*ScopeProfiles)(nil), "opentelemetry.proto.profiles.v1experimental.ScopeProfiles") - proto.RegisterType((*ProfileContainer)(nil), "opentelemetry.proto.profiles.v1experimental.ProfileContainer") -} - -func init() { - proto.RegisterFile("opentelemetry/proto/profiles/v1experimental/profiles.proto", fileDescriptor_394731f2296acea3) -} - -var fileDescriptor_394731f2296acea3 = []byte{ - // 652 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0x51, 0x6b, 0xdb, 0x3a, - 0x14, 0xc7, 0xe3, 0xa6, 0x4d, 0x52, 0xb5, 0xb9, 0x4d, 0x45, 0xef, 0xbd, 0xa6, 0x70, 0x73, 0x43, - 0x5e, 0x96, 0xae, 0x60, 0x93, 0x76, 0x8c, 0x51, 0x18, 0x63, 0xed, 0x36, 0xe8, 0xca, 0xd6, 0xe0, - 0xb5, 0x85, 0xed, 0xc5, 0xa8, 0xf1, 0x69, 0xa6, 0x61, 0x4b, 0x46, 0x96, 0x43, 0xba, 0x4f, 0xb1, - 0xcf, 0xb1, 0x4f, 0xd2, 0xc7, 0xee, 0x6d, 0x6c, 0x30, 0x46, 0xfb, 0xb2, 0x7e, 0x8b, 0x61, 0x59, - 0xf6, 0x12, 0x93, 0x51, 0xb2, 0x17, 0x23, 0x9f, 0xf3, 0x3f, 0xbf, 0xa3, 0xff, 0x91, 0x10, 0xda, - 0xe1, 0x21, 0x30, 0x09, 0x3e, 0x04, 0x20, 0xc5, 0xb9, 0x1d, 0x0a, 0x2e, 0x79, 0xf2, 0x3d, 0xa3, - 0x3e, 0x44, 0xf6, 0xb0, 0x0b, 0xa3, 0x10, 0x04, 0x0d, 0x80, 0x49, 0xe2, 0xe7, 0x71, 0x4b, 0xc9, - 0xf0, 0xe6, 0x44, 0x6d, 0x1a, 0xb4, 0x72, 0xcd, 0x64, 0xed, 0xfa, 0xda, 0x80, 0x0f, 0x78, 0x8a, - 0x4f, 0x56, 0xa9, 0x7a, 0xfd, 0xee, 0xb4, 0xf6, 0x7d, 0x1e, 0x04, 0x9c, 0xd9, 0xc3, 0xae, 0x5e, - 0x69, 0xad, 0x35, 0x4d, 0x2b, 0x20, 0xe2, 0xb1, 0xe8, 0x43, 0xa2, 0xce, 0xd6, 0x5a, 0xff, 0x68, - 0x26, 0x6b, 0x49, 0x02, 0x46, 0x12, 0x98, 0x07, 0x5e, 0x0a, 0x68, 0xbf, 0x47, 0xcb, 0x3d, 0x2d, - 0x7f, 0x42, 0x24, 0xc1, 0xef, 0xd0, 0x6a, 0xd6, 0xc2, 0xcd, 0x38, 0xa6, 0xd1, 0x2a, 0x77, 0x96, - 0xb6, 0x1e, 0x5a, 0x33, 0xcc, 0xc2, 0x72, 0x34, 0x25, 0xa3, 0x3b, 0x0d, 0x51, 0x88, 0xb4, 0x6f, - 0x0c, 0xd4, 0x28, 0xca, 0xf0, 0x01, 0xaa, 0x65, 0x42, 0xd3, 0x68, 0x19, 0x9d, 0xa5, 0xad, 0x8d, - 0xa9, 0x7d, 0xf3, 0x41, 0x0c, 0xbb, 0x79, 0xaf, 0xdd, 0xf9, 0x8b, 0x6f, 0xff, 0x97, 0x9c, 0x1c, - 0x80, 0x09, 0xfa, 0x2b, 0xea, 0xf3, 0x70, 0xcc, 0xca, 0x9c, 0xb2, 0xb2, 0x33, 0x93, 0x95, 0x57, - 0x09, 0x22, 0xf7, 0x51, 0x8f, 0xc6, 0x7f, 0xf1, 0x7f, 0x08, 0x45, 0xfd, 0xb7, 0x10, 0x10, 0x37, - 0x16, 0xbe, 0x59, 0x6e, 0x19, 0x9d, 0x45, 0x67, 0x31, 0x8d, 0x1c, 0x0b, 0xff, 0x79, 0xa5, 0xf6, - 0xa3, 0xda, 0xb8, 0xa9, 0xb6, 0xbf, 0x18, 0xa8, 0x3e, 0xc1, 0xc1, 0x87, 0x68, 0x41, 0x91, 0xb4, - 0xcb, 0xed, 0xa9, 0x5b, 0xd2, 0x97, 0x63, 0xd8, 0xb5, 0xf6, 0x59, 0x24, 0x45, 0xac, 0x76, 0x24, - 0x29, 0x67, 0x8a, 0xa5, 0xfd, 0xa6, 0x1c, 0xfc, 0x1a, 0xd5, 0x0a, 0x36, 0x67, 0x3b, 0x31, 0xbd, - 0xb3, 0x3d, 0xce, 0x24, 0xa1, 0x0c, 0x84, 0x93, 0xe3, 0x6e, 0x31, 0xd9, 0xfe, 0x54, 0x46, 0x8d, - 0x62, 0x75, 0x52, 0xa3, 0xeb, 0x5d, 0xea, 0x29, 0x93, 0xcb, 0xce, 0xa2, 0x8e, 0xec, 0x7b, 0xd8, - 0x46, 0x6b, 0x91, 0x24, 0x42, 0xba, 0x92, 0x06, 0xe0, 0xc6, 0x8c, 0x8e, 0x5c, 0x46, 0x18, 0x37, - 0xe7, 0x5a, 0x46, 0xa7, 0xe2, 0xac, 0xaa, 0xdc, 0x11, 0x0d, 0xe0, 0x98, 0xd1, 0xd1, 0x4b, 0xc2, - 0x38, 0xde, 0x44, 0x18, 0x98, 0x57, 0x94, 0x97, 0x95, 0x7c, 0x05, 0x98, 0x37, 0x21, 0x7e, 0x81, - 0x10, 0x91, 0x52, 0xd0, 0xd3, 0x58, 0x42, 0x64, 
0xce, 0xab, 0x69, 0xdc, 0xb9, 0x65, 0xc2, 0x07, - 0x70, 0x7e, 0x42, 0xfc, 0x38, 0x9b, 0xea, 0x18, 0x00, 0x3f, 0x40, 0xa6, 0x27, 0x78, 0x18, 0x82, - 0xe7, 0xfe, 0x8a, 0xba, 0x7d, 0x1e, 0x33, 0x69, 0x2e, 0xb4, 0x8c, 0x4e, 0xdd, 0xf9, 0x47, 0xe7, - 0x1f, 0xe7, 0xe9, 0xbd, 0x24, 0x8b, 0xef, 0xa3, 0x7f, 0xb9, 0xa0, 0x03, 0xca, 0x88, 0xef, 0x86, - 0xe4, 0xdc, 0xe7, 0xc4, 0x73, 0xcf, 0xb8, 0x08, 0x88, 0x34, 0x2b, 0x6a, 0x8c, 0x7f, 0x67, 0xe9, - 0x5e, 0x9a, 0x7d, 0xa6, 0x92, 0x78, 0x03, 0x35, 0x8a, 0x75, 0x66, 0x55, 0xcd, 0x70, 0xa5, 0x50, - 0x80, 0x8f, 0x50, 0x55, 0x8f, 0xd5, 0xac, 0xa9, 0xab, 0x74, 0xef, 0x4f, 0x8e, 0x5d, 0xbb, 0xce, - 0x50, 0xbb, 0x5f, 0x8d, 0x8b, 0xab, 0xa6, 0x71, 0x79, 0xd5, 0x34, 0xbe, 0x5f, 0x35, 0x8d, 0x0f, - 0xd7, 0xcd, 0xd2, 0xe5, 0x75, 0xb3, 0xf4, 0xf9, 0xba, 0x59, 0x42, 0x16, 0xe5, 0xb3, 0x74, 0xd8, - 0xad, 0x67, 0x77, 0xbe, 0x97, 0xc8, 0x7a, 0xc6, 0x1b, 0x67, 0x50, 0x04, 0xd0, 0xe4, 0x45, 0xf4, - 0x7d, 0xe8, 0x4b, 0x2e, 0xec, 0xd0, 0x23, 0x92, 0xd8, 0x94, 0x49, 0x10, 0x8c, 0xf8, 0xb6, 0xfa, - 0x53, 0x1d, 0x06, 0xc0, 0x7e, 0xf7, 0xb8, 0x7d, 0x9c, 0xdb, 0x3c, 0x0c, 0x81, 0x1d, 0xe5, 0x44, - 0xd5, 0x2b, 0x33, 0x17, 0x59, 0x27, 0xdd, 0xa7, 0x63, 0xea, 0xd3, 0x8a, 0xe2, 0x6d, 0xff, 0x0c, - 0x00, 0x00, 0xff, 0xff, 0xe1, 0x89, 0x49, 0xa4, 0x1b, 0x06, 0x00, 0x00, -} - -func (m *ProfilesData) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ProfilesData) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ProfilesData) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.ResourceProfiles) > 0 { - for iNdEx := len(m.ResourceProfiles) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ResourceProfiles[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProfiles(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *ResourceProfiles) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResourceProfiles) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResourceProfiles) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.SchemaUrl) > 0 { - i -= len(m.SchemaUrl) - copy(dAtA[i:], m.SchemaUrl) - i = encodeVarintProfiles(dAtA, i, uint64(len(m.SchemaUrl))) - i-- - dAtA[i] = 0x1a - } - if len(m.ScopeProfiles) > 0 { - for iNdEx := len(m.ScopeProfiles) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ScopeProfiles[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProfiles(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProfiles(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ScopeProfiles) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ScopeProfiles) 
MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ScopeProfiles) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.SchemaUrl) > 0 { - i -= len(m.SchemaUrl) - copy(dAtA[i:], m.SchemaUrl) - i = encodeVarintProfiles(dAtA, i, uint64(len(m.SchemaUrl))) - i-- - dAtA[i] = 0x1a - } - if len(m.Profiles) > 0 { - for iNdEx := len(m.Profiles) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Profiles[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProfiles(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.Scope.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProfiles(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ProfileContainer) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ProfileContainer) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ProfileContainer) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Profile.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProfiles(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - if len(m.OriginalPayload) > 0 { - i -= len(m.OriginalPayload) - copy(dAtA[i:], m.OriginalPayload) - i = encodeVarintProfiles(dAtA, i, uint64(len(m.OriginalPayload))) - i-- - dAtA[i] = 0x3a - } - if len(m.OriginalPayloadFormat) > 0 { - i -= len(m.OriginalPayloadFormat) - copy(dAtA[i:], m.OriginalPayloadFormat) - i = encodeVarintProfiles(dAtA, i, uint64(len(m.OriginalPayloadFormat))) - i-- - dAtA[i] = 0x32 - } - if m.DroppedAttributesCount != 0 { - i = encodeVarintProfiles(dAtA, i, uint64(m.DroppedAttributesCount)) - i-- - dAtA[i] = 0x28 - } - if len(m.Attributes) > 0 { - for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProfiles(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - } - if m.EndTimeUnixNano != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.EndTimeUnixNano)) - i-- - dAtA[i] = 0x19 - } - if m.StartTimeUnixNano != 0 { - i -= 8 - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.StartTimeUnixNano)) - i-- - dAtA[i] = 0x11 - } - if len(m.ProfileId) > 0 { - i -= len(m.ProfileId) - copy(dAtA[i:], m.ProfileId) - i = encodeVarintProfiles(dAtA, i, uint64(len(m.ProfileId))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintProfiles(dAtA []byte, offset int, v uint64) int { - offset -= sovProfiles(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *ProfilesData) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.ResourceProfiles) > 0 { - for _, e := range m.ResourceProfiles { - l = e.Size() - n += 1 + l + sovProfiles(uint64(l)) - } - } - return n -} - -func (m *ResourceProfiles) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Resource.Size() - n += 1 + l + sovProfiles(uint64(l)) - if 
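encodeVarintProfiles above is called from marshalers that fill the buffer back to front (MarshalToSizedBuffer), so every length prefix is written just before a payload whose size is already known — no second pass, no copying. A standalone sketch of the same backward write, with varintSize standing in for sovProfiles (both names here are illustrative):

package main

import (
	"fmt"
	"math/bits"
)

// varintSize mirrors sovProfiles: the number of 7-bit groups needed for x;
// the x|1 term makes zero occupy one byte rather than none.
func varintSize(x uint64) int {
	return (bits.Len64(x|1) + 6) / 7
}

// encodeVarint writes x so that it ends just before offset and returns the
// new (smaller) offset, matching the back-to-front style of the generated code.
func encodeVarint(dst []byte, offset int, x uint64) int {
	offset -= varintSize(x)
	base := offset
	for x >= 1<<7 {
		dst[offset] = uint8(x&0x7F | 0x80)
		x >>= 7
		offset++
	}
	dst[offset] = uint8(x)
	return base
}

func main() {
	buf := make([]byte, 4)
	off := encodeVarint(buf, len(buf), 300)
	fmt.Printf("% x\n", buf[off:]) // ac 02
}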
len(m.ScopeProfiles) > 0 { - for _, e := range m.ScopeProfiles { - l = e.Size() - n += 1 + l + sovProfiles(uint64(l)) - } - } - l = len(m.SchemaUrl) - if l > 0 { - n += 1 + l + sovProfiles(uint64(l)) - } - return n -} - -func (m *ScopeProfiles) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Scope.Size() - n += 1 + l + sovProfiles(uint64(l)) - if len(m.Profiles) > 0 { - for _, e := range m.Profiles { - l = e.Size() - n += 1 + l + sovProfiles(uint64(l)) - } - } - l = len(m.SchemaUrl) - if l > 0 { - n += 1 + l + sovProfiles(uint64(l)) - } - return n -} - -func (m *ProfileContainer) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ProfileId) - if l > 0 { - n += 1 + l + sovProfiles(uint64(l)) - } - if m.StartTimeUnixNano != 0 { - n += 9 - } - if m.EndTimeUnixNano != 0 { - n += 9 - } - if len(m.Attributes) > 0 { - for _, e := range m.Attributes { - l = e.Size() - n += 1 + l + sovProfiles(uint64(l)) - } - } - if m.DroppedAttributesCount != 0 { - n += 1 + sovProfiles(uint64(m.DroppedAttributesCount)) - } - l = len(m.OriginalPayloadFormat) - if l > 0 { - n += 1 + l + sovProfiles(uint64(l)) - } - l = len(m.OriginalPayload) - if l > 0 { - n += 1 + l + sovProfiles(uint64(l)) - } - l = m.Profile.Size() - n += 1 + l + sovProfiles(uint64(l)) - return n -} - -func sovProfiles(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozProfiles(x uint64) (n int) { - return sovProfiles(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *ProfilesData) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ProfilesData: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ProfilesData: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceProfiles", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProfiles - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProfiles - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ResourceProfiles = append(m.ResourceProfiles, &ResourceProfiles{}) - if err := m.ResourceProfiles[len(m.ResourceProfiles)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipProfiles(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthProfiles - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceProfiles) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
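A worked check of the size helpers above: sovProfiles computes a varint's width as (bits.Len64(x|1)+6)/7, so x = 300 (nine significant bits) needs (9+6)/7 = 2 bytes, matching its 0xAC 0x02 encoding. sozProfiles first zigzag-encodes signed values so that small negative numbers stay small on the wire; the transform and a few sample values, as a sketch:

package main

import "fmt"

// zigzag maps signed integers to unsigned ones with small magnitudes
// staying small: 0→0, -1→1, 1→2, -2→3, 2→4, ...
func zigzag(x int64) uint64 {
	return uint64((x << 1) ^ (x >> 63))
}

func main() {
	for _, x := range []int64{0, -1, 1, -2, 2} {
		fmt.Printf("zigzag(%d) = %d\n", x, zigzag(x))
	}
}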
ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceProfiles: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceProfiles: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProfiles - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProfiles - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ScopeProfiles", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProfiles - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProfiles - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ScopeProfiles = append(m.ScopeProfiles, &ScopeProfiles{}) - if err := m.ScopeProfiles[len(m.ScopeProfiles)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthProfiles - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProfiles - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SchemaUrl = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipProfiles(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthProfiles - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ScopeProfiles) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ScopeProfiles: wiretype end group for non-group") - } - if fieldNum <= 0 { - return 
fmt.Errorf("proto: ScopeProfiles: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProfiles - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProfiles - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Scope.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Profiles", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProfiles - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProfiles - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Profiles = append(m.Profiles, &ProfileContainer{}) - if err := m.Profiles[len(m.Profiles)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthProfiles - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProfiles - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SchemaUrl = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipProfiles(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthProfiles - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ProfileContainer) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ProfileContainer: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ProfileContainer: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ProfileId", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - 
iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthProfiles - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthProfiles - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ProfileId = append(m.ProfileId[:0], dAtA[iNdEx:postIndex]...) - if m.ProfileId == nil { - m.ProfileId = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field StartTimeUnixNano", wireType) - } - m.StartTimeUnixNano = 0 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - m.StartTimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - case 3: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field EndTimeUnixNano", wireType) - } - m.EndTimeUnixNano = 0 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - m.EndTimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProfiles - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProfiles - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Attributes = append(m.Attributes, v11.KeyValue{}) - if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType) - } - m.DroppedAttributesCount = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DroppedAttributesCount |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field OriginalPayloadFormat", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthProfiles - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProfiles - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.OriginalPayloadFormat = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field OriginalPayload", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthProfiles - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthProfiles - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.OriginalPayload = append(m.OriginalPayload[:0], 
dAtA[iNdEx:postIndex]...) - if m.OriginalPayload == nil { - m.OriginalPayload = []byte{} - } - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Profile", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProfiles - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProfiles - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProfiles - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Profile.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipProfiles(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthProfiles - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipProfiles(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowProfiles - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowProfiles - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowProfiles - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthProfiles - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupProfiles - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthProfiles - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthProfiles = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowProfiles = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupProfiles = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_int32slice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_int32slice.go new file mode 100644 index 00000000000..31f642d75bd --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_int32slice.go @@ -0,0 +1,34 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". 
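//
// The hand-written unmarshalers removed above all share one base-128 varint
// loop: accumulate seven payload bits per byte until a byte with its high bit
// clear terminates the value, guarding against 64-bit overflow and truncated
// input. A standalone sketch of that pattern (illustrative only, not vendored
// code; the generated unmarshalers inline it per field rather than calling a
// helper):
//
//	func decodeVarint(buf []byte) (v uint64, n int, err error) {
//		for shift := uint(0); n < len(buf); shift += 7 {
//			if shift >= 64 {
//				return 0, 0, errors.New("proto: integer overflow")
//			}
//			b := buf[n]
//			n++
//			v |= uint64(b&0x7F) << shift
//			if b < 0x80 {
//				return v, n, nil // high bit clear: last byte of the value
//			}
//		}
//		return 0, 0, errors.New("proto: unexpected EOF")
//	}
//
//	// decodeVarint([]byte{0x96, 0x01}) yields (150, 2, nil).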
+ +package internal + +type Int32Slice struct { + orig *[]int32 + state *State +} + +func GetOrigInt32Slice(ms Int32Slice) *[]int32 { + return ms.orig +} + +func GetInt32SliceState(ms Int32Slice) *State { + return ms.state +} + +func NewInt32Slice(orig *[]int32, state *State) Int32Slice { + return Int32Slice{orig: orig, state: state} +} + +func FillTestInt32Slice(tv Int32Slice) { +} + +func GenerateTestInt32Slice() Int32Slice { + state := StateMutable + var orig []int32 = nil + + return Int32Slice{&orig, &state} +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_intslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_intslice.go new file mode 100644 index 00000000000..5f3fe569ba5 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_intslice.go @@ -0,0 +1,34 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package internal + +type IntSlice struct { + orig *[]int + state *State +} + +func GetOrigIntSlice(ms IntSlice) *[]int { + return ms.orig +} + +func GetIntSliceState(ms IntSlice) *State { + return ms.state +} + +func NewIntSlice(orig *[]int, state *State) IntSlice { + return IntSlice{orig: orig, state: state} +} + +func FillTestIntSlice(tv IntSlice) { +} + +func GenerateTestIntSlice() IntSlice { + state := StateMutable + var orig []int = nil + + return IntSlice{&orig, &state} +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/json/enum.go b/vendor/go.opentelemetry.io/collector/pdata/internal/json/enum.go index 4fbe193430e..02dd2b7768c 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/json/enum.go +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/json/enum.go @@ -15,7 +15,7 @@ func ReadEnumValue(iter *jsoniter.Iterator, valueMap map[string]int32) int32 { return iter.ReadInt32() case jsoniter.StringValue: val, ok := valueMap[iter.ReadString()] - // Same behavior with official protbuf JSON decoder, + // Same behavior with official protobuf JSON decoder, // see https://github.com/open-telemetry/opentelemetry-proto-go/pull/81 if !ok { iter.ReportError("ReadEnumValue", "unknown string value") diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/otlp/profiles.go b/vendor/go.opentelemetry.io/collector/pdata/internal/otlp/profiles.go new file mode 100644 index 00000000000..59c23cc672b --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/otlp/profiles.go @@ -0,0 +1,12 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otlp // import "go.opentelemetry.io/collector/pdata/internal/otlp" + +import ( + otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" +) + +// MigrateProfiles implements any translation needed due to deprecation in OTLP profiles protocol. +// Any pprofile.Unmarshaler implementation from OTLP (proto/json) MUST call this, and the gRPC Server implementation. 
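//
// For v1development the migration is currently a no-op, but decoders are
// expected to invoke it unconditionally after decoding; a minimal sketch
// (illustrative only, not part of the vendored file):
//
//	var rps []*otlpprofiles.ResourceProfiles
//	// ... decode rps from the proto or JSON payload ...
//	MigrateProfiles(rps) // applies any deprecation-driven rewrites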
+func MigrateProfiles(_ []*otlpprofiles.ResourceProfiles) {} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_profiles.go b/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_profiles.go index 564c8945862..2230b079c36 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_profiles.go +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_profiles.go @@ -4,8 +4,8 @@ package internal // import "go.opentelemetry.io/collector/pdata/internal" import ( - otlpcollectorprofile "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1experimental" - otlpprofile "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1experimental" + otlpcollectorprofile "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1development" + otlpprofile "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" ) type Profiles struct { diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_int32slice.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_int32slice.go new file mode 100644 index 00000000000..35a40bd079c --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_int32slice.go @@ -0,0 +1,108 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pcommon + +import ( + "go.opentelemetry.io/collector/pdata/internal" +) + +// Int32Slice represents a []int32 slice. +// The instance of Int32Slice can be assigned to multiple objects since it's immutable. +// +// Must use NewInt32Slice function to create new instances. +// Important: zero-initialized instance is not valid for use. +type Int32Slice internal.Int32Slice + +func (ms Int32Slice) getOrig() *[]int32 { + return internal.GetOrigInt32Slice(internal.Int32Slice(ms)) +} + +func (ms Int32Slice) getState() *internal.State { + return internal.GetInt32SliceState(internal.Int32Slice(ms)) +} + +// NewInt32Slice creates a new empty Int32Slice. +func NewInt32Slice() Int32Slice { + orig := []int32(nil) + state := internal.StateMutable + return Int32Slice(internal.NewInt32Slice(&orig, &state)) +} + +// AsRaw returns a copy of the []int32 slice. +func (ms Int32Slice) AsRaw() []int32 { + return copyInt32Slice(nil, *ms.getOrig()) +} + +// FromRaw copies raw []int32 into the slice Int32Slice. +func (ms Int32Slice) FromRaw(val []int32) { + ms.getState().AssertMutable() + *ms.getOrig() = copyInt32Slice(*ms.getOrig(), val) +} + +// Len returns length of the []int32 slice value. +// Equivalent of len(int32Slice). +func (ms Int32Slice) Len() int { + return len(*ms.getOrig()) +} + +// At returns an item from particular index. +// Equivalent of int32Slice[i]. +func (ms Int32Slice) At(i int) int32 { + return (*ms.getOrig())[i] +} + +// SetAt sets int32 item at particular index. +// Equivalent of int32Slice[i] = val +func (ms Int32Slice) SetAt(i int, val int32) { + ms.getState().AssertMutable() + (*ms.getOrig())[i] = val +} + +// EnsureCapacity ensures Int32Slice has at least the specified capacity. +// 1. If the newCap <= cap, then is no change in capacity. +// 2. 
If the newCap > cap, then the slice capacity will be expanded to the provided value which will be equivalent of: +// buf := make([]int32, len(int32Slice), newCap) +// copy(buf, int32Slice) +// int32Slice = buf +func (ms Int32Slice) EnsureCapacity(newCap int) { + ms.getState().AssertMutable() + oldCap := cap(*ms.getOrig()) + if newCap <= oldCap { + return + } + + newOrig := make([]int32, len(*ms.getOrig()), newCap) + copy(newOrig, *ms.getOrig()) + *ms.getOrig() = newOrig +} + +// Append appends extra elements to Int32Slice. +// Equivalent of int32Slice = append(int32Slice, elms...) +func (ms Int32Slice) Append(elms ...int32) { + ms.getState().AssertMutable() + *ms.getOrig() = append(*ms.getOrig(), elms...) +} + +// MoveTo moves all elements from the current slice overriding the destination and +// resetting the current instance to its zero value. +func (ms Int32Slice) MoveTo(dest Int32Slice) { + ms.getState().AssertMutable() + dest.getState().AssertMutable() + *dest.getOrig() = *ms.getOrig() + *ms.getOrig() = nil +} + +// CopyTo copies all elements from the current slice overriding the destination. +func (ms Int32Slice) CopyTo(dest Int32Slice) { + dest.getState().AssertMutable() + *dest.getOrig() = copyInt32Slice(*dest.getOrig(), *ms.getOrig()) +} + +func copyInt32Slice(dst, src []int32) []int32 { + dst = dst[:0] + return append(dst, src...) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_intslice.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_intslice.go new file mode 100644 index 00000000000..1a72889d554 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_intslice.go @@ -0,0 +1,108 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pcommon + +import ( + "go.opentelemetry.io/collector/pdata/internal" +) + +// IntSlice represents a []int slice. +// The instance of IntSlice can be assigned to multiple objects since it's immutable. +// +// Must use NewIntSlice function to create new instances. +// Important: zero-initialized instance is not valid for use. +type IntSlice internal.IntSlice + +func (ms IntSlice) getOrig() *[]int { + return internal.GetOrigIntSlice(internal.IntSlice(ms)) +} + +func (ms IntSlice) getState() *internal.State { + return internal.GetIntSliceState(internal.IntSlice(ms)) +} + +// NewIntSlice creates a new empty IntSlice. +func NewIntSlice() IntSlice { + orig := []int(nil) + state := internal.StateMutable + return IntSlice(internal.NewIntSlice(&orig, &state)) +} + +// AsRaw returns a copy of the []int slice. +func (ms IntSlice) AsRaw() []int { + return copyIntSlice(nil, *ms.getOrig()) +} + +// FromRaw copies raw []int into the slice IntSlice. +func (ms IntSlice) FromRaw(val []int) { + ms.getState().AssertMutable() + *ms.getOrig() = copyIntSlice(*ms.getOrig(), val) +} + +// Len returns length of the []int slice value. +// Equivalent of len(intSlice). +func (ms IntSlice) Len() int { + return len(*ms.getOrig()) +} + +// At returns an item from particular index. +// Equivalent of intSlice[i]. +func (ms IntSlice) At(i int) int { + return (*ms.getOrig())[i] +} + +// SetAt sets int item at particular index. +// Equivalent of intSlice[i] = val +func (ms IntSlice) SetAt(i int, val int) { + ms.getState().AssertMutable() + (*ms.getOrig())[i] = val +} + +// EnsureCapacity ensures IntSlice has at least the specified capacity. +// 1. 
If the newCap <= cap, then is no change in capacity. +// 2. If the newCap > cap, then the slice capacity will be expanded to the provided value which will be equivalent of: +// buf := make([]int, len(intSlice), newCap) +// copy(buf, intSlice) +// intSlice = buf +func (ms IntSlice) EnsureCapacity(newCap int) { + ms.getState().AssertMutable() + oldCap := cap(*ms.getOrig()) + if newCap <= oldCap { + return + } + + newOrig := make([]int, len(*ms.getOrig()), newCap) + copy(newOrig, *ms.getOrig()) + *ms.getOrig() = newOrig +} + +// Append appends extra elements to IntSlice. +// Equivalent of intSlice = append(intSlice, elms...) +func (ms IntSlice) Append(elms ...int) { + ms.getState().AssertMutable() + *ms.getOrig() = append(*ms.getOrig(), elms...) +} + +// MoveTo moves all elements from the current slice overriding the destination and +// resetting the current instance to its zero value. +func (ms IntSlice) MoveTo(dest IntSlice) { + ms.getState().AssertMutable() + dest.getState().AssertMutable() + *dest.getOrig() = *ms.getOrig() + *ms.getOrig() = nil +} + +// CopyTo copies all elements from the current slice overriding the destination. +func (ms IntSlice) CopyTo(dest IntSlice) { + dest.getState().AssertMutable() + *dest.getOrig() = copyIntSlice(*dest.getOrig(), *ms.getOrig()) +} + +func copyIntSlice(dst, src []int) []int { + dst = dst[:0] + return append(dst, src...) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/map.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/map.go index 5bbfab962b0..91b803922a3 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pcommon/map.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/map.go @@ -225,6 +225,15 @@ func (m Map) Range(f func(k string, v Value) bool) { } } +// MoveTo moves all key/values from the current map overriding the destination and +// resetting the current instance to its zero value +func (m Map) MoveTo(dest Map) { + m.getState().AssertMutable() + dest.getState().AssertMutable() + *dest.getOrig() = *m.getOrig() + *m.getOrig() = nil +} + // CopyTo copies all elements from the current map overriding the destination. func (m Map) CopyTo(dest Map) { dest.getState().AssertMutable() diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/timestamp.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/timestamp.go index 5fd1758b1be..666f86f43f6 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pcommon/timestamp.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/timestamp.go @@ -13,11 +13,13 @@ type Timestamp uint64 // NewTimestampFromTime constructs a new Timestamp from the provided time.Time. func NewTimestampFromTime(t time.Time) Timestamp { + // nolint:gosec return Timestamp(uint64(t.UnixNano())) } // AsTime converts this to a time.Time. 
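//
// Together with the new Int32Slice wrapper and Map.MoveTo above, typical use
// looks like this (illustrative sketch only, not part of the vendored file):
//
//	s := NewInt32Slice()
//	s.Append(1, 2, 3)
//	s.SetAt(0, 42) // mutators panic if the backing state is read-only
//	_ = s.AsRaw()  // returns a copy: []int32{42, 2, 3}
//
//	src, dst := NewMap(), NewMap()
//	src.PutStr("k", "v")
//	src.MoveTo(dst) // dst takes ownership of the entries; src is reset to empty
//
//	ts := NewTimestampFromTime(time.Unix(0, 1700000000000000000))
//	_ = ts.AsTime() // round-trips through uint64 nanoseconds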
func (ts Timestamp) AsTime() time.Time { + // nolint:gosec return time.Unix(0, int64(ts)).UTC() } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/value.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/value.go index 77a84e51758..ad2e1c7ae47 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pcommon/value.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/value.go @@ -148,6 +148,7 @@ func (v Value) FromRaw(iv any) error { case int64: v.SetInt(tv) case uint: + // nolint:gosec v.SetInt(int64(tv)) case uint8: v.SetInt(int64(tv)) @@ -156,6 +157,7 @@ func (v Value) FromRaw(iv any) error { case uint32: v.SetInt(int64(tv)) case uint64: + // nolint:gosec v.SetInt(int64(tv)) case float32: v.SetDouble(float64(tv)) @@ -401,7 +403,7 @@ func (v Value) AsString() string { // This allows us to avoid using reflection. func float64AsString(f float64) string { if math.IsInf(f, 0) || math.IsNaN(f) { - return fmt.Sprintf("json: unsupported value: %s", strconv.FormatFloat(f, 'g', -1, 64)) + return "json: unsupported value: " + strconv.FormatFloat(f, 'g', -1, 64) } // Convert as if by ES6 number to string conversion. diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/LICENSE b/vendor/go.opentelemetry.io/collector/pdata/pprofile/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/Makefile b/vendor/go.opentelemetry.io/collector/pdata/pprofile/Makefile new file mode 100644 index 00000000000..ded7a36092d --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/Makefile @@ -0,0 +1 @@ +include ../../Makefile.Common diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/encoding.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/encoding.go new file mode 100644 index 00000000000..ed37cd6f65b --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/encoding.go @@ -0,0 +1,31 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile" + +// MarshalSizer is the interface that groups the basic Marshal and Size methods +type MarshalSizer interface { + Marshaler + Sizer +} + +// Marshaler marshals pprofile.Profiles into bytes. 
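//
// Any type with a matching MarshalProfiles method satisfies it; for example
// (illustrative only; the &ProtoMarshaler{} literal is an assumption here,
// mirroring the equivalent type in ptrace, pmetric, and plog):
//
//	var m Marshaler = &ProtoMarshaler{}
//	buf, err := m.MarshalProfiles(NewProfiles())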
+type Marshaler interface { + // MarshalProfiles the given pprofile.Profiles into bytes. + // If the error is not nil, the returned bytes slice cannot be used. + MarshalProfiles(td Profiles) ([]byte, error) +} + +// Unmarshaler unmarshalls bytes into pprofile.Profiles. +type Unmarshaler interface { + // UnmarshalProfiles the given bytes into pprofile.Profiles. + // If the error is not nil, the returned pprofile.Profiles cannot be used. + UnmarshalProfiles(buf []byte) (Profiles, error) +} + +// Sizer is an optional interface implemented by the Marshaler, +// that calculates the size of a marshaled Profiles. +type Sizer interface { + // ProfilesSize returns the size in bytes of a marshaled Profiles. + ProfilesSize(td Profiles) int +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_attribute.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_attribute.go new file mode 100644 index 00000000000..059b89be727 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_attribute.go @@ -0,0 +1,70 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pprofile + +import ( + "go.opentelemetry.io/collector/pdata/internal" + v1 "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1" + "go.opentelemetry.io/collector/pdata/pcommon" +) + +// Attribute describes an attribute stored in a profile's attribute table. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewAttribute function to create new instances. +// Important: zero-initialized instance is not valid for use. +type Attribute struct { + orig *v1.KeyValue + state *internal.State +} + +func newAttribute(orig *v1.KeyValue, state *internal.State) Attribute { + return Attribute{orig: orig, state: state} +} + +// NewAttribute creates a new empty Attribute. +// +// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, +// OR directly access the member if this is embedded in another struct. +func NewAttribute() Attribute { + state := internal.StateMutable + return newAttribute(&v1.KeyValue{}, &state) +} + +// MoveTo moves all properties from the current struct overriding the destination and +// resetting the current instance to its zero value +func (ms Attribute) MoveTo(dest Attribute) { + ms.state.AssertMutable() + dest.state.AssertMutable() + *dest.orig = *ms.orig + *ms.orig = v1.KeyValue{} +} + +// Key returns the key associated with this Attribute. +func (ms Attribute) Key() string { + return ms.orig.Key +} + +// SetKey replaces the key associated with this Attribute. +func (ms Attribute) SetKey(v string) { + ms.state.AssertMutable() + ms.orig.Key = v +} + +// Value returns the value associated with this Attribute. +func (ms Attribute) Value() pcommon.Value { + return pcommon.Value(internal.NewValue(&ms.orig.Value, ms.state)) +} + +// CopyTo copies all properties from the current struct overriding the destination. 
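//
// For example (illustrative only, not part of the vendored file):
//
//	src := NewAttribute()
//	src.SetKey("service.name")
//	src.Value().SetStr("checkout")
//
//	dst := NewAttribute()
//	src.CopyTo(dst) // dst now holds an independent copy of key and value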
+func (ms Attribute) CopyTo(dest Attribute) { + dest.state.AssertMutable() + dest.SetKey(ms.Key()) + ms.Value().CopyTo(dest.Value()) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_attributetableslice.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_attributetableslice.go new file mode 100644 index 00000000000..731847d000b --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_attributetableslice.go @@ -0,0 +1,136 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pprofile + +import ( + "go.opentelemetry.io/collector/pdata/internal" + v1 "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1" +) + +// AttributeTableSlice logically represents a slice of Attribute. +// +// This is a reference type. If passed by value and callee modifies it, the +// caller will see the modification. +// +// Must use NewAttributeTableSlice function to create new instances. +// Important: zero-initialized instance is not valid for use. +type AttributeTableSlice struct { + orig *[]v1.KeyValue + state *internal.State +} + +func newAttributeTableSlice(orig *[]v1.KeyValue, state *internal.State) AttributeTableSlice { + return AttributeTableSlice{orig: orig, state: state} +} + +// NewAttributeTableSlice creates a AttributeTableSlice with 0 elements. +// Can use "EnsureCapacity" to initialize with a given capacity. +func NewAttributeTableSlice() AttributeTableSlice { + orig := []v1.KeyValue(nil) + state := internal.StateMutable + return newAttributeTableSlice(&orig, &state) +} + +// Len returns the number of elements in the slice. +// +// Returns "0" for a newly instance created with "NewAttributeTableSlice()". +func (es AttributeTableSlice) Len() int { + return len(*es.orig) +} + +// At returns the element at the given index. +// +// This function is used mostly for iterating over all the values in the slice: +// +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// ... // Do something with the element +// } +func (es AttributeTableSlice) At(i int) Attribute { + return newAttribute(&(*es.orig)[i], es.state) +} + +// EnsureCapacity is an operation that ensures the slice has at least the specified capacity. +// 1. If the newCap <= cap then no change in capacity. +// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap. +// +// Here is how a new AttributeTableSlice can be initialized: +// +// es := NewAttributeTableSlice() +// es.EnsureCapacity(4) +// for i := 0; i < 4; i++ { +// e := es.AppendEmpty() +// // Here should set all the values for e. +// } +func (es AttributeTableSlice) EnsureCapacity(newCap int) { + es.state.AssertMutable() + oldCap := cap(*es.orig) + if newCap <= oldCap { + return + } + + newOrig := make([]v1.KeyValue, len(*es.orig), newCap) + copy(newOrig, *es.orig) + *es.orig = newOrig +} + +// AppendEmpty will append to the end of the slice an empty Attribute. +// It returns the newly added Attribute. +func (es AttributeTableSlice) AppendEmpty() Attribute { + es.state.AssertMutable() + *es.orig = append(*es.orig, v1.KeyValue{}) + return es.At(es.Len() - 1) +} + +// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. +// The current slice will be cleared. 
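//
// For example (illustrative only):
//
//	a, b := NewAttributeTableSlice(), NewAttributeTableSlice()
//	a.AppendEmpty().SetKey("host.name")
//	a.MoveAndAppendTo(b) // b gains the element; a is emptied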
+func (es AttributeTableSlice) MoveAndAppendTo(dest AttributeTableSlice) { + es.state.AssertMutable() + dest.state.AssertMutable() + if *dest.orig == nil { + // We can simply move the entire vector and avoid any allocations. + *dest.orig = *es.orig + } else { + *dest.orig = append(*dest.orig, *es.orig...) + } + *es.orig = nil +} + +// RemoveIf calls f sequentially for each element present in the slice. +// If f returns true, the element is removed from the slice. +func (es AttributeTableSlice) RemoveIf(f func(Attribute) bool) { + es.state.AssertMutable() + newLen := 0 + for i := 0; i < len(*es.orig); i++ { + if f(es.At(i)) { + continue + } + if newLen == i { + // Nothing to move, element is at the right place. + newLen++ + continue + } + (*es.orig)[newLen] = (*es.orig)[i] + newLen++ + } + *es.orig = (*es.orig)[:newLen] +} + +// CopyTo copies all elements from the current slice overriding the destination. +func (es AttributeTableSlice) CopyTo(dest AttributeTableSlice) { + dest.state.AssertMutable() + srcLen := es.Len() + destCap := cap(*dest.orig) + if srcLen <= destCap { + (*dest.orig) = (*dest.orig)[:srcLen:destCap] + } else { + (*dest.orig) = make([]v1.KeyValue, srcLen) + } + for i := range *es.orig { + newAttribute(&(*es.orig)[i], es.state).CopyTo(newAttribute(&(*dest.orig)[i], dest.state)) + } +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_attributeunit.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_attributeunit.go new file mode 100644 index 00000000000..a7d98f425d4 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_attributeunit.go @@ -0,0 +1,75 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pprofile + +import ( + "go.opentelemetry.io/collector/pdata/internal" + otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" +) + +// AttributeUnit Represents a mapping between Attribute Keys and Units. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewAttributeUnit function to create new instances. +// Important: zero-initialized instance is not valid for use. +type AttributeUnit struct { + orig *otlpprofiles.AttributeUnit + state *internal.State +} + +func newAttributeUnit(orig *otlpprofiles.AttributeUnit, state *internal.State) AttributeUnit { + return AttributeUnit{orig: orig, state: state} +} + +// NewAttributeUnit creates a new empty AttributeUnit. +// +// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, +// OR directly access the member if this is embedded in another struct. +func NewAttributeUnit() AttributeUnit { + state := internal.StateMutable + return newAttributeUnit(&otlpprofiles.AttributeUnit{}, &state) +} + +// MoveTo moves all properties from the current struct overriding the destination and +// resetting the current instance to its zero value +func (ms AttributeUnit) MoveTo(dest AttributeUnit) { + ms.state.AssertMutable() + dest.state.AssertMutable() + *dest.orig = *ms.orig + *ms.orig = otlpprofiles.AttributeUnit{} +} + +// AttributeKeyStrindex returns the attributekeystrindex associated with this AttributeUnit. 
+func (ms AttributeUnit) AttributeKeyStrindex() int32 { + return ms.orig.AttributeKeyStrindex +} + +// SetAttributeKeyStrindex replaces the attributekeystrindex associated with this AttributeUnit. +func (ms AttributeUnit) SetAttributeKeyStrindex(v int32) { + ms.state.AssertMutable() + ms.orig.AttributeKeyStrindex = v +} + +// UnitStrindex returns the unitstrindex associated with this AttributeUnit. +func (ms AttributeUnit) UnitStrindex() int32 { + return ms.orig.UnitStrindex +} + +// SetUnitStrindex replaces the unitstrindex associated with this AttributeUnit. +func (ms AttributeUnit) SetUnitStrindex(v int32) { + ms.state.AssertMutable() + ms.orig.UnitStrindex = v +} + +// CopyTo copies all properties from the current struct overriding the destination. +func (ms AttributeUnit) CopyTo(dest AttributeUnit) { + dest.state.AssertMutable() + dest.SetAttributeKeyStrindex(ms.AttributeKeyStrindex()) + dest.SetUnitStrindex(ms.UnitStrindex()) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_attributeunitslice.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_attributeunitslice.go new file mode 100644 index 00000000000..f7d1c41a1e1 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_attributeunitslice.go @@ -0,0 +1,152 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pprofile + +import ( + "sort" + + "go.opentelemetry.io/collector/pdata/internal" + otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" +) + +// AttributeUnitSlice logically represents a slice of AttributeUnit. +// +// This is a reference type. If passed by value and callee modifies it, the +// caller will see the modification. +// +// Must use NewAttributeUnitSlice function to create new instances. +// Important: zero-initialized instance is not valid for use. +type AttributeUnitSlice struct { + orig *[]*otlpprofiles.AttributeUnit + state *internal.State +} + +func newAttributeUnitSlice(orig *[]*otlpprofiles.AttributeUnit, state *internal.State) AttributeUnitSlice { + return AttributeUnitSlice{orig: orig, state: state} +} + +// NewAttributeUnitSlice creates a AttributeUnitSlice with 0 elements. +// Can use "EnsureCapacity" to initialize with a given capacity. +func NewAttributeUnitSlice() AttributeUnitSlice { + orig := []*otlpprofiles.AttributeUnit(nil) + state := internal.StateMutable + return newAttributeUnitSlice(&orig, &state) +} + +// Len returns the number of elements in the slice. +// +// Returns "0" for a newly instance created with "NewAttributeUnitSlice()". +func (es AttributeUnitSlice) Len() int { + return len(*es.orig) +} + +// At returns the element at the given index. +// +// This function is used mostly for iterating over all the values in the slice: +// +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// ... // Do something with the element +// } +func (es AttributeUnitSlice) At(i int) AttributeUnit { + return newAttributeUnit((*es.orig)[i], es.state) +} + +// EnsureCapacity is an operation that ensures the slice has at least the specified capacity. +// 1. If the newCap <= cap then no change in capacity. +// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap. 
+// +// Here is how a new AttributeUnitSlice can be initialized: +// +// es := NewAttributeUnitSlice() +// es.EnsureCapacity(4) +// for i := 0; i < 4; i++ { +// e := es.AppendEmpty() +// // Here should set all the values for e. +// } +func (es AttributeUnitSlice) EnsureCapacity(newCap int) { + es.state.AssertMutable() + oldCap := cap(*es.orig) + if newCap <= oldCap { + return + } + + newOrig := make([]*otlpprofiles.AttributeUnit, len(*es.orig), newCap) + copy(newOrig, *es.orig) + *es.orig = newOrig +} + +// AppendEmpty will append to the end of the slice an empty AttributeUnit. +// It returns the newly added AttributeUnit. +func (es AttributeUnitSlice) AppendEmpty() AttributeUnit { + es.state.AssertMutable() + *es.orig = append(*es.orig, &otlpprofiles.AttributeUnit{}) + return es.At(es.Len() - 1) +} + +// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. +// The current slice will be cleared. +func (es AttributeUnitSlice) MoveAndAppendTo(dest AttributeUnitSlice) { + es.state.AssertMutable() + dest.state.AssertMutable() + if *dest.orig == nil { + // We can simply move the entire vector and avoid any allocations. + *dest.orig = *es.orig + } else { + *dest.orig = append(*dest.orig, *es.orig...) + } + *es.orig = nil +} + +// RemoveIf calls f sequentially for each element present in the slice. +// If f returns true, the element is removed from the slice. +func (es AttributeUnitSlice) RemoveIf(f func(AttributeUnit) bool) { + es.state.AssertMutable() + newLen := 0 + for i := 0; i < len(*es.orig); i++ { + if f(es.At(i)) { + continue + } + if newLen == i { + // Nothing to move, element is at the right place. + newLen++ + continue + } + (*es.orig)[newLen] = (*es.orig)[i] + newLen++ + } + *es.orig = (*es.orig)[:newLen] +} + +// CopyTo copies all elements from the current slice overriding the destination. +func (es AttributeUnitSlice) CopyTo(dest AttributeUnitSlice) { + dest.state.AssertMutable() + srcLen := es.Len() + destCap := cap(*dest.orig) + if srcLen <= destCap { + (*dest.orig) = (*dest.orig)[:srcLen:destCap] + for i := range *es.orig { + newAttributeUnit((*es.orig)[i], es.state).CopyTo(newAttributeUnit((*dest.orig)[i], dest.state)) + } + return + } + origs := make([]otlpprofiles.AttributeUnit, srcLen) + wrappers := make([]*otlpprofiles.AttributeUnit, srcLen) + for i := range *es.orig { + wrappers[i] = &origs[i] + newAttributeUnit((*es.orig)[i], es.state).CopyTo(newAttributeUnit(wrappers[i], dest.state)) + } + *dest.orig = wrappers +} + +// Sort sorts the AttributeUnit elements within AttributeUnitSlice given the +// provided less function so that two instances of AttributeUnitSlice +// can be compared. +func (es AttributeUnitSlice) Sort(less func(a, b AttributeUnit) bool) { + es.state.AssertMutable() + sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) }) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_function.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_function.go new file mode 100644 index 00000000000..21fe358de8f --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_function.go @@ -0,0 +1,99 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". 
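//
// All of the generated slices in this package share this shape, so one sketch
// covers typical use (illustrative only, AttributeUnitSlice standing in for
// any of them):
//
//	us := NewAttributeUnitSlice()
//	us.EnsureCapacity(3) // optional; avoids regrowth in the loop below
//	for i := 0; i < 3; i++ {
//		u := us.AppendEmpty()
//		u.SetAttributeKeyStrindex(int32(i))
//		u.SetUnitStrindex(int32(i + 10))
//	}
//	// RemoveIf compacts in place: drop every unit whose key index is 1.
//	us.RemoveIf(func(u AttributeUnit) bool { return u.AttributeKeyStrindex() == 1 })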
+ +package pprofile + +import ( + "go.opentelemetry.io/collector/pdata/internal" + otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" +) + +// Function describes a function, including its human-readable name, system name, source file, and starting line number in the source. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewFunction function to create new instances. +// Important: zero-initialized instance is not valid for use. +type Function struct { + orig *otlpprofiles.Function + state *internal.State +} + +func newFunction(orig *otlpprofiles.Function, state *internal.State) Function { + return Function{orig: orig, state: state} +} + +// NewFunction creates a new empty Function. +// +// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, +// OR directly access the member if this is embedded in another struct. +func NewFunction() Function { + state := internal.StateMutable + return newFunction(&otlpprofiles.Function{}, &state) +} + +// MoveTo moves all properties from the current struct overriding the destination and +// resetting the current instance to its zero value +func (ms Function) MoveTo(dest Function) { + ms.state.AssertMutable() + dest.state.AssertMutable() + *dest.orig = *ms.orig + *ms.orig = otlpprofiles.Function{} +} + +// NameStrindex returns the namestrindex associated with this Function. +func (ms Function) NameStrindex() int32 { + return ms.orig.NameStrindex +} + +// SetNameStrindex replaces the namestrindex associated with this Function. +func (ms Function) SetNameStrindex(v int32) { + ms.state.AssertMutable() + ms.orig.NameStrindex = v +} + +// SystemNameStrindex returns the systemnamestrindex associated with this Function. +func (ms Function) SystemNameStrindex() int32 { + return ms.orig.SystemNameStrindex +} + +// SetSystemNameStrindex replaces the systemnamestrindex associated with this Function. +func (ms Function) SetSystemNameStrindex(v int32) { + ms.state.AssertMutable() + ms.orig.SystemNameStrindex = v +} + +// FilenameStrindex returns the filenamestrindex associated with this Function. +func (ms Function) FilenameStrindex() int32 { + return ms.orig.FilenameStrindex +} + +// SetFilenameStrindex replaces the filenamestrindex associated with this Function. +func (ms Function) SetFilenameStrindex(v int32) { + ms.state.AssertMutable() + ms.orig.FilenameStrindex = v +} + +// StartLine returns the startline associated with this Function. +func (ms Function) StartLine() int64 { + return ms.orig.StartLine +} + +// SetStartLine replaces the startline associated with this Function. +func (ms Function) SetStartLine(v int64) { + ms.state.AssertMutable() + ms.orig.StartLine = v +} + +// CopyTo copies all properties from the current struct overriding the destination. 
+func (ms Function) CopyTo(dest Function) { + dest.state.AssertMutable() + dest.SetNameStrindex(ms.NameStrindex()) + dest.SetSystemNameStrindex(ms.SystemNameStrindex()) + dest.SetFilenameStrindex(ms.FilenameStrindex()) + dest.SetStartLine(ms.StartLine()) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_functionslice.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_functionslice.go new file mode 100644 index 00000000000..51dd4f2aca1 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_functionslice.go @@ -0,0 +1,152 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pprofile + +import ( + "sort" + + "go.opentelemetry.io/collector/pdata/internal" + otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" +) + +// FunctionSlice logically represents a slice of Function. +// +// This is a reference type. If passed by value and callee modifies it, the +// caller will see the modification. +// +// Must use NewFunctionSlice function to create new instances. +// Important: zero-initialized instance is not valid for use. +type FunctionSlice struct { + orig *[]*otlpprofiles.Function + state *internal.State +} + +func newFunctionSlice(orig *[]*otlpprofiles.Function, state *internal.State) FunctionSlice { + return FunctionSlice{orig: orig, state: state} +} + +// NewFunctionSlice creates a FunctionSlice with 0 elements. +// Can use "EnsureCapacity" to initialize with a given capacity. +func NewFunctionSlice() FunctionSlice { + orig := []*otlpprofiles.Function(nil) + state := internal.StateMutable + return newFunctionSlice(&orig, &state) +} + +// Len returns the number of elements in the slice. +// +// Returns "0" for a newly instance created with "NewFunctionSlice()". +func (es FunctionSlice) Len() int { + return len(*es.orig) +} + +// At returns the element at the given index. +// +// This function is used mostly for iterating over all the values in the slice: +// +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// ... // Do something with the element +// } +func (es FunctionSlice) At(i int) Function { + return newFunction((*es.orig)[i], es.state) +} + +// EnsureCapacity is an operation that ensures the slice has at least the specified capacity. +// 1. If the newCap <= cap then no change in capacity. +// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap. +// +// Here is how a new FunctionSlice can be initialized: +// +// es := NewFunctionSlice() +// es.EnsureCapacity(4) +// for i := 0; i < 4; i++ { +// e := es.AppendEmpty() +// // Here should set all the values for e. +// } +func (es FunctionSlice) EnsureCapacity(newCap int) { + es.state.AssertMutable() + oldCap := cap(*es.orig) + if newCap <= oldCap { + return + } + + newOrig := make([]*otlpprofiles.Function, len(*es.orig), newCap) + copy(newOrig, *es.orig) + *es.orig = newOrig +} + +// AppendEmpty will append to the end of the slice an empty Function. +// It returns the newly added Function. +func (es FunctionSlice) AppendEmpty() Function { + es.state.AssertMutable() + *es.orig = append(*es.orig, &otlpprofiles.Function{}) + return es.At(es.Len() - 1) +} + +// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. +// The current slice will be cleared. 
+func (es FunctionSlice) MoveAndAppendTo(dest FunctionSlice) { + es.state.AssertMutable() + dest.state.AssertMutable() + if *dest.orig == nil { + // We can simply move the entire vector and avoid any allocations. + *dest.orig = *es.orig + } else { + *dest.orig = append(*dest.orig, *es.orig...) + } + *es.orig = nil +} + +// RemoveIf calls f sequentially for each element present in the slice. +// If f returns true, the element is removed from the slice. +func (es FunctionSlice) RemoveIf(f func(Function) bool) { + es.state.AssertMutable() + newLen := 0 + for i := 0; i < len(*es.orig); i++ { + if f(es.At(i)) { + continue + } + if newLen == i { + // Nothing to move, element is at the right place. + newLen++ + continue + } + (*es.orig)[newLen] = (*es.orig)[i] + newLen++ + } + *es.orig = (*es.orig)[:newLen] +} + +// CopyTo copies all elements from the current slice overriding the destination. +func (es FunctionSlice) CopyTo(dest FunctionSlice) { + dest.state.AssertMutable() + srcLen := es.Len() + destCap := cap(*dest.orig) + if srcLen <= destCap { + (*dest.orig) = (*dest.orig)[:srcLen:destCap] + for i := range *es.orig { + newFunction((*es.orig)[i], es.state).CopyTo(newFunction((*dest.orig)[i], dest.state)) + } + return + } + origs := make([]otlpprofiles.Function, srcLen) + wrappers := make([]*otlpprofiles.Function, srcLen) + for i := range *es.orig { + wrappers[i] = &origs[i] + newFunction((*es.orig)[i], es.state).CopyTo(newFunction(wrappers[i], dest.state)) + } + *dest.orig = wrappers +} + +// Sort sorts the Function elements within FunctionSlice given the +// provided less function so that two instances of FunctionSlice +// can be compared. +func (es FunctionSlice) Sort(less func(a, b Function) bool) { + es.state.AssertMutable() + sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) }) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_line.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_line.go new file mode 100644 index 00000000000..daae7435237 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_line.go @@ -0,0 +1,87 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pprofile + +import ( + "go.opentelemetry.io/collector/pdata/internal" + otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" +) + +// Line details a specific line in a source code, linked to a function. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewLine function to create new instances. +// Important: zero-initialized instance is not valid for use. +type Line struct { + orig *otlpprofiles.Line + state *internal.State +} + +func newLine(orig *otlpprofiles.Line, state *internal.State) Line { + return Line{orig: orig, state: state} +} + +// NewLine creates a new empty Line. +// +// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, +// OR directly access the member if this is embedded in another struct. 
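//
// For instance (illustrative only):
//
//	ln := NewLine()
//	ln.SetFunctionIndex(0) // index into the profile's function table
//	ln.SetLine(42)
//	ln.SetColumn(7)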
+func NewLine() Line { + state := internal.StateMutable + return newLine(&otlpprofiles.Line{}, &state) +} + +// MoveTo moves all properties from the current struct overriding the destination and +// resetting the current instance to its zero value +func (ms Line) MoveTo(dest Line) { + ms.state.AssertMutable() + dest.state.AssertMutable() + *dest.orig = *ms.orig + *ms.orig = otlpprofiles.Line{} +} + +// FunctionIndex returns the functionindex associated with this Line. +func (ms Line) FunctionIndex() int32 { + return ms.orig.FunctionIndex +} + +// SetFunctionIndex replaces the functionindex associated with this Line. +func (ms Line) SetFunctionIndex(v int32) { + ms.state.AssertMutable() + ms.orig.FunctionIndex = v +} + +// Line returns the line associated with this Line. +func (ms Line) Line() int64 { + return ms.orig.Line +} + +// SetLine replaces the line associated with this Line. +func (ms Line) SetLine(v int64) { + ms.state.AssertMutable() + ms.orig.Line = v +} + +// Column returns the column associated with this Line. +func (ms Line) Column() int64 { + return ms.orig.Column +} + +// SetColumn replaces the column associated with this Line. +func (ms Line) SetColumn(v int64) { + ms.state.AssertMutable() + ms.orig.Column = v +} + +// CopyTo copies all properties from the current struct overriding the destination. +func (ms Line) CopyTo(dest Line) { + dest.state.AssertMutable() + dest.SetFunctionIndex(ms.FunctionIndex()) + dest.SetLine(ms.Line()) + dest.SetColumn(ms.Column()) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_lineslice.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_lineslice.go new file mode 100644 index 00000000000..d5b94978bb5 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_lineslice.go @@ -0,0 +1,152 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pprofile + +import ( + "sort" + + "go.opentelemetry.io/collector/pdata/internal" + otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" +) + +// LineSlice logically represents a slice of Line. +// +// This is a reference type. If passed by value and callee modifies it, the +// caller will see the modification. +// +// Must use NewLineSlice function to create new instances. +// Important: zero-initialized instance is not valid for use. +type LineSlice struct { + orig *[]*otlpprofiles.Line + state *internal.State +} + +func newLineSlice(orig *[]*otlpprofiles.Line, state *internal.State) LineSlice { + return LineSlice{orig: orig, state: state} +} + +// NewLineSlice creates a LineSlice with 0 elements. +// Can use "EnsureCapacity" to initialize with a given capacity. +func NewLineSlice() LineSlice { + orig := []*otlpprofiles.Line(nil) + state := internal.StateMutable + return newLineSlice(&orig, &state) +} + +// Len returns the number of elements in the slice. +// +// Returns "0" for a newly instance created with "NewLineSlice()". +func (es LineSlice) Len() int { + return len(*es.orig) +} + +// At returns the element at the given index. +// +// This function is used mostly for iterating over all the values in the slice: +// +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// ... 
// Do something with the element +// } +func (es LineSlice) At(i int) Line { + return newLine((*es.orig)[i], es.state) +} + +// EnsureCapacity is an operation that ensures the slice has at least the specified capacity. +// 1. If the newCap <= cap then no change in capacity. +// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap. +// +// Here is how a new LineSlice can be initialized: +// +// es := NewLineSlice() +// es.EnsureCapacity(4) +// for i := 0; i < 4; i++ { +// e := es.AppendEmpty() +// // Here should set all the values for e. +// } +func (es LineSlice) EnsureCapacity(newCap int) { + es.state.AssertMutable() + oldCap := cap(*es.orig) + if newCap <= oldCap { + return + } + + newOrig := make([]*otlpprofiles.Line, len(*es.orig), newCap) + copy(newOrig, *es.orig) + *es.orig = newOrig +} + +// AppendEmpty will append to the end of the slice an empty Line. +// It returns the newly added Line. +func (es LineSlice) AppendEmpty() Line { + es.state.AssertMutable() + *es.orig = append(*es.orig, &otlpprofiles.Line{}) + return es.At(es.Len() - 1) +} + +// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. +// The current slice will be cleared. +func (es LineSlice) MoveAndAppendTo(dest LineSlice) { + es.state.AssertMutable() + dest.state.AssertMutable() + if *dest.orig == nil { + // We can simply move the entire vector and avoid any allocations. + *dest.orig = *es.orig + } else { + *dest.orig = append(*dest.orig, *es.orig...) + } + *es.orig = nil +} + +// RemoveIf calls f sequentially for each element present in the slice. +// If f returns true, the element is removed from the slice. +func (es LineSlice) RemoveIf(f func(Line) bool) { + es.state.AssertMutable() + newLen := 0 + for i := 0; i < len(*es.orig); i++ { + if f(es.At(i)) { + continue + } + if newLen == i { + // Nothing to move, element is at the right place. + newLen++ + continue + } + (*es.orig)[newLen] = (*es.orig)[i] + newLen++ + } + *es.orig = (*es.orig)[:newLen] +} + +// CopyTo copies all elements from the current slice overriding the destination. +func (es LineSlice) CopyTo(dest LineSlice) { + dest.state.AssertMutable() + srcLen := es.Len() + destCap := cap(*dest.orig) + if srcLen <= destCap { + (*dest.orig) = (*dest.orig)[:srcLen:destCap] + for i := range *es.orig { + newLine((*es.orig)[i], es.state).CopyTo(newLine((*dest.orig)[i], dest.state)) + } + return + } + origs := make([]otlpprofiles.Line, srcLen) + wrappers := make([]*otlpprofiles.Line, srcLen) + for i := range *es.orig { + wrappers[i] = &origs[i] + newLine((*es.orig)[i], es.state).CopyTo(newLine(wrappers[i], dest.state)) + } + *dest.orig = wrappers +} + +// Sort sorts the Line elements within LineSlice given the +// provided less function so that two instances of LineSlice +// can be compared. +func (es LineSlice) Sort(less func(a, b Line) bool) { + es.state.AssertMutable() + sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) }) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_link.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_link.go new file mode 100644 index 00000000000..22966f15545 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_link.go @@ -0,0 +1,77 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". 
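All of the generated slice wrappers in this package share the shape shown above, so one worked example covers the pattern. Below is a minimal, self-contained sketch against the vendored FunctionSlice API; the Strindex values are hypothetical indices into a string table that a real profile would populate separately.

    package main

    import (
        "fmt"

        "go.opentelemetry.io/collector/pdata/pprofile"
    )

    func main() {
        fs := pprofile.NewFunctionSlice()
        fs.EnsureCapacity(2) // pre-size once instead of growing on each append

        // AppendEmpty returns the new element; populate it through the setters.
        f := fs.AppendEmpty()
        f.SetNameStrindex(1)     // hypothetical index of the function name
        f.SetFilenameStrindex(2) // hypothetical index of the file name
        f.SetStartLine(42)

        fs.AppendEmpty().SetNameStrindex(3)

        // RemoveIf compacts the slice in place.
        fs.RemoveIf(func(fn pprofile.Function) bool { return fn.NameStrindex() == 0 })

        // Sort takes a less function, mirroring sort.SliceStable.
        fs.Sort(func(a, b pprofile.Function) bool { return a.NameStrindex() < b.NameStrindex() })

        fmt.Println(fs.Len()) // 2
    }

Every mutator first calls AssertMutable on the shared state, so all of these operations panic once the enclosing message has been marked read-only.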
+ +package pprofile + +import ( + "go.opentelemetry.io/collector/pdata/internal" + "go.opentelemetry.io/collector/pdata/internal/data" + otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" + "go.opentelemetry.io/collector/pdata/pcommon" +) + +// Link represents a pointer from a profile Sample to a trace Span. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewLink function to create new instances. +// Important: zero-initialized instance is not valid for use. +type Link struct { + orig *otlpprofiles.Link + state *internal.State +} + +func newLink(orig *otlpprofiles.Link, state *internal.State) Link { + return Link{orig: orig, state: state} +} + +// NewLink creates a new empty Link. +// +// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, +// OR directly access the member if this is embedded in another struct. +func NewLink() Link { + state := internal.StateMutable + return newLink(&otlpprofiles.Link{}, &state) +} + +// MoveTo moves all properties from the current struct overriding the destination and +// resetting the current instance to its zero value +func (ms Link) MoveTo(dest Link) { + ms.state.AssertMutable() + dest.state.AssertMutable() + *dest.orig = *ms.orig + *ms.orig = otlpprofiles.Link{} +} + +// TraceID returns the traceid associated with this Link. +func (ms Link) TraceID() pcommon.TraceID { + return pcommon.TraceID(ms.orig.TraceId) +} + +// SetTraceID replaces the traceid associated with this Link. +func (ms Link) SetTraceID(v pcommon.TraceID) { + ms.state.AssertMutable() + ms.orig.TraceId = data.TraceID(v) +} + +// SpanID returns the spanid associated with this Link. +func (ms Link) SpanID() pcommon.SpanID { + return pcommon.SpanID(ms.orig.SpanId) +} + +// SetSpanID replaces the spanid associated with this Link. +func (ms Link) SetSpanID(v pcommon.SpanID) { + ms.state.AssertMutable() + ms.orig.SpanId = data.SpanID(v) +} + +// CopyTo copies all properties from the current struct overriding the destination. +func (ms Link) CopyTo(dest Link) { + dest.state.AssertMutable() + dest.SetTraceID(ms.TraceID()) + dest.SetSpanID(ms.SpanID()) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_linkslice.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_linkslice.go new file mode 100644 index 00000000000..b2a7a1ab211 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_linkslice.go @@ -0,0 +1,152 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pprofile + +import ( + "sort" + + "go.opentelemetry.io/collector/pdata/internal" + otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" +) + +// LinkSlice logically represents a slice of Link. +// +// This is a reference type. If passed by value and callee modifies it, the +// caller will see the modification. +// +// Must use NewLinkSlice function to create new instances. +// Important: zero-initialized instance is not valid for use. 
+type LinkSlice struct { + orig *[]*otlpprofiles.Link + state *internal.State +} + +func newLinkSlice(orig *[]*otlpprofiles.Link, state *internal.State) LinkSlice { + return LinkSlice{orig: orig, state: state} +} + +// NewLinkSlice creates a LinkSlice with 0 elements. +// Can use "EnsureCapacity" to initialize with a given capacity. +func NewLinkSlice() LinkSlice { + orig := []*otlpprofiles.Link(nil) + state := internal.StateMutable + return newLinkSlice(&orig, &state) +} + +// Len returns the number of elements in the slice. +// +// Returns "0" for a newly instance created with "NewLinkSlice()". +func (es LinkSlice) Len() int { + return len(*es.orig) +} + +// At returns the element at the given index. +// +// This function is used mostly for iterating over all the values in the slice: +// +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// ... // Do something with the element +// } +func (es LinkSlice) At(i int) Link { + return newLink((*es.orig)[i], es.state) +} + +// EnsureCapacity is an operation that ensures the slice has at least the specified capacity. +// 1. If the newCap <= cap then no change in capacity. +// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap. +// +// Here is how a new LinkSlice can be initialized: +// +// es := NewLinkSlice() +// es.EnsureCapacity(4) +// for i := 0; i < 4; i++ { +// e := es.AppendEmpty() +// // Here should set all the values for e. +// } +func (es LinkSlice) EnsureCapacity(newCap int) { + es.state.AssertMutable() + oldCap := cap(*es.orig) + if newCap <= oldCap { + return + } + + newOrig := make([]*otlpprofiles.Link, len(*es.orig), newCap) + copy(newOrig, *es.orig) + *es.orig = newOrig +} + +// AppendEmpty will append to the end of the slice an empty Link. +// It returns the newly added Link. +func (es LinkSlice) AppendEmpty() Link { + es.state.AssertMutable() + *es.orig = append(*es.orig, &otlpprofiles.Link{}) + return es.At(es.Len() - 1) +} + +// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. +// The current slice will be cleared. +func (es LinkSlice) MoveAndAppendTo(dest LinkSlice) { + es.state.AssertMutable() + dest.state.AssertMutable() + if *dest.orig == nil { + // We can simply move the entire vector and avoid any allocations. + *dest.orig = *es.orig + } else { + *dest.orig = append(*dest.orig, *es.orig...) + } + *es.orig = nil +} + +// RemoveIf calls f sequentially for each element present in the slice. +// If f returns true, the element is removed from the slice. +func (es LinkSlice) RemoveIf(f func(Link) bool) { + es.state.AssertMutable() + newLen := 0 + for i := 0; i < len(*es.orig); i++ { + if f(es.At(i)) { + continue + } + if newLen == i { + // Nothing to move, element is at the right place. + newLen++ + continue + } + (*es.orig)[newLen] = (*es.orig)[i] + newLen++ + } + *es.orig = (*es.orig)[:newLen] +} + +// CopyTo copies all elements from the current slice overriding the destination. 
+func (es LinkSlice) CopyTo(dest LinkSlice) { + dest.state.AssertMutable() + srcLen := es.Len() + destCap := cap(*dest.orig) + if srcLen <= destCap { + (*dest.orig) = (*dest.orig)[:srcLen:destCap] + for i := range *es.orig { + newLink((*es.orig)[i], es.state).CopyTo(newLink((*dest.orig)[i], dest.state)) + } + return + } + origs := make([]otlpprofiles.Link, srcLen) + wrappers := make([]*otlpprofiles.Link, srcLen) + for i := range *es.orig { + wrappers[i] = &origs[i] + newLink((*es.orig)[i], es.state).CopyTo(newLink(wrappers[i], dest.state)) + } + *dest.orig = wrappers +} + +// Sort sorts the Link elements within LinkSlice given the +// provided less function so that two instances of LinkSlice +// can be compared. +func (es LinkSlice) Sort(less func(a, b Link) bool) { + es.state.AssertMutable() + sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) }) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_location.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_location.go new file mode 100644 index 00000000000..8b968f973b9 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_location.go @@ -0,0 +1,115 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pprofile + +import ( + "go.opentelemetry.io/collector/pdata/internal" + otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" + "go.opentelemetry.io/collector/pdata/pcommon" +) + +// Location describes function and line table debug information. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewLocation function to create new instances. +// Important: zero-initialized instance is not valid for use. +type Location struct { + orig *otlpprofiles.Location + state *internal.State +} + +func newLocation(orig *otlpprofiles.Location, state *internal.State) Location { + return Location{orig: orig, state: state} +} + +// NewLocation creates a new empty Location. +// +// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, +// OR directly access the member if this is embedded in another struct. +func NewLocation() Location { + state := internal.StateMutable + return newLocation(&otlpprofiles.Location{}, &state) +} + +// MoveTo moves all properties from the current struct overriding the destination and +// resetting the current instance to its zero value +func (ms Location) MoveTo(dest Location) { + ms.state.AssertMutable() + dest.state.AssertMutable() + *dest.orig = *ms.orig + *ms.orig = otlpprofiles.Location{} +} + +// MappingIndex returns the mappingindex associated with this Location. +func (ms Location) MappingIndex() int32 { + return ms.orig.GetMappingIndex() +} + +// HasMappingIndex returns true if the Location contains a +// MappingIndex value, false otherwise. +func (ms Location) HasMappingIndex() bool { + return ms.orig.MappingIndex_ != nil +} + +// SetMappingIndex replaces the mappingindex associated with this Location. +func (ms Location) SetMappingIndex(v int32) { + ms.state.AssertMutable() + ms.orig.MappingIndex_ = &otlpprofiles.Location_MappingIndex{MappingIndex: v} +} + +// RemoveMappingIndex removes the mappingindex associated with this Location. 
+func (ms Location) RemoveMappingIndex() { + ms.state.AssertMutable() + ms.orig.MappingIndex_ = nil +} + +// Address returns the address associated with this Location. +func (ms Location) Address() uint64 { + return ms.orig.Address +} + +// SetAddress replaces the address associated with this Location. +func (ms Location) SetAddress(v uint64) { + ms.state.AssertMutable() + ms.orig.Address = v +} + +// Line returns the Line associated with this Location. +func (ms Location) Line() LineSlice { + return newLineSlice(&ms.orig.Line, ms.state) +} + +// IsFolded returns the isfolded associated with this Location. +func (ms Location) IsFolded() bool { + return ms.orig.IsFolded +} + +// SetIsFolded replaces the isfolded associated with this Location. +func (ms Location) SetIsFolded(v bool) { + ms.state.AssertMutable() + ms.orig.IsFolded = v +} + +// AttributeIndices returns the AttributeIndices associated with this Location. +func (ms Location) AttributeIndices() pcommon.Int32Slice { + return pcommon.Int32Slice(internal.NewInt32Slice(&ms.orig.AttributeIndices, ms.state)) +} + +// CopyTo copies all properties from the current struct overriding the destination. +func (ms Location) CopyTo(dest Location) { + dest.state.AssertMutable() + if ms.HasMappingIndex() { + dest.SetMappingIndex(ms.MappingIndex()) + } + + dest.SetAddress(ms.Address()) + ms.Line().CopyTo(dest.Line()) + dest.SetIsFolded(ms.IsFolded()) + ms.AttributeIndices().CopyTo(dest.AttributeIndices()) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_locationslice.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_locationslice.go new file mode 100644 index 00000000000..42039af4ebe --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_locationslice.go @@ -0,0 +1,152 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pprofile + +import ( + "sort" + + "go.opentelemetry.io/collector/pdata/internal" + otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" +) + +// LocationSlice logically represents a slice of Location. +// +// This is a reference type. If passed by value and callee modifies it, the +// caller will see the modification. +// +// Must use NewLocationSlice function to create new instances. +// Important: zero-initialized instance is not valid for use. +type LocationSlice struct { + orig *[]*otlpprofiles.Location + state *internal.State +} + +func newLocationSlice(orig *[]*otlpprofiles.Location, state *internal.State) LocationSlice { + return LocationSlice{orig: orig, state: state} +} + +// NewLocationSlice creates a LocationSlice with 0 elements. +// Can use "EnsureCapacity" to initialize with a given capacity. +func NewLocationSlice() LocationSlice { + orig := []*otlpprofiles.Location(nil) + state := internal.StateMutable + return newLocationSlice(&orig, &state) +} + +// Len returns the number of elements in the slice. +// +// Returns "0" for a newly instance created with "NewLocationSlice()". +func (es LocationSlice) Len() int { + return len(*es.orig) +} + +// At returns the element at the given index. +// +// This function is used mostly for iterating over all the values in the slice: +// +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// ... 
// Do something with the element +// } +func (es LocationSlice) At(i int) Location { + return newLocation((*es.orig)[i], es.state) +} + +// EnsureCapacity is an operation that ensures the slice has at least the specified capacity. +// 1. If the newCap <= cap then no change in capacity. +// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap. +// +// Here is how a new LocationSlice can be initialized: +// +// es := NewLocationSlice() +// es.EnsureCapacity(4) +// for i := 0; i < 4; i++ { +// e := es.AppendEmpty() +// // Here should set all the values for e. +// } +func (es LocationSlice) EnsureCapacity(newCap int) { + es.state.AssertMutable() + oldCap := cap(*es.orig) + if newCap <= oldCap { + return + } + + newOrig := make([]*otlpprofiles.Location, len(*es.orig), newCap) + copy(newOrig, *es.orig) + *es.orig = newOrig +} + +// AppendEmpty will append to the end of the slice an empty Location. +// It returns the newly added Location. +func (es LocationSlice) AppendEmpty() Location { + es.state.AssertMutable() + *es.orig = append(*es.orig, &otlpprofiles.Location{}) + return es.At(es.Len() - 1) +} + +// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. +// The current slice will be cleared. +func (es LocationSlice) MoveAndAppendTo(dest LocationSlice) { + es.state.AssertMutable() + dest.state.AssertMutable() + if *dest.orig == nil { + // We can simply move the entire vector and avoid any allocations. + *dest.orig = *es.orig + } else { + *dest.orig = append(*dest.orig, *es.orig...) + } + *es.orig = nil +} + +// RemoveIf calls f sequentially for each element present in the slice. +// If f returns true, the element is removed from the slice. +func (es LocationSlice) RemoveIf(f func(Location) bool) { + es.state.AssertMutable() + newLen := 0 + for i := 0; i < len(*es.orig); i++ { + if f(es.At(i)) { + continue + } + if newLen == i { + // Nothing to move, element is at the right place. + newLen++ + continue + } + (*es.orig)[newLen] = (*es.orig)[i] + newLen++ + } + *es.orig = (*es.orig)[:newLen] +} + +// CopyTo copies all elements from the current slice overriding the destination. +func (es LocationSlice) CopyTo(dest LocationSlice) { + dest.state.AssertMutable() + srcLen := es.Len() + destCap := cap(*dest.orig) + if srcLen <= destCap { + (*dest.orig) = (*dest.orig)[:srcLen:destCap] + for i := range *es.orig { + newLocation((*es.orig)[i], es.state).CopyTo(newLocation((*dest.orig)[i], dest.state)) + } + return + } + origs := make([]otlpprofiles.Location, srcLen) + wrappers := make([]*otlpprofiles.Location, srcLen) + for i := range *es.orig { + wrappers[i] = &origs[i] + newLocation((*es.orig)[i], es.state).CopyTo(newLocation(wrappers[i], dest.state)) + } + *dest.orig = wrappers +} + +// Sort sorts the Location elements within LocationSlice given the +// provided less function so that two instances of LocationSlice +// can be compared. 
+func (es LocationSlice) Sort(less func(a, b Location) bool) { + es.state.AssertMutable() + sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) }) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_mapping.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_mapping.go new file mode 100644 index 00000000000..65ef57fa4db --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_mapping.go @@ -0,0 +1,154 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pprofile + +import ( + "go.opentelemetry.io/collector/pdata/internal" + otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" + "go.opentelemetry.io/collector/pdata/pcommon" +) + +// Mapping describes the mapping of a binary in memory, including its address range, file offset, and metadata like build ID +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewMapping function to create new instances. +// Important: zero-initialized instance is not valid for use. +type Mapping struct { + orig *otlpprofiles.Mapping + state *internal.State +} + +func newMapping(orig *otlpprofiles.Mapping, state *internal.State) Mapping { + return Mapping{orig: orig, state: state} +} + +// NewMapping creates a new empty Mapping. +// +// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, +// OR directly access the member if this is embedded in another struct. +func NewMapping() Mapping { + state := internal.StateMutable + return newMapping(&otlpprofiles.Mapping{}, &state) +} + +// MoveTo moves all properties from the current struct overriding the destination and +// resetting the current instance to its zero value +func (ms Mapping) MoveTo(dest Mapping) { + ms.state.AssertMutable() + dest.state.AssertMutable() + *dest.orig = *ms.orig + *ms.orig = otlpprofiles.Mapping{} +} + +// MemoryStart returns the memorystart associated with this Mapping. +func (ms Mapping) MemoryStart() uint64 { + return ms.orig.MemoryStart +} + +// SetMemoryStart replaces the memorystart associated with this Mapping. +func (ms Mapping) SetMemoryStart(v uint64) { + ms.state.AssertMutable() + ms.orig.MemoryStart = v +} + +// MemoryLimit returns the memorylimit associated with this Mapping. +func (ms Mapping) MemoryLimit() uint64 { + return ms.orig.MemoryLimit +} + +// SetMemoryLimit replaces the memorylimit associated with this Mapping. +func (ms Mapping) SetMemoryLimit(v uint64) { + ms.state.AssertMutable() + ms.orig.MemoryLimit = v +} + +// FileOffset returns the fileoffset associated with this Mapping. +func (ms Mapping) FileOffset() uint64 { + return ms.orig.FileOffset +} + +// SetFileOffset replaces the fileoffset associated with this Mapping. +func (ms Mapping) SetFileOffset(v uint64) { + ms.state.AssertMutable() + ms.orig.FileOffset = v +} + +// FilenameStrindex returns the filenamestrindex associated with this Mapping. +func (ms Mapping) FilenameStrindex() int32 { + return ms.orig.FilenameStrindex +} + +// SetFilenameStrindex replaces the filenamestrindex associated with this Mapping. 
+func (ms Mapping) SetFilenameStrindex(v int32) { + ms.state.AssertMutable() + ms.orig.FilenameStrindex = v +} + +// AttributeIndices returns the AttributeIndices associated with this Mapping. +func (ms Mapping) AttributeIndices() pcommon.Int32Slice { + return pcommon.Int32Slice(internal.NewInt32Slice(&ms.orig.AttributeIndices, ms.state)) +} + +// HasFunctions returns the hasfunctions associated with this Mapping. +func (ms Mapping) HasFunctions() bool { + return ms.orig.HasFunctions +} + +// SetHasFunctions replaces the hasfunctions associated with this Mapping. +func (ms Mapping) SetHasFunctions(v bool) { + ms.state.AssertMutable() + ms.orig.HasFunctions = v +} + +// HasFilenames returns the hasfilenames associated with this Mapping. +func (ms Mapping) HasFilenames() bool { + return ms.orig.HasFilenames +} + +// SetHasFilenames replaces the hasfilenames associated with this Mapping. +func (ms Mapping) SetHasFilenames(v bool) { + ms.state.AssertMutable() + ms.orig.HasFilenames = v +} + +// HasLineNumbers returns the haslinenumbers associated with this Mapping. +func (ms Mapping) HasLineNumbers() bool { + return ms.orig.HasLineNumbers +} + +// SetHasLineNumbers replaces the haslinenumbers associated with this Mapping. +func (ms Mapping) SetHasLineNumbers(v bool) { + ms.state.AssertMutable() + ms.orig.HasLineNumbers = v +} + +// HasInlineFrames returns the hasinlineframes associated with this Mapping. +func (ms Mapping) HasInlineFrames() bool { + return ms.orig.HasInlineFrames +} + +// SetHasInlineFrames replaces the hasinlineframes associated with this Mapping. +func (ms Mapping) SetHasInlineFrames(v bool) { + ms.state.AssertMutable() + ms.orig.HasInlineFrames = v +} + +// CopyTo copies all properties from the current struct overriding the destination. +func (ms Mapping) CopyTo(dest Mapping) { + dest.state.AssertMutable() + dest.SetMemoryStart(ms.MemoryStart()) + dest.SetMemoryLimit(ms.MemoryLimit()) + dest.SetFileOffset(ms.FileOffset()) + dest.SetFilenameStrindex(ms.FilenameStrindex()) + ms.AttributeIndices().CopyTo(dest.AttributeIndices()) + dest.SetHasFunctions(ms.HasFunctions()) + dest.SetHasFilenames(ms.HasFilenames()) + dest.SetHasLineNumbers(ms.HasLineNumbers()) + dest.SetHasInlineFrames(ms.HasInlineFrames()) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_mappingslice.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_mappingslice.go new file mode 100644 index 00000000000..af20e2dc4fc --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_mappingslice.go @@ -0,0 +1,152 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pprofile + +import ( + "sort" + + "go.opentelemetry.io/collector/pdata/internal" + otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" +) + +// MappingSlice logically represents a slice of Mapping. +// +// This is a reference type. If passed by value and callee modifies it, the +// caller will see the modification. +// +// Must use NewMappingSlice function to create new instances. +// Important: zero-initialized instance is not valid for use. 
+type MappingSlice struct { + orig *[]*otlpprofiles.Mapping + state *internal.State +} + +func newMappingSlice(orig *[]*otlpprofiles.Mapping, state *internal.State) MappingSlice { + return MappingSlice{orig: orig, state: state} +} + +// NewMappingSlice creates a MappingSlice with 0 elements. +// Can use "EnsureCapacity" to initialize with a given capacity. +func NewMappingSlice() MappingSlice { + orig := []*otlpprofiles.Mapping(nil) + state := internal.StateMutable + return newMappingSlice(&orig, &state) +} + +// Len returns the number of elements in the slice. +// +// Returns "0" for a newly instance created with "NewMappingSlice()". +func (es MappingSlice) Len() int { + return len(*es.orig) +} + +// At returns the element at the given index. +// +// This function is used mostly for iterating over all the values in the slice: +// +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// ... // Do something with the element +// } +func (es MappingSlice) At(i int) Mapping { + return newMapping((*es.orig)[i], es.state) +} + +// EnsureCapacity is an operation that ensures the slice has at least the specified capacity. +// 1. If the newCap <= cap then no change in capacity. +// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap. +// +// Here is how a new MappingSlice can be initialized: +// +// es := NewMappingSlice() +// es.EnsureCapacity(4) +// for i := 0; i < 4; i++ { +// e := es.AppendEmpty() +// // Here should set all the values for e. +// } +func (es MappingSlice) EnsureCapacity(newCap int) { + es.state.AssertMutable() + oldCap := cap(*es.orig) + if newCap <= oldCap { + return + } + + newOrig := make([]*otlpprofiles.Mapping, len(*es.orig), newCap) + copy(newOrig, *es.orig) + *es.orig = newOrig +} + +// AppendEmpty will append to the end of the slice an empty Mapping. +// It returns the newly added Mapping. +func (es MappingSlice) AppendEmpty() Mapping { + es.state.AssertMutable() + *es.orig = append(*es.orig, &otlpprofiles.Mapping{}) + return es.At(es.Len() - 1) +} + +// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. +// The current slice will be cleared. +func (es MappingSlice) MoveAndAppendTo(dest MappingSlice) { + es.state.AssertMutable() + dest.state.AssertMutable() + if *dest.orig == nil { + // We can simply move the entire vector and avoid any allocations. + *dest.orig = *es.orig + } else { + *dest.orig = append(*dest.orig, *es.orig...) + } + *es.orig = nil +} + +// RemoveIf calls f sequentially for each element present in the slice. +// If f returns true, the element is removed from the slice. +func (es MappingSlice) RemoveIf(f func(Mapping) bool) { + es.state.AssertMutable() + newLen := 0 + for i := 0; i < len(*es.orig); i++ { + if f(es.At(i)) { + continue + } + if newLen == i { + // Nothing to move, element is at the right place. + newLen++ + continue + } + (*es.orig)[newLen] = (*es.orig)[i] + newLen++ + } + *es.orig = (*es.orig)[:newLen] +} + +// CopyTo copies all elements from the current slice overriding the destination. 
+func (es MappingSlice) CopyTo(dest MappingSlice) { + dest.state.AssertMutable() + srcLen := es.Len() + destCap := cap(*dest.orig) + if srcLen <= destCap { + (*dest.orig) = (*dest.orig)[:srcLen:destCap] + for i := range *es.orig { + newMapping((*es.orig)[i], es.state).CopyTo(newMapping((*dest.orig)[i], dest.state)) + } + return + } + origs := make([]otlpprofiles.Mapping, srcLen) + wrappers := make([]*otlpprofiles.Mapping, srcLen) + for i := range *es.orig { + wrappers[i] = &origs[i] + newMapping((*es.orig)[i], es.state).CopyTo(newMapping(wrappers[i], dest.state)) + } + *dest.orig = wrappers +} + +// Sort sorts the Mapping elements within MappingSlice given the +// provided less function so that two instances of MappingSlice +// can be compared. +func (es MappingSlice) Sort(less func(a, b Mapping) bool) { + es.state.AssertMutable() + sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) }) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_profile.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_profile.go new file mode 100644 index 00000000000..c2c83070b37 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_profile.go @@ -0,0 +1,233 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pprofile + +import ( + "go.opentelemetry.io/collector/pdata/internal" + "go.opentelemetry.io/collector/pdata/internal/data" + otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" + "go.opentelemetry.io/collector/pdata/pcommon" +) + +// Profile are an implementation of the pprofextended data model. + +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewProfile function to create new instances. +// Important: zero-initialized instance is not valid for use. +type Profile struct { + orig *otlpprofiles.Profile + state *internal.State +} + +func newProfile(orig *otlpprofiles.Profile, state *internal.State) Profile { + return Profile{orig: orig, state: state} +} + +// NewProfile creates a new empty Profile. +// +// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, +// OR directly access the member if this is embedded in another struct. +func NewProfile() Profile { + state := internal.StateMutable + return newProfile(&otlpprofiles.Profile{}, &state) +} + +// MoveTo moves all properties from the current struct overriding the destination and +// resetting the current instance to its zero value +func (ms Profile) MoveTo(dest Profile) { + ms.state.AssertMutable() + dest.state.AssertMutable() + *dest.orig = *ms.orig + *ms.orig = otlpprofiles.Profile{} +} + +// SampleType returns the SampleType associated with this Profile. +func (ms Profile) SampleType() ValueTypeSlice { + return newValueTypeSlice(&ms.orig.SampleType, ms.state) +} + +// Sample returns the Sample associated with this Profile. +func (ms Profile) Sample() SampleSlice { + return newSampleSlice(&ms.orig.Sample, ms.state) +} + +// MappingTable returns the MappingTable associated with this Profile. +func (ms Profile) MappingTable() MappingSlice { + return newMappingSlice(&ms.orig.MappingTable, ms.state) +} + +// LocationTable returns the LocationTable associated with this Profile. 
+func (ms Profile) LocationTable() LocationSlice { + return newLocationSlice(&ms.orig.LocationTable, ms.state) +} + +// LocationIndices returns the LocationIndices associated with this Profile. +func (ms Profile) LocationIndices() pcommon.Int32Slice { + return pcommon.Int32Slice(internal.NewInt32Slice(&ms.orig.LocationIndices, ms.state)) +} + +// FunctionTable returns the FunctionTable associated with this Profile. +func (ms Profile) FunctionTable() FunctionSlice { + return newFunctionSlice(&ms.orig.FunctionTable, ms.state) +} + +// AttributeTable returns the AttributeTable associated with this Profile. +func (ms Profile) AttributeTable() AttributeTableSlice { + return newAttributeTableSlice(&ms.orig.AttributeTable, ms.state) +} + +// AttributeUnits returns the AttributeUnits associated with this Profile. +func (ms Profile) AttributeUnits() AttributeUnitSlice { + return newAttributeUnitSlice(&ms.orig.AttributeUnits, ms.state) +} + +// LinkTable returns the LinkTable associated with this Profile. +func (ms Profile) LinkTable() LinkSlice { + return newLinkSlice(&ms.orig.LinkTable, ms.state) +} + +// StringTable returns the StringTable associated with this Profile. +func (ms Profile) StringTable() pcommon.StringSlice { + return pcommon.StringSlice(internal.NewStringSlice(&ms.orig.StringTable, ms.state)) +} + +// Time returns the time associated with this Profile. +func (ms Profile) Time() pcommon.Timestamp { + return pcommon.Timestamp(ms.orig.TimeNanos) +} + +// SetTime replaces the time associated with this Profile. +func (ms Profile) SetTime(v pcommon.Timestamp) { + ms.state.AssertMutable() + ms.orig.TimeNanos = int64(v) +} + +// Duration returns the duration associated with this Profile. +func (ms Profile) Duration() pcommon.Timestamp { + return pcommon.Timestamp(ms.orig.DurationNanos) +} + +// SetDuration replaces the duration associated with this Profile. +func (ms Profile) SetDuration(v pcommon.Timestamp) { + ms.state.AssertMutable() + ms.orig.DurationNanos = int64(v) +} + +// StartTime returns the starttime associated with this Profile. +func (ms Profile) StartTime() pcommon.Timestamp { + return pcommon.Timestamp(ms.orig.TimeNanos) +} + +// SetStartTime replaces the starttime associated with this Profile. +func (ms Profile) SetStartTime(v pcommon.Timestamp) { + ms.state.AssertMutable() + ms.orig.TimeNanos = int64(v) +} + +// PeriodType returns the periodtype associated with this Profile. +func (ms Profile) PeriodType() ValueType { + return newValueType(&ms.orig.PeriodType, ms.state) +} + +// Period returns the period associated with this Profile. +func (ms Profile) Period() int64 { + return ms.orig.Period +} + +// SetPeriod replaces the period associated with this Profile. +func (ms Profile) SetPeriod(v int64) { + ms.state.AssertMutable() + ms.orig.Period = v +} + +// CommentStrindices returns the CommentStrindices associated with this Profile. +func (ms Profile) CommentStrindices() pcommon.Int32Slice { + return pcommon.Int32Slice(internal.NewInt32Slice(&ms.orig.CommentStrindices, ms.state)) +} + +// DefaultSampleTypeStrindex returns the defaultsampletypestrindex associated with this Profile. +func (ms Profile) DefaultSampleTypeStrindex() int32 { + return ms.orig.DefaultSampleTypeStrindex +} + +// SetDefaultSampleTypeStrindex replaces the defaultsampletypestrindex associated with this Profile. 
+func (ms Profile) SetDefaultSampleTypeStrindex(v int32) { + ms.state.AssertMutable() + ms.orig.DefaultSampleTypeStrindex = v +} + +// ProfileID returns the profileid associated with this Profile. +func (ms Profile) ProfileID() ProfileID { + return ProfileID(ms.orig.ProfileId) +} + +// SetProfileID replaces the profileid associated with this Profile. +func (ms Profile) SetProfileID(v ProfileID) { + ms.state.AssertMutable() + ms.orig.ProfileId = data.ProfileID(v) +} + +// Attributes returns the Attributes associated with this Profile. +func (ms Profile) Attributes() pcommon.Map { + return pcommon.Map(internal.NewMap(&ms.orig.Attributes, ms.state)) +} + +// DroppedAttributesCount returns the droppedattributescount associated with this Profile. +func (ms Profile) DroppedAttributesCount() uint32 { + return ms.orig.DroppedAttributesCount +} + +// SetDroppedAttributesCount replaces the droppedattributescount associated with this Profile. +func (ms Profile) SetDroppedAttributesCount(v uint32) { + ms.state.AssertMutable() + ms.orig.DroppedAttributesCount = v +} + +// OriginalPayloadFormat returns the originalpayloadformat associated with this Profile. +func (ms Profile) OriginalPayloadFormat() string { + return ms.orig.OriginalPayloadFormat +} + +// SetOriginalPayloadFormat replaces the originalpayloadformat associated with this Profile. +func (ms Profile) SetOriginalPayloadFormat(v string) { + ms.state.AssertMutable() + ms.orig.OriginalPayloadFormat = v +} + +// OriginalPayload returns the OriginalPayload associated with this Profile. +func (ms Profile) OriginalPayload() pcommon.ByteSlice { + return pcommon.ByteSlice(internal.NewByteSlice(&ms.orig.OriginalPayload, ms.state)) +} + +// CopyTo copies all properties from the current struct overriding the destination. +func (ms Profile) CopyTo(dest Profile) { + dest.state.AssertMutable() + ms.SampleType().CopyTo(dest.SampleType()) + ms.Sample().CopyTo(dest.Sample()) + ms.MappingTable().CopyTo(dest.MappingTable()) + ms.LocationTable().CopyTo(dest.LocationTable()) + ms.LocationIndices().CopyTo(dest.LocationIndices()) + ms.FunctionTable().CopyTo(dest.FunctionTable()) + ms.AttributeTable().CopyTo(dest.AttributeTable()) + ms.AttributeUnits().CopyTo(dest.AttributeUnits()) + ms.LinkTable().CopyTo(dest.LinkTable()) + ms.StringTable().CopyTo(dest.StringTable()) + dest.SetTime(ms.Time()) + dest.SetDuration(ms.Duration()) + dest.SetStartTime(ms.StartTime()) + ms.PeriodType().CopyTo(dest.PeriodType()) + dest.SetPeriod(ms.Period()) + ms.CommentStrindices().CopyTo(dest.CommentStrindices()) + dest.SetDefaultSampleTypeStrindex(ms.DefaultSampleTypeStrindex()) + dest.SetProfileID(ms.ProfileID()) + ms.Attributes().CopyTo(dest.Attributes()) + dest.SetDroppedAttributesCount(ms.DroppedAttributesCount()) + dest.SetOriginalPayloadFormat(ms.OriginalPayloadFormat()) + ms.OriginalPayload().CopyTo(dest.OriginalPayload()) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_profilesslice.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_profilesslice.go new file mode 100644 index 00000000000..69a6309bbb8 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_profilesslice.go @@ -0,0 +1,152 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". 
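Profile ties the generated types together through flattened, pprof-style tables: functions, locations, and samples refer to one another by index rather than by pointer. The following hedged sketch wires up a minimal profile using only the accessors shown here; note the generated docs mark NewProfile as test-only, and in a pipeline the instance would come from a ScopeProfiles entry.

    package main

    import (
        "time"

        "go.opentelemetry.io/collector/pdata/pcommon"
        "go.opentelemetry.io/collector/pdata/pprofile"
    )

    func main() {
        p := pprofile.NewProfile()

        // By pprof convention, string-table index 0 is the empty string.
        p.StringTable().Append("", "main", "main.go")

        // One function, described entirely by string-table indices.
        fn := p.FunctionTable().AppendEmpty()
        fn.SetNameStrindex(1)     // "main"
        fn.SetFilenameStrindex(2) // "main.go"

        // One location whose line entry points back into FunctionTable.
        loc := p.LocationTable().AppendEmpty()
        ln := loc.Line().AppendEmpty()
        ln.SetFunctionIndex(0)
        ln.SetLine(10)

        // Samples address locations through the shared LocationIndices slice.
        p.LocationIndices().Append(0)
        s := p.Sample().AppendEmpty()
        s.SetLocationsStartIndex(0)
        s.SetLocationsLength(1)
        s.Value().Append(1)

        p.SetTime(pcommon.NewTimestampFromTime(time.Now()))
        p.SetDuration(pcommon.Timestamp(10 * time.Second))
    }

SetDuration stores plain nanoseconds in DurationNanos, so converting a time.Duration through pcommon.Timestamp, as above, is sufficient for illustration.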
+ +package pprofile + +import ( + "sort" + + "go.opentelemetry.io/collector/pdata/internal" + otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" +) + +// ProfilesSlice logically represents a slice of Profile. +// +// This is a reference type. If passed by value and callee modifies it, the +// caller will see the modification. +// +// Must use NewProfilesSlice function to create new instances. +// Important: zero-initialized instance is not valid for use. +type ProfilesSlice struct { + orig *[]*otlpprofiles.Profile + state *internal.State +} + +func newProfilesSlice(orig *[]*otlpprofiles.Profile, state *internal.State) ProfilesSlice { + return ProfilesSlice{orig: orig, state: state} +} + +// NewProfilesSlice creates a ProfilesSlice with 0 elements. +// Can use "EnsureCapacity" to initialize with a given capacity. +func NewProfilesSlice() ProfilesSlice { + orig := []*otlpprofiles.Profile(nil) + state := internal.StateMutable + return newProfilesSlice(&orig, &state) +} + +// Len returns the number of elements in the slice. +// +// Returns "0" for a newly instance created with "NewProfilesSlice()". +func (es ProfilesSlice) Len() int { + return len(*es.orig) +} + +// At returns the element at the given index. +// +// This function is used mostly for iterating over all the values in the slice: +// +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// ... // Do something with the element +// } +func (es ProfilesSlice) At(i int) Profile { + return newProfile((*es.orig)[i], es.state) +} + +// EnsureCapacity is an operation that ensures the slice has at least the specified capacity. +// 1. If the newCap <= cap then no change in capacity. +// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap. +// +// Here is how a new ProfilesSlice can be initialized: +// +// es := NewProfilesSlice() +// es.EnsureCapacity(4) +// for i := 0; i < 4; i++ { +// e := es.AppendEmpty() +// // Here should set all the values for e. +// } +func (es ProfilesSlice) EnsureCapacity(newCap int) { + es.state.AssertMutable() + oldCap := cap(*es.orig) + if newCap <= oldCap { + return + } + + newOrig := make([]*otlpprofiles.Profile, len(*es.orig), newCap) + copy(newOrig, *es.orig) + *es.orig = newOrig +} + +// AppendEmpty will append to the end of the slice an empty Profile. +// It returns the newly added Profile. +func (es ProfilesSlice) AppendEmpty() Profile { + es.state.AssertMutable() + *es.orig = append(*es.orig, &otlpprofiles.Profile{}) + return es.At(es.Len() - 1) +} + +// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. +// The current slice will be cleared. +func (es ProfilesSlice) MoveAndAppendTo(dest ProfilesSlice) { + es.state.AssertMutable() + dest.state.AssertMutable() + if *dest.orig == nil { + // We can simply move the entire vector and avoid any allocations. + *dest.orig = *es.orig + } else { + *dest.orig = append(*dest.orig, *es.orig...) + } + *es.orig = nil +} + +// RemoveIf calls f sequentially for each element present in the slice. +// If f returns true, the element is removed from the slice. +func (es ProfilesSlice) RemoveIf(f func(Profile) bool) { + es.state.AssertMutable() + newLen := 0 + for i := 0; i < len(*es.orig); i++ { + if f(es.At(i)) { + continue + } + if newLen == i { + // Nothing to move, element is at the right place. 
+ newLen++ + continue + } + (*es.orig)[newLen] = (*es.orig)[i] + newLen++ + } + *es.orig = (*es.orig)[:newLen] +} + +// CopyTo copies all elements from the current slice overriding the destination. +func (es ProfilesSlice) CopyTo(dest ProfilesSlice) { + dest.state.AssertMutable() + srcLen := es.Len() + destCap := cap(*dest.orig) + if srcLen <= destCap { + (*dest.orig) = (*dest.orig)[:srcLen:destCap] + for i := range *es.orig { + newProfile((*es.orig)[i], es.state).CopyTo(newProfile((*dest.orig)[i], dest.state)) + } + return + } + origs := make([]otlpprofiles.Profile, srcLen) + wrappers := make([]*otlpprofiles.Profile, srcLen) + for i := range *es.orig { + wrappers[i] = &origs[i] + newProfile((*es.orig)[i], es.state).CopyTo(newProfile(wrappers[i], dest.state)) + } + *dest.orig = wrappers +} + +// Sort sorts the Profile elements within ProfilesSlice given the +// provided less function so that two instances of ProfilesSlice +// can be compared. +func (es ProfilesSlice) Sort(less func(a, b Profile) bool) { + es.state.AssertMutable() + sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) }) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_resourceprofiles.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_resourceprofiles.go new file mode 100644 index 00000000000..649e5bbf33d --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_resourceprofiles.go @@ -0,0 +1,76 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pprofile + +import ( + "go.opentelemetry.io/collector/pdata/internal" + otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" + "go.opentelemetry.io/collector/pdata/pcommon" +) + +// ResourceProfiles is a collection of profiles from a Resource. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewResourceProfiles function to create new instances. +// Important: zero-initialized instance is not valid for use. +type ResourceProfiles struct { + orig *otlpprofiles.ResourceProfiles + state *internal.State +} + +func newResourceProfiles(orig *otlpprofiles.ResourceProfiles, state *internal.State) ResourceProfiles { + return ResourceProfiles{orig: orig, state: state} +} + +// NewResourceProfiles creates a new empty ResourceProfiles. +// +// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, +// OR directly access the member if this is embedded in another struct. +func NewResourceProfiles() ResourceProfiles { + state := internal.StateMutable + return newResourceProfiles(&otlpprofiles.ResourceProfiles{}, &state) +} + +// MoveTo moves all properties from the current struct overriding the destination and +// resetting the current instance to its zero value +func (ms ResourceProfiles) MoveTo(dest ResourceProfiles) { + ms.state.AssertMutable() + dest.state.AssertMutable() + *dest.orig = *ms.orig + *ms.orig = otlpprofiles.ResourceProfiles{} +} + +// Resource returns the resource associated with this ResourceProfiles. +func (ms ResourceProfiles) Resource() pcommon.Resource { + return pcommon.Resource(internal.NewResource(&ms.orig.Resource, ms.state)) +} + +// SchemaUrl returns the schemaurl associated with this ResourceProfiles. 
+func (ms ResourceProfiles) SchemaUrl() string { + return ms.orig.SchemaUrl +} + +// SetSchemaUrl replaces the schemaurl associated with this ResourceProfiles. +func (ms ResourceProfiles) SetSchemaUrl(v string) { + ms.state.AssertMutable() + ms.orig.SchemaUrl = v +} + +// ScopeProfiles returns the ScopeProfiles associated with this ResourceProfiles. +func (ms ResourceProfiles) ScopeProfiles() ScopeProfilesSlice { + return newScopeProfilesSlice(&ms.orig.ScopeProfiles, ms.state) +} + +// CopyTo copies all properties from the current struct overriding the destination. +func (ms ResourceProfiles) CopyTo(dest ResourceProfiles) { + dest.state.AssertMutable() + ms.Resource().CopyTo(dest.Resource()) + dest.SetSchemaUrl(ms.SchemaUrl()) + ms.ScopeProfiles().CopyTo(dest.ScopeProfiles()) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_resourceprofilesslice.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_resourceprofilesslice.go new file mode 100644 index 00000000000..c3f7f1053a6 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_resourceprofilesslice.go @@ -0,0 +1,152 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pprofile + +import ( + "sort" + + "go.opentelemetry.io/collector/pdata/internal" + otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" +) + +// ResourceProfilesSlice logically represents a slice of ResourceProfiles. +// +// This is a reference type. If passed by value and callee modifies it, the +// caller will see the modification. +// +// Must use NewResourceProfilesSlice function to create new instances. +// Important: zero-initialized instance is not valid for use. +type ResourceProfilesSlice struct { + orig *[]*otlpprofiles.ResourceProfiles + state *internal.State +} + +func newResourceProfilesSlice(orig *[]*otlpprofiles.ResourceProfiles, state *internal.State) ResourceProfilesSlice { + return ResourceProfilesSlice{orig: orig, state: state} +} + +// NewResourceProfilesSlice creates a ResourceProfilesSlice with 0 elements. +// Can use "EnsureCapacity" to initialize with a given capacity. +func NewResourceProfilesSlice() ResourceProfilesSlice { + orig := []*otlpprofiles.ResourceProfiles(nil) + state := internal.StateMutable + return newResourceProfilesSlice(&orig, &state) +} + +// Len returns the number of elements in the slice. +// +// Returns "0" for a newly instance created with "NewResourceProfilesSlice()". +func (es ResourceProfilesSlice) Len() int { + return len(*es.orig) +} + +// At returns the element at the given index. +// +// This function is used mostly for iterating over all the values in the slice: +// +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// ... // Do something with the element +// } +func (es ResourceProfilesSlice) At(i int) ResourceProfiles { + return newResourceProfiles((*es.orig)[i], es.state) +} + +// EnsureCapacity is an operation that ensures the slice has at least the specified capacity. +// 1. If the newCap <= cap then no change in capacity. +// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap. +// +// Here is how a new ResourceProfilesSlice can be initialized: +// +// es := NewResourceProfilesSlice() +// es.EnsureCapacity(4) +// for i := 0; i < 4; i++ { +// e := es.AppendEmpty() +// // Here should set all the values for e. 
+// } +func (es ResourceProfilesSlice) EnsureCapacity(newCap int) { + es.state.AssertMutable() + oldCap := cap(*es.orig) + if newCap <= oldCap { + return + } + + newOrig := make([]*otlpprofiles.ResourceProfiles, len(*es.orig), newCap) + copy(newOrig, *es.orig) + *es.orig = newOrig +} + +// AppendEmpty will append to the end of the slice an empty ResourceProfiles. +// It returns the newly added ResourceProfiles. +func (es ResourceProfilesSlice) AppendEmpty() ResourceProfiles { + es.state.AssertMutable() + *es.orig = append(*es.orig, &otlpprofiles.ResourceProfiles{}) + return es.At(es.Len() - 1) +} + +// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. +// The current slice will be cleared. +func (es ResourceProfilesSlice) MoveAndAppendTo(dest ResourceProfilesSlice) { + es.state.AssertMutable() + dest.state.AssertMutable() + if *dest.orig == nil { + // We can simply move the entire vector and avoid any allocations. + *dest.orig = *es.orig + } else { + *dest.orig = append(*dest.orig, *es.orig...) + } + *es.orig = nil +} + +// RemoveIf calls f sequentially for each element present in the slice. +// If f returns true, the element is removed from the slice. +func (es ResourceProfilesSlice) RemoveIf(f func(ResourceProfiles) bool) { + es.state.AssertMutable() + newLen := 0 + for i := 0; i < len(*es.orig); i++ { + if f(es.At(i)) { + continue + } + if newLen == i { + // Nothing to move, element is at the right place. + newLen++ + continue + } + (*es.orig)[newLen] = (*es.orig)[i] + newLen++ + } + *es.orig = (*es.orig)[:newLen] +} + +// CopyTo copies all elements from the current slice overriding the destination. +func (es ResourceProfilesSlice) CopyTo(dest ResourceProfilesSlice) { + dest.state.AssertMutable() + srcLen := es.Len() + destCap := cap(*dest.orig) + if srcLen <= destCap { + (*dest.orig) = (*dest.orig)[:srcLen:destCap] + for i := range *es.orig { + newResourceProfiles((*es.orig)[i], es.state).CopyTo(newResourceProfiles((*dest.orig)[i], dest.state)) + } + return + } + origs := make([]otlpprofiles.ResourceProfiles, srcLen) + wrappers := make([]*otlpprofiles.ResourceProfiles, srcLen) + for i := range *es.orig { + wrappers[i] = &origs[i] + newResourceProfiles((*es.orig)[i], es.state).CopyTo(newResourceProfiles(wrappers[i], dest.state)) + } + *dest.orig = wrappers +} + +// Sort sorts the ResourceProfiles elements within ResourceProfilesSlice given the +// provided less function so that two instances of ResourceProfilesSlice +// can be compared. +func (es ResourceProfilesSlice) Sort(less func(a, b ResourceProfiles) bool) { + es.state.AssertMutable() + sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) }) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_sample.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_sample.go new file mode 100644 index 00000000000..c62027753d2 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_sample.go @@ -0,0 +1,94 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pprofile + +import ( + "go.opentelemetry.io/collector/pdata/internal" + otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" + "go.opentelemetry.io/collector/pdata/pcommon" +) + +// Sample represents each record value encountered within a profiled program. 
+// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewSample function to create new instances. +// Important: zero-initialized instance is not valid for use. +type Sample struct { + orig *otlpprofiles.Sample + state *internal.State +} + +func newSample(orig *otlpprofiles.Sample, state *internal.State) Sample { + return Sample{orig: orig, state: state} +} + +// NewSample creates a new empty Sample. +// +// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, +// OR directly access the member if this is embedded in another struct. +func NewSample() Sample { + state := internal.StateMutable + return newSample(&otlpprofiles.Sample{}, &state) +} + +// MoveTo moves all properties from the current struct overriding the destination and +// resetting the current instance to its zero value +func (ms Sample) MoveTo(dest Sample) { + ms.state.AssertMutable() + dest.state.AssertMutable() + *dest.orig = *ms.orig + *ms.orig = otlpprofiles.Sample{} +} + +// LocationsStartIndex returns the locationsstartindex associated with this Sample. +func (ms Sample) LocationsStartIndex() int32 { + return ms.orig.LocationsStartIndex +} + +// SetLocationsStartIndex replaces the locationsstartindex associated with this Sample. +func (ms Sample) SetLocationsStartIndex(v int32) { + ms.state.AssertMutable() + ms.orig.LocationsStartIndex = v +} + +// LocationsLength returns the locationslength associated with this Sample. +func (ms Sample) LocationsLength() int32 { + return ms.orig.LocationsLength +} + +// SetLocationsLength replaces the locationslength associated with this Sample. +func (ms Sample) SetLocationsLength(v int32) { + ms.state.AssertMutable() + ms.orig.LocationsLength = v +} + +// Value returns the Value associated with this Sample. +func (ms Sample) Value() pcommon.Int64Slice { + return pcommon.Int64Slice(internal.NewInt64Slice(&ms.orig.Value, ms.state)) +} + +// AttributeIndices returns the AttributeIndices associated with this Sample. +func (ms Sample) AttributeIndices() pcommon.Int32Slice { + return pcommon.Int32Slice(internal.NewInt32Slice(&ms.orig.AttributeIndices, ms.state)) +} + +// TimestampsUnixNano returns the TimestampsUnixNano associated with this Sample. +func (ms Sample) TimestampsUnixNano() pcommon.UInt64Slice { + return pcommon.UInt64Slice(internal.NewUInt64Slice(&ms.orig.TimestampsUnixNano, ms.state)) +} + +// CopyTo copies all properties from the current struct overriding the destination. +func (ms Sample) CopyTo(dest Sample) { + dest.state.AssertMutable() + dest.SetLocationsStartIndex(ms.LocationsStartIndex()) + dest.SetLocationsLength(ms.LocationsLength()) + ms.Value().CopyTo(dest.Value()) + ms.AttributeIndices().CopyTo(dest.AttributeIndices()) + ms.TimestampsUnixNano().CopyTo(dest.TimestampsUnixNano()) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_sampleslice.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_sampleslice.go new file mode 100644 index 00000000000..cf624434824 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_sampleslice.go @@ -0,0 +1,152 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". 
+ +package pprofile + +import ( + "sort" + + "go.opentelemetry.io/collector/pdata/internal" + otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" +) + +// SampleSlice logically represents a slice of Sample. +// +// This is a reference type. If passed by value and callee modifies it, the +// caller will see the modification. +// +// Must use NewSampleSlice function to create new instances. +// Important: zero-initialized instance is not valid for use. +type SampleSlice struct { + orig *[]*otlpprofiles.Sample + state *internal.State +} + +func newSampleSlice(orig *[]*otlpprofiles.Sample, state *internal.State) SampleSlice { + return SampleSlice{orig: orig, state: state} +} + +// NewSampleSlice creates a SampleSlice with 0 elements. +// Can use "EnsureCapacity" to initialize with a given capacity. +func NewSampleSlice() SampleSlice { + orig := []*otlpprofiles.Sample(nil) + state := internal.StateMutable + return newSampleSlice(&orig, &state) +} + +// Len returns the number of elements in the slice. +// +// Returns "0" for a newly instance created with "NewSampleSlice()". +func (es SampleSlice) Len() int { + return len(*es.orig) +} + +// At returns the element at the given index. +// +// This function is used mostly for iterating over all the values in the slice: +// +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// ... // Do something with the element +// } +func (es SampleSlice) At(i int) Sample { + return newSample((*es.orig)[i], es.state) +} + +// EnsureCapacity is an operation that ensures the slice has at least the specified capacity. +// 1. If the newCap <= cap then no change in capacity. +// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap. +// +// Here is how a new SampleSlice can be initialized: +// +// es := NewSampleSlice() +// es.EnsureCapacity(4) +// for i := 0; i < 4; i++ { +// e := es.AppendEmpty() +// // Here should set all the values for e. +// } +func (es SampleSlice) EnsureCapacity(newCap int) { + es.state.AssertMutable() + oldCap := cap(*es.orig) + if newCap <= oldCap { + return + } + + newOrig := make([]*otlpprofiles.Sample, len(*es.orig), newCap) + copy(newOrig, *es.orig) + *es.orig = newOrig +} + +// AppendEmpty will append to the end of the slice an empty Sample. +// It returns the newly added Sample. +func (es SampleSlice) AppendEmpty() Sample { + es.state.AssertMutable() + *es.orig = append(*es.orig, &otlpprofiles.Sample{}) + return es.At(es.Len() - 1) +} + +// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. +// The current slice will be cleared. +func (es SampleSlice) MoveAndAppendTo(dest SampleSlice) { + es.state.AssertMutable() + dest.state.AssertMutable() + if *dest.orig == nil { + // We can simply move the entire vector and avoid any allocations. + *dest.orig = *es.orig + } else { + *dest.orig = append(*dest.orig, *es.orig...) + } + *es.orig = nil +} + +// RemoveIf calls f sequentially for each element present in the slice. +// If f returns true, the element is removed from the slice. +func (es SampleSlice) RemoveIf(f func(Sample) bool) { + es.state.AssertMutable() + newLen := 0 + for i := 0; i < len(*es.orig); i++ { + if f(es.At(i)) { + continue + } + if newLen == i { + // Nothing to move, element is at the right place. + newLen++ + continue + } + (*es.orig)[newLen] = (*es.orig)[i] + newLen++ + } + *es.orig = (*es.orig)[:newLen] +} + +// CopyTo copies all elements from the current slice overriding the destination. 
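+//
+// Illustrative usage (editor's sketch, not part of the generated file):
+//
+//	src := NewSampleSlice()
+//	src.AppendEmpty().SetLocationsLength(3)
+//	dst := NewSampleSlice()
+//	src.CopyTo(dst) // dst now holds deep copies of src's elements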
+func (es SampleSlice) CopyTo(dest SampleSlice) { + dest.state.AssertMutable() + srcLen := es.Len() + destCap := cap(*dest.orig) + if srcLen <= destCap { + (*dest.orig) = (*dest.orig)[:srcLen:destCap] + for i := range *es.orig { + newSample((*es.orig)[i], es.state).CopyTo(newSample((*dest.orig)[i], dest.state)) + } + return + } + origs := make([]otlpprofiles.Sample, srcLen) + wrappers := make([]*otlpprofiles.Sample, srcLen) + for i := range *es.orig { + wrappers[i] = &origs[i] + newSample((*es.orig)[i], es.state).CopyTo(newSample(wrappers[i], dest.state)) + } + *dest.orig = wrappers +} + +// Sort sorts the Sample elements within SampleSlice given the +// provided less function so that two instances of SampleSlice +// can be compared. +func (es SampleSlice) Sort(less func(a, b Sample) bool) { + es.state.AssertMutable() + sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) }) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_scopeprofiles.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_scopeprofiles.go new file mode 100644 index 00000000000..920b578a5be --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_scopeprofiles.go @@ -0,0 +1,76 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pprofile + +import ( + "go.opentelemetry.io/collector/pdata/internal" + otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" + "go.opentelemetry.io/collector/pdata/pcommon" +) + +// ScopeProfiles is a collection of profiles from a LibraryInstrumentation. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewScopeProfiles function to create new instances. +// Important: zero-initialized instance is not valid for use. +type ScopeProfiles struct { + orig *otlpprofiles.ScopeProfiles + state *internal.State +} + +func newScopeProfiles(orig *otlpprofiles.ScopeProfiles, state *internal.State) ScopeProfiles { + return ScopeProfiles{orig: orig, state: state} +} + +// NewScopeProfiles creates a new empty ScopeProfiles. +// +// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, +// OR directly access the member if this is embedded in another struct. +func NewScopeProfiles() ScopeProfiles { + state := internal.StateMutable + return newScopeProfiles(&otlpprofiles.ScopeProfiles{}, &state) +} + +// MoveTo moves all properties from the current struct overriding the destination and +// resetting the current instance to its zero value +func (ms ScopeProfiles) MoveTo(dest ScopeProfiles) { + ms.state.AssertMutable() + dest.state.AssertMutable() + *dest.orig = *ms.orig + *ms.orig = otlpprofiles.ScopeProfiles{} +} + +// Scope returns the scope associated with this ScopeProfiles. +func (ms ScopeProfiles) Scope() pcommon.InstrumentationScope { + return pcommon.InstrumentationScope(internal.NewInstrumentationScope(&ms.orig.Scope, ms.state)) +} + +// SchemaUrl returns the schemaurl associated with this ScopeProfiles. +func (ms ScopeProfiles) SchemaUrl() string { + return ms.orig.SchemaUrl +} + +// SetSchemaUrl replaces the schemaurl associated with this ScopeProfiles. 
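+//
+// For example (editor's illustration; the scope name and URL are placeholders):
+//
+//	sp := NewScopeProfiles()
+//	sp.Scope().SetName("my-instrumentation")
+//	sp.SetSchemaUrl("https://opentelemetry.io/schemas/1.21.0")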
+func (ms ScopeProfiles) SetSchemaUrl(v string) { + ms.state.AssertMutable() + ms.orig.SchemaUrl = v +} + +// Profiles returns the Profiles associated with this ScopeProfiles. +func (ms ScopeProfiles) Profiles() ProfilesSlice { + return newProfilesSlice(&ms.orig.Profiles, ms.state) +} + +// CopyTo copies all properties from the current struct overriding the destination. +func (ms ScopeProfiles) CopyTo(dest ScopeProfiles) { + dest.state.AssertMutable() + ms.Scope().CopyTo(dest.Scope()) + dest.SetSchemaUrl(ms.SchemaUrl()) + ms.Profiles().CopyTo(dest.Profiles()) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_scopeprofilesslice.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_scopeprofilesslice.go new file mode 100644 index 00000000000..3f2b452cece --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_scopeprofilesslice.go @@ -0,0 +1,152 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pprofile + +import ( + "sort" + + "go.opentelemetry.io/collector/pdata/internal" + otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" +) + +// ScopeProfilesSlice logically represents a slice of ScopeProfiles. +// +// This is a reference type. If passed by value and callee modifies it, the +// caller will see the modification. +// +// Must use NewScopeProfilesSlice function to create new instances. +// Important: zero-initialized instance is not valid for use. +type ScopeProfilesSlice struct { + orig *[]*otlpprofiles.ScopeProfiles + state *internal.State +} + +func newScopeProfilesSlice(orig *[]*otlpprofiles.ScopeProfiles, state *internal.State) ScopeProfilesSlice { + return ScopeProfilesSlice{orig: orig, state: state} +} + +// NewScopeProfilesSlice creates a ScopeProfilesSlice with 0 elements. +// Can use "EnsureCapacity" to initialize with a given capacity. +func NewScopeProfilesSlice() ScopeProfilesSlice { + orig := []*otlpprofiles.ScopeProfiles(nil) + state := internal.StateMutable + return newScopeProfilesSlice(&orig, &state) +} + +// Len returns the number of elements in the slice. +// +// Returns "0" for a newly instance created with "NewScopeProfilesSlice()". +func (es ScopeProfilesSlice) Len() int { + return len(*es.orig) +} + +// At returns the element at the given index. +// +// This function is used mostly for iterating over all the values in the slice: +// +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// ... // Do something with the element +// } +func (es ScopeProfilesSlice) At(i int) ScopeProfiles { + return newScopeProfiles((*es.orig)[i], es.state) +} + +// EnsureCapacity is an operation that ensures the slice has at least the specified capacity. +// 1. If the newCap <= cap then no change in capacity. +// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap. +// +// Here is how a new ScopeProfilesSlice can be initialized: +// +// es := NewScopeProfilesSlice() +// es.EnsureCapacity(4) +// for i := 0; i < 4; i++ { +// e := es.AppendEmpty() +// // Here should set all the values for e. 
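+//	    e.SetSchemaUrl("https://example.com/schema") // e.g. populate each new element (editor's note)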
+// } +func (es ScopeProfilesSlice) EnsureCapacity(newCap int) { + es.state.AssertMutable() + oldCap := cap(*es.orig) + if newCap <= oldCap { + return + } + + newOrig := make([]*otlpprofiles.ScopeProfiles, len(*es.orig), newCap) + copy(newOrig, *es.orig) + *es.orig = newOrig +} + +// AppendEmpty will append to the end of the slice an empty ScopeProfiles. +// It returns the newly added ScopeProfiles. +func (es ScopeProfilesSlice) AppendEmpty() ScopeProfiles { + es.state.AssertMutable() + *es.orig = append(*es.orig, &otlpprofiles.ScopeProfiles{}) + return es.At(es.Len() - 1) +} + +// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. +// The current slice will be cleared. +func (es ScopeProfilesSlice) MoveAndAppendTo(dest ScopeProfilesSlice) { + es.state.AssertMutable() + dest.state.AssertMutable() + if *dest.orig == nil { + // We can simply move the entire vector and avoid any allocations. + *dest.orig = *es.orig + } else { + *dest.orig = append(*dest.orig, *es.orig...) + } + *es.orig = nil +} + +// RemoveIf calls f sequentially for each element present in the slice. +// If f returns true, the element is removed from the slice. +func (es ScopeProfilesSlice) RemoveIf(f func(ScopeProfiles) bool) { + es.state.AssertMutable() + newLen := 0 + for i := 0; i < len(*es.orig); i++ { + if f(es.At(i)) { + continue + } + if newLen == i { + // Nothing to move, element is at the right place. + newLen++ + continue + } + (*es.orig)[newLen] = (*es.orig)[i] + newLen++ + } + *es.orig = (*es.orig)[:newLen] +} + +// CopyTo copies all elements from the current slice overriding the destination. +func (es ScopeProfilesSlice) CopyTo(dest ScopeProfilesSlice) { + dest.state.AssertMutable() + srcLen := es.Len() + destCap := cap(*dest.orig) + if srcLen <= destCap { + (*dest.orig) = (*dest.orig)[:srcLen:destCap] + for i := range *es.orig { + newScopeProfiles((*es.orig)[i], es.state).CopyTo(newScopeProfiles((*dest.orig)[i], dest.state)) + } + return + } + origs := make([]otlpprofiles.ScopeProfiles, srcLen) + wrappers := make([]*otlpprofiles.ScopeProfiles, srcLen) + for i := range *es.orig { + wrappers[i] = &origs[i] + newScopeProfiles((*es.orig)[i], es.state).CopyTo(newScopeProfiles(wrappers[i], dest.state)) + } + *dest.orig = wrappers +} + +// Sort sorts the ScopeProfiles elements within ScopeProfilesSlice given the +// provided less function so that two instances of ScopeProfilesSlice +// can be compared. +func (es ScopeProfilesSlice) Sort(less func(a, b ScopeProfiles) bool) { + es.state.AssertMutable() + sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) }) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_valuetype.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_valuetype.go new file mode 100644 index 00000000000..2afc8614048 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_valuetype.go @@ -0,0 +1,87 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pprofile + +import ( + "go.opentelemetry.io/collector/pdata/internal" + otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" +) + +// ValueType describes the type and units of a value, with an optional aggregation temporality. 
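+// For example, a CPU profile's values are conventionally described as
+// "samples"/"count" or "cpu"/"nanoseconds"; here both the type and the unit
+// are indices into the enclosing Profile's string table (editor's note).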
+// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewValueType function to create new instances. +// Important: zero-initialized instance is not valid for use. +type ValueType struct { + orig *otlpprofiles.ValueType + state *internal.State +} + +func newValueType(orig *otlpprofiles.ValueType, state *internal.State) ValueType { + return ValueType{orig: orig, state: state} +} + +// NewValueType creates a new empty ValueType. +// +// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, +// OR directly access the member if this is embedded in another struct. +func NewValueType() ValueType { + state := internal.StateMutable + return newValueType(&otlpprofiles.ValueType{}, &state) +} + +// MoveTo moves all properties from the current struct overriding the destination and +// resetting the current instance to its zero value +func (ms ValueType) MoveTo(dest ValueType) { + ms.state.AssertMutable() + dest.state.AssertMutable() + *dest.orig = *ms.orig + *ms.orig = otlpprofiles.ValueType{} +} + +// TypeStrindex returns the typestrindex associated with this ValueType. +func (ms ValueType) TypeStrindex() int32 { + return ms.orig.TypeStrindex +} + +// SetTypeStrindex replaces the typestrindex associated with this ValueType. +func (ms ValueType) SetTypeStrindex(v int32) { + ms.state.AssertMutable() + ms.orig.TypeStrindex = v +} + +// UnitStrindex returns the unitstrindex associated with this ValueType. +func (ms ValueType) UnitStrindex() int32 { + return ms.orig.UnitStrindex +} + +// SetUnitStrindex replaces the unitstrindex associated with this ValueType. +func (ms ValueType) SetUnitStrindex(v int32) { + ms.state.AssertMutable() + ms.orig.UnitStrindex = v +} + +// AggregationTemporality returns the aggregationtemporality associated with this ValueType. +func (ms ValueType) AggregationTemporality() otlpprofiles.AggregationTemporality { + return ms.orig.AggregationTemporality +} + +// SetAggregationTemporality replaces the aggregationtemporality associated with this ValueType. +func (ms ValueType) SetAggregationTemporality(v otlpprofiles.AggregationTemporality) { + ms.state.AssertMutable() + ms.orig.AggregationTemporality = v +} + +// CopyTo copies all properties from the current struct overriding the destination. +func (ms ValueType) CopyTo(dest ValueType) { + dest.state.AssertMutable() + dest.SetTypeStrindex(ms.TypeStrindex()) + dest.SetUnitStrindex(ms.UnitStrindex()) + dest.SetAggregationTemporality(ms.AggregationTemporality()) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_valuetypeslice.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_valuetypeslice.go new file mode 100644 index 00000000000..65d5f78bd7d --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/generated_valuetypeslice.go @@ -0,0 +1,152 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pprofile + +import ( + "sort" + + "go.opentelemetry.io/collector/pdata/internal" + otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" +) + +// ValueTypeSlice logically represents a slice of ValueType. +// +// This is a reference type. If passed by value and callee modifies it, the +// caller will see the modification. 
+// +// Must use NewValueTypeSlice function to create new instances. +// Important: zero-initialized instance is not valid for use. +type ValueTypeSlice struct { + orig *[]*otlpprofiles.ValueType + state *internal.State +} + +func newValueTypeSlice(orig *[]*otlpprofiles.ValueType, state *internal.State) ValueTypeSlice { + return ValueTypeSlice{orig: orig, state: state} +} + +// NewValueTypeSlice creates a ValueTypeSlice with 0 elements. +// Can use "EnsureCapacity" to initialize with a given capacity. +func NewValueTypeSlice() ValueTypeSlice { + orig := []*otlpprofiles.ValueType(nil) + state := internal.StateMutable + return newValueTypeSlice(&orig, &state) +} + +// Len returns the number of elements in the slice. +// +// Returns "0" for a newly instance created with "NewValueTypeSlice()". +func (es ValueTypeSlice) Len() int { + return len(*es.orig) +} + +// At returns the element at the given index. +// +// This function is used mostly for iterating over all the values in the slice: +// +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// ... // Do something with the element +// } +func (es ValueTypeSlice) At(i int) ValueType { + return newValueType((*es.orig)[i], es.state) +} + +// EnsureCapacity is an operation that ensures the slice has at least the specified capacity. +// 1. If the newCap <= cap then no change in capacity. +// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap. +// +// Here is how a new ValueTypeSlice can be initialized: +// +// es := NewValueTypeSlice() +// es.EnsureCapacity(4) +// for i := 0; i < 4; i++ { +// e := es.AppendEmpty() +// // Here should set all the values for e. +// } +func (es ValueTypeSlice) EnsureCapacity(newCap int) { + es.state.AssertMutable() + oldCap := cap(*es.orig) + if newCap <= oldCap { + return + } + + newOrig := make([]*otlpprofiles.ValueType, len(*es.orig), newCap) + copy(newOrig, *es.orig) + *es.orig = newOrig +} + +// AppendEmpty will append to the end of the slice an empty ValueType. +// It returns the newly added ValueType. +func (es ValueTypeSlice) AppendEmpty() ValueType { + es.state.AssertMutable() + *es.orig = append(*es.orig, &otlpprofiles.ValueType{}) + return es.At(es.Len() - 1) +} + +// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. +// The current slice will be cleared. +func (es ValueTypeSlice) MoveAndAppendTo(dest ValueTypeSlice) { + es.state.AssertMutable() + dest.state.AssertMutable() + if *dest.orig == nil { + // We can simply move the entire vector and avoid any allocations. + *dest.orig = *es.orig + } else { + *dest.orig = append(*dest.orig, *es.orig...) + } + *es.orig = nil +} + +// RemoveIf calls f sequentially for each element present in the slice. +// If f returns true, the element is removed from the slice. +func (es ValueTypeSlice) RemoveIf(f func(ValueType) bool) { + es.state.AssertMutable() + newLen := 0 + for i := 0; i < len(*es.orig); i++ { + if f(es.At(i)) { + continue + } + if newLen == i { + // Nothing to move, element is at the right place. + newLen++ + continue + } + (*es.orig)[newLen] = (*es.orig)[i] + newLen++ + } + *es.orig = (*es.orig)[:newLen] +} + +// CopyTo copies all elements from the current slice overriding the destination. 
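+//
+// A minimal sketch (editor's addition; "src" is assumed to be an existing
+// ValueTypeSlice). When dest already has enough capacity, CopyTo reuses its
+// backing array instead of allocating:
+//
+//	dst := NewValueTypeSlice()
+//	dst.EnsureCapacity(src.Len())
+//	src.CopyTo(dst)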
+func (es ValueTypeSlice) CopyTo(dest ValueTypeSlice) { + dest.state.AssertMutable() + srcLen := es.Len() + destCap := cap(*dest.orig) + if srcLen <= destCap { + (*dest.orig) = (*dest.orig)[:srcLen:destCap] + for i := range *es.orig { + newValueType((*es.orig)[i], es.state).CopyTo(newValueType((*dest.orig)[i], dest.state)) + } + return + } + origs := make([]otlpprofiles.ValueType, srcLen) + wrappers := make([]*otlpprofiles.ValueType, srcLen) + for i := range *es.orig { + wrappers[i] = &origs[i] + newValueType((*es.orig)[i], es.state).CopyTo(newValueType(wrappers[i], dest.state)) + } + *dest.orig = wrappers +} + +// Sort sorts the ValueType elements within ValueTypeSlice given the +// provided less function so that two instances of ValueTypeSlice +// can be compared. +func (es ValueTypeSlice) Sort(less func(a, b ValueType) bool) { + es.state.AssertMutable() + sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) }) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/json.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/json.go new file mode 100644 index 00000000000..cac811d4f30 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/json.go @@ -0,0 +1,356 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile" + +import ( + "bytes" + "fmt" + + jsoniter "github.com/json-iterator/go" + + "go.opentelemetry.io/collector/pdata/internal" + otlpprofiles "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" + "go.opentelemetry.io/collector/pdata/internal/json" + "go.opentelemetry.io/collector/pdata/internal/otlp" +) + +// JSONMarshaler marshals pprofile.Profiles to JSON bytes using the OTLP/JSON format. +type JSONMarshaler struct{} + +// MarshalProfiles to the OTLP/JSON format. +func (*JSONMarshaler) MarshalProfiles(td Profiles) ([]byte, error) { + buf := bytes.Buffer{} + pb := internal.ProfilesToProto(internal.Profiles(td)) + err := json.Marshal(&buf, &pb) + return buf.Bytes(), err +} + +// JSONUnmarshaler unmarshals OTLP/JSON formatted-bytes to pprofile.Profiles. +type JSONUnmarshaler struct{} + +// UnmarshalProfiles from OTLP/JSON format into pprofile.Profiles. 
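+//
+// Round-trip example (editor's sketch; "td" is assumed to be an existing
+// Profiles value):
+//
+//	var m JSONMarshaler
+//	buf, err := m.MarshalProfiles(td)
+//	// handle err ...
+//	var u JSONUnmarshaler
+//	td2, err := u.UnmarshalProfiles(buf)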
+func (*JSONUnmarshaler) UnmarshalProfiles(buf []byte) (Profiles, error) { + iter := jsoniter.ConfigFastest.BorrowIterator(buf) + defer jsoniter.ConfigFastest.ReturnIterator(iter) + td := NewProfiles() + td.unmarshalJsoniter(iter) + if iter.Error != nil { + return Profiles{}, iter.Error + } + otlp.MigrateProfiles(td.getOrig().ResourceProfiles) + return td, nil +} + +func (p Profiles) unmarshalJsoniter(iter *jsoniter.Iterator) { + iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { + switch f { + case "resourceProfiles", "resource_profiles": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + p.ResourceProfiles().AppendEmpty().unmarshalJsoniter(iter) + return true + }) + default: + iter.Skip() + } + return true + }) +} + +func (rp ResourceProfiles) unmarshalJsoniter(iter *jsoniter.Iterator) { + iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { + switch f { + case "resource": + json.ReadResource(iter, internal.GetOrigResource(internal.Resource(rp.Resource()))) + case "scopeProfiles", "scope_profiles": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + rp.ScopeProfiles().AppendEmpty().unmarshalJsoniter(iter) + return true + }) + case "schemaUrl", "schema_url": + rp.orig.SchemaUrl = iter.ReadString() + default: + iter.Skip() + } + return true + }) +} + +func (sp ScopeProfiles) unmarshalJsoniter(iter *jsoniter.Iterator) { + iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { + switch f { + case "scope": + json.ReadScope(iter, &sp.orig.Scope) + case "profiles": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + sp.Profiles().AppendEmpty().unmarshalJsoniter(iter) + return true + }) + case "schemaUrl", "schema_url": + sp.orig.SchemaUrl = iter.ReadString() + default: + iter.Skip() + } + return true + }) +} + +func (p Profile) unmarshalJsoniter(iter *jsoniter.Iterator) { + iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { + switch f { + case "profileId", "profile_id": + if err := p.orig.ProfileId.UnmarshalJSON([]byte(iter.ReadString())); err != nil { + iter.ReportError("profileContainer.profileId", fmt.Sprintf("parse profile_id:%v", err)) + } + case "sampleType", "sample_type": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + p.SampleType().AppendEmpty().unmarshalJsoniter(iter) + return true + }) + case "sample": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + p.Sample().AppendEmpty().unmarshalJsoniter(iter) + return true + }) + case "mappingTable", "mapping_table": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + p.MappingTable().AppendEmpty().unmarshalJsoniter(iter) + return true + }) + case "locationTable", "location_table": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + p.LocationTable().AppendEmpty().unmarshalJsoniter(iter) + return true + }) + case "locationIndices", "location_indices": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + p.LocationIndices().Append(json.ReadInt32(iter)) + return true + }) + case "functionTable", "function_table": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + p.FunctionTable().AppendEmpty().unmarshalJsoniter(iter) + return true + }) + case "attributeTable", "attribute_table": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + p.orig.AttributeTable = append(p.orig.AttributeTable, json.ReadAttribute(iter)) + return true + }) + case "attributeUnits", "attribute_units": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + p.AttributeUnits().AppendEmpty().unmarshalJsoniter(iter) + return true + }) + case "linkTable", 
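+		// (every field in this switch accepts both its camelCase and
+		// snake_case JSON spelling, per the OTLP/JSON encoding)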
"link_table": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + p.LinkTable().AppendEmpty().unmarshalJsoniter(iter) + return true + }) + case "stringTable", "string_table": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + p.StringTable().Append(iter.ReadString()) + return true + }) + case "timeNanos", "time_nanos": + p.orig.TimeNanos = json.ReadInt64(iter) + case "durationNanos", "duration_nanos": + p.orig.DurationNanos = json.ReadInt64(iter) + case "periodType", "period_type": + p.PeriodType().unmarshalJsoniter(iter) + case "period": + p.orig.Period = json.ReadInt64(iter) + case "commentStrindices", "comment_strindices": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + p.CommentStrindices().Append(json.ReadInt32(iter)) + return true + }) + case "defaultSampleTypeStrindex", "default_sample_type_strindex": + p.orig.DefaultSampleTypeStrindex = json.ReadInt32(iter) + case "attributes": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + p.orig.Attributes = append(p.orig.Attributes, json.ReadAttribute(iter)) + return true + }) + case "droppedAttributesCount", "dropped_attributes_count": + p.orig.DroppedAttributesCount = json.ReadUint32(iter) + case "originalPayloadFormat", "original_payload_format": + p.orig.OriginalPayloadFormat = iter.ReadString() + case "originalPayload", "original_payload": + p.orig.OriginalPayload = iter.ReadStringAsSlice() + default: + iter.Skip() + } + return true + }) +} + +func (vt ValueType) unmarshalJsoniter(iter *jsoniter.Iterator) { + iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { + switch f { + case "typeStrindex", "type_strindex": + vt.orig.TypeStrindex = json.ReadInt32(iter) + case "unitStrindex", "unit_strindex": + vt.orig.UnitStrindex = json.ReadInt32(iter) + case "aggregationTemporality", "aggregation_temporality": + vt.orig.AggregationTemporality = otlpprofiles.AggregationTemporality(json.ReadInt32(iter)) + default: + iter.Skip() + } + return true + }) +} + +func (st Sample) unmarshalJsoniter(iter *jsoniter.Iterator) { + iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { + switch f { + case "locationsStartIndex", "locations_start_index": + st.orig.LocationsStartIndex = json.ReadInt32(iter) + case "locationsLength", "locations_length": + st.orig.LocationsLength = json.ReadInt32(iter) + case "value": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + st.Value().Append(json.ReadInt64(iter)) + return true + }) + case "attributeIndices", "attribute_indices": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + st.AttributeIndices().Append(json.ReadInt32(iter)) + return true + }) + case "linkIndex", "link_index": + st.orig.LinkIndex_ = &otlpprofiles.Sample_LinkIndex{LinkIndex: json.ReadInt32(iter)} + case "timestampsUnixNano", "timestamps_unix_nano": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + st.TimestampsUnixNano().Append(json.ReadUint64(iter)) + return true + }) + default: + iter.Skip() + } + return true + }) +} + +func (m Mapping) unmarshalJsoniter(iter *jsoniter.Iterator) { + iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { + switch f { + case "memoryStart", "memory_start": + m.orig.MemoryStart = json.ReadUint64(iter) + case "memoryLimit", "memory_limit": + m.orig.MemoryLimit = json.ReadUint64(iter) + case "fileOffset", "file_offset": + m.orig.FileOffset = json.ReadUint64(iter) + case "filenameStrindex", "filename_strindex": + m.orig.FilenameStrindex = json.ReadInt32(iter) + case "attributeIndices", "attribute_indices": + iter.ReadArrayCB(func(iter 
*jsoniter.Iterator) bool { + m.AttributeIndices().Append(json.ReadInt32(iter)) + return true + }) + case "hasFunctions", "has_functions": + m.orig.HasFunctions = iter.ReadBool() + case "hasFilenames", "has_filenames": + m.orig.HasFilenames = iter.ReadBool() + case "hasLineNumbers", "has_line_numbers": + m.orig.HasLineNumbers = iter.ReadBool() + case "hasInlineFrames", "has_inline_frames": + m.orig.HasInlineFrames = iter.ReadBool() + default: + iter.Skip() + } + return true + }) +} + +func (l Location) unmarshalJsoniter(iter *jsoniter.Iterator) { + iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { + switch f { + case "mappingIndex", "mapping_index": + l.orig.MappingIndex_ = &otlpprofiles.Location_MappingIndex{MappingIndex: json.ReadInt32(iter)} + case "address": + l.orig.Address = json.ReadUint64(iter) + case "line": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + l.Line().AppendEmpty().unmarshalJsoniter(iter) + return true + }) + case "isFolded", "is_folded": + l.orig.IsFolded = iter.ReadBool() + case "attributeIndices", "attribute_indices": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + l.AttributeIndices().Append(json.ReadInt32(iter)) + return true + }) + default: + iter.Skip() + } + return true + }) +} + +func (l Line) unmarshalJsoniter(iter *jsoniter.Iterator) { + iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { + switch f { + case "functionIndex", "function_index": + l.orig.FunctionIndex = json.ReadInt32(iter) + case "line": + l.orig.Line = json.ReadInt64(iter) + case "column": + l.orig.Column = json.ReadInt64(iter) + default: + iter.Skip() + } + return true + }) +} + +func (fn Function) unmarshalJsoniter(iter *jsoniter.Iterator) { + iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { + switch f { + case "nameStrindex", "name_strindex": + fn.orig.NameStrindex = json.ReadInt32(iter) + case "systemNameStrindex", "system_name_strindex": + fn.orig.SystemNameStrindex = json.ReadInt32(iter) + case "filenameStrindex", "filename_strindex": + fn.orig.FilenameStrindex = json.ReadInt32(iter) + case "startLine", "start_line": + fn.orig.StartLine = json.ReadInt64(iter) + default: + iter.Skip() + } + return true + }) +} + +func (at AttributeUnit) unmarshalJsoniter(iter *jsoniter.Iterator) { + iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { + switch f { + case "attributeKeyStrindex", "attribute_key_strindex": + at.orig.AttributeKeyStrindex = json.ReadInt32(iter) + case "unitStrindex", "unit_strindex": + at.orig.UnitStrindex = json.ReadInt32(iter) + default: + iter.Skip() + } + return true + }) +} + +func (l Link) unmarshalJsoniter(iter *jsoniter.Iterator) { + iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { + switch f { + case "traceId", "trace_id": + if err := l.orig.TraceId.UnmarshalJSON([]byte(iter.ReadString())); err != nil { + iter.ReportError("link.traceId", fmt.Sprintf("parse trace_id:%v", err)) + } + case "spanId", "span_id": + if err := l.orig.SpanId.UnmarshalJSON([]byte(iter.ReadString())); err != nil { + iter.ReportError("link.spanId", fmt.Sprintf("parse span_id:%v", err)) + } + default: + iter.Skip() + } + return true + }) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/pb.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/pb.go new file mode 100644 index 00000000000..5da9ceda2f0 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/pb.go @@ -0,0 +1,31 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package 
pprofile // import "go.opentelemetry.io/collector/pdata/pprofile" + +import ( + "go.opentelemetry.io/collector/pdata/internal" + otlpprofile "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development" +) + +var _ MarshalSizer = (*ProtoMarshaler)(nil) + +type ProtoMarshaler struct{} + +func (e *ProtoMarshaler) MarshalProfiles(td Profiles) ([]byte, error) { + pb := internal.ProfilesToProto(internal.Profiles(td)) + return pb.Marshal() +} + +func (e *ProtoMarshaler) ProfilesSize(td Profiles) int { + pb := internal.ProfilesToProto(internal.Profiles(td)) + return pb.Size() +} + +type ProtoUnmarshaler struct{} + +func (d *ProtoUnmarshaler) UnmarshalProfiles(buf []byte) (Profiles, error) { + pb := otlpprofile.ProfilesData{} + err := pb.Unmarshal(buf) + return Profiles(internal.ProfilesFromProto(pb)), err +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp/generated_exportpartialsuccess.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp/generated_exportpartialsuccess.go new file mode 100644 index 00000000000..284c10f096a --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp/generated_exportpartialsuccess.go @@ -0,0 +1,75 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pprofileotlp + +import ( + "go.opentelemetry.io/collector/pdata/internal" + otlpcollectorprofile "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1development" +) + +// ExportPartialSuccess represents the details of a partially successful export request. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewExportPartialSuccess function to create new instances. +// Important: zero-initialized instance is not valid for use. +type ExportPartialSuccess struct { + orig *otlpcollectorprofile.ExportProfilesPartialSuccess + state *internal.State +} + +func newExportPartialSuccess(orig *otlpcollectorprofile.ExportProfilesPartialSuccess, state *internal.State) ExportPartialSuccess { + return ExportPartialSuccess{orig: orig, state: state} +} + +// NewExportPartialSuccess creates a new empty ExportPartialSuccess. +// +// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, +// OR directly access the member if this is embedded in another struct. +func NewExportPartialSuccess() ExportPartialSuccess { + state := internal.StateMutable + return newExportPartialSuccess(&otlpcollectorprofile.ExportProfilesPartialSuccess{}, &state) +} + +// MoveTo moves all properties from the current struct overriding the destination and +// resetting the current instance to its zero value +func (ms ExportPartialSuccess) MoveTo(dest ExportPartialSuccess) { + ms.state.AssertMutable() + dest.state.AssertMutable() + *dest.orig = *ms.orig + *ms.orig = otlpcollectorprofile.ExportProfilesPartialSuccess{} +} + +// RejectedProfiles returns the rejectedprofiles associated with this ExportPartialSuccess. +func (ms ExportPartialSuccess) RejectedProfiles() int64 { + return ms.orig.RejectedProfiles +} + +// SetRejectedProfiles replaces the rejectedprofiles associated with this ExportPartialSuccess. 
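+//
+// For example, a server reporting a partial failure (editor's illustration):
+//
+//	ps := NewExportPartialSuccess()
+//	ps.SetRejectedProfiles(5)
+//	ps.SetErrorMessage("5 profiles exceeded the size limit")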
+func (ms ExportPartialSuccess) SetRejectedProfiles(v int64) { + ms.state.AssertMutable() + ms.orig.RejectedProfiles = v +} + +// ErrorMessage returns the errormessage associated with this ExportPartialSuccess. +func (ms ExportPartialSuccess) ErrorMessage() string { + return ms.orig.ErrorMessage +} + +// SetErrorMessage replaces the errormessage associated with this ExportPartialSuccess. +func (ms ExportPartialSuccess) SetErrorMessage(v string) { + ms.state.AssertMutable() + ms.orig.ErrorMessage = v +} + +// CopyTo copies all properties from the current struct overriding the destination. +func (ms ExportPartialSuccess) CopyTo(dest ExportPartialSuccess) { + dest.state.AssertMutable() + dest.SetRejectedProfiles(ms.RejectedProfiles()) + dest.SetErrorMessage(ms.ErrorMessage()) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp/grpc.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp/grpc.go new file mode 100644 index 00000000000..cb6c5e8093e --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp/grpc.go @@ -0,0 +1,91 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package pprofileotlp // import "go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp" + +import ( + "context" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "go.opentelemetry.io/collector/pdata/internal" + otlpcollectorprofile "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1development" + "go.opentelemetry.io/collector/pdata/internal/otlp" +) + +// GRPCClient is the client API for OTLP-GRPC Profiles service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type GRPCClient interface { + // Export pprofile.Profiles to the server. + // + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + Export(ctx context.Context, request ExportRequest, opts ...grpc.CallOption) (ExportResponse, error) + + // unexported disallow implementation of the GRPCClient. + unexported() +} + +// NewGRPCClient returns a new GRPCClient connected using the given connection. +func NewGRPCClient(cc *grpc.ClientConn) GRPCClient { + return &grpcClient{rawClient: otlpcollectorprofile.NewProfilesServiceClient(cc)} +} + +type grpcClient struct { + rawClient otlpcollectorprofile.ProfilesServiceClient +} + +// Export implements the Client interface. +func (c *grpcClient) Export(ctx context.Context, request ExportRequest, opts ...grpc.CallOption) (ExportResponse, error) { + rsp, err := c.rawClient.Export(ctx, request.orig, opts...) + if err != nil { + return ExportResponse{}, err + } + state := internal.StateMutable + return ExportResponse{orig: rsp, state: &state}, err +} + +func (c *grpcClient) unexported() {} + +// GRPCServer is the server API for OTLP gRPC ProfilesService service. +// Implementations MUST embed UnimplementedGRPCServer. +type GRPCServer interface { + // Export is called every time a new request is received. + // + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + Export(context.Context, ExportRequest) (ExportResponse, error) + + // unexported disallow implementation of the GRPCServer. 
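+	//
+	// A minimal implementation embeds UnimplementedGRPCServer and overrides
+	// Export (editor's sketch):
+	//
+	//	type profilesServer struct {
+	//	    pprofileotlp.UnimplementedGRPCServer
+	//	}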
+ unexported() +} + +var _ GRPCServer = (*UnimplementedGRPCServer)(nil) + +// UnimplementedGRPCServer MUST be embedded to have forward compatible implementations. +type UnimplementedGRPCServer struct{} + +func (*UnimplementedGRPCServer) Export(context.Context, ExportRequest) (ExportResponse, error) { + return ExportResponse{}, status.Errorf(codes.Unimplemented, "method Export not implemented") +} + +func (*UnimplementedGRPCServer) unexported() {} + +// RegisterGRPCServer registers the GRPCServer to the grpc.Server. +func RegisterGRPCServer(s *grpc.Server, srv GRPCServer) { + otlpcollectorprofile.RegisterProfilesServiceServer(s, &rawProfilesServer{srv: srv}) +} + +type rawProfilesServer struct { + srv GRPCServer +} + +func (s rawProfilesServer) Export(ctx context.Context, request *otlpcollectorprofile.ExportProfilesServiceRequest) (*otlpcollectorprofile.ExportProfilesServiceResponse, error) { + otlp.MigrateProfiles(request.ResourceProfiles) + state := internal.StateMutable + rsp, err := s.srv.Export(ctx, ExportRequest{orig: request, state: &state}) + return rsp.orig, err +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp/request.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp/request.go new file mode 100644 index 00000000000..8c304944cab --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp/request.go @@ -0,0 +1,79 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package pprofileotlp // import "go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp" + +import ( + "bytes" + + "go.opentelemetry.io/collector/pdata/internal" + otlpcollectorprofile "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1development" + "go.opentelemetry.io/collector/pdata/internal/json" + "go.opentelemetry.io/collector/pdata/internal/otlp" + "go.opentelemetry.io/collector/pdata/pprofile" +) + +var jsonUnmarshaler = &pprofile.JSONUnmarshaler{} + +// ExportRequest represents the request for gRPC/HTTP client/server. +// It's a wrapper for pprofile.Profiles data. +type ExportRequest struct { + orig *otlpcollectorprofile.ExportProfilesServiceRequest + state *internal.State +} + +// NewExportRequest returns an empty ExportRequest. +func NewExportRequest() ExportRequest { + state := internal.StateMutable + return ExportRequest{ + orig: &otlpcollectorprofile.ExportProfilesServiceRequest{}, + state: &state, + } +} + +// NewExportRequestFromProfiles returns a ExportRequest from pprofile.Profiles. +// Because ExportRequest is a wrapper for pprofile.Profiles, +// any changes to the provided Profiles struct will be reflected in the ExportRequest and vice versa. +func NewExportRequestFromProfiles(td pprofile.Profiles) ExportRequest { + return ExportRequest{ + orig: internal.GetOrigProfiles(internal.Profiles(td)), + state: internal.GetProfilesState(internal.Profiles(td)), + } +} + +// MarshalProto marshals ExportRequest into proto bytes. +func (ms ExportRequest) MarshalProto() ([]byte, error) { + return ms.orig.Marshal() +} + +// UnmarshalProto unmarshalls ExportRequest from proto bytes. +func (ms ExportRequest) UnmarshalProto(data []byte) error { + if err := ms.orig.Unmarshal(data); err != nil { + return err + } + otlp.MigrateProfiles(ms.orig.ResourceProfiles) + return nil +} + +// MarshalJSON marshals ExportRequest into JSON bytes. 
+func (ms ExportRequest) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + if err := json.Marshal(&buf, ms.orig); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// UnmarshalJSON unmarshalls ExportRequest from JSON bytes. +func (ms ExportRequest) UnmarshalJSON(data []byte) error { + td, err := jsonUnmarshaler.UnmarshalProfiles(data) + if err != nil { + return err + } + *ms.orig = *internal.GetOrigProfiles(internal.Profiles(td)) + return nil +} + +func (ms ExportRequest) Profiles() pprofile.Profiles { + return pprofile.Profiles(internal.NewProfiles(ms.orig, ms.state)) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp/response.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp/response.go new file mode 100644 index 00000000000..5b87f8b3418 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp/response.go @@ -0,0 +1,87 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package pprofileotlp // import "go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp" + +import ( + "bytes" + + jsoniter "github.com/json-iterator/go" + + "go.opentelemetry.io/collector/pdata/internal" + otlpcollectorprofile "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1development" + "go.opentelemetry.io/collector/pdata/internal/json" +) + +// ExportResponse represents the response for gRPC/HTTP client/server. +type ExportResponse struct { + orig *otlpcollectorprofile.ExportProfilesServiceResponse + state *internal.State +} + +// NewExportResponse returns an empty ExportResponse. +func NewExportResponse() ExportResponse { + state := internal.StateMutable + return ExportResponse{ + orig: &otlpcollectorprofile.ExportProfilesServiceResponse{}, + state: &state, + } +} + +// MarshalProto marshals ExportResponse into proto bytes. +func (ms ExportResponse) MarshalProto() ([]byte, error) { + return ms.orig.Marshal() +} + +// UnmarshalProto unmarshalls ExportResponse from proto bytes. +func (ms ExportResponse) UnmarshalProto(data []byte) error { + return ms.orig.Unmarshal(data) +} + +// MarshalJSON marshals ExportResponse into JSON bytes. +func (ms ExportResponse) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + if err := json.Marshal(&buf, ms.orig); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// UnmarshalJSON unmarshalls ExportResponse from JSON bytes. +func (ms ExportResponse) UnmarshalJSON(data []byte) error { + iter := jsoniter.ConfigFastest.BorrowIterator(data) + defer jsoniter.ConfigFastest.ReturnIterator(iter) + ms.unmarshalJsoniter(iter) + return iter.Error +} + +func (ms ExportResponse) unmarshalJsoniter(iter *jsoniter.Iterator) { + iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { + switch f { + case "partial_success", "partialSuccess": + ms.PartialSuccess().unmarshalJsoniter(iter) + default: + iter.Skip() + } + return true + }) +} + +// PartialSuccess returns the ExportLogsPartialSuccess associated with this ExportResponse. 
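+// (For profiles this wraps the ExportProfilesPartialSuccess message. Editor's
+// sketch of checking it client-side, assuming "client" is a GRPCClient and
+// "req" an ExportRequest:)
+//
+//	resp, err := client.Export(ctx, req)
+//	if err == nil && resp.PartialSuccess().RejectedProfiles() > 0 {
+//	    log.Println(resp.PartialSuccess().ErrorMessage())
+//	}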
+func (ms ExportResponse) PartialSuccess() ExportPartialSuccess { + return newExportPartialSuccess(&ms.orig.PartialSuccess, ms.state) +} + +func (ms ExportPartialSuccess) unmarshalJsoniter(iter *jsoniter.Iterator) { + iter.ReadObjectCB(func(_ *jsoniter.Iterator, f string) bool { + switch f { + case "rejected_profiles", "rejectedProfiles": + ms.orig.RejectedProfiles = json.ReadInt64(iter) + case "error_message", "errorMessage": + ms.orig.ErrorMessage = iter.ReadString() + default: + iter.Skip() + } + return true + }) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/profileid.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/profileid.go new file mode 100644 index 00000000000..52ee2e812b6 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/profileid.go @@ -0,0 +1,37 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile" + +import ( + "encoding/hex" + + "go.opentelemetry.io/collector/pdata/internal/data" +) + +var emptyProfileID = ProfileID([16]byte{}) + +// ProfileID is a profile identifier. +type ProfileID [16]byte + +// NewProfileIDEmpty returns a new empty (all zero bytes) ProfileID. +func NewProfileIDEmpty() ProfileID { + return emptyProfileID +} + +// String returns string representation of the ProfileID. +// +// Important: Don't rely on this method to get a string identifier of ProfileID. +// Use hex.EncodeToString explicitly instead. +// This method is meant to implement Stringer interface for display purposes only. +func (ms ProfileID) String() string { + if ms.IsEmpty() { + return "" + } + return hex.EncodeToString(ms[:]) +} + +// IsEmpty returns true if id doesn't contain at least one non-zero byte. +func (ms ProfileID) IsEmpty() bool { + return data.ProfileID(ms).IsEmpty() +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pprofile/profiles.go b/vendor/go.opentelemetry.io/collector/pdata/pprofile/profiles.go new file mode 100644 index 00000000000..de235a18506 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pprofile/profiles.go @@ -0,0 +1,68 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile" + +import ( + "go.opentelemetry.io/collector/pdata/internal" + otlpcollectorprofile "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1development" +) + +// profiles is the top-level struct that is propagated through the profiles pipeline. +// Use NewProfiles to create new instance, zero-initialized instance is not valid for use. +type Profiles internal.Profiles + +func newProfiles(orig *otlpcollectorprofile.ExportProfilesServiceRequest) Profiles { + state := internal.StateMutable + return Profiles(internal.NewProfiles(orig, &state)) +} + +func (ms Profiles) getOrig() *otlpcollectorprofile.ExportProfilesServiceRequest { + return internal.GetOrigProfiles(internal.Profiles(ms)) +} + +func (ms Profiles) getState() *internal.State { + return internal.GetProfilesState(internal.Profiles(ms)) +} + +// NewProfiles creates a new Profiles struct. +func NewProfiles() Profiles { + return newProfiles(&otlpcollectorprofile.ExportProfilesServiceRequest{}) +} + +// IsReadOnly returns true if this ResourceProfiles instance is read-only. +func (ms Profiles) IsReadOnly() bool { + return *ms.getState() == internal.StateReadOnly +} + +// CopyTo copies the Profiles instance overriding the destination. 
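+//
+// Illustrative deep copy (editor's sketch; "src" is assumed to be an existing
+// Profiles value):
+//
+//	dst := NewProfiles()
+//	src.CopyTo(dst)
+//	dst.MarkReadOnly() // the copy can now be shared as read-only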
+func (ms Profiles) CopyTo(dest Profiles) { + ms.ResourceProfiles().CopyTo(dest.ResourceProfiles()) +} + +// ResourceProfiles returns the ResourceProfilesSlice associated with this Profiles. +func (ms Profiles) ResourceProfiles() ResourceProfilesSlice { + return newResourceProfilesSlice(&ms.getOrig().ResourceProfiles, internal.GetProfilesState(internal.Profiles(ms))) +} + +// MarkReadOnly marks the ResourceProfiles as shared so that no further modifications can be done on it. +func (ms Profiles) MarkReadOnly() { + internal.SetProfilesState(internal.Profiles(ms), internal.StateReadOnly) +} + +// SampleCount calculates the total number of samples. +func (ms Profiles) SampleCount() int { + sampleCount := 0 + rps := ms.ResourceProfiles() + for i := 0; i < rps.Len(); i++ { + rp := rps.At(i) + sps := rp.ScopeProfiles() + for j := 0; j < sps.Len(); j++ { + pcs := sps.At(j).Profiles() + for k := 0; k < pcs.Len(); k++ { + sampleCount += pcs.At(k).Sample().Len() + } + } + } + return sampleCount +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/json.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/json.go index 64bd273ba96..2e35a95913a 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/ptrace/json.go +++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/json.go @@ -112,6 +112,8 @@ func (dest Span) unmarshalJsoniter(iter *jsoniter.Iterator) { if err := dest.orig.ParentSpanId.UnmarshalJSON([]byte(iter.ReadString())); err != nil { iter.ReportError("readSpan.parentSpanId", fmt.Sprintf("parse parent_span_id:%v", err)) } + case "flags": + dest.orig.Flags = json.ReadUint32(iter) case "name": dest.orig.Name = iter.ReadString() case "kind": @@ -184,6 +186,8 @@ func (dest SpanLink) unmarshalJsoniter(iter *jsoniter.Iterator) { }) case "droppedAttributesCount", "dropped_attributes_count": dest.orig.DroppedAttributesCount = json.ReadUint32(iter) + case "flags": + dest.orig.Flags = json.ReadUint32(iter) default: iter.Skip() } diff --git a/vendor/go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp/response.go b/vendor/go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp/response.go index 78096bff77c..a5017e27627 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp/response.go +++ b/vendor/go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp/response.go @@ -65,7 +65,6 @@ func (ms ExportResponse) unmarshalJsoniter(iter *jsoniter.Iterator) { } return true }) - } // PartialSuccess returns the ExportLogsPartialSuccess associated with this ExportResponse. 
diff --git a/vendor/go.opentelemetry.io/collector/pdata/testdata/log.go b/vendor/go.opentelemetry.io/collector/pdata/testdata/log.go index 5d2a0a194ff..be9c8b2bce6 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/testdata/log.go +++ b/vendor/go.opentelemetry.io/collector/pdata/testdata/log.go @@ -10,9 +10,7 @@ import ( "go.opentelemetry.io/collector/pdata/plog" ) -var ( - logTimestamp = pcommon.NewTimestampFromTime(time.Date(2020, 2, 11, 20, 26, 13, 789, time.UTC)) -) +var logTimestamp = pcommon.NewTimestampFromTime(time.Date(2020, 2, 11, 20, 26, 13, 789, time.UTC)) func GenerateLogs(count int) plog.Logs { ld := plog.NewLogs() diff --git a/vendor/go.opentelemetry.io/collector/pdata/testdata/profile.go b/vendor/go.opentelemetry.io/collector/pdata/testdata/profile.go new file mode 100644 index 00000000000..e67c924f03d --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/testdata/profile.go @@ -0,0 +1,66 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package testdata // import "go.opentelemetry.io/collector/pdata/testdata" + +import ( + "time" + + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pprofile" +) + +var ( + profileStartTimestamp = pcommon.NewTimestampFromTime(time.Date(2020, 2, 11, 20, 26, 12, 321, time.UTC)) + profileEndTimestamp = pcommon.NewTimestampFromTime(time.Date(2020, 2, 11, 20, 26, 13, 789, time.UTC)) +) + +// GenerateProfiles generates dummy profiling data for tests +func GenerateProfiles(profilesCount int) pprofile.Profiles { + td := pprofile.NewProfiles() + initResource(td.ResourceProfiles().AppendEmpty().Resource()) + ss := td.ResourceProfiles().At(0).ScopeProfiles().AppendEmpty().Profiles() + ss.EnsureCapacity(profilesCount) + for i := 0; i < profilesCount; i++ { + switch i % 2 { + case 0: + fillProfileOne(ss.AppendEmpty()) + case 1: + fillProfileTwo(ss.AppendEmpty()) + } + } + return td +} + +func fillProfileOne(profile pprofile.Profile) { + profile.SetProfileID([16]byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10}) + profile.SetTime(profileStartTimestamp) + profile.SetDuration(profileEndTimestamp) + profile.SetDroppedAttributesCount(1) + + attr := profile.AttributeTable().AppendEmpty() + attr.SetKey("key") + attr.Value().SetStr("value") + + sample := profile.Sample().AppendEmpty() + sample.SetLocationsStartIndex(2) + sample.SetLocationsLength(10) + sample.Value().Append(4) + sample.AttributeIndices().Append(0) +} + +func fillProfileTwo(profile pprofile.Profile) { + profile.SetProfileID([16]byte{0x02, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10}) + profile.SetTime(profileStartTimestamp) + profile.SetDuration(profileEndTimestamp) + + attr := profile.AttributeTable().AppendEmpty() + attr.SetKey("key") + attr.Value().SetStr("value") + + sample := profile.Sample().AppendEmpty() + sample.SetLocationsStartIndex(7) + sample.SetLocationsLength(20) + sample.Value().Append(9) + sample.AttributeIndices().Append(0) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/testdata/trace.go b/vendor/go.opentelemetry.io/collector/pdata/testdata/trace.go index 3d69ed3a425..779430af666 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/testdata/trace.go +++ b/vendor/go.opentelemetry.io/collector/pdata/testdata/trace.go @@ -37,6 +37,7 @@ func fillSpanOne(span ptrace.Span) { span.SetStartTimestamp(spanStartTimestamp) span.SetEndTimestamp(spanEndTimestamp) span.SetDroppedAttributesCount(1) + 
span.TraceState().FromRaw("ot=th:0") // 100% sampling span.SetTraceID([16]byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10}) span.SetSpanID([8]byte{0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18}) evs := span.Events() diff --git a/vendor/go.opentelemetry.io/collector/pipeline/LICENSE b/vendor/go.opentelemetry.io/collector/pipeline/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pipeline/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/collector/pipeline/Makefile b/vendor/go.opentelemetry.io/collector/pipeline/Makefile new file mode 100644 index 00000000000..39734bfaebb --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pipeline/Makefile @@ -0,0 +1 @@ +include ../Makefile.Common diff --git a/vendor/go.opentelemetry.io/collector/pipeline/internal/globalsignal/signal.go b/vendor/go.opentelemetry.io/collector/pipeline/internal/globalsignal/signal.go new file mode 100644 index 00000000000..dd7c6ec623c --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pipeline/internal/globalsignal/signal.go @@ -0,0 +1,51 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package globalsignal // import "go.opentelemetry.io/collector/pipeline/internal/globalsignal" + +import ( + "errors" + "fmt" + "regexp" +) + +// Signal represents the signals supported by the collector. +type Signal struct { + name string +} + +// String returns the string representation of the signal. +func (s Signal) String() string { + return s.name +} + +// MarshalText marshals the Signal. +func (s Signal) MarshalText() (text []byte, err error) { + return []byte(s.name), nil +} + +// signalRegex is used to validate the signal. +// A signal must consist of 1 to 62 lowercase ASCII alphabetic characters. +var signalRegex = regexp.MustCompile(`^[a-z]{1,62}$`) + +// NewSignal creates a Signal. It returns an error if the Signal is invalid. +// A Signal must consist of 1 to 62 lowercase ASCII alphabetic characters. +func NewSignal(signal string) (Signal, error) { + if len(signal) == 0 { + return Signal{}, errors.New("signal must not be empty") + } + if !signalRegex.MatchString(signal) { + return Signal{}, fmt.Errorf("invalid character(s) in type %q", signal) + } + return Signal{name: signal}, nil +} + +// MustNewSignal creates a Signal. It panics if the Signal is invalid. +// A signal must consist of 1 to 62 lowercase ASCII alphabetic characters. 
+func MustNewSignal(signal string) Signal { + s, err := NewSignal(signal) + if err != nil { + panic(err) + } + return s +} diff --git a/vendor/go.opentelemetry.io/collector/pipeline/pipeline.go b/vendor/go.opentelemetry.io/collector/pipeline/pipeline.go new file mode 100644 index 00000000000..aa2d3d0d0ad --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pipeline/pipeline.go @@ -0,0 +1,131 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package pipeline // import "go.opentelemetry.io/collector/pipeline" +import ( + "errors" + "fmt" + "regexp" + "strings" + + "go.opentelemetry.io/collector/pipeline/internal/globalsignal" +) + +// typeAndNameSeparator is the separator that is used between type and name in type/name composite keys. +const typeAndNameSeparator = "/" + +// ID represents the identity for a pipeline. It combines two values: +// * signal - the Signal of the pipeline. +// * name - the name of that pipeline. +type ID struct { + signal Signal `mapstructure:"-"` + name string `mapstructure:"-"` +} + +// NewID returns a new ID with the given Signal and empty name. +func NewID(signal Signal) ID { + return ID{signal: signal} +} + +// MustNewID builds a Signal and returns a new ID with the given Signal and empty name. +// It panics if the Signal is invalid. +// A signal must consist of 1 to 62 lowercase ASCII alphabetic characters. +func MustNewID(signal string) ID { + return ID{signal: globalsignal.MustNewSignal(signal)} +} + +// NewIDWithName returns a new ID with the given Signal and name. +func NewIDWithName(signal Signal, name string) ID { + return ID{signal: signal, name: name} +} + +// MustNewIDWithName builds a Signal and returns a new ID with the given Signal and name. +// It panics if the Signal is invalid or name is invalid. +// A signal must consist of 1 to 62 lowercase ASCII alphabetic characters. +// A name must consist of 1 to 1024 unicode characters excluding whitespace, control characters, and symbols. +func MustNewIDWithName(signal string, name string) ID { + id := ID{signal: globalsignal.MustNewSignal(signal)} + err := validateName(name) + if err != nil { + panic(err) + } + id.name = name + return id +} + +// Signal returns the Signal of the ID. +func (i ID) Signal() Signal { + return i.signal +} + +// Name returns the name of the ID. +func (i ID) Name() string { + return i.name +} + +// MarshalText implements the encoding.TextMarshaler interface. +// This marshals the Signal and name as one string in the config. +func (i ID) MarshalText() (text []byte, err error) { + return []byte(i.String()), nil +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. +func (i *ID) UnmarshalText(text []byte) error { + idStr := string(text) + items := strings.SplitN(idStr, typeAndNameSeparator, 2) + var signalStr, nameStr string + if len(items) >= 1 { + signalStr = strings.TrimSpace(items[0]) + } + + if len(items) == 1 && signalStr == "" { + return errors.New("id must not be empty") + } + + if signalStr == "" { + return fmt.Errorf("in %q id: the part before %s should not be empty", idStr, typeAndNameSeparator) + } + + if len(items) > 1 { + // "name" part is present. 
+ nameStr = strings.TrimSpace(items[1]) + if nameStr == "" { + return fmt.Errorf("in %q id: the part after %s should not be empty", idStr, typeAndNameSeparator) + } + if err := validateName(nameStr); err != nil { + return fmt.Errorf("in %q id: %w", nameStr, err) + } + } + + var err error + if i.signal, err = globalsignal.NewSignal(signalStr); err != nil { + return fmt.Errorf("in %q id: %w", idStr, err) + } + i.name = nameStr + + return nil +} + +// String returns the ID string representation as "signal[/name]" format. +func (i ID) String() string { + if i.name == "" { + return i.signal.String() + } + + return i.signal.String() + typeAndNameSeparator + i.name +} + +// nameRegexp is used to validate the name of an ID. A name can consist of +// 1 to 1024 unicode characters excluding whitespace, control characters, and +// symbols. +var nameRegexp = regexp.MustCompile(`^[^\pZ\pC\pS]+$`) + +func validateName(nameStr string) error { + if len(nameStr) > 1024 { + return fmt.Errorf("name %q is longer than 1024 characters (%d characters)", nameStr, len(nameStr)) + } + if !nameRegexp.MatchString(nameStr) { + return fmt.Errorf("invalid character(s) in name %q", nameStr) + } + return nil +} diff --git a/vendor/go.opentelemetry.io/collector/pipeline/signal.go b/vendor/go.opentelemetry.io/collector/pipeline/signal.go new file mode 100644 index 00000000000..77376c999ad --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pipeline/signal.go @@ -0,0 +1,22 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package pipeline // import "go.opentelemetry.io/collector/pipeline" + +import ( + "errors" + + "go.opentelemetry.io/collector/pipeline/internal/globalsignal" +) + +// Signal represents the signals supported by the collector. We currently support +// collecting metrics, traces and logs, this can expand in the future. +type Signal = globalsignal.Signal + +var ErrSignalNotSupported = errors.New("telemetry type is not supported") + +var ( + SignalTraces = globalsignal.MustNewSignal("traces") + SignalMetrics = globalsignal.MustNewSignal("metrics") + SignalLogs = globalsignal.MustNewSignal("logs") +) diff --git a/vendor/go.opentelemetry.io/collector/pipeline/xpipeline/LICENSE b/vendor/go.opentelemetry.io/collector/pipeline/xpipeline/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pipeline/xpipeline/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
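The new pipeline module is built on the Signal/ID pair defined above: a signal is 1 to 62 lowercase ASCII letters, and an ID is rendered and parsed in the `signal[/name]` form that collector configs use (the processor hunks below swap component.ErrDataTypeIsNotSupported for pipeline.ErrSignalNotSupported accordingly). A small sketch, outside the patch, using only the exported API from pipeline.go and signal.go; the expected outputs in the comments are derived from the code shown above:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pipeline"
)

func main() {
	// IDs built from the predefined signals; String() renders the
	// "signal[/name]" form used as a config key.
	traces := pipeline.NewID(pipeline.SignalTraces)
	named := pipeline.NewIDWithName(pipeline.SignalMetrics, "generator")
	fmt.Println(traces.String(), named.String()) // traces metrics/generator

	// UnmarshalText parses and validates a config key.
	var id pipeline.ID
	if err := id.UnmarshalText([]byte("logs/audit")); err != nil {
		panic(err)
	}
	fmt.Println(id.Signal(), id.Name()) // logs audit

	// Uppercase (or otherwise invalid) signals are rejected.
	if err := id.UnmarshalText([]byte("Profiles")); err != nil {
		fmt.Println(err) // in "Profiles" id: invalid character(s) in type "Profiles"
	}
}
```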
diff --git a/vendor/go.opentelemetry.io/collector/pipeline/xpipeline/Makefile b/vendor/go.opentelemetry.io/collector/pipeline/xpipeline/Makefile new file mode 100644 index 00000000000..ded7a36092d --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pipeline/xpipeline/Makefile @@ -0,0 +1 @@ +include ../../Makefile.Common diff --git a/vendor/go.opentelemetry.io/collector/pipeline/xpipeline/config.go b/vendor/go.opentelemetry.io/collector/pipeline/xpipeline/config.go new file mode 100644 index 00000000000..f37a7ea479d --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pipeline/xpipeline/config.go @@ -0,0 +1,8 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package xpipeline // import "go.opentelemetry.io/collector/pipeline/xpipeline" + +import "go.opentelemetry.io/collector/pipeline/internal/globalsignal" + +var SignalProfiles = globalsignal.MustNewSignal("profiles") diff --git a/vendor/go.opentelemetry.io/collector/processor/internal/obsmetrics.go b/vendor/go.opentelemetry.io/collector/processor/internal/obsmetrics.go new file mode 100644 index 00000000000..c96fbe5e9e0 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/processor/internal/obsmetrics.go @@ -0,0 +1,13 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/collector/processor/internal" + +const ( + MetricNameSep = "_" + + // ProcessorKey is the key used to identify processors in metrics and traces. + ProcessorKey = "processor" + + ProcessorMetricPrefix = ProcessorKey + MetricNameSep +) diff --git a/vendor/go.opentelemetry.io/collector/processor/processor.go b/vendor/go.opentelemetry.io/collector/processor/processor.go index 98feba69a57..8ecd4d497c6 100644 --- a/vendor/go.opentelemetry.io/collector/processor/processor.go +++ b/vendor/go.opentelemetry.io/collector/processor/processor.go @@ -5,17 +5,11 @@ package processor // import "go.opentelemetry.io/collector/processor" import ( "context" - "errors" "fmt" - "go.uber.org/zap" - "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer" -) - -var ( - errNilNextConsumer = errors.New("nil next Consumer") + "go.opentelemetry.io/collector/pipeline" ) // Traces is a processor that can consume traces. @@ -36,8 +30,8 @@ type Logs interface { consumer.Logs } -// CreateSettings is passed to Create* functions in Factory. -type CreateSettings struct { +// Settings is passed to Create* functions in Factory. +type Settings struct { // ID returns the ID of the component that will be created. ID component.ID @@ -50,41 +44,44 @@ type CreateSettings struct { // Factory is Factory interface for processors. // // This interface cannot be directly implemented. Implementations must -// use the NewProcessorFactory to implement it. +// use the NewFactory to implement it. type Factory interface { component.Factory - // CreateTracesProcessor creates a TracesProcessor based on this config. - // If the processor type does not support tracing or if the config is not valid, - // an error will be returned instead. - CreateTracesProcessor(ctx context.Context, set CreateSettings, cfg component.Config, nextConsumer consumer.Traces) (Traces, error) + // CreateTraces creates a Traces processor based on this config. + // If the processor type does not support traces, + // this function returns the error [pipeline.ErrSignalNotSupported]. + // Implementers can assume `next` is never nil. 
+ CreateTraces(ctx context.Context, set Settings, cfg component.Config, next consumer.Traces) (Traces, error) - // TracesProcessorStability gets the stability level of the TracesProcessor. - TracesProcessorStability() component.StabilityLevel + // TracesStability gets the stability level of the Traces processor. + TracesStability() component.StabilityLevel - // CreateMetricsProcessor creates a MetricsProcessor based on this config. - // If the processor type does not support metrics or if the config is not valid, - // an error will be returned instead. - CreateMetricsProcessor(ctx context.Context, set CreateSettings, cfg component.Config, nextConsumer consumer.Metrics) (Metrics, error) + // CreateMetrics creates a Metrics processor based on this config. + // If the processor type does not support metrics, + // this function returns the error [pipeline.ErrSignalNotSupported]. + // Implementers can assume `next` is never nil. + CreateMetrics(ctx context.Context, set Settings, cfg component.Config, next consumer.Metrics) (Metrics, error) - // MetricsProcessorStability gets the stability level of the MetricsProcessor. - MetricsProcessorStability() component.StabilityLevel + // MetricsStability gets the stability level of the Metrics processor. + MetricsStability() component.StabilityLevel - // CreateLogsProcessor creates a LogsProcessor based on the config. - // If the processor type does not support logs or if the config is not valid, - // an error will be returned instead. - CreateLogsProcessor(ctx context.Context, set CreateSettings, cfg component.Config, nextConsumer consumer.Logs) (Logs, error) + // CreateLogs creates a Logs processor based on the config. + // If the processor type does not support logs, + // this function returns the error [pipeline.ErrSignalNotSupported]. + // Implementers can assume `next` is never nil. + CreateLogs(ctx context.Context, set Settings, cfg component.Config, next consumer.Logs) (Logs, error) - // LogsProcessorStability gets the stability level of the LogsProcessor. - LogsProcessorStability() component.StabilityLevel + // LogsStability gets the stability level of the Logs processor. + LogsStability() component.StabilityLevel unexportedFactoryFunc() } // FactoryOption apply changes to Options. type FactoryOption interface { - // applyProcessorFactoryOption applies the option. - applyProcessorFactoryOption(o *factory) + // applyOption applies the option. + applyOption(o *factory) } var _ FactoryOption = (*factoryOptionFunc)(nil) @@ -92,57 +89,10 @@ var _ FactoryOption = (*factoryOptionFunc)(nil) // factoryOptionFunc is a FactoryOption created through a function. type factoryOptionFunc func(*factory) -func (f factoryOptionFunc) applyProcessorFactoryOption(o *factory) { +func (f factoryOptionFunc) applyOption(o *factory) { f(o) } -// CreateTracesFunc is the equivalent of Factory.CreateTraces(). -type CreateTracesFunc func(context.Context, CreateSettings, component.Config, consumer.Traces) (Traces, error) - -// CreateTracesProcessor implements Factory.CreateTracesProcessor(). -func (f CreateTracesFunc) CreateTracesProcessor( - ctx context.Context, - set CreateSettings, - cfg component.Config, - nextConsumer consumer.Traces) (Traces, error) { - if f == nil { - return nil, component.ErrDataTypeIsNotSupported - } - return f(ctx, set, cfg, nextConsumer) -} - -// CreateMetricsFunc is the equivalent of Factory.CreateMetrics(). 
-type CreateMetricsFunc func(context.Context, CreateSettings, component.Config, consumer.Metrics) (Metrics, error) - -// CreateMetricsProcessor implements Factory.CreateMetricsProcessor(). -func (f CreateMetricsFunc) CreateMetricsProcessor( - ctx context.Context, - set CreateSettings, - cfg component.Config, - nextConsumer consumer.Metrics, -) (Metrics, error) { - if f == nil { - return nil, component.ErrDataTypeIsNotSupported - } - return f(ctx, set, cfg, nextConsumer) -} - -// CreateLogsFunc is the equivalent of Factory.CreateLogs(). -type CreateLogsFunc func(context.Context, CreateSettings, component.Config, consumer.Logs) (Logs, error) - -// CreateLogsProcessor implements Factory.CreateLogsProcessor(). -func (f CreateLogsFunc) CreateLogsProcessor( - ctx context.Context, - set CreateSettings, - cfg component.Config, - nextConsumer consumer.Logs, -) (Logs, error) { - if f == nil { - return nil, component.ErrDataTypeIsNotSupported - } - return f(ctx, set, cfg, nextConsumer) -} - type factory struct { cfgType component.Type component.CreateDefaultConfigFunc @@ -160,18 +110,51 @@ func (f *factory) Type() component.Type { func (f *factory) unexportedFactoryFunc() {} -func (f factory) TracesProcessorStability() component.StabilityLevel { +func (f *factory) TracesStability() component.StabilityLevel { return f.tracesStabilityLevel } -func (f factory) MetricsProcessorStability() component.StabilityLevel { +func (f *factory) MetricsStability() component.StabilityLevel { return f.metricsStabilityLevel } -func (f factory) LogsProcessorStability() component.StabilityLevel { +func (f *factory) LogsStability() component.StabilityLevel { return f.logsStabilityLevel } +// CreateTracesFunc is the equivalent of Factory.CreateTraces(). +type CreateTracesFunc func(context.Context, Settings, component.Config, consumer.Traces) (Traces, error) + +// CreateTraces implements Factory.CreateTraces. +func (f CreateTracesFunc) CreateTraces(ctx context.Context, set Settings, cfg component.Config, next consumer.Traces) (Traces, error) { + if f == nil { + return nil, pipeline.ErrSignalNotSupported + } + return f(ctx, set, cfg, next) +} + +// CreateMetricsFunc is the equivalent of Factory.CreateMetrics(). +type CreateMetricsFunc func(context.Context, Settings, component.Config, consumer.Metrics) (Metrics, error) + +// CreateMetrics implements Factory.CreateMetrics. +func (f CreateMetricsFunc) CreateMetrics(ctx context.Context, set Settings, cfg component.Config, next consumer.Metrics) (Metrics, error) { + if f == nil { + return nil, pipeline.ErrSignalNotSupported + } + return f(ctx, set, cfg, next) +} + +// CreateLogsFunc is the equivalent of Factory.CreateLogs. +type CreateLogsFunc func(context.Context, Settings, component.Config, consumer.Logs) (Logs, error) + +// CreateLogs implements Factory.CreateLogs(). +func (f CreateLogsFunc) CreateLogs(ctx context.Context, set Settings, cfg component.Config, next consumer.Logs) (Logs, error) { + if f == nil { + return nil, pipeline.ErrSignalNotSupported + } + return f(ctx, set, cfg, next) +} + // WithTraces overrides the default "error not supported" implementation for CreateTraces and the default "undefined" stability level. 
func WithTraces(createTraces CreateTracesFunc, sl component.StabilityLevel) FactoryOption { return factoryOptionFunc(func(o *factory) { @@ -203,7 +186,7 @@ func NewFactory(cfgType component.Type, createDefaultConfig component.CreateDefa CreateDefaultConfigFunc: createDefaultConfig, } for _, opt := range options { - opt.applyProcessorFactoryOption(f) + opt.applyOption(f) } return f } @@ -220,86 +203,3 @@ func MakeFactoryMap(factories ...Factory) (map[component.Type]Factory, error) { } return fMap, nil } - -// Builder processor is a helper struct that given a set of Configs and Factories helps with creating processors. -type Builder struct { - cfgs map[component.ID]component.Config - factories map[component.Type]Factory -} - -// NewBuilder creates a new processor.Builder to help with creating components form a set of configs and factories. -func NewBuilder(cfgs map[component.ID]component.Config, factories map[component.Type]Factory) *Builder { - return &Builder{cfgs: cfgs, factories: factories} -} - -// CreateTraces creates a Traces processor based on the settings and config. -func (b *Builder) CreateTraces(ctx context.Context, set CreateSettings, next consumer.Traces) (Traces, error) { - if next == nil { - return nil, errNilNextConsumer - } - cfg, existsCfg := b.cfgs[set.ID] - if !existsCfg { - return nil, fmt.Errorf("processor %q is not configured", set.ID) - } - - f, existsFactory := b.factories[set.ID.Type()] - if !existsFactory { - return nil, fmt.Errorf("processor factory not available for: %q", set.ID) - } - - logStabilityLevel(set.Logger, f.TracesProcessorStability()) - return f.CreateTracesProcessor(ctx, set, cfg, next) -} - -// CreateMetrics creates a Metrics processor based on the settings and config. -func (b *Builder) CreateMetrics(ctx context.Context, set CreateSettings, next consumer.Metrics) (Metrics, error) { - if next == nil { - return nil, errNilNextConsumer - } - cfg, existsCfg := b.cfgs[set.ID] - if !existsCfg { - return nil, fmt.Errorf("processor %q is not configured", set.ID) - } - - f, existsFactory := b.factories[set.ID.Type()] - if !existsFactory { - return nil, fmt.Errorf("processor factory not available for: %q", set.ID) - } - - logStabilityLevel(set.Logger, f.MetricsProcessorStability()) - return f.CreateMetricsProcessor(ctx, set, cfg, next) -} - -// CreateLogs creates a Logs processor based on the settings and config. -func (b *Builder) CreateLogs(ctx context.Context, set CreateSettings, next consumer.Logs) (Logs, error) { - if next == nil { - return nil, errNilNextConsumer - } - cfg, existsCfg := b.cfgs[set.ID] - if !existsCfg { - return nil, fmt.Errorf("processor %q is not configured", set.ID) - } - - f, existsFactory := b.factories[set.ID.Type()] - if !existsFactory { - return nil, fmt.Errorf("processor factory not available for: %q", set.ID) - } - - logStabilityLevel(set.Logger, f.LogsProcessorStability()) - return f.CreateLogsProcessor(ctx, set, cfg, next) -} - -func (b *Builder) Factory(componentType component.Type) component.Factory { - return b.factories[componentType] -} - -// logStabilityLevel logs the stability level of a component. The log level is set to info for -// undefined, unmaintained, deprecated and development. The log level is set to debug -// for alpha, beta and stable. 
-func logStabilityLevel(logger *zap.Logger, sl component.StabilityLevel) { - if sl >= component.StabilityLevelAlpha { - logger.Debug(sl.LogMessage()) - } else { - logger.Info(sl.LogMessage()) - } -} diff --git a/vendor/go.opentelemetry.io/collector/processor/processorhelper/documentation.md b/vendor/go.opentelemetry.io/collector/processor/processorhelper/documentation.md index 073fc9a7219..1c1b10f9f46 100644 --- a/vendor/go.opentelemetry.io/collector/processor/processorhelper/documentation.md +++ b/vendor/go.opentelemetry.io/collector/processor/processorhelper/documentation.md @@ -6,74 +6,18 @@ The following telemetry is emitted by this component. -### processor_accepted_log_records +### otelcol_processor_incoming_items -Number of log records successfully pushed into the next component in the pipeline. +Number of items passed to the processor. [alpha] | Unit | Metric Type | Value Type | Monotonic | | ---- | ----------- | ---------- | --------- | -| 1 | Sum | Int | true | +| {items} | Sum | Int | true | -### processor_accepted_metric_points +### otelcol_processor_outgoing_items -Number of metric points successfully pushed into the next component in the pipeline. +Number of items emitted from the processor. [alpha] | Unit | Metric Type | Value Type | Monotonic | | ---- | ----------- | ---------- | --------- | -| 1 | Sum | Int | true | - -### processor_accepted_spans - -Number of spans successfully pushed into the next component in the pipeline. - -| Unit | Metric Type | Value Type | Monotonic | -| ---- | ----------- | ---------- | --------- | -| 1 | Sum | Int | true | - -### processor_dropped_log_records - -Number of log records that were dropped. - -| Unit | Metric Type | Value Type | Monotonic | -| ---- | ----------- | ---------- | --------- | -| 1 | Sum | Int | true | - -### processor_dropped_metric_points - -Number of metric points that were dropped. - -| Unit | Metric Type | Value Type | Monotonic | -| ---- | ----------- | ---------- | --------- | -| 1 | Sum | Int | true | - -### processor_dropped_spans - -Number of spans that were dropped. - -| Unit | Metric Type | Value Type | Monotonic | -| ---- | ----------- | ---------- | --------- | -| 1 | Sum | Int | true | - -### processor_refused_log_records - -Number of log records that were rejected by the next component in the pipeline. - -| Unit | Metric Type | Value Type | Monotonic | -| ---- | ----------- | ---------- | --------- | -| 1 | Sum | Int | true | - -### processor_refused_metric_points - -Number of metric points that were rejected by the next component in the pipeline. - -| Unit | Metric Type | Value Type | Monotonic | -| ---- | ----------- | ---------- | --------- | -| 1 | Sum | Int | true | - -### processor_refused_spans - -Number of spans that were rejected by the next component in the pipeline. 
- -| Unit | Metric Type | Value Type | Monotonic | -| ---- | ----------- | ---------- | --------- | -| 1 | Sum | Int | true | +| {items} | Sum | Int | true | diff --git a/vendor/go.opentelemetry.io/collector/processor/processorhelper/internal/metadata/generated_telemetry.go b/vendor/go.opentelemetry.io/collector/processor/processorhelper/internal/metadata/generated_telemetry.go index 568b689edd5..0cc9d5c5245 100644 --- a/vendor/go.opentelemetry.io/collector/processor/processorhelper/internal/metadata/generated_telemetry.go +++ b/vendor/go.opentelemetry.io/collector/processor/processorhelper/internal/metadata/generated_telemetry.go @@ -24,97 +24,49 @@ func Tracer(settings component.TelemetrySettings) trace.Tracer { // TelemetryBuilder provides an interface for components to report telemetry // as defined in metadata and user config. type TelemetryBuilder struct { - ProcessorAcceptedLogRecords metric.Int64Counter - ProcessorAcceptedMetricPoints metric.Int64Counter - ProcessorAcceptedSpans metric.Int64Counter - ProcessorDroppedLogRecords metric.Int64Counter - ProcessorDroppedMetricPoints metric.Int64Counter - ProcessorDroppedSpans metric.Int64Counter - ProcessorRefusedLogRecords metric.Int64Counter - ProcessorRefusedMetricPoints metric.Int64Counter - ProcessorRefusedSpans metric.Int64Counter - level configtelemetry.Level + meter metric.Meter + ProcessorIncomingItems metric.Int64Counter + ProcessorOutgoingItems metric.Int64Counter } -// telemetryBuilderOption applies changes to default builder. -type telemetryBuilderOption func(*TelemetryBuilder) +// TelemetryBuilderOption applies changes to default builder. +type TelemetryBuilderOption interface { + apply(*TelemetryBuilder) +} -// WithLevel sets the current telemetry level for the component. -func WithLevel(lvl configtelemetry.Level) telemetryBuilderOption { - return func(builder *TelemetryBuilder) { - builder.level = lvl - } +type telemetryBuilderOptionFunc func(mb *TelemetryBuilder) + +func (tbof telemetryBuilderOptionFunc) apply(mb *TelemetryBuilder) { + tbof(mb) } // NewTelemetryBuilder provides a struct with methods to update all internal telemetry // for a component -func NewTelemetryBuilder(settings component.TelemetrySettings, options ...telemetryBuilderOption) (*TelemetryBuilder, error) { - builder := TelemetryBuilder{level: configtelemetry.LevelBasic} +func NewTelemetryBuilder(settings component.TelemetrySettings, options ...TelemetryBuilderOption) (*TelemetryBuilder, error) { + builder := TelemetryBuilder{} for _, op := range options { - op(&builder) + op.apply(&builder) } - var ( - err, errs error - meter metric.Meter - ) - if builder.level >= configtelemetry.LevelBasic { - meter = Meter(settings) - } else { - meter = noop.Meter{} - } - builder.ProcessorAcceptedLogRecords, err = meter.Int64Counter( - "processor_accepted_log_records", - metric.WithDescription("Number of log records successfully pushed into the next component in the pipeline."), - metric.WithUnit("1"), - ) - errs = errors.Join(errs, err) - builder.ProcessorAcceptedMetricPoints, err = meter.Int64Counter( - "processor_accepted_metric_points", - metric.WithDescription("Number of metric points successfully pushed into the next component in the pipeline."), - metric.WithUnit("1"), - ) - errs = errors.Join(errs, err) - builder.ProcessorAcceptedSpans, err = meter.Int64Counter( - "processor_accepted_spans", - metric.WithDescription("Number of spans successfully pushed into the next component in the pipeline."), - metric.WithUnit("1"), - ) - errs = errors.Join(errs, err) - 
builder.ProcessorDroppedLogRecords, err = meter.Int64Counter( - "processor_dropped_log_records", - metric.WithDescription("Number of log records that were dropped."), - metric.WithUnit("1"), + builder.meter = Meter(settings) + var err, errs error + builder.ProcessorIncomingItems, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Counter( + "otelcol_processor_incoming_items", + metric.WithDescription("Number of items passed to the processor. [alpha]"), + metric.WithUnit("{items}"), ) errs = errors.Join(errs, err) - builder.ProcessorDroppedMetricPoints, err = meter.Int64Counter( - "processor_dropped_metric_points", - metric.WithDescription("Number of metric points that were dropped."), - metric.WithUnit("1"), - ) - errs = errors.Join(errs, err) - builder.ProcessorDroppedSpans, err = meter.Int64Counter( - "processor_dropped_spans", - metric.WithDescription("Number of spans that were dropped."), - metric.WithUnit("1"), - ) - errs = errors.Join(errs, err) - builder.ProcessorRefusedLogRecords, err = meter.Int64Counter( - "processor_refused_log_records", - metric.WithDescription("Number of log records that were rejected by the next component in the pipeline."), - metric.WithUnit("1"), - ) - errs = errors.Join(errs, err) - builder.ProcessorRefusedMetricPoints, err = meter.Int64Counter( - "processor_refused_metric_points", - metric.WithDescription("Number of metric points that were rejected by the next component in the pipeline."), - metric.WithUnit("1"), - ) - errs = errors.Join(errs, err) - builder.ProcessorRefusedSpans, err = meter.Int64Counter( - "processor_refused_spans", - metric.WithDescription("Number of spans that were rejected by the next component in the pipeline."), - metric.WithUnit("1"), + builder.ProcessorOutgoingItems, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Counter( + "otelcol_processor_outgoing_items", + metric.WithDescription("Number of items emitted from the processor. [alpha]"), + metric.WithUnit("{items}"), ) errs = errors.Join(errs, err) return &builder, errs } + +func getLeveledMeter(meter metric.Meter, cfgLevel, srvLevel configtelemetry.Level) metric.Meter { + if cfgLevel <= srvLevel { + return meter + } + return noop.Meter{} +} diff --git a/vendor/go.opentelemetry.io/collector/processor/processorhelper/logs.go b/vendor/go.opentelemetry.io/collector/processor/processorhelper/logs.go index ade2f45a385..23c702cd2a3 100644 --- a/vendor/go.opentelemetry.io/collector/processor/processorhelper/logs.go +++ b/vendor/go.opentelemetry.io/collector/processor/processorhelper/logs.go @@ -12,6 +12,7 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pipeline" "go.opentelemetry.io/collector/processor" ) @@ -19,47 +20,56 @@ import ( // If error is returned then returned data are ignored. It MUST not call the next component. type ProcessLogsFunc func(context.Context, plog.Logs) (plog.Logs, error) -type logProcessor struct { +type logs struct { component.StartFunc component.ShutdownFunc consumer.Logs } -// NewLogsProcessor creates a processor.Logs that ensure context propagation and the right tags are set. -func NewLogsProcessor( +// NewLogs creates a processor.Logs that ensure context propagation and the right tags are set. 
+func NewLogs( _ context.Context, - set processor.CreateSettings, + set processor.Settings, _ component.Config, nextConsumer consumer.Logs, logsFunc ProcessLogsFunc, options ...Option, ) (processor.Logs, error) { - // TODO: Add observability metrics support if logsFunc == nil { return nil, errors.New("nil logsFunc") } + obs, err := newObsReport(set, pipeline.SignalLogs) + if err != nil { + return nil, err + } + eventOptions := spanAttributes(set.ID) bs := fromOptions(options) logsConsumer, err := consumer.NewLogs(func(ctx context.Context, ld plog.Logs) error { span := trace.SpanFromContext(ctx) span.AddEvent("Start processing.", eventOptions) - var err error - ld, err = logsFunc(ctx, ld) + recordsIn := ld.LogRecordCount() + + var errFunc error + ld, errFunc = logsFunc(ctx, ld) span.AddEvent("End processing.", eventOptions) - if err != nil { - if errors.Is(err, ErrSkipProcessingData) { + if errFunc != nil { + obs.recordInOut(ctx, recordsIn, 0) + if errors.Is(errFunc, ErrSkipProcessingData) { return nil } - return err + return errFunc } + recordsOut := ld.LogRecordCount() + obs.recordInOut(ctx, recordsIn, recordsOut) return nextConsumer.ConsumeLogs(ctx, ld) }, bs.consumerOptions...) if err != nil { return nil, err } - return &logProcessor{ + return &logs{ StartFunc: bs.StartFunc, ShutdownFunc: bs.ShutdownFunc, Logs: logsConsumer, diff --git a/vendor/go.opentelemetry.io/collector/processor/processorhelper/metadata.yaml b/vendor/go.opentelemetry.io/collector/processor/processorhelper/metadata.yaml index 7e0c72f5c9c..32ae7621b12 100644 --- a/vendor/go.opentelemetry.io/collector/processor/processorhelper/metadata.yaml +++ b/vendor/go.opentelemetry.io/collector/processor/processorhelper/metadata.yaml @@ -5,78 +5,25 @@ status: not_component: true stability: beta: [traces, metrics, logs] - distributions: [core, contrib] telemetry: metrics: - processor_accepted_spans: + processor_incoming_items: enabled: true - description: Number of spans successfully pushed into the next component in the pipeline. - unit: 1 + stability: + level: alpha + description: Number of items passed to the processor. + unit: "{items}" sum: value_type: int monotonic: true - processor_refused_spans: + processor_outgoing_items: enabled: true - description: Number of spans that were rejected by the next component in the pipeline. - unit: 1 + stability: + level: alpha + description: Number of items emitted from the processor. + unit: "{items}" sum: value_type: int monotonic: true - - processor_dropped_spans: - enabled: true - description: Number of spans that were dropped. - unit: 1 - sum: - value_type: int - monotonic: true - - processor_accepted_metric_points: - enabled: true - description: Number of metric points successfully pushed into the next component in the pipeline. - unit: 1 - sum: - value_type: int - monotonic: true - - processor_refused_metric_points: - enabled: true - description: Number of metric points that were rejected by the next component in the pipeline. - unit: 1 - sum: - value_type: int - monotonic: true - - processor_dropped_metric_points: - enabled: true - description: Number of metric points that were dropped. - unit: 1 - sum: - value_type: int - monotonic: true - - processor_accepted_log_records: - enabled: true - description: Number of log records successfully pushed into the next component in the pipeline. - unit: 1 - sum: - value_type: int - monotonic: true - - processor_refused_log_records: - enabled: true - description: Number of log records that were rejected by the next component in the pipeline. 
- unit: 1 - sum: - value_type: int - monotonic: true - - processor_dropped_log_records: - enabled: true - description: Number of log records that were dropped. - unit: 1 - sum: - value_type: int - monotonic: true \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/collector/processor/processorhelper/metrics.go b/vendor/go.opentelemetry.io/collector/processor/processorhelper/metrics.go index ac3802722d0..0bd32553335 100644 --- a/vendor/go.opentelemetry.io/collector/processor/processorhelper/metrics.go +++ b/vendor/go.opentelemetry.io/collector/processor/processorhelper/metrics.go @@ -12,6 +12,7 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pipeline" "go.opentelemetry.io/collector/processor" ) @@ -19,47 +20,56 @@ import ( // If error is returned then returned data are ignored. It MUST not call the next component. type ProcessMetricsFunc func(context.Context, pmetric.Metrics) (pmetric.Metrics, error) -type metricsProcessor struct { +type metrics struct { component.StartFunc component.ShutdownFunc consumer.Metrics } -// NewMetricsProcessor creates a processor.Metrics that ensure context propagation and the right tags are set. -func NewMetricsProcessor( +// NewMetrics creates a processor.Metrics that ensure context propagation and the right tags are set. +func NewMetrics( _ context.Context, - set processor.CreateSettings, + set processor.Settings, _ component.Config, nextConsumer consumer.Metrics, metricsFunc ProcessMetricsFunc, options ...Option, ) (processor.Metrics, error) { - // TODO: Add observability metrics support if metricsFunc == nil { return nil, errors.New("nil metricsFunc") } + obs, err := newObsReport(set, pipeline.SignalMetrics) + if err != nil { + return nil, err + } + eventOptions := spanAttributes(set.ID) bs := fromOptions(options) metricsConsumer, err := consumer.NewMetrics(func(ctx context.Context, md pmetric.Metrics) error { span := trace.SpanFromContext(ctx) span.AddEvent("Start processing.", eventOptions) - var err error - md, err = metricsFunc(ctx, md) + pointsIn := md.DataPointCount() + + var errFunc error + md, errFunc = metricsFunc(ctx, md) span.AddEvent("End processing.", eventOptions) - if err != nil { - if errors.Is(err, ErrSkipProcessingData) { + if errFunc != nil { + obs.recordInOut(ctx, pointsIn, 0) + if errors.Is(errFunc, ErrSkipProcessingData) { return nil } - return err + return errFunc } + pointsOut := md.DataPointCount() + obs.recordInOut(ctx, pointsIn, pointsOut) return nextConsumer.ConsumeMetrics(ctx, md) }, bs.consumerOptions...) 
if err != nil { return nil, err } - return &metricsProcessor{ + return &metrics{ StartFunc: bs.StartFunc, ShutdownFunc: bs.ShutdownFunc, Metrics: metricsConsumer, diff --git a/vendor/go.opentelemetry.io/collector/processor/processorhelper/obsreport.go b/vendor/go.opentelemetry.io/collector/processor/processorhelper/obsreport.go index 51aa26a9a88..a342e021d75 100644 --- a/vendor/go.opentelemetry.io/collector/processor/processorhelper/obsreport.go +++ b/vendor/go.opentelemetry.io/collector/processor/processorhelper/obsreport.go @@ -5,128 +5,38 @@ package processorhelper // import "go.opentelemetry.io/collector/processor/proce import ( "context" - "strings" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric" - "go.uber.org/zap" - "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics" + "go.opentelemetry.io/collector/pipeline" "go.opentelemetry.io/collector/processor" + "go.opentelemetry.io/collector/processor/internal" "go.opentelemetry.io/collector/processor/processorhelper/internal/metadata" ) -// BuildCustomMetricName is used to be build a metric name following -// the standards used in the Collector. The configType should be the same -// value used to identify the type on the config. -func BuildCustomMetricName(configType, metric string) string { - componentPrefix := obsmetrics.ProcessorMetricPrefix - if !strings.HasSuffix(componentPrefix, obsmetrics.MetricNameSep) { - componentPrefix += obsmetrics.MetricNameSep - } - if configType == "" { - return componentPrefix - } - return componentPrefix + configType + obsmetrics.MetricNameSep + metric -} - -// ObsReport is a helper to add observability to a processor. -type ObsReport struct { - logger *zap.Logger +const signalKey = "otel.signal" - otelAttrs []attribute.KeyValue +type obsReport struct { + otelAttrs metric.MeasurementOption telemetryBuilder *metadata.TelemetryBuilder } -// ObsReportSettings are settings for creating an ObsReport. -type ObsReportSettings struct { - ProcessorID component.ID - ProcessorCreateSettings processor.CreateSettings -} - -// NewObsReport creates a new Processor. 
-func NewObsReport(cfg ObsReportSettings) (*ObsReport, error) { - return newObsReport(cfg) -} - -func newObsReport(cfg ObsReportSettings) (*ObsReport, error) { - telemetryBuilder, err := metadata.NewTelemetryBuilder(cfg.ProcessorCreateSettings.TelemetrySettings, metadata.WithLevel(cfg.ProcessorCreateSettings.MetricsLevel)) +func newObsReport(set processor.Settings, signal pipeline.Signal) (*obsReport, error) { + telemetryBuilder, err := metadata.NewTelemetryBuilder(set.TelemetrySettings) if err != nil { return nil, err } - return &ObsReport{ - logger: cfg.ProcessorCreateSettings.Logger, - otelAttrs: []attribute.KeyValue{ - attribute.String(obsmetrics.ProcessorKey, cfg.ProcessorID.String()), - }, + return &obsReport{ + otelAttrs: metric.WithAttributeSet(attribute.NewSet( + attribute.String(internal.ProcessorKey, set.ID.String()), + attribute.String(signalKey, signal.String()), + )), telemetryBuilder: telemetryBuilder, }, nil } -func (or *ObsReport) recordData(ctx context.Context, dataType component.DataType, accepted, refused, dropped int64) { - var acceptedCount, refusedCount, droppedCount metric.Int64Counter - switch dataType { - case component.DataTypeTraces: - acceptedCount = or.telemetryBuilder.ProcessorAcceptedSpans - refusedCount = or.telemetryBuilder.ProcessorRefusedSpans - droppedCount = or.telemetryBuilder.ProcessorDroppedSpans - case component.DataTypeMetrics: - acceptedCount = or.telemetryBuilder.ProcessorAcceptedMetricPoints - refusedCount = or.telemetryBuilder.ProcessorRefusedMetricPoints - droppedCount = or.telemetryBuilder.ProcessorDroppedMetricPoints - case component.DataTypeLogs: - acceptedCount = or.telemetryBuilder.ProcessorAcceptedLogRecords - refusedCount = or.telemetryBuilder.ProcessorRefusedLogRecords - droppedCount = or.telemetryBuilder.ProcessorDroppedLogRecords - } - - acceptedCount.Add(ctx, accepted, metric.WithAttributes(or.otelAttrs...)) - refusedCount.Add(ctx, refused, metric.WithAttributes(or.otelAttrs...)) - droppedCount.Add(ctx, dropped, metric.WithAttributes(or.otelAttrs...)) -} - -// TracesAccepted reports that the trace data was accepted. -func (or *ObsReport) TracesAccepted(ctx context.Context, numSpans int) { - or.recordData(ctx, component.DataTypeTraces, int64(numSpans), int64(0), int64(0)) -} - -// TracesRefused reports that the trace data was refused. -func (or *ObsReport) TracesRefused(ctx context.Context, numSpans int) { - or.recordData(ctx, component.DataTypeTraces, int64(0), int64(numSpans), int64(0)) -} - -// TracesDropped reports that the trace data was dropped. -func (or *ObsReport) TracesDropped(ctx context.Context, numSpans int) { - or.recordData(ctx, component.DataTypeTraces, int64(0), int64(0), int64(numSpans)) -} - -// MetricsAccepted reports that the metrics were accepted. -func (or *ObsReport) MetricsAccepted(ctx context.Context, numPoints int) { - or.recordData(ctx, component.DataTypeMetrics, int64(numPoints), int64(0), int64(0)) -} - -// MetricsRefused reports that the metrics were refused. -func (or *ObsReport) MetricsRefused(ctx context.Context, numPoints int) { - or.recordData(ctx, component.DataTypeMetrics, int64(0), int64(numPoints), int64(0)) -} - -// MetricsDropped reports that the metrics were dropped. -func (or *ObsReport) MetricsDropped(ctx context.Context, numPoints int) { - or.recordData(ctx, component.DataTypeMetrics, int64(0), int64(0), int64(numPoints)) -} - -// LogsAccepted reports that the logs were accepted. 
-func (or *ObsReport) LogsAccepted(ctx context.Context, numRecords int) { - or.recordData(ctx, component.DataTypeLogs, int64(numRecords), int64(0), int64(0)) -} - -// LogsRefused reports that the logs were refused. -func (or *ObsReport) LogsRefused(ctx context.Context, numRecords int) { - or.recordData(ctx, component.DataTypeLogs, int64(0), int64(numRecords), int64(0)) -} - -// LogsDropped reports that the logs were dropped. -func (or *ObsReport) LogsDropped(ctx context.Context, numRecords int) { - or.recordData(ctx, component.DataTypeLogs, int64(0), int64(0), int64(numRecords)) +func (or *obsReport) recordInOut(ctx context.Context, incoming, outgoing int) { + or.telemetryBuilder.ProcessorIncomingItems.Add(ctx, int64(incoming), or.otelAttrs) + or.telemetryBuilder.ProcessorOutgoingItems.Add(ctx, int64(outgoing), or.otelAttrs) } diff --git a/vendor/go.opentelemetry.io/collector/processor/processorhelper/processor.go b/vendor/go.opentelemetry.io/collector/processor/processorhelper/processor.go index 8bdaa4fbf73..03d99b3375b 100644 --- a/vendor/go.opentelemetry.io/collector/processor/processorhelper/processor.go +++ b/vendor/go.opentelemetry.io/collector/processor/processorhelper/processor.go @@ -13,7 +13,7 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics" + "go.opentelemetry.io/collector/processor/internal" ) // ErrSkipProcessingData is a sentinel value to indicate when traces or metrics should intentionally be dropped @@ -22,30 +22,38 @@ import ( var ErrSkipProcessingData = errors.New("sentinel error to skip processing data from the remainder of the pipeline") // Option applies changes to baseSettings. -type Option func(*baseSettings) +type Option interface { + apply(*baseSettings) +} + +type optionFunc func(*baseSettings) + +func (of optionFunc) apply(e *baseSettings) { + of(e) +} // WithStart overrides the default Start function for a processor. // The default start function does nothing and always returns nil. func WithStart(start component.StartFunc) Option { - return func(o *baseSettings) { + return optionFunc(func(o *baseSettings) { o.StartFunc = start - } + }) } // WithShutdown overrides the default Shutdown function for a processor. // The default shutdown function does nothing and always returns nil. func WithShutdown(shutdown component.ShutdownFunc) Option { - return func(o *baseSettings) { + return optionFunc(func(o *baseSettings) { o.ShutdownFunc = shutdown - } + }) } // WithCapabilities overrides the default GetCapabilities function for a processor. // The default GetCapabilities function returns mutable capabilities.
func WithCapabilities(capabilities consumer.Capabilities) Option { - return func(o *baseSettings) { + return optionFunc(func(o *baseSettings) { o.consumerOptions = append(o.consumerOptions, consumer.WithCapabilities(capabilities)) - } + }) } type baseSettings struct { @@ -62,12 +70,12 @@ func fromOptions(options []Option) *baseSettings { } for _, op := range options { - op(opts) + op.apply(opts) } return opts } func spanAttributes(id component.ID) trace.EventOption { - return trace.WithAttributes(attribute.String(obsmetrics.ProcessorKey, id.String())) + return trace.WithAttributes(attribute.String(internal.ProcessorKey, id.String())) } diff --git a/vendor/go.opentelemetry.io/collector/processor/processorhelper/traces.go b/vendor/go.opentelemetry.io/collector/processor/processorhelper/traces.go index 578f65c7efa..f6389a2272a 100644 --- a/vendor/go.opentelemetry.io/collector/processor/processorhelper/traces.go +++ b/vendor/go.opentelemetry.io/collector/processor/processorhelper/traces.go @@ -12,6 +12,7 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/pdata/ptrace" + "go.opentelemetry.io/collector/pipeline" "go.opentelemetry.io/collector/processor" ) @@ -19,48 +20,56 @@ import ( // If an error is returned, the returned data is ignored. It MUST NOT call the next component. type ProcessTracesFunc func(context.Context, ptrace.Traces) (ptrace.Traces, error) -type tracesProcessor struct { +type traces struct { component.StartFunc component.ShutdownFunc consumer.Traces } -// NewTracesProcessor creates a processor.Traces that ensure context propagation and the right tags are set. -func NewTracesProcessor( +// NewTraces creates a processor.Traces that ensures context propagation and the right tags are set. +func NewTraces( _ context.Context, - set processor.CreateSettings, + set processor.Settings, _ component.Config, nextConsumer consumer.Traces, tracesFunc ProcessTracesFunc, options ...Option, ) (processor.Traces, error) { - // TODO: Add observability Traces support if tracesFunc == nil { return nil, errors.New("nil tracesFunc") } + obs, err := newObsReport(set, pipeline.SignalTraces) + if err != nil { + return nil, err + } + eventOptions := spanAttributes(set.ID) bs := fromOptions(options) traceConsumer, err := consumer.NewTraces(func(ctx context.Context, td ptrace.Traces) error { span := trace.SpanFromContext(ctx) span.AddEvent("Start processing.", eventOptions) - var err error - td, err = tracesFunc(ctx, td) + spansIn := td.SpanCount() + + var errFunc error + td, errFunc = tracesFunc(ctx, td) span.AddEvent("End processing.", eventOptions) - if err != nil { - if errors.Is(err, ErrSkipProcessingData) { + if errFunc != nil { + obs.recordInOut(ctx, spansIn, 0) + if errors.Is(errFunc, ErrSkipProcessingData) { return nil } - return err + return errFunc } + spansOut := td.SpanCount() + obs.recordInOut(ctx, spansIn, spansOut) return nextConsumer.ConsumeTraces(ctx, td) }, bs.consumerOptions...)
- if err != nil { return nil, err } - return &tracesProcessor{ + return &traces{ StartFunc: bs.StartFunc, ShutdownFunc: bs.ShutdownFunc, Traces: traceConsumer, diff --git a/vendor/go.opentelemetry.io/collector/processor/processortest/LICENSE b/vendor/go.opentelemetry.io/collector/processor/processortest/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/processor/processortest/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/collector/processor/processortest/Makefile b/vendor/go.opentelemetry.io/collector/processor/processortest/Makefile new file mode 100644 index 00000000000..ded7a36092d --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/processor/processortest/Makefile @@ -0,0 +1 @@ +include ../../Makefile.Common diff --git a/vendor/go.opentelemetry.io/collector/processor/processortest/nop_processor.go b/vendor/go.opentelemetry.io/collector/processor/processortest/nop_processor.go new file mode 100644 index 00000000000..adcbf60ca38 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/processor/processortest/nop_processor.go @@ -0,0 +1,70 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package processortest // import "go.opentelemetry.io/collector/processor/processortest" + +import ( + "context" + + "github.com/google/uuid" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/consumer/xconsumer" + "go.opentelemetry.io/collector/processor" + "go.opentelemetry.io/collector/processor/xprocessor" +) + +var nopType = component.MustNewType("nop") + +// NewNopSettings returns new nop settings for Create* functions. +func NewNopSettings() processor.Settings { + return processor.Settings{ + ID: component.NewIDWithName(nopType, uuid.NewString()), + TelemetrySettings: componenttest.NewNopTelemetrySettings(), + BuildInfo: component.NewDefaultBuildInfo(), + } +} + +// NewNopFactory returns a processor.Factory that constructs nop processors.
+func NewNopFactory() processor.Factory { + return xprocessor.NewFactory( + nopType, + func() component.Config { return &nopConfig{} }, + xprocessor.WithTraces(createTraces, component.StabilityLevelStable), + xprocessor.WithMetrics(createMetrics, component.StabilityLevelStable), + xprocessor.WithLogs(createLogs, component.StabilityLevelStable), + xprocessor.WithProfiles(createProfiles, component.StabilityLevelAlpha), + ) +} + +func createTraces(context.Context, processor.Settings, component.Config, consumer.Traces) (processor.Traces, error) { + return nopInstance, nil +} + +func createMetrics(context.Context, processor.Settings, component.Config, consumer.Metrics) (processor.Metrics, error) { + return nopInstance, nil +} + +func createLogs(context.Context, processor.Settings, component.Config, consumer.Logs) (processor.Logs, error) { + return nopInstance, nil +} + +func createProfiles(context.Context, processor.Settings, component.Config, xconsumer.Profiles) (xprocessor.Profiles, error) { + return nopInstance, nil +} + +type nopConfig struct{} + +var nopInstance = &nop{ + Consumer: consumertest.NewNop(), +} + +// nop acts as a processor for testing purposes. +type nop struct { + component.StartFunc + component.ShutdownFunc + consumertest.Consumer +} diff --git a/vendor/go.opentelemetry.io/collector/processor/processortest/shutdown_verifier.go b/vendor/go.opentelemetry.io/collector/processor/processortest/shutdown_verifier.go new file mode 100644 index 00000000000..9fe4b9a048c --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/processor/processortest/shutdown_verifier.go @@ -0,0 +1,99 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package processortest // import "go.opentelemetry.io/collector/processor/processortest" + +import ( + "context" + "errors" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/pdata/testdata" + "go.opentelemetry.io/collector/pipeline" + "go.opentelemetry.io/collector/processor" +) + +func verifyTracesDoesNotProduceAfterShutdown(t *testing.T, factory processor.Factory, cfg component.Config) { + // Create a proc and send its output to a sink. + nextSink := new(consumertest.TracesSink) + proc, err := factory.CreateTraces(context.Background(), NewNopSettings(), cfg, nextSink) + if errors.Is(err, pipeline.ErrSignalNotSupported) { + return + } + require.NoError(t, err) + assert.NoError(t, proc.Start(context.Background(), componenttest.NewNopHost())) + + // Send some traces to the proc. + const generatedCount = 10 + for i := 0; i < generatedCount; i++ { + require.NoError(t, proc.ConsumeTraces(context.Background(), testdata.GenerateTraces(1))) + } + + // Now shutdown the proc. + assert.NoError(t, proc.Shutdown(context.Background())) + + // Shutdown() has completed, so the proc must have sent everything we + // gave it to the next sink. + assert.EqualValues(t, generatedCount, nextSink.SpanCount()) +} + +func verifyLogsDoesNotProduceAfterShutdown(t *testing.T, factory processor.Factory, cfg component.Config) { + // Create a proc and send its output to a sink.
+ nextSink := new(consumertest.LogsSink) + proc, err := factory.CreateLogs(context.Background(), NewNopSettings(), cfg, nextSink) + if errors.Is(err, pipeline.ErrSignalNotSupported) { + return + } + require.NoError(t, err) + assert.NoError(t, proc.Start(context.Background(), componenttest.NewNopHost())) + + // Send some logs to the proc. + const generatedCount = 10 + for i := 0; i < generatedCount; i++ { + require.NoError(t, proc.ConsumeLogs(context.Background(), testdata.GenerateLogs(1))) + } + + // Now shutdown the proc. + assert.NoError(t, proc.Shutdown(context.Background())) + + // Shutdown() has completed, so the proc must have sent everything we + // gave it to the next sink. + assert.EqualValues(t, generatedCount, nextSink.LogRecordCount()) +} + +func verifyMetricsDoesNotProduceAfterShutdown(t *testing.T, factory processor.Factory, cfg component.Config) { + // Create a proc and send its output to a sink. + nextSink := new(consumertest.MetricsSink) + proc, err := factory.CreateMetrics(context.Background(), NewNopSettings(), cfg, nextSink) + if errors.Is(err, pipeline.ErrSignalNotSupported) { + return + } + require.NoError(t, err) + assert.NoError(t, proc.Start(context.Background(), componenttest.NewNopHost())) + + // Send some metrics to the proc. testdata.GenerateMetrics creates metrics with 2 data points each. + const generatedCount = 10 + for i := 0; i < generatedCount; i++ { + require.NoError(t, proc.ConsumeMetrics(context.Background(), testdata.GenerateMetrics(1))) + } + + // Now shutdown the proc. + assert.NoError(t, proc.Shutdown(context.Background())) + + // Shutdown() has completed, so the proc must have sent everything we + // gave it to the next sink. + assert.EqualValues(t, generatedCount*2, nextSink.DataPointCount()) +} + +// VerifyShutdown verifies the processor doesn't produce telemetry data after shutdown. +func VerifyShutdown(t *testing.T, factory processor.Factory, cfg component.Config) { + verifyTracesDoesNotProduceAfterShutdown(t, factory, cfg) + verifyLogsDoesNotProduceAfterShutdown(t, factory, cfg) + verifyMetricsDoesNotProduceAfterShutdown(t, factory, cfg) +} diff --git a/vendor/go.opentelemetry.io/collector/processor/processortest/unhealthy_processor.go b/vendor/go.opentelemetry.io/collector/processor/processortest/unhealthy_processor.go new file mode 100644 index 00000000000..ce6d7d2f87a --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/processor/processortest/unhealthy_processor.go @@ -0,0 +1,62 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package processortest // import "go.opentelemetry.io/collector/processor/processortest" + +import ( + "context" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componentstatus" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/processor" +) + +// NewUnhealthyProcessorFactory returns a processor.Factory that constructs unhealthy processors.
+func NewUnhealthyProcessorFactory() processor.Factory { + return processor.NewFactory( + component.MustNewType("unhealthy"), + func() component.Config { + return &struct{}{} + }, + processor.WithTraces(createUnhealthyTraces, component.StabilityLevelStable), + processor.WithMetrics(createUnhealthyMetrics, component.StabilityLevelStable), + processor.WithLogs(createUnhealthyLogs, component.StabilityLevelStable), + ) +} + +func createUnhealthyTraces(_ context.Context, set processor.Settings, _ component.Config, _ consumer.Traces) (processor.Traces, error) { + return &unhealthy{ + Consumer: consumertest.NewNop(), + telemetry: set.TelemetrySettings, + }, nil +} + +func createUnhealthyMetrics(_ context.Context, set processor.Settings, _ component.Config, _ consumer.Metrics) (processor.Metrics, error) { + return &unhealthy{ + Consumer: consumertest.NewNop(), + telemetry: set.TelemetrySettings, + }, nil +} + +func createUnhealthyLogs(_ context.Context, set processor.Settings, _ component.Config, _ consumer.Logs) (processor.Logs, error) { + return &unhealthy{ + Consumer: consumertest.NewNop(), + telemetry: set.TelemetrySettings, + }, nil +} + +type unhealthy struct { + component.StartFunc + component.ShutdownFunc + consumertest.Consumer + telemetry component.TelemetrySettings +} + +func (p unhealthy) Start(_ context.Context, host component.Host) error { + go func() { + componentstatus.ReportStatus(host, componentstatus.NewEvent(componentstatus.StatusRecoverableError)) + }() + return nil +} diff --git a/vendor/go.opentelemetry.io/collector/processor/xprocessor/LICENSE b/vendor/go.opentelemetry.io/collector/processor/xprocessor/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/processor/xprocessor/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
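The processortest helpers added above (NewNopSettings, NewNopFactory, VerifyShutdown) pair naturally with the renamed processorhelper constructors earlier in this patch, whose obsReport now records the consolidated `processor_incoming_items`/`processor_outgoing_items` counters in place of the removed accepted/refused/dropped metrics. As an illustrative sketch only (not part of this patch; the test name, the pass-through function, and the empty config literal are hypothetical), the two packages combine like this:

```go
package processorhelper_example_test

import (
	"context"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"go.opentelemetry.io/collector/component/componenttest"
	"go.opentelemetry.io/collector/consumer/consumertest"
	"go.opentelemetry.io/collector/pdata/ptrace"
	"go.opentelemetry.io/collector/pdata/testdata"
	"go.opentelemetry.io/collector/processor/processorhelper"
	"go.opentelemetry.io/collector/processor/processortest"
)

// TestPassthroughTraces builds a pass-through traces processor with the
// renamed NewTraces constructor and nop settings from processortest.
func TestPassthroughTraces(t *testing.T) {
	sink := new(consumertest.TracesSink)

	proc, err := processorhelper.NewTraces(
		context.Background(),
		processortest.NewNopSettings(), // nop telemetry plus a unique component ID
		struct{}{},                     // config is unused by the helper; any value satisfies component.Config
		sink,
		func(_ context.Context, td ptrace.Traces) (ptrace.Traces, error) {
			return td, nil // pass the traces through unchanged
		},
	)
	require.NoError(t, err)

	require.NoError(t, proc.Start(context.Background(), componenttest.NewNopHost()))
	require.NoError(t, proc.ConsumeTraces(context.Background(), testdata.GenerateTraces(1)))
	assert.Equal(t, 1, sink.SpanCount())
	require.NoError(t, proc.Shutdown(context.Background()))
}
```

Under this sketch's assumptions, the single ConsumeTraces call is what increments `processor_incoming_items` and `processor_outgoing_items` by the span count, via the recordInOut helper shown in obsreport.go above.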
diff --git a/vendor/go.opentelemetry.io/collector/processor/xprocessor/Makefile b/vendor/go.opentelemetry.io/collector/processor/xprocessor/Makefile new file mode 100644 index 00000000000..ded7a36092d --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/processor/xprocessor/Makefile @@ -0,0 +1 @@ +include ../../Makefile.Common diff --git a/vendor/go.opentelemetry.io/collector/processor/xprocessor/processor.go b/vendor/go.opentelemetry.io/collector/processor/xprocessor/processor.go new file mode 100644 index 00000000000..b029abbf519 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/processor/xprocessor/processor.go @@ -0,0 +1,114 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package xprocessor // import "go.opentelemetry.io/collector/processor/xprocessor" + +import ( + "context" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/consumer/xconsumer" + "go.opentelemetry.io/collector/pipeline" + "go.opentelemetry.io/collector/processor" +) + +// Factory is a component.Factory interface for processors. +// +// This interface cannot be directly implemented. Implementations must +// use NewFactory to implement it. +type Factory interface { + processor.Factory + + // CreateProfiles creates a Profiles processor based on this config. + // If the processor type does not support profiles or if the config is not valid, + // an error will be returned instead. + CreateProfiles(ctx context.Context, set processor.Settings, cfg component.Config, next xconsumer.Profiles) (Profiles, error) + + // ProfilesStability gets the stability level of the Profiles processor. + ProfilesStability() component.StabilityLevel +} + +// Profiles is a processor that can consume profiles. +type Profiles interface { + component.Component + xconsumer.Profiles +} + +// CreateProfilesFunc is the equivalent of Factory.CreateProfiles(). +type CreateProfilesFunc func(context.Context, processor.Settings, component.Config, xconsumer.Profiles) (Profiles, error) + +// CreateProfiles implements Factory.CreateProfiles. +func (f CreateProfilesFunc) CreateProfiles(ctx context.Context, set processor.Settings, cfg component.Config, next xconsumer.Profiles) (Profiles, error) { + if f == nil { + return nil, pipeline.ErrSignalNotSupported + } + return f(ctx, set, cfg, next) +} + +// FactoryOption applies changes to factoryOpts. +type FactoryOption interface { + // applyOption applies the option. + applyOption(o *factoryOpts) +} + +// factoryOptionFunc is a FactoryOption created through a function. +type factoryOptionFunc func(*factoryOpts) + +func (f factoryOptionFunc) applyOption(o *factoryOpts) { + f(o) +} + +type factory struct { + processor.Factory + CreateProfilesFunc + profilesStabilityLevel component.StabilityLevel +} + +func (f factory) ProfilesStability() component.StabilityLevel { + return f.profilesStabilityLevel +} + +type factoryOpts struct { + opts []processor.FactoryOption + *factory +} + +// WithTraces overrides the default "error not supported" implementation for CreateTraces and the default "undefined" stability level.
+func WithTraces(createTraces processor.CreateTracesFunc, sl component.StabilityLevel) FactoryOption { + return factoryOptionFunc(func(o *factoryOpts) { + o.opts = append(o.opts, processor.WithTraces(createTraces, sl)) + }) +} + +// WithMetrics overrides the default "error not supported" implementation for CreateMetrics and the default "undefined" stability level. +func WithMetrics(createMetrics processor.CreateMetricsFunc, sl component.StabilityLevel) FactoryOption { + return factoryOptionFunc(func(o *factoryOpts) { + o.opts = append(o.opts, processor.WithMetrics(createMetrics, sl)) + }) +} + +// WithLogs overrides the default "error not supported" implementation for CreateLogs and the default "undefined" stability level. +func WithLogs(createLogs processor.CreateLogsFunc, sl component.StabilityLevel) FactoryOption { + return factoryOptionFunc(func(o *factoryOpts) { + o.opts = append(o.opts, processor.WithLogs(createLogs, sl)) + }) +} + +// WithProfiles overrides the default "error not supported" implementation for CreateProfiles and the default "undefined" stability level. +func WithProfiles(createProfiles CreateProfilesFunc, sl component.StabilityLevel) FactoryOption { + return factoryOptionFunc(func(o *factoryOpts) { + o.profilesStabilityLevel = sl + o.CreateProfilesFunc = createProfiles + }) +} + +// NewFactory returns a Factory. +func NewFactory(cfgType component.Type, createDefaultConfig component.CreateDefaultConfigFunc, options ...FactoryOption) Factory { + opts := factoryOpts{factory: &factory{}} + for _, opt := range options { + opt.applyOption(&opts) + } + opts.factory.Factory = processor.NewFactory(cfgType, createDefaultConfig, opts.opts...) + return opts.factory +} diff --git a/vendor/go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics/obs_receiver.go b/vendor/go.opentelemetry.io/collector/receiver/internal/obsmetrics.go similarity index 85% rename from vendor/go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics/obs_receiver.go rename to vendor/go.opentelemetry.io/collector/receiver/internal/obsmetrics.go index bcb6306f81b..fc3a5cf9509 100644 --- a/vendor/go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics/obs_receiver.go +++ b/vendor/go.opentelemetry.io/collector/receiver/internal/obsmetrics.go @@ -1,9 +1,12 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package obsmetrics // import "go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics" +package internal // import "go.opentelemetry.io/collector/receiver/internal" const ( + // SpanNameSep is duplicated between receiver and exporter. + SpanNameSep = "/" + // ReceiverKey used to identify receivers in metrics and traces. ReceiverKey = "receiver" // TransportKey used to identify the transport used to receive the data. @@ -27,11 +30,7 @@ const ( // RefusedLogRecordsKey used to identify log records refused (i.e. not ingested) by the // Collector.
RefusedLogRecordsKey = "refused_log_records" -) -var ( - ReceiverPrefix = ReceiverKey + SpanNameSep - ReceiverMetricPrefix = ReceiverKey + MetricNameSep ReceiveTraceDataOperationSuffix = SpanNameSep + "TraceDataReceived" ReceiverMetricsOperationSuffix = SpanNameSep + "MetricsReceived" ReceiverLogsOperationSuffix = SpanNameSep + "LogsReceived" diff --git a/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/README.md b/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/README.md index a5572071629..aab0d924148 100644 --- a/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/README.md +++ b/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/README.md @@ -3,15 +3,19 @@ | Status | | | ------------- |-----------| -| Stability | [beta]: logs | +| Stability | [development]: profiles | +| | [beta]: logs | | | [stable]: traces, metrics | -| Distributions | [core], [contrib] | -| Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Areceiver%2Fotlp%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Areceiver%2Fotlp) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Areceiver%2Fotlp%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Areceiver%2Fotlp) | +| Distributions | [core], [contrib], [k8s], [otlp] | +| Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector?query=is%3Aissue%20is%3Aopen%20label%3Areceiver%2Fotlp%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector/issues?q=is%3Aopen+is%3Aissue+label%3Areceiver%2Fotlp) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector?query=is%3Aissue%20is%3Aclosed%20label%3Areceiver%2Fotlp%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector/issues?q=is%3Aclosed+is%3Aissue+label%3Areceiver%2Fotlp) | -[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta -[stable]: https://github.com/open-telemetry/opentelemetry-collector#stable +[development]: https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-stability.md#development +[beta]: https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-stability.md#beta +[stable]: https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-stability.md#stable [core]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol [contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib +[k8s]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-k8s +[otlp]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-otlp Receives data via gRPC or HTTP using [OTLP]( @@ -34,11 +38,11 @@ receivers: The following settings are configurable: -- `endpoint` (default = 0.0.0.0:4317 for grpc protocol, 0.0.0.0:4318 http protocol): +- `endpoint` (default = localhost:4317 for grpc protocol, localhost:4318 for http protocol): host:port on which
the receiver listens for data. The valid syntax is - described at https://github.com/grpc/grpc/blob/master/doc/naming.md. The - `component.UseLocalHostAsDefaultHost` feature gate changes these to localhost:4317 and - localhost:4318 respectively. This will become the default in a future release. + described at https://github.com/grpc/grpc/blob/master/doc/naming.md. See our + [security best practices doc](https://opentelemetry.io/docs/security/config-best-practices/#protect-against-denial-of-service-attacks) + to understand how to set the endpoint in different environments. ## Advanced Configuration @@ -55,15 +59,21 @@ The OTLP receiver can receive trace export calls via HTTP/JSON in addition to gRPC. The HTTP/JSON address is the same as gRPC as the protocol is recognized and processed accordingly. Note the serialization format needs to be [OTLP JSON](https://opentelemetry.io/docs/specs/otlp/#json-protobuf-encoding). -The HTTP/JSON configuration also provides `traces_url_path`, `metrics_url_path`, and `logs_url_path` -configuration to allow the URL paths that signal data needs to be sent to be modified per signal type. These default to -`/v1/traces`, `/v1/metrics`, and `/v1/logs` respectively. - -To write traces with HTTP/JSON, `POST` to `[address]/[traces_url_path]` for traces, -to `[address]/[metrics_url_path]` for metrics, to `[address]/[logs_url_path]` for logs. -The default port is `4318`. When using the `otlphttpexporter` peer to communicate with this component, -use the `traces_endpoint`, `metrics_endpoint`, and `logs_endpoint` settings in the `otlphttpexporter` to set the -proper URL to match the address and URL signal path on the `otlpreceiver`. +The HTTP/JSON configuration also provides `traces_url_path`, +`metrics_url_path`, `logs_url_path`, and `profiles_url_path` settings that +allow the URL path to which each signal type is sent to be customized. +These default to `/v1/traces`, `/v1/metrics`, `/v1/logs`, and +`/v1development/profiles` respectively. + +To write traces with HTTP/JSON, `POST` to `[address]/[traces_url_path]` for +traces, to `[address]/[metrics_url_path]` for metrics, to +`[address]/[logs_url_path]` for logs, and to `[address]/[profiles_url_path]` for +profiles. +The default port is `4318`. When using the `otlphttpexporter` peer to +communicate with this component, use the `traces_endpoint`, +`metrics_endpoint`, `logs_endpoint`, and `profiles_endpoint` settings in the +`otlphttpexporter` to set the proper URL to match the address and URL signal +path on the `otlpreceiver`.
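For a quick smoke test of the HTTP/JSON path just described, a client can `POST` an OTLP JSON body to the traces path directly. Below is a minimal sketch (not part of this patch), assuming a receiver running locally with the default `localhost:4318` endpoint and default `/v1/traces` path; the empty `resourceSpans` payload is only a placeholder:

```go
package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	// An empty ResourceSpans list is a valid OTLP/JSON export request.
	body := []byte(`{"resourceSpans":[]}`)

	// Defaults assumed here: port 4318 and path /v1/traces, both configurable on the receiver.
	resp, err := http.Post("http://localhost:4318/v1/traces", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status) // a healthy receiver should answer 200 OK
}
```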
### CORS (Cross-origin resource sharing) @@ -93,7 +103,5 @@ receivers: max_age: 7200 ``` -[beta]: https://github.com/open-telemetry/opentelemetry-collector#beta [contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib [core]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol -[stable]: https://github.com/open-telemetry/opentelemetry-collector#stable diff --git a/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/config.go b/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/config.go index 8e512f5a278..b40a0476fed 100644 --- a/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/config.go +++ b/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/config.go @@ -46,8 +46,10 @@ type Config struct { Protocols `mapstructure:"protocols"` } -var _ component.Config = (*Config)(nil) -var _ confmap.Unmarshaler = (*Config)(nil) +var ( + _ component.Config = (*Config)(nil) + _ confmap.Unmarshaler = (*Config)(nil) +) // Validate checks the receiver configuration is valid func (cfg *Config) Validate() error { diff --git a/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/config.md b/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/config.md index df6cf931a12..b77b4bdae73 100644 --- a/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/config.md +++ b/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/config.md @@ -20,7 +20,7 @@ Config defines configuration for OTLP receiver. | Name | Type | Default | Docs | |------------------------|-----------------------------------------------------------------------|--------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| endpoint | string | 0.0.0.0:4317 | Endpoint configures the address for this network connection. For TCP and UDP networks, the address has the form "host:port". The host must be a literal IP address, or a host name that can be resolved to IP addresses. The port must be a literal port number or a service name. If the host is a literal IPv6 address it must be enclosed in square brackets, as in "[2001:db8::1]:80" or "[fe80::1%zone]:80". The zone specifies the scope of the literal IPv6 address as defined in RFC 4007. | +| endpoint | string | localhost:4317 | Endpoint configures the address for this network connection. For TCP and UDP networks, the address has the form "host:port". The host must be a literal IP address, or a host name that can be resolved to IP addresses. The port must be a literal port number or a service name. If the host is a literal IPv6 address it must be enclosed in square brackets, as in "[2001:db8::1]:80" or "[fe80::1%zone]:80". The zone specifies the scope of the literal IPv6 address as defined in RFC 4007. | | transport | string | tcp | Transport to use. Known protocols are "tcp", "tcp4" (IPv4-only), "tcp6" (IPv6-only), "udp", "udp4" (IPv4-only), "udp6" (IPv6-only), "ip", "ip4" (IPv4-only), "ip6" (IPv6-only), "unix", "unixgram" and "unixpacket". 
| | tls | [configtls-TLSServerSetting](#configtls-tlsserversetting) | | Configures the protocol to use TLS. The default value is nil, which will cause the protocol to not use TLS. | | max_recv_msg_size_mib | uint64 | | MaxRecvMsgSizeMiB sets the maximum size (in MiB) of messages accepted by the server. | @@ -73,10 +73,10 @@ Config defines configuration for OTLP receiver. | Name | Type | Default | Docs | |-----------------------|-----------------------------------------------------------|--------------|-----------------------------------------------------------------------------------------------------------------------------------------| -| endpoint | string | 0.0.0.0:4318 | Endpoint configures the listening address for the server. | +| endpoint | string | localhost:4318 | Endpoint configures the listening address for the server. | | tls | [configtls-TLSServerSetting](#configtls-tlsserversetting) | | TLSSetting struct exposes TLS client configuration. | | cors | [confighttp-CORSConfig](#confighttp-corsconfig) | | CORSConfig configures a receiver for HTTP cross-origin resource sharing (CORS). | -| max_request_body_size | int | 0 | MaxRequestBodySize configures the maximum allowed body size in bytes for a single request. The default `0` means there's no restriction | +| max_request_body_size | int | 20971520 | MaxRequestBodySize configures the maximum allowed body size in bytes for a single request. The default `20971520` means 20MiB | ### confighttp-CORSConfig diff --git a/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/encoder.go b/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/encoder.go index d37437c7ca9..6462aec745d 100644 --- a/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/encoder.go +++ b/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/encoder.go @@ -12,6 +12,7 @@ import ( "go.opentelemetry.io/collector/pdata/plog/plogotlp" "go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp" + "go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp" "go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp" ) @@ -30,10 +31,12 @@ type encoder interface { unmarshalTracesRequest(buf []byte) (ptraceotlp.ExportRequest, error) unmarshalMetricsRequest(buf []byte) (pmetricotlp.ExportRequest, error) unmarshalLogsRequest(buf []byte) (plogotlp.ExportRequest, error) + unmarshalProfilesRequest(buf []byte) (pprofileotlp.ExportRequest, error) marshalTracesResponse(ptraceotlp.ExportResponse) ([]byte, error) marshalMetricsResponse(pmetricotlp.ExportResponse) ([]byte, error) marshalLogsResponse(plogotlp.ExportResponse) ([]byte, error) + marshalProfilesResponse(pprofileotlp.ExportResponse) ([]byte, error) marshalStatus(rsp *spb.Status) ([]byte, error) @@ -60,6 +63,12 @@ func (protoEncoder) unmarshalLogsRequest(buf []byte) (plogotlp.ExportRequest, er return req, err } +func (protoEncoder) unmarshalProfilesRequest(buf []byte) (pprofileotlp.ExportRequest, error) { + req := pprofileotlp.NewExportRequest() + err := req.UnmarshalProto(buf) + return req, err +} + func (protoEncoder) marshalTracesResponse(resp ptraceotlp.ExportResponse) ([]byte, error) { return resp.MarshalProto() } @@ -72,6 +81,10 @@ func (protoEncoder) marshalLogsResponse(resp plogotlp.ExportResponse) ([]byte, e return resp.MarshalProto() } +func (protoEncoder) marshalProfilesResponse(resp pprofileotlp.ExportResponse) ([]byte, error) { + return resp.MarshalProto() +} + func (protoEncoder) marshalStatus(resp *spb.Status) ([]byte, error) { return proto.Marshal(resp) } @@ -100,6 +113,12 @@ func (jsonEncoder) 
unmarshalLogsRequest(buf []byte) (plogotlp.ExportRequest, err return req, err } +func (jsonEncoder) unmarshalProfilesRequest(buf []byte) (pprofileotlp.ExportRequest, error) { + req := pprofileotlp.NewExportRequest() + err := req.UnmarshalJSON(buf) + return req, err +} + func (jsonEncoder) marshalTracesResponse(resp ptraceotlp.ExportResponse) ([]byte, error) { return resp.MarshalJSON() } @@ -112,6 +131,10 @@ func (jsonEncoder) marshalLogsResponse(resp plogotlp.ExportResponse) ([]byte, er return resp.MarshalJSON() } +func (jsonEncoder) marshalProfilesResponse(resp pprofileotlp.ExportResponse) ([]byte, error) { + return resp.MarshalJSON() +} + func (jsonEncoder) marshalStatus(resp *spb.Status) ([]byte, error) { buf := new(bytes.Buffer) err := jsonPbMarshaler.Marshal(buf, resp) diff --git a/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/factory.go b/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/factory.go index 4b148c40be3..cb7cc0a6c7d 100644 --- a/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/factory.go +++ b/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/factory.go @@ -11,29 +11,29 @@ import ( "go.opentelemetry.io/collector/config/confighttp" "go.opentelemetry.io/collector/config/confignet" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/internal/localhostgate" + "go.opentelemetry.io/collector/consumer/xconsumer" "go.opentelemetry.io/collector/internal/sharedcomponent" "go.opentelemetry.io/collector/receiver" "go.opentelemetry.io/collector/receiver/otlpreceiver/internal/metadata" + "go.opentelemetry.io/collector/receiver/xreceiver" ) const ( - grpcPort = 4317 - httpPort = 4318 - - defaultTracesURLPath = "/v1/traces" - defaultMetricsURLPath = "/v1/metrics" - defaultLogsURLPath = "/v1/logs" + defaultTracesURLPath = "/v1/traces" + defaultMetricsURLPath = "/v1/metrics" + defaultLogsURLPath = "/v1/logs" + defaultProfilesURLPath = "/v1development/profiles" ) // NewFactory creates a new OTLP receiver factory. func NewFactory() receiver.Factory { - return receiver.NewFactory( + return xreceiver.NewFactory( metadata.Type, createDefaultConfig, - receiver.WithTraces(createTraces, metadata.TracesStability), - receiver.WithMetrics(createMetrics, metadata.MetricsStability), - receiver.WithLogs(createLog, metadata.LogsStability), + xreceiver.WithTraces(createTraces, metadata.TracesStability), + xreceiver.WithMetrics(createMetrics, metadata.MetricsStability), + xreceiver.WithLogs(createLog, metadata.LogsStability), + xreceiver.WithProfiles(createProfiles, metadata.ProfilesStability), ) } @@ -43,7 +43,7 @@ func createDefaultConfig() component.Config { Protocols: Protocols{ GRPC: &configgrpc.ServerConfig{ NetAddr: confignet.AddrConfig{ - Endpoint: localhostgate.EndpointForPort(grpcPort), + Endpoint: "localhost:4317", Transport: confignet.TransportTypeTCP, }, // We almost write 0 bytes, so no need to tune WriteBufferSize. @@ -51,7 +51,7 @@ func createDefaultConfig() component.Config { }, HTTP: &HTTPConfig{ ServerConfig: &confighttp.ServerConfig{ - Endpoint: localhostgate.EndpointForPort(httpPort), + Endpoint: "localhost:4318", }, TracesURLPath: defaultTracesURLPath, MetricsURLPath: defaultMetricsURLPath, @@ -64,7 +64,7 @@ func createDefaultConfig() component.Config { // createTraces creates a trace receiver based on provided config. 
 func createTraces(
 	_ context.Context,
-	set receiver.CreateSettings,
+	set receiver.Settings,
 	cfg component.Config,
 	nextConsumer consumer.Traces,
 ) (receiver.Traces, error) {
@@ -74,7 +74,6 @@ func createTraces(
 		func() (*otlpReceiver, error) {
 			return newOtlpReceiver(oCfg, &set)
 		},
-		&set.TelemetrySettings,
 	)
 	if err != nil {
 		return nil, err
 	}
@@ -87,7 +86,7 @@ func createTraces(
 // createMetrics creates a metrics receiver based on provided config.
 func createMetrics(
 	_ context.Context,
-	set receiver.CreateSettings,
+	set receiver.Settings,
 	cfg component.Config,
 	consumer consumer.Metrics,
 ) (receiver.Metrics, error) {
@@ -97,7 +96,6 @@ func createMetrics(
 		func() (*otlpReceiver, error) {
 			return newOtlpReceiver(oCfg, &set)
 		},
-		&set.TelemetrySettings,
 	)
 	if err != nil {
 		return nil, err
 	}
@@ -110,7 +108,7 @@ func createMetrics(
 // createLog creates a log receiver based on provided config.
 func createLog(
 	_ context.Context,
-	set receiver.CreateSettings,
+	set receiver.Settings,
 	cfg component.Config,
 	consumer consumer.Logs,
 ) (receiver.Logs, error) {
@@ -120,7 +118,6 @@ func createLog(
 		func() (*otlpReceiver, error) {
 			return newOtlpReceiver(oCfg, &set)
 		},
-		&set.TelemetrySettings,
 	)
 	if err != nil {
 		return nil, err
 	}
@@ -130,9 +127,31 @@ func createLog(
 	return r, nil
 }
 
+// createProfiles creates a profiles receiver based on provided config.
+func createProfiles(
+	_ context.Context,
+	set receiver.Settings,
+	cfg component.Config,
+	nextConsumer xconsumer.Profiles,
+) (xreceiver.Profiles, error) {
+	oCfg := cfg.(*Config)
+	r, err := receivers.LoadOrStore(
+		oCfg,
+		func() (*otlpReceiver, error) {
+			return newOtlpReceiver(oCfg, &set)
+		},
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	r.Unwrap().registerProfilesConsumer(nextConsumer)
+	return r, nil
+}
+
 // This is the map of already created OTLP receivers for particular configurations.
-// We maintain this map because the Factory is asked trace and metric receivers separately
-// when it gets CreateTracesReceiver() and CreateMetricsReceiver() but they must not
+// We maintain this map because the receiver.Factory is asked for trace and metric receivers separately
+// when it gets CreateTraces() and CreateMetrics() but they must not
 // create separate objects, they must use one otlpReceiver object per configuration.
 // When the receiver is shutdown it should be removed from this map so the same configuration
 // can be recreated successfully.
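
As a brief illustration of what the factory changes above mean for callers, here is a Go sketch, offered as an illustration rather than as part of the patch. It relies only on symbols shown in the vendored sources earlier in this patch: `NewFactory`, `CreateDefaultConfig`, the exported `Config` type, and its embedded `Protocols` fields.

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/receiver/otlpreceiver"
)

func main() {
	factory := otlpreceiver.NewFactory()

	// The default config now carries fixed localhost endpoints instead of
	// values derived from the removed localhostgate feature gate.
	cfg := factory.CreateDefaultConfig().(*otlpreceiver.Config)
	fmt.Println(cfg.GRPC.NetAddr.Endpoint) // expected: localhost:4317
	fmt.Println(cfg.HTTP.Endpoint)         // expected: localhost:4318
}
```

Because every create function funnels through `receivers.LoadOrStore` keyed on the config, asking the same factory for traces, metrics, logs, and profiles receivers with one config yields a single shared `otlpReceiver`, which is why the comment above stresses removing entries from the map on shutdown.
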
diff --git a/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/internal/errors/errors.go b/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/internal/errors/errors.go
index 0619b7aa8ea..519a31fbc25 100644
--- a/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/internal/errors/errors.go
+++ b/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/internal/errors/errors.go
@@ -40,6 +40,18 @@ func GetHTTPStatusCodeFromStatus(s *status.Status) int {
 	case codes.ResourceExhausted:
 		return http.StatusTooManyRequests
 	// Not Retryable
+	case codes.InvalidArgument:
+		return http.StatusBadRequest
+	// Not Retryable
+	case codes.Unauthenticated:
+		return http.StatusUnauthorized
+	// Not Retryable
+	case codes.PermissionDenied:
+		return http.StatusForbidden
+	// Not Retryable
+	case codes.Unimplemented:
+		return http.StatusNotFound
+	// Not Retryable
 	default:
 		return http.StatusInternalServerError
 	}
diff --git a/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/internal/metadata/generated_status.go b/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/internal/metadata/generated_status.go
index c9cff844fa2..5fe9771f5dd 100644
--- a/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/internal/metadata/generated_status.go
+++ b/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/internal/metadata/generated_status.go
@@ -7,11 +7,13 @@ import (
 )
 
 var (
-	Type = component.MustNewType("otlp")
+	Type      = component.MustNewType("otlp")
+	ScopeName = "go.opentelemetry.io/collector/receiver/otlpreceiver"
 )
 
 const (
-	LogsStability    = component.StabilityLevelBeta
-	TracesStability  = component.StabilityLevelStable
-	MetricsStability = component.StabilityLevelStable
+	ProfilesStability = component.StabilityLevelDevelopment
+	LogsStability     = component.StabilityLevelBeta
+	TracesStability   = component.StabilityLevelStable
+	MetricsStability  = component.StabilityLevelStable
 )
diff --git a/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/internal/profiles/otlp.go b/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/internal/profiles/otlp.go
new file mode 100644
index 00000000000..269769b6824
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/internal/profiles/otlp.go
@@ -0,0 +1,48 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package profiles // import "go.opentelemetry.io/collector/receiver/otlpreceiver/internal/profiles"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/collector/consumer/xconsumer"
+	"go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp"
+	"go.opentelemetry.io/collector/receiver/otlpreceiver/internal/errors"
+)
+
+// Receiver is the type used to handle profiles from OpenTelemetry exporters.
+type Receiver struct {
+	pprofileotlp.UnimplementedGRPCServer
+	nextConsumer xconsumer.Profiles
+}
+
+// New creates a new Receiver reference.
+func New(nextConsumer xconsumer.Profiles) *Receiver {
+	return &Receiver{
+		nextConsumer: nextConsumer,
+	}
+}
+
+// Export implements the service Export profiles func.
+func (r *Receiver) Export(ctx context.Context, req pprofileotlp.ExportRequest) (pprofileotlp.ExportResponse, error) { + td := req.Profiles() + // We need to ensure that it propagates the receiver name as a tag + numProfiles := td.SampleCount() + if numProfiles == 0 { + return pprofileotlp.NewExportResponse(), nil + } + + err := r.nextConsumer.ConsumeProfiles(ctx, td) + // Use appropriate status codes for permanent/non-permanent errors + // If we return the error straightaway, then the grpc implementation will set status code to Unknown + // Refer: https://github.com/grpc/grpc-go/blob/v1.59.0/server.go#L1345 + // So, convert the error to appropriate grpc status and return the error + // NonPermanent errors will be converted to codes.Unavailable (equivalent to HTTP 503) + // Permanent errors will be converted to codes.InvalidArgument (equivalent to HTTP 400) + if err != nil { + return pprofileotlp.NewExportResponse(), errors.GetStatusFromError(err) + } + + return pprofileotlp.NewExportResponse(), nil +} diff --git a/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/metadata.yaml b/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/metadata.yaml index c3036a3341d..cae2267ea3a 100644 --- a/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/metadata.yaml +++ b/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/metadata.yaml @@ -1,8 +1,10 @@ type: otlp +github_project: open-telemetry/opentelemetry-collector status: class: receiver stability: stable: [traces, metrics] beta: [logs] - distributions: [core, contrib] + development: [profiles] + distributions: [core, contrib, k8s, otlp] diff --git a/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/otlp.go b/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/otlp.go index 43eed9b821a..0f8d3c66378 100644 --- a/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/otlp.go +++ b/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/otlp.go @@ -14,14 +14,18 @@ import ( "google.golang.org/grpc" "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componentstatus" "go.opentelemetry.io/collector/config/confighttp" "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/xconsumer" "go.opentelemetry.io/collector/pdata/plog/plogotlp" "go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp" + "go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp" "go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp" "go.opentelemetry.io/collector/receiver" "go.opentelemetry.io/collector/receiver/otlpreceiver/internal/logs" "go.opentelemetry.io/collector/receiver/otlpreceiver/internal/metrics" + "go.opentelemetry.io/collector/receiver/otlpreceiver/internal/profiles" "go.opentelemetry.io/collector/receiver/otlpreceiver/internal/trace" "go.opentelemetry.io/collector/receiver/receiverhelper" ) @@ -32,27 +36,29 @@ type otlpReceiver struct { serverGRPC *grpc.Server serverHTTP *http.Server - nextTraces consumer.Traces - nextMetrics consumer.Metrics - nextLogs consumer.Logs - shutdownWG sync.WaitGroup + nextTraces consumer.Traces + nextMetrics consumer.Metrics + nextLogs consumer.Logs + nextProfiles xconsumer.Profiles + shutdownWG sync.WaitGroup obsrepGRPC *receiverhelper.ObsReport obsrepHTTP *receiverhelper.ObsReport - settings *receiver.CreateSettings + settings *receiver.Settings } // newOtlpReceiver just creates the OpenTelemetry receiver services. 
It is the caller's // responsibility to invoke the respective Start*Reception methods as well // as the various Stop*Reception methods to end it. -func newOtlpReceiver(cfg *Config, set *receiver.CreateSettings) (*otlpReceiver, error) { +func newOtlpReceiver(cfg *Config, set *receiver.Settings) (*otlpReceiver, error) { r := &otlpReceiver{ - cfg: cfg, - nextTraces: nil, - nextMetrics: nil, - nextLogs: nil, - settings: set, + cfg: cfg, + nextTraces: nil, + nextMetrics: nil, + nextLogs: nil, + nextProfiles: nil, + settings: set, } var err error @@ -99,6 +105,10 @@ func (r *otlpReceiver) startGRPCServer(host component.Host) error { plogotlp.RegisterGRPCServer(r.serverGRPC, logs.New(r.nextLogs, r.obsrepGRPC)) } + if r.nextProfiles != nil { + pprofileotlp.RegisterGRPCServer(r.serverGRPC, profiles.New(r.nextProfiles)) + } + r.settings.Logger.Info("Starting GRPC server", zap.String("endpoint", r.cfg.GRPC.NetAddr.Endpoint)) var gln net.Listener if gln, err = r.cfg.GRPC.NetAddr.Listen(context.Background()); err != nil { @@ -110,7 +120,7 @@ func (r *otlpReceiver) startGRPCServer(host component.Host) error { defer r.shutdownWG.Done() if errGrpc := r.serverGRPC.Serve(gln); errGrpc != nil && !errors.Is(errGrpc, grpc.ErrServerStopped) { - r.settings.ReportStatus(component.NewFatalErrorEvent(errGrpc)) + componentstatus.ReportStatus(host, componentstatus.NewFatalErrorEvent(errGrpc)) } }() return nil @@ -144,6 +154,13 @@ func (r *otlpReceiver) startHTTPServer(ctx context.Context, host component.Host) }) } + if r.nextProfiles != nil { + httpProfilesReceiver := profiles.New(r.nextProfiles) + httpMux.HandleFunc(defaultProfilesURLPath, func(resp http.ResponseWriter, req *http.Request) { + handleProfiles(resp, req, httpProfilesReceiver) + }) + } + var err error if r.serverHTTP, err = r.cfg.HTTP.ToServer(ctx, host, r.settings.TelemetrySettings, httpMux, confighttp.WithErrorHandler(errorHandler)); err != nil { return err @@ -160,7 +177,7 @@ func (r *otlpReceiver) startHTTPServer(ctx context.Context, host component.Host) defer r.shutdownWG.Done() if errHTTP := r.serverHTTP.Serve(hln); errHTTP != nil && !errors.Is(errHTTP, http.ErrServerClosed) { - r.settings.ReportStatus(component.NewFatalErrorEvent(errHTTP)) + componentstatus.ReportStatus(host, componentstatus.NewFatalErrorEvent(errHTTP)) } }() return nil @@ -209,3 +226,7 @@ func (r *otlpReceiver) registerMetricsConsumer(mc consumer.Metrics) { func (r *otlpReceiver) registerLogsConsumer(lc consumer.Logs) { r.nextLogs = lc } + +func (r *otlpReceiver) registerProfilesConsumer(tc xconsumer.Profiles) { + r.nextProfiles = tc +} diff --git a/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/otlphttp.go b/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/otlphttp.go index b895b8d2e7c..4c9c8231fe7 100644 --- a/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/otlphttp.go +++ b/vendor/go.opentelemetry.io/collector/receiver/otlpreceiver/otlphttp.go @@ -16,6 +16,7 @@ import ( "go.opentelemetry.io/collector/receiver/otlpreceiver/internal/errors" "go.opentelemetry.io/collector/receiver/otlpreceiver/internal/logs" "go.opentelemetry.io/collector/receiver/otlpreceiver/internal/metrics" + "go.opentelemetry.io/collector/receiver/otlpreceiver/internal/profiles" "go.opentelemetry.io/collector/receiver/otlpreceiver/internal/trace" ) @@ -117,6 +118,37 @@ func handleLogs(resp http.ResponseWriter, req *http.Request, logsReceiver *logs. 
writeResponse(resp, enc.contentType(), http.StatusOK, msg) } +func handleProfiles(resp http.ResponseWriter, req *http.Request, profilesReceiver *profiles.Receiver) { + enc, ok := readContentType(resp, req) + if !ok { + return + } + + body, ok := readAndCloseBody(resp, req, enc) + if !ok { + return + } + + otlpReq, err := enc.unmarshalProfilesRequest(body) + if err != nil { + writeError(resp, enc, err, http.StatusBadRequest) + return + } + + otlpResp, err := profilesReceiver.Export(req.Context(), otlpReq) + if err != nil { + writeError(resp, enc, err, http.StatusInternalServerError) + return + } + + msg, err := enc.marshalProfilesResponse(otlpResp) + if err != nil { + writeError(resp, enc, err, http.StatusInternalServerError) + return + } + writeResponse(resp, enc.contentType(), http.StatusOK, msg) +} + func readContentType(resp http.ResponseWriter, req *http.Request) (encoder, bool) { if req.Method != http.MethodPost { handleUnmatchedMethod(resp) diff --git a/vendor/go.opentelemetry.io/collector/receiver/receiver.go b/vendor/go.opentelemetry.io/collector/receiver/receiver.go index 3b0f0ac8371..dd7242d5f77 100644 --- a/vendor/go.opentelemetry.io/collector/receiver/receiver.go +++ b/vendor/go.opentelemetry.io/collector/receiver/receiver.go @@ -5,22 +5,16 @@ package receiver // import "go.opentelemetry.io/collector/receiver" import ( "context" - "errors" "fmt" - "go.uber.org/zap" - "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer" -) - -var ( - errNilNextConsumer = errors.New("nil next Consumer") + "go.opentelemetry.io/collector/pipeline" ) // Traces receiver receives traces. // Its purpose is to translate data from any format to the collector's internal trace format. -// TracesReceiver feeds a consumer.Traces with data. +// Traces receiver feeds a consumer.Traces with data. // // For example, it could be Zipkin data source which translates Zipkin spans into ptrace.Traces. type Traces interface { @@ -29,7 +23,7 @@ type Traces interface { // Metrics receiver receives metrics. // Its purpose is to translate data from any format to the collector's internal metrics format. -// MetricsReceiver feeds a consumer.Metrics with data. +// Metrics receiver feeds a consumer.Metrics with data. // // For example, it could be Prometheus data source which translates Prometheus metrics into pmetric.Metrics. type Metrics interface { @@ -38,15 +32,15 @@ type Metrics interface { // Logs receiver receives logs. // Its purpose is to translate data from any format to the collector's internal logs data format. -// LogsReceiver feeds a consumer.Logs with data. +// Logs receiver feeds a consumer.Logs with data. // // For example, it could be a receiver that reads syslogs and convert them into plog.Logs. type Logs interface { component.Component } -// CreateSettings configures Receiver creators. -type CreateSettings struct { +// Settings configures receiver creators. +type Settings struct { // ID returns the ID of the component that will be created. ID component.ID @@ -56,47 +50,50 @@ type CreateSettings struct { BuildInfo component.BuildInfo } -// Factory is factory interface for receivers. +// Factory is a factory interface for receivers. // // This interface cannot be directly implemented. Implementations must -// use the NewReceiverFactory to implement it. +// use the NewFactory to implement it. type Factory interface { component.Factory - // CreateTracesReceiver creates a TracesReceiver based on this config. 
- // If the receiver type does not support tracing or if the config is not valid - // an error will be returned instead. `nextConsumer` is never nil. - CreateTracesReceiver(ctx context.Context, set CreateSettings, cfg component.Config, nextConsumer consumer.Traces) (Traces, error) + // CreateTraces creates a Traces based on this config. + // If the receiver type does not support traces, + // this function returns the error [pipeline.ErrSignalNotSupported]. + // Implementers can assume `next` is never nil. + CreateTraces(ctx context.Context, set Settings, cfg component.Config, next consumer.Traces) (Traces, error) - // TracesReceiverStability gets the stability level of the TracesReceiver. - TracesReceiverStability() component.StabilityLevel + // TracesStability gets the stability level of the Traces receiver. + TracesStability() component.StabilityLevel - // CreateMetricsReceiver creates a MetricsReceiver based on this config. - // If the receiver type does not support metrics or if the config is not valid - // an error will be returned instead. `nextConsumer` is never nil. - CreateMetricsReceiver(ctx context.Context, set CreateSettings, cfg component.Config, nextConsumer consumer.Metrics) (Metrics, error) + // CreateMetrics creates a Metrics based on this config. + // If the receiver type does not support metrics, + // this function returns the error [pipeline.ErrSignalNotSupported]. + // Implementers can assume `next` is never nil. + CreateMetrics(ctx context.Context, set Settings, cfg component.Config, next consumer.Metrics) (Metrics, error) - // MetricsReceiverStability gets the stability level of the MetricsReceiver. - MetricsReceiverStability() component.StabilityLevel + // MetricsStability gets the stability level of the Metrics receiver. + MetricsStability() component.StabilityLevel - // CreateLogsReceiver creates a LogsReceiver based on this config. - // If the receiver type does not support the data type or if the config is not valid - // an error will be returned instead. `nextConsumer` is never nil. - CreateLogsReceiver(ctx context.Context, set CreateSettings, cfg component.Config, nextConsumer consumer.Logs) (Logs, error) + // CreateLogs creates a Logs based on this config. + // If the receiver type does not support logs, + // this function returns the error [pipeline.ErrSignalNotSupported]. + // Implementers can assume `next` is never nil. + CreateLogs(ctx context.Context, set Settings, cfg component.Config, next consumer.Logs) (Logs, error) - // LogsReceiverStability gets the stability level of the LogsReceiver. - LogsReceiverStability() component.StabilityLevel + // LogsStability gets the stability level of the Logs receiver. + LogsStability() component.StabilityLevel unexportedFactoryFunc() } -// FactoryOption apply changes to ReceiverOptions. +// FactoryOption apply changes to Factory. type FactoryOption interface { // applyOption applies the option. applyOption(o *factory) } -// factoryOptionFunc is an ReceiverFactoryOption created through a function. +// factoryOptionFunc is an FactoryOption created through a function. type factoryOptionFunc func(*factory) func (f factoryOptionFunc) applyOption(o *factory) { @@ -104,50 +101,36 @@ func (f factoryOptionFunc) applyOption(o *factory) { } // CreateTracesFunc is the equivalent of Factory.CreateTraces. -type CreateTracesFunc func(context.Context, CreateSettings, component.Config, consumer.Traces) (Traces, error) - -// CreateTracesReceiver implements Factory.CreateTracesReceiver(). 
-func (f CreateTracesFunc) CreateTracesReceiver( - ctx context.Context, - set CreateSettings, - cfg component.Config, - nextConsumer consumer.Traces) (Traces, error) { +type CreateTracesFunc func(context.Context, Settings, component.Config, consumer.Traces) (Traces, error) + +// CreateTraces implements Factory.CreateTraces(). +func (f CreateTracesFunc) CreateTraces(ctx context.Context, set Settings, cfg component.Config, next consumer.Traces) (Traces, error) { if f == nil { - return nil, component.ErrDataTypeIsNotSupported + return nil, pipeline.ErrSignalNotSupported } - return f(ctx, set, cfg, nextConsumer) + return f(ctx, set, cfg, next) } // CreateMetricsFunc is the equivalent of Factory.CreateMetrics. -type CreateMetricsFunc func(context.Context, CreateSettings, component.Config, consumer.Metrics) (Metrics, error) - -// CreateMetricsReceiver implements Factory.CreateMetricsReceiver(). -func (f CreateMetricsFunc) CreateMetricsReceiver( - ctx context.Context, - set CreateSettings, - cfg component.Config, - nextConsumer consumer.Metrics, -) (Metrics, error) { +type CreateMetricsFunc func(context.Context, Settings, component.Config, consumer.Metrics) (Metrics, error) + +// CreateMetrics implements Factory.CreateMetrics. +func (f CreateMetricsFunc) CreateMetrics(ctx context.Context, set Settings, cfg component.Config, next consumer.Metrics) (Metrics, error) { if f == nil { - return nil, component.ErrDataTypeIsNotSupported + return nil, pipeline.ErrSignalNotSupported } - return f(ctx, set, cfg, nextConsumer) + return f(ctx, set, cfg, next) } -// CreateLogsFunc is the equivalent of ReceiverFactory.CreateLogsReceiver(). -type CreateLogsFunc func(context.Context, CreateSettings, component.Config, consumer.Logs) (Logs, error) +// CreateLogsFunc is the equivalent of Factory.CreateLogs. +type CreateLogsFunc func(context.Context, Settings, component.Config, consumer.Logs) (Logs, error) -// CreateLogsReceiver implements Factory.CreateLogsReceiver(). -func (f CreateLogsFunc) CreateLogsReceiver( - ctx context.Context, - set CreateSettings, - cfg component.Config, - nextConsumer consumer.Logs, -) (Logs, error) { +// CreateLogs implements Factory.CreateLogs. +func (f CreateLogsFunc) CreateLogs(ctx context.Context, set Settings, cfg component.Config, next consumer.Logs) (Logs, error) { if f == nil { - return nil, component.ErrDataTypeIsNotSupported + return nil, pipeline.ErrSignalNotSupported } - return f(ctx, set, cfg, nextConsumer) + return f(ctx, set, cfg, next) } type factory struct { @@ -167,39 +150,39 @@ func (f *factory) Type() component.Type { func (f *factory) unexportedFactoryFunc() {} -func (f *factory) TracesReceiverStability() component.StabilityLevel { +func (f *factory) TracesStability() component.StabilityLevel { return f.tracesStabilityLevel } -func (f *factory) MetricsReceiverStability() component.StabilityLevel { +func (f *factory) MetricsStability() component.StabilityLevel { return f.metricsStabilityLevel } -func (f *factory) LogsReceiverStability() component.StabilityLevel { +func (f *factory) LogsStability() component.StabilityLevel { return f.logsStabilityLevel } -// WithTraces overrides the default "error not supported" implementation for CreateTracesReceiver and the default "undefined" stability level. -func WithTraces(createTracesReceiver CreateTracesFunc, sl component.StabilityLevel) FactoryOption { +// WithTraces overrides the default "error not supported" implementation for Factory.CreateTraces and the default "undefined" stability level. 
+func WithTraces(createTraces CreateTracesFunc, sl component.StabilityLevel) FactoryOption { return factoryOptionFunc(func(o *factory) { o.tracesStabilityLevel = sl - o.CreateTracesFunc = createTracesReceiver + o.CreateTracesFunc = createTraces }) } -// WithMetrics overrides the default "error not supported" implementation for CreateMetricsReceiver and the default "undefined" stability level. -func WithMetrics(createMetricsReceiver CreateMetricsFunc, sl component.StabilityLevel) FactoryOption { +// WithMetrics overrides the default "error not supported" implementation for Factory.CreateMetrics and the default "undefined" stability level. +func WithMetrics(createMetrics CreateMetricsFunc, sl component.StabilityLevel) FactoryOption { return factoryOptionFunc(func(o *factory) { o.metricsStabilityLevel = sl - o.CreateMetricsFunc = createMetricsReceiver + o.CreateMetricsFunc = createMetrics }) } -// WithLogs overrides the default "error not supported" implementation for CreateLogsReceiver and the default "undefined" stability level. -func WithLogs(createLogsReceiver CreateLogsFunc, sl component.StabilityLevel) FactoryOption { +// WithLogs overrides the default "error not supported" implementation for Factory.CreateLogs and the default "undefined" stability level. +func WithLogs(createLogs CreateLogsFunc, sl component.StabilityLevel) FactoryOption { return factoryOptionFunc(func(o *factory) { o.logsStabilityLevel = sl - o.CreateLogsFunc = createLogsReceiver + o.CreateLogsFunc = createLogs }) } @@ -227,86 +210,3 @@ func MakeFactoryMap(factories ...Factory) (map[component.Type]Factory, error) { } return fMap, nil } - -// Builder receiver is a helper struct that given a set of Configs and Factories helps with creating receivers. -type Builder struct { - cfgs map[component.ID]component.Config - factories map[component.Type]Factory -} - -// NewBuilder creates a new receiver.Builder to help with creating components form a set of configs and factories. -func NewBuilder(cfgs map[component.ID]component.Config, factories map[component.Type]Factory) *Builder { - return &Builder{cfgs: cfgs, factories: factories} -} - -// CreateTraces creates a Traces receiver based on the settings and config. -func (b *Builder) CreateTraces(ctx context.Context, set CreateSettings, next consumer.Traces) (Traces, error) { - if next == nil { - return nil, errNilNextConsumer - } - cfg, existsCfg := b.cfgs[set.ID] - if !existsCfg { - return nil, fmt.Errorf("receiver %q is not configured", set.ID) - } - - f, existsFactory := b.factories[set.ID.Type()] - if !existsFactory { - return nil, fmt.Errorf("receiver factory not available for: %q", set.ID) - } - - logStabilityLevel(set.Logger, f.TracesReceiverStability()) - return f.CreateTracesReceiver(ctx, set, cfg, next) -} - -// CreateMetrics creates a Metrics receiver based on the settings and config. -func (b *Builder) CreateMetrics(ctx context.Context, set CreateSettings, next consumer.Metrics) (Metrics, error) { - if next == nil { - return nil, errNilNextConsumer - } - cfg, existsCfg := b.cfgs[set.ID] - if !existsCfg { - return nil, fmt.Errorf("receiver %q is not configured", set.ID) - } - - f, existsFactory := b.factories[set.ID.Type()] - if !existsFactory { - return nil, fmt.Errorf("receiver factory not available for: %q", set.ID) - } - - logStabilityLevel(set.Logger, f.MetricsReceiverStability()) - return f.CreateMetricsReceiver(ctx, set, cfg, next) -} - -// CreateLogs creates a Logs receiver based on the settings and config. 
-func (b *Builder) CreateLogs(ctx context.Context, set CreateSettings, next consumer.Logs) (Logs, error) { - if next == nil { - return nil, errNilNextConsumer - } - cfg, existsCfg := b.cfgs[set.ID] - if !existsCfg { - return nil, fmt.Errorf("receiver %q is not configured", set.ID) - } - - f, existsFactory := b.factories[set.ID.Type()] - if !existsFactory { - return nil, fmt.Errorf("receiver factory not available for: %q", set.ID) - } - - logStabilityLevel(set.Logger, f.LogsReceiverStability()) - return f.CreateLogsReceiver(ctx, set, cfg, next) -} - -func (b *Builder) Factory(componentType component.Type) component.Factory { - return b.factories[componentType] -} - -// logStabilityLevel logs the stability level of a component. The log level is set to info for -// undefined, unmaintained, deprecated and development. The log level is set to debug -// for alpha, beta and stable. -func logStabilityLevel(logger *zap.Logger, sl component.StabilityLevel) { - if sl >= component.StabilityLevelAlpha { - logger.Debug(sl.LogMessage()) - } else { - logger.Info(sl.LogMessage()) - } -} diff --git a/vendor/go.opentelemetry.io/collector/receiver/receiverhelper/documentation.md b/vendor/go.opentelemetry.io/collector/receiver/receiverhelper/documentation.md index 457a856618e..4f88fd0956d 100644 --- a/vendor/go.opentelemetry.io/collector/receiver/receiverhelper/documentation.md +++ b/vendor/go.opentelemetry.io/collector/receiver/receiverhelper/documentation.md @@ -6,50 +6,50 @@ The following telemetry is emitted by this component. -### receiver_accepted_log_records +### otelcol_receiver_accepted_log_records -Number of log records successfully pushed into the pipeline. +Number of log records successfully pushed into the pipeline. [alpha] | Unit | Metric Type | Value Type | Monotonic | | ---- | ----------- | ---------- | --------- | -| 1 | Sum | Int | true | +| {records} | Sum | Int | true | -### receiver_accepted_metric_points +### otelcol_receiver_accepted_metric_points -Number of metric points successfully pushed into the pipeline. +Number of metric points successfully pushed into the pipeline. [alpha] | Unit | Metric Type | Value Type | Monotonic | | ---- | ----------- | ---------- | --------- | -| 1 | Sum | Int | true | +| {datapoints} | Sum | Int | true | -### receiver_accepted_spans +### otelcol_receiver_accepted_spans -Number of spans successfully pushed into the pipeline. +Number of spans successfully pushed into the pipeline. [alpha] | Unit | Metric Type | Value Type | Monotonic | | ---- | ----------- | ---------- | --------- | -| 1 | Sum | Int | true | +| {spans} | Sum | Int | true | -### receiver_refused_log_records +### otelcol_receiver_refused_log_records -Number of log records that could not be pushed into the pipeline. +Number of log records that could not be pushed into the pipeline. [alpha] | Unit | Metric Type | Value Type | Monotonic | | ---- | ----------- | ---------- | --------- | -| 1 | Sum | Int | true | +| {records} | Sum | Int | true | -### receiver_refused_metric_points +### otelcol_receiver_refused_metric_points -Number of metric points that could not be pushed into the pipeline. +Number of metric points that could not be pushed into the pipeline. [alpha] | Unit | Metric Type | Value Type | Monotonic | | ---- | ----------- | ---------- | --------- | -| 1 | Sum | Int | true | +| {datapoints} | Sum | Int | true | -### receiver_refused_spans +### otelcol_receiver_refused_spans -Number of spans that could not be pushed into the pipeline. 
+Number of spans that could not be pushed into the pipeline. [alpha] | Unit | Metric Type | Value Type | Monotonic | | ---- | ----------- | ---------- | --------- | -| 1 | Sum | Int | true | +| {spans} | Sum | Int | true | diff --git a/vendor/go.opentelemetry.io/collector/receiver/receiverhelper/internal/metadata/generated_telemetry.go b/vendor/go.opentelemetry.io/collector/receiver/receiverhelper/internal/metadata/generated_telemetry.go index ee70b2e498f..0600af1470a 100644 --- a/vendor/go.opentelemetry.io/collector/receiver/receiverhelper/internal/metadata/generated_telemetry.go +++ b/vendor/go.opentelemetry.io/collector/receiver/receiverhelper/internal/metadata/generated_telemetry.go @@ -24,76 +24,77 @@ func Tracer(settings component.TelemetrySettings) trace.Tracer { // TelemetryBuilder provides an interface for components to report telemetry // as defined in metadata and user config. type TelemetryBuilder struct { + meter metric.Meter ReceiverAcceptedLogRecords metric.Int64Counter ReceiverAcceptedMetricPoints metric.Int64Counter ReceiverAcceptedSpans metric.Int64Counter ReceiverRefusedLogRecords metric.Int64Counter ReceiverRefusedMetricPoints metric.Int64Counter ReceiverRefusedSpans metric.Int64Counter - level configtelemetry.Level } -// telemetryBuilderOption applies changes to default builder. -type telemetryBuilderOption func(*TelemetryBuilder) +// TelemetryBuilderOption applies changes to default builder. +type TelemetryBuilderOption interface { + apply(*TelemetryBuilder) +} -// WithLevel sets the current telemetry level for the component. -func WithLevel(lvl configtelemetry.Level) telemetryBuilderOption { - return func(builder *TelemetryBuilder) { - builder.level = lvl - } +type telemetryBuilderOptionFunc func(mb *TelemetryBuilder) + +func (tbof telemetryBuilderOptionFunc) apply(mb *TelemetryBuilder) { + tbof(mb) } // NewTelemetryBuilder provides a struct with methods to update all internal telemetry // for a component -func NewTelemetryBuilder(settings component.TelemetrySettings, options ...telemetryBuilderOption) (*TelemetryBuilder, error) { - builder := TelemetryBuilder{level: configtelemetry.LevelBasic} +func NewTelemetryBuilder(settings component.TelemetrySettings, options ...TelemetryBuilderOption) (*TelemetryBuilder, error) { + builder := TelemetryBuilder{} for _, op := range options { - op(&builder) - } - var ( - err, errs error - meter metric.Meter - ) - if builder.level >= configtelemetry.LevelBasic { - meter = Meter(settings) - } else { - meter = noop.Meter{} + op.apply(&builder) } - builder.ReceiverAcceptedLogRecords, err = meter.Int64Counter( - "receiver_accepted_log_records", - metric.WithDescription("Number of log records successfully pushed into the pipeline."), - metric.WithUnit("1"), + builder.meter = Meter(settings) + var err, errs error + builder.ReceiverAcceptedLogRecords, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Counter( + "otelcol_receiver_accepted_log_records", + metric.WithDescription("Number of log records successfully pushed into the pipeline. 
[alpha]"), + metric.WithUnit("{records}"), ) errs = errors.Join(errs, err) - builder.ReceiverAcceptedMetricPoints, err = meter.Int64Counter( - "receiver_accepted_metric_points", - metric.WithDescription("Number of metric points successfully pushed into the pipeline."), - metric.WithUnit("1"), + builder.ReceiverAcceptedMetricPoints, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Counter( + "otelcol_receiver_accepted_metric_points", + metric.WithDescription("Number of metric points successfully pushed into the pipeline. [alpha]"), + metric.WithUnit("{datapoints}"), ) errs = errors.Join(errs, err) - builder.ReceiverAcceptedSpans, err = meter.Int64Counter( - "receiver_accepted_spans", - metric.WithDescription("Number of spans successfully pushed into the pipeline."), - metric.WithUnit("1"), + builder.ReceiverAcceptedSpans, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Counter( + "otelcol_receiver_accepted_spans", + metric.WithDescription("Number of spans successfully pushed into the pipeline. [alpha]"), + metric.WithUnit("{spans}"), ) errs = errors.Join(errs, err) - builder.ReceiverRefusedLogRecords, err = meter.Int64Counter( - "receiver_refused_log_records", - metric.WithDescription("Number of log records that could not be pushed into the pipeline."), - metric.WithUnit("1"), + builder.ReceiverRefusedLogRecords, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Counter( + "otelcol_receiver_refused_log_records", + metric.WithDescription("Number of log records that could not be pushed into the pipeline. [alpha]"), + metric.WithUnit("{records}"), ) errs = errors.Join(errs, err) - builder.ReceiverRefusedMetricPoints, err = meter.Int64Counter( - "receiver_refused_metric_points", - metric.WithDescription("Number of metric points that could not be pushed into the pipeline."), - metric.WithUnit("1"), + builder.ReceiverRefusedMetricPoints, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Counter( + "otelcol_receiver_refused_metric_points", + metric.WithDescription("Number of metric points that could not be pushed into the pipeline. [alpha]"), + metric.WithUnit("{datapoints}"), ) errs = errors.Join(errs, err) - builder.ReceiverRefusedSpans, err = meter.Int64Counter( - "receiver_refused_spans", - metric.WithDescription("Number of spans that could not be pushed into the pipeline."), - metric.WithUnit("1"), + builder.ReceiverRefusedSpans, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64Counter( + "otelcol_receiver_refused_spans", + metric.WithDescription("Number of spans that could not be pushed into the pipeline. 
[alpha]"), + metric.WithUnit("{spans}"), ) errs = errors.Join(errs, err) return &builder, errs } + +func getLeveledMeter(meter metric.Meter, cfgLevel, srvLevel configtelemetry.Level) metric.Meter { + if cfgLevel <= srvLevel { + return meter + } + return noop.Meter{} +} diff --git a/vendor/go.opentelemetry.io/collector/receiver/receiverhelper/metadata.yaml b/vendor/go.opentelemetry.io/collector/receiver/receiverhelper/metadata.yaml index 2897d549a70..d13d7c3307b 100644 --- a/vendor/go.opentelemetry.io/collector/receiver/receiverhelper/metadata.yaml +++ b/vendor/go.opentelemetry.io/collector/receiver/receiverhelper/metadata.yaml @@ -1,58 +1,70 @@ type: receiverhelper +github_project: open-telemetry/opentelemetry-collector status: class: receiver not_component: true stability: beta: [traces, metrics, logs] - distributions: [core, contrib] telemetry: metrics: receiver_accepted_spans: enabled: true + stability: + level: alpha description: Number of spans successfully pushed into the pipeline. - unit: 1 + unit: "{spans}" sum: value_type: int monotonic: true receiver_refused_spans: enabled: true + stability: + level: alpha description: Number of spans that could not be pushed into the pipeline. - unit: 1 + unit: "{spans}" sum: value_type: int monotonic: true receiver_accepted_metric_points: enabled: true + stability: + level: alpha description: Number of metric points successfully pushed into the pipeline. - unit: 1 + unit: "{datapoints}" sum: value_type: int monotonic: true receiver_refused_metric_points: enabled: true + stability: + level: alpha description: Number of metric points that could not be pushed into the pipeline. - unit: 1 + unit: "{datapoints}" sum: value_type: int monotonic: true receiver_accepted_log_records: enabled: true + stability: + level: alpha description: Number of log records successfully pushed into the pipeline. - unit: 1 + unit: "{records}" sum: value_type: int monotonic: true receiver_refused_log_records: enabled: true + stability: + level: alpha description: Number of log records that could not be pushed into the pipeline. - unit: 1 + unit: "{records}" sum: value_type: int - monotonic: true \ No newline at end of file + monotonic: true diff --git a/vendor/go.opentelemetry.io/collector/receiver/receiverhelper/obsreport.go b/vendor/go.opentelemetry.io/collector/receiver/receiverhelper/obsreport.go index 18ff7556454..b30208deb36 100644 --- a/vendor/go.opentelemetry.io/collector/receiver/receiverhelper/obsreport.go +++ b/vendor/go.opentelemetry.io/collector/receiver/receiverhelper/obsreport.go @@ -12,25 +12,22 @@ import ( "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/trace" - "go.uber.org/zap" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/config/configtelemetry" - "go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics" + "go.opentelemetry.io/collector/pipeline" "go.opentelemetry.io/collector/receiver" + "go.opentelemetry.io/collector/receiver/internal" "go.opentelemetry.io/collector/receiver/receiverhelper/internal/metadata" ) // ObsReport is a helper to add observability to a receiver. 
type ObsReport struct { - level configtelemetry.Level spanNamePrefix string transport string longLivedCtx bool tracer trace.Tracer - logger *zap.Logger - otelAttrs []attribute.KeyValue + otelAttrs metric.MeasurementOption telemetryBuilder *metadata.TelemetryBuilder } @@ -44,7 +41,7 @@ type ObsReportSettings struct { // eg.: a gRPC stream, for which many batches of data are received in individual // operations without a corresponding new context per operation. LongLivedCtx bool - ReceiverCreateSettings receiver.CreateSettings + ReceiverCreateSettings receiver.Settings } // NewObsReport creates a new ObsReport. @@ -58,17 +55,15 @@ func newReceiver(cfg ObsReportSettings) (*ObsReport, error) { return nil, err } return &ObsReport{ - level: cfg.ReceiverCreateSettings.TelemetrySettings.MetricsLevel, - spanNamePrefix: obsmetrics.ReceiverPrefix + cfg.ReceiverID.String(), + spanNamePrefix: internal.ReceiverKey + internal.SpanNameSep + cfg.ReceiverID.String(), transport: cfg.Transport, longLivedCtx: cfg.LongLivedCtx, tracer: cfg.ReceiverCreateSettings.TracerProvider.Tracer(cfg.ReceiverID.String()), - logger: cfg.ReceiverCreateSettings.Logger, - otelAttrs: []attribute.KeyValue{ - attribute.String(obsmetrics.ReceiverKey, cfg.ReceiverID.String()), - attribute.String(obsmetrics.TransportKey, cfg.Transport), - }, + otelAttrs: metric.WithAttributeSet(attribute.NewSet( + attribute.String(internal.ReceiverKey, cfg.ReceiverID.String()), + attribute.String(internal.TransportKey, cfg.Transport), + )), telemetryBuilder: telemetryBuilder, }, nil } @@ -77,7 +72,7 @@ func newReceiver(cfg ObsReportSettings) (*ObsReport, error) { // The returned context should be used in other calls to the obsreport functions // dealing with the same receive operation. func (rec *ObsReport) StartTracesOp(operationCtx context.Context) context.Context { - return rec.startOp(operationCtx, obsmetrics.ReceiveTraceDataOperationSuffix) + return rec.startOp(operationCtx, internal.ReceiveTraceDataOperationSuffix) } // EndTracesOp completes the receive operation that was started with @@ -88,14 +83,14 @@ func (rec *ObsReport) EndTracesOp( numReceivedSpans int, err error, ) { - rec.endOp(receiverCtx, format, numReceivedSpans, err, component.DataTypeTraces) + rec.endOp(receiverCtx, format, numReceivedSpans, err, pipeline.SignalTraces) } // StartLogsOp is called when a request is received from a client. // The returned context should be used in other calls to the obsreport functions // dealing with the same receive operation. func (rec *ObsReport) StartLogsOp(operationCtx context.Context) context.Context { - return rec.startOp(operationCtx, obsmetrics.ReceiverLogsOperationSuffix) + return rec.startOp(operationCtx, internal.ReceiverLogsOperationSuffix) } // EndLogsOp completes the receive operation that was started with @@ -106,14 +101,14 @@ func (rec *ObsReport) EndLogsOp( numReceivedLogRecords int, err error, ) { - rec.endOp(receiverCtx, format, numReceivedLogRecords, err, component.DataTypeLogs) + rec.endOp(receiverCtx, format, numReceivedLogRecords, err, pipeline.SignalLogs) } // StartMetricsOp is called when a request is received from a client. // The returned context should be used in other calls to the obsreport functions // dealing with the same receive operation. 
func (rec *ObsReport) StartMetricsOp(operationCtx context.Context) context.Context { - return rec.startOp(operationCtx, obsmetrics.ReceiverMetricsOperationSuffix) + return rec.startOp(operationCtx, internal.ReceiverMetricsOperationSuffix) } // EndMetricsOp completes the receive operation that was started with @@ -124,7 +119,7 @@ func (rec *ObsReport) EndMetricsOp( numReceivedPoints int, err error, ) { - rec.endOp(receiverCtx, format, numReceivedPoints, err, component.DataTypeMetrics) + rec.endOp(receiverCtx, format, numReceivedPoints, err, pipeline.SignalMetrics) } // startOp creates the span used to trace the operation. Returning @@ -147,7 +142,7 @@ func (rec *ObsReport) startOp(receiverCtx context.Context, operationSuffix strin } if rec.transport != "" { - span.SetAttributes(attribute.String(obsmetrics.TransportKey, rec.transport)) + span.SetAttributes(attribute.String(internal.TransportKey, rec.transport)) } return ctx } @@ -158,7 +153,7 @@ func (rec *ObsReport) endOp( format string, numReceivedItems int, err error, - dataType component.DataType, + signal pipeline.Signal, ) { numAccepted := numReceivedItems numRefused := 0 @@ -169,27 +164,25 @@ func (rec *ObsReport) endOp( span := trace.SpanFromContext(receiverCtx) - if rec.level != configtelemetry.LevelNone { - rec.recordMetrics(receiverCtx, dataType, numAccepted, numRefused) - } + rec.recordMetrics(receiverCtx, signal, numAccepted, numRefused) // end span according to errors if span.IsRecording() { var acceptedItemsKey, refusedItemsKey string - switch dataType { - case component.DataTypeTraces: - acceptedItemsKey = obsmetrics.AcceptedSpansKey - refusedItemsKey = obsmetrics.RefusedSpansKey - case component.DataTypeMetrics: - acceptedItemsKey = obsmetrics.AcceptedMetricPointsKey - refusedItemsKey = obsmetrics.RefusedMetricPointsKey - case component.DataTypeLogs: - acceptedItemsKey = obsmetrics.AcceptedLogRecordsKey - refusedItemsKey = obsmetrics.RefusedLogRecordsKey + switch signal { + case pipeline.SignalTraces: + acceptedItemsKey = internal.AcceptedSpansKey + refusedItemsKey = internal.RefusedSpansKey + case pipeline.SignalMetrics: + acceptedItemsKey = internal.AcceptedMetricPointsKey + refusedItemsKey = internal.RefusedMetricPointsKey + case pipeline.SignalLogs: + acceptedItemsKey = internal.AcceptedLogRecordsKey + refusedItemsKey = internal.RefusedLogRecordsKey } span.SetAttributes( - attribute.String(obsmetrics.FormatKey, format), + attribute.String(internal.FormatKey, format), attribute.Int64(acceptedItemsKey, int64(numAccepted)), attribute.Int64(refusedItemsKey, int64(numRefused)), ) @@ -200,20 +193,20 @@ func (rec *ObsReport) endOp( span.End() } -func (rec *ObsReport) recordMetrics(receiverCtx context.Context, dataType component.DataType, numAccepted, numRefused int) { +func (rec *ObsReport) recordMetrics(receiverCtx context.Context, signal pipeline.Signal, numAccepted, numRefused int) { var acceptedMeasure, refusedMeasure metric.Int64Counter - switch dataType { - case component.DataTypeTraces: + switch signal { + case pipeline.SignalTraces: acceptedMeasure = rec.telemetryBuilder.ReceiverAcceptedSpans refusedMeasure = rec.telemetryBuilder.ReceiverRefusedSpans - case component.DataTypeMetrics: + case pipeline.SignalMetrics: acceptedMeasure = rec.telemetryBuilder.ReceiverAcceptedMetricPoints refusedMeasure = rec.telemetryBuilder.ReceiverRefusedMetricPoints - case component.DataTypeLogs: + case pipeline.SignalLogs: acceptedMeasure = rec.telemetryBuilder.ReceiverAcceptedLogRecords refusedMeasure = 
rec.telemetryBuilder.ReceiverRefusedLogRecords } - acceptedMeasure.Add(receiverCtx, int64(numAccepted), metric.WithAttributes(rec.otelAttrs...)) - refusedMeasure.Add(receiverCtx, int64(numRefused), metric.WithAttributes(rec.otelAttrs...)) + acceptedMeasure.Add(receiverCtx, int64(numAccepted), rec.otelAttrs) + refusedMeasure.Add(receiverCtx, int64(numRefused), rec.otelAttrs) } diff --git a/vendor/go.opentelemetry.io/collector/receiver/receivertest/LICENSE b/vendor/go.opentelemetry.io/collector/receiver/receivertest/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/receiver/receivertest/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/collector/receiver/receivertest/Makefile b/vendor/go.opentelemetry.io/collector/receiver/receivertest/Makefile new file mode 100644 index 00000000000..ded7a36092d --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/receiver/receivertest/Makefile @@ -0,0 +1 @@ +include ../../Makefile.Common diff --git a/vendor/go.opentelemetry.io/collector/receiver/receivertest/contract_checker.go b/vendor/go.opentelemetry.io/collector/receiver/receivertest/contract_checker.go index 6951b5f41f4..148a025048c 100644 --- a/vendor/go.opentelemetry.io/collector/receiver/receivertest/contract_checker.go +++ b/vendor/go.opentelemetry.io/collector/receiver/receivertest/contract_checker.go @@ -7,7 +7,7 @@ import ( "context" "errors" "fmt" - "math/rand" + "math/rand/v2" "sync" "sync/atomic" "testing" @@ -24,6 +24,7 @@ import ( "go.opentelemetry.io/collector/pdata/plog" "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/pdata/ptrace" + "go.opentelemetry.io/collector/pipeline" "go.opentelemetry.io/collector/receiver" ) @@ -54,8 +55,7 @@ type CheckConsumeContractParams struct { T *testing.T // Factory that allows to create a receiver. Factory receiver.Factory - // DataType to test for. - DataType component.DataType + Signal pipeline.Signal // Config of the receiver to use. Config component.Config // Generator that can send data to the receiver. @@ -111,13 +111,13 @@ func checkConsumeContractScenario(params CheckConsumeContractParams, decisionFun // Create and start the receiver. 
var receiver component.Component var err error - switch params.DataType { - case component.DataTypeLogs: - receiver, err = params.Factory.CreateLogsReceiver(ctx, NewNopCreateSettings(), params.Config, consumer) - case component.DataTypeTraces: - receiver, err = params.Factory.CreateTracesReceiver(ctx, NewNopCreateSettings(), params.Config, consumer) - case component.DataTypeMetrics: - receiver, err = params.Factory.CreateMetricsReceiver(ctx, NewNopCreateSettings(), params.Config, consumer) + switch params.Signal { + case pipeline.SignalLogs: + receiver, err = params.Factory.CreateLogs(ctx, NewNopSettings(), params.Config, consumer) + case pipeline.SignalTraces: + receiver, err = params.Factory.CreateTraces(ctx, NewNopSettings(), params.Config, consumer) + case pipeline.SignalMetrics: + receiver, err = params.Factory.CreateMetrics(ctx, NewNopSettings(), params.Config, consumer) default: require.FailNow(params.T, "must specify a valid DataType to test for") } @@ -148,7 +148,7 @@ func checkConsumeContractScenario(params CheckConsumeContractParams, decisionFun defer wg.Done() for atomic.AddInt64(&generatedIndex, 1) <= int64(params.GenerateCount) { ids := params.Generator.Generate() - require.Greater(params.T, len(ids), 0) + require.NotEmpty(params.T, ids) mux.Lock() duplicates := generatedIDs.mergeSlice(ids) @@ -193,7 +193,7 @@ func checkConsumeContractScenario(params CheckConsumeContractParams, decisionFun } err = receiver.Shutdown(ctx) - assert.NoError(params.T, err) + require.NoError(params.T, err) // Print some stats to help debug test failures. fmt.Printf( @@ -271,8 +271,10 @@ func (ds idSet) union(other idSet) (union idSet, duplicates []UniqueIDAttrVal) { // between the receiver and it next consumer. type consumeDecisionFunc func(ids idSet) error -var errNonPermanent = errors.New("non permanent error") -var errPermanent = errors.New("permanent error") +var ( + errNonPermanent = errors.New("non permanent error") + errPermanent = errors.New("permanent error") +) // randomNonPermanentErrorConsumeDecision is a decision function that succeeds approximately // half of the time and fails with a non-permanent error the rest of the time. diff --git a/vendor/go.opentelemetry.io/collector/receiver/receivertest/nop_receiver.go b/vendor/go.opentelemetry.io/collector/receiver/receivertest/nop_receiver.go index e9cec06ca1b..fab24a54bc7 100644 --- a/vendor/go.opentelemetry.io/collector/receiver/receivertest/nop_receiver.go +++ b/vendor/go.opentelemetry.io/collector/receiver/receivertest/nop_receiver.go @@ -11,14 +11,17 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/xconsumer" + "go.opentelemetry.io/collector/pipeline" "go.opentelemetry.io/collector/receiver" + "go.opentelemetry.io/collector/receiver/xreceiver" ) var defaultComponentType = component.MustNewType("nop") -// NewNopCreateSettings returns a new nop settings for Create*Receiver functions. -func NewNopCreateSettings() receiver.CreateSettings { - return receiver.CreateSettings{ +// NewNopSettings returns a new nop settings for Create*Receiver functions. 
+func NewNopSettings() receiver.Settings { + return receiver.Settings{ ID: component.NewIDWithName(defaultComponentType, uuid.NewString()), TelemetrySettings: componenttest.NewNopTelemetrySettings(), BuildInfo: component.NewDefaultBuildInfo(), @@ -27,44 +30,50 @@ func NewNopCreateSettings() receiver.CreateSettings { // NewNopFactory returns a receiver.Factory that constructs nop receivers supporting all data types. func NewNopFactory() receiver.Factory { - return receiver.NewFactory( + return xreceiver.NewFactory( defaultComponentType, func() component.Config { return &nopConfig{} }, - receiver.WithTraces(createTraces, component.StabilityLevelStable), - receiver.WithMetrics(createMetrics, component.StabilityLevelStable), - receiver.WithLogs(createLogs, component.StabilityLevelStable)) + xreceiver.WithTraces(createTraces, component.StabilityLevelStable), + xreceiver.WithMetrics(createMetrics, component.StabilityLevelStable), + xreceiver.WithLogs(createLogs, component.StabilityLevelStable), + xreceiver.WithProfiles(createProfiles, component.StabilityLevelAlpha), + ) } // NewNopFactoryForType returns a receiver.Factory that constructs nop receivers supporting only the // given data type. -func NewNopFactoryForType(dataType component.DataType) receiver.Factory { +func NewNopFactoryForType(signal pipeline.Signal) receiver.Factory { var factoryOpt receiver.FactoryOption - switch dataType { - case component.DataTypeTraces: + switch signal { + case pipeline.SignalTraces: factoryOpt = receiver.WithTraces(createTraces, component.StabilityLevelStable) - case component.DataTypeMetrics: + case pipeline.SignalMetrics: factoryOpt = receiver.WithMetrics(createMetrics, component.StabilityLevelStable) - case component.DataTypeLogs: + case pipeline.SignalLogs: factoryOpt = receiver.WithLogs(createLogs, component.StabilityLevelStable) default: - panic("unsupported data type for creating nop receiver factory: " + dataType.String()) + panic("unsupported data type for creating nop receiver factory: " + signal.String()) } - componentType := component.MustNewType(defaultComponentType.String() + "_" + dataType.String()) + componentType := component.MustNewType(defaultComponentType.String() + "_" + signal.String()) return receiver.NewFactory(componentType, func() component.Config { return &nopConfig{} }, factoryOpt) } type nopConfig struct{} -func createTraces(context.Context, receiver.CreateSettings, component.Config, consumer.Traces) (receiver.Traces, error) { +func createTraces(context.Context, receiver.Settings, component.Config, consumer.Traces) (receiver.Traces, error) { return nopInstance, nil } -func createMetrics(context.Context, receiver.CreateSettings, component.Config, consumer.Metrics) (receiver.Metrics, error) { +func createMetrics(context.Context, receiver.Settings, component.Config, consumer.Metrics) (receiver.Metrics, error) { return nopInstance, nil } -func createLogs(context.Context, receiver.CreateSettings, component.Config, consumer.Logs) (receiver.Logs, error) { +func createLogs(context.Context, receiver.Settings, component.Config, consumer.Logs) (receiver.Logs, error) { + return nopInstance, nil +} + +func createProfiles(context.Context, receiver.Settings, component.Config, xconsumer.Profiles) (xreceiver.Profiles, error) { return nopInstance, nil } @@ -75,11 +84,3 @@ type nopReceiver struct { component.StartFunc component.ShutdownFunc } - -// NewNopBuilder returns a receiver.Builder that constructs nop receivers. 
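To make the rename concrete, here is a minimal, self-contained sketch of how a test might drive the updated receivertest API; NewNopFactoryForType, NewNopSettings, and the CreateTraces factory method come from the diff above, and the rest is ordinary collector test plumbing (assumed, not part of this patch):

package receivertest_example

import (
	"context"

	"go.opentelemetry.io/collector/component/componenttest"
	"go.opentelemetry.io/collector/consumer/consumertest"
	"go.opentelemetry.io/collector/pipeline"
	"go.opentelemetry.io/collector/receiver/receivertest"
)

// exampleNopTracesReceiver builds, starts, and stops a nop traces receiver
// using the migrated API: pipeline.Signal replaces component.DataType and
// NewNopSettings replaces the removed NewNopCreateSettings.
func exampleNopTracesReceiver() error {
	factory := receivertest.NewNopFactoryForType(pipeline.SignalTraces)
	rcv, err := factory.CreateTraces(
		context.Background(),
		receivertest.NewNopSettings(),
		factory.CreateDefaultConfig(),
		consumertest.NewNop(),
	)
	if err != nil {
		return err
	}
	if err := rcv.Start(context.Background(), componenttest.NewNopHost()); err != nil {
		return err
	}
	return rcv.Shutdown(context.Background())
}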
-func NewNopBuilder() *receiver.Builder { - nopFactory := NewNopFactory() - return receiver.NewBuilder( - map[component.ID]component.Config{component.NewID(defaultComponentType): nopFactory.CreateDefaultConfig()}, - map[component.Type]receiver.Factory{defaultComponentType: nopFactory}) -} diff --git a/vendor/go.opentelemetry.io/collector/receiver/xreceiver/LICENSE b/vendor/go.opentelemetry.io/collector/receiver/xreceiver/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/receiver/xreceiver/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/collector/receiver/xreceiver/Makefile b/vendor/go.opentelemetry.io/collector/receiver/xreceiver/Makefile new file mode 100644 index 00000000000..ded7a36092d --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/receiver/xreceiver/Makefile @@ -0,0 +1 @@ +include ../../Makefile.Common diff --git a/vendor/go.opentelemetry.io/collector/receiver/xreceiver/profiles.go b/vendor/go.opentelemetry.io/collector/receiver/xreceiver/profiles.go new file mode 100644 index 00000000000..2e0cbbd8cee --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/receiver/xreceiver/profiles.go @@ -0,0 +1,116 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package xreceiver // import "go.opentelemetry.io/collector/receiver/xreceiver" + +import ( + "context" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/consumer/xconsumer" + "go.opentelemetry.io/collector/pipeline" + "go.opentelemetry.io/collector/receiver" +) + +// Profiles receiver receives profiles. +// Its purpose is to translate data from any format to the collector's internal profile format. +// Profiles receiver feeds a xconsumer.Profiles with data. +// +// For example, it could be a pprof data source which translates pprof profiles into pprofile.Profiles. +type Profiles interface { + component.Component +} + +// Factory is a factory interface for receivers. +// +// This interface cannot be directly implemented. Implementations must +// use the NewFactory to implement it. +type Factory interface { + receiver.Factory + + // CreateProfiles creates a Profiles based on this config. + // If the receiver type does not support tracing or if the config is not valid + // an error will be returned instead. `next` is never nil. + CreateProfiles(ctx context.Context, set receiver.Settings, cfg component.Config, next xconsumer.Profiles) (Profiles, error) + + // ProfilesStability gets the stability level of the Profiles receiver. + ProfilesStability() component.StabilityLevel +} + +// CreateProfilesFunc is the equivalent of Factory.CreateProfiles. +type CreateProfilesFunc func(context.Context, receiver.Settings, component.Config, xconsumer.Profiles) (Profiles, error) + +// CreateProfiles implements Factory.CreateProfiles. 
+func (f CreateProfilesFunc) CreateProfiles(ctx context.Context, set receiver.Settings, cfg component.Config, next xconsumer.Profiles) (Profiles, error) { + if f == nil { + return nil, pipeline.ErrSignalNotSupported + } + return f(ctx, set, cfg, next) +} + +// FactoryOption apply changes to Factory. +type FactoryOption interface { + // applyOption applies the option. + applyOption(o *factoryOpts) +} + +// factoryOptionFunc is a FactoryOption created through a function. +type factoryOptionFunc func(*factoryOpts) + +func (f factoryOptionFunc) applyOption(o *factoryOpts) { + f(o) +} + +type factory struct { + receiver.Factory + CreateProfilesFunc + profilesStabilityLevel component.StabilityLevel +} + +func (f *factory) ProfilesStability() component.StabilityLevel { + return f.profilesStabilityLevel +} + +type factoryOpts struct { + opts []receiver.FactoryOption + *factory +} + +// WithTraces overrides the default "error not supported" implementation for Factory.CreateTraces and the default "undefined" stability level. +func WithTraces(createTraces receiver.CreateTracesFunc, sl component.StabilityLevel) FactoryOption { + return factoryOptionFunc(func(o *factoryOpts) { + o.opts = append(o.opts, receiver.WithTraces(createTraces, sl)) + }) +} + +// WithMetrics overrides the default "error not supported" implementation for Factory.CreateMetrics and the default "undefined" stability level. +func WithMetrics(createMetrics receiver.CreateMetricsFunc, sl component.StabilityLevel) FactoryOption { + return factoryOptionFunc(func(o *factoryOpts) { + o.opts = append(o.opts, receiver.WithMetrics(createMetrics, sl)) + }) +} + +// WithLogs overrides the default "error not supported" implementation for Factory.CreateLogs and the default "undefined" stability level. +func WithLogs(createLogs receiver.CreateLogsFunc, sl component.StabilityLevel) FactoryOption { + return factoryOptionFunc(func(o *factoryOpts) { + o.opts = append(o.opts, receiver.WithLogs(createLogs, sl)) + }) +} + +// WithProfiles overrides the default "error not supported" implementation for Factory.CreateProfiles and the default "undefined" stability level. +func WithProfiles(createProfiles CreateProfilesFunc, sl component.StabilityLevel) FactoryOption { + return factoryOptionFunc(func(o *factoryOpts) { + o.profilesStabilityLevel = sl + o.CreateProfilesFunc = createProfiles + }) +} + +// NewFactory returns a Factory. +func NewFactory(cfgType component.Type, createDefaultConfig component.CreateDefaultConfigFunc, options ...FactoryOption) Factory { + opts := factoryOpts{factory: &factory{}} + for _, opt := range options { + opt.applyOption(&opts) + } + opts.factory.Factory = receiver.NewFactory(cfgType, createDefaultConfig, opts.opts...) + return opts.factory +} diff --git a/vendor/go.opentelemetry.io/collector/semconv/v1.12.0/generated_resource.go b/vendor/go.opentelemetry.io/collector/semconv/v1.12.0/generated_resource.go new file mode 100644 index 00000000000..2786da48c31 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/semconv/v1.12.0/generated_resource.go @@ -0,0 +1,1086 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv + +// The web browser in which the application represented by the resource is running. The `browser.*` attributes MUST be used only for resources that represent applications running in a web browser (regardless of whether running on a mobile or desktop device). 
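A rough sketch of how a receiver author might adopt the xreceiver factory defined above; the "sample" component type, config, and receiver types are hypothetical, while the option shapes and the CreateProfilesFunc signature come from this file:

package samplereceiver

import (
	"context"

	"go.opentelemetry.io/collector/component"
	"go.opentelemetry.io/collector/consumer/xconsumer"
	"go.opentelemetry.io/collector/receiver"
	"go.opentelemetry.io/collector/receiver/xreceiver"
)

type sampleConfig struct{}

// sampleReceiver satisfies xreceiver.Profiles (a component.Component),
// mirroring the nopReceiver pattern earlier in this patch.
type sampleReceiver struct {
	component.StartFunc
	component.ShutdownFunc
}

// createProfiles matches xreceiver.CreateProfilesFunc.
func createProfiles(context.Context, receiver.Settings, component.Config, xconsumer.Profiles) (xreceiver.Profiles, error) {
	return &sampleReceiver{}, nil
}

// NewFactory opts into profiles support via xreceiver.WithProfiles.
func NewFactory() xreceiver.Factory {
	return xreceiver.NewFactory(
		component.MustNewType("sample"), // hypothetical component type
		func() component.Config { return &sampleConfig{} },
		xreceiver.WithProfiles(createProfiles, component.StabilityLevelAlpha),
	)
}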
+const ( + // Array of brand name and version separated by a space + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' + // Note: This value is intended to be taken from the UA client hints API + // (navigator.userAgentData.brands). + AttributeBrowserBrands = "browser.brands" + // The platform on which the browser is running + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Windows', 'macOS', 'Android' + // Note: This value is intended to be taken from the UA client hints API + // (navigator.userAgentData.platform). If unavailable, the legacy + // navigator.platform API SHOULD NOT be used instead and this attribute SHOULD be + // left unset in order for the values to be consistent. + // The list of possible values is defined in the W3C User-Agent Client Hints + // specification. Note that some (but not all) of these values can overlap with + // values in the os.type and os.name attributes. However, for consistency, the + // values in the browser.platform attribute should capture the exact value that + // the user agent provides. + AttributeBrowserPlatform = "browser.platform" + // Full user-agent string provided by the browser + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 + // (KHTML, ' + // 'like Gecko) Chrome/95.0.4638.54 Safari/537.36' + // Note: The user-agent value SHOULD be provided only from browsers that do not + // have a mechanism to retrieve brands and platform individually from the User- + // Agent Client Hints API. To retrieve the value, the legacy navigator.userAgent + // API can be used. + AttributeBrowserUserAgent = "browser.user_agent" +) + +// A cloud environment (e.g. GCP, Azure, AWS) +const ( + // Name of the cloud provider. + // + // Type: Enum + // Required: No + // Stability: stable + AttributeCloudProvider = "cloud.provider" + // The cloud account ID the resource is assigned to. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '111111111111', 'opentelemetry' + AttributeCloudAccountID = "cloud.account.id" + // The geographical region the resource is running. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'us-central1', 'us-east-1' + // Note: Refer to your provider's docs to see the available regions, for example + // Alibaba Cloud regions, AWS regions, Azure regions, Google Cloud regions, or + // Tencent Cloud regions. + AttributeCloudRegion = "cloud.region" + // Cloud regions often have multiple, isolated locations known as zones to + // increase availability. Availability zone represents the zone where the resource + // is running. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'us-east-1c' + // Note: Availability zones are called "zones" on Alibaba Cloud and + // Google Cloud. + AttributeCloudAvailabilityZone = "cloud.availability_zone" + // The cloud platform in use. + // + // Type: Enum + // Required: No + // Stability: stable + // Note: The prefix of the service SHOULD match the one specified in + // cloud.provider. 
+ AttributeCloudPlatform = "cloud.platform" +) + +const ( + // Alibaba Cloud + AttributeCloudProviderAlibabaCloud = "alibaba_cloud" + // Amazon Web Services + AttributeCloudProviderAWS = "aws" + // Microsoft Azure + AttributeCloudProviderAzure = "azure" + // Google Cloud Platform + AttributeCloudProviderGCP = "gcp" + // Tencent Cloud + AttributeCloudProviderTencentCloud = "tencent_cloud" +) + +const ( + // Alibaba Cloud Elastic Compute Service + AttributeCloudPlatformAlibabaCloudECS = "alibaba_cloud_ecs" + // Alibaba Cloud Function Compute + AttributeCloudPlatformAlibabaCloudFc = "alibaba_cloud_fc" + // AWS Elastic Compute Cloud + AttributeCloudPlatformAWSEC2 = "aws_ec2" + // AWS Elastic Container Service + AttributeCloudPlatformAWSECS = "aws_ecs" + // AWS Elastic Kubernetes Service + AttributeCloudPlatformAWSEKS = "aws_eks" + // AWS Lambda + AttributeCloudPlatformAWSLambda = "aws_lambda" + // AWS Elastic Beanstalk + AttributeCloudPlatformAWSElasticBeanstalk = "aws_elastic_beanstalk" + // AWS App Runner + AttributeCloudPlatformAWSAppRunner = "aws_app_runner" + // Azure Virtual Machines + AttributeCloudPlatformAzureVM = "azure_vm" + // Azure Container Instances + AttributeCloudPlatformAzureContainerInstances = "azure_container_instances" + // Azure Kubernetes Service + AttributeCloudPlatformAzureAKS = "azure_aks" + // Azure Functions + AttributeCloudPlatformAzureFunctions = "azure_functions" + // Azure App Service + AttributeCloudPlatformAzureAppService = "azure_app_service" + // Google Cloud Compute Engine (GCE) + AttributeCloudPlatformGCPComputeEngine = "gcp_compute_engine" + // Google Cloud Run + AttributeCloudPlatformGCPCloudRun = "gcp_cloud_run" + // Google Cloud Kubernetes Engine (GKE) + AttributeCloudPlatformGCPKubernetesEngine = "gcp_kubernetes_engine" + // Google Cloud Functions (GCF) + AttributeCloudPlatformGCPCloudFunctions = "gcp_cloud_functions" + // Google Cloud App Engine (GAE) + AttributeCloudPlatformGCPAppEngine = "gcp_app_engine" + // Tencent Cloud Cloud Virtual Machine (CVM) + AttributeCloudPlatformTencentCloudCvm = "tencent_cloud_cvm" + // Tencent Cloud Elastic Kubernetes Service (EKS) + AttributeCloudPlatformTencentCloudEKS = "tencent_cloud_eks" + // Tencent Cloud Serverless Cloud Function (SCF) + AttributeCloudPlatformTencentCloudScf = "tencent_cloud_scf" +) + +// Resources used by AWS Elastic Container Service (ECS). +const ( + // The Amazon Resource Name (ARN) of an ECS container instance. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'arn:aws:ecs:us- + // west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' + AttributeAWSECSContainerARN = "aws.ecs.container.arn" + // The ARN of an ECS cluster. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AttributeAWSECSClusterARN = "aws.ecs.cluster.arn" + // The launch type for an ECS task. + // + // Type: Enum + // Required: No + // Stability: stable + AttributeAWSECSLaunchtype = "aws.ecs.launchtype" + // The ARN of an ECS task definition. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'arn:aws:ecs:us- + // west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' + AttributeAWSECSTaskARN = "aws.ecs.task.arn" + // The task definition family this task definition is a member of. 
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry-family' + AttributeAWSECSTaskFamily = "aws.ecs.task.family" + // The revision for this task definition. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '8', '26' + AttributeAWSECSTaskRevision = "aws.ecs.task.revision" +) + +const ( + // ec2 + AttributeAWSECSLaunchtypeEC2 = "ec2" + // fargate + AttributeAWSECSLaunchtypeFargate = "fargate" +) + +// Resources used by AWS Elastic Kubernetes Service (EKS). +const ( + // The ARN of an EKS cluster. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AttributeAWSEKSClusterARN = "aws.eks.cluster.arn" +) + +// Resources specific to Amazon Web Services. +const ( + // The name(s) of the AWS log group(s) an application is writing to. + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: '/aws/lambda/my-function', 'opentelemetry-service' + // Note: Multiple log groups must be supported for cases like multi-container + // applications, where a single application has sidecar containers, and each write + // to their own log group. + AttributeAWSLogGroupNames = "aws.log.group.names" + // The Amazon Resource Name(s) (ARN) of the AWS log group(s). + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' + // Note: See the log group ARN format documentation. + AttributeAWSLogGroupARNs = "aws.log.group.arns" + // The name(s) of the AWS log stream(s) an application is writing to. + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + AttributeAWSLogStreamNames = "aws.log.stream.names" + // The ARN(s) of the AWS log stream(s). + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log- + // stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + // Note: See the log stream ARN format documentation. One log group can contain + // several log streams, so these ARNs necessarily identify both a log group and a + // log stream. + AttributeAWSLogStreamARNs = "aws.log.stream.arns" +) + +// A container instance. +const ( + // Container name used by container runtime. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry-autoconf' + AttributeContainerName = "container.name" + // Container ID. Usually a UUID, as for example used to identify Docker + // containers. The UUID might be abbreviated. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'a3bf90e006b2' + AttributeContainerID = "container.id" + // The container runtime managing this container. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'docker', 'containerd', 'rkt' + AttributeContainerRuntime = "container.runtime" + // Name of the image the container was built on. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'gcr.io/opentelemetry/operator' + AttributeContainerImageName = "container.image.name" + // Container image tag. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '0.1' + AttributeContainerImageTag = "container.image.tag" +) + +// The software deployment. +const ( + // Name of the deployment environment (aka deployment tier). 
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: 'staging', 'production' + AttributeDeploymentEnvironment = "deployment.environment" +) + +// The device on which the process represented by this resource is running. +const ( + // A unique identifier representing the device + // + // Type: string + // Required: No + // Stability: stable + // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' + // Note: The device identifier MUST only be defined using the values outlined + // below. This value is not an advertising identifier and MUST NOT be used as + // such. On iOS (Swift or Objective-C), this value MUST be equal to the vendor + // identifier. On Android (Java or Kotlin), this value MUST be equal to the + // Firebase Installation ID or a globally unique UUID which is persisted across + // sessions in your application. More information can be found here on best + // practices and exact implementation details. Caution should be taken when + // storing personal data or anything which can identify a user. GDPR and data + // protection laws may apply, ensure you do your own due diligence. + AttributeDeviceID = "device.id" + // The model identifier for the device + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'iPhone3,4', 'SM-G920F' + // Note: It's recommended this value represents a machine readable version of the + // model identifier rather than the market or consumer-friendly name of the + // device. + AttributeDeviceModelIdentifier = "device.model.identifier" + // The marketing name for the device model + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' + // Note: It's recommended this value represents a human readable version of the + // device model rather than a machine readable alternative. + AttributeDeviceModelName = "device.model.name" + // The name of the device manufacturer + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Apple', 'Samsung' + // Note: The Android OS provides this field via Build. iOS apps SHOULD hardcode + // the value Apple. + AttributeDeviceManufacturer = "device.manufacturer" +) + +// A serverless instance. +const ( + // The name of the single function that this runtime instance executes. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'my-function', 'myazurefunctionapp/some-function-name' + // Note: This is the name of the function as configured/deployed on the FaaS + // platform and is usually different from the name of the callback + // function (which may be stored in the + // code.namespace/code.function + // span attributes).For some cloud providers, the above definition is ambiguous. + // The following + // definition of function name MUST be used for this attribute + // (and consequently the span name) for the listed cloud providers/products:
+ // - Azure: The full name <FUNCAPP>/<FUNC>, i.e., function app name + // followed by a forward slash followed by the function name (this form + // can also be seen in the resource JSON for the function). + // This means that a span attribute MUST be used, as an Azure function + // app can host multiple functions that would usually share + // a TracerProvider (see also the faas.id attribute).
+ AttributeFaaSName = "faas.name" + // The unique ID of the single function that this runtime instance executes. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'arn:aws:lambda:us-west-2:123456789012:function:my-function' + // Note: On some cloud providers, it may not be possible to determine the full ID + // at startup, + // so consider setting faas.id as a span attribute instead. The exact value to use + // for faas.id depends on the cloud provider: + // - AWS Lambda: The function ARN. + // Take care not to use the "invoked ARN" directly but replace any + // alias suffix + // with the resolved function version, as the same runtime instance may be + // invocable with + // multiple different aliases. + // - GCP: The URI of the resource + // - Azure: The Fully Qualified Resource ID of the invoked function, + // not the function app, having the form + // /subscriptions/<SUBSCRIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>. + // This means that a span attribute MUST be used, as an Azure function app can + // host multiple functions that would usually share + // a TracerProvider.
+ AttributeFaaSID = "faas.id" + // The immutable version of the function being executed. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '26', 'pinkfroid-00002' + // Note: Depending on the cloud provider and platform, use: + // - AWS Lambda: The function version + // (an integer represented as a decimal string). + // - Google Cloud Run: The revision + // (i.e., the function name plus the revision suffix). + // - Google Cloud Functions: The value of the + // K_REVISION environment variable. + // - Azure Functions: Not applicable. Do not set this attribute.
+ AttributeFaaSVersion = "faas.version" + // The execution environment ID as a string, that will be potentially reused for + // other invocations to the same function/function version. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' + // Note: + // - AWS Lambda: Use the (full) log stream name.
+ AttributeFaaSInstance = "faas.instance" + // The amount of memory available to the serverless function in MiB. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 128 + // Note: It's recommended to set this attribute since e.g. too little memory can + // easily stop a Java AWS Lambda function from working correctly. On AWS Lambda, + // the environment variable AWS_LAMBDA_FUNCTION_MEMORY_SIZE provides this + // information. + AttributeFaaSMaxMemory = "faas.max_memory" +) + +// A host is defined as a general computing instance. +const ( + // Unique host ID. For Cloud, this must be the instance_id assigned by the cloud + // provider. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry-test' + AttributeHostID = "host.id" + // Name of the host. On Unix systems, it may contain what the hostname command + // returns, or the fully qualified hostname, or another name specified by the + // user. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry-test' + AttributeHostName = "host.name" + // Type of host. For Cloud, this must be the machine type. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'n1-standard-1' + AttributeHostType = "host.type" + // The CPU architecture the host system is running on. + // + // Type: Enum + // Required: No + // Stability: stable + AttributeHostArch = "host.arch" + // Name of the VM image or OS install the host was instantiated from. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' + AttributeHostImageName = "host.image.name" + // VM image ID. For Cloud, this value is from the provider. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'ami-07b06b442921831e5' + AttributeHostImageID = "host.image.id" + // The version string of the VM image as defined in Version Attributes. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '0.1' + AttributeHostImageVersion = "host.image.version" +) + +const ( + // AMD64 + AttributeHostArchAMD64 = "amd64" + // ARM32 + AttributeHostArchARM32 = "arm32" + // ARM64 + AttributeHostArchARM64 = "arm64" + // Itanium + AttributeHostArchIA64 = "ia64" + // 32-bit PowerPC + AttributeHostArchPPC32 = "ppc32" + // 64-bit PowerPC + AttributeHostArchPPC64 = "ppc64" + // IBM z/Architecture + AttributeHostArchS390x = "s390x" + // 32-bit x86 + AttributeHostArchX86 = "x86" +) + +// A Kubernetes Cluster. +const ( + // The name of the cluster. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry-cluster' + AttributeK8SClusterName = "k8s.cluster.name" +) + +// A Kubernetes Node object. +const ( + // The name of the Node. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'node-1' + AttributeK8SNodeName = "k8s.node.name" + // The UID of the Node. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' + AttributeK8SNodeUID = "k8s.node.uid" +) + +// A Kubernetes Namespace. +const ( + // The name of the namespace that the pod is running in. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'default' + AttributeK8SNamespaceName = "k8s.namespace.name" +) + +// A Kubernetes Pod object. +const ( + // The UID of the Pod. 
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SPodUID = "k8s.pod.uid" + // The name of the Pod. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry-pod-autoconf' + AttributeK8SPodName = "k8s.pod.name" +) + +// A container in a [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates). +const ( + // The name of the Container from Pod specification, must be unique within a Pod. + // Container runtime usually uses different globally unique name (container.name). + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'redis' + AttributeK8SContainerName = "k8s.container.name" + // Number of times the container was restarted. This attribute can be used to + // identify a particular container (running or stopped) within a container spec. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 0, 2 + AttributeK8SContainerRestartCount = "k8s.container.restart_count" +) + +// A Kubernetes ReplicaSet object. +const ( + // The UID of the ReplicaSet. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SReplicaSetUID = "k8s.replicaset.uid" + // The name of the ReplicaSet. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry' + AttributeK8SReplicaSetName = "k8s.replicaset.name" +) + +// A Kubernetes Deployment object. +const ( + // The UID of the Deployment. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SDeploymentUID = "k8s.deployment.uid" + // The name of the Deployment. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry' + AttributeK8SDeploymentName = "k8s.deployment.name" +) + +// A Kubernetes StatefulSet object. +const ( + // The UID of the StatefulSet. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SStatefulSetUID = "k8s.statefulset.uid" + // The name of the StatefulSet. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry' + AttributeK8SStatefulSetName = "k8s.statefulset.name" +) + +// A Kubernetes DaemonSet object. +const ( + // The UID of the DaemonSet. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SDaemonSetUID = "k8s.daemonset.uid" + // The name of the DaemonSet. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry' + AttributeK8SDaemonSetName = "k8s.daemonset.name" +) + +// A Kubernetes Job object. +const ( + // The UID of the Job. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SJobUID = "k8s.job.uid" + // The name of the Job. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry' + AttributeK8SJobName = "k8s.job.name" +) + +// A Kubernetes CronJob object. +const ( + // The UID of the CronJob. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SCronJobUID = "k8s.cronjob.uid" + // The name of the CronJob. 
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry' + AttributeK8SCronJobName = "k8s.cronjob.name" +) + +// The operating system (OS) on which the process represented by this resource is running. +const ( + // The operating system type. + // + // Type: Enum + // Required: Always + // Stability: stable + AttributeOSType = "os.type" + // Human readable (not intended to be parsed) OS version information, like e.g. + // reported by ver or lsb_release -a commands. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 LTS' + AttributeOSDescription = "os.description" + // Human readable operating system name. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'iOS', 'Android', 'Ubuntu' + AttributeOSName = "os.name" + // The version string of the operating system as defined in Version Attributes. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '14.2.1', '18.04.1' + AttributeOSVersion = "os.version" +) + +const ( + // Microsoft Windows + AttributeOSTypeWindows = "windows" + // Linux + AttributeOSTypeLinux = "linux" + // Apple Darwin + AttributeOSTypeDarwin = "darwin" + // FreeBSD + AttributeOSTypeFreeBSD = "freebsd" + // NetBSD + AttributeOSTypeNetBSD = "netbsd" + // OpenBSD + AttributeOSTypeOpenBSD = "openbsd" + // DragonFly BSD + AttributeOSTypeDragonflyBSD = "dragonflybsd" + // HP-UX (Hewlett Packard Unix) + AttributeOSTypeHPUX = "hpux" + // AIX (Advanced Interactive eXecutive) + AttributeOSTypeAIX = "aix" + // SunOS, Oracle Solaris + AttributeOSTypeSolaris = "solaris" + // IBM z/OS + AttributeOSTypeZOS = "z_os" +) + +// An operating system process. +const ( + // Process identifier (PID). + // + // Type: int + // Required: No + // Stability: stable + // Examples: 1234 + AttributeProcessPID = "process.pid" + // The name of the process executable. On Linux based systems, can be set to the + // Name in proc/[pid]/status. On Windows, can be set to the base name of + // GetProcessImageFileNameW. + // + // Type: string + // Required: See below + // Stability: stable + // Examples: 'otelcol' + AttributeProcessExecutableName = "process.executable.name" + // The full path to the process executable. On Linux based systems, can be set to + // the target of proc/[pid]/exe. On Windows, can be set to the result of + // GetProcessImageFileNameW. + // + // Type: string + // Required: See below + // Stability: stable + // Examples: '/usr/bin/cmd/otelcol' + AttributeProcessExecutablePath = "process.executable.path" + // The command used to launch the process (i.e. the command name). On Linux based + // systems, can be set to the zeroth string in proc/[pid]/cmdline. On Windows, can + // be set to the first parameter extracted from GetCommandLineW. + // + // Type: string + // Required: See below + // Stability: stable + // Examples: 'cmd/otelcol' + AttributeProcessCommand = "process.command" + // The full command used to launch the process as a single string representing the + // full command. On Windows, can be set to the result of GetCommandLineW. Do not + // set this if you have to assemble it just for monitoring; use + // process.command_args instead. 
+ // + // Type: string + // Required: See below + // Stability: stable + // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' + AttributeProcessCommandLine = "process.command_line" + // All the command arguments (including the command/executable itself) as received + // by the process. On Linux-based systems (and some other Unixoid systems + // supporting procfs), can be set according to the list of null-delimited strings + // extracted from proc/[pid]/cmdline. For libc-based executables, this would be + // the full argv vector passed to main. + // + // Type: string[] + // Required: See below + // Stability: stable + // Examples: 'cmd/otecol', '--config=config.yaml' + AttributeProcessCommandArgs = "process.command_args" + // The username of the user that owns the process. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'root' + AttributeProcessOwner = "process.owner" +) + +// The single (language) runtime instance which is monitored. +const ( + // The name of the runtime of this process. For compiled native binaries, this + // SHOULD be the name of the compiler. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'OpenJDK Runtime Environment' + AttributeProcessRuntimeName = "process.runtime.name" + // The version of the runtime of this process, as returned by the runtime without + // modification. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '14.0.2' + AttributeProcessRuntimeVersion = "process.runtime.version" + // An additional description about the runtime of the process, for example a + // specific vendor customization of the runtime environment. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' + AttributeProcessRuntimeDescription = "process.runtime.description" +) + +// A service instance. +const ( + // Logical name of the service. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'shoppingcart' + // Note: MUST be the same for all instances of horizontally scaled services. If + // the value was not specified, SDKs MUST fallback to unknown_service: + // concatenated with process.executable.name, e.g. unknown_service:bash. If + // process.executable.name is not available, the value MUST be set to + // unknown_service. + AttributeServiceName = "service.name" + // A namespace for service.name. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Shop' + // Note: A string value having a meaning that helps to distinguish a group of + // services, for example the team name that owns a group of services. service.name + // is expected to be unique within the same namespace. If service.namespace is not + // specified in the Resource then service.name is expected to be unique for all + // services that have no explicit namespace defined (so the empty/unspecified + // namespace is simply one more valid namespace). Zero-length namespace string is + // assumed equal to unspecified namespace. + AttributeServiceNamespace = "service.namespace" + // The string ID of the service instance. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '627cc493-f310-47de-96bd-71410b7dec09' + // Note: MUST be unique for each instance of the same + // service.namespace,service.name pair (in other words + // service.namespace,service.name,service.instance.id triplet MUST be globally + // unique). 
The ID helps to distinguish instances of the same service that exist + // at the same time (e.g. instances of a horizontally scaled service). It is + // preferable for the ID to be persistent and stay the same for the lifetime of + // the service instance, however it is acceptable that the ID is ephemeral and + // changes during important lifetime events for the service (e.g. service + // restarts). If the service has no inherent unique ID that can be used as the + // value of this attribute it is recommended to generate a random Version 1 or + // Version 4 RFC 4122 UUID (services aiming for reproducible UUIDs may also use + // Version 5, see RFC 4122 for more recommendations). + AttributeServiceInstanceID = "service.instance.id" + // The version string of the service API or implementation. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '2.0.0' + AttributeServiceVersion = "service.version" +) + +// The telemetry SDK used to capture data recorded by the instrumentation libraries. +const ( + // The name of the telemetry SDK as defined above. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry' + AttributeTelemetrySDKName = "telemetry.sdk.name" + // The language of the telemetry SDK. + // + // Type: Enum + // Required: No + // Stability: stable + AttributeTelemetrySDKLanguage = "telemetry.sdk.language" + // The version string of the telemetry SDK. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '1.2.3' + AttributeTelemetrySDKVersion = "telemetry.sdk.version" + // The version string of the auto instrumentation agent, if used. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '1.2.3' + AttributeTelemetryAutoVersion = "telemetry.auto.version" +) + +const ( + // cpp + AttributeTelemetrySDKLanguageCPP = "cpp" + // dotnet + AttributeTelemetrySDKLanguageDotnet = "dotnet" + // erlang + AttributeTelemetrySDKLanguageErlang = "erlang" + // go + AttributeTelemetrySDKLanguageGo = "go" + // java + AttributeTelemetrySDKLanguageJava = "java" + // nodejs + AttributeTelemetrySDKLanguageNodejs = "nodejs" + // php + AttributeTelemetrySDKLanguagePHP = "php" + // python + AttributeTelemetrySDKLanguagePython = "python" + // ruby + AttributeTelemetrySDKLanguageRuby = "ruby" + // webjs + AttributeTelemetrySDKLanguageWebjs = "webjs" + // swift + AttributeTelemetrySDKLanguageSwift = "swift" +) + +// Resource describing the packaged software running the application code. Web engines are typically executed using process.runtime. +const ( + // The name of the web engine. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'WildFly' + AttributeWebEngineName = "webengine.name" + // The version of the web engine. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '21.0.0' + AttributeWebEngineVersion = "webengine.version" + // Additional description of the web engine (e.g. detailed version and edition + // information). 
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - 2.2.2.Final'
+ AttributeWebEngineDescription = "webengine.description"
+)
+
+func GetResourceSemanticConventionAttributeNames() []string {
+ return []string{
+ AttributeBrowserBrands,
+ AttributeBrowserPlatform,
+ AttributeBrowserUserAgent,
+ AttributeCloudProvider,
+ AttributeCloudAccountID,
+ AttributeCloudRegion,
+ AttributeCloudAvailabilityZone,
+ AttributeCloudPlatform,
+ AttributeAWSECSContainerARN,
+ AttributeAWSECSClusterARN,
+ AttributeAWSECSLaunchtype,
+ AttributeAWSECSTaskARN,
+ AttributeAWSECSTaskFamily,
+ AttributeAWSECSTaskRevision,
+ AttributeAWSEKSClusterARN,
+ AttributeAWSLogGroupNames,
+ AttributeAWSLogGroupARNs,
+ AttributeAWSLogStreamNames,
+ AttributeAWSLogStreamARNs,
+ AttributeContainerName,
+ AttributeContainerID,
+ AttributeContainerRuntime,
+ AttributeContainerImageName,
+ AttributeContainerImageTag,
+ AttributeDeploymentEnvironment,
+ AttributeDeviceID,
+ AttributeDeviceModelIdentifier,
+ AttributeDeviceModelName,
+ AttributeDeviceManufacturer,
+ AttributeFaaSName,
+ AttributeFaaSID,
+ AttributeFaaSVersion,
+ AttributeFaaSInstance,
+ AttributeFaaSMaxMemory,
+ AttributeHostID,
+ AttributeHostName,
+ AttributeHostType,
+ AttributeHostArch,
+ AttributeHostImageName,
+ AttributeHostImageID,
+ AttributeHostImageVersion,
+ AttributeK8SClusterName,
+ AttributeK8SNodeName,
+ AttributeK8SNodeUID,
+ AttributeK8SNamespaceName,
+ AttributeK8SPodUID,
+ AttributeK8SPodName,
+ AttributeK8SContainerName,
+ AttributeK8SContainerRestartCount,
+ AttributeK8SReplicaSetUID,
+ AttributeK8SReplicaSetName,
+ AttributeK8SDeploymentUID,
+ AttributeK8SDeploymentName,
+ AttributeK8SStatefulSetUID,
+ AttributeK8SStatefulSetName,
+ AttributeK8SDaemonSetUID,
+ AttributeK8SDaemonSetName,
+ AttributeK8SJobUID,
+ AttributeK8SJobName,
+ AttributeK8SCronJobUID,
+ AttributeK8SCronJobName,
+ AttributeOSType,
+ AttributeOSDescription,
+ AttributeOSName,
+ AttributeOSVersion,
+ AttributeProcessPID,
+ AttributeProcessExecutableName,
+ AttributeProcessExecutablePath,
+ AttributeProcessCommand,
+ AttributeProcessCommandLine,
+ AttributeProcessCommandArgs,
+ AttributeProcessOwner,
+ AttributeProcessRuntimeName,
+ AttributeProcessRuntimeVersion,
+ AttributeProcessRuntimeDescription,
+ AttributeServiceName,
+ AttributeServiceNamespace,
+ AttributeServiceInstanceID,
+ AttributeServiceVersion,
+ AttributeTelemetrySDKName,
+ AttributeTelemetrySDKLanguage,
+ AttributeTelemetrySDKVersion,
+ AttributeTelemetryAutoVersion,
+ AttributeWebEngineName,
+ AttributeWebEngineVersion,
+ AttributeWebEngineDescription,
+ }
+}
diff --git a/vendor/go.opentelemetry.io/collector/semconv/v1.12.0/generated_trace.go b/vendor/go.opentelemetry.io/collector/semconv/v1.12.0/generated_trace.go
new file mode 100644
index 00000000000..ab9d27236a7
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/semconv/v1.12.0/generated_trace.go
@@ -0,0 +1,1805 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv
+
+// Span attributes used by AWS Lambda (in addition to general `faas` attributes).
+const (
+ // The full invoked ARN as provided on the Context passed to the function (Lambda-
+ // Runtime-Invoked-Function-ARN header on the /runtime/invocation/next
+ // response, where applicable).
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' + // Note: This may be different from faas.id if an alias is involved. + AttributeAWSLambdaInvokedARN = "aws.lambda.invoked_arn" +) + +// This document defines attributes for CloudEvents. CloudEvents is a specification on how to define event data in a standard way. These attributes can be attached to spans when performing operations with CloudEvents, regardless of the protocol being used. +const ( + // The event_id uniquely identifies the event. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' + AttributeCloudeventsEventID = "cloudevents.event_id" + // The source identifies the context in which an event happened. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'https://github.com/cloudevents', '/cloudevents/spec/pull/123', 'my- + // service' + AttributeCloudeventsEventSource = "cloudevents.event_source" + // The version of the CloudEvents specification which the event uses. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: '1.0' + AttributeCloudeventsEventSpecVersion = "cloudevents.event_spec_version" + // The event_type contains a value describing the type of event related to the + // originating occurrence. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'com.github.pull_request.opened', 'com.example.object.deleted.v2' + AttributeCloudeventsEventType = "cloudevents.event_type" + // The subject of the event in the context of the event producer (identified by + // source). + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'mynewfile.jpg' + AttributeCloudeventsEventSubject = "cloudevents.event_subject" +) + +// This document defines semantic conventions for the OpenTracing Shim +const ( + // Parent-child Reference type + // + // Type: Enum + // Required: No + // Stability: stable + // Note: The causal relationship between a child Span and a parent Span. + AttributeOpentracingRefType = "opentracing.ref_type" +) + +const ( + // The parent Span depends on the child Span in some capacity + AttributeOpentracingRefTypeChildOf = "child_of" + // The parent Span does not depend in any way on the result of the child Span + AttributeOpentracingRefTypeFollowsFrom = "follows_from" +) + +// This document defines the attributes used to perform database client calls. +const ( + // An identifier for the database management system (DBMS) product being used. See + // below for a list of well-known identifiers. + // + // Type: Enum + // Required: Always + // Stability: stable + AttributeDBSystem = "db.system" + // The connection string used to connect to the database. It is recommended to + // remove embedded credentials. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' + AttributeDBConnectionString = "db.connection_string" + // Username for accessing the database. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'readonly_user', 'reporting_user' + AttributeDBUser = "db.user" + // The fully-qualified class name of the Java Database Connectivity (JDBC) driver + // used to connect. 
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: 'org.postgresql.Driver', + // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' + AttributeDBJDBCDriverClassname = "db.jdbc.driver_classname" + // This attribute is used to report the name of the database being accessed. For + // commands that switch the database, this should be set to the target database + // (even if the command fails). + // + // Type: string + // Required: Required, if applicable. + // Stability: stable + // Examples: 'customers', 'main' + // Note: In some SQL databases, the database name to be used is called + // "schema name". In case there are multiple layers that could be + // considered for database name (e.g. Oracle instance name and schema name), the + // database name to be used is the more specific layer (e.g. Oracle schema name). + AttributeDBName = "db.name" + // The database statement being executed. + // + // Type: string + // Required: Required if applicable and not explicitly disabled via + // instrumentation configuration. + // Stability: stable + // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' + // Note: The value may be sanitized to exclude sensitive information. + AttributeDBStatement = "db.statement" + // The name of the operation being executed, e.g. the MongoDB command name such as + // findAndModify, or the SQL keyword. + // + // Type: string + // Required: Required, if `db.statement` is not applicable. + // Stability: stable + // Examples: 'findAndModify', 'HMSET', 'SELECT' + // Note: When setting this to an SQL keyword, it is not recommended to attempt any + // client-side parsing of db.statement just to get this property, but it should be + // set if the operation name is provided by the library being instrumented. If the + // SQL statement has an ambiguous operation, or performs more than one operation, + // this value may be omitted. + AttributeDBOperation = "db.operation" +) + +const ( + // Some other SQL database. Fallback only. 
See notes + AttributeDBSystemOtherSQL = "other_sql" + // Microsoft SQL Server + AttributeDBSystemMSSQL = "mssql" + // MySQL + AttributeDBSystemMySQL = "mysql" + // Oracle Database + AttributeDBSystemOracle = "oracle" + // IBM DB2 + AttributeDBSystemDB2 = "db2" + // PostgreSQL + AttributeDBSystemPostgreSQL = "postgresql" + // Amazon Redshift + AttributeDBSystemRedshift = "redshift" + // Apache Hive + AttributeDBSystemHive = "hive" + // Cloudscape + AttributeDBSystemCloudscape = "cloudscape" + // HyperSQL DataBase + AttributeDBSystemHSQLDB = "hsqldb" + // Progress Database + AttributeDBSystemProgress = "progress" + // SAP MaxDB + AttributeDBSystemMaxDB = "maxdb" + // SAP HANA + AttributeDBSystemHanaDB = "hanadb" + // Ingres + AttributeDBSystemIngres = "ingres" + // FirstSQL + AttributeDBSystemFirstSQL = "firstsql" + // EnterpriseDB + AttributeDBSystemEDB = "edb" + // InterSystems Caché + AttributeDBSystemCache = "cache" + // Adabas (Adaptable Database System) + AttributeDBSystemAdabas = "adabas" + // Firebird + AttributeDBSystemFirebird = "firebird" + // Apache Derby + AttributeDBSystemDerby = "derby" + // FileMaker + AttributeDBSystemFilemaker = "filemaker" + // Informix + AttributeDBSystemInformix = "informix" + // InstantDB + AttributeDBSystemInstantDB = "instantdb" + // InterBase + AttributeDBSystemInterbase = "interbase" + // MariaDB + AttributeDBSystemMariaDB = "mariadb" + // Netezza + AttributeDBSystemNetezza = "netezza" + // Pervasive PSQL + AttributeDBSystemPervasive = "pervasive" + // PointBase + AttributeDBSystemPointbase = "pointbase" + // SQLite + AttributeDBSystemSqlite = "sqlite" + // Sybase + AttributeDBSystemSybase = "sybase" + // Teradata + AttributeDBSystemTeradata = "teradata" + // Vertica + AttributeDBSystemVertica = "vertica" + // H2 + AttributeDBSystemH2 = "h2" + // ColdFusion IMQ + AttributeDBSystemColdfusion = "coldfusion" + // Apache Cassandra + AttributeDBSystemCassandra = "cassandra" + // Apache HBase + AttributeDBSystemHBase = "hbase" + // MongoDB + AttributeDBSystemMongoDB = "mongodb" + // Redis + AttributeDBSystemRedis = "redis" + // Couchbase + AttributeDBSystemCouchbase = "couchbase" + // CouchDB + AttributeDBSystemCouchDB = "couchdb" + // Microsoft Azure Cosmos DB + AttributeDBSystemCosmosDB = "cosmosdb" + // Amazon DynamoDB + AttributeDBSystemDynamoDB = "dynamodb" + // Neo4j + AttributeDBSystemNeo4j = "neo4j" + // Apache Geode + AttributeDBSystemGeode = "geode" + // Elasticsearch + AttributeDBSystemElasticsearch = "elasticsearch" + // Memcached + AttributeDBSystemMemcached = "memcached" + // CockroachDB + AttributeDBSystemCockroachdb = "cockroachdb" +) + +// Connection-level attributes for Microsoft SQL Server +const ( + // The Microsoft SQL Server instance name connecting to. This name is used to + // determine the port of a named instance. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'MSSQLSERVER' + // Note: If setting a db.mssql.instance_name, net.peer.port is no longer required + // (but still recommended if non-standard). + AttributeDBMSSQLInstanceName = "db.mssql.instance_name" +) + +// Call-level attributes for Cassandra +const ( + // The fetch size used for paging, i.e. how many rows will be returned at once. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 5000 + AttributeDBCassandraPageSize = "db.cassandra.page_size" + // The consistency level of the query. Based on consistency values from CQL. 
+ // + // Type: Enum + // Required: No + // Stability: stable + AttributeDBCassandraConsistencyLevel = "db.cassandra.consistency_level" + // The name of the primary table that the operation is acting upon, including the + // keyspace name (if applicable). + // + // Type: string + // Required: Recommended if available. + // Stability: stable + // Examples: 'mytable' + // Note: This mirrors the db.sql.table attribute but references cassandra rather + // than sql. It is not recommended to attempt any client-side parsing of + // db.statement just to get this property, but it should be set if it is provided + // by the library being instrumented. If the operation is acting upon an anonymous + // table, or more than one table, this value MUST NOT be set. + AttributeDBCassandraTable = "db.cassandra.table" + // Whether or not the query is idempotent. + // + // Type: boolean + // Required: No + // Stability: stable + AttributeDBCassandraIdempotence = "db.cassandra.idempotence" + // The number of times a query was speculatively executed. Not set or 0 if the + // query was not executed speculatively. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 0, 2 + AttributeDBCassandraSpeculativeExecutionCount = "db.cassandra.speculative_execution_count" + // The ID of the coordinating node for a query. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' + AttributeDBCassandraCoordinatorID = "db.cassandra.coordinator.id" + // The data center of the coordinating node for a query. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'us-west-2' + AttributeDBCassandraCoordinatorDC = "db.cassandra.coordinator.dc" +) + +const ( + // all + AttributeDBCassandraConsistencyLevelAll = "all" + // each_quorum + AttributeDBCassandraConsistencyLevelEachQuorum = "each_quorum" + // quorum + AttributeDBCassandraConsistencyLevelQuorum = "quorum" + // local_quorum + AttributeDBCassandraConsistencyLevelLocalQuorum = "local_quorum" + // one + AttributeDBCassandraConsistencyLevelOne = "one" + // two + AttributeDBCassandraConsistencyLevelTwo = "two" + // three + AttributeDBCassandraConsistencyLevelThree = "three" + // local_one + AttributeDBCassandraConsistencyLevelLocalOne = "local_one" + // any + AttributeDBCassandraConsistencyLevelAny = "any" + // serial + AttributeDBCassandraConsistencyLevelSerial = "serial" + // local_serial + AttributeDBCassandraConsistencyLevelLocalSerial = "local_serial" +) + +// Call-level attributes for Redis +const ( + // The index of the database being accessed as used in the SELECT command, + // provided as an integer. To be used instead of the generic db.name attribute. + // + // Type: int + // Required: Required, if other than the default database (`0`). + // Stability: stable + // Examples: 0, 1, 15 + AttributeDBRedisDBIndex = "db.redis.database_index" +) + +// Call-level attributes for MongoDB +const ( + // The collection being accessed within the database stated in db.name. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'customers', 'products' + AttributeDBMongoDBCollection = "db.mongodb.collection" +) + +// Call-level attributes for SQL databases +const ( + // The name of the primary table that the operation is acting upon, including the + // database name (if applicable). + // + // Type: string + // Required: Recommended if available. 
+ // Stability: stable
+ // Examples: 'public.users', 'customers'
+ // Note: It is not recommended to attempt any client-side parsing of db.statement
+ // just to get this property, but it should be set if it is provided by the
+ // library being instrumented. If the operation is acting upon an anonymous table,
+ // or more than one table, this value MUST NOT be set.
+ AttributeDBSQLTable = "db.sql.table"
+)
+
+// This document defines the attributes used to report a single exception associated with a span.
+const (
+ // The type of the exception (its fully-qualified class name, if applicable). The
+ // dynamic type of the exception should be preferred over the static type in
+ // languages that support it.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'java.net.ConnectException', 'OSError'
+ AttributeExceptionType = "exception.type"
+ // The exception message.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'Division by zero', "Can't convert 'int' object to str implicitly"
+ AttributeExceptionMessage = "exception.message"
+ // A stacktrace as a string in the natural representation for the language
+ // runtime. The representation is to be determined and documented by each language
+ // SIG.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test
+ // exception\\n at '
+ // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at '
+ // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at '
+ // 'com.example.GenerateTrace.main(GenerateTrace.java:5)'
+ AttributeExceptionStacktrace = "exception.stacktrace"
+ // SHOULD be set to true if the exception event is recorded at a point where it is
+ // known that the exception is escaping the scope of the span.
+ //
+ // Type: boolean
+ // Required: No
+ // Stability: stable
+ // Note: An exception is considered to have escaped (or left) the scope of a span,
+ // if that span is ended while the exception is still logically "in
+ // flight".
+ // This may be actually "in flight" in some languages (e.g. if the
+ // exception
+ // is passed to a Context manager's __exit__ method in Python) but will
+ // usually be caught at the point of recording the exception in most languages. It
+ // is usually not possible to determine at the point where an exception is thrown
+ // whether it will escape the scope of a span.
+ // However, it is trivial to know that an exception
+ // will escape, if one checks for an active exception just before ending the span,
+ // as done in the example above. It follows that an exception may still escape the
+ // scope of the span
+ // even if the exception.escaped attribute was not set or set to false,
+ // since the event might have been recorded at a time where it was not
+ // clear whether the exception will escape.
+ AttributeExceptionEscaped = "exception.escaped"
+)
+
+// This semantic convention describes an instance of a function that runs without provisioning or managing of servers (also known as serverless functions or Function as a Service (FaaS)) with spans.
+const (
+ // Type of the trigger which caused this function execution.
+ //
+ // Type: Enum
+ // Required: No
+ // Stability: stable
+ // Note: For the server/consumer span on the incoming side,
+ // faas.trigger MUST be set. Clients invoking FaaS instances usually cannot set
+ // faas.trigger,
+ // since they would typically need to look in the payload to determine
+ // the event type.
If clients set it, it should be the same as the
+ // trigger that the corresponding incoming span would have (i.e., this has
+ // nothing to do with the underlying transport used to make the API
+ // call to invoke the lambda, which is often HTTP).
+ AttributeFaaSTrigger = "faas.trigger"
+ // The execution ID of the current function execution.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28'
+ AttributeFaaSExecution = "faas.execution"
+)
+
+const (
+ // A response to some data source operation such as a database or filesystem read/write
+ AttributeFaaSTriggerDatasource = "datasource"
+ // To provide an answer to an inbound HTTP request
+ AttributeFaaSTriggerHTTP = "http"
+ // A function is set to be executed when messages are sent to a messaging system
+ AttributeFaaSTriggerPubsub = "pubsub"
+ // A function is scheduled to be executed regularly
+ AttributeFaaSTriggerTimer = "timer"
+ // If none of the others apply
+ AttributeFaaSTriggerOther = "other"
+)
+
+// Semantic Convention for FaaS triggered as a response to some data source operation such as a database or filesystem read/write.
+const (
+ // The name of the source on which the triggering operation was performed. For
+ // example, in Cloud Storage or S3 corresponds to the bucket name, and in Cosmos
+ // DB to the database name.
+ //
+ // Type: string
+ // Required: Always
+ // Stability: stable
+ // Examples: 'myBucketName', 'myDBName'
+ AttributeFaaSDocumentCollection = "faas.document.collection"
+ // Describes the type of the operation that was performed on the data.
+ //
+ // Type: Enum
+ // Required: Always
+ // Stability: stable
+ AttributeFaaSDocumentOperation = "faas.document.operation"
+ // A string containing the time when the data was accessed in the ISO 8601 format
+ // expressed in UTC.
+ //
+ // Type: string
+ // Required: Always
+ // Stability: stable
+ // Examples: '2020-01-23T13:47:06Z'
+ AttributeFaaSDocumentTime = "faas.document.time"
+ // The document name/table subjected to the operation. For example, in Cloud
+ // Storage or S3 is the name of the file, and in Cosmos DB the table name.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'myFile.txt', 'myTableName'
+ AttributeFaaSDocumentName = "faas.document.name"
+)
+
+const (
+ // When a new object is created
+ AttributeFaaSDocumentOperationInsert = "insert"
+ // When an object is modified
+ AttributeFaaSDocumentOperationEdit = "edit"
+ // When an object is deleted
+ AttributeFaaSDocumentOperationDelete = "delete"
+)
+
+// Semantic Convention for FaaS scheduled to be executed regularly.
+const (
+ // A string containing the function invocation time in the ISO 8601 format
+ // expressed in UTC.
+ //
+ // Type: string
+ // Required: Always
+ // Stability: stable
+ // Examples: '2020-01-23T13:47:06Z'
+ AttributeFaaSTime = "faas.time"
+ // A string containing the schedule period as Cron Expression.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '0/5 * * * ? *'
+ AttributeFaaSCron = "faas.cron"
+)
+
+// Contains additional attributes for incoming FaaS spans.
+const (
+ // A boolean that is true if the serverless function is executed for the first
+ // time (aka cold-start).
+ //
+ // Type: boolean
+ // Required: No
+ // Stability: stable
+ AttributeFaaSColdstart = "faas.coldstart"
+)
+
+// Contains additional attributes for outgoing FaaS spans.
+const (
+ // The name of the invoked function.
+ // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'my-function' + // Note: SHOULD be equal to the faas.name resource attribute of the invoked + // function. + AttributeFaaSInvokedName = "faas.invoked_name" + // The cloud provider of the invoked function. + // + // Type: Enum + // Required: Always + // Stability: stable + // Note: SHOULD be equal to the cloud.provider resource attribute of the invoked + // function. + AttributeFaaSInvokedProvider = "faas.invoked_provider" + // The cloud region of the invoked function. + // + // Type: string + // Required: For some cloud providers, like AWS or GCP, the region in which a + // function is hosted is essential to uniquely identify the function and also part + // of its endpoint. Since it's part of the endpoint being called, the region is + // always known to clients. In these cases, `faas.invoked_region` MUST be set + // accordingly. If the region is unknown to the client or not required for + // identifying the invoked function, setting `faas.invoked_region` is optional. + // Stability: stable + // Examples: 'eu-central-1' + // Note: SHOULD be equal to the cloud.region resource attribute of the invoked + // function. + AttributeFaaSInvokedRegion = "faas.invoked_region" +) + +const ( + // Alibaba Cloud + AttributeFaaSInvokedProviderAlibabaCloud = "alibaba_cloud" + // Amazon Web Services + AttributeFaaSInvokedProviderAWS = "aws" + // Microsoft Azure + AttributeFaaSInvokedProviderAzure = "azure" + // Google Cloud Platform + AttributeFaaSInvokedProviderGCP = "gcp" + // Tencent Cloud + AttributeFaaSInvokedProviderTencentCloud = "tencent_cloud" +) + +// These attributes may be used for any network related operation. +const ( + // Transport protocol used. See note below. + // + // Type: Enum + // Required: No + // Stability: stable + AttributeNetTransport = "net.transport" + // Remote address of the peer (dotted decimal for IPv4 or RFC5952 for IPv6) + // + // Type: string + // Required: No + // Stability: stable + // Examples: '127.0.0.1' + AttributeNetPeerIP = "net.peer.ip" + // Remote port number. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 80, 8080, 443 + AttributeNetPeerPort = "net.peer.port" + // Remote hostname or similar, see note below. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'example.com' + // Note: net.peer.name SHOULD NOT be set if capturing it would require an extra + // DNS lookup. + AttributeNetPeerName = "net.peer.name" + // Like net.peer.ip but for the host IP. Useful in case of a multi-IP host. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '192.168.0.1' + AttributeNetHostIP = "net.host.ip" + // Like net.peer.port but for the host port. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 35555 + AttributeNetHostPort = "net.host.port" + // Local hostname or similar, see note below. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'localhost' + AttributeNetHostName = "net.host.name" + // The internet connection type currently being used by the host. + // + // Type: Enum + // Required: No + // Stability: stable + // Examples: 'wifi' + AttributeNetHostConnectionType = "net.host.connection.type" + // This describes more details regarding the connection.type. It may be the type + // of cell technology connection, but it could be used for describing details + // about a wifi connection. 
+ // + // Type: Enum + // Required: No + // Stability: stable + // Examples: 'LTE' + AttributeNetHostConnectionSubtype = "net.host.connection.subtype" + // The name of the mobile carrier. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'sprint' + AttributeNetHostCarrierName = "net.host.carrier.name" + // The mobile carrier country code. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '310' + AttributeNetHostCarrierMcc = "net.host.carrier.mcc" + // The mobile carrier network code. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '001' + AttributeNetHostCarrierMnc = "net.host.carrier.mnc" + // The ISO 3166-1 alpha-2 2-character country code associated with the mobile + // carrier network. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'DE' + AttributeNetHostCarrierIcc = "net.host.carrier.icc" +) + +const ( + // ip_tcp + AttributeNetTransportTCP = "ip_tcp" + // ip_udp + AttributeNetTransportUDP = "ip_udp" + // Another IP-based protocol + AttributeNetTransportIP = "ip" + // Unix Domain socket. See below + AttributeNetTransportUnix = "unix" + // Named or anonymous pipe. See note below + AttributeNetTransportPipe = "pipe" + // In-process communication + AttributeNetTransportInProc = "inproc" + // Something else (non IP-based) + AttributeNetTransportOther = "other" +) + +const ( + // wifi + AttributeNetHostConnectionTypeWifi = "wifi" + // wired + AttributeNetHostConnectionTypeWired = "wired" + // cell + AttributeNetHostConnectionTypeCell = "cell" + // unavailable + AttributeNetHostConnectionTypeUnavailable = "unavailable" + // unknown + AttributeNetHostConnectionTypeUnknown = "unknown" +) + +const ( + // GPRS + AttributeNetHostConnectionSubtypeGprs = "gprs" + // EDGE + AttributeNetHostConnectionSubtypeEdge = "edge" + // UMTS + AttributeNetHostConnectionSubtypeUmts = "umts" + // CDMA + AttributeNetHostConnectionSubtypeCdma = "cdma" + // EVDO Rel. 0 + AttributeNetHostConnectionSubtypeEvdo0 = "evdo_0" + // EVDO Rev. A + AttributeNetHostConnectionSubtypeEvdoA = "evdo_a" + // CDMA2000 1XRTT + AttributeNetHostConnectionSubtypeCdma20001xrtt = "cdma2000_1xrtt" + // HSDPA + AttributeNetHostConnectionSubtypeHsdpa = "hsdpa" + // HSUPA + AttributeNetHostConnectionSubtypeHsupa = "hsupa" + // HSPA + AttributeNetHostConnectionSubtypeHspa = "hspa" + // IDEN + AttributeNetHostConnectionSubtypeIden = "iden" + // EVDO Rev. B + AttributeNetHostConnectionSubtypeEvdoB = "evdo_b" + // LTE + AttributeNetHostConnectionSubtypeLte = "lte" + // EHRPD + AttributeNetHostConnectionSubtypeEhrpd = "ehrpd" + // HSPAP + AttributeNetHostConnectionSubtypeHspap = "hspap" + // GSM + AttributeNetHostConnectionSubtypeGsm = "gsm" + // TD-SCDMA + AttributeNetHostConnectionSubtypeTdScdma = "td_scdma" + // IWLAN + AttributeNetHostConnectionSubtypeIwlan = "iwlan" + // 5G NR (New Radio) + AttributeNetHostConnectionSubtypeNr = "nr" + // 5G NRNSA (New Radio Non-Standalone) + AttributeNetHostConnectionSubtypeNrnsa = "nrnsa" + // LTE CA + AttributeNetHostConnectionSubtypeLteCa = "lte_ca" +) + +// Operations that access some remote service. +const ( + // The service.name of the remote service. SHOULD be equal to the actual + // service.name resource attribute of the remote service if any. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'AuthTokenCache' + AttributePeerService = "peer.service" +) + +// These attributes may be used for any operation with an authenticated and/or authorized enduser. 
+const (
+ // Username or client_id extracted from the access token or Authorization header
+ // in the inbound request from outside the system.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'username'
+ AttributeEnduserID = "enduser.id"
+ // Actual/assumed role the client is making the request under, extracted from the
+ // token or application security context.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'admin'
+ AttributeEnduserRole = "enduser.role"
+ // Scopes or granted authorities the client currently possesses, extracted from
+ // the token or application security context. The value would come from the scope
+ // associated with an OAuth 2.0 Access Token or an attribute value in a SAML 2.0
+ // Assertion.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'read:message, write:files'
+ AttributeEnduserScope = "enduser.scope"
+)
+
+// These attributes may be used for any operation to store information about a thread that started a span.
+const (
+ // Current "managed" thread ID (as opposed to OS thread ID).
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ // Examples: 42
+ AttributeThreadID = "thread.id"
+ // Current thread name.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'main'
+ AttributeThreadName = "thread.name"
+)
+
+// These attributes allow reporting this unit of code and therefore provide more context about the span.
+const (
+ // The method or function name, or equivalent (usually rightmost part of the code
+ // unit's name).
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'serveRequest'
+ AttributeCodeFunction = "code.function"
+ // The "namespace" within which code.function is defined. Usually the
+ // qualified class or module name, such that code.namespace + some separator +
+ // code.function form a unique identifier for the code unit.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'com.example.MyHTTPService'
+ AttributeCodeNamespace = "code.namespace"
+ // The source code file name that identifies the code unit as uniquely as possible
+ // (preferably an absolute file path).
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '/usr/local/MyApplication/content_root/app/index.php'
+ AttributeCodeFilepath = "code.filepath"
+ // The line number in code.filepath best representing the operation. It SHOULD
+ // point within the code unit named in code.function.
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ // Examples: 42
+ AttributeCodeLineNumber = "code.lineno"
+)
+
+// This document defines semantic conventions for HTTP client and server Spans.
+const (
+ // HTTP request method.
+ //
+ // Type: string
+ // Required: Always
+ // Stability: stable
+ // Examples: 'GET', 'POST', 'HEAD'
+ AttributeHTTPMethod = "http.method"
+ // Full HTTP request URL in the form scheme://host[:port]/path?query[#fragment].
+ // Usually the fragment is not transmitted over HTTP, but if it is known, it
+ // should be included nevertheless.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv'
+ // Note: http.url MUST NOT contain credentials passed via the URL in the form of
+ // https://username:password@www.example.com/. In such a case, the attribute's
+ // value should be https://www.example.com/.
+ AttributeHTTPURL = "http.url"
+ // The full request target as passed in an HTTP request line or equivalent.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '/path/12314/?q=ddds#123'
+ AttributeHTTPTarget = "http.target"
+ // The value of the HTTP host header. An empty Host header should also be
+ // reported, see note.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'www.example.org'
+ // Note: When the header is present but empty the attribute SHOULD be set to the
+ // empty string. Note that this is a valid situation that is expected in certain
+ // cases, according to the aforementioned section of RFC 7230. When the header is
+ // not set the attribute MUST NOT be set.
+ AttributeHTTPHost = "http.host"
+ // The URI scheme identifying the used protocol.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'http', 'https'
+ AttributeHTTPScheme = "http.scheme"
+ // HTTP response status code.
+ //
+ // Type: int
+ // Required: If and only if one was received/sent.
+ // Stability: stable
+ // Examples: 200
+ AttributeHTTPStatusCode = "http.status_code"
+ // Kind of HTTP protocol used.
+ //
+ // Type: Enum
+ // Required: No
+ // Stability: stable
+ // Note: If net.transport is not specified, it can be assumed to be IP.TCP except
+ // if http.flavor is QUIC, in which case IP.UDP is assumed.
+ AttributeHTTPFlavor = "http.flavor"
+ // Value of the HTTP User-Agent header sent by the client.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'CERN-LineMode/2.15 libwww/2.17b3'
+ AttributeHTTPUserAgent = "http.user_agent"
+ // The size of the request payload body in bytes. This is the number of bytes
+ // transferred excluding headers and is often, but not always, present as the
+ // Content-Length header. For requests using transport encoding, this should be
+ // the compressed size.
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ // Examples: 3495
+ AttributeHTTPRequestContentLength = "http.request_content_length"
+ // The size of the uncompressed request payload body after transport decoding. Not
+ // set if transport encoding not used.
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ // Examples: 5493
+ AttributeHTTPRequestContentLengthUncompressed = "http.request_content_length_uncompressed"
+ // The size of the response payload body in bytes. This is the number of bytes
+ // transferred excluding headers and is often, but not always, present as the
+ // Content-Length header. For requests using transport encoding, this should be
+ // the compressed size.
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ // Examples: 3495
+ AttributeHTTPResponseContentLength = "http.response_content_length"
+ // The size of the uncompressed response payload body after transport decoding.
+ // Not set if transport encoding not used.
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ // Examples: 5493
+ AttributeHTTPResponseContentLengthUncompressed = "http.response_content_length_uncompressed"
+ // The ordinal number of the request re-sending attempt.
+ //
+ // Type: int
+ // Required: If and only if a request was retried.
+ // Stability: stable
+ // Examples: 3
+ AttributeHTTPRetryCount = "http.retry_count"
+)
+
+const (
+ // HTTP/1.0
+ AttributeHTTPFlavorHTTP10 = "1.0"
+ // HTTP/1.1
+ AttributeHTTPFlavorHTTP11 = "1.1"
+ // HTTP/2
+ AttributeHTTPFlavorHTTP20 = "2.0"
+ // HTTP/3
+ AttributeHTTPFlavorHTTP30 = "3.0"
+ // SPDY protocol
+ AttributeHTTPFlavorSPDY = "SPDY"
+ // QUIC protocol
+ AttributeHTTPFlavorQUIC = "QUIC"
+)
+
+// Semantic Convention for HTTP Server
+const (
+ // The primary server name of the matched virtual host. This should be obtained
+ // via configuration. If no such configuration can be obtained, this attribute
+ // MUST NOT be set (net.host.name should be used instead).
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'example.com'
+ // Note: http.url is usually not readily available on the server side but would
+ // have to be assembled in a cumbersome and sometimes lossy process from other
+ // information (see e.g. open-telemetry/opentelemetry-python/pull/148). It is thus
+ // preferred to supply the raw data that is available.
+ AttributeHTTPServerName = "http.server_name"
+ // The matched route (path template).
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '/users/:userID?'
+ AttributeHTTPRoute = "http.route"
+ // The IP address of the original client behind all proxies, if known (e.g. from
+ // X-Forwarded-For).
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '83.164.160.102'
+ // Note: This is not necessarily the same as net.peer.ip, which would
+ // identify the network-level peer, which may be a proxy. This attribute should be
+ // set when a source of information different
+ // from the one used for net.peer.ip is available, even if that other
+ // source just confirms the same value as net.peer.ip.
+ // Rationale: For net.peer.ip, one typically does not know if it
+ // comes from a proxy, reverse proxy, or the actual client. Setting
+ // http.client_ip when it's the same as net.peer.ip means that
+ // one is at least somewhat confident that the address is not that of
+ // the closest proxy.
+ AttributeHTTPClientIP = "http.client_ip"
+)
+
+// Attributes that exist for multiple DynamoDB request types.
+const (
+ // The keys in the RequestItems object field.
+ //
+ // Type: string[]
+ // Required: No
+ // Stability: stable
+ // Examples: 'Users', 'Cats'
+ AttributeAWSDynamoDBTableNames = "aws.dynamodb.table_names"
+ // The JSON-serialized value of each item in the ConsumedCapacity response field.
+ //
+ // Type: string[]
+ // Required: No
+ // Stability: stable
+ // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" : {
+ // "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits":
+ // number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number,
+ // "ReadCapacityUnits": number, "WriteCapacityUnits": number } },
+ // "ReadCapacityUnits": number, "Table": { "CapacityUnits": number,
+ // "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName":
+ // "string", "WriteCapacityUnits": number }'
+ AttributeAWSDynamoDBConsumedCapacity = "aws.dynamodb.consumed_capacity"
+ // The JSON-serialized value of the ItemCollectionMetrics response field.
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob, + // "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" : + // "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S": + // "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }' + AttributeAWSDynamoDBItemCollectionMetrics = "aws.dynamodb.item_collection_metrics" + // The value of the ProvisionedThroughput.ReadCapacityUnits request parameter. + // + // Type: double + // Required: No + // Stability: stable + // Examples: 1.0, 2.0 + AttributeAWSDynamoDBProvisionedReadCapacity = "aws.dynamodb.provisioned_read_capacity" + // The value of the ProvisionedThroughput.WriteCapacityUnits request parameter. + // + // Type: double + // Required: No + // Stability: stable + // Examples: 1.0, 2.0 + AttributeAWSDynamoDBProvisionedWriteCapacity = "aws.dynamodb.provisioned_write_capacity" + // The value of the ConsistentRead request parameter. + // + // Type: boolean + // Required: No + // Stability: stable + AttributeAWSDynamoDBConsistentRead = "aws.dynamodb.consistent_read" + // The value of the ProjectionExpression request parameter. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Title', 'Title, Price, Color', 'Title, Description, RelatedItems, + // ProductReviews' + AttributeAWSDynamoDBProjection = "aws.dynamodb.projection" + // The value of the Limit request parameter. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 10 + AttributeAWSDynamoDBLimit = "aws.dynamodb.limit" + // The value of the AttributesToGet request parameter. + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: 'lives', 'id' + AttributeAWSDynamoDBAttributesToGet = "aws.dynamodb.attributes_to_get" + // The value of the IndexName request parameter. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'name_to_group' + AttributeAWSDynamoDBIndexName = "aws.dynamodb.index_name" + // The value of the Select request parameter. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'ALL_ATTRIBUTES', 'COUNT' + AttributeAWSDynamoDBSelect = "aws.dynamodb.select" +) + +// DynamoDB.CreateTable +const ( + // The JSON-serialized value of each item of the GlobalSecondaryIndexes request + // field + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": "string", + // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], + // "ProjectionType": "string" }, "ProvisionedThroughput": { "ReadCapacityUnits": + // number, "WriteCapacityUnits": number } }' + AttributeAWSDynamoDBGlobalSecondaryIndexes = "aws.dynamodb.global_secondary_indexes" + // The JSON-serialized value of each item of the LocalSecondaryIndexes request + // field. + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: '{ "IndexARN": "string", "IndexName": "string", "IndexSizeBytes": + // number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string", + // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], + // "ProjectionType": "string" } }' + AttributeAWSDynamoDBLocalSecondaryIndexes = "aws.dynamodb.local_secondary_indexes" +) + +// DynamoDB.ListTables +const ( + // The value of the ExclusiveStartTableName request parameter. 
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'Users', 'CatsTable'
+ AttributeAWSDynamoDBExclusiveStartTable = "aws.dynamodb.exclusive_start_table"
+ // The number of items in the TableNames response parameter.
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ // Examples: 20
+ AttributeAWSDynamoDBTableCount = "aws.dynamodb.table_count"
+)
+
+// DynamoDB.Query
+const (
+ // The value of the ScanIndexForward request parameter.
+ //
+ // Type: boolean
+ // Required: No
+ // Stability: stable
+ AttributeAWSDynamoDBScanForward = "aws.dynamodb.scan_forward"
+)
+
+// DynamoDB.Scan
+const (
+ // The value of the Segment request parameter.
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ // Examples: 10
+ AttributeAWSDynamoDBSegment = "aws.dynamodb.segment"
+ // The value of the TotalSegments request parameter.
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ // Examples: 100
+ AttributeAWSDynamoDBTotalSegments = "aws.dynamodb.total_segments"
+ // The value of the Count response parameter.
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ // Examples: 10
+ AttributeAWSDynamoDBCount = "aws.dynamodb.count"
+ // The value of the ScannedCount response parameter.
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ // Examples: 50
+ AttributeAWSDynamoDBScannedCount = "aws.dynamodb.scanned_count"
+)
+
+// DynamoDB.UpdateTable
+const (
+ // The JSON-serialized value of each item in the AttributeDefinitions request
+ // field.
+ //
+ // Type: string[]
+ // Required: No
+ // Stability: stable
+ // Examples: '{ "AttributeName": "string", "AttributeType": "string" }'
+ AttributeAWSDynamoDBAttributeDefinitions = "aws.dynamodb.attribute_definitions"
+ // The JSON-serialized value of each item in the GlobalSecondaryIndexUpdates
+ // request field.
+ //
+ // Type: string[]
+ // Required: No
+ // Stability: stable
+ // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ {
+ // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+ // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" },
+ // "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits":
+ // number } }'
+ AttributeAWSDynamoDBGlobalSecondaryIndexUpdates = "aws.dynamodb.global_secondary_index_updates"
+)
+
+// This document defines the attributes used in messaging systems.
+const (
+ // A string identifying the messaging system.
+ //
+ // Type: string
+ // Required: Always
+ // Stability: stable
+ // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS'
+ AttributeMessagingSystem = "messaging.system"
+ // The message destination name. This might be equal to the span name but is
+ // required nevertheless.
+ //
+ // Type: string
+ // Required: Always
+ // Stability: stable
+ // Examples: 'MyQueue', 'MyTopic'
+ AttributeMessagingDestination = "messaging.destination"
+ // The kind of message destination.
+ //
+ // Type: Enum
+ // Required: Required only if the message destination is either a `queue` or
+ // `topic`.
+ // Stability: stable
+ AttributeMessagingDestinationKind = "messaging.destination_kind"
+ // A boolean that is true if the message destination is temporary.
+ //
+ // Type: boolean
+ // Required: If missing, it is assumed to be false.
+ // Stability: stable
+ AttributeMessagingTempDestination = "messaging.temp_destination"
+ // The name of the transport protocol.
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: 'AMQP', 'MQTT' + AttributeMessagingProtocol = "messaging.protocol" + // The version of the transport protocol. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '0.9.1' + AttributeMessagingProtocolVersion = "messaging.protocol_version" + // Connection string. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'tibjmsnaming://localhost:7222', + // 'https://queue.amazonaws.com/80398EXAMPLE/MyQueue' + AttributeMessagingURL = "messaging.url" + // A value used by the messaging system as an identifier for the message, + // represented as a string. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '452a7c7c7c7048c2f887f61572b18fc2' + AttributeMessagingMessageID = "messaging.message_id" + // The conversation ID identifying the conversation to which the message belongs, + // represented as a string. Sometimes called "Correlation ID". + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'MyConversationID' + AttributeMessagingConversationID = "messaging.conversation_id" + // The (uncompressed) size of the message payload in bytes. Also use this + // attribute if it is unknown whether the compressed or uncompressed payload size + // is reported. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 2738 + AttributeMessagingMessagePayloadSizeBytes = "messaging.message_payload_size_bytes" + // The compressed size of the message payload in bytes. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 2048 + AttributeMessagingMessagePayloadCompressedSizeBytes = "messaging.message_payload_compressed_size_bytes" +) + +const ( + // A message sent to a queue + AttributeMessagingDestinationKindQueue = "queue" + // A message sent to a topic + AttributeMessagingDestinationKindTopic = "topic" +) + +// Semantic convention for a consumer of messages received from a messaging system +const ( + // A string identifying the kind of message consumption as defined in the + // Operation names section above. If the operation is "send", this + // attribute MUST NOT be set, since the operation can be inferred from the span + // kind in that case. + // + // Type: Enum + // Required: No + // Stability: stable + AttributeMessagingOperation = "messaging.operation" + // The identifier for the consumer receiving a message. For Kafka, set it to + // {messaging.kafka.consumer_group} - {messaging.kafka.client_id}, if both are + // present, or only messaging.kafka.consumer_group. For brokers, such as RabbitMQ + // and Artemis, set it to the client_id of the client consuming the message. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'mygroup - client-6' + AttributeMessagingConsumerID = "messaging.consumer_id" +) + +const ( + // receive + AttributeMessagingOperationReceive = "receive" + // process + AttributeMessagingOperationProcess = "process" +) + +// Attributes for RabbitMQ +const ( + // RabbitMQ message routing key. + // + // Type: string + // Required: Unless it is empty. + // Stability: stable + // Examples: 'myKey' + AttributeMessagingRabbitmqRoutingKey = "messaging.rabbitmq.routing_key" +) + +// Attributes for Apache Kafka +const ( + // Message keys in Kafka are used for grouping alike messages to ensure they're + // processed on the same partition. They differ from messaging.message_id in that + // they're not unique. 
If the key is null, the attribute MUST NOT be set.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'myKey'
+ // Note: If the key type is not string, its string representation has to be
+ // supplied for the attribute. If the key has no unambiguous, canonical string
+ // form, don't include its value.
+ AttributeMessagingKafkaMessageKey = "messaging.kafka.message_key"
+ // Name of the Kafka Consumer Group that is handling the message. Only applies to
+ // consumers, not producers.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'my-group'
+ AttributeMessagingKafkaConsumerGroup = "messaging.kafka.consumer_group"
+ // Client ID for the Consumer or Producer that is handling the message.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'client-5'
+ AttributeMessagingKafkaClientID = "messaging.kafka.client_id"
+ // Partition the message is sent to.
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ // Examples: 2
+ AttributeMessagingKafkaPartition = "messaging.kafka.partition"
+ // A boolean that is true if the message is a tombstone.
+ //
+ // Type: boolean
+ // Required: If missing, it is assumed to be false.
+ // Stability: stable
+ AttributeMessagingKafkaTombstone = "messaging.kafka.tombstone"
+)
+
+// Attributes for Apache RocketMQ
+const (
+ // Namespace of RocketMQ resources; resources in different namespaces are
+ // independent of each other.
+ //
+ // Type: string
+ // Required: Always
+ // Stability: stable
+ // Examples: 'myNamespace'
+ AttributeMessagingRocketmqNamespace = "messaging.rocketmq.namespace"
+ // Name of the RocketMQ producer/consumer group that is handling the message. The
+ // client type is identified by the SpanKind.
+ //
+ // Type: string
+ // Required: Always
+ // Stability: stable
+ // Examples: 'myConsumerGroup'
+ AttributeMessagingRocketmqClientGroup = "messaging.rocketmq.client_group"
+ // The unique identifier for each client.
+ //
+ // Type: string
+ // Required: Always
+ // Stability: stable
+ // Examples: 'myhost@8742@s8083jm'
+ AttributeMessagingRocketmqClientID = "messaging.rocketmq.client_id"
+ // Type of message.
+ //
+ // Type: Enum
+ // Required: No
+ // Stability: stable
+ AttributeMessagingRocketmqMessageType = "messaging.rocketmq.message_type"
+ // The secondary classifier of message besides topic.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'tagA'
+ AttributeMessagingRocketmqMessageTag = "messaging.rocketmq.message_tag"
+ // Key(s) of the message, another way to mark a message besides the message ID.
+ //
+ // Type: string[]
+ // Required: No
+ // Stability: stable
+ // Examples: 'keyA', 'keyB'
+ AttributeMessagingRocketmqMessageKeys = "messaging.rocketmq.message_keys"
+ // Model of message consumption. This only applies to consumer spans.
+ // + // Type: Enum + // Required: No + // Stability: stable + AttributeMessagingRocketmqConsumptionModel = "messaging.rocketmq.consumption_model" +) + +const ( + // Normal message + AttributeMessagingRocketmqMessageTypeNormal = "normal" + // FIFO message + AttributeMessagingRocketmqMessageTypeFifo = "fifo" + // Delay message + AttributeMessagingRocketmqMessageTypeDelay = "delay" + // Transaction message + AttributeMessagingRocketmqMessageTypeTransaction = "transaction" +) + +const ( + // Clustering consumption model + AttributeMessagingRocketmqConsumptionModelClustering = "clustering" + // Broadcasting consumption model + AttributeMessagingRocketmqConsumptionModelBroadcasting = "broadcasting" +) + +// This document defines semantic conventions for remote procedure calls. +const ( + // A string identifying the remoting system. See below for a list of well-known + // identifiers. + // + // Type: Enum + // Required: Always + // Stability: stable + AttributeRPCSystem = "rpc.system" + // The full (logical) name of the service being called, including its package + // name, if applicable. + // + // Type: string + // Required: No, but recommended + // Stability: stable + // Examples: 'myservice.EchoService' + // Note: This is the logical name of the service from the RPC interface + // perspective, which can be different from the name of any implementing class. + // The code.namespace attribute may be used to store the latter (despite the + // attribute name, it may include a class name; e.g., class with method actually + // executing the call on the server side, RPC client stub class on the client + // side). + AttributeRPCService = "rpc.service" + // The name of the (logical) method being called, must be equal to the $method + // part in the span name. + // + // Type: string + // Required: No, but recommended + // Stability: stable + // Examples: 'exampleMethod' + // Note: This is the logical name of the method from the RPC interface + // perspective, which can be different from the name of any implementing + // method/function. The code.function attribute may be used to store the latter + // (e.g., method actually executing the call on the server side, RPC client stub + // method on the client side). + AttributeRPCMethod = "rpc.method" +) + +const ( + // gRPC + AttributeRPCSystemGRPC = "grpc" + // Java RMI + AttributeRPCSystemJavaRmi = "java_rmi" + // .NET WCF + AttributeRPCSystemDotnetWcf = "dotnet_wcf" + // Apache Dubbo + AttributeRPCSystemApacheDubbo = "apache_dubbo" +) + +// Tech-specific attributes for gRPC. +const ( + // The numeric status code of the gRPC request. 
+ //
+ // Type: Enum
+ // Required: Always
+ // Stability: stable
+ AttributeRPCGRPCStatusCode = "rpc.grpc.status_code"
+)
+
+const (
+ // OK
+ AttributeRPCGRPCStatusCodeOk = "0"
+ // CANCELLED
+ AttributeRPCGRPCStatusCodeCancelled = "1"
+ // UNKNOWN
+ AttributeRPCGRPCStatusCodeUnknown = "2"
+ // INVALID_ARGUMENT
+ AttributeRPCGRPCStatusCodeInvalidArgument = "3"
+ // DEADLINE_EXCEEDED
+ AttributeRPCGRPCStatusCodeDeadlineExceeded = "4"
+ // NOT_FOUND
+ AttributeRPCGRPCStatusCodeNotFound = "5"
+ // ALREADY_EXISTS
+ AttributeRPCGRPCStatusCodeAlreadyExists = "6"
+ // PERMISSION_DENIED
+ AttributeRPCGRPCStatusCodePermissionDenied = "7"
+ // RESOURCE_EXHAUSTED
+ AttributeRPCGRPCStatusCodeResourceExhausted = "8"
+ // FAILED_PRECONDITION
+ AttributeRPCGRPCStatusCodeFailedPrecondition = "9"
+ // ABORTED
+ AttributeRPCGRPCStatusCodeAborted = "10"
+ // OUT_OF_RANGE
+ AttributeRPCGRPCStatusCodeOutOfRange = "11"
+ // UNIMPLEMENTED
+ AttributeRPCGRPCStatusCodeUnimplemented = "12"
+ // INTERNAL
+ AttributeRPCGRPCStatusCodeInternal = "13"
+ // UNAVAILABLE
+ AttributeRPCGRPCStatusCodeUnavailable = "14"
+ // DATA_LOSS
+ AttributeRPCGRPCStatusCodeDataLoss = "15"
+ // UNAUTHENTICATED
+ AttributeRPCGRPCStatusCodeUnauthenticated = "16"
+)
+
+// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/).
+const (
+ // Protocol version as in the jsonrpc property of request/response. Since JSON-RPC
+ // 1.0 does not specify this, the value can be omitted.
+ //
+ // Type: string
+ // Required: If missing, it is assumed to be "1.0".
+ // Stability: stable
+ // Examples: '2.0', '1.0'
+ AttributeRPCJsonrpcVersion = "rpc.jsonrpc.version"
+ // id property of request or response. Since the protocol allows id to be an int,
+ // a string, null, or missing (for notifications), the value is expected to be
+ // cast to string for simplicity. Use an empty string in case of a null value.
+ // Omit entirely if this is a notification.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '10', 'request-7', ''
+ AttributeRPCJsonrpcRequestID = "rpc.jsonrpc.request_id"
+ // error.code property of response if it is an error response.
+ //
+ // Type: int
+ // Required: If missing, response is assumed to be successful.
+ // Stability: stable
+ // Examples: -32700, 100
+ AttributeRPCJsonrpcErrorCode = "rpc.jsonrpc.error_code"
+ // error.message property of response if it is an error response.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'Parse error', 'User already exists'
+ AttributeRPCJsonrpcErrorMessage = "rpc.jsonrpc.error_message"
+)
+
+// RPC received/sent message.
+const (
+ // Whether this is a received or sent message.
+ //
+ // Type: Enum
+ // Required: No
+ // Stability: stable
+ AttributeMessageType = "message.type"
+ // MUST be calculated as two different counters starting from 1: one for sent
+ // messages and one for received messages.
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ // Note: This way we guarantee that the values will be consistent between
+ // different implementations.
+ AttributeMessageID = "message.id"
+ // Compressed size of the message in bytes.
+ //
+ // Type: int
+ // Required: No
+ // Stability: stable
+ AttributeMessageCompressedSize = "message.compressed_size"
+ // Uncompressed size of the message in bytes.
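+ //
+ // An illustrative sketch, not part of the generated specification: recording a
+ // sent-message event with its per-direction counter, as the message.id note
+ // above describes, assuming the go.opentelemetry.io/otel API; the function name
+ // and the sentCounter parameter are placeholders.
+ //
+ //	import (
+ //		"go.opentelemetry.io/otel/attribute"
+ //		"go.opentelemetry.io/otel/trace"
+ //	)
+ //
+ //	func recordSentMessage(span trace.Span, sentCounter int) {
+ //		span.AddEvent("message", trace.WithAttributes(
+ //			attribute.String(AttributeMessageType, AttributeMessageTypeSent),
+ //			attribute.Int(AttributeMessageID, sentCounter),
+ //		))
+ //	}
+ //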
+ // + // Type: int + // Required: No + // Stability: stable + AttributeMessageUncompressedSize = "message.uncompressed_size" +) + +const ( + // sent + AttributeMessageTypeSent = "SENT" + // received + AttributeMessageTypeReceived = "RECEIVED" +) + +func GetTraceSemanticConventionAttributeNames() []string { + return []string{ + AttributeAWSLambdaInvokedARN, + AttributeCloudeventsEventID, + AttributeCloudeventsEventSource, + AttributeCloudeventsEventSpecVersion, + AttributeCloudeventsEventType, + AttributeCloudeventsEventSubject, + AttributeOpentracingRefType, + AttributeDBSystem, + AttributeDBConnectionString, + AttributeDBUser, + AttributeDBJDBCDriverClassname, + AttributeDBName, + AttributeDBStatement, + AttributeDBOperation, + AttributeDBMSSQLInstanceName, + AttributeDBCassandraPageSize, + AttributeDBCassandraConsistencyLevel, + AttributeDBCassandraTable, + AttributeDBCassandraIdempotence, + AttributeDBCassandraSpeculativeExecutionCount, + AttributeDBCassandraCoordinatorID, + AttributeDBCassandraCoordinatorDC, + AttributeDBRedisDBIndex, + AttributeDBMongoDBCollection, + AttributeDBSQLTable, + AttributeExceptionType, + AttributeExceptionMessage, + AttributeExceptionStacktrace, + AttributeExceptionEscaped, + AttributeFaaSTrigger, + AttributeFaaSExecution, + AttributeFaaSDocumentCollection, + AttributeFaaSDocumentOperation, + AttributeFaaSDocumentTime, + AttributeFaaSDocumentName, + AttributeFaaSTime, + AttributeFaaSCron, + AttributeFaaSColdstart, + AttributeFaaSInvokedName, + AttributeFaaSInvokedProvider, + AttributeFaaSInvokedRegion, + AttributeNetTransport, + AttributeNetPeerIP, + AttributeNetPeerPort, + AttributeNetPeerName, + AttributeNetHostIP, + AttributeNetHostPort, + AttributeNetHostName, + AttributeNetHostConnectionType, + AttributeNetHostConnectionSubtype, + AttributeNetHostCarrierName, + AttributeNetHostCarrierMcc, + AttributeNetHostCarrierMnc, + AttributeNetHostCarrierIcc, + AttributePeerService, + AttributeEnduserID, + AttributeEnduserRole, + AttributeEnduserScope, + AttributeThreadID, + AttributeThreadName, + AttributeCodeFunction, + AttributeCodeNamespace, + AttributeCodeFilepath, + AttributeCodeLineNumber, + AttributeHTTPMethod, + AttributeHTTPURL, + AttributeHTTPTarget, + AttributeHTTPHost, + AttributeHTTPScheme, + AttributeHTTPStatusCode, + AttributeHTTPFlavor, + AttributeHTTPUserAgent, + AttributeHTTPRequestContentLength, + AttributeHTTPRequestContentLengthUncompressed, + AttributeHTTPResponseContentLength, + AttributeHTTPResponseContentLengthUncompressed, + AttributeHTTPRetryCount, + AttributeHTTPServerName, + AttributeHTTPRoute, + AttributeHTTPClientIP, + AttributeAWSDynamoDBTableNames, + AttributeAWSDynamoDBConsumedCapacity, + AttributeAWSDynamoDBItemCollectionMetrics, + AttributeAWSDynamoDBProvisionedReadCapacity, + AttributeAWSDynamoDBProvisionedWriteCapacity, + AttributeAWSDynamoDBConsistentRead, + AttributeAWSDynamoDBProjection, + AttributeAWSDynamoDBLimit, + AttributeAWSDynamoDBAttributesToGet, + AttributeAWSDynamoDBIndexName, + AttributeAWSDynamoDBSelect, + AttributeAWSDynamoDBGlobalSecondaryIndexes, + AttributeAWSDynamoDBLocalSecondaryIndexes, + AttributeAWSDynamoDBExclusiveStartTable, + AttributeAWSDynamoDBTableCount, + AttributeAWSDynamoDBScanForward, + AttributeAWSDynamoDBSegment, + AttributeAWSDynamoDBTotalSegments, + AttributeAWSDynamoDBCount, + AttributeAWSDynamoDBScannedCount, + AttributeAWSDynamoDBAttributeDefinitions, + AttributeAWSDynamoDBGlobalSecondaryIndexUpdates, + AttributeMessagingSystem, + AttributeMessagingDestination, + 
AttributeMessagingDestinationKind, + AttributeMessagingTempDestination, + AttributeMessagingProtocol, + AttributeMessagingProtocolVersion, + AttributeMessagingURL, + AttributeMessagingMessageID, + AttributeMessagingConversationID, + AttributeMessagingMessagePayloadSizeBytes, + AttributeMessagingMessagePayloadCompressedSizeBytes, + AttributeMessagingOperation, + AttributeMessagingConsumerID, + AttributeMessagingRabbitmqRoutingKey, + AttributeMessagingKafkaMessageKey, + AttributeMessagingKafkaConsumerGroup, + AttributeMessagingKafkaClientID, + AttributeMessagingKafkaPartition, + AttributeMessagingKafkaTombstone, + AttributeMessagingRocketmqNamespace, + AttributeMessagingRocketmqClientGroup, + AttributeMessagingRocketmqClientID, + AttributeMessagingRocketmqMessageType, + AttributeMessagingRocketmqMessageTag, + AttributeMessagingRocketmqMessageKeys, + AttributeMessagingRocketmqConsumptionModel, + AttributeRPCSystem, + AttributeRPCService, + AttributeRPCMethod, + AttributeRPCGRPCStatusCode, + AttributeRPCJsonrpcVersion, + AttributeRPCJsonrpcRequestID, + AttributeRPCJsonrpcErrorCode, + AttributeRPCJsonrpcErrorMessage, + AttributeMessageType, + AttributeMessageID, + AttributeMessageCompressedSize, + AttributeMessageUncompressedSize, + } +} diff --git a/vendor/go.opentelemetry.io/collector/semconv/v1.12.0/nonstandard.go b/vendor/go.opentelemetry.io/collector/semconv/v1.12.0/nonstandard.go new file mode 100644 index 00000000000..e7798b15828 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/semconv/v1.12.0/nonstandard.go @@ -0,0 +1,11 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/collector/semconv/v1.12.0" + +const ( + OtelLibraryName = "otel.library.name" + OtelLibraryVersion = "otel.library.version" + OtelStatusCode = "otel.status_code" + OtelStatusDescription = "otel.status_description" +) diff --git a/vendor/go.opentelemetry.io/collector/semconv/v1.12.0/schema.go b/vendor/go.opentelemetry.io/collector/semconv/v1.12.0/schema.go new file mode 100644 index 00000000000..42bbd0976df --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/semconv/v1.12.0/schema.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/collector/semconv/v1.12.0" + +// SchemaURL is the schema URL that matches the version of the semantic conventions +// that this package defines. Conventions packages starting from v1.4.0 must declare +// non-empty schema URL in the form https://opentelemetry.io/schemas/ +const SchemaURL = "https://opentelemetry.io/schemas/1.12.0" diff --git a/vendor/go.opentelemetry.io/collector/semconv/v1.13.0/generated_resource.go b/vendor/go.opentelemetry.io/collector/semconv/v1.13.0/generated_resource.go index f4922ef4497..af7c567fb6a 100644 --- a/vendor/go.opentelemetry.io/collector/semconv/v1.13.0/generated_resource.go +++ b/vendor/go.opentelemetry.io/collector/semconv/v1.13.0/generated_resource.go @@ -393,12 +393,12 @@ const ( // Take care not to use the "invoked ARN" directly but replace any // alias suffix // with the resolved function version, as the same runtime instance may be - // invokable with + // invocable with // multiple different aliases. //
// - GCP: The URI of the resource
//
// - Azure: The Fully Qualified Resource ID of the invoked function,
// not the function app, having the form
- // /subscriptions//resourceGroups//providers/Microsoft.Web/s
+ // /subscriptions//resourceGroups//providers/Microsoft.Web/s
// ites//functions/.
// This means that a span attribute MUST be used, as an Azure function app can
// host multiple functions that would usually share
diff --git a/vendor/go.opentelemetry.io/collector/semconv/v1.16.0/generated_resource.go b/vendor/go.opentelemetry.io/collector/semconv/v1.16.0/generated_resource.go
new file mode 100644
index 00000000000..3ab9f86d569
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/semconv/v1.16.0/generated_resource.go
@@ -0,0 +1,1168 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv
+
+// The web browser in which the application represented by the resource is running. The `browser.*` attributes MUST be used only for resources that represent applications running in a web browser (regardless of whether running on a mobile or desktop device).
+const (
+ // Array of brand name and version separated by a space
+ //
+ // Type: string[]
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99'
+ // Note: This value is intended to be taken from the UA client hints API
+ // (navigator.userAgentData.brands).
+ AttributeBrowserBrands = "browser.brands"
+ // The platform on which the browser is running
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: 'Windows', 'macOS', 'Android'
+ // Note: This value is intended to be taken from the UA client hints API
+ // (navigator.userAgentData.platform). If unavailable, the legacy
+ // navigator.platform API SHOULD NOT be used instead and this attribute SHOULD be
+ // left unset in order for the values to be consistent.
+ // The list of possible values is defined in the W3C User-Agent Client Hints
+ // specification. Note that some (but not all) of these values can overlap with
+ // values in the os.type and os.name attributes. However, for consistency, the
+ // values in the browser.platform attribute should capture the exact value that
+ // the user agent provides.
+ AttributeBrowserPlatform = "browser.platform"
+ // A boolean that is true if the browser is running on a mobile device
+ //
+ // Type: boolean
+ // Requirement Level: Optional
+ // Stability: stable
+ // Note: This value is intended to be taken from the UA client hints API
+ // (navigator.userAgentData.mobile). If unavailable, this attribute SHOULD be left
+ // unset.
+ AttributeBrowserMobile = "browser.mobile"
+ // Full user-agent string provided by the browser
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36
+ // (KHTML, like Gecko) Chrome/95.0.4638.54 Safari/537.36'
+ // Note: The user-agent value SHOULD be provided only from browsers that do not
+ // have a mechanism to retrieve brands and platform individually from the User-
+ // Agent Client Hints API. To retrieve the value, the legacy navigator.userAgent
+ // API can be used.
+ AttributeBrowserUserAgent = "browser.user_agent"
+ // Preferred language of the user using the browser
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: 'en', 'en-US', 'fr', 'fr-FR'
+ // Note: This value is intended to be taken from the Navigator API
+ // navigator.language.
+ AttributeBrowserLanguage = "browser.language"
+)
+
+// A cloud environment (e.g. GCP, Azure, AWS)
+const (
+ // Name of the cloud provider.
+ //
+ // Type: Enum
+ // Requirement Level: Optional
+ // Stability: stable
+ AttributeCloudProvider = "cloud.provider"
+ // The cloud account ID the resource is assigned to.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: '111111111111', 'opentelemetry'
+ AttributeCloudAccountID = "cloud.account.id"
+ // The geographical region the resource is running in.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: 'us-central1', 'us-east-1'
+ // Note: Refer to your provider's docs to see the available regions, for example
+ // Alibaba Cloud regions, AWS regions, Azure regions, Google Cloud regions, or
+ // Tencent Cloud regions.
+ AttributeCloudRegion = "cloud.region"
+ // Cloud regions often have multiple, isolated locations known as zones to
+ // increase availability. Availability zone represents the zone where the resource
+ // is running.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: 'us-east-1c'
+ // Note: Availability zones are called "zones" on Alibaba Cloud and
+ // Google Cloud.
+ AttributeCloudAvailabilityZone = "cloud.availability_zone"
+ // The cloud platform in use.
+ //
+ // Type: Enum
+ // Requirement Level: Optional
+ // Stability: stable
+ // Note: The prefix of the service SHOULD match the one specified in
+ // cloud.provider.
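+ //
+ // An illustrative sketch, not part of the generated specification: declaring
+ // the cloud.* attributes above on a resource, assuming the
+ // go.opentelemetry.io/otel/sdk/resource API; the function name and example
+ // values are placeholders.
+ //
+ //	import (
+ //		"go.opentelemetry.io/otel/attribute"
+ //		"go.opentelemetry.io/otel/sdk/resource"
+ //	)
+ //
+ //	func cloudResource() *resource.Resource {
+ //		return resource.NewSchemaless(
+ //			attribute.String(AttributeCloudProvider, AttributeCloudProviderAWS),
+ //			attribute.String(AttributeCloudRegion, "us-east-1"),
+ //			attribute.String(AttributeCloudAvailabilityZone, "us-east-1c"),
+ //			attribute.String(AttributeCloudPlatform, AttributeCloudPlatformAWSEKS),
+ //		)
+ //	}
+ //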
+ AttributeCloudPlatform = "cloud.platform" +) + +const ( + // Alibaba Cloud + AttributeCloudProviderAlibabaCloud = "alibaba_cloud" + // Amazon Web Services + AttributeCloudProviderAWS = "aws" + // Microsoft Azure + AttributeCloudProviderAzure = "azure" + // Google Cloud Platform + AttributeCloudProviderGCP = "gcp" + // IBM Cloud + AttributeCloudProviderIbmCloud = "ibm_cloud" + // Tencent Cloud + AttributeCloudProviderTencentCloud = "tencent_cloud" +) + +const ( + // Alibaba Cloud Elastic Compute Service + AttributeCloudPlatformAlibabaCloudECS = "alibaba_cloud_ecs" + // Alibaba Cloud Function Compute + AttributeCloudPlatformAlibabaCloudFc = "alibaba_cloud_fc" + // Red Hat OpenShift on Alibaba Cloud + AttributeCloudPlatformAlibabaCloudOpenshift = "alibaba_cloud_openshift" + // AWS Elastic Compute Cloud + AttributeCloudPlatformAWSEC2 = "aws_ec2" + // AWS Elastic Container Service + AttributeCloudPlatformAWSECS = "aws_ecs" + // AWS Elastic Kubernetes Service + AttributeCloudPlatformAWSEKS = "aws_eks" + // AWS Lambda + AttributeCloudPlatformAWSLambda = "aws_lambda" + // AWS Elastic Beanstalk + AttributeCloudPlatformAWSElasticBeanstalk = "aws_elastic_beanstalk" + // AWS App Runner + AttributeCloudPlatformAWSAppRunner = "aws_app_runner" + // Red Hat OpenShift on AWS (ROSA) + AttributeCloudPlatformAWSOpenshift = "aws_openshift" + // Azure Virtual Machines + AttributeCloudPlatformAzureVM = "azure_vm" + // Azure Container Instances + AttributeCloudPlatformAzureContainerInstances = "azure_container_instances" + // Azure Kubernetes Service + AttributeCloudPlatformAzureAKS = "azure_aks" + // Azure Functions + AttributeCloudPlatformAzureFunctions = "azure_functions" + // Azure App Service + AttributeCloudPlatformAzureAppService = "azure_app_service" + // Azure Red Hat OpenShift + AttributeCloudPlatformAzureOpenshift = "azure_openshift" + // Google Cloud Compute Engine (GCE) + AttributeCloudPlatformGCPComputeEngine = "gcp_compute_engine" + // Google Cloud Run + AttributeCloudPlatformGCPCloudRun = "gcp_cloud_run" + // Google Cloud Kubernetes Engine (GKE) + AttributeCloudPlatformGCPKubernetesEngine = "gcp_kubernetes_engine" + // Google Cloud Functions (GCF) + AttributeCloudPlatformGCPCloudFunctions = "gcp_cloud_functions" + // Google Cloud App Engine (GAE) + AttributeCloudPlatformGCPAppEngine = "gcp_app_engine" + // Red Hat OpenShift on Google Cloud + AttributeCloudPlatformGoogleCloudOpenshift = "google_cloud_openshift" + // Red Hat OpenShift on IBM Cloud + AttributeCloudPlatformIbmCloudOpenshift = "ibm_cloud_openshift" + // Tencent Cloud Cloud Virtual Machine (CVM) + AttributeCloudPlatformTencentCloudCvm = "tencent_cloud_cvm" + // Tencent Cloud Elastic Kubernetes Service (EKS) + AttributeCloudPlatformTencentCloudEKS = "tencent_cloud_eks" + // Tencent Cloud Serverless Cloud Function (SCF) + AttributeCloudPlatformTencentCloudScf = "tencent_cloud_scf" +) + +// Resources used by AWS Elastic Container Service (ECS). +const ( + // The Amazon Resource Name (ARN) of an ECS container instance. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'arn:aws:ecs:us- + // west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' + AttributeAWSECSContainerARN = "aws.ecs.container.arn" + // The ARN of an ECS cluster. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AttributeAWSECSClusterARN = "aws.ecs.cluster.arn" + // The launch type for an ECS task. 
+ // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + AttributeAWSECSLaunchtype = "aws.ecs.launchtype" + // The ARN of an ECS task definition. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'arn:aws:ecs:us- + // west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' + AttributeAWSECSTaskARN = "aws.ecs.task.arn" + // The task definition family this task definition is a member of. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'opentelemetry-family' + AttributeAWSECSTaskFamily = "aws.ecs.task.family" + // The revision for this task definition. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '8', '26' + AttributeAWSECSTaskRevision = "aws.ecs.task.revision" +) + +const ( + // ec2 + AttributeAWSECSLaunchtypeEC2 = "ec2" + // fargate + AttributeAWSECSLaunchtypeFargate = "fargate" +) + +// Resources used by AWS Elastic Kubernetes Service (EKS). +const ( + // The ARN of an EKS cluster. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AttributeAWSEKSClusterARN = "aws.eks.cluster.arn" +) + +// Resources specific to Amazon Web Services. +const ( + // The name(s) of the AWS log group(s) an application is writing to. + // + // Type: string[] + // Requirement Level: Optional + // Stability: stable + // Examples: '/aws/lambda/my-function', 'opentelemetry-service' + // Note: Multiple log groups must be supported for cases like multi-container + // applications, where a single application has sidecar containers, and each write + // to their own log group. + AttributeAWSLogGroupNames = "aws.log.group.names" + // The Amazon Resource Name(s) (ARN) of the AWS log group(s). + // + // Type: string[] + // Requirement Level: Optional + // Stability: stable + // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' + // Note: See the log group ARN format documentation. + AttributeAWSLogGroupARNs = "aws.log.group.arns" + // The name(s) of the AWS log stream(s) an application is writing to. + // + // Type: string[] + // Requirement Level: Optional + // Stability: stable + // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + AttributeAWSLogStreamNames = "aws.log.stream.names" + // The ARN(s) of the AWS log stream(s). + // + // Type: string[] + // Requirement Level: Optional + // Stability: stable + // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log- + // stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + // Note: See the log stream ARN format documentation. One log group can contain + // several log streams, so these ARNs necessarily identify both a log group and a + // log stream. + AttributeAWSLogStreamARNs = "aws.log.stream.arns" +) + +// A container instance. +const ( + // Container name used by container runtime. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'opentelemetry-autoconf' + AttributeContainerName = "container.name" + // Container ID. Usually a UUID, as for example used to identify Docker + // containers. The UUID might be abbreviated. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'a3bf90e006b2' + AttributeContainerID = "container.id" + // The container runtime managing this container. 
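+ //
+ // An illustrative sketch, not part of the generated specification: an ECS task
+ // resource built from the aws.ecs.* attributes above; the function name and
+ // example values are placeholders (imports as in the cloudResource sketch
+ // above).
+ //
+ //	func ecsTaskResource() *resource.Resource {
+ //		return resource.NewSchemaless(
+ //			attribute.String(AttributeAWSECSLaunchtype, AttributeAWSECSLaunchtypeFargate),
+ //			attribute.String(AttributeAWSECSTaskFamily, "opentelemetry-family"),
+ //			attribute.String(AttributeAWSECSTaskRevision, "8"),
+ //		)
+ //	}
+ //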
+ // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'docker', 'containerd', 'rkt' + AttributeContainerRuntime = "container.runtime" + // Name of the image the container was built on. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'gcr.io/opentelemetry/operator' + AttributeContainerImageName = "container.image.name" + // Container image tag. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '0.1' + AttributeContainerImageTag = "container.image.tag" +) + +// The software deployment. +const ( + // Name of the deployment environment (aka deployment tier). + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'staging', 'production' + AttributeDeploymentEnvironment = "deployment.environment" +) + +// The device on which the process represented by this resource is running. +const ( + // A unique identifier representing the device + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' + // Note: The device identifier MUST only be defined using the values outlined + // below. This value is not an advertising identifier and MUST NOT be used as + // such. On iOS (Swift or Objective-C), this value MUST be equal to the vendor + // identifier. On Android (Java or Kotlin), this value MUST be equal to the + // Firebase Installation ID or a globally unique UUID which is persisted across + // sessions in your application. More information can be found here on best + // practices and exact implementation details. Caution should be taken when + // storing personal data or anything which can identify a user. GDPR and data + // protection laws may apply, ensure you do your own due diligence. + AttributeDeviceID = "device.id" + // The model identifier for the device + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'iPhone3,4', 'SM-G920F' + // Note: It's recommended this value represents a machine readable version of the + // model identifier rather than the market or consumer-friendly name of the + // device. + AttributeDeviceModelIdentifier = "device.model.identifier" + // The marketing name for the device model + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' + // Note: It's recommended this value represents a human readable version of the + // device model rather than a machine readable alternative. + AttributeDeviceModelName = "device.model.name" + // The name of the device manufacturer + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'Apple', 'Samsung' + // Note: The Android OS provides this field via Build. iOS apps SHOULD hardcode + // the value Apple. + AttributeDeviceManufacturer = "device.manufacturer" +) + +// A serverless instance. +const ( + // The name of the single function that this runtime instance executes. + // + // Type: string + // Requirement Level: Required + // Stability: stable + // Examples: 'my-function', 'myazurefunctionapp/some-function-name' + // Note: This is the name of the function as configured/deployed on the FaaS + // platform and is usually different from the name of the callback + // function (which may be stored in the + // code.namespace/code.function + // span attributes).For some cloud providers, the above definition is ambiguous. 
+ // The following
+ // definition of function name MUST be used for this attribute
+ // (and consequently the span name) for the listed cloud providers/products:
+ // - Azure: The full name /, i.e., function app name
+ //   followed by a forward slash followed by the function name (this form
+ //   can also be seen in the resource JSON for the function).
+ //   This means that a span attribute MUST be used, as an Azure function
+ //   app can host multiple functions that would usually share
+ //   a TracerProvider (see also the faas.id attribute).
+ AttributeFaaSName = "faas.name"
+ // The unique ID of the single function that this runtime instance executes.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: 'arn:aws:lambda:us-west-2:123456789012:function:my-function'
+ // Note: On some cloud providers, it may not be possible to determine the full ID
+ // at startup,
+ // so consider setting faas.id as a span attribute instead. The exact value to use
+ // for faas.id depends on the cloud provider:
+ // - AWS Lambda: The function ARN.
+ //   Take care not to use the "invoked ARN" directly but replace any
+ //   alias suffix
+ //   with the resolved function version, as the same runtime instance may be
+ //   invocable with
+ //   multiple different aliases.
+ // - GCP: The URI of the resource
+ // - Azure: The Fully Qualified Resource ID of the invoked function,
+ //   not the function app, having the form
+ //   /subscriptions//resourceGroups//providers/Microsoft.Web/s
+ //   ites//functions/.
+ //   This means that a span attribute MUST be used, as an Azure function app can
+ //   host multiple functions that would usually share
+ //   a TracerProvider.
    + AttributeFaaSID = "faas.id" + // The immutable version of the function being executed. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '26', 'pinkfroid-00002' + // Note: Depending on the cloud provider and platform, use:
+ // - AWS Lambda: The function version
+ //   (an integer represented as a decimal string).
+ // - Google Cloud Run: The revision
+ //   (i.e., the function name plus the revision suffix).
+ // - Google Cloud Functions: The value of the
+ //   K_REVISION environment variable.
+ // - Azure Functions: Not applicable. Do not set this attribute.
+ AttributeFaaSVersion = "faas.version"
+ // The execution environment ID as a string, which will potentially be reused for
+ // other invocations to the same function/function version.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de'
+ // Note:
+ // - AWS Lambda: Use the (full) log stream name.
    + AttributeFaaSInstance = "faas.instance" + // The amount of memory available to the serverless function in MiB. + // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 128 + // Note: It's recommended to set this attribute since e.g. too little memory can + // easily stop a Java AWS Lambda function from working correctly. On AWS Lambda, + // the environment variable AWS_LAMBDA_FUNCTION_MEMORY_SIZE provides this + // information. + AttributeFaaSMaxMemory = "faas.max_memory" +) + +// A host is defined as a general computing instance. +const ( + // Unique host ID. For Cloud, this must be the instance_id assigned by the cloud + // provider. For non-containerized Linux systems, the machine-id located in + // /etc/machine-id or /var/lib/dbus/machine-id may be used. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'fdbf79e8af94cb7f9e8df36789187052' + AttributeHostID = "host.id" + // Name of the host. On Unix systems, it may contain what the hostname command + // returns, or the fully qualified hostname, or another name specified by the + // user. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'opentelemetry-test' + AttributeHostName = "host.name" + // Type of host. For Cloud, this must be the machine type. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'n1-standard-1' + AttributeHostType = "host.type" + // The CPU architecture the host system is running on. + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + AttributeHostArch = "host.arch" + // Name of the VM image or OS install the host was instantiated from. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' + AttributeHostImageName = "host.image.name" + // VM image ID. For Cloud, this value is from the provider. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'ami-07b06b442921831e5' + AttributeHostImageID = "host.image.id" + // The version string of the VM image as defined in Version Attributes. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '0.1' + AttributeHostImageVersion = "host.image.version" +) + +const ( + // AMD64 + AttributeHostArchAMD64 = "amd64" + // ARM32 + AttributeHostArchARM32 = "arm32" + // ARM64 + AttributeHostArchARM64 = "arm64" + // Itanium + AttributeHostArchIA64 = "ia64" + // 32-bit PowerPC + AttributeHostArchPPC32 = "ppc32" + // 64-bit PowerPC + AttributeHostArchPPC64 = "ppc64" + // IBM z/Architecture + AttributeHostArchS390x = "s390x" + // 32-bit x86 + AttributeHostArchX86 = "x86" +) + +// A Kubernetes Cluster. +const ( + // The name of the cluster. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'opentelemetry-cluster' + AttributeK8SClusterName = "k8s.cluster.name" +) + +// A Kubernetes Node object. +const ( + // The name of the Node. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'node-1' + AttributeK8SNodeName = "k8s.node.name" + // The UID of the Node. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' + AttributeK8SNodeUID = "k8s.node.uid" +) + +// A Kubernetes Namespace. +const ( + // The name of the namespace that the pod is running in. 
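+ //
+ // An illustrative sketch, not part of the generated specification: a host
+ // resource built from the host.* attributes above; the function name and
+ // example values are placeholders (imports as in the earlier resource
+ // sketches).
+ //
+ //	func hostResource() *resource.Resource {
+ //		return resource.NewSchemaless(
+ //			attribute.String(AttributeHostName, "opentelemetry-test"),
+ //			attribute.String(AttributeHostArch, AttributeHostArchAMD64),
+ //			attribute.String(AttributeHostType, "n1-standard-1"),
+ //		)
+ //	}
+ //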
+ // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'default' + AttributeK8SNamespaceName = "k8s.namespace.name" +) + +// A Kubernetes Pod object. +const ( + // The UID of the Pod. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SPodUID = "k8s.pod.uid" + // The name of the Pod. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'opentelemetry-pod-autoconf' + AttributeK8SPodName = "k8s.pod.name" +) + +// A container in a [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates). +const ( + // The name of the Container from Pod specification, must be unique within a Pod. + // Container runtime usually uses different globally unique name (container.name). + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'redis' + AttributeK8SContainerName = "k8s.container.name" + // Number of times the container was restarted. This attribute can be used to + // identify a particular container (running or stopped) within a container spec. + // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 0, 2 + AttributeK8SContainerRestartCount = "k8s.container.restart_count" +) + +// A Kubernetes ReplicaSet object. +const ( + // The UID of the ReplicaSet. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SReplicaSetUID = "k8s.replicaset.uid" + // The name of the ReplicaSet. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'opentelemetry' + AttributeK8SReplicaSetName = "k8s.replicaset.name" +) + +// A Kubernetes Deployment object. +const ( + // The UID of the Deployment. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SDeploymentUID = "k8s.deployment.uid" + // The name of the Deployment. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'opentelemetry' + AttributeK8SDeploymentName = "k8s.deployment.name" +) + +// A Kubernetes StatefulSet object. +const ( + // The UID of the StatefulSet. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SStatefulSetUID = "k8s.statefulset.uid" + // The name of the StatefulSet. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'opentelemetry' + AttributeK8SStatefulSetName = "k8s.statefulset.name" +) + +// A Kubernetes DaemonSet object. +const ( + // The UID of the DaemonSet. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SDaemonSetUID = "k8s.daemonset.uid" + // The name of the DaemonSet. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'opentelemetry' + AttributeK8SDaemonSetName = "k8s.daemonset.name" +) + +// A Kubernetes Job object. +const ( + // The UID of the Job. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SJobUID = "k8s.job.uid" + // The name of the Job. 
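+ //
+ // An illustrative sketch, not part of the generated specification: a Kubernetes
+ // pod resource using the k8s.* attributes above; the function name and example
+ // values are placeholders (imports as in the earlier resource sketches).
+ //
+ //	func k8sPodResource() *resource.Resource {
+ //		return resource.NewSchemaless(
+ //			attribute.String(AttributeK8SNamespaceName, "default"),
+ //			attribute.String(AttributeK8SPodName, "opentelemetry-pod-autoconf"),
+ //			attribute.String(AttributeK8SContainerName, "redis"),
+ //			attribute.Int(AttributeK8SContainerRestartCount, 0),
+ //		)
+ //	}
+ //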
+ // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'opentelemetry' + AttributeK8SJobName = "k8s.job.name" +) + +// A Kubernetes CronJob object. +const ( + // The UID of the CronJob. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SCronJobUID = "k8s.cronjob.uid" + // The name of the CronJob. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'opentelemetry' + AttributeK8SCronJobName = "k8s.cronjob.name" +) + +// The operating system (OS) on which the process represented by this resource is running. +const ( + // The operating system type. + // + // Type: Enum + // Requirement Level: Required + // Stability: stable + AttributeOSType = "os.type" + // Human readable (not intended to be parsed) OS version information, like e.g. + // reported by ver or lsb_release -a commands. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 LTS' + AttributeOSDescription = "os.description" + // Human readable operating system name. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'iOS', 'Android', 'Ubuntu' + AttributeOSName = "os.name" + // The version string of the operating system as defined in Version Attributes. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '14.2.1', '18.04.1' + AttributeOSVersion = "os.version" +) + +const ( + // Microsoft Windows + AttributeOSTypeWindows = "windows" + // Linux + AttributeOSTypeLinux = "linux" + // Apple Darwin + AttributeOSTypeDarwin = "darwin" + // FreeBSD + AttributeOSTypeFreeBSD = "freebsd" + // NetBSD + AttributeOSTypeNetBSD = "netbsd" + // OpenBSD + AttributeOSTypeOpenBSD = "openbsd" + // DragonFly BSD + AttributeOSTypeDragonflyBSD = "dragonflybsd" + // HP-UX (Hewlett Packard Unix) + AttributeOSTypeHPUX = "hpux" + // AIX (Advanced Interactive eXecutive) + AttributeOSTypeAIX = "aix" + // SunOS, Oracle Solaris + AttributeOSTypeSolaris = "solaris" + // IBM z/OS + AttributeOSTypeZOS = "z_os" +) + +// An operating system process. +const ( + // Process identifier (PID). + // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 1234 + AttributeProcessPID = "process.pid" + // Parent Process identifier (PID). + // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 111 + AttributeProcessParentPID = "process.parent_pid" + // The name of the process executable. On Linux based systems, can be set to the + // Name in proc/[pid]/status. On Windows, can be set to the base name of + // GetProcessImageFileNameW. + // + // Type: string + // Requirement Level: Conditionally Required - See alternative attributes below. + // Stability: stable + // Examples: 'otelcol' + AttributeProcessExecutableName = "process.executable.name" + // The full path to the process executable. On Linux based systems, can be set to + // the target of proc/[pid]/exe. On Windows, can be set to the result of + // GetProcessImageFileNameW. + // + // Type: string + // Requirement Level: Conditionally Required - See alternative attributes below. + // Stability: stable + // Examples: '/usr/bin/cmd/otelcol' + AttributeProcessExecutablePath = "process.executable.path" + // The command used to launch the process (i.e. the command name). 
On Linux based + // systems, can be set to the zeroth string in proc/[pid]/cmdline. On Windows, can + // be set to the first parameter extracted from GetCommandLineW. + // + // Type: string + // Requirement Level: Conditionally Required - See alternative attributes below. + // Stability: stable + // Examples: 'cmd/otelcol' + AttributeProcessCommand = "process.command" + // The full command used to launch the process as a single string representing the + // full command. On Windows, can be set to the result of GetCommandLineW. Do not + // set this if you have to assemble it just for monitoring; use + // process.command_args instead. + // + // Type: string + // Requirement Level: Conditionally Required - See alternative attributes below. + // Stability: stable + // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' + AttributeProcessCommandLine = "process.command_line" + // All the command arguments (including the command/executable itself) as received + // by the process. On Linux-based systems (and some other Unixoid systems + // supporting procfs), can be set according to the list of null-delimited strings + // extracted from proc/[pid]/cmdline. For libc-based executables, this would be + // the full argv vector passed to main. + // + // Type: string[] + // Requirement Level: Conditionally Required - See alternative attributes below. + // Stability: stable + // Examples: 'cmd/otecol', '--config=config.yaml' + AttributeProcessCommandArgs = "process.command_args" + // The username of the user that owns the process. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'root' + AttributeProcessOwner = "process.owner" +) + +// The single (language) runtime instance which is monitored. +const ( + // The name of the runtime of this process. For compiled native binaries, this + // SHOULD be the name of the compiler. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'OpenJDK Runtime Environment' + AttributeProcessRuntimeName = "process.runtime.name" + // The version of the runtime of this process, as returned by the runtime without + // modification. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '14.0.2' + AttributeProcessRuntimeVersion = "process.runtime.version" + // An additional description about the runtime of the process, for example a + // specific vendor customization of the runtime environment. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' + AttributeProcessRuntimeDescription = "process.runtime.description" +) + +// A service instance. +const ( + // Logical name of the service. + // + // Type: string + // Requirement Level: Required + // Stability: stable + // Examples: 'shoppingcart' + // Note: MUST be the same for all instances of horizontally scaled services. If + // the value was not specified, SDKs MUST fallback to unknown_service: + // concatenated with process.executable.name, e.g. unknown_service:bash. If + // process.executable.name is not available, the value MUST be set to + // unknown_service. + AttributeServiceName = "service.name" + // A namespace for service.name. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'Shop' + // Note: A string value having a meaning that helps to distinguish a group of + // services, for example the team name that owns a group of services. 
service.name + // is expected to be unique within the same namespace. If service.namespace is not + // specified in the Resource then service.name is expected to be unique for all + // services that have no explicit namespace defined (so the empty/unspecified + // namespace is simply one more valid namespace). Zero-length namespace string is + // assumed equal to unspecified namespace. + AttributeServiceNamespace = "service.namespace" + // The string ID of the service instance. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '627cc493-f310-47de-96bd-71410b7dec09' + // Note: MUST be unique for each instance of the same + // service.namespace,service.name pair (in other words + // service.namespace,service.name,service.instance.id triplet MUST be globally + // unique). The ID helps to distinguish instances of the same service that exist + // at the same time (e.g. instances of a horizontally scaled service). It is + // preferable for the ID to be persistent and stay the same for the lifetime of + // the service instance, however it is acceptable that the ID is ephemeral and + // changes during important lifetime events for the service (e.g. service + // restarts). If the service has no inherent unique ID that can be used as the + // value of this attribute it is recommended to generate a random Version 1 or + // Version 4 RFC 4122 UUID (services aiming for reproducible UUIDs may also use + // Version 5, see RFC 4122 for more recommendations). + AttributeServiceInstanceID = "service.instance.id" + // The version string of the service API or implementation. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '2.0.0' + AttributeServiceVersion = "service.version" +) + +// The telemetry SDK used to capture data recorded by the instrumentation libraries. +const ( + // The name of the telemetry SDK as defined above. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'opentelemetry' + AttributeTelemetrySDKName = "telemetry.sdk.name" + // The language of the telemetry SDK. + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + AttributeTelemetrySDKLanguage = "telemetry.sdk.language" + // The version string of the telemetry SDK. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '1.2.3' + AttributeTelemetrySDKVersion = "telemetry.sdk.version" + // The version string of the auto instrumentation agent, if used. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '1.2.3' + AttributeTelemetryAutoVersion = "telemetry.auto.version" +) + +const ( + // cpp + AttributeTelemetrySDKLanguageCPP = "cpp" + // dotnet + AttributeTelemetrySDKLanguageDotnet = "dotnet" + // erlang + AttributeTelemetrySDKLanguageErlang = "erlang" + // go + AttributeTelemetrySDKLanguageGo = "go" + // java + AttributeTelemetrySDKLanguageJava = "java" + // nodejs + AttributeTelemetrySDKLanguageNodejs = "nodejs" + // php + AttributeTelemetrySDKLanguagePHP = "php" + // python + AttributeTelemetrySDKLanguagePython = "python" + // ruby + AttributeTelemetrySDKLanguageRuby = "ruby" + // webjs + AttributeTelemetrySDKLanguageWebjs = "webjs" + // swift + AttributeTelemetrySDKLanguageSwift = "swift" +) + +// Resource describing the packaged software running the application code. Web engines are typically executed using process.runtime. +const ( + // The name of the web engine. 
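+ //
+ // An illustrative sketch, not part of the generated specification: the service
+ // identity attributes above combined on one resource, following the uniqueness
+ // rules in the notes; the function name and example values are placeholders
+ // (imports as in the earlier resource sketches).
+ //
+ //	func serviceResource() *resource.Resource {
+ //		return resource.NewSchemaless(
+ //			attribute.String(AttributeServiceName, "shoppingcart"),
+ //			attribute.String(AttributeServiceNamespace, "Shop"),
+ //			attribute.String(AttributeServiceInstanceID, "627cc493-f310-47de-96bd-71410b7dec09"),
+ //			attribute.String(AttributeServiceVersion, "2.0.0"),
+ //		)
+ //	}
+ //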
+ // + // Type: string + // Requirement Level: Required + // Stability: stable + // Examples: 'WildFly' + AttributeWebEngineName = "webengine.name" + // The version of the web engine. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '21.0.0' + AttributeWebEngineVersion = "webengine.version" + // Additional description of the web engine (e.g. detailed version and edition + // information). + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - 2.2.2.Final' + AttributeWebEngineDescription = "webengine.description" +) + +// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's concepts. +const ( + // The name of the instrumentation scope - (InstrumentationScope.Name in OTLP). + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'io.opentelemetry.contrib.mongodb' + AttributeOtelScopeName = "otel.scope.name" + // The version of the instrumentation scope - (InstrumentationScope.Version in + // OTLP). + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '1.0.0' + AttributeOtelScopeVersion = "otel.scope.version" +) + +// Span attributes used by non-OTLP exporters to represent OpenTelemetry Scope's concepts. +const ( + // Deprecated, use the otel.scope.name attribute. + // + // Type: string + // Requirement Level: Optional + // Stability: deprecated + // Examples: 'io.opentelemetry.contrib.mongodb' + AttributeOtelLibraryName = "otel.library.name" + // Deprecated, use the otel.scope.version attribute. + // + // Type: string + // Requirement Level: Optional + // Stability: deprecated + // Examples: '1.0.0' + AttributeOtelLibraryVersion = "otel.library.version" +) + +func GetResourceSemanticConventionAttributeNames() []string { + return []string{ + AttributeBrowserBrands, + AttributeBrowserPlatform, + AttributeBrowserMobile, + AttributeBrowserUserAgent, + AttributeBrowserLanguage, + AttributeCloudProvider, + AttributeCloudAccountID, + AttributeCloudRegion, + AttributeCloudAvailabilityZone, + AttributeCloudPlatform, + AttributeAWSECSContainerARN, + AttributeAWSECSClusterARN, + AttributeAWSECSLaunchtype, + AttributeAWSECSTaskARN, + AttributeAWSECSTaskFamily, + AttributeAWSECSTaskRevision, + AttributeAWSEKSClusterARN, + AttributeAWSLogGroupNames, + AttributeAWSLogGroupARNs, + AttributeAWSLogStreamNames, + AttributeAWSLogStreamARNs, + AttributeContainerName, + AttributeContainerID, + AttributeContainerRuntime, + AttributeContainerImageName, + AttributeContainerImageTag, + AttributeDeploymentEnvironment, + AttributeDeviceID, + AttributeDeviceModelIdentifier, + AttributeDeviceModelName, + AttributeDeviceManufacturer, + AttributeFaaSName, + AttributeFaaSID, + AttributeFaaSVersion, + AttributeFaaSInstance, + AttributeFaaSMaxMemory, + AttributeHostID, + AttributeHostName, + AttributeHostType, + AttributeHostArch, + AttributeHostImageName, + AttributeHostImageID, + AttributeHostImageVersion, + AttributeK8SClusterName, + AttributeK8SNodeName, + AttributeK8SNodeUID, + AttributeK8SNamespaceName, + AttributeK8SPodUID, + AttributeK8SPodName, + AttributeK8SContainerName, + AttributeK8SContainerRestartCount, + AttributeK8SReplicaSetUID, + AttributeK8SReplicaSetName, + AttributeK8SDeploymentUID, + AttributeK8SDeploymentName, + AttributeK8SStatefulSetUID, + AttributeK8SStatefulSetName, + AttributeK8SDaemonSetUID, + AttributeK8SDaemonSetName, + AttributeK8SJobUID, + 
AttributeK8SJobName, + AttributeK8SCronJobUID, + AttributeK8SCronJobName, + AttributeOSType, + AttributeOSDescription, + AttributeOSName, + AttributeOSVersion, + AttributeProcessPID, + AttributeProcessParentPID, + AttributeProcessExecutableName, + AttributeProcessExecutablePath, + AttributeProcessCommand, + AttributeProcessCommandLine, + AttributeProcessCommandArgs, + AttributeProcessOwner, + AttributeProcessRuntimeName, + AttributeProcessRuntimeVersion, + AttributeProcessRuntimeDescription, + AttributeServiceName, + AttributeServiceNamespace, + AttributeServiceInstanceID, + AttributeServiceVersion, + AttributeTelemetrySDKName, + AttributeTelemetrySDKLanguage, + AttributeTelemetrySDKVersion, + AttributeTelemetryAutoVersion, + AttributeWebEngineName, + AttributeWebEngineVersion, + AttributeWebEngineDescription, + AttributeOtelScopeName, + AttributeOtelScopeVersion, + AttributeOtelLibraryName, + AttributeOtelLibraryVersion, + } +} diff --git a/vendor/go.opentelemetry.io/collector/semconv/v1.16.0/generated_trace.go b/vendor/go.opentelemetry.io/collector/semconv/v1.16.0/generated_trace.go new file mode 100644 index 00000000000..33912ab9a58 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/semconv/v1.16.0/generated_trace.go @@ -0,0 +1,1913 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv + +// This document defines the shared attributes used to report a single exception associated with a span or log. +const ( + // The type of the exception (its fully-qualified class name, if applicable). The + // dynamic type of the exception should be preferred over the static type in + // languages that support it. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'java.net.ConnectException', 'OSError' + AttributeExceptionType = "exception.type" + // The exception message. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'Division by zero', "Can't convert 'int' object to str implicitly" + AttributeExceptionMessage = "exception.message" + // A stacktrace as a string in the natural representation for the language + // runtime. The representation is to be determined and documented by each language + // SIG. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test + // exception\\n at ' + // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + AttributeExceptionStacktrace = "exception.stacktrace" +) + +// This document defines attributes for Events represented using Log Records. +const ( + // The name identifies the event. + // + // Type: string + // Requirement Level: Required + // Stability: stable + // Examples: 'click', 'exception' + AttributeEventName = "event.name" + // The domain identifies the business context for the events. + // + // Type: Enum + // Requirement Level: Required + // Stability: stable + // Note: Events across different domains may have same event.name, yet be + // unrelated events. 
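+ //
+ // An illustrative sketch, not part of the generated specification: recording an
+ // exception event on a span with the exception.* attributes above, assuming the
+ // go.opentelemetry.io/otel API; the function name is a placeholder.
+ //
+ //	import (
+ //		"fmt"
+ //
+ //		"go.opentelemetry.io/otel/attribute"
+ //		"go.opentelemetry.io/otel/trace"
+ //	)
+ //
+ //	func recordException(span trace.Span, err error) {
+ //		span.AddEvent("exception", trace.WithAttributes(
+ //			attribute.String(AttributeExceptionType, fmt.Sprintf("%T", err)),
+ //			attribute.String(AttributeExceptionMessage, err.Error()),
+ //		))
+ //	}
+ //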
+ AttributeEventDomain = "event.domain" +) + +const ( + // Events from browser apps + AttributeEventDomainBrowser = "browser" + // Events from mobile apps + AttributeEventDomainDevice = "device" + // Events from Kubernetes + AttributeEventDomainK8S = "k8s" +) + +// Span attributes used by AWS Lambda (in addition to general `faas` attributes). +const ( + // The full invoked ARN as provided on the Context passed to the function (Lambda- + // Runtime-Invoked-Function-ARN header on the /runtime/invocation/next + // applicable). + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' + // Note: This may be different from faas.id if an alias is involved. + AttributeAWSLambdaInvokedARN = "aws.lambda.invoked_arn" +) + +// This document defines attributes for CloudEvents. CloudEvents is a specification on how to define event data in a standard way. These attributes can be attached to spans when performing operations with CloudEvents, regardless of the protocol being used. +const ( + // The event_id uniquely identifies the event. + // + // Type: string + // Requirement Level: Required + // Stability: stable + // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' + AttributeCloudeventsEventID = "cloudevents.event_id" + // The source identifies the context in which an event happened. + // + // Type: string + // Requirement Level: Required + // Stability: stable + // Examples: 'https://github.com/cloudevents', '/cloudevents/spec/pull/123', 'my- + // service' + AttributeCloudeventsEventSource = "cloudevents.event_source" + // The version of the CloudEvents specification which the event uses. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '1.0' + AttributeCloudeventsEventSpecVersion = "cloudevents.event_spec_version" + // The event_type contains a value describing the type of event related to the + // originating occurrence. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'com.github.pull_request.opened', 'com.example.object.deleted.v2' + AttributeCloudeventsEventType = "cloudevents.event_type" + // The subject of the event in the context of the event producer (identified by + // source). + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'mynewfile.jpg' + AttributeCloudeventsEventSubject = "cloudevents.event_subject" +) + +// This document defines semantic conventions for the OpenTracing Shim +const ( + // Parent-child Reference type + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + // Note: The causal relationship between a child Span and a parent Span. + AttributeOpentracingRefType = "opentracing.ref_type" +) + +const ( + // The parent Span depends on the child Span in some capacity + AttributeOpentracingRefTypeChildOf = "child_of" + // The parent Span does not depend in any way on the result of the child Span + AttributeOpentracingRefTypeFollowsFrom = "follows_from" +) + +// This document defines the attributes used to perform database client calls. +const ( + // An identifier for the database management system (DBMS) product being used. See + // below for a list of well-known identifiers. + // + // Type: Enum + // Requirement Level: Required + // Stability: stable + AttributeDBSystem = "db.system" + // The connection string used to connect to the database. It is recommended to + // remove embedded credentials. 
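+ //
+ // An illustrative sketch, not part of the generated specification: tagging a
+ // span with the cloudevents.* attributes above; the function name and example
+ // values are placeholders (imports as in the recordException sketch above).
+ //
+ //	func annotateCloudEvent(span trace.Span) {
+ //		span.SetAttributes(
+ //			attribute.String(AttributeCloudeventsEventID, "123e4567-e89b-12d3-a456-426614174000"),
+ //			attribute.String(AttributeCloudeventsEventSource, "https://github.com/cloudevents"),
+ //			attribute.String(AttributeCloudeventsEventSpecVersion, "1.0"),
+ //		)
+ //	}
+ //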
+ // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' + AttributeDBConnectionString = "db.connection_string" + // Username for accessing the database. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'readonly_user', 'reporting_user' + AttributeDBUser = "db.user" + // The fully-qualified class name of the Java Database Connectivity (JDBC) driver + // used to connect. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'org.postgresql.Driver', + // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' + AttributeDBJDBCDriverClassname = "db.jdbc.driver_classname" + // This attribute is used to report the name of the database being accessed. For + // commands that switch the database, this should be set to the target database + // (even if the command fails). + // + // Type: string + // Requirement Level: Conditionally Required - If applicable. + // Stability: stable + // Examples: 'customers', 'main' + // Note: In some SQL databases, the database name to be used is called + // "schema name". In case there are multiple layers that could be + // considered for database name (e.g. Oracle instance name and schema name), the + // database name to be used is the more specific layer (e.g. Oracle schema name). + AttributeDBName = "db.name" + // The database statement being executed. + // + // Type: string + // Requirement Level: Conditionally Required - If applicable and not explicitly + // disabled via instrumentation configuration. + // Stability: stable + // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' + // Note: The value may be sanitized to exclude sensitive information. + AttributeDBStatement = "db.statement" + // The name of the operation being executed, e.g. the MongoDB command name such as + // findAndModify, or the SQL keyword. + // + // Type: string + // Requirement Level: Conditionally Required - If `db.statement` is not + // applicable. + // Stability: stable + // Examples: 'findAndModify', 'HMSET', 'SELECT' + // Note: When setting this to an SQL keyword, it is not recommended to attempt any + // client-side parsing of db.statement just to get this property, but it should be + // set if the operation name is provided by the library being instrumented. If the + // SQL statement has an ambiguous operation, or performs more than one operation, + // this value may be omitted. + AttributeDBOperation = "db.operation" +) + +const ( + // Some other SQL database. Fallback only. 
See notes + AttributeDBSystemOtherSQL = "other_sql" + // Microsoft SQL Server + AttributeDBSystemMSSQL = "mssql" + // MySQL + AttributeDBSystemMySQL = "mysql" + // Oracle Database + AttributeDBSystemOracle = "oracle" + // IBM DB2 + AttributeDBSystemDB2 = "db2" + // PostgreSQL + AttributeDBSystemPostgreSQL = "postgresql" + // Amazon Redshift + AttributeDBSystemRedshift = "redshift" + // Apache Hive + AttributeDBSystemHive = "hive" + // Cloudscape + AttributeDBSystemCloudscape = "cloudscape" + // HyperSQL DataBase + AttributeDBSystemHSQLDB = "hsqldb" + // Progress Database + AttributeDBSystemProgress = "progress" + // SAP MaxDB + AttributeDBSystemMaxDB = "maxdb" + // SAP HANA + AttributeDBSystemHanaDB = "hanadb" + // Ingres + AttributeDBSystemIngres = "ingres" + // FirstSQL + AttributeDBSystemFirstSQL = "firstsql" + // EnterpriseDB + AttributeDBSystemEDB = "edb" + // InterSystems Caché + AttributeDBSystemCache = "cache" + // Adabas (Adaptable Database System) + AttributeDBSystemAdabas = "adabas" + // Firebird + AttributeDBSystemFirebird = "firebird" + // Apache Derby + AttributeDBSystemDerby = "derby" + // FileMaker + AttributeDBSystemFilemaker = "filemaker" + // Informix + AttributeDBSystemInformix = "informix" + // InstantDB + AttributeDBSystemInstantDB = "instantdb" + // InterBase + AttributeDBSystemInterbase = "interbase" + // MariaDB + AttributeDBSystemMariaDB = "mariadb" + // Netezza + AttributeDBSystemNetezza = "netezza" + // Pervasive PSQL + AttributeDBSystemPervasive = "pervasive" + // PointBase + AttributeDBSystemPointbase = "pointbase" + // SQLite + AttributeDBSystemSqlite = "sqlite" + // Sybase + AttributeDBSystemSybase = "sybase" + // Teradata + AttributeDBSystemTeradata = "teradata" + // Vertica + AttributeDBSystemVertica = "vertica" + // H2 + AttributeDBSystemH2 = "h2" + // ColdFusion IMQ + AttributeDBSystemColdfusion = "coldfusion" + // Apache Cassandra + AttributeDBSystemCassandra = "cassandra" + // Apache HBase + AttributeDBSystemHBase = "hbase" + // MongoDB + AttributeDBSystemMongoDB = "mongodb" + // Redis + AttributeDBSystemRedis = "redis" + // Couchbase + AttributeDBSystemCouchbase = "couchbase" + // CouchDB + AttributeDBSystemCouchDB = "couchdb" + // Microsoft Azure Cosmos DB + AttributeDBSystemCosmosDB = "cosmosdb" + // Amazon DynamoDB + AttributeDBSystemDynamoDB = "dynamodb" + // Neo4j + AttributeDBSystemNeo4j = "neo4j" + // Apache Geode + AttributeDBSystemGeode = "geode" + // Elasticsearch + AttributeDBSystemElasticsearch = "elasticsearch" + // Memcached + AttributeDBSystemMemcached = "memcached" + // CockroachDB + AttributeDBSystemCockroachdb = "cockroachdb" + // OpenSearch + AttributeDBSystemOpensearch = "opensearch" +) + +// Connection-level attributes for Microsoft SQL Server +const ( + // The Microsoft SQL Server instance name connecting to. This name is used to + // determine the port of a named instance. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'MSSQLSERVER' + // Note: If setting a db.mssql.instance_name, net.peer.port is no longer required + // (but still recommended if non-standard). + AttributeDBMSSQLInstanceName = "db.mssql.instance_name" +) + +// Call-level attributes for Cassandra +const ( + // The fetch size used for paging, i.e. how many rows will be returned at once. + // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 5000 + AttributeDBCassandraPageSize = "db.cassandra.page_size" + // The consistency level of the query. Based on consistency values from CQL. 
+ // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + AttributeDBCassandraConsistencyLevel = "db.cassandra.consistency_level" + // The name of the primary table that the operation is acting upon, including the + // keyspace name (if applicable). + // + // Type: string + // Requirement Level: Recommended + // Stability: stable + // Examples: 'mytable' + // Note: This mirrors the db.sql.table attribute but references cassandra rather + // than sql. It is not recommended to attempt any client-side parsing of + // db.statement just to get this property, but it should be set if it is provided + // by the library being instrumented. If the operation is acting upon an anonymous + // table, or more than one table, this value MUST NOT be set. + AttributeDBCassandraTable = "db.cassandra.table" + // Whether or not the query is idempotent. + // + // Type: boolean + // Requirement Level: Optional + // Stability: stable + AttributeDBCassandraIdempotence = "db.cassandra.idempotence" + // The number of times a query was speculatively executed. Not set or 0 if the + // query was not executed speculatively. + // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 0, 2 + AttributeDBCassandraSpeculativeExecutionCount = "db.cassandra.speculative_execution_count" + // The ID of the coordinating node for a query. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' + AttributeDBCassandraCoordinatorID = "db.cassandra.coordinator.id" + // The data center of the coordinating node for a query. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'us-west-2' + AttributeDBCassandraCoordinatorDC = "db.cassandra.coordinator.dc" +) + +const ( + // all + AttributeDBCassandraConsistencyLevelAll = "all" + // each_quorum + AttributeDBCassandraConsistencyLevelEachQuorum = "each_quorum" + // quorum + AttributeDBCassandraConsistencyLevelQuorum = "quorum" + // local_quorum + AttributeDBCassandraConsistencyLevelLocalQuorum = "local_quorum" + // one + AttributeDBCassandraConsistencyLevelOne = "one" + // two + AttributeDBCassandraConsistencyLevelTwo = "two" + // three + AttributeDBCassandraConsistencyLevelThree = "three" + // local_one + AttributeDBCassandraConsistencyLevelLocalOne = "local_one" + // any + AttributeDBCassandraConsistencyLevelAny = "any" + // serial + AttributeDBCassandraConsistencyLevelSerial = "serial" + // local_serial + AttributeDBCassandraConsistencyLevelLocalSerial = "local_serial" +) + +// Call-level attributes for Redis +const ( + // The index of the database being accessed as used in the SELECT command, + // provided as an integer. To be used instead of the generic db.name attribute. + // + // Type: int + // Requirement Level: Conditionally Required - If other than the default database + // (`0`). + // Stability: stable + // Examples: 0, 1, 15 + AttributeDBRedisDBIndex = "db.redis.database_index" +) + +// Call-level attributes for MongoDB +const ( + // The collection being accessed within the database stated in db.name. + // + // Type: string + // Requirement Level: Required + // Stability: stable + // Examples: 'customers', 'products' + AttributeDBMongoDBCollection = "db.mongodb.collection" +) + +// Call-level attributes for SQL databases +const ( + // The name of the primary table that the operation is acting upon, including the + // database name (if applicable). 
+ // + // Type: string + // Requirement Level: Recommended + // Stability: stable + // Examples: 'public.users', 'customers' + // Note: It is not recommended to attempt any client-side parsing of db.statement + // just to get this property, but it should be set if it is provided by the + // library being instrumented. If the operation is acting upon an anonymous table, + // or more than one table, this value MUST NOT be set. + AttributeDBSQLTable = "db.sql.table" +) + +// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's concepts. +const ( + // Name of the code, either "OK" or "ERROR". MUST NOT be set + // if the status code is UNSET. + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + AttributeOtelStatusCode = "otel.status_code" + // Description of the Status if it has a value, otherwise not set. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'resource not found' + AttributeOtelStatusDescription = "otel.status_description" +) + +const ( + // The operation has been validated by an Application developer or Operator to have completed successfully + AttributeOtelStatusCodeOk = "OK" + // The operation contains an error + AttributeOtelStatusCodeError = "ERROR" +) + +// This semantic convention describes an instance of a function that runs without provisioning or managing of servers (also known as serverless functions or Function as a Service (FaaS)) with spans. +const ( + // Type of the trigger which caused this function execution. + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + // Note: For the server/consumer span on the incoming side, + // faas.trigger MUST be set. Clients invoking FaaS instances usually cannot set + // faas.trigger, + // since they would typically need to look in the payload to determine + // the event type. If clients set it, it should be the same as the + // trigger that corresponding incoming would have (i.e., this has + // nothing to do with the underlying transport used to make the API + // call to invoke the lambda, which is often HTTP). + AttributeFaaSTrigger = "faas.trigger" + // The execution ID of the current function execution. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' + AttributeFaaSExecution = "faas.execution" +) + +const ( + // A response to some data source operation such as a database or filesystem read/write + AttributeFaaSTriggerDatasource = "datasource" + // To provide an answer to an inbound HTTP request + AttributeFaaSTriggerHTTP = "http" + // A function is set to be executed when messages are sent to a messaging system + AttributeFaaSTriggerPubsub = "pubsub" + // A function is scheduled to be executed regularly + AttributeFaaSTriggerTimer = "timer" + // If none of the others apply + AttributeFaaSTriggerOther = "other" +) + +// Semantic Convention for FaaS triggered as a response to some data source operation such as a database or filesystem read/write. +const ( + // The name of the source on which the triggering operation was performed. For + // example, in Cloud Storage or S3 corresponds to the bucket name, and in Cosmos + // DB to the database name. + // + // Type: string + // Requirement Level: Required + // Stability: stable + // Examples: 'myBucketName', 'myDBName' + AttributeFaaSDocumentCollection = "faas.document.collection" + // Describes the type of the operation that was performed on the data.
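+ //
+ // An illustrative sketch (not a normative example): a datasource-triggered
+ // function might record the trigger and document attributes together; the
+ // values below are the examples documented above and below.
+ //
+ //	attrs := map[string]string{
+ //		AttributeFaaSTrigger:            AttributeFaaSTriggerDatasource,
+ //		AttributeFaaSDocumentCollection: "myBucketName",
+ //		AttributeFaaSDocumentOperation:  AttributeFaaSDocumentOperationInsert,
+ //		AttributeFaaSDocumentName:       "myFile.txt",
+ //	}
+ //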
+ // + // Type: Enum + // Requirement Level: Required + // Stability: stable + AttributeFaaSDocumentOperation = "faas.document.operation" + // A string containing the time when the data was accessed in the ISO 8601 format + // expressed in UTC. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '2020-01-23T13:47:06Z' + AttributeFaaSDocumentTime = "faas.document.time" + // The document name/table subjected to the operation. For example, in Cloud + // Storage or S3 is the name of the file, and in Cosmos DB the table name. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'myFile.txt', 'myTableName' + AttributeFaaSDocumentName = "faas.document.name" +) + +const ( + // When a new object is created + AttributeFaaSDocumentOperationInsert = "insert" + // When an object is modified + AttributeFaaSDocumentOperationEdit = "edit" + // When an object is deleted + AttributeFaaSDocumentOperationDelete = "delete" +) + +// Semantic Convention for FaaS scheduled to be executed regularly. +const ( + // A string containing the function invocation time in the ISO 8601 format + // expressed in UTC. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '2020-01-23T13:47:06Z' + AttributeFaaSTime = "faas.time" + // A string containing the schedule period as Cron Expression. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '0/5 * * * ? *' + AttributeFaaSCron = "faas.cron" +) + +// Contains additional attributes for incoming FaaS spans. +const ( + // A boolean that is true if the serverless function is executed for the first + // time (aka cold-start). + // + // Type: boolean + // Requirement Level: Optional + // Stability: stable + AttributeFaaSColdstart = "faas.coldstart" +) + +// Contains additional attributes for outgoing FaaS spans. +const ( + // The name of the invoked function. + // + // Type: string + // Requirement Level: Required + // Stability: stable + // Examples: 'my-function' + // Note: SHOULD be equal to the faas.name resource attribute of the invoked + // function. + AttributeFaaSInvokedName = "faas.invoked_name" + // The cloud provider of the invoked function. + // + // Type: Enum + // Requirement Level: Required + // Stability: stable + // Note: SHOULD be equal to the cloud.provider resource attribute of the invoked + // function. + AttributeFaaSInvokedProvider = "faas.invoked_provider" + // The cloud region of the invoked function. + // + // Type: string + // Requirement Level: Conditionally Required - For some cloud providers, like AWS + // or GCP, the region in which a function is hosted is essential to uniquely + // identify the function and also part of its endpoint. Since it's part of the + // endpoint being called, the region is always known to clients. In these cases, + // `faas.invoked_region` MUST be set accordingly. If the region is unknown to the + // client or not required for identifying the invoked function, setting + // `faas.invoked_region` is optional. + // Stability: stable + // Examples: 'eu-central-1' + // Note: SHOULD be equal to the cloud.region resource attribute of the invoked + // function. 
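+ //
+ // An illustrative sketch: an outgoing-invocation client span would carry
+ // these attributes, mirroring the invoked function's resource attributes;
+ // the values are the documented examples.
+ //
+ //	attrs := map[string]string{
+ //		AttributeFaaSInvokedName:     "my-function",
+ //		AttributeFaaSInvokedProvider: AttributeFaaSInvokedProviderAWS,
+ //		AttributeFaaSInvokedRegion:   "eu-central-1",
+ //	}
+ //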
+ AttributeFaaSInvokedRegion = "faas.invoked_region" +) + +const ( + // Alibaba Cloud + AttributeFaaSInvokedProviderAlibabaCloud = "alibaba_cloud" + // Amazon Web Services + AttributeFaaSInvokedProviderAWS = "aws" + // Microsoft Azure + AttributeFaaSInvokedProviderAzure = "azure" + // Google Cloud Platform + AttributeFaaSInvokedProviderGCP = "gcp" + // Tencent Cloud + AttributeFaaSInvokedProviderTencentCloud = "tencent_cloud" +) + +// These attributes may be used for any network related operation. +const ( + // Transport protocol used. See note below. + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + AttributeNetTransport = "net.transport" + // Application layer protocol used. The value SHOULD be normalized to lowercase. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'amqp', 'http', 'mqtt' + AttributeNetAppProtocolName = "net.app.protocol.name" + // Version of the application layer protocol used. See note below. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '3.1.1' + // Note: net.app.protocol.version refers to the version of the protocol used and + // might be different from the protocol client's version. If the HTTP client used + // has a version of 0.27.2, but sends HTTP version 1.1, this attribute should be + // set to 1.1. + AttributeNetAppProtocolVersion = "net.app.protocol.version" + // Remote socket peer name. + // + // Type: string + // Requirement Level: Recommended - If available and different from + // `net.peer.name` and if `net.sock.peer.addr` is set. + // Stability: stable + // Examples: 'proxy.example.com' + AttributeNetSockPeerName = "net.sock.peer.name" + // Remote socket peer address: IPv4 or IPv6 for internet protocols, path for local + // communication, etc. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '127.0.0.1', '/tmp/mysql.sock' + AttributeNetSockPeerAddr = "net.sock.peer.addr" + // Remote socket peer port. + // + // Type: int + // Requirement Level: Recommended - If defined for the address family and if + // different than `net.peer.port` and if `net.sock.peer.addr` is set. + // Stability: stable + // Examples: 16456 + AttributeNetSockPeerPort = "net.sock.peer.port" + // Protocol address family which is used for communication. + // + // Type: Enum + // Requirement Level: Conditionally Required - If different than `inet` and if any + // of `net.sock.peer.addr` or `net.sock.host.addr` are set. Consumers of telemetry + // SHOULD accept both IPv4 and IPv6 formats for the address in + // `net.sock.peer.addr` if `net.sock.family` is not set. This is to support + // instrumentations that follow previous versions of this document. + // Stability: stable + // Examples: 'inet6', 'bluetooth' + AttributeNetSockFamily = "net.sock.family" + // Logical remote hostname, see note below. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'example.com' + // Note: net.peer.name SHOULD NOT be set if capturing it would require an extra + // DNS lookup. + AttributeNetPeerName = "net.peer.name" + // Logical remote port number + // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 80, 8080, 443 + AttributeNetPeerPort = "net.peer.port" + // Logical local hostname or similar, see note below. 
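+ //
+ // An illustrative sketch: the net.* namespace separates logical names from
+ // socket-level addresses, so a server span might record both; the values are
+ // the documented examples.
+ //
+ //	attrs := map[string]any{
+ //		AttributeNetHostName:     "localhost", // logical local host
+ //		AttributeNetHostPort:     8080,
+ //		AttributeNetSockPeerAddr: "127.0.0.1", // socket-level peer
+ //		AttributeNetSockPeerPort: 16456,
+ //	}
+ //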
+ // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'localhost' + AttributeNetHostName = "net.host.name" + // Logical local port number, preferably the one that the peer used to connect + // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 8080 + AttributeNetHostPort = "net.host.port" + // Local socket address. Useful in case of a multi-IP host. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '192.168.0.1' + AttributeNetSockHostAddr = "net.sock.host.addr" + // Local socket port number. + // + // Type: int + // Requirement Level: Recommended - If defined for the address family and if + // different than `net.host.port` and if `net.sock.host.addr` is set. + // Stability: stable + // Examples: 35555 + AttributeNetSockHostPort = "net.sock.host.port" + // The internet connection type currently being used by the host. + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + // Examples: 'wifi' + AttributeNetHostConnectionType = "net.host.connection.type" + // This describes more details regarding the connection.type. It may be the type + // of cell technology connection, but it could be used for describing details + // about a wifi connection. + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + // Examples: 'LTE' + AttributeNetHostConnectionSubtype = "net.host.connection.subtype" + // The name of the mobile carrier. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'sprint' + AttributeNetHostCarrierName = "net.host.carrier.name" + // The mobile carrier country code. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '310' + AttributeNetHostCarrierMcc = "net.host.carrier.mcc" + // The mobile carrier network code. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '001' + AttributeNetHostCarrierMnc = "net.host.carrier.mnc" + // The ISO 3166-1 alpha-2 2-character country code associated with the mobile + // carrier network. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'DE' + AttributeNetHostCarrierIcc = "net.host.carrier.icc" +) + +const ( + // ip_tcp + AttributeNetTransportTCP = "ip_tcp" + // ip_udp + AttributeNetTransportUDP = "ip_udp" + // Named or anonymous pipe. See note below + AttributeNetTransportPipe = "pipe" + // In-process communication + AttributeNetTransportInProc = "inproc" + // Something else (non IP-based) + AttributeNetTransportOther = "other" +) + +const ( + // IPv4 address + AttributeNetSockFamilyInet = "inet" + // IPv6 address + AttributeNetSockFamilyInet6 = "inet6" + // Unix domain socket path + AttributeNetSockFamilyUnix = "unix" +) + +const ( + // wifi + AttributeNetHostConnectionTypeWifi = "wifi" + // wired + AttributeNetHostConnectionTypeWired = "wired" + // cell + AttributeNetHostConnectionTypeCell = "cell" + // unavailable + AttributeNetHostConnectionTypeUnavailable = "unavailable" + // unknown + AttributeNetHostConnectionTypeUnknown = "unknown" +) + +const ( + // GPRS + AttributeNetHostConnectionSubtypeGprs = "gprs" + // EDGE + AttributeNetHostConnectionSubtypeEdge = "edge" + // UMTS + AttributeNetHostConnectionSubtypeUmts = "umts" + // CDMA + AttributeNetHostConnectionSubtypeCdma = "cdma" + // EVDO Rel. 0 + AttributeNetHostConnectionSubtypeEvdo0 = "evdo_0" + // EVDO Rev. 
A + AttributeNetHostConnectionSubtypeEvdoA = "evdo_a" + // CDMA2000 1XRTT + AttributeNetHostConnectionSubtypeCdma20001xrtt = "cdma2000_1xrtt" + // HSDPA + AttributeNetHostConnectionSubtypeHsdpa = "hsdpa" + // HSUPA + AttributeNetHostConnectionSubtypeHsupa = "hsupa" + // HSPA + AttributeNetHostConnectionSubtypeHspa = "hspa" + // IDEN + AttributeNetHostConnectionSubtypeIden = "iden" + // EVDO Rev. B + AttributeNetHostConnectionSubtypeEvdoB = "evdo_b" + // LTE + AttributeNetHostConnectionSubtypeLte = "lte" + // EHRPD + AttributeNetHostConnectionSubtypeEhrpd = "ehrpd" + // HSPAP + AttributeNetHostConnectionSubtypeHspap = "hspap" + // GSM + AttributeNetHostConnectionSubtypeGsm = "gsm" + // TD-SCDMA + AttributeNetHostConnectionSubtypeTdScdma = "td_scdma" + // IWLAN + AttributeNetHostConnectionSubtypeIwlan = "iwlan" + // 5G NR (New Radio) + AttributeNetHostConnectionSubtypeNr = "nr" + // 5G NRNSA (New Radio Non-Standalone) + AttributeNetHostConnectionSubtypeNrnsa = "nrnsa" + // LTE CA + AttributeNetHostConnectionSubtypeLteCa = "lte_ca" +) + +// Operations that access some remote service. +const ( + // The service.name of the remote service. SHOULD be equal to the actual + // service.name resource attribute of the remote service if any. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'AuthTokenCache' + AttributePeerService = "peer.service" +) + +// These attributes may be used for any operation with an authenticated and/or authorized enduser. +const ( + // Username or client_id extracted from the access token or Authorization header + // in the inbound request from outside the system. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'username' + AttributeEnduserID = "enduser.id" + // Actual/assumed role the client is making the request under, extracted from token + // or application security context. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'admin' + AttributeEnduserRole = "enduser.role" + // Scopes or granted authorities the client currently possesses, extracted from + // token or application security context. The value would come from the scope + // associated with an OAuth 2.0 Access Token or an attribute value in a SAML 2.0 + // Assertion. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'read:message, write:files' + AttributeEnduserScope = "enduser.scope" +) + +// These attributes may be used for any operation to store information about a thread that started a span. +const ( + // Current "managed" thread ID (as opposed to OS thread ID). + // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 42 + AttributeThreadID = "thread.id" + // Current thread name. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'main' + AttributeThreadName = "thread.name" +) + +// These attributes allow reporting this unit of code and therefore provide more context about the span. +const ( + // The method or function name, or equivalent (usually rightmost part of the code + // unit's name). + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'serveRequest' + AttributeCodeFunction = "code.function" + // The "namespace" within which code.function is defined. Usually the + // qualified class or module name, such that code.namespace + some separator + + // code.function form a unique identifier for the code unit.
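+ //
+ // A sketch assuming only the standard runtime package: the code.* attributes
+ // can be derived from the caller's frame; FuncForPC returns a fully qualified
+ // name, which callers may split into code.namespace and code.function.
+ //
+ //	func codeAttrs() map[string]any {
+ //		pc, file, line, ok := runtime.Caller(1)
+ //		if !ok {
+ //			return nil
+ //		}
+ //		return map[string]any{
+ //			AttributeCodeFunction:   runtime.FuncForPC(pc).Name(),
+ //			AttributeCodeFilepath:   file,
+ //			AttributeCodeLineNumber: line,
+ //		}
+ //	}
+ //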
+ // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'com.example.MyHTTPService' + AttributeCodeNamespace = "code.namespace" + // The source code file name that identifies the code unit as uniquely as possible + // (preferably an absolute file path). + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '/usr/local/MyApplication/content_root/app/index.php' + AttributeCodeFilepath = "code.filepath" + // The line number in code.filepath best representing the operation. It SHOULD + // point within the code unit named in code.function. + // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 42 + AttributeCodeLineNumber = "code.lineno" +) + +// This document defines semantic conventions for HTTP client and server Spans. +const ( + // HTTP request method. + // + // Type: string + // Requirement Level: Required + // Stability: stable + // Examples: 'GET', 'POST', 'HEAD' + AttributeHTTPMethod = "http.method" + // HTTP response status code. + // + // Type: int + // Requirement Level: Conditionally Required - If and only if one was + // received/sent. + // Stability: stable + // Examples: 200 + AttributeHTTPStatusCode = "http.status_code" + // Kind of HTTP protocol used. + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + // Note: If net.transport is not specified, it can be assumed to be IP.TCP except + // if http.flavor is QUIC, in which case IP.UDP is assumed. + AttributeHTTPFlavor = "http.flavor" + // Value of the HTTP User-Agent header sent by the client. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'CERN-LineMode/2.15 libwww/2.17b3' + AttributeHTTPUserAgent = "http.user_agent" + // The size of the request payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as the + // Content-Length header. For requests using transport encoding, this should be + // the compressed size. + // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 3495 + AttributeHTTPRequestContentLength = "http.request_content_length" + // The size of the response payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as the + // Content-Length header. For requests using transport encoding, this should be + // the compressed size. + // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 3495 + AttributeHTTPResponseContentLength = "http.response_content_length" +) + +const ( + // HTTP/1.0 + AttributeHTTPFlavorHTTP10 = "1.0" + // HTTP/1.1 + AttributeHTTPFlavorHTTP11 = "1.1" + // HTTP/2 + AttributeHTTPFlavorHTTP20 = "2.0" + // HTTP/3 + AttributeHTTPFlavorHTTP30 = "3.0" + // SPDY protocol + AttributeHTTPFlavorSPDY = "SPDY" + // QUIC protocol + AttributeHTTPFlavorQUIC = "QUIC" +) + +// Semantic Convention for HTTP Client +const ( + // Full HTTP request URL in the form scheme://host[:port]/path?query[#fragment]. + // Usually the fragment is not transmitted over HTTP, but if it is known, it + // should be included nevertheless. + // + // Type: string + // Requirement Level: Required + // Stability: stable + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' + // Note: http.url MUST NOT contain credentials passed via URL in form of + // https://username:password@www.example.com/. 
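+ //
+ // A minimal sketch of the required scrubbing, assuming the standard net/url
+ // package; the function name is hypothetical.
+ //
+ //	func scrubHTTPURL(raw string) string {
+ //		u, err := url.Parse(raw)
+ //		if err != nil {
+ //			return raw
+ //		}
+ //		u.User = nil // strip any username:password
+ //		return u.String()
+ //	}
+ //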
In such case the attribute's value + // should be https://www.example.com/. + AttributeHTTPURL = "http.url" + // The ordinal number of request resending attempt (for any reason, including + // redirects). + // + // Type: int + // Requirement Level: Recommended - if and only if request was retried. + // Stability: stable + // Examples: 3 + // Note: The resend count SHOULD be updated each time an HTTP request gets resent + // by the client, regardless of what was the cause of the resending (e.g. + // redirection, authorization failure, 503 Server Unavailable, network issues, or + // any other). + AttributeHTTPResendCount = "http.resend_count" +) + +// Semantic Convention for HTTP Server +const ( + // The URI scheme identifying the used protocol. + // + // Type: string + // Requirement Level: Required + // Stability: stable + // Examples: 'http', 'https' + AttributeHTTPScheme = "http.scheme" + // The full request target as passed in an HTTP request line or equivalent. + // + // Type: string + // Requirement Level: Required + // Stability: stable + // Examples: '/path/12314/?q=ddds' + AttributeHTTPTarget = "http.target" + // The matched route (path template in the format used by the respective server + // framework). See note below + // + // Type: string + // Requirement Level: Conditionally Required - If and only if it's available + // Stability: stable + // Examples: '/users/:userID?', '{controller}/{action}/{id?}' + // Note: 'http.route' MUST NOT be populated when this is not supported by the HTTP + // server framework as the route attribute should have low-cardinality and the URI + // path can NOT substitute it. + AttributeHTTPRoute = "http.route" + // The IP address of the original client behind all proxies, if known (e.g. from + // X-Forwarded-For). + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '83.164.160.102' + // Note: This is not necessarily the same as net.sock.peer.addr, which would + // identify the network-level peer, which may be a proxy. This attribute should be + // set when a source of information different + // from the one used for net.sock.peer.addr is available even if that other + // source just confirms the same value as net.sock.peer.addr. + // Rationale: For net.sock.peer.addr, one typically does not know if it + // comes from a proxy, reverse proxy, or the actual client. Setting + // http.client_ip when it's the same as net.sock.peer.addr means that + // one is at least somewhat confident that the address is not that of + // the closest proxy. + AttributeHTTPClientIP = "http.client_ip" +) + +// Attributes that exist for multiple DynamoDB request types. +const ( + // The keys in the RequestItems object field. + // + // Type: string[] + // Requirement Level: Optional + // Stability: stable + // Examples: 'Users', 'Cats' + AttributeAWSDynamoDBTableNames = "aws.dynamodb.table_names" + // The JSON-serialized value of each item in the ConsumedCapacity response field.
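+ //
+ // An illustrative sketch: a DynamoDB Query call might record the shared
+ // request attributes like so; the values are the documented examples.
+ //
+ //	attrs := map[string]any{
+ //		AttributeAWSDynamoDBTableNames:     []string{"Users"},
+ //		AttributeAWSDynamoDBIndexName:      "name_to_group",
+ //		AttributeAWSDynamoDBConsistentRead: true,
+ //		AttributeAWSDynamoDBLimit:          10,
+ //	}
+ //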
+ // + // Type: string[] + // Requirement Level: Optional + // Stability: stable + // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" : { + // "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits": + // number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number, + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }, + // "ReadCapacityUnits": number, "Table": { "CapacityUnits": number, + // "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName": + // "string", "WriteCapacityUnits": number }' + AttributeAWSDynamoDBConsumedCapacity = "aws.dynamodb.consumed_capacity" + // The JSON-serialized value of the ItemCollectionMetrics response field. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob, + // "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" : + // "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S": + // "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }' + AttributeAWSDynamoDBItemCollectionMetrics = "aws.dynamodb.item_collection_metrics" + // The value of the ProvisionedThroughput.ReadCapacityUnits request parameter. + // + // Type: double + // Requirement Level: Optional + // Stability: stable + // Examples: 1.0, 2.0 + AttributeAWSDynamoDBProvisionedReadCapacity = "aws.dynamodb.provisioned_read_capacity" + // The value of the ProvisionedThroughput.WriteCapacityUnits request parameter. + // + // Type: double + // Requirement Level: Optional + // Stability: stable + // Examples: 1.0, 2.0 + AttributeAWSDynamoDBProvisionedWriteCapacity = "aws.dynamodb.provisioned_write_capacity" + // The value of the ConsistentRead request parameter. + // + // Type: boolean + // Requirement Level: Optional + // Stability: stable + AttributeAWSDynamoDBConsistentRead = "aws.dynamodb.consistent_read" + // The value of the ProjectionExpression request parameter. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'Title', 'Title, Price, Color', 'Title, Description, RelatedItems, + // ProductReviews' + AttributeAWSDynamoDBProjection = "aws.dynamodb.projection" + // The value of the Limit request parameter. + // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 10 + AttributeAWSDynamoDBLimit = "aws.dynamodb.limit" + // The value of the AttributesToGet request parameter. + // + // Type: string[] + // Requirement Level: Optional + // Stability: stable + // Examples: 'lives', 'id' + AttributeAWSDynamoDBAttributesToGet = "aws.dynamodb.attributes_to_get" + // The value of the IndexName request parameter. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'name_to_group' + AttributeAWSDynamoDBIndexName = "aws.dynamodb.index_name" + // The value of the Select request parameter. 
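+ //
+ // A sketch assuming the standard encoding/json package: the string[]-typed
+ // attributes above expect one JSON document per item, which a hypothetical
+ // helper like this could produce.
+ //
+ //	func toJSONStrings[T any](items []T) []string {
+ //		out := make([]string, 0, len(items))
+ //		for _, it := range items {
+ //			if b, err := json.Marshal(it); err == nil {
+ //				out = append(out, string(b))
+ //			}
+ //		}
+ //		return out
+ //	}
+ //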
+ // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'ALL_ATTRIBUTES', 'COUNT' + AttributeAWSDynamoDBSelect = "aws.dynamodb.select" +) + +// DynamoDB.CreateTable +const ( + // The JSON-serialized value of each item of the GlobalSecondaryIndexes request + // field + // + // Type: string[] + // Requirement Level: Optional + // Stability: stable + // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": "string", + // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], + // "ProjectionType": "string" }, "ProvisionedThroughput": { "ReadCapacityUnits": + // number, "WriteCapacityUnits": number } }' + AttributeAWSDynamoDBGlobalSecondaryIndexes = "aws.dynamodb.global_secondary_indexes" + // The JSON-serialized value of each item of the LocalSecondaryIndexes request + // field. + // + // Type: string[] + // Requirement Level: Optional + // Stability: stable + // Examples: '{ "IndexARN": "string", "IndexName": "string", "IndexSizeBytes": + // number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string", + // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], + // "ProjectionType": "string" } }' + AttributeAWSDynamoDBLocalSecondaryIndexes = "aws.dynamodb.local_secondary_indexes" +) + +// DynamoDB.ListTables +const ( + // The value of the ExclusiveStartTableName request parameter. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'Users', 'CatsTable' + AttributeAWSDynamoDBExclusiveStartTable = "aws.dynamodb.exclusive_start_table" + // The number of items in the TableNames response parameter. + // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 20 + AttributeAWSDynamoDBTableCount = "aws.dynamodb.table_count" +) + +// DynamoDB.Query +const ( + // The value of the ScanIndexForward request parameter. + // + // Type: boolean + // Requirement Level: Optional + // Stability: stable + AttributeAWSDynamoDBScanForward = "aws.dynamodb.scan_forward" +) + +// DynamoDB.Scan +const ( + // The value of the Segment request parameter. + // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 10 + AttributeAWSDynamoDBSegment = "aws.dynamodb.segment" + // The value of the TotalSegments request parameter. + // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 100 + AttributeAWSDynamoDBTotalSegments = "aws.dynamodb.total_segments" + // The value of the Count response parameter. + // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 10 + AttributeAWSDynamoDBCount = "aws.dynamodb.count" + // The value of the ScannedCount response parameter. + // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 50 + AttributeAWSDynamoDBScannedCount = "aws.dynamodb.scanned_count" +) + +// DynamoDB.UpdateTable +const ( + // The JSON-serialized value of each item in the AttributeDefinitions request + // field. + // + // Type: string[] + // Requirement Level: Optional + // Stability: stable + // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' + AttributeAWSDynamoDBAttributeDefinitions = "aws.dynamodb.attribute_definitions" + // The JSON-serialized value of each item in the GlobalSecondaryIndexUpdates + // request field.
+ // + // Type: string[] + // Requirement Level: Optional + // Stability: stable + // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, + // "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits": + // number } }' + AttributeAWSDynamoDBGlobalSecondaryIndexUpdates = "aws.dynamodb.global_secondary_index_updates" +) + +// This document defines semantic conventions to apply when instrumenting the GraphQL implementation. They map GraphQL operations to attributes on a Span. +const ( + // The name of the operation being executed. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'findBookByID' + AttributeGraphqlOperationName = "graphql.operation.name" + // The type of the operation being executed. + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + // Examples: 'query', 'mutation', 'subscription' + AttributeGraphqlOperationType = "graphql.operation.type" + // The GraphQL document being executed. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'query findBookByID { bookByID(id: ?) { name } }' + // Note: The value may be sanitized to exclude sensitive information. + AttributeGraphqlDocument = "graphql.document" +) + +const ( + // GraphQL query + AttributeGraphqlOperationTypeQuery = "query" + // GraphQL mutation + AttributeGraphqlOperationTypeMutation = "mutation" + // GraphQL subscription + AttributeGraphqlOperationTypeSubscription = "subscription" +) + +// This document defines the attributes used in messaging systems. +const ( + // A string identifying the messaging system. + // + // Type: string + // Requirement Level: Required + // Stability: stable + // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS' + AttributeMessagingSystem = "messaging.system" + // The message destination name. This might be equal to the span name but is + // required nevertheless. + // + // Type: string + // Requirement Level: Required + // Stability: stable + // Examples: 'MyQueue', 'MyTopic' + AttributeMessagingDestination = "messaging.destination" + // The kind of message destination + // + // Type: Enum + // Requirement Level: Conditionally Required - If the message destination is + // either a `queue` or `topic`. + // Stability: stable + AttributeMessagingDestinationKind = "messaging.destination_kind" + // A boolean that is true if the message destination is temporary. + // + // Type: boolean + // Requirement Level: Conditionally Required - If value is `true`. When missing, + // the value is assumed to be `false`. + // Stability: stable + AttributeMessagingTempDestination = "messaging.temp_destination" + // The name of the transport protocol. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'AMQP', 'MQTT' + AttributeMessagingProtocol = "messaging.protocol" + // The version of the transport protocol. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '0.9.1' + AttributeMessagingProtocolVersion = "messaging.protocol_version" + // Connection string. 
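+ //
+ // An illustrative sketch: a producer span for a Kafka topic might combine the
+ // required attributes like so; the destination name is the documented example.
+ //
+ //	attrs := map[string]string{
+ //		AttributeMessagingSystem:          "kafka",
+ //		AttributeMessagingDestination:     "MyTopic",
+ //		AttributeMessagingDestinationKind: AttributeMessagingDestinationKindTopic,
+ //	}
+ //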
+ // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'tibjmsnaming://localhost:7222', + // 'https://queue.amazonaws.com/80398EXAMPLE/MyQueue' + AttributeMessagingURL = "messaging.url" + // A value used by the messaging system as an identifier for the message, + // represented as a string. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '452a7c7c7c7048c2f887f61572b18fc2' + AttributeMessagingMessageID = "messaging.message_id" + // The conversation ID identifying the conversation to which the message belongs, + // represented as a string. Sometimes called "Correlation ID". + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'MyConversationID' + AttributeMessagingConversationID = "messaging.conversation_id" + // The (uncompressed) size of the message payload in bytes. Also use this + // attribute if it is unknown whether the compressed or uncompressed payload size + // is reported. + // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 2738 + AttributeMessagingMessagePayloadSizeBytes = "messaging.message_payload_size_bytes" + // The compressed size of the message payload in bytes. + // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 2048 + AttributeMessagingMessagePayloadCompressedSizeBytes = "messaging.message_payload_compressed_size_bytes" +) + +const ( + // A message sent to a queue + AttributeMessagingDestinationKindQueue = "queue" + // A message sent to a topic + AttributeMessagingDestinationKindTopic = "topic" +) + +// Semantic convention for a consumer of messages received from a messaging system +const ( + // A string identifying the kind of message consumption as defined in the + // Operation names section above. If the operation is "send", this + // attribute MUST NOT be set, since the operation can be inferred from the span + // kind in that case. + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + AttributeMessagingOperation = "messaging.operation" + // The identifier for the consumer receiving a message. For Kafka, set it to + // {messaging.kafka.consumer_group} - {messaging.kafka.client_id}, if both are + // present, or only messaging.kafka.consumer_group. For brokers, such as RabbitMQ + // and Artemis, set it to the client_id of the client consuming the message. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'mygroup - client-6' + AttributeMessagingConsumerID = "messaging.consumer_id" +) + +const ( + // receive + AttributeMessagingOperationReceive = "receive" + // process + AttributeMessagingOperationProcess = "process" +) + +// Attributes for RabbitMQ +const ( + // RabbitMQ message routing key. + // + // Type: string + // Requirement Level: Conditionally Required - If not empty. + // Stability: stable + // Examples: 'myKey' + AttributeMessagingRabbitmqRoutingKey = "messaging.rabbitmq.routing_key" +) + +// Attributes for Apache Kafka +const ( + // Message keys in Kafka are used for grouping alike messages to ensure they're + // processed on the same partition. They differ from messaging.message_id in that + // they're not unique. If the key is null, the attribute MUST NOT be set. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'myKey' + // Note: If the key type is not string, its string representation has to be + // supplied for the attribute.
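+ //
+ // A hypothetical helper illustrating the rules in this note: a nil key means
+ // the attribute MUST NOT be set, and a byte-slice key is recorded via its
+ // canonical string form.
+ //
+ //	func kafkaKeyAttr(key []byte) (string, bool) {
+ //		if key == nil {
+ //			return "", false // null key: do not set the attribute
+ //		}
+ //		return string(key), true
+ //	}
+ //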
If the key has no unambiguous, canonical string + // form, don't include its value. + AttributeMessagingKafkaMessageKey = "messaging.kafka.message_key" + // Name of the Kafka Consumer Group that is handling the message. Only applies to + // consumers, not producers. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'my-group' + AttributeMessagingKafkaConsumerGroup = "messaging.kafka.consumer_group" + // Client ID for the Consumer or Producer that is handling the message. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'client-5' + AttributeMessagingKafkaClientID = "messaging.kafka.client_id" + // Partition the message is sent to. + // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 2 + AttributeMessagingKafkaPartition = "messaging.kafka.partition" + // The offset of a record in the corresponding Kafka partition. + // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 42 + AttributeMessagingKafkaMessageOffset = "messaging.kafka.message.offset" + // A boolean that is true if the message is a tombstone. + // + // Type: boolean + // Requirement Level: Conditionally Required - If value is `true`. When missing, + // the value is assumed to be `false`. + // Stability: stable + AttributeMessagingKafkaTombstone = "messaging.kafka.tombstone" +) + +// Attributes for Apache RocketMQ +const ( + // Namespace of RocketMQ resources, resources in different namespaces are + // individual. + // + // Type: string + // Requirement Level: Required + // Stability: stable + // Examples: 'myNamespace' + AttributeMessagingRocketmqNamespace = "messaging.rocketmq.namespace" + // Name of the RocketMQ producer/consumer group that is handling the message. The + // client type is identified by the SpanKind. + // + // Type: string + // Requirement Level: Required + // Stability: stable + // Examples: 'myConsumerGroup' + AttributeMessagingRocketmqClientGroup = "messaging.rocketmq.client_group" + // The unique identifier for each client. + // + // Type: string + // Requirement Level: Required + // Stability: stable + // Examples: 'myhost@8742@s8083jm' + AttributeMessagingRocketmqClientID = "messaging.rocketmq.client_id" + // The timestamp in milliseconds that the delay message is expected to be + // delivered to consumer. + // + // Type: int + // Requirement Level: Conditionally Required - If the message type is delay and + // delay time level is not specified. + // Stability: stable + // Examples: 1665987217045 + AttributeMessagingRocketmqDeliveryTimestamp = "messaging.rocketmq.delivery_timestamp" + // The delay time level for delay message, which determines the message delay + // time. + // + // Type: int + // Requirement Level: Conditionally Required - If the message type is delay and + // delivery timestamp is not specified. + // Stability: stable + // Examples: 3 + AttributeMessagingRocketmqDelayTimeLevel = "messaging.rocketmq.delay_time_level" + // It is essential for FIFO message. Messages that belong to the same message + // group are always processed one by one within the same consumer group. + // + // Type: string + // Requirement Level: Conditionally Required - If the message type is FIFO. + // Stability: stable + // Examples: 'myMessageGroup' + AttributeMessagingRocketmqMessageGroup = "messaging.rocketmq.message_group" + // Type of message. 
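+ //
+ // An illustrative sketch of the conditional requirements above: a delay
+ // message records exactly one of the delivery timestamp or the delay time
+ // level; deliveryTimestampMillis and delayLevel are hypothetical variables.
+ //
+ //	attrs := map[string]any{
+ //		AttributeMessagingRocketmqMessageType: AttributeMessagingRocketmqMessageTypeDelay,
+ //	}
+ //	if deliveryTimestampMillis > 0 {
+ //		attrs[AttributeMessagingRocketmqDeliveryTimestamp] = deliveryTimestampMillis
+ //	} else {
+ //		attrs[AttributeMessagingRocketmqDelayTimeLevel] = delayLevel
+ //	}
+ //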
+ // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + AttributeMessagingRocketmqMessageType = "messaging.rocketmq.message_type" + // The secondary classifier of message besides topic. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'tagA' + AttributeMessagingRocketmqMessageTag = "messaging.rocketmq.message_tag" + // Key(s) of message, another way to mark message besides message id. + // + // Type: string[] + // Requirement Level: Optional + // Stability: stable + // Examples: 'keyA', 'keyB' + AttributeMessagingRocketmqMessageKeys = "messaging.rocketmq.message_keys" + // Model of message consumption. This only applies to consumer spans. + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + AttributeMessagingRocketmqConsumptionModel = "messaging.rocketmq.consumption_model" +) + +const ( + // Normal message + AttributeMessagingRocketmqMessageTypeNormal = "normal" + // FIFO message + AttributeMessagingRocketmqMessageTypeFifo = "fifo" + // Delay message + AttributeMessagingRocketmqMessageTypeDelay = "delay" + // Transaction message + AttributeMessagingRocketmqMessageTypeTransaction = "transaction" +) + +const ( + // Clustering consumption model + AttributeMessagingRocketmqConsumptionModelClustering = "clustering" + // Broadcasting consumption model + AttributeMessagingRocketmqConsumptionModelBroadcasting = "broadcasting" +) + +// This document defines semantic conventions for remote procedure calls. +const ( + // A string identifying the remoting system. See below for a list of well-known + // identifiers. + // + // Type: Enum + // Requirement Level: Required + // Stability: stable + AttributeRPCSystem = "rpc.system" + // The full (logical) name of the service being called, including its package + // name, if applicable. + // + // Type: string + // Requirement Level: Recommended + // Stability: stable + // Examples: 'myservice.EchoService' + // Note: This is the logical name of the service from the RPC interface + // perspective, which can be different from the name of any implementing class. + // The code.namespace attribute may be used to store the latter (despite the + // attribute name, it may include a class name; e.g., class with method actually + // executing the call on the server side, RPC client stub class on the client + // side). + AttributeRPCService = "rpc.service" + // The name of the (logical) method being called, must be equal to the $method + // part in the span name. + // + // Type: string + // Requirement Level: Recommended + // Stability: stable + // Examples: 'exampleMethod' + // Note: This is the logical name of the method from the RPC interface + // perspective, which can be different from the name of any implementing + // method/function. The code.function attribute may be used to store the latter + // (e.g., method actually executing the call on the server side, RPC client stub + // method on the client side). + AttributeRPCMethod = "rpc.method" +) + +const ( + // gRPC + AttributeRPCSystemGRPC = "grpc" + // Java RMI + AttributeRPCSystemJavaRmi = "java_rmi" + // .NET WCF + AttributeRPCSystemDotnetWcf = "dotnet_wcf" + // Apache Dubbo + AttributeRPCSystemApacheDubbo = "apache_dubbo" +) + +// Tech-specific attributes for gRPC. +const ( + // The numeric status code of the gRPC request. 
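+ //
+ // A sketch that assumes google.golang.org/grpc/status and the standard strconv
+ // package: the enum values below are the numeric codes as strings, so a client
+ // interceptor might derive the attribute like this.
+ //
+ //	s, _ := status.FromError(err)
+ //	attrs := map[string]string{
+ //		AttributeRPCSystem:         AttributeRPCSystemGRPC,
+ //		AttributeRPCGRPCStatusCode: strconv.Itoa(int(s.Code())), // "0" through "16"
+ //	}
+ //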
+ // + // Type: Enum + // Requirement Level: Required + // Stability: stable + AttributeRPCGRPCStatusCode = "rpc.grpc.status_code" +) + +const ( + // OK + AttributeRPCGRPCStatusCodeOk = "0" + // CANCELLED + AttributeRPCGRPCStatusCodeCancelled = "1" + // UNKNOWN + AttributeRPCGRPCStatusCodeUnknown = "2" + // INVALID_ARGUMENT + AttributeRPCGRPCStatusCodeInvalidArgument = "3" + // DEADLINE_EXCEEDED + AttributeRPCGRPCStatusCodeDeadlineExceeded = "4" + // NOT_FOUND + AttributeRPCGRPCStatusCodeNotFound = "5" + // ALREADY_EXISTS + AttributeRPCGRPCStatusCodeAlreadyExists = "6" + // PERMISSION_DENIED + AttributeRPCGRPCStatusCodePermissionDenied = "7" + // RESOURCE_EXHAUSTED + AttributeRPCGRPCStatusCodeResourceExhausted = "8" + // FAILED_PRECONDITION + AttributeRPCGRPCStatusCodeFailedPrecondition = "9" + // ABORTED + AttributeRPCGRPCStatusCodeAborted = "10" + // OUT_OF_RANGE + AttributeRPCGRPCStatusCodeOutOfRange = "11" + // UNIMPLEMENTED + AttributeRPCGRPCStatusCodeUnimplemented = "12" + // INTERNAL + AttributeRPCGRPCStatusCodeInternal = "13" + // UNAVAILABLE + AttributeRPCGRPCStatusCodeUnavailable = "14" + // DATA_LOSS + AttributeRPCGRPCStatusCodeDataLoss = "15" + // UNAUTHENTICATED + AttributeRPCGRPCStatusCodeUnauthenticated = "16" +) + +// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/). +const ( + // Protocol version as in jsonrpc property of request/response. Since JSON-RPC 1.0 + // does not specify this, the value can be omitted. + // + // Type: string + // Requirement Level: Conditionally Required - If other than the default version + // (`1.0`) + // Stability: stable + // Examples: '2.0', '1.0' + AttributeRPCJsonrpcVersion = "rpc.jsonrpc.version" + // id property of request or response. Since protocol allows id to be int, string, + // null or missing (for notifications), value is expected to be cast to string for + // simplicity. Use empty string in case of null value. Omit entirely if this is a + // notification. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '10', 'request-7', '' + AttributeRPCJsonrpcRequestID = "rpc.jsonrpc.request_id" + // error.code property of response if it is an error response. + // + // Type: int + // Requirement Level: Conditionally Required - If response is not successful. + // Stability: stable + // Examples: -32700, 100 + AttributeRPCJsonrpcErrorCode = "rpc.jsonrpc.error_code" + // error.message property of response if it is an error response. 
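+ //
+ // A hypothetical helper for the request-id casting rules above, assuming the
+ // standard strconv and fmt packages; for notifications, which carry no id at
+ // all, the attribute is simply omitted.
+ //
+ //	func jsonrpcRequestIDAttr(id any) string {
+ //		switch v := id.(type) {
+ //		case nil:
+ //			return "" // null id maps to the empty string
+ //		case string:
+ //			return v
+ //		case float64: // encoding/json decodes JSON numbers as float64
+ //			return strconv.FormatFloat(v, 'f', -1, 64)
+ //		default:
+ //			return fmt.Sprint(v)
+ //		}
+ //	}
+ //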
+ // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'Parse error', 'User already exists' + AttributeRPCJsonrpcErrorMessage = "rpc.jsonrpc.error_message" +) + +func GetTraceSemanticConventionAttributeNames() []string { + return []string{ + AttributeExceptionType, + AttributeExceptionMessage, + AttributeExceptionStacktrace, + AttributeEventName, + AttributeEventDomain, + AttributeAWSLambdaInvokedARN, + AttributeCloudeventsEventID, + AttributeCloudeventsEventSource, + AttributeCloudeventsEventSpecVersion, + AttributeCloudeventsEventType, + AttributeCloudeventsEventSubject, + AttributeOpentracingRefType, + AttributeDBSystem, + AttributeDBConnectionString, + AttributeDBUser, + AttributeDBJDBCDriverClassname, + AttributeDBName, + AttributeDBStatement, + AttributeDBOperation, + AttributeDBMSSQLInstanceName, + AttributeDBCassandraPageSize, + AttributeDBCassandraConsistencyLevel, + AttributeDBCassandraTable, + AttributeDBCassandraIdempotence, + AttributeDBCassandraSpeculativeExecutionCount, + AttributeDBCassandraCoordinatorID, + AttributeDBCassandraCoordinatorDC, + AttributeDBRedisDBIndex, + AttributeDBMongoDBCollection, + AttributeDBSQLTable, + AttributeOtelStatusCode, + AttributeOtelStatusDescription, + AttributeFaaSTrigger, + AttributeFaaSExecution, + AttributeFaaSDocumentCollection, + AttributeFaaSDocumentOperation, + AttributeFaaSDocumentTime, + AttributeFaaSDocumentName, + AttributeFaaSTime, + AttributeFaaSCron, + AttributeFaaSColdstart, + AttributeFaaSInvokedName, + AttributeFaaSInvokedProvider, + AttributeFaaSInvokedRegion, + AttributeNetTransport, + AttributeNetAppProtocolName, + AttributeNetAppProtocolVersion, + AttributeNetSockPeerName, + AttributeNetSockPeerAddr, + AttributeNetSockPeerPort, + AttributeNetSockFamily, + AttributeNetPeerName, + AttributeNetPeerPort, + AttributeNetHostName, + AttributeNetHostPort, + AttributeNetSockHostAddr, + AttributeNetSockHostPort, + AttributeNetHostConnectionType, + AttributeNetHostConnectionSubtype, + AttributeNetHostCarrierName, + AttributeNetHostCarrierMcc, + AttributeNetHostCarrierMnc, + AttributeNetHostCarrierIcc, + AttributePeerService, + AttributeEnduserID, + AttributeEnduserRole, + AttributeEnduserScope, + AttributeThreadID, + AttributeThreadName, + AttributeCodeFunction, + AttributeCodeNamespace, + AttributeCodeFilepath, + AttributeCodeLineNumber, + AttributeHTTPMethod, + AttributeHTTPStatusCode, + AttributeHTTPFlavor, + AttributeHTTPUserAgent, + AttributeHTTPRequestContentLength, + AttributeHTTPResponseContentLength, + AttributeHTTPURL, + AttributeHTTPResendCount, + AttributeHTTPScheme, + AttributeHTTPTarget, + AttributeHTTPRoute, + AttributeHTTPClientIP, + AttributeAWSDynamoDBTableNames, + AttributeAWSDynamoDBConsumedCapacity, + AttributeAWSDynamoDBItemCollectionMetrics, + AttributeAWSDynamoDBProvisionedReadCapacity, + AttributeAWSDynamoDBProvisionedWriteCapacity, + AttributeAWSDynamoDBConsistentRead, + AttributeAWSDynamoDBProjection, + AttributeAWSDynamoDBLimit, + AttributeAWSDynamoDBAttributesToGet, + AttributeAWSDynamoDBIndexName, + AttributeAWSDynamoDBSelect, + AttributeAWSDynamoDBGlobalSecondaryIndexes, + AttributeAWSDynamoDBLocalSecondaryIndexes, + AttributeAWSDynamoDBExclusiveStartTable, + AttributeAWSDynamoDBTableCount, + AttributeAWSDynamoDBScanForward, + AttributeAWSDynamoDBSegment, + AttributeAWSDynamoDBTotalSegments, + AttributeAWSDynamoDBCount, + AttributeAWSDynamoDBScannedCount, + AttributeAWSDynamoDBAttributeDefinitions, + AttributeAWSDynamoDBGlobalSecondaryIndexUpdates, + 
AttributeGraphqlOperationName, + AttributeGraphqlOperationType, + AttributeGraphqlDocument, + AttributeMessagingSystem, + AttributeMessagingDestination, + AttributeMessagingDestinationKind, + AttributeMessagingTempDestination, + AttributeMessagingProtocol, + AttributeMessagingProtocolVersion, + AttributeMessagingURL, + AttributeMessagingMessageID, + AttributeMessagingConversationID, + AttributeMessagingMessagePayloadSizeBytes, + AttributeMessagingMessagePayloadCompressedSizeBytes, + AttributeMessagingOperation, + AttributeMessagingConsumerID, + AttributeMessagingRabbitmqRoutingKey, + AttributeMessagingKafkaMessageKey, + AttributeMessagingKafkaConsumerGroup, + AttributeMessagingKafkaClientID, + AttributeMessagingKafkaPartition, + AttributeMessagingKafkaMessageOffset, + AttributeMessagingKafkaTombstone, + AttributeMessagingRocketmqNamespace, + AttributeMessagingRocketmqClientGroup, + AttributeMessagingRocketmqClientID, + AttributeMessagingRocketmqDeliveryTimestamp, + AttributeMessagingRocketmqDelayTimeLevel, + AttributeMessagingRocketmqMessageGroup, + AttributeMessagingRocketmqMessageType, + AttributeMessagingRocketmqMessageTag, + AttributeMessagingRocketmqMessageKeys, + AttributeMessagingRocketmqConsumptionModel, + AttributeRPCSystem, + AttributeRPCService, + AttributeRPCMethod, + AttributeRPCGRPCStatusCode, + AttributeRPCJsonrpcVersion, + AttributeRPCJsonrpcRequestID, + AttributeRPCJsonrpcErrorCode, + AttributeRPCJsonrpcErrorMessage, + } +} diff --git a/vendor/go.opentelemetry.io/collector/semconv/v1.16.0/nonstandard.go b/vendor/go.opentelemetry.io/collector/semconv/v1.16.0/nonstandard.go new file mode 100644 index 00000000000..9346ba9dcd5 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/semconv/v1.16.0/nonstandard.go @@ -0,0 +1,11 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/collector/semconv/v1.16.0" + +const ( + OtelLibraryName = "otel.library.name" + OtelLibraryVersion = "otel.library.version" + OtelStatusCode = "otel.status_code" + OtelStatusDescription = "otel.status_description" +) diff --git a/vendor/go.opentelemetry.io/collector/semconv/v1.16.0/schema.go b/vendor/go.opentelemetry.io/collector/semconv/v1.16.0/schema.go new file mode 100644 index 00000000000..bb438059e67 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/semconv/v1.16.0/schema.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/collector/semconv/v1.16.0" + +// SchemaURL is the schema URL that matches the version of the semantic conventions +// that this package defines. Conventions packages starting from v1.4.0 must declare +// non-empty schema URL in the form https://opentelemetry.io/schemas/ +const SchemaURL = "https://opentelemetry.io/schemas/1.16.0" diff --git a/vendor/go.opentelemetry.io/collector/semconv/v1.18.0/generated_resource.go b/vendor/go.opentelemetry.io/collector/semconv/v1.18.0/generated_resource.go index f2a04b8666e..049f689b978 100644 --- a/vendor/go.opentelemetry.io/collector/semconv/v1.18.0/generated_resource.go +++ b/vendor/go.opentelemetry.io/collector/semconv/v1.18.0/generated_resource.go @@ -423,12 +423,12 @@ const ( // Take care not to use the "invoked ARN" directly but replace any // alias suffix // with the resolved function version, as the same runtime instance may be - // invokable with + // invocable with // multiple different aliases.
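 //
 // (An illustrative sketch of the alias-to-version replacement described here,
 // assuming the standard strings package; the helper name is hypothetical.)
 //
 //	func resolvedFaaSID(invokedARN, version string) string {
 //		parts := strings.Split(invokedARN, ":")
 //		if len(parts) == 8 { // arn:aws:lambda:<region>:<account>:function:<name>:<alias>
 //			parts[7] = version
 //		}
 //		return strings.Join(parts, ":")
 //	}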
 	//
 	// GCP: The URI of the resource
 	//
 	// Azure: The Fully Qualified Resource ID of the invoked function,
 	// not the function app, having the form
-	// /subscriptions/<SUBSCIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/s
+	// /subscriptions/<SUBSCRIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/s
 	// ites/<FUNCAPP>/functions/<FUNC>.
 	// This means that a span attribute MUST be used, as an Azure function app can
 	// host multiple functions that would usually share
diff --git a/vendor/go.opentelemetry.io/collector/semconv/v1.25.0/doc.go b/vendor/go.opentelemetry.io/collector/semconv/v1.25.0/doc.go
new file mode 100644
index 00000000000..ad9e94a49f8
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/semconv/v1.25.0/doc.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package semconv implements OpenTelemetry semantic conventions.
+//
+// OpenTelemetry semantic conventions are agreed standardized naming
+// patterns for OpenTelemetry things. This package represents the v1.25.0
+// version of the OpenTelemetry semantic conventions.
+package semconv // import "go.opentelemetry.io/collector/semconv/v1.25.0"
diff --git a/vendor/go.opentelemetry.io/collector/semconv/v1.25.0/generated_attribute_group.go b/vendor/go.opentelemetry.io/collector/semconv/v1.25.0/generated_attribute_group.go
new file mode 100644
index 00000000000..5bd8f5b8fac
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/semconv/v1.25.0/generated_attribute_group.go
@@ -0,0 +1,4796 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv
+
+// Attributes for Events represented using Log Records.
+const (
+	// Identifies the class / type of event.
+	//
+	// Type: string
+	// Requirement Level: Required
+	// Stability: experimental
+	// Examples: 'browser.mouse.click', 'device.app.lifecycle'
+	// Note: Event names are subject to the same rules as attribute names. Notably,
+	// event names are namespaced to avoid collisions and provide a clean separation
+	// of semantics for events in separate domains like browser, mobile, and
+	// kubernetes.
+	AttributeEventName = "event.name"
+)
+
+// The attributes described in this section are rather generic. They may be
+// used in any Log Record they apply to.
+const (
+	// A unique identifier for the Log Record.
+	//
+	// Type: string
+	// Requirement Level: Optional
+	// Stability: experimental
+	// Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV'
+	// Note: If an id is provided, other log records with the same id will be
+	// considered duplicates and can be removed safely. This means, that two
+	// distinguishable log records MUST have different values.
+	// The id MAY be an Universally Unique Lexicographically Sortable Identifier
+	// (ULID), but other identifiers (e.g. UUID) may be used as needed.
+	AttributeLogRecordUID = "log.record.uid"
+)
+
+// Describes Log attributes
+const (
+	// The stream associated with the log. See below for a list of well-known values.
+	//
+	// Type: Enum
+	// Requirement Level: Optional
+	// Stability: experimental
+	AttributeLogIostream = "log.iostream"
+)
+
+const (
+	// Logs from stdout stream
+	AttributeLogIostreamStdout = "stdout"
+	// Events from stderr stream
+	AttributeLogIostreamStderr = "stderr"
+)
+
+// A file to which log was emitted.
+const (
+	// The basename of the file.
+ // + // Type: string + // Requirement Level: Recommended + // Stability: experimental + // Examples: 'audit.log' + AttributeLogFileName = "log.file.name" + // The basename of the file, with symlinks resolved. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'uuid.log' + AttributeLogFileNameResolved = "log.file.name_resolved" + // The full path to the file. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '/var/log/mysql/audit.log' + AttributeLogFilePath = "log.file.path" + // The full path to the file, with symlinks resolved. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '/var/lib/docker/uuid.log' + AttributeLogFilePathResolved = "log.file.path_resolved" +) + +// Describes Database attributes +const ( + // The name of the connection pool; unique within the instrumented application. In + // case the connection pool implementation doesn't provide a name, instrumentation + // should use a combination of server.address and server.port attributes formatted + // as server.address:server.port. + // + // Type: string + // Requirement Level: Required + // Stability: experimental + // Examples: 'myDataSource' + AttributePoolName = "pool.name" + // The state of a connection in the pool + // + // Type: Enum + // Requirement Level: Required + // Stability: experimental + // Examples: 'idle' + AttributeState = "state" +) + +const ( + // idle + AttributeStateIdle = "idle" + // used + AttributeStateUsed = "used" +) + +// ASP.NET Core attributes +const ( + // Rate-limiting result, shows whether the lease was acquired or contains a + // rejection reason + // + // Type: Enum + // Requirement Level: Required + // Stability: stable + // Examples: 'acquired', 'request_canceled' + AttributeAspnetcoreRateLimitingResult = "aspnetcore.rate_limiting.result" + // Full type name of the IExceptionHandler implementation that handled the + // exception. + // + // Type: string + // Requirement Level: Conditionally Required - if and only if the exception was + // handled by this handler. + // Stability: stable + // Examples: 'Contoso.MyHandler' + AttributeAspnetcoreDiagnosticsHandlerType = "aspnetcore.diagnostics.handler.type" + // Rate limiting policy name. + // + // Type: string + // Requirement Level: Conditionally Required - if the matched endpoint for the + // request had a rate-limiting policy. + // Stability: stable + // Examples: 'fixed', 'sliding', 'token' + AttributeAspnetcoreRateLimitingPolicy = "aspnetcore.rate_limiting.policy" + // Flag indicating if request was handled by the application pipeline. + // + // Type: boolean + // Requirement Level: Conditionally Required - if and only if the request was not + // handled. + // Stability: stable + // Examples: True + AttributeAspnetcoreRequestIsUnhandled = "aspnetcore.request.is_unhandled" + // A value that indicates whether the matched route is a fallback route. + // + // Type: boolean + // Requirement Level: Conditionally Required - If and only if a route was + // successfully matched. 
+ // Stability: stable + // Examples: True + AttributeAspnetcoreRoutingIsFallback = "aspnetcore.routing.is_fallback" +) + +const ( + // Lease was acquired + AttributeAspnetcoreRateLimitingResultAcquired = "acquired" + // Lease request was rejected by the endpoint limiter + AttributeAspnetcoreRateLimitingResultEndpointLimiter = "endpoint_limiter" + // Lease request was rejected by the global limiter + AttributeAspnetcoreRateLimitingResultGlobalLimiter = "global_limiter" + // Lease request was canceled + AttributeAspnetcoreRateLimitingResultRequestCanceled = "request_canceled" +) + +// SignalR attributes +const ( + // SignalR HTTP connection closure status. + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + // Examples: 'app_shutdown', 'timeout' + AttributeSignalrConnectionStatus = "signalr.connection.status" + // SignalR transport type + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + // Examples: 'web_sockets', 'long_polling' + AttributeSignalrTransport = "signalr.transport" +) + +const ( + // The connection was closed normally + AttributeSignalrConnectionStatusNormalClosure = "normal_closure" + // The connection was closed due to a timeout + AttributeSignalrConnectionStatusTimeout = "timeout" + // The connection was closed because the app is shutting down + AttributeSignalrConnectionStatusAppShutdown = "app_shutdown" +) + +const ( + // ServerSentEvents protocol + AttributeSignalrTransportServerSentEvents = "server_sent_events" + // LongPolling protocol + AttributeSignalrTransportLongPolling = "long_polling" + // WebSockets protocol + AttributeSignalrTransportWebSockets = "web_sockets" +) + +// Describes JVM buffer metric attributes. +const ( + // Name of the buffer pool. + // + // Type: string + // Requirement Level: Recommended + // Stability: experimental + // Examples: 'mapped', 'direct' + // Note: Pool names are generally obtained via BufferPoolMXBean#getName(). + AttributeJvmBufferPoolName = "jvm.buffer.pool.name" +) + +// Describes JVM memory metric attributes. +const ( + // Name of the memory pool. + // + // Type: string + // Requirement Level: Recommended + // Stability: stable + // Examples: 'G1 Old Gen', 'G1 Eden space', 'G1 Survivor Space' + // Note: Pool names are generally obtained via MemoryPoolMXBean#getName(). + AttributeJvmMemoryPoolName = "jvm.memory.pool.name" + // The type of memory. + // + // Type: Enum + // Requirement Level: Recommended + // Stability: stable + // Examples: 'heap', 'non_heap' + AttributeJvmMemoryType = "jvm.memory.type" +) + +const ( + // Heap memory + AttributeJvmMemoryTypeHeap = "heap" + // Non-heap memory + AttributeJvmMemoryTypeNonHeap = "non_heap" +) + +// Attributes for process CPU metrics. +const ( + // The CPU state for this data point. A process SHOULD be characterized either by + // data points with no state labels, or only data points with state labels. 
+ // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeProcessCPUState = "process.cpu.state" +) + +const ( + // system + AttributeProcessCPUStateSystem = "system" + // user + AttributeProcessCPUStateUser = "user" + // wait + AttributeProcessCPUStateWait = "wait" +) + +// Describes System metric attributes +const ( + // The device identifier + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '(identifier)' + AttributeSystemDevice = "system.device" +) + +// Describes System CPU metric attributes +const ( + // The logical CPU number [0..n-1] + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 1 + AttributeSystemCPULogicalNumber = "system.cpu.logical_number" + // The CPU state for this data point. A system's CPU SHOULD be characterized + // either by data points with no state labels, or only data points with state + // labels. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'idle', 'interrupt' + AttributeSystemCPUState = "system.cpu.state" +) + +const ( + // user + AttributeSystemCPUStateUser = "user" + // system + AttributeSystemCPUStateSystem = "system" + // nice + AttributeSystemCPUStateNice = "nice" + // idle + AttributeSystemCPUStateIdle = "idle" + // iowait + AttributeSystemCPUStateIowait = "iowait" + // interrupt + AttributeSystemCPUStateInterrupt = "interrupt" + // steal + AttributeSystemCPUStateSteal = "steal" +) + +// Describes System Memory metric attributes +const ( + // The memory state + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'free', 'cached' + AttributeSystemMemoryState = "system.memory.state" +) + +const ( + // used + AttributeSystemMemoryStateUsed = "used" + // free + AttributeSystemMemoryStateFree = "free" + // shared + AttributeSystemMemoryStateShared = "shared" + // buffers + AttributeSystemMemoryStateBuffers = "buffers" + // cached + AttributeSystemMemoryStateCached = "cached" +) + +// Describes System Memory Paging metric attributes +const ( + // The paging access direction + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'in' + AttributeSystemPagingDirection = "system.paging.direction" + // The memory paging state + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'free' + AttributeSystemPagingState = "system.paging.state" + // The memory paging type + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'minor' + AttributeSystemPagingType = "system.paging.type" +) + +const ( + // in + AttributeSystemPagingDirectionIn = "in" + // out + AttributeSystemPagingDirectionOut = "out" +) + +const ( + // used + AttributeSystemPagingStateUsed = "used" + // free + AttributeSystemPagingStateFree = "free" +) + +const ( + // major + AttributeSystemPagingTypeMajor = "major" + // minor + AttributeSystemPagingTypeMinor = "minor" +) + +// Describes Filesystem metric attributes +const ( + // The filesystem mode + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'rw, ro' + AttributeSystemFilesystemMode = "system.filesystem.mode" + // The filesystem mount path + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '/mnt/data' + AttributeSystemFilesystemMountpoint = "system.filesystem.mountpoint" + // The filesystem state + // + 
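
The process.cpu.state and system.cpu.state notes above both say a CPU SHOULD be characterized either by data points with no state label at all, or only by data points carrying a state label. A minimal sketch of the labeled shape, assuming the collector's pdata/pcommon module; the metric name in the output is invented for illustration:

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pcommon"
)

// Mirrors AttributeSystemCPUState defined above.
const attributeSystemCPUState = "system.cpu.state"

func main() {
	// One data point per state; never a mix of labeled and unlabeled points.
	for _, state := range []string{"user", "system", "idle", "iowait"} {
		attrs := pcommon.NewMap()
		attrs.PutStr(attributeSystemCPUState, state)
		fmt.Printf("system.cpu.time%v\n", attrs.AsRaw())
	}
}
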
// Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'used' + AttributeSystemFilesystemState = "system.filesystem.state" + // The filesystem type + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'ext4' + AttributeSystemFilesystemType = "system.filesystem.type" +) + +const ( + // used + AttributeSystemFilesystemStateUsed = "used" + // free + AttributeSystemFilesystemStateFree = "free" + // reserved + AttributeSystemFilesystemStateReserved = "reserved" +) + +const ( + // fat32 + AttributeSystemFilesystemTypeFat32 = "fat32" + // exfat + AttributeSystemFilesystemTypeExfat = "exfat" + // ntfs + AttributeSystemFilesystemTypeNtfs = "ntfs" + // refs + AttributeSystemFilesystemTypeRefs = "refs" + // hfsplus + AttributeSystemFilesystemTypeHfsplus = "hfsplus" + // ext4 + AttributeSystemFilesystemTypeExt4 = "ext4" +) + +// Describes Network metric attributes +const ( + // A stateless protocol MUST NOT set this attribute + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'close_wait' + AttributeSystemNetworkState = "system.network.state" +) + +const ( + // close + AttributeSystemNetworkStateClose = "close" + // close_wait + AttributeSystemNetworkStateCloseWait = "close_wait" + // closing + AttributeSystemNetworkStateClosing = "closing" + // delete + AttributeSystemNetworkStateDelete = "delete" + // established + AttributeSystemNetworkStateEstablished = "established" + // fin_wait_1 + AttributeSystemNetworkStateFinWait1 = "fin_wait_1" + // fin_wait_2 + AttributeSystemNetworkStateFinWait2 = "fin_wait_2" + // last_ack + AttributeSystemNetworkStateLastAck = "last_ack" + // listen + AttributeSystemNetworkStateListen = "listen" + // syn_recv + AttributeSystemNetworkStateSynRecv = "syn_recv" + // syn_sent + AttributeSystemNetworkStateSynSent = "syn_sent" + // time_wait + AttributeSystemNetworkStateTimeWait = "time_wait" +) + +// Describes System Process metric attributes +const ( + // The process state, e.g., Linux Process State Codes + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'running' + AttributeSystemProcessStatus = "system.process.status" +) + +const ( + // running + AttributeSystemProcessStatusRunning = "running" + // sleeping + AttributeSystemProcessStatusSleeping = "sleeping" + // stopped + AttributeSystemProcessStatusStopped = "stopped" + // defunct + AttributeSystemProcessStatusDefunct = "defunct" +) + +// The Android platform on which the Android application is running. +const ( + // Uniquely identifies the framework API revision offered by a version + // (os.version) of the android operating system. More information can be found + // here. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '33', '32' + AttributeAndroidOSAPILevel = "android.os.api_level" +) + +// Attributes for AWS DynamoDB. +const ( + // The JSON-serialized value of each item in the AttributeDefinitions request + // field. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' + AttributeAWSDynamoDBAttributeDefinitions = "aws.dynamodb.attribute_definitions" + // The value of the AttributesToGet request parameter. 
+ // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'lives', 'id' + AttributeAWSDynamoDBAttributesToGet = "aws.dynamodb.attributes_to_get" + // The value of the ConsistentRead request parameter. + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + AttributeAWSDynamoDBConsistentRead = "aws.dynamodb.consistent_read" + // The JSON-serialized value of each item in the ConsumedCapacity response field. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" : { + // "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits": + // number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number, + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }, + // "ReadCapacityUnits": number, "Table": { "CapacityUnits": number, + // "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName": + // "string", "WriteCapacityUnits": number }' + AttributeAWSDynamoDBConsumedCapacity = "aws.dynamodb.consumed_capacity" + // The value of the Count response parameter. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 10 + AttributeAWSDynamoDBCount = "aws.dynamodb.count" + // The value of the ExclusiveStartTableName request parameter. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Users', 'CatsTable' + AttributeAWSDynamoDBExclusiveStartTable = "aws.dynamodb.exclusive_start_table" + // The JSON-serialized value of each item in the GlobalSecondaryIndexUpdates + // request field. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, + // "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits": + // number } }' + AttributeAWSDynamoDBGlobalSecondaryIndexUpdates = "aws.dynamodb.global_secondary_index_updates" + // The JSON-serialized value of each item of the GlobalSecondaryIndexes request + // field + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": "string", + // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], + // "ProjectionType": "string" }, "ProvisionedThroughput": { "ReadCapacityUnits": + // number, "WriteCapacityUnits": number } }' + AttributeAWSDynamoDBGlobalSecondaryIndexes = "aws.dynamodb.global_secondary_indexes" + // The value of the IndexName request parameter. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'name_to_group' + AttributeAWSDynamoDBIndexName = "aws.dynamodb.index_name" + // The JSON-serialized value of the ItemCollectionMetrics response field. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob, + // "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" : + // "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S": + // "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }' + AttributeAWSDynamoDBItemCollectionMetrics = "aws.dynamodb.item_collection_metrics" + // The value of the Limit request parameter. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 10 + AttributeAWSDynamoDBLimit = "aws.dynamodb.limit" + // The JSON-serialized value of each item of the LocalSecondaryIndexes request + // field. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: '{ "IndexARN": "string", "IndexName": "string", "IndexSizeBytes": + // number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string", + // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], + // "ProjectionType": "string" } }' + AttributeAWSDynamoDBLocalSecondaryIndexes = "aws.dynamodb.local_secondary_indexes" + // The value of the ProjectionExpression request parameter. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Title', 'Title, Price, Color', 'Title, Description, RelatedItems, + // ProductReviews' + AttributeAWSDynamoDBProjection = "aws.dynamodb.projection" + // The value of the ProvisionedThroughput.ReadCapacityUnits request parameter. + // + // Type: double + // Requirement Level: Optional + // Stability: experimental + // Examples: 1.0, 2.0 + AttributeAWSDynamoDBProvisionedReadCapacity = "aws.dynamodb.provisioned_read_capacity" + // The value of the ProvisionedThroughput.WriteCapacityUnits request parameter. + // + // Type: double + // Requirement Level: Optional + // Stability: experimental + // Examples: 1.0, 2.0 + AttributeAWSDynamoDBProvisionedWriteCapacity = "aws.dynamodb.provisioned_write_capacity" + // The value of the ScanIndexForward request parameter. + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + AttributeAWSDynamoDBScanForward = "aws.dynamodb.scan_forward" + // The value of the ScannedCount response parameter. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 50 + AttributeAWSDynamoDBScannedCount = "aws.dynamodb.scanned_count" + // The value of the Segment request parameter. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 10 + AttributeAWSDynamoDBSegment = "aws.dynamodb.segment" + // The value of the Select request parameter. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'ALL_ATTRIBUTES', 'COUNT' + AttributeAWSDynamoDBSelect = "aws.dynamodb.select" + // The number of items in the TableNames response parameter. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 20 + AttributeAWSDynamoDBTableCount = "aws.dynamodb.table_count" + // The keys in the RequestItems object field. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Users', 'Cats' + AttributeAWSDynamoDBTableNames = "aws.dynamodb.table_names" + // The value of the TotalSegments request parameter. 
+ // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 100 + AttributeAWSDynamoDBTotalSegments = "aws.dynamodb.total_segments" +) + +// The web browser attributes +const ( + // Array of brand name and version separated by a space + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' + // Note: This value is intended to be taken from the UA client hints API + // (navigator.userAgentData.brands). + AttributeBrowserBrands = "browser.brands" + // Preferred language of the user using the browser + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'en', 'en-US', 'fr', 'fr-FR' + // Note: This value is intended to be taken from the Navigator API + // navigator.language. + AttributeBrowserLanguage = "browser.language" + // A boolean that is true if the browser is running on a mobile device + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + // Note: This value is intended to be taken from the UA client hints API + // (navigator.userAgentData.mobile). If unavailable, this attribute SHOULD be left + // unset. + AttributeBrowserMobile = "browser.mobile" + // The platform on which the browser is running + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Windows', 'macOS', 'Android' + // Note: This value is intended to be taken from the UA client hints API + // (navigator.userAgentData.platform). If unavailable, the legacy + // navigator.platform API SHOULD NOT be used instead and this attribute SHOULD be + // left unset in order for the values to be consistent. + // The list of possible values is defined in the W3C User-Agent Client Hints + // specification. Note that some (but not all) of these values can overlap with + // values in the os.type and os.name attributes. However, for consistency, the + // values in the browser.platform attribute should capture the exact value that + // the user agent provides. + AttributeBrowserPlatform = "browser.platform" +) + +// These attributes may be used to describe the client in a connection-based +// network interaction where there is one side that initiates the connection +// (the client is the side that initiates the connection). This covers all TCP +// network interactions since TCP is connection-based and one side initiates +// the connection (an exception is made for peer-to-peer communication over TCP +// where the "user-facing" surface of the protocol / API doesn't expose a clear +// notion of client and server). This also covers UDP network interactions +// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS. +const ( + // Client address - domain name if available without reverse DNS lookup; + // otherwise, IP address or Unix domain socket name. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'client.example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the server side, and when communicating through an + // intermediary, client.address SHOULD represent the client address behind any + // intermediaries, for example proxies, if it's available. + AttributeClientAddress = "client.address" + // Client port number. 
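
To make the span-attribute shape concrete, here is a small sketch that sets a few of the AWS DynamoDB attributes defined above on a span via the collector's pdata/ptrace API; the span name and the values are invented:

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/ptrace"
)

func main() {
	traces := ptrace.NewTraces()
	span := traces.ResourceSpans().AppendEmpty().
		ScopeSpans().AppendEmpty().
		Spans().AppendEmpty()
	span.SetName("DynamoDB.Query")

	attrs := span.Attributes()
	attrs.PutStr("aws.dynamodb.index_name", "name_to_group") // AttributeAWSDynamoDBIndexName
	attrs.PutInt("aws.dynamodb.limit", 10)                   // AttributeAWSDynamoDBLimit
	attrs.PutBool("aws.dynamodb.consistent_read", true)      // AttributeAWSDynamoDBConsistentRead

	fmt.Println(span.Name(), attrs.AsRaw())
}
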
+ // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 65123 + // Note: When observed from the server side, and when communicating through an + // intermediary, client.port SHOULD represent the client port behind any + // intermediaries, for example proxies, if it's available. + AttributeClientPort = "client.port" +) + +// A cloud environment (e.g. GCP, Azure, AWS). +const ( + // The cloud account ID the resource is assigned to. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '111111111111', 'opentelemetry' + AttributeCloudAccountID = "cloud.account.id" + // Cloud regions often have multiple, isolated locations known as zones to + // increase availability. Availability zone represents the zone where the resource + // is running. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'us-east-1c' + // Note: Availability zones are called "zones" on Alibaba Cloud and + // Google Cloud. + AttributeCloudAvailabilityZone = "cloud.availability_zone" + // The cloud platform in use. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Note: The prefix of the service SHOULD match the one specified in + // cloud.provider. + AttributeCloudPlatform = "cloud.platform" + // Name of the cloud provider. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeCloudProvider = "cloud.provider" + // The geographical region the resource is running. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'us-central1', 'us-east-1' + // Note: Refer to your provider's docs to see the available regions, for example + // Alibaba Cloud regions, AWS regions, Azure regions, Google Cloud regions, or + // Tencent Cloud regions. + AttributeCloudRegion = "cloud.region" + // Cloud provider-specific native identifier of the monitored cloud resource (e.g. + // an ARN on AWS, a fully qualified resource ID on Azure, a full resource name on + // GCP) + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function', '//run.googl + // eapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID', '/sub + // scriptions//resourceGroups//providers/Microsoft.Web/sites + // //functions/' + // Note: On some cloud providers, it may not be possible to determine the full ID + // at startup, + // so it may be necessary to set cloud.resource_id as a span attribute instead.The + // exact value to use for cloud.resource_id depends on the cloud provider. + // The following well-known definitions MUST be used if you set this attribute and + // they apply:
+	//
+	// AWS Lambda: The function ARN.
+	// Take care not to use the "invoked ARN" directly but replace any
+	// alias suffix
+	// with the resolved function version, as the same runtime instance may be
+	// invocable with
+	// multiple different aliases.
+	//
+	// GCP: The URI of the resource
+	//
+	// Azure: The Fully Qualified Resource ID of the invoked function,
+	// not the function app, having the form
+	// /subscriptions/<SUBSCRIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/s
+	// ites/<FUNCAPP>/functions/<FUNC>.
+	// This means that a span attribute MUST be used, as an Azure function app can
+	// host multiple functions that would usually share
+	// a TracerProvider.
+	//
    + AttributeCloudResourceID = "cloud.resource_id" +) + +const ( + // Alibaba Cloud Elastic Compute Service + AttributeCloudPlatformAlibabaCloudECS = "alibaba_cloud_ecs" + // Alibaba Cloud Function Compute + AttributeCloudPlatformAlibabaCloudFc = "alibaba_cloud_fc" + // Red Hat OpenShift on Alibaba Cloud + AttributeCloudPlatformAlibabaCloudOpenshift = "alibaba_cloud_openshift" + // AWS Elastic Compute Cloud + AttributeCloudPlatformAWSEC2 = "aws_ec2" + // AWS Elastic Container Service + AttributeCloudPlatformAWSECS = "aws_ecs" + // AWS Elastic Kubernetes Service + AttributeCloudPlatformAWSEKS = "aws_eks" + // AWS Lambda + AttributeCloudPlatformAWSLambda = "aws_lambda" + // AWS Elastic Beanstalk + AttributeCloudPlatformAWSElasticBeanstalk = "aws_elastic_beanstalk" + // AWS App Runner + AttributeCloudPlatformAWSAppRunner = "aws_app_runner" + // Red Hat OpenShift on AWS (ROSA) + AttributeCloudPlatformAWSOpenshift = "aws_openshift" + // Azure Virtual Machines + AttributeCloudPlatformAzureVM = "azure_vm" + // Azure Container Apps + AttributeCloudPlatformAzureContainerApps = "azure_container_apps" + // Azure Container Instances + AttributeCloudPlatformAzureContainerInstances = "azure_container_instances" + // Azure Kubernetes Service + AttributeCloudPlatformAzureAKS = "azure_aks" + // Azure Functions + AttributeCloudPlatformAzureFunctions = "azure_functions" + // Azure App Service + AttributeCloudPlatformAzureAppService = "azure_app_service" + // Azure Red Hat OpenShift + AttributeCloudPlatformAzureOpenshift = "azure_openshift" + // Google Bare Metal Solution (BMS) + AttributeCloudPlatformGCPBareMetalSolution = "gcp_bare_metal_solution" + // Google Cloud Compute Engine (GCE) + AttributeCloudPlatformGCPComputeEngine = "gcp_compute_engine" + // Google Cloud Run + AttributeCloudPlatformGCPCloudRun = "gcp_cloud_run" + // Google Cloud Kubernetes Engine (GKE) + AttributeCloudPlatformGCPKubernetesEngine = "gcp_kubernetes_engine" + // Google Cloud Functions (GCF) + AttributeCloudPlatformGCPCloudFunctions = "gcp_cloud_functions" + // Google Cloud App Engine (GAE) + AttributeCloudPlatformGCPAppEngine = "gcp_app_engine" + // Red Hat OpenShift on Google Cloud + AttributeCloudPlatformGCPOpenshift = "gcp_openshift" + // Red Hat OpenShift on IBM Cloud + AttributeCloudPlatformIbmCloudOpenshift = "ibm_cloud_openshift" + // Tencent Cloud Cloud Virtual Machine (CVM) + AttributeCloudPlatformTencentCloudCvm = "tencent_cloud_cvm" + // Tencent Cloud Elastic Kubernetes Service (EKS) + AttributeCloudPlatformTencentCloudEKS = "tencent_cloud_eks" + // Tencent Cloud Serverless Cloud Function (SCF) + AttributeCloudPlatformTencentCloudScf = "tencent_cloud_scf" +) + +const ( + // Alibaba Cloud + AttributeCloudProviderAlibabaCloud = "alibaba_cloud" + // Amazon Web Services + AttributeCloudProviderAWS = "aws" + // Microsoft Azure + AttributeCloudProviderAzure = "azure" + // Google Cloud Platform + AttributeCloudProviderGCP = "gcp" + // Heroku Platform as a Service + AttributeCloudProviderHeroku = "heroku" + // IBM Cloud + AttributeCloudProviderIbmCloud = "ibm_cloud" + // Tencent Cloud + AttributeCloudProviderTencentCloud = "tencent_cloud" +) + +// Attributes for CloudEvents. +const ( + // The event_id uniquely identifies the event. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' + AttributeCloudeventsEventID = "cloudevents.event_id" + // The source identifies the context in which an event happened. 
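
The cloud.platform note above says the value's prefix SHOULD match the one in cloud.provider, which suggests a simple consistency check. A sketch under that reading; platformMatchesProvider is a hypothetical helper, not part of this package:

package main

import (
	"fmt"
	"strings"
)

const (
	cloudProviderAWS       = "aws"        // AttributeCloudProviderAWS
	cloudPlatformAWSLambda = "aws_lambda" // AttributeCloudPlatformAWSLambda
)

// platformMatchesProvider reports whether a cloud.platform value carries the
// prefix named by the cloud.provider value, e.g. "aws_lambda" under "aws".
func platformMatchesProvider(platform, provider string) bool {
	return platform == provider || strings.HasPrefix(platform, provider+"_")
}

func main() {
	fmt.Println(platformMatchesProvider(cloudPlatformAWSLambda, cloudProviderAWS)) // true
	fmt.Println(platformMatchesProvider("gcp_cloud_run", cloudProviderAWS))        // false
}
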
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'https://github.com/cloudevents', '/cloudevents/spec/pull/123', 'my- + // service' + AttributeCloudeventsEventSource = "cloudevents.event_source" + // The version of the CloudEvents specification which the event uses. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '1.0' + AttributeCloudeventsEventSpecVersion = "cloudevents.event_spec_version" + // The subject of the event in the context of the event producer (identified by + // source). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'mynewfile.jpg' + AttributeCloudeventsEventSubject = "cloudevents.event_subject" + // The event_type contains a value describing the type of event related to the + // originating occurrence. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'com.github.pull_request.opened', 'com.example.object.deleted.v2' + AttributeCloudeventsEventType = "cloudevents.event_type" +) + +// These attributes allow to report this unit of code and therefore to provide +// more context about the span. +const ( + // The column number in code.filepath best representing the operation. It SHOULD + // point within the code unit named in code.function. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 16 + AttributeCodeColumn = "code.column" + // The source code file name that identifies the code unit as uniquely as possible + // (preferably an absolute file path). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '/usr/local/MyApplication/content_root/app/index.php' + AttributeCodeFilepath = "code.filepath" + // The method or function name, or equivalent (usually rightmost part of the code + // unit's name). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'serveRequest' + AttributeCodeFunction = "code.function" + // The line number in code.filepath best representing the operation. It SHOULD + // point within the code unit named in code.function. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 42 + AttributeCodeLineNumber = "code.lineno" + // The "namespace" within which code.function is defined. Usually the + // qualified class or module name, such that code.namespace + some separator + + // code.function form a unique identifier for the code unit. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'com.example.MyHTTPService' + AttributeCodeNamespace = "code.namespace" + // A stacktrace as a string in the natural representation for the language + // runtime. The representation is to be determined and documented by each language + // SIG. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'at com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + AttributeCodeStacktrace = "code.stacktrace" +) + +// A container instance. +const ( + // The command used to run the container (i.e. the command name). 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'otelcontribcol' + // Note: If using embedded credentials or sensitive data, it is recommended to + // remove them to prevent potential leakage. + AttributeContainerCommand = "container.command" + // All the command arguments (including the command/executable itself) run by the + // container. [2] + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'otelcontribcol, --config, config.yaml' + AttributeContainerCommandArgs = "container.command_args" + // The full command run by the container as a single string representing the full + // command. [2] + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'otelcontribcol --config config.yaml' + AttributeContainerCommandLine = "container.command_line" + // The CPU state for this data point. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'user', 'kernel' + AttributeContainerCPUState = "container.cpu.state" + // Container ID. Usually a UUID, as for example used to identify Docker + // containers. The UUID might be abbreviated. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'a3bf90e006b2' + AttributeContainerID = "container.id" + // Runtime specific image identifier. Usually a hash algorithm followed by a UUID. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: + // 'sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f' + // Note: Docker defines a sha256 of the image id; container.image.id corresponds + // to the Image field from the Docker container inspect API endpoint. + // K8S defines a link to the container registry repository with digest "imageID": + // "registry.azurecr.io /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e + // 8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625". + // The ID is assigned by the container runtime and can vary in different + // environments. Consider using oci.manifest.digest if it is important to identify + // the same image in different environments/runtimes. + AttributeContainerImageID = "container.image.id" + // Name of the image the container was built on. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'gcr.io/opentelemetry/operator' + AttributeContainerImageName = "container.image.name" + // Repo digests of the container image as provided by the container runtime. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d7 + // 02d249a0ccb', 'internal.registry.example.com:5000/example@sha256:b69959407d21e8 + // a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578' + // Note: Docker and CRI report those under the RepoDigests field. + AttributeContainerImageRepoDigests = "container.image.repo_digests" + // Container image tags. An example can be found in Docker Image Inspect. Should + // be only the section of the full name for example from + // registry.example.com/my-org/my-image:. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'v1.27.1', '3.5.7-0' + AttributeContainerImageTags = "container.image.tags" + // Container name used by container runtime. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry-autoconf' + AttributeContainerName = "container.name" + // The container runtime managing this container. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'docker', 'containerd', 'rkt' + AttributeContainerRuntime = "container.runtime" +) + +const ( + // When tasks of the cgroup are in user mode (Linux). When all container processes are in user mode (Windows) + AttributeContainerCPUStateUser = "user" + // When CPU is used by the system (host OS) + AttributeContainerCPUStateSystem = "system" + // When tasks of the cgroup are in kernel mode (Linux). When all container processes are in kernel mode (Windows) + AttributeContainerCPUStateKernel = "kernel" +) + +// The attributes used to describe telemetry in the context of databases. +const ( + // The consistency level of the query. Based on consistency values from CQL. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeDBCassandraConsistencyLevel = "db.cassandra.consistency_level" + // The data center of the coordinating node for a query. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'us-west-2' + AttributeDBCassandraCoordinatorDC = "db.cassandra.coordinator.dc" + // The ID of the coordinating node for a query. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' + AttributeDBCassandraCoordinatorID = "db.cassandra.coordinator.id" + // Whether or not the query is idempotent. + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + AttributeDBCassandraIdempotence = "db.cassandra.idempotence" + // The fetch size used for paging, i.e. how many rows will be returned at once. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 5000 + AttributeDBCassandraPageSize = "db.cassandra.page_size" + // The number of times a query was speculatively executed. Not set or 0 if the + // query was not executed speculatively. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 0, 2 + AttributeDBCassandraSpeculativeExecutionCount = "db.cassandra.speculative_execution_count" + // The name of the primary Cassandra table that the operation is acting upon, + // including the keyspace name (if applicable). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'mytable' + // Note: This mirrors the db.sql.table attribute but references cassandra rather + // than sql. It is not recommended to attempt any client-side parsing of + // db.statement just to get this property, but it should be set if it is provided + // by the library being instrumented. If the operation is acting upon an anonymous + // table, or more than one table, this value MUST NOT be set. + AttributeDBCassandraTable = "db.cassandra.table" + // Unique Cosmos client instance id. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d' + AttributeDBCosmosDBClientID = "db.cosmosdb.client_id" + // Cosmos client connection mode. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeDBCosmosDBConnectionMode = "db.cosmosdb.connection_mode" + // Cosmos DB container name. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'anystring' + AttributeDBCosmosDBContainer = "db.cosmosdb.container" + // CosmosDB Operation Type. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeDBCosmosDBOperationType = "db.cosmosdb.operation_type" + // RU consumed for that operation + // + // Type: double + // Requirement Level: Optional + // Stability: experimental + // Examples: 46.18, 1.0 + AttributeDBCosmosDBRequestCharge = "db.cosmosdb.request_charge" + // Request payload size in bytes + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + AttributeDBCosmosDBRequestContentLength = "db.cosmosdb.request_content_length" + // Cosmos DB status code. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 200, 201 + AttributeDBCosmosDBStatusCode = "db.cosmosdb.status_code" + // Cosmos DB sub status code. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 1000, 1002 + AttributeDBCosmosDBSubStatusCode = "db.cosmosdb.sub_status_code" + // Represents the identifier of an Elasticsearch cluster. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'e9106fc68e3044f0b1475b04bf4ffd5f' + AttributeDBElasticsearchClusterName = "db.elasticsearch.cluster.name" + // An identifier (address, unique name, or any other identifier) of the database + // instance that is executing queries or mutations on the current connection. This + // is useful in cases where the database is running in a clustered environment and + // the instrumentation is able to record the node executing the query. The client + // may obtain this value in databases like MySQL using queries like select + // @@hostname. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'mysql-e26b99z.example.com' + AttributeDBInstanceID = "db.instance.id" + // The MongoDB collection being accessed within the database stated in db.name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'customers', 'products' + AttributeDBMongoDBCollection = "db.mongodb.collection" + // The Microsoft SQL Server instance name connecting to. This name is used to + // determine the port of a named instance. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'MSSQLSERVER' + // Note: If setting a db.mssql.instance_name, server.port is no longer required + // (but still recommended if non-standard). + AttributeDBMSSQLInstanceName = "db.mssql.instance_name" + // This attribute is used to report the name of the database being accessed. For + // commands that switch the database, this should be set to the target database + // (even if the command fails). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'customers', 'main' + // Note: In some SQL databases, the database name to be used is called + // "schema name". In case there are multiple layers that could be + // considered for database name (e.g. Oracle instance name and schema name), the + // database name to be used is the more specific layer (e.g. Oracle schema name). + AttributeDBName = "db.name" + // The name of the operation being executed, e.g. the MongoDB command name such as + // findAndModify, or the SQL keyword. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'findAndModify', 'HMSET', 'SELECT' + // Note: When setting this to an SQL keyword, it is not recommended to attempt any + // client-side parsing of db.statement just to get this property, but it should be + // set if the operation name is provided by the library being instrumented. If the + // SQL statement has an ambiguous operation, or performs more than one operation, + // this value may be omitted. + AttributeDBOperation = "db.operation" + // The index of the database being accessed as used in the SELECT command, + // provided as an integer. To be used instead of the generic db.name attribute. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 0, 1, 15 + AttributeDBRedisDBIndex = "db.redis.database_index" + // The name of the primary table that the operation is acting upon, including the + // database name (if applicable). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'public.users', 'customers' + // Note: It is not recommended to attempt any client-side parsing of db.statement + // just to get this property, but it should be set if it is provided by the + // library being instrumented. If the operation is acting upon an anonymous table, + // or more than one table, this value MUST NOT be set. + AttributeDBSQLTable = "db.sql.table" + // The database statement being executed. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' + AttributeDBStatement = "db.statement" + // An identifier for the database management system (DBMS) product being used. See + // below for a list of well-known identifiers. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeDBSystem = "db.system" + // Username for accessing the database. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'readonly_user', 'reporting_user' + AttributeDBUser = "db.user" +) + +const ( + // all + AttributeDBCassandraConsistencyLevelAll = "all" + // each_quorum + AttributeDBCassandraConsistencyLevelEachQuorum = "each_quorum" + // quorum + AttributeDBCassandraConsistencyLevelQuorum = "quorum" + // local_quorum + AttributeDBCassandraConsistencyLevelLocalQuorum = "local_quorum" + // one + AttributeDBCassandraConsistencyLevelOne = "one" + // two + AttributeDBCassandraConsistencyLevelTwo = "two" + // three + AttributeDBCassandraConsistencyLevelThree = "three" + // local_one + AttributeDBCassandraConsistencyLevelLocalOne = "local_one" + // any + AttributeDBCassandraConsistencyLevelAny = "any" + // serial + AttributeDBCassandraConsistencyLevelSerial = "serial" + // local_serial + AttributeDBCassandraConsistencyLevelLocalSerial = "local_serial" +) + +const ( + // Gateway (HTTP) connections mode + AttributeDBCosmosDBConnectionModeGateway = "gateway" + // Direct connection + AttributeDBCosmosDBConnectionModeDirect = "direct" +) + +const ( + // invalid + AttributeDBCosmosDBOperationTypeInvalid = "Invalid" + // create + AttributeDBCosmosDBOperationTypeCreate = "Create" + // patch + AttributeDBCosmosDBOperationTypePatch = "Patch" + // read + AttributeDBCosmosDBOperationTypeRead = "Read" + // read_feed + AttributeDBCosmosDBOperationTypeReadFeed = "ReadFeed" + // delete + AttributeDBCosmosDBOperationTypeDelete = "Delete" + // replace + AttributeDBCosmosDBOperationTypeReplace = "Replace" + // execute + AttributeDBCosmosDBOperationTypeExecute = "Execute" + // query + AttributeDBCosmosDBOperationTypeQuery = "Query" + // head + AttributeDBCosmosDBOperationTypeHead = "Head" + // head_feed + AttributeDBCosmosDBOperationTypeHeadFeed = "HeadFeed" + // upsert + AttributeDBCosmosDBOperationTypeUpsert = "Upsert" + // batch + AttributeDBCosmosDBOperationTypeBatch = "Batch" + // query_plan + AttributeDBCosmosDBOperationTypeQueryPlan = "QueryPlan" + // execute_javascript + AttributeDBCosmosDBOperationTypeExecuteJavascript = "ExecuteJavaScript" +) + +const ( + // Some other SQL database. Fallback only. 
See notes + AttributeDBSystemOtherSQL = "other_sql" + // Microsoft SQL Server + AttributeDBSystemMSSQL = "mssql" + // Microsoft SQL Server Compact + AttributeDBSystemMssqlcompact = "mssqlcompact" + // MySQL + AttributeDBSystemMySQL = "mysql" + // Oracle Database + AttributeDBSystemOracle = "oracle" + // IBM DB2 + AttributeDBSystemDB2 = "db2" + // PostgreSQL + AttributeDBSystemPostgreSQL = "postgresql" + // Amazon Redshift + AttributeDBSystemRedshift = "redshift" + // Apache Hive + AttributeDBSystemHive = "hive" + // Cloudscape + AttributeDBSystemCloudscape = "cloudscape" + // HyperSQL DataBase + AttributeDBSystemHSQLDB = "hsqldb" + // Progress Database + AttributeDBSystemProgress = "progress" + // SAP MaxDB + AttributeDBSystemMaxDB = "maxdb" + // SAP HANA + AttributeDBSystemHanaDB = "hanadb" + // Ingres + AttributeDBSystemIngres = "ingres" + // FirstSQL + AttributeDBSystemFirstSQL = "firstsql" + // EnterpriseDB + AttributeDBSystemEDB = "edb" + // InterSystems Caché + AttributeDBSystemCache = "cache" + // Adabas (Adaptable Database System) + AttributeDBSystemAdabas = "adabas" + // Firebird + AttributeDBSystemFirebird = "firebird" + // Apache Derby + AttributeDBSystemDerby = "derby" + // FileMaker + AttributeDBSystemFilemaker = "filemaker" + // Informix + AttributeDBSystemInformix = "informix" + // InstantDB + AttributeDBSystemInstantDB = "instantdb" + // InterBase + AttributeDBSystemInterbase = "interbase" + // MariaDB + AttributeDBSystemMariaDB = "mariadb" + // Netezza + AttributeDBSystemNetezza = "netezza" + // Pervasive PSQL + AttributeDBSystemPervasive = "pervasive" + // PointBase + AttributeDBSystemPointbase = "pointbase" + // SQLite + AttributeDBSystemSqlite = "sqlite" + // Sybase + AttributeDBSystemSybase = "sybase" + // Teradata + AttributeDBSystemTeradata = "teradata" + // Vertica + AttributeDBSystemVertica = "vertica" + // H2 + AttributeDBSystemH2 = "h2" + // ColdFusion IMQ + AttributeDBSystemColdfusion = "coldfusion" + // Apache Cassandra + AttributeDBSystemCassandra = "cassandra" + // Apache HBase + AttributeDBSystemHBase = "hbase" + // MongoDB + AttributeDBSystemMongoDB = "mongodb" + // Redis + AttributeDBSystemRedis = "redis" + // Couchbase + AttributeDBSystemCouchbase = "couchbase" + // CouchDB + AttributeDBSystemCouchDB = "couchdb" + // Microsoft Azure Cosmos DB + AttributeDBSystemCosmosDB = "cosmosdb" + // Amazon DynamoDB + AttributeDBSystemDynamoDB = "dynamodb" + // Neo4j + AttributeDBSystemNeo4j = "neo4j" + // Apache Geode + AttributeDBSystemGeode = "geode" + // Elasticsearch + AttributeDBSystemElasticsearch = "elasticsearch" + // Memcached + AttributeDBSystemMemcached = "memcached" + // CockroachDB + AttributeDBSystemCockroachdb = "cockroachdb" + // OpenSearch + AttributeDBSystemOpensearch = "opensearch" + // ClickHouse + AttributeDBSystemClickhouse = "clickhouse" + // Cloud Spanner + AttributeDBSystemSpanner = "spanner" + // Trino + AttributeDBSystemTrino = "trino" +) + +// Attributes for software deployments. +const ( + // Name of the deployment environment (aka deployment tier). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'staging', 'production' + // Note: deployment.environment does not affect the uniqueness constraints defined + // through + // the service.namespace, service.name and service.instance.id resource + // attributes. + // This implies that resources carrying the following attribute combinations MUST + // be + // considered to be identifying the same service:
+	//
+	// service.name=frontend, deployment.environment=production
+	//
+	// service.name=frontend, deployment.environment=staging.
+	//
    + AttributeDeploymentEnvironment = "deployment.environment" +) + +// "Describes deprecated db attributes." +const ( + // Deprecated, use server.address, server.port attributes instead. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Deprecated: "Replaced by `server.address` and `server.port`." + // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' + AttributeDBConnectionString = "db.connection_string" + // Deprecated, use db.instance.id instead. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Replaced by `db.instance.id`. + // Examples: 'instance-0000000001' + AttributeDBElasticsearchNodeName = "db.elasticsearch.node.name" + // Removed, no replacement at this time. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Removed as not used. + // Examples: 'org.postgresql.Driver', + // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' + AttributeDBJDBCDriverClassname = "db.jdbc.driver_classname" +) + +// Describes deprecated HTTP attributes. +const ( + // Deprecated, use network.protocol.name instead. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Replaced by `network.protocol.name`. + AttributeHTTPFlavor = "http.flavor" + // Deprecated, use http.request.method instead. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Replaced by `http.request.method`. + // Examples: 'GET', 'POST', 'HEAD' + AttributeHTTPMethod = "http.method" + // Deprecated, use http.request.header.content-length instead. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Replaced by `http.request.header.content-length`. + // Examples: 3495 + AttributeHTTPRequestContentLength = "http.request_content_length" + // Deprecated, use http.response.header.content-length instead. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Replaced by `http.response.header.content-length`. + // Examples: 3495 + AttributeHTTPResponseContentLength = "http.response_content_length" + // Deprecated, use url.scheme instead. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Replaced by `url.scheme` instead. + // Examples: 'http', 'https' + AttributeHTTPScheme = "http.scheme" + // Deprecated, use http.response.status_code instead. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Replaced by `http.response.status_code`. + // Examples: 200 + AttributeHTTPStatusCode = "http.status_code" + // Deprecated, use url.path and url.query instead. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Split to `url.path` and `url.query. + // Examples: '/search?q=OpenTelemetry#SemConv' + AttributeHTTPTarget = "http.target" + // Deprecated, use url.full instead. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Replaced by `url.full`. + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' + AttributeHTTPURL = "http.url" + // Deprecated, use user_agent.original instead. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Replaced by `user_agent.original`. 
+ // Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU iPhone + // OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) + // Version/14.1.2 Mobile/15E148 Safari/604.1' + AttributeHTTPUserAgent = "http.user_agent" +) + +const ( + // HTTP/1.0 + AttributeHTTPFlavorHTTP10 = "1.0" + // HTTP/1.1 + AttributeHTTPFlavorHTTP11 = "1.1" + // HTTP/2 + AttributeHTTPFlavorHTTP20 = "2.0" + // HTTP/3 + AttributeHTTPFlavorHTTP30 = "3.0" + // SPDY protocol + AttributeHTTPFlavorSPDY = "SPDY" + // QUIC protocol + AttributeHTTPFlavorQUIC = "QUIC" +) + +// Describes deprecated messaging attributes. +const ( + // "Deprecated, use messaging.destination.partition.id instead." + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Replaced by `messaging.destination.partition.id`. + // Examples: 2 + AttributeMessagingKafkaDestinationPartition = "messaging.kafka.destination.partition" +) + +// These attributes may be used for any network related operation. +const ( + // Deprecated, use server.address. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Replaced by `server.address`. + // Examples: 'example.com' + AttributeNetHostName = "net.host.name" + // Deprecated, use server.port. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Replaced by `server.port`. + // Examples: 8080 + AttributeNetHostPort = "net.host.port" + // Deprecated, use server.address on client spans and client.address on server + // spans. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Replaced by `server.address` on client spans and `client.address` + // on server spans. + // Examples: 'example.com' + AttributeNetPeerName = "net.peer.name" + // Deprecated, use server.port on client spans and client.port on server spans. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Replaced by `server.port` on client spans and `client.port` on + // server spans. + // Examples: 8080 + AttributeNetPeerPort = "net.peer.port" + // Deprecated, use network.protocol.name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Replaced by `network.protocol.name`. + // Examples: 'amqp', 'http', 'mqtt' + AttributeNetProtocolName = "net.protocol.name" + // Deprecated, use network.protocol.version. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Replaced by `network.protocol.version`. + // Examples: '3.1.1' + AttributeNetProtocolVersion = "net.protocol.version" + // Deprecated, use network.transport and network.type. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Split to `network.transport` and `network.type`. + AttributeNetSockFamily = "net.sock.family" + // Deprecated, use network.local.address. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Replaced by `network.local.address`. + // Examples: '/var/my.sock' + AttributeNetSockHostAddr = "net.sock.host.addr" + // Deprecated, use network.local.port. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Replaced by `network.local.port`. + // Examples: 8080 + AttributeNetSockHostPort = "net.sock.host.port" + // Deprecated, use network.peer.address. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Replaced by `network.peer.address`. + // Examples: '192.168.0.1' + AttributeNetSockPeerAddr = "net.sock.peer.addr" + // Deprecated, no replacement at this time. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Removed. + // Examples: '/var/my.sock' + AttributeNetSockPeerName = "net.sock.peer.name" + // Deprecated, use network.peer.port. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Replaced by `network.peer.port`. + // Examples: 65531 + AttributeNetSockPeerPort = "net.sock.peer.port" + // Deprecated, use network.transport. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Replaced by `network.transport`. + AttributeNetTransport = "net.transport" +) + +const ( + // IPv4 address + AttributeNetSockFamilyInet = "inet" + // IPv6 address + AttributeNetSockFamilyInet6 = "inet6" + // Unix domain socket path + AttributeNetSockFamilyUnix = "unix" +) + +const ( + // ip_tcp + AttributeNetTransportTCP = "ip_tcp" + // ip_udp + AttributeNetTransportUDP = "ip_udp" + // Named or anonymous pipe + AttributeNetTransportPipe = "pipe" + // In-process communication + AttributeNetTransportInProc = "inproc" + // Something else (non IP-based) + AttributeNetTransportOther = "other" +) + +// Deprecated system attributes. +const ( + // Deprecated, use system.process.status instead. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Deprecated: Replaced by `system.process.status`. + // Examples: 'running' + AttributeSystemProcessesStatus = "system.processes.status" +) + +const ( + // running + AttributeSystemProcessesStatusRunning = "running" + // sleeping + AttributeSystemProcessesStatusSleeping = "sleeping" + // stopped + AttributeSystemProcessesStatusStopped = "stopped" + // defunct + AttributeSystemProcessesStatusDefunct = "defunct" +) + +// These attributes may be used to describe the receiver of a network +// exchange/packet. These should be used when there is no client/server +// relationship between the two sides, or when that relationship is unknown. +// This covers low-level network interactions (e.g. packet tracing) where you +// don't know if there was a connection or which side initiated it. This also +// covers unidirectional UDP flows and peer-to-peer communication where the +// "user-facing" surface of the protocol / API doesn't expose a clear notion of +// client and server. +const ( + // Destination address - domain name if available without reverse DNS lookup; + // otherwise, IP address or Unix domain socket name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'destination.example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the source side, and when communicating through an + // intermediary, destination.address SHOULD represent the destination address + // behind any intermediaries, for example proxies, if it's available. + AttributeDestinationAddress = "destination.address" + // Destination port number + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 3389, 2888 + AttributeDestinationPort = "destination.port" +) + +// Describes device attributes. 
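[Editor's aside, not part of the patch: a minimal sketch of how a collector component might populate the destination.* attributes described above, assuming the pcommon map API from go.opentelemetry.io/collector/pdata; the lower-case constants are local stand-ins mirroring the vendored ones.]

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pcommon"
)

// Local mirrors of the constants defined above.
const (
	attributeDestinationAddress = "destination.address"
	attributeDestinationPort    = "destination.port"
)

// annotateDestination records the far side of a peer-to-peer exchange.
// Per the note above, when observed from the source side behind an
// intermediary, the address SHOULD be the one behind any proxies.
func annotateDestination(attrs pcommon.Map, addr string, port int64) {
	attrs.PutStr(attributeDestinationAddress, addr)
	attrs.PutInt(attributeDestinationPort, port)
}

func main() {
	attrs := pcommon.NewMap()
	annotateDestination(attrs, "destination.example.com", 3389)
	fmt.Println(attrs.AsRaw())
}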
+const ( + // A unique identifier representing the device + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' + // Note: The device identifier MUST only be defined using the values outlined + // below. This value is not an advertising identifier and MUST NOT be used as + // such. On iOS (Swift or Objective-C), this value MUST be equal to the vendor + // identifier. On Android (Java or Kotlin), this value MUST be equal to the + // Firebase Installation ID or a globally unique UUID which is persisted across + // sessions in your application. More information can be found here on best + // practices and exact implementation details. Caution should be taken when + // storing personal data or anything which can identify a user. GDPR and data + // protection laws may apply, ensure you do your own due diligence. + AttributeDeviceID = "device.id" + // The name of the device manufacturer + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Apple', 'Samsung' + // Note: The Android OS provides this field via Build. iOS apps SHOULD hardcode + // the value Apple. + AttributeDeviceManufacturer = "device.manufacturer" + // The model identifier for the device + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'iPhone3,4', 'SM-G920F' + // Note: It's recommended this value represents a machine-readable version of the + // model identifier rather than the market or consumer-friendly name of the + // device. + AttributeDeviceModelIdentifier = "device.model.identifier" + // The marketing name for the device model + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' + // Note: It's recommended this value represents a human-readable version of the + // device model rather than a machine-readable alternative. + AttributeDeviceModelName = "device.model.name" +) + +// These attributes may be used for any disk related operation. +const ( + // The disk IO operation direction. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'read' + AttributeDiskIoDirection = "disk.io.direction" +) + +const ( + // read + AttributeDiskIoDirectionRead = "read" + // write + AttributeDiskIoDirectionWrite = "write" +) + +// The shared attributes used to report a DNS query. +const ( + // The name being queried. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'www.example.com', 'opentelemetry.io' + // Note: If the name field contains non-printable characters (below 32 or above + // 126), those characters should be represented as escaped base 10 integers + // (\DDD). Back slashes and quotes should be escaped. Tabs, carriage returns, and + // line feeds should be converted to \t, \r, and \n respectively. + AttributeDNSQuestionName = "dns.question.name" +) + +// Attributes for operations with an authenticated and/or authorized enduser. +const ( + // Username or client_id extracted from the access token or Authorization header + // in the inbound request from outside the system. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'username' + AttributeEnduserID = "enduser.id" + // Actual/assumed role the client is making the request under extracted from token + // or application security context. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'admin' + AttributeEnduserRole = "enduser.role" + // Scopes or granted authorities the client currently possesses extracted from + // token or application security context. The value would come from the scope + // associated with an OAuth 2.0 Access Token or an attribute value in a SAML 2.0 + // Assertion. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'read:message, write:files' + AttributeEnduserScope = "enduser.scope" +) + +// The shared attributes used to report an error. +const ( + // Describes a class of error the operation ended with. + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + // Examples: 'timeout', 'java.net.UnknownHostException', + // 'server_certificate_invalid', '500' + // Note: The error.type SHOULD be predictable and SHOULD have low cardinality. + // Instrumentations SHOULD document the list of errors they report.The cardinality + // of error.type within one instrumentation library SHOULD be low. + // Telemetry consumers that aggregate data from multiple instrumentation libraries + // and applications + // should be prepared for error.type to have high cardinality at query time when + // no + // additional filters are applied.If the operation has completed successfully, + // instrumentations SHOULD NOT set error.type.If a specific domain defines its own + // set of error identifiers (such as HTTP or gRPC status codes), + // it's RECOMMENDED to:
+ //
+ // • Use a domain-specific attribute
+ // • Set error.type to capture all errors, regardless of whether they are
+ //   defined within the domain-specific set or not.
+ //
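[Editor's aside, not part of the patch: one reading of the error.type recommendation above, as a sketch. The allow-list is hypothetical; anything an instrumentation has not documented collapses to the _OTHER fallback defined below.]

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pcommon"
)

// Local mirrors of the constants defined in this file.
const (
	attributeErrorType      = "error.type"
	attributeErrorTypeOther = "_OTHER"
)

// Hypothetical documented, low-cardinality error identifiers.
var knownErrors = map[string]bool{
	"timeout":                    true,
	"server_certificate_invalid": true,
}

// setErrorType keeps error.type low-cardinality by mapping anything
// outside the documented set to _OTHER.
func setErrorType(attrs pcommon.Map, errID string) {
	if !knownErrors[errID] {
		errID = attributeErrorTypeOther
	}
	attrs.PutStr(attributeErrorType, errID)
}

func main() {
	attrs := pcommon.NewMap()
	setErrorType(attrs, "econnreset") // undocumented -> _OTHER
	fmt.Println(attrs.AsRaw())
}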
    + AttributeErrorType = "error.type" +) + +const ( + // A fallback error value to be used when the instrumentation doesn't define a custom value + AttributeErrorTypeOther = "_OTHER" +) + +// The shared attributes used to report a single exception associated with a +// span or log. +const ( + // SHOULD be set to true if the exception event is recorded at a point where it is + // known that the exception is escaping the scope of the span. + // + // Type: boolean + // Requirement Level: Optional + // Stability: stable + // Note: An exception is considered to have escaped (or left) the scope of a span, + // if that span is ended while the exception is still logically "in + // flight". + // This may be actually "in flight" in some languages (e.g. if the + // exception + // is passed to a Context manager's __exit__ method in Python) but will + // usually be caught at the point of recording the exception in most languages.It + // is usually not possible to determine at the point where an exception is thrown + // whether it will escape the scope of a span. + // However, it is trivial to know that an exception + // will escape, if one checks for an active exception just before ending the span, + // as done in the example for recording span exceptions.It follows that an + // exception may still escape the scope of the span + // even if the exception.escaped attribute was not set or set to false, + // since the event might have been recorded at a time where it was not + // clear whether the exception will escape. + AttributeExceptionEscaped = "exception.escaped" + // The exception message. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'Division by zero', "Can't convert 'int' object to str implicitly" + AttributeExceptionMessage = "exception.message" + // A stacktrace as a string in the natural representation for the language + // runtime. The representation is to be determined and documented by each language + // SIG. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test + // exception\\n at ' + // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + AttributeExceptionStacktrace = "exception.stacktrace" + // The type of the exception (its fully-qualified class name, if applicable). The + // dynamic type of the exception should be preferred over the static type in + // languages that support it. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'java.net.ConnectException', 'OSError' + AttributeExceptionType = "exception.type" +) + +// FaaS attributes +const ( + // A boolean that is true if the serverless function is executed for the first + // time (aka cold-start). + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + AttributeFaaSColdstart = "faas.coldstart" + // A string containing the schedule period as Cron Expression. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '0/5 * * * ? *' + AttributeFaaSCron = "faas.cron" + // The name of the source on which the triggering operation was performed. For + // example, in Cloud Storage or S3 corresponds to the bucket name, and in Cosmos + // DB to the database name. 
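[Editor's aside, not part of the patch: a sketch of the exception.escaped semantics described above, checking for an unwinding panic just before the span ends, using the OpenTelemetry Go trace API; the tracer name is illustrative.]

package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"
)

// Local mirror of the constant defined above.
const attributeExceptionEscaped = "exception.escaped"

// do runs fn inside a span; if a panic is still unwinding when the span
// ends, the exception is by definition escaping the span's scope, so it
// is recorded with exception.escaped=true before re-panicking.
func do(ctx context.Context, fn func()) {
	_, span := otel.Tracer("example").Start(ctx, "do")
	defer func() {
		if r := recover(); r != nil {
			span.RecordError(fmt.Errorf("%v", r),
				trace.WithAttributes(attribute.Bool(attributeExceptionEscaped, true)))
			span.End()
			panic(r)
		}
		span.End()
	}()
	fn()
}

func main() {
	defer func() { recover() }() // swallow the re-panic for the demo
	do(context.Background(), func() { panic("boom") })
}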
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'myBucketName', 'myDBName' + AttributeFaaSDocumentCollection = "faas.document.collection" + // The document name/table subjected to the operation. For example, in Cloud + // Storage or S3 is the name of the file, and in Cosmos DB the table name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'myFile.txt', 'myTableName' + AttributeFaaSDocumentName = "faas.document.name" + // Describes the type of the operation that was performed on the data. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeFaaSDocumentOperation = "faas.document.operation" + // A string containing the time when the data was accessed in the ISO 8601 format + // expressed in UTC. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2020-01-23T13:47:06Z' + AttributeFaaSDocumentTime = "faas.document.time" + // The execution environment ID as a string, that will be potentially reused for + // other invocations to the same function/function version. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' + // Note:
+ //
+ // • AWS Lambda: Use the (full) log stream name.
+ //
    + AttributeFaaSInstance = "faas.instance" + // The invocation ID of the current function invocation. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' + AttributeFaaSInvocationID = "faas.invocation_id" + // The name of the invoked function. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'my-function' + // Note: SHOULD be equal to the faas.name resource attribute of the invoked + // function. + AttributeFaaSInvokedName = "faas.invoked_name" + // The cloud provider of the invoked function. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Note: SHOULD be equal to the cloud.provider resource attribute of the invoked + // function. + AttributeFaaSInvokedProvider = "faas.invoked_provider" + // The cloud region of the invoked function. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'eu-central-1' + // Note: SHOULD be equal to the cloud.region resource attribute of the invoked + // function. + AttributeFaaSInvokedRegion = "faas.invoked_region" + // The amount of memory available to the serverless function converted to Bytes. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 134217728 + // Note: It's recommended to set this attribute since e.g. too little memory can + // easily stop a Java AWS Lambda function from working correctly. On AWS Lambda, + // the environment variable AWS_LAMBDA_FUNCTION_MEMORY_SIZE provides this + // information (which must be multiplied by 1,048,576). + AttributeFaaSMaxMemory = "faas.max_memory" + // The name of the single function that this runtime instance executes. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'my-function', 'myazurefunctionapp/some-function-name' + // Note: This is the name of the function as configured/deployed on the FaaS + // platform and is usually different from the name of the callback + // function (which may be stored in the + // code.namespace/code.function + // span attributes).For some cloud providers, the above definition is ambiguous. + // The following + // definition of function name MUST be used for this attribute + // (and consequently the span name) for the listed cloud providers/products:
+ //
+ // • Azure: The full name <FUNCAPP>/<FUNC>, i.e., function app name
+ //   followed by a forward slash followed by the function name (this form
+ //   can also be seen in the resource JSON for the function).
+ //   This means that a span attribute MUST be used, as an Azure function
+ //   app can host multiple functions that would usually share
+ //   a TracerProvider (see also the cloud.resource_id attribute).
+ //
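[Editor's aside, not part of the patch: the faas.max_memory note above requires converting the megabyte value in AWS_LAMBDA_FUNCTION_MEMORY_SIZE to bytes by multiplying by 1,048,576; a minimal sketch using only the standard library.]

package main

import (
	"fmt"
	"os"
	"strconv"
)

// Local mirror of the constant defined above.
const attributeFaaSMaxMemory = "faas.max_memory"

// lambdaMaxMemoryBytes converts the MB value AWS Lambda exposes into
// bytes, e.g. 128 MB -> 128 * 1,048,576 = 134,217,728.
func lambdaMaxMemoryBytes() (int64, error) {
	mb, err := strconv.ParseInt(os.Getenv("AWS_LAMBDA_FUNCTION_MEMORY_SIZE"), 10, 64)
	if err != nil {
		return 0, err
	}
	return mb * 1048576, nil
}

func main() {
	os.Setenv("AWS_LAMBDA_FUNCTION_MEMORY_SIZE", "128")
	b, _ := lambdaMaxMemoryBytes()
	fmt.Printf("%s = %d\n", attributeFaaSMaxMemory, b) // faas.max_memory = 134217728
}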
    + AttributeFaaSName = "faas.name" + // A string containing the function invocation time in the ISO 8601 format + // expressed in UTC. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2020-01-23T13:47:06Z' + AttributeFaaSTime = "faas.time" + // Type of the trigger which caused this function invocation. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeFaaSTrigger = "faas.trigger" + // The immutable version of the function being executed. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '26', 'pinkfroid-00002' + // Note: Depending on the cloud provider and platform, use:
+ //
+ // • AWS Lambda: The function version
+ //   (an integer represented as a decimal string).
+ // • Google Cloud Run (Services): The revision
+ //   (i.e., the function name plus the revision suffix).
+ // • Google Cloud Functions: The value of the
+ //   K_REVISION environment variable.
+ // • Azure Functions: Not applicable. Do not set this attribute.
+ //
    + AttributeFaaSVersion = "faas.version" +) + +const ( + // When a new object is created + AttributeFaaSDocumentOperationInsert = "insert" + // When an object is modified + AttributeFaaSDocumentOperationEdit = "edit" + // When an object is deleted + AttributeFaaSDocumentOperationDelete = "delete" +) + +const ( + // Alibaba Cloud + AttributeFaaSInvokedProviderAlibabaCloud = "alibaba_cloud" + // Amazon Web Services + AttributeFaaSInvokedProviderAWS = "aws" + // Microsoft Azure + AttributeFaaSInvokedProviderAzure = "azure" + // Google Cloud Platform + AttributeFaaSInvokedProviderGCP = "gcp" + // Tencent Cloud + AttributeFaaSInvokedProviderTencentCloud = "tencent_cloud" +) + +const ( + // A response to some data source operation such as a database or filesystem read/write + AttributeFaaSTriggerDatasource = "datasource" + // To provide an answer to an inbound HTTP request + AttributeFaaSTriggerHTTP = "http" + // A function is set to be executed when messages are sent to a messaging system + AttributeFaaSTriggerPubsub = "pubsub" + // A function is scheduled to be executed regularly + AttributeFaaSTriggerTimer = "timer" + // If none of the others apply + AttributeFaaSTriggerOther = "other" +) + +// Attributes for Feature Flags. +const ( + // The unique identifier of the feature flag. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'logo-color' + AttributeFeatureFlagKey = "feature_flag.key" + // The name of the service provider that performs the flag evaluation. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Flag Manager' + AttributeFeatureFlagProviderName = "feature_flag.provider_name" + // SHOULD be a semantic identifier for a value. If one is unavailable, a + // stringified version of the value can be used. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'red', 'true', 'on' + // Note: A semantic identifier, commonly referred to as a variant, provides a + // means + // for referring to a value without including the value itself. This can + // provide additional context for understanding the meaning behind a value. + // For example, the variant red maybe be used for the value #c05543.A stringified + // version of the value can be used in situations where a + // semantic identifier is unavailable. String representation of the value + // should be determined by the implementer. + AttributeFeatureFlagVariant = "feature_flag.variant" +) + +// Describes file attributes. +const ( + // Directory where the file is located. It should include the drive letter, when + // appropriate. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '/home/user', 'C:\\Program Files\\MyApp' + AttributeFileDirectory = "file.directory" + // File extension, excluding the leading dot. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'png', 'gz' + // Note: When the file name has multiple extensions (example.tar.gz), only the + // last one should be captured ("gz", not "tar.gz"). + AttributeFileExtension = "file.extension" + // Name of the file including the extension, without the directory. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'example.png' + AttributeFileName = "file.name" + // Full path to the file, including the file name. It should include the drive + // letter, when appropriate. 
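[Editor's aside, not part of the patch: the file.extension note above keeps only the last extension ("gz", not "tar.gz") without the leading dot; a sketch of deriving the file.* attributes with the standard library.]

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// Local mirrors of the constants defined above.
const (
	attributeFileDirectory = "file.directory"
	attributeFileExtension = "file.extension"
	attributeFileName      = "file.name"
)

// fileAttrs derives the file.* attributes from a path; filepath.Ext
// already returns only the final extension, so example.tar.gz yields "gz".
func fileAttrs(path string) map[string]string {
	return map[string]string{
		attributeFileDirectory: filepath.Dir(path),
		attributeFileName:      filepath.Base(path),
		attributeFileExtension: strings.TrimPrefix(filepath.Ext(path), "."),
	}
}

func main() {
	fmt.Println(fileAttrs("/home/user/example.tar.gz"))
	// map[file.directory:/home/user file.extension:gz file.name:example.tar.gz]
}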
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '/home/alice/example.png', 'C:\\Program Files\\MyApp\\myapp.exe' + AttributeFilePath = "file.path" + // File size in bytes. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + AttributeFileSize = "file.size" +) + +// Attributes for Google Cloud Run. +const ( + // The name of the Cloud Run execution being run for the Job, as set by the + // CLOUD_RUN_EXECUTION environment variable. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'job-name-xxxx', 'sample-job-mdw84' + AttributeGCPCloudRunJobExecution = "gcp.cloud_run.job.execution" + // The index for a task within an execution as provided by the + // CLOUD_RUN_TASK_INDEX environment variable. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 0, 1 + AttributeGCPCloudRunJobTaskIndex = "gcp.cloud_run.job.task_index" +) + +// Attributes for Google Compute Engine (GCE). +const ( + // The hostname of a GCE instance. This is the full value of the default or custom + // hostname. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'my-host1234.example.com', 'sample-vm.us-west1-b.c.my- + // project.internal' + AttributeGCPGceInstanceHostname = "gcp.gce.instance.hostname" + // The instance name of a GCE instance. This is the value provided by host.name, + // the visible name of the instance in the Cloud Console UI, and the prefix for + // the default hostname of the instance as defined by the default internal DNS + // name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'instance-1', 'my-vm-name' + AttributeGCPGceInstanceName = "gcp.gce.instance.name" +) + +// A host is defined as a computing instance. For example, physical servers, +// virtual machines, switches or disk array. +const ( + // The CPU architecture the host system is running on. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeHostArch = "host.arch" + // The amount of level 2 memory cache available to the processor (in Bytes). + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 12288000 + AttributeHostCPUCacheL2Size = "host.cpu.cache.l2.size" + // Family or generation of the CPU. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '6', 'PA-RISC 1.1e' + AttributeHostCPUFamily = "host.cpu.family" + // Model identifier. It provides more granular information about the CPU, + // distinguishing it from other CPUs within the same family. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '6', '9000/778/B180L' + AttributeHostCPUModelID = "host.cpu.model.id" + // Model designation of the processor. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz' + AttributeHostCPUModelName = "host.cpu.model.name" + // Stepping or core revisions. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '1', 'r1p1' + AttributeHostCPUStepping = "host.cpu.stepping" + // Processor manufacturer identifier. A maximum 12-character string. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'GenuineIntel' + // Note: CPUID command returns the vendor ID string in EBX, EDX and ECX registers. + // Writing these to memory in this order results in a 12-character string. + AttributeHostCPUVendorID = "host.cpu.vendor.id" + // Unique host ID. For Cloud, this must be the instance_id assigned by the cloud + // provider. For non-containerized systems, this should be the machine-id. See the + // table below for the sources to use to determine the machine-id based on + // operating system. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'fdbf79e8af94cb7f9e8df36789187052' + AttributeHostID = "host.id" + // VM image ID or host OS image ID. For Cloud, this value is from the provider. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'ami-07b06b442921831e5' + AttributeHostImageID = "host.image.id" + // Name of the VM image or OS install the host was instantiated from. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' + AttributeHostImageName = "host.image.name" + // The version string of the VM image or host OS as defined in Version Attributes. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '0.1' + AttributeHostImageVersion = "host.image.version" + // Available IP addresses of the host, excluding loopback interfaces. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: '192.168.1.140', 'fe80::abc2:4a28:737a:609e' + // Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6 addresses + // MUST be specified in the RFC 5952 format. + AttributeHostIP = "host.ip" + // Available MAC addresses of the host, excluding loopback interfaces. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'AC-DE-48-23-45-67', 'AC-DE-48-23-45-67-01-9F' + // Note: MAC Addresses MUST be represented in IEEE RA hexadecimal form: as hyphen- + // separated octets in uppercase hexadecimal form from most to least significant. + AttributeHostMac = "host.mac" + // Name of the host. On Unix systems, it may contain what the hostname command + // returns, or the fully qualified hostname, or another name specified by the + // user. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry-test' + AttributeHostName = "host.name" + // Type of host. For Cloud, this must be the machine type. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'n1-standard-1' + AttributeHostType = "host.type" +) + +const ( + // AMD64 + AttributeHostArchAMD64 = "amd64" + // ARM32 + AttributeHostArchARM32 = "arm32" + // ARM64 + AttributeHostArchARM64 = "arm64" + // Itanium + AttributeHostArchIA64 = "ia64" + // 32-bit PowerPC + AttributeHostArchPPC32 = "ppc32" + // 64-bit PowerPC + AttributeHostArchPPC64 = "ppc64" + // IBM z/Architecture + AttributeHostArchS390x = "s390x" + // 32-bit x86 + AttributeHostArchX86 = "x86" +) + +// Semantic convention attributes in the HTTP namespace. +const ( + // State of the HTTP connection in the HTTP connection pool. 
+ // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'active', 'idle' + AttributeHTTPConnectionState = "http.connection.state" + // The size of the request payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as the + // Content-Length header. For requests using transport encoding, this should be + // the compressed size. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 3495 + AttributeHTTPRequestBodySize = "http.request.body.size" + // HTTP request method. + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + // Examples: 'GET', 'POST', 'HEAD' + // Note: HTTP request method value SHOULD be "known" to the + // instrumentation. + // By default, this convention defines "known" methods as the ones + // listed in RFC9110 + // and the PATCH method defined in RFC5789.If the HTTP request method is not known + // to instrumentation, it MUST set the http.request.method attribute to _OTHER.If + // the HTTP instrumentation could end up converting valid HTTP request methods to + // _OTHER, then it MUST provide a way to override + // the list of known HTTP methods. If this override is done via environment + // variable, then the environment variable MUST be named + // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated list of + // case-sensitive known HTTP methods + // (this list MUST be a full override of the default known method, it is not a + // list of known methods in addition to the defaults).HTTP method names are case- + // sensitive and http.request.method attribute value MUST match a known HTTP + // method name exactly. + // Instrumentations for specific web frameworks that consider HTTP methods to be + // case insensitive, SHOULD populate a canonical equivalent. + // Tracing instrumentations that do so, MUST also set http.request.method_original + // to the original value. + AttributeHTTPRequestMethod = "http.request.method" + // Original HTTP method sent by the client in the request line. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'GeT', 'ACL', 'foo' + AttributeHTTPRequestMethodOriginal = "http.request.method_original" + // The ordinal number of request resending attempt (for any reason, including + // redirects). + // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 3 + // Note: The resend count SHOULD be updated each time an HTTP request gets resent + // by the client, regardless of what was the cause of the resending (e.g. + // redirection, authorization failure, 503 Server Unavailable, network issues, or + // any other). + AttributeHTTPRequestResendCount = "http.request.resend_count" + // The total size of the request in bytes. This should be the total number of + // bytes sent over the wire, including the request line (HTTP/1.1), framing + // (HTTP/2 and HTTP/3), headers, and request body if any. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 1437 + AttributeHTTPRequestSize = "http.request.size" + // The size of the response payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as the + // Content-Length header. For requests using transport encoding, this should be + // the compressed size. 
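[Editor's aside, not part of the patch: one possible implementation of the http.request.method normalization described above, as a sketch: unknown methods become _OTHER, and whenever the reported value differs from the wire value, http.request.method_original preserves the original.]

package main

import (
	"fmt"
	"strings"
)

// Local mirrors of the constants defined in this file.
const (
	attributeHTTPRequestMethod         = "http.request.method"
	attributeHTTPRequestMethodOriginal = "http.request.method_original"
	attributeHTTPRequestMethodOther    = "_OTHER"
)

// RFC 9110 methods plus PATCH (RFC 5789), the default "known" set.
var knownMethods = map[string]bool{
	"CONNECT": true, "DELETE": true, "GET": true, "HEAD": true,
	"OPTIONS": true, "PATCH": true, "POST": true, "PUT": true, "TRACE": true,
}

// methodAttrs canonicalizes the wire value; if it is still unknown it
// becomes _OTHER, and the original value is kept in method_original.
func methodAttrs(wire string) map[string]string {
	attrs := map[string]string{}
	canonical := strings.ToUpper(wire)
	if !knownMethods[canonical] {
		canonical = attributeHTTPRequestMethodOther
	}
	if canonical != wire {
		attrs[attributeHTTPRequestMethodOriginal] = wire
	}
	attrs[attributeHTTPRequestMethod] = canonical
	return attrs
}

func main() {
	fmt.Println(methodAttrs("GeT")) // method=GET, method_original=GeT
	fmt.Println(methodAttrs("foo")) // method=_OTHER, method_original=foo
}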
+ // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 3495 + AttributeHTTPResponseBodySize = "http.response.body.size" + // The total size of the response in bytes. This should be the total number of + // bytes sent over the wire, including the status line (HTTP/1.1), framing (HTTP/2 + // and HTTP/3), headers, and response body and trailers if any. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 1437 + AttributeHTTPResponseSize = "http.response.size" + // HTTP response status code. + // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 200 + AttributeHTTPResponseStatusCode = "http.response.status_code" + // The matched route, that is, the path template in the format used by the + // respective server framework. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '/users/:userID?', '{controller}/{action}/{id?}' + // Note: MUST NOT be populated when this is not supported by the HTTP server + // framework as the route attribute should have low-cardinality and the URI path + // can NOT substitute it. + // SHOULD include the application root if there is one. + AttributeHTTPRoute = "http.route" +) + +const ( + // active state + AttributeHTTPConnectionStateActive = "active" + // idle state + AttributeHTTPConnectionStateIdle = "idle" +) + +const ( + // CONNECT method + AttributeHTTPRequestMethodConnect = "CONNECT" + // DELETE method + AttributeHTTPRequestMethodDelete = "DELETE" + // GET method + AttributeHTTPRequestMethodGet = "GET" + // HEAD method + AttributeHTTPRequestMethodHead = "HEAD" + // OPTIONS method + AttributeHTTPRequestMethodOptions = "OPTIONS" + // PATCH method + AttributeHTTPRequestMethodPatch = "PATCH" + // POST method + AttributeHTTPRequestMethodPost = "POST" + // PUT method + AttributeHTTPRequestMethodPut = "PUT" + // TRACE method + AttributeHTTPRequestMethodTrace = "TRACE" + // Any HTTP method that the instrumentation has no prior knowledge of + AttributeHTTPRequestMethodOther = "_OTHER" +) + +// Kubernetes resource attributes. +const ( + // The name of the cluster. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry-cluster' + AttributeK8SClusterName = "k8s.cluster.name" + // A pseudo-ID for the cluster, set to the UID of the kube-system namespace. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '218fc5a9-a5f1-4b54-aa05-46717d0ab26d' + // Note: K8S doesn't have support for obtaining a cluster ID. If this is ever + // added, we will recommend collecting the k8s.cluster.uid through the + // official APIs. In the meantime, we are able to use the uid of the + // kube-system namespace as a proxy for cluster ID. Read on for the + // rationale.Every object created in a K8S cluster is assigned a distinct UID. The + // kube-system namespace is used by Kubernetes itself and will exist + // for the lifetime of the cluster. Using the uid of the kube-system + // namespace is a reasonable proxy for the K8S ClusterID as it will only + // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are + // UUIDs as standardized by + // ISO/IEC 9834-8 and ITU-T X.667. + // Which states:
    + // If generated according to one of the mechanisms defined in Rec.
    + // ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be + // different from all other UUIDs generated before 3603 A.D., or is + // extremely likely to be different (depending on the mechanism + // chosen).Therefore, UIDs between clusters should be extremely unlikely to + // conflict. + AttributeK8SClusterUID = "k8s.cluster.uid" + // The name of the Container from Pod specification, must be unique within a Pod. + // Container runtime usually uses different globally unique name (container.name). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'redis' + AttributeK8SContainerName = "k8s.container.name" + // Number of times the container was restarted. This attribute can be used to + // identify a particular container (running or stopped) within a container spec. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 0, 2 + AttributeK8SContainerRestartCount = "k8s.container.restart_count" + // The name of the CronJob. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry' + AttributeK8SCronJobName = "k8s.cronjob.name" + // The UID of the CronJob. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SCronJobUID = "k8s.cronjob.uid" + // The name of the DaemonSet. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry' + AttributeK8SDaemonSetName = "k8s.daemonset.name" + // The UID of the DaemonSet. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SDaemonSetUID = "k8s.daemonset.uid" + // The name of the Deployment. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry' + AttributeK8SDeploymentName = "k8s.deployment.name" + // The UID of the Deployment. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SDeploymentUID = "k8s.deployment.uid" + // The name of the Job. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry' + AttributeK8SJobName = "k8s.job.name" + // The UID of the Job. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SJobUID = "k8s.job.uid" + // The name of the namespace that the pod is running in. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'default' + AttributeK8SNamespaceName = "k8s.namespace.name" + // The name of the Node. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'node-1' + AttributeK8SNodeName = "k8s.node.name" + // The UID of the Node. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' + AttributeK8SNodeUID = "k8s.node.uid" + // The name of the Pod. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry-pod-autoconf' + AttributeK8SPodName = "k8s.pod.name" + // The UID of the Pod. 
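[Editor's aside, not part of the patch: a sketch of the k8s.cluster.uid proxy described above, reading the kube-system namespace UID with client-go; the in-cluster config assumption and tracer-less wiring are illustrative only.]

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

// Local mirror of the constant defined above.
const attributeK8SClusterUID = "k8s.cluster.uid"

// clusterUID uses the UID of the kube-system namespace, which exists for
// the lifetime of the cluster, as the cluster-ID proxy described above.
func clusterUID(ctx context.Context, cs kubernetes.Interface) (string, error) {
	ns, err := cs.CoreV1().Namespaces().Get(ctx, "kube-system", metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	return string(ns.UID), nil
}

func main() {
	cfg, err := rest.InClusterConfig() // assumes the process runs inside a pod
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)
	uid, _ := clusterUID(context.Background(), cs)
	fmt.Printf("%s = %s\n", attributeK8SClusterUID, uid)
}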
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SPodUID = "k8s.pod.uid" + // The name of the ReplicaSet. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry' + AttributeK8SReplicaSetName = "k8s.replicaset.name" + // The UID of the ReplicaSet. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SReplicaSetUID = "k8s.replicaset.uid" + // The name of the StatefulSet. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry' + AttributeK8SStatefulSetName = "k8s.statefulset.name" + // The UID of the StatefulSet. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SStatefulSetUID = "k8s.statefulset.uid" +) + +// Attributes describing telemetry around messaging systems and messaging +// activities. +const ( + // The number of messages sent, received, or processed in the scope of the + // batching operation. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 0, 1, 2 + // Note: Instrumentations SHOULD NOT set messaging.batch.message_count on spans + // that operate with a single message. When a messaging client library supports + // both batch and single-message API for the same operation, instrumentations + // SHOULD use messaging.batch.message_count for batching APIs and SHOULD NOT use + // it for single-message APIs. + AttributeMessagingBatchMessageCount = "messaging.batch.message_count" + // A unique identifier for the client that consumes or produces a message. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'client-5', 'myhost@8742@s8083jm' + AttributeMessagingClientID = "messaging.client_id" + // A boolean that is true if the message destination is anonymous (could be + // unnamed or have auto-generated name). + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + AttributeMessagingDestinationAnonymous = "messaging.destination.anonymous" + // The message destination name + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'MyQueue', 'MyTopic' + // Note: Destination name SHOULD uniquely identify a specific queue, topic or + // other entity within the broker. If + // the broker doesn't have such notion, the destination name SHOULD uniquely + // identify the broker. + AttributeMessagingDestinationName = "messaging.destination.name" + // The identifier of the partition messages are sent to or received from, unique + // within the messaging.destination.name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '1' + AttributeMessagingDestinationPartitionID = "messaging.destination.partition.id" + // Low cardinality representation of the messaging destination name + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '/customers/{customerID}' + // Note: Destination names could be constructed from templates. An example would + // be a destination name involving a user name or product id. 
Although the + // destination name in this case is of high cardinality, the underlying template + // is of low cardinality and can be effectively used for grouping and aggregation. + AttributeMessagingDestinationTemplate = "messaging.destination.template" + // A boolean that is true if the message destination is temporary and might not + // exist anymore after messages are processed. + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + AttributeMessagingDestinationTemporary = "messaging.destination.temporary" + // A boolean that is true if the publish message destination is anonymous (could + // be unnamed or have auto-generated name). + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + AttributeMessagingDestinationPublishAnonymous = "messaging.destination_publish.anonymous" + // The name of the original destination the message was published to + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'MyQueue', 'MyTopic' + // Note: The name SHOULD uniquely identify a specific queue, topic, or other + // entity within the broker. If + // the broker doesn't have such notion, the original destination name SHOULD + // uniquely identify the broker. + AttributeMessagingDestinationPublishName = "messaging.destination_publish.name" + // The name of the consumer group the event consumer is associated with. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'indexer' + AttributeMessagingEventhubsConsumerGroup = "messaging.eventhubs.consumer.group" + // The UTC epoch seconds at which the message has been accepted and stored in the + // entity. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 1701393730 + AttributeMessagingEventhubsMessageEnqueuedTime = "messaging.eventhubs.message.enqueued_time" + // The ordering key for a given message. If the attribute is not present, the + // message does not have an ordering key. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'ordering_key' + AttributeMessagingGCPPubsubMessageOrderingKey = "messaging.gcp_pubsub.message.ordering_key" + // Name of the Kafka Consumer Group that is handling the message. Only applies to + // consumers, not producers. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'my-group' + AttributeMessagingKafkaConsumerGroup = "messaging.kafka.consumer.group" + // Message keys in Kafka are used for grouping alike messages to ensure they're + // processed on the same partition. They differ from messaging.message.id in that + // they're not unique. If the key is null, the attribute MUST NOT be set. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'myKey' + // Note: If the key type is not string, it's string representation has to be + // supplied for the attribute. If the key has no unambiguous, canonical string + // form, don't include its value. + AttributeMessagingKafkaMessageKey = "messaging.kafka.message.key" + // The offset of a record in the corresponding Kafka partition. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 42 + AttributeMessagingKafkaMessageOffset = "messaging.kafka.message.offset" + // A boolean that is true if the message is a tombstone. 
+ // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + AttributeMessagingKafkaMessageTombstone = "messaging.kafka.message.tombstone" + // The size of the message body in bytes. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 1439 + // Note: This can refer to both the compressed or uncompressed body size. If both + // sizes are known, the uncompressed + // body size should be used. + AttributeMessagingMessageBodySize = "messaging.message.body.size" + // The conversation ID identifying the conversation to which the message belongs, + // represented as a string. Sometimes called "Correlation ID". + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'MyConversationID' + AttributeMessagingMessageConversationID = "messaging.message.conversation_id" + // The size of the message body and metadata in bytes. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 2738 + // Note: This can refer to both the compressed or uncompressed size. If both sizes + // are known, the uncompressed + // size should be used. + AttributeMessagingMessageEnvelopeSize = "messaging.message.envelope.size" + // A value used by the messaging system as an identifier for the message, + // represented as a string. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '452a7c7c7c7048c2f887f61572b18fc2' + AttributeMessagingMessageID = "messaging.message.id" + // A string identifying the kind of messaging operation. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Note: If a custom value is used, it MUST be of low cardinality. + AttributeMessagingOperation = "messaging.operation" + // RabbitMQ message routing key. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'myKey' + AttributeMessagingRabbitmqDestinationRoutingKey = "messaging.rabbitmq.destination.routing_key" + // RabbitMQ message delivery tag + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 123 + AttributeMessagingRabbitmqMessageDeliveryTag = "messaging.rabbitmq.message.delivery_tag" + // Name of the RocketMQ producer/consumer group that is handling the message. The + // client type is identified by the SpanKind. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'myConsumerGroup' + AttributeMessagingRocketmqClientGroup = "messaging.rocketmq.client_group" + // Model of message consumption. This only applies to consumer spans. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeMessagingRocketmqConsumptionModel = "messaging.rocketmq.consumption_model" + // The delay time level for delay message, which determines the message delay + // time. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 3 + AttributeMessagingRocketmqMessageDelayTimeLevel = "messaging.rocketmq.message.delay_time_level" + // The timestamp in milliseconds that the delay message is expected to be + // delivered to consumer. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 1665987217045 + AttributeMessagingRocketmqMessageDeliveryTimestamp = "messaging.rocketmq.message.delivery_timestamp" + // It is essential for FIFO message. 
Messages that belong to the same message + // group are always processed one by one within the same consumer group. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'myMessageGroup' + AttributeMessagingRocketmqMessageGroup = "messaging.rocketmq.message.group" + // Key(s) of message, another way to mark message besides message id. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'keyA', 'keyB' + AttributeMessagingRocketmqMessageKeys = "messaging.rocketmq.message.keys" + // The secondary classifier of message besides topic. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'tagA' + AttributeMessagingRocketmqMessageTag = "messaging.rocketmq.message.tag" + // Type of message. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeMessagingRocketmqMessageType = "messaging.rocketmq.message.type" + // Namespace of RocketMQ resources, resources in different namespaces are + // individual. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'myNamespace' + AttributeMessagingRocketmqNamespace = "messaging.rocketmq.namespace" + // The name of the subscription in the topic messages are received from. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'mySubscription' + AttributeMessagingServicebusDestinationSubscriptionName = "messaging.servicebus.destination.subscription_name" + // Describes the settlement type. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeMessagingServicebusDispositionStatus = "messaging.servicebus.disposition_status" + // Number of deliveries that have been attempted for this message. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 2 + AttributeMessagingServicebusMessageDeliveryCount = "messaging.servicebus.message.delivery_count" + // The UTC epoch seconds at which the message has been accepted and stored in the + // entity. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 1701393730 + AttributeMessagingServicebusMessageEnqueuedTime = "messaging.servicebus.message.enqueued_time" + // An identifier for the messaging system being used. See below for a list of + // well-known identifiers. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeMessagingSystem = "messaging.system" +) + +const ( + // One or more messages are provided for publishing to an intermediary. If a single message is published, the context of the "Publish" span can be used as the creation context and no "Create" span needs to be created + AttributeMessagingOperationPublish = "publish" + // A message is created. "Create" spans always refer to a single message and are used to provide a unique creation context for messages in batch publishing scenarios + AttributeMessagingOperationCreate = "create" + // One or more messages are requested by a consumer. 
This operation refers to pull-based scenarios, where consumers explicitly call methods of messaging SDKs to receive messages + AttributeMessagingOperationReceive = "receive" + // One or more messages are delivered to or processed by a consumer + AttributeMessagingOperationDeliver = "process" + // One or more messages are settled + AttributeMessagingOperationSettle = "settle" +) + +const ( + // Clustering consumption model + AttributeMessagingRocketmqConsumptionModelClustering = "clustering" + // Broadcasting consumption model + AttributeMessagingRocketmqConsumptionModelBroadcasting = "broadcasting" +) + +const ( + // Normal message + AttributeMessagingRocketmqMessageTypeNormal = "normal" + // FIFO message + AttributeMessagingRocketmqMessageTypeFifo = "fifo" + // Delay message + AttributeMessagingRocketmqMessageTypeDelay = "delay" + // Transaction message + AttributeMessagingRocketmqMessageTypeTransaction = "transaction" +) + +const ( + // Message is completed + AttributeMessagingServicebusDispositionStatusComplete = "complete" + // Message is abandoned + AttributeMessagingServicebusDispositionStatusAbandon = "abandon" + // Message is sent to dead letter queue + AttributeMessagingServicebusDispositionStatusDeadLetter = "dead_letter" + // Message is deferred + AttributeMessagingServicebusDispositionStatusDefer = "defer" +) + +const ( + // Apache ActiveMQ + AttributeMessagingSystemActivemq = "activemq" + // Amazon Simple Queue Service (SQS) + AttributeMessagingSystemAWSSqs = "aws_sqs" + // Azure Event Grid + AttributeMessagingSystemEventgrid = "eventgrid" + // Azure Event Hubs + AttributeMessagingSystemEventhubs = "eventhubs" + // Azure Service Bus + AttributeMessagingSystemServicebus = "servicebus" + // Google Cloud Pub/Sub + AttributeMessagingSystemGCPPubsub = "gcp_pubsub" + // Java Message Service + AttributeMessagingSystemJms = "jms" + // Apache Kafka + AttributeMessagingSystemKafka = "kafka" + // RabbitMQ + AttributeMessagingSystemRabbitmq = "rabbitmq" + // Apache RocketMQ + AttributeMessagingSystemRocketmq = "rocketmq" +) + +// These attributes may be used for any network related operation. +const ( + // The ISO 3166-1 alpha-2 2-character country code associated with the mobile + // carrier network. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'DE' + AttributeNetworkCarrierIcc = "network.carrier.icc" + // The mobile carrier country code. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '310' + AttributeNetworkCarrierMcc = "network.carrier.mcc" + // The mobile carrier network code. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '001' + AttributeNetworkCarrierMnc = "network.carrier.mnc" + // The name of the mobile carrier. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'sprint' + AttributeNetworkCarrierName = "network.carrier.name" + // This describes more details regarding the connection.type. It may be the type + // of cell technology connection, but it could be used for describing details + // about a wifi connection. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'LTE' + AttributeNetworkConnectionSubtype = "network.connection.subtype" + // The internet connection type. 
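[Editor's aside, not part of the patch: a sketch of annotating a pull-based batch "receive" operation with the messaging attributes above; per the batch.message_count note, single-message receive paths SHOULD NOT set that attribute. Kafka is just an example system.]

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pcommon"
)

// Local mirrors of the constants defined above.
const (
	attributeMessagingSystem            = "messaging.system"
	attributeMessagingSystemKafka       = "kafka"
	attributeMessagingOperation         = "messaging.operation"
	attributeMessagingOperationReceive  = "receive"
	attributeMessagingBatchMessageCount = "messaging.batch.message_count"
)

// receiveAttrs annotates a pull-based batch receive.
func receiveAttrs(attrs pcommon.Map, batchCount int64) {
	attrs.PutStr(attributeMessagingSystem, attributeMessagingSystemKafka)
	attrs.PutStr(attributeMessagingOperation, attributeMessagingOperationReceive)
	// Set only because this is a batch API; single-message receive paths
	// SHOULD NOT set messaging.batch.message_count.
	attrs.PutInt(attributeMessagingBatchMessageCount, batchCount)
}

func main() {
	attrs := pcommon.NewMap()
	receiveAttrs(attrs, 12)
	fmt.Println(attrs.AsRaw())
}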
+ // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'wifi' + AttributeNetworkConnectionType = "network.connection.type" + // The network IO operation direction. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'transmit' + AttributeNetworkIoDirection = "network.io.direction" + // Local address of the network connection - IP address or Unix domain socket + // name. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '10.1.2.80', '/tmp/my.sock' + AttributeNetworkLocalAddress = "network.local.address" + // Local port number of the network connection. + // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 65123 + AttributeNetworkLocalPort = "network.local.port" + // Peer address of the network connection - IP address or Unix domain socket name. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '10.1.2.80', '/tmp/my.sock' + AttributeNetworkPeerAddress = "network.peer.address" + // Peer port number of the network connection. + // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 65123 + AttributeNetworkPeerPort = "network.peer.port" + // OSI application layer or non-OSI equivalent. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'amqp', 'http', 'mqtt' + // Note: The value SHOULD be normalized to lowercase. + AttributeNetworkProtocolName = "network.protocol.name" + // The actual version of the protocol used for network communication. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '1.1', '2' + // Note: If protocol version is subject to negotiation (for example using ALPN), + // this attribute SHOULD be set to the negotiated version. If the actual protocol + // version is not known, this attribute SHOULD NOT be set. + AttributeNetworkProtocolVersion = "network.protocol.version" + // OSI transport layer or inter-process communication method. + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + // Examples: 'tcp', 'udp' + // Note: The value SHOULD be normalized to lowercase.Consider always setting the + // transport when setting a port number, since + // a port number is ambiguous without knowing the transport. For example + // different processes could be listening on TCP port 12345 and UDP port 12345. + AttributeNetworkTransport = "network.transport" + // OSI network layer or non-OSI equivalent. + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + // Examples: 'ipv4', 'ipv6' + // Note: The value SHOULD be normalized to lowercase. + AttributeNetworkType = "network.type" +) + +const ( + // GPRS + AttributeNetworkConnectionSubtypeGprs = "gprs" + // EDGE + AttributeNetworkConnectionSubtypeEdge = "edge" + // UMTS + AttributeNetworkConnectionSubtypeUmts = "umts" + // CDMA + AttributeNetworkConnectionSubtypeCdma = "cdma" + // EVDO Rel. 0 + AttributeNetworkConnectionSubtypeEvdo0 = "evdo_0" + // EVDO Rev. A + AttributeNetworkConnectionSubtypeEvdoA = "evdo_a" + // CDMA2000 1XRTT + AttributeNetworkConnectionSubtypeCdma20001xrtt = "cdma2000_1xrtt" + // HSDPA + AttributeNetworkConnectionSubtypeHsdpa = "hsdpa" + // HSUPA + AttributeNetworkConnectionSubtypeHsupa = "hsupa" + // HSPA + AttributeNetworkConnectionSubtypeHspa = "hspa" + // IDEN + AttributeNetworkConnectionSubtypeIden = "iden" + // EVDO Rev. 
B + AttributeNetworkConnectionSubtypeEvdoB = "evdo_b" + // LTE + AttributeNetworkConnectionSubtypeLte = "lte" + // EHRPD + AttributeNetworkConnectionSubtypeEhrpd = "ehrpd" + // HSPAP + AttributeNetworkConnectionSubtypeHspap = "hspap" + // GSM + AttributeNetworkConnectionSubtypeGsm = "gsm" + // TD-SCDMA + AttributeNetworkConnectionSubtypeTdScdma = "td_scdma" + // IWLAN + AttributeNetworkConnectionSubtypeIwlan = "iwlan" + // 5G NR (New Radio) + AttributeNetworkConnectionSubtypeNr = "nr" + // 5G NRNSA (New Radio Non-Standalone) + AttributeNetworkConnectionSubtypeNrnsa = "nrnsa" + // LTE CA + AttributeNetworkConnectionSubtypeLteCa = "lte_ca" +) + +const ( + // wifi + AttributeNetworkConnectionTypeWifi = "wifi" + // wired + AttributeNetworkConnectionTypeWired = "wired" + // cell + AttributeNetworkConnectionTypeCell = "cell" + // unavailable + AttributeNetworkConnectionTypeUnavailable = "unavailable" + // unknown + AttributeNetworkConnectionTypeUnknown = "unknown" +) + +const ( + // transmit + AttributeNetworkIoDirectionTransmit = "transmit" + // receive + AttributeNetworkIoDirectionReceive = "receive" +) + +const ( + // TCP + AttributeNetworkTransportTCP = "tcp" + // UDP + AttributeNetworkTransportUDP = "udp" + // Named or anonymous pipe + AttributeNetworkTransportPipe = "pipe" + // Unix domain socket + AttributeNetworkTransportUnix = "unix" +) + +const ( + // IPv4 + AttributeNetworkTypeIpv4 = "ipv4" + // IPv6 + AttributeNetworkTypeIpv6 = "ipv6" +) + +// An OCI image manifest. +const ( + // The digest of the OCI image manifest. For container images specifically is the + // digest by which the container image is known. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: + // 'sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4' + // Note: Follows OCI Image Manifest Specification, and specifically the Digest + // property. + // An example can be found in Example Image Manifest. + AttributeOciManifestDigest = "oci.manifest.digest" +) + +// The operating system (OS) on which the process represented by this resource +// is running. +const ( + // Unique identifier for a particular build or compilation of the operating + // system. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'TQ3C.230805.001.B2', '20E247', '22621' + AttributeOSBuildID = "os.build_id" + // Human readable (not intended to be parsed) OS version information, like e.g. + // reported by ver or lsb_release -a commands. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 LTS' + AttributeOSDescription = "os.description" + // Human readable operating system name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'iOS', 'Android', 'Ubuntu' + AttributeOSName = "os.name" + // The operating system type. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeOSType = "os.type" + // The version string of the operating system as defined in Version Attributes. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '14.2.1', '18.04.1' + AttributeOSVersion = "os.version" +) + +const ( + // Microsoft Windows + AttributeOSTypeWindows = "windows" + // Linux + AttributeOSTypeLinux = "linux" + // Apple Darwin + AttributeOSTypeDarwin = "darwin" + // FreeBSD + AttributeOSTypeFreeBSD = "freebsd" + // NetBSD + AttributeOSTypeNetBSD = "netbsd" + // OpenBSD + AttributeOSTypeOpenBSD = "openbsd" + // DragonFly BSD + AttributeOSTypeDragonflyBSD = "dragonflybsd" + // HP-UX (Hewlett Packard Unix) + AttributeOSTypeHPUX = "hpux" + // AIX (Advanced Interactive eXecutive) + AttributeOSTypeAIX = "aix" + // SunOS, Oracle Solaris + AttributeOSTypeSolaris = "solaris" + // IBM z/OS + AttributeOSTypeZOS = "z_os" +) + +// An operating system process. +const ( + // The command used to launch the process (i.e. the command name). On Linux based + // systems, can be set to the zeroth string in proc/[pid]/cmdline. On Windows, can + // be set to the first parameter extracted from GetCommandLineW. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'cmd/otelcol' + AttributeProcessCommand = "process.command" + // All the command arguments (including the command/executable itself) as received + // by the process. On Linux-based systems (and some other Unixoid systems + // supporting procfs), can be set according to the list of null-delimited strings + // extracted from proc/[pid]/cmdline. For libc-based executables, this would be + // the full argv vector passed to main. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'cmd/otelcol', '--config=config.yaml' + AttributeProcessCommandArgs = "process.command_args" + // The full command used to launch the process as a single string representing the + // full command. On Windows, can be set to the result of GetCommandLineW. Do not + // set this if you have to assemble it just for monitoring; use + // process.command_args instead. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'C:\\cmd\\otelcol --config="my directory\\config.yaml"' + AttributeProcessCommandLine = "process.command_line" + // The name of the process executable. On Linux based systems, can be set to the + // Name in proc/[pid]/status. On Windows, can be set to the base name of + // GetProcessImageFileNameW. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'otelcol' + AttributeProcessExecutableName = "process.executable.name" + // The full path to the process executable. On Linux based systems, can be set to + // the target of proc/[pid]/exe. On Windows, can be set to the result of + // GetProcessImageFileNameW. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '/usr/bin/cmd/otelcol' + AttributeProcessExecutablePath = "process.executable.path" + // The username of the user that owns the process. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'root' + AttributeProcessOwner = "process.owner" + // Parent Process identifier (PPID). + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 111 + AttributeProcessParentPID = "process.parent_pid" + // Process identifier (PID). 
+ // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 1234 + AttributeProcessPID = "process.pid" + // An additional description about the runtime of the process, for example a + // specific vendor customization of the runtime environment. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' + AttributeProcessRuntimeDescription = "process.runtime.description" + // The name of the runtime of this process. For compiled native binaries, this + // SHOULD be the name of the compiler. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'OpenJDK Runtime Environment' + AttributeProcessRuntimeName = "process.runtime.name" + // The version of the runtime of this process, as returned by the runtime without + // modification. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '14.0.2' + AttributeProcessRuntimeVersion = "process.runtime.version" +) + +// Attributes for remote procedure calls. +const ( + // The error codes of the Connect request. Error codes are always string values. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeRPCConnectRPCErrorCode = "rpc.connect_rpc.error_code" + // The numeric status code of the gRPC request. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeRPCGRPCStatusCode = "rpc.grpc.status_code" + // error.code property of response if it is an error response. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: -32700, 100 + AttributeRPCJsonrpcErrorCode = "rpc.jsonrpc.error_code" + // error.message property of response if it is an error response. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Parse error', 'User already exists' + AttributeRPCJsonrpcErrorMessage = "rpc.jsonrpc.error_message" + // id property of request or response. Since protocol allows id to be int, string, + // null or missing (for notifications), value is expected to be cast to string for + // simplicity. Use empty string in case of null value. Omit entirely if this is a + // notification. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '10', 'request-7', '' + AttributeRPCJsonrpcRequestID = "rpc.jsonrpc.request_id" + // Protocol version as in jsonrpc property of request/response. Since JSON-RPC 1.0 + // doesn't specify this, the value can be omitted. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2.0', '1.0' + AttributeRPCJsonrpcVersion = "rpc.jsonrpc.version" + // The name of the (logical) method being called, must be equal to the $method + // part in the span name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'exampleMethod' + // Note: This is the logical name of the method from the RPC interface + // perspective, which can be different from the name of any implementing + // method/function. The code.function attribute may be used to store the latter + // (e.g., method actually executing the call on the server side, RPC client stub + // method on the client side). + AttributeRPCMethod = "rpc.method" + // The full (logical) name of the service being called, including its package + // name, if applicable. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'myservice.EchoService' + // Note: This is the logical name of the service from the RPC interface + // perspective, which can be different from the name of any implementing class. + // The code.namespace attribute may be used to store the latter (despite the + // attribute name, it may include a class name; e.g., class with method actually + // executing the call on the server side, RPC client stub class on the client + // side). + AttributeRPCService = "rpc.service" + // A string identifying the remoting system. See below for a list of well-known + // identifiers. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeRPCSystem = "rpc.system" +) + +const ( + // cancelled + AttributeRPCConnectRPCErrorCodeCancelled = "cancelled" + // unknown + AttributeRPCConnectRPCErrorCodeUnknown = "unknown" + // invalid_argument + AttributeRPCConnectRPCErrorCodeInvalidArgument = "invalid_argument" + // deadline_exceeded + AttributeRPCConnectRPCErrorCodeDeadlineExceeded = "deadline_exceeded" + // not_found + AttributeRPCConnectRPCErrorCodeNotFound = "not_found" + // already_exists + AttributeRPCConnectRPCErrorCodeAlreadyExists = "already_exists" + // permission_denied + AttributeRPCConnectRPCErrorCodePermissionDenied = "permission_denied" + // resource_exhausted + AttributeRPCConnectRPCErrorCodeResourceExhausted = "resource_exhausted" + // failed_precondition + AttributeRPCConnectRPCErrorCodeFailedPrecondition = "failed_precondition" + // aborted + AttributeRPCConnectRPCErrorCodeAborted = "aborted" + // out_of_range + AttributeRPCConnectRPCErrorCodeOutOfRange = "out_of_range" + // unimplemented + AttributeRPCConnectRPCErrorCodeUnimplemented = "unimplemented" + // internal + AttributeRPCConnectRPCErrorCodeInternal = "internal" + // unavailable + AttributeRPCConnectRPCErrorCodeUnavailable = "unavailable" + // data_loss + AttributeRPCConnectRPCErrorCodeDataLoss = "data_loss" + // unauthenticated + AttributeRPCConnectRPCErrorCodeUnauthenticated = "unauthenticated" +) + +const ( + // OK + AttributeRPCGRPCStatusCodeOk = "0" + // CANCELLED + AttributeRPCGRPCStatusCodeCancelled = "1" + // UNKNOWN + AttributeRPCGRPCStatusCodeUnknown = "2" + // INVALID_ARGUMENT + AttributeRPCGRPCStatusCodeInvalidArgument = "3" + // DEADLINE_EXCEEDED + AttributeRPCGRPCStatusCodeDeadlineExceeded = "4" + // NOT_FOUND + AttributeRPCGRPCStatusCodeNotFound = "5" + // ALREADY_EXISTS + AttributeRPCGRPCStatusCodeAlreadyExists = "6" + // PERMISSION_DENIED + AttributeRPCGRPCStatusCodePermissionDenied = "7" + // RESOURCE_EXHAUSTED + AttributeRPCGRPCStatusCodeResourceExhausted = "8" + // FAILED_PRECONDITION + AttributeRPCGRPCStatusCodeFailedPrecondition = "9" + // ABORTED + AttributeRPCGRPCStatusCodeAborted = "10" + // OUT_OF_RANGE + AttributeRPCGRPCStatusCodeOutOfRange = "11" + // UNIMPLEMENTED + AttributeRPCGRPCStatusCodeUnimplemented = "12" + // INTERNAL + AttributeRPCGRPCStatusCodeInternal = "13" + // UNAVAILABLE + AttributeRPCGRPCStatusCodeUnavailable = "14" + // DATA_LOSS + AttributeRPCGRPCStatusCodeDataLoss = "15" + // UNAUTHENTICATED + AttributeRPCGRPCStatusCodeUnauthenticated = "16" +) + +const ( + // gRPC + AttributeRPCSystemGRPC = "grpc" + // Java RMI + AttributeRPCSystemJavaRmi = "java_rmi" + // .NET WCF + AttributeRPCSystemDotnetWcf = "dotnet_wcf" + // Apache Dubbo + AttributeRPCSystemApacheDubbo = "apache_dubbo" + // Connect RPC + AttributeRPCSystemConnectRPC = "connect_rpc" +) + +// These 
attributes may be used to describe the server in a connection-based +// network interaction where there is one side that initiates the connection +// (the client is the side that initiates the connection). This covers all TCP +// network interactions since TCP is connection-based and one side initiates +// the connection (an exception is made for peer-to-peer communication over TCP +// where the "user-facing" surface of the protocol / API doesn't expose a clear +// notion of client and server). This also covers UDP network interactions +// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS. +const ( + // Server domain name if available without reverse DNS lookup; otherwise, IP + // address or Unix domain socket name. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the client side, and when communicating through an + // intermediary, server.address SHOULD represent the server address behind any + // intermediaries, for example proxies, if it's available. + AttributeServerAddress = "server.address" + // Server port number. + // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 80, 8080, 443 + // Note: When observed from the client side, and when communicating through an + // intermediary, server.port SHOULD represent the server port behind any + // intermediaries, for example proxies, if it's available. + AttributeServerPort = "server.port" +) + +// A service instance. +const ( + // The string ID of the service instance. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '627cc493-f310-47de-96bd-71410b7dec09' + // Note: MUST be unique for each instance of the same + // service.namespace,service.name pair (in other words + // service.namespace,service.name,service.instance.id triplet MUST be globally + // unique). The ID helps to + // distinguish instances of the same service that exist at the same time (e.g. + // instances of a horizontally scaled + // service).Implementations, such as SDKs, are recommended to generate a random + // Version 1 or Version 4 RFC + // 4122 UUID, but are free to use an inherent unique ID as the source of + // this value if stability is desirable. In that case, the ID SHOULD be used as + // source of a UUID Version 5 and + // SHOULD use the following UUID as the namespace: 4d63009a-8d0f-11ee- + // aad7-4c796ed8e320.UUIDs are typically recommended, as only an opaque value for + // the purposes of identifying a service instance is + // needed. Similar to what can be seen in the man page for the + // /etc/machine-id file, the underlying + // data, such as pod name and namespace should be treated as confidential, being + // the user's choice to expose it + // or not via another resource attribute.For applications running behind an + // application server (like unicorn), we do not recommend using one identifier + // for all processes participating in the application. Instead, it's recommended + // each division (e.g. a worker + // thread in unicorn) to have its own instance.id.It's not recommended for a + // Collector to set service.instance.id if it can't unambiguously determine the + // service instance that is generating that telemetry. For instance, creating an + // UUID based on pod.name will + // likely be wrong, as the Collector might not know from which container within + // that pod the telemetry originated. 
+ // However, Collectors can set the service.instance.id if they can unambiguously + // determine the service instance + // for that telemetry. This is typically the case for scraping receivers, as they + // know the target address and + // port. + AttributeServiceInstanceID = "service.instance.id" + // Logical name of the service. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'shoppingcart' + // Note: MUST be the same for all instances of horizontally scaled services. If + // the value was not specified, SDKs MUST fallback to unknown_service: + // concatenated with process.executable.name, e.g. unknown_service:bash. If + // process.executable.name is not available, the value MUST be set to + // unknown_service. + AttributeServiceName = "service.name" + // A namespace for service.name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Shop' + // Note: A string value having a meaning that helps to distinguish a group of + // services, for example the team name that owns a group of services. service.name + // is expected to be unique within the same namespace. If service.namespace is not + // specified in the Resource then service.name is expected to be unique for all + // services that have no explicit namespace defined (so the empty/unspecified + // namespace is simply one more valid namespace). Zero-length namespace string is + // assumed equal to unspecified namespace. + AttributeServiceNamespace = "service.namespace" + // The version string of the service API or implementation. The format is not + // defined by these conventions. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '2.0.0', 'a01dbef8a' + AttributeServiceVersion = "service.version" +) + +// Session is defined as the period of time encompassing all activities +// performed by the application and the actions executed by the end user. +// Consequently, a Session is represented as a collection of Logs, Events, and +// Spans emitted by the Client Application throughout the Session's duration. +// Each Session is assigned a unique identifier, which is included as an +// attribute in the Logs, Events, and Spans generated during the Session's +// lifecycle. +// When a session reaches end of life, typically due to user inactivity or +// session timeout, a new session identifier will be assigned. The previous +// session identifier may be provided by the instrumentation so that telemetry +// backends can link the two sessions. +const ( + // A unique id to identify a session. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '00112233-4455-6677-8899-aabbccddeeff' + AttributeSessionID = "session.id" + // The previous session.id for this user, when known. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '00112233-4455-6677-8899-aabbccddeeff' + AttributeSessionPreviousID = "session.previous_id" +) + +// These attributes may be used to describe the sender of a network +// exchange/packet. These should be used when there is no client/server +// relationship between the two sides, or when that relationship is unknown. +// This covers low-level network interactions (e.g. packet tracing) where you +// don't know if there was a connection or which side initiated it. 
This also +// covers unidirectional UDP flows and peer-to-peer communication where the +// "user-facing" surface of the protocol / API doesn't expose a clear notion of +// client and server. +const ( + // Source address - domain name if available without reverse DNS lookup; + // otherwise, IP address or Unix domain socket name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'source.example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the destination side, and when communicating through + // an intermediary, source.address SHOULD represent the source address behind any + // intermediaries, for example proxies, if it's available. + AttributeSourceAddress = "source.address" + // Source port number + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 3389, 2888 + AttributeSourcePort = "source.port" +) + +// Attributes for telemetry SDK. +const ( + // The language of the telemetry SDK. + // + // Type: Enum + // Requirement Level: Required + // Stability: stable + AttributeTelemetrySDKLanguage = "telemetry.sdk.language" + // The name of the telemetry SDK as defined above. + // + // Type: string + // Requirement Level: Required + // Stability: stable + // Examples: 'opentelemetry' + // Note: The OpenTelemetry SDK MUST set the telemetry.sdk.name attribute to + // opentelemetry. + // If another SDK, like a fork or a vendor-provided implementation, is used, this + // SDK MUST set the + // telemetry.sdk.name attribute to the fully-qualified class or module name of + // this SDK's main entry point + // or another suitable identifier depending on the language. + // The identifier opentelemetry is reserved and MUST NOT be used in this case. + // All custom identifiers SHOULD be stable across different versions of an + // implementation. + AttributeTelemetrySDKName = "telemetry.sdk.name" + // The version string of the telemetry SDK. + // + // Type: string + // Requirement Level: Required + // Stability: stable + // Examples: '1.2.3' + AttributeTelemetrySDKVersion = "telemetry.sdk.version" + // The name of the auto instrumentation agent or distribution, if used. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'parts-unlimited-java' + // Note: Official auto instrumentation agents and distributions SHOULD set the + // telemetry.distro.name attribute to + // a string starting with opentelemetry-, e.g. opentelemetry-java-instrumentation. + AttributeTelemetryDistroName = "telemetry.distro.name" + // The version string of the auto instrumentation agent or distribution, if used. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '1.2.3' + AttributeTelemetryDistroVersion = "telemetry.distro.version" +) + +const ( + // cpp + AttributeTelemetrySDKLanguageCPP = "cpp" + // dotnet + AttributeTelemetrySDKLanguageDotnet = "dotnet" + // erlang + AttributeTelemetrySDKLanguageErlang = "erlang" + // go + AttributeTelemetrySDKLanguageGo = "go" + // java + AttributeTelemetrySDKLanguageJava = "java" + // nodejs + AttributeTelemetrySDKLanguageNodejs = "nodejs" + // php + AttributeTelemetrySDKLanguagePHP = "php" + // python + AttributeTelemetrySDKLanguagePython = "python" + // ruby + AttributeTelemetrySDKLanguageRuby = "ruby" + // rust + AttributeTelemetrySDKLanguageRust = "rust" + // swift + AttributeTelemetrySDKLanguageSwift = "swift" + // webjs + AttributeTelemetrySDKLanguageWebjs = "webjs" +) + +// These attributes may be used for any operation to store information about a +// thread that started a span. +const ( + // Current "managed" thread ID (as opposed to OS thread ID). + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 42 + AttributeThreadID = "thread.id" + // Current thread name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'main' + AttributeThreadName = "thread.name" +) + +// Semantic convention attributes in the TLS namespace. +const ( + // String indicating the cipher used during the current connection. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'TLS_RSA_WITH_3DES_EDE_CBC_SHA', + // 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' + // Note: The values allowed for tls.cipher MUST be one of the Descriptions of the + // registered TLS Cipher Suites. + AttributeTLSCipher = "tls.cipher" + // PEM-encoded stand-alone certificate offered by the client. This is usually + // mutually-exclusive of client.certificate_chain since this value also exists in + // that list. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'MII...' + AttributeTLSClientCertificate = "tls.client.certificate" + // Array of PEM-encoded certificates that make up the certificate chain offered by + // the client. This is usually mutually-exclusive of client.certificate since that + // value should be the first certificate in the chain. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'MII...', 'MI...' + AttributeTLSClientCertificateChain = "tls.client.certificate_chain" + // Certificate fingerprint using the MD5 digest of DER-encoded version of + // certificate offered by the client. For consistency with other hash values, this + // value should be formatted as an uppercase hash. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC' + AttributeTLSClientHashMd5 = "tls.client.hash.md5" + // Certificate fingerprint using the SHA1 digest of DER-encoded version of + // certificate offered by the client. For consistency with other hash values, this + // value should be formatted as an uppercase hash. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A' + AttributeTLSClientHashSha1 = "tls.client.hash.sha1" + // Certificate fingerprint using the SHA256 digest of DER-encoded version of + // certificate offered by the client. For consistency with other hash values, this + // value should be formatted as an uppercase hash. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0' + AttributeTLSClientHashSha256 = "tls.client.hash.sha256" + // Distinguished name of subject of the issuer of the x.509 certificate presented + // by the client. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example, DC=com' + AttributeTLSClientIssuer = "tls.client.issuer" + // A hash that identifies clients based on how they perform an SSL/TLS handshake. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'd4e5b18d6b55c71272893221c96ba240' + AttributeTLSClientJa3 = "tls.client.ja3" + // Date/Time indicating when client certificate is no longer considered valid. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2021-01-01T00:00:00.000Z' + AttributeTLSClientNotAfter = "tls.client.not_after" + // Date/Time indicating when client certificate is first considered valid. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '1970-01-01T00:00:00.000Z' + AttributeTLSClientNotBefore = "tls.client.not_before" + // Also called an SNI, this tells the server which hostname the client is + // attempting to connect to. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry.io' + AttributeTLSClientServerName = "tls.client.server_name" + // Distinguished name of subject of the x.509 certificate presented by the client. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'CN=myclient, OU=Documentation Team, DC=example, DC=com' + AttributeTLSClientSubject = "tls.client.subject" + // Array of ciphers offered by the client during the client hello. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: '"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + // "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "..."' + AttributeTLSClientSupportedCiphers = "tls.client.supported_ciphers" + // String indicating the curve used for the given cipher, when applicable. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'secp256r1' + AttributeTLSCurve = "tls.curve" + // Boolean flag indicating if the TLS negotiation was successful and transitioned + // to an encrypted tunnel. + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + // Examples: True + AttributeTLSEstablished = "tls.established" + // String indicating the protocol being tunneled. Per the values in the IANA + // registry, this string should be lower case. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'http/1.1' + AttributeTLSNextProtocol = "tls.next_protocol" + // Normalized lowercase protocol name parsed from original string of the + // negotiated SSL/TLS protocol version + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeTLSProtocolName = "tls.protocol.name" + // Numeric part of the version parsed from the original string of the negotiated + // SSL/TLS protocol version + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '1.2', '3' + AttributeTLSProtocolVersion = "tls.protocol.version" + // Boolean flag indicating if this TLS connection was resumed from an existing TLS + // negotiation. + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + // Examples: True + AttributeTLSResumed = "tls.resumed" + // PEM-encoded stand-alone certificate offered by the server. This is usually + // mutually-exclusive of server.certificate_chain since this value also exists in + // that list. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'MII...' + AttributeTLSServerCertificate = "tls.server.certificate" + // Array of PEM-encoded certificates that make up the certificate chain offered by + // the server. This is usually mutually-exclusive of server.certificate since that + // value should be the first certificate in the chain. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'MII...', 'MI...' + AttributeTLSServerCertificateChain = "tls.server.certificate_chain" + // Certificate fingerprint using the MD5 digest of DER-encoded version of + // certificate offered by the server. For consistency with other hash values, this + // value should be formatted as an uppercase hash. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC' + AttributeTLSServerHashMd5 = "tls.server.hash.md5" + // Certificate fingerprint using the SHA1 digest of DER-encoded version of + // certificate offered by the server. For consistency with other hash values, this + // value should be formatted as an uppercase hash. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A' + AttributeTLSServerHashSha1 = "tls.server.hash.sha1" + // Certificate fingerprint using the SHA256 digest of DER-encoded version of + // certificate offered by the server. For consistency with other hash values, this + // value should be formatted as an uppercase hash. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0' + AttributeTLSServerHashSha256 = "tls.server.hash.sha256" + // Distinguished name of subject of the issuer of the x.509 certificate presented + // by the client. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example, DC=com' + AttributeTLSServerIssuer = "tls.server.issuer" + // A hash that identifies servers based on how they perform an SSL/TLS handshake. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'd4e5b18d6b55c71272893221c96ba240' + AttributeTLSServerJa3s = "tls.server.ja3s" + // Date/Time indicating when server certificate is no longer considered valid. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2021-01-01T00:00:00.000Z' + AttributeTLSServerNotAfter = "tls.server.not_after" + // Date/Time indicating when server certificate is first considered valid. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '1970-01-01T00:00:00.000Z' + AttributeTLSServerNotBefore = "tls.server.not_before" + // Distinguished name of subject of the x.509 certificate presented by the server. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'CN=myserver, OU=Documentation Team, DC=example, DC=com' + AttributeTLSServerSubject = "tls.server.subject" +) + +const ( + // ssl + AttributeTLSProtocolNameSsl = "ssl" + // tls + AttributeTLSProtocolNameTLS = "tls" +) + +// Attributes describing URL. +const ( + // Domain extracted from the url.full, such as "opentelemetry.io". + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'www.foo.bar', 'opentelemetry.io', '3.12.167.2', + // '[1080:0:0:0:8:800:200C:417A]' + // Note: In some cases a URL may refer to an IP and/or port directly, without a + // domain name. In this case, the IP address would go to the domain field. If the + // URL contains a literal IPv6 address enclosed by [ and ], the [ and ] characters + // should also be captured in the domain field. + AttributeURLDomain = "url.domain" + // The file extension extracted from the url.full, excluding the leading dot. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'png', 'gz' + // Note: The file extension is only set if it exists, as not every url has a file + // extension. When the file name has multiple extensions example.tar.gz, only the + // last one should be captured gz, not tar.gz. + AttributeURLExtension = "url.extension" + // The URI fragment component + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'SemConv' + AttributeURLFragment = "url.fragment" + // Absolute URL describing a network resource according to RFC3986 + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv', '//localhost' + // Note: For network calls, URL usually has + // scheme://host[:port][path][?query][#fragment] format, where the fragment is not + // transmitted over HTTP, but if it is known, it SHOULD be included nevertheless. + // url.full MUST NOT contain credentials passed via URL in form of + // https://username:password@www.example.com/. In such case username and password + // SHOULD be redacted and attribute's value SHOULD be + // https://REDACTED:REDACTED@www.example.com/. + // url.full SHOULD capture the absolute URL when it is available (or can be + // reconstructed). Sensitive content provided in url.full SHOULD be scrubbed when + // instrumentations can identify it. + AttributeURLFull = "url.full" + // Unmodified original URL as seen in the event source. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv', + // 'search?q=OpenTelemetry' + // Note: In network monitoring, the observed URL may be a full URL, whereas in + // access logs, the URL is often just represented as a path. This field is meant + // to represent the URL as it was observed, complete or not. + // url.original might contain credentials passed via URL in form of + // https://username:password@www.example.com/. In such case password and username + // SHOULD NOT be redacted and attribute's value SHOULD remain the same. + AttributeURLOriginal = "url.original" + // The URI path component + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '/search' + // Note: Sensitive content provided in url.path SHOULD be scrubbed when + // instrumentations can identify it. + AttributeURLPath = "url.path" + // Port extracted from the url.full + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 443 + AttributeURLPort = "url.port" + // The URI query component + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'q=OpenTelemetry' + // Note: Sensitive content provided in url.query SHOULD be scrubbed when + // instrumentations can identify it. + AttributeURLQuery = "url.query" + // The highest registered url domain, stripped of the subdomain. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'example.com', 'foo.co.uk' + // Note: This value can be determined precisely with the public suffix list. For + // example, the registered domain for foo.example.com is example.com. Trying to + // approximate this by simply taking the last two labels will not work well for + // TLDs such as co.uk. + AttributeURLRegisteredDomain = "url.registered_domain" + // The URI scheme component identifying the used protocol. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'https', 'ftp', 'telnet' + AttributeURLScheme = "url.scheme" + // The subdomain portion of a fully qualified domain name includes all of the + // names except the host name under the registered_domain. In a partially + // qualified domain, or if the qualification level of the full name cannot be + // determined, subdomain contains all of the names below the registered domain. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'east', 'sub2.sub1' + // Note: The subdomain portion of www.east.mydomain.co.uk is east. If the domain + // has multiple levels of subdomain, such as sub2.sub1.example.com, the subdomain + // field should contain sub2.sub1, with no trailing period. + AttributeURLSubdomain = "url.subdomain" + // The effective top level domain (eTLD), also known as the domain suffix, is the + // last part of the domain name. For example, the top level domain for example.com + // is com. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'com', 'co.uk' + // Note: This value can be determined precisely with the public suffix list. + AttributeURLTopLevelDomain = "url.top_level_domain" +) + +// Describes user-agent attributes. +const ( + // Name of the user-agent extracted from original. Usually refers to the browser's + // name. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Safari', 'YourApp' + // Note: Example of extracting browser's name from original string. In the case of + // using a user-agent for non-browser products, such as microservices with + // multiple names/versions inside the user_agent.original, the most significant + // name SHOULD be selected. In such a scenario it should align with + // user_agent.version + AttributeUserAgentName = "user_agent.name" + // Value of the HTTP User-Agent header sent by the client. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU iPhone + // OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) + // Version/14.1.2 Mobile/15E148 Safari/604.1', 'YourApp/1.0.0 grpc-java- + // okhttp/1.27.2' + AttributeUserAgentOriginal = "user_agent.original" + // Version of the user-agent extracted from original. Usually refers to the + // browser's version + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '14.1.2', '1.0.0' + // Note: Example of extracting browser's version from original string. In the case + // of using a user-agent for non-browser products, such as microservices with + // multiple names/versions inside the user_agent.original, the most significant + // version SHOULD be selected. In such a scenario it should align with + // user_agent.name + AttributeUserAgentVersion = "user_agent.version" +) + +func GetAttribute_groupSemanticConventionAttributeNames() []string { + return []string{ + AttributeEventName, + AttributeLogRecordUID, + AttributeLogIostream, + AttributeLogFileName, + AttributeLogFileNameResolved, + AttributeLogFilePath, + AttributeLogFilePathResolved, + AttributePoolName, + AttributeState, + AttributeAspnetcoreRateLimitingResult, + AttributeAspnetcoreDiagnosticsHandlerType, + AttributeAspnetcoreRateLimitingPolicy, + AttributeAspnetcoreRequestIsUnhandled, + AttributeAspnetcoreRoutingIsFallback, + AttributeSignalrConnectionStatus, + AttributeSignalrTransport, + AttributeJvmBufferPoolName, + AttributeJvmMemoryPoolName, + AttributeJvmMemoryType, + AttributeProcessCPUState, + AttributeSystemDevice, + AttributeSystemCPULogicalNumber, + AttributeSystemCPUState, + AttributeSystemMemoryState, + AttributeSystemPagingDirection, + AttributeSystemPagingState, + AttributeSystemPagingType, + AttributeSystemFilesystemMode, + AttributeSystemFilesystemMountpoint, + AttributeSystemFilesystemState, + AttributeSystemFilesystemType, + AttributeSystemNetworkState, + AttributeSystemProcessStatus, + AttributeAndroidOSAPILevel, + AttributeAWSDynamoDBAttributeDefinitions, + AttributeAWSDynamoDBAttributesToGet, + AttributeAWSDynamoDBConsistentRead, + AttributeAWSDynamoDBConsumedCapacity, + AttributeAWSDynamoDBCount, + AttributeAWSDynamoDBExclusiveStartTable, + AttributeAWSDynamoDBGlobalSecondaryIndexUpdates, + AttributeAWSDynamoDBGlobalSecondaryIndexes, + AttributeAWSDynamoDBIndexName, + AttributeAWSDynamoDBItemCollectionMetrics, + AttributeAWSDynamoDBLimit, + AttributeAWSDynamoDBLocalSecondaryIndexes, + AttributeAWSDynamoDBProjection, + AttributeAWSDynamoDBProvisionedReadCapacity, + AttributeAWSDynamoDBProvisionedWriteCapacity, + AttributeAWSDynamoDBScanForward, + AttributeAWSDynamoDBScannedCount, + AttributeAWSDynamoDBSegment, + AttributeAWSDynamoDBSelect, + AttributeAWSDynamoDBTableCount, + AttributeAWSDynamoDBTableNames, + AttributeAWSDynamoDBTotalSegments, + 
AttributeBrowserBrands, + AttributeBrowserLanguage, + AttributeBrowserMobile, + AttributeBrowserPlatform, + AttributeClientAddress, + AttributeClientPort, + AttributeCloudAccountID, + AttributeCloudAvailabilityZone, + AttributeCloudPlatform, + AttributeCloudProvider, + AttributeCloudRegion, + AttributeCloudResourceID, + AttributeCloudeventsEventID, + AttributeCloudeventsEventSource, + AttributeCloudeventsEventSpecVersion, + AttributeCloudeventsEventSubject, + AttributeCloudeventsEventType, + AttributeCodeColumn, + AttributeCodeFilepath, + AttributeCodeFunction, + AttributeCodeLineNumber, + AttributeCodeNamespace, + AttributeCodeStacktrace, + AttributeContainerCommand, + AttributeContainerCommandArgs, + AttributeContainerCommandLine, + AttributeContainerCPUState, + AttributeContainerID, + AttributeContainerImageID, + AttributeContainerImageName, + AttributeContainerImageRepoDigests, + AttributeContainerImageTags, + AttributeContainerName, + AttributeContainerRuntime, + AttributeDBCassandraConsistencyLevel, + AttributeDBCassandraCoordinatorDC, + AttributeDBCassandraCoordinatorID, + AttributeDBCassandraIdempotence, + AttributeDBCassandraPageSize, + AttributeDBCassandraSpeculativeExecutionCount, + AttributeDBCassandraTable, + AttributeDBCosmosDBClientID, + AttributeDBCosmosDBConnectionMode, + AttributeDBCosmosDBContainer, + AttributeDBCosmosDBOperationType, + AttributeDBCosmosDBRequestCharge, + AttributeDBCosmosDBRequestContentLength, + AttributeDBCosmosDBStatusCode, + AttributeDBCosmosDBSubStatusCode, + AttributeDBElasticsearchClusterName, + AttributeDBInstanceID, + AttributeDBMongoDBCollection, + AttributeDBMSSQLInstanceName, + AttributeDBName, + AttributeDBOperation, + AttributeDBRedisDBIndex, + AttributeDBSQLTable, + AttributeDBStatement, + AttributeDBSystem, + AttributeDBUser, + AttributeDeploymentEnvironment, + AttributeDBConnectionString, + AttributeDBElasticsearchNodeName, + AttributeDBJDBCDriverClassname, + AttributeHTTPFlavor, + AttributeHTTPMethod, + AttributeHTTPRequestContentLength, + AttributeHTTPResponseContentLength, + AttributeHTTPScheme, + AttributeHTTPStatusCode, + AttributeHTTPTarget, + AttributeHTTPURL, + AttributeHTTPUserAgent, + AttributeMessagingKafkaDestinationPartition, + AttributeNetHostName, + AttributeNetHostPort, + AttributeNetPeerName, + AttributeNetPeerPort, + AttributeNetProtocolName, + AttributeNetProtocolVersion, + AttributeNetSockFamily, + AttributeNetSockHostAddr, + AttributeNetSockHostPort, + AttributeNetSockPeerAddr, + AttributeNetSockPeerName, + AttributeNetSockPeerPort, + AttributeNetTransport, + AttributeSystemProcessesStatus, + AttributeDestinationAddress, + AttributeDestinationPort, + AttributeDeviceID, + AttributeDeviceManufacturer, + AttributeDeviceModelIdentifier, + AttributeDeviceModelName, + AttributeDiskIoDirection, + AttributeDNSQuestionName, + AttributeEnduserID, + AttributeEnduserRole, + AttributeEnduserScope, + AttributeErrorType, + AttributeExceptionEscaped, + AttributeExceptionMessage, + AttributeExceptionStacktrace, + AttributeExceptionType, + AttributeFaaSColdstart, + AttributeFaaSCron, + AttributeFaaSDocumentCollection, + AttributeFaaSDocumentName, + AttributeFaaSDocumentOperation, + AttributeFaaSDocumentTime, + AttributeFaaSInstance, + AttributeFaaSInvocationID, + AttributeFaaSInvokedName, + AttributeFaaSInvokedProvider, + AttributeFaaSInvokedRegion, + AttributeFaaSMaxMemory, + AttributeFaaSName, + AttributeFaaSTime, + AttributeFaaSTrigger, + AttributeFaaSVersion, + AttributeFeatureFlagKey, + AttributeFeatureFlagProviderName, + 
AttributeFeatureFlagVariant, + AttributeFileDirectory, + AttributeFileExtension, + AttributeFileName, + AttributeFilePath, + AttributeFileSize, + AttributeGCPCloudRunJobExecution, + AttributeGCPCloudRunJobTaskIndex, + AttributeGCPGceInstanceHostname, + AttributeGCPGceInstanceName, + AttributeHostArch, + AttributeHostCPUCacheL2Size, + AttributeHostCPUFamily, + AttributeHostCPUModelID, + AttributeHostCPUModelName, + AttributeHostCPUStepping, + AttributeHostCPUVendorID, + AttributeHostID, + AttributeHostImageID, + AttributeHostImageName, + AttributeHostImageVersion, + AttributeHostIP, + AttributeHostMac, + AttributeHostName, + AttributeHostType, + AttributeHTTPConnectionState, + AttributeHTTPRequestBodySize, + AttributeHTTPRequestMethod, + AttributeHTTPRequestMethodOriginal, + AttributeHTTPRequestResendCount, + AttributeHTTPRequestSize, + AttributeHTTPResponseBodySize, + AttributeHTTPResponseSize, + AttributeHTTPResponseStatusCode, + AttributeHTTPRoute, + AttributeK8SClusterName, + AttributeK8SClusterUID, + AttributeK8SContainerName, + AttributeK8SContainerRestartCount, + AttributeK8SCronJobName, + AttributeK8SCronJobUID, + AttributeK8SDaemonSetName, + AttributeK8SDaemonSetUID, + AttributeK8SDeploymentName, + AttributeK8SDeploymentUID, + AttributeK8SJobName, + AttributeK8SJobUID, + AttributeK8SNamespaceName, + AttributeK8SNodeName, + AttributeK8SNodeUID, + AttributeK8SPodName, + AttributeK8SPodUID, + AttributeK8SReplicaSetName, + AttributeK8SReplicaSetUID, + AttributeK8SStatefulSetName, + AttributeK8SStatefulSetUID, + AttributeMessagingBatchMessageCount, + AttributeMessagingClientID, + AttributeMessagingDestinationAnonymous, + AttributeMessagingDestinationName, + AttributeMessagingDestinationPartitionID, + AttributeMessagingDestinationTemplate, + AttributeMessagingDestinationTemporary, + AttributeMessagingDestinationPublishAnonymous, + AttributeMessagingDestinationPublishName, + AttributeMessagingEventhubsConsumerGroup, + AttributeMessagingEventhubsMessageEnqueuedTime, + AttributeMessagingGCPPubsubMessageOrderingKey, + AttributeMessagingKafkaConsumerGroup, + AttributeMessagingKafkaMessageKey, + AttributeMessagingKafkaMessageOffset, + AttributeMessagingKafkaMessageTombstone, + AttributeMessagingMessageBodySize, + AttributeMessagingMessageConversationID, + AttributeMessagingMessageEnvelopeSize, + AttributeMessagingMessageID, + AttributeMessagingOperation, + AttributeMessagingRabbitmqDestinationRoutingKey, + AttributeMessagingRabbitmqMessageDeliveryTag, + AttributeMessagingRocketmqClientGroup, + AttributeMessagingRocketmqConsumptionModel, + AttributeMessagingRocketmqMessageDelayTimeLevel, + AttributeMessagingRocketmqMessageDeliveryTimestamp, + AttributeMessagingRocketmqMessageGroup, + AttributeMessagingRocketmqMessageKeys, + AttributeMessagingRocketmqMessageTag, + AttributeMessagingRocketmqMessageType, + AttributeMessagingRocketmqNamespace, + AttributeMessagingServicebusDestinationSubscriptionName, + AttributeMessagingServicebusDispositionStatus, + AttributeMessagingServicebusMessageDeliveryCount, + AttributeMessagingServicebusMessageEnqueuedTime, + AttributeMessagingSystem, + AttributeNetworkCarrierIcc, + AttributeNetworkCarrierMcc, + AttributeNetworkCarrierMnc, + AttributeNetworkCarrierName, + AttributeNetworkConnectionSubtype, + AttributeNetworkConnectionType, + AttributeNetworkIoDirection, + AttributeNetworkLocalAddress, + AttributeNetworkLocalPort, + AttributeNetworkPeerAddress, + AttributeNetworkPeerPort, + AttributeNetworkProtocolName, + AttributeNetworkProtocolVersion, + 
AttributeNetworkTransport, + AttributeNetworkType, + AttributeOciManifestDigest, + AttributeOSBuildID, + AttributeOSDescription, + AttributeOSName, + AttributeOSType, + AttributeOSVersion, + AttributeProcessCommand, + AttributeProcessCommandArgs, + AttributeProcessCommandLine, + AttributeProcessExecutableName, + AttributeProcessExecutablePath, + AttributeProcessOwner, + AttributeProcessParentPID, + AttributeProcessPID, + AttributeProcessRuntimeDescription, + AttributeProcessRuntimeName, + AttributeProcessRuntimeVersion, + AttributeRPCConnectRPCErrorCode, + AttributeRPCGRPCStatusCode, + AttributeRPCJsonrpcErrorCode, + AttributeRPCJsonrpcErrorMessage, + AttributeRPCJsonrpcRequestID, + AttributeRPCJsonrpcVersion, + AttributeRPCMethod, + AttributeRPCService, + AttributeRPCSystem, + AttributeServerAddress, + AttributeServerPort, + AttributeServiceInstanceID, + AttributeServiceName, + AttributeServiceNamespace, + AttributeServiceVersion, + AttributeSessionID, + AttributeSessionPreviousID, + AttributeSourceAddress, + AttributeSourcePort, + AttributeTelemetrySDKLanguage, + AttributeTelemetrySDKName, + AttributeTelemetrySDKVersion, + AttributeTelemetryDistroName, + AttributeTelemetryDistroVersion, + AttributeThreadID, + AttributeThreadName, + AttributeTLSCipher, + AttributeTLSClientCertificate, + AttributeTLSClientCertificateChain, + AttributeTLSClientHashMd5, + AttributeTLSClientHashSha1, + AttributeTLSClientHashSha256, + AttributeTLSClientIssuer, + AttributeTLSClientJa3, + AttributeTLSClientNotAfter, + AttributeTLSClientNotBefore, + AttributeTLSClientServerName, + AttributeTLSClientSubject, + AttributeTLSClientSupportedCiphers, + AttributeTLSCurve, + AttributeTLSEstablished, + AttributeTLSNextProtocol, + AttributeTLSProtocolName, + AttributeTLSProtocolVersion, + AttributeTLSResumed, + AttributeTLSServerCertificate, + AttributeTLSServerCertificateChain, + AttributeTLSServerHashMd5, + AttributeTLSServerHashSha1, + AttributeTLSServerHashSha256, + AttributeTLSServerIssuer, + AttributeTLSServerJa3s, + AttributeTLSServerNotAfter, + AttributeTLSServerNotBefore, + AttributeTLSServerSubject, + AttributeURLDomain, + AttributeURLExtension, + AttributeURLFragment, + AttributeURLFull, + AttributeURLOriginal, + AttributeURLPath, + AttributeURLPort, + AttributeURLQuery, + AttributeURLRegisteredDomain, + AttributeURLScheme, + AttributeURLSubdomain, + AttributeURLTopLevelDomain, + AttributeUserAgentName, + AttributeUserAgentOriginal, + AttributeUserAgentVersion, + } +} diff --git a/vendor/go.opentelemetry.io/collector/semconv/v1.25.0/generated_event.go b/vendor/go.opentelemetry.io/collector/semconv/v1.25.0/generated_event.go new file mode 100644 index 00000000000..9ce1307afe1 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/semconv/v1.25.0/generated_event.go @@ -0,0 +1,105 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv + +// This event represents an occurrence of a lifecycle transition on the iOS +// platform. +const ( + // This attribute represents the state the application has transitioned into at + // the occurrence of the event. + // + // Type: Enum + // Requirement Level: Required + // Stability: experimental + // Note: The iOS lifecycle states are defined in the UIApplicationDelegate + // documentation, and from which the OS terminology column values are derived. + AttributeIosState = "ios.state" +) + +const ( + // The app has become `active`. 
Associated with UIKit notification `applicationDidBecomeActive` + AttributeIosStateActive = "active" + // The app is now `inactive`. Associated with UIKit notification `applicationWillResignActive` + AttributeIosStateInactive = "inactive" + // The app is now in the background. This value is associated with UIKit notification `applicationDidEnterBackground` + AttributeIosStateBackground = "background" + // The app is now in the foreground. This value is associated with UIKit notification `applicationWillEnterForeground` + AttributeIosStateForeground = "foreground" + // The app is about to terminate. Associated with UIKit notification `applicationWillTerminate` + AttributeIosStateTerminate = "terminate" +) + +// This event represents an occurrence of a lifecycle transition on the Android +// platform. +const ( + // This attribute represents the state the application has transitioned into at + // the occurrence of the event. + // + // Type: Enum + // Requirement Level: Required + // Stability: experimental + // Note: The Android lifecycle states are defined in Activity lifecycle callbacks, + // and from which the OS identifiers are derived. + AttributeAndroidState = "android.state" +) + +const ( + // Any time before Activity.onResume() or, if the app has no Activity, Context.startService() has been called in the app for the first time + AttributeAndroidStateCreated = "created" + // Any time after Activity.onPause() or, if the app has no Activity, Context.stopService() has been called when the app was in the foreground state + AttributeAndroidStateBackground = "background" + // Any time after Activity.onResume() or, if the app has no Activity, Context.startService() has been called when the app was in either the created or background states + AttributeAndroidStateForeground = "foreground" +) + +// RPC received/sent message. +const ( + // Compressed size of the message in bytes. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + AttributeMessageCompressedSize = "message.compressed_size" + // MUST be calculated as two different counters starting from 1 one for sent + // messages and one for received message. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Note: This way we guarantee that the values will be consistent between + // different implementations. + AttributeMessageID = "message.id" + // Whether this is a received or sent message. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeMessageType = "message.type" + // Uncompressed size of the message in bytes. 
+ // + // Type: int + // Requirement Level: Optional + // Stability: experimental + AttributeMessageUncompressedSize = "message.uncompressed_size" +) + +const ( + // sent + AttributeMessageTypeSent = "SENT" + // received + AttributeMessageTypeReceived = "RECEIVED" +) + +func GetEventSemanticConventionAttributeNames() []string { + return []string{ + AttributeIosState, + AttributeAndroidState, + AttributeMessageCompressedSize, + AttributeMessageID, + AttributeMessageType, + AttributeMessageUncompressedSize, + } +} diff --git a/vendor/go.opentelemetry.io/collector/semconv/v1.25.0/generated_resource.go b/vendor/go.opentelemetry.io/collector/semconv/v1.25.0/generated_resource.go new file mode 100644 index 00000000000..7b355e40661 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/semconv/v1.25.0/generated_resource.go @@ -0,0 +1,242 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv + +// Resources used by AWS Elastic Container Service (ECS). +const ( + // The ID of a running ECS task. The ID MUST be extracted from task.arn. + // + // Type: string + // Requirement Level: Conditionally Required - If and only if `task.arn` is + // populated. + // Stability: experimental + // Examples: '10838bed-421f-43ef-870a-f43feacbbb5b', + // '23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd' + AttributeAWSECSTaskID = "aws.ecs.task.id" + // The ARN of an ECS cluster. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AttributeAWSECSClusterARN = "aws.ecs.cluster.arn" + // The Amazon Resource Name (ARN) of an ECS container instance. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'arn:aws:ecs:us- + // west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' + AttributeAWSECSContainerARN = "aws.ecs.container.arn" + // The launch type for an ECS task. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeAWSECSLaunchtype = "aws.ecs.launchtype" + // The ARN of a running ECS task. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'arn:aws:ecs:us- + // west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b', + // 'arn:aws:ecs:us-west-1:123456789123:task/my-cluster/task- + // id/23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd' + AttributeAWSECSTaskARN = "aws.ecs.task.arn" + // The family name of the ECS task definition used to create the ECS task. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry-family' + AttributeAWSECSTaskFamily = "aws.ecs.task.family" + // The revision for the task definition used to create the ECS task. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '8', '26' + AttributeAWSECSTaskRevision = "aws.ecs.task.revision" +) + +const ( + // ec2 + AttributeAWSECSLaunchtypeEC2 = "ec2" + // fargate + AttributeAWSECSLaunchtypeFargate = "fargate" +) + +// Resources used by AWS Elastic Kubernetes Service (EKS). +const ( + // The ARN of an EKS cluster. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AttributeAWSEKSClusterARN = "aws.eks.cluster.arn" +) + +// Resources specific to Amazon Web Services. 
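+ // ------------------------------------------------------------------------
+ // Illustrative sketch (not part of the vendored file): the message.id rule
+ // defined above requires two independent counters per span, both starting
+ // from 1 — one for sent and one for received messages — so that different
+ // implementations produce consistent values. The rpcMessageCounter type and
+ // next method below are hypothetical, for illustration only:
+ //
+ //	type rpcMessageCounter struct{ sent, received int64 }
+ //
+ //	// next returns the message.id for a message whose message.type is
+ //	// AttributeMessageTypeSent ("SENT") or AttributeMessageTypeReceived
+ //	// ("RECEIVED").
+ //	func (c *rpcMessageCounter) next(messageType string) int64 {
+ //		if messageType == AttributeMessageTypeSent {
+ //			c.sent++
+ //			return c.sent
+ //		}
+ //		c.received++
+ //		return c.received
+ //	}
+ // ------------------------------------------------------------------------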
+const ( + // The Amazon Resource Name(s) (ARN) of the AWS log group(s). + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' + // Note: See the log group ARN format documentation. + AttributeAWSLogGroupARNs = "aws.log.group.arns" + // The name(s) of the AWS log group(s) an application is writing to. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: '/aws/lambda/my-function', 'opentelemetry-service' + // Note: Multiple log groups must be supported for cases like multi-container + // applications, where a single application has sidecar containers, and each write + // to their own log group. + AttributeAWSLogGroupNames = "aws.log.group.names" + // The ARN(s) of the AWS log stream(s). + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log- + // stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + // Note: See the log stream ARN format documentation. One log group can contain + // several log streams, so these ARNs necessarily identify both a log group and a + // log stream. + AttributeAWSLogStreamARNs = "aws.log.stream.arns" + // The name(s) of the AWS log stream(s) an application is writing to. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + AttributeAWSLogStreamNames = "aws.log.stream.names" +) + +// Heroku dyno metadata +const ( + // Unique identifier for the application + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da' + AttributeHerokuAppID = "heroku.app.id" + // Commit hash for the current release + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec' + AttributeHerokuReleaseCommit = "heroku.release.commit" + // Time and date the release was created + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2022-10-23T18:00:42Z' + AttributeHerokuReleaseCreationTimestamp = "heroku.release.creation_timestamp" +) + +// Resource describing the packaged software running the application code. Web +// engines are typically executed using process.runtime. +const ( + // The name of the web engine. + // + // Type: string + // Requirement Level: Required + // Stability: experimental + // Examples: 'WildFly' + AttributeWebEngineName = "webengine.name" + // Additional description of the web engine (e.g. detailed version and edition + // information). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - 2.2.2.Final' + AttributeWebEngineDescription = "webengine.description" + // The version of the web engine. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '21.0.0' + AttributeWebEngineVersion = "webengine.version" +) + +// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's +// concepts. +const ( + // The name of the instrumentation scope - (InstrumentationScope.Name in OTLP). 
+ // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'io.opentelemetry.contrib.mongodb' + AttributeOTelScopeName = "otel.scope.name" + // The version of the instrumentation scope - (InstrumentationScope.Version in + // OTLP). + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '1.0.0' + AttributeOTelScopeVersion = "otel.scope.version" +) + +// Span attributes used by non-OTLP exporters to represent OpenTelemetry +// Scope's concepts. +const ( + // None + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Deprecated: use the `otel.scope.name` attribute. + // Examples: 'io.opentelemetry.contrib.mongodb' + AttributeOTelLibraryName = "otel.library.name" + // None + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Deprecated: use the `otel.scope.version` attribute. + // Examples: '1.0.0' + AttributeOTelLibraryVersion = "otel.library.version" +) + +func GetResourceSemanticConventionAttributeNames() []string { + return []string{ + AttributeAWSECSTaskID, + AttributeAWSECSClusterARN, + AttributeAWSECSContainerARN, + AttributeAWSECSLaunchtype, + AttributeAWSECSTaskARN, + AttributeAWSECSTaskFamily, + AttributeAWSECSTaskRevision, + AttributeAWSEKSClusterARN, + AttributeAWSLogGroupARNs, + AttributeAWSLogGroupNames, + AttributeAWSLogStreamARNs, + AttributeAWSLogStreamNames, + AttributeHerokuAppID, + AttributeHerokuReleaseCommit, + AttributeHerokuReleaseCreationTimestamp, + AttributeWebEngineName, + AttributeWebEngineDescription, + AttributeWebEngineVersion, + AttributeOTelScopeName, + AttributeOTelScopeVersion, + AttributeOTelLibraryName, + AttributeOTelLibraryVersion, + } +} diff --git a/vendor/go.opentelemetry.io/collector/semconv/v1.25.0/generated_trace.go b/vendor/go.opentelemetry.io/collector/semconv/v1.25.0/generated_trace.go new file mode 100644 index 00000000000..b214a9f4fb0 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/semconv/v1.25.0/generated_trace.go @@ -0,0 +1,245 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv + +// Operations that access some remote service. +const ( + // The service.name of the remote service. SHOULD be equal to the actual + // service.name resource attribute of the remote service if any. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'AuthTokenCache' + AttributePeerService = "peer.service" +) + +// Span attributes used by AWS Lambda (in addition to general `faas` +// attributes). +const ( + // The full invoked ARN as provided on the Context passed to the function (Lambda- + // Runtime-Invoked-Function-ARN header on the /runtime/invocation/next + // applicable). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' + // Note: This may be different from cloud.resource_id if an alias is involved. + AttributeAWSLambdaInvokedARN = "aws.lambda.invoked_arn" +) + +// Semantic conventions for the OpenTracing Shim +const ( + // Parent-child Reference type + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Note: The causal relationship between a child Span and a parent Span. 
+ AttributeOpentracingRefType = "opentracing.ref_type" +) + +const ( + // The parent Span depends on the child Span in some capacity + AttributeOpentracingRefTypeChildOf = "child_of" + // The parent Span doesn't depend in any way on the result of the child Span + AttributeOpentracingRefTypeFollowsFrom = "follows_from" +) + +// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's +// concepts. +const ( + // Name of the code, either "OK" or "ERROR". MUST NOT be set + // if the status code is UNSET. + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + AttributeOTelStatusCode = "otel.status_code" + // Description of the Status if it has a value, otherwise not set. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'resource not found' + AttributeOTelStatusDescription = "otel.status_description" +) + +const ( + // The operation has been validated by an Application developer or Operator to have completed successfully + AttributeOTelStatusCodeOk = "OK" + // The operation contains an error + AttributeOTelStatusCodeError = "ERROR" +) + +// The `aws` conventions apply to operations using the AWS SDK. They map +// request or response parameters in AWS SDK API calls to attributes on a Span. +// The conventions have been collected over time based on feedback from AWS +// users of tracing and will continue to evolve as new interesting conventions +// are found. +// Some descriptions are also provided for populating general OpenTelemetry +// semantic conventions based on these APIs. +const ( + // The AWS request ID as returned in the response headers x-amz-request-id or + // x-amz-requestid. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ' + AttributeAWSRequestID = "aws.request_id" +) + +// Attributes that exist for S3 request types. +const ( + // The S3 bucket name the request refers to. Corresponds to the --bucket parameter + // of the S3 API operations. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'some-bucket-name' + // Note: The bucket attribute is applicable to all S3 operations that reference a + // bucket, i.e. that require the bucket name as a mandatory parameter. + // This applies to almost all S3 operations except list-buckets. + AttributeAWSS3Bucket = "aws.s3.bucket" + // The source object (in the form bucket/key) for the copy operation. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'someFile.yml' + // Note: The copy_source attribute applies to S3 copy operations and corresponds + // to the --copy-source parameter + // of the copy-object operation within the S3 API. + // This applies in particular to the following operations:
+ //   • copy-object + //   • upload-part-copy
    + AttributeAWSS3CopySource = "aws.s3.copy_source" + // The delete request container that specifies the objects to be deleted. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string} + // ],Quiet=boolean' + // Note: The delete attribute is only applicable to the delete-object operation. + // The delete attribute corresponds to the --delete parameter of the + // delete-objects operation within the S3 API. + AttributeAWSS3Delete = "aws.s3.delete" + // The S3 object key the request refers to. Corresponds to the --key parameter of + // the S3 API operations. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'someFile.yml' + // Note: The key attribute is applicable to all object-related S3 operations, i.e. + // that require the object key as a mandatory parameter. + // This applies in particular to the following operations:
+ //   • copy-object + //   • delete-object + //   • get-object + //   • head-object + //   • put-object + //   • restore-object + //   • select-object-content + //   • abort-multipart-upload + //   • complete-multipart-upload + //   • create-multipart-upload + //   • list-parts + //   • upload-part + //   • upload-part-copy
    + AttributeAWSS3Key = "aws.s3.key" + // The part number of the part being uploaded in a multipart-upload operation. + // This is a positive integer between 1 and 10,000. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 3456 + // Note: The part_number attribute is only applicable to the upload-part + // and upload-part-copy operations. + // The part_number attribute corresponds to the --part-number parameter of the + // upload-part operation within the S3 API. + AttributeAWSS3PartNumber = "aws.s3.part_number" + // Upload ID that identifies the multipart upload. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ' + // Note: The upload_id attribute applies to S3 multipart-upload operations and + // corresponds to the --upload-id parameter + // of the S3 API multipart operations. + // This applies in particular to the following operations:
+ //   • abort-multipart-upload + //   • complete-multipart-upload + //   • list-parts + //   • upload-part + //   • upload-part-copy
    + AttributeAWSS3UploadID = "aws.s3.upload_id" +) + +// Semantic conventions to apply when instrumenting the GraphQL implementation. +// They map GraphQL operations to attributes on a Span. +const ( + // The GraphQL document being executed. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'query findBookByID { bookByID(id: ?) { name } }' + // Note: The value may be sanitized to exclude sensitive information. + AttributeGraphqlDocument = "graphql.document" + // The name of the operation being executed. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'findBookByID' + AttributeGraphqlOperationName = "graphql.operation.name" + // The type of the operation being executed. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'query', 'mutation', 'subscription' + AttributeGraphqlOperationType = "graphql.operation.type" +) + +const ( + // GraphQL query + AttributeGraphqlOperationTypeQuery = "query" + // GraphQL mutation + AttributeGraphqlOperationTypeMutation = "mutation" + // GraphQL subscription + AttributeGraphqlOperationTypeSubscription = "subscription" +) + +func GetTraceSemanticConventionAttributeNames() []string { + return []string{ + AttributePeerService, + AttributeAWSLambdaInvokedARN, + AttributeOpentracingRefType, + AttributeOTelStatusCode, + AttributeOTelStatusDescription, + AttributeAWSRequestID, + AttributeAWSS3Bucket, + AttributeAWSS3CopySource, + AttributeAWSS3Delete, + AttributeAWSS3Key, + AttributeAWSS3PartNumber, + AttributeAWSS3UploadID, + AttributeGraphqlDocument, + AttributeGraphqlOperationName, + AttributeGraphqlOperationType, + } +} diff --git a/vendor/go.opentelemetry.io/collector/semconv/v1.25.0/schema.go b/vendor/go.opentelemetry.io/collector/semconv/v1.25.0/schema.go new file mode 100644 index 00000000000..3739fe9ef81 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/semconv/v1.25.0/schema.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/collector/semconv/v1.25.0" + +// SchemaURL is the schema URL that matches the version of the semantic conventions +// that this package defines. Semconv packages starting from v1.4.0 must declare +// non-empty schema URL in the form https://opentelemetry.io/schemas/ +const SchemaURL = "https://opentelemetry.io/schemas/1.25.0" diff --git a/vendor/go.opentelemetry.io/collector/semconv/v1.26.0/doc.go b/vendor/go.opentelemetry.io/collector/semconv/v1.26.0/doc.go new file mode 100644 index 00000000000..c8bb8c57e6a --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/semconv/v1.26.0/doc.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package semconv implements OpenTelemetry semantic conventions. +// +// OpenTelemetry semantic conventions are agreed standardized naming +// patterns for OpenTelemetry things. This package represents the v1.26.0 +// version of the OpenTelemetry semantic conventions. 
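+ // ------------------------------------------------------------------------
+ // Illustrative sketch (not part of the vendored files): the v1.25.0
+ // constants above are plain strings, so collector components typically use
+ // them as pdata attribute keys, and SchemaURL to label which convention
+ // version the keys follow. ptrace is
+ // go.opentelemetry.io/collector/pdata/ptrace; the annotateS3Span helper is
+ // hypothetical:
+ //
+ //	import (
+ //		"go.opentelemetry.io/collector/pdata/ptrace"
+ //
+ //		semconv "go.opentelemetry.io/collector/semconv/v1.25.0"
+ //	)
+ //
+ //	// annotateS3Span records the bucket and key for an S3 get-object call.
+ //	func annotateS3Span(span ptrace.Span, bucket, key string) {
+ //		span.Attributes().PutStr(semconv.AttributeAWSS3Bucket, bucket)
+ //		span.Attributes().PutStr(semconv.AttributeAWSS3Key, key)
+ //	}
+ //
+ // A ptrace.ResourceSpans can likewise carry the convention version via
+ // rs.SetSchemaUrl(semconv.SchemaURL).
+ // ------------------------------------------------------------------------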
+package semconv // import "go.opentelemetry.io/collector/semconv/v1.26.0" diff --git a/vendor/go.opentelemetry.io/collector/semconv/v1.26.0/generated_attribute_group.go b/vendor/go.opentelemetry.io/collector/semconv/v1.26.0/generated_attribute_group.go new file mode 100644 index 00000000000..98e34f3b9f7 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/semconv/v1.26.0/generated_attribute_group.go @@ -0,0 +1,5331 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv + +// The Android platform on which the Android application is running. +const ( + // Uniquely identifies the framework API revision offered by a version + // (os.version) of the android operating system. More information can be found + // here. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '33', '32' + AttributeAndroidOSAPILevel = "android.os.api_level" +) + +// ASP.NET Core attributes +const ( + // Rate-limiting result, shows whether the lease was acquired or contains a + // rejection reason + // + // Type: Enum + // Requirement Level: Required + // Stability: stable + // Examples: 'acquired', 'request_canceled' + AttributeAspnetcoreRateLimitingResult = "aspnetcore.rate_limiting.result" + // Full type name of the IExceptionHandler implementation that handled the + // exception. + // + // Type: string + // Requirement Level: Conditionally Required - if and only if the exception was + // handled by this handler. + // Stability: stable + // Examples: 'Contoso.MyHandler' + AttributeAspnetcoreDiagnosticsHandlerType = "aspnetcore.diagnostics.handler.type" + // ASP.NET Core exception middleware handling result + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + // Examples: 'handled', 'unhandled' + AttributeAspnetcoreDiagnosticsExceptionResult = "aspnetcore.diagnostics.exception.result" + // Rate limiting policy name. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'fixed', 'sliding', 'token' + AttributeAspnetcoreRateLimitingPolicy = "aspnetcore.rate_limiting.policy" + // Flag indicating if request was handled by the application pipeline. + // + // Type: boolean + // Requirement Level: Optional + // Stability: stable + // Examples: True + AttributeAspnetcoreRequestIsUnhandled = "aspnetcore.request.is_unhandled" + // A value that indicates whether the matched route is a fallback route. 
+ // + // Type: boolean + // Requirement Level: Optional + // Stability: stable + // Examples: True + AttributeAspnetcoreRoutingIsFallback = "aspnetcore.routing.is_fallback" + // Match result - success or failure + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + // Examples: 'success', 'failure' + AttributeAspnetcoreRoutingMatchStatus = "aspnetcore.routing.match_status" +) + +const ( + // Lease was acquired + AttributeAspnetcoreRateLimitingResultAcquired = "acquired" + // Lease request was rejected by the endpoint limiter + AttributeAspnetcoreRateLimitingResultEndpointLimiter = "endpoint_limiter" + // Lease request was rejected by the global limiter + AttributeAspnetcoreRateLimitingResultGlobalLimiter = "global_limiter" + // Lease request was canceled + AttributeAspnetcoreRateLimitingResultRequestCanceled = "request_canceled" +) + +const ( + // Exception was handled by the exception handling middleware + AttributeAspnetcoreDiagnosticsExceptionResultHandled = "handled" + // Exception was not handled by the exception handling middleware + AttributeAspnetcoreDiagnosticsExceptionResultUnhandled = "unhandled" + // Exception handling was skipped because the response had started + AttributeAspnetcoreDiagnosticsExceptionResultSkipped = "skipped" + // Exception handling didn't run because the request was aborted + AttributeAspnetcoreDiagnosticsExceptionResultAborted = "aborted" +) + +const ( + // Match succeeded + AttributeAspnetcoreRoutingMatchStatusSuccess = "success" + // Match failed + AttributeAspnetcoreRoutingMatchStatusFailure = "failure" +) + +// Generic attributes for AWS services. +const ( + // The AWS request ID as returned in the response headers x-amz-request-id or + // x-amz-requestid. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ' + AttributeAWSRequestID = "aws.request_id" +) + +// Attributes for AWS DynamoDB. +const ( + // The JSON-serialized value of each item in the AttributeDefinitions request + // field. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' + AttributeAWSDynamoDBAttributeDefinitions = "aws.dynamodb.attribute_definitions" + // The value of the AttributesToGet request parameter. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'lives', 'id' + AttributeAWSDynamoDBAttributesToGet = "aws.dynamodb.attributes_to_get" + // The value of the ConsistentRead request parameter. + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + AttributeAWSDynamoDBConsistentRead = "aws.dynamodb.consistent_read" + // The JSON-serialized value of each item in the ConsumedCapacity response field. 
+ // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" : { + // "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits": + // number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number, + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }, + // "ReadCapacityUnits": number, "Table": { "CapacityUnits": number, + // "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName": + // "string", "WriteCapacityUnits": number }' + AttributeAWSDynamoDBConsumedCapacity = "aws.dynamodb.consumed_capacity" + // The value of the Count response parameter. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 10 + AttributeAWSDynamoDBCount = "aws.dynamodb.count" + // The value of the ExclusiveStartTableName request parameter. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Users', 'CatsTable' + AttributeAWSDynamoDBExclusiveStartTable = "aws.dynamodb.exclusive_start_table" + // The JSON-serialized value of each item in the GlobalSecondaryIndexUpdates + // request field. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, + // "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits": + // number } }' + AttributeAWSDynamoDBGlobalSecondaryIndexUpdates = "aws.dynamodb.global_secondary_index_updates" + // The JSON-serialized value of each item of the GlobalSecondaryIndexes request + // field + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": "string", + // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], + // "ProjectionType": "string" }, "ProvisionedThroughput": { "ReadCapacityUnits": + // number, "WriteCapacityUnits": number } }' + AttributeAWSDynamoDBGlobalSecondaryIndexes = "aws.dynamodb.global_secondary_indexes" + // The value of the IndexName request parameter. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'name_to_group' + AttributeAWSDynamoDBIndexName = "aws.dynamodb.index_name" + // The JSON-serialized value of the ItemCollectionMetrics response field. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob, + // "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" : + // "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S": + // "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }' + AttributeAWSDynamoDBItemCollectionMetrics = "aws.dynamodb.item_collection_metrics" + // The value of the Limit request parameter. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 10 + AttributeAWSDynamoDBLimit = "aws.dynamodb.limit" + // The JSON-serialized value of each item of the LocalSecondaryIndexes request + // field. 
+ // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: '{ "IndexARN": "string", "IndexName": "string", "IndexSizeBytes": + // number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string", + // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], + // "ProjectionType": "string" } }' + AttributeAWSDynamoDBLocalSecondaryIndexes = "aws.dynamodb.local_secondary_indexes" + // The value of the ProjectionExpression request parameter. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Title', 'Title, Price, Color', 'Title, Description, RelatedItems, + // ProductReviews' + AttributeAWSDynamoDBProjection = "aws.dynamodb.projection" + // The value of the ProvisionedThroughput.ReadCapacityUnits request parameter. + // + // Type: double + // Requirement Level: Optional + // Stability: experimental + // Examples: 1.0, 2.0 + AttributeAWSDynamoDBProvisionedReadCapacity = "aws.dynamodb.provisioned_read_capacity" + // The value of the ProvisionedThroughput.WriteCapacityUnits request parameter. + // + // Type: double + // Requirement Level: Optional + // Stability: experimental + // Examples: 1.0, 2.0 + AttributeAWSDynamoDBProvisionedWriteCapacity = "aws.dynamodb.provisioned_write_capacity" + // The value of the ScanIndexForward request parameter. + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + AttributeAWSDynamoDBScanForward = "aws.dynamodb.scan_forward" + // The value of the ScannedCount response parameter. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 50 + AttributeAWSDynamoDBScannedCount = "aws.dynamodb.scanned_count" + // The value of the Segment request parameter. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 10 + AttributeAWSDynamoDBSegment = "aws.dynamodb.segment" + // The value of the Select request parameter. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'ALL_ATTRIBUTES', 'COUNT' + AttributeAWSDynamoDBSelect = "aws.dynamodb.select" + // The number of items in the TableNames response parameter. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 20 + AttributeAWSDynamoDBTableCount = "aws.dynamodb.table_count" + // The keys in the RequestItems object field. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Users', 'Cats' + AttributeAWSDynamoDBTableNames = "aws.dynamodb.table_names" + // The value of the TotalSegments request parameter. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 100 + AttributeAWSDynamoDBTotalSegments = "aws.dynamodb.total_segments" +) + +// Attributes for AWS Elastic Container Service (ECS). +const ( + // The ID of a running ECS task. The ID MUST be extracted from task.arn. + // + // Type: string + // Requirement Level: Conditionally Required - If and only if `task.arn` is + // populated. + // Stability: experimental + // Examples: '10838bed-421f-43ef-870a-f43feacbbb5b', + // '23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd' + AttributeAWSECSTaskID = "aws.ecs.task.id" + // The ARN of an ECS cluster. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AttributeAWSECSClusterARN = "aws.ecs.cluster.arn" + // The Amazon Resource Name (ARN) of an ECS container instance. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'arn:aws:ecs:us- + // west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' + AttributeAWSECSContainerARN = "aws.ecs.container.arn" + // The launch type for an ECS task. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeAWSECSLaunchtype = "aws.ecs.launchtype" + // The ARN of a running ECS task. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'arn:aws:ecs:us- + // west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b', + // 'arn:aws:ecs:us-west-1:123456789123:task/my-cluster/task- + // id/23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd' + AttributeAWSECSTaskARN = "aws.ecs.task.arn" + // The family name of the ECS task definition used to create the ECS task. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry-family' + AttributeAWSECSTaskFamily = "aws.ecs.task.family" + // The revision for the task definition used to create the ECS task. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '8', '26' + AttributeAWSECSTaskRevision = "aws.ecs.task.revision" +) + +const ( + // ec2 + AttributeAWSECSLaunchtypeEC2 = "ec2" + // fargate + AttributeAWSECSLaunchtypeFargate = "fargate" +) + +// Attributes for AWS Elastic Kubernetes Service (EKS). +const ( + // The ARN of an EKS cluster. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AttributeAWSEKSClusterARN = "aws.eks.cluster.arn" +) + +// Attributes for AWS Logs. +const ( + // The Amazon Resource Name(s) (ARN) of the AWS log group(s). + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' + // Note: See the log group ARN format documentation. + AttributeAWSLogGroupARNs = "aws.log.group.arns" + // The name(s) of the AWS log group(s) an application is writing to. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: '/aws/lambda/my-function', 'opentelemetry-service' + // Note: Multiple log groups must be supported for cases like multi-container + // applications, where a single application has sidecar containers, and each write + // to their own log group. + AttributeAWSLogGroupNames = "aws.log.group.names" + // The ARN(s) of the AWS log stream(s). + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log- + // stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + // Note: See the log stream ARN format documentation. One log group can contain + // several log streams, so these ARNs necessarily identify both a log group and a + // log stream. + AttributeAWSLogStreamARNs = "aws.log.stream.arns" + // The name(s) of the AWS log stream(s) an application is writing to. 
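+ // ------------------------------------------------------------------------
+ // Illustrative sketch (not part of the vendored file): aws.log.group.names
+ // above is typed string[], so in collector pdata it is recorded as a slice
+ // value rather than a single string — which is how the multiple sidecar log
+ // groups mentioned in the note are represented. pcommon is
+ // go.opentelemetry.io/collector/pdata/pcommon; the helper name is
+ // hypothetical:
+ //
+ //	func putLogGroupNames(attrs pcommon.Map, names []string) {
+ //		s := attrs.PutEmptySlice(AttributeAWSLogGroupNames)
+ //		for _, n := range names {
+ //			s.AppendEmpty().SetStr(n)
+ //		}
+ //	}
+ // ------------------------------------------------------------------------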
+ // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + AttributeAWSLogStreamNames = "aws.log.stream.names" +) + +// Attributes for AWS Lambda. +const ( + // The full invoked ARN as provided on the Context passed to the function (Lambda- + // Runtime-Invoked-Function-ARN header on the /runtime/invocation/next + // applicable). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' + // Note: This may be different from cloud.resource_id if an alias is involved. + AttributeAWSLambdaInvokedARN = "aws.lambda.invoked_arn" +) + +// Attributes for AWS S3. +const ( + // The S3 bucket name the request refers to. Corresponds to the --bucket parameter + // of the S3 API operations. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'some-bucket-name' + // Note: The bucket attribute is applicable to all S3 operations that reference a + // bucket, i.e. that require the bucket name as a mandatory parameter. + // This applies to almost all S3 operations except list-buckets. + AttributeAWSS3Bucket = "aws.s3.bucket" + // The source object (in the form bucket/key) for the copy operation. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'someFile.yml' + // Note: The copy_source attribute applies to S3 copy operations and corresponds + // to the --copy-source parameter + // of the copy-object operation within the S3 API. + // This applies in particular to the following operations:
+ //   • copy-object + //   • upload-part-copy
    + AttributeAWSS3CopySource = "aws.s3.copy_source" + // The delete request container that specifies the objects to be deleted. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string} + // ],Quiet=boolean' + // Note: The delete attribute is only applicable to the delete-object operation. + // The delete attribute corresponds to the --delete parameter of the + // delete-objects operation within the S3 API. + AttributeAWSS3Delete = "aws.s3.delete" + // The S3 object key the request refers to. Corresponds to the --key parameter of + // the S3 API operations. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'someFile.yml' + // Note: The key attribute is applicable to all object-related S3 operations, i.e. + // that require the object key as a mandatory parameter. + // This applies in particular to the following operations:
+ //   • copy-object + //   • delete-object + //   • get-object + //   • head-object + //   • put-object + //   • restore-object + //   • select-object-content + //   • abort-multipart-upload + //   • complete-multipart-upload + //   • create-multipart-upload + //   • list-parts + //   • upload-part + //   • upload-part-copy
    + AttributeAWSS3Key = "aws.s3.key" + // The part number of the part being uploaded in a multipart-upload operation. + // This is a positive integer between 1 and 10,000. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 3456 + // Note: The part_number attribute is only applicable to the upload-part + // and upload-part-copy operations. + // The part_number attribute corresponds to the --part-number parameter of the + // upload-part operation within the S3 API. + AttributeAWSS3PartNumber = "aws.s3.part_number" + // Upload ID that identifies the multipart upload. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ' + // Note: The upload_id attribute applies to S3 multipart-upload operations and + // corresponds to the --upload-id parameter + // of the S3 API multipart operations. + // This applies in particular to the following operations:
+ //   • abort-multipart-upload + //   • complete-multipart-upload + //   • list-parts + //   • upload-part + //   • upload-part-copy
    + AttributeAWSS3UploadID = "aws.s3.upload_id" +) + +// The web browser attributes +const ( + // Array of brand name and version separated by a space + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' + // Note: This value is intended to be taken from the UA client hints API + // (navigator.userAgentData.brands). + AttributeBrowserBrands = "browser.brands" + // Preferred language of the user using the browser + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'en', 'en-US', 'fr', 'fr-FR' + // Note: This value is intended to be taken from the Navigator API + // navigator.language. + AttributeBrowserLanguage = "browser.language" + // A boolean that is true if the browser is running on a mobile device + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + // Note: This value is intended to be taken from the UA client hints API + // (navigator.userAgentData.mobile). If unavailable, this attribute SHOULD be left + // unset. + AttributeBrowserMobile = "browser.mobile" + // The platform on which the browser is running + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Windows', 'macOS', 'Android' + // Note: This value is intended to be taken from the UA client hints API + // (navigator.userAgentData.platform). If unavailable, the legacy + // navigator.platform API SHOULD NOT be used instead and this attribute SHOULD be + // left unset in order for the values to be consistent. + // The list of possible values is defined in the W3C User-Agent Client Hints + // specification. Note that some (but not all) of these values can overlap with + // values in the os.type and os.name attributes. However, for consistency, the + // values in the browser.platform attribute should capture the exact value that + // the user agent provides. + AttributeBrowserPlatform = "browser.platform" +) + +// These attributes may be used to describe the client in a connection-based +// network interaction where there is one side that initiates the connection +// (the client is the side that initiates the connection). This covers all TCP +// network interactions since TCP is connection-based and one side initiates +// the connection (an exception is made for peer-to-peer communication over TCP +// where the "user-facing" surface of the protocol / API doesn't expose a clear +// notion of client and server). This also covers UDP network interactions +// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS. +const ( + // Client address - domain name if available without reverse DNS lookup; + // otherwise, IP address or Unix domain socket name. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'client.example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the server side, and when communicating through an + // intermediary, client.address SHOULD represent the client address behind any + // intermediaries, for example proxies, if it's available. + AttributeClientAddress = "client.address" + // Client port number. + // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 65123 + // Note: When observed from the server side, and when communicating through an + // intermediary, client.port SHOULD represent the client port behind any + // intermediaries, for example proxies, if it's available. 
+ AttributeClientPort = "client.port" +) + +// A cloud environment (e.g. GCP, Azure, AWS). +const ( + // The cloud account ID the resource is assigned to. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '111111111111', 'opentelemetry' + AttributeCloudAccountID = "cloud.account.id" + // Cloud regions often have multiple, isolated locations known as zones to + // increase availability. Availability zone represents the zone where the resource + // is running. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'us-east-1c' + // Note: Availability zones are called "zones" on Alibaba Cloud and + // Google Cloud. + AttributeCloudAvailabilityZone = "cloud.availability_zone" + // The cloud platform in use. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Note: The prefix of the service SHOULD match the one specified in + // cloud.provider. + AttributeCloudPlatform = "cloud.platform" + // Name of the cloud provider. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeCloudProvider = "cloud.provider" + // The geographical region the resource is running. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'us-central1', 'us-east-1' + // Note: Refer to your provider's docs to see the available regions, for example + // Alibaba Cloud regions, AWS regions, Azure regions, Google Cloud regions, or + // Tencent Cloud regions. + AttributeCloudRegion = "cloud.region" + // Cloud provider-specific native identifier of the monitored cloud resource (e.g. + // an ARN on AWS, a fully qualified resource ID on Azure, a full resource name on + // GCP) + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function', '//run.googl + // eapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID', '/sub + // scriptions//resourceGroups//providers/Microsoft.Web/sites + // //functions/' + // Note: On some cloud providers, it may not be possible to determine the full ID + // at startup, + // so it may be necessary to set cloud.resource_id as a span attribute instead.The + // exact value to use for cloud.resource_id depends on the cloud provider. + // The following well-known definitions MUST be used if you set this attribute and + // they apply:
+ //   • AWS Lambda: The function ARN. Take care not to use the "invoked ARN" directly but replace any alias suffix with the resolved function version, as the same runtime instance may be invocable with multiple different aliases. + //   • GCP: The URI of the resource + //   • Azure: The Fully Qualified Resource ID of the invoked function, not the function app, having the form /subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/. This means that a span attribute MUST be used, as an Azure function app can host multiple functions that would usually share a TracerProvider.
    + AttributeCloudResourceID = "cloud.resource_id" +) + +const ( + // Alibaba Cloud Elastic Compute Service + AttributeCloudPlatformAlibabaCloudECS = "alibaba_cloud_ecs" + // Alibaba Cloud Function Compute + AttributeCloudPlatformAlibabaCloudFc = "alibaba_cloud_fc" + // Red Hat OpenShift on Alibaba Cloud + AttributeCloudPlatformAlibabaCloudOpenshift = "alibaba_cloud_openshift" + // AWS Elastic Compute Cloud + AttributeCloudPlatformAWSEC2 = "aws_ec2" + // AWS Elastic Container Service + AttributeCloudPlatformAWSECS = "aws_ecs" + // AWS Elastic Kubernetes Service + AttributeCloudPlatformAWSEKS = "aws_eks" + // AWS Lambda + AttributeCloudPlatformAWSLambda = "aws_lambda" + // AWS Elastic Beanstalk + AttributeCloudPlatformAWSElasticBeanstalk = "aws_elastic_beanstalk" + // AWS App Runner + AttributeCloudPlatformAWSAppRunner = "aws_app_runner" + // Red Hat OpenShift on AWS (ROSA) + AttributeCloudPlatformAWSOpenshift = "aws_openshift" + // Azure Virtual Machines + AttributeCloudPlatformAzureVM = "azure_vm" + // Azure Container Apps + AttributeCloudPlatformAzureContainerApps = "azure_container_apps" + // Azure Container Instances + AttributeCloudPlatformAzureContainerInstances = "azure_container_instances" + // Azure Kubernetes Service + AttributeCloudPlatformAzureAKS = "azure_aks" + // Azure Functions + AttributeCloudPlatformAzureFunctions = "azure_functions" + // Azure App Service + AttributeCloudPlatformAzureAppService = "azure_app_service" + // Azure Red Hat OpenShift + AttributeCloudPlatformAzureOpenshift = "azure_openshift" + // Google Bare Metal Solution (BMS) + AttributeCloudPlatformGCPBareMetalSolution = "gcp_bare_metal_solution" + // Google Cloud Compute Engine (GCE) + AttributeCloudPlatformGCPComputeEngine = "gcp_compute_engine" + // Google Cloud Run + AttributeCloudPlatformGCPCloudRun = "gcp_cloud_run" + // Google Cloud Kubernetes Engine (GKE) + AttributeCloudPlatformGCPKubernetesEngine = "gcp_kubernetes_engine" + // Google Cloud Functions (GCF) + AttributeCloudPlatformGCPCloudFunctions = "gcp_cloud_functions" + // Google Cloud App Engine (GAE) + AttributeCloudPlatformGCPAppEngine = "gcp_app_engine" + // Red Hat OpenShift on Google Cloud + AttributeCloudPlatformGCPOpenshift = "gcp_openshift" + // Red Hat OpenShift on IBM Cloud + AttributeCloudPlatformIbmCloudOpenshift = "ibm_cloud_openshift" + // Tencent Cloud Cloud Virtual Machine (CVM) + AttributeCloudPlatformTencentCloudCvm = "tencent_cloud_cvm" + // Tencent Cloud Elastic Kubernetes Service (EKS) + AttributeCloudPlatformTencentCloudEKS = "tencent_cloud_eks" + // Tencent Cloud Serverless Cloud Function (SCF) + AttributeCloudPlatformTencentCloudScf = "tencent_cloud_scf" +) + +const ( + // Alibaba Cloud + AttributeCloudProviderAlibabaCloud = "alibaba_cloud" + // Amazon Web Services + AttributeCloudProviderAWS = "aws" + // Microsoft Azure + AttributeCloudProviderAzure = "azure" + // Google Cloud Platform + AttributeCloudProviderGCP = "gcp" + // Heroku Platform as a Service + AttributeCloudProviderHeroku = "heroku" + // IBM Cloud + AttributeCloudProviderIbmCloud = "ibm_cloud" + // Tencent Cloud + AttributeCloudProviderTencentCloud = "tencent_cloud" +) + +// Attributes for CloudEvents. +const ( + // The event_id uniquely identifies the event. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' + AttributeCloudeventsEventID = "cloudevents.event_id" + // The source identifies the context in which an event happened. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'https://github.com/cloudevents', '/cloudevents/spec/pull/123', 'my- + // service' + AttributeCloudeventsEventSource = "cloudevents.event_source" + // The version of the CloudEvents specification which the event uses. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '1.0' + AttributeCloudeventsEventSpecVersion = "cloudevents.event_spec_version" + // The subject of the event in the context of the event producer (identified by + // source). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'mynewfile.jpg' + AttributeCloudeventsEventSubject = "cloudevents.event_subject" + // The event_type contains a value describing the type of event related to the + // originating occurrence. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'com.github.pull_request.opened', 'com.example.object.deleted.v2' + AttributeCloudeventsEventType = "cloudevents.event_type" +) + +// These attributes allow to report this unit of code and therefore to provide +// more context about the span. +const ( + // The column number in code.filepath best representing the operation. It SHOULD + // point within the code unit named in code.function. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 16 + AttributeCodeColumn = "code.column" + // The source code file name that identifies the code unit as uniquely as possible + // (preferably an absolute file path). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '/usr/local/MyApplication/content_root/app/index.php' + AttributeCodeFilepath = "code.filepath" + // The method or function name, or equivalent (usually rightmost part of the code + // unit's name). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'serveRequest' + AttributeCodeFunction = "code.function" + // The line number in code.filepath best representing the operation. It SHOULD + // point within the code unit named in code.function. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 42 + AttributeCodeLineNumber = "code.lineno" + // The "namespace" within which code.function is defined. Usually the + // qualified class or module name, such that code.namespace + some separator + + // code.function form a unique identifier for the code unit. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'com.example.MyHTTPService' + AttributeCodeNamespace = "code.namespace" + // A stacktrace as a string in the natural representation for the language + // runtime. The representation is to be determined and documented by each language + // SIG. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'at com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + AttributeCodeStacktrace = "code.stacktrace" +) + +// A container instance. +const ( + // The command used to run the container (i.e. the command name). 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'otelcontribcol' + // Note: If using embedded credentials or sensitive data, it is recommended to + // remove them to prevent potential leakage. + AttributeContainerCommand = "container.command" + // All the command arguments (including the command/executable itself) run by the + // container. [2] + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'otelcontribcol, --config, config.yaml' + AttributeContainerCommandArgs = "container.command_args" + // The full command run by the container as a single string representing the full + // command. [2] + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'otelcontribcol --config config.yaml' + AttributeContainerCommandLine = "container.command_line" + // The CPU state for this data point. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'user', 'kernel' + AttributeContainerCPUState = "container.cpu.state" + // Container ID. Usually a UUID, as for example used to identify Docker + // containers. The UUID might be abbreviated. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'a3bf90e006b2' + AttributeContainerID = "container.id" + // Runtime specific image identifier. Usually a hash algorithm followed by a UUID. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: + // 'sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f' + // Note: Docker defines a sha256 of the image id; container.image.id corresponds + // to the Image field from the Docker container inspect API endpoint. + // K8S defines a link to the container registry repository with digest "imageID": + // "registry.azurecr.io /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e + // 8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625". + // The ID is assigned by the container runtime and can vary in different + // environments. Consider using oci.manifest.digest if it is important to identify + // the same image in different environments/runtimes. + AttributeContainerImageID = "container.image.id" + // Name of the image the container was built on. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'gcr.io/opentelemetry/operator' + AttributeContainerImageName = "container.image.name" + // Repo digests of the container image as provided by the container runtime. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d7 + // 02d249a0ccb', 'internal.registry.example.com:5000/example@sha256:b69959407d21e8 + // a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578' + // Note: Docker and CRI report those under the RepoDigests field. + AttributeContainerImageRepoDigests = "container.image.repo_digests" + // Container image tags. An example can be found in Docker Image Inspect. Should + // be only the section of the full name for example from + // registry.example.com/my-org/my-image:. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'v1.27.1', '3.5.7-0' + AttributeContainerImageTags = "container.image.tags" + // Container name used by container runtime. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry-autoconf' + AttributeContainerName = "container.name" + // The container runtime managing this container. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'docker', 'containerd', 'rkt' + AttributeContainerRuntime = "container.runtime" +) + +const ( + // When tasks of the cgroup are in user mode (Linux). When all container processes are in user mode (Windows) + AttributeContainerCPUStateUser = "user" + // When CPU is used by the system (host OS) + AttributeContainerCPUStateSystem = "system" + // When tasks of the cgroup are in kernel mode (Linux). When all container processes are in kernel mode (Windows) + AttributeContainerCPUStateKernel = "kernel" +) + +// This group defines the attributes used to describe telemetry in the context +// of databases. +const ( + // The name of the connection pool; unique within the instrumented application. In + // case the connection pool implementation doesn't provide a name, instrumentation + // should use a combination of server.address and server.port attributes formatted + // as server.address:server.port. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'myDataSource' + AttributeDBClientConnectionsPoolName = "db.client.connections.pool.name" + // The state of a connection in the pool + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'idle' + AttributeDBClientConnectionsState = "db.client.connections.state" + // The name of a collection (table, container) within the database. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'public.users', 'customers' + // Note: If the collection name is parsed from the query, it SHOULD match the + // value provided in the query and may be qualified with the schema and database + // name. + // It is RECOMMENDED to capture the value as provided by the application without + // attempting to do any case normalization. + AttributeDBCollectionName = "db.collection.name" + // The name of the database, fully qualified within the server address and port. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'customers', 'test.users' + // Note: If a database system has multiple namespace components, they SHOULD be + // concatenated (potentially using database system specific conventions) from most + // general to most specific namespace component, and more specific namespaces + // SHOULD NOT be captured without the more general namespaces, to ensure that + // "startswith" queries for the more general namespaces will be valid. + // Semantic conventions for individual database systems SHOULD document what + // db.namespace means in the context of that system. + // It is RECOMMENDED to capture the value as provided by the application without + // attempting to do any case normalization. + AttributeDBNamespace = "db.namespace" + // The name of the operation or command being executed. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'findAndModify', 'HMSET', 'SELECT' + // Note: It is RECOMMENDED to capture the value as provided by the application + // without attempting to do any case normalization. + AttributeDBOperationName = "db.operation.name" + // The database query being executed. 
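+ //
+ // An illustrative sketch (pcommon map API assumed; all values are
+ // placeholders) of combining the db.* attributes in this group on a client
+ // span for a SQL query:
+ //
+ //     func putSQLAttrs(attrs pcommon.Map) {
+ //             attrs.PutStr(AttributeDBSystem, AttributeDBSystemPostgreSQL)
+ //             attrs.PutStr(AttributeDBNamespace, "customers")
+ //             attrs.PutStr(AttributeDBOperationName, "SELECT")
+ //             attrs.PutStr(AttributeDBCollectionName, "public.users")
+ //             attrs.PutStr(AttributeDBQueryText, "SELECT * FROM public.users WHERE id = ?")
+ //     }
+ //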
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'SELECT * FROM wuser_table where username = ?', 'SET mykey "WuValue"' + AttributeDBQueryText = "db.query.text" + // The database management system (DBMS) product as identified by the client + // instrumentation. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Note: The actual DBMS may differ from the one identified by the client. For + // example, when using PostgreSQL client libraries to connect to a CockroachDB, + // the db.system is set to postgresql based on the instrumentation's best + // knowledge. + AttributeDBSystem = "db.system" +) + +const ( + // idle + AttributeDBClientConnectionsStateIdle = "idle" + // used + AttributeDBClientConnectionsStateUsed = "used" +) + +const ( + // Some other SQL database. Fallback only. See notes + AttributeDBSystemOtherSQL = "other_sql" + // Microsoft SQL Server + AttributeDBSystemMSSQL = "mssql" + // Microsoft SQL Server Compact + AttributeDBSystemMssqlcompact = "mssqlcompact" + // MySQL + AttributeDBSystemMySQL = "mysql" + // Oracle Database + AttributeDBSystemOracle = "oracle" + // IBM DB2 + AttributeDBSystemDB2 = "db2" + // PostgreSQL + AttributeDBSystemPostgreSQL = "postgresql" + // Amazon Redshift + AttributeDBSystemRedshift = "redshift" + // Apache Hive + AttributeDBSystemHive = "hive" + // Cloudscape + AttributeDBSystemCloudscape = "cloudscape" + // HyperSQL DataBase + AttributeDBSystemHSQLDB = "hsqldb" + // Progress Database + AttributeDBSystemProgress = "progress" + // SAP MaxDB + AttributeDBSystemMaxDB = "maxdb" + // SAP HANA + AttributeDBSystemHanaDB = "hanadb" + // Ingres + AttributeDBSystemIngres = "ingres" + // FirstSQL + AttributeDBSystemFirstSQL = "firstsql" + // EnterpriseDB + AttributeDBSystemEDB = "edb" + // InterSystems Caché + AttributeDBSystemCache = "cache" + // Adabas (Adaptable Database System) + AttributeDBSystemAdabas = "adabas" + // Firebird + AttributeDBSystemFirebird = "firebird" + // Apache Derby + AttributeDBSystemDerby = "derby" + // FileMaker + AttributeDBSystemFilemaker = "filemaker" + // Informix + AttributeDBSystemInformix = "informix" + // InstantDB + AttributeDBSystemInstantDB = "instantdb" + // InterBase + AttributeDBSystemInterbase = "interbase" + // MariaDB + AttributeDBSystemMariaDB = "mariadb" + // Netezza + AttributeDBSystemNetezza = "netezza" + // Pervasive PSQL + AttributeDBSystemPervasive = "pervasive" + // PointBase + AttributeDBSystemPointbase = "pointbase" + // SQLite + AttributeDBSystemSqlite = "sqlite" + // Sybase + AttributeDBSystemSybase = "sybase" + // Teradata + AttributeDBSystemTeradata = "teradata" + // Vertica + AttributeDBSystemVertica = "vertica" + // H2 + AttributeDBSystemH2 = "h2" + // ColdFusion IMQ + AttributeDBSystemColdfusion = "coldfusion" + // Apache Cassandra + AttributeDBSystemCassandra = "cassandra" + // Apache HBase + AttributeDBSystemHBase = "hbase" + // MongoDB + AttributeDBSystemMongoDB = "mongodb" + // Redis + AttributeDBSystemRedis = "redis" + // Couchbase + AttributeDBSystemCouchbase = "couchbase" + // CouchDB + AttributeDBSystemCouchDB = "couchdb" + // Microsoft Azure Cosmos DB + AttributeDBSystemCosmosDB = "cosmosdb" + // Amazon DynamoDB + AttributeDBSystemDynamoDB = "dynamodb" + // Neo4j + AttributeDBSystemNeo4j = "neo4j" + // Apache Geode + AttributeDBSystemGeode = "geode" + // Elasticsearch + AttributeDBSystemElasticsearch = "elasticsearch" + // Memcached + AttributeDBSystemMemcached = "memcached" + // CockroachDB + 
AttributeDBSystemCockroachdb = "cockroachdb" + // OpenSearch + AttributeDBSystemOpensearch = "opensearch" + // ClickHouse + AttributeDBSystemClickhouse = "clickhouse" + // Cloud Spanner + AttributeDBSystemSpanner = "spanner" + // Trino + AttributeDBSystemTrino = "trino" +) + +// This group defines attributes for Cassandra. +const ( + // The consistency level of the query. Based on consistency values from CQL. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeDBCassandraConsistencyLevel = "db.cassandra.consistency_level" + // The data center of the coordinating node for a query. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'us-west-2' + AttributeDBCassandraCoordinatorDC = "db.cassandra.coordinator.dc" + // The ID of the coordinating node for a query. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' + AttributeDBCassandraCoordinatorID = "db.cassandra.coordinator.id" + // Whether or not the query is idempotent. + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + AttributeDBCassandraIdempotence = "db.cassandra.idempotence" + // The fetch size used for paging, i.e. how many rows will be returned at once. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 5000 + AttributeDBCassandraPageSize = "db.cassandra.page_size" + // The number of times a query was speculatively executed. Not set or 0 if the + // query was not executed speculatively. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 0, 2 + AttributeDBCassandraSpeculativeExecutionCount = "db.cassandra.speculative_execution_count" +) + +const ( + // all + AttributeDBCassandraConsistencyLevelAll = "all" + // each_quorum + AttributeDBCassandraConsistencyLevelEachQuorum = "each_quorum" + // quorum + AttributeDBCassandraConsistencyLevelQuorum = "quorum" + // local_quorum + AttributeDBCassandraConsistencyLevelLocalQuorum = "local_quorum" + // one + AttributeDBCassandraConsistencyLevelOne = "one" + // two + AttributeDBCassandraConsistencyLevelTwo = "two" + // three + AttributeDBCassandraConsistencyLevelThree = "three" + // local_one + AttributeDBCassandraConsistencyLevelLocalOne = "local_one" + // any + AttributeDBCassandraConsistencyLevelAny = "any" + // serial + AttributeDBCassandraConsistencyLevelSerial = "serial" + // local_serial + AttributeDBCassandraConsistencyLevelLocalSerial = "local_serial" +) + +// This group defines attributes for Azure Cosmos DB. +const ( + // Unique Cosmos client instance id. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d' + AttributeDBCosmosDBClientID = "db.cosmosdb.client_id" + // Cosmos client connection mode. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeDBCosmosDBConnectionMode = "db.cosmosdb.connection_mode" + // CosmosDB Operation Type. 
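+ //
+ // A rough sketch (pcommon map API assumed; the helper name is hypothetical)
+ // of recording a Cosmos DB response with the attributes in this group:
+ //
+ //     func putCosmosAttrs(attrs pcommon.Map, status, subStatus int64, charge float64) {
+ //             attrs.PutStr(AttributeDBCosmosDBOperationType, AttributeDBCosmosDBOperationTypeQuery)
+ //             attrs.PutInt(AttributeDBCosmosDBStatusCode, status)
+ //             attrs.PutInt(AttributeDBCosmosDBSubStatusCode, subStatus)
+ //             attrs.PutDouble(AttributeDBCosmosDBRequestCharge, charge)
+ //     }
+ //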
+ // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeDBCosmosDBOperationType = "db.cosmosdb.operation_type" + // RU consumed for that operation + // + // Type: double + // Requirement Level: Optional + // Stability: experimental + // Examples: 46.18, 1.0 + AttributeDBCosmosDBRequestCharge = "db.cosmosdb.request_charge" + // Request payload size in bytes + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + AttributeDBCosmosDBRequestContentLength = "db.cosmosdb.request_content_length" + // Cosmos DB status code. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 200, 201 + AttributeDBCosmosDBStatusCode = "db.cosmosdb.status_code" + // Cosmos DB sub status code. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 1000, 1002 + AttributeDBCosmosDBSubStatusCode = "db.cosmosdb.sub_status_code" +) + +const ( + // Gateway (HTTP) connections mode + AttributeDBCosmosDBConnectionModeGateway = "gateway" + // Direct connection + AttributeDBCosmosDBConnectionModeDirect = "direct" +) + +const ( + // invalid + AttributeDBCosmosDBOperationTypeInvalid = "Invalid" + // create + AttributeDBCosmosDBOperationTypeCreate = "Create" + // patch + AttributeDBCosmosDBOperationTypePatch = "Patch" + // read + AttributeDBCosmosDBOperationTypeRead = "Read" + // read_feed + AttributeDBCosmosDBOperationTypeReadFeed = "ReadFeed" + // delete + AttributeDBCosmosDBOperationTypeDelete = "Delete" + // replace + AttributeDBCosmosDBOperationTypeReplace = "Replace" + // execute + AttributeDBCosmosDBOperationTypeExecute = "Execute" + // query + AttributeDBCosmosDBOperationTypeQuery = "Query" + // head + AttributeDBCosmosDBOperationTypeHead = "Head" + // head_feed + AttributeDBCosmosDBOperationTypeHeadFeed = "HeadFeed" + // upsert + AttributeDBCosmosDBOperationTypeUpsert = "Upsert" + // batch + AttributeDBCosmosDBOperationTypeBatch = "Batch" + // query_plan + AttributeDBCosmosDBOperationTypeQueryPlan = "QueryPlan" + // execute_javascript + AttributeDBCosmosDBOperationTypeExecuteJavascript = "ExecuteJavaScript" +) + +// This group defines attributes for Elasticsearch. +const ( + // Represents the identifier of an Elasticsearch cluster. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'e9106fc68e3044f0b1475b04bf4ffd5f' + AttributeDBElasticsearchClusterName = "db.elasticsearch.cluster.name" + // Represents the human-readable identifier of the node/instance to which a + // request was routed. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'instance-0000000001' + AttributeDBElasticsearchNodeName = "db.elasticsearch.node.name" +) + +// Attributes for software deployments. +const ( + // Name of the deployment environment (aka deployment tier). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'staging', 'production' + // Note: deployment.environment does not affect the uniqueness constraints defined + // through + // the service.namespace, service.name and service.instance.id resource + // attributes. + // This implies that resources carrying the following attribute combinations MUST + // be + // considered to be identifying the same service:
+ //
+ //   • service.name=frontend, deployment.environment=production
+ //   • service.name=frontend, deployment.environment=staging.
+ //
    + AttributeDeploymentEnvironment = "deployment.environment" +) + +// Attributes that represents an occurrence of a lifecycle transition on the +// Android platform. +const ( + // Deprecated use the device.app.lifecycle event definition including + // android.state as a payload field instead. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Note: The Android lifecycle states are defined in Activity lifecycle callbacks, + // and from which the OS identifiers are derived. + AttributeAndroidState = "android.state" +) + +const ( + // Any time before Activity.onResume() or, if the app has no Activity, Context.startService() has been called in the app for the first time + AttributeAndroidStateCreated = "created" + // Any time after Activity.onPause() or, if the app has no Activity, Context.stopService() has been called when the app was in the foreground state + AttributeAndroidStateBackground = "background" + // Any time after Activity.onResume() or, if the app has no Activity, Context.startService() has been called when the app was in either the created or background states + AttributeAndroidStateForeground = "foreground" +) + +// These attributes may be used to describe the receiver of a network +// exchange/packet. These should be used when there is no client/server +// relationship between the two sides, or when that relationship is unknown. +// This covers low-level network interactions (e.g. packet tracing) where you +// don't know if there was a connection or which side initiated it. This also +// covers unidirectional UDP flows and peer-to-peer communication where the +// "user-facing" surface of the protocol / API doesn't expose a clear notion of +// client and server. +const ( + // Destination address - domain name if available without reverse DNS lookup; + // otherwise, IP address or Unix domain socket name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'destination.example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the source side, and when communicating through an + // intermediary, destination.address SHOULD represent the destination address + // behind any intermediaries, for example proxies, if it's available. + AttributeDestinationAddress = "destination.address" + // Destination port number + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 3389, 2888 + AttributeDestinationPort = "destination.port" +) + +// Describes device attributes. +const ( + // A unique identifier representing the device + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' + // Note: The device identifier MUST only be defined using the values outlined + // below. This value is not an advertising identifier and MUST NOT be used as + // such. On iOS (Swift or Objective-C), this value MUST be equal to the vendor + // identifier. On Android (Java or Kotlin), this value MUST be equal to the + // Firebase Installation ID or a globally unique UUID which is persisted across + // sessions in your application. More information can be found here on best + // practices and exact implementation details. Caution should be taken when + // storing personal data or anything which can identify a user. GDPR and data + // protection laws may apply, ensure you do your own due diligence. 
+ AttributeDeviceID = "device.id" + // The name of the device manufacturer + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Apple', 'Samsung' + // Note: The Android OS provides this field via Build. iOS apps SHOULD hardcode + // the value Apple. + AttributeDeviceManufacturer = "device.manufacturer" + // The model identifier for the device + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'iPhone3,4', 'SM-G920F' + // Note: It's recommended this value represents a machine-readable version of the + // model identifier rather than the market or consumer-friendly name of the + // device. + AttributeDeviceModelIdentifier = "device.model.identifier" + // The marketing name for the device model + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' + // Note: It's recommended this value represents a human-readable version of the + // device model rather than a machine-readable alternative. + AttributeDeviceModelName = "device.model.name" +) + +// These attributes may be used for any disk related operation. +const ( + // The disk IO operation direction. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'read' + AttributeDiskIoDirection = "disk.io.direction" +) + +const ( + // read + AttributeDiskIoDirectionRead = "read" + // write + AttributeDiskIoDirectionWrite = "write" +) + +// The shared attributes used to report a DNS query. +const ( + // The name being queried. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'www.example.com', 'opentelemetry.io' + // Note: If the name field contains non-printable characters (below 32 or above + // 126), those characters should be represented as escaped base 10 integers + // (\DDD). Back slashes and quotes should be escaped. Tabs, carriage returns, and + // line feeds should be converted to \t, \r, and \n respectively. + AttributeDNSQuestionName = "dns.question.name" +) + +// Attributes for operations with an authenticated and/or authorized enduser. +const ( + // Username or client_id extracted from the access token or Authorization header + // in the inbound request from outside the system. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'username' + AttributeEnduserID = "enduser.id" + // Actual/assumed role the client is making the request under extracted from token + // or application security context. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'admin' + AttributeEnduserRole = "enduser.role" + // Scopes or granted authorities the client currently possesses extracted from + // token or application security context. The value would come from the scope + // associated with an OAuth 2.0 Access Token or an attribute value in a SAML 2.0 + // Assertion. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'read:message, write:files' + AttributeEnduserScope = "enduser.scope" +) + +// The shared attributes used to report an error. +const ( + // Describes a class of error the operation ended with. 
+ // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + // Examples: 'timeout', 'java.net.UnknownHostException', + // 'server_certificate_invalid', '500' + // Note: The error.type SHOULD be predictable, and SHOULD have low + // cardinality.When error.type is set to a type (e.g., an exception type), its + // canonical class name identifying the type within the artifact SHOULD be + // used.Instrumentations SHOULD document the list of errors they report.The + // cardinality of error.type within one instrumentation library SHOULD be low. + // Telemetry consumers that aggregate data from multiple instrumentation libraries + // and applications + // should be prepared for error.type to have high cardinality at query time when + // no + // additional filters are applied.If the operation has completed successfully, + // instrumentations SHOULD NOT set error.type.If a specific domain defines its own + // set of error identifiers (such as HTTP or gRPC status codes), + // it's RECOMMENDED to:
+ //
+ //   • Use a domain-specific attribute
+ //   • Set error.type to capture all errors, regardless of whether they are
+ //     defined within the domain-specific set or not.
+ //
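+ //
+ // For example, an HTTP instrumentation following this guidance might derive
+ // the value with a helper along these lines (a sketch only; strconv is
+ // assumed to be imported):
+ //
+ //     func errorType(statusCode int, err error) string {
+ //             if statusCode >= 500 {
+ //                     return strconv.Itoa(statusCode) // domain-defined identifier, e.g. "500"
+ //             }
+ //             if err != nil {
+ //                     return AttributeErrorTypeOther // "_OTHER", declared below
+ //             }
+ //             return "" // success: do not set error.type
+ //     }
+ //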
    + AttributeErrorType = "error.type" +) + +const ( + // A fallback error value to be used when the instrumentation doesn't define a custom value + AttributeErrorTypeOther = "_OTHER" +) + +// Attributes for Events represented using Log Records. +const ( + // Identifies the class / type of event. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'browser.mouse.click', 'device.app.lifecycle' + // Note: Event names are subject to the same rules as attribute names. Notably, + // event names are namespaced to avoid collisions and provide a clean separation + // of semantics for events in separate domains like browser, mobile, and + // kubernetes. + AttributeEventName = "event.name" +) + +// The shared attributes used to report a single exception associated with a +// span or log. +const ( + // SHOULD be set to true if the exception event is recorded at a point where it is + // known that the exception is escaping the scope of the span. + // + // Type: boolean + // Requirement Level: Optional + // Stability: stable + // Note: An exception is considered to have escaped (or left) the scope of a span, + // if that span is ended while the exception is still logically "in + // flight". + // This may be actually "in flight" in some languages (e.g. if the + // exception + // is passed to a Context manager's __exit__ method in Python) but will + // usually be caught at the point of recording the exception in most languages.It + // is usually not possible to determine at the point where an exception is thrown + // whether it will escape the scope of a span. + // However, it is trivial to know that an exception + // will escape, if one checks for an active exception just before ending the span, + // as done in the example for recording span exceptions.It follows that an + // exception may still escape the scope of the span + // even if the exception.escaped attribute was not set or set to false, + // since the event might have been recorded at a time where it was not + // clear whether the exception will escape. + AttributeExceptionEscaped = "exception.escaped" + // The exception message. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'Division by zero', "Can't convert 'int' object to str implicitly" + AttributeExceptionMessage = "exception.message" + // A stacktrace as a string in the natural representation for the language + // runtime. The representation is to be determined and documented by each language + // SIG. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test + // exception\\n at ' + // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + AttributeExceptionStacktrace = "exception.stacktrace" + // The type of the exception (its fully-qualified class name, if applicable). The + // dynamic type of the exception should be preferred over the static type in + // languages that support it. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'java.net.ConnectException', 'OSError' + AttributeExceptionType = "exception.type" +) + +// FaaS attributes +const ( + // A boolean that is true if the serverless function is executed for the first + // time (aka cold-start). 
+ // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + AttributeFaaSColdstart = "faas.coldstart" + // A string containing the schedule period as Cron Expression. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '0/5 * * * ? *' + AttributeFaaSCron = "faas.cron" + // The name of the source on which the triggering operation was performed. For + // example, in Cloud Storage or S3 corresponds to the bucket name, and in Cosmos + // DB to the database name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'myBucketName', 'myDBName' + AttributeFaaSDocumentCollection = "faas.document.collection" + // The document name/table subjected to the operation. For example, in Cloud + // Storage or S3 is the name of the file, and in Cosmos DB the table name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'myFile.txt', 'myTableName' + AttributeFaaSDocumentName = "faas.document.name" + // Describes the type of the operation that was performed on the data. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeFaaSDocumentOperation = "faas.document.operation" + // A string containing the time when the data was accessed in the ISO 8601 format + // expressed in UTC. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2020-01-23T13:47:06Z' + AttributeFaaSDocumentTime = "faas.document.time" + // The execution environment ID as a string, that will be potentially reused for + // other invocations to the same function/function version. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' + // Note:
+ //
+ //   • AWS Lambda: Use the (full) log stream name.
+ //
    + AttributeFaaSInstance = "faas.instance" + // The invocation ID of the current function invocation. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' + AttributeFaaSInvocationID = "faas.invocation_id" + // The name of the invoked function. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'my-function' + // Note: SHOULD be equal to the faas.name resource attribute of the invoked + // function. + AttributeFaaSInvokedName = "faas.invoked_name" + // The cloud provider of the invoked function. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Note: SHOULD be equal to the cloud.provider resource attribute of the invoked + // function. + AttributeFaaSInvokedProvider = "faas.invoked_provider" + // The cloud region of the invoked function. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'eu-central-1' + // Note: SHOULD be equal to the cloud.region resource attribute of the invoked + // function. + AttributeFaaSInvokedRegion = "faas.invoked_region" + // The amount of memory available to the serverless function converted to Bytes. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 134217728 + // Note: It's recommended to set this attribute since e.g. too little memory can + // easily stop a Java AWS Lambda function from working correctly. On AWS Lambda, + // the environment variable AWS_LAMBDA_FUNCTION_MEMORY_SIZE provides this + // information (which must be multiplied by 1,048,576). + AttributeFaaSMaxMemory = "faas.max_memory" + // The name of the single function that this runtime instance executes. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'my-function', 'myazurefunctionapp/some-function-name' + // Note: This is the name of the function as configured/deployed on the FaaS + // platform and is usually different from the name of the callback + // function (which may be stored in the + // code.namespace/code.function + // span attributes).For some cloud providers, the above definition is ambiguous. + // The following + // definition of function name MUST be used for this attribute + // (and consequently the span name) for the listed cloud providers/products:
+ //
+ //   • Azure: The full name <FUNCAPP>/<FUNC>, i.e., function app name
+ //     followed by a forward slash followed by the function name (this form
+ //     can also be seen in the resource JSON for the function).
+ //     This means that a span attribute MUST be used, as an Azure function
+ //     app can host multiple functions that would usually share
+ //     a TracerProvider (see also the cloud.resource_id attribute).
+ //
    + AttributeFaaSName = "faas.name" + // A string containing the function invocation time in the ISO 8601 format + // expressed in UTC. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2020-01-23T13:47:06Z' + AttributeFaaSTime = "faas.time" + // Type of the trigger which caused this function invocation. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeFaaSTrigger = "faas.trigger" + // The immutable version of the function being executed. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '26', 'pinkfroid-00002' + // Note: Depending on the cloud provider and platform, use:
+ //
+ //   • AWS Lambda: The function version
+ //     (an integer represented as a decimal string).
+ //   • Google Cloud Run (Services): The revision
+ //     (i.e., the function name plus the revision suffix).
+ //   • Google Cloud Functions: The value of the
+ //     K_REVISION environment variable.
+ //   • Azure Functions: Not applicable. Do not set this attribute.
+ //
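+ //
+ // On AWS Lambda, for instance, this attribute and faas.max_memory can both
+ // be read from the runtime environment, roughly as follows (a sketch; os
+ // and strconv imports assumed):
+ //
+ //     func putLambdaAttrs(rattrs pcommon.Map) {
+ //             rattrs.PutStr(AttributeFaaSVersion, os.Getenv("AWS_LAMBDA_FUNCTION_VERSION"))
+ //             if mb, err := strconv.Atoi(os.Getenv("AWS_LAMBDA_FUNCTION_MEMORY_SIZE")); err == nil {
+ //                     rattrs.PutInt(AttributeFaaSMaxMemory, int64(mb)*1048576) // MiB -> bytes
+ //             }
+ //     }
+ //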
    + AttributeFaaSVersion = "faas.version" +) + +const ( + // When a new object is created + AttributeFaaSDocumentOperationInsert = "insert" + // When an object is modified + AttributeFaaSDocumentOperationEdit = "edit" + // When an object is deleted + AttributeFaaSDocumentOperationDelete = "delete" +) + +const ( + // Alibaba Cloud + AttributeFaaSInvokedProviderAlibabaCloud = "alibaba_cloud" + // Amazon Web Services + AttributeFaaSInvokedProviderAWS = "aws" + // Microsoft Azure + AttributeFaaSInvokedProviderAzure = "azure" + // Google Cloud Platform + AttributeFaaSInvokedProviderGCP = "gcp" + // Tencent Cloud + AttributeFaaSInvokedProviderTencentCloud = "tencent_cloud" +) + +const ( + // A response to some data source operation such as a database or filesystem read/write + AttributeFaaSTriggerDatasource = "datasource" + // To provide an answer to an inbound HTTP request + AttributeFaaSTriggerHTTP = "http" + // A function is set to be executed when messages are sent to a messaging system + AttributeFaaSTriggerPubsub = "pubsub" + // A function is scheduled to be executed regularly + AttributeFaaSTriggerTimer = "timer" + // If none of the others apply + AttributeFaaSTriggerOther = "other" +) + +// Attributes for Feature Flags. +const ( + // The unique identifier of the feature flag. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'logo-color' + AttributeFeatureFlagKey = "feature_flag.key" + // The name of the service provider that performs the flag evaluation. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Flag Manager' + AttributeFeatureFlagProviderName = "feature_flag.provider_name" + // SHOULD be a semantic identifier for a value. If one is unavailable, a + // stringified version of the value can be used. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'red', 'true', 'on' + // Note: A semantic identifier, commonly referred to as a variant, provides a + // means + // for referring to a value without including the value itself. This can + // provide additional context for understanding the meaning behind a value. + // For example, the variant red maybe be used for the value #c05543.A stringified + // version of the value can be used in situations where a + // semantic identifier is unavailable. String representation of the value + // should be determined by the implementer. + AttributeFeatureFlagVariant = "feature_flag.variant" +) + +// Describes file attributes. +const ( + // Directory where the file is located. It should include the drive letter, when + // appropriate. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '/home/user', 'C:\\Program Files\\MyApp' + AttributeFileDirectory = "file.directory" + // File extension, excluding the leading dot. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'png', 'gz' + // Note: When the file name has multiple extensions (example.tar.gz), only the + // last one should be captured ("gz", not "tar.gz"). + AttributeFileExtension = "file.extension" + // Name of the file including the extension, without the directory. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'example.png' + AttributeFileName = "file.name" + // Full path to the file, including the file name. It should include the drive + // letter, when appropriate. 
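+ //
+ // The path-derived attributes in this group line up with Go's path/filepath
+ // helpers; a sketch (helper name hypothetical):
+ //
+ //     func putFileAttrs(attrs pcommon.Map, path string) {
+ //             attrs.PutStr(AttributeFilePath, path)
+ //             attrs.PutStr(AttributeFileDirectory, filepath.Dir(path))
+ //             attrs.PutStr(AttributeFileName, filepath.Base(path))
+ //             attrs.PutStr(AttributeFileExtension, strings.TrimPrefix(filepath.Ext(path), "."))
+ //     }
+ //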
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '/home/alice/example.png', 'C:\\Program Files\\MyApp\\myapp.exe' + AttributeFilePath = "file.path" + // File size in bytes. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + AttributeFileSize = "file.size" +) + +// Attributes for Google Cloud Run. +const ( + // The name of the Cloud Run execution being run for the Job, as set by the + // CLOUD_RUN_EXECUTION environment variable. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'job-name-xxxx', 'sample-job-mdw84' + AttributeGCPCloudRunJobExecution = "gcp.cloud_run.job.execution" + // The index for a task within an execution as provided by the + // CLOUD_RUN_TASK_INDEX environment variable. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 0, 1 + AttributeGCPCloudRunJobTaskIndex = "gcp.cloud_run.job.task_index" +) + +// Attributes for Google Compute Engine (GCE). +const ( + // The hostname of a GCE instance. This is the full value of the default or custom + // hostname. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'my-host1234.example.com', 'sample-vm.us-west1-b.c.my- + // project.internal' + AttributeGCPGceInstanceHostname = "gcp.gce.instance.hostname" + // The instance name of a GCE instance. This is the value provided by host.name, + // the visible name of the instance in the Cloud Console UI, and the prefix for + // the default hostname of the instance as defined by the default internal DNS + // name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'instance-1', 'my-vm-name' + AttributeGCPGceInstanceName = "gcp.gce.instance.name" +) + +// The attributes used to describe telemetry in the context of LLM (Large +// Language Models) requests and responses. +const ( + // The full response received from the LLM. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: "[{'role': 'assistant', 'content': 'The capital of France is + // Paris.'}]" + // Note: It's RECOMMENDED to format completions as JSON string matching OpenAI + // messages format + AttributeGenAiCompletion = "gen_ai.completion" + // The full prompt sent to an LLM. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: "[{'role': 'user', 'content': 'What is the capital of France?'}]" + // Note: It's RECOMMENDED to format prompts as JSON string matching OpenAI + // messages format + AttributeGenAiPrompt = "gen_ai.prompt" + // The maximum number of tokens the LLM generates for a request. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 100 + AttributeGenAiRequestMaxTokens = "gen_ai.request.max_tokens" + // The name of the LLM a request is being made to. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'gpt-4' + AttributeGenAiRequestModel = "gen_ai.request.model" + // The temperature setting for the LLM request. + // + // Type: double + // Requirement Level: Optional + // Stability: experimental + // Examples: 0.0 + AttributeGenAiRequestTemperature = "gen_ai.request.temperature" + // The top_p sampling setting for the LLM request. 
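+ //
+ // A sketch (pcommon map API assumed; values are placeholders) of recording
+ // an LLM call with the gen_ai.* attributes in this group:
+ //
+ //     func putGenAiAttrs(attrs pcommon.Map, promptTokens, completionTokens int64) {
+ //             attrs.PutStr(AttributeGenAiSystem, AttributeGenAiSystemOpenai)
+ //             attrs.PutStr(AttributeGenAiRequestModel, "gpt-4")
+ //             attrs.PutDouble(AttributeGenAiRequestTemperature, 0.0)
+ //             attrs.PutInt(AttributeGenAiUsagePromptTokens, promptTokens)
+ //             attrs.PutInt(AttributeGenAiUsageCompletionTokens, completionTokens)
+ //     }
+ //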
+ // + // Type: double + // Requirement Level: Optional + // Stability: experimental + // Examples: 1.0 + AttributeGenAiRequestTopP = "gen_ai.request.top_p" + // Array of reasons the model stopped generating tokens, corresponding to each + // generation received. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'stop' + AttributeGenAiResponseFinishReasons = "gen_ai.response.finish_reasons" + // The unique identifier for the completion. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'chatcmpl-123' + AttributeGenAiResponseID = "gen_ai.response.id" + // The name of the LLM a response was generated from. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'gpt-4-0613' + AttributeGenAiResponseModel = "gen_ai.response.model" + // The Generative AI product as identified by the client instrumentation. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'openai' + // Note: The actual GenAI product may differ from the one identified by the + // client. For example, when using OpenAI client libraries to communicate with + // Mistral, the gen_ai.system is set to openai based on the instrumentation's best + // knowledge. + AttributeGenAiSystem = "gen_ai.system" + // The number of tokens used in the LLM response (completion). + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 180 + AttributeGenAiUsageCompletionTokens = "gen_ai.usage.completion_tokens" + // The number of tokens used in the LLM prompt. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 100 + AttributeGenAiUsagePromptTokens = "gen_ai.usage.prompt_tokens" +) + +const ( + // OpenAI + AttributeGenAiSystemOpenai = "openai" +) + +// Attributes for GraphQL. +const ( + // The GraphQL document being executed. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'query findBookByID { bookByID(id: ?) { name } }' + // Note: The value may be sanitized to exclude sensitive information. + AttributeGraphqlDocument = "graphql.document" + // The name of the operation being executed. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'findBookByID' + AttributeGraphqlOperationName = "graphql.operation.name" + // The type of the operation being executed. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'query', 'mutation', 'subscription' + AttributeGraphqlOperationType = "graphql.operation.type" +) + +const ( + // GraphQL query + AttributeGraphqlOperationTypeQuery = "query" + // GraphQL mutation + AttributeGraphqlOperationTypeMutation = "mutation" + // GraphQL subscription + AttributeGraphqlOperationTypeSubscription = "subscription" +) + +// Attributes for the Heroku platform on which the application is +// running.
+const ( + // Unique identifier for the application + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da' + AttributeHerokuAppID = "heroku.app.id" + // Commit hash for the current release + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec' + AttributeHerokuReleaseCommit = "heroku.release.commit" + // Time and date the release was created + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2022-10-23T18:00:42Z' + AttributeHerokuReleaseCreationTimestamp = "heroku.release.creation_timestamp" +) + +// A host is defined as a computing instance. For example, physical servers, +// virtual machines, switches or disk array. +const ( + // The CPU architecture the host system is running on. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeHostArch = "host.arch" + // The amount of level 2 memory cache available to the processor (in Bytes). + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 12288000 + AttributeHostCPUCacheL2Size = "host.cpu.cache.l2.size" + // Family or generation of the CPU. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '6', 'PA-RISC 1.1e' + AttributeHostCPUFamily = "host.cpu.family" + // Model identifier. It provides more granular information about the CPU, + // distinguishing it from other CPUs within the same family. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '6', '9000/778/B180L' + AttributeHostCPUModelID = "host.cpu.model.id" + // Model designation of the processor. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz' + AttributeHostCPUModelName = "host.cpu.model.name" + // Stepping or core revisions. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '1', 'r1p1' + AttributeHostCPUStepping = "host.cpu.stepping" + // Processor manufacturer identifier. A maximum 12-character string. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'GenuineIntel' + // Note: CPUID command returns the vendor ID string in EBX, EDX and ECX registers. + // Writing these to memory in this order results in a 12-character string. + AttributeHostCPUVendorID = "host.cpu.vendor.id" + // Unique host ID. For Cloud, this must be the instance_id assigned by the cloud + // provider. For non-containerized systems, this should be the machine-id. See the + // table below for the sources to use to determine the machine-id based on + // operating system. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'fdbf79e8af94cb7f9e8df36789187052' + AttributeHostID = "host.id" + // VM image ID or host OS image ID. For Cloud, this value is from the provider. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'ami-07b06b442921831e5' + AttributeHostImageID = "host.image.id" + // Name of the VM image or OS install the host was instantiated from. 
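+ //
+ // host.name and host.arch can often be filled from the standard library; a
+ // sketch (os and runtime imports assumed) mapping Go's runtime.GOARCH names
+ // onto the enum values below:
+ //
+ //     func putHostAttrs(rattrs pcommon.Map) {
+ //             if name, err := os.Hostname(); err == nil {
+ //                     rattrs.PutStr(AttributeHostName, name)
+ //             }
+ //             switch runtime.GOARCH {
+ //             case "amd64":
+ //                     rattrs.PutStr(AttributeHostArch, AttributeHostArchAMD64)
+ //             case "arm64":
+ //                     rattrs.PutStr(AttributeHostArch, AttributeHostArchARM64)
+ //             case "386":
+ //                     rattrs.PutStr(AttributeHostArch, AttributeHostArchX86)
+ //             }
+ //     }
+ //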
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' + AttributeHostImageName = "host.image.name" + // The version string of the VM image or host OS as defined in Version Attributes. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '0.1' + AttributeHostImageVersion = "host.image.version" + // Available IP addresses of the host, excluding loopback interfaces. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: '192.168.1.140', 'fe80::abc2:4a28:737a:609e' + // Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6 addresses + // MUST be specified in the RFC 5952 format. + AttributeHostIP = "host.ip" + // Available MAC addresses of the host, excluding loopback interfaces. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'AC-DE-48-23-45-67', 'AC-DE-48-23-45-67-01-9F' + // Note: MAC Addresses MUST be represented in IEEE RA hexadecimal form: as hyphen- + // separated octets in uppercase hexadecimal form from most to least significant. + AttributeHostMac = "host.mac" + // Name of the host. On Unix systems, it may contain what the hostname command + // returns, or the fully qualified hostname, or another name specified by the + // user. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry-test' + AttributeHostName = "host.name" + // Type of host. For Cloud, this must be the machine type. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'n1-standard-1' + AttributeHostType = "host.type" +) + +const ( + // AMD64 + AttributeHostArchAMD64 = "amd64" + // ARM32 + AttributeHostArchARM32 = "arm32" + // ARM64 + AttributeHostArchARM64 = "arm64" + // Itanium + AttributeHostArchIA64 = "ia64" + // 32-bit PowerPC + AttributeHostArchPPC32 = "ppc32" + // 64-bit PowerPC + AttributeHostArchPPC64 = "ppc64" + // IBM z/Architecture + AttributeHostArchS390x = "s390x" + // 32-bit x86 + AttributeHostArchX86 = "x86" +) + +// Semantic convention attributes in the HTTP namespace. +const ( + // State of the HTTP connection in the HTTP connection pool. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'active', 'idle' + AttributeHTTPConnectionState = "http.connection.state" + // The size of the request payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as the + // Content-Length header. For requests using transport encoding, this should be + // the compressed size. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 3495 + AttributeHTTPRequestBodySize = "http.request.body.size" + // HTTP request method. + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + // Examples: 'GET', 'POST', 'HEAD' + // Note: HTTP request method value SHOULD be "known" to the + // instrumentation. 
+ // By default, this convention defines "known" methods as the ones + // listed in RFC9110 + // and the PATCH method defined in RFC5789.If the HTTP request method is not known + // to instrumentation, it MUST set the http.request.method attribute to _OTHER.If + // the HTTP instrumentation could end up converting valid HTTP request methods to + // _OTHER, then it MUST provide a way to override + // the list of known HTTP methods. If this override is done via environment + // variable, then the environment variable MUST be named + // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated list of + // case-sensitive known HTTP methods + // (this list MUST be a full override of the default known method, it is not a + // list of known methods in addition to the defaults).HTTP method names are case- + // sensitive and http.request.method attribute value MUST match a known HTTP + // method name exactly. + // Instrumentations for specific web frameworks that consider HTTP methods to be + // case insensitive, SHOULD populate a canonical equivalent. + // Tracing instrumentations that do so, MUST also set http.request.method_original + // to the original value. + AttributeHTTPRequestMethod = "http.request.method" + // Original HTTP method sent by the client in the request line. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'GeT', 'ACL', 'foo' + AttributeHTTPRequestMethodOriginal = "http.request.method_original" + // The ordinal number of request resending attempt (for any reason, including + // redirects). + // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 3 + // Note: The resend count SHOULD be updated each time an HTTP request gets resent + // by the client, regardless of what was the cause of the resending (e.g. + // redirection, authorization failure, 503 Server Unavailable, network issues, or + // any other). + AttributeHTTPRequestResendCount = "http.request.resend_count" + // The total size of the request in bytes. This should be the total number of + // bytes sent over the wire, including the request line (HTTP/1.1), framing + // (HTTP/2 and HTTP/3), headers, and request body if any. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 1437 + AttributeHTTPRequestSize = "http.request.size" + // The size of the response payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as the + // Content-Length header. For requests using transport encoding, this should be + // the compressed size. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 3495 + AttributeHTTPResponseBodySize = "http.response.body.size" + // The total size of the response in bytes. This should be the total number of + // bytes sent over the wire, including the status line (HTTP/1.1), framing (HTTP/2 + // and HTTP/3), headers, and response body and trailers if any. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 1437 + AttributeHTTPResponseSize = "http.response.size" + // HTTP response status code. + // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 200 + AttributeHTTPResponseStatusCode = "http.response.status_code" + // The matched route, that is, the path template in the format used by the + // respective server framework. 
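+ //
+ // The known-methods rule above reduces to a simple switch; a sketch (helper
+ // name hypothetical):
+ //
+ //     func putRequestMethod(attrs pcommon.Map, m string) {
+ //             switch m {
+ //             case "GET", "HEAD", "POST", "PUT", "DELETE", "CONNECT", "OPTIONS", "TRACE", "PATCH":
+ //                     attrs.PutStr(AttributeHTTPRequestMethod, m)
+ //             default:
+ //                     attrs.PutStr(AttributeHTTPRequestMethod, AttributeHTTPRequestMethodOther)
+ //                     attrs.PutStr(AttributeHTTPRequestMethodOriginal, m)
+ //             }
+ //     }
+ //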
+ // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '/users/:userID?', '{controller}/{action}/{id?}' + // Note: MUST NOT be populated when this is not supported by the HTTP server + // framework as the route attribute should have low-cardinality and the URI path + // can NOT substitute it. + // SHOULD include the application root if there is one. + AttributeHTTPRoute = "http.route" +) + +const ( + // active state + AttributeHTTPConnectionStateActive = "active" + // idle state + AttributeHTTPConnectionStateIdle = "idle" +) + +const ( + // CONNECT method + AttributeHTTPRequestMethodConnect = "CONNECT" + // DELETE method + AttributeHTTPRequestMethodDelete = "DELETE" + // GET method + AttributeHTTPRequestMethodGet = "GET" + // HEAD method + AttributeHTTPRequestMethodHead = "HEAD" + // OPTIONS method + AttributeHTTPRequestMethodOptions = "OPTIONS" + // PATCH method + AttributeHTTPRequestMethodPatch = "PATCH" + // POST method + AttributeHTTPRequestMethodPost = "POST" + // PUT method + AttributeHTTPRequestMethodPut = "PUT" + // TRACE method + AttributeHTTPRequestMethodTrace = "TRACE" + // Any HTTP method that the instrumentation has no prior knowledge of + AttributeHTTPRequestMethodOther = "_OTHER" +) + +// Java Virtual machine related attributes. +const ( + // Name of the buffer pool. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'mapped', 'direct' + // Note: Pool names are generally obtained via BufferPoolMXBean#getName(). + AttributeJvmBufferPoolName = "jvm.buffer.pool.name" + // Name of the garbage collector action. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'end of minor GC', 'end of major GC' + // Note: Garbage collector action is generally obtained via + // GarbageCollectionNotificationInfo#getGcAction(). + AttributeJvmGcAction = "jvm.gc.action" + // Name of the garbage collector. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'G1 Young Generation', 'G1 Old Generation' + // Note: Garbage collector name is generally obtained via + // GarbageCollectionNotificationInfo#getGcName(). + AttributeJvmGcName = "jvm.gc.name" + // Name of the memory pool. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'G1 Old Gen', 'G1 Eden space', 'G1 Survivor Space' + // Note: Pool names are generally obtained via MemoryPoolMXBean#getName(). + AttributeJvmMemoryPoolName = "jvm.memory.pool.name" + // The type of memory. + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + // Examples: 'heap', 'non_heap' + AttributeJvmMemoryType = "jvm.memory.type" + // Whether the thread is daemon or not. + // + // Type: boolean + // Requirement Level: Optional + // Stability: stable + AttributeJvmThreadDaemon = "jvm.thread.daemon" + // State of the thread. 
+ // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + // Examples: 'runnable', 'blocked' + AttributeJvmThreadState = "jvm.thread.state" +) + +const ( + // Heap memory + AttributeJvmMemoryTypeHeap = "heap" + // Non-heap memory + AttributeJvmMemoryTypeNonHeap = "non_heap" +) + +const ( + // A thread that has not yet started is in this state + AttributeJvmThreadStateNew = "new" + // A thread executing in the Java virtual machine is in this state + AttributeJvmThreadStateRunnable = "runnable" + // A thread that is blocked waiting for a monitor lock is in this state + AttributeJvmThreadStateBlocked = "blocked" + // A thread that is waiting indefinitely for another thread to perform a particular action is in this state + AttributeJvmThreadStateWaiting = "waiting" + // A thread that is waiting for another thread to perform an action for up to a specified waiting time is in this state + AttributeJvmThreadStateTimedWaiting = "timed_waiting" + // A thread that has exited is in this state + AttributeJvmThreadStateTerminated = "terminated" +) + +// Kubernetes resource attributes. +const ( + // The name of the cluster. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry-cluster' + AttributeK8SClusterName = "k8s.cluster.name" + // A pseudo-ID for the cluster, set to the UID of the kube-system namespace. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '218fc5a9-a5f1-4b54-aa05-46717d0ab26d' + // Note: K8S doesn't have support for obtaining a cluster ID. If this is ever + // added, we will recommend collecting the k8s.cluster.uid through the + // official APIs. In the meantime, we are able to use the uid of the + // kube-system namespace as a proxy for cluster ID. Read on for the + // rationale.Every object created in a K8S cluster is assigned a distinct UID. The + // kube-system namespace is used by Kubernetes itself and will exist + // for the lifetime of the cluster. Using the uid of the kube-system + // namespace is a reasonable proxy for the K8S ClusterID as it will only + // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are + // UUIDs as standardized by + // ISO/IEC 9834-8 and ITU-T X.667. + // Which states:
    + // If generated according to one of the mechanisms defined in Rec.
    + // ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be + // different from all other UUIDs generated before 3603 A.D., or is + // extremely likely to be different (depending on the mechanism + // chosen).Therefore, UIDs between clusters should be extremely unlikely to + // conflict. + AttributeK8SClusterUID = "k8s.cluster.uid" + // The name of the Container from Pod specification, must be unique within a Pod. + // Container runtime usually uses different globally unique name (container.name). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'redis' + AttributeK8SContainerName = "k8s.container.name" + // Number of times the container was restarted. This attribute can be used to + // identify a particular container (running or stopped) within a container spec. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + AttributeK8SContainerRestartCount = "k8s.container.restart_count" + // Last terminated reason of the Container. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Evicted', 'Error' + AttributeK8SContainerStatusLastTerminatedReason = "k8s.container.status.last_terminated_reason" + // The name of the CronJob. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry' + AttributeK8SCronJobName = "k8s.cronjob.name" + // The UID of the CronJob. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SCronJobUID = "k8s.cronjob.uid" + // The name of the DaemonSet. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry' + AttributeK8SDaemonSetName = "k8s.daemonset.name" + // The UID of the DaemonSet. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SDaemonSetUID = "k8s.daemonset.uid" + // The name of the Deployment. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry' + AttributeK8SDeploymentName = "k8s.deployment.name" + // The UID of the Deployment. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SDeploymentUID = "k8s.deployment.uid" + // The name of the Job. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry' + AttributeK8SJobName = "k8s.job.name" + // The UID of the Job. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SJobUID = "k8s.job.uid" + // The name of the namespace that the pod is running in. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'default' + AttributeK8SNamespaceName = "k8s.namespace.name" + // The name of the Node. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'node-1' + AttributeK8SNodeName = "k8s.node.name" + // The UID of the Node. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' + AttributeK8SNodeUID = "k8s.node.uid" + // The name of the Pod. 
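+ //
+ // These identifiers are commonly injected through the Kubernetes downward
+ // API; a sketch (the environment variable names are whatever the pod spec
+ // chose, hypothetical here):
+ //
+ //     func putK8SAttrs(rattrs pcommon.Map) {
+ //             rattrs.PutStr(AttributeK8SNamespaceName, os.Getenv("K8S_NAMESPACE_NAME"))
+ //             rattrs.PutStr(AttributeK8SPodName, os.Getenv("K8S_POD_NAME"))
+ //             rattrs.PutStr(AttributeK8SNodeName, os.Getenv("K8S_NODE_NAME"))
+ //     }
+ //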
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry-pod-autoconf' + AttributeK8SPodName = "k8s.pod.name" + // The UID of the Pod. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SPodUID = "k8s.pod.uid" + // The name of the ReplicaSet. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry' + AttributeK8SReplicaSetName = "k8s.replicaset.name" + // The UID of the ReplicaSet. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SReplicaSetUID = "k8s.replicaset.uid" + // The name of the StatefulSet. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry' + AttributeK8SStatefulSetName = "k8s.statefulset.name" + // The UID of the StatefulSet. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SStatefulSetUID = "k8s.statefulset.uid" +) + +// Log attributes +const ( + // The stream associated with the log. See below for a list of well-known values. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeLogIostream = "log.iostream" +) + +const ( + // Logs from stdout stream + AttributeLogIostreamStdout = "stdout" + // Events from stderr stream + AttributeLogIostreamStderr = "stderr" +) + +// Attributes for a file to which log was emitted. +const ( + // The basename of the file. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'audit.log' + AttributeLogFileName = "log.file.name" + // The basename of the file, with symlinks resolved. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'uuid.log' + AttributeLogFileNameResolved = "log.file.name_resolved" + // The full path to the file. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '/var/log/mysql/audit.log' + AttributeLogFilePath = "log.file.path" + // The full path to the file, with symlinks resolved. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '/var/lib/docker/uuid.log' + AttributeLogFilePathResolved = "log.file.path_resolved" +) + +// The generic attributes that may be used in any Log Record. +const ( + // A unique identifier for the Log Record. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV' + // Note: If an id is provided, other log records with the same id will be + // considered duplicates and can be removed safely. This means, that two + // distinguishable log records MUST have different values. + // The id MAY be an Universally Unique Lexicographically Sortable Identifier + // (ULID), but other identifiers (e.g. UUID) may be used as needed. + AttributeLogRecordUID = "log.record.uid" +) + +// Attributes describing telemetry around messaging systems and messaging +// activities. +const ( + // The number of messages sent, received, or processed in the scope of the + // batching operation. 
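Editorial aside, not part of the patch: a minimal sketch of how the string constants above compose with the OpenTelemetry Go API. The semconv import path and version are assumptions; `resource.NewSchemaless` and the `attribute` helpers are real APIs from go.opentelemetry.io/otel.

```go
package telemetry // hypothetical package for this sketch

import (
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/sdk/resource"

	// Assumed import path/version for the vendored constants above.
	semconv "go.opentelemetry.io/collector/semconv/v1.27.0"
)

// k8sResource tags an SDK resource with the Kubernetes attributes defined
// above; the constants are plain strings, so they slot into attribute keys.
func k8sResource() *resource.Resource {
	return resource.NewSchemaless(
		attribute.String(semconv.AttributeK8SNamespaceName, "default"),
		attribute.String(semconv.AttributeK8SPodName, "opentelemetry-pod-autoconf"),
		attribute.String(semconv.AttributeK8SContainerName, "redis"),
		attribute.Int(semconv.AttributeK8SContainerRestartCount, 2),
	)
}
```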
+ // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 0, 1, 2 + // Note: Instrumentations SHOULD NOT set messaging.batch.message_count on spans + // that operate with a single message. When a messaging client library supports + // both batch and single-message API for the same operation, instrumentations + // SHOULD use messaging.batch.message_count for batching APIs and SHOULD NOT use + // it for single-message APIs. + AttributeMessagingBatchMessageCount = "messaging.batch.message_count" + // A unique identifier for the client that consumes or produces a message. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'client-5', 'myhost@8742@s8083jm' + AttributeMessagingClientID = "messaging.client.id" + // A boolean that is true if the message destination is anonymous (could be + // unnamed or have auto-generated name). + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + AttributeMessagingDestinationAnonymous = "messaging.destination.anonymous" + // The message destination name + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'MyQueue', 'MyTopic' + // Note: Destination name SHOULD uniquely identify a specific queue, topic or + // other entity within the broker. If + // the broker doesn't have such a notion, the destination name SHOULD uniquely + // identify the broker. + AttributeMessagingDestinationName = "messaging.destination.name" + // The identifier of the partition messages are sent to or received from, unique + // within the messaging.destination.name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '1' + AttributeMessagingDestinationPartitionID = "messaging.destination.partition.id" + // Low cardinality representation of the messaging destination name + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '/customers/{customerID}' + // Note: Destination names could be constructed from templates. An example would + // be a destination name involving a user name or product id. Although the + // destination name in this case is of high cardinality, the underlying template + // is of low cardinality and can be effectively used for grouping and aggregation. + AttributeMessagingDestinationTemplate = "messaging.destination.template" + // A boolean that is true if the message destination is temporary and might not + // exist anymore after messages are processed. + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + AttributeMessagingDestinationTemporary = "messaging.destination.temporary" + // A boolean that is true if the publish message destination is anonymous (could + // be unnamed or have auto-generated name). + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + AttributeMessagingDestinationPublishAnonymous = "messaging.destination_publish.anonymous" + // The name of the original destination the message was published to + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'MyQueue', 'MyTopic' + // Note: The name SHOULD uniquely identify a specific queue, topic, or other + // entity within the broker. If + // the broker doesn't have such a notion, the original destination name SHOULD + // uniquely identify the broker.
+ AttributeMessagingDestinationPublishName = "messaging.destination_publish.name" + // The size of the message body in bytes. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 1439 + // Note: This can refer to either the compressed or the uncompressed body size. If both + // sizes are known, the uncompressed + // body size should be used. + AttributeMessagingMessageBodySize = "messaging.message.body.size" + // The conversation ID identifying the conversation to which the message belongs, + // represented as a string. Sometimes called "Correlation ID". + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'MyConversationID' + AttributeMessagingMessageConversationID = "messaging.message.conversation_id" + // The size of the message body and metadata in bytes. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 2738 + // Note: This can refer to either the compressed or the uncompressed size. If both sizes + // are known, the uncompressed + // size should be used. + AttributeMessagingMessageEnvelopeSize = "messaging.message.envelope.size" + // A value used by the messaging system as an identifier for the message, + // represented as a string. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '452a7c7c7c7048c2f887f61572b18fc2' + AttributeMessagingMessageID = "messaging.message.id" + // The system-specific name of the messaging operation. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'ack', 'nack', 'send' + AttributeMessagingOperationName = "messaging.operation.name" + // A string identifying the type of the messaging operation. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Note: If a custom value is used, it MUST be of low cardinality. + AttributeMessagingOperationType = "messaging.operation.type" + // The messaging system as identified by the client instrumentation. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Note: The actual messaging system may differ from the one known by the client. + // For example, when using Kafka client libraries to communicate with Azure Event + // Hubs, the messaging.system is set to kafka based on the instrumentation's best + // knowledge. + AttributeMessagingSystem = "messaging.system" +) + +const ( + // One or more messages are provided for publishing to an intermediary. If a single message is published, the context of the "Publish" span can be used as the creation context and no "Create" span needs to be created + AttributeMessagingOperationTypePublish = "publish" + // A message is created. "Create" spans always refer to a single message and are used to provide a unique creation context for messages in batch publishing scenarios + AttributeMessagingOperationTypeCreate = "create" + // One or more messages are requested by a consumer.
This operation refers to pull-based scenarios, where consumers explicitly call methods of messaging SDKs to receive messages + AttributeMessagingOperationTypeReceive = "receive" + // One or more messages are delivered to or processed by a consumer + AttributeMessagingOperationTypeDeliver = "process" + // One or more messages are settled + AttributeMessagingOperationTypeSettle = "settle" +) + +const ( + // Apache ActiveMQ + AttributeMessagingSystemActivemq = "activemq" + // Amazon Simple Queue Service (SQS) + AttributeMessagingSystemAWSSqs = "aws_sqs" + // Azure Event Grid + AttributeMessagingSystemEventgrid = "eventgrid" + // Azure Event Hubs + AttributeMessagingSystemEventhubs = "eventhubs" + // Azure Service Bus + AttributeMessagingSystemServicebus = "servicebus" + // Google Cloud Pub/Sub + AttributeMessagingSystemGCPPubsub = "gcp_pubsub" + // Java Message Service + AttributeMessagingSystemJms = "jms" + // Apache Kafka + AttributeMessagingSystemKafka = "kafka" + // RabbitMQ + AttributeMessagingSystemRabbitmq = "rabbitmq" + // Apache RocketMQ + AttributeMessagingSystemRocketmq = "rocketmq" +) + +// This group describes attributes specific to Apache Kafka. +const ( + // Name of the Kafka Consumer Group that is handling the message. Only applies to + // consumers, not producers. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'my-group' + AttributeMessagingKafkaConsumerGroup = "messaging.kafka.consumer.group" + // Message keys in Kafka are used for grouping alike messages to ensure they're + // processed on the same partition. They differ from messaging.message.id in that + // they're not unique. If the key is null, the attribute MUST NOT be set. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'myKey' + // Note: If the key type is not string, its string representation has to be + // supplied for the attribute. If the key has no unambiguous, canonical string + // form, don't include its value. + AttributeMessagingKafkaMessageKey = "messaging.kafka.message.key" + // The offset of a record in the corresponding Kafka partition. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 42 + AttributeMessagingKafkaMessageOffset = "messaging.kafka.message.offset" + // A boolean that is true if the message is a tombstone. + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + AttributeMessagingKafkaMessageTombstone = "messaging.kafka.message.tombstone" +) + +// This group describes attributes specific to RabbitMQ. +const ( + // RabbitMQ message routing key. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'myKey' + AttributeMessagingRabbitmqDestinationRoutingKey = "messaging.rabbitmq.destination.routing_key" + // RabbitMQ message delivery tag + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 123 + AttributeMessagingRabbitmqMessageDeliveryTag = "messaging.rabbitmq.message.delivery_tag" +) + +// This group describes attributes specific to RocketMQ. +const ( + // Name of the RocketMQ producer/consumer group that is handling the message. The + // client type is identified by the SpanKind. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'myConsumerGroup' + AttributeMessagingRocketmqClientGroup = "messaging.rocketmq.client_group" + // Model of message consumption.
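Editorial aside, not part of the patch: a hedged sketch of producer-side span annotation using the messaging and Kafka attributes above. It reuses the imports from the earlier sketch; `span` is any started `trace.Span`, and the values are illustrative only.

```go
// Annotate a Kafka publish span. Per the note above, batch.message_count is
// only set because this models a batching API (10 messages in one call).
span.SetAttributes(
	attribute.String(semconv.AttributeMessagingSystem, semconv.AttributeMessagingSystemKafka),
	attribute.String(semconv.AttributeMessagingOperationType, semconv.AttributeMessagingOperationTypePublish),
	attribute.String(semconv.AttributeMessagingDestinationName, "MyTopic"),
	attribute.String(semconv.AttributeMessagingKafkaMessageKey, "myKey"),
	attribute.Int(semconv.AttributeMessagingBatchMessageCount, 10),
)
```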
This only applies to consumer spans. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeMessagingRocketmqConsumptionModel = "messaging.rocketmq.consumption_model" + // The delay time level for delay message, which determines the message delay + // time. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 3 + AttributeMessagingRocketmqMessageDelayTimeLevel = "messaging.rocketmq.message.delay_time_level" + // The timestamp in milliseconds that the delay message is expected to be + // delivered to consumer. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 1665987217045 + AttributeMessagingRocketmqMessageDeliveryTimestamp = "messaging.rocketmq.message.delivery_timestamp" + // It is essential for FIFO messages. Messages that belong to the same message + // group are always processed one by one within the same consumer group. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'myMessageGroup' + AttributeMessagingRocketmqMessageGroup = "messaging.rocketmq.message.group" + // Key(s) of message, another way to mark message besides message id. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'keyA', 'keyB' + AttributeMessagingRocketmqMessageKeys = "messaging.rocketmq.message.keys" + // The secondary classifier of message besides topic. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'tagA' + AttributeMessagingRocketmqMessageTag = "messaging.rocketmq.message.tag" + // Type of message. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeMessagingRocketmqMessageType = "messaging.rocketmq.message.type" + // Namespace of RocketMQ resources; resources in different namespaces are + // isolated from each other. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'myNamespace' + AttributeMessagingRocketmqNamespace = "messaging.rocketmq.namespace" +) + +const ( + // Clustering consumption model + AttributeMessagingRocketmqConsumptionModelClustering = "clustering" + // Broadcasting consumption model + AttributeMessagingRocketmqConsumptionModelBroadcasting = "broadcasting" +) + +const ( + // Normal message + AttributeMessagingRocketmqMessageTypeNormal = "normal" + // FIFO message + AttributeMessagingRocketmqMessageTypeFifo = "fifo" + // Delay message + AttributeMessagingRocketmqMessageTypeDelay = "delay" + // Transaction message + AttributeMessagingRocketmqMessageTypeTransaction = "transaction" +) + +// This group describes attributes specific to GCP Pub/Sub. +const ( + // The ack deadline in seconds set for the modify ack deadline request. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 10 + AttributeMessagingGCPPubsubMessageAckDeadline = "messaging.gcp_pubsub.message.ack_deadline" + // The ack id for a given message. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'ack_id' + AttributeMessagingGCPPubsubMessageAckID = "messaging.gcp_pubsub.message.ack_id" + // The delivery attempt for a given message. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 2 + AttributeMessagingGCPPubsubMessageDeliveryAttempt = "messaging.gcp_pubsub.message.delivery_attempt" + // The ordering key for a given message.
If the attribute is not present, the + // message does not have an ordering key. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'ordering_key' + AttributeMessagingGCPPubsubMessageOrderingKey = "messaging.gcp_pubsub.message.ordering_key" +) + +// This group describes attributes specific to Azure Service Bus. +const ( + // The name of the subscription in the topic messages are received from. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'mySubscription' + AttributeMessagingServicebusDestinationSubscriptionName = "messaging.servicebus.destination.subscription_name" + // Describes the settlement type. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeMessagingServicebusDispositionStatus = "messaging.servicebus.disposition_status" + // Number of deliveries that have been attempted for this message. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 2 + AttributeMessagingServicebusMessageDeliveryCount = "messaging.servicebus.message.delivery_count" + // The UTC epoch seconds at which the message has been accepted and stored in the + // entity. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 1701393730 + AttributeMessagingServicebusMessageEnqueuedTime = "messaging.servicebus.message.enqueued_time" +) + +const ( + // Message is completed + AttributeMessagingServicebusDispositionStatusComplete = "complete" + // Message is abandoned + AttributeMessagingServicebusDispositionStatusAbandon = "abandon" + // Message is sent to dead letter queue + AttributeMessagingServicebusDispositionStatusDeadLetter = "dead_letter" + // Message is deferred + AttributeMessagingServicebusDispositionStatusDefer = "defer" +) + +// This group describes attributes specific to Azure Event Hubs. +const ( + // The name of the consumer group the event consumer is associated with. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'indexer' + AttributeMessagingEventhubsConsumerGroup = "messaging.eventhubs.consumer.group" + // The UTC epoch seconds at which the message has been accepted and stored in the + // entity. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 1701393730 + AttributeMessagingEventhubsMessageEnqueuedTime = "messaging.eventhubs.message.enqueued_time" +) + +// These attributes may be used for any network related operation. +const ( + // The ISO 3166-1 alpha-2 2-character country code associated with the mobile + // carrier network. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'DE' + AttributeNetworkCarrierIcc = "network.carrier.icc" + // The mobile carrier country code. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '310' + AttributeNetworkCarrierMcc = "network.carrier.mcc" + // The mobile carrier network code. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '001' + AttributeNetworkCarrierMnc = "network.carrier.mnc" + // The name of the mobile carrier. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'sprint' + AttributeNetworkCarrierName = "network.carrier.name" + // This describes more details regarding the connection.type. 
It may be the type + // of cell technology connection, but it could be used for describing details + // about a wifi connection. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'LTE' + AttributeNetworkConnectionSubtype = "network.connection.subtype" + // The internet connection type. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'wifi' + AttributeNetworkConnectionType = "network.connection.type" + // The network IO operation direction. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'transmit' + AttributeNetworkIoDirection = "network.io.direction" + // Local address of the network connection - IP address or Unix domain socket + // name. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '10.1.2.80', '/tmp/my.sock' + AttributeNetworkLocalAddress = "network.local.address" + // Local port number of the network connection. + // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 65123 + AttributeNetworkLocalPort = "network.local.port" + // Peer address of the network connection - IP address or Unix domain socket name. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '10.1.2.80', '/tmp/my.sock' + AttributeNetworkPeerAddress = "network.peer.address" + // Peer port number of the network connection. + // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 65123 + AttributeNetworkPeerPort = "network.peer.port" + // OSI application layer or non-OSI equivalent. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'amqp', 'http', 'mqtt' + // Note: The value SHOULD be normalized to lowercase. + AttributeNetworkProtocolName = "network.protocol.name" + // The actual version of the protocol used for network communication. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '1.1', '2' + // Note: If protocol version is subject to negotiation (for example using ALPN), + // this attribute SHOULD be set to the negotiated version. If the actual protocol + // version is not known, this attribute SHOULD NOT be set. + AttributeNetworkProtocolVersion = "network.protocol.version" + // OSI transport layer or inter-process communication method. + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + // Examples: 'tcp', 'udp' + // Note: The value SHOULD be normalized to lowercase. Consider always setting the + // transport when setting a port number, since + // a port number is ambiguous without knowing the transport. For example, + // different processes could be listening on TCP port 12345 and UDP port 12345. + AttributeNetworkTransport = "network.transport" + // OSI network layer or non-OSI equivalent. + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + // Examples: 'ipv4', 'ipv6' + // Note: The value SHOULD be normalized to lowercase. + AttributeNetworkType = "network.type" +) + +const ( + // GPRS + AttributeNetworkConnectionSubtypeGprs = "gprs" + // EDGE + AttributeNetworkConnectionSubtypeEdge = "edge" + // UMTS + AttributeNetworkConnectionSubtypeUmts = "umts" + // CDMA + AttributeNetworkConnectionSubtypeCdma = "cdma" + // EVDO Rel. 0 + AttributeNetworkConnectionSubtypeEvdo0 = "evdo_0" + // EVDO Rev.
A + AttributeNetworkConnectionSubtypeEvdoA = "evdo_a" + // CDMA2000 1XRTT + AttributeNetworkConnectionSubtypeCdma20001xrtt = "cdma2000_1xrtt" + // HSDPA + AttributeNetworkConnectionSubtypeHsdpa = "hsdpa" + // HSUPA + AttributeNetworkConnectionSubtypeHsupa = "hsupa" + // HSPA + AttributeNetworkConnectionSubtypeHspa = "hspa" + // IDEN + AttributeNetworkConnectionSubtypeIden = "iden" + // EVDO Rev. B + AttributeNetworkConnectionSubtypeEvdoB = "evdo_b" + // LTE + AttributeNetworkConnectionSubtypeLte = "lte" + // EHRPD + AttributeNetworkConnectionSubtypeEhrpd = "ehrpd" + // HSPAP + AttributeNetworkConnectionSubtypeHspap = "hspap" + // GSM + AttributeNetworkConnectionSubtypeGsm = "gsm" + // TD-SCDMA + AttributeNetworkConnectionSubtypeTdScdma = "td_scdma" + // IWLAN + AttributeNetworkConnectionSubtypeIwlan = "iwlan" + // 5G NR (New Radio) + AttributeNetworkConnectionSubtypeNr = "nr" + // 5G NRNSA (New Radio Non-Standalone) + AttributeNetworkConnectionSubtypeNrnsa = "nrnsa" + // LTE CA + AttributeNetworkConnectionSubtypeLteCa = "lte_ca" +) + +const ( + // wifi + AttributeNetworkConnectionTypeWifi = "wifi" + // wired + AttributeNetworkConnectionTypeWired = "wired" + // cell + AttributeNetworkConnectionTypeCell = "cell" + // unavailable + AttributeNetworkConnectionTypeUnavailable = "unavailable" + // unknown + AttributeNetworkConnectionTypeUnknown = "unknown" +) + +const ( + // transmit + AttributeNetworkIoDirectionTransmit = "transmit" + // receive + AttributeNetworkIoDirectionReceive = "receive" +) + +const ( + // TCP + AttributeNetworkTransportTCP = "tcp" + // UDP + AttributeNetworkTransportUDP = "udp" + // Named or anonymous pipe + AttributeNetworkTransportPipe = "pipe" + // Unix domain socket + AttributeNetworkTransportUnix = "unix" +) + +const ( + // IPv4 + AttributeNetworkTypeIpv4 = "ipv4" + // IPv6 + AttributeNetworkTypeIpv6 = "ipv6" +) + +// An OCI image manifest. +const ( + // The digest of the OCI image manifest. For container images specifically, this is the + // digest by which the container image is known. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: + // 'sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4' + // Note: Follows OCI Image Manifest Specification, and specifically the Digest + // property. + // An example can be found in Example Image Manifest. + AttributeOciManifestDigest = "oci.manifest.digest" +) + +// Attributes used by the OpenTracing Shim layer. +const ( + // Parent-child Reference type + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Note: The causal relationship between a child Span and a parent Span. + AttributeOpentracingRefType = "opentracing.ref_type" +) + +const ( + // The parent Span depends on the child Span in some capacity + AttributeOpentracingRefTypeChildOf = "child_of" + // The parent Span doesn't depend in any way on the result of the child Span + AttributeOpentracingRefTypeFollowsFrom = "follows_from" +) + +// The operating system (OS) on which the process represented by this resource +// is running. +const ( + // Unique identifier for a particular build or compilation of the operating + // system. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'TQ3C.230805.001.B2', '20E247', '22621' + AttributeOSBuildID = "os.build_id" + // Human readable (not intended to be parsed) OS version information, as + // reported by, e.g., the ver or lsb_release -a commands.
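Editorial aside, not part of the patch: a fragment illustrating the network.transport note above — whenever a port is recorded, the transport is recorded alongside it so the port is unambiguous. Reuses the imports from the first sketch; values are illustrative.

```go
// Record peer endpoint plus transport and protocol on a span.
span.SetAttributes(
	attribute.String(semconv.AttributeNetworkTransport, semconv.AttributeNetworkTransportTCP),
	attribute.String(semconv.AttributeNetworkPeerAddress, "10.1.2.80"),
	attribute.Int(semconv.AttributeNetworkPeerPort, 65123),
	attribute.String(semconv.AttributeNetworkProtocolName, "http"),
	attribute.String(semconv.AttributeNetworkProtocolVersion, "1.1"),
)
```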
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 LTS' + AttributeOSDescription = "os.description" + // Human readable operating system name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'iOS', 'Android', 'Ubuntu' + AttributeOSName = "os.name" + // The operating system type. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeOSType = "os.type" + // The version string of the operating system as defined in Version Attributes. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '14.2.1', '18.04.1' + AttributeOSVersion = "os.version" +) + +const ( + // Microsoft Windows + AttributeOSTypeWindows = "windows" + // Linux + AttributeOSTypeLinux = "linux" + // Apple Darwin + AttributeOSTypeDarwin = "darwin" + // FreeBSD + AttributeOSTypeFreeBSD = "freebsd" + // NetBSD + AttributeOSTypeNetBSD = "netbsd" + // OpenBSD + AttributeOSTypeOpenBSD = "openbsd" + // DragonFly BSD + AttributeOSTypeDragonflyBSD = "dragonflybsd" + // HP-UX (Hewlett Packard Unix) + AttributeOSTypeHPUX = "hpux" + // AIX (Advanced Interactive eXecutive) + AttributeOSTypeAIX = "aix" + // SunOS, Oracle Solaris + AttributeOSTypeSolaris = "solaris" + // IBM z/OS + AttributeOSTypeZOS = "z_os" +) + +// Attributes reserved for OpenTelemetry +const ( + // Name of the code, either "OK" or "ERROR". MUST NOT be set + // if the status code is UNSET. + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + AttributeOTelStatusCode = "otel.status_code" + // Description of the Status if it has a value, otherwise not set. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'resource not found' + AttributeOTelStatusDescription = "otel.status_description" +) + +const ( + // The operation has been validated by an Application developer or Operator to have completed successfully + AttributeOTelStatusCodeOk = "OK" + // The operation contains an error + AttributeOTelStatusCodeError = "ERROR" +) + +// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's +// concepts. +const ( + // The name of the instrumentation scope - (InstrumentationScope.Name in OTLP). + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'io.opentelemetry.contrib.mongodb' + AttributeOTelScopeName = "otel.scope.name" + // The version of the instrumentation scope - (InstrumentationScope.Version in + // OTLP). + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '1.0.0' + AttributeOTelScopeVersion = "otel.scope.version" +) + +// Operations that access some remote service. +const ( + // The service.name of the remote service. SHOULD be equal to the actual + // service.name resource attribute of the remote service if any. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'AuthTokenCache' + AttributePeerService = "peer.service" +) + +// An operating system process. +const ( + // The command used to launch the process (i.e. the command name). On Linux based + // systems, can be set to the zeroth string in proc/[pid]/cmdline. On Windows, can + // be set to the first parameter extracted from GetCommandLineW. 
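Editorial aside, not part of the patch: a hedged sketch of how a non-OTLP exporter might materialize span status as the otel.status_code / otel.status_description pair defined above. `codes` is the real go.opentelemetry.io/otel/codes package; the helper name is hypothetical.

```go
// statusAttrs maps a span status code to the otel.* status attributes.
func statusAttrs(code codes.Code, desc string) []attribute.KeyValue {
	switch code {
	case codes.Ok:
		return []attribute.KeyValue{
			attribute.String(semconv.AttributeOTelStatusCode, semconv.AttributeOTelStatusCodeOk),
		}
	case codes.Error:
		return []attribute.KeyValue{
			attribute.String(semconv.AttributeOTelStatusCode, semconv.AttributeOTelStatusCodeError),
			attribute.String(semconv.AttributeOTelStatusDescription, desc),
		}
	default:
		// UNSET: per the comment above, the attribute MUST NOT be set.
		return nil
	}
}
```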
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'cmd/otelcol' + AttributeProcessCommand = "process.command" + // All the command arguments (including the command/executable itself) as received + // by the process. On Linux-based systems (and some other Unixoid systems + // supporting procfs), can be set according to the list of null-delimited strings + // extracted from proc/[pid]/cmdline. For libc-based executables, this would be + // the full argv vector passed to main. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'cmd/otecol', '--config=config.yaml' + AttributeProcessCommandArgs = "process.command_args" + // The full command used to launch the process as a single string representing the + // full command. On Windows, can be set to the result of GetCommandLineW. Do not + // set this if you have to assemble it just for monitoring; use + // process.command_args instead. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' + AttributeProcessCommandLine = "process.command_line" + // Specifies whether the context switches for this data point were voluntary or + // involuntary. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeProcessContextSwitchType = "process.context_switch_type" + // The date and time the process was created, in ISO 8601 format. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2023-11-21T09:25:34.853Z' + AttributeProcessCreationTime = "process.creation.time" + // The name of the process executable. On Linux based systems, can be set to the + // Name in proc/[pid]/status. On Windows, can be set to the base name of + // GetProcessImageFileNameW. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'otelcol' + AttributeProcessExecutableName = "process.executable.name" + // The full path to the process executable. On Linux based systems, can be set to + // the target of proc/[pid]/exe. On Windows, can be set to the result of + // GetProcessImageFileNameW. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '/usr/bin/cmd/otelcol' + AttributeProcessExecutablePath = "process.executable.path" + // The exit code of the process. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 127 + AttributeProcessExitCode = "process.exit.code" + // The date and time the process exited, in ISO 8601 format. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2023-11-21T09:26:12.315Z' + AttributeProcessExitTime = "process.exit.time" + // The PID of the process's group leader. This is also the process group ID (PGID) + // of the process. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 23 + AttributeProcessGroupLeaderPID = "process.group_leader.pid" + // Whether the process is connected to an interactive shell. + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + AttributeProcessInteractive = "process.interactive" + // The username of the user that owns the process. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'root' + AttributeProcessOwner = "process.owner" + // The type of page fault for this data point. Type major is for major/hard page + // faults, and minor is for minor/soft page faults. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeProcessPagingFaultType = "process.paging.fault_type" + // Parent Process identifier (PPID). + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 111 + AttributeProcessParentPID = "process.parent_pid" + // Process identifier (PID). + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 1234 + AttributeProcessPID = "process.pid" + // The real user ID (RUID) of the process. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 1000 + AttributeProcessRealUserID = "process.real_user.id" + // The username of the real user of the process. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'operator' + AttributeProcessRealUserName = "process.real_user.name" + // An additional description about the runtime of the process, for example a + // specific vendor customization of the runtime environment. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' + AttributeProcessRuntimeDescription = "process.runtime.description" + // The name of the runtime of this process. For compiled native binaries, this + // SHOULD be the name of the compiler. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'OpenJDK Runtime Environment' + AttributeProcessRuntimeName = "process.runtime.name" + // The version of the runtime of this process, as returned by the runtime without + // modification. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '14.0.2' + AttributeProcessRuntimeVersion = "process.runtime.version" + // The saved user ID (SUID) of the process. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 1002 + AttributeProcessSavedUserID = "process.saved_user.id" + // The username of the saved user. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'operator' + AttributeProcessSavedUserName = "process.saved_user.name" + // The PID of the process's session leader. This is also the session ID (SID) of + // the process. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 14 + AttributeProcessSessionLeaderPID = "process.session_leader.pid" + // The effective user ID (EUID) of the process. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 1001 + AttributeProcessUserID = "process.user.id" + // The username of the effective user of the process. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'root' + AttributeProcessUserName = "process.user.name" + // Virtual process identifier. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 12 + // Note: The process ID within a PID namespace. 
This is not necessarily unique + // across all processes on the host, but it is unique within the process namespace + // that the process exists within. + AttributeProcessVpid = "process.vpid" +) + +const ( + // voluntary + AttributeProcessContextSwitchTypeVoluntary = "voluntary" + // involuntary + AttributeProcessContextSwitchTypeInvoluntary = "involuntary" +) + +const ( + // major + AttributeProcessPagingFaultTypeMajor = "major" + // minor + AttributeProcessPagingFaultTypeMinor = "minor" +) + +// Attributes for process CPU +const ( + // The CPU state of the process. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeProcessCPUState = "process.cpu.state" +) + +const ( + // system + AttributeProcessCPUStateSystem = "system" + // user + AttributeProcessCPUStateUser = "user" + // wait + AttributeProcessCPUStateWait = "wait" +) + +// Attributes for remote procedure calls. +const ( + // The error codes of the Connect request. Error codes are always string values. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeRPCConnectRPCErrorCode = "rpc.connect_rpc.error_code" + // The numeric status code of the gRPC request. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeRPCGRPCStatusCode = "rpc.grpc.status_code" + // error.code property of response if it is an error response. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: -32700, 100 + AttributeRPCJsonrpcErrorCode = "rpc.jsonrpc.error_code" + // error.message property of response if it is an error response. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Parse error', 'User already exists' + AttributeRPCJsonrpcErrorMessage = "rpc.jsonrpc.error_message" + // id property of request or response. Since protocol allows id to be int, string, + // null or missing (for notifications), value is expected to be cast to string for + // simplicity. Use empty string in case of null value. Omit entirely if this is a + // notification. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '10', 'request-7', '' + AttributeRPCJsonrpcRequestID = "rpc.jsonrpc.request_id" + // Protocol version as in jsonrpc property of request/response. Since JSON-RPC 1.0 + // doesn't specify this, the value can be omitted. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2.0', '1.0' + AttributeRPCJsonrpcVersion = "rpc.jsonrpc.version" + // Compressed size of the message in bytes. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + AttributeRPCMessageCompressedSize = "rpc.message.compressed_size" + // MUST be calculated as two different counters starting from 1: one for sent + // messages and one for received messages. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Note: This way we guarantee that the values will be consistent between + // different implementations. + AttributeRPCMessageID = "rpc.message.id" + // Whether this is a received or sent message. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeRPCMessageType = "rpc.message.type" + // Uncompressed size of the message in bytes.
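Editorial aside, not part of the patch: a minimal sketch populating a few of the process attributes above from the Go standard library (requires importing "os" alongside the earlier imports).

```go
// processAttrs describes the running process using the constants above.
func processAttrs() []attribute.KeyValue {
	return []attribute.KeyValue{
		attribute.Int(semconv.AttributeProcessPID, os.Getpid()),
		attribute.Int(semconv.AttributeProcessParentPID, os.Getppid()),
		attribute.StringSlice(semconv.AttributeProcessCommandArgs, os.Args),
	}
}
```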
+ // + // Type: int + // Requirement Level: Optional + // Stability: experimental + AttributeRPCMessageUncompressedSize = "rpc.message.uncompressed_size" + // The name of the (logical) method being called, must be equal to the $method + // part in the span name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'exampleMethod' + // Note: This is the logical name of the method from the RPC interface + // perspective, which can be different from the name of any implementing + // method/function. The code.function attribute may be used to store the latter + // (e.g., method actually executing the call on the server side, RPC client stub + // method on the client side). + AttributeRPCMethod = "rpc.method" + // The full (logical) name of the service being called, including its package + // name, if applicable. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'myservice.EchoService' + // Note: This is the logical name of the service from the RPC interface + // perspective, which can be different from the name of any implementing class. + // The code.namespace attribute may be used to store the latter (despite the + // attribute name, it may include a class name; e.g., class with method actually + // executing the call on the server side, RPC client stub class on the client + // side). + AttributeRPCService = "rpc.service" + // A string identifying the remoting system. See below for a list of well-known + // identifiers. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeRPCSystem = "rpc.system" +) + +const ( + // cancelled + AttributeRPCConnectRPCErrorCodeCancelled = "cancelled" + // unknown + AttributeRPCConnectRPCErrorCodeUnknown = "unknown" + // invalid_argument + AttributeRPCConnectRPCErrorCodeInvalidArgument = "invalid_argument" + // deadline_exceeded + AttributeRPCConnectRPCErrorCodeDeadlineExceeded = "deadline_exceeded" + // not_found + AttributeRPCConnectRPCErrorCodeNotFound = "not_found" + // already_exists + AttributeRPCConnectRPCErrorCodeAlreadyExists = "already_exists" + // permission_denied + AttributeRPCConnectRPCErrorCodePermissionDenied = "permission_denied" + // resource_exhausted + AttributeRPCConnectRPCErrorCodeResourceExhausted = "resource_exhausted" + // failed_precondition + AttributeRPCConnectRPCErrorCodeFailedPrecondition = "failed_precondition" + // aborted + AttributeRPCConnectRPCErrorCodeAborted = "aborted" + // out_of_range + AttributeRPCConnectRPCErrorCodeOutOfRange = "out_of_range" + // unimplemented + AttributeRPCConnectRPCErrorCodeUnimplemented = "unimplemented" + // internal + AttributeRPCConnectRPCErrorCodeInternal = "internal" + // unavailable + AttributeRPCConnectRPCErrorCodeUnavailable = "unavailable" + // data_loss + AttributeRPCConnectRPCErrorCodeDataLoss = "data_loss" + // unauthenticated + AttributeRPCConnectRPCErrorCodeUnauthenticated = "unauthenticated" +) + +const ( + // OK + AttributeRPCGRPCStatusCodeOk = "0" + // CANCELLED + AttributeRPCGRPCStatusCodeCancelled = "1" + // UNKNOWN + AttributeRPCGRPCStatusCodeUnknown = "2" + // INVALID_ARGUMENT + AttributeRPCGRPCStatusCodeInvalidArgument = "3" + // DEADLINE_EXCEEDED + AttributeRPCGRPCStatusCodeDeadlineExceeded = "4" + // NOT_FOUND + AttributeRPCGRPCStatusCodeNotFound = "5" + // ALREADY_EXISTS + AttributeRPCGRPCStatusCodeAlreadyExists = "6" + // PERMISSION_DENIED + AttributeRPCGRPCStatusCodePermissionDenied = "7" + // RESOURCE_EXHAUSTED + 
AttributeRPCGRPCStatusCodeResourceExhausted = "8" + // FAILED_PRECONDITION + AttributeRPCGRPCStatusCodeFailedPrecondition = "9" + // ABORTED + AttributeRPCGRPCStatusCodeAborted = "10" + // OUT_OF_RANGE + AttributeRPCGRPCStatusCodeOutOfRange = "11" + // UNIMPLEMENTED + AttributeRPCGRPCStatusCodeUnimplemented = "12" + // INTERNAL + AttributeRPCGRPCStatusCodeInternal = "13" + // UNAVAILABLE + AttributeRPCGRPCStatusCodeUnavailable = "14" + // DATA_LOSS + AttributeRPCGRPCStatusCodeDataLoss = "15" + // UNAUTHENTICATED + AttributeRPCGRPCStatusCodeUnauthenticated = "16" +) + +const ( + // sent + AttributeRPCMessageTypeSent = "SENT" + // received + AttributeRPCMessageTypeReceived = "RECEIVED" +) + +const ( + // gRPC + AttributeRPCSystemGRPC = "grpc" + // Java RMI + AttributeRPCSystemJavaRmi = "java_rmi" + // .NET WCF + AttributeRPCSystemDotnetWcf = "dotnet_wcf" + // Apache Dubbo + AttributeRPCSystemApacheDubbo = "apache_dubbo" + // Connect RPC + AttributeRPCSystemConnectRPC = "connect_rpc" +) + +// These attributes may be used to describe the server in a connection-based +// network interaction where there is one side that initiates the connection +// (the client is the side that initiates the connection). This covers all TCP +// network interactions since TCP is connection-based and one side initiates +// the connection (an exception is made for peer-to-peer communication over TCP +// where the "user-facing" surface of the protocol / API doesn't expose a clear +// notion of client and server). This also covers UDP network interactions +// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS. +const ( + // Server domain name if available without reverse DNS lookup; otherwise, IP + // address or Unix domain socket name. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the client side, and when communicating through an + // intermediary, server.address SHOULD represent the server address behind any + // intermediaries, for example proxies, if it's available. + AttributeServerAddress = "server.address" + // Server port number. + // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 80, 8080, 443 + // Note: When observed from the client side, and when communicating through an + // intermediary, server.port SHOULD represent the server port behind any + // intermediaries, for example proxies, if it's available. + AttributeServerPort = "server.port" +) + +// A service instance. +const ( + // The string ID of the service instance. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '627cc493-f310-47de-96bd-71410b7dec09' + // Note: MUST be unique for each instance of the same + // service.namespace,service.name pair (in other words + // service.namespace,service.name,service.instance.id triplet MUST be globally + // unique). The ID helps to + // distinguish instances of the same service that exist at the same time (e.g. + // instances of a horizontally scaled + // service). Implementations, such as SDKs, are recommended to generate a random + // Version 1 or Version 4 RFC + // 4122 UUID, but are free to use an inherent unique ID as the source of + // this value if stability is desirable.
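Editorial aside, not part of the patch: a fragment annotating a gRPC client span with the RPC attributes above, reusing the earlier imports. Note that the gRPC status-code constants are string-typed here ("0".."16"), matching the generated constants, so attribute.String is used.

```go
// Describe a gRPC call on a client span; values are illustrative.
span.SetAttributes(
	attribute.String(semconv.AttributeRPCSystem, semconv.AttributeRPCSystemGRPC),
	attribute.String(semconv.AttributeRPCService, "myservice.EchoService"),
	attribute.String(semconv.AttributeRPCMethod, "exampleMethod"),
	attribute.String(semconv.AttributeRPCGRPCStatusCode, semconv.AttributeRPCGRPCStatusCodeOk),
)
```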
In that case, the ID SHOULD be used as + // source of a UUID Version 5 and + // SHOULD use the following UUID as the namespace: 4d63009a-8d0f-11ee- + // aad7-4c796ed8e320. UUIDs are typically recommended, as only an opaque value for + // the purposes of identifying a service instance is + // needed. Similar to what can be seen in the man page for the + // /etc/machine-id file, the underlying + // data, such as pod name and namespace, should be treated as confidential, being + // the user's choice to expose it + // or not via another resource attribute. For applications running behind an + // application server (like unicorn), we do not recommend using one identifier + // for all processes participating in the application. Instead, it's recommended + // that each division (e.g. a worker + // thread in unicorn) have its own instance.id. It's not recommended for a + // Collector to set service.instance.id if it can't unambiguously determine the + // service instance that is generating that telemetry. For instance, creating a + // UUID based on pod.name will + // likely be wrong, as the Collector might not know from which container within + // that pod the telemetry originated. + // However, Collectors can set the service.instance.id if they can unambiguously + // determine the service instance + // for that telemetry. This is typically the case for scraping receivers, as they + // know the target address and + // port. + AttributeServiceInstanceID = "service.instance.id" + // Logical name of the service. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'shoppingcart' + // Note: MUST be the same for all instances of horizontally scaled services. If + // the value was not specified, SDKs MUST fall back to unknown_service: + // concatenated with process.executable.name, e.g. unknown_service:bash. If + // process.executable.name is not available, the value MUST be set to + // unknown_service. + AttributeServiceName = "service.name" + // A namespace for service.name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Shop' + // Note: A string value having a meaning that helps to distinguish a group of + // services, for example the team name that owns a group of services. service.name + // is expected to be unique within the same namespace. If service.namespace is not + // specified in the Resource then service.name is expected to be unique for all + // services that have no explicit namespace defined (so the empty/unspecified + // namespace is simply one more valid namespace). Zero-length namespace string is + // assumed equal to unspecified namespace. + AttributeServiceNamespace = "service.namespace" + // The version string of the service API or implementation. The format is not + // defined by these conventions. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '2.0.0', 'a01dbef8a' + AttributeServiceVersion = "service.version" +) + +// Session is defined as the period of time encompassing all activities +// performed by the application and the actions executed by the end user. +// Consequently, a Session is represented as a collection of Logs, Events, and +// Spans emitted by the Client Application throughout the Session's duration. +// Each Session is assigned a unique identifier, which is included as an +// attribute in the Logs, Events, and Spans generated during the Session's +// lifecycle.
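Editorial aside, not part of the patch: a fragment building a resource that carries the service identity attributes above, reusing the earlier imports. The instance ID is an illustrative RFC 4122 version 4 UUID, per the note on service.instance.id.

```go
// A service-identity resource; values are illustrative.
res := resource.NewSchemaless(
	attribute.String(semconv.AttributeServiceName, "shoppingcart"),
	attribute.String(semconv.AttributeServiceNamespace, "Shop"),
	attribute.String(semconv.AttributeServiceInstanceID, "627cc493-f310-47de-96bd-71410b7dec09"),
	attribute.String(semconv.AttributeServiceVersion, "2.0.0"),
)
```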
+// When a session reaches end of life, typically due to user inactivity or +// session timeout, a new session identifier will be assigned. The previous +// session identifier may be provided by the instrumentation so that telemetry +// backends can link the two sessions. +const ( + // A unique id to identify a session. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '00112233-4455-6677-8899-aabbccddeeff' + AttributeSessionID = "session.id" + // The previous session.id for this user, when known. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '00112233-4455-6677-8899-aabbccddeeff' + AttributeSessionPreviousID = "session.previous_id" +) + +// SignalR attributes +const ( + // SignalR HTTP connection closure status. + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + // Examples: 'app_shutdown', 'timeout' + AttributeSignalrConnectionStatus = "signalr.connection.status" + // SignalR transport type + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + // Examples: 'web_sockets', 'long_polling' + AttributeSignalrTransport = "signalr.transport" +) + +const ( + // The connection was closed normally + AttributeSignalrConnectionStatusNormalClosure = "normal_closure" + // The connection was closed due to a timeout + AttributeSignalrConnectionStatusTimeout = "timeout" + // The connection was closed because the app is shutting down + AttributeSignalrConnectionStatusAppShutdown = "app_shutdown" +) + +const ( + // ServerSentEvents protocol + AttributeSignalrTransportServerSentEvents = "server_sent_events" + // LongPolling protocol + AttributeSignalrTransportLongPolling = "long_polling" + // WebSockets protocol + AttributeSignalrTransportWebSockets = "web_sockets" +) + +// These attributes may be used to describe the sender of a network +// exchange/packet. These should be used when there is no client/server +// relationship between the two sides, or when that relationship is unknown. +// This covers low-level network interactions (e.g. packet tracing) where you +// don't know if there was a connection or which side initiated it. This also +// covers unidirectional UDP flows and peer-to-peer communication where the +// "user-facing" surface of the protocol / API doesn't expose a clear notion of +// client and server. +const ( + // Source address - domain name if available without reverse DNS lookup; + // otherwise, IP address or Unix domain socket name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'source.example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the destination side, and when communicating through + // an intermediary, source.address SHOULD represent the source address behind any + // intermediaries, for example proxies, if it's available. 
+ AttributeSourceAddress = "source.address" + // Source port number + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 3389, 2888 + AttributeSourcePort = "source.port" +) + +// Describes System attributes +const ( + // The device identifier + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '(identifier)' + AttributeSystemDevice = "system.device" +) + +// Describes System CPU attributes +const ( + // The logical CPU number [0..n-1] + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 1 + AttributeSystemCPULogicalNumber = "system.cpu.logical_number" + // The state of the CPU + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'idle', 'interrupt' + AttributeSystemCPUState = "system.cpu.state" +) + +const ( + // user + AttributeSystemCPUStateUser = "user" + // system + AttributeSystemCPUStateSystem = "system" + // nice + AttributeSystemCPUStateNice = "nice" + // idle + AttributeSystemCPUStateIdle = "idle" + // iowait + AttributeSystemCPUStateIowait = "iowait" + // interrupt + AttributeSystemCPUStateInterrupt = "interrupt" + // steal + AttributeSystemCPUStateSteal = "steal" +) + +// Describes System Memory attributes +const ( + // The memory state + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'free', 'cached' + AttributeSystemMemoryState = "system.memory.state" +) + +const ( + // used + AttributeSystemMemoryStateUsed = "used" + // free + AttributeSystemMemoryStateFree = "free" + // shared + AttributeSystemMemoryStateShared = "shared" + // buffers + AttributeSystemMemoryStateBuffers = "buffers" + // cached + AttributeSystemMemoryStateCached = "cached" +) + +// Describes System Memory Paging attributes +const ( + // The paging access direction + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'in' + AttributeSystemPagingDirection = "system.paging.direction" + // The memory paging state + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'free' + AttributeSystemPagingState = "system.paging.state" + // The memory paging type + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'minor' + AttributeSystemPagingType = "system.paging.type" +) + +const ( + // in + AttributeSystemPagingDirectionIn = "in" + // out + AttributeSystemPagingDirectionOut = "out" +) + +const ( + // used + AttributeSystemPagingStateUsed = "used" + // free + AttributeSystemPagingStateFree = "free" +) + +const ( + // major + AttributeSystemPagingTypeMajor = "major" + // minor + AttributeSystemPagingTypeMinor = "minor" +) + +// Describes Filesystem attributes +const ( + // The filesystem mode + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'rw, ro' + AttributeSystemFilesystemMode = "system.filesystem.mode" + // The filesystem mount path + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '/mnt/data' + AttributeSystemFilesystemMountpoint = "system.filesystem.mountpoint" + // The filesystem state + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'used' + AttributeSystemFilesystemState = "system.filesystem.state" + // The filesystem type + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + 
// Examples: 'ext4' + AttributeSystemFilesystemType = "system.filesystem.type" +) + +const ( + // used + AttributeSystemFilesystemStateUsed = "used" + // free + AttributeSystemFilesystemStateFree = "free" + // reserved + AttributeSystemFilesystemStateReserved = "reserved" +) + +const ( + // fat32 + AttributeSystemFilesystemTypeFat32 = "fat32" + // exfat + AttributeSystemFilesystemTypeExfat = "exfat" + // ntfs + AttributeSystemFilesystemTypeNtfs = "ntfs" + // refs + AttributeSystemFilesystemTypeRefs = "refs" + // hfsplus + AttributeSystemFilesystemTypeHfsplus = "hfsplus" + // ext4 + AttributeSystemFilesystemTypeExt4 = "ext4" +) + +// Describes Network attributes +const ( + // A stateless protocol MUST NOT set this attribute + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'close_wait' + AttributeSystemNetworkState = "system.network.state" +) + +const ( + // close + AttributeSystemNetworkStateClose = "close" + // close_wait + AttributeSystemNetworkStateCloseWait = "close_wait" + // closing + AttributeSystemNetworkStateClosing = "closing" + // delete + AttributeSystemNetworkStateDelete = "delete" + // established + AttributeSystemNetworkStateEstablished = "established" + // fin_wait_1 + AttributeSystemNetworkStateFinWait1 = "fin_wait_1" + // fin_wait_2 + AttributeSystemNetworkStateFinWait2 = "fin_wait_2" + // last_ack + AttributeSystemNetworkStateLastAck = "last_ack" + // listen + AttributeSystemNetworkStateListen = "listen" + // syn_recv + AttributeSystemNetworkStateSynRecv = "syn_recv" + // syn_sent + AttributeSystemNetworkStateSynSent = "syn_sent" + // time_wait + AttributeSystemNetworkStateTimeWait = "time_wait" +) + +// Describes System Process attributes +const ( + // The process state, e.g., Linux Process State Codes + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'running' + AttributeSystemProcessStatus = "system.process.status" +) + +const ( + // running + AttributeSystemProcessStatusRunning = "running" + // sleeping + AttributeSystemProcessStatusSleeping = "sleeping" + // stopped + AttributeSystemProcessStatusStopped = "stopped" + // defunct + AttributeSystemProcessStatusDefunct = "defunct" +) + +// Attributes for telemetry SDK. +const ( + // The language of the telemetry SDK. + // + // Type: Enum + // Requirement Level: Required + // Stability: stable + AttributeTelemetrySDKLanguage = "telemetry.sdk.language" + // The name of the telemetry SDK as defined above. + // + // Type: string + // Requirement Level: Required + // Stability: stable + // Examples: 'opentelemetry' + // Note: The OpenTelemetry SDK MUST set the telemetry.sdk.name attribute to + // opentelemetry. + // If another SDK, like a fork or a vendor-provided implementation, is used, this + // SDK MUST set the + // telemetry.sdk.name attribute to the fully-qualified class or module name of + // this SDK's main entry point + // or another suitable identifier depending on the language. + // The identifier opentelemetry is reserved and MUST NOT be used in this case. + // All custom identifiers SHOULD be stable across different versions of an + // implementation. + AttributeTelemetrySDKName = "telemetry.sdk.name" + // The version string of the telemetry SDK. + // + // Type: string + // Requirement Level: Required + // Stability: stable + // Examples: '1.2.3' + AttributeTelemetrySDKVersion = "telemetry.sdk.version" + // The name of the auto instrumentation agent or distribution, if used. 
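[Editor's note] The system.* state enums above (CPU, memory, paging, filesystem, process) are intended as data-point attributes on host metrics rather than span attributes. A minimal sketch of that usage, assuming the collector pdata package and the vendored v1.26.0 semconv import path; the metric name and byte counts are invented for illustration:

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pmetric"
	semconv "go.opentelemetry.io/collector/semconv/v1.26.0"
)

func main() {
	// One gauge, one data point per memory state (values are made up).
	metric := pmetric.NewMetric()
	metric.SetName("system.memory.usage") // illustrative name, not defined in this file
	gauge := metric.SetEmptyGauge()
	for state, bytes := range map[string]int64{
		semconv.AttributeSystemMemoryStateUsed: 4 << 30,
		semconv.AttributeSystemMemoryStateFree: 12 << 30,
	} {
		dp := gauge.DataPoints().AppendEmpty()
		dp.SetIntValue(bytes)
		dp.Attributes().PutStr(semconv.AttributeSystemMemoryState, state)
	}
	fmt.Println(metric.Name(), gauge.DataPoints().Len()) // system.memory.usage 2
}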
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'parts-unlimited-java' + // Note: Official auto instrumentation agents and distributions SHOULD set the + // telemetry.distro.name attribute to + // a string starting with opentelemetry-, e.g. opentelemetry-java-instrumentation. + AttributeTelemetryDistroName = "telemetry.distro.name" + // The version string of the auto instrumentation agent or distribution, if used. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '1.2.3' + AttributeTelemetryDistroVersion = "telemetry.distro.version" +) + +const ( + // cpp + AttributeTelemetrySDKLanguageCPP = "cpp" + // dotnet + AttributeTelemetrySDKLanguageDotnet = "dotnet" + // erlang + AttributeTelemetrySDKLanguageErlang = "erlang" + // go + AttributeTelemetrySDKLanguageGo = "go" + // java + AttributeTelemetrySDKLanguageJava = "java" + // nodejs + AttributeTelemetrySDKLanguageNodejs = "nodejs" + // php + AttributeTelemetrySDKLanguagePHP = "php" + // python + AttributeTelemetrySDKLanguagePython = "python" + // ruby + AttributeTelemetrySDKLanguageRuby = "ruby" + // rust + AttributeTelemetrySDKLanguageRust = "rust" + // swift + AttributeTelemetrySDKLanguageSwift = "swift" + // webjs + AttributeTelemetrySDKLanguageWebjs = "webjs" +) + +// These attributes may be used for any operation to store information about a +// thread that started a span. +const ( + // Current "managed" thread ID (as opposed to OS thread ID). + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 42 + AttributeThreadID = "thread.id" + // Current thread name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'main' + AttributeThreadName = "thread.name" +) + +// Semantic convention attributes in the TLS namespace. +const ( + // String indicating the cipher used during the current connection. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'TLS_RSA_WITH_3DES_EDE_CBC_SHA', + // 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' + // Note: The values allowed for tls.cipher MUST be one of the Descriptions of the + // registered TLS Cipher Suits. + AttributeTLSCipher = "tls.cipher" + // PEM-encoded stand-alone certificate offered by the client. This is usually + // mutually-exclusive of client.certificate_chain since this value also exists in + // that list. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'MII...' + AttributeTLSClientCertificate = "tls.client.certificate" + // Array of PEM-encoded certificates that make up the certificate chain offered by + // the client. This is usually mutually-exclusive of client.certificate since that + // value should be the first certificate in the chain. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'MII...', 'MI...' + AttributeTLSClientCertificateChain = "tls.client.certificate_chain" + // Certificate fingerprint using the MD5 digest of DER-encoded version of + // certificate offered by the client. For consistency with other hash values, this + // value should be formatted as an uppercase hash. 
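[Editor's note] Because every constant in this file is a plain string, the registry slots directly into pdata attribute maps. A minimal sketch, assuming the vendored v1.26.0 import path, of stamping the required telemetry.sdk.* triplet onto resource attributes (the version value is a placeholder):

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pcommon"
	semconv "go.opentelemetry.io/collector/semconv/v1.26.0"
)

func main() {
	attrs := pcommon.NewMap()
	// Per the note above, official SDKs MUST set telemetry.sdk.name to "opentelemetry".
	attrs.PutStr(semconv.AttributeTelemetrySDKName, "opentelemetry")
	attrs.PutStr(semconv.AttributeTelemetrySDKLanguage, semconv.AttributeTelemetrySDKLanguageGo)
	attrs.PutStr(semconv.AttributeTelemetrySDKVersion, "1.2.3") // placeholder version
	fmt.Println(attrs.AsRaw())
}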
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC' + AttributeTLSClientHashMd5 = "tls.client.hash.md5" + // Certificate fingerprint using the SHA1 digest of DER-encoded version of + // certificate offered by the client. For consistency with other hash values, this + // value should be formatted as an uppercase hash. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A' + AttributeTLSClientHashSha1 = "tls.client.hash.sha1" + // Certificate fingerprint using the SHA256 digest of DER-encoded version of + // certificate offered by the client. For consistency with other hash values, this + // value should be formatted as an uppercase hash. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0' + AttributeTLSClientHashSha256 = "tls.client.hash.sha256" + // Distinguished name of subject of the issuer of the x.509 certificate presented + // by the client. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example, DC=com' + AttributeTLSClientIssuer = "tls.client.issuer" + // A hash that identifies clients based on how they perform an SSL/TLS handshake. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'd4e5b18d6b55c71272893221c96ba240' + AttributeTLSClientJa3 = "tls.client.ja3" + // Date/Time indicating when client certificate is no longer considered valid. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2021-01-01T00:00:00.000Z' + AttributeTLSClientNotAfter = "tls.client.not_after" + // Date/Time indicating when client certificate is first considered valid. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '1970-01-01T00:00:00.000Z' + AttributeTLSClientNotBefore = "tls.client.not_before" + // Also called an SNI, this tells the server which hostname to which the client is + // attempting to connect to. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry.io' + AttributeTLSClientServerName = "tls.client.server_name" + // Distinguished name of subject of the x.509 certificate presented by the client. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'CN=myclient, OU=Documentation Team, DC=example, DC=com' + AttributeTLSClientSubject = "tls.client.subject" + // Array of ciphers offered by the client during the client hello. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: '"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + // "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "..."' + AttributeTLSClientSupportedCiphers = "tls.client.supported_ciphers" + // String indicating the curve used for the given cipher, when applicable + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'secp256r1' + AttributeTLSCurve = "tls.curve" + // Boolean flag indicating if the TLS negotiation was successful and transitioned + // to an encrypted tunnel. 
+ // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + // Examples: True + AttributeTLSEstablished = "tls.established" + // String indicating the protocol being tunneled. Per the values in the IANA + // registry, this string should be lower case. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'http/1.1' + AttributeTLSNextProtocol = "tls.next_protocol" + // Normalized lowercase protocol name parsed from original string of the + // negotiated SSL/TLS protocol version + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeTLSProtocolName = "tls.protocol.name" + // Numeric part of the version parsed from the original string of the negotiated + // SSL/TLS protocol version + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '1.2', '3' + AttributeTLSProtocolVersion = "tls.protocol.version" + // Boolean flag indicating if this TLS connection was resumed from an existing TLS + // negotiation. + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + // Examples: True + AttributeTLSResumed = "tls.resumed" + // PEM-encoded stand-alone certificate offered by the server. This is usually + // mutually-exclusive of server.certificate_chain since this value also exists in + // that list. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'MII...' + AttributeTLSServerCertificate = "tls.server.certificate" + // Array of PEM-encoded certificates that make up the certificate chain offered by + // the server. This is usually mutually-exclusive of server.certificate since that + // value should be the first certificate in the chain. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'MII...', 'MI...' + AttributeTLSServerCertificateChain = "tls.server.certificate_chain" + // Certificate fingerprint using the MD5 digest of DER-encoded version of + // certificate offered by the server. For consistency with other hash values, this + // value should be formatted as an uppercase hash. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC' + AttributeTLSServerHashMd5 = "tls.server.hash.md5" + // Certificate fingerprint using the SHA1 digest of DER-encoded version of + // certificate offered by the server. For consistency with other hash values, this + // value should be formatted as an uppercase hash. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A' + AttributeTLSServerHashSha1 = "tls.server.hash.sha1" + // Certificate fingerprint using the SHA256 digest of DER-encoded version of + // certificate offered by the server. For consistency with other hash values, this + // value should be formatted as an uppercase hash. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0' + AttributeTLSServerHashSha256 = "tls.server.hash.sha256" + // Distinguished name of subject of the issuer of the x.509 certificate presented + // by the client. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example, DC=com' + AttributeTLSServerIssuer = "tls.server.issuer" + // A hash that identifies servers based on how they perform an SSL/TLS handshake. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'd4e5b18d6b55c71272893221c96ba240' + AttributeTLSServerJa3s = "tls.server.ja3s" + // Date/Time indicating when server certificate is no longer considered valid. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2021-01-01T00:00:00.000Z' + AttributeTLSServerNotAfter = "tls.server.not_after" + // Date/Time indicating when server certificate is first considered valid. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '1970-01-01T00:00:00.000Z' + AttributeTLSServerNotBefore = "tls.server.not_before" + // Distinguished name of subject of the x.509 certificate presented by the server. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'CN=myserver, OU=Documentation Team, DC=example, DC=com' + AttributeTLSServerSubject = "tls.server.subject" +) + +const ( + // ssl + AttributeTLSProtocolNameSsl = "ssl" + // tls + AttributeTLSProtocolNameTLS = "tls" +) + +// Attributes describing URL. +const ( + // Domain extracted from the url.full, such as "opentelemetry.io". + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'www.foo.bar', 'opentelemetry.io', '3.12.167.2', + // '[1080:0:0:0:8:800:200C:417A]' + // Note: In some cases a URL may refer to an IP and/or port directly, without a + // domain name. In this case, the IP address would go to the domain field. If the + // URL contains a literal IPv6 address enclosed by [ and ], the [ and ] characters + // should also be captured in the domain field. + AttributeURLDomain = "url.domain" + // The file extension extracted from the url.full, excluding the leading dot. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'png', 'gz' + // Note: The file extension is only set if it exists, as not every url has a file + // extension. When the file name has multiple extensions example.tar.gz, only the + // last one should be captured gz, not tar.gz. + AttributeURLExtension = "url.extension" + // The URI fragment component + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'SemConv' + AttributeURLFragment = "url.fragment" + // Absolute URL describing a network resource according to RFC3986 + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv', '//localhost' + // Note: For network calls, URL usually has + // scheme://host[:port][path][?query][#fragment] format, where the fragment is not + // transmitted over HTTP, but if it is known, it SHOULD be included nevertheless. + // url.full MUST NOT contain credentials passed via URL in form of + // https://username:password@www.example.com/. In such case username and password + // SHOULD be redacted and attribute's value SHOULD be + // https://REDACTED:REDACTED@www.example.com/. + // url.full SHOULD capture the absolute URL when it is available (or can be + // reconstructed). 
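[Editor's note] Most values in the tls.* group above fall straight out of crypto/tls. A hedged sketch of one possible mapping; the tlsAttrs helper is hypothetical, not part of this package, and import paths are the ones vendored in this patch:

package main

import (
	"crypto/tls"
	"fmt"

	"go.opentelemetry.io/collector/pdata/pcommon"
	semconv "go.opentelemetry.io/collector/semconv/v1.26.0"
)

// tlsAttrs is a hypothetical helper mapping crypto/tls connection state
// onto the tls.* attribute names declared above.
func tlsAttrs(cs tls.ConnectionState) pcommon.Map {
	m := pcommon.NewMap()
	m.PutBool(semconv.AttributeTLSEstablished, cs.HandshakeComplete)
	m.PutBool(semconv.AttributeTLSResumed, cs.DidResume)
	m.PutStr(semconv.AttributeTLSCipher, tls.CipherSuiteName(cs.CipherSuite))
	m.PutStr(semconv.AttributeTLSProtocolName, semconv.AttributeTLSProtocolNameTLS)
	if cs.NegotiatedProtocol != "" {
		m.PutStr(semconv.AttributeTLSNextProtocol, cs.NegotiatedProtocol) // e.g. "http/1.1"
	}
	return m
}

func main() {
	cs := tls.ConnectionState{HandshakeComplete: true, CipherSuite: tls.TLS_AES_128_GCM_SHA256}
	fmt.Println(tlsAttrs(cs).AsRaw())
}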
Sensitive content provided in url.full SHOULD be scrubbed when + // instrumentations can identify it. + AttributeURLFull = "url.full" + // Unmodified original URL as seen in the event source. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv', + // 'search?q=OpenTelemetry' + // Note: In network monitoring, the observed URL may be a full URL, whereas in + // access logs, the URL is often just represented as a path. This field is meant + // to represent the URL as it was observed, complete or not. + // url.original might contain credentials passed via URL in form of + // https://username:password@www.example.com/. In such case password and username + // SHOULD NOT be redacted and attribute's value SHOULD remain the same. + AttributeURLOriginal = "url.original" + // The URI path component + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '/search' + // Note: Sensitive content provided in url.path SHOULD be scrubbed when + // instrumentations can identify it. + AttributeURLPath = "url.path" + // Port extracted from the url.full + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 443 + AttributeURLPort = "url.port" + // The URI query component + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'q=OpenTelemetry' + // Note: Sensitive content provided in url.query SHOULD be scrubbed when + // instrumentations can identify it. + AttributeURLQuery = "url.query" + // The highest registered url domain, stripped of the subdomain. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'example.com', 'foo.co.uk' + // Note: This value can be determined precisely with the public suffix list. For + // example, the registered domain for foo.example.com is example.com. Trying to + // approximate this by simply taking the last two labels will not work well for + // TLDs such as co.uk. + AttributeURLRegisteredDomain = "url.registered_domain" + // The URI scheme component identifying the used protocol. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'https', 'ftp', 'telnet' + AttributeURLScheme = "url.scheme" + // The subdomain portion of a fully qualified domain name includes all of the + // names except the host name under the registered_domain. In a partially + // qualified domain, or if the qualification level of the full name cannot be + // determined, subdomain contains all of the names below the registered domain. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'east', 'sub2.sub1' + // Note: The subdomain portion of www.east.mydomain.co.uk is east. If the domain + // has multiple levels of subdomain, such as sub2.sub1.example.com, the subdomain + // field should contain sub2.sub1, with no trailing period. + AttributeURLSubdomain = "url.subdomain" + // The low-cardinality template of an absolute path reference. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '/users/{id}', '/users/:id', '/users?id={id}' + AttributeURLTemplate = "url.template" + // The effective top level domain (eTLD), also known as the domain suffix, is the + // last part of the domain name. For example, the top level domain for example.com + // is com. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'com', 'co.uk' + // Note: This value can be determined precisely with the public suffix list. + AttributeURLTopLevelDomain = "url.top_level_domain" +) + +// Describes user-agent attributes. +const ( + // Name of the user-agent extracted from original. Usually refers to the browser's + // name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Safari', 'YourApp' + // Note: Example of extracting browser's name from original string. In the case of + // using a user-agent for non-browser products, such as microservices with + // multiple names/versions inside the user_agent.original, the most significant + // name SHOULD be selected. In such a scenario it should align with + // user_agent.version + AttributeUserAgentName = "user_agent.name" + // Value of the HTTP User-Agent header sent by the client. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU iPhone + // OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) + // Version/14.1.2 Mobile/15E148 Safari/604.1', 'YourApp/1.0.0 grpc-java- + // okhttp/1.27.2' + AttributeUserAgentOriginal = "user_agent.original" + // Version of the user-agent extracted from original. Usually refers to the + // browser's version + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '14.1.2', '1.0.0' + // Note: Example of extracting browser's version from original string. In the case + // of using a user-agent for non-browser products, such as microservices with + // multiple names/versions inside the user_agent.original, the most significant + // version SHOULD be selected. In such a scenario it should align with + // user_agent.name + AttributeUserAgentVersion = "user_agent.version" +) + +// The attributes used to describe the packaged software running the +// application code. +const ( + // Additional description of the web engine (e.g. detailed version and edition + // information). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - 2.2.2.Final' + AttributeWebEngineDescription = "webengine.description" + // The name of the web engine. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'WildFly' + AttributeWebEngineName = "webengine.name" + // The version of the web engine. 
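[Editor's note] The url.full note above requires credentials to be scrubbed before the attribute is recorded. A minimal sketch of that rule using net/url; redactURL is a hypothetical helper, and the semconv path is the vendored v1.26.0 package:

package main

import (
	"fmt"
	"net/url"

	"go.opentelemetry.io/collector/pdata/pcommon"
	semconv "go.opentelemetry.io/collector/semconv/v1.26.0"
)

// redactURL replaces any userinfo component with REDACTED:REDACTED,
// matching the redaction shape the url.full note prescribes.
func redactURL(raw string) string {
	u, err := url.Parse(raw)
	if err != nil || u.User == nil {
		return raw
	}
	u.User = url.UserPassword("REDACTED", "REDACTED")
	return u.String()
}

func main() {
	attrs := pcommon.NewMap()
	attrs.PutStr(semconv.AttributeURLFull,
		redactURL("https://username:password@www.example.com/search?q=OpenTelemetry"))
	fmt.Println(attrs.AsRaw()) // url.full -> https://REDACTED:REDACTED@www.example.com/search?q=OpenTelemetry
}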
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '21.0.0' + AttributeWebEngineVersion = "webengine.version" +) + +func GetAttribute_groupSemanticConventionAttributeNames() []string { + return []string{ + AttributeAndroidOSAPILevel, + AttributeAspnetcoreRateLimitingResult, + AttributeAspnetcoreDiagnosticsHandlerType, + AttributeAspnetcoreDiagnosticsExceptionResult, + AttributeAspnetcoreRateLimitingPolicy, + AttributeAspnetcoreRequestIsUnhandled, + AttributeAspnetcoreRoutingIsFallback, + AttributeAspnetcoreRoutingMatchStatus, + AttributeAWSRequestID, + AttributeAWSDynamoDBAttributeDefinitions, + AttributeAWSDynamoDBAttributesToGet, + AttributeAWSDynamoDBConsistentRead, + AttributeAWSDynamoDBConsumedCapacity, + AttributeAWSDynamoDBCount, + AttributeAWSDynamoDBExclusiveStartTable, + AttributeAWSDynamoDBGlobalSecondaryIndexUpdates, + AttributeAWSDynamoDBGlobalSecondaryIndexes, + AttributeAWSDynamoDBIndexName, + AttributeAWSDynamoDBItemCollectionMetrics, + AttributeAWSDynamoDBLimit, + AttributeAWSDynamoDBLocalSecondaryIndexes, + AttributeAWSDynamoDBProjection, + AttributeAWSDynamoDBProvisionedReadCapacity, + AttributeAWSDynamoDBProvisionedWriteCapacity, + AttributeAWSDynamoDBScanForward, + AttributeAWSDynamoDBScannedCount, + AttributeAWSDynamoDBSegment, + AttributeAWSDynamoDBSelect, + AttributeAWSDynamoDBTableCount, + AttributeAWSDynamoDBTableNames, + AttributeAWSDynamoDBTotalSegments, + AttributeAWSECSTaskID, + AttributeAWSECSClusterARN, + AttributeAWSECSContainerARN, + AttributeAWSECSLaunchtype, + AttributeAWSECSTaskARN, + AttributeAWSECSTaskFamily, + AttributeAWSECSTaskRevision, + AttributeAWSEKSClusterARN, + AttributeAWSLogGroupARNs, + AttributeAWSLogGroupNames, + AttributeAWSLogStreamARNs, + AttributeAWSLogStreamNames, + AttributeAWSLambdaInvokedARN, + AttributeAWSS3Bucket, + AttributeAWSS3CopySource, + AttributeAWSS3Delete, + AttributeAWSS3Key, + AttributeAWSS3PartNumber, + AttributeAWSS3UploadID, + AttributeBrowserBrands, + AttributeBrowserLanguage, + AttributeBrowserMobile, + AttributeBrowserPlatform, + AttributeClientAddress, + AttributeClientPort, + AttributeCloudAccountID, + AttributeCloudAvailabilityZone, + AttributeCloudPlatform, + AttributeCloudProvider, + AttributeCloudRegion, + AttributeCloudResourceID, + AttributeCloudeventsEventID, + AttributeCloudeventsEventSource, + AttributeCloudeventsEventSpecVersion, + AttributeCloudeventsEventSubject, + AttributeCloudeventsEventType, + AttributeCodeColumn, + AttributeCodeFilepath, + AttributeCodeFunction, + AttributeCodeLineNumber, + AttributeCodeNamespace, + AttributeCodeStacktrace, + AttributeContainerCommand, + AttributeContainerCommandArgs, + AttributeContainerCommandLine, + AttributeContainerCPUState, + AttributeContainerID, + AttributeContainerImageID, + AttributeContainerImageName, + AttributeContainerImageRepoDigests, + AttributeContainerImageTags, + AttributeContainerName, + AttributeContainerRuntime, + AttributeDBClientConnectionsPoolName, + AttributeDBClientConnectionsState, + AttributeDBCollectionName, + AttributeDBNamespace, + AttributeDBOperationName, + AttributeDBQueryText, + AttributeDBSystem, + AttributeDBCassandraConsistencyLevel, + AttributeDBCassandraCoordinatorDC, + AttributeDBCassandraCoordinatorID, + AttributeDBCassandraIdempotence, + AttributeDBCassandraPageSize, + AttributeDBCassandraSpeculativeExecutionCount, + AttributeDBCosmosDBClientID, + AttributeDBCosmosDBConnectionMode, + AttributeDBCosmosDBOperationType, + AttributeDBCosmosDBRequestCharge, + 
AttributeDBCosmosDBRequestContentLength, + AttributeDBCosmosDBStatusCode, + AttributeDBCosmosDBSubStatusCode, + AttributeDBElasticsearchClusterName, + AttributeDBElasticsearchNodeName, + AttributeDeploymentEnvironment, + AttributeAndroidState, + AttributeDestinationAddress, + AttributeDestinationPort, + AttributeDeviceID, + AttributeDeviceManufacturer, + AttributeDeviceModelIdentifier, + AttributeDeviceModelName, + AttributeDiskIoDirection, + AttributeDNSQuestionName, + AttributeEnduserID, + AttributeEnduserRole, + AttributeEnduserScope, + AttributeErrorType, + AttributeEventName, + AttributeExceptionEscaped, + AttributeExceptionMessage, + AttributeExceptionStacktrace, + AttributeExceptionType, + AttributeFaaSColdstart, + AttributeFaaSCron, + AttributeFaaSDocumentCollection, + AttributeFaaSDocumentName, + AttributeFaaSDocumentOperation, + AttributeFaaSDocumentTime, + AttributeFaaSInstance, + AttributeFaaSInvocationID, + AttributeFaaSInvokedName, + AttributeFaaSInvokedProvider, + AttributeFaaSInvokedRegion, + AttributeFaaSMaxMemory, + AttributeFaaSName, + AttributeFaaSTime, + AttributeFaaSTrigger, + AttributeFaaSVersion, + AttributeFeatureFlagKey, + AttributeFeatureFlagProviderName, + AttributeFeatureFlagVariant, + AttributeFileDirectory, + AttributeFileExtension, + AttributeFileName, + AttributeFilePath, + AttributeFileSize, + AttributeGCPCloudRunJobExecution, + AttributeGCPCloudRunJobTaskIndex, + AttributeGCPGceInstanceHostname, + AttributeGCPGceInstanceName, + AttributeGenAiCompletion, + AttributeGenAiPrompt, + AttributeGenAiRequestMaxTokens, + AttributeGenAiRequestModel, + AttributeGenAiRequestTemperature, + AttributeGenAiRequestTopP, + AttributeGenAiResponseFinishReasons, + AttributeGenAiResponseID, + AttributeGenAiResponseModel, + AttributeGenAiSystem, + AttributeGenAiUsageCompletionTokens, + AttributeGenAiUsagePromptTokens, + AttributeGraphqlDocument, + AttributeGraphqlOperationName, + AttributeGraphqlOperationType, + AttributeHerokuAppID, + AttributeHerokuReleaseCommit, + AttributeHerokuReleaseCreationTimestamp, + AttributeHostArch, + AttributeHostCPUCacheL2Size, + AttributeHostCPUFamily, + AttributeHostCPUModelID, + AttributeHostCPUModelName, + AttributeHostCPUStepping, + AttributeHostCPUVendorID, + AttributeHostID, + AttributeHostImageID, + AttributeHostImageName, + AttributeHostImageVersion, + AttributeHostIP, + AttributeHostMac, + AttributeHostName, + AttributeHostType, + AttributeHTTPConnectionState, + AttributeHTTPRequestBodySize, + AttributeHTTPRequestMethod, + AttributeHTTPRequestMethodOriginal, + AttributeHTTPRequestResendCount, + AttributeHTTPRequestSize, + AttributeHTTPResponseBodySize, + AttributeHTTPResponseSize, + AttributeHTTPResponseStatusCode, + AttributeHTTPRoute, + AttributeJvmBufferPoolName, + AttributeJvmGcAction, + AttributeJvmGcName, + AttributeJvmMemoryPoolName, + AttributeJvmMemoryType, + AttributeJvmThreadDaemon, + AttributeJvmThreadState, + AttributeK8SClusterName, + AttributeK8SClusterUID, + AttributeK8SContainerName, + AttributeK8SContainerRestartCount, + AttributeK8SContainerStatusLastTerminatedReason, + AttributeK8SCronJobName, + AttributeK8SCronJobUID, + AttributeK8SDaemonSetName, + AttributeK8SDaemonSetUID, + AttributeK8SDeploymentName, + AttributeK8SDeploymentUID, + AttributeK8SJobName, + AttributeK8SJobUID, + AttributeK8SNamespaceName, + AttributeK8SNodeName, + AttributeK8SNodeUID, + AttributeK8SPodName, + AttributeK8SPodUID, + AttributeK8SReplicaSetName, + AttributeK8SReplicaSetUID, + AttributeK8SStatefulSetName, + AttributeK8SStatefulSetUID, + 
AttributeLogIostream, + AttributeLogFileName, + AttributeLogFileNameResolved, + AttributeLogFilePath, + AttributeLogFilePathResolved, + AttributeLogRecordUID, + AttributeMessagingBatchMessageCount, + AttributeMessagingClientID, + AttributeMessagingDestinationAnonymous, + AttributeMessagingDestinationName, + AttributeMessagingDestinationPartitionID, + AttributeMessagingDestinationTemplate, + AttributeMessagingDestinationTemporary, + AttributeMessagingDestinationPublishAnonymous, + AttributeMessagingDestinationPublishName, + AttributeMessagingMessageBodySize, + AttributeMessagingMessageConversationID, + AttributeMessagingMessageEnvelopeSize, + AttributeMessagingMessageID, + AttributeMessagingOperationName, + AttributeMessagingOperationType, + AttributeMessagingSystem, + AttributeMessagingKafkaConsumerGroup, + AttributeMessagingKafkaMessageKey, + AttributeMessagingKafkaMessageOffset, + AttributeMessagingKafkaMessageTombstone, + AttributeMessagingRabbitmqDestinationRoutingKey, + AttributeMessagingRabbitmqMessageDeliveryTag, + AttributeMessagingRocketmqClientGroup, + AttributeMessagingRocketmqConsumptionModel, + AttributeMessagingRocketmqMessageDelayTimeLevel, + AttributeMessagingRocketmqMessageDeliveryTimestamp, + AttributeMessagingRocketmqMessageGroup, + AttributeMessagingRocketmqMessageKeys, + AttributeMessagingRocketmqMessageTag, + AttributeMessagingRocketmqMessageType, + AttributeMessagingRocketmqNamespace, + AttributeMessagingGCPPubsubMessageAckDeadline, + AttributeMessagingGCPPubsubMessageAckID, + AttributeMessagingGCPPubsubMessageDeliveryAttempt, + AttributeMessagingGCPPubsubMessageOrderingKey, + AttributeMessagingServicebusDestinationSubscriptionName, + AttributeMessagingServicebusDispositionStatus, + AttributeMessagingServicebusMessageDeliveryCount, + AttributeMessagingServicebusMessageEnqueuedTime, + AttributeMessagingEventhubsConsumerGroup, + AttributeMessagingEventhubsMessageEnqueuedTime, + AttributeNetworkCarrierIcc, + AttributeNetworkCarrierMcc, + AttributeNetworkCarrierMnc, + AttributeNetworkCarrierName, + AttributeNetworkConnectionSubtype, + AttributeNetworkConnectionType, + AttributeNetworkIoDirection, + AttributeNetworkLocalAddress, + AttributeNetworkLocalPort, + AttributeNetworkPeerAddress, + AttributeNetworkPeerPort, + AttributeNetworkProtocolName, + AttributeNetworkProtocolVersion, + AttributeNetworkTransport, + AttributeNetworkType, + AttributeOciManifestDigest, + AttributeOpentracingRefType, + AttributeOSBuildID, + AttributeOSDescription, + AttributeOSName, + AttributeOSType, + AttributeOSVersion, + AttributeOTelStatusCode, + AttributeOTelStatusDescription, + AttributeOTelScopeName, + AttributeOTelScopeVersion, + AttributePeerService, + AttributeProcessCommand, + AttributeProcessCommandArgs, + AttributeProcessCommandLine, + AttributeProcessContextSwitchType, + AttributeProcessCreationTime, + AttributeProcessExecutableName, + AttributeProcessExecutablePath, + AttributeProcessExitCode, + AttributeProcessExitTime, + AttributeProcessGroupLeaderPID, + AttributeProcessInteractive, + AttributeProcessOwner, + AttributeProcessPagingFaultType, + AttributeProcessParentPID, + AttributeProcessPID, + AttributeProcessRealUserID, + AttributeProcessRealUserName, + AttributeProcessRuntimeDescription, + AttributeProcessRuntimeName, + AttributeProcessRuntimeVersion, + AttributeProcessSavedUserID, + AttributeProcessSavedUserName, + AttributeProcessSessionLeaderPID, + AttributeProcessUserID, + AttributeProcessUserName, + AttributeProcessVpid, + AttributeProcessCPUState, + 
AttributeRPCConnectRPCErrorCode, + AttributeRPCGRPCStatusCode, + AttributeRPCJsonrpcErrorCode, + AttributeRPCJsonrpcErrorMessage, + AttributeRPCJsonrpcRequestID, + AttributeRPCJsonrpcVersion, + AttributeRPCMessageCompressedSize, + AttributeRPCMessageID, + AttributeRPCMessageType, + AttributeRPCMessageUncompressedSize, + AttributeRPCMethod, + AttributeRPCService, + AttributeRPCSystem, + AttributeServerAddress, + AttributeServerPort, + AttributeServiceInstanceID, + AttributeServiceName, + AttributeServiceNamespace, + AttributeServiceVersion, + AttributeSessionID, + AttributeSessionPreviousID, + AttributeSignalrConnectionStatus, + AttributeSignalrTransport, + AttributeSourceAddress, + AttributeSourcePort, + AttributeSystemDevice, + AttributeSystemCPULogicalNumber, + AttributeSystemCPUState, + AttributeSystemMemoryState, + AttributeSystemPagingDirection, + AttributeSystemPagingState, + AttributeSystemPagingType, + AttributeSystemFilesystemMode, + AttributeSystemFilesystemMountpoint, + AttributeSystemFilesystemState, + AttributeSystemFilesystemType, + AttributeSystemNetworkState, + AttributeSystemProcessStatus, + AttributeTelemetrySDKLanguage, + AttributeTelemetrySDKName, + AttributeTelemetrySDKVersion, + AttributeTelemetryDistroName, + AttributeTelemetryDistroVersion, + AttributeThreadID, + AttributeThreadName, + AttributeTLSCipher, + AttributeTLSClientCertificate, + AttributeTLSClientCertificateChain, + AttributeTLSClientHashMd5, + AttributeTLSClientHashSha1, + AttributeTLSClientHashSha256, + AttributeTLSClientIssuer, + AttributeTLSClientJa3, + AttributeTLSClientNotAfter, + AttributeTLSClientNotBefore, + AttributeTLSClientServerName, + AttributeTLSClientSubject, + AttributeTLSClientSupportedCiphers, + AttributeTLSCurve, + AttributeTLSEstablished, + AttributeTLSNextProtocol, + AttributeTLSProtocolName, + AttributeTLSProtocolVersion, + AttributeTLSResumed, + AttributeTLSServerCertificate, + AttributeTLSServerCertificateChain, + AttributeTLSServerHashMd5, + AttributeTLSServerHashSha1, + AttributeTLSServerHashSha256, + AttributeTLSServerIssuer, + AttributeTLSServerJa3s, + AttributeTLSServerNotAfter, + AttributeTLSServerNotBefore, + AttributeTLSServerSubject, + AttributeURLDomain, + AttributeURLExtension, + AttributeURLFragment, + AttributeURLFull, + AttributeURLOriginal, + AttributeURLPath, + AttributeURLPort, + AttributeURLQuery, + AttributeURLRegisteredDomain, + AttributeURLScheme, + AttributeURLSubdomain, + AttributeURLTemplate, + AttributeURLTopLevelDomain, + AttributeUserAgentName, + AttributeUserAgentOriginal, + AttributeUserAgentVersion, + AttributeWebEngineDescription, + AttributeWebEngineName, + AttributeWebEngineVersion, + } +} diff --git a/vendor/go.opentelemetry.io/collector/semconv/v1.26.0/generated_event.go b/vendor/go.opentelemetry.io/collector/semconv/v1.26.0/generated_event.go new file mode 100644 index 00000000000..ac6893c3b26 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/semconv/v1.26.0/generated_event.go @@ -0,0 +1,10 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. 
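[Editor's note] GetAttribute_groupSemanticConventionAttributeNames returns the whole registry as a flat slice, which makes membership checks cheap to build. A small sketch, assuming the vendored v1.26.0 import path:

package main

import (
	"fmt"

	semconv "go.opentelemetry.io/collector/semconv/v1.26.0"
)

func main() {
	// Build a set of every attribute name in the v1.26.0 registry,
	// e.g. to flag attribute keys that are not semantic conventions.
	known := make(map[string]struct{})
	for _, name := range semconv.GetAttribute_groupSemanticConventionAttributeNames() {
		known[name] = struct{}{}
	}
	_, ok := known[semconv.AttributeURLFull]
	fmt.Println(ok) // true
}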
+ +package semconv + +func GetEventSemanticConventionAttributeNames() []string { + return []string{} +} diff --git a/vendor/go.opentelemetry.io/collector/semconv/v1.26.0/generated_resource.go b/vendor/go.opentelemetry.io/collector/semconv/v1.26.0/generated_resource.go new file mode 100644 index 00000000000..bb89e4806f5 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/semconv/v1.26.0/generated_resource.go @@ -0,0 +1,10 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv + +func GetResourceSemanticConventionAttributeNames() []string { + return []string{} +} diff --git a/vendor/go.opentelemetry.io/collector/semconv/v1.26.0/generated_trace.go b/vendor/go.opentelemetry.io/collector/semconv/v1.26.0/generated_trace.go new file mode 100644 index 00000000000..380529563a2 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/semconv/v1.26.0/generated_trace.go @@ -0,0 +1,10 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv + +func GetTraceSemanticConventionAttributeNames() []string { + return []string{} +} diff --git a/vendor/go.opentelemetry.io/collector/semconv/v1.26.0/schema.go b/vendor/go.opentelemetry.io/collector/semconv/v1.26.0/schema.go new file mode 100644 index 00000000000..dcd02283faf --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/semconv/v1.26.0/schema.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/collector/semconv/v1.26.0" + +// SchemaURL is the schema URL that matches the version of the semantic conventions +// that this package defines. Semconv packages starting from v1.4.0 must declare +// non-empty schema URL in the form https://opentelemetry.io/schemas/ +const SchemaURL = "https://opentelemetry.io/schemas/1.26.0" diff --git a/vendor/go.opentelemetry.io/collector/semconv/v1.27.0/doc.go b/vendor/go.opentelemetry.io/collector/semconv/v1.27.0/doc.go new file mode 100644 index 00000000000..5042d2eba86 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/semconv/v1.27.0/doc.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package semconv implements OpenTelemetry semantic conventions. +// +// OpenTelemetry semantic conventions are agreed standardized naming +// patterns for OpenTelemetry things. This package represents the v1.27.0 +// version of the OpenTelemetry semantic conventions. +package semconv // import "go.opentelemetry.io/collector/semconv/v1.27.0" diff --git a/vendor/go.opentelemetry.io/collector/semconv/v1.27.0/generated_attribute_group.go b/vendor/go.opentelemetry.io/collector/semconv/v1.27.0/generated_attribute_group.go new file mode 100644 index 00000000000..6247b5d7917 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/semconv/v1.27.0/generated_attribute_group.go @@ -0,0 +1,5843 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv + +// The Android platform on which the Android application is running. +const ( + // Uniquely identifies the framework API revision offered by a version + // (os.version) of the android operating system. More information can be found + // here. 
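[Editor's note] SchemaURL is what producers attach to emitted telemetry so consumers can translate attributes between convention versions. A minimal sketch with the collector's ptrace API, import paths as vendored in this patch:

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/ptrace"
	semconv "go.opentelemetry.io/collector/semconv/v1.26.0"
)

func main() {
	td := ptrace.NewTraces()
	rs := td.ResourceSpans().AppendEmpty()
	// Declare which semconv version the resource and span attributes follow.
	rs.SetSchemaUrl(semconv.SchemaURL)
	fmt.Println(rs.SchemaUrl()) // https://opentelemetry.io/schemas/1.26.0
}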
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '33', '32' + AttributeAndroidOSAPILevel = "android.os.api_level" +) + +// This group describes attributes specific to artifacts. Artifacts are files +// or other immutable objects that are intended for distribution. This +// definition aligns directly with the +// [SLSA](https://slsa.dev/spec/v1.0/terminology#package-model) package model. +const ( + // The provenance filename of the built attestation which directly relates to the + // build artifact filename. This filename SHOULD accompany the artifact at publish + // time. See the SLSA Relationship specification for more information. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'golang-binary-amd64-v0.1.0.attestation', 'docker-image- + // amd64-v0.1.0.intoto.json1', 'release-1.tar.gz.attestation', 'file-name- + // package.tar.gz.intoto.json1' + AttributeArtifactAttestationFilename = "artifact.attestation.filename" + // The full hash value (see glossary), of the built attestation. Some envelopes in + // the software attestation space also refer to this as the digest. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '1b31dfcd5b7f9267bf2ff47651df1cfb9147b9e4df1f335accf65b4cda498408' + AttributeArtifactAttestationHash = "artifact.attestation.hash" + // The id of the build software attestation. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '123' + AttributeArtifactAttestationID = "artifact.attestation.id" + // The human readable file name of the artifact, typically generated during build + // and release processes. Often includes the package name and version in the file + // name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'golang-binary-amd64-v0.1.0', 'docker-image-amd64-v0.1.0', + // 'release-1.tar.gz', 'file-name-package.tar.gz' + // Note: This file name can also act as the Package Name + // in cases where the package ecosystem maps accordingly. + // Additionally, the artifact can be published + // for others, but that is not a guarantee. + AttributeArtifactFilename = "artifact.filename" + // The full hash value (see glossary), often found in checksum.txt on a release of + // the artifact and used to verify package integrity. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '9ff4c52759e2c4ac70b7d517bc7fcdc1cda631ca0045271ddd1b192544f8a3e9' + // Note: The specific algorithm used to create the cryptographic hash value is + // not defined. In situations where an artifact has multiple + // cryptographic hashes, it is up to the implementer to choose which + // hash value to set here; this should be the most secure hash algorithm + // that is suitable for the situation and consistent with the + // corresponding attestation. The implementer can then provide the other + // hash values through an additional set of attribute extensions as they + // deem necessary. + AttributeArtifactHash = "artifact.hash" + // The Package URL of the package artifact provides a standard way to identify and + // locate the packaged artifact. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'pkg:github/package-url/purl-spec@1209109710924', + // 'pkg:npm/foo@12.12.3' + AttributeArtifactPurl = "artifact.purl" + // The version of the artifact. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'v0.1.0', '1.2.1', '122691-build' + AttributeArtifactVersion = "artifact.version" +) + +// ASP.NET Core attributes +const ( + // Rate-limiting result, shows whether the lease was acquired or contains a + // rejection reason + // + // Type: Enum + // Requirement Level: Required + // Stability: stable + // Examples: 'acquired', 'request_canceled' + AttributeAspnetcoreRateLimitingResult = "aspnetcore.rate_limiting.result" + // Full type name of the IExceptionHandler implementation that handled the + // exception. + // + // Type: string + // Requirement Level: Conditionally Required - if and only if the exception was + // handled by this handler. + // Stability: stable + // Examples: 'Contoso.MyHandler' + AttributeAspnetcoreDiagnosticsHandlerType = "aspnetcore.diagnostics.handler.type" + // ASP.NET Core exception middleware handling result + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + // Examples: 'handled', 'unhandled' + AttributeAspnetcoreDiagnosticsExceptionResult = "aspnetcore.diagnostics.exception.result" + // Rate limiting policy name. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'fixed', 'sliding', 'token' + AttributeAspnetcoreRateLimitingPolicy = "aspnetcore.rate_limiting.policy" + // Flag indicating if request was handled by the application pipeline. + // + // Type: boolean + // Requirement Level: Optional + // Stability: stable + // Examples: True + AttributeAspnetcoreRequestIsUnhandled = "aspnetcore.request.is_unhandled" + // A value that indicates whether the matched route is a fallback route. + // + // Type: boolean + // Requirement Level: Optional + // Stability: stable + // Examples: True + AttributeAspnetcoreRoutingIsFallback = "aspnetcore.routing.is_fallback" + // Match result - success or failure + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + // Examples: 'success', 'failure' + AttributeAspnetcoreRoutingMatchStatus = "aspnetcore.routing.match_status" +) + +const ( + // Lease was acquired + AttributeAspnetcoreRateLimitingResultAcquired = "acquired" + // Lease request was rejected by the endpoint limiter + AttributeAspnetcoreRateLimitingResultEndpointLimiter = "endpoint_limiter" + // Lease request was rejected by the global limiter + AttributeAspnetcoreRateLimitingResultGlobalLimiter = "global_limiter" + // Lease request was canceled + AttributeAspnetcoreRateLimitingResultRequestCanceled = "request_canceled" +) + +const ( + // Exception was handled by the exception handling middleware + AttributeAspnetcoreDiagnosticsExceptionResultHandled = "handled" + // Exception was not handled by the exception handling middleware + AttributeAspnetcoreDiagnosticsExceptionResultUnhandled = "unhandled" + // Exception handling was skipped because the response had started + AttributeAspnetcoreDiagnosticsExceptionResultSkipped = "skipped" + // Exception handling didn't run because the request was aborted + AttributeAspnetcoreDiagnosticsExceptionResultAborted = "aborted" +) + +const ( + // Match succeeded + AttributeAspnetcoreRoutingMatchStatusSuccess = "success" + // Match failed + AttributeAspnetcoreRoutingMatchStatusFailure = "failure" +) + +// Generic attributes for AWS services. +const ( + // The AWS request ID as returned in the response headers x-amz-request-id or + // x-amz-requestid. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ' + AttributeAWSRequestID = "aws.request_id" +) + +// Attributes for AWS DynamoDB. +const ( + // The JSON-serialized value of each item in the AttributeDefinitions request + // field. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' + AttributeAWSDynamoDBAttributeDefinitions = "aws.dynamodb.attribute_definitions" + // The value of the AttributesToGet request parameter. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'lives', 'id' + AttributeAWSDynamoDBAttributesToGet = "aws.dynamodb.attributes_to_get" + // The value of the ConsistentRead request parameter. + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + AttributeAWSDynamoDBConsistentRead = "aws.dynamodb.consistent_read" + // The JSON-serialized value of each item in the ConsumedCapacity response field. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" : { + // "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits": + // number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number, + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }, + // "ReadCapacityUnits": number, "Table": { "CapacityUnits": number, + // "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName": + // "string", "WriteCapacityUnits": number }' + AttributeAWSDynamoDBConsumedCapacity = "aws.dynamodb.consumed_capacity" + // The value of the Count response parameter. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 10 + AttributeAWSDynamoDBCount = "aws.dynamodb.count" + // The value of the ExclusiveStartTableName request parameter. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Users', 'CatsTable' + AttributeAWSDynamoDBExclusiveStartTable = "aws.dynamodb.exclusive_start_table" + // The JSON-serialized value of each item in the GlobalSecondaryIndexUpdates + // request field. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, + // "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits": + // number } }' + AttributeAWSDynamoDBGlobalSecondaryIndexUpdates = "aws.dynamodb.global_secondary_index_updates" + // The JSON-serialized value of each item of the GlobalSecondaryIndexes request + // field + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": "string", + // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], + // "ProjectionType": "string" }, "ProvisionedThroughput": { "ReadCapacityUnits": + // number, "WriteCapacityUnits": number } }' + AttributeAWSDynamoDBGlobalSecondaryIndexes = "aws.dynamodb.global_secondary_indexes" + // The value of the IndexName request parameter. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'name_to_group' + AttributeAWSDynamoDBIndexName = "aws.dynamodb.index_name" + // The JSON-serialized value of the ItemCollectionMetrics response field. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob, + // "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" : + // "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S": + // "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }' + AttributeAWSDynamoDBItemCollectionMetrics = "aws.dynamodb.item_collection_metrics" + // The value of the Limit request parameter. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 10 + AttributeAWSDynamoDBLimit = "aws.dynamodb.limit" + // The JSON-serialized value of each item of the LocalSecondaryIndexes request + // field. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: '{ "IndexARN": "string", "IndexName": "string", "IndexSizeBytes": + // number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string", + // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], + // "ProjectionType": "string" } }' + AttributeAWSDynamoDBLocalSecondaryIndexes = "aws.dynamodb.local_secondary_indexes" + // The value of the ProjectionExpression request parameter. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Title', 'Title, Price, Color', 'Title, Description, RelatedItems, + // ProductReviews' + AttributeAWSDynamoDBProjection = "aws.dynamodb.projection" + // The value of the ProvisionedThroughput.ReadCapacityUnits request parameter. + // + // Type: double + // Requirement Level: Optional + // Stability: experimental + // Examples: 1.0, 2.0 + AttributeAWSDynamoDBProvisionedReadCapacity = "aws.dynamodb.provisioned_read_capacity" + // The value of the ProvisionedThroughput.WriteCapacityUnits request parameter. + // + // Type: double + // Requirement Level: Optional + // Stability: experimental + // Examples: 1.0, 2.0 + AttributeAWSDynamoDBProvisionedWriteCapacity = "aws.dynamodb.provisioned_write_capacity" + // The value of the ScanIndexForward request parameter. + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + AttributeAWSDynamoDBScanForward = "aws.dynamodb.scan_forward" + // The value of the ScannedCount response parameter. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 50 + AttributeAWSDynamoDBScannedCount = "aws.dynamodb.scanned_count" + // The value of the Segment request parameter. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 10 + AttributeAWSDynamoDBSegment = "aws.dynamodb.segment" + // The value of the Select request parameter. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'ALL_ATTRIBUTES', 'COUNT' + AttributeAWSDynamoDBSelect = "aws.dynamodb.select" + // The number of items in the TableNames response parameter. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 20 + AttributeAWSDynamoDBTableCount = "aws.dynamodb.table_count" + // The keys in the RequestItems object field. 
+ // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Users', 'Cats' + AttributeAWSDynamoDBTableNames = "aws.dynamodb.table_names" + // The value of the TotalSegments request parameter. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 100 + AttributeAWSDynamoDBTotalSegments = "aws.dynamodb.total_segments" +) + +// Attributes for AWS Elastic Container Service (ECS). +const ( + // The ID of a running ECS task. The ID MUST be extracted from task.arn. + // + // Type: string + // Requirement Level: Conditionally Required - If and only if `task.arn` is + // populated. + // Stability: experimental + // Examples: '10838bed-421f-43ef-870a-f43feacbbb5b', + // '23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd' + AttributeAWSECSTaskID = "aws.ecs.task.id" + // The ARN of an ECS cluster. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AttributeAWSECSClusterARN = "aws.ecs.cluster.arn" + // The Amazon Resource Name (ARN) of an ECS container instance. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'arn:aws:ecs:us- + // west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' + AttributeAWSECSContainerARN = "aws.ecs.container.arn" + // The launch type for an ECS task. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeAWSECSLaunchtype = "aws.ecs.launchtype" + // The ARN of a running ECS task. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'arn:aws:ecs:us- + // west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b', + // 'arn:aws:ecs:us-west-1:123456789123:task/my-cluster/task- + // id/23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd' + AttributeAWSECSTaskARN = "aws.ecs.task.arn" + // The family name of the ECS task definition used to create the ECS task. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry-family' + AttributeAWSECSTaskFamily = "aws.ecs.task.family" + // The revision for the task definition used to create the ECS task. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '8', '26' + AttributeAWSECSTaskRevision = "aws.ecs.task.revision" +) + +const ( + // ec2 + AttributeAWSECSLaunchtypeEC2 = "ec2" + // fargate + AttributeAWSECSLaunchtypeFargate = "fargate" +) + +// Attributes for AWS Elastic Kubernetes Service (EKS). +const ( + // The ARN of an EKS cluster. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AttributeAWSEKSClusterARN = "aws.eks.cluster.arn" +) + +// Attributes for AWS Logs. +const ( + // The Amazon Resource Name(s) (ARN) of the AWS log group(s). + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' + // Note: See the log group ARN format documentation. + AttributeAWSLogGroupARNs = "aws.log.group.arns" + // The name(s) of the AWS log group(s) an application is writing to. 
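[Editor's note] A hedged sketch of the aws.dynamodb.* group on a client span, using the v1.27.0 package this patch introduces; the span name and attribute values are illustrative only:

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/ptrace"
	semconv "go.opentelemetry.io/collector/semconv/v1.27.0"
)

func main() {
	span := ptrace.NewSpan()
	span.SetName("DynamoDB.Query") // illustrative
	attrs := span.Attributes()
	// aws.dynamodb.table_names is string[] in the spec, hence the slice value.
	tables := attrs.PutEmptySlice(semconv.AttributeAWSDynamoDBTableNames)
	tables.AppendEmpty().SetStr("Users")
	attrs.PutInt(semconv.AttributeAWSDynamoDBLimit, 10)
	attrs.PutBool(semconv.AttributeAWSDynamoDBConsistentRead, true)
	fmt.Println(attrs.AsRaw())
}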
+ // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: '/aws/lambda/my-function', 'opentelemetry-service' + // Note: Multiple log groups must be supported for cases like multi-container + // applications, where a single application has sidecar containers, and each write + // to their own log group. + AttributeAWSLogGroupNames = "aws.log.group.names" + // The ARN(s) of the AWS log stream(s). + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log- + // stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + // Note: See the log stream ARN format documentation. One log group can contain + // several log streams, so these ARNs necessarily identify both a log group and a + // log stream. + AttributeAWSLogStreamARNs = "aws.log.stream.arns" + // The name(s) of the AWS log stream(s) an application is writing to. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + AttributeAWSLogStreamNames = "aws.log.stream.names" +) + +// Attributes for AWS Lambda. +const ( + // The full invoked ARN as provided on the Context passed to the function (Lambda- + // Runtime-Invoked-Function-ARN header on the /runtime/invocation/next + // applicable). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' + // Note: This may be different from cloud.resource_id if an alias is involved. + AttributeAWSLambdaInvokedARN = "aws.lambda.invoked_arn" +) + +// Attributes for AWS S3. +const ( + // The S3 bucket name the request refers to. Corresponds to the --bucket parameter + // of the S3 API operations. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'some-bucket-name' + // Note: The bucket attribute is applicable to all S3 operations that reference a + // bucket, i.e. that require the bucket name as a mandatory parameter. + // This applies to almost all S3 operations except list-buckets. + AttributeAWSS3Bucket = "aws.s3.bucket" + // The source object (in the form bucket/key) for the copy operation. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'someFile.yml' + // Note: The copy_source attribute applies to S3 copy operations and corresponds + // to the --copy-source parameter + // of the copy-object operation within the S3 API. + // This applies in particular to the following operations:
+ //
+ //   • copy-object
+ //   • upload-part-copy
+ //
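+ // A minimal usage sketch, assuming a trace.Span named span and the
+ // go.opentelemetry.io/otel/attribute package: a copy-object span could record
+ // (example values taken from this section)
+ //
+ //	span.SetAttributes(
+ //		attribute.String(AttributeAWSS3Bucket, "some-bucket-name"),
+ //		attribute.String(AttributeAWSS3CopySource, "someFile.yml"),
+ //	)
+ //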
    + AttributeAWSS3CopySource = "aws.s3.copy_source" + // The delete request container that specifies the objects to be deleted. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string} + // ],Quiet=boolean' + // Note: The delete attribute is only applicable to the delete-object operation. + // The delete attribute corresponds to the --delete parameter of the + // delete-objects operation within the S3 API. + AttributeAWSS3Delete = "aws.s3.delete" + // The S3 object key the request refers to. Corresponds to the --key parameter of + // the S3 API operations. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'someFile.yml' + // Note: The key attribute is applicable to all object-related S3 operations, i.e. + // that require the object key as a mandatory parameter. + // This applies in particular to the following operations:
+ //
+ //   • copy-object
+ //   • delete-object
+ //   • get-object
+ //   • head-object
+ //   • put-object
+ //   • restore-object
+ //   • select-object-content
+ //   • abort-multipart-upload
+ //   • complete-multipart-upload
+ //   • create-multipart-upload
+ //   • list-parts
+ //   • upload-part
+ //   • upload-part-copy
+ //
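+ // A minimal usage sketch, assuming a trace.Span named span and the
+ // go.opentelemetry.io/otel/attribute package: a get-object span could record
+ //
+ //	span.SetAttributes(
+ //		attribute.String(AttributeAWSS3Bucket, "some-bucket-name"),
+ //		attribute.String(AttributeAWSS3Key, "someFile.yml"),
+ //	)
+ //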
    + AttributeAWSS3Key = "aws.s3.key" + // The part number of the part being uploaded in a multipart-upload operation. + // This is a positive integer between 1 and 10,000. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 3456 + // Note: The part_number attribute is only applicable to the upload-part + // and upload-part-copy operations. + // The part_number attribute corresponds to the --part-number parameter of the + // upload-part operation within the S3 API. + AttributeAWSS3PartNumber = "aws.s3.part_number" + // Upload ID that identifies the multipart upload. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ' + // Note: The upload_id attribute applies to S3 multipart-upload operations and + // corresponds to the --upload-id parameter + // of the S3 API multipart operations. + // This applies in particular to the following operations:
+ //
+ //   • abort-multipart-upload
+ //   • complete-multipart-upload
+ //   • list-parts
+ //   • upload-part
+ //   • upload-part-copy
+ //
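+ // A minimal usage sketch, assuming a trace.Span named span and the
+ // go.opentelemetry.io/otel/attribute package: an upload-part span could combine
+ // the upload ID with the part number (example values taken from this section):
+ //
+ //	span.SetAttributes(
+ //		attribute.String(AttributeAWSS3UploadID, "dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ"),
+ //		attribute.Int(AttributeAWSS3PartNumber, 3456),
+ //	)
+ //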
    + AttributeAWSS3UploadID = "aws.s3.upload_id" +) + +// Generic attributes for Azure SDK. +const ( + // The unique identifier of the service request. It's generated by the Azure + // service and returned with the response. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '00000000-0000-0000-0000-000000000000' + AttributeAzServiceRequestID = "az.service_request_id" +) + +// The web browser attributes +const ( + // Array of brand name and version separated by a space + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' + // Note: This value is intended to be taken from the UA client hints API + // (navigator.userAgentData.brands). + AttributeBrowserBrands = "browser.brands" + // Preferred language of the user using the browser + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'en', 'en-US', 'fr', 'fr-FR' + // Note: This value is intended to be taken from the Navigator API + // navigator.language. + AttributeBrowserLanguage = "browser.language" + // A boolean that is true if the browser is running on a mobile device + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + // Note: This value is intended to be taken from the UA client hints API + // (navigator.userAgentData.mobile). If unavailable, this attribute SHOULD be left + // unset. + AttributeBrowserMobile = "browser.mobile" + // The platform on which the browser is running + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Windows', 'macOS', 'Android' + // Note: This value is intended to be taken from the UA client hints API + // (navigator.userAgentData.platform). If unavailable, the legacy + // navigator.platform API SHOULD NOT be used instead and this attribute SHOULD be + // left unset in order for the values to be consistent. + // The list of possible values is defined in the W3C User-Agent Client Hints + // specification. Note that some (but not all) of these values can overlap with + // values in the os.type and os.name attributes. However, for consistency, the + // values in the browser.platform attribute should capture the exact value that + // the user agent provides. + AttributeBrowserPlatform = "browser.platform" +) + +// This group describes attributes specific to pipelines within a Continuous +// Integration and Continuous Deployment (CI/CD) system. A +// [pipeline](https://en.wikipedia.org/wiki/Pipeline_(computing)) in this case +// is a series of steps that are performed in order to deliver a new version of +// software. This aligns with the +// [Britannica](https://www.britannica.com/dictionary/pipeline) definition of a +// pipeline where a **pipeline** is the system for developing and producing +// something. In the context of CI/CD, a pipeline produces or delivers +// software. +const ( + // The human readable name of the pipeline within a CI/CD system. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Build and Test', 'Lint', 'Deploy Go Project', + // 'deploy_to_environment' + AttributeCicdPipelineName = "cicd.pipeline.name" + // The unique identifier of a pipeline run within a CI/CD system. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '120912' + AttributeCicdPipelineRunID = "cicd.pipeline.run.id" + // The human readable name of a task within a pipeline. Task here most closely + // aligns with a computing process in a pipeline. Other terms for tasks include + // commands, steps, and procedures. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Run GoLang Linter', 'Go Build', 'go-test', 'deploy_binary' + AttributeCicdPipelineTaskName = "cicd.pipeline.task.name" + // The unique identifier of a task run within a pipeline. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '12097' + AttributeCicdPipelineTaskRunID = "cicd.pipeline.task.run.id" + // The URL of the pipeline run providing the complete address in order to locate + // and identify the pipeline run. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'https://github.com/open-telemetry/semantic- + // conventions/actions/runs/9753949763/job/26920038674?pr=1075' + AttributeCicdPipelineTaskRunURLFull = "cicd.pipeline.task.run.url.full" + // The type of the task within a pipeline. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'build', 'test', 'deploy' + AttributeCicdPipelineTaskType = "cicd.pipeline.task.type" +) + +const ( + // build + AttributeCicdPipelineTaskTypeBuild = "build" + // test + AttributeCicdPipelineTaskTypeTest = "test" + // deploy + AttributeCicdPipelineTaskTypeDeploy = "deploy" +) + +// These attributes may be used to describe the client in a connection-based +// network interaction where there is one side that initiates the connection +// (the client is the side that initiates the connection). This covers all TCP +// network interactions since TCP is connection-based and one side initiates +// the connection (an exception is made for peer-to-peer communication over TCP +// where the "user-facing" surface of the protocol / API doesn't expose a clear +// notion of client and server). This also covers UDP network interactions +// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS. +const ( + // Client address - domain name if available without reverse DNS lookup; + // otherwise, IP address or Unix domain socket name. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'client.example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the server side, and when communicating through an + // intermediary, client.address SHOULD represent the client address behind any + // intermediaries, for example proxies, if it's available. + AttributeClientAddress = "client.address" + // Client port number. + // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 65123 + // Note: When observed from the server side, and when communicating through an + // intermediary, client.port SHOULD represent the client port behind any + // intermediaries, for example proxies, if it's available. + AttributeClientPort = "client.port" +) + +// A cloud environment (e.g. GCP, Azure, AWS). +const ( + // The cloud account ID the resource is assigned to. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '111111111111', 'opentelemetry' + AttributeCloudAccountID = "cloud.account.id" + // Cloud regions often have multiple, isolated locations known as zones to + // increase availability. Availability zone represents the zone where the resource + // is running. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'us-east-1c' + // Note: Availability zones are called "zones" on Alibaba Cloud and + // Google Cloud. + AttributeCloudAvailabilityZone = "cloud.availability_zone" + // The cloud platform in use. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Note: The prefix of the service SHOULD match the one specified in + // cloud.provider. + AttributeCloudPlatform = "cloud.platform" + // Name of the cloud provider. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeCloudProvider = "cloud.provider" + // The geographical region the resource is running. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'us-central1', 'us-east-1' + // Note: Refer to your provider's docs to see the available regions, for example + // Alibaba Cloud regions, AWS regions, Azure regions, Google Cloud regions, or + // Tencent Cloud regions. + AttributeCloudRegion = "cloud.region" + // Cloud provider-specific native identifier of the monitored cloud resource (e.g. + // an ARN on AWS, a fully qualified resource ID on Azure, a full resource name on + // GCP) + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function', '//run.googl + // eapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID', '/sub + // scriptions//resourceGroups//providers/Microsoft.Web/sites + // //functions/' + // Note: On some cloud providers, it may not be possible to determine the full ID + // at startup, + // so it may be necessary to set cloud.resource_id as a span attribute instead.The + // exact value to use for cloud.resource_id depends on the cloud provider. + // The following well-known definitions MUST be used if you set this attribute and + // they apply:
+ //
+ //   • AWS Lambda: The function ARN.
+ //     Take care not to use the "invoked ARN" directly but replace any
+ //     alias suffix with the resolved function version, as the same runtime
+ //     instance may be invocable with multiple different aliases.
+ //   • GCP: The URI of the resource
+ //   • Azure: The Fully Qualified Resource ID of the invoked function,
+ //     not the function app, having the form
+ //     /subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/.
+ //     This means that a span attribute MUST be used, as an Azure function app
+ //     can host multiple functions that would usually share a TracerProvider.
+ //
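+ // A minimal usage sketch, assuming the go.opentelemetry.io/otel/attribute and
+ // go.opentelemetry.io/otel/sdk/resource packages: an AWS Lambda resource could
+ // be described as (ARN example taken from this section)
+ //
+ //	res := resource.NewSchemaless(
+ //		attribute.String(AttributeCloudProvider, "aws"),
+ //		attribute.String(AttributeCloudResourceID, "arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function"),
+ //	)
+ //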
    + AttributeCloudResourceID = "cloud.resource_id" +) + +const ( + // Alibaba Cloud Elastic Compute Service + AttributeCloudPlatformAlibabaCloudECS = "alibaba_cloud_ecs" + // Alibaba Cloud Function Compute + AttributeCloudPlatformAlibabaCloudFc = "alibaba_cloud_fc" + // Red Hat OpenShift on Alibaba Cloud + AttributeCloudPlatformAlibabaCloudOpenshift = "alibaba_cloud_openshift" + // AWS Elastic Compute Cloud + AttributeCloudPlatformAWSEC2 = "aws_ec2" + // AWS Elastic Container Service + AttributeCloudPlatformAWSECS = "aws_ecs" + // AWS Elastic Kubernetes Service + AttributeCloudPlatformAWSEKS = "aws_eks" + // AWS Lambda + AttributeCloudPlatformAWSLambda = "aws_lambda" + // AWS Elastic Beanstalk + AttributeCloudPlatformAWSElasticBeanstalk = "aws_elastic_beanstalk" + // AWS App Runner + AttributeCloudPlatformAWSAppRunner = "aws_app_runner" + // Red Hat OpenShift on AWS (ROSA) + AttributeCloudPlatformAWSOpenshift = "aws_openshift" + // Azure Virtual Machines + AttributeCloudPlatformAzureVM = "azure_vm" + // Azure Container Apps + AttributeCloudPlatformAzureContainerApps = "azure_container_apps" + // Azure Container Instances + AttributeCloudPlatformAzureContainerInstances = "azure_container_instances" + // Azure Kubernetes Service + AttributeCloudPlatformAzureAKS = "azure_aks" + // Azure Functions + AttributeCloudPlatformAzureFunctions = "azure_functions" + // Azure App Service + AttributeCloudPlatformAzureAppService = "azure_app_service" + // Azure Red Hat OpenShift + AttributeCloudPlatformAzureOpenshift = "azure_openshift" + // Google Bare Metal Solution (BMS) + AttributeCloudPlatformGCPBareMetalSolution = "gcp_bare_metal_solution" + // Google Cloud Compute Engine (GCE) + AttributeCloudPlatformGCPComputeEngine = "gcp_compute_engine" + // Google Cloud Run + AttributeCloudPlatformGCPCloudRun = "gcp_cloud_run" + // Google Cloud Kubernetes Engine (GKE) + AttributeCloudPlatformGCPKubernetesEngine = "gcp_kubernetes_engine" + // Google Cloud Functions (GCF) + AttributeCloudPlatformGCPCloudFunctions = "gcp_cloud_functions" + // Google Cloud App Engine (GAE) + AttributeCloudPlatformGCPAppEngine = "gcp_app_engine" + // Red Hat OpenShift on Google Cloud + AttributeCloudPlatformGCPOpenshift = "gcp_openshift" + // Red Hat OpenShift on IBM Cloud + AttributeCloudPlatformIbmCloudOpenshift = "ibm_cloud_openshift" + // Tencent Cloud Cloud Virtual Machine (CVM) + AttributeCloudPlatformTencentCloudCvm = "tencent_cloud_cvm" + // Tencent Cloud Elastic Kubernetes Service (EKS) + AttributeCloudPlatformTencentCloudEKS = "tencent_cloud_eks" + // Tencent Cloud Serverless Cloud Function (SCF) + AttributeCloudPlatformTencentCloudScf = "tencent_cloud_scf" +) + +const ( + // Alibaba Cloud + AttributeCloudProviderAlibabaCloud = "alibaba_cloud" + // Amazon Web Services + AttributeCloudProviderAWS = "aws" + // Microsoft Azure + AttributeCloudProviderAzure = "azure" + // Google Cloud Platform + AttributeCloudProviderGCP = "gcp" + // Heroku Platform as a Service + AttributeCloudProviderHeroku = "heroku" + // IBM Cloud + AttributeCloudProviderIbmCloud = "ibm_cloud" + // Tencent Cloud + AttributeCloudProviderTencentCloud = "tencent_cloud" +) + +// Attributes for CloudEvents. +const ( + // The event_id uniquely identifies the event. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' + AttributeCloudeventsEventID = "cloudevents.event_id" + // The source identifies the context in which an event happened. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'https://github.com/cloudevents', '/cloudevents/spec/pull/123', 'my- + // service' + AttributeCloudeventsEventSource = "cloudevents.event_source" + // The version of the CloudEvents specification which the event uses. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '1.0' + AttributeCloudeventsEventSpecVersion = "cloudevents.event_spec_version" + // The subject of the event in the context of the event producer (identified by + // source). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'mynewfile.jpg' + AttributeCloudeventsEventSubject = "cloudevents.event_subject" + // The event_type contains a value describing the type of event related to the + // originating occurrence. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'com.github.pull_request.opened', 'com.example.object.deleted.v2' + AttributeCloudeventsEventType = "cloudevents.event_type" +) + +// These attributes allow to report this unit of code and therefore to provide +// more context about the span. +const ( + // The column number in code.filepath best representing the operation. It SHOULD + // point within the code unit named in code.function. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 16 + AttributeCodeColumn = "code.column" + // The source code file name that identifies the code unit as uniquely as possible + // (preferably an absolute file path). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '/usr/local/MyApplication/content_root/app/index.php' + AttributeCodeFilepath = "code.filepath" + // The method or function name, or equivalent (usually rightmost part of the code + // unit's name). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'serveRequest' + AttributeCodeFunction = "code.function" + // The line number in code.filepath best representing the operation. It SHOULD + // point within the code unit named in code.function. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 42 + AttributeCodeLineNumber = "code.lineno" + // The "namespace" within which code.function is defined. Usually the + // qualified class or module name, such that code.namespace + some separator + + // code.function form a unique identifier for the code unit. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'com.example.MyHTTPService' + AttributeCodeNamespace = "code.namespace" + // A stacktrace as a string in the natural representation for the language + // runtime. The representation is to be determined and documented by each language + // SIG. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'at com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + AttributeCodeStacktrace = "code.stacktrace" +) + +// A container instance. +const ( + // The command used to run the container (i.e. the command name). 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'otelcontribcol' + // Note: If using embedded credentials or sensitive data, it is recommended to + // remove them to prevent potential leakage. + AttributeContainerCommand = "container.command" + // All the command arguments (including the command/executable itself) run by the + // container. [2] + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'otelcontribcol, --config, config.yaml' + AttributeContainerCommandArgs = "container.command_args" + // The full command run by the container as a single string representing the full + // command. [2] + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'otelcontribcol --config config.yaml' + AttributeContainerCommandLine = "container.command_line" + // Container ID. Usually a UUID, as for example used to identify Docker + // containers. The UUID might be abbreviated. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'a3bf90e006b2' + AttributeContainerID = "container.id" + // Runtime specific image identifier. Usually a hash algorithm followed by a UUID. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: + // 'sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f' + // Note: Docker defines a sha256 of the image id; container.image.id corresponds + // to the Image field from the Docker container inspect API endpoint. + // K8S defines a link to the container registry repository with digest "imageID": + // "registry.azurecr.io /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e + // 8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625". + // The ID is assigned by the container runtime and can vary in different + // environments. Consider using oci.manifest.digest if it is important to identify + // the same image in different environments/runtimes. + AttributeContainerImageID = "container.image.id" + // Name of the image the container was built on. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'gcr.io/opentelemetry/operator' + AttributeContainerImageName = "container.image.name" + // Repo digests of the container image as provided by the container runtime. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d7 + // 02d249a0ccb', 'internal.registry.example.com:5000/example@sha256:b69959407d21e8 + // a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578' + // Note: Docker and CRI report those under the RepoDigests field. + AttributeContainerImageRepoDigests = "container.image.repo_digests" + // Container image tags. An example can be found in Docker Image Inspect. Should + // be only the section of the full name for example from + // registry.example.com/my-org/my-image:. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'v1.27.1', '3.5.7-0' + AttributeContainerImageTags = "container.image.tags" + // Container name used by container runtime. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry-autoconf' + AttributeContainerName = "container.name" + // The container runtime managing this container. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'docker', 'containerd', 'rkt' + AttributeContainerRuntime = "container.runtime" +) + +// Attributes specific to a cpu instance. +const ( + // The mode of the CPU + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'user', 'system' + AttributeCPUMode = "cpu.mode" +) + +const ( + // user + AttributeCPUModeUser = "user" + // system + AttributeCPUModeSystem = "system" + // nice + AttributeCPUModeNice = "nice" + // idle + AttributeCPUModeIdle = "idle" + // iowait + AttributeCPUModeIowait = "iowait" + // interrupt + AttributeCPUModeInterrupt = "interrupt" + // steal + AttributeCPUModeSteal = "steal" + // kernel + AttributeCPUModeKernel = "kernel" +) + +// This group defines the attributes used to describe telemetry in the context +// of databases. +const ( + // The name of the connection pool; unique within the instrumented application. In + // case the connection pool implementation doesn't provide a name, instrumentation + // SHOULD use a combination of parameters that would make the name unique, for + // example, combining attributes server.address, server.port, and db.namespace, + // formatted as server.address:server.port/db.namespace. Instrumentations that + // generate connection pool name following different patterns SHOULD document it. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'myDataSource' + AttributeDBClientConnectionPoolName = "db.client.connection.pool.name" + // The state of a connection in the pool + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'idle' + AttributeDBClientConnectionState = "db.client.connection.state" + // The name of a collection (table, container) within the database. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'public.users', 'customers' + // Note: It is RECOMMENDED to capture the value as provided by the application + // without attempting to do any case normalization. + // If the collection name is parsed from the query text, it SHOULD be the first + // collection name found in the query and it SHOULD match the value provided in + // the query text including any schema and database name prefix. + // For batch operations, if the individual operations are known to have the same + // collection name then that collection name SHOULD be used, otherwise + // db.collection.name SHOULD NOT be captured. + AttributeDBCollectionName = "db.collection.name" + // The name of the database, fully qualified within the server address and port. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'customers', 'test.users' + // Note: If a database system has multiple namespace components, they SHOULD be + // concatenated (potentially using database system specific conventions) from most + // general to most specific namespace component, and more specific namespaces + // SHOULD NOT be captured without the more general namespaces, to ensure that + // "startswith" queries for the more general namespaces will be valid. + // Semantic conventions for individual database systems SHOULD document what + // db.namespace means in the context of that system. + // It is RECOMMENDED to capture the value as provided by the application without + // attempting to do any case normalization. 
+ AttributeDBNamespace = "db.namespace" + // The number of queries included in a batch operation. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 2, 3, 4 + // Note: Operations are only considered batches when they contain two or more + // operations, and so db.operation.batch.size SHOULD never be 1. + AttributeDBOperationBatchSize = "db.operation.batch.size" + // The name of the operation or command being executed. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'findAndModify', 'HMSET', 'SELECT' + // Note: It is RECOMMENDED to capture the value as provided by the application + // without attempting to do any case normalization. + // If the operation name is parsed from the query text, it SHOULD be the first + // operation name found in the query. + // For batch operations, if the individual operations are known to have the same + // operation name then that operation name SHOULD be used prepended by BATCH, + // otherwise db.operation.name SHOULD be BATCH or some other database system + // specific term if more applicable. + AttributeDBOperationName = "db.operation.name" + // The database query being executed. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'SELECT * FROM wuser_table where username = ?', 'SET mykey "WuValue"' + // Note: For sanitization see Sanitization of db.query.text. + // For batch operations, if the individual operations are known to have the same + // query text then that query text SHOULD be used, otherwise all of the individual + // query texts SHOULD be concatenated with separator ; or some other database + // system specific separator if more applicable. + // Even though parameterized query text can potentially have sensitive data, by + // using a parameterized query the user is giving a strong signal that any + // sensitive data will be passed as parameter values, and the benefit to + // observability of capturing the static part of the query text by default + // outweighs the risk. + AttributeDBQueryText = "db.query.text" + // The database management system (DBMS) product as identified by the client + // instrumentation. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Note: The actual DBMS may differ from the one identified by the client. For + // example, when using PostgreSQL client libraries to connect to a CockroachDB, + // the db.system is set to postgresql based on the instrumentation's best + // knowledge. + AttributeDBSystem = "db.system" +) + +const ( + // idle + AttributeDBClientConnectionStateIdle = "idle" + // used + AttributeDBClientConnectionStateUsed = "used" +) + +const ( + // Some other SQL database. Fallback only. 
See notes + AttributeDBSystemOtherSQL = "other_sql" + // Adabas (Adaptable Database System) + AttributeDBSystemAdabas = "adabas" + // Deprecated, use `intersystems_cache` instead + AttributeDBSystemCache = "cache" + // InterSystems Caché + AttributeDBSystemIntersystemsCache = "intersystems_cache" + // Apache Cassandra + AttributeDBSystemCassandra = "cassandra" + // ClickHouse + AttributeDBSystemClickhouse = "clickhouse" + // Deprecated, use `other_sql` instead + AttributeDBSystemCloudscape = "cloudscape" + // CockroachDB + AttributeDBSystemCockroachdb = "cockroachdb" + // Deprecated, no replacement at this time + AttributeDBSystemColdfusion = "coldfusion" + // Microsoft Azure Cosmos DB + AttributeDBSystemCosmosDB = "cosmosdb" + // Couchbase + AttributeDBSystemCouchbase = "couchbase" + // CouchDB + AttributeDBSystemCouchDB = "couchdb" + // IBM DB2 + AttributeDBSystemDB2 = "db2" + // Apache Derby + AttributeDBSystemDerby = "derby" + // Amazon DynamoDB + AttributeDBSystemDynamoDB = "dynamodb" + // EnterpriseDB + AttributeDBSystemEDB = "edb" + // Elasticsearch + AttributeDBSystemElasticsearch = "elasticsearch" + // FileMaker + AttributeDBSystemFilemaker = "filemaker" + // Firebird + AttributeDBSystemFirebird = "firebird" + // Deprecated, use `other_sql` instead + AttributeDBSystemFirstSQL = "firstsql" + // Apache Geode + AttributeDBSystemGeode = "geode" + // H2 + AttributeDBSystemH2 = "h2" + // SAP HANA + AttributeDBSystemHanaDB = "hanadb" + // Apache HBase + AttributeDBSystemHBase = "hbase" + // Apache Hive + AttributeDBSystemHive = "hive" + // HyperSQL DataBase + AttributeDBSystemHSQLDB = "hsqldb" + // InfluxDB + AttributeDBSystemInfluxdb = "influxdb" + // Informix + AttributeDBSystemInformix = "informix" + // Ingres + AttributeDBSystemIngres = "ingres" + // InstantDB + AttributeDBSystemInstantDB = "instantdb" + // InterBase + AttributeDBSystemInterbase = "interbase" + // MariaDB + AttributeDBSystemMariaDB = "mariadb" + // SAP MaxDB + AttributeDBSystemMaxDB = "maxdb" + // Memcached + AttributeDBSystemMemcached = "memcached" + // MongoDB + AttributeDBSystemMongoDB = "mongodb" + // Microsoft SQL Server + AttributeDBSystemMSSQL = "mssql" + // Deprecated, Microsoft SQL Server Compact is discontinued + AttributeDBSystemMssqlcompact = "mssqlcompact" + // MySQL + AttributeDBSystemMySQL = "mysql" + // Neo4j + AttributeDBSystemNeo4j = "neo4j" + // Netezza + AttributeDBSystemNetezza = "netezza" + // OpenSearch + AttributeDBSystemOpensearch = "opensearch" + // Oracle Database + AttributeDBSystemOracle = "oracle" + // Pervasive PSQL + AttributeDBSystemPervasive = "pervasive" + // PointBase + AttributeDBSystemPointbase = "pointbase" + // PostgreSQL + AttributeDBSystemPostgreSQL = "postgresql" + // Progress Database + AttributeDBSystemProgress = "progress" + // Redis + AttributeDBSystemRedis = "redis" + // Amazon Redshift + AttributeDBSystemRedshift = "redshift" + // Cloud Spanner + AttributeDBSystemSpanner = "spanner" + // SQLite + AttributeDBSystemSqlite = "sqlite" + // Sybase + AttributeDBSystemSybase = "sybase" + // Teradata + AttributeDBSystemTeradata = "teradata" + // Trino + AttributeDBSystemTrino = "trino" + // Vertica + AttributeDBSystemVertica = "vertica" +) + +// This group defines attributes for Cassandra. +const ( + // The consistency level of the query. Based on consistency values from CQL. 
+ // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeDBCassandraConsistencyLevel = "db.cassandra.consistency_level" + // The data center of the coordinating node for a query. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'us-west-2' + AttributeDBCassandraCoordinatorDC = "db.cassandra.coordinator.dc" + // The ID of the coordinating node for a query. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' + AttributeDBCassandraCoordinatorID = "db.cassandra.coordinator.id" + // Whether or not the query is idempotent. + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + AttributeDBCassandraIdempotence = "db.cassandra.idempotence" + // The fetch size used for paging, i.e. how many rows will be returned at once. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 5000 + AttributeDBCassandraPageSize = "db.cassandra.page_size" + // The number of times a query was speculatively executed. Not set or 0 if the + // query was not executed speculatively. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 0, 2 + AttributeDBCassandraSpeculativeExecutionCount = "db.cassandra.speculative_execution_count" +) + +const ( + // all + AttributeDBCassandraConsistencyLevelAll = "all" + // each_quorum + AttributeDBCassandraConsistencyLevelEachQuorum = "each_quorum" + // quorum + AttributeDBCassandraConsistencyLevelQuorum = "quorum" + // local_quorum + AttributeDBCassandraConsistencyLevelLocalQuorum = "local_quorum" + // one + AttributeDBCassandraConsistencyLevelOne = "one" + // two + AttributeDBCassandraConsistencyLevelTwo = "two" + // three + AttributeDBCassandraConsistencyLevelThree = "three" + // local_one + AttributeDBCassandraConsistencyLevelLocalOne = "local_one" + // any + AttributeDBCassandraConsistencyLevelAny = "any" + // serial + AttributeDBCassandraConsistencyLevelSerial = "serial" + // local_serial + AttributeDBCassandraConsistencyLevelLocalSerial = "local_serial" +) + +// This group defines attributes for Azure Cosmos DB. +const ( + // Unique Cosmos client instance id. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d' + AttributeDBCosmosDBClientID = "db.cosmosdb.client_id" + // Cosmos client connection mode. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeDBCosmosDBConnectionMode = "db.cosmosdb.connection_mode" + // CosmosDB Operation Type. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeDBCosmosDBOperationType = "db.cosmosdb.operation_type" + // RU consumed for that operation + // + // Type: double + // Requirement Level: Optional + // Stability: experimental + // Examples: 46.18, 1.0 + AttributeDBCosmosDBRequestCharge = "db.cosmosdb.request_charge" + // Request payload size in bytes + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + AttributeDBCosmosDBRequestContentLength = "db.cosmosdb.request_content_length" + // Cosmos DB status code. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 200, 201 + AttributeDBCosmosDBStatusCode = "db.cosmosdb.status_code" + // Cosmos DB sub status code. 
+ // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 1000, 1002 + AttributeDBCosmosDBSubStatusCode = "db.cosmosdb.sub_status_code" +) + +const ( + // Gateway (HTTP) connections mode + AttributeDBCosmosDBConnectionModeGateway = "gateway" + // Direct connection + AttributeDBCosmosDBConnectionModeDirect = "direct" +) + +const ( + // invalid + AttributeDBCosmosDBOperationTypeInvalid = "Invalid" + // create + AttributeDBCosmosDBOperationTypeCreate = "Create" + // patch + AttributeDBCosmosDBOperationTypePatch = "Patch" + // read + AttributeDBCosmosDBOperationTypeRead = "Read" + // read_feed + AttributeDBCosmosDBOperationTypeReadFeed = "ReadFeed" + // delete + AttributeDBCosmosDBOperationTypeDelete = "Delete" + // replace + AttributeDBCosmosDBOperationTypeReplace = "Replace" + // execute + AttributeDBCosmosDBOperationTypeExecute = "Execute" + // query + AttributeDBCosmosDBOperationTypeQuery = "Query" + // head + AttributeDBCosmosDBOperationTypeHead = "Head" + // head_feed + AttributeDBCosmosDBOperationTypeHeadFeed = "HeadFeed" + // upsert + AttributeDBCosmosDBOperationTypeUpsert = "Upsert" + // batch + AttributeDBCosmosDBOperationTypeBatch = "Batch" + // query_plan + AttributeDBCosmosDBOperationTypeQueryPlan = "QueryPlan" + // execute_javascript + AttributeDBCosmosDBOperationTypeExecuteJavascript = "ExecuteJavaScript" +) + +// This group defines attributes for Elasticsearch. +const ( + // Represents the human-readable identifier of the node/instance to which a + // request was routed. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'instance-0000000001' + AttributeDBElasticsearchNodeName = "db.elasticsearch.node.name" +) + +// Attributes for software deployments. +const ( + // Name of the deployment environment (aka deployment tier). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'staging', 'production' + // Note: deployment.environment.name does not affect the uniqueness constraints + // defined through + // the service.namespace, service.name and service.instance.id resource + // attributes. + // This implies that resources carrying the following attribute combinations MUST + // be + // considered to be identifying the same service:
+ //
+ //   • service.name=frontend, deployment.environment.name=production
+ //   • service.name=frontend, deployment.environment.name=staging.
+ //
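+ // A minimal usage sketch, assuming the go.opentelemetry.io/otel/attribute and
+ // go.opentelemetry.io/otel/sdk/resource packages: the production instance from
+ // the note above could be described as below, with the staging instance
+ // differing only in the deployment.environment.name value:
+ //
+ //	res := resource.NewSchemaless(
+ //		attribute.String("service.name", "frontend"),
+ //		attribute.String(AttributeDeploymentEnvironmentName, "production"),
+ //	)
+ //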
    + AttributeDeploymentEnvironmentName = "deployment.environment.name" + // The id of the deployment. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '1208' + AttributeDeploymentID = "deployment.id" + // The name of the deployment. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'deploy my app', 'deploy-frontend' + AttributeDeploymentName = "deployment.name" + // The status of the deployment. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeDeploymentStatus = "deployment.status" +) + +const ( + // failed + AttributeDeploymentStatusFailed = "failed" + // succeeded + AttributeDeploymentStatusSucceeded = "succeeded" +) + +// Attributes that represents an occurrence of a lifecycle transition on the +// Android platform. +const ( + // Deprecated use the device.app.lifecycle event definition including + // android.state as a payload field instead. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Note: The Android lifecycle states are defined in Activity lifecycle callbacks, + // and from which the OS identifiers are derived. + AttributeAndroidState = "android.state" +) + +const ( + // Any time before Activity.onResume() or, if the app has no Activity, Context.startService() has been called in the app for the first time + AttributeAndroidStateCreated = "created" + // Any time after Activity.onPause() or, if the app has no Activity, Context.stopService() has been called when the app was in the foreground state + AttributeAndroidStateBackground = "background" + // Any time after Activity.onResume() or, if the app has no Activity, Context.startService() has been called when the app was in either the created or background states + AttributeAndroidStateForeground = "foreground" +) + +// These attributes may be used to describe the receiver of a network +// exchange/packet. These should be used when there is no client/server +// relationship between the two sides, or when that relationship is unknown. +// This covers low-level network interactions (e.g. packet tracing) where you +// don't know if there was a connection or which side initiated it. This also +// covers unidirectional UDP flows and peer-to-peer communication where the +// "user-facing" surface of the protocol / API doesn't expose a clear notion of +// client and server. +const ( + // Destination address - domain name if available without reverse DNS lookup; + // otherwise, IP address or Unix domain socket name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'destination.example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the source side, and when communicating through an + // intermediary, destination.address SHOULD represent the destination address + // behind any intermediaries, for example proxies, if it's available. + AttributeDestinationAddress = "destination.address" + // Destination port number + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 3389, 2888 + AttributeDestinationPort = "destination.port" +) + +// Describes device attributes. +const ( + // A unique identifier representing the device + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' + // Note: The device identifier MUST only be defined using the values outlined + // below. 
This value is not an advertising identifier and MUST NOT be used as + // such. On iOS (Swift or Objective-C), this value MUST be equal to the vendor + // identifier. On Android (Java or Kotlin), this value MUST be equal to the + // Firebase Installation ID or a globally unique UUID which is persisted across + // sessions in your application. More information can be found here on best + // practices and exact implementation details. Caution should be taken when + // storing personal data or anything which can identify a user. GDPR and data + // protection laws may apply, ensure you do your own due diligence. + AttributeDeviceID = "device.id" + // The name of the device manufacturer + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Apple', 'Samsung' + // Note: The Android OS provides this field via Build. iOS apps SHOULD hardcode + // the value Apple. + AttributeDeviceManufacturer = "device.manufacturer" + // The model identifier for the device + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'iPhone3,4', 'SM-G920F' + // Note: It's recommended this value represents a machine-readable version of the + // model identifier rather than the market or consumer-friendly name of the + // device. + AttributeDeviceModelIdentifier = "device.model.identifier" + // The marketing name for the device model + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' + // Note: It's recommended this value represents a human-readable version of the + // device model rather than a machine-readable alternative. + AttributeDeviceModelName = "device.model.name" +) + +// These attributes may be used for any disk related operation. +const ( + // The disk IO operation direction. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'read' + AttributeDiskIoDirection = "disk.io.direction" +) + +const ( + // read + AttributeDiskIoDirectionRead = "read" + // write + AttributeDiskIoDirectionWrite = "write" +) + +// The shared attributes used to report a DNS query. +const ( + // The name being queried. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'www.example.com', 'opentelemetry.io' + // Note: If the name field contains non-printable characters (below 32 or above + // 126), those characters should be represented as escaped base 10 integers + // (\DDD). Back slashes and quotes should be escaped. Tabs, carriage returns, and + // line feeds should be converted to \t, \r, and \n respectively. + AttributeDNSQuestionName = "dns.question.name" +) + +// The shared attributes used to report an error. +const ( + // Describes a class of error the operation ended with. + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + // Examples: 'timeout', 'java.net.UnknownHostException', + // 'server_certificate_invalid', '500' + // Note: The error.type SHOULD be predictable, and SHOULD have low + // cardinality.When error.type is set to a type (e.g., an exception type), its + // canonical class name identifying the type within the artifact SHOULD be + // used.Instrumentations SHOULD document the list of errors they report.The + // cardinality of error.type within one instrumentation library SHOULD be low. 
+ // Telemetry consumers that aggregate data from multiple instrumentation libraries + // and applications + // should be prepared for error.type to have high cardinality at query time when + // no + // additional filters are applied.If the operation has completed successfully, + // instrumentations SHOULD NOT set error.type.If a specific domain defines its own + // set of error identifiers (such as HTTP or gRPC status codes), + // it's RECOMMENDED to:
+ //
+ //   • Use a domain-specific attribute
+ //   • Set error.type to capture all errors, regardless of whether they are
+ //     defined within the domain-specific set or not.
+ //
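+ // A minimal usage sketch, assuming a trace.Span named span, the
+ // go.opentelemetry.io/otel/attribute package, and http.response.status_code as
+ // the domain-specific attribute; a server error could then be recorded as
+ //
+ //	span.SetAttributes(
+ //		attribute.Int("http.response.status_code", 500),
+ //		attribute.String(AttributeErrorType, "500"),
+ //	)
+ //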
    + AttributeErrorType = "error.type" +) + +const ( + // A fallback error value to be used when the instrumentation doesn't define a custom value + AttributeErrorTypeOther = "_OTHER" +) + +// Attributes for Events represented using Log Records. +const ( + // Identifies the class / type of event. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'browser.mouse.click', 'device.app.lifecycle' + // Note: Event names are subject to the same rules as attribute names. Notably, + // event names are namespaced to avoid collisions and provide a clean separation + // of semantics for events in separate domains like browser, mobile, and + // kubernetes. + AttributeEventName = "event.name" +) + +// The shared attributes used to report a single exception associated with a +// span or log. +const ( + // SHOULD be set to true if the exception event is recorded at a point where it is + // known that the exception is escaping the scope of the span. + // + // Type: boolean + // Requirement Level: Optional + // Stability: stable + // Note: An exception is considered to have escaped (or left) the scope of a span, + // if that span is ended while the exception is still logically "in + // flight". + // This may be actually "in flight" in some languages (e.g. if the + // exception + // is passed to a Context manager's __exit__ method in Python) but will + // usually be caught at the point of recording the exception in most languages.It + // is usually not possible to determine at the point where an exception is thrown + // whether it will escape the scope of a span. + // However, it is trivial to know that an exception + // will escape, if one checks for an active exception just before ending the span, + // as done in the example for recording span exceptions.It follows that an + // exception may still escape the scope of the span + // even if the exception.escaped attribute was not set or set to false, + // since the event might have been recorded at a time where it was not + // clear whether the exception will escape. + AttributeExceptionEscaped = "exception.escaped" + // The exception message. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'Division by zero', "Can't convert 'int' object to str implicitly" + AttributeExceptionMessage = "exception.message" + // A stacktrace as a string in the natural representation for the language + // runtime. The representation is to be determined and documented by each language + // SIG. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test + // exception\\n at ' + // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + AttributeExceptionStacktrace = "exception.stacktrace" + // The type of the exception (its fully-qualified class name, if applicable). The + // dynamic type of the exception should be preferred over the static type in + // languages that support it. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'java.net.ConnectException', 'OSError' + AttributeExceptionType = "exception.type" +) + +// FaaS attributes +const ( + // A boolean that is true if the serverless function is executed for the first + // time (aka cold-start). 
+ // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + AttributeFaaSColdstart = "faas.coldstart" + // A string containing the schedule period as Cron Expression. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '0/5 * * * ? *' + AttributeFaaSCron = "faas.cron" + // The name of the source on which the triggering operation was performed. For + // example, in Cloud Storage or S3 corresponds to the bucket name, and in Cosmos + // DB to the database name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'myBucketName', 'myDBName' + AttributeFaaSDocumentCollection = "faas.document.collection" + // The document name/table subjected to the operation. For example, in Cloud + // Storage or S3 is the name of the file, and in Cosmos DB the table name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'myFile.txt', 'myTableName' + AttributeFaaSDocumentName = "faas.document.name" + // Describes the type of the operation that was performed on the data. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeFaaSDocumentOperation = "faas.document.operation" + // A string containing the time when the data was accessed in the ISO 8601 format + // expressed in UTC. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2020-01-23T13:47:06Z' + AttributeFaaSDocumentTime = "faas.document.time" + // The execution environment ID as a string, that will be potentially reused for + // other invocations to the same function/function version. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' + // Note:
+ //
+ //   • AWS Lambda: Use the (full) log stream name.
+ //
    + AttributeFaaSInstance = "faas.instance" + // The invocation ID of the current function invocation. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' + AttributeFaaSInvocationID = "faas.invocation_id" + // The name of the invoked function. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'my-function' + // Note: SHOULD be equal to the faas.name resource attribute of the invoked + // function. + AttributeFaaSInvokedName = "faas.invoked_name" + // The cloud provider of the invoked function. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Note: SHOULD be equal to the cloud.provider resource attribute of the invoked + // function. + AttributeFaaSInvokedProvider = "faas.invoked_provider" + // The cloud region of the invoked function. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'eu-central-1' + // Note: SHOULD be equal to the cloud.region resource attribute of the invoked + // function. + AttributeFaaSInvokedRegion = "faas.invoked_region" + // The amount of memory available to the serverless function converted to Bytes. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 134217728 + // Note: It's recommended to set this attribute since e.g. too little memory can + // easily stop a Java AWS Lambda function from working correctly. On AWS Lambda, + // the environment variable AWS_LAMBDA_FUNCTION_MEMORY_SIZE provides this + // information (which must be multiplied by 1,048,576). + AttributeFaaSMaxMemory = "faas.max_memory" + // The name of the single function that this runtime instance executes. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'my-function', 'myazurefunctionapp/some-function-name' + // Note: This is the name of the function as configured/deployed on the FaaS + // platform and is usually different from the name of the callback + // function (which may be stored in the + // code.namespace/code.function + // span attributes).For some cloud providers, the above definition is ambiguous. + // The following + // definition of function name MUST be used for this attribute + // (and consequently the span name) for the listed cloud providers/products:
+ //
+ //   • Azure: The full name /, i.e., function app name
+ //     followed by a forward slash followed by the function name (this form
+ //     can also be seen in the resource JSON for the function).
+ //     This means that a span attribute MUST be used, as an Azure function
+ //     app can host multiple functions that would usually share
+ //     a TracerProvider (see also the cloud.resource_id attribute).
+ //
    + AttributeFaaSName = "faas.name" + // A string containing the function invocation time in the ISO 8601 format + // expressed in UTC. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2020-01-23T13:47:06Z' + AttributeFaaSTime = "faas.time" + // Type of the trigger which caused this function invocation. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeFaaSTrigger = "faas.trigger" + // The immutable version of the function being executed. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '26', 'pinkfroid-00002' + // Note: Depending on the cloud provider and platform, use:
+ //
+ //   - AWS Lambda: The function version
+ //     (an integer represented as a decimal string).
+ //   - Google Cloud Run (Services): The revision
+ //     (i.e., the function name plus the revision suffix).
+ //   - Google Cloud Functions: The value of the
+ //     K_REVISION environment variable.
+ //   - Azure Functions: Not applicable. Do not set this attribute.
+ AttributeFaaSVersion = "faas.version" +) + +const ( + // When a new object is created + AttributeFaaSDocumentOperationInsert = "insert" + // When an object is modified + AttributeFaaSDocumentOperationEdit = "edit" + // When an object is deleted + AttributeFaaSDocumentOperationDelete = "delete" +) + +const ( + // Alibaba Cloud + AttributeFaaSInvokedProviderAlibabaCloud = "alibaba_cloud" + // Amazon Web Services + AttributeFaaSInvokedProviderAWS = "aws" + // Microsoft Azure + AttributeFaaSInvokedProviderAzure = "azure" + // Google Cloud Platform + AttributeFaaSInvokedProviderGCP = "gcp" + // Tencent Cloud + AttributeFaaSInvokedProviderTencentCloud = "tencent_cloud" +) + +const ( + // A response to some data source operation such as a database or filesystem read/write + AttributeFaaSTriggerDatasource = "datasource" + // To provide an answer to an inbound HTTP request + AttributeFaaSTriggerHTTP = "http" + // A function is set to be executed when messages are sent to a messaging system + AttributeFaaSTriggerPubsub = "pubsub" + // A function is scheduled to be executed regularly + AttributeFaaSTriggerTimer = "timer" + // If none of the others apply + AttributeFaaSTriggerOther = "other" +) + +// Attributes for Feature Flags. +const ( + // The unique identifier of the feature flag. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'logo-color' + AttributeFeatureFlagKey = "feature_flag.key" + // The name of the service provider that performs the flag evaluation. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Flag Manager' + AttributeFeatureFlagProviderName = "feature_flag.provider_name" + // SHOULD be a semantic identifier for a value. If one is unavailable, a + // stringified version of the value can be used. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'red', 'true', 'on' + // Note: A semantic identifier, commonly referred to as a variant, provides a + // means + // for referring to a value without including the value itself. This can + // provide additional context for understanding the meaning behind a value. + // For example, the variant red may be used for the value #c05543.A stringified + // version of the value can be used in situations where a + // semantic identifier is unavailable. String representation of the value + // should be determined by the implementer. + AttributeFeatureFlagVariant = "feature_flag.variant" +) + +// Describes file attributes. +const ( + // Directory where the file is located. It should include the drive letter, when + // appropriate. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '/home/user', 'C:\\Program Files\\MyApp' + AttributeFileDirectory = "file.directory" + // File extension, excluding the leading dot. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'png', 'gz' + // Note: When the file name has multiple extensions (example.tar.gz), only the + // last one should be captured ("gz", not "tar.gz"). + AttributeFileExtension = "file.extension" + // Name of the file including the extension, without the directory. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'example.png' + AttributeFileName = "file.name" + // Full path to the file, including the file name. It should include the drive + // letter, when appropriate.
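
A minimal sketch of the faas.max_memory arithmetic described in the note above, assuming the go.opentelemetry.io/otel/attribute package and that the constants from this file are in scope; the helper name lambdaMaxMemoryBytes is illustrative, not part of the vendored file:

    import (
    	"os"
    	"strconv"

    	"go.opentelemetry.io/otel/attribute"
    )

    // lambdaMaxMemoryBytes derives faas.max_memory on AWS Lambda:
    // AWS_LAMBDA_FUNCTION_MEMORY_SIZE reports megabytes, while the attribute
    // is defined in bytes, so the value is multiplied by 1,048,576.
    func lambdaMaxMemoryBytes() (attribute.KeyValue, bool) {
    	mb, err := strconv.ParseInt(os.Getenv("AWS_LAMBDA_FUNCTION_MEMORY_SIZE"), 10, 64)
    	if err != nil {
    		return attribute.KeyValue{}, false // unset or not running on Lambda
    	}
    	// A 128 MB function yields 134217728, matching the example above.
    	return attribute.Int64(AttributeFaaSMaxMemory, mb*1048576), true
    }
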
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '/home/alice/example.png', 'C:\\Program Files\\MyApp\\myapp.exe' + AttributeFilePath = "file.path" + // File size in bytes. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + AttributeFileSize = "file.size" +) + +// Attributes for Google Cloud client libraries. +const ( + // Identifies the Google Cloud service for which the official client library is + // intended. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'appengine', 'run', 'firestore', 'alloydb', 'spanner' + // Note: Intended to be a stable identifier for Google Cloud client libraries that + // is uniform across implementation languages. The value should be derived from + // the canonical service domain for the service; for example, 'foo.googleapis.com' + // should result in a value of 'foo'. + AttributeGCPClientService = "gcp.client.service" +) + +// Attributes for Google Cloud Run. +const ( + // The name of the Cloud Run execution being run for the Job, as set by the + // CLOUD_RUN_EXECUTION environment variable. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'job-name-xxxx', 'sample-job-mdw84' + AttributeGCPCloudRunJobExecution = "gcp.cloud_run.job.execution" + // The index for a task within an execution as provided by the + // CLOUD_RUN_TASK_INDEX environment variable. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 0, 1 + AttributeGCPCloudRunJobTaskIndex = "gcp.cloud_run.job.task_index" +) + +// Attributes for Google Compute Engine (GCE). +const ( + // The hostname of a GCE instance. This is the full value of the default or custom + // hostname. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'my-host1234.example.com', 'sample-vm.us-west1-b.c.my- + // project.internal' + AttributeGCPGceInstanceHostname = "gcp.gce.instance.hostname" + // The instance name of a GCE instance. This is the value provided by host.name, + // the visible name of the instance in the Cloud Console UI, and the prefix for + // the default hostname of the instance as defined by the default internal DNS + // name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'instance-1', 'my-vm-name' + AttributeGCPGceInstanceName = "gcp.gce.instance.name" +) + +// The attributes used to describe telemetry in the context of Generative +// Artificial Intelligence (GenAI) Models requests and responses. +const ( + // The full response received from the GenAI model. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: "[{'role': 'assistant', 'content': 'The capital of France is + // Paris.'}]" + // Note: It's RECOMMENDED to format completions as JSON string matching OpenAI + // messages format + AttributeGenAiCompletion = "gen_ai.completion" + // The name of the operation being performed. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Note: If one of the predefined values applies, but specific system uses a + // different name it's RECOMMENDED to document it in the semantic conventions for + // specific GenAI system and use system-specific name in the instrumentation. If a + // different name is not documented, instrumentation libraries SHOULD use + // applicable predefined value. 
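
The gcp.cloud_run.job.* attributes above map one-to-one onto environment variables. A hedged sketch, assuming the go.opentelemetry.io/otel/attribute package and the constants in scope; cloudRunJobAttrs is an illustrative name:

    import (
    	"os"
    	"strconv"

    	"go.opentelemetry.io/otel/attribute"
    )

    // cloudRunJobAttrs reads CLOUD_RUN_EXECUTION and CLOUD_RUN_TASK_INDEX,
    // the variables named in the comments above, and maps them onto attributes.
    func cloudRunJobAttrs() []attribute.KeyValue {
    	idx, _ := strconv.Atoi(os.Getenv("CLOUD_RUN_TASK_INDEX")) // "0", "1", ...
    	return []attribute.KeyValue{
    		attribute.String(AttributeGCPCloudRunJobExecution, os.Getenv("CLOUD_RUN_EXECUTION")),
    		attribute.Int(AttributeGCPCloudRunJobTaskIndex, idx),
    	}
    }
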
+ AttributeGenAiOperationName = "gen_ai.operation.name" + // The full prompt sent to the GenAI model. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: "[{'role': 'user', 'content': 'What is the capital of France?'}]" + // Note: It's RECOMMENDED to format prompts as JSON string matching OpenAI + // messages format + AttributeGenAiPrompt = "gen_ai.prompt" + // The frequency penalty setting for the GenAI request. + // + // Type: double + // Requirement Level: Optional + // Stability: experimental + // Examples: 0.1 + AttributeGenAiRequestFrequencyPenalty = "gen_ai.request.frequency_penalty" + // The maximum number of tokens the model generates for a request. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 100 + AttributeGenAiRequestMaxTokens = "gen_ai.request.max_tokens" + // The name of the GenAI model a request is being made to. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'gpt-4' + AttributeGenAiRequestModel = "gen_ai.request.model" + // The presence penalty setting for the GenAI request. + // + // Type: double + // Requirement Level: Optional + // Stability: experimental + // Examples: 0.1 + AttributeGenAiRequestPresencePenalty = "gen_ai.request.presence_penalty" + // List of sequences that the model will use to stop generating further tokens. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'forest', 'lived' + AttributeGenAiRequestStopSequences = "gen_ai.request.stop_sequences" + // The temperature setting for the GenAI request. + // + // Type: double + // Requirement Level: Optional + // Stability: experimental + // Examples: 0.0 + AttributeGenAiRequestTemperature = "gen_ai.request.temperature" + // The top_k sampling setting for the GenAI request. + // + // Type: double + // Requirement Level: Optional + // Stability: experimental + // Examples: 1.0 + AttributeGenAiRequestTopK = "gen_ai.request.top_k" + // The top_p sampling setting for the GenAI request. + // + // Type: double + // Requirement Level: Optional + // Stability: experimental + // Examples: 1.0 + AttributeGenAiRequestTopP = "gen_ai.request.top_p" + // Array of reasons the model stopped generating tokens, corresponding to each + // generation received. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'stop' + AttributeGenAiResponseFinishReasons = "gen_ai.response.finish_reasons" + // The unique identifier for the completion. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'chatcmpl-123' + AttributeGenAiResponseID = "gen_ai.response.id" + // The name of the model that generated the response. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'gpt-4-0613' + AttributeGenAiResponseModel = "gen_ai.response.model" + // The Generative AI product as identified by the client or server + // instrumentation. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'openai' + // Note: The gen_ai.system describes a family of GenAI models with specific model + // identified + // by gen_ai.request.model and gen_ai.response.model attributes.The actual GenAI + // product may differ from the one identified by the client. 
+ // For example, when using OpenAI client libraries to communicate with Mistral, + // the gen_ai.system + // is set to openai based on the instrumentation's best knowledge.For custom + // models, a custom friendly name SHOULD be used. + // If none of these options apply, the gen_ai.system SHOULD be set to _OTHER. + AttributeGenAiSystem = "gen_ai.system" + // The type of token being counted. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'input', 'output' + AttributeGenAiTokenType = "gen_ai.token.type" + // The number of tokens used in the GenAI input (prompt). + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 100 + AttributeGenAiUsageInputTokens = "gen_ai.usage.input_tokens" + // The number of tokens used in the GenAI response (completion). + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 180 + AttributeGenAiUsageOutputTokens = "gen_ai.usage.output_tokens" +) + +const ( + // Chat completion operation such as [OpenAI Chat API](https://platform.openai.com/docs/api-reference/chat) + AttributeGenAiOperationNameChat = "chat" + // Text completions operation such as [OpenAI Completions API (Legacy)](https://platform.openai.com/docs/api-reference/completions) + AttributeGenAiOperationNameTextCompletion = "text_completion" +) + +const ( + // OpenAI + AttributeGenAiSystemOpenai = "openai" + // Vertex AI + AttributeGenAiSystemVertexAi = "vertex_ai" + // Anthropic + AttributeGenAiSystemAnthropic = "anthropic" + // Cohere + AttributeGenAiSystemCohere = "cohere" +) + +const ( + // Input tokens (prompt, input, etc.) + AttributeGenAiTokenTypeInput = "input" + // Output tokens (completion, response, etc.) + AttributeGenAiTokenTypeCompletion = "output" +) + +// Go related attributes. +const ( + // The type of memory. + // + // Type: Enum + // Requirement Level: Recommended + // Stability: experimental + // Examples: 'other', 'stack' + AttributeGoMemoryType = "go.memory.type" +) + +const ( + // Memory allocated from the heap that is reserved for stack space, whether or not it is currently in-use + AttributeGoMemoryTypeStack = "stack" + // Memory used by the Go runtime, excluding other categories of memory usage described in this enumeration + AttributeGoMemoryTypeOther = "other" +) + +// Attributes for GraphQL. +const ( + // The GraphQL document being executed. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'query findBookByID { bookByID(id: ?) { name } }' + // Note: The value may be sanitized to exclude sensitive information. + AttributeGraphqlDocument = "graphql.document" + // The name of the operation being executed. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'findBookByID' + AttributeGraphqlOperationName = "graphql.operation.name" + // The type of the operation being executed. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'query', 'mutation', 'subscription' + AttributeGraphqlOperationType = "graphql.operation.type" +) + +const ( + // GraphQL query + AttributeGraphqlOperationTypeQuery = "query" + // GraphQL mutation + AttributeGraphqlOperationTypeMutation = "mutation" + // GraphQL subscription + AttributeGraphqlOperationTypeSubscription = "subscription" +)
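
To make the gen_ai grouping concrete: a chat-completion span would typically carry the operation, system, model, and token-usage attributes together. An illustrative sketch reusing the example values from the comments above (attribute package assumed in scope, chatAttrs a hypothetical name):

    import "go.opentelemetry.io/otel/attribute"

    // chatAttrs assembles the attributes for one chat-completion span.
    func chatAttrs() []attribute.KeyValue {
    	return []attribute.KeyValue{
    		attribute.String(AttributeGenAiOperationName, AttributeGenAiOperationNameChat),
    		attribute.String(AttributeGenAiSystem, AttributeGenAiSystemOpenai),
    		attribute.String(AttributeGenAiRequestModel, "gpt-4"),
    		attribute.String(AttributeGenAiResponseModel, "gpt-4-0613"),
    		attribute.Int(AttributeGenAiUsageInputTokens, 100),
    		attribute.Int(AttributeGenAiUsageOutputTokens, 180),
    	}
    }

+// Attributes for the Heroku platform on which the application is running.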
+const ( + // Unique identifier for the application + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da' + AttributeHerokuAppID = "heroku.app.id" + // Commit hash for the current release + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec' + AttributeHerokuReleaseCommit = "heroku.release.commit" + // Time and date the release was created + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2022-10-23T18:00:42Z' + AttributeHerokuReleaseCreationTimestamp = "heroku.release.creation_timestamp" +) + +// A host is defined as a computing instance. For example, physical servers, +// virtual machines, switches or disk array. +const ( + // The CPU architecture the host system is running on. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeHostArch = "host.arch" + // The amount of level 2 memory cache available to the processor (in Bytes). + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 12288000 + AttributeHostCPUCacheL2Size = "host.cpu.cache.l2.size" + // Family or generation of the CPU. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '6', 'PA-RISC 1.1e' + AttributeHostCPUFamily = "host.cpu.family" + // Model identifier. It provides more granular information about the CPU, + // distinguishing it from other CPUs within the same family. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '6', '9000/778/B180L' + AttributeHostCPUModelID = "host.cpu.model.id" + // Model designation of the processor. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz' + AttributeHostCPUModelName = "host.cpu.model.name" + // Stepping or core revisions. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '1', 'r1p1' + AttributeHostCPUStepping = "host.cpu.stepping" + // Processor manufacturer identifier. A maximum 12-character string. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'GenuineIntel' + // Note: CPUID command returns the vendor ID string in EBX, EDX and ECX registers. + // Writing these to memory in this order results in a 12-character string. + AttributeHostCPUVendorID = "host.cpu.vendor.id" + // Unique host ID. For Cloud, this must be the instance_id assigned by the cloud + // provider. For non-containerized systems, this should be the machine-id. See the + // table below for the sources to use to determine the machine-id based on + // operating system. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'fdbf79e8af94cb7f9e8df36789187052' + AttributeHostID = "host.id" + // VM image ID or host OS image ID. For Cloud, this value is from the provider. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'ami-07b06b442921831e5' + AttributeHostImageID = "host.image.id" + // Name of the VM image or OS install the host was instantiated from. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' + AttributeHostImageName = "host.image.name" + // The version string of the VM image or host OS as defined in Version Attributes. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '0.1' + AttributeHostImageVersion = "host.image.version" + // Available IP addresses of the host, excluding loopback interfaces. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: '192.168.1.140', 'fe80::abc2:4a28:737a:609e' + // Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6 addresses + // MUST be specified in the RFC 5952 format. + AttributeHostIP = "host.ip" + // Available MAC addresses of the host, excluding loopback interfaces. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'AC-DE-48-23-45-67', 'AC-DE-48-23-45-67-01-9F' + // Note: MAC Addresses MUST be represented in IEEE RA hexadecimal form: as hyphen- + // separated octets in uppercase hexadecimal form from most to least significant. + AttributeHostMac = "host.mac" + // Name of the host. On Unix systems, it may contain what the hostname command + // returns, or the fully qualified hostname, or another name specified by the + // user. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry-test' + AttributeHostName = "host.name" + // Type of host. For Cloud, this must be the machine type. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'n1-standard-1' + AttributeHostType = "host.type" +) + +const ( + // AMD64 + AttributeHostArchAMD64 = "amd64" + // ARM32 + AttributeHostArchARM32 = "arm32" + // ARM64 + AttributeHostArchARM64 = "arm64" + // Itanium + AttributeHostArchIA64 = "ia64" + // 32-bit PowerPC + AttributeHostArchPPC32 = "ppc32" + // 64-bit PowerPC + AttributeHostArchPPC64 = "ppc64" + // IBM z/Architecture + AttributeHostArchS390x = "s390x" + // 32-bit x86 + AttributeHostArchX86 = "x86" +) + +// Semantic convention attributes in the HTTP namespace. +const ( + // State of the HTTP connection in the HTTP connection pool. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'active', 'idle' + AttributeHTTPConnectionState = "http.connection.state" + // The size of the request payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as the + // Content-Length header. For requests using transport encoding, this should be + // the compressed size. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 3495 + AttributeHTTPRequestBodySize = "http.request.body.size" + // HTTP request method. + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + // Examples: 'GET', 'POST', 'HEAD' + // Note: HTTP request method value SHOULD be "known" to the + // instrumentation. 
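
The host.mac note pins down an exact wire format. A small sketch of that normalization, using only the standard library (formatHostMac is an illustrative name):

    import (
    	"fmt"
    	"net"
    	"strings"
    )

    // formatHostMac renders a MAC address in the IEEE RA form required by
    // host.mac: hyphen-separated uppercase octets, most significant first.
    func formatHostMac(hw net.HardwareAddr) string {
    	octets := make([]string, len(hw))
    	for i, b := range hw {
    		octets[i] = fmt.Sprintf("%02X", b)
    	}
    	return strings.Join(octets, "-") // e.g. "AC-DE-48-23-45-67"
    }
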
+ // By default, this convention defines "known" methods as the ones + // listed in RFC9110 + // and the PATCH method defined in RFC5789.If the HTTP request method is not known + // to instrumentation, it MUST set the http.request.method attribute to _OTHER.If + // the HTTP instrumentation could end up converting valid HTTP request methods to + // _OTHER, then it MUST provide a way to override + // the list of known HTTP methods. If this override is done via environment + // variable, then the environment variable MUST be named + // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated list of + // case-sensitive known HTTP methods + // (this list MUST be a full override of the default known method, it is not a + // list of known methods in addition to the defaults).HTTP method names are case- + // sensitive and http.request.method attribute value MUST match a known HTTP + // method name exactly. + // Instrumentations for specific web frameworks that consider HTTP methods to be + // case insensitive, SHOULD populate a canonical equivalent. + // Tracing instrumentations that do so, MUST also set http.request.method_original + // to the original value. + AttributeHTTPRequestMethod = "http.request.method" + // Original HTTP method sent by the client in the request line. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'GeT', 'ACL', 'foo' + AttributeHTTPRequestMethodOriginal = "http.request.method_original" + // The ordinal number of request resending attempt (for any reason, including + // redirects). + // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 3 + // Note: The resend count SHOULD be updated each time an HTTP request gets resent + // by the client, regardless of what was the cause of the resending (e.g. + // redirection, authorization failure, 503 Server Unavailable, network issues, or + // any other). + AttributeHTTPRequestResendCount = "http.request.resend_count" + // The total size of the request in bytes. This should be the total number of + // bytes sent over the wire, including the request line (HTTP/1.1), framing + // (HTTP/2 and HTTP/3), headers, and request body if any. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 1437 + AttributeHTTPRequestSize = "http.request.size" + // The size of the response payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as the + // Content-Length header. For requests using transport encoding, this should be + // the compressed size. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 3495 + AttributeHTTPResponseBodySize = "http.response.body.size" + // The total size of the response in bytes. This should be the total number of + // bytes sent over the wire, including the status line (HTTP/1.1), framing (HTTP/2 + // and HTTP/3), headers, and response body and trailers if any. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 1437 + AttributeHTTPResponseSize = "http.response.size" + // HTTP response status code. + // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 200 + AttributeHTTPResponseStatusCode = "http.response.status_code" + // The matched route, that is, the path template in the format used by the + // respective server framework. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '/users/:userID?', '{controller}/{action}/{id?}' + // Note: MUST NOT be populated when this is not supported by the HTTP server + // framework as the route attribute should have low-cardinality and the URI path + // can NOT substitute it. + // SHOULD include the application root if there is one. + AttributeHTTPRoute = "http.route" +) + +const ( + // active state + AttributeHTTPConnectionStateActive = "active" + // idle state + AttributeHTTPConnectionStateIdle = "idle" +) + +const ( + // CONNECT method + AttributeHTTPRequestMethodConnect = "CONNECT" + // DELETE method + AttributeHTTPRequestMethodDelete = "DELETE" + // GET method + AttributeHTTPRequestMethodGet = "GET" + // HEAD method + AttributeHTTPRequestMethodHead = "HEAD" + // OPTIONS method + AttributeHTTPRequestMethodOptions = "OPTIONS" + // PATCH method + AttributeHTTPRequestMethodPatch = "PATCH" + // POST method + AttributeHTTPRequestMethodPost = "POST" + // PUT method + AttributeHTTPRequestMethodPut = "PUT" + // TRACE method + AttributeHTTPRequestMethodTrace = "TRACE" + // Any HTTP method that the instrumentation has no prior knowledge of + AttributeHTTPRequestMethodOther = "_OTHER" +) + +// Java Virtual machine related attributes. +const ( + // Name of the buffer pool. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'mapped', 'direct' + // Note: Pool names are generally obtained via BufferPoolMXBean#getName(). + AttributeJvmBufferPoolName = "jvm.buffer.pool.name" + // Name of the garbage collector action. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'end of minor GC', 'end of major GC' + // Note: Garbage collector action is generally obtained via + // GarbageCollectionNotificationInfo#getGcAction(). + AttributeJvmGcAction = "jvm.gc.action" + // Name of the garbage collector. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'G1 Young Generation', 'G1 Old Generation' + // Note: Garbage collector name is generally obtained via + // GarbageCollectionNotificationInfo#getGcName(). + AttributeJvmGcName = "jvm.gc.name" + // Name of the memory pool. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'G1 Old Gen', 'G1 Eden space', 'G1 Survivor Space' + // Note: Pool names are generally obtained via MemoryPoolMXBean#getName(). + AttributeJvmMemoryPoolName = "jvm.memory.pool.name" + // The type of memory. + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + // Examples: 'heap', 'non_heap' + AttributeJvmMemoryType = "jvm.memory.type" + // Whether the thread is daemon or not. + // + // Type: boolean + // Requirement Level: Optional + // Stability: stable + AttributeJvmThreadDaemon = "jvm.thread.daemon" + // State of the thread. 
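
The http.request.method note above is effectively an algorithm: an exact match wins, a case-insensitive match is canonicalized with the original preserved, and anything else becomes _OTHER. A sketch under the usual assumptions (constants from this file in scope; normalizeMethod is illustrative):

    import "strings"

    // normalizeMethod returns the value for http.request.method and, when the
    // input had to be changed, the value for http.request.method_original.
    func normalizeMethod(m string) (method, original string) {
    	known := map[string]bool{
    		AttributeHTTPRequestMethodConnect: true, AttributeHTTPRequestMethodDelete: true,
    		AttributeHTTPRequestMethodGet: true, AttributeHTTPRequestMethodHead: true,
    		AttributeHTTPRequestMethodOptions: true, AttributeHTTPRequestMethodPatch: true,
    		AttributeHTTPRequestMethodPost: true, AttributeHTTPRequestMethodPut: true,
    		AttributeHTTPRequestMethodTrace: true,
    	}
    	if known[m] {
    		return m, "" // exact, case-sensitive match
    	}
    	if upper := strings.ToUpper(m); known[upper] {
    		return upper, m // canonicalize; keep the original, per the note
    	}
    	return AttributeHTTPRequestMethodOther, m // unknown -> _OTHER
    }
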
+ // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + // Examples: 'runnable', 'blocked' + AttributeJvmThreadState = "jvm.thread.state" +) + +const ( + // Heap memory + AttributeJvmMemoryTypeHeap = "heap" + // Non-heap memory + AttributeJvmMemoryTypeNonHeap = "non_heap" +) + +const ( + // A thread that has not yet started is in this state + AttributeJvmThreadStateNew = "new" + // A thread executing in the Java virtual machine is in this state + AttributeJvmThreadStateRunnable = "runnable" + // A thread that is blocked waiting for a monitor lock is in this state + AttributeJvmThreadStateBlocked = "blocked" + // A thread that is waiting indefinitely for another thread to perform a particular action is in this state + AttributeJvmThreadStateWaiting = "waiting" + // A thread that is waiting for another thread to perform an action for up to a specified waiting time is in this state + AttributeJvmThreadStateTimedWaiting = "timed_waiting" + // A thread that has exited is in this state + AttributeJvmThreadStateTerminated = "terminated" +) + +// Kubernetes resource attributes. +const ( + // The name of the cluster. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry-cluster' + AttributeK8SClusterName = "k8s.cluster.name" + // A pseudo-ID for the cluster, set to the UID of the kube-system namespace. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '218fc5a9-a5f1-4b54-aa05-46717d0ab26d' + // Note: K8S doesn't have support for obtaining a cluster ID. If this is ever + // added, we will recommend collecting the k8s.cluster.uid through the + // official APIs. In the meantime, we are able to use the uid of the + // kube-system namespace as a proxy for cluster ID. Read on for the + // rationale.Every object created in a K8S cluster is assigned a distinct UID. The + // kube-system namespace is used by Kubernetes itself and will exist + // for the lifetime of the cluster. Using the uid of the kube-system + // namespace is a reasonable proxy for the K8S ClusterID as it will only + // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are + // UUIDs as standardized by + // ISO/IEC 9834-8 and ITU-T X.667. + // Which states:
    + // If generated according to one of the mechanisms defined in Rec.
    + // ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be + // different from all other UUIDs generated before 3603 A.D., or is + // extremely likely to be different (depending on the mechanism + // chosen).Therefore, UIDs between clusters should be extremely unlikely to + // conflict. + AttributeK8SClusterUID = "k8s.cluster.uid" + // The name of the Container from Pod specification, must be unique within a Pod. + // Container runtime usually uses different globally unique name (container.name). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'redis' + AttributeK8SContainerName = "k8s.container.name" + // Number of times the container was restarted. This attribute can be used to + // identify a particular container (running or stopped) within a container spec. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + AttributeK8SContainerRestartCount = "k8s.container.restart_count" + // Last terminated reason of the Container. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Evicted', 'Error' + AttributeK8SContainerStatusLastTerminatedReason = "k8s.container.status.last_terminated_reason" + // The name of the CronJob. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry' + AttributeK8SCronJobName = "k8s.cronjob.name" + // The UID of the CronJob. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SCronJobUID = "k8s.cronjob.uid" + // The name of the DaemonSet. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry' + AttributeK8SDaemonSetName = "k8s.daemonset.name" + // The UID of the DaemonSet. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SDaemonSetUID = "k8s.daemonset.uid" + // The name of the Deployment. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry' + AttributeK8SDeploymentName = "k8s.deployment.name" + // The UID of the Deployment. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SDeploymentUID = "k8s.deployment.uid" + // The name of the Job. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry' + AttributeK8SJobName = "k8s.job.name" + // The UID of the Job. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SJobUID = "k8s.job.uid" + // The name of the namespace that the pod is running in. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'default' + AttributeK8SNamespaceName = "k8s.namespace.name" + // The name of the Node. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'node-1' + AttributeK8SNodeName = "k8s.node.name" + // The UID of the Node. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' + AttributeK8SNodeUID = "k8s.node.uid" + // The name of the Pod. 
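
The k8s.cluster.uid note describes a concrete lookup. A sketch using client-go (an assumed dependency for illustration, not something this patch adds):

    import (
    	"context"

    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/client-go/kubernetes"
    )

    // clusterUID returns the UID of the kube-system namespace, the proxy for
    // a cluster ID recommended in the note above.
    func clusterUID(ctx context.Context, c kubernetes.Interface) (string, error) {
    	ns, err := c.CoreV1().Namespaces().Get(ctx, "kube-system", metav1.GetOptions{})
    	if err != nil {
    		return "", err
    	}
    	return string(ns.UID), nil
    }
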
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry-pod-autoconf' + AttributeK8SPodName = "k8s.pod.name" + // The UID of the Pod. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SPodUID = "k8s.pod.uid" + // The name of the ReplicaSet. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry' + AttributeK8SReplicaSetName = "k8s.replicaset.name" + // The UID of the ReplicaSet. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SReplicaSetUID = "k8s.replicaset.uid" + // The name of the StatefulSet. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'opentelemetry' + AttributeK8SStatefulSetName = "k8s.statefulset.name" + // The UID of the StatefulSet. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SStatefulSetUID = "k8s.statefulset.uid" +) + +// Describes Linux Memory attributes +const ( + // The Linux Slab memory state + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'reclaimable', 'unreclaimable' + AttributeLinuxMemorySlabState = "linux.memory.slab.state" +) + +const ( + // reclaimable + AttributeLinuxMemorySlabStateReclaimable = "reclaimable" + // unreclaimable + AttributeLinuxMemorySlabStateUnreclaimable = "unreclaimable" +) + +// Log attributes +const ( + // The stream associated with the log. See below for a list of well-known values. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeLogIostream = "log.iostream" +) + +const ( + // Logs from stdout stream + AttributeLogIostreamStdout = "stdout" + // Events from stderr stream + AttributeLogIostreamStderr = "stderr" +) + +// Attributes for a file to which log was emitted. +const ( + // The basename of the file. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'audit.log' + AttributeLogFileName = "log.file.name" + // The basename of the file, with symlinks resolved. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'uuid.log' + AttributeLogFileNameResolved = "log.file.name_resolved" + // The full path to the file. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '/var/log/mysql/audit.log' + AttributeLogFilePath = "log.file.path" + // The full path to the file, with symlinks resolved. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '/var/lib/docker/uuid.log' + AttributeLogFilePathResolved = "log.file.path_resolved" +) + +// The generic attributes that may be used in any Log Record. +const ( + // The complete original Log Record. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '77 <86>1 2015-08-06T21:58:59.694Z 192.168.2.133 inactive - - - + // Something happened', '[INFO] 8/3/24 12:34:56 Something happened' + // Note: This value MAY be added when processing a Log Record which was originally + // transmitted as a string or equivalent data type AND the Body field of the Log + // Record does not contain the same value. (e.g. 
a syslog or a log record read + // from a file.) + AttributeLogRecordOriginal = "log.record.original" + // A unique identifier for the Log Record. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV' + // Note: If an id is provided, other log records with the same id will be + // considered duplicates and can be removed safely. This means that two + // distinguishable log records MUST have different values. + // The id MAY be a Universally Unique Lexicographically Sortable Identifier + // (ULID), but other identifiers (e.g. UUID) may be used as needed. + AttributeLogRecordUID = "log.record.uid" +) + +// Attributes describing telemetry around messaging systems and messaging +// activities. +const ( + // The number of messages sent, received, or processed in the scope of the + // batching operation. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 0, 1, 2 + // Note: Instrumentations SHOULD NOT set messaging.batch.message_count on spans + // that operate with a single message. When a messaging client library supports + // both batch and single-message API for the same operation, instrumentations + // SHOULD use messaging.batch.message_count for batching APIs and SHOULD NOT use + // it for single-message APIs. + AttributeMessagingBatchMessageCount = "messaging.batch.message_count" + // A unique identifier for the client that consumes or produces a message. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'client-5', 'myhost@8742@s8083jm' + AttributeMessagingClientID = "messaging.client.id" + // The name of the consumer group with which a consumer is associated. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'my-group', 'indexer' + // Note: Semantic conventions for individual messaging systems SHOULD document + // whether messaging.consumer.group.name is applicable and what it means in the + // context of that system. + AttributeMessagingConsumerGroupName = "messaging.consumer.group.name" + // A boolean that is true if the message destination is anonymous (could be + // unnamed or have auto-generated name). + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + AttributeMessagingDestinationAnonymous = "messaging.destination.anonymous" + // The message destination name + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'MyQueue', 'MyTopic' + // Note: Destination name SHOULD uniquely identify a specific queue, topic or + // other entity within the broker. If + // the broker doesn't have such notion, the destination name SHOULD uniquely + // identify the broker. + AttributeMessagingDestinationName = "messaging.destination.name" + // The identifier of the partition messages are sent to or received from, unique + // within the messaging.destination.name. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '1' + AttributeMessagingDestinationPartitionID = "messaging.destination.partition.id" + // The name of the destination subscription from which a message is consumed.
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'subscription-a' + // Note: Semantic conventions for individual messaging systems SHOULD document + // whether messaging.destination.subscription.name is applicable and what it means + // in the context of that system. + AttributeMessagingDestinationSubscriptionName = "messaging.destination.subscription.name" + // Low cardinality representation of the messaging destination name + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '/customers/{customerID}' + // Note: Destination names could be constructed from templates. An example would + // be a destination name involving a user name or product id. Although the + // destination name in this case is of high cardinality, the underlying template + // is of low cardinality and can be effectively used for grouping and aggregation. + AttributeMessagingDestinationTemplate = "messaging.destination.template" + // A boolean that is true if the message destination is temporary and might not + // exist anymore after messages are processed. + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + AttributeMessagingDestinationTemporary = "messaging.destination.temporary" + // The size of the message body in bytes. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 1439 + // Note: This can refer to both the compressed or uncompressed body size. If both + // sizes are known, the uncompressed + // body size should be used. + AttributeMessagingMessageBodySize = "messaging.message.body.size" + // The conversation ID identifying the conversation to which the message belongs, + // represented as a string. Sometimes called "Correlation ID". + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'MyConversationID' + AttributeMessagingMessageConversationID = "messaging.message.conversation_id" + // The size of the message body and metadata in bytes. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 2738 + // Note: This can refer to both the compressed or uncompressed size. If both sizes + // are known, the uncompressed + // size should be used. + AttributeMessagingMessageEnvelopeSize = "messaging.message.envelope.size" + // A value used by the messaging system as an identifier for the message, + // represented as a string. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '452a7c7c7c7048c2f887f61572b18fc2' + AttributeMessagingMessageID = "messaging.message.id" + // The system-specific name of the messaging operation. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'ack', 'nack', 'send' + AttributeMessagingOperationName = "messaging.operation.name" + // A string identifying the type of the messaging operation. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Note: If a custom value is used, it MUST be of low cardinality. + AttributeMessagingOperationType = "messaging.operation.type" + // The messaging system as identified by the client instrumentation. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Note: The actual messaging system may differ from the one known by the client. 
+ // For example, when using Kafka client libraries to communicate with Azure Event + // Hubs, the messaging.system is set to kafka based on the instrumentation's best + // knowledge. + AttributeMessagingSystem = "messaging.system" +) + +const ( + // One or more messages are provided for publishing to an intermediary. If a single message is published, the context of the "Publish" span can be used as the creation context and no "Create" span needs to be created + AttributeMessagingOperationTypePublish = "publish" + // A message is created. "Create" spans always refer to a single message and are used to provide a unique creation context for messages in batch publishing scenarios + AttributeMessagingOperationTypeCreate = "create" + // One or more messages are requested by a consumer. This operation refers to pull-based scenarios, where consumers explicitly call methods of messaging SDKs to receive messages + AttributeMessagingOperationTypeReceive = "receive" + // One or more messages are processed by a consumer + AttributeMessagingOperationTypeProcess = "process" + // One or more messages are settled + AttributeMessagingOperationTypeSettle = "settle" + // Deprecated. Use `process` instead + AttributeMessagingOperationTypeDeliver = "deliver" +) + +const ( + // Apache ActiveMQ + AttributeMessagingSystemActivemq = "activemq" + // Amazon Simple Queue Service (SQS) + AttributeMessagingSystemAWSSqs = "aws_sqs" + // Azure Event Grid + AttributeMessagingSystemEventgrid = "eventgrid" + // Azure Event Hubs + AttributeMessagingSystemEventhubs = "eventhubs" + // Azure Service Bus + AttributeMessagingSystemServicebus = "servicebus" + // Google Cloud Pub/Sub + AttributeMessagingSystemGCPPubsub = "gcp_pubsub" + // Java Message Service + AttributeMessagingSystemJms = "jms" + // Apache Kafka + AttributeMessagingSystemKafka = "kafka" + // RabbitMQ + AttributeMessagingSystemRabbitmq = "rabbitmq" + // Apache RocketMQ + AttributeMessagingSystemRocketmq = "rocketmq" + // Apache Pulsar + AttributeMessagingSystemPulsar = "pulsar" +) + +// This group describes attributes specific to Apache Kafka. +const ( + // Message keys in Kafka are used for grouping alike messages to ensure they're + // processed on the same partition. They differ from messaging.message.id in that + // they're not unique. If the key is null, the attribute MUST NOT be set. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'myKey' + // Note: If the key type is not string, its string representation has to be + // supplied for the attribute. If the key has no unambiguous, canonical string + // form, don't include its value. + AttributeMessagingKafkaMessageKey = "messaging.kafka.message.key" + // A boolean that is true if the message is a tombstone. + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + AttributeMessagingKafkaMessageTombstone = "messaging.kafka.message.tombstone" + // The offset of a record in the corresponding Kafka partition. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 42 + AttributeMessagingKafkaOffset = "messaging.kafka.offset" +) + +// This group describes attributes specific to RabbitMQ. +const ( + // RabbitMQ message routing key.
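
The messaging.kafka.message.key note encodes two rules: a null key means no attribute at all, and non-string keys are carried as their string representation. A sketch (attribute package assumed in scope; kafkaKeyAttr is an illustrative name):

    import "go.opentelemetry.io/otel/attribute"

    // kafkaKeyAttr returns the key attribute, or ok=false when the key is
    // nil and the attribute MUST NOT be set.
    func kafkaKeyAttr(key []byte) (kv attribute.KeyValue, ok bool) {
    	if key == nil {
    		return attribute.KeyValue{}, false
    	}
    	return attribute.String(AttributeMessagingKafkaMessageKey, string(key)), true
    }
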
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'myKey' + AttributeMessagingRabbitmqDestinationRoutingKey = "messaging.rabbitmq.destination.routing_key" + // RabbitMQ message delivery tag + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 123 + AttributeMessagingRabbitmqMessageDeliveryTag = "messaging.rabbitmq.message.delivery_tag" +) + +// This group describes attributes specific to RocketMQ. +const ( + // Model of message consumption. This only applies to consumer spans. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeMessagingRocketmqConsumptionModel = "messaging.rocketmq.consumption_model" + // The delay time level for delay message, which determines the message delay + // time. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 3 + AttributeMessagingRocketmqMessageDelayTimeLevel = "messaging.rocketmq.message.delay_time_level" + // The timestamp in milliseconds that the delay message is expected to be + // delivered to consumer. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 1665987217045 + AttributeMessagingRocketmqMessageDeliveryTimestamp = "messaging.rocketmq.message.delivery_timestamp" + // It is essential for FIFO message. Messages that belong to the same message + // group are always processed one by one within the same consumer group. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'myMessageGroup' + AttributeMessagingRocketmqMessageGroup = "messaging.rocketmq.message.group" + // Key(s) of message, another way to mark message besides message id. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'keyA', 'keyB' + AttributeMessagingRocketmqMessageKeys = "messaging.rocketmq.message.keys" + // The secondary classifier of message besides topic. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'tagA' + AttributeMessagingRocketmqMessageTag = "messaging.rocketmq.message.tag" + // Type of message. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeMessagingRocketmqMessageType = "messaging.rocketmq.message.type" + // Namespace of RocketMQ resources, resources in different namespaces are + // individual. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'myNamespace' + AttributeMessagingRocketmqNamespace = "messaging.rocketmq.namespace" +) + +const ( + // Clustering consumption model + AttributeMessagingRocketmqConsumptionModelClustering = "clustering" + // Broadcasting consumption model + AttributeMessagingRocketmqConsumptionModelBroadcasting = "broadcasting" +) + +const ( + // Normal message + AttributeMessagingRocketmqMessageTypeNormal = "normal" + // FIFO message + AttributeMessagingRocketmqMessageTypeFifo = "fifo" + // Delay message + AttributeMessagingRocketmqMessageTypeDelay = "delay" + // Transaction message + AttributeMessagingRocketmqMessageTypeTransaction = "transaction" +) + +// This group describes attributes specific to GCP Pub/Sub. +const ( + // The ack deadline in seconds set for the modify ack deadline request. 
+ // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 10 + AttributeMessagingGCPPubsubMessageAckDeadline = "messaging.gcp_pubsub.message.ack_deadline" + // The ack id for a given message. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'ack_id' + AttributeMessagingGCPPubsubMessageAckID = "messaging.gcp_pubsub.message.ack_id" + // The delivery attempt for a given message. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 2 + AttributeMessagingGCPPubsubMessageDeliveryAttempt = "messaging.gcp_pubsub.message.delivery_attempt" + // The ordering key for a given message. If the attribute is not present, the + // message does not have an ordering key. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'ordering_key' + AttributeMessagingGCPPubsubMessageOrderingKey = "messaging.gcp_pubsub.message.ordering_key" +) + +// This group describes attributes specific to Azure Service Bus. +const ( + // Describes the settlement type. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeMessagingServicebusDispositionStatus = "messaging.servicebus.disposition_status" + // Number of deliveries that have been attempted for this message. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 2 + AttributeMessagingServicebusMessageDeliveryCount = "messaging.servicebus.message.delivery_count" + // The UTC epoch seconds at which the message has been accepted and stored in the + // entity. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 1701393730 + AttributeMessagingServicebusMessageEnqueuedTime = "messaging.servicebus.message.enqueued_time" +) + +const ( + // Message is completed + AttributeMessagingServicebusDispositionStatusComplete = "complete" + // Message is abandoned + AttributeMessagingServicebusDispositionStatusAbandon = "abandon" + // Message is sent to dead letter queue + AttributeMessagingServicebusDispositionStatusDeadLetter = "dead_letter" + // Message is deferred + AttributeMessagingServicebusDispositionStatusDefer = "defer" +) + +// This group describes attributes specific to Azure Event Hubs. +const ( + // The UTC epoch seconds at which the message has been accepted and stored in the + // entity. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 1701393730 + AttributeMessagingEventhubsMessageEnqueuedTime = "messaging.eventhubs.message.enqueued_time" +) + +// These attributes may be used for any network related operation. +const ( + // The ISO 3166-1 alpha-2 2-character country code associated with the mobile + // carrier network. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'DE' + AttributeNetworkCarrierIcc = "network.carrier.icc" + // The mobile carrier country code. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '310' + AttributeNetworkCarrierMcc = "network.carrier.mcc" + // The mobile carrier network code. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '001' + AttributeNetworkCarrierMnc = "network.carrier.mnc" + // The name of the mobile carrier. 
+ // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'sprint' + AttributeNetworkCarrierName = "network.carrier.name" + // This describes more details regarding the connection.type. It may be the type + // of cell technology connection, but it could be used for describing details + // about a wifi connection. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'LTE' + AttributeNetworkConnectionSubtype = "network.connection.subtype" + // The internet connection type. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'wifi' + AttributeNetworkConnectionType = "network.connection.type" + // The network IO operation direction. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'transmit' + AttributeNetworkIoDirection = "network.io.direction" + // Local address of the network connection - IP address or Unix domain socket + // name. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '10.1.2.80', '/tmp/my.sock' + AttributeNetworkLocalAddress = "network.local.address" + // Local port number of the network connection. + // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 65123 + AttributeNetworkLocalPort = "network.local.port" + // Peer address of the network connection - IP address or Unix domain socket name. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '10.1.2.80', '/tmp/my.sock' + AttributeNetworkPeerAddress = "network.peer.address" + // Peer port number of the network connection. + // + // Type: int + // Requirement Level: Optional + // Stability: stable + // Examples: 65123 + AttributeNetworkPeerPort = "network.peer.port" + // OSI application layer or non-OSI equivalent. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: 'amqp', 'http', 'mqtt' + // Note: The value SHOULD be normalized to lowercase. + AttributeNetworkProtocolName = "network.protocol.name" + // The actual version of the protocol used for network communication. + // + // Type: string + // Requirement Level: Optional + // Stability: stable + // Examples: '1.1', '2' + // Note: If protocol version is subject to negotiation (for example using ALPN), + // this attribute SHOULD be set to the negotiated version. If the actual protocol + // version is not known, this attribute SHOULD NOT be set. + AttributeNetworkProtocolVersion = "network.protocol.version" + // OSI transport layer or inter-process communication method. + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + // Examples: 'tcp', 'udp' + // Note: The value SHOULD be normalized to lowercase.Consider always setting the + // transport when setting a port number, since + // a port number is ambiguous without knowing the transport. For example + // different processes could be listening on TCP port 12345 and UDP port 12345. + AttributeNetworkTransport = "network.transport" + // OSI network layer or non-OSI equivalent. + // + // Type: Enum + // Requirement Level: Optional + // Stability: stable + // Examples: 'ipv4', 'ipv6' + // Note: The value SHOULD be normalized to lowercase. 
+ AttributeNetworkType = "network.type"
+)
+
+const (
+ // GPRS
+ AttributeNetworkConnectionSubtypeGprs = "gprs"
+ // EDGE
+ AttributeNetworkConnectionSubtypeEdge = "edge"
+ // UMTS
+ AttributeNetworkConnectionSubtypeUmts = "umts"
+ // CDMA
+ AttributeNetworkConnectionSubtypeCdma = "cdma"
+ // EVDO Rel. 0
+ AttributeNetworkConnectionSubtypeEvdo0 = "evdo_0"
+ // EVDO Rev. A
+ AttributeNetworkConnectionSubtypeEvdoA = "evdo_a"
+ // CDMA2000 1XRTT
+ AttributeNetworkConnectionSubtypeCdma20001xrtt = "cdma2000_1xrtt"
+ // HSDPA
+ AttributeNetworkConnectionSubtypeHsdpa = "hsdpa"
+ // HSUPA
+ AttributeNetworkConnectionSubtypeHsupa = "hsupa"
+ // HSPA
+ AttributeNetworkConnectionSubtypeHspa = "hspa"
+ // IDEN
+ AttributeNetworkConnectionSubtypeIden = "iden"
+ // EVDO Rev. B
+ AttributeNetworkConnectionSubtypeEvdoB = "evdo_b"
+ // LTE
+ AttributeNetworkConnectionSubtypeLte = "lte"
+ // EHRPD
+ AttributeNetworkConnectionSubtypeEhrpd = "ehrpd"
+ // HSPAP
+ AttributeNetworkConnectionSubtypeHspap = "hspap"
+ // GSM
+ AttributeNetworkConnectionSubtypeGsm = "gsm"
+ // TD-SCDMA
+ AttributeNetworkConnectionSubtypeTdScdma = "td_scdma"
+ // IWLAN
+ AttributeNetworkConnectionSubtypeIwlan = "iwlan"
+ // 5G NR (New Radio)
+ AttributeNetworkConnectionSubtypeNr = "nr"
+ // 5G NRNSA (New Radio Non-Standalone)
+ AttributeNetworkConnectionSubtypeNrnsa = "nrnsa"
+ // LTE CA
+ AttributeNetworkConnectionSubtypeLteCa = "lte_ca"
+)
+
+const (
+ // wifi
+ AttributeNetworkConnectionTypeWifi = "wifi"
+ // wired
+ AttributeNetworkConnectionTypeWired = "wired"
+ // cell
+ AttributeNetworkConnectionTypeCell = "cell"
+ // unavailable
+ AttributeNetworkConnectionTypeUnavailable = "unavailable"
+ // unknown
+ AttributeNetworkConnectionTypeUnknown = "unknown"
+)
+
+const (
+ // transmit
+ AttributeNetworkIoDirectionTransmit = "transmit"
+ // receive
+ AttributeNetworkIoDirectionReceive = "receive"
+)
+
+const (
+ // TCP
+ AttributeNetworkTransportTCP = "tcp"
+ // UDP
+ AttributeNetworkTransportUDP = "udp"
+ // Named or anonymous pipe
+ AttributeNetworkTransportPipe = "pipe"
+ // Unix domain socket
+ AttributeNetworkTransportUnix = "unix"
+ // QUIC
+ AttributeNetworkTransportQUIC = "quic"
+)
+
+const (
+ // IPv4
+ AttributeNetworkTypeIpv4 = "ipv4"
+ // IPv6
+ AttributeNetworkTypeIpv6 = "ipv6"
+)
+
+// An OCI image manifest.
+const (
+ // The digest of the OCI image manifest. For container images specifically, this
+ // is the digest by which the container image is known.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples:
+ // 'sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4'
+ // Note: Follows OCI Image Manifest Specification, and specifically the Digest
+ // property.
+ // An example can be found in Example Image Manifest.
+ AttributeOciManifestDigest = "oci.manifest.digest"
+)
+
+// Attributes used by the OpenTracing Shim layer.
+const (
+ // Parent-child Reference type
+ //
+ // Type: Enum
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Note: The causal relationship between a child Span and a parent Span.
+ AttributeOpentracingRefType = "opentracing.ref_type"
+)
+
+const (
+ // The parent Span depends on the child Span in some capacity
+ AttributeOpentracingRefTypeChildOf = "child_of"
+ // The parent Span doesn't depend in any way on the result of the child Span
+ AttributeOpentracingRefTypeFollowsFrom = "follows_from"
+)
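+
+// The network.transport note above recommends recording a transport next to
+// any port attribute. A minimal illustrative sketch, not part of the generated
+// conventions; it assumes an import of
+// go.opentelemetry.io/collector/pdata/pcommon:
+//
+//    // putPeerPort records a peer port together with the transport that
+//    // disambiguates it, e.g. putPeerPort(attrs, 12345, AttributeNetworkTransportTCP).
+//    func putPeerPort(attrs pcommon.Map, port int64, transport string) {
+//        attrs.PutInt(AttributeNetworkPeerPort, port)
+//        attrs.PutStr(AttributeNetworkTransport, transport)
+//    }
+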
+// The operating system (OS) on which the process represented by this resource
+// is running.
+const (
+ // Unique identifier for a particular build or compilation of the operating
+ // system.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'TQ3C.230805.001.B2', '20E247', '22621'
+ AttributeOSBuildID = "os.build_id"
+ // Human readable (not intended to be parsed) OS version information, as
+ // reported for example by the ver or lsb_release -a commands.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 LTS'
+ AttributeOSDescription = "os.description"
+ // Human readable operating system name.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'iOS', 'Android', 'Ubuntu'
+ AttributeOSName = "os.name"
+ // The operating system type.
+ //
+ // Type: Enum
+ // Requirement Level: Optional
+ // Stability: experimental
+ AttributeOSType = "os.type"
+ // The version string of the operating system as defined in Version Attributes.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: '14.2.1', '18.04.1'
+ AttributeOSVersion = "os.version"
+)
+
+const (
+ // Microsoft Windows
+ AttributeOSTypeWindows = "windows"
+ // Linux
+ AttributeOSTypeLinux = "linux"
+ // Apple Darwin
+ AttributeOSTypeDarwin = "darwin"
+ // FreeBSD
+ AttributeOSTypeFreeBSD = "freebsd"
+ // NetBSD
+ AttributeOSTypeNetBSD = "netbsd"
+ // OpenBSD
+ AttributeOSTypeOpenBSD = "openbsd"
+ // DragonFly BSD
+ AttributeOSTypeDragonflyBSD = "dragonflybsd"
+ // HP-UX (Hewlett Packard Unix)
+ AttributeOSTypeHPUX = "hpux"
+ // AIX (Advanced Interactive eXecutive)
+ AttributeOSTypeAIX = "aix"
+ // SunOS, Oracle Solaris
+ AttributeOSTypeSolaris = "solaris"
+ // IBM z/OS
+ AttributeOSTypeZOS = "z_os"
+)
+
+// Attributes reserved for OpenTelemetry
+const (
+ // Name of the code, either "OK" or "ERROR". MUST NOT be set if the status code
+ // is UNSET (see the sketch after the scope attributes below).
+ //
+ // Type: Enum
+ // Requirement Level: Optional
+ // Stability: stable
+ AttributeOTelStatusCode = "otel.status_code"
+ // Description of the Status if it has a value, otherwise not set.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: 'resource not found'
+ AttributeOTelStatusDescription = "otel.status_description"
+)
+
+const (
+ // The operation has been validated by an Application developer or Operator to have completed successfully
+ AttributeOTelStatusCodeOk = "OK"
+ // The operation contains an error
+ AttributeOTelStatusCodeError = "ERROR"
+)
+
+// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's
+// concepts.
+const (
+ // The name of the instrumentation scope - (InstrumentationScope.Name in OTLP).
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: 'io.opentelemetry.contrib.mongodb'
+ AttributeOTelScopeName = "otel.scope.name"
+ // The version of the instrumentation scope - (InstrumentationScope.Version in
+ // OTLP).
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: '1.0.0'
+ AttributeOTelScopeVersion = "otel.scope.version"
+)
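+
+// A minimal illustrative sketch of the otel.status_code rule above, not part
+// of the generated conventions; it assumes imports of
+// go.opentelemetry.io/collector/pdata/pcommon and
+// go.opentelemetry.io/collector/pdata/ptrace:
+//
+//    // putStatus copies a span status onto an attribute map, leaving
+//    // otel.status_code unset when the status code is UNSET, as required.
+//    func putStatus(status ptrace.Status, attrs pcommon.Map) {
+//        switch status.Code() {
+//        case ptrace.StatusCodeOk:
+//            attrs.PutStr(AttributeOTelStatusCode, AttributeOTelStatusCodeOk)
+//        case ptrace.StatusCodeError:
+//            attrs.PutStr(AttributeOTelStatusCode, AttributeOTelStatusCodeError)
+//            if msg := status.Message(); msg != "" {
+//                attrs.PutStr(AttributeOTelStatusDescription, msg)
+//            }
+//        }
+//    }
+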
+// Operations that access some remote service.
+const (
+ // The service.name of the remote service. SHOULD be equal to the actual
+ // service.name resource attribute of the remote service if any.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'AuthTokenCache'
+ AttributePeerService = "peer.service"
+)
+
+// An operating system process.
+const (
+ // The command used to launch the process (i.e. the command name). On Linux based
+ // systems, can be set to the zeroth string in proc/[pid]/cmdline. On Windows, can
+ // be set to the first parameter extracted from GetCommandLineW.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'cmd/otelcol'
+ AttributeProcessCommand = "process.command"
+ // All the command arguments (including the command/executable itself) as received
+ // by the process. On Linux-based systems (and some other Unixoid systems
+ // supporting procfs), can be set according to the list of null-delimited strings
+ // extracted from proc/[pid]/cmdline (see the sketch after this attribute group).
+ // For libc-based executables, this would be the full argv vector passed to main.
+ //
+ // Type: string[]
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'cmd/otelcol', '--config=config.yaml'
+ AttributeProcessCommandArgs = "process.command_args"
+ // The full command used to launch the process as a single string representing the
+ // full command. On Windows, can be set to the result of GetCommandLineW. Do not
+ // set this if you have to assemble it just for monitoring; use
+ // process.command_args instead.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'C:\\cmd\\otelcol --config="my directory\\config.yaml"'
+ AttributeProcessCommandLine = "process.command_line"
+ // Specifies whether the context switches for this data point were voluntary or
+ // involuntary.
+ //
+ // Type: Enum
+ // Requirement Level: Optional
+ // Stability: experimental
+ AttributeProcessContextSwitchType = "process.context_switch_type"
+ // The date and time the process was created, in ISO 8601 format.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: '2023-11-21T09:25:34.853Z'
+ AttributeProcessCreationTime = "process.creation.time"
+ // The name of the process executable. On Linux based systems, can be set to the
+ // Name in proc/[pid]/status. On Windows, can be set to the base name of
+ // GetProcessImageFileNameW.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'otelcol'
+ AttributeProcessExecutableName = "process.executable.name"
+ // The full path to the process executable. On Linux based systems, can be set to
+ // the target of proc/[pid]/exe. On Windows, can be set to the result of
+ // GetProcessImageFileNameW.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: '/usr/bin/cmd/otelcol'
+ AttributeProcessExecutablePath = "process.executable.path"
+ // The exit code of the process.
+ //
+ // Type: int
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 127
+ AttributeProcessExitCode = "process.exit.code"
+ // The date and time the process exited, in ISO 8601 format.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: '2023-11-21T09:26:12.315Z'
+ AttributeProcessExitTime = "process.exit.time"
+ // The PID of the process's group leader. This is also the process group ID (PGID)
+ // of the process.
+ // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 23 + AttributeProcessGroupLeaderPID = "process.group_leader.pid" + // Whether the process is connected to an interactive shell. + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + AttributeProcessInteractive = "process.interactive" + // The username of the user that owns the process. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'root' + AttributeProcessOwner = "process.owner" + // The type of page fault for this data point. Type major is for major/hard page + // faults, and minor is for minor/soft page faults. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeProcessPagingFaultType = "process.paging.fault_type" + // Parent Process identifier (PPID). + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 111 + AttributeProcessParentPID = "process.parent_pid" + // Process identifier (PID). + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 1234 + AttributeProcessPID = "process.pid" + // The real user ID (RUID) of the process. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 1000 + AttributeProcessRealUserID = "process.real_user.id" + // The username of the real user of the process. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'operator' + AttributeProcessRealUserName = "process.real_user.name" + // An additional description about the runtime of the process, for example a + // specific vendor customization of the runtime environment. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' + AttributeProcessRuntimeDescription = "process.runtime.description" + // The name of the runtime of this process. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'OpenJDK Runtime Environment' + AttributeProcessRuntimeName = "process.runtime.name" + // The version of the runtime of this process, as returned by the runtime without + // modification. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '14.0.2' + AttributeProcessRuntimeVersion = "process.runtime.version" + // The saved user ID (SUID) of the process. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 1002 + AttributeProcessSavedUserID = "process.saved_user.id" + // The username of the saved user. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'operator' + AttributeProcessSavedUserName = "process.saved_user.name" + // The PID of the process's session leader. This is also the session ID (SID) of + // the process. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 14 + AttributeProcessSessionLeaderPID = "process.session_leader.pid" + // The effective user ID (EUID) of the process. + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 1001 + AttributeProcessUserID = "process.user.id" + // The username of the effective user of the process. 
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'root'
+ AttributeProcessUserName = "process.user.name"
+ // Virtual process identifier.
+ //
+ // Type: int
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 12
+ // Note: The process ID within a PID namespace. This is not necessarily unique
+ // across all processes on the host but it is unique within the process namespace
+ // that the process exists within.
+ AttributeProcessVpid = "process.vpid"
+)
+
+const (
+ // voluntary
+ AttributeProcessContextSwitchTypeVoluntary = "voluntary"
+ // involuntary
+ AttributeProcessContextSwitchTypeInvoluntary = "involuntary"
+)
+
+const (
+ // major
+ AttributeProcessPagingFaultTypeMajor = "major"
+ // minor
+ AttributeProcessPagingFaultTypeMinor = "minor"
+)
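+
+// A minimal illustrative sketch of the proc/[pid]/cmdline guidance for
+// process.command and process.command_args above. It is not part of the
+// generated conventions, is Linux-only, and the helper name is hypothetical;
+// it assumes imports of fmt, os and strings:
+//
+//    // readCmdline returns the null-delimited argv of a process: element 0 is
+//    // a candidate for process.command, the whole slice for process.command_args.
+//    func readCmdline(pid int) ([]string, error) {
+//        raw, err := os.ReadFile(fmt.Sprintf("/proc/%d/cmdline", pid))
+//        if err != nil {
+//            return nil, err
+//        }
+//        return strings.Split(strings.TrimRight(string(raw), "\x00"), "\x00"), nil
+//    }
+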
+// Attributes for remote procedure calls.
+const (
+ // The error codes of the Connect request. Error codes are always string values.
+ //
+ // Type: Enum
+ // Requirement Level: Optional
+ // Stability: experimental
+ AttributeRPCConnectRPCErrorCode = "rpc.connect_rpc.error_code"
+ // The numeric status code of the gRPC request.
+ //
+ // Type: Enum
+ // Requirement Level: Optional
+ // Stability: experimental
+ AttributeRPCGRPCStatusCode = "rpc.grpc.status_code"
+ // error.code property of response if it is an error response.
+ //
+ // Type: int
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: -32700, 100
+ AttributeRPCJsonrpcErrorCode = "rpc.jsonrpc.error_code"
+ // error.message property of response if it is an error response.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'Parse error', 'User already exists'
+ AttributeRPCJsonrpcErrorMessage = "rpc.jsonrpc.error_message"
+ // id property of request or response. Since protocol allows id to be int, string,
+ // null or missing (for notifications), value is expected to be cast to string for
+ // simplicity. Use empty string in case of null value. Omit entirely if this is a
+ // notification.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: '10', 'request-7', ''
+ AttributeRPCJsonrpcRequestID = "rpc.jsonrpc.request_id"
+ // Protocol version as in jsonrpc property of request/response. Since JSON-RPC 1.0
+ // doesn't specify this, the value can be omitted.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: '2.0', '1.0'
+ AttributeRPCJsonrpcVersion = "rpc.jsonrpc.version"
+ // Compressed size of the message in bytes.
+ //
+ // Type: int
+ // Requirement Level: Optional
+ // Stability: experimental
+ AttributeRPCMessageCompressedSize = "rpc.message.compressed_size"
+ // MUST be calculated as two different counters starting from 1: one for sent
+ // messages and one for received messages (see the sketch after the status code
+ // values below).
+ //
+ // Type: int
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Note: This way we guarantee that the values will be consistent between
+ // different implementations.
+ AttributeRPCMessageID = "rpc.message.id"
+ // Whether this is a received or sent message.
+ //
+ // Type: Enum
+ // Requirement Level: Optional
+ // Stability: experimental
+ AttributeRPCMessageType = "rpc.message.type"
+ // Uncompressed size of the message in bytes.
+ //
+ // Type: int
+ // Requirement Level: Optional
+ // Stability: experimental
+ AttributeRPCMessageUncompressedSize = "rpc.message.uncompressed_size"
+ // The name of the (logical) method being called, must be equal to the $method
+ // part in the span name.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'exampleMethod'
+ // Note: This is the logical name of the method from the RPC interface
+ // perspective, which can be different from the name of any implementing
+ // method/function. The code.function attribute may be used to store the latter
+ // (e.g., method actually executing the call on the server side, RPC client stub
+ // method on the client side).
+ AttributeRPCMethod = "rpc.method"
+ // The full (logical) name of the service being called, including its package
+ // name, if applicable.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'myservice.EchoService'
+ // Note: This is the logical name of the service from the RPC interface
+ // perspective, which can be different from the name of any implementing class.
+ // The code.namespace attribute may be used to store the latter (despite the
+ // attribute name, it may include a class name; e.g., class with method actually
+ // executing the call on the server side, RPC client stub class on the client
+ // side).
+ AttributeRPCService = "rpc.service"
+ // A string identifying the remoting system. See below for a list of well-known
+ // identifiers.
+ //
+ // Type: Enum
+ // Requirement Level: Optional
+ // Stability: experimental
+ AttributeRPCSystem = "rpc.system"
+)
+
+const (
+ // cancelled
+ AttributeRPCConnectRPCErrorCodeCancelled = "cancelled"
+ // unknown
+ AttributeRPCConnectRPCErrorCodeUnknown = "unknown"
+ // invalid_argument
+ AttributeRPCConnectRPCErrorCodeInvalidArgument = "invalid_argument"
+ // deadline_exceeded
+ AttributeRPCConnectRPCErrorCodeDeadlineExceeded = "deadline_exceeded"
+ // not_found
+ AttributeRPCConnectRPCErrorCodeNotFound = "not_found"
+ // already_exists
+ AttributeRPCConnectRPCErrorCodeAlreadyExists = "already_exists"
+ // permission_denied
+ AttributeRPCConnectRPCErrorCodePermissionDenied = "permission_denied"
+ // resource_exhausted
+ AttributeRPCConnectRPCErrorCodeResourceExhausted = "resource_exhausted"
+ // failed_precondition
+ AttributeRPCConnectRPCErrorCodeFailedPrecondition = "failed_precondition"
+ // aborted
+ AttributeRPCConnectRPCErrorCodeAborted = "aborted"
+ // out_of_range
+ AttributeRPCConnectRPCErrorCodeOutOfRange = "out_of_range"
+ // unimplemented
+ AttributeRPCConnectRPCErrorCodeUnimplemented = "unimplemented"
+ // internal
+ AttributeRPCConnectRPCErrorCodeInternal = "internal"
+ // unavailable
+ AttributeRPCConnectRPCErrorCodeUnavailable = "unavailable"
+ // data_loss
+ AttributeRPCConnectRPCErrorCodeDataLoss = "data_loss"
+ // unauthenticated
+ AttributeRPCConnectRPCErrorCodeUnauthenticated = "unauthenticated"
+)
+
+const (
+ // OK
+ AttributeRPCGRPCStatusCodeOk = "0"
+ // CANCELLED
+ AttributeRPCGRPCStatusCodeCancelled = "1"
+ // UNKNOWN
+ AttributeRPCGRPCStatusCodeUnknown = "2"
+ // INVALID_ARGUMENT
+ AttributeRPCGRPCStatusCodeInvalidArgument = "3"
+ // DEADLINE_EXCEEDED
+ AttributeRPCGRPCStatusCodeDeadlineExceeded = "4"
+ // NOT_FOUND
+ AttributeRPCGRPCStatusCodeNotFound = "5"
+ // ALREADY_EXISTS
+ AttributeRPCGRPCStatusCodeAlreadyExists = "6"
+ // PERMISSION_DENIED
+ AttributeRPCGRPCStatusCodePermissionDenied = "7"
+ // RESOURCE_EXHAUSTED
+ AttributeRPCGRPCStatusCodeResourceExhausted = "8"
+ // FAILED_PRECONDITION
+ AttributeRPCGRPCStatusCodeFailedPrecondition = "9"
+ // ABORTED
+ AttributeRPCGRPCStatusCodeAborted = "10"
+ // OUT_OF_RANGE
+ AttributeRPCGRPCStatusCodeOutOfRange = "11"
+ // UNIMPLEMENTED
+ AttributeRPCGRPCStatusCodeUnimplemented = "12"
+ // INTERNAL
+ AttributeRPCGRPCStatusCodeInternal = "13"
+ // UNAVAILABLE
+ AttributeRPCGRPCStatusCodeUnavailable = "14"
+ // DATA_LOSS
+ AttributeRPCGRPCStatusCodeDataLoss = "15"
+ // UNAUTHENTICATED
+ AttributeRPCGRPCStatusCodeUnauthenticated = "16"
+)
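+
+// A minimal illustrative sketch of the rpc.message.id counting rule above,
+// not part of the generated conventions; the type and method names are
+// hypothetical:
+//
+//    // msgCounters keeps one counter per direction; both start at 1 so the
+//    // values stay consistent between implementations.
+//    type msgCounters struct{ sent, received int64 }
+//
+//    // next returns the rpc.message.id for the given rpc.message.type value.
+//    func (c *msgCounters) next(messageType string) int64 {
+//        if messageType == AttributeRPCMessageTypeSent {
+//            c.sent++
+//            return c.sent
+//        }
+//        c.received++
+//        return c.received
+//    }
+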
+const (
+ // sent
+ AttributeRPCMessageTypeSent = "SENT"
+ // received
+ AttributeRPCMessageTypeReceived = "RECEIVED"
+)
+
+const (
+ // gRPC
+ AttributeRPCSystemGRPC = "grpc"
+ // Java RMI
+ AttributeRPCSystemJavaRmi = "java_rmi"
+ // .NET WCF
+ AttributeRPCSystemDotnetWcf = "dotnet_wcf"
+ // Apache Dubbo
+ AttributeRPCSystemApacheDubbo = "apache_dubbo"
+ // Connect RPC
+ AttributeRPCSystemConnectRPC = "connect_rpc"
+)
+
+// These attributes may be used to describe the server in a connection-based
+// network interaction where there is one side that initiates the connection
+// (the client is the side that initiates the connection). This covers all TCP
+// network interactions since TCP is connection-based and one side initiates
+// the connection (an exception is made for peer-to-peer communication over TCP
+// where the "user-facing" surface of the protocol / API doesn't expose a clear
+// notion of client and server). This also covers UDP network interactions
+// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS.
+const (
+ // Server domain name if available without reverse DNS lookup; otherwise, IP
+ // address or Unix domain socket name.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: 'example.com', '10.1.2.80', '/tmp/my.sock'
+ // Note: When observed from the client side, and when communicating through an
+ // intermediary, server.address SHOULD represent the server address behind any
+ // intermediaries, for example proxies, if it's available.
+ AttributeServerAddress = "server.address"
+ // Server port number.
+ //
+ // Type: int
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: 80, 8080, 443
+ // Note: When observed from the client side, and when communicating through an
+ // intermediary, server.port SHOULD represent the server port behind any
+ // intermediaries, for example proxies, if it's available.
+ AttributeServerPort = "server.port"
+)
+
+// A service instance.
+const (
+ // The string ID of the service instance.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: '627cc493-f310-47de-96bd-71410b7dec09'
+ // Note: MUST be unique for each instance of the same
+ // service.namespace,service.name pair (in other words
+ // service.namespace,service.name,service.instance.id triplet MUST be globally
+ // unique). The ID helps to distinguish instances of the same service that exist
+ // at the same time (e.g. instances of a horizontally scaled service).
+ // Implementations, such as SDKs, are recommended to generate a random Version 1
+ // or Version 4 RFC 4122 UUID, but are free to use an inherent unique ID as the
+ // source of this value if stability is desirable. In that case, the ID SHOULD be
+ // used as the source of a UUID Version 5 and SHOULD use the following UUID as
+ // the namespace: 4d63009a-8d0f-11ee-aad7-4c796ed8e320 (see the sketch after this
+ // group). UUIDs are typically recommended, as only an opaque value for the
+ // purposes of identifying a service instance is needed. Similar to what can be
+ // seen in the man page for the /etc/machine-id file, the underlying data, such
+ // as pod name and namespace, should be treated as confidential; it is the user's
+ // choice to expose it or not via another resource attribute. For applications
+ // running behind an application server (like unicorn), we do not recommend using
+ // one identifier for all processes participating in the application. Instead,
+ // it's recommended that each division (e.g. a worker thread in unicorn) have its
+ // own instance.id. It's not recommended for a Collector to set
+ // service.instance.id if it can't unambiguously determine the service instance
+ // that is generating that telemetry. For instance, creating a UUID based on
+ // pod.name will likely be wrong, as the Collector might not know from which
+ // container within that pod the telemetry originated. However, Collectors can
+ // set the service.instance.id if they can unambiguously determine the service
+ // instance for that telemetry. This is typically the case for scraping
+ // receivers, as they know the target address and port.
+ AttributeServiceInstanceID = "service.instance.id"
+ // Logical name of the service.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: 'shoppingcart'
+ // Note: MUST be the same for all instances of horizontally scaled services. If
+ // the value was not specified, SDKs MUST fall back to unknown_service:
+ // concatenated with process.executable.name, e.g. unknown_service:bash. If
+ // process.executable.name is not available, the value MUST be set to
+ // unknown_service.
+ AttributeServiceName = "service.name"
+ // A namespace for service.name.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'Shop'
+ // Note: A string value having a meaning that helps to distinguish a group of
+ // services, for example the team name that owns a group of services. service.name
+ // is expected to be unique within the same namespace. If service.namespace is not
+ // specified in the Resource then service.name is expected to be unique for all
+ // services that have no explicit namespace defined (so the empty/unspecified
+ // namespace is simply one more valid namespace). Zero-length namespace string is
+ // assumed equal to unspecified namespace.
+ AttributeServiceNamespace = "service.namespace"
+ // The version string of the service API or implementation. The format is not
+ // defined by these conventions.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: '2.0.0', 'a01dbef8a'
+ AttributeServiceVersion = "service.version"
+)
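+
+// A minimal illustrative sketch of the UUID Version 5 fallback described in
+// the service.instance.id note above. It is not part of the generated
+// conventions and assumes an import of github.com/google/uuid, whose NewSHA1
+// produces Version 5 UUIDs:
+//
+//    // instanceIDNamespace is the namespace UUID mandated by the note above.
+//    var instanceIDNamespace = uuid.MustParse("4d63009a-8d0f-11ee-aad7-4c796ed8e320")
+//
+//    // stableInstanceID derives service.instance.id from an inherently unique,
+//    // stable source ID (e.g. a machine-id) instead of a random UUID.
+//    func stableInstanceID(sourceID string) string {
+//        return uuid.NewSHA1(instanceIDNamespace, []byte(sourceID)).String()
+//    }
+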
+// Session is defined as the period of time encompassing all activities
+// performed by the application and the actions executed by the end user.
+// Consequently, a Session is represented as a collection of Logs, Events, and
+// Spans emitted by the Client Application throughout the Session's duration.
+// Each Session is assigned a unique identifier, which is included as an
+// attribute in the Logs, Events, and Spans generated during the Session's
+// lifecycle.
+// When a session reaches end of life, typically due to user inactivity or
+// session timeout, a new session identifier will be assigned. The previous
+// session identifier may be provided by the instrumentation so that telemetry
+// backends can link the two sessions.
+const (
+ // A unique id to identify a session.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: '00112233-4455-6677-8899-aabbccddeeff'
+ AttributeSessionID = "session.id"
+ // The previous session.id for this user, when known.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: '00112233-4455-6677-8899-aabbccddeeff'
+ AttributeSessionPreviousID = "session.previous_id"
+)
+
+// SignalR attributes
+const (
+ // SignalR HTTP connection closure status.
+ //
+ // Type: Enum
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: 'app_shutdown', 'timeout'
+ AttributeSignalrConnectionStatus = "signalr.connection.status"
+ // SignalR transport type.
+ //
+ // Type: Enum
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: 'web_sockets', 'long_polling'
+ AttributeSignalrTransport = "signalr.transport"
+)
+
+const (
+ // The connection was closed normally
+ AttributeSignalrConnectionStatusNormalClosure = "normal_closure"
+ // The connection was closed due to a timeout
+ AttributeSignalrConnectionStatusTimeout = "timeout"
+ // The connection was closed because the app is shutting down
+ AttributeSignalrConnectionStatusAppShutdown = "app_shutdown"
+)
+
+const (
+ // ServerSentEvents protocol
+ AttributeSignalrTransportServerSentEvents = "server_sent_events"
+ // LongPolling protocol
+ AttributeSignalrTransportLongPolling = "long_polling"
+ // WebSockets protocol
+ AttributeSignalrTransportWebSockets = "web_sockets"
+)
+
+// These attributes may be used to describe the sender of a network
+// exchange/packet. These should be used when there is no client/server
+// relationship between the two sides, or when that relationship is unknown.
+// This covers low-level network interactions (e.g. packet tracing) where you
+// don't know if there was a connection or which side initiated it. This also
+// covers unidirectional UDP flows and peer-to-peer communication where the
+// "user-facing" surface of the protocol / API doesn't expose a clear notion of
+// client and server.
+const (
+ // Source address - domain name if available without reverse DNS lookup;
+ // otherwise, IP address or Unix domain socket name.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'source.example.com', '10.1.2.80', '/tmp/my.sock'
+ // Note: When observed from the destination side, and when communicating through
+ // an intermediary, source.address SHOULD represent the source address behind any
+ // intermediaries, for example proxies, if it's available.
+ AttributeSourceAddress = "source.address" + // Source port number + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 3389, 2888 + AttributeSourcePort = "source.port" +) + +// Describes System attributes +const ( + // The device identifier + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '(identifier)' + AttributeSystemDevice = "system.device" +) + +// Describes System CPU attributes +const ( + // The logical CPU number [0..n-1] + // + // Type: int + // Requirement Level: Optional + // Stability: experimental + // Examples: 1 + AttributeSystemCPULogicalNumber = "system.cpu.logical_number" +) + +// Describes System Memory attributes +const ( + // The memory state + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'free', 'cached' + AttributeSystemMemoryState = "system.memory.state" +) + +const ( + // used + AttributeSystemMemoryStateUsed = "used" + // free + AttributeSystemMemoryStateFree = "free" + // shared + AttributeSystemMemoryStateShared = "shared" + // buffers + AttributeSystemMemoryStateBuffers = "buffers" + // cached + AttributeSystemMemoryStateCached = "cached" +) + +// Describes System Memory Paging attributes +const ( + // The paging access direction + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'in' + AttributeSystemPagingDirection = "system.paging.direction" + // The memory paging state + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'free' + AttributeSystemPagingState = "system.paging.state" + // The memory paging type + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'minor' + AttributeSystemPagingType = "system.paging.type" +) + +const ( + // in + AttributeSystemPagingDirectionIn = "in" + // out + AttributeSystemPagingDirectionOut = "out" +) + +const ( + // used + AttributeSystemPagingStateUsed = "used" + // free + AttributeSystemPagingStateFree = "free" +) + +const ( + // major + AttributeSystemPagingTypeMajor = "major" + // minor + AttributeSystemPagingTypeMinor = "minor" +) + +// Describes Filesystem attributes +const ( + // The filesystem mode + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'rw, ro' + AttributeSystemFilesystemMode = "system.filesystem.mode" + // The filesystem mount path + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '/mnt/data' + AttributeSystemFilesystemMountpoint = "system.filesystem.mountpoint" + // The filesystem state + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'used' + AttributeSystemFilesystemState = "system.filesystem.state" + // The filesystem type + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'ext4' + AttributeSystemFilesystemType = "system.filesystem.type" +) + +const ( + // used + AttributeSystemFilesystemStateUsed = "used" + // free + AttributeSystemFilesystemStateFree = "free" + // reserved + AttributeSystemFilesystemStateReserved = "reserved" +) + +const ( + // fat32 + AttributeSystemFilesystemTypeFat32 = "fat32" + // exfat + AttributeSystemFilesystemTypeExfat = "exfat" + // ntfs + AttributeSystemFilesystemTypeNtfs = "ntfs" + // refs + AttributeSystemFilesystemTypeRefs = "refs" + // hfsplus + AttributeSystemFilesystemTypeHfsplus = "hfsplus" 
+ // ext4 + AttributeSystemFilesystemTypeExt4 = "ext4" +) + +// Describes Network attributes +const ( + // A stateless protocol MUST NOT set this attribute + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'close_wait' + AttributeSystemNetworkState = "system.network.state" +) + +const ( + // close + AttributeSystemNetworkStateClose = "close" + // close_wait + AttributeSystemNetworkStateCloseWait = "close_wait" + // closing + AttributeSystemNetworkStateClosing = "closing" + // delete + AttributeSystemNetworkStateDelete = "delete" + // established + AttributeSystemNetworkStateEstablished = "established" + // fin_wait_1 + AttributeSystemNetworkStateFinWait1 = "fin_wait_1" + // fin_wait_2 + AttributeSystemNetworkStateFinWait2 = "fin_wait_2" + // last_ack + AttributeSystemNetworkStateLastAck = "last_ack" + // listen + AttributeSystemNetworkStateListen = "listen" + // syn_recv + AttributeSystemNetworkStateSynRecv = "syn_recv" + // syn_sent + AttributeSystemNetworkStateSynSent = "syn_sent" + // time_wait + AttributeSystemNetworkStateTimeWait = "time_wait" +) + +// Describes System Process attributes +const ( + // The process state, e.g., Linux Process State Codes + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'running' + AttributeSystemProcessStatus = "system.process.status" +) + +const ( + // running + AttributeSystemProcessStatusRunning = "running" + // sleeping + AttributeSystemProcessStatusSleeping = "sleeping" + // stopped + AttributeSystemProcessStatusStopped = "stopped" + // defunct + AttributeSystemProcessStatusDefunct = "defunct" +) + +// Attributes for telemetry SDK. +const ( + // The language of the telemetry SDK. + // + // Type: Enum + // Requirement Level: Required + // Stability: stable + AttributeTelemetrySDKLanguage = "telemetry.sdk.language" + // The name of the telemetry SDK as defined above. + // + // Type: string + // Requirement Level: Required + // Stability: stable + // Examples: 'opentelemetry' + // Note: The OpenTelemetry SDK MUST set the telemetry.sdk.name attribute to + // opentelemetry. + // If another SDK, like a fork or a vendor-provided implementation, is used, this + // SDK MUST set the + // telemetry.sdk.name attribute to the fully-qualified class or module name of + // this SDK's main entry point + // or another suitable identifier depending on the language. + // The identifier opentelemetry is reserved and MUST NOT be used in this case. + // All custom identifiers SHOULD be stable across different versions of an + // implementation. + AttributeTelemetrySDKName = "telemetry.sdk.name" + // The version string of the telemetry SDK. + // + // Type: string + // Requirement Level: Required + // Stability: stable + // Examples: '1.2.3' + AttributeTelemetrySDKVersion = "telemetry.sdk.version" + // The name of the auto instrumentation agent or distribution, if used. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'parts-unlimited-java' + // Note: Official auto instrumentation agents and distributions SHOULD set the + // telemetry.distro.name attribute to + // a string starting with opentelemetry-, e.g. opentelemetry-java-instrumentation. + AttributeTelemetryDistroName = "telemetry.distro.name" + // The version string of the auto instrumentation agent or distribution, if used. 
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: '1.2.3'
+ AttributeTelemetryDistroVersion = "telemetry.distro.version"
+)
+
+const (
+ // cpp
+ AttributeTelemetrySDKLanguageCPP = "cpp"
+ // dotnet
+ AttributeTelemetrySDKLanguageDotnet = "dotnet"
+ // erlang
+ AttributeTelemetrySDKLanguageErlang = "erlang"
+ // go
+ AttributeTelemetrySDKLanguageGo = "go"
+ // java
+ AttributeTelemetrySDKLanguageJava = "java"
+ // nodejs
+ AttributeTelemetrySDKLanguageNodejs = "nodejs"
+ // php
+ AttributeTelemetrySDKLanguagePHP = "php"
+ // python
+ AttributeTelemetrySDKLanguagePython = "python"
+ // ruby
+ AttributeTelemetrySDKLanguageRuby = "ruby"
+ // rust
+ AttributeTelemetrySDKLanguageRust = "rust"
+ // swift
+ AttributeTelemetrySDKLanguageSwift = "swift"
+ // webjs
+ AttributeTelemetrySDKLanguageWebjs = "webjs"
+)
+
+// This group describes attributes specific to [software
+// tests](https://en.wikipedia.org/wiki/Software_testing).
+const (
+ // The fully qualified human readable name of the test case.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'org.example.TestCase1.test1', 'example/tests/TestCase1.test1',
+ // 'ExampleTestCase1_test1'
+ AttributeTestCaseName = "test.case.name"
+ // The status of the actual test case result from test execution.
+ //
+ // Type: Enum
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'pass', 'fail'
+ AttributeTestCaseResultStatus = "test.case.result.status"
+ // The human readable name of a test suite.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'TestSuite1'
+ AttributeTestSuiteName = "test.suite.name"
+ // The status of the test suite run.
+ //
+ // Type: Enum
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'success', 'failure', 'skipped', 'aborted', 'timed_out',
+ // 'in_progress'
+ AttributeTestSuiteRunStatus = "test.suite.run.status"
+)
+
+const (
+ // pass
+ AttributeTestCaseResultStatusPass = "pass"
+ // fail
+ AttributeTestCaseResultStatusFail = "fail"
+)
+
+const (
+ // success
+ AttributeTestSuiteRunStatusSuccess = "success"
+ // failure
+ AttributeTestSuiteRunStatusFailure = "failure"
+ // skipped
+ AttributeTestSuiteRunStatusSkipped = "skipped"
+ // aborted
+ AttributeTestSuiteRunStatusAborted = "aborted"
+ // timed_out
+ AttributeTestSuiteRunStatusTimedOut = "timed_out"
+ // in_progress
+ AttributeTestSuiteRunStatusInProgress = "in_progress"
+)
+
+// These attributes may be used for any operation to store information about a
+// thread that started a span.
+const (
+ // Current "managed" thread ID (as opposed to OS thread ID).
+ //
+ // Type: int
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 42
+ AttributeThreadID = "thread.id"
+ // Current thread name.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'main'
+ AttributeThreadName = "thread.name"
+)
+
+// Semantic convention attributes in the TLS namespace.
+const (
+ // String indicating the cipher used during the current connection.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'TLS_RSA_WITH_3DES_EDE_CBC_SHA',
+ // 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256'
+ // Note: The values allowed for tls.cipher MUST be one of the Descriptions of the
+ // registered TLS Cipher Suites.
+ AttributeTLSCipher = "tls.cipher" + // PEM-encoded stand-alone certificate offered by the client. This is usually + // mutually-exclusive of client.certificate_chain since this value also exists in + // that list. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'MII...' + AttributeTLSClientCertificate = "tls.client.certificate" + // Array of PEM-encoded certificates that make up the certificate chain offered by + // the client. This is usually mutually-exclusive of client.certificate since that + // value should be the first certificate in the chain. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'MII...', 'MI...' + AttributeTLSClientCertificateChain = "tls.client.certificate_chain" + // Certificate fingerprint using the MD5 digest of DER-encoded version of + // certificate offered by the client. For consistency with other hash values, this + // value should be formatted as an uppercase hash. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC' + AttributeTLSClientHashMd5 = "tls.client.hash.md5" + // Certificate fingerprint using the SHA1 digest of DER-encoded version of + // certificate offered by the client. For consistency with other hash values, this + // value should be formatted as an uppercase hash. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A' + AttributeTLSClientHashSha1 = "tls.client.hash.sha1" + // Certificate fingerprint using the SHA256 digest of DER-encoded version of + // certificate offered by the client. For consistency with other hash values, this + // value should be formatted as an uppercase hash. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0' + AttributeTLSClientHashSha256 = "tls.client.hash.sha256" + // Distinguished name of subject of the issuer of the x.509 certificate presented + // by the client. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example, DC=com' + AttributeTLSClientIssuer = "tls.client.issuer" + // A hash that identifies clients based on how they perform an SSL/TLS handshake. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'd4e5b18d6b55c71272893221c96ba240' + AttributeTLSClientJa3 = "tls.client.ja3" + // Date/Time indicating when client certificate is no longer considered valid. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '2021-01-01T00:00:00.000Z' + AttributeTLSClientNotAfter = "tls.client.not_after" + // Date/Time indicating when client certificate is first considered valid. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '1970-01-01T00:00:00.000Z' + AttributeTLSClientNotBefore = "tls.client.not_before" + // Distinguished name of subject of the x.509 certificate presented by the client. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'CN=myclient, OU=Documentation Team, DC=example, DC=com' + AttributeTLSClientSubject = "tls.client.subject" + // Array of ciphers offered by the client during the client hello. 
+ // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384', + // 'TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384', '...' + AttributeTLSClientSupportedCiphers = "tls.client.supported_ciphers" + // String indicating the curve used for the given cipher, when applicable + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'secp256r1' + AttributeTLSCurve = "tls.curve" + // Boolean flag indicating if the TLS negotiation was successful and transitioned + // to an encrypted tunnel. + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + // Examples: True + AttributeTLSEstablished = "tls.established" + // String indicating the protocol being tunneled. Per the values in the IANA + // registry, this string should be lower case. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'http/1.1' + AttributeTLSNextProtocol = "tls.next_protocol" + // Normalized lowercase protocol name parsed from original string of the + // negotiated SSL/TLS protocol version + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeTLSProtocolName = "tls.protocol.name" + // Numeric part of the version parsed from the original string of the negotiated + // SSL/TLS protocol version + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '1.2', '3' + AttributeTLSProtocolVersion = "tls.protocol.version" + // Boolean flag indicating if this TLS connection was resumed from an existing TLS + // negotiation. + // + // Type: boolean + // Requirement Level: Optional + // Stability: experimental + // Examples: True + AttributeTLSResumed = "tls.resumed" + // PEM-encoded stand-alone certificate offered by the server. This is usually + // mutually-exclusive of server.certificate_chain since this value also exists in + // that list. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'MII...' + AttributeTLSServerCertificate = "tls.server.certificate" + // Array of PEM-encoded certificates that make up the certificate chain offered by + // the server. This is usually mutually-exclusive of server.certificate since that + // value should be the first certificate in the chain. + // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'MII...', 'MI...' + AttributeTLSServerCertificateChain = "tls.server.certificate_chain" + // Certificate fingerprint using the MD5 digest of DER-encoded version of + // certificate offered by the server. For consistency with other hash values, this + // value should be formatted as an uppercase hash. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC' + AttributeTLSServerHashMd5 = "tls.server.hash.md5" + // Certificate fingerprint using the SHA1 digest of DER-encoded version of + // certificate offered by the server. For consistency with other hash values, this + // value should be formatted as an uppercase hash. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A' + AttributeTLSServerHashSha1 = "tls.server.hash.sha1" + // Certificate fingerprint using the SHA256 digest of DER-encoded version of + // certificate offered by the server. 
For consistency with other hash values, this
+ // value should be formatted as an uppercase hash (see the sketch after this
+ // group).
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0'
+ AttributeTLSServerHashSha256 = "tls.server.hash.sha256"
+ // Distinguished name of subject of the issuer of the x.509 certificate presented
+ // by the server.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example, DC=com'
+ AttributeTLSServerIssuer = "tls.server.issuer"
+ // A hash that identifies servers based on how they perform an SSL/TLS handshake.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'd4e5b18d6b55c71272893221c96ba240'
+ AttributeTLSServerJa3s = "tls.server.ja3s"
+ // Date/Time indicating when server certificate is no longer considered valid.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: '2021-01-01T00:00:00.000Z'
+ AttributeTLSServerNotAfter = "tls.server.not_after"
+ // Date/Time indicating when server certificate is first considered valid.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: '1970-01-01T00:00:00.000Z'
+ AttributeTLSServerNotBefore = "tls.server.not_before"
+ // Distinguished name of subject of the x.509 certificate presented by the server.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'CN=myserver, OU=Documentation Team, DC=example, DC=com'
+ AttributeTLSServerSubject = "tls.server.subject"
+)
+
+const (
+ // ssl
+ AttributeTLSProtocolNameSsl = "ssl"
+ // tls
+ AttributeTLSProtocolNameTLS = "tls"
+)
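+
+// A minimal illustrative sketch of the uppercase fingerprint formatting used
+// by the tls.client.hash.* and tls.server.hash.* attributes above, not part
+// of the generated conventions; it assumes imports of crypto/sha256,
+// crypto/x509, encoding/hex and strings:
+//
+//    // sha256Fingerprint renders a certificate fingerprint as an uppercase hex
+//    // digest of the DER bytes, e.g. for tls.server.hash.sha256.
+//    func sha256Fingerprint(cert *x509.Certificate) string {
+//        sum := sha256.Sum256(cert.Raw)
+//        return strings.ToUpper(hex.EncodeToString(sum[:]))
+//    }
+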
+// Attributes describing URL.
+const (
+ // Domain extracted from the url.full, such as "opentelemetry.io".
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'www.foo.bar', 'opentelemetry.io', '3.12.167.2',
+ // '[1080:0:0:0:8:800:200C:417A]'
+ // Note: In some cases a URL may refer to an IP and/or port directly, without a
+ // domain name. In this case, the IP address would go to the domain field. If the
+ // URL contains a literal IPv6 address enclosed by [ and ], the [ and ] characters
+ // should also be captured in the domain field.
+ AttributeURLDomain = "url.domain"
+ // The file extension extracted from the url.full, excluding the leading dot.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'png', 'gz'
+ // Note: The file extension is only set if it exists, as not every url has a file
+ // extension. When the file name has multiple extensions example.tar.gz, only the
+ // last one should be captured gz, not tar.gz.
+ AttributeURLExtension = "url.extension"
+ // The URI fragment component
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: 'SemConv'
+ AttributeURLFragment = "url.fragment"
+ // Absolute URL describing a network resource according to RFC3986
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv', '//localhost'
+ // Note: For network calls, URL usually has
+ // scheme://host[:port][path][?query][#fragment] format, where the fragment is not
+ // transmitted over HTTP, but if it is known, it SHOULD be included nevertheless.
+ // url.full MUST NOT contain credentials passed via URL in form of
+ // https://username:password@www.example.com/. In such a case username and
+ // password SHOULD be redacted and the attribute's value SHOULD be
+ // https://REDACTED:REDACTED@www.example.com/ (see the sketch after this group).
+ // url.full SHOULD capture the absolute URL when it is available (or can be
+ // reconstructed). Sensitive content provided in url.full SHOULD be scrubbed when
+ // instrumentations can identify it.
+ AttributeURLFull = "url.full"
+ // Unmodified original URL as seen in the event source.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv',
+ // 'search?q=OpenTelemetry'
+ // Note: In network monitoring, the observed URL may be a full URL, whereas in
+ // access logs, the URL is often just represented as a path. This field is meant
+ // to represent the URL as it was observed, complete or not.
+ // url.original might contain credentials passed via URL in form of
+ // https://username:password@www.example.com/. In such a case username and
+ // password SHOULD NOT be redacted and the attribute's value SHOULD remain the
+ // same.
+ AttributeURLOriginal = "url.original"
+ // The URI path component
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: '/search'
+ // Note: Sensitive content provided in url.path SHOULD be scrubbed when
+ // instrumentations can identify it.
+ AttributeURLPath = "url.path"
+ // Port extracted from the url.full
+ //
+ // Type: int
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 443
+ AttributeURLPort = "url.port"
+ // The URI query component
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: 'q=OpenTelemetry'
+ // Note: Sensitive content provided in url.query SHOULD be scrubbed when
+ // instrumentations can identify it.
+ AttributeURLQuery = "url.query"
+ // The highest registered url domain, stripped of the subdomain.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'example.com', 'foo.co.uk'
+ // Note: This value can be determined precisely with the public suffix list. For
+ // example, the registered domain for foo.example.com is example.com. Trying to
+ // approximate this by simply taking the last two labels will not work well for
+ // TLDs such as co.uk (see the sketch after this group).
+ AttributeURLRegisteredDomain = "url.registered_domain"
+ // The URI scheme component identifying the used protocol.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: 'https', 'ftp', 'telnet'
+ AttributeURLScheme = "url.scheme"
+ // The subdomain portion of a fully qualified domain name includes all of the
+ // names except the host name under the registered_domain. In a partially
+ // qualified domain, or if the qualification level of the full name cannot be
+ // determined, subdomain contains all of the names below the registered domain.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'east', 'sub2.sub1'
+ // Note: The subdomain portion of www.east.mydomain.co.uk is east. If the domain
+ // has multiple levels of subdomain, such as sub2.sub1.example.com, the subdomain
+ // field should contain sub2.sub1, with no trailing period.
+ AttributeURLSubdomain = "url.subdomain"
+ // The low-cardinality template of an absolute path reference.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: '/users/{id}', '/users/:id', '/users?id={id}'
+ AttributeURLTemplate = "url.template"
+ // The effective top level domain (eTLD), also known as the domain suffix, is the
+ // last part of the domain name. For example, the top level domain for example.com
+ // is com.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'com', 'co.uk'
+ // Note: This value can be determined precisely with the public suffix list.
+ AttributeURLTopLevelDomain = "url.top_level_domain"
+)
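+
+// A minimal illustrative sketch of the url.full credential redaction and the
+// url.registered_domain public-suffix lookup described above, not part of the
+// generated conventions; it assumes imports of net/url and
+// golang.org/x/net/publicsuffix:
+//
+//    // redactedFull rewrites any userinfo to the mandated placeholder before
+//    // the URL is recorded as url.full.
+//    func redactedFull(u *url.URL) string {
+//        if u.User != nil {
+//            clone := *u
+//            clone.User = url.UserPassword("REDACTED", "REDACTED")
+//            return clone.String()
+//        }
+//        return u.String()
+//    }
+//
+//    // registeredDomain derives url.registered_domain, e.g. "example.com"
+//    // for "foo.example.com", via the public suffix list.
+//    func registeredDomain(host string) (string, error) {
+//        return publicsuffix.EffectiveTLDPlusOne(host)
+//    }
+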
+// Describes user-agent attributes.
+const (
+ // Name of the user-agent extracted from original. Usually refers to the browser's
+ // name.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'Safari', 'YourApp'
+ // Note: Example of extracting the browser's name from the original string. In the
+ // case of using a user-agent for non-browser products, such as microservices with
+ // multiple names/versions inside the user_agent.original, the most significant
+ // name SHOULD be selected. In such a scenario it should align with
+ // user_agent.version.
+ AttributeUserAgentName = "user_agent.name"
+ // Value of the HTTP User-Agent header sent by the client.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: stable
+ // Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU iPhone
+ // OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko)
+ // Version/14.1.2 Mobile/15E148 Safari/604.1', 'YourApp/1.0.0 grpc-java-
+ // okhttp/1.27.2'
+ AttributeUserAgentOriginal = "user_agent.original"
+ // Version of the user-agent extracted from original. Usually refers to the
+ // browser's version.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: '14.1.2', '1.0.0'
+ // Note: Example of extracting the browser's version from the original string. In
+ // the case of using a user-agent for non-browser products, such as microservices
+ // with multiple names/versions inside the user_agent.original, the most
+ // significant version SHOULD be selected. In such a scenario it should align with
+ // user_agent.name.
+ AttributeUserAgentVersion = "user_agent.version"
+)
+
+// Describes information about the user.
+const (
+ // User email address.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'a.einstein@example.com'
+ AttributeUserEmail = "user.email"
+ // User's full name.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'Albert Einstein'
+ AttributeUserFullName = "user.full_name"
+ // Unique user hash to correlate information for a user in anonymized form.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: '364fc68eaf4c8acec74a4e52d7d1feaa'
+ // Note: Useful if user.id or user.name contain confidential information and
+ // cannot be used.
+ AttributeUserHash = "user.hash"
+ // Unique identifier of the user.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'S-1-5-21-202424912787-2692429404-2351956786-1000'
+ AttributeUserID = "user.id"
+ // Short name or login/username of the user.
+ //
+ // Type: string
+ // Requirement Level: Optional
+ // Stability: experimental
+ // Examples: 'a.einstein'
+ AttributeUserName = "user.name"
+ // Array of user roles at the time of the event.
+ // + // Type: string[] + // Requirement Level: Optional + // Stability: experimental + // Examples: 'admin', 'reporting_user' + AttributeUserRoles = "user.roles" +) + +// Describes V8 JS Engine Runtime related attributes. +const ( + // The type of garbage collection. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + AttributeV8jsGcType = "v8js.gc.type" + // The name of the space type of heap memory. + // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Note: Value can be retrieved from value space_name of + // v8.getHeapSpaceStatistics() + AttributeV8jsHeapSpaceName = "v8js.heap.space.name" +) + +const ( + // Major (Mark Sweep Compact) + AttributeV8jsGcTypeMajor = "major" + // Minor (Scavenge) + AttributeV8jsGcTypeMinor = "minor" + // Incremental (Incremental Marking) + AttributeV8jsGcTypeIncremental = "incremental" + // Weak Callbacks (Process Weak Callbacks) + AttributeV8jsGcTypeWeakcb = "weakcb" +) + +const ( + // New memory space + AttributeV8jsHeapSpaceNameNewSpace = "new_space" + // Old memory space + AttributeV8jsHeapSpaceNameOldSpace = "old_space" + // Code memory space + AttributeV8jsHeapSpaceNameCodeSpace = "code_space" + // Map memory space + AttributeV8jsHeapSpaceNameMapSpace = "map_space" + // Large object memory space + AttributeV8jsHeapSpaceNameLargeObjectSpace = "large_object_space" +) + +// This group defines the attributes for [Version Control Systems +// (VCS)](https://en.wikipedia.org/wiki/Version_control). +const ( + // The ID of the change (pull request/merge request) if applicable. This is + // usually a unique (within repository) identifier generated by the VCS system. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '123' + AttributeVcsRepositoryChangeID = "vcs.repository.change.id" + // The human readable title of the change (pull request/merge request). This title + // is often a brief summary of the change and may get merged in to a ref as the + // commit summary. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'Fixes broken thing', 'feat: add my new feature', '[chore] update + // dependency' + AttributeVcsRepositoryChangeTitle = "vcs.repository.change.title" + // The name of the reference such as branch or tag in the repository. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'my-feature-branch', 'tag-1-test' + AttributeVcsRepositoryRefName = "vcs.repository.ref.name" + // The revision, literally revised version, The revision most often refers to a + // commit object in Git, or a revision number in SVN. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '9d59409acf479dfa0df1aa568182e43e43df8bbe28d60fcf2bc52e30068802cc', + // 'main', '123', 'HEAD' + // Note: The revision can be a full hash value (see glossary), + // of the recorded change to a ref within a repository pointing to a + // commit commit object. It does + // not necessarily have to be a hash; it can simply define a + // revision number + // which is an integer that is monotonically increasing. In cases where + // it is identical to the ref.name, it SHOULD still be included. It is + // up to the implementer to decide which value to set as the revision + // based on the VCS system and situational context. + AttributeVcsRepositoryRefRevision = "vcs.repository.ref.revision" + // The type of the reference in the repository. 
+ // + // Type: Enum + // Requirement Level: Optional + // Stability: experimental + // Examples: 'branch', 'tag' + AttributeVcsRepositoryRefType = "vcs.repository.ref.type" + // The URL of the repository providing the complete address in order to locate and + // identify the repository. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'https://github.com/opentelemetry/open-telemetry-collector-contrib', + // 'https://gitlab.com/my-org/my-project/my-projects-project/repo' + AttributeVcsRepositoryURLFull = "vcs.repository.url.full" +) + +const ( + // [branch](https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch) + AttributeVcsRepositoryRefTypeBranch = "branch" + // [tag](https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag) + AttributeVcsRepositoryRefTypeTag = "tag" +) + +// The attributes used to describe the packaged software running the +// application code. +const ( + // Additional description of the web engine (e.g. detailed version and edition + // information). + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - 2.2.2.Final' + AttributeWebEngineDescription = "webengine.description" + // The name of the web engine. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: 'WildFly' + AttributeWebEngineName = "webengine.name" + // The version of the web engine. + // + // Type: string + // Requirement Level: Optional + // Stability: experimental + // Examples: '21.0.0' + AttributeWebEngineVersion = "webengine.version" +) + +func GetAttribute_groupSemanticConventionAttributeNames() []string { + return []string{ + AttributeAndroidOSAPILevel, + AttributeArtifactAttestationFilename, + AttributeArtifactAttestationHash, + AttributeArtifactAttestationID, + AttributeArtifactFilename, + AttributeArtifactHash, + AttributeArtifactPurl, + AttributeArtifactVersion, + AttributeAspnetcoreRateLimitingResult, + AttributeAspnetcoreDiagnosticsHandlerType, + AttributeAspnetcoreDiagnosticsExceptionResult, + AttributeAspnetcoreRateLimitingPolicy, + AttributeAspnetcoreRequestIsUnhandled, + AttributeAspnetcoreRoutingIsFallback, + AttributeAspnetcoreRoutingMatchStatus, + AttributeAWSRequestID, + AttributeAWSDynamoDBAttributeDefinitions, + AttributeAWSDynamoDBAttributesToGet, + AttributeAWSDynamoDBConsistentRead, + AttributeAWSDynamoDBConsumedCapacity, + AttributeAWSDynamoDBCount, + AttributeAWSDynamoDBExclusiveStartTable, + AttributeAWSDynamoDBGlobalSecondaryIndexUpdates, + AttributeAWSDynamoDBGlobalSecondaryIndexes, + AttributeAWSDynamoDBIndexName, + AttributeAWSDynamoDBItemCollectionMetrics, + AttributeAWSDynamoDBLimit, + AttributeAWSDynamoDBLocalSecondaryIndexes, + AttributeAWSDynamoDBProjection, + AttributeAWSDynamoDBProvisionedReadCapacity, + AttributeAWSDynamoDBProvisionedWriteCapacity, + AttributeAWSDynamoDBScanForward, + AttributeAWSDynamoDBScannedCount, + AttributeAWSDynamoDBSegment, + AttributeAWSDynamoDBSelect, + AttributeAWSDynamoDBTableCount, + AttributeAWSDynamoDBTableNames, + AttributeAWSDynamoDBTotalSegments, + AttributeAWSECSTaskID, + AttributeAWSECSClusterARN, + AttributeAWSECSContainerARN, + AttributeAWSECSLaunchtype, + AttributeAWSECSTaskARN, + AttributeAWSECSTaskFamily, + AttributeAWSECSTaskRevision, + AttributeAWSEKSClusterARN, + AttributeAWSLogGroupARNs, + AttributeAWSLogGroupNames, + AttributeAWSLogStreamARNs, + 
AttributeAWSLogStreamNames, + AttributeAWSLambdaInvokedARN, + AttributeAWSS3Bucket, + AttributeAWSS3CopySource, + AttributeAWSS3Delete, + AttributeAWSS3Key, + AttributeAWSS3PartNumber, + AttributeAWSS3UploadID, + AttributeAzServiceRequestID, + AttributeBrowserBrands, + AttributeBrowserLanguage, + AttributeBrowserMobile, + AttributeBrowserPlatform, + AttributeCicdPipelineName, + AttributeCicdPipelineRunID, + AttributeCicdPipelineTaskName, + AttributeCicdPipelineTaskRunID, + AttributeCicdPipelineTaskRunURLFull, + AttributeCicdPipelineTaskType, + AttributeClientAddress, + AttributeClientPort, + AttributeCloudAccountID, + AttributeCloudAvailabilityZone, + AttributeCloudPlatform, + AttributeCloudProvider, + AttributeCloudRegion, + AttributeCloudResourceID, + AttributeCloudeventsEventID, + AttributeCloudeventsEventSource, + AttributeCloudeventsEventSpecVersion, + AttributeCloudeventsEventSubject, + AttributeCloudeventsEventType, + AttributeCodeColumn, + AttributeCodeFilepath, + AttributeCodeFunction, + AttributeCodeLineNumber, + AttributeCodeNamespace, + AttributeCodeStacktrace, + AttributeContainerCommand, + AttributeContainerCommandArgs, + AttributeContainerCommandLine, + AttributeContainerID, + AttributeContainerImageID, + AttributeContainerImageName, + AttributeContainerImageRepoDigests, + AttributeContainerImageTags, + AttributeContainerName, + AttributeContainerRuntime, + AttributeCPUMode, + AttributeDBClientConnectionPoolName, + AttributeDBClientConnectionState, + AttributeDBCollectionName, + AttributeDBNamespace, + AttributeDBOperationBatchSize, + AttributeDBOperationName, + AttributeDBQueryText, + AttributeDBSystem, + AttributeDBCassandraConsistencyLevel, + AttributeDBCassandraCoordinatorDC, + AttributeDBCassandraCoordinatorID, + AttributeDBCassandraIdempotence, + AttributeDBCassandraPageSize, + AttributeDBCassandraSpeculativeExecutionCount, + AttributeDBCosmosDBClientID, + AttributeDBCosmosDBConnectionMode, + AttributeDBCosmosDBOperationType, + AttributeDBCosmosDBRequestCharge, + AttributeDBCosmosDBRequestContentLength, + AttributeDBCosmosDBStatusCode, + AttributeDBCosmosDBSubStatusCode, + AttributeDBElasticsearchNodeName, + AttributeDeploymentEnvironmentName, + AttributeDeploymentID, + AttributeDeploymentName, + AttributeDeploymentStatus, + AttributeAndroidState, + AttributeDestinationAddress, + AttributeDestinationPort, + AttributeDeviceID, + AttributeDeviceManufacturer, + AttributeDeviceModelIdentifier, + AttributeDeviceModelName, + AttributeDiskIoDirection, + AttributeDNSQuestionName, + AttributeErrorType, + AttributeEventName, + AttributeExceptionEscaped, + AttributeExceptionMessage, + AttributeExceptionStacktrace, + AttributeExceptionType, + AttributeFaaSColdstart, + AttributeFaaSCron, + AttributeFaaSDocumentCollection, + AttributeFaaSDocumentName, + AttributeFaaSDocumentOperation, + AttributeFaaSDocumentTime, + AttributeFaaSInstance, + AttributeFaaSInvocationID, + AttributeFaaSInvokedName, + AttributeFaaSInvokedProvider, + AttributeFaaSInvokedRegion, + AttributeFaaSMaxMemory, + AttributeFaaSName, + AttributeFaaSTime, + AttributeFaaSTrigger, + AttributeFaaSVersion, + AttributeFeatureFlagKey, + AttributeFeatureFlagProviderName, + AttributeFeatureFlagVariant, + AttributeFileDirectory, + AttributeFileExtension, + AttributeFileName, + AttributeFilePath, + AttributeFileSize, + AttributeGCPClientService, + AttributeGCPCloudRunJobExecution, + AttributeGCPCloudRunJobTaskIndex, + AttributeGCPGceInstanceHostname, + AttributeGCPGceInstanceName, + AttributeGenAiCompletion, + 
AttributeGenAiOperationName, + AttributeGenAiPrompt, + AttributeGenAiRequestFrequencyPenalty, + AttributeGenAiRequestMaxTokens, + AttributeGenAiRequestModel, + AttributeGenAiRequestPresencePenalty, + AttributeGenAiRequestStopSequences, + AttributeGenAiRequestTemperature, + AttributeGenAiRequestTopK, + AttributeGenAiRequestTopP, + AttributeGenAiResponseFinishReasons, + AttributeGenAiResponseID, + AttributeGenAiResponseModel, + AttributeGenAiSystem, + AttributeGenAiTokenType, + AttributeGenAiUsageInputTokens, + AttributeGenAiUsageOutputTokens, + AttributeGoMemoryType, + AttributeGraphqlDocument, + AttributeGraphqlOperationName, + AttributeGraphqlOperationType, + AttributeHerokuAppID, + AttributeHerokuReleaseCommit, + AttributeHerokuReleaseCreationTimestamp, + AttributeHostArch, + AttributeHostCPUCacheL2Size, + AttributeHostCPUFamily, + AttributeHostCPUModelID, + AttributeHostCPUModelName, + AttributeHostCPUStepping, + AttributeHostCPUVendorID, + AttributeHostID, + AttributeHostImageID, + AttributeHostImageName, + AttributeHostImageVersion, + AttributeHostIP, + AttributeHostMac, + AttributeHostName, + AttributeHostType, + AttributeHTTPConnectionState, + AttributeHTTPRequestBodySize, + AttributeHTTPRequestMethod, + AttributeHTTPRequestMethodOriginal, + AttributeHTTPRequestResendCount, + AttributeHTTPRequestSize, + AttributeHTTPResponseBodySize, + AttributeHTTPResponseSize, + AttributeHTTPResponseStatusCode, + AttributeHTTPRoute, + AttributeJvmBufferPoolName, + AttributeJvmGcAction, + AttributeJvmGcName, + AttributeJvmMemoryPoolName, + AttributeJvmMemoryType, + AttributeJvmThreadDaemon, + AttributeJvmThreadState, + AttributeK8SClusterName, + AttributeK8SClusterUID, + AttributeK8SContainerName, + AttributeK8SContainerRestartCount, + AttributeK8SContainerStatusLastTerminatedReason, + AttributeK8SCronJobName, + AttributeK8SCronJobUID, + AttributeK8SDaemonSetName, + AttributeK8SDaemonSetUID, + AttributeK8SDeploymentName, + AttributeK8SDeploymentUID, + AttributeK8SJobName, + AttributeK8SJobUID, + AttributeK8SNamespaceName, + AttributeK8SNodeName, + AttributeK8SNodeUID, + AttributeK8SPodName, + AttributeK8SPodUID, + AttributeK8SReplicaSetName, + AttributeK8SReplicaSetUID, + AttributeK8SStatefulSetName, + AttributeK8SStatefulSetUID, + AttributeLinuxMemorySlabState, + AttributeLogIostream, + AttributeLogFileName, + AttributeLogFileNameResolved, + AttributeLogFilePath, + AttributeLogFilePathResolved, + AttributeLogRecordOriginal, + AttributeLogRecordUID, + AttributeMessagingBatchMessageCount, + AttributeMessagingClientID, + AttributeMessagingConsumerGroupName, + AttributeMessagingDestinationAnonymous, + AttributeMessagingDestinationName, + AttributeMessagingDestinationPartitionID, + AttributeMessagingDestinationSubscriptionName, + AttributeMessagingDestinationTemplate, + AttributeMessagingDestinationTemporary, + AttributeMessagingMessageBodySize, + AttributeMessagingMessageConversationID, + AttributeMessagingMessageEnvelopeSize, + AttributeMessagingMessageID, + AttributeMessagingOperationName, + AttributeMessagingOperationType, + AttributeMessagingSystem, + AttributeMessagingKafkaMessageKey, + AttributeMessagingKafkaMessageTombstone, + AttributeMessagingKafkaOffset, + AttributeMessagingRabbitmqDestinationRoutingKey, + AttributeMessagingRabbitmqMessageDeliveryTag, + AttributeMessagingRocketmqConsumptionModel, + AttributeMessagingRocketmqMessageDelayTimeLevel, + AttributeMessagingRocketmqMessageDeliveryTimestamp, + AttributeMessagingRocketmqMessageGroup, + AttributeMessagingRocketmqMessageKeys, + 
AttributeMessagingRocketmqMessageTag, + AttributeMessagingRocketmqMessageType, + AttributeMessagingRocketmqNamespace, + AttributeMessagingGCPPubsubMessageAckDeadline, + AttributeMessagingGCPPubsubMessageAckID, + AttributeMessagingGCPPubsubMessageDeliveryAttempt, + AttributeMessagingGCPPubsubMessageOrderingKey, + AttributeMessagingServicebusDispositionStatus, + AttributeMessagingServicebusMessageDeliveryCount, + AttributeMessagingServicebusMessageEnqueuedTime, + AttributeMessagingEventhubsMessageEnqueuedTime, + AttributeNetworkCarrierIcc, + AttributeNetworkCarrierMcc, + AttributeNetworkCarrierMnc, + AttributeNetworkCarrierName, + AttributeNetworkConnectionSubtype, + AttributeNetworkConnectionType, + AttributeNetworkIoDirection, + AttributeNetworkLocalAddress, + AttributeNetworkLocalPort, + AttributeNetworkPeerAddress, + AttributeNetworkPeerPort, + AttributeNetworkProtocolName, + AttributeNetworkProtocolVersion, + AttributeNetworkTransport, + AttributeNetworkType, + AttributeOciManifestDigest, + AttributeOpentracingRefType, + AttributeOSBuildID, + AttributeOSDescription, + AttributeOSName, + AttributeOSType, + AttributeOSVersion, + AttributeOTelStatusCode, + AttributeOTelStatusDescription, + AttributeOTelScopeName, + AttributeOTelScopeVersion, + AttributePeerService, + AttributeProcessCommand, + AttributeProcessCommandArgs, + AttributeProcessCommandLine, + AttributeProcessContextSwitchType, + AttributeProcessCreationTime, + AttributeProcessExecutableName, + AttributeProcessExecutablePath, + AttributeProcessExitCode, + AttributeProcessExitTime, + AttributeProcessGroupLeaderPID, + AttributeProcessInteractive, + AttributeProcessOwner, + AttributeProcessPagingFaultType, + AttributeProcessParentPID, + AttributeProcessPID, + AttributeProcessRealUserID, + AttributeProcessRealUserName, + AttributeProcessRuntimeDescription, + AttributeProcessRuntimeName, + AttributeProcessRuntimeVersion, + AttributeProcessSavedUserID, + AttributeProcessSavedUserName, + AttributeProcessSessionLeaderPID, + AttributeProcessUserID, + AttributeProcessUserName, + AttributeProcessVpid, + AttributeRPCConnectRPCErrorCode, + AttributeRPCGRPCStatusCode, + AttributeRPCJsonrpcErrorCode, + AttributeRPCJsonrpcErrorMessage, + AttributeRPCJsonrpcRequestID, + AttributeRPCJsonrpcVersion, + AttributeRPCMessageCompressedSize, + AttributeRPCMessageID, + AttributeRPCMessageType, + AttributeRPCMessageUncompressedSize, + AttributeRPCMethod, + AttributeRPCService, + AttributeRPCSystem, + AttributeServerAddress, + AttributeServerPort, + AttributeServiceInstanceID, + AttributeServiceName, + AttributeServiceNamespace, + AttributeServiceVersion, + AttributeSessionID, + AttributeSessionPreviousID, + AttributeSignalrConnectionStatus, + AttributeSignalrTransport, + AttributeSourceAddress, + AttributeSourcePort, + AttributeSystemDevice, + AttributeSystemCPULogicalNumber, + AttributeSystemMemoryState, + AttributeSystemPagingDirection, + AttributeSystemPagingState, + AttributeSystemPagingType, + AttributeSystemFilesystemMode, + AttributeSystemFilesystemMountpoint, + AttributeSystemFilesystemState, + AttributeSystemFilesystemType, + AttributeSystemNetworkState, + AttributeSystemProcessStatus, + AttributeTelemetrySDKLanguage, + AttributeTelemetrySDKName, + AttributeTelemetrySDKVersion, + AttributeTelemetryDistroName, + AttributeTelemetryDistroVersion, + AttributeTestCaseName, + AttributeTestCaseResultStatus, + AttributeTestSuiteName, + AttributeTestSuiteRunStatus, + AttributeThreadID, + AttributeThreadName, + AttributeTLSCipher, + 
AttributeTLSClientCertificate, + AttributeTLSClientCertificateChain, + AttributeTLSClientHashMd5, + AttributeTLSClientHashSha1, + AttributeTLSClientHashSha256, + AttributeTLSClientIssuer, + AttributeTLSClientJa3, + AttributeTLSClientNotAfter, + AttributeTLSClientNotBefore, + AttributeTLSClientSubject, + AttributeTLSClientSupportedCiphers, + AttributeTLSCurve, + AttributeTLSEstablished, + AttributeTLSNextProtocol, + AttributeTLSProtocolName, + AttributeTLSProtocolVersion, + AttributeTLSResumed, + AttributeTLSServerCertificate, + AttributeTLSServerCertificateChain, + AttributeTLSServerHashMd5, + AttributeTLSServerHashSha1, + AttributeTLSServerHashSha256, + AttributeTLSServerIssuer, + AttributeTLSServerJa3s, + AttributeTLSServerNotAfter, + AttributeTLSServerNotBefore, + AttributeTLSServerSubject, + AttributeURLDomain, + AttributeURLExtension, + AttributeURLFragment, + AttributeURLFull, + AttributeURLOriginal, + AttributeURLPath, + AttributeURLPort, + AttributeURLQuery, + AttributeURLRegisteredDomain, + AttributeURLScheme, + AttributeURLSubdomain, + AttributeURLTemplate, + AttributeURLTopLevelDomain, + AttributeUserAgentName, + AttributeUserAgentOriginal, + AttributeUserAgentVersion, + AttributeUserEmail, + AttributeUserFullName, + AttributeUserHash, + AttributeUserID, + AttributeUserName, + AttributeUserRoles, + AttributeV8jsGcType, + AttributeV8jsHeapSpaceName, + AttributeVcsRepositoryChangeID, + AttributeVcsRepositoryChangeTitle, + AttributeVcsRepositoryRefName, + AttributeVcsRepositoryRefRevision, + AttributeVcsRepositoryRefType, + AttributeVcsRepositoryURLFull, + AttributeWebEngineDescription, + AttributeWebEngineName, + AttributeWebEngineVersion, + } +} diff --git a/vendor/go.opentelemetry.io/collector/semconv/v1.27.0/generated_event.go b/vendor/go.opentelemetry.io/collector/semconv/v1.27.0/generated_event.go new file mode 100644 index 00000000000..ac6893c3b26 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/semconv/v1.27.0/generated_event.go @@ -0,0 +1,10 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv + +func GetEventSemanticConventionAttributeNames() []string { + return []string{} +} diff --git a/vendor/go.opentelemetry.io/collector/semconv/v1.27.0/generated_resource.go b/vendor/go.opentelemetry.io/collector/semconv/v1.27.0/generated_resource.go new file mode 100644 index 00000000000..bb89e4806f5 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/semconv/v1.27.0/generated_resource.go @@ -0,0 +1,10 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv + +func GetResourceSemanticConventionAttributeNames() []string { + return []string{} +} diff --git a/vendor/go.opentelemetry.io/collector/semconv/v1.27.0/generated_trace.go b/vendor/go.opentelemetry.io/collector/semconv/v1.27.0/generated_trace.go new file mode 100644 index 00000000000..380529563a2 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/semconv/v1.27.0/generated_trace.go @@ -0,0 +1,10 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. 
+ +package semconv + +func GetTraceSemanticConventionAttributeNames() []string { + return []string{} +} diff --git a/vendor/go.opentelemetry.io/collector/semconv/v1.27.0/schema.go b/vendor/go.opentelemetry.io/collector/semconv/v1.27.0/schema.go new file mode 100644 index 00000000000..be4e3241f25 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/semconv/v1.27.0/schema.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/collector/semconv/v1.27.0" + +// SchemaURL is the schema URL that matches the version of the semantic conventions +// that this package defines. Semconv packages starting from v1.4.0 must declare +// non-empty schema URL in the form https://opentelemetry.io/schemas/ +const SchemaURL = "https://opentelemetry.io/schemas/1.27.0" diff --git a/vendor/go.opentelemetry.io/collector/semconv/v1.6.1/generated_resource.go b/vendor/go.opentelemetry.io/collector/semconv/v1.6.1/generated_resource.go index 1a9bc437647..801970ec372 100644 --- a/vendor/go.opentelemetry.io/collector/semconv/v1.6.1/generated_resource.go +++ b/vendor/go.opentelemetry.io/collector/semconv/v1.6.1/generated_resource.go @@ -316,7 +316,7 @@ const ( // // Take care not to use the "invoked ARN" directly but replace any // alias suffix with the resolved function version, as the same runtime instance - // may be invokable with multiple + // may be invocable with multiple // different aliases.
      //
      // * GCP: The URI of the resource
      //
      // * Azure: The Fully Qualified Resource ID.
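The new semconv v1.27.0 package added above exposes every attribute key as a plain string constant, alongside generated name-list helpers and the package SchemaURL. A minimal sketch of how downstream code might consume it; the attribute values here are illustrative, not taken from the patch:

// ---- illustrative sketch, not part of the vendored patch ----
package main

import (
	"fmt"

	semconv "go.opentelemetry.io/collector/semconv/v1.27.0"
)

func main() {
	// The constants are plain strings, so they work directly as map keys.
	attrs := map[string]string{
		semconv.AttributeUserAgentOriginal: "YourApp/1.0.0 grpc-java-okhttp/1.27.2",
		semconv.AttributeURLTemplate:       "/users/{id}",
	}
	fmt.Println(attrs)

	// The generated helper enumerates every attribute key in the group, and
	// SchemaURL identifies the matching semantic-conventions release.
	names := semconv.GetAttribute_groupSemanticConventionAttributeNames()
	fmt.Printf("%d attribute keys, schema %s\n", len(names), semconv.SchemaURL)
}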
diff --git a/vendor/go.opentelemetry.io/collector/semconv/v1.9.0/generated_resource.go b/vendor/go.opentelemetry.io/collector/semconv/v1.9.0/generated_resource.go index d740be56d50..896db946a37 100644 --- a/vendor/go.opentelemetry.io/collector/semconv/v1.9.0/generated_resource.go +++ b/vendor/go.opentelemetry.io/collector/semconv/v1.9.0/generated_resource.go @@ -336,7 +336,7 @@ const ( //
    // Take care not to use the "invoked ARN" directly but replace any // alias suffix with the resolved function version, as the same runtime instance - // may be invokable with multiple + // may be invocable with multiple // different aliases.
      //
      // * GCP: The URI of the resource
      //
      // * Azure: The Fully Qualified Resource ID.
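The first service-layer hunk below adds an attributes helper that merges the SDK resource attributes with the Resource overrides from the telemetry config; a nil override deletes the key rather than setting it. A hedged, self-contained sketch of those merge semantics (the keys and values are illustrative, and the real helper iterates an SDK resource rather than a plain map):

// ---- illustrative sketch, not part of the vendored patch ----
package main

import "fmt"

// mergeAttrs mirrors the merge semantics of the attributes helper added
// below: start from the resource attributes, then apply the config
// overrides, where a nil override value deletes the key entirely.
func mergeAttrs(res map[string]string, overrides map[string]*string) map[string]interface{} {
	attrs := map[string]interface{}{}
	for k, v := range res {
		attrs[k] = v
	}
	for k, v := range overrides {
		if v != nil {
			attrs[k] = *v
		} else {
			// nil override: drop the existing key
			delete(attrs, k)
		}
	}
	return attrs
}

func main() {
	name := "my-collector"
	merged := mergeAttrs(
		map[string]string{"service.name": "otelcol", "service.version": "0.116.0"},
		map[string]*string{"service.name": &name, "service.version": nil},
	)
	fmt.Println(merged) // map[service.name:my-collector]
}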
diff --git a/vendor/go.opentelemetry.io/collector/service/attributes.go b/vendor/go.opentelemetry.io/collector/service/attributes.go new file mode 100644 index 00000000000..056ea2a0453 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/service/attributes.go @@ -0,0 +1,27 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package service // import "go.opentelemetry.io/collector/service" + +import ( + sdkresource "go.opentelemetry.io/otel/sdk/resource" + + "go.opentelemetry.io/collector/service/telemetry" +) + +func attributes(res *sdkresource.Resource, cfg telemetry.Config) map[string]interface{} { + attrs := map[string]interface{}{} + for _, r := range res.Attributes() { + attrs[string(r.Key)] = r.Value.AsString() + } + + for k, v := range cfg.Resource { + if v != nil { + attrs[k] = *v + } else { + // the new value is nil, delete the existing key + delete(attrs, k) + } + } + return attrs +} diff --git a/vendor/go.opentelemetry.io/collector/service/documentation.md b/vendor/go.opentelemetry.io/collector/service/documentation.md new file mode 100644 index 00000000000..346def750d2 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/service/documentation.md @@ -0,0 +1,55 @@ +[comment]: <> (Code generated by mdatagen. DO NOT EDIT.) + +# service + +## Internal Telemetry + +The following telemetry is emitted by this component. + +### otelcol_process_cpu_seconds + +Total CPU user and system time in seconds [alpha] + +| Unit | Metric Type | Value Type | Monotonic | + | ---- | ----------- | ---------- | --------- | + | s | Sum | Double | true | + +### otelcol_process_memory_rss + +Total physical memory (resident set size) [alpha] + +| Unit | Metric Type | Value Type | + | ---- | ----------- | ---------- | + | By | Gauge | Int | + +### otelcol_process_runtime_heap_alloc_bytes + +Bytes of allocated heap objects (see 'go doc runtime.MemStats.HeapAlloc') [alpha] + +| Unit | Metric Type | Value Type | + | ---- | ----------- | ---------- | + | By | Gauge | Int | + +### otelcol_process_runtime_total_alloc_bytes + +Cumulative bytes allocated for heap objects (see 'go doc runtime.MemStats.TotalAlloc') [alpha] + +| Unit | Metric Type | Value Type | Monotonic | + | ---- | ----------- | ---------- | --------- | + | By | Sum | Int | true | + +### otelcol_process_runtime_total_sys_memory_bytes + +Total bytes of memory obtained from the OS (see 'go doc runtime.MemStats.Sys') [alpha] + +| Unit | Metric Type | Value Type | + | ---- | ----------- | ---------- | + | By | Gauge | Int | + +### otelcol_process_uptime + +Uptime of the process [alpha] + +| Unit | Metric Type | Value Type | Monotonic | + | ---- | ----------- | ---------- | --------- | + | s | Sum | Double | true | diff --git a/vendor/go.opentelemetry.io/collector/service/extensions/extensions.go b/vendor/go.opentelemetry.io/collector/service/extensions/extensions.go index e05c5aa01f0..fcefcb33f20 100644 --- a/vendor/go.opentelemetry.io/collector/service/extensions/extensions.go +++ b/vendor/go.opentelemetry.io/collector/service/extensions/extensions.go @@ -10,12 +10,16 @@ import ( "sort" "go.uber.org/multierr" + "go.uber.org/zap" "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componentstatus" "go.opentelemetry.io/collector/confmap" "go.opentelemetry.io/collector/extension" + "go.opentelemetry.io/collector/extension/extensioncapabilities" + "go.opentelemetry.io/collector/service/internal/builders" "go.opentelemetry.io/collector/service/internal/components" -
"go.opentelemetry.io/collector/service/internal/servicetelemetry" + "go.opentelemetry.io/collector/service/internal/status" "go.opentelemetry.io/collector/service/internal/zpages" ) @@ -23,10 +27,11 @@ const zExtensionName = "zextensionname" // Extensions is a map of extensions created from extension configs. type Extensions struct { - telemetry servicetelemetry.TelemetrySettings + telemetry component.TelemetrySettings extMap map[component.ID]extension.Extension - instanceIDs map[component.ID]*component.InstanceID + instanceIDs map[component.ID]*componentstatus.InstanceID extensionIDs []component.ID // start order (and reverse stop order) + reporter status.Reporter } // Start starts all extensions. @@ -37,18 +42,20 @@ func (bes *Extensions) Start(ctx context.Context, host component.Host) error { extLogger.Info("Extension is starting...") instanceID := bes.instanceIDs[extID] ext := bes.extMap[extID] - bes.telemetry.Status.ReportStatus( + bes.reporter.ReportStatus( instanceID, - component.NewStatusEvent(component.StatusStarting), + componentstatus.NewEvent(componentstatus.StatusStarting), ) if err := ext.Start(ctx, host); err != nil { - bes.telemetry.Status.ReportStatus( + bes.reporter.ReportStatus( instanceID, - component.NewPermanentErrorEvent(err), + componentstatus.NewPermanentErrorEvent(err), ) + // We log with zap.AddStacktrace(zap.DPanicLevel) to avoid adding the stack trace to the error log + extLogger.WithOptions(zap.AddStacktrace(zap.DPanicLevel)).Error("Failed to start extension", zap.Error(err)) return err } - bes.telemetry.Status.ReportOKIfStarting(instanceID) + bes.reporter.ReportOKIfStarting(instanceID) extLogger.Info("Extension started.") } return nil @@ -62,21 +69,21 @@ func (bes *Extensions) Shutdown(ctx context.Context) error { extID := bes.extensionIDs[i] instanceID := bes.instanceIDs[extID] ext := bes.extMap[extID] - bes.telemetry.Status.ReportStatus( + bes.reporter.ReportStatus( instanceID, - component.NewStatusEvent(component.StatusStopping), + componentstatus.NewEvent(componentstatus.StatusStopping), ) if err := ext.Shutdown(ctx); err != nil { - bes.telemetry.Status.ReportStatus( + bes.reporter.ReportStatus( instanceID, - component.NewPermanentErrorEvent(err), + componentstatus.NewPermanentErrorEvent(err), ) errs = multierr.Append(errs, err) continue } - bes.telemetry.Status.ReportStatus( + bes.reporter.ReportStatus( instanceID, - component.NewStatusEvent(component.StatusStopped), + componentstatus.NewEvent(componentstatus.StatusStopped), ) } @@ -86,7 +93,7 @@ func (bes *Extensions) Shutdown(ctx context.Context) error { func (bes *Extensions) NotifyPipelineReady() error { for _, extID := range bes.extensionIDs { ext := bes.extMap[extID] - if pw, ok := ext.(extension.PipelineWatcher); ok { + if pw, ok := ext.(extensioncapabilities.PipelineWatcher); ok { if err := pw.Ready(); err != nil { return fmt.Errorf("failed to notify extension %q: %w", extID, err) } @@ -99,7 +106,7 @@ func (bes *Extensions) NotifyPipelineNotReady() error { var errs error for _, extID := range bes.extensionIDs { ext := bes.extMap[extID] - if pw, ok := ext.(extension.PipelineWatcher); ok { + if pw, ok := ext.(extensioncapabilities.PipelineWatcher); ok { errs = multierr.Append(errs, pw.NotReady()) } } @@ -110,7 +117,7 @@ func (bes *Extensions) NotifyConfig(ctx context.Context, conf *confmap.Conf) err var errs error for _, extID := range bes.extensionIDs { ext := bes.extMap[extID] - if cw, ok := ext.(extension.ConfigWatcher); ok { + if cw, ok := ext.(extensioncapabilities.ConfigWatcher); ok { 
clonedConf := confmap.NewFromStringMap(conf.ToStringMap()) errs = multierr.Append(errs, cw.NotifyConfig(ctx, clonedConf)) } @@ -118,10 +125,10 @@ func (bes *Extensions) NotifyConfig(ctx context.Context, conf *confmap.Conf) err return errs } -func (bes *Extensions) NotifyComponentStatusChange(source *component.InstanceID, event *component.StatusEvent) { +func (bes *Extensions) NotifyComponentStatusChange(source *componentstatus.InstanceID, event *componentstatus.Event) { for _, extID := range bes.extensionIDs { ext := bes.extMap[extID] - if sw, ok := ext.(extension.StatusWatcher); ok { + if sw, ok := ext.(componentstatus.Watcher); ok { sw.ComponentStatusChanged(source, event) } } @@ -163,30 +170,51 @@ func (bes *Extensions) HandleZPages(w http.ResponseWriter, r *http.Request) { // Settings holds configuration for building Extensions. type Settings struct { - Telemetry servicetelemetry.TelemetrySettings - BuildInfo component.BuildInfo + Telemetry component.TelemetrySettings + BuildInfo component.BuildInfo + ModuleInfo extension.ModuleInfo // Extensions builder for extensions. - Extensions *extension.Builder + Extensions builders.Extension +} + +type Option interface { + apply(*Extensions) +} + +type optionFunc func(*Extensions) + +func (of optionFunc) apply(e *Extensions) { + of(e) +} + +func WithReporter(reporter status.Reporter) Option { + return optionFunc(func(e *Extensions) { + e.reporter = reporter + }) } // New creates a new Extensions from Config. -func New(ctx context.Context, set Settings, cfg Config) (*Extensions, error) { +func New(ctx context.Context, set Settings, cfg Config, options ...Option) (*Extensions, error) { exts := &Extensions{ telemetry: set.Telemetry, extMap: make(map[component.ID]extension.Extension), - instanceIDs: make(map[component.ID]*component.InstanceID), + instanceIDs: make(map[component.ID]*componentstatus.InstanceID), extensionIDs: make([]component.ID, 0, len(cfg)), + reporter: &nopReporter{}, } + + for _, opt := range options { + opt.apply(exts) + } + for _, extID := range cfg { - instanceID := &component.InstanceID{ - ID: extID, - Kind: component.KindExtension, - } - extSet := extension.CreateSettings{ + instanceID := componentstatus.NewInstanceID(extID, component.KindExtension) + extSet := extension.Settings{ ID: extID, - TelemetrySettings: set.Telemetry.ToComponentTelemetrySettings(instanceID), + TelemetrySettings: set.Telemetry, BuildInfo: set.BuildInfo, + ModuleInfo: set.ModuleInfo, } extSet.TelemetrySettings.Logger = components.ExtensionLogger(set.Telemetry.Logger, extID) @@ -210,3 +238,11 @@ func New(ctx context.Context, set Settings, cfg Config) (*Extensions, error) { exts.extensionIDs = order return exts, nil } + +type nopReporter struct{} + +func (r *nopReporter) Ready() {} + +func (r *nopReporter) ReportStatus(*componentstatus.InstanceID, *componentstatus.Event) {} + +func (r *nopReporter) ReportOKIfStarting(*componentstatus.InstanceID) {} diff --git a/vendor/go.opentelemetry.io/collector/service/extensions/graph.go b/vendor/go.opentelemetry.io/collector/service/extensions/graph.go index 7099d018a1f..3d3fa4d2b41 100644 --- a/vendor/go.opentelemetry.io/collector/service/extensions/graph.go +++ b/vendor/go.opentelemetry.io/collector/service/extensions/graph.go @@ -13,7 +13,7 @@ import ( "gonum.org/v1/gonum/graph/topo" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/extension" + "go.opentelemetry.io/collector/extension/extensioncapabilities" ) type node struct { @@ -38,7 +38,7 @@ func computeOrder(exts *Extensions) 
([]component.ID, error) { } for extID, ext := range exts.extMap { n := nodes[extID] - if dep, ok := ext.(extension.Dependent); ok { + if dep, ok := ext.(extensioncapabilities.Dependent); ok { for _, depID := range dep.Dependencies() { if d, ok := nodes[depID]; ok { graph.SetEdge(graph.NewEdge(d, n)) @@ -73,5 +73,5 @@ func cycleErr(err error, cycles [][]graph.Node) error { names = append(names, node.extID.String()) } cycleStr := "[" + strings.Join(names, " -> ") + "]" - return fmt.Errorf("unable to order extenions by dependencies, cycle found %s: %w", cycleStr, err) + return fmt.Errorf("unable to order extensions by dependencies, cycle found %s: %w", cycleStr, err) } diff --git a/vendor/go.opentelemetry.io/collector/service/host.go b/vendor/go.opentelemetry.io/collector/service/host.go deleted file mode 100644 index ce8dc530d40..00000000000 --- a/vendor/go.opentelemetry.io/collector/service/host.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package service // import "go.opentelemetry.io/collector/service" - -import ( - "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/connector" - "go.opentelemetry.io/collector/exporter" - "go.opentelemetry.io/collector/extension" - "go.opentelemetry.io/collector/processor" - "go.opentelemetry.io/collector/receiver" - "go.opentelemetry.io/collector/service/extensions" - "go.opentelemetry.io/collector/service/internal/graph" -) - -// TODO: remove as part of https://github.com/open-telemetry/opentelemetry-collector/issues/7370 for service 1.0 -type getExporters interface { - GetExporters() map[component.DataType]map[component.ID]component.Component -} - -var _ getExporters = (*serviceHost)(nil) -var _ component.Host = (*serviceHost)(nil) - -type serviceHost struct { - asyncErrorChannel chan error - receivers *receiver.Builder - processors *processor.Builder - exporters *exporter.Builder - connectors *connector.Builder - extensions *extension.Builder - - buildInfo component.BuildInfo - - pipelines *graph.Graph - serviceExtensions *extensions.Extensions -} - -func (host *serviceHost) GetFactory(kind component.Kind, componentType component.Type) component.Factory { - switch kind { - case component.KindReceiver: - return host.receivers.Factory(componentType) - case component.KindProcessor: - return host.processors.Factory(componentType) - case component.KindExporter: - return host.exporters.Factory(componentType) - case component.KindConnector: - return host.connectors.Factory(componentType) - case component.KindExtension: - return host.extensions.Factory(componentType) - } - return nil -} - -func (host *serviceHost) GetExtensions() map[component.ID]component.Component { - return host.serviceExtensions.GetExtensions() -} - -// Deprecated: [0.79.0] This function will be removed in the future. -// Several components in the contrib repository use this function so it cannot be removed -// before those cases are removed. In most cases, use of this function can be replaced by a -// connector. See https://github.com/open-telemetry/opentelemetry-collector/issues/7370 and -// https://github.com/open-telemetry/opentelemetry-collector/pull/7390#issuecomment-1483710184 -// for additional information. 
-func (host *serviceHost) GetExporters() map[component.DataType]map[component.ID]component.Component { - return host.pipelines.GetExporters() -} - -func (host *serviceHost) notifyComponentStatusChange(source *component.InstanceID, event *component.StatusEvent) { - host.serviceExtensions.NotifyComponentStatusChange(source, event) - if event.Status() == component.StatusFatalError { - host.asyncErrorChannel <- event.Err() - } -} diff --git a/vendor/go.opentelemetry.io/collector/service/internal/builders/builders.go b/vendor/go.opentelemetry.io/collector/service/internal/builders/builders.go new file mode 100644 index 00000000000..bf5844bd932 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/service/internal/builders/builders.go @@ -0,0 +1,28 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package builders // import "go.opentelemetry.io/collector/service/internal/builders" + +import ( + "errors" + + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" +) + +var ( + errNilNextConsumer = errors.New("nil next Consumer") + nopType = component.MustNewType("nop") +) + +// logStabilityLevel logs the stability level of a component. The log level is set to info for +// undefined, unmaintained, deprecated and development. The log level is set to debug +// for alpha, beta and stable. +func logStabilityLevel(logger *zap.Logger, sl component.StabilityLevel) { + if sl >= component.StabilityLevelAlpha { + logger.Debug(sl.LogMessage()) + } else { + logger.Info(sl.LogMessage()) + } +} diff --git a/vendor/go.opentelemetry.io/collector/service/internal/builders/connector.go b/vendor/go.opentelemetry.io/collector/service/internal/builders/connector.go new file mode 100644 index 00000000000..b157d2dd64a --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/service/internal/builders/connector.go @@ -0,0 +1,399 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package builders // import "go.opentelemetry.io/collector/service/internal/builders" + +import ( + "context" + "fmt" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/connector" + "go.opentelemetry.io/collector/connector/connectortest" + "go.opentelemetry.io/collector/connector/xconnector" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/xconsumer" + "go.opentelemetry.io/collector/pipeline" + "go.opentelemetry.io/collector/pipeline/xpipeline" +) + +func errDataTypes(id component.ID, from, to pipeline.Signal) error { + return fmt.Errorf("connector %q cannot connect from %s to %s: %w", id, from, to, pipeline.ErrSignalNotSupported) +} + +// ConnectorBuilder is a helper struct that given a set of Configs and Factories helps with creating connectors. +type ConnectorBuilder struct { + cfgs map[component.ID]component.Config + factories map[component.Type]connector.Factory +} + +// NewConnector creates a new ConnectorBuilder to help with creating components form a set of configs and factories. +func NewConnector(cfgs map[component.ID]component.Config, factories map[component.Type]connector.Factory) *ConnectorBuilder { + return &ConnectorBuilder{cfgs: cfgs, factories: factories} +} + +// CreateTracesToTraces creates a Traces connector based on the settings and config. 
+func (b *ConnectorBuilder) CreateTracesToTraces(ctx context.Context, set connector.Settings, next consumer.Traces) (connector.Traces, error) { + if next == nil { + return nil, errNilNextConsumer + } + cfg, existsCfg := b.cfgs[set.ID] + if !existsCfg { + return nil, fmt.Errorf("connector %q is not configured", set.ID) + } + + f, existsFactory := b.factories[set.ID.Type()] + if !existsFactory { + return nil, fmt.Errorf("connector factory not available for: %q", set.ID) + } + + logStabilityLevel(set.Logger, f.TracesToTracesStability()) + return f.CreateTracesToTraces(ctx, set, cfg, next) +} + +// CreateTracesToMetrics creates a Traces connector based on the settings and config. +func (b *ConnectorBuilder) CreateTracesToMetrics(ctx context.Context, set connector.Settings, next consumer.Metrics) (connector.Traces, error) { + if next == nil { + return nil, errNilNextConsumer + } + cfg, existsCfg := b.cfgs[set.ID] + if !existsCfg { + return nil, fmt.Errorf("connector %q is not configured", set.ID) + } + + f, existsFactory := b.factories[set.ID.Type()] + if !existsFactory { + return nil, fmt.Errorf("connector factory not available for: %q", set.ID) + } + + logStabilityLevel(set.Logger, f.TracesToMetricsStability()) + return f.CreateTracesToMetrics(ctx, set, cfg, next) +} + +// CreateTracesToLogs creates a Traces connector based on the settings and config. +func (b *ConnectorBuilder) CreateTracesToLogs(ctx context.Context, set connector.Settings, next consumer.Logs) (connector.Traces, error) { + if next == nil { + return nil, errNilNextConsumer + } + cfg, existsCfg := b.cfgs[set.ID] + if !existsCfg { + return nil, fmt.Errorf("connector %q is not configured", set.ID) + } + + f, existsFactory := b.factories[set.ID.Type()] + if !existsFactory { + return nil, fmt.Errorf("connector factory not available for: %q", set.ID) + } + + logStabilityLevel(set.Logger, f.TracesToLogsStability()) + return f.CreateTracesToLogs(ctx, set, cfg, next) +} + +// CreateTracesToProfiles creates a Traces connector based on the settings and config. +func (b *ConnectorBuilder) CreateTracesToProfiles(ctx context.Context, set connector.Settings, next xconsumer.Profiles) (connector.Traces, error) { + if next == nil { + return nil, errNilNextConsumer + } + cfg, existsCfg := b.cfgs[set.ID] + if !existsCfg { + return nil, fmt.Errorf("connector %q is not configured", set.ID) + } + + connFact, existsFactory := b.factories[set.ID.Type()] + if !existsFactory { + return nil, fmt.Errorf("connector factory not available for: %q", set.ID) + } + + f, ok := connFact.(xconnector.Factory) + if !ok { + return nil, errDataTypes(set.ID, pipeline.SignalTraces, xpipeline.SignalProfiles) + } + + logStabilityLevel(set.Logger, f.TracesToProfilesStability()) + return f.CreateTracesToProfiles(ctx, set, cfg, next) +} + +// CreateMetricsToTraces creates a Metrics connector based on the settings and config. 
+func (b *ConnectorBuilder) CreateMetricsToTraces(ctx context.Context, set connector.Settings, next consumer.Traces) (connector.Metrics, error) { + if next == nil { + return nil, errNilNextConsumer + } + cfg, existsCfg := b.cfgs[set.ID] + if !existsCfg { + return nil, fmt.Errorf("connector %q is not configured", set.ID) + } + + f, existsFactory := b.factories[set.ID.Type()] + if !existsFactory { + return nil, fmt.Errorf("connector factory not available for: %q", set.ID) + } + + logStabilityLevel(set.Logger, f.MetricsToTracesStability()) + return f.CreateMetricsToTraces(ctx, set, cfg, next) +} + +// CreateMetricsToMetrics creates a Metrics connector based on the settings and config. +func (b *ConnectorBuilder) CreateMetricsToMetrics(ctx context.Context, set connector.Settings, next consumer.Metrics) (connector.Metrics, error) { + if next == nil { + return nil, errNilNextConsumer + } + cfg, existsCfg := b.cfgs[set.ID] + if !existsCfg { + return nil, fmt.Errorf("connector %q is not configured", set.ID) + } + + f, existsFactory := b.factories[set.ID.Type()] + if !existsFactory { + return nil, fmt.Errorf("connector factory not available for: %q", set.ID) + } + + logStabilityLevel(set.Logger, f.MetricsToMetricsStability()) + return f.CreateMetricsToMetrics(ctx, set, cfg, next) +} + +// CreateMetricsToLogs creates a Metrics connector based on the settings and config. +func (b *ConnectorBuilder) CreateMetricsToLogs(ctx context.Context, set connector.Settings, next consumer.Logs) (connector.Metrics, error) { + if next == nil { + return nil, errNilNextConsumer + } + cfg, existsCfg := b.cfgs[set.ID] + if !existsCfg { + return nil, fmt.Errorf("connector %q is not configured", set.ID) + } + + f, existsFactory := b.factories[set.ID.Type()] + if !existsFactory { + return nil, fmt.Errorf("connector factory not available for: %q", set.ID) + } + + logStabilityLevel(set.Logger, f.MetricsToLogsStability()) + return f.CreateMetricsToLogs(ctx, set, cfg, next) +} + +// CreateMetricsToProfiles creates a Metrics connector based on the settings and config. +func (b *ConnectorBuilder) CreateMetricsToProfiles(ctx context.Context, set connector.Settings, next xconsumer.Profiles) (connector.Metrics, error) { + if next == nil { + return nil, errNilNextConsumer + } + cfg, existsCfg := b.cfgs[set.ID] + if !existsCfg { + return nil, fmt.Errorf("connector %q is not configured", set.ID) + } + + connFact, existsFactory := b.factories[set.ID.Type()] + if !existsFactory { + return nil, fmt.Errorf("connector factory not available for: %q", set.ID) + } + + f, ok := connFact.(xconnector.Factory) + if !ok { + return nil, errDataTypes(set.ID, pipeline.SignalMetrics, xpipeline.SignalProfiles) + } + + logStabilityLevel(set.Logger, f.MetricsToProfilesStability()) + return f.CreateMetricsToProfiles(ctx, set, cfg, next) +} + +// CreateLogsToTraces creates a Logs connector based on the settings and config. 
+func (b *ConnectorBuilder) CreateLogsToTraces(ctx context.Context, set connector.Settings, next consumer.Traces) (connector.Logs, error) { + if next == nil { + return nil, errNilNextConsumer + } + cfg, existsCfg := b.cfgs[set.ID] + if !existsCfg { + return nil, fmt.Errorf("connector %q is not configured", set.ID) + } + + f, existsFactory := b.factories[set.ID.Type()] + if !existsFactory { + return nil, fmt.Errorf("connector factory not available for: %q", set.ID) + } + + logStabilityLevel(set.Logger, f.LogsToTracesStability()) + return f.CreateLogsToTraces(ctx, set, cfg, next) +} + +// CreateLogsToMetrics creates a Logs connector based on the settings and config. +func (b *ConnectorBuilder) CreateLogsToMetrics(ctx context.Context, set connector.Settings, next consumer.Metrics) (connector.Logs, error) { + if next == nil { + return nil, errNilNextConsumer + } + cfg, existsCfg := b.cfgs[set.ID] + if !existsCfg { + return nil, fmt.Errorf("connector %q is not configured", set.ID) + } + + f, existsFactory := b.factories[set.ID.Type()] + if !existsFactory { + return nil, fmt.Errorf("connector factory not available for: %q", set.ID) + } + + logStabilityLevel(set.Logger, f.LogsToMetricsStability()) + return f.CreateLogsToMetrics(ctx, set, cfg, next) +} + +// CreateLogsToLogs creates a Logs connector based on the settings and config. +func (b *ConnectorBuilder) CreateLogsToLogs(ctx context.Context, set connector.Settings, next consumer.Logs) (connector.Logs, error) { + if next == nil { + return nil, errNilNextConsumer + } + cfg, existsCfg := b.cfgs[set.ID] + if !existsCfg { + return nil, fmt.Errorf("connector %q is not configured", set.ID) + } + + f, existsFactory := b.factories[set.ID.Type()] + if !existsFactory { + return nil, fmt.Errorf("connector factory not available for: %q", set.ID) + } + + logStabilityLevel(set.Logger, f.LogsToLogsStability()) + return f.CreateLogsToLogs(ctx, set, cfg, next) +} + +// CreateLogsToProfiles creates a Logs connector based on the settings and config. +func (b *ConnectorBuilder) CreateLogsToProfiles(ctx context.Context, set connector.Settings, next xconsumer.Profiles) (connector.Logs, error) { + if next == nil { + return nil, errNilNextConsumer + } + cfg, existsCfg := b.cfgs[set.ID] + if !existsCfg { + return nil, fmt.Errorf("connector %q is not configured", set.ID) + } + + connFact, existsFactory := b.factories[set.ID.Type()] + if !existsFactory { + return nil, fmt.Errorf("connector factory not available for: %q", set.ID) + } + + f, ok := connFact.(xconnector.Factory) + if !ok { + return nil, errDataTypes(set.ID, pipeline.SignalLogs, xpipeline.SignalProfiles) + } + + logStabilityLevel(set.Logger, f.LogsToProfilesStability()) + return f.CreateLogsToProfiles(ctx, set, cfg, next) +} + +// CreateProfilesToTraces creates a Profiles connector based on the settings and config. 
+func (b *ConnectorBuilder) CreateProfilesToTraces(ctx context.Context, set connector.Settings, next consumer.Traces) (xconnector.Profiles, error) { + if next == nil { + return nil, errNilNextConsumer + } + cfg, existsCfg := b.cfgs[set.ID] + if !existsCfg { + return nil, fmt.Errorf("connector %q is not configured", set.ID) + } + + connFact, existsFactory := b.factories[set.ID.Type()] + if !existsFactory { + return nil, fmt.Errorf("connector factory not available for: %q", set.ID) + } + + f, ok := connFact.(xconnector.Factory) + if !ok { + return nil, errDataTypes(set.ID, xpipeline.SignalProfiles, pipeline.SignalTraces) + } + + logStabilityLevel(set.Logger, f.ProfilesToTracesStability()) + return f.CreateProfilesToTraces(ctx, set, cfg, next) +} + +// CreateProfilesToMetrics creates a Profiles connector based on the settings and config. +func (b *ConnectorBuilder) CreateProfilesToMetrics(ctx context.Context, set connector.Settings, next consumer.Metrics) (xconnector.Profiles, error) { + if next == nil { + return nil, errNilNextConsumer + } + cfg, existsCfg := b.cfgs[set.ID] + if !existsCfg { + return nil, fmt.Errorf("connector %q is not configured", set.ID) + } + + connFact, existsFactory := b.factories[set.ID.Type()] + if !existsFactory { + return nil, fmt.Errorf("connector factory not available for: %q", set.ID) + } + + f, ok := connFact.(xconnector.Factory) + if !ok { + return nil, errDataTypes(set.ID, xpipeline.SignalProfiles, pipeline.SignalMetrics) + } + + logStabilityLevel(set.Logger, f.ProfilesToMetricsStability()) + return f.CreateProfilesToMetrics(ctx, set, cfg, next) +} + +// CreateProfilesToLogs creates a Profiles connector based on the settings and config. +func (b *ConnectorBuilder) CreateProfilesToLogs(ctx context.Context, set connector.Settings, next consumer.Logs) (xconnector.Profiles, error) { + if next == nil { + return nil, errNilNextConsumer + } + cfg, existsCfg := b.cfgs[set.ID] + if !existsCfg { + return nil, fmt.Errorf("connector %q is not configured", set.ID) + } + + connFact, existsFactory := b.factories[set.ID.Type()] + if !existsFactory { + return nil, fmt.Errorf("connector factory not available for: %q", set.ID) + } + + f, ok := connFact.(xconnector.Factory) + if !ok { + return nil, errDataTypes(set.ID, xpipeline.SignalProfiles, pipeline.SignalLogs) + } + + logStabilityLevel(set.Logger, f.ProfilesToLogsStability()) + return f.CreateProfilesToLogs(ctx, set, cfg, next) +} + +// CreateProfilesToProfiles creates a Profiles connector based on the settings and config. 
+func (b *ConnectorBuilder) CreateProfilesToProfiles(ctx context.Context, set connector.Settings, next xconsumer.Profiles) (xconnector.Profiles, error) { + if next == nil { + return nil, errNilNextConsumer + } + cfg, existsCfg := b.cfgs[set.ID] + if !existsCfg { + return nil, fmt.Errorf("connector %q is not configured", set.ID) + } + + connFact, existsFactory := b.factories[set.ID.Type()] + if !existsFactory { + return nil, fmt.Errorf("connector factory not available for: %q", set.ID) + } + + f, ok := connFact.(xconnector.Factory) + if !ok { + return nil, errDataTypes(set.ID, xpipeline.SignalProfiles, xpipeline.SignalProfiles) + } + + logStabilityLevel(set.Logger, f.ProfilesToProfilesStability()) + return f.CreateProfilesToProfiles(ctx, set, cfg, next) +} + +func (b *ConnectorBuilder) IsConfigured(componentID component.ID) bool { + _, ok := b.cfgs[componentID] + return ok +} + +func (b *ConnectorBuilder) Factory(componentType component.Type) component.Factory { + return b.factories[componentType] +} + +// NewNopConnectorConfigsAndFactories returns a configuration and factories that allows building a new nop connector. +func NewNopConnectorConfigsAndFactories() (map[component.ID]component.Config, map[component.Type]connector.Factory) { + nopFactory := connectortest.NewNopFactory() + // Use a different ID than receivertest and exportertest to avoid ambiguous + // configuration scenarios. Ambiguous IDs are detected in the 'otelcol' package, + // but lower level packages such as 'service' assume that IDs are disambiguated. + connID := component.NewIDWithName(nopType, "conn") + + configs := map[component.ID]component.Config{ + connID: nopFactory.CreateDefaultConfig(), + } + factories := map[component.Type]connector.Factory{ + nopType: nopFactory, + } + + return configs, factories +} diff --git a/vendor/go.opentelemetry.io/collector/service/internal/builders/exporter.go b/vendor/go.opentelemetry.io/collector/service/internal/builders/exporter.go new file mode 100644 index 00000000000..6570828b76f --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/service/internal/builders/exporter.go @@ -0,0 +1,112 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package builders // import "go.opentelemetry.io/collector/service/internal/builders" + +import ( + "context" + "fmt" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/exporter" + "go.opentelemetry.io/collector/exporter/exportertest" + "go.opentelemetry.io/collector/exporter/xexporter" + "go.opentelemetry.io/collector/pipeline" +) + +// ExporterBuilder is a helper struct that given a set of Configs and Factories helps with creating exporters. +type ExporterBuilder struct { + cfgs map[component.ID]component.Config + factories map[component.Type]exporter.Factory +} + +// NewExporter creates a new ExporterBuilder to help with creating components form a set of configs and factories. +func NewExporter(cfgs map[component.ID]component.Config, factories map[component.Type]exporter.Factory) *ExporterBuilder { + return &ExporterBuilder{cfgs: cfgs, factories: factories} +} + +// CreateTraces creates a Traces exporter based on the settings and config. 
+func (b *ExporterBuilder) CreateTraces(ctx context.Context, set exporter.Settings) (exporter.Traces, error) { + cfg, existsCfg := b.cfgs[set.ID] + if !existsCfg { + return nil, fmt.Errorf("exporter %q is not configured", set.ID) + } + + f, existsFactory := b.factories[set.ID.Type()] + if !existsFactory { + return nil, fmt.Errorf("exporter factory not available for: %q", set.ID) + } + + logStabilityLevel(set.Logger, f.TracesStability()) + return f.CreateTraces(ctx, set, cfg) +} + +// CreateMetrics creates a Metrics exporter based on the settings and config. +func (b *ExporterBuilder) CreateMetrics(ctx context.Context, set exporter.Settings) (exporter.Metrics, error) { + cfg, existsCfg := b.cfgs[set.ID] + if !existsCfg { + return nil, fmt.Errorf("exporter %q is not configured", set.ID) + } + + f, existsFactory := b.factories[set.ID.Type()] + if !existsFactory { + return nil, fmt.Errorf("exporter factory not available for: %q", set.ID) + } + + logStabilityLevel(set.Logger, f.MetricsStability()) + return f.CreateMetrics(ctx, set, cfg) +} + +// CreateLogs creates a Logs exporter based on the settings and config. +func (b *ExporterBuilder) CreateLogs(ctx context.Context, set exporter.Settings) (exporter.Logs, error) { + cfg, existsCfg := b.cfgs[set.ID] + if !existsCfg { + return nil, fmt.Errorf("exporter %q is not configured", set.ID) + } + + f, existsFactory := b.factories[set.ID.Type()] + if !existsFactory { + return nil, fmt.Errorf("exporter factory not available for: %q", set.ID) + } + + logStabilityLevel(set.Logger, f.LogsStability()) + return f.CreateLogs(ctx, set, cfg) +} + +// CreateProfiles creates a Profiles exporter based on the settings and config. +func (b *ExporterBuilder) CreateProfiles(ctx context.Context, set exporter.Settings) (xexporter.Profiles, error) { + cfg, existsCfg := b.cfgs[set.ID] + if !existsCfg { + return nil, fmt.Errorf("exporter %q is not configured", set.ID) + } + + expFact, existsFactory := b.factories[set.ID.Type()] + if !existsFactory { + return nil, fmt.Errorf("exporter factory not available for: %q", set.ID) + } + + f, ok := expFact.(xexporter.Factory) + if !ok { + return nil, pipeline.ErrSignalNotSupported + } + + logStabilityLevel(set.Logger, f.ProfilesStability()) + return f.CreateProfiles(ctx, set, cfg) +} + +func (b *ExporterBuilder) Factory(componentType component.Type) component.Factory { + return b.factories[componentType] +} + +// NewNopExporterConfigsAndFactories returns a configuration and factories that allows building a new nop exporter. 
+func NewNopExporterConfigsAndFactories() (map[component.ID]component.Config, map[component.Type]exporter.Factory) {
+	nopFactory := exportertest.NewNopFactory()
+	configs := map[component.ID]component.Config{
+		component.NewID(nopType): nopFactory.CreateDefaultConfig(),
+	}
+	factories := map[component.Type]exporter.Factory{
+		nopType: nopFactory,
+	}
+
+	return configs, factories
+}
diff --git a/vendor/go.opentelemetry.io/collector/service/internal/builders/extension.go b/vendor/go.opentelemetry.io/collector/service/internal/builders/extension.go
new file mode 100644
index 00000000000..6ac32fe5698
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/service/internal/builders/extension.go
@@ -0,0 +1,69 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package builders // import "go.opentelemetry.io/collector/service/internal/builders"
+
+import (
+	"context"
+	"fmt"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/extension"
+	"go.opentelemetry.io/collector/extension/extensiontest"
+)
+
+// Extension is an interface that allows using implementations of the builder
+// from different packages.
+type Extension interface {
+	Create(context.Context, extension.Settings) (extension.Extension, error)
+	Factory(component.Type) component.Factory
+}
+
+// ExtensionBuilder is a helper struct that given a set of Configs and Factories helps with creating extensions.
+type ExtensionBuilder struct {
+	cfgs      map[component.ID]component.Config
+	factories map[component.Type]extension.Factory
+}
+
+// NewExtension creates a new ExtensionBuilder to help with creating
+// components from a set of configs and factories.
+func NewExtension(cfgs map[component.ID]component.Config, factories map[component.Type]extension.Factory) *ExtensionBuilder {
+	return &ExtensionBuilder{cfgs: cfgs, factories: factories}
+}
+
+// Create creates an extension based on the settings and configs available.
+func (b *ExtensionBuilder) Create(ctx context.Context, set extension.Settings) (extension.Extension, error) {
+	cfg, existsCfg := b.cfgs[set.ID]
+	if !existsCfg {
+		return nil, fmt.Errorf("extension %q is not configured", set.ID)
+	}
+
+	f, existsFactory := b.factories[set.ID.Type()]
+	if !existsFactory {
+		return nil, fmt.Errorf("extension factory not available for: %q", set.ID)
+	}
+
+	sl := f.Stability()
+	if sl >= component.StabilityLevelAlpha {
+		set.Logger.Debug(sl.LogMessage())
+	} else {
+		set.Logger.Info(sl.LogMessage())
+	}
+	return f.Create(ctx, set, cfg)
+}
+
+func (b *ExtensionBuilder) Factory(componentType component.Type) component.Factory {
+	return b.factories[componentType]
+}
+
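Unlike the other builders, Create above inlines its stability logging rather than calling the shared logStabilityLevel helper used elsewhere in this package; the rule is the same in both places. A reduced sketch of that rule, which mirrors rather than replaces the package's helper:

    import (
    	"go.uber.org/zap"

    	"go.opentelemetry.io/collector/component"
    )

    // Stability logging rule used across the builders: components at or above
    // alpha stability log at Debug; anything less stable is surfaced at Info.
    func logStabilitySketch(logger *zap.Logger, sl component.StabilityLevel) {
    	if sl >= component.StabilityLevelAlpha {
    		logger.Debug(sl.LogMessage())
    	} else {
    		logger.Info(sl.LogMessage())
    	}
    }

+// NewNopExtensionConfigsAndFactories returns a configuration and factories that allows building a new nop extension.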
+func NewNopExtensionConfigsAndFactories() (map[component.ID]component.Config, map[component.Type]extension.Factory) {
+	nopFactory := extensiontest.NewNopFactory()
+	configs := map[component.ID]component.Config{
+		component.NewID(nopType): nopFactory.CreateDefaultConfig(),
+	}
+	factories := map[component.Type]extension.Factory{
+		nopType: nopFactory,
+	}
+	return configs, factories
+}
diff --git a/vendor/go.opentelemetry.io/collector/service/internal/builders/processor.go b/vendor/go.opentelemetry.io/collector/service/internal/builders/processor.go
new file mode 100644
index 00000000000..c0df0f3b575
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/service/internal/builders/processor.go
@@ -0,0 +1,126 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package builders // import "go.opentelemetry.io/collector/service/internal/builders"
+
+import (
+	"context"
+	"fmt"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/consumer"
+	"go.opentelemetry.io/collector/consumer/xconsumer"
+	"go.opentelemetry.io/collector/pipeline"
+	"go.opentelemetry.io/collector/processor"
+	"go.opentelemetry.io/collector/processor/processortest"
+	"go.opentelemetry.io/collector/processor/xprocessor"
+)
+
+// ProcessorBuilder is a helper struct that given a set of Configs
+// and Factories helps with creating processors.
+type ProcessorBuilder struct {
+	cfgs      map[component.ID]component.Config
+	factories map[component.Type]processor.Factory
+}
+
+// NewProcessor creates a new ProcessorBuilder to help with creating components from a set of configs and factories.
+func NewProcessor(cfgs map[component.ID]component.Config, factories map[component.Type]processor.Factory) *ProcessorBuilder {
+	return &ProcessorBuilder{cfgs: cfgs, factories: factories}
+}
+
+// CreateTraces creates a Traces processor based on the settings and config.
+func (b *ProcessorBuilder) CreateTraces(ctx context.Context, set processor.Settings, next consumer.Traces) (processor.Traces, error) {
+	if next == nil {
+		return nil, errNilNextConsumer
+	}
+	cfg, existsCfg := b.cfgs[set.ID]
+	if !existsCfg {
+		return nil, fmt.Errorf("processor %q is not configured", set.ID)
+	}
+
+	f, existsFactory := b.factories[set.ID.Type()]
+	if !existsFactory {
+		return nil, fmt.Errorf("processor factory not available for: %q", set.ID)
+	}
+
+	logStabilityLevel(set.Logger, f.TracesStability())
+	return f.CreateTraces(ctx, set, cfg, next)
+}
+
+// CreateMetrics creates a Metrics processor based on the settings and config.
+func (b *ProcessorBuilder) CreateMetrics(ctx context.Context, set processor.Settings, next consumer.Metrics) (processor.Metrics, error) {
+	if next == nil {
+		return nil, errNilNextConsumer
+	}
+	cfg, existsCfg := b.cfgs[set.ID]
+	if !existsCfg {
+		return nil, fmt.Errorf("processor %q is not configured", set.ID)
+	}
+
+	f, existsFactory := b.factories[set.ID.Type()]
+	if !existsFactory {
+		return nil, fmt.Errorf("processor factory not available for: %q", set.ID)
+	}
+
+	logStabilityLevel(set.Logger, f.MetricsStability())
+	return f.CreateMetrics(ctx, set, cfg, next)
+}
+
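CreateProfiles below (and its analogues on the other builders) adds one extra step: profiles are an experimental signal, so the stable processor.Factory is first type-asserted to the experimental xprocessor.Factory, and a factory that does not implement it yields pipeline.ErrSignalNotSupported. The check in isolation, as a sketch with a hypothetical helper name:

    // profilesFactory (hypothetical) isolates the experimental-signal gate
    // used by CreateProfiles: stable-only factories fail the assertion.
    func profilesFactory(f processor.Factory) (xprocessor.Factory, error) {
    	xf, ok := f.(xprocessor.Factory)
    	if !ok {
    		return nil, pipeline.ErrSignalNotSupported
    	}
    	return xf, nil
    }

+// CreateLogs creates a Logs processor based on the settings and config.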
+func (b *ProcessorBuilder) CreateLogs(ctx context.Context, set processor.Settings, next consumer.Logs) (processor.Logs, error) {
+	if next == nil {
+		return nil, errNilNextConsumer
+	}
+	cfg, existsCfg := b.cfgs[set.ID]
+	if !existsCfg {
+		return nil, fmt.Errorf("processor %q is not configured", set.ID)
+	}
+
+	f, existsFactory := b.factories[set.ID.Type()]
+	if !existsFactory {
+		return nil, fmt.Errorf("processor factory not available for: %q", set.ID)
+	}
+
+	logStabilityLevel(set.Logger, f.LogsStability())
+	return f.CreateLogs(ctx, set, cfg, next)
+}
+
+// CreateProfiles creates a Profiles processor based on the settings and config.
+func (b *ProcessorBuilder) CreateProfiles(ctx context.Context, set processor.Settings, next xconsumer.Profiles) (xprocessor.Profiles, error) {
+	if next == nil {
+		return nil, errNilNextConsumer
+	}
+	cfg, existsCfg := b.cfgs[set.ID]
+	if !existsCfg {
+		return nil, fmt.Errorf("processor %q is not configured", set.ID)
+	}
+
+	procFact, existsFactory := b.factories[set.ID.Type()]
+	if !existsFactory {
+		return nil, fmt.Errorf("processor factory not available for: %q", set.ID)
+	}
+
+	f, ok := procFact.(xprocessor.Factory)
+	if !ok {
+		return nil, pipeline.ErrSignalNotSupported
+	}
+	logStabilityLevel(set.Logger, f.ProfilesStability())
+	return f.CreateProfiles(ctx, set, cfg, next)
+}
+
+func (b *ProcessorBuilder) Factory(componentType component.Type) component.Factory {
+	return b.factories[componentType]
+}
+
+// NewNopProcessorConfigsAndFactories returns a configuration and factories that allows building a new nop processor.
+func NewNopProcessorConfigsAndFactories() (map[component.ID]component.Config, map[component.Type]processor.Factory) {
+	nopFactory := processortest.NewNopFactory()
+	configs := map[component.ID]component.Config{
+		component.NewID(nopType): nopFactory.CreateDefaultConfig(),
+	}
+	factories := map[component.Type]processor.Factory{
+		nopType: nopFactory,
+	}
+
+	return configs, factories
+}
diff --git a/vendor/go.opentelemetry.io/collector/service/internal/builders/receiver.go b/vendor/go.opentelemetry.io/collector/service/internal/builders/receiver.go
new file mode 100644
index 00000000000..007d9be2187
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/service/internal/builders/receiver.go
@@ -0,0 +1,128 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package builders // import "go.opentelemetry.io/collector/service/internal/builders"
+
+import (
+	"context"
+	"fmt"
+
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/consumer"
+	"go.opentelemetry.io/collector/consumer/xconsumer"
+	"go.opentelemetry.io/collector/pipeline"
+	"go.opentelemetry.io/collector/receiver"
+	"go.opentelemetry.io/collector/receiver/receivertest"
+	"go.opentelemetry.io/collector/receiver/xreceiver"
+)
+
+// ReceiverBuilder is a helper struct that given a set of Configs and
+// Factories helps with creating receivers.
+type ReceiverBuilder struct {
+	cfgs      map[component.ID]component.Config
+	factories map[component.Type]receiver.Factory
+}
+
+// NewReceiver creates a new ReceiverBuilder to help with creating
+// components from a set of configs and factories.
+func NewReceiver(cfgs map[component.ID]component.Config, factories map[component.Type]receiver.Factory) *ReceiverBuilder {
+	return &ReceiverBuilder{cfgs: cfgs, factories: factories}
+}
+
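Receivers are the one component kind whose next consumer may span several pipelines: the graph code later in this patch wraps each pipeline's first consumer in a fanout consumer before handing it to CreateTraces and friends. The shape of that call, sketched with hypothetical next consumers (fanoutconsumer is collector-internal and shown only for orientation):

    // pipeA and pipeB stand in for the capabilities nodes of two traces
    // pipelines fed by the same receiver (hypothetical values).
    var pipeA, pipeB consumer.Traces
    next := fanoutconsumer.NewTraces([]consumer.Traces{pipeA, pipeB})
    recv, err := b.CreateTraces(ctx, set, next)

+// CreateTraces creates a Traces receiver based on the settings and config.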
+func (b *ReceiverBuilder) CreateTraces(ctx context.Context, set receiver.Settings, next consumer.Traces) (receiver.Traces, error) { + if next == nil { + return nil, errNilNextConsumer + } + cfg, existsCfg := b.cfgs[set.ID] + if !existsCfg { + return nil, fmt.Errorf("receiver %q is not configured", set.ID) + } + + f, existsFactory := b.factories[set.ID.Type()] + if !existsFactory { + return nil, fmt.Errorf("receiver factory not available for: %q", set.ID) + } + + logStabilityLevel(set.Logger, f.TracesStability()) + return f.CreateTraces(ctx, set, cfg, next) +} + +// CreateMetrics creates a Metrics receiver based on the settings and config. +func (b *ReceiverBuilder) CreateMetrics(ctx context.Context, set receiver.Settings, next consumer.Metrics) (receiver.Metrics, error) { + if next == nil { + return nil, errNilNextConsumer + } + cfg, existsCfg := b.cfgs[set.ID] + if !existsCfg { + return nil, fmt.Errorf("receiver %q is not configured", set.ID) + } + + f, existsFactory := b.factories[set.ID.Type()] + if !existsFactory { + return nil, fmt.Errorf("receiver factory not available for: %q", set.ID) + } + + logStabilityLevel(set.Logger, f.MetricsStability()) + return f.CreateMetrics(ctx, set, cfg, next) +} + +// CreateLogs creates a Logs receiver based on the settings and config. +func (b *ReceiverBuilder) CreateLogs(ctx context.Context, set receiver.Settings, next consumer.Logs) (receiver.Logs, error) { + if next == nil { + return nil, errNilNextConsumer + } + cfg, existsCfg := b.cfgs[set.ID] + if !existsCfg { + return nil, fmt.Errorf("receiver %q is not configured", set.ID) + } + + f, existsFactory := b.factories[set.ID.Type()] + if !existsFactory { + return nil, fmt.Errorf("receiver factory not available for: %q", set.ID) + } + + logStabilityLevel(set.Logger, f.LogsStability()) + return f.CreateLogs(ctx, set, cfg, next) +} + +// CreateProfiles creates a Profiles receiver based on the settings and config. +func (b *ReceiverBuilder) CreateProfiles(ctx context.Context, set receiver.Settings, next xconsumer.Profiles) (xreceiver.Profiles, error) { + if next == nil { + return nil, errNilNextConsumer + } + cfg, existsCfg := b.cfgs[set.ID] + if !existsCfg { + return nil, fmt.Errorf("receiver %q is not configured", set.ID) + } + + recvFact, existsFactory := b.factories[set.ID.Type()] + if !existsFactory { + return nil, fmt.Errorf("receiver factory not available for: %q", set.ID) + } + + f, ok := recvFact.(xreceiver.Factory) + if !ok { + return nil, pipeline.ErrSignalNotSupported + } + + logStabilityLevel(set.Logger, f.ProfilesStability()) + return f.CreateProfiles(ctx, set, cfg, next) +} + +func (b *ReceiverBuilder) Factory(componentType component.Type) component.Factory { + return b.factories[componentType] +} + +// NewNopReceiverConfigsAndFactories returns a configuration and factories that allows building a new nop receiver. 
+func NewNopReceiverConfigsAndFactories() (map[component.ID]component.Config, map[component.Type]receiver.Factory) { + nopFactory := receivertest.NewNopFactory() + configs := map[component.ID]component.Config{ + component.NewID(nopType): nopFactory.CreateDefaultConfig(), + } + factories := map[component.Type]receiver.Factory{ + nopType: nopFactory, + } + + return configs, factories +} diff --git a/vendor/go.opentelemetry.io/collector/service/internal/capabilityconsumer/capabilities.go b/vendor/go.opentelemetry.io/collector/service/internal/capabilityconsumer/capabilities.go index 0b700a45e54..6e867eef54e 100644 --- a/vendor/go.opentelemetry.io/collector/service/internal/capabilityconsumer/capabilities.go +++ b/vendor/go.opentelemetry.io/collector/service/internal/capabilityconsumer/capabilities.go @@ -5,13 +5,14 @@ package capabilityconsumer // import "go.opentelemetry.io/collector/service/inte import ( "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/xconsumer" ) -func NewLogs(logs consumer.Logs, cap consumer.Capabilities) consumer.Logs { - if logs.Capabilities() == cap { +func NewLogs(logs consumer.Logs, capabilities consumer.Capabilities) consumer.Logs { + if logs.Capabilities() == capabilities { return logs } - return capLogs{Logs: logs, cap: cap} + return capLogs{Logs: logs, cap: capabilities} } type capLogs struct { @@ -23,11 +24,11 @@ func (mts capLogs) Capabilities() consumer.Capabilities { return mts.cap } -func NewMetrics(metrics consumer.Metrics, cap consumer.Capabilities) consumer.Metrics { - if metrics.Capabilities() == cap { +func NewMetrics(metrics consumer.Metrics, capabilities consumer.Capabilities) consumer.Metrics { + if metrics.Capabilities() == capabilities { return metrics } - return capMetrics{Metrics: metrics, cap: cap} + return capMetrics{Metrics: metrics, cap: capabilities} } type capMetrics struct { @@ -39,11 +40,11 @@ func (mts capMetrics) Capabilities() consumer.Capabilities { return mts.cap } -func NewTraces(traces consumer.Traces, cap consumer.Capabilities) consumer.Traces { - if traces.Capabilities() == cap { +func NewTraces(traces consumer.Traces, capabilities consumer.Capabilities) consumer.Traces { + if traces.Capabilities() == capabilities { return traces } - return capTraces{Traces: traces, cap: cap} + return capTraces{Traces: traces, cap: capabilities} } type capTraces struct { @@ -54,3 +55,19 @@ type capTraces struct { func (mts capTraces) Capabilities() consumer.Capabilities { return mts.cap } + +func NewProfiles(profiles xconsumer.Profiles, capabilities consumer.Capabilities) xconsumer.Profiles { + if profiles.Capabilities() == capabilities { + return profiles + } + return capProfiles{Profiles: profiles, cap: capabilities} +} + +type capProfiles struct { + xconsumer.Profiles + cap consumer.Capabilities +} + +func (mts capProfiles) Capabilities() consumer.Capabilities { + return mts.cap +} diff --git a/vendor/go.opentelemetry.io/collector/service/internal/components/loggers.go b/vendor/go.opentelemetry.io/collector/service/internal/components/loggers.go index f4178977b2d..f02d19fb082 100644 --- a/vendor/go.opentelemetry.io/collector/service/internal/components/loggers.go +++ b/vendor/go.opentelemetry.io/collector/service/internal/components/loggers.go @@ -9,6 +9,7 @@ import ( "go.uber.org/zap" "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/pipeline" ) const ( @@ -21,21 +22,21 @@ const ( zapReceiverInPipeline = "receiver_in_pipeline" ) -func ReceiverLogger(logger *zap.Logger, id 
component.ID, dt component.DataType) *zap.Logger { +func ReceiverLogger(logger *zap.Logger, id component.ID, dt pipeline.Signal) *zap.Logger { return logger.With( zap.String(zapKindKey, strings.ToLower(component.KindReceiver.String())), zap.String(zapNameKey, id.String()), zap.String(zapDataTypeKey, dt.String())) } -func ProcessorLogger(logger *zap.Logger, id component.ID, pipelineID component.ID) *zap.Logger { +func ProcessorLogger(logger *zap.Logger, id component.ID, pipelineID pipeline.ID) *zap.Logger { return logger.With( zap.String(zapKindKey, strings.ToLower(component.KindProcessor.String())), zap.String(zapNameKey, id.String()), zap.String(zapPipelineKey, pipelineID.String())) } -func ExporterLogger(logger *zap.Logger, id component.ID, dt component.DataType) *zap.Logger { +func ExporterLogger(logger *zap.Logger, id component.ID, dt pipeline.Signal) *zap.Logger { return logger.With( zap.String(zapKindKey, strings.ToLower(component.KindExporter.String())), zap.String(zapDataTypeKey, dt.String()), @@ -48,7 +49,7 @@ func ExtensionLogger(logger *zap.Logger, id component.ID) *zap.Logger { zap.String(zapNameKey, id.String())) } -func ConnectorLogger(logger *zap.Logger, id component.ID, expDT, rcvDT component.DataType) *zap.Logger { +func ConnectorLogger(logger *zap.Logger, id component.ID, expDT, rcvDT pipeline.Signal) *zap.Logger { return logger.With( zap.String(zapKindKey, strings.ToLower(component.KindConnector.String())), zap.String(zapNameKey, id.String()), diff --git a/vendor/go.opentelemetry.io/collector/service/internal/graph/capabilities.go b/vendor/go.opentelemetry.io/collector/service/internal/graph/capabilities.go new file mode 100644 index 00000000000..128e6fde926 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/service/internal/graph/capabilities.go @@ -0,0 +1,40 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package graph // import "go.opentelemetry.io/collector/service/internal/graph" + +import ( + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/xconsumer" + "go.opentelemetry.io/collector/pipeline" +) + +const capabilitiesSeed = "capabilities" + +var _ consumerNode = (*capabilitiesNode)(nil) + +// Every pipeline has a "virtual" capabilities node immediately after the receiver(s). +// There are two purposes for this node: +// 1. Present aggregated capabilities to receivers, such as whether the pipeline mutates data. +// 2. Present a consistent "first consumer" for each pipeline. +// The nodeID is derived from "pipeline ID". 
+type capabilitiesNode struct { + nodeID + pipelineID pipeline.ID + baseConsumer + consumer.ConsumeTracesFunc + consumer.ConsumeMetricsFunc + consumer.ConsumeLogsFunc + xconsumer.ConsumeProfilesFunc +} + +func newCapabilitiesNode(pipelineID pipeline.ID) *capabilitiesNode { + return &capabilitiesNode{ + nodeID: newNodeID(capabilitiesSeed, pipelineID.String()), + pipelineID: pipelineID, + } +} + +func (n *capabilitiesNode) getConsumer() baseConsumer { + return n +} diff --git a/vendor/go.opentelemetry.io/collector/service/internal/graph/connector.go b/vendor/go.opentelemetry.io/collector/service/internal/graph/connector.go new file mode 100644 index 00000000000..1f654454ee6 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/service/internal/graph/connector.go @@ -0,0 +1,220 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package graph // import "go.opentelemetry.io/collector/service/internal/graph" + +import ( + "context" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/connector" + "go.opentelemetry.io/collector/connector/xconnector" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/xconsumer" + "go.opentelemetry.io/collector/pipeline" + "go.opentelemetry.io/collector/pipeline/xpipeline" + "go.opentelemetry.io/collector/service/internal/builders" + "go.opentelemetry.io/collector/service/internal/capabilityconsumer" + "go.opentelemetry.io/collector/service/internal/components" +) + +const connectorSeed = "connector" + +var _ consumerNode = (*connectorNode)(nil) + +// A connector instance connects one pipeline type to one other pipeline type. +// Therefore, nodeID is derived from "exporter pipeline type", "receiver pipeline type", and "component ID". 
+type connectorNode struct { + nodeID + componentID component.ID + exprPipelineType pipeline.Signal + rcvrPipelineType pipeline.Signal + component.Component +} + +func newConnectorNode(exprPipelineType, rcvrPipelineType pipeline.Signal, connID component.ID) *connectorNode { + return &connectorNode{ + nodeID: newNodeID(connectorSeed, connID.String(), exprPipelineType.String(), rcvrPipelineType.String()), + componentID: connID, + exprPipelineType: exprPipelineType, + rcvrPipelineType: rcvrPipelineType, + } +} + +func (n *connectorNode) getConsumer() baseConsumer { + return n.Component.(baseConsumer) +} + +func (n *connectorNode) buildComponent( + ctx context.Context, + tel component.TelemetrySettings, + info component.BuildInfo, + builder *builders.ConnectorBuilder, + nexts []baseConsumer, +) error { + tel.Logger = components.ConnectorLogger(tel.Logger, n.componentID, n.exprPipelineType, n.rcvrPipelineType) + set := connector.Settings{ID: n.componentID, TelemetrySettings: tel, BuildInfo: info} + switch n.rcvrPipelineType { + case pipeline.SignalTraces: + return n.buildTraces(ctx, set, builder, nexts) + case pipeline.SignalMetrics: + return n.buildMetrics(ctx, set, builder, nexts) + case pipeline.SignalLogs: + return n.buildLogs(ctx, set, builder, nexts) + case xpipeline.SignalProfiles: + return n.buildProfiles(ctx, set, builder, nexts) + } + return nil +} + +func (n *connectorNode) buildTraces( + ctx context.Context, + set connector.Settings, + builder *builders.ConnectorBuilder, + nexts []baseConsumer, +) error { + consumers := make(map[pipeline.ID]consumer.Traces, len(nexts)) + for _, next := range nexts { + consumers[next.(*capabilitiesNode).pipelineID] = next.(consumer.Traces) + } + next := connector.NewTracesRouter(consumers) + + var err error + switch n.exprPipelineType { + case pipeline.SignalTraces: + var conn connector.Traces + conn, err = builder.CreateTracesToTraces(ctx, set, next) + if err != nil { + return err + } + n.Component = componentTraces{ + Component: conn, + Traces: capabilityconsumer.NewTraces(conn, aggregateCap(conn, nexts)), + } + return nil + case pipeline.SignalMetrics: + n.Component, err = builder.CreateMetricsToTraces(ctx, set, next) + case pipeline.SignalLogs: + n.Component, err = builder.CreateLogsToTraces(ctx, set, next) + case xpipeline.SignalProfiles: + n.Component, err = builder.CreateProfilesToTraces(ctx, set, next) + } + return err +} + +func (n *connectorNode) buildMetrics( + ctx context.Context, + set connector.Settings, + builder *builders.ConnectorBuilder, + nexts []baseConsumer, +) error { + consumers := make(map[pipeline.ID]consumer.Metrics, len(nexts)) + for _, next := range nexts { + consumers[next.(*capabilitiesNode).pipelineID] = next.(consumer.Metrics) + } + next := connector.NewMetricsRouter(consumers) + + var err error + switch n.exprPipelineType { + case pipeline.SignalMetrics: + var conn connector.Metrics + conn, err = builder.CreateMetricsToMetrics(ctx, set, next) + if err != nil { + return err + } + n.Component = componentMetrics{ + Component: conn, + Metrics: capabilityconsumer.NewMetrics(conn, aggregateCap(conn, nexts)), + } + return nil + case pipeline.SignalTraces: + n.Component, err = builder.CreateTracesToMetrics(ctx, set, next) + case pipeline.SignalLogs: + n.Component, err = builder.CreateLogsToMetrics(ctx, set, next) + case xpipeline.SignalProfiles: + n.Component, err = builder.CreateProfilesToMetrics(ctx, set, next) + } + return err +} + +func (n *connectorNode) buildLogs( + ctx context.Context, + set connector.Settings, + 
builder *builders.ConnectorBuilder, + nexts []baseConsumer, +) error { + consumers := make(map[pipeline.ID]consumer.Logs, len(nexts)) + for _, next := range nexts { + consumers[next.(*capabilitiesNode).pipelineID] = next.(consumer.Logs) + } + next := connector.NewLogsRouter(consumers) + + var err error + switch n.exprPipelineType { + case pipeline.SignalLogs: + var conn connector.Logs + conn, err = builder.CreateLogsToLogs(ctx, set, next) + if err != nil { + return err + } + n.Component = componentLogs{ + Component: conn, + Logs: capabilityconsumer.NewLogs(conn, aggregateCap(conn, nexts)), + } + return nil + case pipeline.SignalTraces: + n.Component, err = builder.CreateTracesToLogs(ctx, set, next) + case pipeline.SignalMetrics: + n.Component, err = builder.CreateMetricsToLogs(ctx, set, next) + case xpipeline.SignalProfiles: + n.Component, err = builder.CreateProfilesToLogs(ctx, set, next) + } + return err +} + +func (n *connectorNode) buildProfiles( + ctx context.Context, + set connector.Settings, + builder *builders.ConnectorBuilder, + nexts []baseConsumer, +) error { + consumers := make(map[pipeline.ID]xconsumer.Profiles, len(nexts)) + for _, next := range nexts { + consumers[next.(*capabilitiesNode).pipelineID] = next.(xconsumer.Profiles) + } + next := xconnector.NewProfilesRouter(consumers) + + var err error + switch n.exprPipelineType { + case xpipeline.SignalProfiles: + var conn xconnector.Profiles + conn, err = builder.CreateProfilesToProfiles(ctx, set, next) + if err != nil { + return err + } + n.Component = componentProfiles{ + Component: conn, + Profiles: capabilityconsumer.NewProfiles(conn, aggregateCap(conn, nexts)), + } + return nil + case pipeline.SignalTraces: + n.Component, err = builder.CreateTracesToProfiles(ctx, set, next) + case pipeline.SignalMetrics: + n.Component, err = builder.CreateMetricsToProfiles(ctx, set, next) + case pipeline.SignalLogs: + n.Component, err = builder.CreateLogsToProfiles(ctx, set, next) + } + return err +} + +// When connecting pipelines of the same data type, the connector must +// inherit the capabilities of pipelines in which it is acting as a receiver. +// Since the incoming and outgoing data types are the same, we must also consider +// that the connector itself may mutate the data and pass it along. +func aggregateCap(base baseConsumer, nexts []baseConsumer) consumer.Capabilities { + capabilities := base.Capabilities() + for _, next := range nexts { + capabilities.MutatesData = capabilities.MutatesData || next.Capabilities().MutatesData + } + return capabilities +} diff --git a/vendor/go.opentelemetry.io/collector/service/internal/graph/consumer.go b/vendor/go.opentelemetry.io/collector/service/internal/graph/consumer.go new file mode 100644 index 00000000000..2c44a993031 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/service/internal/graph/consumer.go @@ -0,0 +1,39 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package graph // import "go.opentelemetry.io/collector/service/internal/graph" + +import ( + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/xconsumer" +) + +// baseConsumer redeclared here since not public in consumer package. May consider to make that public. 
+type baseConsumer interface { + Capabilities() consumer.Capabilities +} + +type consumerNode interface { + getConsumer() baseConsumer +} + +type componentTraces struct { + component.Component + consumer.Traces +} + +type componentMetrics struct { + component.Component + consumer.Metrics +} + +type componentLogs struct { + component.Component + consumer.Logs +} + +type componentProfiles struct { + component.Component + xconsumer.Profiles +} diff --git a/vendor/go.opentelemetry.io/collector/service/internal/graph/exporter.go b/vendor/go.opentelemetry.io/collector/service/internal/graph/exporter.go new file mode 100644 index 00000000000..ab7d0f6392b --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/service/internal/graph/exporter.go @@ -0,0 +1,68 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package graph // import "go.opentelemetry.io/collector/service/internal/graph" + +import ( + "context" + "fmt" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/exporter" + "go.opentelemetry.io/collector/pipeline" + "go.opentelemetry.io/collector/pipeline/xpipeline" + "go.opentelemetry.io/collector/service/internal/builders" + "go.opentelemetry.io/collector/service/internal/components" +) + +const exporterSeed = "exporter" + +var _ consumerNode = (*exporterNode)(nil) + +// An exporter instance can be shared by multiple pipelines of the same type. +// Therefore, nodeID is derived from "pipeline type" and "component ID". +type exporterNode struct { + nodeID + componentID component.ID + pipelineType pipeline.Signal + component.Component +} + +func newExporterNode(pipelineType pipeline.Signal, exprID component.ID) *exporterNode { + return &exporterNode{ + nodeID: newNodeID(exporterSeed, pipelineType.String(), exprID.String()), + componentID: exprID, + pipelineType: pipelineType, + } +} + +func (n *exporterNode) getConsumer() baseConsumer { + return n.Component.(baseConsumer) +} + +func (n *exporterNode) buildComponent( + ctx context.Context, + tel component.TelemetrySettings, + info component.BuildInfo, + builder *builders.ExporterBuilder, +) error { + tel.Logger = components.ExporterLogger(tel.Logger, n.componentID, n.pipelineType) + set := exporter.Settings{ID: n.componentID, TelemetrySettings: tel, BuildInfo: info} + var err error + switch n.pipelineType { + case pipeline.SignalTraces: + n.Component, err = builder.CreateTraces(ctx, set) + case pipeline.SignalMetrics: + n.Component, err = builder.CreateMetrics(ctx, set) + case pipeline.SignalLogs: + n.Component, err = builder.CreateLogs(ctx, set) + case xpipeline.SignalProfiles: + n.Component, err = builder.CreateProfiles(ctx, set) + default: + return fmt.Errorf("error creating exporter %q for data type %q is not supported", set.ID, n.pipelineType) + } + if err != nil { + return fmt.Errorf("failed to create %q exporter for data type %q: %w", set.ID, n.pipelineType, err) + } + return nil +} diff --git a/vendor/go.opentelemetry.io/collector/service/internal/graph/fanout.go b/vendor/go.opentelemetry.io/collector/service/internal/graph/fanout.go new file mode 100644 index 00000000000..13c8d4ad1c5 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/service/internal/graph/fanout.go @@ -0,0 +1,31 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package graph // import "go.opentelemetry.io/collector/service/internal/graph" + +import ( + "go.opentelemetry.io/collector/pipeline" +) + +const fanOutToExporters = "fanout_to_exporters" + +var _ 
consumerNode = (*fanOutNode)(nil) + +// Each pipeline has one fan-out node before exporters. +// Therefore, nodeID is derived from "pipeline ID". +type fanOutNode struct { + nodeID + pipelineID pipeline.ID + baseConsumer +} + +func newFanOutNode(pipelineID pipeline.ID) *fanOutNode { + return &fanOutNode{ + nodeID: newNodeID(fanOutToExporters, pipelineID.String()), + pipelineID: pipelineID, + } +} + +func (n *fanOutNode) getConsumer() baseConsumer { + return n.baseConsumer +} diff --git a/vendor/go.opentelemetry.io/collector/service/internal/graph/graph.go b/vendor/go.opentelemetry.io/collector/service/internal/graph/graph.go index c95d1f54b7e..2bc4e163d7e 100644 --- a/vendor/go.opentelemetry.io/collector/service/internal/graph/graph.go +++ b/vendor/go.opentelemetry.io/collector/service/internal/graph/graph.go @@ -19,34 +19,40 @@ import ( "strings" "go.uber.org/multierr" + "go.uber.org/zap" "gonum.org/v1/gonum/graph" "gonum.org/v1/gonum/graph/simple" "gonum.org/v1/gonum/graph/topo" "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componentstatus" "go.opentelemetry.io/collector/connector" + "go.opentelemetry.io/collector/connector/xconnector" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/exporter" + "go.opentelemetry.io/collector/consumer/xconsumer" "go.opentelemetry.io/collector/internal/fanoutconsumer" - "go.opentelemetry.io/collector/processor" - "go.opentelemetry.io/collector/receiver" + "go.opentelemetry.io/collector/pipeline" + "go.opentelemetry.io/collector/pipeline/xpipeline" + "go.opentelemetry.io/collector/service/internal/builders" "go.opentelemetry.io/collector/service/internal/capabilityconsumer" - "go.opentelemetry.io/collector/service/internal/servicetelemetry" + "go.opentelemetry.io/collector/service/internal/status" "go.opentelemetry.io/collector/service/pipelines" ) // Settings holds configuration for building builtPipelines. type Settings struct { - Telemetry servicetelemetry.TelemetrySettings + Telemetry component.TelemetrySettings BuildInfo component.BuildInfo - ReceiverBuilder *receiver.Builder - ProcessorBuilder *processor.Builder - ExporterBuilder *exporter.Builder - ConnectorBuilder *connector.Builder + ReceiverBuilder *builders.ReceiverBuilder + ProcessorBuilder *builders.ProcessorBuilder + ExporterBuilder *builders.ExporterBuilder + ConnectorBuilder *builders.ConnectorBuilder // PipelineConfigs is a map of component.ID to PipelineConfig. PipelineConfigs pipelines.Config + + ReportStatus status.ServiceStatusFunc } type Graph struct { @@ -54,12 +60,12 @@ type Graph struct { componentGraph *simple.DirectedGraph // Keep track of how nodes relate to pipelines, so we can declare edges in the graph. - pipelines map[component.ID]*pipelineNodes + pipelines map[pipeline.ID]*pipelineNodes // Keep track of status source per node - instanceIDs map[int64]*component.InstanceID + instanceIDs map[int64]*componentstatus.InstanceID - telemetry servicetelemetry.TelemetrySettings + telemetry component.TelemetrySettings } // Build builds a full pipeline graph. 
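With the builders moved into service/internal/builders, the graph's Settings are now assembled from those internal types rather than the public receiver/processor/exporter builders. A test-style sketch of the wiring using the nop helpers from the builders package; builders.NewConnector is assumed by analogy with NewReceiver and NewExporter, the componenttest helper is an assumption, and PipelineConfigs and ReportStatus are omitted because they depend on the service configuration and status reporter:

    recvCfgs, recvFacts := builders.NewNopReceiverConfigsAndFactories()
    procCfgs, procFacts := builders.NewNopProcessorConfigsAndFactories()
    expCfgs, expFacts := builders.NewNopExporterConfigsAndFactories()
    connCfgs, connFacts := builders.NewNopConnectorConfigsAndFactories()

    set := graph.Settings{
    	Telemetry:        componenttest.NewNopTelemetrySettings(), // assumed helper
    	BuildInfo:        component.NewDefaultBuildInfo(),
    	ReceiverBuilder:  builders.NewReceiver(recvCfgs, recvFacts),
    	ProcessorBuilder: builders.NewProcessor(procCfgs, procFacts),
    	ExporterBuilder:  builders.NewExporter(expCfgs, expFacts),
    	ConnectorBuilder: builders.NewConnector(connCfgs, connFacts), // assumed constructor
    }
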
@@ -67,8 +73,8 @@ type Graph struct { func Build(ctx context.Context, set Settings) (*Graph, error) { pipelines := &Graph{ componentGraph: simple.NewDirectedGraph(), - pipelines: make(map[component.ID]*pipelineNodes, len(set.PipelineConfigs)), - instanceIDs: make(map[int64]*component.InstanceID), + pipelines: make(map[pipeline.ID]*pipelineNodes, len(set.PipelineConfigs)), + instanceIDs: make(map[int64]*componentstatus.InstanceID), telemetry: set.Telemetry, } for pipelineID := range set.PipelineConfigs { @@ -91,8 +97,8 @@ func (g *Graph) createNodes(set Settings) error { connectors := make(map[component.ID]struct{}) // Keep track of connectors and where they are used. (map[connectorID][]pipelineID). - connectorsAsExporter := make(map[component.ID][]component.ID) - connectorsAsReceiver := make(map[component.ID][]component.ID) + connectorsAsExporter := make(map[component.ID][]pipeline.ID) + connectorsAsReceiver := make(map[component.ID][]pipeline.ID) // Build each pipelineNodes struct for each pipeline by parsing the pipelineCfg. // Also populates the connectors, connectorsAsExporter and connectorsAsReceiver maps. @@ -136,19 +142,19 @@ func (g *Graph) createNodes(set Settings) error { } connFactory := factory.(connector.Factory) - expTypes := make(map[component.DataType]bool) + expTypes := make(map[pipeline.Signal]bool) for _, pipelineID := range connectorsAsExporter[connID] { // The presence of each key indicates how the connector is used as an exporter. // The value is initially set to false. Later we will set the value to true *if* we // confirm that there is a supported corresponding use as a receiver. - expTypes[pipelineID.Type()] = false + expTypes[pipelineID.Signal()] = false } - recTypes := make(map[component.DataType]bool) + recTypes := make(map[pipeline.Signal]bool) for _, pipelineID := range connectorsAsReceiver[connID] { // The presence of each key indicates how the connector is used as a receiver. // The value is initially set to false. Later we will set the value to true *if* we // confirm that there is a supported corresponding use as an exporter. 
- recTypes[pipelineID.Type()] = false + recTypes[pipelineID.Signal()] = false } for expType := range expTypes { @@ -176,7 +182,7 @@ func (g *Graph) createNodes(set Settings) error { for _, eID := range connectorsAsExporter[connID] { for _, rID := range connectorsAsReceiver[connID] { - if connectorStability(connFactory, eID.Type(), rID.Type()) == component.StabilityLevelUndefined { + if connectorStability(connFactory, eID.Signal(), rID.Signal()) == component.StabilityLevelUndefined { // Connector is not supported for this combination, but we know it is used correctly elsewhere continue } @@ -190,70 +196,54 @@ func (g *Graph) createNodes(set Settings) error { return nil } -func (g *Graph) createReceiver(pipelineID, recvID component.ID) *receiverNode { - rcvrNode := newReceiverNode(pipelineID.Type(), recvID) +func (g *Graph) createReceiver(pipelineID pipeline.ID, recvID component.ID) *receiverNode { + rcvrNode := newReceiverNode(pipelineID.Signal(), recvID) if node := g.componentGraph.Node(rcvrNode.ID()); node != nil { - g.instanceIDs[node.ID()].PipelineIDs[pipelineID] = struct{}{} + instanceID := g.instanceIDs[node.ID()] + g.instanceIDs[node.ID()] = instanceID.WithPipelines(pipelineID) return node.(*receiverNode) } g.componentGraph.AddNode(rcvrNode) - g.instanceIDs[rcvrNode.ID()] = &component.InstanceID{ - ID: recvID, - Kind: component.KindReceiver, - PipelineIDs: map[component.ID]struct{}{ - pipelineID: {}, - }, - } + g.instanceIDs[rcvrNode.ID()] = componentstatus.NewInstanceID( + recvID, component.KindReceiver, pipelineID, + ) return rcvrNode } -func (g *Graph) createProcessor(pipelineID, procID component.ID) *processorNode { +func (g *Graph) createProcessor(pipelineID pipeline.ID, procID component.ID) *processorNode { procNode := newProcessorNode(pipelineID, procID) g.componentGraph.AddNode(procNode) - g.instanceIDs[procNode.ID()] = &component.InstanceID{ - ID: procID, - Kind: component.KindProcessor, - PipelineIDs: map[component.ID]struct{}{ - pipelineID: {}, - }, - } + g.instanceIDs[procNode.ID()] = componentstatus.NewInstanceID( + procID, component.KindProcessor, pipelineID, + ) return procNode } -func (g *Graph) createExporter(pipelineID, exprID component.ID) *exporterNode { - expNode := newExporterNode(pipelineID.Type(), exprID) +func (g *Graph) createExporter(pipelineID pipeline.ID, exprID component.ID) *exporterNode { + expNode := newExporterNode(pipelineID.Signal(), exprID) if node := g.componentGraph.Node(expNode.ID()); node != nil { - g.instanceIDs[expNode.ID()].PipelineIDs[pipelineID] = struct{}{} + instanceID := g.instanceIDs[expNode.ID()] + g.instanceIDs[expNode.ID()] = instanceID.WithPipelines(pipelineID) return node.(*exporterNode) } g.componentGraph.AddNode(expNode) - g.instanceIDs[expNode.ID()] = &component.InstanceID{ - ID: expNode.componentID, - Kind: component.KindExporter, - PipelineIDs: map[component.ID]struct{}{ - pipelineID: {}, - }, - } + g.instanceIDs[expNode.ID()] = componentstatus.NewInstanceID( + expNode.componentID, component.KindExporter, pipelineID, + ) return expNode } -func (g *Graph) createConnector(exprPipelineID, rcvrPipelineID, connID component.ID) *connectorNode { - connNode := newConnectorNode(exprPipelineID.Type(), rcvrPipelineID.Type(), connID) +func (g *Graph) createConnector(exprPipelineID, rcvrPipelineID pipeline.ID, connID component.ID) *connectorNode { + connNode := newConnectorNode(exprPipelineID.Signal(), rcvrPipelineID.Signal(), connID) if node := g.componentGraph.Node(connNode.ID()); node != nil { instanceID := 
g.instanceIDs[connNode.ID()] - instanceID.PipelineIDs[exprPipelineID] = struct{}{} - instanceID.PipelineIDs[rcvrPipelineID] = struct{}{} + g.instanceIDs[connNode.ID()] = instanceID.WithPipelines(exprPipelineID, rcvrPipelineID) return node.(*connectorNode) } g.componentGraph.AddNode(connNode) - g.instanceIDs[connNode.ID()] = &component.InstanceID{ - ID: connNode.componentID, - Kind: component.KindConnector, - PipelineIDs: map[component.ID]struct{}{ - exprPipelineID: {}, - rcvrPipelineID: {}, - }, - } + g.instanceIDs[connNode.ID()] = componentstatus.NewInstanceID( + connNode.componentID, component.KindConnector, exprPipelineID, rcvrPipelineID, + ) return connNode } @@ -296,22 +286,16 @@ func (g *Graph) buildComponents(ctx context.Context, set Settings) error { for i := len(nodes) - 1; i >= 0; i-- { node := nodes[i] - // skipped for capabilitiesNodes and fanoutNodes as they are not assigned componentIDs. - var telemetrySettings component.TelemetrySettings - if instanceID, ok := g.instanceIDs[node.ID()]; ok { - telemetrySettings = set.Telemetry.ToComponentTelemetrySettings(instanceID) - } - switch n := node.(type) { case *receiverNode: - err = n.buildComponent(ctx, telemetrySettings, set.BuildInfo, set.ReceiverBuilder, g.nextConsumers(n.ID())) + err = n.buildComponent(ctx, set.Telemetry, set.BuildInfo, set.ReceiverBuilder, g.nextConsumers(n.ID())) case *processorNode: // nextConsumers is guaranteed to be length 1. Either it is the next processor or it is the fanout node for the exporters. - err = n.buildComponent(ctx, telemetrySettings, set.BuildInfo, set.ProcessorBuilder, g.nextConsumers(n.ID())[0]) + err = n.buildComponent(ctx, set.Telemetry, set.BuildInfo, set.ProcessorBuilder, g.nextConsumers(n.ID())[0]) case *exporterNode: - err = n.buildComponent(ctx, telemetrySettings, set.BuildInfo, set.ExporterBuilder) + err = n.buildComponent(ctx, set.Telemetry, set.BuildInfo, set.ExporterBuilder) case *connectorNode: - err = n.buildComponent(ctx, telemetrySettings, set.BuildInfo, set.ConnectorBuilder, g.nextConsumers(n.ID())) + err = n.buildComponent(ctx, set.Telemetry, set.BuildInfo, set.ConnectorBuilder, g.nextConsumers(n.ID())) case *capabilitiesNode: capability := consumer.Capabilities{ // The fanOutNode represents the aggregate capabilities of the exporters in the pipeline. 
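The capabilities node in the next hunk computes a pipeline-wide MutatesData flag by OR-ing the capabilities of the fan-out node and every processor, and aggregateCap in connector.go above applies the same rule across a same-signal connector's downstream pipelines. Reduced to its core, the aggregation is just a boolean OR; a sketch with a hypothetical helper name:

    // aggregate (hypothetical): a pipeline mutates data if any member does.
    func aggregate(caps ...consumer.Capabilities) consumer.Capabilities {
    	var out consumer.Capabilities
    	for _, c := range caps {
    		out.MutatesData = out.MutatesData || c.MutatesData
    	}
    	return out
    }
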
@@ -321,41 +305,51 @@ func (g *Graph) buildComponents(ctx context.Context, set Settings) error { capability.MutatesData = capability.MutatesData || proc.getConsumer().Capabilities().MutatesData } next := g.nextConsumers(n.ID())[0] - switch n.pipelineID.Type() { - case component.DataTypeTraces: + switch n.pipelineID.Signal() { + case pipeline.SignalTraces: cc := capabilityconsumer.NewTraces(next.(consumer.Traces), capability) n.baseConsumer = cc n.ConsumeTracesFunc = cc.ConsumeTraces - case component.DataTypeMetrics: + case pipeline.SignalMetrics: cc := capabilityconsumer.NewMetrics(next.(consumer.Metrics), capability) n.baseConsumer = cc n.ConsumeMetricsFunc = cc.ConsumeMetrics - case component.DataTypeLogs: + case pipeline.SignalLogs: cc := capabilityconsumer.NewLogs(next.(consumer.Logs), capability) n.baseConsumer = cc n.ConsumeLogsFunc = cc.ConsumeLogs + case xpipeline.SignalProfiles: + cc := capabilityconsumer.NewProfiles(next.(xconsumer.Profiles), capability) + n.baseConsumer = cc + n.ConsumeProfilesFunc = cc.ConsumeProfiles } case *fanOutNode: nexts := g.nextConsumers(n.ID()) - switch n.pipelineID.Type() { - case component.DataTypeTraces: + switch n.pipelineID.Signal() { + case pipeline.SignalTraces: consumers := make([]consumer.Traces, 0, len(nexts)) for _, next := range nexts { consumers = append(consumers, next.(consumer.Traces)) } n.baseConsumer = fanoutconsumer.NewTraces(consumers) - case component.DataTypeMetrics: + case pipeline.SignalMetrics: consumers := make([]consumer.Metrics, 0, len(nexts)) for _, next := range nexts { consumers = append(consumers, next.(consumer.Metrics)) } n.baseConsumer = fanoutconsumer.NewMetrics(consumers) - case component.DataTypeLogs: + case pipeline.SignalLogs: consumers := make([]consumer.Logs, 0, len(nexts)) for _, next := range nexts { consumers = append(consumers, next.(consumer.Logs)) } n.baseConsumer = fanoutconsumer.NewLogs(consumers) + case xpipeline.SignalProfiles: + consumers := make([]xconsumer.Profiles, 0, len(nexts)) + for _, next := range nexts { + consumers = append(consumers, next.(xconsumer.Profiles)) + } + n.baseConsumer = fanoutconsumer.NewProfiles(consumers) } } if err != nil { @@ -394,7 +388,11 @@ type pipelineNodes struct { exporters map[int64]graph.Node } -func (g *Graph) StartAll(ctx context.Context, host component.Host) error { +func (g *Graph) StartAll(ctx context.Context, host *Host) error { + if host == nil { + return errors.New("host cannot be nil") + } + nodes, err := topo.Sort(g.componentGraph) if err != nil { return err @@ -413,25 +411,32 @@ func (g *Graph) StartAll(ctx context.Context, host component.Host) error { } instanceID := g.instanceIDs[node.ID()] - g.telemetry.Status.ReportStatus( + host.Reporter.ReportStatus( instanceID, - component.NewStatusEvent(component.StatusStarting), + componentstatus.NewEvent(componentstatus.StatusStarting), ) - if compErr := comp.Start(ctx, host); compErr != nil { - g.telemetry.Status.ReportStatus( + if compErr := comp.Start(ctx, &HostWrapper{Host: host, InstanceID: instanceID}); compErr != nil { + host.Reporter.ReportStatus( instanceID, - component.NewPermanentErrorEvent(compErr), + componentstatus.NewPermanentErrorEvent(compErr), ) + // We log with zap.AddStacktrace(zap.DPanicLevel) to avoid adding the stack trace to the error log + g.telemetry.Logger.WithOptions(zap.AddStacktrace(zap.DPanicLevel)). 
+ Error("Failed to start component", + zap.Error(compErr), + zap.String("type", instanceID.Kind().String()), + zap.String("id", instanceID.ComponentID().String()), + ) return compErr } - g.telemetry.Status.ReportOKIfStarting(instanceID) + host.Reporter.ReportOKIfStarting(instanceID) } return nil } -func (g *Graph) ShutdownAll(ctx context.Context) error { +func (g *Graph) ShutdownAll(ctx context.Context, reporter status.Reporter) error { nodes, err := topo.Sort(g.componentGraph) if err != nil { return err @@ -452,39 +457,34 @@ func (g *Graph) ShutdownAll(ctx context.Context) error { } instanceID := g.instanceIDs[node.ID()] - g.telemetry.Status.ReportStatus( + reporter.ReportStatus( instanceID, - component.NewStatusEvent(component.StatusStopping), + componentstatus.NewEvent(componentstatus.StatusStopping), ) if compErr := comp.Shutdown(ctx); compErr != nil { errs = multierr.Append(errs, compErr) - g.telemetry.Status.ReportStatus( + reporter.ReportStatus( instanceID, - component.NewPermanentErrorEvent(compErr), + componentstatus.NewPermanentErrorEvent(compErr), ) continue } - g.telemetry.Status.ReportStatus( + reporter.ReportStatus( instanceID, - component.NewStatusEvent(component.StatusStopped), + componentstatus.NewEvent(componentstatus.StatusStopped), ) } return errs } -// Deprecated: [0.79.0] This function will be removed in the future. -// Several components in the contrib repository use this function so it cannot be removed -// before those cases are removed. In most cases, use of this function can be replaced by a -// connector. See https://github.com/open-telemetry/opentelemetry-collector/issues/7370 and -// https://github.com/open-telemetry/opentelemetry-collector/pull/7390#issuecomment-1483710184 -// for additional information. -func (g *Graph) GetExporters() map[component.DataType]map[component.ID]component.Component { - exportersMap := make(map[component.DataType]map[component.ID]component.Component) - exportersMap[component.DataTypeTraces] = make(map[component.ID]component.Component) - exportersMap[component.DataTypeMetrics] = make(map[component.ID]component.Component) - exportersMap[component.DataTypeLogs] = make(map[component.ID]component.Component) +func (g *Graph) GetExporters() map[pipeline.Signal]map[component.ID]component.Component { + exportersMap := make(map[pipeline.Signal]map[component.ID]component.Component) + exportersMap[pipeline.SignalTraces] = make(map[component.ID]component.Component) + exportersMap[pipeline.SignalMetrics] = make(map[component.ID]component.Component) + exportersMap[pipeline.SignalLogs] = make(map[component.ID]component.Component) + exportersMap[xpipeline.SignalProfiles] = make(map[component.ID]component.Component) for _, pg := range g.pipelines { for _, expNode := range pg.exporters { @@ -527,7 +527,7 @@ func cycleErr(err error, cycles [][]graph.Node) error { for _, node := range cycle { switch n := node.(type) { case *processorNode: - componentDetails = append(componentDetails, fmt.Sprintf("processor %q in pipeline %q", n.componentID, n.pipelineID)) + componentDetails = append(componentDetails, fmt.Sprintf("processor %q in pipeline %q", n.componentID, n.pipelineID.String())) case *connectorNode: componentDetails = append(componentDetails, fmt.Sprintf("connector %q (%s to %s)", n.componentID, n.exprPipelineType, n.rcvrPipelineType)) default: @@ -537,35 +537,83 @@ func cycleErr(err error, cycles [][]graph.Node) error { return fmt.Errorf("cycle detected: %s", strings.Join(componentDetails, " -> ")) } -func connectorStability(f connector.Factory, 
expType, recType component.Type) component.StabilityLevel { +func connectorStability(f connector.Factory, expType, recType pipeline.Signal) component.StabilityLevel { switch expType { - case component.DataTypeTraces: + case pipeline.SignalTraces: switch recType { - case component.DataTypeTraces: + case pipeline.SignalTraces: return f.TracesToTracesStability() - case component.DataTypeMetrics: + case pipeline.SignalMetrics: return f.TracesToMetricsStability() - case component.DataTypeLogs: + case pipeline.SignalLogs: return f.TracesToLogsStability() + case xpipeline.SignalProfiles: + fprof, ok := f.(xconnector.Factory) + if !ok { + return component.StabilityLevelUndefined + } + return fprof.TracesToProfilesStability() } - case component.DataTypeMetrics: + case pipeline.SignalMetrics: switch recType { - case component.DataTypeTraces: + case pipeline.SignalTraces: return f.MetricsToTracesStability() - case component.DataTypeMetrics: + case pipeline.SignalMetrics: return f.MetricsToMetricsStability() - case component.DataTypeLogs: + case pipeline.SignalLogs: return f.MetricsToLogsStability() + case xpipeline.SignalProfiles: + fprof, ok := f.(xconnector.Factory) + if !ok { + return component.StabilityLevelUndefined + } + return fprof.MetricsToProfilesStability() } - case component.DataTypeLogs: + case pipeline.SignalLogs: switch recType { - case component.DataTypeTraces: + case pipeline.SignalTraces: return f.LogsToTracesStability() - case component.DataTypeMetrics: + case pipeline.SignalMetrics: return f.LogsToMetricsStability() - case component.DataTypeLogs: + case pipeline.SignalLogs: return f.LogsToLogsStability() + case xpipeline.SignalProfiles: + fprof, ok := f.(xconnector.Factory) + if !ok { + return component.StabilityLevelUndefined + } + return fprof.LogsToProfilesStability() + } + case xpipeline.SignalProfiles: + fprof, ok := f.(xconnector.Factory) + if !ok { + return component.StabilityLevelUndefined + } + switch recType { + case pipeline.SignalTraces: + return fprof.ProfilesToTracesStability() + case pipeline.SignalMetrics: + return fprof.ProfilesToMetricsStability() + case pipeline.SignalLogs: + return fprof.ProfilesToLogsStability() + case xpipeline.SignalProfiles: + return fprof.ProfilesToProfilesStability() } } return component.StabilityLevelUndefined } + +var ( + _ getExporters = (*HostWrapper)(nil) + _ component.Host = (*HostWrapper)(nil) + _ componentstatus.Reporter = (*HostWrapper)(nil) +) + +type HostWrapper struct { + *Host + InstanceID *componentstatus.InstanceID +} + +func (host *HostWrapper) Report(event *componentstatus.Event) { + host.Reporter.ReportStatus(host.InstanceID, event) +} diff --git a/vendor/go.opentelemetry.io/collector/service/internal/graph/host.go b/vendor/go.opentelemetry.io/collector/service/internal/graph/host.go new file mode 100644 index 00000000000..fd81f7dcca8 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/service/internal/graph/host.go @@ -0,0 +1,169 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package graph // import "go.opentelemetry.io/collector/service/internal/graph" + +import ( + "net/http" + "path" + "runtime" + "time" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componentstatus" + "go.opentelemetry.io/collector/extension" + "go.opentelemetry.io/collector/featuregate" + "go.opentelemetry.io/collector/pipeline" + "go.opentelemetry.io/collector/service/extensions" + "go.opentelemetry.io/collector/service/internal/builders" + 
"go.opentelemetry.io/collector/service/internal/status" + "go.opentelemetry.io/collector/service/internal/zpages" +) + +// TODO: remove as part of https://github.com/open-telemetry/opentelemetry-collector/issues/7370 for service 1.0 +// +// nolint +type getExporters interface { + GetExporters() map[pipeline.Signal]map[component.ID]component.Component +} + +var ( + _ getExporters = (*Host)(nil) + _ component.Host = (*Host)(nil) +) + +type Host struct { + AsyncErrorChannel chan error + Receivers *builders.ReceiverBuilder + Processors *builders.ProcessorBuilder + Exporters *builders.ExporterBuilder + Connectors *builders.ConnectorBuilder + Extensions *builders.ExtensionBuilder + + ModuleInfo extension.ModuleInfo + BuildInfo component.BuildInfo + + Pipelines *Graph + ServiceExtensions *extensions.Extensions + + Reporter status.Reporter +} + +func (host *Host) GetFactory(kind component.Kind, componentType component.Type) component.Factory { + switch kind { + case component.KindReceiver: + return host.Receivers.Factory(componentType) + case component.KindProcessor: + return host.Processors.Factory(componentType) + case component.KindExporter: + return host.Exporters.Factory(componentType) + case component.KindConnector: + return host.Connectors.Factory(componentType) + case component.KindExtension: + return host.Extensions.Factory(componentType) + } + return nil +} + +func (host *Host) GetExtensions() map[component.ID]component.Component { + return host.ServiceExtensions.GetExtensions() +} + +// Deprecated: [0.79.0] This function will be removed in the future. +// Several components in the contrib repository use this function so it cannot be removed +// before those cases are removed. In most cases, use of this function can be replaced by a +// connector. See https://github.com/open-telemetry/opentelemetry-collector/issues/7370 and +// https://github.com/open-telemetry/opentelemetry-collector/pull/7390#issuecomment-1483710184 +// for additional information. +func (host *Host) GetExporters() map[pipeline.Signal]map[component.ID]component.Component { + return host.Pipelines.GetExporters() +} + +func (host *Host) NotifyComponentStatusChange(source *componentstatus.InstanceID, event *componentstatus.Event) { + host.ServiceExtensions.NotifyComponentStatusChange(source, event) + if event.Status() == componentstatus.StatusFatalError { + host.AsyncErrorChannel <- event.Err() + } +} + +const ( + // Paths + zServicePath = "servicez" + zPipelinePath = "pipelinez" + zExtensionPath = "extensionz" + zFeaturePath = "featurez" +) + +// InfoVar is a singleton instance of the Info struct. +var runtimeInfoVar [][2]string + +func init() { + runtimeInfoVar = [][2]string{ + {"StartTimestamp", time.Now().String()}, + {"Go", runtime.Version()}, + {"OS", runtime.GOOS}, + {"Arch", runtime.GOARCH}, + // Add other valuable runtime information here. 
+ } +} + +func (host *Host) RegisterZPages(mux *http.ServeMux, pathPrefix string) { + mux.HandleFunc(path.Join(pathPrefix, zServicePath), host.zPagesRequest) + mux.HandleFunc(path.Join(pathPrefix, zPipelinePath), host.Pipelines.HandleZPages) + mux.HandleFunc(path.Join(pathPrefix, zExtensionPath), host.ServiceExtensions.HandleZPages) + mux.HandleFunc(path.Join(pathPrefix, zFeaturePath), handleFeaturezRequest) +} + +func (host *Host) zPagesRequest(w http.ResponseWriter, _ *http.Request) { + w.Header().Set("Content-Type", "text/html; charset=utf-8") + zpages.WriteHTMLPageHeader(w, zpages.HeaderData{Title: "Service " + host.BuildInfo.Command}) + zpages.WriteHTMLPropertiesTable(w, zpages.PropertiesTableData{Name: "Build Info", Properties: getBuildInfoProperties(host.BuildInfo)}) + zpages.WriteHTMLPropertiesTable(w, zpages.PropertiesTableData{Name: "Runtime Info", Properties: runtimeInfoVar}) + zpages.WriteHTMLComponentHeader(w, zpages.ComponentHeaderData{ + Name: "Pipelines", + ComponentEndpoint: zPipelinePath, + Link: true, + }) + zpages.WriteHTMLComponentHeader(w, zpages.ComponentHeaderData{ + Name: "Extensions", + ComponentEndpoint: zExtensionPath, + Link: true, + }) + zpages.WriteHTMLComponentHeader(w, zpages.ComponentHeaderData{ + Name: "Features", + ComponentEndpoint: zFeaturePath, + Link: true, + }) + zpages.WriteHTMLPageFooter(w) +} + +func handleFeaturezRequest(w http.ResponseWriter, _ *http.Request) { + w.Header().Set("Content-Type", "text/html; charset=utf-8") + zpages.WriteHTMLPageHeader(w, zpages.HeaderData{Title: "Feature Gates"}) + zpages.WriteHTMLFeaturesTable(w, getFeaturesTableData()) + zpages.WriteHTMLPageFooter(w) +} + +func getFeaturesTableData() zpages.FeatureGateTableData { + data := zpages.FeatureGateTableData{} + featuregate.GlobalRegistry().VisitAll(func(gate *featuregate.Gate) { + data.Rows = append(data.Rows, zpages.FeatureGateTableRowData{ + ID: gate.ID(), + Enabled: gate.IsEnabled(), + Description: gate.Description(), + Stage: gate.Stage().String(), + FromVersion: gate.FromVersion(), + ToVersion: gate.ToVersion(), + ReferenceURL: gate.ReferenceURL(), + }) + }) + return data +} + +func getBuildInfoProperties(buildInfo component.BuildInfo) [][2]string { + return [][2]string{ + {"Command", buildInfo.Command}, + {"Description", buildInfo.Description}, + {"Version", buildInfo.Version}, + } +} diff --git a/vendor/go.opentelemetry.io/collector/service/internal/graph/node.go b/vendor/go.opentelemetry.io/collector/service/internal/graph/node.go new file mode 100644 index 00000000000..0d1c329c68e --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/service/internal/graph/node.go @@ -0,0 +1,22 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package graph // import "go.opentelemetry.io/collector/service/internal/graph" + +import ( + "hash/fnv" + "strings" +) + +type nodeID int64 + +func (n nodeID) ID() int64 { + return int64(n) +} + +func newNodeID(parts ...string) nodeID { + h := fnv.New64a() + h.Write([]byte(strings.Join(parts, "|"))) + // nolint:gosec + return nodeID(h.Sum64()) +} diff --git a/vendor/go.opentelemetry.io/collector/service/internal/graph/nodes.go b/vendor/go.opentelemetry.io/collector/service/internal/graph/nodes.go deleted file mode 100644 index 238bbf3b287..00000000000 --- a/vendor/go.opentelemetry.io/collector/service/internal/graph/nodes.go +++ /dev/null @@ -1,396 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package graph // import 
"go.opentelemetry.io/collector/service/internal/graph" - -import ( - "context" - "fmt" - "hash/fnv" - "strings" - - "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/connector" - "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/exporter" - "go.opentelemetry.io/collector/internal/fanoutconsumer" - "go.opentelemetry.io/collector/processor" - "go.opentelemetry.io/collector/receiver" - "go.opentelemetry.io/collector/service/internal/capabilityconsumer" - "go.opentelemetry.io/collector/service/internal/components" -) - -const ( - receiverSeed = "receiver" - processorSeed = "processor" - exporterSeed = "exporter" - connectorSeed = "connector" - capabilitiesSeed = "capabilities" - fanOutToExporters = "fanout_to_exporters" -) - -// baseConsumer redeclared here since not public in consumer package. May consider to make that public. -type baseConsumer interface { - Capabilities() consumer.Capabilities -} - -type nodeID int64 - -func (n nodeID) ID() int64 { - return int64(n) -} - -func newNodeID(parts ...string) nodeID { - h := fnv.New64a() - h.Write([]byte(strings.Join(parts, "|"))) - return nodeID(h.Sum64()) -} - -type consumerNode interface { - getConsumer() baseConsumer -} - -// A receiver instance can be shared by multiple pipelines of the same type. -// Therefore, nodeID is derived from "pipeline type" and "component ID". -type receiverNode struct { - nodeID - componentID component.ID - pipelineType component.DataType - component.Component -} - -func newReceiverNode(pipelineType component.DataType, recvID component.ID) *receiverNode { - return &receiverNode{ - nodeID: newNodeID(receiverSeed, pipelineType.String(), recvID.String()), - componentID: recvID, - pipelineType: pipelineType, - } -} - -func (n *receiverNode) buildComponent(ctx context.Context, - tel component.TelemetrySettings, - info component.BuildInfo, - builder *receiver.Builder, - nexts []baseConsumer, -) error { - set := receiver.CreateSettings{ID: n.componentID, TelemetrySettings: tel, BuildInfo: info} - set.TelemetrySettings.Logger = components.ReceiverLogger(tel.Logger, n.componentID, n.pipelineType) - var err error - switch n.pipelineType { - case component.DataTypeTraces: - var consumers []consumer.Traces - for _, next := range nexts { - consumers = append(consumers, next.(consumer.Traces)) - } - n.Component, err = builder.CreateTraces(ctx, set, fanoutconsumer.NewTraces(consumers)) - case component.DataTypeMetrics: - var consumers []consumer.Metrics - for _, next := range nexts { - consumers = append(consumers, next.(consumer.Metrics)) - } - n.Component, err = builder.CreateMetrics(ctx, set, fanoutconsumer.NewMetrics(consumers)) - case component.DataTypeLogs: - var consumers []consumer.Logs - for _, next := range nexts { - consumers = append(consumers, next.(consumer.Logs)) - } - n.Component, err = builder.CreateLogs(ctx, set, fanoutconsumer.NewLogs(consumers)) - default: - return fmt.Errorf("error creating receiver %q for data type %q is not supported", set.ID, n.pipelineType) - } - if err != nil { - return fmt.Errorf("failed to create %q receiver for data type %q: %w", set.ID, n.pipelineType, err) - } - return nil -} - -var _ consumerNode = (*processorNode)(nil) - -// Every processor instance is unique to one pipeline. -// Therefore, nodeID is derived from "pipeline ID" and "component ID". 
-type processorNode struct { - nodeID - componentID component.ID - pipelineID component.ID - component.Component -} - -func newProcessorNode(pipelineID, procID component.ID) *processorNode { - return &processorNode{ - nodeID: newNodeID(processorSeed, pipelineID.String(), procID.String()), - componentID: procID, - pipelineID: pipelineID, - } -} - -func (n *processorNode) getConsumer() baseConsumer { - return n.Component.(baseConsumer) -} - -func (n *processorNode) buildComponent(ctx context.Context, - tel component.TelemetrySettings, - info component.BuildInfo, - builder *processor.Builder, - next baseConsumer, -) error { - set := processor.CreateSettings{ID: n.componentID, TelemetrySettings: tel, BuildInfo: info} - set.TelemetrySettings.Logger = components.ProcessorLogger(set.TelemetrySettings.Logger, n.componentID, n.pipelineID) - var err error - switch n.pipelineID.Type() { - case component.DataTypeTraces: - n.Component, err = builder.CreateTraces(ctx, set, next.(consumer.Traces)) - case component.DataTypeMetrics: - n.Component, err = builder.CreateMetrics(ctx, set, next.(consumer.Metrics)) - case component.DataTypeLogs: - n.Component, err = builder.CreateLogs(ctx, set, next.(consumer.Logs)) - default: - return fmt.Errorf("error creating processor %q in pipeline %q, data type %q is not supported", set.ID, n.pipelineID, n.pipelineID.Type()) - } - if err != nil { - return fmt.Errorf("failed to create %q processor, in pipeline %q: %w", set.ID, n.pipelineID, err) - } - return nil -} - -var _ consumerNode = (*exporterNode)(nil) - -// An exporter instance can be shared by multiple pipelines of the same type. -// Therefore, nodeID is derived from "pipeline type" and "component ID". -type exporterNode struct { - nodeID - componentID component.ID - pipelineType component.DataType - component.Component -} - -func newExporterNode(pipelineType component.DataType, exprID component.ID) *exporterNode { - return &exporterNode{ - nodeID: newNodeID(exporterSeed, pipelineType.String(), exprID.String()), - componentID: exprID, - pipelineType: pipelineType, - } -} - -func (n *exporterNode) getConsumer() baseConsumer { - return n.Component.(baseConsumer) -} - -func (n *exporterNode) buildComponent( - ctx context.Context, - tel component.TelemetrySettings, - info component.BuildInfo, - builder *exporter.Builder, -) error { - set := exporter.CreateSettings{ID: n.componentID, TelemetrySettings: tel, BuildInfo: info} - set.TelemetrySettings.Logger = components.ExporterLogger(set.TelemetrySettings.Logger, n.componentID, n.pipelineType) - var err error - switch n.pipelineType { - case component.DataTypeTraces: - n.Component, err = builder.CreateTraces(ctx, set) - case component.DataTypeMetrics: - n.Component, err = builder.CreateMetrics(ctx, set) - case component.DataTypeLogs: - n.Component, err = builder.CreateLogs(ctx, set) - default: - return fmt.Errorf("error creating exporter %q for data type %q is not supported", set.ID, n.pipelineType) - } - if err != nil { - return fmt.Errorf("failed to create %q exporter for data type %q: %w", set.ID, n.pipelineType, err) - } - return nil -} - -var _ consumerNode = (*connectorNode)(nil) - -// A connector instance connects one pipeline type to one other pipeline type. -// Therefore, nodeID is derived from "exporter pipeline type", "receiver pipeline type", and "component ID". 
-type connectorNode struct { - nodeID - componentID component.ID - exprPipelineType component.DataType - rcvrPipelineType component.DataType - component.Component - baseConsumer -} - -func newConnectorNode(exprPipelineType, rcvrPipelineType component.DataType, connID component.ID) *connectorNode { - return &connectorNode{ - nodeID: newNodeID(connectorSeed, connID.String(), exprPipelineType.String(), rcvrPipelineType.String()), - componentID: connID, - exprPipelineType: exprPipelineType, - rcvrPipelineType: rcvrPipelineType, - } -} - -func (n *connectorNode) getConsumer() baseConsumer { - return n.baseConsumer -} - -func (n *connectorNode) buildComponent( - ctx context.Context, - tel component.TelemetrySettings, - info component.BuildInfo, - builder *connector.Builder, - nexts []baseConsumer, -) error { - set := connector.CreateSettings{ID: n.componentID, TelemetrySettings: tel, BuildInfo: info} - set.TelemetrySettings.Logger = components.ConnectorLogger(set.TelemetrySettings.Logger, n.componentID, n.exprPipelineType, n.rcvrPipelineType) - - switch n.rcvrPipelineType { - case component.DataTypeTraces: - capability := consumer.Capabilities{MutatesData: false} - consumers := make(map[component.ID]consumer.Traces, len(nexts)) - for _, next := range nexts { - consumers[next.(*capabilitiesNode).pipelineID] = next.(consumer.Traces) - capability.MutatesData = capability.MutatesData || next.Capabilities().MutatesData - } - next := connector.NewTracesRouter(consumers) - - switch n.exprPipelineType { - case component.DataTypeTraces: - conn, err := builder.CreateTracesToTraces(ctx, set, next) - if err != nil { - return err - } - n.Component = conn - // When connecting pipelines of the same data type, the connector must - // inherit the capabilities of pipelines in which it is acting as a receiver. - // Since the incoming and outgoing data types are the same, we must also consider - // that the connector itself may MutatesData. - capability.MutatesData = capability.MutatesData || conn.Capabilities().MutatesData - n.baseConsumer = capabilityconsumer.NewTraces(conn, capability) - case component.DataTypeMetrics: - conn, err := builder.CreateMetricsToTraces(ctx, set, next) - if err != nil { - return err - } - n.Component, n.baseConsumer = conn, conn - case component.DataTypeLogs: - conn, err := builder.CreateLogsToTraces(ctx, set, next) - if err != nil { - return err - } - n.Component, n.baseConsumer = conn, conn - } - - case component.DataTypeMetrics: - capability := consumer.Capabilities{MutatesData: false} - consumers := make(map[component.ID]consumer.Metrics, len(nexts)) - for _, next := range nexts { - consumers[next.(*capabilitiesNode).pipelineID] = next.(consumer.Metrics) - capability.MutatesData = capability.MutatesData || next.Capabilities().MutatesData - } - next := connector.NewMetricsRouter(consumers) - - switch n.exprPipelineType { - case component.DataTypeTraces: - conn, err := builder.CreateTracesToMetrics(ctx, set, next) - if err != nil { - return err - } - n.Component, n.baseConsumer = conn, conn - case component.DataTypeMetrics: - conn, err := builder.CreateMetricsToMetrics(ctx, set, next) - if err != nil { - return err - } - n.Component = conn - // When connecting pipelines of the same data type, the connector must - // inherit the capabilities of pipelines in which it is acting as a receiver. - // Since the incoming and outgoing data types are the same, we must also consider - // that the connector itself may MutatesData. 
- capability.MutatesData = capability.MutatesData || conn.Capabilities().MutatesData - n.baseConsumer = capabilityconsumer.NewMetrics(conn, capability) - case component.DataTypeLogs: - conn, err := builder.CreateLogsToMetrics(ctx, set, next) - if err != nil { - return err - } - n.Component, n.baseConsumer = conn, conn - } - case component.DataTypeLogs: - capability := consumer.Capabilities{MutatesData: false} - consumers := make(map[component.ID]consumer.Logs, len(nexts)) - for _, next := range nexts { - consumers[next.(*capabilitiesNode).pipelineID] = next.(consumer.Logs) - capability.MutatesData = capability.MutatesData || next.Capabilities().MutatesData - } - next := connector.NewLogsRouter(consumers) - - switch n.exprPipelineType { - case component.DataTypeTraces: - conn, err := builder.CreateTracesToLogs(ctx, set, next) - if err != nil { - return err - } - n.Component, n.baseConsumer = conn, conn - case component.DataTypeMetrics: - conn, err := builder.CreateMetricsToLogs(ctx, set, next) - if err != nil { - return err - } - n.Component, n.baseConsumer = conn, conn - case component.DataTypeLogs: - conn, err := builder.CreateLogsToLogs(ctx, set, next) - if err != nil { - return err - } - n.Component = conn - // When connecting pipelines of the same data type, the connector must - // inherit the capabilities of pipelines in which it is acting as a receiver. - // Since the incoming and outgoing data types are the same, we must also consider - // that the connector itself may MutatesData. - capability.MutatesData = capability.MutatesData || conn.Capabilities().MutatesData - n.baseConsumer = capabilityconsumer.NewLogs(conn, capability) - } - } - return nil -} - -var _ consumerNode = (*capabilitiesNode)(nil) - -// Every pipeline has a "virtual" capabilities node immediately after the receiver(s). -// There are two purposes for this node: -// 1. Present aggregated capabilities to receivers, such as whether the pipeline mutates data. -// 2. Present a consistent "first consumer" for each pipeline. -// The nodeID is derived from "pipeline ID". -type capabilitiesNode struct { - nodeID - pipelineID component.ID - baseConsumer - consumer.ConsumeTracesFunc - consumer.ConsumeMetricsFunc - consumer.ConsumeLogsFunc -} - -func newCapabilitiesNode(pipelineID component.ID) *capabilitiesNode { - return &capabilitiesNode{ - nodeID: newNodeID(capabilitiesSeed, pipelineID.String()), - pipelineID: pipelineID, - } -} - -func (n *capabilitiesNode) getConsumer() baseConsumer { - return n -} - -var _ consumerNode = (*fanOutNode)(nil) - -// Each pipeline has one fan-out node before exporters. -// Therefore, nodeID is derived from "pipeline ID". 
-type fanOutNode struct { - nodeID - pipelineID component.ID - baseConsumer -} - -func newFanOutNode(pipelineID component.ID) *fanOutNode { - return &fanOutNode{ - nodeID: newNodeID(fanOutToExporters, pipelineID.String()), - pipelineID: pipelineID, - } -} - -func (n *fanOutNode) getConsumer() baseConsumer { - return n.baseConsumer -} diff --git a/vendor/go.opentelemetry.io/collector/service/internal/graph/processor.go b/vendor/go.opentelemetry.io/collector/service/internal/graph/processor.go new file mode 100644 index 00000000000..3288a505d80 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/service/internal/graph/processor.go @@ -0,0 +1,70 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package graph // import "go.opentelemetry.io/collector/service/internal/graph" + +import ( + "context" + "fmt" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/xconsumer" + "go.opentelemetry.io/collector/pipeline" + "go.opentelemetry.io/collector/pipeline/xpipeline" + "go.opentelemetry.io/collector/processor" + "go.opentelemetry.io/collector/service/internal/builders" + "go.opentelemetry.io/collector/service/internal/components" +) + +const processorSeed = "processor" + +var _ consumerNode = (*processorNode)(nil) + +// Every processor instance is unique to one pipeline. +// Therefore, nodeID is derived from "pipeline ID" and "component ID". +type processorNode struct { + nodeID + componentID component.ID + pipelineID pipeline.ID + component.Component +} + +func newProcessorNode(pipelineID pipeline.ID, procID component.ID) *processorNode { + return &processorNode{ + nodeID: newNodeID(processorSeed, pipelineID.String(), procID.String()), + componentID: procID, + pipelineID: pipelineID, + } +} + +func (n *processorNode) getConsumer() baseConsumer { + return n.Component.(baseConsumer) +} + +func (n *processorNode) buildComponent(ctx context.Context, + tel component.TelemetrySettings, + info component.BuildInfo, + builder *builders.ProcessorBuilder, + next baseConsumer, +) error { + tel.Logger = components.ProcessorLogger(tel.Logger, n.componentID, n.pipelineID) + set := processor.Settings{ID: n.componentID, TelemetrySettings: tel, BuildInfo: info} + var err error + switch n.pipelineID.Signal() { + case pipeline.SignalTraces: + n.Component, err = builder.CreateTraces(ctx, set, next.(consumer.Traces)) + case pipeline.SignalMetrics: + n.Component, err = builder.CreateMetrics(ctx, set, next.(consumer.Metrics)) + case pipeline.SignalLogs: + n.Component, err = builder.CreateLogs(ctx, set, next.(consumer.Logs)) + case xpipeline.SignalProfiles: + n.Component, err = builder.CreateProfiles(ctx, set, next.(xconsumer.Profiles)) + default: + return fmt.Errorf("error creating processor %q in pipeline %q, data type %q is not supported", set.ID, n.pipelineID.String(), n.pipelineID.Signal()) + } + if err != nil { + return fmt.Errorf("failed to create %q processor, in pipeline %q: %w", set.ID, n.pipelineID.String(), err) + } + return nil +} diff --git a/vendor/go.opentelemetry.io/collector/service/internal/graph/receiver.go b/vendor/go.opentelemetry.io/collector/service/internal/graph/receiver.go new file mode 100644 index 00000000000..48f7a36d148 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/service/internal/graph/receiver.go @@ -0,0 +1,81 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package graph // import 
"go.opentelemetry.io/collector/service/internal/graph" + +import ( + "context" + "fmt" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/xconsumer" + "go.opentelemetry.io/collector/internal/fanoutconsumer" + "go.opentelemetry.io/collector/pipeline" + "go.opentelemetry.io/collector/pipeline/xpipeline" + "go.opentelemetry.io/collector/receiver" + "go.opentelemetry.io/collector/service/internal/builders" + "go.opentelemetry.io/collector/service/internal/components" +) + +const receiverSeed = "receiver" + +// A receiver instance can be shared by multiple pipelines of the same type. +// Therefore, nodeID is derived from "pipeline type" and "component ID". +type receiverNode struct { + nodeID + componentID component.ID + pipelineType pipeline.Signal + component.Component +} + +func newReceiverNode(pipelineType pipeline.Signal, recvID component.ID) *receiverNode { + return &receiverNode{ + nodeID: newNodeID(receiverSeed, pipelineType.String(), recvID.String()), + componentID: recvID, + pipelineType: pipelineType, + } +} + +func (n *receiverNode) buildComponent(ctx context.Context, + tel component.TelemetrySettings, + info component.BuildInfo, + builder *builders.ReceiverBuilder, + nexts []baseConsumer, +) error { + tel.Logger = components.ReceiverLogger(tel.Logger, n.componentID, n.pipelineType) + set := receiver.Settings{ID: n.componentID, TelemetrySettings: tel, BuildInfo: info} + var err error + switch n.pipelineType { + case pipeline.SignalTraces: + var consumers []consumer.Traces + for _, next := range nexts { + consumers = append(consumers, next.(consumer.Traces)) + } + n.Component, err = builder.CreateTraces(ctx, set, fanoutconsumer.NewTraces(consumers)) + case pipeline.SignalMetrics: + var consumers []consumer.Metrics + for _, next := range nexts { + consumers = append(consumers, next.(consumer.Metrics)) + } + n.Component, err = builder.CreateMetrics(ctx, set, fanoutconsumer.NewMetrics(consumers)) + case pipeline.SignalLogs: + var consumers []consumer.Logs + for _, next := range nexts { + consumers = append(consumers, next.(consumer.Logs)) + } + n.Component, err = builder.CreateLogs(ctx, set, fanoutconsumer.NewLogs(consumers)) + case xpipeline.SignalProfiles: + var consumers []xconsumer.Profiles + for _, next := range nexts { + consumers = append(consumers, next.(xconsumer.Profiles)) + } + n.Component, err = builder.CreateProfiles(ctx, set, fanoutconsumer.NewProfiles(consumers)) + default: + return fmt.Errorf("error creating receiver %q for data type %q is not supported", set.ID, n.pipelineType) + } + if err != nil { + return fmt.Errorf("failed to create %q receiver for data type %q: %w", set.ID, n.pipelineType, err) + } + return nil +} diff --git a/vendor/go.opentelemetry.io/collector/service/internal/graph/zpages.go b/vendor/go.opentelemetry.io/collector/service/internal/graph/zpages.go index 0f2793974e9..4b7f2c5ae6d 100644 --- a/vendor/go.opentelemetry.io/collector/service/internal/graph/zpages.go +++ b/vendor/go.opentelemetry.io/collector/service/internal/graph/zpages.go @@ -55,7 +55,7 @@ func (g *Graph) HandleZPages(w http.ResponseWriter, r *http.Request) { sumData.Rows = append(sumData.Rows, zpages.SummaryPipelinesTableRowData{ FullName: c.String(), - InputType: c.Type().String(), + InputType: c.Signal().String(), MutatesData: p.capabilitiesNode.getConsumer().Capabilities().MutatesData, Receivers: recvIDs, Processors: procIDs, diff --git 
a/vendor/go.opentelemetry.io/collector/service/internal/metadata/generated_telemetry.go b/vendor/go.opentelemetry.io/collector/service/internal/metadata/generated_telemetry.go new file mode 100644 index 00000000000..e8d2360a85d --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/service/internal/metadata/generated_telemetry.go @@ -0,0 +1,179 @@ +// Code generated by mdatagen. DO NOT EDIT. + +package metadata + +import ( + "context" + "errors" + + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/noop" + "go.opentelemetry.io/otel/trace" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configtelemetry" +) + +func Meter(settings component.TelemetrySettings) metric.Meter { + return settings.MeterProvider.Meter("go.opentelemetry.io/collector/service") +} + +func Tracer(settings component.TelemetrySettings) trace.Tracer { + return settings.TracerProvider.Tracer("go.opentelemetry.io/collector/service") +} + +// TelemetryBuilder provides an interface for components to report telemetry +// as defined in metadata and user config. +type TelemetryBuilder struct { + meter metric.Meter + ProcessCPUSeconds metric.Float64ObservableCounter + observeProcessCPUSeconds func(context.Context, metric.Observer) error + ProcessMemoryRss metric.Int64ObservableGauge + observeProcessMemoryRss func(context.Context, metric.Observer) error + ProcessRuntimeHeapAllocBytes metric.Int64ObservableGauge + observeProcessRuntimeHeapAllocBytes func(context.Context, metric.Observer) error + ProcessRuntimeTotalAllocBytes metric.Int64ObservableCounter + observeProcessRuntimeTotalAllocBytes func(context.Context, metric.Observer) error + ProcessRuntimeTotalSysMemoryBytes metric.Int64ObservableGauge + observeProcessRuntimeTotalSysMemoryBytes func(context.Context, metric.Observer) error + ProcessUptime metric.Float64ObservableCounter + observeProcessUptime func(context.Context, metric.Observer) error +} + +// TelemetryBuilderOption applies changes to default builder. +type TelemetryBuilderOption interface { + apply(*TelemetryBuilder) +} + +type telemetryBuilderOptionFunc func(mb *TelemetryBuilder) + +func (tbof telemetryBuilderOptionFunc) apply(mb *TelemetryBuilder) { + tbof(mb) +} + +// WithProcessCPUSecondsCallback sets callback for observable ProcessCPUSeconds metric. +func WithProcessCPUSecondsCallback(cb func() float64, opts ...metric.ObserveOption) TelemetryBuilderOption { + return telemetryBuilderOptionFunc(func(builder *TelemetryBuilder) { + builder.observeProcessCPUSeconds = func(_ context.Context, o metric.Observer) error { + o.ObserveFloat64(builder.ProcessCPUSeconds, cb(), opts...) + return nil + } + }) +} + +// WithProcessMemoryRssCallback sets callback for observable ProcessMemoryRss metric. +func WithProcessMemoryRssCallback(cb func() int64, opts ...metric.ObserveOption) TelemetryBuilderOption { + return telemetryBuilderOptionFunc(func(builder *TelemetryBuilder) { + builder.observeProcessMemoryRss = func(_ context.Context, o metric.Observer) error { + o.ObserveInt64(builder.ProcessMemoryRss, cb(), opts...) + return nil + } + }) +} + +// WithProcessRuntimeHeapAllocBytesCallback sets callback for observable ProcessRuntimeHeapAllocBytes metric. 
+func WithProcessRuntimeHeapAllocBytesCallback(cb func() int64, opts ...metric.ObserveOption) TelemetryBuilderOption { + return telemetryBuilderOptionFunc(func(builder *TelemetryBuilder) { + builder.observeProcessRuntimeHeapAllocBytes = func(_ context.Context, o metric.Observer) error { + o.ObserveInt64(builder.ProcessRuntimeHeapAllocBytes, cb(), opts...) + return nil + } + }) +} + +// WithProcessRuntimeTotalAllocBytesCallback sets callback for observable ProcessRuntimeTotalAllocBytes metric. +func WithProcessRuntimeTotalAllocBytesCallback(cb func() int64, opts ...metric.ObserveOption) TelemetryBuilderOption { + return telemetryBuilderOptionFunc(func(builder *TelemetryBuilder) { + builder.observeProcessRuntimeTotalAllocBytes = func(_ context.Context, o metric.Observer) error { + o.ObserveInt64(builder.ProcessRuntimeTotalAllocBytes, cb(), opts...) + return nil + } + }) +} + +// WithProcessRuntimeTotalSysMemoryBytesCallback sets callback for observable ProcessRuntimeTotalSysMemoryBytes metric. +func WithProcessRuntimeTotalSysMemoryBytesCallback(cb func() int64, opts ...metric.ObserveOption) TelemetryBuilderOption { + return telemetryBuilderOptionFunc(func(builder *TelemetryBuilder) { + builder.observeProcessRuntimeTotalSysMemoryBytes = func(_ context.Context, o metric.Observer) error { + o.ObserveInt64(builder.ProcessRuntimeTotalSysMemoryBytes, cb(), opts...) + return nil + } + }) +} + +// WithProcessUptimeCallback sets callback for observable ProcessUptime metric. +func WithProcessUptimeCallback(cb func() float64, opts ...metric.ObserveOption) TelemetryBuilderOption { + return telemetryBuilderOptionFunc(func(builder *TelemetryBuilder) { + builder.observeProcessUptime = func(_ context.Context, o metric.Observer) error { + o.ObserveFloat64(builder.ProcessUptime, cb(), opts...) 
+ return nil + } + }) +} + +// NewTelemetryBuilder provides a struct with methods to update all internal telemetry +// for a component +func NewTelemetryBuilder(settings component.TelemetrySettings, options ...TelemetryBuilderOption) (*TelemetryBuilder, error) { + builder := TelemetryBuilder{} + for _, op := range options { + op.apply(&builder) + } + builder.meter = Meter(settings) + var err, errs error + builder.ProcessCPUSeconds, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Float64ObservableCounter( + "otelcol_process_cpu_seconds", + metric.WithDescription("Total CPU user and system time in seconds [alpha]"), + metric.WithUnit("s"), + ) + errs = errors.Join(errs, err) + _, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).RegisterCallback(builder.observeProcessCPUSeconds, builder.ProcessCPUSeconds) + errs = errors.Join(errs, err) + builder.ProcessMemoryRss, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64ObservableGauge( + "otelcol_process_memory_rss", + metric.WithDescription("Total physical memory (resident set size) [alpha]"), + metric.WithUnit("By"), + ) + errs = errors.Join(errs, err) + _, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).RegisterCallback(builder.observeProcessMemoryRss, builder.ProcessMemoryRss) + errs = errors.Join(errs, err) + builder.ProcessRuntimeHeapAllocBytes, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64ObservableGauge( + "otelcol_process_runtime_heap_alloc_bytes", + metric.WithDescription("Bytes of allocated heap objects (see 'go doc runtime.MemStats.HeapAlloc') [alpha]"), + metric.WithUnit("By"), + ) + errs = errors.Join(errs, err) + _, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).RegisterCallback(builder.observeProcessRuntimeHeapAllocBytes, builder.ProcessRuntimeHeapAllocBytes) + errs = errors.Join(errs, err) + builder.ProcessRuntimeTotalAllocBytes, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64ObservableCounter( + "otelcol_process_runtime_total_alloc_bytes", + metric.WithDescription("Cumulative bytes allocated for heap objects (see 'go doc runtime.MemStats.TotalAlloc') [alpha]"), + metric.WithUnit("By"), + ) + errs = errors.Join(errs, err) + _, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).RegisterCallback(builder.observeProcessRuntimeTotalAllocBytes, builder.ProcessRuntimeTotalAllocBytes) + errs = errors.Join(errs, err) + builder.ProcessRuntimeTotalSysMemoryBytes, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Int64ObservableGauge( + "otelcol_process_runtime_total_sys_memory_bytes", + metric.WithDescription("Total bytes of memory obtained from the OS (see 'go doc runtime.MemStats.Sys') [alpha]"), + metric.WithUnit("By"), + ) + errs = errors.Join(errs, err) + _, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).RegisterCallback(builder.observeProcessRuntimeTotalSysMemoryBytes, builder.ProcessRuntimeTotalSysMemoryBytes) + errs = errors.Join(errs, err) + builder.ProcessUptime, err = getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).Float64ObservableCounter( + "otelcol_process_uptime", + metric.WithDescription("Uptime of the process [alpha]"), + metric.WithUnit("s"), + ) + errs = errors.Join(errs, err) + _, err 
= getLeveledMeter(builder.meter, configtelemetry.LevelBasic, settings.MetricsLevel).RegisterCallback(builder.observeProcessUptime, builder.ProcessUptime) + errs = errors.Join(errs, err) + return &builder, errs +} + +func getLeveledMeter(meter metric.Meter, cfgLevel, srvLevel configtelemetry.Level) metric.Meter { + if cfgLevel <= srvLevel { + return meter + } + return noop.Meter{} +} diff --git a/vendor/go.opentelemetry.io/collector/service/internal/proctelemetry/process_telemetry.go b/vendor/go.opentelemetry.io/collector/service/internal/proctelemetry/process_telemetry.go index 991897f8b1b..de60d4518ed 100644 --- a/vendor/go.opentelemetry.io/collector/service/internal/proctelemetry/process_telemetry.go +++ b/vendor/go.opentelemetry.io/collector/service/internal/proctelemetry/process_telemetry.go @@ -10,31 +10,19 @@ import ( "sync" "time" - "github.com/shirou/gopsutil/v3/common" - "github.com/shirou/gopsutil/v3/process" - otelmetric "go.opentelemetry.io/otel/metric" - "go.uber.org/multierr" -) + "github.com/shirou/gopsutil/v4/common" + "github.com/shirou/gopsutil/v4/process" -const ( - scopeName = "go.opentelemetry.io/collector/service/process_telemetry" - processNameKey = "process_name" + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/service/internal/metadata" ) // processMetrics is a struct that contains views related to process metrics (cpu, mem, etc.) type processMetrics struct { startTimeUnixNano int64 - ballastSizeBytes uint64 proc *process.Process context context.Context - otelProcessUptime otelmetric.Float64ObservableCounter - otelAllocMem otelmetric.Int64ObservableGauge - otelTotalAllocMem otelmetric.Int64ObservableCounter - otelSysMem otelmetric.Int64ObservableGauge - otelCPUSeconds otelmetric.Float64ObservableCounter - otelRSSMemory otelmetric.Int64ObservableGauge - // mu protects everything below. mu sync.Mutex lastMsRead time.Time @@ -64,7 +52,7 @@ func WithHostProc(hostProc string) RegisterOption { // RegisterProcessMetrics creates a new set of processMetrics (mem, cpu) that can be used to measure // basic information about this process.
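// A minimal caller sketch against the new signature below, assuming a
// component.TelemetrySettings value named telSettings is in scope (this
// mirrors the call that service.New makes after building its telemetry):
//
//	if err := proctelemetry.RegisterProcessMetrics(telSettings); err != nil {
//		return fmt.Errorf("failed to register process metrics: %w", err)
//	}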
-func RegisterProcessMetrics(mp otelmetric.MeterProvider, ballastSizeBytes uint64, opts ...RegisterOption) error { +func RegisterProcessMetrics(cfg component.TelemetrySettings, opts ...RegisterOption) error { set := registerOption{} for _, opt := range opts { opt.apply(&set) @@ -72,7 +60,6 @@ func RegisterProcessMetrics(mp otelmetric.MeterProvider, ballastSizeBytes uint64 var err error pm := &processMetrics{ startTimeUnixNano: time.Now().UnixNano(), - ballastSizeBytes: ballastSizeBytes, ms: &runtime.MemStats{}, } @@ -81,78 +68,21 @@ func RegisterProcessMetrics(mp otelmetric.MeterProvider, ballastSizeBytes uint64 ctx = context.WithValue(ctx, common.EnvKey, common.EnvMap{common.HostProcEnvKey: set.hostProc}) } pm.context = ctx + // nolint:gosec pm.proc, err = process.NewProcessWithContext(pm.context, int32(os.Getpid())) if err != nil { return err } - return pm.record(mp.Meter(scopeName)) -} - -func (pm *processMetrics) record(meter otelmetric.Meter) error { - var errs, err error - - pm.otelProcessUptime, err = meter.Float64ObservableCounter( - "process_uptime", - otelmetric.WithDescription("Uptime of the process"), - otelmetric.WithUnit("s"), - otelmetric.WithFloat64Callback(func(_ context.Context, o otelmetric.Float64Observer) error { - o.Observe(pm.updateProcessUptime()) - return nil - })) - errs = multierr.Append(errs, err) - - pm.otelAllocMem, err = meter.Int64ObservableGauge( - "process_runtime_heap_alloc_bytes", - otelmetric.WithDescription("Bytes of allocated heap objects (see 'go doc runtime.MemStats.HeapAlloc')"), - otelmetric.WithUnit("By"), - otelmetric.WithInt64Callback(func(_ context.Context, o otelmetric.Int64Observer) error { - o.Observe(pm.updateAllocMem()) - return nil - })) - errs = multierr.Append(errs, err) - - pm.otelTotalAllocMem, err = meter.Int64ObservableCounter( - "process_runtime_total_alloc_bytes", - otelmetric.WithDescription("Cumulative bytes allocated for heap objects (see 'go doc runtime.MemStats.TotalAlloc')"), - otelmetric.WithUnit("By"), - otelmetric.WithInt64Callback(func(_ context.Context, o otelmetric.Int64Observer) error { - o.Observe(pm.updateTotalAllocMem()) - return nil - })) - errs = multierr.Append(errs, err) - - pm.otelSysMem, err = meter.Int64ObservableGauge( - "process_runtime_total_sys_memory_bytes", - otelmetric.WithDescription("Total bytes of memory obtained from the OS (see 'go doc runtime.MemStats.Sys')"), - otelmetric.WithUnit("By"), - otelmetric.WithInt64Callback(func(_ context.Context, o otelmetric.Int64Observer) error { - o.Observe(pm.updateSysMem()) - return nil - })) - errs = multierr.Append(errs, err) - - pm.otelCPUSeconds, err = meter.Float64ObservableCounter( - "process_cpu_seconds", - otelmetric.WithDescription("Total CPU user and system time in seconds"), - otelmetric.WithUnit("s"), - otelmetric.WithFloat64Callback(func(_ context.Context, o otelmetric.Float64Observer) error { - o.Observe(pm.updateCPUSeconds()) - return nil - })) - errs = multierr.Append(errs, err) - - pm.otelRSSMemory, err = meter.Int64ObservableGauge( - "process_memory_rss", - otelmetric.WithDescription("Total physical memory (resident set size)"), - otelmetric.WithUnit("By"), - otelmetric.WithInt64Callback(func(_ context.Context, o otelmetric.Int64Observer) error { - o.Observe(pm.updateRSSMemory()) - return nil - })) - errs = multierr.Append(errs, err) - - return errs + _, err = metadata.NewTelemetryBuilder(cfg, + metadata.WithProcessUptimeCallback(pm.updateProcessUptime), + metadata.WithProcessRuntimeHeapAllocBytesCallback(pm.updateAllocMem), + 
metadata.WithProcessRuntimeTotalAllocBytesCallback(pm.updateTotalAllocMem), + metadata.WithProcessRuntimeTotalSysMemoryBytesCallback(pm.updateSysMem), + metadata.WithProcessCPUSecondsCallback(pm.updateCPUSeconds), + metadata.WithProcessMemoryRssCallback(pm.updateRSSMemory), + ) + return err } func (pm *processMetrics) updateProcessUptime() float64 { @@ -164,6 +94,7 @@ func (pm *processMetrics) updateAllocMem() int64 { pm.mu.Lock() defer pm.mu.Unlock() pm.readMemStatsIfNeeded() + // nolint:gosec return int64(pm.ms.Alloc) } @@ -171,6 +102,7 @@ func (pm *processMetrics) updateTotalAllocMem() int64 { pm.mu.Lock() defer pm.mu.Unlock() pm.readMemStatsIfNeeded() + // nolint:gosec return int64(pm.ms.TotalAlloc) } @@ -178,6 +110,7 @@ func (pm *processMetrics) updateSysMem() int64 { pm.mu.Lock() defer pm.mu.Unlock() pm.readMemStatsIfNeeded() + // nolint:gosec return int64(pm.ms.Sys) } @@ -196,6 +129,7 @@ func (pm *processMetrics) updateRSSMemory() int64 { if err != nil { return 0 } + // nolint:gosec return int64(mem.RSS) } @@ -207,10 +141,4 @@ func (pm *processMetrics) readMemStatsIfNeeded() { } pm.lastMsRead = now runtime.ReadMemStats(pm.ms) - if pm.ballastSizeBytes > 0 { - pm.ms.Alloc -= pm.ballastSizeBytes - pm.ms.HeapAlloc -= pm.ballastSizeBytes - pm.ms.HeapSys -= pm.ballastSizeBytes - pm.ms.HeapInuse -= pm.ballastSizeBytes - } } diff --git a/vendor/go.opentelemetry.io/collector/service/internal/servicetelemetry/nop_telemetry_settings.go b/vendor/go.opentelemetry.io/collector/service/internal/servicetelemetry/nop_telemetry_settings.go deleted file mode 100644 index 116554a51f8..00000000000 --- a/vendor/go.opentelemetry.io/collector/service/internal/servicetelemetry/nop_telemetry_settings.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package servicetelemetry // import "go.opentelemetry.io/collector/service/internal/servicetelemetry" - -import ( - noopmetric "go.opentelemetry.io/otel/metric/noop" - nooptrace "go.opentelemetry.io/otel/trace/noop" - "go.uber.org/zap" - - "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/config/configtelemetry" - "go.opentelemetry.io/collector/pdata/pcommon" - "go.opentelemetry.io/collector/service/internal/status" -) - -// NewNopTelemetrySettings returns a new nop settings for Create* functions. 
-func NewNopTelemetrySettings() TelemetrySettings { - return TelemetrySettings{ - Logger: zap.NewNop(), - TracerProvider: nooptrace.NewTracerProvider(), - MeterProvider: noopmetric.NewMeterProvider(), - MetricsLevel: configtelemetry.LevelNone, - Resource: pcommon.NewResource(), - Status: status.NewReporter(func(*component.InstanceID, *component.StatusEvent) {}, func(error) {}), - } -} diff --git a/vendor/go.opentelemetry.io/collector/service/internal/servicetelemetry/telemetry_settings.go b/vendor/go.opentelemetry.io/collector/service/internal/servicetelemetry/telemetry_settings.go deleted file mode 100644 index 55c6b9f3962..00000000000 --- a/vendor/go.opentelemetry.io/collector/service/internal/servicetelemetry/telemetry_settings.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package servicetelemetry // import "go.opentelemetry.io/collector/service/internal/servicetelemetry" - -import ( - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/trace" - "go.uber.org/zap" - - "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/config/configtelemetry" - "go.opentelemetry.io/collector/pdata/pcommon" - "go.opentelemetry.io/collector/service/internal/status" -) - -// TelemetrySettings mirrors component.TelemetrySettings except for the mechanism for reporting -// status. Service-level status reporting has additional methods which can report status for -// components by their InstanceID whereas the component versions are tied to a specific component. -type TelemetrySettings struct { - // Logger that the factory can use during creation and can pass to the created - // component to be used later as well. - Logger *zap.Logger - - // TracerProvider that the factory can pass to other instrumented third-party libraries. - TracerProvider trace.TracerProvider - - // MeterProvider that the factory can pass to other instrumented third-party libraries. - MeterProvider metric.MeterProvider - - // MetricsLevel controls the level of detail for metrics emitted by the collector. - // Experimental: *NOTE* this field is experimental and may be changed or removed. - MetricsLevel configtelemetry.Level - - // Resource contains the resource attributes for the collector's telemetry. - Resource pcommon.Resource - - // Status contains a Reporter that allows the service to report status on behalf of a - // component. - Status *status.Reporter -} - -// ToComponentTelemetrySettings returns a TelemetrySettings for a specific component derived from -// this service level Settings object. 
-func (s TelemetrySettings) ToComponentTelemetrySettings(id *component.InstanceID) component.TelemetrySettings { - statusFunc := status.NewReportStatusFunc(id, s.Status.ReportStatus) - return component.TelemetrySettings{ - Logger: s.Logger, - TracerProvider: s.TracerProvider, - MeterProvider: s.MeterProvider, - MetricsLevel: s.MetricsLevel, - Resource: s.Resource, - ReportStatus: statusFunc, - } -} diff --git a/vendor/go.opentelemetry.io/collector/service/internal/status/status.go b/vendor/go.opentelemetry.io/collector/service/internal/status/status.go index e1d524906e5..a27bff618dd 100644 --- a/vendor/go.opentelemetry.io/collector/service/internal/status/status.go +++ b/vendor/go.opentelemetry.io/collector/service/internal/status/status.go @@ -8,26 +8,26 @@ import ( "fmt" "sync" - "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componentstatus" ) -// onTransitionFunc receives a component.StatusEvent on a successful state transition -type onTransitionFunc func(*component.StatusEvent) +// onTransitionFunc receives a componentstatus.Event on a successful state transition +type onTransitionFunc func(*componentstatus.Event) // errInvalidStateTransition is returned for invalid state transitions var errInvalidStateTransition = errors.New("invalid state transition") // fsm is a finite state machine that models transitions for component status type fsm struct { - current *component.StatusEvent - transitions map[component.Status]map[component.Status]struct{} + current *componentstatus.Event + transitions map[componentstatus.Status]map[componentstatus.Status]struct{} onTransition onTransitionFunc } // transition will attempt to execute a state transition. If it's successful, it calls the -// onTransitionFunc with a StatusEvent representing the new state. Returns an error if the arguments +// onTransitionFunc with an Event representing the new state. // result in an invalid status, or if the state transition is not valid. -func (m *fsm) transition(ev *component.StatusEvent) error { +func (m *fsm) transition(ev *componentstatus.Event) error { if _, ok := m.transitions[m.current.Status()][ev.Status()]; !ok { return fmt.Errorf( "cannot transition from %s to %s: %w", @@ -41,134 +41,123 @@ func (m *fsm) transition(ev *component.StatusEvent) error { return nil } -// newFSM creates a state machine with all valid transitions for component.Status. -// The initial state is set to component.StatusNone. +// newFSM creates a state machine with all valid transitions for componentstatus.Status. +// The initial state is set to componentstatus.StatusNone.
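+// For illustration (a sketch, not upstream documentation): per the transition
+// table defined here, the only legal first move is StatusNone -> StatusStarting,
+// and StatusStopped has no outgoing transitions. A Reporter built on this fsm,
+// with a hypothetical instance id, can therefore do:
+//
+//	r.ReportStatus(id, componentstatus.NewEvent(componentstatus.StatusStarting))
+//	r.ReportOKIfStarting(id) // Starting -> OK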
func newFSM(onTransition onTransitionFunc) *fsm { return &fsm{ - current: component.NewStatusEvent(component.StatusNone), + current: componentstatus.NewEvent(componentstatus.StatusNone), onTransition: onTransition, - transitions: map[component.Status]map[component.Status]struct{}{ - component.StatusNone: { - component.StatusStarting: {}, + transitions: map[componentstatus.Status]map[componentstatus.Status]struct{}{ + componentstatus.StatusNone: { + componentstatus.StatusStarting: {}, }, - component.StatusStarting: { - component.StatusOK: {}, - component.StatusRecoverableError: {}, - component.StatusPermanentError: {}, - component.StatusFatalError: {}, - component.StatusStopping: {}, + componentstatus.StatusStarting: { + componentstatus.StatusOK: {}, + componentstatus.StatusRecoverableError: {}, + componentstatus.StatusPermanentError: {}, + componentstatus.StatusFatalError: {}, + componentstatus.StatusStopping: {}, }, - component.StatusOK: { - component.StatusRecoverableError: {}, - component.StatusPermanentError: {}, - component.StatusFatalError: {}, - component.StatusStopping: {}, + componentstatus.StatusOK: { + componentstatus.StatusRecoverableError: {}, + componentstatus.StatusPermanentError: {}, + componentstatus.StatusFatalError: {}, + componentstatus.StatusStopping: {}, }, - component.StatusRecoverableError: { - component.StatusOK: {}, - component.StatusPermanentError: {}, - component.StatusFatalError: {}, - component.StatusStopping: {}, + componentstatus.StatusRecoverableError: { + componentstatus.StatusOK: {}, + componentstatus.StatusPermanentError: {}, + componentstatus.StatusFatalError: {}, + componentstatus.StatusStopping: {}, }, - component.StatusPermanentError: {}, - component.StatusFatalError: {}, - component.StatusStopping: { - component.StatusRecoverableError: {}, - component.StatusPermanentError: {}, - component.StatusFatalError: {}, - component.StatusStopped: {}, + componentstatus.StatusPermanentError: {}, + componentstatus.StatusFatalError: {}, + componentstatus.StatusStopping: { + componentstatus.StatusRecoverableError: {}, + componentstatus.StatusPermanentError: {}, + componentstatus.StatusFatalError: {}, + componentstatus.StatusStopped: {}, }, - component.StatusStopped: {}, + componentstatus.StatusStopped: {}, }, } } // NotifyStatusFunc is the receiver of status events after successful state transitions -type NotifyStatusFunc func(*component.InstanceID, *component.StatusEvent) +type NotifyStatusFunc func(*componentstatus.InstanceID, *componentstatus.Event) // InvalidTransitionFunc is the receiver of invalid transition errors type InvalidTransitionFunc func(error) -// ServiceStatusFunc is the expected type of ReportStatus for servicetelemetry.Settings -type ServiceStatusFunc func(*component.InstanceID, *component.StatusEvent) +// ServiceStatusFunc is the expected type of ReportStatus +type ServiceStatusFunc func(*componentstatus.InstanceID, *componentstatus.Event) // ErrStatusNotReady is returned when trying to report status before service start var ErrStatusNotReady = errors.New("report component status is not ready until service start") // Reporter handles component status reporting -type Reporter struct { +type Reporter interface { + ReportStatus(id *componentstatus.InstanceID, ev *componentstatus.Event) + ReportOKIfStarting(id *componentstatus.InstanceID) +} + +type reporter struct { mu sync.Mutex - ready bool - fsmMap map[*component.InstanceID]*fsm + fsmMap map[*componentstatus.InstanceID]*fsm onStatusChange NotifyStatusFunc onInvalidTransition 
InvalidTransitionFunc } // NewReporter returns a reporter that will invoke the NotifyStatusFunc when a component's status // has changed. -func NewReporter(onStatusChange NotifyStatusFunc, onInvalidTransition InvalidTransitionFunc) *Reporter { - return &Reporter{ - fsmMap: make(map[*component.InstanceID]*fsm), +func NewReporter(onStatusChange NotifyStatusFunc, onInvalidTransition InvalidTransitionFunc) Reporter { + return &reporter{ + fsmMap: make(map[*componentstatus.InstanceID]*fsm), onStatusChange: onStatusChange, onInvalidTransition: onInvalidTransition, } } -// Ready enables status reporting -func (r *Reporter) Ready() { - r.mu.Lock() - defer r.mu.Unlock() - r.ready = true -} - // ReportStatus reports status for the given InstanceID -func (r *Reporter) ReportStatus( - id *component.InstanceID, - ev *component.StatusEvent, +func (r *reporter) ReportStatus( + id *componentstatus.InstanceID, + ev *componentstatus.Event, ) { r.mu.Lock() defer r.mu.Unlock() - if !r.ready { - r.onInvalidTransition(ErrStatusNotReady) - } else { - if err := r.componentFSM(id).transition(ev); err != nil { - r.onInvalidTransition(err) - } + + if err := r.componentFSM(id).transition(ev); err != nil { + r.onInvalidTransition(err) } } -func (r *Reporter) ReportOKIfStarting(id *component.InstanceID) { +func (r *reporter) ReportOKIfStarting(id *componentstatus.InstanceID) { r.mu.Lock() defer r.mu.Unlock() - if !r.ready { - r.onInvalidTransition(ErrStatusNotReady) - } fsm := r.componentFSM(id) - if fsm.current.Status() == component.StatusStarting { - if err := fsm.transition(component.NewStatusEvent(component.StatusOK)); err != nil { + if fsm.current.Status() == componentstatus.StatusStarting { + if err := fsm.transition(componentstatus.NewEvent(componentstatus.StatusOK)); err != nil { r.onInvalidTransition(err) } } } // Note: a lock must be acquired before calling this method. -func (r *Reporter) componentFSM(id *component.InstanceID) *fsm { +func (r *reporter) componentFSM(id *componentstatus.InstanceID) *fsm { fsm, ok := r.fsmMap[id] if !ok { - fsm = newFSM(func(ev *component.StatusEvent) { r.onStatusChange(id, ev) }) + fsm = newFSM(func(ev *componentstatus.Event) { r.onStatusChange(id, ev) }) r.fsmMap[id] = fsm } return fsm } -// NewReportStatusFunc returns a function to be used as ReportStatus for -// component.TelemetrySettings, which differs from servicetelemetry.Settings in that -// the component version is tied to specific component instance. 
+// NewReportStatusFunc returns a function that reports status events for the given InstanceID through the provided ServiceStatusFunc. func NewReportStatusFunc( - id *component.InstanceID, + id *componentstatus.InstanceID, srvStatus ServiceStatusFunc, -) func(*component.StatusEvent) { +) func(*componentstatus.Event) { - return func(ev *component.StatusEvent) { + return func(ev *componentstatus.Event) { srvStatus(id, ev) } } diff --git a/vendor/go.opentelemetry.io/collector/service/metadata.yaml b/vendor/go.opentelemetry.io/collector/service/metadata.yaml new file mode 100644 index 00000000000..a1531f7e382 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/service/metadata.yaml @@ -0,0 +1,73 @@ +type: service +github_project: open-telemetry/opentelemetry-collector + +status: + class: pkg + stability: + development: [traces, metrics, logs] + distributions: [core, contrib] + +telemetry: + metrics: + process_uptime: + enabled: true + stability: + level: alpha + description: Uptime of the process + unit: s + sum: + async: true + value_type: double + monotonic: true + + process_runtime_heap_alloc_bytes: + enabled: true + stability: + level: alpha + description: Bytes of allocated heap objects (see 'go doc runtime.MemStats.HeapAlloc') + unit: By + gauge: + async: true + value_type: int + + process_runtime_total_alloc_bytes: + enabled: true + stability: + level: alpha + description: Cumulative bytes allocated for heap objects (see 'go doc runtime.MemStats.TotalAlloc') + unit: By + sum: + async: true + value_type: int + monotonic: true + + process_runtime_total_sys_memory_bytes: + enabled: true + stability: + level: alpha + description: Total bytes of memory obtained from the OS (see 'go doc runtime.MemStats.Sys') + unit: By + gauge: + async: true + value_type: int + + process_cpu_seconds: + enabled: true + stability: + level: alpha + description: Total CPU user and system time in seconds + unit: s + sum: + async: true + value_type: double + monotonic: true + + process_memory_rss: + enabled: true + stability: + level: alpha + description: Total physical memory (resident set size) + unit: By + gauge: + async: true + value_type: int diff --git a/vendor/go.opentelemetry.io/collector/service/pipelines/config.go b/vendor/go.opentelemetry.io/collector/service/pipelines/config.go index ed8c77a31c0..025d3c8395a 100644 --- a/vendor/go.opentelemetry.io/collector/service/pipelines/config.go +++ b/vendor/go.opentelemetry.io/collector/service/pipelines/config.go @@ -8,16 +8,27 @@ import ( "fmt" "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/featuregate" + "go.opentelemetry.io/collector/pipeline" + "go.opentelemetry.io/collector/pipeline/xpipeline" ) var ( errMissingServicePipelines = errors.New("service must have at least one pipeline") errMissingServicePipelineReceivers = errors.New("must have at least one receiver") errMissingServicePipelineExporters = errors.New("must have at least one exporter") + + serviceProfileSupportGateID = "service.profilesSupport" + serviceProfileSupportGate = featuregate.GlobalRegistry().MustRegister( + serviceProfileSupportGateID, + featuregate.StageAlpha, + featuregate.WithRegisterFromVersion("v0.112.0"), + featuregate.WithRegisterDescription("Controls whether profiles support can be enabled"), + ) ) // Config defines the configurable settings for service pipelines. -type Config map[component.ID]*PipelineConfig +type Config map[pipeline.ID]*PipelineConfig func (cfg Config) Validate() error { // Must have at least one pipeline.
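	// A sketch (not present in this patch) of how a distribution could opt in
	// to the alpha profiles signal before this validation runs; the gate ID is
	// the serviceProfileSupportGateID registered above, and Registry.Set is
	// existing featuregate API:
	//
	//	_ = featuregate.GlobalRegistry().Set("service.profilesSupport", true)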
@@ -27,14 +38,25 @@ func (cfg Config) Validate() error { // Check that all pipelines have at least one receiver and one exporter, and they reference // only configured components. - for pipelineID, pipeline := range cfg { - if pipelineID.Type() != component.DataTypeTraces && pipelineID.Type() != component.DataTypeMetrics && pipelineID.Type() != component.DataTypeLogs { - return fmt.Errorf("pipeline %q: unknown datatype %q", pipelineID, pipelineID.Type()) + for pipelineID, p := range cfg { + switch pipelineID.Signal() { + case pipeline.SignalTraces, pipeline.SignalMetrics, pipeline.SignalLogs: + // Continue + case xpipeline.SignalProfiles: + if !serviceProfileSupportGate.IsEnabled() { + return fmt.Errorf( + "pipeline %q: profiling signal support is at alpha level, gated under the %q feature gate", + pipelineID.String(), + serviceProfileSupportGateID, + ) + } + default: + return fmt.Errorf("pipeline %q: unknown signal %q", pipelineID.String(), pipelineID.Signal()) } // Validate pipeline has at least one receiver. - if err := pipeline.Validate(); err != nil { - return fmt.Errorf("pipeline %q: %w", pipelineID, err) + if err := p.Validate(); err != nil { + return fmt.Errorf("pipeline %q: %w", pipelineID.String(), err) } } diff --git a/vendor/go.opentelemetry.io/collector/service/service.go b/vendor/go.opentelemetry.io/collector/service/service.go index 53ea3eebe65..cf4b3475229 100644 --- a/vendor/go.opentelemetry.io/collector/service/service.go +++ b/vendor/go.opentelemetry.io/collector/service/service.go @@ -1,6 +1,8 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 +//go:generate mdatagen metadata.yaml + package service // import "go.opentelemetry.io/collector/service" import ( @@ -9,6 +11,8 @@ import ( "fmt" "runtime" + "go.opentelemetry.io/contrib/config" + "go.opentelemetry.io/otel/log" "go.opentelemetry.io/otel/metric" sdkresource "go.opentelemetry.io/otel/sdk/resource" "go.uber.org/multierr" @@ -20,20 +24,29 @@ import ( "go.opentelemetry.io/collector/connector" "go.opentelemetry.io/collector/exporter" "go.opentelemetry.io/collector/extension" - "go.opentelemetry.io/collector/internal/localhostgate" - "go.opentelemetry.io/collector/internal/obsreportconfig" + "go.opentelemetry.io/collector/featuregate" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/processor" "go.opentelemetry.io/collector/receiver" + semconv "go.opentelemetry.io/collector/semconv/v1.26.0" "go.opentelemetry.io/collector/service/extensions" + "go.opentelemetry.io/collector/service/internal/builders" "go.opentelemetry.io/collector/service/internal/graph" "go.opentelemetry.io/collector/service/internal/proctelemetry" "go.opentelemetry.io/collector/service/internal/resource" - "go.opentelemetry.io/collector/service/internal/servicetelemetry" "go.opentelemetry.io/collector/service/internal/status" "go.opentelemetry.io/collector/service/telemetry" ) +// useOtelWithSDKConfigurationForInternalTelemetryFeatureGate is the feature gate that controls whether the collector +// supports configuring the OpenTelemetry SDK via configuration var _ = featuregate.GlobalRegistry().MustRegister( + "telemetry.useOtelWithSDKConfigurationForInternalTelemetry", + featuregate.StageStable, + featuregate.WithRegisterToVersion("v0.110.0"), + featuregate.WithRegisterDescription("controls whether the collector supports extended OpenTelemetry "+ + "configuration for internal telemetry")) + // Settings holds configuration for building a new Service.
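//
// A minimal population sketch for the new config/factory maps (the otlp
// receiver factory and the otlpCfg value are assumed for illustration and are
// not part of this patch):
//
//	set := service.Settings{
//		ReceiversConfigs:   map[component.ID]component.Config{component.MustNewID("otlp"): otlpCfg},
//		ReceiversFactories: map[component.Type]receiver.Factory{component.MustNewType("otlp"): otlpreceiver.NewFactory()},
//	}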
type Settings struct { // BuildInfo provides collector start information. @@ -42,20 +55,31 @@ type Settings struct { // CollectorConf contains the Collector's current configuration CollectorConf *confmap.Conf - // Receivers builder for receivers. - Receivers *receiver.Builder + // Receivers configuration to its builder. + ReceiversConfigs map[component.ID]component.Config + ReceiversFactories map[component.Type]receiver.Factory - // Processors builder for processors. - Processors *processor.Builder + // Processors configuration to its builder. + ProcessorsConfigs map[component.ID]component.Config + ProcessorsFactories map[component.Type]processor.Factory - // Exporters builder for exporters. - Exporters *exporter.Builder + // Exporters configuration to its builder. + ExportersConfigs map[component.ID]component.Config + ExportersFactories map[component.Type]exporter.Factory - // Connectors builder for connectors. - Connectors *connector.Builder + // Connectors configuration to its builder. + ConnectorsConfigs map[component.ID]component.Config + ConnectorsFactories map[component.Type]connector.Factory // Extensions builder for extensions. - Extensions *extension.Builder + Extensions builders.Extension + + // Extensions configuration to its builder. + ExtensionsConfigs map[component.ID]component.Config + ExtensionsFactories map[component.Type]extension.Factory + + // ModuleInfo describes the go module for each component. + ModuleInfo extension.ModuleInfo // AsyncErrorChannel is the channel that is used to report fatal errors. AsyncErrorChannel chan error @@ -67,25 +91,26 @@ type Settings struct { // Service represents the implementation of a component.Host. type Service struct { buildInfo component.BuildInfo - telemetrySettings servicetelemetry.TelemetrySettings - host *serviceHost + telemetrySettings component.TelemetrySettings + host *graph.Host collectorConf *confmap.Conf + loggerProvider log.LoggerProvider } // New creates a new Service, its telemetry, and components.
func New(ctx context.Context, set Settings, cfg Config) (*Service, error) { - disableHighCard := obsreportconfig.DisableHighCardinalityMetricsfeatureGate.IsEnabled() - extendedConfig := obsreportconfig.UseOtelWithSDKConfigurationForInternalTelemetryFeatureGate.IsEnabled() srv := &Service{ buildInfo: set.BuildInfo, - host: &serviceHost{ - receivers: set.Receivers, - processors: set.Processors, - exporters: set.Exporters, - connectors: set.Connectors, - extensions: set.Extensions, - buildInfo: set.BuildInfo, - asyncErrorChannel: set.AsyncErrorChannel, + host: &graph.Host{ + Receivers: builders.NewReceiver(set.ReceiversConfigs, set.ReceiversFactories), + Processors: builders.NewProcessor(set.ProcessorsConfigs, set.ProcessorsFactories), + Exporters: builders.NewExporter(set.ExportersConfigs, set.ExportersFactories), + Connectors: builders.NewConnector(set.ConnectorsConfigs, set.ConnectorsFactories), + Extensions: builders.NewExtension(set.ExtensionsConfigs, set.ExtensionsFactories), + + ModuleInfo: set.ModuleInfo, + BuildInfo: set.BuildInfo, + AsyncErrorChannel: set.AsyncErrorChannel, }, collectorConf: set.CollectorConf, } @@ -94,16 +119,42 @@ func New(ctx context.Context, set Settings, cfg Config) (*Service, error) { res := resource.New(set.BuildInfo, cfg.Telemetry.Resource) pcommonRes := pdataFromSdk(res) + sch := semconv.SchemaURL + cfgRes := config.Resource{ + SchemaUrl: &sch, + Attributes: attributes(res, cfg.Telemetry), + } + + sdk, err := config.NewSDK( + config.WithContext(ctx), + config.WithOpenTelemetryConfiguration( + config.OpenTelemetryConfiguration{ + LoggerProvider: &config.LoggerProvider{ + Processors: cfg.Telemetry.Logs.Processors, + }, + TracerProvider: &config.TracerProvider{ + Processors: cfg.Telemetry.Traces.Processors, + }, + Resource: &cfgRes, + }, + ), + ) + if err != nil { + return nil, fmt.Errorf("failed to create SDK: %w", err) + } + telFactory := telemetry.NewFactory() telset := telemetry.Settings{ BuildInfo: set.BuildInfo, ZapOptions: set.LoggingOptions, + SDK: &sdk, } - logger, err := telFactory.CreateLogger(ctx, telset, &cfg.Telemetry) + logger, lp, err := telFactory.CreateLogger(ctx, telset, &cfg.Telemetry) if err != nil { return nil, fmt.Errorf("failed to create logger: %w", err) } + srv.loggerProvider = lp tracerProvider, err := telFactory.CreateTracerProvider(ctx, telset, &cfg.Telemetry) if err != nil { @@ -111,55 +162,54 @@ func New(ctx context.Context, set Settings, cfg Config) (*Service, error) { } logger.Info("Setting up own telemetry...") - mp, err := newMeterProvider( - meterProviderSettings{ - res: res, - cfg: cfg.Telemetry.Metrics, - asyncErrorChannel: set.AsyncErrorChannel, - }, - disableHighCard, - ) + + mp, err := telFactory.CreateMeterProvider(ctx, telset, &cfg.Telemetry) if err != nil { - return nil, fmt.Errorf("failed to create metric provider: %w", err) + return nil, fmt.Errorf("failed to create meter provider: %w", err) } - logsAboutMeterProvider(logger, cfg.Telemetry.Metrics, mp, extendedConfig) - srv.telemetrySettings = servicetelemetry.TelemetrySettings{ + logsAboutMeterProvider(logger, cfg.Telemetry.Metrics, mp) + srv.telemetrySettings = component.TelemetrySettings{ Logger: logger, MeterProvider: mp, TracerProvider: tracerProvider, MetricsLevel: cfg.Telemetry.Metrics.Level, // Construct telemetry attributes from build info and config's resource attributes. 
Resource: pcommonRes, - Status: status.NewReporter(srv.host.notifyComponentStatusChange, func(err error) { - if errors.Is(err, status.ErrStatusNotReady) { - logger.Warn("Invalid transition", zap.Error(err)) - } - // ignore other errors as they represent invalid state transitions and are considered benign. - }), + } + srv.host.Reporter = status.NewReporter(srv.host.NotifyComponentStatusChange, func(err error) { + if errors.Is(err, status.ErrStatusNotReady) { + logger.Warn("Invalid transition", zap.Error(err)) + } + // ignore other errors as they represent invalid state transitions and are considered benign. + }) + + if err = srv.initGraph(ctx, cfg); err != nil { + err = multierr.Append(err, srv.shutdownTelemetry(ctx)) + return nil, err } // process the configuration and initialize the pipeline - if err = srv.initExtensionsAndPipeline(ctx, set, cfg); err != nil { - // If pipeline initialization fails then shut down telemetry + if err = srv.initExtensions(ctx, cfg.Extensions); err != nil { err = multierr.Append(err, srv.shutdownTelemetry(ctx)) return nil, err } + if err = proctelemetry.RegisterProcessMetrics(srv.telemetrySettings); err != nil { + return nil, fmt.Errorf("failed to register process metrics: %w", err) + } + return srv, nil } -func logsAboutMeterProvider(logger *zap.Logger, cfg telemetry.MetricsConfig, mp metric.MeterProvider, extendedConfig bool) { - if cfg.Level == configtelemetry.LevelNone || (cfg.Address == "" && len(cfg.Readers) == 0) { - logger.Info( - "Skipped telemetry setup.", - zap.String(zapKeyTelemetryAddress, cfg.Address), - zap.Stringer(zapKeyTelemetryLevel, cfg.Level), - ) +func logsAboutMeterProvider(logger *zap.Logger, cfg telemetry.MetricsConfig, mp metric.MeterProvider) { + if cfg.Level == configtelemetry.LevelNone || len(cfg.Readers) == 0 { + logger.Info("Skipped telemetry setup.") return } - if len(cfg.Address) != 0 && extendedConfig { + //nolint + if len(cfg.Address) != 0 { logger.Warn("service::telemetry::metrics::address is being deprecated in favor of service::telemetry::metrics::readers") } @@ -182,29 +232,25 @@ func (srv *Service) Start(ctx context.Context) error { zap.Int("NumCPU", runtime.NumCPU()), ) - // enable status reporting - srv.telemetrySettings.Status.Ready() - - if err := srv.host.serviceExtensions.Start(ctx, srv.host); err != nil { + if err := srv.host.ServiceExtensions.Start(ctx, srv.host); err != nil { return fmt.Errorf("failed to start extensions: %w", err) } if srv.collectorConf != nil { - if err := srv.host.serviceExtensions.NotifyConfig(ctx, srv.collectorConf); err != nil { + if err := srv.host.ServiceExtensions.NotifyConfig(ctx, srv.collectorConf); err != nil { return err } } - if err := srv.host.pipelines.StartAll(ctx, srv.host); err != nil { + if err := srv.host.Pipelines.StartAll(ctx, srv.host); err != nil { return fmt.Errorf("cannot start pipelines: %w", err) } - if err := srv.host.serviceExtensions.NotifyPipelineReady(); err != nil { + if err := srv.host.ServiceExtensions.NotifyPipelineReady(); err != nil { return err } srv.telemetrySettings.Logger.Info("Everything is ready. 
Begin running and processing data.") - localhostgate.LogAboutUseLocalHostAsDefault(srv.telemetrySettings.Logger) return nil } @@ -227,6 +273,12 @@ func (srv *Service) shutdownTelemetry(ctx context.Context) error { err = multierr.Append(err, fmt.Errorf("failed to shutdown tracer provider: %w", shutdownErr)) } } + + if prov, ok := srv.loggerProvider.(shutdownable); ok { + if shutdownErr := prov.Shutdown(ctx); shutdownErr != nil { + err = multierr.Append(err, fmt.Errorf("failed to shutdown logger provider: %w", shutdownErr)) + } + } return err } @@ -242,15 +294,15 @@ func (srv *Service) Shutdown(ctx context.Context) error { // Begin shutdown sequence. srv.telemetrySettings.Logger.Info("Starting shutdown...") - if err := srv.host.serviceExtensions.NotifyPipelineNotReady(); err != nil { + if err := srv.host.ServiceExtensions.NotifyPipelineNotReady(); err != nil { errs = multierr.Append(errs, fmt.Errorf("failed to notify that pipeline is not ready: %w", err)) } - if err := srv.host.pipelines.ShutdownAll(ctx); err != nil { + if err := srv.host.Pipelines.ShutdownAll(ctx, srv.host.Reporter); err != nil { errs = multierr.Append(errs, fmt.Errorf("failed to shutdown pipelines: %w", err)) } - if err := srv.host.serviceExtensions.Shutdown(ctx); err != nil { + if err := srv.host.ServiceExtensions.Shutdown(ctx); err != nil { errs = multierr.Append(errs, fmt.Errorf("failed to shutdown extensions: %w", err)) } @@ -261,39 +313,36 @@ func (srv *Service) Shutdown(ctx context.Context) error { return errs } -// Creates extensions and then builds the pipeline graph. -func (srv *Service) initExtensionsAndPipeline(ctx context.Context, set Settings, cfg Config) error { +// Creates extensions. +func (srv *Service) initExtensions(ctx context.Context, cfg extensions.Config) error { var err error extensionsSettings := extensions.Settings{ Telemetry: srv.telemetrySettings, BuildInfo: srv.buildInfo, - Extensions: srv.host.extensions, + Extensions: srv.host.Extensions, + ModuleInfo: srv.host.ModuleInfo, } - if srv.host.serviceExtensions, err = extensions.New(ctx, extensionsSettings, cfg.Extensions); err != nil { + if srv.host.ServiceExtensions, err = extensions.New(ctx, extensionsSettings, cfg, extensions.WithReporter(srv.host.Reporter)); err != nil { return fmt.Errorf("failed to build extensions: %w", err) } + return nil +} - pSet := graph.Settings{ +// Creates the pipeline graph. +func (srv *Service) initGraph(ctx context.Context, cfg Config) error { + var err error + if srv.host.Pipelines, err = graph.Build(ctx, graph.Settings{ Telemetry: srv.telemetrySettings, BuildInfo: srv.buildInfo, - ReceiverBuilder: set.Receivers, - ProcessorBuilder: set.Processors, - ExporterBuilder: set.Exporters, - ConnectorBuilder: set.Connectors, + ReceiverBuilder: srv.host.Receivers, + ProcessorBuilder: srv.host.Processors, + ExporterBuilder: srv.host.Exporters, + ConnectorBuilder: srv.host.Connectors, PipelineConfigs: cfg.Pipelines, - } - - if srv.host.pipelines, err = graph.Build(ctx, pSet); err != nil { + ReportStatus: srv.host.Reporter.ReportStatus, + }); err != nil { return fmt.Errorf("failed to build pipelines: %w", err) } - - if cfg.Telemetry.Metrics.Level != configtelemetry.LevelNone && cfg.Telemetry.Metrics.Address != "" { - // The process telemetry initialization requires the ballast size, which is available after the extensions are initialized. 
- if err = proctelemetry.RegisterProcessMetrics(srv.telemetrySettings.MeterProvider, getBallastSize(srv.host)); err != nil { - return fmt.Errorf("failed to register process metrics: %w", err) - } - } - return nil } @@ -303,15 +352,6 @@ func (srv *Service) Logger() *zap.Logger { return srv.telemetrySettings.Logger } -func getBallastSize(host component.Host) uint64 { - for _, ext := range host.GetExtensions() { - if bExt, ok := ext.(interface{ GetBallastSize() uint64 }); ok { - return bExt.GetBallastSize() - } - } - return 0 -} - func pdataFromSdk(res *sdkresource.Resource) pcommon.Resource { // pcommon.NewResource is the best way to generate a new resource currently and is safe to use outside of tests. // Because the resource is signal agnostic, and we need a net new resource, not an existing one, this is the only diff --git a/vendor/go.opentelemetry.io/collector/service/telemetry/config.go b/vendor/go.opentelemetry.io/collector/service/telemetry/config.go index 3336fcac578..e6d80dbd16c 100644 --- a/vendor/go.opentelemetry.io/collector/service/telemetry/config.go +++ b/vendor/go.opentelemetry.io/collector/service/telemetry/config.go @@ -4,15 +4,29 @@ package telemetry // import "go.opentelemetry.io/collector/service/telemetry" import ( + "errors" "fmt" + "net" + "strconv" "time" "go.opentelemetry.io/contrib/config" "go.uber.org/zap/zapcore" "go.opentelemetry.io/collector/config/configtelemetry" + "go.opentelemetry.io/collector/confmap" + "go.opentelemetry.io/collector/featuregate" ) +var _ confmap.Unmarshaler = (*Config)(nil) + +var disableAddressFieldForInternalTelemetryFeatureGate = featuregate.GlobalRegistry().MustRegister( + "telemetry.disableAddressFieldForInternalTelemetry", + featuregate.StageAlpha, + featuregate.WithRegisterFromVersion("v0.111.0"), + featuregate.WithRegisterToVersion("v0.114.0"), + featuregate.WithRegisterDescription("controls whether the deprecated address field for internal telemetry is still supported")) + // Config defines the configurable settings for service telemetry. type Config struct { Logs LogsConfig `mapstructure:"logs"` @@ -92,6 +106,10 @@ type LogsConfig struct { // // By default, there is no initial field. InitialFields map[string]any `mapstructure:"initial_fields"` + + // Processors allow configuration of log record processors to emit logs to + // any number of supported backends. + Processors []config.LogRecordProcessor `mapstructure:"processors"` } // LogsSamplingConfig sets a sampling strategy for the logger. Sampling caps the @@ -119,7 +137,7 @@ type MetricsConfig struct { // - "detailed" adds dimensions and views to the previous levels. Level configtelemetry.Level `mapstructure:"level"` - // Address is the [address]:port that metrics exposition should be bound to. + // Deprecated: [v0.111.0] use readers configuration. Address string `mapstructure:"address"` // Readers allow configuration of metric readers to emit metrics to @@ -130,20 +148,65 @@ type MetricsConfig struct { // TracesConfig exposes the common Telemetry configuration for collector's internal spans. // Experimental: *NOTE* this structure is subject to change or removal in the future. type TracesConfig struct { + // Level configures whether spans are emitted or not, the possible values are: + // - "none" indicates that no tracing data should be collected; + // - "basic" is the recommended level and covers the basics of the service telemetry. + Level configtelemetry.Level `mapstructure:"level"` // Propagators is a list of TextMapPropagators from the supported propagators list.
Currently, // tracecontext and b3 are supported. By default, the value is set to empty list and // context propagation is disabled. Propagators []string `mapstructure:"propagators"` // Processors allow configuration of span processors to emit spans to - // any number of suported backends. + // any number of supported backends. Processors []config.SpanProcessor `mapstructure:"processors"` } +func (c *Config) Unmarshal(conf *confmap.Conf) error { + if err := conf.Unmarshal(c); err != nil { + return err + } + + // If the support for "metrics::address" is disabled, nothing to do. + // TODO: when this gate is marked stable remove the whole Unmarshal definition. + if disableAddressFieldForInternalTelemetryFeatureGate.IsEnabled() { + return nil + } + + if len(c.Metrics.Address) != 0 { + host, port, err := net.SplitHostPort(c.Metrics.Address) + if err != nil { + return fmt.Errorf("failed to parse metrics address %q: %w", c.Metrics.Address, err) + } + portInt, err := strconv.Atoi(port) + if err != nil { + return fmt.Errorf("failed to extract the port from the metrics address %q: %w", c.Metrics.Address, err) + } + + // User did not overwrite readers, so we will remove the default configured reader. + if !conf.IsSet("metrics::readers") { + c.Metrics.Readers = nil + } + + c.Metrics.Readers = append(c.Metrics.Readers, config.MetricReader{ + Pull: &config.PullMetricReader{ + Exporter: config.MetricExporter{ + Prometheus: &config.Prometheus{ + Host: &host, + Port: &portInt, + }, + }, + }, + }) + } + + return nil +} + // Validate checks whether the current configuration is valid func (c *Config) Validate() error { - // Check when service telemetry metric level is not none, the metrics address should not be empty - if c.Metrics.Level != configtelemetry.LevelNone && c.Metrics.Address == "" && len(c.Metrics.Readers) == 0 { - return fmt.Errorf("collector telemetry metric address or reader should exist when metric level is not none") + // Check when service telemetry metric level is not none, the metrics readers should not be empty + if c.Metrics.Level != configtelemetry.LevelNone && len(c.Metrics.Readers) == 0 { + return errors.New("collector telemetry metrics reader should exist when metric level is not none") } return nil diff --git a/vendor/go.opentelemetry.io/collector/service/telemetry/factory.go b/vendor/go.opentelemetry.io/collector/service/telemetry/factory.go index c236d3c5733..af23cfcabae 100644 --- a/vendor/go.opentelemetry.io/collector/service/telemetry/factory.go +++ b/vendor/go.opentelemetry.io/collector/service/telemetry/factory.go @@ -7,16 +7,95 @@ import ( "context" "time" + "go.opentelemetry.io/contrib/config" + "go.opentelemetry.io/otel/log" + "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/trace" "go.uber.org/zap" "go.uber.org/zap/zapcore" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config/configtelemetry" - "go.opentelemetry.io/collector/service/telemetry/internal" + "go.opentelemetry.io/collector/featuregate" + "go.opentelemetry.io/collector/service/internal/resource" ) +var useLocalHostAsDefaultMetricsAddressFeatureGate = featuregate.GlobalRegistry().MustRegister( + "telemetry.UseLocalHostAsDefaultMetricsAddress", + featuregate.StageBeta, + featuregate.WithRegisterFromVersion("v0.111.0"), + featuregate.WithRegisterDescription("controls whether the default Prometheus metrics server uses localhost as the default host for its endpoints"), +) + +// disableHighCardinalityMetricsFeatureGate is the feature gate that controls whether the collector should
enable +// potentially high cardinality metrics. The gate will be removed when the collector allows for view configuration. +var disableHighCardinalityMetricsFeatureGate = featuregate.GlobalRegistry().MustRegister( + "telemetry.disableHighCardinalityMetrics", + featuregate.StageAlpha, + featuregate.WithRegisterDescription("controls whether the collector should enable potentially high "+ + "cardinality metrics. The gate will be removed when the collector allows for view configuration.")) + +// Settings holds configuration for building Telemetry. +type Settings struct { + BuildInfo component.BuildInfo + AsyncErrorChannel chan error + ZapOptions []zap.Option + SDK *config.SDK +} + +// Factory is the factory interface for telemetry. +// This interface cannot be directly implemented. Implementations must +// use the NewFactory to implement it. +type Factory interface { + // CreateDefaultConfig creates the default configuration for the telemetry. + // TODO: Should we just inherit from component.Factory? + CreateDefaultConfig() component.Config + + // CreateLogger creates a logger. + CreateLogger(ctx context.Context, set Settings, cfg component.Config) (*zap.Logger, log.LoggerProvider, error) + + // CreateTracerProvider creates a TracerProvider. + CreateTracerProvider(ctx context.Context, set Settings, cfg component.Config) (trace.TracerProvider, error) + + // CreateMeterProvider creates a MeterProvider. + CreateMeterProvider(ctx context.Context, set Settings, cfg component.Config) (metric.MeterProvider, error) + + // unexportedFactoryFunc is used to prevent external implementations of Factory. + unexportedFactoryFunc() +} + +// NewFactory creates a new Factory. +func NewFactory() Factory { + return newFactory(createDefaultConfig, + withLogger(func(_ context.Context, set Settings, cfg component.Config) (*zap.Logger, log.LoggerProvider, error) { + c := *cfg.(*Config) + return newLogger(set, c) + }), + withTracerProvider(func(_ context.Context, set Settings, cfg component.Config) (trace.TracerProvider, error) { + c := *cfg.(*Config) + return newTracerProvider(set, c) + }), + withMeterProvider(func(_ context.Context, set Settings, cfg component.Config) (metric.MeterProvider, error) { + c := *cfg.(*Config) + disableHighCard := disableHighCardinalityMetricsFeatureGate.IsEnabled() + return newMeterProvider( + meterProviderSettings{ + res: resource.New(set.BuildInfo, c.Resource), + cfg: c.Metrics, + asyncErrorChannel: set.AsyncErrorChannel, + }, + disableHighCard, + ) + }), + ) +} + func createDefaultConfig() component.Config { + metricsHost := "localhost" + if !useLocalHostAsDefaultMetricsAddressFeatureGate.IsEnabled() { + metricsHost = "" + } + return &Config{ Logs: LogsConfig{ Level: zapcore.InfoLevel, @@ -35,25 +114,19 @@ func createDefaultConfig() component.Config { InitialFields: map[string]any(nil), }, Metrics: MetricsConfig{ - Level: configtelemetry.LevelNormal, - Address: ":8888", + Level: configtelemetry.LevelNormal, + Readers: []config.MetricReader{ + { + Pull: &config.PullMetricReader{Exporter: config.MetricExporter{Prometheus: &config.Prometheus{ + Host: &metricsHost, + Port: newPtr(8888), + }}}, + }, + }, }, } } -// Factory is a telemetry factory. -type Factory = internal.Factory - -// NewFactory creates a new Factory.
-func NewFactory() Factory { - return internal.NewFactory(createDefaultConfig, - internal.WithLogger(func(_ context.Context, set Settings, cfg component.Config) (*zap.Logger, error) { - c := *cfg.(*Config) - return newLogger(c.Logs, set.ZapOptions) - }), - internal.WithTracerProvider(func(ctx context.Context, _ Settings, cfg component.Config) (trace.TracerProvider, error) { - c := *cfg.(*Config) - return newTracerProvider(ctx, c) - }), - ) +func newPtr[T int | string](str T) *T { + return &str } diff --git a/vendor/go.opentelemetry.io/collector/service/telemetry/factory_impl.go b/vendor/go.opentelemetry.io/collector/service/telemetry/factory_impl.go new file mode 100644 index 00000000000..364afbde9d6 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/service/telemetry/factory_impl.go @@ -0,0 +1,111 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry // import "go.opentelemetry.io/collector/service/telemetry" + +import ( + "context" + + "go.opentelemetry.io/otel/log" + lognoop "go.opentelemetry.io/otel/log/noop" + "go.opentelemetry.io/otel/metric" + metricnoop "go.opentelemetry.io/otel/metric/noop" + "go.opentelemetry.io/otel/trace" + tracenoop "go.opentelemetry.io/otel/trace/noop" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" +) + +// factoryOption applies changes to Factory. +type factoryOption interface { + // applyTelemetryFactoryOption applies the option. + applyTelemetryFactoryOption(o *factory) +} + +var _ factoryOption = (*factoryOptionFunc)(nil) + +// factoryOptionFunc is a factoryOption created through a function. +type factoryOptionFunc func(*factory) + +func (f factoryOptionFunc) applyTelemetryFactoryOption(o *factory) { + f(o) +} + +var _ Factory = (*factory)(nil) + +// factory is the implementation of Factory. +type factory struct { + createDefaultConfig component.CreateDefaultConfigFunc + createLoggerFunc + createTracerProviderFunc + createMeterProviderFunc +} + +func (f *factory) CreateDefaultConfig() component.Config { + return f.createDefaultConfig() +} + +// createLoggerFunc is the equivalent of Factory.CreateLogger. +type createLoggerFunc func(context.Context, Settings, component.Config) (*zap.Logger, log.LoggerProvider, error) + +// withLogger overrides the default no-op logger. +func withLogger(createLogger createLoggerFunc) factoryOption { + return factoryOptionFunc(func(o *factory) { + o.createLoggerFunc = createLogger + }) +} + +func (f *factory) CreateLogger(ctx context.Context, set Settings, cfg component.Config) (*zap.Logger, log.LoggerProvider, error) { + if f.createLoggerFunc == nil { + return zap.NewNop(), lognoop.NewLoggerProvider(), nil + } + return f.createLoggerFunc(ctx, set, cfg) +} + +// createTracerProviderFunc is the equivalent of Factory.CreateTracerProvider. +type createTracerProviderFunc func(context.Context, Settings, component.Config) (trace.TracerProvider, error) + +// withTracerProvider overrides the default no-op tracer provider. +func withTracerProvider(createTracerProvider createTracerProviderFunc) factoryOption { + return factoryOptionFunc(func(o *factory) { + o.createTracerProviderFunc = createTracerProvider + }) +} + +func (f *factory) CreateTracerProvider(ctx context.Context, set Settings, cfg component.Config) (trace.TracerProvider, error) { + if f.createTracerProviderFunc == nil { + return tracenoop.NewTracerProvider(), nil + } + return f.createTracerProviderFunc(ctx, set, cfg) +} + +// createMeterProviderFunc is the equivalent of Factory.CreateMeterProvider.
+type createMeterProviderFunc func(context.Context, Settings, component.Config) (metric.MeterProvider, error) + +// withMeterProvider overrides the default no-op meter provider. +func withMeterProvider(createMeterProvider createMeterProviderFunc) factoryOption { + return factoryOptionFunc(func(o *factory) { + o.createMeterProviderFunc = createMeterProvider + }) +} + +func (f *factory) CreateMeterProvider(ctx context.Context, set Settings, cfg component.Config) (metric.MeterProvider, error) { + if f.createMeterProviderFunc == nil { + return metricnoop.NewMeterProvider(), nil + } + return f.createMeterProviderFunc(ctx, set, cfg) +} + +func (f *factory) unexportedFactoryFunc() {} + +// newFactory returns a new Factory. +func newFactory(createDefaultConfig component.CreateDefaultConfigFunc, options ...factoryOption) Factory { + f := &factory{ + createDefaultConfig: createDefaultConfig, + } + for _, op := range options { + op.applyTelemetryFactoryOption(f) + } + return f +} diff --git a/vendor/go.opentelemetry.io/collector/service/telemetry/internal/factory.go b/vendor/go.opentelemetry.io/collector/service/telemetry/internal/factory.go deleted file mode 100644 index f1368c9704d..00000000000 --- a/vendor/go.opentelemetry.io/collector/service/telemetry/internal/factory.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package internal // import "go.opentelemetry.io/collector/service/telemetry/internal" - -import ( - "context" - - "go.opentelemetry.io/otel/trace" - tracenoop "go.opentelemetry.io/otel/trace/noop" - "go.uber.org/zap" - - "go.opentelemetry.io/collector/component" -) - -// CreateSettings holds configuration for building Telemetry. -type CreateSettings struct { - BuildInfo component.BuildInfo - AsyncErrorChannel chan error - ZapOptions []zap.Option -} - -// Factory is factory interface for telemetry. -// This interface cannot be directly implemented. Implementations must -// use the NewFactory to implement it. -type Factory interface { - // CreateDefaultConfig creates the default configuration for the telemetry. - // TODO: Should we just inherit from component.Factory? - CreateDefaultConfig() component.Config - - // CreateLogger creates a logger. - CreateLogger(ctx context.Context, set CreateSettings, cfg component.Config) (*zap.Logger, error) - - // CreateTracerProvider creates a TracerProvider. - CreateTracerProvider(ctx context.Context, set CreateSettings, cfg component.Config) (trace.TracerProvider, error) - - // TODO: Add CreateMeterProvider. - - // unexportedFactoryFunc is used to prevent external implementations of Factory. - unexportedFactoryFunc() -} - -// FactoryOption apply changes to Factory. -type FactoryOption interface { - // applyTelemetryFactoryOption applies the option. - applyTelemetryFactoryOption(o *factory) -} - -var _ FactoryOption = (*factoryOptionFunc)(nil) - -// factoryOptionFunc is an FactoryOption created through a function. -type factoryOptionFunc func(*factory) - -func (f factoryOptionFunc) applyTelemetryFactoryOption(o *factory) { - f(o) -} - -var _ Factory = (*factory)(nil) - -// factory is the implementation of Factory. -type factory struct { - createDefaultConfig component.CreateDefaultConfigFunc - CreateLoggerFunc - CreateTracerProviderFunc -} - -func (f *factory) CreateDefaultConfig() component.Config { - return f.createDefaultConfig() -} - -// CreateLoggerFunc is the equivalent of Factory.CreateLogger. 
-type CreateLoggerFunc func(context.Context, CreateSettings, component.Config) (*zap.Logger, error) - -// WithLogger overrides the default no-op logger. -func WithLogger(createLogger CreateLoggerFunc) FactoryOption { - return factoryOptionFunc(func(o *factory) { - o.CreateLoggerFunc = createLogger - }) -} - -func (f *factory) CreateLogger(ctx context.Context, set CreateSettings, cfg component.Config) (*zap.Logger, error) { - if f.CreateLoggerFunc == nil { - return zap.NewNop(), nil - } - return f.CreateLoggerFunc(ctx, set, cfg) -} - -// CreateTracerProviderFunc is the equivalent of Factory.CreateTracerProvider. -type CreateTracerProviderFunc func(context.Context, CreateSettings, component.Config) (trace.TracerProvider, error) - -// WithTracerProvider overrides the default no-op tracer provider. -func WithTracerProvider(createTracerProvider CreateTracerProviderFunc) FactoryOption { - return factoryOptionFunc(func(o *factory) { - o.CreateTracerProviderFunc = createTracerProvider - }) -} - -func (f *factory) CreateTracerProvider(ctx context.Context, set CreateSettings, cfg component.Config) (trace.TracerProvider, error) { - if f.CreateTracerProviderFunc == nil { - return tracenoop.NewTracerProvider(), nil - } - return f.CreateTracerProviderFunc(ctx, set, cfg) -} - -func (f *factory) unexportedFactoryFunc() {} - -// NewFactory returns a new Factory. -func NewFactory(createDefaultConfig component.CreateDefaultConfigFunc, options ...FactoryOption) Factory { - f := &factory{ - createDefaultConfig: createDefaultConfig, - } - for _, op := range options { - op.applyTelemetryFactoryOption(f) - } - return f -} diff --git a/vendor/go.opentelemetry.io/collector/service/internal/proctelemetry/config.go b/vendor/go.opentelemetry.io/collector/service/telemetry/internal/otelinit/config.go similarity index 61% rename from vendor/go.opentelemetry.io/collector/service/internal/proctelemetry/config.go rename to vendor/go.opentelemetry.io/collector/service/telemetry/internal/otelinit/config.go index 0a9a8b08d82..bb75023f204 100644 --- a/vendor/go.opentelemetry.io/collector/service/internal/proctelemetry/config.go +++ b/vendor/go.opentelemetry.io/collector/service/telemetry/internal/otelinit/config.go @@ -1,33 +1,36 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package proctelemetry // import "go.opentelemetry.io/collector/service/internal/proctelemetry" +package otelinit // import "go.opentelemetry.io/collector/service/telemetry/internal/otelinit" import ( "context" "encoding/json" "errors" "fmt" + "net" "net/http" "net/url" "os" + "strconv" "strings" + "sync" "time" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" "go.opentelemetry.io/contrib/config" "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/bridge/opencensus" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp" otelprom "go.opentelemetry.io/otel/exporters/prometheus" "go.opentelemetry.io/otel/exporters/stdout/stdoutmetric" "go.opentelemetry.io/otel/sdk/instrumentation" sdkmetric "go.opentelemetry.io/otel/sdk/metric" + "go.opentelemetry.io/otel/sdk/metric/metricdata" "go.opentelemetry.io/otel/sdk/resource" + "google.golang.org/grpc/credentials" - "go.opentelemetry.io/collector/processor/processorhelper" semconv "go.opentelemetry.io/collector/semconv/v1.18.0" ) @@ -40,35 +43,34 @@ const ( HTTPInstrumentation = 
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" // supported protocols - protocolProtobufHTTP = "http/protobuf" - protocolProtobufGRPC = "grpc/protobuf" + protocolProtobufHTTP = "http/protobuf" + protocolProtobufGRPC = "grpc/protobuf" + defaultReadHeaderTimeout = 10 * time.Second ) var ( // GRPCUnacceptableKeyValues is a list of high cardinality grpc attributes that should be filtered out. - GRPCUnacceptableKeyValues = []attribute.KeyValue{ + GRPCUnacceptableKeyValues = attribute.NewSet( attribute.String(semconv.AttributeNetSockPeerAddr, ""), attribute.String(semconv.AttributeNetSockPeerPort, ""), attribute.String(semconv.AttributeNetSockPeerName, ""), - } + ) // HTTPUnacceptableKeyValues is a list of high cardinality http attributes that should be filtered out. - HTTPUnacceptableKeyValues = []attribute.KeyValue{ + HTTPUnacceptableKeyValues = attribute.NewSet( attribute.String(semconv.AttributeNetHostName, ""), attribute.String(semconv.AttributeNetHostPort, ""), - } + ) errNoValidMetricExporter = errors.New("no valid metric exporter") ) -func InitMetricReader(ctx context.Context, reader config.MetricReader, asyncErrorChannel chan error) (sdkmetric.Reader, *http.Server, error) { +func InitMetricReader(ctx context.Context, reader config.MetricReader, asyncErrorChannel chan error, serverWG *sync.WaitGroup) (sdkmetric.Reader, *http.Server, error) { if reader.Pull != nil { - return initPullExporter(reader.Pull.Exporter, asyncErrorChannel) + return initPullExporter(reader.Pull.Exporter, asyncErrorChannel, serverWG) } if reader.Periodic != nil { - opts := []sdkmetric.PeriodicReaderOption{ - sdkmetric.WithProducer(opencensus.NewMetricProducer()), - } + var opts []sdkmetric.PeriodicReaderOption if reader.Periodic.Interval != nil { opts = append(opts, sdkmetric.WithInterval(time.Duration(*reader.Periodic.Interval)*time.Millisecond)) } @@ -84,7 +86,7 @@ func InitMetricReader(ctx context.Context, reader config.MetricReader, asyncErro func InitOpenTelemetry(res *resource.Resource, options []sdkmetric.Option, disableHighCardinality bool) (*sdkmetric.MeterProvider, error) { opts := []sdkmetric.Option{ sdkmetric.WithResource(res), - sdkmetric.WithView(batchViews(disableHighCardinality)...), + sdkmetric.WithView(disableHighCardinalityViews(disableHighCardinality)...), } opts = append(opts, options...) 
@@ -93,94 +95,82 @@ func InitOpenTelemetry(res *resource.Resource, options []sdkmetric.Option, disab ), nil } -func InitPrometheusServer(registry *prometheus.Registry, address string, asyncErrorChannel chan error) *http.Server { +func InitPrometheusServer(registry *prometheus.Registry, address string, asyncErrorChannel chan error, serverWG *sync.WaitGroup) *http.Server { mux := http.NewServeMux() mux.Handle("/metrics", promhttp.HandlerFor(registry, promhttp.HandlerOpts{})) server := &http.Server{ - Addr: address, - Handler: mux, + Addr: address, + Handler: mux, + ReadHeaderTimeout: defaultReadHeaderTimeout, } + + serverWG.Add(1) go func() { + defer serverWG.Done() if serveErr := server.ListenAndServe(); serveErr != nil && !errors.Is(serveErr, http.ErrServerClosed) { - asyncErrorChannel <- serveErr + select { + case asyncErrorChannel <- serveErr: + case <-time.After(1 * time.Second): + } } }() return server } -func batchViews(disableHighCardinality bool) []sdkmetric.View { - views := []sdkmetric.View{ +func disableHighCardinalityViews(disableHighCardinality bool) []sdkmetric.View { + if !disableHighCardinality { + return nil + } + return []sdkmetric.View{ sdkmetric.NewView( - sdkmetric.Instrument{Name: processorhelper.BuildCustomMetricName("batch", "batch_send_size")}, - sdkmetric.Stream{Aggregation: sdkmetric.AggregationExplicitBucketHistogram{ - Boundaries: []float64{10, 25, 50, 75, 100, 250, 500, 750, 1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000, 20000, 30000, 50000, 100000}, - }}, - ), + sdkmetric.Instrument{Scope: instrumentation.Scope{Name: GRPCInstrumentation}}, + sdkmetric.Stream{ + AttributeFilter: cardinalityFilter(GRPCUnacceptableKeyValues), + }), sdkmetric.NewView( - sdkmetric.Instrument{Name: processorhelper.BuildCustomMetricName("batch", "batch_send_size_bytes")}, - sdkmetric.Stream{Aggregation: sdkmetric.AggregationExplicitBucketHistogram{ - Boundaries: []float64{10, 25, 50, 75, 100, 250, 500, 750, 1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000, 20000, 30000, 50000, - 100_000, 200_000, 300_000, 400_000, 500_000, 600_000, 700_000, 800_000, 900_000, - 1000_000, 2000_000, 3000_000, 4000_000, 5000_000, 6000_000, 7000_000, 8000_000, 9000_000}, - }}, - ), + sdkmetric.Instrument{Scope: instrumentation.Scope{Name: HTTPInstrumentation}}, + sdkmetric.Stream{ + AttributeFilter: cardinalityFilter(HTTPUnacceptableKeyValues), + }), } - if disableHighCardinality { - views = append(views, sdkmetric.NewView(sdkmetric.Instrument{ - Scope: instrumentation.Scope{ - Name: GRPCInstrumentation, - }, - }, sdkmetric.Stream{ - AttributeFilter: cardinalityFilter(GRPCUnacceptableKeyValues...), - })) - views = append(views, sdkmetric.NewView(sdkmetric.Instrument{ - Scope: instrumentation.Scope{ - Name: HTTPInstrumentation, - }, - }, sdkmetric.Stream{ - AttributeFilter: cardinalityFilter(HTTPUnacceptableKeyValues...), - })) - } - return views } -func cardinalityFilter(kvs ...attribute.KeyValue) attribute.Filter { - filter := attribute.NewSet(kvs...) 
+func cardinalityFilter(filter attribute.Set) attribute.Filter { return func(kv attribute.KeyValue) bool { return !filter.HasValue(kv.Key) } } -func initPrometheusExporter(prometheusConfig *config.Prometheus, asyncErrorChannel chan error) (sdkmetric.Reader, *http.Server, error) { +func initPrometheusExporter(prometheusConfig *config.Prometheus, asyncErrorChannel chan error, serverWG *sync.WaitGroup) (sdkmetric.Reader, *http.Server, error) { promRegistry := prometheus.NewRegistry() if prometheusConfig.Host == nil { - return nil, nil, fmt.Errorf("host must be specified") + return nil, nil, errors.New("host must be specified") } if prometheusConfig.Port == nil { - return nil, nil, fmt.Errorf("port must be specified") + return nil, nil, errors.New("port must be specified") } - exporter, err := otelprom.New( + + opts := []otelprom.Option{ otelprom.WithRegisterer(promRegistry), // https://github.com/open-telemetry/opentelemetry-collector/issues/8043 otelprom.WithoutUnits(), // Disabled for the moment until this becomes stable, and we are ready to break backwards compatibility. otelprom.WithoutScopeInfo(), - otelprom.WithProducer(opencensus.NewMetricProducer()), // This allows us to produce metrics that are backwards compatible w/ opencensus otelprom.WithoutCounterSuffixes(), - otelprom.WithNamespace("otelcol"), otelprom.WithResourceAsConstantLabels(attribute.NewDenyKeysFilter()), - ) + } + exporter, err := otelprom.New(opts...) if err != nil { return nil, nil, fmt.Errorf("error creating otel prometheus exporter: %w", err) } - return exporter, InitPrometheusServer(promRegistry, fmt.Sprintf("%s:%d", *prometheusConfig.Host, *prometheusConfig.Port), asyncErrorChannel), nil + return exporter, InitPrometheusServer(promRegistry, net.JoinHostPort(*prometheusConfig.Host, strconv.Itoa(*prometheusConfig.Port)), asyncErrorChannel, serverWG), nil } -func initPullExporter(exporter config.MetricExporter, asyncErrorChannel chan error) (sdkmetric.Reader, *http.Server, error) { +func initPullExporter(exporter config.MetricExporter, asyncErrorChannel chan error, serverWG *sync.WaitGroup) (sdkmetric.Reader, *http.Server, error) { if exporter.Prometheus != nil { - return initPrometheusExporter(exporter.Prometheus, asyncErrorChannel) + return initPrometheusExporter(exporter.Prometheus, asyncErrorChannel, serverWG) } return nil, nil, errNoValidMetricExporter } @@ -219,7 +209,7 @@ func initPeriodicExporter(ctx context.Context, exporter config.MetricExporter, o func normalizeEndpoint(endpoint string) string { if !strings.HasPrefix(endpoint, "https://") && !strings.HasPrefix(endpoint, "http://") { - return fmt.Sprintf("http://%s", endpoint) + return "http://" + endpoint } return endpoint } @@ -235,6 +225,12 @@ func initOTLPgRPCExporter(ctx context.Context, otlpConfig *config.OTLPMetric) (s opts = append(opts, otlpmetricgrpc.WithEndpoint(u.Host)) if u.Scheme == "http" { opts = append(opts, otlpmetricgrpc.WithInsecure()) + } else if otlpConfig.Certificate != nil { + creds, err := credentials.NewClientTLSFromFile(*otlpConfig.Certificate, "") + if err != nil { + return nil, fmt.Errorf("could not create client tls credentials: %w", err) + } + opts = append(opts, otlpmetricgrpc.WithTLSCredentials(creds)) } } @@ -254,6 +250,18 @@ func initOTLPgRPCExporter(ctx context.Context, otlpConfig *config.OTLPMetric) (s if len(otlpConfig.Headers) > 0 { opts = append(opts, otlpmetricgrpc.WithHeaders(otlpConfig.Headers)) } + if otlpConfig.TemporalityPreference != nil { + switch *otlpConfig.TemporalityPreference { + case "delta": + opts = 
append(opts, otlpmetricgrpc.WithTemporalitySelector(temporalityPreferenceDelta)) + case "cumulative": + opts = append(opts, otlpmetricgrpc.WithTemporalitySelector(temporalityPreferenceCumulative)) + case "lowmemory": + opts = append(opts, otlpmetricgrpc.WithTemporalitySelector(temporalityPreferenceLowMemory)) + default: + return nil, fmt.Errorf("unsupported temporality preference %q", *otlpConfig.TemporalityPreference) + } + } return otlpmetricgrpc.New(ctx, opts...) } @@ -291,6 +299,44 @@ func initOTLPHTTPExporter(ctx context.Context, otlpConfig *config.OTLPMetric) (s if len(otlpConfig.Headers) > 0 { opts = append(opts, otlpmetrichttp.WithHeaders(otlpConfig.Headers)) } + if otlpConfig.TemporalityPreference != nil { + switch *otlpConfig.TemporalityPreference { + case "delta": + opts = append(opts, otlpmetrichttp.WithTemporalitySelector(temporalityPreferenceDelta)) + case "cumulative": + opts = append(opts, otlpmetrichttp.WithTemporalitySelector(temporalityPreferenceCumulative)) + case "lowmemory": + opts = append(opts, otlpmetrichttp.WithTemporalitySelector(temporalityPreferenceLowMemory)) + default: + return nil, fmt.Errorf("unsupported temporality preference %q", *otlpConfig.TemporalityPreference) + } + } return otlpmetrichttp.New(ctx, opts...) } + +func temporalityPreferenceCumulative(_ sdkmetric.InstrumentKind) metricdata.Temporality { + return metricdata.CumulativeTemporality +} + +func temporalityPreferenceDelta(ik sdkmetric.InstrumentKind) metricdata.Temporality { + switch ik { + case sdkmetric.InstrumentKindCounter, sdkmetric.InstrumentKindObservableCounter, sdkmetric.InstrumentKindHistogram: + return metricdata.DeltaTemporality + case sdkmetric.InstrumentKindObservableUpDownCounter, sdkmetric.InstrumentKindUpDownCounter: + return metricdata.CumulativeTemporality + default: + return metricdata.DeltaTemporality + } +} + +func temporalityPreferenceLowMemory(ik sdkmetric.InstrumentKind) metricdata.Temporality { + switch ik { + case sdkmetric.InstrumentKindCounter, sdkmetric.InstrumentKindHistogram: + return metricdata.DeltaTemporality + case sdkmetric.InstrumentKindObservableCounter, sdkmetric.InstrumentKindObservableUpDownCounter, sdkmetric.InstrumentKindUpDownCounter: + return metricdata.CumulativeTemporality + default: + return metricdata.DeltaTemporality + } +} diff --git a/vendor/go.opentelemetry.io/collector/service/telemetry/logger.go b/vendor/go.opentelemetry.io/collector/service/telemetry/logger.go index eb675bc459f..b6fe6d22ae1 100644 --- a/vendor/go.opentelemetry.io/collector/service/telemetry/logger.go +++ b/vendor/go.opentelemetry.io/collector/service/telemetry/logger.go @@ -4,22 +4,27 @@ package telemetry // import "go.opentelemetry.io/collector/service/telemetry" import ( + "go.opentelemetry.io/contrib/bridges/otelzap" + "go.opentelemetry.io/otel/log" "go.uber.org/zap" "go.uber.org/zap/zapcore" ) -func newLogger(cfg LogsConfig, options []zap.Option) (*zap.Logger, error) { +// newLogger creates a Logger and a LoggerProvider from Config. +func newLogger(set Settings, cfg Config) (*zap.Logger, log.LoggerProvider, error) { // Copied from NewProductionConfig. 
+ ec := zap.NewProductionEncoderConfig() + ec.EncodeTime = zapcore.ISO8601TimeEncoder zapCfg := &zap.Config{ - Level: zap.NewAtomicLevelAt(cfg.Level), - Development: cfg.Development, - Encoding: cfg.Encoding, - EncoderConfig: zap.NewProductionEncoderConfig(), - OutputPaths: cfg.OutputPaths, - ErrorOutputPaths: cfg.ErrorOutputPaths, - DisableCaller: cfg.DisableCaller, - DisableStacktrace: cfg.DisableStacktrace, - InitialFields: cfg.InitialFields, + Level: zap.NewAtomicLevelAt(cfg.Logs.Level), + Development: cfg.Logs.Development, + Encoding: cfg.Logs.Encoding, + EncoderConfig: ec, + OutputPaths: cfg.Logs.OutputPaths, + ErrorOutputPaths: cfg.Logs.ErrorOutputPaths, + DisableCaller: cfg.Logs.DisableCaller, + DisableStacktrace: cfg.Logs.DisableStacktrace, + InitialFields: cfg.Logs.InitialFields, } if zapCfg.Encoding == "console" { @@ -27,15 +32,35 @@ func newLogger(cfg LogsConfig, options []zap.Option) (*zap.Logger, error) { zapCfg.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder } - logger, err := zapCfg.Build(options...) + logger, err := zapCfg.Build(set.ZapOptions...) if err != nil { - return nil, err + return nil, nil, err } - if cfg.Sampling != nil && cfg.Sampling.Enabled { - logger = newSampledLogger(logger, cfg.Sampling) + + var lp log.LoggerProvider + + if len(cfg.Logs.Processors) > 0 && set.SDK != nil { + lp = set.SDK.LoggerProvider() + + logger = logger.WithOptions(zap.WrapCore(func(c zapcore.Core) zapcore.Core { + core, err := zapcore.NewIncreaseLevelCore(zapcore.NewTee( + c, + otelzap.NewCore("go.opentelemetry.io/collector/service/telemetry", + otelzap.WithLoggerProvider(lp), + ), + ), zap.NewAtomicLevelAt(cfg.Logs.Level)) + if err != nil { + panic(err) + } + return core + })) + } + + if cfg.Logs.Sampling != nil && cfg.Logs.Sampling.Enabled { + logger = newSampledLogger(logger, cfg.Logs.Sampling) } - return logger, nil + return logger, lp, nil } func newSampledLogger(logger *zap.Logger, sc *LogsSamplingConfig) *zap.Logger { diff --git a/vendor/go.opentelemetry.io/collector/service/telemetry.go b/vendor/go.opentelemetry.io/collector/service/telemetry/metrics.go similarity index 58% rename from vendor/go.opentelemetry.io/collector/service/telemetry.go rename to vendor/go.opentelemetry.io/collector/service/telemetry/metrics.go index c2a67c7f16f..0b2690c0af1 100644 --- a/vendor/go.opentelemetry.io/collector/service/telemetry.go +++ b/vendor/go.opentelemetry.io/collector/service/telemetry/metrics.go @@ -1,15 +1,13 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package service // import "go.opentelemetry.io/collector/service" +package telemetry // import "go.opentelemetry.io/collector/service/telemetry" import ( "context" - "net" "net/http" - "strconv" + "sync" - "go.opentelemetry.io/contrib/config" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/metric/noop" sdkmetric "go.opentelemetry.io/otel/sdk/metric" @@ -18,72 +16,48 @@ import ( "go.uber.org/zap" "go.opentelemetry.io/collector/config/configtelemetry" - "go.opentelemetry.io/collector/service/internal/proctelemetry" - "go.opentelemetry.io/collector/service/telemetry" + "go.opentelemetry.io/collector/service/telemetry/internal/otelinit" ) const ( zapKeyTelemetryAddress = "address" - zapKeyTelemetryLevel = "level" + zapKeyTelemetryLevel = "metrics level" ) type meterProvider struct { *sdkmetric.MeterProvider - servers []*http.Server + servers []*http.Server + serverWG sync.WaitGroup } type meterProviderSettings struct { res *resource.Resource - cfg telemetry.MetricsConfig + cfg 
MetricsConfig asyncErrorChannel chan error } +// newMeterProvider creates a new MeterProvider from Config. func newMeterProvider(set meterProviderSettings, disableHighCardinality bool) (metric.MeterProvider, error) { - if set.cfg.Level == configtelemetry.LevelNone || (set.cfg.Address == "" && len(set.cfg.Readers) == 0) { + if set.cfg.Level == configtelemetry.LevelNone || len(set.cfg.Readers) == 0 { return noop.NewMeterProvider(), nil } - if len(set.cfg.Address) != 0 { - host, port, err := net.SplitHostPort(set.cfg.Address) - if err != nil { - return nil, err - } - portInt, err := strconv.Atoi(port) - if err != nil { - return nil, err - } - if set.cfg.Readers == nil { - set.cfg.Readers = []config.MetricReader{} - } - set.cfg.Readers = append(set.cfg.Readers, config.MetricReader{ - Pull: &config.PullMetricReader{ - Exporter: config.MetricExporter{ - Prometheus: &config.Prometheus{ - Host: &host, - Port: &portInt, - }, - }, - }, - }) - } - mp := &meterProvider{} var opts []sdkmetric.Option for _, reader := range set.cfg.Readers { // https://github.com/open-telemetry/opentelemetry-collector/issues/8045 - r, server, err := proctelemetry.InitMetricReader(context.Background(), reader, set.asyncErrorChannel) + r, server, err := otelinit.InitMetricReader(context.Background(), reader, set.asyncErrorChannel, &mp.serverWG) if err != nil { return nil, err } if server != nil { mp.servers = append(mp.servers, server) - } opts = append(opts, sdkmetric.WithReader(r)) } var err error - mp.MeterProvider, err = proctelemetry.InitOpenTelemetry(set.res, opts, disableHighCardinality) + mp.MeterProvider, err = otelinit.InitOpenTelemetry(set.res, opts, disableHighCardinality) if err != nil { return nil, err } @@ -91,7 +65,7 @@ func newMeterProvider(set meterProviderSettings, disableHighCardinality bool) (m } // LogAboutServers logs about the servers that are serving metrics. -func (mp *meterProvider) LogAboutServers(logger *zap.Logger, cfg telemetry.MetricsConfig) { +func (mp *meterProvider) LogAboutServers(logger *zap.Logger, cfg MetricsConfig) { for _, server := range mp.servers { logger.Info( "Serving metrics", @@ -110,5 +84,8 @@ func (mp *meterProvider) Shutdown(ctx context.Context) error { errs = multierr.Append(errs, server.Close()) } } - return multierr.Append(errs, mp.MeterProvider.Shutdown(ctx)) + errs = multierr.Append(errs, mp.MeterProvider.Shutdown(ctx)) + mp.serverWG.Wait() + + return errs } diff --git a/vendor/go.opentelemetry.io/collector/service/telemetry/telemetry.go b/vendor/go.opentelemetry.io/collector/service/telemetry/telemetry.go deleted file mode 100644 index c9346c26d59..00000000000 --- a/vendor/go.opentelemetry.io/collector/service/telemetry/telemetry.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package telemetry // import "go.opentelemetry.io/collector/service/telemetry" - -import ( - "go.opentelemetry.io/collector/service/telemetry/internal" -) - -// Settings holds configuration for building Telemetry. 
-type Settings = internal.CreateSettings diff --git a/vendor/go.opentelemetry.io/collector/service/telemetry/tracer.go b/vendor/go.opentelemetry.io/collector/service/telemetry/tracer.go index d235ca3aaf2..f9ff92c1878 100644 --- a/vendor/go.opentelemetry.io/collector/service/telemetry/tracer.go +++ b/vendor/go.opentelemetry.io/collector/service/telemetry/tracer.go @@ -7,57 +7,53 @@ import ( "context" "errors" - "go.opentelemetry.io/contrib/config" "go.opentelemetry.io/contrib/propagators/b3" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/propagation" "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/embedded" + "go.opentelemetry.io/otel/trace/noop" + + "go.opentelemetry.io/collector/config/configtelemetry" + "go.opentelemetry.io/collector/featuregate" ) +var noopTracerProvider = featuregate.GlobalRegistry().MustRegister("service.noopTracerProvider", + featuregate.StageAlpha, + featuregate.WithRegisterFromVersion("v0.107.0"), + featuregate.WithRegisterToVersion("v0.109.0"), + featuregate.WithRegisterDescription("Sets a Noop OpenTelemetry TracerProvider to reduce memory allocations. This featuregate is incompatible with the zPages extension.")) + const ( // supported trace propagators traceContextPropagator = "tracecontext" b3Propagator = "b3" ) -var ( - errUnsupportedPropagator = errors.New("unsupported trace propagator") -) +var errUnsupportedPropagator = errors.New("unsupported trace propagator") -// New creates a new Telemetry from Config. -func newTracerProvider(ctx context.Context, cfg Config) (trace.TracerProvider, error) { - sdk, err := config.NewSDK( - config.WithContext(ctx), - config.WithOpenTelemetryConfiguration( - config.OpenTelemetryConfiguration{ - TracerProvider: &config.TracerProvider{ - Processors: cfg.Traces.Processors, - // TODO: once https://github.com/open-telemetry/opentelemetry-configuration/issues/83 is resolved, - // configuration for sampler should be done here via something like the following: - // - // Sampler: &config.Sampler{ - // ParentBased: &config.SamplerParentBased{ - // LocalParentSampled: &config.Sampler{ - // AlwaysOn: config.SamplerAlwaysOn{}, - // }, - // LocalParentNotSampled: &config.Sampler{ - // RecordOnly: config.SamplerRecordOnly{}, - // }, - // RemoteParentSampled: &config.Sampler{ - // AlwaysOn: config.SamplerAlwaysOn{}, - // }, - // RemoteParentNotSampled: &config.Sampler{ - // RecordOnly: config.SamplerRecordOnly{}, - // }, - // }, - // }, - }, - }, - ), - ) - - if err != nil { - return nil, err +type noopNoContextTracer struct { + embedded.Tracer +} + +var noopSpan = noop.Span{} + +func (n *noopNoContextTracer) Start(ctx context.Context, _ string, _ ...trace.SpanStartOption) (context.Context, trace.Span) { + return ctx, noopSpan +} + +type noopNoContextTracerProvider struct { + embedded.TracerProvider +} + +func (n *noopNoContextTracerProvider) Tracer(_ string, _ ...trace.TracerOption) trace.Tracer { + return &noopNoContextTracer{} +} + +// newTracerProvider creates a new TracerProvider from Config. 
+func newTracerProvider(set Settings, cfg Config) (trace.TracerProvider, error) { + if noopTracerProvider.IsEnabled() || cfg.Traces.Level == configtelemetry.LevelNone { + return &noopNoContextTracerProvider{}, nil } if tp, err := textMapPropagatorFromConfig(cfg.Traces.Propagators); err == nil { @@ -66,7 +62,10 @@ func newTracerProvider(ctx context.Context, cfg Config) (trace.TracerProvider, e return nil, err } - return sdk.TracerProvider(), nil + if set.SDK != nil { + return set.SDK.TracerProvider(), nil + } + return nil, errors.New("no sdk set") } func textMapPropagatorFromConfig(props []string) (propagation.TextMapPropagator, error) { diff --git a/vendor/go.opentelemetry.io/collector/service/zpages.go b/vendor/go.opentelemetry.io/collector/service/zpages.go deleted file mode 100644 index c7f7b494ad3..00000000000 --- a/vendor/go.opentelemetry.io/collector/service/zpages.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package service // import "go.opentelemetry.io/collector/service" - -import ( - "net/http" - "path" - "runtime" - "time" - - "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/featuregate" - "go.opentelemetry.io/collector/service/internal/zpages" -) - -const ( - // Paths - zServicePath = "servicez" - zPipelinePath = "pipelinez" - zExtensionPath = "extensionz" - zFeaturePath = "featurez" -) - -var ( - // InfoVar is a singleton instance of the Info struct. - runtimeInfoVar [][2]string -) - -func init() { - runtimeInfoVar = [][2]string{ - {"StartTimestamp", time.Now().String()}, - {"Go", runtime.Version()}, - {"OS", runtime.GOOS}, - {"Arch", runtime.GOARCH}, - // Add other valuable runtime information here. - } -} - -func (host *serviceHost) RegisterZPages(mux *http.ServeMux, pathPrefix string) { - mux.HandleFunc(path.Join(pathPrefix, zServicePath), host.zPagesRequest) - mux.HandleFunc(path.Join(pathPrefix, zPipelinePath), host.pipelines.HandleZPages) - mux.HandleFunc(path.Join(pathPrefix, zExtensionPath), host.serviceExtensions.HandleZPages) - mux.HandleFunc(path.Join(pathPrefix, zFeaturePath), handleFeaturezRequest) -} - -func (host *serviceHost) zPagesRequest(w http.ResponseWriter, _ *http.Request) { - w.Header().Set("Content-Type", "text/html; charset=utf-8") - zpages.WriteHTMLPageHeader(w, zpages.HeaderData{Title: "Service " + host.buildInfo.Command}) - zpages.WriteHTMLPropertiesTable(w, zpages.PropertiesTableData{Name: "Build Info", Properties: getBuildInfoProperties(host.buildInfo)}) - zpages.WriteHTMLPropertiesTable(w, zpages.PropertiesTableData{Name: "Runtime Info", Properties: runtimeInfoVar}) - zpages.WriteHTMLComponentHeader(w, zpages.ComponentHeaderData{ - Name: "Pipelines", - ComponentEndpoint: zPipelinePath, - Link: true, - }) - zpages.WriteHTMLComponentHeader(w, zpages.ComponentHeaderData{ - Name: "Extensions", - ComponentEndpoint: zExtensionPath, - Link: true, - }) - zpages.WriteHTMLComponentHeader(w, zpages.ComponentHeaderData{ - Name: "Features", - ComponentEndpoint: zFeaturePath, - Link: true, - }) - zpages.WriteHTMLPageFooter(w) -} - -func handleFeaturezRequest(w http.ResponseWriter, _ *http.Request) { - w.Header().Set("Content-Type", "text/html; charset=utf-8") - zpages.WriteHTMLPageHeader(w, zpages.HeaderData{Title: "Feature Gates"}) - zpages.WriteHTMLFeaturesTable(w, getFeaturesTableData()) - zpages.WriteHTMLPageFooter(w) -} - -func getFeaturesTableData() zpages.FeatureGateTableData { - data := zpages.FeatureGateTableData{} - 
featuregate.GlobalRegistry().VisitAll(func(gate *featuregate.Gate) { - data.Rows = append(data.Rows, zpages.FeatureGateTableRowData{ - ID: gate.ID(), - Enabled: gate.IsEnabled(), - Description: gate.Description(), - Stage: gate.Stage().String(), - FromVersion: gate.FromVersion(), - ToVersion: gate.ToVersion(), - ReferenceURL: gate.ReferenceURL(), - }) - }) - return data -} - -func getBuildInfoProperties(buildInfo component.BuildInfo) [][2]string { - return [][2]string{ - {"Command", buildInfo.Command}, - {"Description", buildInfo.Description}, - {"Version", buildInfo.Version}, - } -} diff --git a/vendor/go.opentelemetry.io/contrib/bridges/otelzap/LICENSE b/vendor/go.opentelemetry.io/contrib/bridges/otelzap/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/bridges/otelzap/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/contrib/bridges/otelzap/README.md b/vendor/go.opentelemetry.io/contrib/bridges/otelzap/README.md new file mode 100644 index 00000000000..5565260ae55 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/bridges/otelzap/README.md @@ -0,0 +1,3 @@ +# OpenTelemetry Zap Log Bridge + +[![Go Reference](https://pkg.go.dev/badge/go.opentelemetry.io/contrib/bridges/otelzap.svg)](https://pkg.go.dev/go.opentelemetry.io/contrib/bridges/otelzap) diff --git a/vendor/go.opentelemetry.io/contrib/bridges/otelzap/convert.go b/vendor/go.opentelemetry.io/contrib/bridges/otelzap/convert.go new file mode 100644 index 00000000000..6f64c794b76 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/bridges/otelzap/convert.go @@ -0,0 +1,123 @@ +// Code created by gotmpl. DO NOT MODIFY. +// source: internal/shared/logutil/convert.go.tmpl + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otelzap // import "go.opentelemetry.io/contrib/bridges/otelzap" + +import ( + "fmt" + "math" + "reflect" + "strconv" + "time" + + "go.opentelemetry.io/otel/log" +) + +// convertValue converts various types to log.Value. +func convertValue(v any) log.Value { + // Handling the most common types without reflect is a small perf win. 
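+	// For example, "x" becomes log.StringValue("x"), 42 becomes
+	// log.Int64Value(42), and []int{1, 2} becomes a log.SliceValue of two
+	// Int64 values via the reflection path below.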
+	switch val := v.(type) {
+	case bool:
+		return log.BoolValue(val)
+	case string:
+		return log.StringValue(val)
+	case int:
+		return log.Int64Value(int64(val))
+	case int8:
+		return log.Int64Value(int64(val))
+	case int16:
+		return log.Int64Value(int64(val))
+	case int32:
+		return log.Int64Value(int64(val))
+	case int64:
+		return log.Int64Value(val)
+	case uint:
+		return convertUintValue(uint64(val))
+	case uint8:
+		return log.Int64Value(int64(val))
+	case uint16:
+		return log.Int64Value(int64(val))
+	case uint32:
+		return log.Int64Value(int64(val))
+	case uint64:
+		return convertUintValue(val)
+	case uintptr:
+		return convertUintValue(uint64(val))
+	case float32:
+		return log.Float64Value(float64(val))
+	case float64:
+		return log.Float64Value(val)
+	case time.Duration:
+		return log.Int64Value(val.Nanoseconds())
+	case complex64:
+		r := log.Float64("r", real(complex128(val)))
+		i := log.Float64("i", imag(complex128(val)))
+		return log.MapValue(r, i)
+	case complex128:
+		r := log.Float64("r", real(val))
+		i := log.Float64("i", imag(val))
+		return log.MapValue(r, i)
+	case time.Time:
+		return log.Int64Value(val.UnixNano())
+	case []byte:
+		return log.BytesValue(val)
+	case error:
+		return log.StringValue(val.Error())
+	}
+
+	t := reflect.TypeOf(v)
+	if t == nil {
+		return log.Value{}
+	}
+	val := reflect.ValueOf(v)
+	switch t.Kind() {
+	case reflect.Struct:
+		return log.StringValue(fmt.Sprintf("%+v", v))
+	case reflect.Slice, reflect.Array:
+		items := make([]log.Value, 0, val.Len())
+		for i := 0; i < val.Len(); i++ {
+			items = append(items, convertValue(val.Index(i).Interface()))
+		}
+		return log.SliceValue(items...)
+	case reflect.Map:
+		kvs := make([]log.KeyValue, 0, val.Len())
+		for _, k := range val.MapKeys() {
+			var key string
+			switch k.Kind() {
+			case reflect.String:
+				key = k.String()
+			default:
+				key = fmt.Sprintf("%+v", k.Interface())
+			}
+			kvs = append(kvs, log.KeyValue{
+				Key:   key,
+				Value: convertValue(val.MapIndex(k).Interface()),
+			})
+		}
+		return log.MapValue(kvs...)
+	case reflect.Ptr, reflect.Interface:
+		if val.IsNil() {
+			return log.Value{}
+		}
+		return convertValue(val.Elem().Interface())
+	}
+
+	// Try to handle this as gracefully as possible.
+	//
+	// Don't panic here. It is preferable to have users open an issue
+	// asking why their attributes have an "unhandled: " prefix than
+	// to have their code panic.
+	return log.StringValue(fmt.Sprintf("unhandled: (%s) %+v", t, v))
+}
+
+// convertUintValue converts a uint64 to a log.Value.
+// If the value is too large to fit in an int64, it is converted to a string.
+func convertUintValue(v uint64) log.Value {
+	if v > math.MaxInt64 {
+		return log.StringValue(strconv.FormatUint(v, 10))
+	}
+	return log.Int64Value(int64(v))
+}
diff --git a/vendor/go.opentelemetry.io/contrib/bridges/otelzap/core.go b/vendor/go.opentelemetry.io/contrib/bridges/otelzap/core.go
new file mode 100644
index 00000000000..545f756803f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/bridges/otelzap/core.go
@@ -0,0 +1,277 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package otelzap provides a bridge between [go.uber.org/zap] and
+// [OpenTelemetry].
+//
+// # Record Conversion
+//
+// The [zapcore.Entry] and [zapcore.Field] are converted to OpenTelemetry [log.Record] in the following
+// way:
+//
+// - Time is set as the Timestamp.
+// - Message is set as the Body using a [log.StringValue].
+// - Level is transformed and set as the Severity. The SeverityText is also
+// set.
+// - Fields are transformed and set as the Attributes. +// - Field value of type [context.Context] is used as context when emitting log records. +// - For named loggers, LoggerName is used to access [log.Logger] from [log.LoggerProvider] +// +// The Level is transformed to the OpenTelemetry Severity types in the following way. +// +// - [zapcore.DebugLevel] is transformed to [log.SeverityDebug] +// - [zapcore.InfoLevel] is transformed to [log.SeverityInfo] +// - [zapcore.WarnLevel] is transformed to [log.SeverityWarn] +// - [zapcore.ErrorLevel] is transformed to [log.SeverityError] +// - [zapcore.DPanicLevel] is transformed to [log.SeverityFatal1] +// - [zapcore.PanicLevel] is transformed to [log.SeverityFatal2] +// - [zapcore.FatalLevel] is transformed to [log.SeverityFatal3] +// +// Fields are transformed based on their type into log attributes, or +// into a string value encoded using [fmt.Sprintf] if there is no matching type. +// +// [OpenTelemetry]: https://opentelemetry.io/docs/concepts/signals/logs/ +package otelzap // import "go.opentelemetry.io/contrib/bridges/otelzap" + +import ( + "context" + "slices" + "strings" + + "go.uber.org/zap/zapcore" + + "go.opentelemetry.io/otel/log" + "go.opentelemetry.io/otel/log/global" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" +) + +type config struct { + provider log.LoggerProvider + version string + schemaURL string +} + +func newConfig(options []Option) config { + var c config + for _, opt := range options { + c = opt.apply(c) + } + + if c.provider == nil { + c.provider = global.GetLoggerProvider() + } + + return c +} + +// Option configures a [Core]. +type Option interface { + apply(config) config +} + +type optFunc func(config) config + +func (f optFunc) apply(c config) config { return f(c) } + +// WithVersion returns an [Option] that configures the version of the +// [log.Logger] used by a [Core]. The version should be the version of the +// package that is being logged. +func WithVersion(version string) Option { + return optFunc(func(c config) config { + c.version = version + return c + }) +} + +// WithSchemaURL returns an [Option] that configures the semantic convention +// schema URL of the [log.Logger] used by a [Core]. The schemaURL should be +// the schema URL for the semantic conventions used in log records. +func WithSchemaURL(schemaURL string) Option { + return optFunc(func(c config) config { + c.schemaURL = schemaURL + return c + }) +} + +// WithLoggerProvider returns an [Option] that configures [log.LoggerProvider] +// used by a [Core] to create its [log.Logger]. +// +// By default if this Option is not provided, the Handler will use the global +// LoggerProvider. +func WithLoggerProvider(provider log.LoggerProvider) Option { + return optFunc(func(c config) config { + c.provider = provider + return c + }) +} + +// Core is a [zapcore.Core] that sends logging records to OpenTelemetry. +type Core struct { + provider log.LoggerProvider + logger log.Logger + opts []log.LoggerOption + attr []log.KeyValue + ctx context.Context +} + +// Compile-time check *Core implements zapcore.Core. +var _ zapcore.Core = (*Core)(nil) + +// NewCore creates a new [zapcore.Core] that can be used with [go.uber.org/zap.New]. +// The name should be the package import path that is being logged. +// The name is ignored for named loggers created using [go.uber.org/zap.Logger.Named]. 
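+//
+// A minimal usage sketch (the logger name is illustrative; it assumes a
+// LoggerProvider was registered via the global package or passed in with
+// [WithLoggerProvider]):
+//
+//	core := otelzap.NewCore("github.com/example/app")
+//	logger := zap.New(core)
+//	logger.Info("user logged in", zap.String("user.id", "42"))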
+func NewCore(name string, opts ...Option) *Core { + cfg := newConfig(opts) + + var loggerOpts []log.LoggerOption + if cfg.version != "" { + loggerOpts = append(loggerOpts, log.WithInstrumentationVersion(cfg.version)) + } + if cfg.schemaURL != "" { + loggerOpts = append(loggerOpts, log.WithSchemaURL(cfg.schemaURL)) + } + + logger := cfg.provider.Logger(name, loggerOpts...) + + return &Core{ + provider: cfg.provider, + logger: logger, + opts: loggerOpts, + ctx: context.Background(), + } +} + +// Enabled decides whether a given logging level is enabled when logging a message. +func (o *Core) Enabled(level zapcore.Level) bool { + param := log.EnabledParameters{Severity: convertLevel(level)} + return o.logger.Enabled(context.Background(), param) +} + +// With adds structured context to the Core. +func (o *Core) With(fields []zapcore.Field) zapcore.Core { + cloned := o.clone() + if len(fields) > 0 { + ctx, attrbuf := convertField(fields) + if ctx != nil { + cloned.ctx = ctx + } + cloned.attr = append(cloned.attr, attrbuf...) + } + return cloned +} + +func (o *Core) clone() *Core { + return &Core{ + provider: o.provider, + opts: o.opts, + logger: o.logger, + attr: slices.Clone(o.attr), + ctx: o.ctx, + } +} + +// Sync flushes buffered logs (if any). +func (o *Core) Sync() error { + return nil +} + +// Check determines whether the supplied Entry should be logged. +// If the entry should be logged, the Core adds itself to the CheckedEntry and returns the result. +func (o *Core) Check(ent zapcore.Entry, ce *zapcore.CheckedEntry) *zapcore.CheckedEntry { + param := log.EnabledParameters{Severity: convertLevel(ent.Level)} + + logger := o.logger + if ent.LoggerName != "" { + logger = o.provider.Logger(ent.LoggerName, o.opts...) + } + + if logger.Enabled(context.Background(), param) { + return ce.AddCore(ent, o) + } + return ce +} + +// Write method encodes zap fields to OTel logs and emits them. +func (o *Core) Write(ent zapcore.Entry, fields []zapcore.Field) error { + r := log.Record{} + r.SetTimestamp(ent.Time) + r.SetBody(log.StringValue(ent.Message)) + r.SetSeverity(convertLevel(ent.Level)) + r.SetSeverityText(ent.Level.String()) + + r.AddAttributes(o.attr...) + if ent.Caller.Defined { + funcName, namespace := splitFuncName(ent.Caller.Function) + r.AddAttributes( + log.String(string(semconv.CodeFilepathKey), ent.Caller.File), + log.Int(string(semconv.CodeLineNumberKey), ent.Caller.Line), + log.String(string(semconv.CodeFunctionKey), funcName), + log.String(string(semconv.CodeNamespaceKey), namespace), + ) + } + if ent.Stack != "" { + r.AddAttributes(log.String(string(semconv.CodeStacktraceKey), ent.Stack)) + } + if len(fields) > 0 { + ctx, attrbuf := convertField(fields) + if ctx != nil { + o.ctx = ctx + } + r.AddAttributes(attrbuf...) + } + + logger := o.logger + if ent.LoggerName != "" { + logger = o.provider.Logger(ent.LoggerName, o.opts...) 
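+		// Entries from named zap loggers (Logger.Named) are emitted through
+		// a logger resolved by LoggerName rather than the Core's default.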
+ } + logger.Emit(o.ctx, r) + return nil +} + +func convertField(fields []zapcore.Field) (context.Context, []log.KeyValue) { + var ctx context.Context + enc := newObjectEncoder(len(fields)) + for _, field := range fields { + if ctxFld, ok := field.Interface.(context.Context); ok { + ctx = ctxFld + continue + } + field.AddTo(enc) + } + + enc.calculate(enc.root) + return ctx, enc.root.attrs +} + +func convertLevel(level zapcore.Level) log.Severity { + switch level { + case zapcore.DebugLevel: + return log.SeverityDebug + case zapcore.InfoLevel: + return log.SeverityInfo + case zapcore.WarnLevel: + return log.SeverityWarn + case zapcore.ErrorLevel: + return log.SeverityError + case zapcore.DPanicLevel: + return log.SeverityFatal1 + case zapcore.PanicLevel: + return log.SeverityFatal2 + case zapcore.FatalLevel: + return log.SeverityFatal3 + default: + return log.SeverityUndefined + } +} + +// splitFuncName splits package path-qualified function name into +// function name and package full name (namespace). E.g. it splits +// "github.com/my/repo/pkg.foo" into +// "foo" and "github.com/my/repo/pkg". +func splitFuncName(f string) (string, string) { + i := strings.LastIndexByte(f, '.') + if i < 0 { + return "", "" + } + return f[i+1:], f[:i] +} diff --git a/vendor/go.opentelemetry.io/contrib/bridges/otelzap/encoder.go b/vendor/go.opentelemetry.io/contrib/bridges/otelzap/encoder.go new file mode 100644 index 00000000000..8147576ae77 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/bridges/otelzap/encoder.go @@ -0,0 +1,274 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otelzap // import "go.opentelemetry.io/contrib/bridges/otelzap" + +import ( + "time" + + "go.uber.org/zap/zapcore" + + "go.opentelemetry.io/otel/log" +) + +var ( + _ zapcore.ObjectEncoder = (*objectEncoder)(nil) + _ zapcore.ArrayEncoder = (*arrayEncoder)(nil) +) + +type namespace struct { + name string + attrs []log.KeyValue + next *namespace +} + +// objectEncoder implements zapcore.ObjectEncoder. +// It encodes given fields to OTel key-values. +type objectEncoder struct { + // root is a pointer to the default namespace + root *namespace + // cur is a pointer to the namespace we're currently writing to. + cur *namespace +} + +func newObjectEncoder(n int) *objectEncoder { + keyval := make([]log.KeyValue, 0, n) + m := &namespace{ + attrs: keyval, + } + return &objectEncoder{ + root: m, + cur: m, + } +} + +// It iterates to the end of the linked list and appends namespace data. +// Run this function before accessing complete result. +func (m *objectEncoder) calculate(o *namespace) { + if o.next == nil { + return + } + m.calculate(o.next) + o.attrs = append(o.attrs, log.Map(o.next.name, o.next.attrs...)) +} + +func (m *objectEncoder) AddArray(key string, v zapcore.ArrayMarshaler) error { + arr := newArrayEncoder() + err := v.MarshalLogArray(arr) + m.cur.attrs = append(m.cur.attrs, log.Slice(key, arr.elems...)) + return err +} + +func (m *objectEncoder) AddObject(k string, v zapcore.ObjectMarshaler) error { + // Similar to console_encoder which uses capacity of 2: + // https://github.com/uber-go/zap/blob/bd0cf0447951b77aa98dcfc1ac19e6f58d3ee64f/zapcore/console_encoder.go#L33. 
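+	// Each nested object gets its own encoder; its attributes are then
+	// grouped under key k as a single map-valued attribute.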
+ newobj := newObjectEncoder(2) + err := v.MarshalLogObject(newobj) + newobj.calculate(newobj.root) + m.cur.attrs = append(m.cur.attrs, log.Map(k, newobj.root.attrs...)) + return err +} + +func (m *objectEncoder) AddBinary(k string, v []byte) { + m.cur.attrs = append(m.cur.attrs, log.Bytes(k, v)) +} + +func (m *objectEncoder) AddByteString(k string, v []byte) { + m.cur.attrs = append(m.cur.attrs, log.String(k, string(v))) +} + +func (m *objectEncoder) AddBool(k string, v bool) { + m.cur.attrs = append(m.cur.attrs, log.Bool(k, v)) +} + +func (m *objectEncoder) AddDuration(k string, v time.Duration) { + m.AddInt64(k, v.Nanoseconds()) +} + +func (m *objectEncoder) AddComplex128(k string, v complex128) { + r := log.Float64("r", real(v)) + i := log.Float64("i", imag(v)) + m.cur.attrs = append(m.cur.attrs, log.Map(k, r, i)) +} + +func (m *objectEncoder) AddFloat64(k string, v float64) { + m.cur.attrs = append(m.cur.attrs, log.Float64(k, v)) +} + +func (m *objectEncoder) AddInt64(k string, v int64) { + m.cur.attrs = append(m.cur.attrs, log.Int64(k, v)) +} + +func (m *objectEncoder) AddInt(k string, v int) { + m.cur.attrs = append(m.cur.attrs, log.Int(k, v)) +} + +func (m *objectEncoder) AddString(k string, v string) { + m.cur.attrs = append(m.cur.attrs, log.String(k, v)) +} + +func (m *objectEncoder) AddUint64(k string, v uint64) { + m.cur.attrs = append(m.cur.attrs, + log.KeyValue{ + Key: k, + Value: assignUintValue(v), + }) +} + +func (m *objectEncoder) AddReflected(k string, v interface{}) error { + m.cur.attrs = append(m.cur.attrs, + log.KeyValue{ + Key: k, + Value: convertValue(v), + }) + return nil +} + +// OpenNamespace opens an isolated namespace where all subsequent fields will +// be added. +func (m *objectEncoder) OpenNamespace(k string) { + keyValue := make([]log.KeyValue, 0, 5) + s := &namespace{ + name: k, + attrs: keyValue, + } + m.cur.next = s + m.cur = s +} + +func (m *objectEncoder) AddComplex64(k string, v complex64) { + m.AddComplex128(k, complex128(v)) +} + +func (m *objectEncoder) AddTime(k string, v time.Time) { + m.AddInt64(k, v.UnixNano()) +} + +func (m *objectEncoder) AddFloat32(k string, v float32) { + m.AddFloat64(k, float64(v)) +} + +func (m *objectEncoder) AddInt32(k string, v int32) { + m.AddInt64(k, int64(v)) +} + +func (m *objectEncoder) AddInt16(k string, v int16) { + m.AddInt64(k, int64(v)) +} + +func (m *objectEncoder) AddInt8(k string, v int8) { + m.AddInt64(k, int64(v)) +} + +func (m *objectEncoder) AddUint(k string, v uint) { + m.AddUint64(k, uint64(v)) +} + +func (m *objectEncoder) AddUint32(k string, v uint32) { + m.AddInt64(k, int64(v)) +} + +func (m *objectEncoder) AddUint16(k string, v uint16) { + m.AddInt64(k, int64(v)) +} + +func (m *objectEncoder) AddUint8(k string, v uint8) { + m.AddInt64(k, int64(v)) +} + +func (m *objectEncoder) AddUintptr(k string, v uintptr) { + m.AddUint64(k, uint64(v)) +} + +func assignUintValue(v uint64) log.Value { + const maxInt64 = ^uint64(0) >> 1 + if v > maxInt64 { + return log.Float64Value(float64(v)) + } + return log.Int64Value(int64(v)) // nolint:gosec // Overflow checked above. +} + +// arrayEncoder implements [zapcore.ArrayEncoder]. +type arrayEncoder struct { + elems []log.Value +} + +func newArrayEncoder() *arrayEncoder { + return &arrayEncoder{ + // Similar to console_encoder which uses capacity of 2: + // https://github.com/uber-go/zap/blob/bd0cf0447951b77aa98dcfc1ac19e6f58d3ee64f/zapcore/console_encoder.go#L33. 
+ elems: make([]log.Value, 0, 2), + } +} + +func (a *arrayEncoder) AppendArray(v zapcore.ArrayMarshaler) error { + arr := newArrayEncoder() + err := v.MarshalLogArray(arr) + a.elems = append(a.elems, log.SliceValue(arr.elems...)) + return err +} + +func (a *arrayEncoder) AppendObject(v zapcore.ObjectMarshaler) error { + // Similar to console_encoder which uses capacity of 2: + // https://github.com/uber-go/zap/blob/bd0cf0447951b77aa98dcfc1ac19e6f58d3ee64f/zapcore/console_encoder.go#L33. + m := newObjectEncoder(2) + err := v.MarshalLogObject(m) + m.calculate(m.root) + a.elems = append(a.elems, log.MapValue(m.root.attrs...)) + return err +} + +func (a *arrayEncoder) AppendReflected(v interface{}) error { + a.elems = append(a.elems, convertValue(v)) + return nil +} + +func (a *arrayEncoder) AppendByteString(v []byte) { + a.elems = append(a.elems, log.StringValue(string(v))) +} + +func (a *arrayEncoder) AppendBool(v bool) { + a.elems = append(a.elems, log.BoolValue(v)) +} + +func (a *arrayEncoder) AppendFloat64(v float64) { + a.elems = append(a.elems, log.Float64Value(v)) +} + +func (a *arrayEncoder) AppendFloat32(v float32) { + a.AppendFloat64(float64(v)) +} + +func (a *arrayEncoder) AppendInt(v int) { + a.elems = append(a.elems, log.IntValue(v)) +} + +func (a *arrayEncoder) AppendInt64(v int64) { + a.elems = append(a.elems, log.Int64Value(v)) +} + +func (a *arrayEncoder) AppendString(v string) { + a.elems = append(a.elems, log.StringValue(v)) +} + +func (a *arrayEncoder) AppendComplex128(v complex128) { + r := log.Float64("r", real(v)) + i := log.Float64("i", imag(v)) + a.elems = append(a.elems, log.MapValue(r, i)) +} + +func (a *arrayEncoder) AppendUint64(v uint64) { + a.elems = append(a.elems, assignUintValue(v)) +} + +func (a *arrayEncoder) AppendComplex64(v complex64) { a.AppendComplex128(complex128(v)) } +func (a *arrayEncoder) AppendDuration(v time.Duration) { a.AppendInt64(v.Nanoseconds()) } +func (a *arrayEncoder) AppendInt32(v int32) { a.AppendInt64(int64(v)) } +func (a *arrayEncoder) AppendInt16(v int16) { a.AppendInt64(int64(v)) } +func (a *arrayEncoder) AppendInt8(v int8) { a.AppendInt64(int64(v)) } +func (a *arrayEncoder) AppendTime(v time.Time) { a.AppendInt64(v.UnixNano()) } +func (a *arrayEncoder) AppendUint(v uint) { a.AppendUint64(uint64(v)) } +func (a *arrayEncoder) AppendUint32(v uint32) { a.AppendInt64(int64(v)) } +func (a *arrayEncoder) AppendUint16(v uint16) { a.AppendInt64(int64(v)) } +func (a *arrayEncoder) AppendUint8(v uint8) { a.AppendInt64(int64(v)) } +func (a *arrayEncoder) AppendUintptr(v uintptr) { a.AppendUint64(uint64(v)) } diff --git a/vendor/go.opentelemetry.io/contrib/bridges/otelzap/gen.go b/vendor/go.opentelemetry.io/contrib/bridges/otelzap/gen.go new file mode 100644 index 00000000000..5c8b2eea7e4 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/bridges/otelzap/gen.go @@ -0,0 +1,8 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otelzap // import "go.opentelemetry.io/contrib/bridges/otelzap" + +// Generate convert: +//go:generate gotmpl --body=../../internal/shared/logutil/convert_test.go.tmpl "--data={ \"pkg\": \"otelzap\" }" --out=convert_test.go +//go:generate gotmpl --body=../../internal/shared/logutil/convert.go.tmpl "--data={ \"pkg\": \"otelzap\" }" --out=convert.go diff --git a/vendor/go.opentelemetry.io/contrib/bridges/prometheus/producer.go b/vendor/go.opentelemetry.io/contrib/bridges/prometheus/producer.go index 5626bdc5545..e94d07a6d46 100644 --- 
a/vendor/go.opentelemetry.io/contrib/bridges/prometheus/producer.go +++ b/vendor/go.opentelemetry.io/contrib/bridges/prometheus/producer.go @@ -214,10 +214,10 @@ func convertExponentialBuckets(bucketSpans []*dto.BucketSpan, deltas []int64) me initialOffset := bucketSpans[0].GetOffset() - 1 // We will have one bucket count for each delta, and zeros for the offsets // after the initial offset. - lenCounts := int32(len(deltas)) + lenCounts := len(deltas) for i, bs := range bucketSpans { if i != 0 { - lenCounts += bs.GetOffset() + lenCounts += int(bs.GetOffset()) } } counts := make([]uint64, lenCounts) diff --git a/vendor/go.opentelemetry.io/contrib/config/config.go b/vendor/go.opentelemetry.io/contrib/config/config.go index 15f471bc6ed..6b8d43cd468 100644 --- a/vendor/go.opentelemetry.io/contrib/config/config.go +++ b/vendor/go.opentelemetry.io/contrib/config/config.go @@ -7,6 +7,7 @@ import ( "context" "errors" + "go.opentelemetry.io/otel/log" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/trace" ) @@ -35,6 +36,7 @@ func noopShutdown(context.Context) error { type SDK struct { meterProvider metric.MeterProvider tracerProvider trace.TracerProvider + loggerProvider log.LoggerProvider shutdown shutdownFunc } @@ -48,6 +50,11 @@ func (s *SDK) MeterProvider() metric.MeterProvider { return s.meterProvider } +// LoggerProvider returns a configured log.LoggerProvider. +func (s *SDK) LoggerProvider() log.LoggerProvider { + return s.loggerProvider +} + // Shutdown calls shutdown on all configured providers. func (s *SDK) Shutdown(ctx context.Context) error { return s.shutdown(ctx) @@ -77,12 +84,17 @@ func NewSDK(opts ...ConfigurationOption) (SDK, error) { return SDK{}, err } + lp, lpShutdown, err := loggerProvider(o, r) + if err != nil { + return SDK{}, err + } + return SDK{ meterProvider: mp, tracerProvider: tp, + loggerProvider: lp, shutdown: func(ctx context.Context) error { - err := mpShutdown(ctx) - return errors.Join(err, tpShutdown(ctx)) + return errors.Join(mpShutdown(ctx), tpShutdown(ctx), lpShutdown(ctx)) }, }, nil } @@ -118,6 +130,3 @@ func WithOpenTelemetryConfiguration(cfg OpenTelemetryConfiguration) Configuratio // TODO: implement parsing functionality: // - https://github.com/open-telemetry/opentelemetry-go-contrib/issues/4373 // - https://github.com/open-telemetry/opentelemetry-go-contrib/issues/4412 - -// TODO: create SDK from the model: -// - https://github.com/open-telemetry/opentelemetry-go-contrib/issues/4371 diff --git a/vendor/go.opentelemetry.io/contrib/config/generated_config.go b/vendor/go.opentelemetry.io/contrib/config/generated_config.go index 64154ee5437..244a9756899 100644 --- a/vendor/go.opentelemetry.io/contrib/config/generated_config.go +++ b/vendor/go.opentelemetry.io/contrib/config/generated_config.go @@ -18,12 +18,7 @@ type AttributeLimits struct { AdditionalProperties interface{} } -type Attributes struct { - // ServiceName corresponds to the JSON schema field "service.name". - ServiceName *string `mapstructure:"service.name,omitempty"` - - AdditionalProperties interface{} -} +type Attributes map[string]interface{} type BatchLogRecordProcessor struct { // ExportTimeout corresponds to the JSON schema field "export_timeout". @@ -101,9 +96,33 @@ type Common map[string]interface{} type Console map[string]interface{} +type Detectors struct { + // Attributes corresponds to the JSON schema field "attributes". 
+ Attributes *DetectorsAttributes `mapstructure:"attributes,omitempty"` +} + +type DetectorsAttributes struct { + // Excluded corresponds to the JSON schema field "excluded". + Excluded []string `mapstructure:"excluded,omitempty"` + + // Included corresponds to the JSON schema field "included". + Included []string `mapstructure:"included,omitempty"` +} + type Headers map[string]string +type IncludeExclude struct { + // Excluded corresponds to the JSON schema field "excluded". + Excluded []string `mapstructure:"excluded,omitempty"` + + // Included corresponds to the JSON schema field "included". + Included []string `mapstructure:"included,omitempty"` +} + type LogRecordExporter struct { + // Console corresponds to the JSON schema field "console". + Console Console `mapstructure:"console,omitempty"` + // OTLP corresponds to the JSON schema field "otlp". OTLP *OTLP `mapstructure:"otlp,omitempty"` @@ -186,6 +205,9 @@ type OTLP struct { // Headers corresponds to the JSON schema field "headers". Headers Headers `mapstructure:"headers,omitempty"` + // Insecure corresponds to the JSON schema field "insecure". + Insecure *bool `mapstructure:"insecure,omitempty"` + // Protocol corresponds to the JSON schema field "protocol". Protocol string `mapstructure:"protocol"` @@ -216,6 +238,9 @@ type OTLPMetric struct { // Headers corresponds to the JSON schema field "headers". Headers Headers `mapstructure:"headers,omitempty"` + // Insecure corresponds to the JSON schema field "insecure". + Insecure *bool `mapstructure:"insecure,omitempty"` + // Protocol corresponds to the JSON schema field "protocol". Protocol string `mapstructure:"protocol"` @@ -381,6 +406,10 @@ type Prometheus struct { // Port corresponds to the JSON schema field "port". Port *int `mapstructure:"port,omitempty"` + // WithResourceConstantLabels corresponds to the JSON schema field + // "with_resource_constant_labels". + WithResourceConstantLabels *IncludeExclude `mapstructure:"with_resource_constant_labels,omitempty"` + // WithoutScopeInfo corresponds to the JSON schema field "without_scope_info". WithoutScopeInfo *bool `mapstructure:"without_scope_info,omitempty"` @@ -423,7 +452,10 @@ func (j *PullMetricReader) UnmarshalJSON(b []byte) error { type Resource struct { // Attributes corresponds to the JSON schema field "attributes". - Attributes *Attributes `mapstructure:"attributes,omitempty"` + Attributes Attributes `mapstructure:"attributes,omitempty"` + + // Detectors corresponds to the JSON schema field "detectors". + Detectors *Detectors `mapstructure:"detectors,omitempty"` // SchemaUrl corresponds to the JSON schema field "schema_url". 
SchemaUrl *string `mapstructure:"schema_url,omitempty"` diff --git a/vendor/go.opentelemetry.io/contrib/config/log.go b/vendor/go.opentelemetry.io/contrib/config/log.go new file mode 100644 index 00000000000..f30b37c8a45 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/config/log.go @@ -0,0 +1,155 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package config // import "go.opentelemetry.io/contrib/config" + +import ( + "context" + "errors" + "fmt" + "net/url" + "time" + + "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp" + "go.opentelemetry.io/otel/exporters/stdout/stdoutlog" + "go.opentelemetry.io/otel/log" + "go.opentelemetry.io/otel/log/noop" + sdklog "go.opentelemetry.io/otel/sdk/log" + "go.opentelemetry.io/otel/sdk/resource" +) + +func loggerProvider(cfg configOptions, res *resource.Resource) (log.LoggerProvider, shutdownFunc, error) { + if cfg.opentelemetryConfig.LoggerProvider == nil { + return noop.NewLoggerProvider(), noopShutdown, nil + } + opts := []sdklog.LoggerProviderOption{ + sdklog.WithResource(res), + } + var errs []error + for _, processor := range cfg.opentelemetryConfig.LoggerProvider.Processors { + sp, err := logProcessor(cfg.ctx, processor) + if err == nil { + opts = append(opts, sdklog.WithProcessor(sp)) + } else { + errs = append(errs, err) + } + } + + if len(errs) > 0 { + return noop.NewLoggerProvider(), noopShutdown, errors.Join(errs...) + } + + lp := sdklog.NewLoggerProvider(opts...) + return lp, lp.Shutdown, nil +} + +func logProcessor(ctx context.Context, processor LogRecordProcessor) (sdklog.Processor, error) { + if processor.Batch != nil && processor.Simple != nil { + return nil, errors.New("must not specify multiple log processor type") + } + if processor.Batch != nil { + exp, err := logExporter(ctx, processor.Batch.Exporter) + if err != nil { + return nil, err + } + return batchLogProcessor(processor.Batch, exp) + } + if processor.Simple != nil { + exp, err := logExporter(ctx, processor.Simple.Exporter) + if err != nil { + return nil, err + } + return sdklog.NewSimpleProcessor(exp), nil + } + return nil, fmt.Errorf("unsupported log processor type, must be one of simple or batch") +} + +func logExporter(ctx context.Context, exporter LogRecordExporter) (sdklog.Exporter, error) { + if exporter.Console != nil && exporter.OTLP != nil { + return nil, errors.New("must not specify multiple exporters") + } + + if exporter.Console != nil { + return stdoutlog.New( + stdoutlog.WithPrettyPrint(), + ) + } + + if exporter.OTLP != nil { + switch exporter.OTLP.Protocol { + case protocolProtobufHTTP: + return otlpHTTPLogExporter(ctx, exporter.OTLP) + default: + return nil, fmt.Errorf("unsupported protocol %q", exporter.OTLP.Protocol) + } + } + return nil, errors.New("no valid log exporter") +} + +func batchLogProcessor(blp *BatchLogRecordProcessor, exp sdklog.Exporter) (*sdklog.BatchProcessor, error) { + var opts []sdklog.BatchProcessorOption + if blp.ExportTimeout != nil { + if *blp.ExportTimeout < 0 { + return nil, fmt.Errorf("invalid export timeout %d", *blp.ExportTimeout) + } + opts = append(opts, sdklog.WithExportTimeout(time.Millisecond*time.Duration(*blp.ExportTimeout))) + } + if blp.MaxExportBatchSize != nil { + if *blp.MaxExportBatchSize < 0 { + return nil, fmt.Errorf("invalid batch size %d", *blp.MaxExportBatchSize) + } + opts = append(opts, sdklog.WithExportMaxBatchSize(*blp.MaxExportBatchSize)) + } + if blp.MaxQueueSize != nil { + if *blp.MaxQueueSize < 0 { + return nil, fmt.Errorf("invalid queue size %d", 
*blp.MaxQueueSize) + } + opts = append(opts, sdklog.WithMaxQueueSize(*blp.MaxQueueSize)) + } + + if blp.ScheduleDelay != nil { + if *blp.ScheduleDelay < 0 { + return nil, fmt.Errorf("invalid schedule delay %d", *blp.ScheduleDelay) + } + opts = append(opts, sdklog.WithExportInterval(time.Millisecond*time.Duration(*blp.ScheduleDelay))) + } + + return sdklog.NewBatchProcessor(exp, opts...), nil +} + +func otlpHTTPLogExporter(ctx context.Context, otlpConfig *OTLP) (sdklog.Exporter, error) { + var opts []otlploghttp.Option + + if len(otlpConfig.Endpoint) > 0 { + u, err := url.ParseRequestURI(otlpConfig.Endpoint) + if err != nil { + return nil, err + } + opts = append(opts, otlploghttp.WithEndpoint(u.Host)) + + if u.Scheme == "http" { + opts = append(opts, otlploghttp.WithInsecure()) + } + if len(u.Path) > 0 { + opts = append(opts, otlploghttp.WithURLPath(u.Path)) + } + } + if otlpConfig.Compression != nil { + switch *otlpConfig.Compression { + case compressionGzip: + opts = append(opts, otlploghttp.WithCompression(otlploghttp.GzipCompression)) + case compressionNone: + opts = append(opts, otlploghttp.WithCompression(otlploghttp.NoCompression)) + default: + return nil, fmt.Errorf("unsupported compression %q", *otlpConfig.Compression) + } + } + if otlpConfig.Timeout != nil && *otlpConfig.Timeout > 0 { + opts = append(opts, otlploghttp.WithTimeout(time.Millisecond*time.Duration(*otlpConfig.Timeout))) + } + if len(otlpConfig.Headers) > 0 { + opts = append(opts, otlploghttp.WithHeaders(otlpConfig.Headers)) + } + + return otlploghttp.New(ctx, opts...) +} diff --git a/vendor/go.opentelemetry.io/contrib/config/metric.go b/vendor/go.opentelemetry.io/contrib/config/metric.go index 0b7543d3dbc..38b294172fb 100644 --- a/vendor/go.opentelemetry.io/contrib/config/metric.go +++ b/vendor/go.opentelemetry.io/contrib/config/metric.go @@ -8,6 +8,7 @@ import ( "encoding/json" "errors" "fmt" + "math" "net" "net/http" "net/url" @@ -18,16 +19,22 @@ import ( "github.com/prometheus/client_golang/prometheus/promhttp" "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp" otelprom "go.opentelemetry.io/otel/exporters/prometheus" "go.opentelemetry.io/otel/exporters/stdout/stdoutmetric" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/metric/noop" + "go.opentelemetry.io/otel/sdk/instrumentation" sdkmetric "go.opentelemetry.io/otel/sdk/metric" "go.opentelemetry.io/otel/sdk/resource" ) +var zeroScope instrumentation.Scope + +const instrumentKindUndefined = sdkmetric.InstrumentKind(0) + func meterProvider(cfg configOptions, res *resource.Resource) (metric.MeterProvider, shutdownFunc, error) { if cfg.opentelemetryConfig.MeterProvider == nil { return noop.NewMeterProvider(), noopShutdown, nil @@ -45,6 +52,15 @@ func meterProvider(cfg configOptions, res *resource.Resource) (metric.MeterProvi errs = append(errs, err) } } + for _, vw := range cfg.opentelemetryConfig.MeterProvider.Views { + v, err := view(vw) + if err == nil { + opts = append(opts, sdkmetric.WithView(v)) + } else { + errs = append(errs, err) + } + } + if len(errs) > 0 { return noop.NewMeterProvider(), noopShutdown, errors.Join(errs...) 
 }
@@ -59,7 +75,15 @@ func metricReader(ctx context.Context, r MetricReader) (sdkmetric.Reader, error)
 	}
 
 	if r.Periodic != nil {
-		return periodicExporter(ctx, r.Periodic.Exporter)
+		var opts []sdkmetric.PeriodicReaderOption
+		if r.Periodic.Interval != nil {
+			opts = append(opts, sdkmetric.WithInterval(time.Duration(*r.Periodic.Interval)*time.Millisecond))
+		}
+
+		if r.Periodic.Timeout != nil {
+			opts = append(opts, sdkmetric.WithTimeout(time.Duration(*r.Periodic.Timeout)*time.Millisecond))
+		}
+		return periodicExporter(ctx, r.Periodic.Exporter, opts...)
 	}
 
 	if r.Pull != nil {
@@ -148,7 +172,7 @@ func otlpHTTPMetricExporter(ctx context.Context, otlpConfig *OTLPMetric) (sdkmet
 }
 
 func otlpGRPCMetricExporter(ctx context.Context, otlpConfig *OTLPMetric) (sdkmetric.Exporter, error) {
-	opts := []otlpmetricgrpc.Option{}
+	var opts []otlpmetricgrpc.Option
 
 	if len(otlpConfig.Endpoint) > 0 {
 		u, err := url.ParseRequestURI(otlpConfig.Endpoint)
@@ -207,6 +231,22 @@ func prometheusReader(ctx context.Context, prometheusConfig *Prometheus) (sdkmet
 	if prometheusConfig.WithoutUnits != nil && *prometheusConfig.WithoutUnits {
 		opts = append(opts, otelprom.WithoutUnits())
 	}
+	if prometheusConfig.WithResourceConstantLabels != nil {
+		if prometheusConfig.WithResourceConstantLabels.Included != nil {
+			var keys []attribute.Key
+			for _, val := range prometheusConfig.WithResourceConstantLabels.Included {
+				keys = append(keys, attribute.Key(val))
+			}
+			opts = append(opts, otelprom.WithResourceAsConstantLabels(attribute.NewAllowKeysFilter(keys...)))
+		}
+		if prometheusConfig.WithResourceConstantLabels.Excluded != nil {
+			var keys []attribute.Key
+			for _, val := range prometheusConfig.WithResourceConstantLabels.Excluded {
+				keys = append(keys, attribute.Key(val))
+			}
+			opts = append(opts, otelprom.WithResourceAsConstantLabels(attribute.NewDenyKeysFilter(keys...)))
+		}
+	}
 
 	reg := prometheus.NewRegistry()
 	opts = append(opts, otelprom.WithRegisterer(reg))
@@ -214,7 +254,7 @@ func prometheusReader(ctx context.Context, prometheusConfig *Prometheus) (sdkmet
 	mux := http.NewServeMux()
 	mux.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg}))
 	server := http.Server{
-		// Timeouts are necessary to make a server resilent to attacks, but ListenAndServe doesn't set any.
+		// Timeouts are necessary to make a server resilient to attacks, but ListenAndServe doesn't set any.
 		// We use values from this example: https://blog.cloudflare.com/exposing-go-on-the-internet/#:~:text=There%20are%20three%20main%20timeouts
 		ReadTimeout:  5 * time.Second,
 		WriteTimeout: 10 * time.Second,
@@ -223,9 +263,6 @@ func prometheusReader(ctx context.Context, prometheusConfig *Prometheus) (sdkmet
 	}
 	addr := fmt.Sprintf("%s:%d", *prometheusConfig.Host, *prometheusConfig.Port)
-	// TODO: add support for constant label filter
-	//		otelprom.WithResourceAsConstantLabels(attribute.NewDenyKeysFilter()),
-	// )
 	reader, err := otelprom.New(opts...)
 	if err != nil {
 		return nil, fmt.Errorf("error creating otel prometheus exporter: %w", err)
@@ -239,7 +276,7 @@ func prometheusReader(ctx context.Context, prometheusConfig *Prometheus) (sdkmet
 	}
 
 	go func() {
-		if err := server.Serve(lis); err != nil && err != http.ErrServerClosed {
+		if err := server.Serve(lis); err != nil && !errors.Is(err, http.ErrServerClosed) {
 			otel.Handle(fmt.Errorf("the Prometheus HTTP server exited unexpectedly: %w", err))
 		}
 	}()
@@ -258,3 +295,155 @@ func (rws readerWithServer) Shutdown(ctx context.Context) error {
 		rws.server.Shutdown(ctx),
 	)
 }
+
+func view(v View) (sdkmetric.View, error) {
+	if v.Selector == nil {
+		return nil, errors.New("view: no selector provided")
+	}
+
+	inst, err := instrument(*v.Selector)
+	if err != nil {
+		return nil, err
+	}
+
+	return sdkmetric.NewView(inst, stream(v.Stream)), nil
+}
+
+func instrument(vs ViewSelector) (sdkmetric.Instrument, error) {
+	kind, err := instrumentKind(vs.InstrumentType)
+	if err != nil {
+		return sdkmetric.Instrument{}, fmt.Errorf("view_selector: %w", err)
+	}
+	inst := sdkmetric.Instrument{
+		Name: strOrEmpty(vs.InstrumentName),
+		Unit: strOrEmpty(vs.Unit),
+		Kind: kind,
+		Scope: instrumentation.Scope{
+			Name:      strOrEmpty(vs.MeterName),
+			Version:   strOrEmpty(vs.MeterVersion),
+			SchemaURL: strOrEmpty(vs.MeterSchemaUrl),
+		},
+	}
+
+	if instrumentIsEmpty(inst) {
+		return sdkmetric.Instrument{}, errors.New("view_selector: empty selector not supported")
+	}
+	return inst, nil
+}
+
+func stream(vs *ViewStream) sdkmetric.Stream {
+	if vs == nil {
+		return sdkmetric.Stream{}
+	}
+
+	return sdkmetric.Stream{
+		Name:            strOrEmpty(vs.Name),
+		Description:     strOrEmpty(vs.Description),
+		Aggregation:     aggregation(vs.Aggregation),
+		AttributeFilter: attributeFilter(vs.AttributeKeys),
+	}
+}
+
+func attributeFilter(attributeKeys []string) attribute.Filter {
+	var attrKeys []attribute.Key
+	for _, attrStr := range attributeKeys {
+		attrKeys = append(attrKeys, attribute.Key(attrStr))
+	}
+	return attribute.NewAllowKeysFilter(attrKeys...)
+}
+
+func aggregation(aggr *ViewStreamAggregation) sdkmetric.Aggregation {
+	if aggr == nil {
+		return nil
+	}
+
+	if aggr.Base2ExponentialBucketHistogram != nil {
+		return sdkmetric.AggregationBase2ExponentialHistogram{
+			MaxSize:  int32OrZero(aggr.Base2ExponentialBucketHistogram.MaxSize),
+			MaxScale: int32OrZero(aggr.Base2ExponentialBucketHistogram.MaxScale),
+			// Need to negate because config has the positive action RecordMinMax.
+			NoMinMax: !boolOrFalse(aggr.Base2ExponentialBucketHistogram.RecordMinMax),
+		}
+	}
+	if aggr.Default != nil {
+		// TODO: Understand what to set here.
+		return nil
+	}
+	if aggr.Drop != nil {
+		return sdkmetric.AggregationDrop{}
+	}
+	if aggr.ExplicitBucketHistogram != nil {
+		return sdkmetric.AggregationExplicitBucketHistogram{
+			Boundaries: aggr.ExplicitBucketHistogram.Boundaries,
+			// Need to negate because config has the positive action RecordMinMax.
+			NoMinMax: !boolOrFalse(aggr.ExplicitBucketHistogram.RecordMinMax),
+		}
+	}
+	if aggr.LastValue != nil {
+		return sdkmetric.AggregationLastValue{}
+	}
+	if aggr.Sum != nil {
+		return sdkmetric.AggregationSum{}
+	}
+	return nil
+}
+
+func instrumentKind(vsit *ViewSelectorInstrumentType) (sdkmetric.InstrumentKind, error) {
+	if vsit == nil {
+		// Equivalent to instrumentKindUndefined.
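+		// Leaving Kind unset makes the resulting view selector match
+		// instruments of any kind.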
+ return instrumentKindUndefined, nil + } + + switch *vsit { + case ViewSelectorInstrumentTypeCounter: + return sdkmetric.InstrumentKindCounter, nil + case ViewSelectorInstrumentTypeUpDownCounter: + return sdkmetric.InstrumentKindUpDownCounter, nil + case ViewSelectorInstrumentTypeHistogram: + return sdkmetric.InstrumentKindHistogram, nil + case ViewSelectorInstrumentTypeObservableCounter: + return sdkmetric.InstrumentKindObservableCounter, nil + case ViewSelectorInstrumentTypeObservableUpDownCounter: + return sdkmetric.InstrumentKindObservableUpDownCounter, nil + case ViewSelectorInstrumentTypeObservableGauge: + return sdkmetric.InstrumentKindObservableGauge, nil + } + + return instrumentKindUndefined, errors.New("instrument_type: invalid value") +} + +func instrumentIsEmpty(i sdkmetric.Instrument) bool { + return i.Name == "" && + i.Description == "" && + i.Kind == instrumentKindUndefined && + i.Unit == "" && + i.Scope == zeroScope +} + +func boolOrFalse(pBool *bool) bool { + if pBool == nil { + return false + } + return *pBool +} + +func int32OrZero(pInt *int) int32 { + if pInt == nil { + return 0 + } + i := *pInt + if i > math.MaxInt32 { + return math.MaxInt32 + } + if i < math.MinInt32 { + return math.MinInt32 + } + return int32(i) // nolint: gosec // Overflow and underflow checked above. +} + +func strOrEmpty(pStr *string) string { + if pStr == nil { + return "" + } + return *pStr +} diff --git a/vendor/go.opentelemetry.io/contrib/config/resource.go b/vendor/go.opentelemetry.io/contrib/config/resource.go index 302a59637e6..020d6660b23 100644 --- a/vendor/go.opentelemetry.io/contrib/config/resource.go +++ b/vendor/go.opentelemetry.io/contrib/config/resource.go @@ -8,7 +8,6 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk/resource" - semconv "go.opentelemetry.io/otel/semconv/v1.25.0" ) func keyVal(k string, v any) attribute.KeyValue { @@ -50,14 +49,10 @@ func newResource(res *Resource) (*resource.Resource, error) { if res == nil || res.Attributes == nil { return resource.Default(), nil } - attrs := []attribute.KeyValue{ - semconv.ServiceName(*res.Attributes.ServiceName), - } + var attrs []attribute.KeyValue - if props, ok := res.Attributes.AdditionalProperties.(map[string]any); ok { - for k, v := range props { - attrs = append(attrs, keyVal(k, v)) - } + for k, v := range res.Attributes { + attrs = append(attrs, keyVal(k, v)) } return resource.Merge(resource.Default(), diff --git a/vendor/go.opentelemetry.io/contrib/config/trace.go b/vendor/go.opentelemetry.io/contrib/config/trace.go index 3666cea5378..aff4c3584ec 100644 --- a/vendor/go.opentelemetry.io/contrib/config/trace.go +++ b/vendor/go.opentelemetry.io/contrib/config/trace.go @@ -83,7 +83,7 @@ func spanProcessor(ctx context.Context, processor SpanProcessor) (sdktrace.SpanP } return sdktrace.NewSimpleSpanProcessor(exp), nil } - return nil, fmt.Errorf("unsupported span processor type %v", processor) + return nil, fmt.Errorf("unsupported span processor type, must be one of simple or batch") } func otlpGRPCSpanExporter(ctx context.Context, otlpConfig *OTLP) (sdktrace.SpanExporter, error) { diff --git a/vendor/go.opentelemetry.io/contrib/exporters/autoexport/logs.go b/vendor/go.opentelemetry.io/contrib/exporters/autoexport/logs.go index 9e926ce32e1..618379c621d 100644 --- a/vendor/go.opentelemetry.io/contrib/exporters/autoexport/logs.go +++ b/vendor/go.opentelemetry.io/contrib/exporters/autoexport/logs.go @@ -7,6 +7,7 @@ import ( "context" "os" + 
"go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc" "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp" "go.opentelemetry.io/otel/exporters/stdout/stdoutlog" "go.opentelemetry.io/otel/sdk/log" @@ -31,6 +32,8 @@ var logsSignal = newSignal[log.Exporter]("OTEL_LOGS_EXPORTER") // supported values: // - "http/protobuf" (default) - protobuf-encoded data over HTTP connection; // see: [go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp] +// - "grpc" - gRPC with protobuf-encoded data over HTTP/2 connection; +// see: [go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc] // // OTEL_EXPORTER_OTLP_LOGS_PROTOCOL defines OTLP exporter's transport protocol for the logs signal; // supported values are the same as OTEL_EXPORTER_OTLP_PROTOCOL. @@ -67,9 +70,8 @@ func init() { } switch proto { - // grpc is not supported yet, should comment out when it is supported - // case "grpc": - // return otlploggrpc.New(ctx) + case "grpc": + return otlploggrpc.New(ctx) case "http/protobuf": return otlploghttp.New(ctx) default: diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go index a199b36b4fa..9e87fb4bb19 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go @@ -30,7 +30,7 @@ const ( type InterceptorFilter func(*InterceptorInfo) bool // Filter is a predicate used to determine whether a given request in -// should be instrumented by the attatched RPC tag info. +// should be instrumented by the attached RPC tag info. // A Filter must return true if the request should be instrumented. type Filter func(*stats.RPCTagInfo) bool @@ -42,6 +42,8 @@ type config struct { TracerProvider trace.TracerProvider MeterProvider metric.MeterProvider SpanStartOptions []trace.SpanStartOption + SpanAttributes []attribute.KeyValue + MetricAttributes []attribute.KeyValue ReceivedEvent bool SentEvent bool @@ -49,11 +51,11 @@ type config struct { tracer trace.Tracer meter metric.Meter - rpcDuration metric.Float64Histogram - rpcRequestSize metric.Int64Histogram - rpcResponseSize metric.Int64Histogram - rpcRequestsPerRPC metric.Int64Histogram - rpcResponsesPerRPC metric.Int64Histogram + rpcDuration metric.Float64Histogram + rpcInBytes metric.Int64Histogram + rpcOutBytes metric.Int64Histogram + rpcInMessages metric.Int64Histogram + rpcOutMessages metric.Int64Histogram } // Option applies an option value for a config. 
@@ -94,46 +96,64 @@ func newConfig(opts []Option, role string) *config { } } - c.rpcRequestSize, err = c.meter.Int64Histogram("rpc."+role+".request.size", + rpcRequestSize, err := c.meter.Int64Histogram("rpc."+role+".request.size", metric.WithDescription("Measures size of RPC request messages (uncompressed)."), metric.WithUnit("By")) if err != nil { otel.Handle(err) - if c.rpcRequestSize == nil { - c.rpcRequestSize = noop.Int64Histogram{} + if rpcRequestSize == nil { + rpcRequestSize = noop.Int64Histogram{} } } - c.rpcResponseSize, err = c.meter.Int64Histogram("rpc."+role+".response.size", + rpcResponseSize, err := c.meter.Int64Histogram("rpc."+role+".response.size", metric.WithDescription("Measures size of RPC response messages (uncompressed)."), metric.WithUnit("By")) if err != nil { otel.Handle(err) - if c.rpcResponseSize == nil { - c.rpcResponseSize = noop.Int64Histogram{} + if rpcResponseSize == nil { + rpcResponseSize = noop.Int64Histogram{} } } - c.rpcRequestsPerRPC, err = c.meter.Int64Histogram("rpc."+role+".requests_per_rpc", + rpcRequestsPerRPC, err := c.meter.Int64Histogram("rpc."+role+".requests_per_rpc", metric.WithDescription("Measures the number of messages received per RPC. Should be 1 for all non-streaming RPCs."), metric.WithUnit("{count}")) if err != nil { otel.Handle(err) - if c.rpcRequestsPerRPC == nil { - c.rpcRequestsPerRPC = noop.Int64Histogram{} + if rpcRequestsPerRPC == nil { + rpcRequestsPerRPC = noop.Int64Histogram{} } } - c.rpcResponsesPerRPC, err = c.meter.Int64Histogram("rpc."+role+".responses_per_rpc", + rpcResponsesPerRPC, err := c.meter.Int64Histogram("rpc."+role+".responses_per_rpc", metric.WithDescription("Measures the number of messages received per RPC. Should be 1 for all non-streaming RPCs."), metric.WithUnit("{count}")) if err != nil { otel.Handle(err) - if c.rpcResponsesPerRPC == nil { - c.rpcResponsesPerRPC = noop.Int64Histogram{} + if rpcResponsesPerRPC == nil { + rpcResponsesPerRPC = noop.Int64Histogram{} } } + switch role { + case "client": + c.rpcInBytes = rpcResponseSize + c.rpcInMessages = rpcResponsesPerRPC + c.rpcOutBytes = rpcRequestSize + c.rpcOutMessages = rpcRequestsPerRPC + case "server": + c.rpcInBytes = rpcRequestSize + c.rpcInMessages = rpcRequestsPerRPC + c.rpcOutBytes = rpcResponseSize + c.rpcOutMessages = rpcResponsesPerRPC + default: + c.rpcInBytes = noop.Int64Histogram{} + c.rpcInMessages = noop.Int64Histogram{} + c.rpcOutBytes = noop.Int64Histogram{} + c.rpcOutMessages = noop.Int64Histogram{} + } + return c } @@ -257,3 +277,29 @@ func (o spanStartOption) apply(c *config) { func WithSpanOptions(opts ...trace.SpanStartOption) Option { return spanStartOption{opts} } + +type spanAttributesOption struct{ a []attribute.KeyValue } + +func (o spanAttributesOption) apply(c *config) { + if o.a != nil { + c.SpanAttributes = o.a + } +} + +// WithSpanAttributes returns an Option to add custom attributes to the spans. +func WithSpanAttributes(a ...attribute.KeyValue) Option { + return spanAttributesOption{a: a} +} + +type metricAttributesOption struct{ a []attribute.KeyValue } + +func (o metricAttributesOption) apply(c *config) { + if o.a != nil { + c.MetricAttributes = o.a + } +} + +// WithMetricAttributes returns an Option to add custom attributes to the metrics. 
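+//
+// A usage sketch (the attribute key and value are illustrative):
+//
+//	handler := otelgrpc.NewServerHandler(
+//		otelgrpc.WithMetricAttributes(attribute.String("service.tier", "backend")),
+//	)
+//	server := grpc.NewServer(grpc.StatsHandler(handler))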
+func WithMetricAttributes(a ...attribute.KeyValue) Option { + return metricAttributesOption{a: a} +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go index 7f19058e4c4..7d5ed058082 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go @@ -7,6 +7,7 @@ package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.g // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/rpc.md import ( "context" + "errors" "io" "net" "strconv" @@ -136,7 +137,7 @@ func (w *clientStream) RecvMsg(m interface{}) error { if err == nil && !w.desc.ServerStreams { w.endSpan(nil) - } else if err == io.EOF { + } else if errors.Is(err, io.EOF) { w.endSpan(nil) } else if err != nil { w.endSpan(err) @@ -333,7 +334,7 @@ func UnaryServerInterceptor(opts ...Option) grpc.UnaryServerInterceptor { elapsedTime := float64(time.Since(before)) / float64(time.Millisecond) metricAttrs = append(metricAttrs, grpcStatusCodeAttr) - cfg.rpcDuration.Record(ctx, elapsedTime, metric.WithAttributes(metricAttrs...)) + cfg.rpcDuration.Record(ctx, elapsedTime, metric.WithAttributeSet(attribute.NewSet(metricAttrs...))) return resp, err } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go index fad58733fec..c01cb897cd3 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go @@ -13,21 +13,22 @@ import ( "google.golang.org/grpc/stats" "google.golang.org/grpc/status" - "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/metric" semconv "go.opentelemetry.io/otel/semconv/v1.17.0" "go.opentelemetry.io/otel/trace" + + "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal" ) type gRPCContextKey struct{} type gRPCContext struct { - messagesReceived int64 - messagesSent int64 - metricAttrs []attribute.KeyValue - record bool + inMessages int64 + outMessages int64 + metricAttrs []attribute.KeyValue + record bool } type serverHandler struct { @@ -62,11 +63,11 @@ func (h *serverHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) cont trace.ContextWithRemoteSpanContext(ctx, trace.SpanContextFromContext(ctx)), name, trace.WithSpanKind(trace.SpanKindServer), - trace.WithAttributes(attrs...), + trace.WithAttributes(append(attrs, h.config.SpanAttributes...)...), ) gctx := gRPCContext{ - metricAttrs: attrs, + metricAttrs: append(attrs, h.config.MetricAttributes...), record: true, } if h.config.Filter != nil { @@ -102,11 +103,11 @@ func (h *clientHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) cont ctx, name, trace.WithSpanKind(trace.SpanKindClient), - trace.WithAttributes(attrs...), + trace.WithAttributes(append(attrs, h.config.SpanAttributes...)...), ) gctx := gRPCContext{ - metricAttrs: attrs, + metricAttrs: append(attrs, h.config.MetricAttributes...), record: true, } 
if h.config.Filter != nil { @@ -150,8 +151,8 @@ func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats, isServer bool case *stats.Begin: case *stats.InPayload: if gctx != nil { - messageId = atomic.AddInt64(&gctx.messagesReceived, 1) - c.rpcRequestSize.Record(ctx, int64(rs.Length), metric.WithAttributes(metricAttrs...)) + messageId = atomic.AddInt64(&gctx.inMessages, 1) + c.rpcInBytes.Record(ctx, int64(rs.Length), metric.WithAttributeSet(attribute.NewSet(metricAttrs...))) } if c.ReceivedEvent { @@ -166,8 +167,8 @@ func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats, isServer bool } case *stats.OutPayload: if gctx != nil { - messageId = atomic.AddInt64(&gctx.messagesSent, 1) - c.rpcResponseSize.Record(ctx, int64(rs.Length), metric.WithAttributes(metricAttrs...)) + messageId = atomic.AddInt64(&gctx.outMessages, 1) + c.rpcOutBytes.Record(ctx, int64(rs.Length), metric.WithAttributeSet(attribute.NewSet(metricAttrs...))) } if c.SentEvent { @@ -204,14 +205,17 @@ func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats, isServer bool span.End() metricAttrs = append(metricAttrs, rpcStatusAttr) + // Allocate vararg slice once. + recordOpts := []metric.RecordOption{metric.WithAttributeSet(attribute.NewSet(metricAttrs...))} // Use floating point division here for higher precision (instead of Millisecond method). + // Measure right before calling Record() to capture as much elapsed time as possible. elapsedTime := float64(rs.EndTime.Sub(rs.BeginTime)) / float64(time.Millisecond) - c.rpcDuration.Record(ctx, elapsedTime, metric.WithAttributes(metricAttrs...)) + c.rpcDuration.Record(ctx, elapsedTime, recordOpts...) if gctx != nil { - c.rpcRequestsPerRPC.Record(ctx, atomic.LoadInt64(&gctx.messagesReceived), metric.WithAttributes(metricAttrs...)) - c.rpcResponsesPerRPC.Record(ctx, atomic.LoadInt64(&gctx.messagesSent), metric.WithAttributes(metricAttrs...)) + c.rpcInMessages.Record(ctx, atomic.LoadInt64(&gctx.inMessages), recordOpts...) + c.rpcOutMessages.Record(ctx, atomic.LoadInt64(&gctx.outMessages), recordOpts...) } default: return diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go index 3f9cfda5413..25a3a86296a 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go @@ -5,7 +5,7 @@ package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.g // Version is the current release version of the gRPC instrumentation. func Version() string { - return "0.52.0" + return "0.58.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go index 6aae83bfd20..b25641c55d3 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go @@ -18,7 +18,7 @@ var DefaultClient = &http.Client{Transport: NewTransport(http.DefaultTransport)} // Get is a convenient replacement for http.Get that adds a span around the request. 
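The recurring change above swaps metric.WithAttributes for metric.WithAttributeSet(attribute.NewSet(...)), and handleRPC now allocates the vararg slice once as recordOpts and shares it across Record calls. A standalone sketch of that pattern (instrument names are made up):

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
)

func main() {
	meter := otel.Meter("example")
	duration, _ := meter.Float64Histogram("rpc.server.duration")       // hypothetical
	perRPCIn, _ := meter.Int64Histogram("rpc.server.requests_per_rpc") // hypothetical

	attrSet := attribute.NewSet(attribute.String("rpc.system", "grpc"))
	// Build the vararg slice once and reuse it for every Record call,
	// as handleRPC does with recordOpts in the hunk above.
	recordOpts := []metric.RecordOption{metric.WithAttributeSet(attrSet)}
	duration.Record(context.Background(), 12.5, recordOpts...)
	perRPCIn.Record(context.Background(), 1, recordOpts...)
}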
func Get(ctx context.Context, targetURL string) (resp *http.Response, err error) { - req, err := http.NewRequestWithContext(ctx, "GET", targetURL, nil) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, targetURL, nil) if err != nil { return nil, err } @@ -27,7 +27,7 @@ func Get(ctx context.Context, targetURL string) (resp *http.Response, err error) // Head is a convenient replacement for http.Head that adds a span around the request. func Head(ctx context.Context, targetURL string) (resp *http.Response, err error) { - req, err := http.NewRequestWithContext(ctx, "HEAD", targetURL, nil) + req, err := http.NewRequestWithContext(ctx, http.MethodHead, targetURL, nil) if err != nil { return nil, err } @@ -36,7 +36,7 @@ func Head(ctx context.Context, targetURL string) (resp *http.Response, err error // Post is a convenient replacement for http.Post that adds a span around the request. func Post(ctx context.Context, targetURL, contentType string, body io.Reader) (resp *http.Response, err error) { - req, err := http.NewRequestWithContext(ctx, "POST", targetURL, body) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, targetURL, body) if err != nil { return nil, err } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go index 5d6e6156b7b..a83a026274a 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go @@ -18,13 +18,6 @@ const ( WriteErrorKey = attribute.Key("http.write_error") // if an error occurred while writing a reply, the string of the error (io.EOF is not recorded) ) -// Client HTTP metrics. -const ( - clientRequestSize = "http.client.request.size" // Outgoing request bytes total - clientResponseSize = "http.client.response.size" // Outgoing response bytes total - clientDuration = "http.client.duration" // Outgoing end to end duration, milliseconds -) - // Filter is a predicate used to determine whether a given http.request should // be traced. A Filter must return true if the request should be traced. type Filter func(*http.Request) bool diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go index 33580a35b77..e555a475f13 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go @@ -81,12 +81,6 @@ func (h *middleware) configure(c *config) { h.semconv = semconv.NewHTTPServer(c.Meter) } -func handleErr(err error) { - if err != nil { - otel.Handle(err) - } -} - // serveHTTP sets up tracing and calls the given next http.Handler with the span // context injected into the request context. func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http.Handler) { @@ -123,6 +117,11 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http } } + if startTime := StartTimeFromContext(ctx); !startTime.IsZero() { + opts = append(opts, trace.WithTimestamp(startTime)) + requestStartTime = startTime + } + ctx, span := tracer.Start(ctx, h.spanNameFormatter(h.operation, r), opts...) 
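As an aside for reviewers, these wrappers are drop-in replacements for the net/http helpers; the only change here is using the http.Method* constants instead of string literals. A usage sketch (the URL is illustrative):

package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

func main() {
	// otelhttp.Get builds the request with http.MethodGet (per the hunk
	// above) and sends it through otelhttp.DefaultClient, so the call is
	// traced.
	resp, err := otelhttp.Get(context.Background(), "https://example.com/health")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}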
defer span.End() @@ -190,14 +189,18 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http // Use floating point division here for higher precision (instead of Millisecond method). elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond) - h.semconv.RecordMetrics(ctx, semconv.MetricData{ - ServerName: h.server, - Req: r, - StatusCode: statusCode, - AdditionalAttributes: labeler.Get(), - RequestSize: bw.BytesRead(), - ResponseSize: bytesWritten, - ElapsedTime: elapsedTime, + h.semconv.RecordMetrics(ctx, semconv.ServerMetricData{ + ServerName: h.server, + ResponseSize: bytesWritten, + MetricAttributes: semconv.MetricAttributes{ + Req: r, + StatusCode: statusCode, + AdditionalAttributes: labeler.Get(), + }, + MetricData: semconv.MetricData{ + RequestSize: bw.BytesRead(), + ElapsedTime: elapsedTime, + }, }) } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go index 9cae4cab86a..3b036f8a37b 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go @@ -9,6 +9,7 @@ import ( "net/http" "os" "strings" + "sync" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" @@ -50,9 +51,9 @@ type HTTPServer struct { // The req Host will be used to determine the server instead. func (s HTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { if s.duplicate { - return append(oldHTTPServer{}.RequestTraceAttrs(server, req), newHTTPServer{}.RequestTraceAttrs(server, req)...) + return append(OldHTTPServer{}.RequestTraceAttrs(server, req), CurrentHTTPServer{}.RequestTraceAttrs(server, req)...) } - return oldHTTPServer{}.RequestTraceAttrs(server, req) + return OldHTTPServer{}.RequestTraceAttrs(server, req) } // ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response. @@ -60,14 +61,14 @@ func (s HTTPServer) RequestTraceAttrs(server string, req *http.Request) []attrib // If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. func (s HTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { if s.duplicate { - return append(oldHTTPServer{}.ResponseTraceAttrs(resp), newHTTPServer{}.ResponseTraceAttrs(resp)...) + return append(OldHTTPServer{}.ResponseTraceAttrs(resp), CurrentHTTPServer{}.ResponseTraceAttrs(resp)...) } - return oldHTTPServer{}.ResponseTraceAttrs(resp) + return OldHTTPServer{}.ResponseTraceAttrs(resp) } // Route returns the attribute for the route. 
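The duplicate flag threaded through HTTPServer/HTTPClient in env.go is controlled by the OTEL_SEMCONV_STABILITY_OPT_IN environment variable. A sketch of opting a server into emitting both the v1.20.0 and v1.26.0 conventions (port and operation name are arbitrary):

package main

import (
	"net/http"
	"os"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

func main() {
	// "http/dup" is the value checked in NewHTTPServer/NewHTTPClient above;
	// it must be set before the handler (and its meters) are constructed.
	// Normally this comes from the process environment, not code.
	os.Setenv("OTEL_SEMCONV_STABILITY_OPT_IN", "http/dup")

	hello := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		_, _ = w.Write([]byte("ok"))
	})
	_ = http.ListenAndServe(":8080", otelhttp.NewHandler(hello, "hello"))
}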
func (s HTTPServer) Route(route string) attribute.KeyValue { - return oldHTTPServer{}.Route(route) + return OldHTTPServer{}.Route(route) } // Status returns a span status code and message for an HTTP status code @@ -83,29 +84,46 @@ func (s HTTPServer) Status(code int) (codes.Code, string) { return codes.Unset, "" } -type MetricData struct { - ServerName string +type ServerMetricData struct { + ServerName string + ResponseSize int64 + + MetricData + MetricAttributes +} + +type MetricAttributes struct { Req *http.Request StatusCode int AdditionalAttributes []attribute.KeyValue +} - RequestSize int64 - ResponseSize int64 - ElapsedTime float64 +type MetricData struct { + RequestSize int64 + ElapsedTime float64 +} + +var metricAddOptionPool = &sync.Pool{ + New: func() interface{} { + return &[]metric.AddOption{} + }, } -func (s HTTPServer) RecordMetrics(ctx context.Context, md MetricData) { +func (s HTTPServer) RecordMetrics(ctx context.Context, md ServerMetricData) { if s.requestBytesCounter == nil || s.responseBytesCounter == nil || s.serverLatencyMeasure == nil { - // This will happen if an HTTPServer{} is used insted of NewHTTPServer. + // This will happen if an HTTPServer{} is used instead of NewHTTPServer. return } - attributes := oldHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes) + attributes := OldHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes) o := metric.WithAttributeSet(attribute.NewSet(attributes...)) - addOpts := []metric.AddOption{o} // Allocate vararg slice once. - s.requestBytesCounter.Add(ctx, md.RequestSize, addOpts...) - s.responseBytesCounter.Add(ctx, md.ResponseSize, addOpts...) + addOpts := metricAddOptionPool.Get().(*[]metric.AddOption) + *addOpts = append(*addOpts, o) + s.requestBytesCounter.Add(ctx, md.RequestSize, *addOpts...) + s.responseBytesCounter.Add(ctx, md.ResponseSize, *addOpts...) s.serverLatencyMeasure.Record(ctx, md.ElapsedTime, o) + *addOpts = (*addOpts)[:0] + metricAddOptionPool.Put(addOpts) // TODO: Duplicate Metrics } @@ -116,34 +134,43 @@ func NewHTTPServer(meter metric.Meter) HTTPServer { server := HTTPServer{ duplicate: duplicate, } - server.requestBytesCounter, server.responseBytesCounter, server.serverLatencyMeasure = oldHTTPServer{}.createMeasures(meter) + server.requestBytesCounter, server.responseBytesCounter, server.serverLatencyMeasure = OldHTTPServer{}.createMeasures(meter) return server } type HTTPClient struct { duplicate bool + + // old metrics + requestBytesCounter metric.Int64Counter + responseBytesCounter metric.Int64Counter + latencyMeasure metric.Float64Histogram } -func NewHTTPClient() HTTPClient { +func NewHTTPClient(meter metric.Meter) HTTPClient { env := strings.ToLower(os.Getenv("OTEL_SEMCONV_STABILITY_OPT_IN")) - return HTTPClient{duplicate: env == "http/dup"} + client := HTTPClient{ + duplicate: env == "http/dup", + } + client.requestBytesCounter, client.responseBytesCounter, client.latencyMeasure = OldHTTPClient{}.createMeasures(meter) + return client } // RequestTraceAttrs returns attributes for an HTTP request made by a client. func (c HTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { if c.duplicate { - return append(oldHTTPClient{}.RequestTraceAttrs(req), newHTTPClient{}.RequestTraceAttrs(req)...) + return append(OldHTTPClient{}.RequestTraceAttrs(req), CurrentHTTPClient{}.RequestTraceAttrs(req)...) 
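The metricAddOptionPool above trades a per-request slice allocation for a sync.Pool round-trip. The same pattern in isolation (a sketch, not the vendored code; the counter name is hypothetical):

package main

import (
	"context"
	"sync"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
)

// Pool of option slices, mirroring metricAddOptionPool: each slice is
// reused across calls so the vararg allocation is amortized.
var addOptPool = sync.Pool{
	New: func() interface{} { return &[]metric.AddOption{} },
}

func addWithPooledOpts(ctx context.Context, c metric.Int64Counter, v int64, set attribute.Set) {
	opts := addOptPool.Get().(*[]metric.AddOption)
	*opts = append(*opts, metric.WithAttributeSet(set))
	c.Add(ctx, v, *opts...)
	*opts = (*opts)[:0] // keep capacity, drop contents before returning to pool
	addOptPool.Put(opts)
}

func main() {
	counter, _ := otel.Meter("example").Int64Counter("demo.bytes") // hypothetical
	addWithPooledOpts(context.Background(), counter, 64, attribute.NewSet(attribute.String("k", "v")))
}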
} - return oldHTTPClient{}.RequestTraceAttrs(req) + return OldHTTPClient{}.RequestTraceAttrs(req) } // ResponseTraceAttrs returns metric attributes for an HTTP request made by a client. func (c HTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { if c.duplicate { - return append(oldHTTPClient{}.ResponseTraceAttrs(resp), newHTTPClient{}.ResponseTraceAttrs(resp)...) + return append(OldHTTPClient{}.ResponseTraceAttrs(resp), CurrentHTTPClient{}.ResponseTraceAttrs(resp)...) } - return oldHTTPClient{}.ResponseTraceAttrs(resp) + return OldHTTPClient{}.ResponseTraceAttrs(resp) } func (c HTTPClient) Status(code int) (codes.Code, string) { @@ -158,8 +185,53 @@ func (c HTTPClient) Status(code int) (codes.Code, string) { func (c HTTPClient) ErrorType(err error) attribute.KeyValue { if c.duplicate { - return newHTTPClient{}.ErrorType(err) + return CurrentHTTPClient{}.ErrorType(err) } return attribute.KeyValue{} } + +type MetricOpts struct { + measurement metric.MeasurementOption + addOptions metric.AddOption +} + +func (o MetricOpts) MeasurementOption() metric.MeasurementOption { + return o.measurement +} + +func (o MetricOpts) AddOptions() metric.AddOption { + return o.addOptions +} + +func (c HTTPClient) MetricOptions(ma MetricAttributes) MetricOpts { + attributes := OldHTTPClient{}.MetricAttributes(ma.Req, ma.StatusCode, ma.AdditionalAttributes) + // TODO: Duplicate Metrics + set := metric.WithAttributeSet(attribute.NewSet(attributes...)) + return MetricOpts{ + measurement: set, + addOptions: set, + } +} + +func (s HTTPClient) RecordMetrics(ctx context.Context, md MetricData, opts MetricOpts) { + if s.requestBytesCounter == nil || s.latencyMeasure == nil { + // This will happen if an HTTPClient{} is used instead of NewHTTPClient(). + return + } + + s.requestBytesCounter.Add(ctx, md.RequestSize, opts.AddOptions()) + s.latencyMeasure.Record(ctx, md.ElapsedTime, opts.MeasurementOption()) + + // TODO: Duplicate Metrics +} + +func (s HTTPClient) RecordResponseSize(ctx context.Context, responseData int64, opts metric.AddOption) { + if s.responseBytesCounter == nil { + // This will happen if an HTTPClient{} is used instead of NewHTTPClient(). + return + } + + s.responseBytesCounter.Add(ctx, responseData, opts) + // TODO: Duplicate Metrics +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go index 745b8c67bc4..dc9ec7bc39e 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go @@ -14,7 +14,7 @@ import ( semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0" ) -type newHTTPServer struct{} +type CurrentHTTPServer struct{} // TraceRequest returns trace attributes for an HTTP request received by a // server. @@ -32,18 +32,18 @@ type newHTTPServer struct{} // // If the primary server name is not known, server should be an empty string. // The req Host will be used to determine the server instead. 
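Since internal/semconv cannot be imported by user code, here is the shape of the new MetricOptions flow in a neutral sketch: the attribute set is computed once per request, then reused as both an add option (counters) and a measurement option (the latency histogram), because WithAttributeSet returns a MeasurementOption that satisfies both interfaces:

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
)

func main() {
	meter := otel.Meter("example")
	reqBytes, _ := meter.Int64Counter("http.client.request.size")
	latency, _ := meter.Float64Histogram("http.client.duration")

	// One attribute set per request, reused for every instrument.
	set := metric.WithAttributeSet(attribute.NewSet(
		attribute.String("http.method", "GET"),
	))
	reqBytes.Add(context.Background(), 128, set)
	latency.Record(context.Background(), 3.2, set)
}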
-func (n newHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { +func (n CurrentHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { count := 3 // ServerAddress, Method, Scheme var host string var p int if server == "" { - host, p = splitHostPort(req.Host) + host, p = SplitHostPort(req.Host) } else { // Prioritize the primary server name. - host, p = splitHostPort(server) + host, p = SplitHostPort(server) if p < 0 { - _, p = splitHostPort(req.Host) + _, p = SplitHostPort(req.Host) } } @@ -59,7 +59,7 @@ func (n newHTTPServer) RequestTraceAttrs(server string, req *http.Request) []att scheme := n.scheme(req.TLS != nil) - if peer, peerPort := splitHostPort(req.RemoteAddr); peer != "" { + if peer, peerPort := SplitHostPort(req.RemoteAddr); peer != "" { // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a // file-path that would be interpreted with a sock family. count++ @@ -104,7 +104,7 @@ func (n newHTTPServer) RequestTraceAttrs(server string, req *http.Request) []att attrs = append(attrs, methodOriginal) } - if peer, peerPort := splitHostPort(req.RemoteAddr); peer != "" { + if peer, peerPort := SplitHostPort(req.RemoteAddr); peer != "" { // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a // file-path that would be interpreted with a sock family. attrs = append(attrs, semconvNew.NetworkPeerAddress(peer)) @@ -135,7 +135,7 @@ func (n newHTTPServer) RequestTraceAttrs(server string, req *http.Request) []att return attrs } -func (n newHTTPServer) method(method string) (attribute.KeyValue, attribute.KeyValue) { +func (n CurrentHTTPServer) method(method string) (attribute.KeyValue, attribute.KeyValue) { if method == "" { return semconvNew.HTTPRequestMethodGet, attribute.KeyValue{} } @@ -150,7 +150,7 @@ func (n newHTTPServer) method(method string) (attribute.KeyValue, attribute.KeyV return semconvNew.HTTPRequestMethodGet, orig } -func (n newHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive +func (n CurrentHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive if https { return semconvNew.URLScheme("https") } @@ -160,7 +160,7 @@ func (n newHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive // TraceResponse returns trace attributes for telemetry from an HTTP response. // // If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. -func (n newHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { +func (n CurrentHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { var count int if resp.ReadBytes > 0 { @@ -195,14 +195,14 @@ func (n newHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.Ke } // Route returns the attribute for the route. -func (n newHTTPServer) Route(route string) attribute.KeyValue { +func (n CurrentHTTPServer) Route(route string) attribute.KeyValue { return semconvNew.HTTPRoute(route) } -type newHTTPClient struct{} +type CurrentHTTPClient struct{} // RequestTraceAttrs returns trace attributes for an HTTP request made by a client. 
-func (n newHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { +func (n CurrentHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { /* below attributes are returned: - http.request.method @@ -222,7 +222,7 @@ func (n newHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue var requestHost string var requestPort int for _, hostport := range []string{urlHost, req.Header.Get("Host")} { - requestHost, requestPort = splitHostPort(hostport) + requestHost, requestPort = SplitHostPort(hostport) if requestHost != "" || requestPort > 0 { break } @@ -284,7 +284,7 @@ func (n newHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue } // ResponseTraceAttrs returns trace attributes for an HTTP response made by a client. -func (n newHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { +func (n CurrentHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { /* below attributes are returned: - http.response.status_code @@ -311,7 +311,7 @@ func (n newHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyVa return attrs } -func (n newHTTPClient) ErrorType(err error) attribute.KeyValue { +func (n CurrentHTTPClient) ErrorType(err error) attribute.KeyValue { t := reflect.TypeOf(err) var value string if t.PkgPath() == "" && t.Name() == "" { @@ -328,7 +328,7 @@ func (n newHTTPClient) ErrorType(err error) attribute.KeyValue { return semconvNew.ErrorTypeKey.String(value) } -func (n newHTTPClient) method(method string) (attribute.KeyValue, attribute.KeyValue) { +func (n CurrentHTTPClient) method(method string) (attribute.KeyValue, attribute.KeyValue) { if method == "" { return semconvNew.HTTPRequestMethodGet, attribute.KeyValue{} } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go index e6e14924f57..93e8d0f94c1 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go @@ -14,14 +14,14 @@ import ( semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0" ) -// splitHostPort splits a network address hostport of the form "host", +// SplitHostPort splits a network address hostport of the form "host", // "host%zone", "[host]", "[host%zone], "host:port", "host%zone:port", // "[host]:port", "[host%zone]:port", or ":port" into host or host%zone and // port. // // An empty host is returned if it is not provided or unparsable. A negative // port is returned if it is not provided or unparsable. 
-func splitHostPort(hostport string) (host string, port int) { +func SplitHostPort(hostport string) (host string, port int) { port = -1 if strings.HasPrefix(hostport, "[") { diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go index c999b05e675..c042249dd72 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go @@ -17,7 +17,7 @@ import ( semconv "go.opentelemetry.io/otel/semconv/v1.20.0" ) -type oldHTTPServer struct{} +type OldHTTPServer struct{} // RequestTraceAttrs returns trace attributes for an HTTP request received by a // server. @@ -35,14 +35,14 @@ type oldHTTPServer struct{} // // If the primary server name is not known, server should be an empty string. // The req Host will be used to determine the server instead. -func (o oldHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { +func (o OldHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { return semconvutil.HTTPServerRequest(server, req) } // ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response. // // If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. -func (o oldHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { +func (o OldHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { attributes := []attribute.KeyValue{} if resp.ReadBytes > 0 { @@ -67,7 +67,7 @@ func (o oldHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.Ke } // Route returns the attribute for the route. -func (o oldHTTPServer) Route(route string) attribute.KeyValue { +func (o OldHTTPServer) Route(route string) attribute.KeyValue { return semconv.HTTPRoute(route) } @@ -84,7 +84,7 @@ const ( serverDuration = "http.server.duration" // Incoming end to end duration, milliseconds ) -func (h oldHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) { +func (h OldHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) { if meter == nil { return noop.Int64Counter{}, noop.Int64Counter{}, noop.Float64Histogram{} } @@ -113,17 +113,17 @@ func (h oldHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Counter, return requestBytesCounter, responseBytesCounter, serverLatencyMeasure } -func (o oldHTTPServer) MetricAttributes(server string, req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { +func (o OldHTTPServer) MetricAttributes(server string, req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { n := len(additionalAttributes) + 3 var host string var p int if server == "" { - host, p = splitHostPort(req.Host) + host, p = SplitHostPort(req.Host) } else { // Prioritize the primary server name. 
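Now that splitHostPort is exported for reuse across the internal semconv versions, its contract is easiest to read as examples. A test-style sketch of the documented behavior (this would sit alongside util.go; it is not part of the patch):

package semconv

import "testing"

func TestSplitHostPortSketch(t *testing.T) {
	cases := []struct {
		in   string
		host string
		port int // -1 means "not provided or unparsable"
	}{
		{"example.com:8080", "example.com", 8080},
		{"[::1]:443", "::1", 443},
		{"example.com", "example.com", -1},
		{":8080", "", 8080},
		{"", "", -1},
	}
	for _, c := range cases {
		host, port := SplitHostPort(c.in)
		if host != c.host || port != c.port {
			t.Errorf("SplitHostPort(%q) = (%q, %d); want (%q, %d)", c.in, host, port, c.host, c.port)
		}
	}
}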
- host, p = splitHostPort(server) + host, p = SplitHostPort(server) if p < 0 { - _, p = splitHostPort(req.Host) + _, p = SplitHostPort(req.Host) } } hostPort := requiredHTTPPort(req.TLS != nil, p) @@ -144,7 +144,7 @@ func (o oldHTTPServer) MetricAttributes(server string, req *http.Request, status attributes := slices.Grow(additionalAttributes, n) attributes = append(attributes, - o.methodMetric(req.Method), + standardizeHTTPMethodMetric(req.Method), o.scheme(req.TLS != nil), semconv.NetHostName(host)) @@ -164,29 +164,111 @@ func (o oldHTTPServer) MetricAttributes(server string, req *http.Request, status return attributes } -func (o oldHTTPServer) methodMetric(method string) attribute.KeyValue { - method = strings.ToUpper(method) - switch method { - case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace: - default: - method = "_OTHER" - } - return semconv.HTTPMethod(method) -} - -func (o oldHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive +func (o OldHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive if https { return semconv.HTTPSchemeHTTPS } return semconv.HTTPSchemeHTTP } -type oldHTTPClient struct{} +type OldHTTPClient struct{} -func (o oldHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { +func (o OldHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { return semconvutil.HTTPClientRequest(req) } -func (o oldHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { +func (o OldHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { return semconvutil.HTTPClientResponse(resp) } + +func (o OldHTTPClient) MetricAttributes(req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { + /* The following semantic conventions are returned if present: + http.method string + http.status_code int + net.peer.name string + net.peer.port int + */ + + n := 2 // method, peer name. + var h string + if req.URL != nil { + h = req.URL.Host + } + var requestHost string + var requestPort int + for _, hostport := range []string{h, req.Header.Get("Host")} { + requestHost, requestPort = SplitHostPort(hostport) + if requestHost != "" || requestPort > 0 { + break + } + } + + port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", requestPort) + if port > 0 { + n++ + } + + if statusCode > 0 { + n++ + } + + attributes := slices.Grow(additionalAttributes, n) + attributes = append(attributes, + standardizeHTTPMethodMetric(req.Method), + semconv.NetPeerName(requestHost), + ) + + if port > 0 { + attributes = append(attributes, semconv.NetPeerPort(port)) + } + + if statusCode > 0 { + attributes = append(attributes, semconv.HTTPStatusCode(statusCode)) + } + return attributes +} + +// Client HTTP metrics. 
+const ( + clientRequestSize = "http.client.request.size" // Outgoing request bytes total + clientResponseSize = "http.client.response.size" // Outgoing response bytes total + clientDuration = "http.client.duration" // Outgoing end to end duration, milliseconds +) + +func (o OldHTTPClient) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) { + if meter == nil { + return noop.Int64Counter{}, noop.Int64Counter{}, noop.Float64Histogram{} + } + requestBytesCounter, err := meter.Int64Counter( + clientRequestSize, + metric.WithUnit("By"), + metric.WithDescription("Measures the size of HTTP request messages."), + ) + handleErr(err) + + responseBytesCounter, err := meter.Int64Counter( + clientResponseSize, + metric.WithUnit("By"), + metric.WithDescription("Measures the size of HTTP response messages."), + ) + handleErr(err) + + latencyMeasure, err := meter.Float64Histogram( + clientDuration, + metric.WithUnit("ms"), + metric.WithDescription("Measures the duration of outbound HTTP requests."), + ) + handleErr(err) + + return requestBytesCounter, responseBytesCounter, latencyMeasure +} + +func standardizeHTTPMethodMetric(method string) attribute.KeyValue { + method = strings.ToUpper(method) + switch method { + case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace: + default: + method = "_OTHER" + } + return semconv.HTTPMethod(method) +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/start_time_context.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/start_time_context.go new file mode 100644 index 00000000000..9476ef01b01 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/start_time_context.go @@ -0,0 +1,29 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + +import ( + "context" + "time" +) + +type startTimeContextKeyType int + +const startTimeContextKey startTimeContextKeyType = 0 + +// ContextWithStartTime returns a new context with the provided start time. The +// start time will be used for metrics and traces emitted by the +// instrumentation. Only one start time can be injected into the context. +// Injecting it multiple times will override the previous calls. +func ContextWithStartTime(parent context.Context, start time.Time) context.Context { + return context.WithValue(parent, startTimeContextKey, start) +} + +// StartTimeFromContext retrieves a time.Time from the provided context if one +// is available. If no start time was found in the provided context, a zero +// start time is returned.
+func StartTimeFromContext(ctx context.Context) time.Time { + t, _ := ctx.Value(startTimeContextKey).(time.Time) + return t +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go index b4119d3438b..39681ad4b09 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go @@ -13,11 +13,9 @@ import ( "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" - "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" - "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/propagation" "go.opentelemetry.io/otel/trace" @@ -29,7 +27,6 @@ type Transport struct { rt http.RoundTripper tracer trace.Tracer - meter metric.Meter propagators propagation.TextMapPropagator spanStartOptions []trace.SpanStartOption filters []Filter @@ -37,10 +34,7 @@ type Transport struct { clientTrace func(context.Context) *httptrace.ClientTrace metricAttributesFn func(*http.Request) []attribute.KeyValue - semconv semconv.HTTPClient - requestBytesCounter metric.Int64Counter - responseBytesCounter metric.Int64Counter - latencyMeasure metric.Float64Histogram + semconv semconv.HTTPClient } var _ http.RoundTripper = &Transport{} @@ -57,8 +51,7 @@ func NewTransport(base http.RoundTripper, opts ...Option) *Transport { } t := Transport{ - rt: base, - semconv: semconv.NewHTTPClient(), + rt: base, } defaultOpts := []Option{ @@ -68,46 +61,21 @@ func NewTransport(base http.RoundTripper, opts ...Option) *Transport { c := newConfig(append(defaultOpts, opts...)...) t.applyConfig(c) - t.createMeasures() return &t } func (t *Transport) applyConfig(c *config) { t.tracer = c.Tracer - t.meter = c.Meter t.propagators = c.Propagators t.spanStartOptions = c.SpanStartOptions t.filters = c.Filters t.spanNameFormatter = c.SpanNameFormatter t.clientTrace = c.ClientTrace + t.semconv = semconv.NewHTTPClient(c.Meter) t.metricAttributesFn = c.MetricAttributesFn } -func (t *Transport) createMeasures() { - var err error - t.requestBytesCounter, err = t.meter.Int64Counter( - clientRequestSize, - metric.WithUnit("By"), - metric.WithDescription("Measures the size of HTTP request messages."), - ) - handleErr(err) - - t.responseBytesCounter, err = t.meter.Int64Counter( - clientResponseSize, - metric.WithUnit("By"), - metric.WithDescription("Measures the size of HTTP response messages."), - ) - handleErr(err) - - t.latencyMeasure, err = t.meter.Float64Histogram( - clientDuration, - metric.WithUnit("ms"), - metric.WithDescription("Measures the duration of outbound HTTP requests."), - ) - handleErr(err) -} - func defaultTransportFormatter(_ string, r *http.Request) string { return "HTTP " + r.Method } @@ -177,16 +145,15 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { } // metrics - metricAttrs := append(append(labeler.Get(), semconvutil.HTTPClientRequestMetrics(r)...), t.metricAttributesFromRequest(r)...) 
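Editor's note on the new start-time hook completed above: a caller that accepted work earlier (for example a queueing front end) can backdate the span and the duration metric via the request context before the otelhttp middleware runs. A sketch; acceptedAt and the handler wiring are illustrative:

package main

import (
	"net/http"
	"time"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

func main() {
	inner := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		_, _ = w.Write([]byte("ok"))
	})
	instrumented := otelhttp.NewHandler(inner, "work")

	// The outer handler runs first and stashes the earlier accept time,
	// which serveHTTP picks up via StartTimeFromContext (handler.go hunk).
	outer := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		acceptedAt := time.Now().Add(-250 * time.Millisecond) // illustrative
		ctx := otelhttp.ContextWithStartTime(r.Context(), acceptedAt)
		instrumented.ServeHTTP(w, r.WithContext(ctx))
	})
	_ = http.ListenAndServe(":8080", outer)
}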
- if res.StatusCode > 0 { - metricAttrs = append(metricAttrs, semconv.HTTPStatusCode(res.StatusCode)) - } - o := metric.WithAttributeSet(attribute.NewSet(metricAttrs...)) + metricOpts := t.semconv.MetricOptions(semconv.MetricAttributes{ + Req: r, + StatusCode: res.StatusCode, + AdditionalAttributes: append(labeler.Get(), t.metricAttributesFromRequest(r)...), + }) - t.requestBytesCounter.Add(ctx, bw.BytesRead(), o) // For handling response bytes we leverage a callback when the client reads the http response readRecordFunc := func(n int64) { - t.responseBytesCounter.Add(ctx, n, o) + t.semconv.RecordResponseSize(ctx, n, metricOpts.AddOptions()) } // traces @@ -198,9 +165,12 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { // Use floating point division here for higher precision (instead of Millisecond method). elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond) - t.latencyMeasure.Record(ctx, elapsedTime, o) + t.semconv.RecordMetrics(ctx, semconv.MetricData{ + RequestSize: bw.BytesRead(), + ElapsedTime: elapsedTime, + }, metricOpts) - return res, err + return res, nil } func (t *Transport) metricAttributesFromRequest(r *http.Request) []attribute.KeyValue { diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go index 1133961d393..353e43b91fd 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go @@ -5,7 +5,7 @@ package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http // Version is the current release version of the otelhttp instrumentation. 
func Version() string { - return "0.55.0" + return "0.58.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/contrib/propagators/b3/b3_propagator.go b/vendor/go.opentelemetry.io/contrib/propagators/b3/b3_propagator.go index 905cd71fe8c..45d0811a26e 100644 --- a/vendor/go.opentelemetry.io/contrib/propagators/b3/b3_propagator.go +++ b/vendor/go.opentelemetry.io/contrib/propagators/b3/b3_propagator.go @@ -262,11 +262,11 @@ func extractSingle(ctx context.Context, contextHeader string) (context.Context, case string(contextHeader[traceID64BitsWidth]) == "-": // traceID must be 64 bits pos += traceID64BitsWidth // {traceID} - traceID = b3TraceIDPadding + string(contextHeader[0:pos]) + traceID = b3TraceIDPadding + contextHeader[0:pos] case string(contextHeader[32]) == "-": // traceID must be 128 bits pos += traceID128BitsWidth // {traceID} - traceID = string(contextHeader[0:pos]) + traceID = contextHeader[0:pos] default: return ctx, empty, errInvalidTraceIDValue } @@ -277,6 +277,9 @@ func extractSingle(ctx context.Context, contextHeader string) (context.Context, } pos += separatorWidth // {traceID}- + if headerLen < pos+spanIDWidth { + return ctx, empty, errInvalidSpanIDValue + } scc.SpanID, err = trace.SpanIDFromHex(contextHeader[pos : pos+spanIDWidth]) if err != nil { return ctx, empty, errInvalidSpanIDValue diff --git a/vendor/go.opentelemetry.io/contrib/propagators/b3/version.go b/vendor/go.opentelemetry.io/contrib/propagators/b3/version.go index 18932c8441a..cb5239ed692 100644 --- a/vendor/go.opentelemetry.io/contrib/propagators/b3/version.go +++ b/vendor/go.opentelemetry.io/contrib/propagators/b3/version.go @@ -5,7 +5,7 @@ package b3 // import "go.opentelemetry.io/contrib/propagators/b3" // Version is the current release version of the B3 propagator. func Version() string { - return "1.27.0" + return "1.33.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/otel/.gitignore b/vendor/go.opentelemetry.io/otel/.gitignore index 895c7664beb..ae8577ef366 100644 --- a/vendor/go.opentelemetry.io/otel/.gitignore +++ b/vendor/go.opentelemetry.io/otel/.gitignore @@ -12,11 +12,3 @@ go.work go.work.sum gen/ - -/example/dice/dice -/example/namedtracer/namedtracer -/example/otel-collector/otel-collector -/example/opencensus/opencensus -/example/passthrough/passthrough -/example/prometheus/prometheus -/example/zipkin/zipkin diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml index d09555506f7..ce3f40b609c 100644 --- a/vendor/go.opentelemetry.io/otel/.golangci.yml +++ b/vendor/go.opentelemetry.io/otel/.golangci.yml @@ -22,6 +22,7 @@ linters: - govet - ineffassign - misspell + - perfsprint - revive - staticcheck - tenv @@ -30,6 +31,7 @@ linters: - unconvert - unused - unparam + - usestdlibvars issues: # Maximum issues count per one linter. @@ -61,10 +63,11 @@ issues: text: "calls to (.+) only in main[(][)] or init[(][)] functions" linters: - revive - # It's okay to not run gosec in a test. + # It's okay to not run gosec and perfsprint in a test. - path: _test\.go linters: - gosec + - perfsprint # Ignoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand) # as we commonly use it in tests and examples. 
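On the b3_propagator.go hunk above: the added length check rejects single-header values that end after the trace ID, where the old code sliced contextHeader[pos : pos+spanIDWidth] unguarded. A sketch of the failure shape (the header value is deliberately truncated):

package main

import (
	"context"
	"fmt"
	"net/http"

	"go.opentelemetry.io/contrib/propagators/b3"
	"go.opentelemetry.io/otel/propagation"
	"go.opentelemetry.io/otel/trace"
)

func main() {
	prop := b3.New(b3.WithInjectEncoding(b3.B3SingleHeader))
	h := http.Header{}
	// 64-bit trace ID followed by the separator but no span ID.
	h.Set("b3", "a3ce929d0e0e4736-")
	ctx := prop.Extract(context.Background(), propagation.HeaderCarrier(h))
	// Extraction fails closed: no remote span context is installed.
	fmt.Println(trace.SpanContextFromContext(ctx).IsValid()) // false
}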
- text: "G404:" @@ -95,6 +98,13 @@ linters-settings: - pkg: "crypto/md5" - pkg: "crypto/sha1" - pkg: "crypto/**/pkix" + auto/sdk: + files: + - "!internal/global/trace.go" + - "~internal/global/trace_test.go" + deny: + - pkg: "go.opentelemetry.io/auto/sdk" + desc: Do not use SDK from automatic instrumentation. otlp-internal: files: - "!**/exporters/otlp/internal/**/*.go" @@ -127,8 +137,6 @@ linters-settings: - "**/metric/**/*.go" - "**/bridge/*.go" - "**/bridge/**/*.go" - - "**/example/*.go" - - "**/example/**/*.go" - "**/trace/*.go" - "**/trace/**/*.go" - "**/log/*.go" @@ -156,6 +164,12 @@ linters-settings: locale: US ignore-words: - cancelled + perfsprint: + err-error: true + errorf: true + int-conversion: true + sprintf1: true + strconcat: true revive: # Sets the default failure confidence. # This means that linting errors with less than 0.8 confidence will be ignored. diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md index 4b361d0269c..a30988f25d0 100644 --- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -8,9 +8,84 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm ## [Unreleased] +## [1.33.0/0.55.0/0.9.0/0.0.12] 2024-12-12 + +### Added + +- Add `Reset` method to `SpanRecorder` in `go.opentelemetry.io/otel/sdk/trace/tracetest`. (#5994) +- Add `EnabledInstrument` interface in `go.opentelemetry.io/otel/sdk/metric/internal/x`. + This is an experimental interface that is implemented by synchronous instruments provided by `go.opentelemetry.io/otel/sdk/metric`. + Users can use it to avoid performing computationally expensive operations when recording measurements. + It does not fall within the scope of the OpenTelemetry Go versioning and stability [policy](./VERSIONING.md) and it may be changed in backwards incompatible ways or removed in feature releases. (#6016) + +### Changed + +- The default global API now supports full auto-instrumentation from the `go.opentelemetry.io/auto` package. + See that package for more information. (#5920) +- Propagate non-retryable error messages to client in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5929) +- Propagate non-retryable error messages to client in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5929) +- Propagate non-retryable error messages to client in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5929) +- Performance improvements for attribute value `AsStringSlice`, `AsFloat64Slice`, `AsInt64Slice`, `AsBoolSlice`. (#6011) +- Change `EnabledParameters` to have a `Severity` field instead of a getter and setter in `go.opentelemetry.io/otel/log`. (#6009) + +### Fixed + +- Fix inconsistent request body closing in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5954) +- Fix inconsistent request body closing in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5954) +- Fix inconsistent request body closing in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5954) +- Fix invalid exemplar keys in `go.opentelemetry.io/otel/exporters/prometheus`. (#5995) +- Fix attribute value truncation in `go.opentelemetry.io/otel/sdk/trace`. (#5997) +- Fix attribute value truncation in `go.opentelemetry.io/otel/sdk/log`. (#6032) + +## [1.32.0/0.54.0/0.8.0/0.0.11] 2024-11-08 + +### Added + +- Add `go.opentelemetry.io/otel/sdk/metric/exemplar.AlwaysOffFilter`, which can be used to disable exemplar recording. 
(#5850) +- Add `go.opentelemetry.io/otel/sdk/metric.WithExemplarFilter`, which can be used to configure the exemplar filter used by the metrics SDK. (#5850) +- Add `ExemplarReservoirProviderSelector` and `DefaultExemplarReservoirProviderSelector` to `go.opentelemetry.io/otel/sdk/metric`, which defines the exemplar reservoir to use based on the aggregation of the metric. (#5861) +- Add `ExemplarReservoirProviderSelector` to `go.opentelemetry.io/otel/sdk/metric.Stream` to allow using views to configure the exemplar reservoir to use for a metric. (#5861) +- Add `ReservoirProvider`, `HistogramReservoirProvider` and `FixedSizeReservoirProvider` to `go.opentelemetry.io/otel/sdk/metric/exemplar` to make it convenient to use providers of Reservoirs. (#5861) +- The `go.opentelemetry.io/otel/semconv/v1.27.0` package. + The package contains semantic conventions from the `v1.27.0` version of the OpenTelemetry Semantic Conventions. (#5894) +- Add `Attributes attribute.Set` field to `Scope` in `go.opentelemetry.io/otel/sdk/instrumentation`. (#5903) +- Add `Attributes attribute.Set` field to `ScopeRecords` in `go.opentelemetry.io/otel/log/logtest`. (#5927) +- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` adds instrumentation scope attributes. (#5934) +- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` adds instrumentation scope attributes. (#5934) +- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` adds instrumentation scope attributes. (#5935) +- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` adds instrumentation scope attributes. (#5935) +- `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc` adds instrumentation scope attributes. (#5933) +- `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` adds instrumentation scope attributes. (#5933) +- `go.opentelemetry.io/otel/exporters/prometheus` adds instrumentation scope attributes in `otel_scope_info` metric as labels. (#5932) + +### Changed + +- Support scope attributes and make them as identifying for `Tracer` in `go.opentelemetry.io/otel` and `go.opentelemetry.io/otel/sdk/trace`. (#5924) +- Support scope attributes and make them as identifying for `Meter` in `go.opentelemetry.io/otel` and `go.opentelemetry.io/otel/sdk/metric`. (#5926) +- Support scope attributes and make them as identifying for `Logger` in `go.opentelemetry.io/otel` and `go.opentelemetry.io/otel/sdk/log`. (#5925) +- Make schema URL and scope attributes as identifying for `Tracer` in `go.opentelemetry.io/otel/bridge/opentracing`. (#5931) +- Clear unneeded slice elements to allow GC to collect the objects in `go.opentelemetry.io/otel/sdk/metric` and `go.opentelemetry.io/otel/sdk/trace`. (#5804) + +### Fixed + +- Global MeterProvider registration unwraps global instrument Observers, the undocumented Unwrap() methods are now private. (#5881) +- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` now keeps the metadata already present in the context when `WithHeaders` is used. (#5892) +- `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc` now keeps the metadata already present in the context when `WithHeaders` is used. (#5911) +- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` now keeps the metadata already present in the context when `WithHeaders` is used. (#5915) +- Fix `go.opentelemetry.io/otel/exporters/prometheus` trying to add exemplars to Gauge metrics, which is unsupported. 
(#5912) +- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#5944) +- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5944) +- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#5944) +- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5944) +- Fix incorrect metrics generated from callbacks when multiple readers are used in `go.opentelemetry.io/otel/sdk/metric`. (#5900) + +### Removed + +- Remove all examples under `go.opentelemetry.io/otel/example` as they are moved to [Contrib repository](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/examples). (#5930) + ## [1.31.0/0.53.0/0.7.0/0.0.10] 2024-10-11 ### Added @@ -3110,7 +3185,9 @@ It contains api and sdk for trace and meter. - CircleCI build CI manifest files. - CODEOWNERS file to track owners of this project. -[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.31.0...HEAD +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.33.0...HEAD +[1.33.0/0.55.0/0.9.0/0.0.12]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.33.0 +[1.32.0/0.54.0/0.8.0/0.0.11]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.32.0 [1.31.0/0.53.0/0.7.0/0.0.10]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.31.0 [1.30.0/0.52.0/0.6.0/0.0.9]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.30.0 [1.29.0/0.51.0/0.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.29.0 diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md index bb339655743..22a2e9dbd49 100644 --- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md +++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md @@ -629,6 +629,10 @@ should be canceled. ## Approvers and Maintainers +### Triagers + +- [Cheng-Zhen Yang](https://github.com/scorpionknifes), Independent + ### Approvers ### Maintainers @@ -641,13 +645,13 @@ should be canceled. 
### Emeritus -- [Aaron Clawson](https://github.com/MadVikingGod), LightStep -- [Anthony Mirabella](https://github.com/Aneurysm9), AWS -- [Chester Cheung](https://github.com/hanyuancheung), Tencent -- [Evan Torrie](https://github.com/evantorrie), Yahoo -- [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep -- [Josh MacDonald](https://github.com/jmacd), LightStep -- [Liz Fong-Jones](https://github.com/lizthegrey), Honeycomb +- [Aaron Clawson](https://github.com/MadVikingGod) +- [Anthony Mirabella](https://github.com/Aneurysm9) +- [Chester Cheung](https://github.com/hanyuancheung) +- [Evan Torrie](https://github.com/evantorrie) +- [Gustavo Silva Paiva](https://github.com/paivagustavo) +- [Josh MacDonald](https://github.com/jmacd) +- [Liz Fong-Jones](https://github.com/lizthegrey) ### Become an Approver or a Maintainer diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile index a1228a21240..a7f6d8cc688 100644 --- a/vendor/go.opentelemetry.io/otel/Makefile +++ b/vendor/go.opentelemetry.io/otel/Makefile @@ -14,8 +14,8 @@ TIMEOUT = 60 .DEFAULT_GOAL := precommit .PHONY: precommit ci -precommit: generate license-check misspell go-mod-tidy golangci-lint-fix verify-readmes verify-mods test-default -ci: generate license-check lint vanity-import-check verify-readmes verify-mods build test-default check-clean-work-tree test-coverage +precommit: generate toolchain-check license-check misspell go-mod-tidy golangci-lint-fix verify-readmes verify-mods test-default +ci: generate toolchain-check license-check lint vanity-import-check verify-readmes verify-mods build test-default check-clean-work-tree test-coverage # Tools @@ -235,6 +235,16 @@ govulncheck/%: $(GOVULNCHECK) codespell: $(CODESPELL) @$(DOCKERPY) $(CODESPELL) +.PHONY: toolchain-check +toolchain-check: + @toolchainRes=$$(for f in $(ALL_GO_MOD_DIRS); do \ + awk '/^toolchain/ { found=1; next } END { if (found) print FILENAME }' $$f/go.mod; \ + done); \ + if [ -n "$${toolchainRes}" ]; then \ + echo "toolchain checking failed:"; echo "$${toolchainRes}"; \ + exit 1; \ + fi + .PHONY: license-check license-check: @licRes=$$(for f in $$(find . -type f \( -iname '*.go' -o -iname '*.sh' \) ! -path '**/third_party/*' ! -path './.git/*' ) ; do \ @@ -260,7 +270,7 @@ SEMCONVPKG ?= "semconv/" semconv-generate: $(SEMCONVGEN) $(SEMCONVKIT) [ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry semantic-conventions tag"; exit 1 ) [ "$(OTEL_SEMCONV_REPO)" ] || ( echo "OTEL_SEMCONV_REPO unset: missing path to opentelemetry semantic-conventions repo"; exit 1 ) - $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=attribute_group -p conventionType=trace -f attribute_group.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" + $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=attribute_group -p conventionType=trace -f attribute_group.go -z "$(SEMCONVPKG)/capitalizations.txt" -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=metric -f metric.go -t "$(SEMCONVPKG)/metric_template.j2" -s "$(TAG)" $(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)" diff --git a/vendor/go.opentelemetry.io/otel/VERSIONING.md b/vendor/go.opentelemetry.io/otel/VERSIONING.md index 412f1e362bb..b8cb605c166 100644 --- a/vendor/go.opentelemetry.io/otel/VERSIONING.md +++ b/vendor/go.opentelemetry.io/otel/VERSIONING.md @@ -26,7 +26,7 @@ is designed so the following goals can be achieved. 
go.opentelemetry.io/otel/v2 v2.0.1`) and in the package import path (e.g., `import "go.opentelemetry.io/otel/v2/trace"`). This includes the paths used in `go get` commands (e.g., `go get - go.opentelemetry.io/otel/v2@v2.0.1`. Note there is both a `/v2` and a + go.opentelemetry.io/otel/v2@v2.0.1`). Note there is both a `/v2` and a `@v2.0.1` in that example. One way to think about it is that the module name now includes the `/v2`, so include `/v2` whenever you are using the module name). diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go index 36f5367030c..0e1fe242203 100644 --- a/vendor/go.opentelemetry.io/otel/baggage/baggage.go +++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go @@ -355,7 +355,7 @@ func parseMember(member string) (Member, error) { } // replaceInvalidUTF8Sequences replaces invalid UTF-8 sequences with '�'. -func replaceInvalidUTF8Sequences(cap int, unescapeVal string) string { +func replaceInvalidUTF8Sequences(c int, unescapeVal string) string { if utf8.ValidString(unescapeVal) { return unescapeVal } @@ -363,7 +363,7 @@ func replaceInvalidUTF8Sequences(cap int, unescapeVal string) string { // https://github.com/w3c/baggage/blob/8c215efbeebd3fa4b1aceb937a747e56444f22f3/baggage/HTTP_HEADER_FORMAT.md?plain=1#L69 var b strings.Builder - b.Grow(cap) + b.Grow(c) for i := 0; i < len(unescapeVal); { r, size := utf8.DecodeRuneInString(unescapeVal[i:]) if r == utf8.RuneError && size == 1 { diff --git a/vendor/go.opentelemetry.io/otel/bridge/opencensus/internal/oc2otel/span_context.go b/vendor/go.opentelemetry.io/otel/bridge/opencensus/internal/oc2otel/span_context.go index 18c80c9b1ba..2a866f7f8bd 100644 --- a/vendor/go.opentelemetry.io/otel/bridge/opencensus/internal/oc2otel/span_context.go +++ b/vendor/go.opentelemetry.io/otel/bridge/opencensus/internal/oc2otel/span_context.go @@ -4,6 +4,8 @@ package oc2otel // import "go.opentelemetry.io/otel/bridge/opencensus/internal/oc2otel" import ( + "slices" + octrace "go.opencensus.io/trace" "go.opentelemetry.io/otel/trace" @@ -14,9 +16,19 @@ func SpanContext(sc octrace.SpanContext) trace.SpanContext { if sc.IsSampled() { traceFlags = trace.FlagsSampled } + + entries := slices.Clone(sc.Tracestate.Entries()) + slices.Reverse(entries) + + tsOtel := trace.TraceState{} + for _, entry := range entries { + tsOtel, _ = tsOtel.Insert(entry.Key, entry.Value) + } + return trace.NewSpanContext(trace.SpanContextConfig{ TraceID: trace.TraceID(sc.TraceID), SpanID: trace.SpanID(sc.SpanID), TraceFlags: traceFlags, + TraceState: tsOtel, }) } diff --git a/vendor/go.opentelemetry.io/otel/bridge/opencensus/internal/ocmetric/metric.go b/vendor/go.opentelemetry.io/otel/bridge/opencensus/internal/ocmetric/metric.go index 4d053e8016d..f38598bbfda 100644 --- a/vendor/go.opentelemetry.io/otel/bridge/opencensus/internal/ocmetric/metric.go +++ b/vendor/go.opentelemetry.io/otel/bridge/opencensus/internal/ocmetric/metric.go @@ -144,7 +144,7 @@ func convertHistogram(labelKeys []ocmetricdata.LabelKey, ts []*ocmetricdata.Time Attributes: attrs, StartTime: t.StartTime, Time: p.Time, - Count: uint64(dist.Count), + Count: uint64(max(0, dist.Count)), // nolint:gosec // A count should never be negative. 
Sum: dist.Sum, Bounds: dist.BucketOptions.Bounds, BucketCounts: bucketCounts, @@ -166,7 +166,7 @@ func convertBuckets(buckets []ocmetricdata.Bucket) ([]uint64, []metricdata.Exemp err = errors.Join(err, fmt.Errorf("%w: %q", errNegativeBucketCount, bucket.Count)) continue } - bucketCounts[i] = uint64(bucket.Count) + bucketCounts[i] = uint64(max(0, bucket.Count)) // nolint:gosec // A count should never be negative. if bucket.Exemplar != nil { exemplar, exemplarErr := convertExemplar(bucket.Exemplar) @@ -233,7 +233,7 @@ func convertKV(key string, value any) attribute.KeyValue { case uintptr: return uint64KV(key, uint64(typedVal)) case uint64: - return uint64KV(key, uint64(typedVal)) + return uint64KV(key, typedVal) case float32: return attribute.Float64(key, float64(typedVal)) case float64: @@ -301,7 +301,7 @@ func uintKV(key string, val uint) attribute.KeyValue { if val > uint(math.MaxInt) { return attribute.String(key, strconv.FormatUint(uint64(val), 10)) } - return attribute.Int(key, int(val)) + return attribute.Int(key, int(val)) // nolint: gosec // Overflow checked above. } func uintSliceKV[N uint | uint8 | uint16 | uint32 | uint64 | uintptr](key string, val []N) attribute.KeyValue { @@ -317,7 +317,7 @@ func uint64KV(key string, val uint64) attribute.KeyValue { if val > maxInt64 { return attribute.String(key, strconv.FormatUint(val, 10)) } - return attribute.Int64(key, int64(val)) + return attribute.Int64(key, int64(val)) // nolint: gosec // Overflow checked above. } func complexSliceKV[N complex64 | complex128](key string, val []N) attribute.KeyValue { @@ -357,7 +357,7 @@ func convertSummary(labelKeys []ocmetricdata.LabelKey, ts []*ocmetricdata.TimeSe Attributes: attrs, StartTime: t.StartTime, Time: p.Time, - Count: uint64(summary.Count), + Count: uint64(max(0, summary.Count)), // nolint:gosec // A count should never be negative. QuantileValues: convertQuantiles(summary.Snapshot), Sum: summary.Sum, } diff --git a/vendor/go.opentelemetry.io/otel/bridge/opencensus/internal/otel2oc/span_context.go b/vendor/go.opentelemetry.io/otel/bridge/opencensus/internal/otel2oc/span_context.go index 74dcc90b8dc..f9e16bedbcc 100644 --- a/vendor/go.opentelemetry.io/otel/bridge/opencensus/internal/otel2oc/span_context.go +++ b/vendor/go.opentelemetry.io/otel/bridge/opencensus/internal/otel2oc/span_context.go @@ -5,6 +5,7 @@ package otel2oc // import "go.opentelemetry.io/otel/bridge/opencensus/internal/o import ( octrace "go.opencensus.io/trace" + "go.opencensus.io/trace/tracestate" "go.opentelemetry.io/otel/trace" ) @@ -15,9 +16,18 @@ func SpanContext(sc trace.SpanContext) octrace.SpanContext { // OpenCensus doesn't expose functions to directly set sampled to = 0x1 } + + entries := make([]tracestate.Entry, 0, sc.TraceState().Len()) + sc.TraceState().Walk(func(key, value string) bool { + entries = append(entries, tracestate.Entry{Key: key, Value: value}) + return true + }) + tsOc, _ := tracestate.New(nil, entries...) 
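The max(0, ...) guards and // nolint:gosec annotations in the ocmetric hunks all follow one rule: only convert once the bounds are known. A standalone sketch of the uint64-to-attribute half of that rule (mirroring uint64KV above; not the vendored code):

package main

import (
	"fmt"
	"math"
	"strconv"

	"go.opentelemetry.io/otel/attribute"
)

// Values that fit in int64 become integer attributes; larger values fall
// back to a string so nothing silently wraps.
func uint64Attr(key string, val uint64) attribute.KeyValue {
	if val > math.MaxInt64 {
		return attribute.String(key, strconv.FormatUint(val, 10))
	}
	return attribute.Int64(key, int64(val)) // safe: bounds checked above
}

func main() {
	fmt.Println(uint64Attr("n", 42))
	fmt.Println(uint64Attr("n", math.MaxUint64))
}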
+ return octrace.SpanContext{ TraceID: octrace.TraceID(sc.TraceID()), SpanID: octrace.SpanID(sc.SpanID()), TraceOptions: to, + Tracestate: tsOc, } } diff --git a/vendor/go.opentelemetry.io/otel/bridge/opencensus/internal/span.go b/vendor/go.opentelemetry.io/otel/bridge/opencensus/internal/span.go index 9e7ee39fb5b..3ddef4d037e 100644 --- a/vendor/go.opentelemetry.io/otel/bridge/opencensus/internal/span.go +++ b/vendor/go.opentelemetry.io/otel/bridge/opencensus/internal/span.go @@ -61,7 +61,7 @@ func (s *Span) SetName(name string) { // SetStatus sets the status of this span, if it is recording events. func (s *Span) SetStatus(status octrace.Status) { - s.otelSpan.SetStatus(codes.Code(status.Code), status.Message) + s.otelSpan.SetStatus(codes.Code(max(0, status.Code)), status.Message) // nolint:gosec // Overflow checked. } // AddAttributes sets attributes in this span. @@ -128,5 +128,5 @@ func (s *Span) AddLink(l octrace.Link) { // String prints a string representation of this span. func (s *Span) String() string { - return fmt.Sprintf("span %s", s.otelSpan.SpanContext().SpanID().String()) + return "span " + s.otelSpan.SpanContext().SpanID().String() } diff --git a/vendor/go.opentelemetry.io/otel/bridge/opencensus/internal/tracer.go b/vendor/go.opentelemetry.io/otel/bridge/opencensus/internal/tracer.go index 2454f27091c..13164684525 100644 --- a/vendor/go.opentelemetry.io/otel/bridge/opencensus/internal/tracer.go +++ b/vendor/go.opentelemetry.io/otel/bridge/opencensus/internal/tracer.go @@ -37,7 +37,7 @@ func (o *Tracer) StartSpan(ctx context.Context, name string, s ...octrace.StartO // StartSpanWithRemoteParent starts a new child span of the span from the // given parent. func (o *Tracer) StartSpanWithRemoteParent(ctx context.Context, name string, parent octrace.SpanContext, s ...octrace.StartOption) (context.Context, *octrace.Span) { - // make sure span context is zero'd out so we use the remote parent + // make sure span context is zeroed out so we use the remote parent ctx = trace.ContextWithSpan(ctx, nil) ctx = trace.ContextWithRemoteSpanContext(ctx, oc2otel.SpanContext(parent)) return o.StartSpan(ctx, name, s...) diff --git a/vendor/go.opentelemetry.io/otel/bridge/opencensus/version.go b/vendor/go.opentelemetry.io/otel/bridge/opencensus/version.go index ba912bd2751..878dd5d6b79 100644 --- a/vendor/go.opentelemetry.io/otel/bridge/opencensus/version.go +++ b/vendor/go.opentelemetry.io/otel/bridge/opencensus/version.go @@ -5,5 +5,5 @@ package opencensus // import "go.opentelemetry.io/otel/bridge/opencensus" // Version is the current release version of the opencensus bridge. 
func Version() string { - return "1.27.0" + return "1.33.0" } diff --git a/vendor/go.opentelemetry.io/otel/bridge/opentracing/bridge.go b/vendor/go.opentelemetry.io/otel/bridge/opentracing/bridge.go index 8aee5567f9e..848f028c87f 100644 --- a/vendor/go.opentelemetry.io/otel/bridge/opentracing/bridge.go +++ b/vendor/go.opentelemetry.io/otel/bridge/opentracing/bridge.go @@ -6,6 +6,7 @@ package opentracing // import "go.opentelemetry.io/otel/bridge/opentracing" import ( "context" "fmt" + "strconv" "strings" "sync" @@ -532,7 +533,7 @@ func otTagToOTelAttr(k string, v interface{}) attribute.KeyValue { case int64: return key.Int64(val) case uint64: - return key.String(fmt.Sprintf("%d", val)) + return key.String(strconv.FormatUint(val, 10)) case float64: return key.Float64(val) case int8: @@ -552,7 +553,7 @@ func otTagToOTelAttr(k string, v interface{}) attribute.KeyValue { case int: return key.Int(val) case uint: - return key.String(fmt.Sprintf("%d", val)) + return key.String(strconv.FormatUint(uint64(val), 10)) case string: return key.String(val) default: diff --git a/vendor/go.opentelemetry.io/otel/bridge/opentracing/provider.go b/vendor/go.opentelemetry.io/otel/bridge/opentracing/provider.go index b069753f7e9..3f093e75fcf 100644 --- a/vendor/go.opentelemetry.io/otel/bridge/opentracing/provider.go +++ b/vendor/go.opentelemetry.io/otel/bridge/opentracing/provider.go @@ -6,6 +6,7 @@ package opentracing // import "go.opentelemetry.io/otel/bridge/opentracing" import ( "sync" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace/embedded" ) @@ -38,6 +39,8 @@ func NewTracerProvider(bridge *BridgeTracer, provider trace.TracerProvider) *Tra type wrappedTracerKey struct { name string version string + schema string + attrs attribute.Set } // Tracer creates a WrappedTracer that wraps the OpenTelemetry tracer for each call to @@ -51,6 +54,8 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T key := wrappedTracerKey{ name: name, version: c.InstrumentationVersion(), + schema: c.SchemaURL(), + attrs: c.InstrumentationAttributes(), } if t, ok := p.tracers[key]; ok { diff --git a/vendor/go.opentelemetry.io/otel/codes/codes.go b/vendor/go.opentelemetry.io/otel/codes/codes.go index 2acbac35466..49a35b12255 100644 --- a/vendor/go.opentelemetry.io/otel/codes/codes.go +++ b/vendor/go.opentelemetry.io/otel/codes/codes.go @@ -5,6 +5,7 @@ package codes // import "go.opentelemetry.io/otel/codes" import ( "encoding/json" + "errors" "fmt" "strconv" ) @@ -63,7 +64,7 @@ func (c *Code) UnmarshalJSON(b []byte) error { return nil } if c == nil { - return fmt.Errorf("nil receiver passed to UnmarshalJSON") + return errors.New("nil receiver passed to UnmarshalJSON") } var x interface{} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/LICENSE b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
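For orientation before the vendored exporter sources that follow: a minimal sketch of how the new otlploggrpc exporter is typically wired up. New, WithEndpoint, and WithInsecure come from the files added below; NewLoggerProvider, NewBatchProcessor, and WithProcessor are assumed from go.opentelemetry.io/otel/sdk/log (not part of this diff), and the endpoint and insecure transport are illustrative choices, not defaults mandated by the patch.

package main

import (
	"context"

	"go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc"
	sdklog "go.opentelemetry.io/otel/sdk/log"
)

func main() {
	ctx := context.Background()

	// Exporter construction using options defined in the vendored
	// config.go below; localhost:4317 is an illustrative endpoint.
	exp, err := otlploggrpc.New(ctx,
		otlploggrpc.WithEndpoint("localhost:4317"),
		otlploggrpc.WithInsecure(),
	)
	if err != nil {
		panic(err)
	}

	// The vendored exporter docs recommend pairing it with a
	// BatchProcessor so records are exported asynchronously.
	provider := sdklog.NewLoggerProvider(
		sdklog.WithProcessor(sdklog.NewBatchProcessor(exp)),
	)
	defer func() { _ = provider.Shutdown(ctx) }()
}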
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/README.md b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/README.md new file mode 100644 index 00000000000..0b4603c8e40 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/README.md @@ -0,0 +1,3 @@ +# OTLP Log gRPC Exporter + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc)](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc) diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/client.go new file mode 100644 index 00000000000..05abd92eeec --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/client.go @@ -0,0 +1,258 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otlploggrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc" + +import ( + "context" + "fmt" + "time" + + "google.golang.org/genproto/googleapis/rpc/errdetails" + "google.golang.org/grpc" + "google.golang.org/grpc/backoff" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/encoding/gzip" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/retry" + collogpb "go.opentelemetry.io/proto/otlp/collector/logs/v1" + logpb "go.opentelemetry.io/proto/otlp/logs/v1" +) + +// The methods of this type are not expected to be called concurrently. +type client struct { + metadata metadata.MD + exportTimeout time.Duration + requestFunc retry.RequestFunc + + // ourConn keeps track of where conn was created: true if created here in + // NewClient, or false if passed with an option. This is important on + // Shutdown as conn should only be closed if we created it. Otherwise, + // it is up to the processes that passed conn to close it. + ourConn bool + conn *grpc.ClientConn + lsc collogpb.LogsServiceClient +} + +// Used for testing. +var newGRPCClientFn = grpc.NewClient + +// newClient creates a new gRPC log client. +func newClient(cfg config) (*client, error) { + c := &client{ + exportTimeout: cfg.timeout.Value, + requestFunc: cfg.retryCfg.Value.RequestFunc(retryable), + conn: cfg.gRPCConn.Value, + } + + if len(cfg.headers.Value) > 0 { + c.metadata = metadata.New(cfg.headers.Value) + } + + if c.conn == nil { + // If the caller did not provide a ClientConn when the client was + // created, create one using the configuration they did provide. + dialOpts := newGRPCDialOptions(cfg) + + conn, err := newGRPCClientFn(cfg.endpoint.Value, dialOpts...) + if err != nil { + return nil, err + } + // Keep track that we own the lifecycle of this conn and need to close + // it on Shutdown. + c.ourConn = true + c.conn = conn + } + + c.lsc = collogpb.NewLogsServiceClient(c.conn) + + return c, nil +} + +func newGRPCDialOptions(cfg config) []grpc.DialOption { + userAgent := "OTel Go OTLP over gRPC logs exporter/" + Version() + dialOpts := []grpc.DialOption{grpc.WithUserAgent(userAgent)} + dialOpts = append(dialOpts, cfg.dialOptions.Value...) + + // Convert other grpc configs to the dial options. 
+	// Service config
+	if cfg.serviceConfig.Value != "" {
+		dialOpts = append(dialOpts, grpc.WithDefaultServiceConfig(cfg.serviceConfig.Value))
+	}
+	// Prioritize GRPCCredentials over Insecure (passing both is an error).
+	if cfg.gRPCCredentials.Value != nil {
+		dialOpts = append(dialOpts, grpc.WithTransportCredentials(cfg.gRPCCredentials.Value))
+	} else if cfg.insecure.Value {
+		dialOpts = append(dialOpts, grpc.WithTransportCredentials(insecure.NewCredentials()))
+	} else {
+		// Default to using the host's root CA.
+		dialOpts = append(dialOpts, grpc.WithTransportCredentials(
+			credentials.NewTLS(nil),
+		))
+	}
+	// Compression
+	if cfg.compression.Value == GzipCompression {
+		dialOpts = append(dialOpts, grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name)))
+	}
+	// Reconnection period
+	if cfg.reconnectionPeriod.Value != 0 {
+		p := grpc.ConnectParams{
+			Backoff:           backoff.DefaultConfig,
+			MinConnectTimeout: cfg.reconnectionPeriod.Value,
+		}
+		dialOpts = append(dialOpts, grpc.WithConnectParams(p))
+	}
+
+	return dialOpts
+}
+
+// UploadLogs sends proto logs to the connected endpoint.
+//
+// Retryable errors from the server will be handled according to any
+// RetryConfig the client was created with.
+//
+// The otlplog.Exporter synchronizes access to client methods, and
+// ensures this is not called after the Exporter is shut down. The only
+// thing to do here is send data.
+func (c *client) UploadLogs(ctx context.Context, rl []*logpb.ResourceLogs) error {
+	select {
+	case <-ctx.Done():
+		// Do not upload if the context is already expired.
+		return ctx.Err()
+	default:
+	}
+
+	ctx, cancel := c.exportContext(ctx)
+	defer cancel()
+
+	return c.requestFunc(ctx, func(ctx context.Context) error {
+		resp, err := c.lsc.Export(ctx, &collogpb.ExportLogsServiceRequest{
+			ResourceLogs: rl,
+		})
+		if resp != nil && resp.PartialSuccess != nil {
+			msg := resp.PartialSuccess.GetErrorMessage()
+			n := resp.PartialSuccess.GetRejectedLogRecords()
+			if n != 0 || msg != "" {
+				err := fmt.Errorf("OTLP partial success: %s (%d log records rejected)", msg, n)
+				otel.Handle(err)
+			}
+		}
+		// nil is converted to OK.
+		if status.Code(err) == codes.OK {
+			// Success.
+			return nil
+		}
+		return err
+	})
+}
+
+// Shutdown shuts down the client, freeing all resources.
+//
+// Any active connections to a remote endpoint are closed if they were created
+// by the client. Any gRPC connection passed during creation using
+// WithGRPCConn will not be closed. It is the caller's responsibility to
+// handle cleanup of that resource.
+//
+// The otlplog.Exporter synchronizes access to client methods and
+// ensures this is called only once. The only thing that needs to be done
+// here is to release any computational resources the client holds.
+func (c *client) Shutdown(ctx context.Context) error {
+	c.metadata = nil
+	c.requestFunc = nil
+	c.lsc = nil
+
+	// Release the connection if we created it.
+	err := ctx.Err()
+	if c.ourConn {
+		closeErr := c.conn.Close()
+		// A context timeout error takes precedence over this error.
+		if err == nil && closeErr != nil {
+			err = closeErr
+		}
+	}
+	c.conn = nil
+	return err
+}
+
+// exportContext returns a copy of parent with an appropriate deadline and
+// cancellation function based on the client's configured export timeout.
+//
+// It is the caller's responsibility to cancel the returned context once its
+// use is complete, via the parent or directly with the returned CancelFunc, to
+// ensure all resources are correctly released.
+func (c *client) exportContext(parent context.Context) (context.Context, context.CancelFunc) { + var ( + ctx context.Context + cancel context.CancelFunc + ) + + if c.exportTimeout > 0 { + ctx, cancel = context.WithTimeout(parent, c.exportTimeout) + } else { + ctx, cancel = context.WithCancel(parent) + } + + if c.metadata.Len() > 0 { + md := c.metadata + if outMD, ok := metadata.FromOutgoingContext(ctx); ok { + md = metadata.Join(md, outMD) + } + + ctx = metadata.NewOutgoingContext(ctx, md) + } + + return ctx, cancel +} + +type noopClient struct{} + +func newNoopClient() *noopClient { + return &noopClient{} +} + +func (c *noopClient) UploadLogs(context.Context, []*logpb.ResourceLogs) error { return nil } + +func (c *noopClient) Shutdown(context.Context) error { return nil } + +// retryable returns if err identifies a request that can be retried and a +// duration to wait for if an explicit throttle time is included in err. +func retryable(err error) (bool, time.Duration) { + s := status.Convert(err) + return retryableGRPCStatus(s) +} + +func retryableGRPCStatus(s *status.Status) (bool, time.Duration) { + switch s.Code() { + case codes.Canceled, + codes.DeadlineExceeded, + codes.Aborted, + codes.OutOfRange, + codes.Unavailable, + codes.DataLoss: + // Additionally, handle RetryInfo. + _, d := throttleDelay(s) + return true, d + case codes.ResourceExhausted: + // Retry only if the server signals that the recovery from resource exhaustion is possible. + return throttleDelay(s) + } + + // Not a retry-able error. + return false, 0 +} + +// throttleDelay returns if the status is RetryInfo +// and the duration to wait for if an explicit throttle time is included. +func throttleDelay(s *status.Status) (bool, time.Duration) { + for _, detail := range s.Details() { + if t, ok := detail.(*errdetails.RetryInfo); ok { + return true, t.RetryDelay.AsDuration() + } + } + return false, 0 +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/config.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/config.go new file mode 100644 index 00000000000..cd33a168271 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/config.go @@ -0,0 +1,653 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otlploggrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc" + +import ( + "crypto/tls" + "crypto/x509" + "errors" + "fmt" + "net/url" + "os" + "strconv" + "strings" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/retry" + "go.opentelemetry.io/otel/internal/global" +) + +// Default values. +var ( + defaultEndpoint = "localhost:4317" + defaultTimeout = 10 * time.Second + defaultRetryCfg = retry.DefaultConfig +) + +// Environment variable keys. 
+var ( + envEndpoint = []string{ + "OTEL_EXPORTER_OTLP_LOGS_ENDPOINT", + "OTEL_EXPORTER_OTLP_ENDPOINT", + } + envInsecure = []string{ + "OTEL_EXPORTER_OTLP_LOGS_INSECURE", + "OTEL_EXPORTER_OTLP_INSECURE", + } + + envHeaders = []string{ + "OTEL_EXPORTER_OTLP_LOGS_HEADERS", + "OTEL_EXPORTER_OTLP_HEADERS", + } + + envCompression = []string{ + "OTEL_EXPORTER_OTLP_LOGS_COMPRESSION", + "OTEL_EXPORTER_OTLP_COMPRESSION", + } + + envTimeout = []string{ + "OTEL_EXPORTER_OTLP_LOGS_TIMEOUT", + "OTEL_EXPORTER_OTLP_TIMEOUT", + } + + envTLSCert = []string{ + "OTEL_EXPORTER_OTLP_LOGS_CERTIFICATE", + "OTEL_EXPORTER_OTLP_CERTIFICATE", + } + envTLSClient = []struct { + Certificate string + Key string + }{ + { + "OTEL_EXPORTER_OTLP_LOGS_CLIENT_CERTIFICATE", + "OTEL_EXPORTER_OTLP_LOGS_CLIENT_KEY", + }, + { + "OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE", + "OTEL_EXPORTER_OTLP_CLIENT_KEY", + }, + } +) + +type fnOpt func(config) config + +func (f fnOpt) applyOption(c config) config { return f(c) } + +// Option applies an option to the Exporter. +type Option interface { + applyOption(config) config +} + +type config struct { + endpoint setting[string] + insecure setting[bool] + tlsCfg setting[*tls.Config] + headers setting[map[string]string] + compression setting[Compression] + timeout setting[time.Duration] + retryCfg setting[retry.Config] + + // gRPC configurations + gRPCCredentials setting[credentials.TransportCredentials] + serviceConfig setting[string] + reconnectionPeriod setting[time.Duration] + dialOptions setting[[]grpc.DialOption] + gRPCConn setting[*grpc.ClientConn] +} + +func newConfig(options []Option) config { + var c config + for _, opt := range options { + c = opt.applyOption(c) + } + + // Apply environment value and default value + c.endpoint = c.endpoint.Resolve( + getEnv[string](envEndpoint, convEndpoint), + fallback[string](defaultEndpoint), + ) + c.insecure = c.insecure.Resolve( + loadInsecureFromEnvEndpoint(envEndpoint), + getEnv[bool](envInsecure, convInsecure), + ) + c.tlsCfg = c.tlsCfg.Resolve( + loadEnvTLS[*tls.Config](), + ) + c.headers = c.headers.Resolve( + getEnv[map[string]string](envHeaders, convHeaders), + ) + c.compression = c.compression.Resolve( + getEnv[Compression](envCompression, convCompression), + ) + c.timeout = c.timeout.Resolve( + getEnv[time.Duration](envTimeout, convDuration), + fallback[time.Duration](defaultTimeout), + ) + c.retryCfg = c.retryCfg.Resolve( + fallback[retry.Config](defaultRetryCfg), + ) + + return c +} + +// RetryConfig defines configuration for retrying the export of log data +// that failed. +// +// This configuration does not define any network retry strategy. That is +// entirely handled by the gRPC ClientConn. +type RetryConfig retry.Config + +// WithInsecure disables client transport security for the Exporter's gRPC +// connection, just like grpc.WithInsecure() +// (https://pkg.go.dev/google.golang.org/grpc#WithInsecure) does. +// +// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_LOGS_ENDPOINT +// environment variable is set, and this option is not passed, that variable +// value will be used to determine client security. If the endpoint has a +// scheme of "http" or "unix" client security will be disabled. If both are +// set, OTEL_EXPORTER_OTLP_LOGS_ENDPOINT will take precedence. +// +// By default, if an environment variable is not set, and this option is not +// passed, client security will be used. +// +// This option has no effect if WithGRPCConn is used. 
+func WithInsecure() Option { + return fnOpt(func(c config) config { + c.insecure = newSetting(true) + return c + }) +} + +// WithEndpoint sets the target endpoint the Exporter will connect to. +// +// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_LOGS_ENDPOINT +// environment variable is set, and this option is not passed, that variable +// value will be used. If both are set, OTEL_EXPORTER_OTLP_LOGS_ENDPOINT +// will take precedence. +// +// If both this option and WithEndpointURL are used, the last used option will +// take precedence. +// +// By default, if an environment variable is not set, and this option is not +// passed, "localhost:4317" will be used. +// +// This option has no effect if WithGRPCConn is used. +func WithEndpoint(endpoint string) Option { + return fnOpt(func(c config) config { + c.endpoint = newSetting(endpoint) + return c + }) +} + +// WithEndpointURL sets the target endpoint URL the Exporter will connect to. +// +// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_LOGS_ENDPOINT +// environment variable is set, and this option is not passed, that variable +// value will be used. If both are set, OTEL_EXPORTER_OTLP_LOGS_ENDPOINT +// will take precedence. +// +// If both this option and WithEndpoint are used, the last used option will +// take precedence. +// +// If an invalid URL is provided, the default value will be kept. +// +// By default, if an environment variable is not set, and this option is not +// passed, "localhost:4317" will be used. +// +// This option has no effect if WithGRPCConn is used. +func WithEndpointURL(rawURL string) Option { + u, err := url.Parse(rawURL) + if err != nil { + global.Error(err, "otlplog: parse endpoint url", "url", rawURL) + return fnOpt(func(c config) config { return c }) + } + return fnOpt(func(c config) config { + c.endpoint = newSetting(u.Host) + c.insecure = insecureFromScheme(c.insecure, u.Scheme) + return c + }) +} + +// WithReconnectionPeriod set the minimum amount of time between connection +// attempts to the target endpoint. +// +// This option has no effect if WithGRPCConn is used. +func WithReconnectionPeriod(rp time.Duration) Option { + return fnOpt(func(c config) config { + c.reconnectionPeriod = newSetting(rp) + return c + }) +} + +// Compression describes the compression used for exported payloads. +type Compression int + +const ( + // NoCompression represents that no compression should be used. + NoCompression Compression = iota + // GzipCompression represents that gzip compression should be used. + GzipCompression +) + +// WithCompressor sets the compressor the gRPC client uses. +// Supported compressor values: "gzip". +// +// If the OTEL_EXPORTER_OTLP_COMPRESSION or +// OTEL_EXPORTER_OTLP_LOGS_COMPRESSION environment variable is set, and +// this option is not passed, that variable value will be used. That value can +// be either "none" or "gzip". If both are set, +// OTEL_EXPORTER_OTLP_LOGS_COMPRESSION will take precedence. +// +// By default, if an environment variable is not set, and this option is not +// passed, no compression strategy will be used. +// +// This option has no effect if WithGRPCConn is used. +func WithCompressor(compressor string) Option { + return fnOpt(func(c config) config { + c.compression = newSetting(compressorToCompression(compressor)) + return c + }) +} + +// WithHeaders will send the provided headers with each gRPC requests. 
+// +// If the OTEL_EXPORTER_OTLP_HEADERS or OTEL_EXPORTER_OTLP_LOGS_HEADERS +// environment variable is set, and this option is not passed, that variable +// value will be used. The value will be parsed as a list of key value pairs. +// These pairs are expected to be in the W3C Correlation-Context format +// without additional semi-colon delimited metadata (i.e. "k1=v1,k2=v2"). If +// both are set, OTEL_EXPORTER_OTLP_LOGS_HEADERS will take precedence. +// +// By default, if an environment variable is not set, and this option is not +// passed, no user headers will be set. +func WithHeaders(headers map[string]string) Option { + return fnOpt(func(c config) config { + c.headers = newSetting(headers) + return c + }) +} + +// WithTLSCredentials sets the gRPC connection to use creds. +// +// If the OTEL_EXPORTER_OTLP_CERTIFICATE or +// OTEL_EXPORTER_OTLP_LOGS_CERTIFICATE environment variable is set, and +// this option is not passed, that variable value will be used. The value will +// be parsed the filepath of the TLS certificate chain to use. If both are +// set, OTEL_EXPORTER_OTLP_LOGS_CERTIFICATE will take precedence. +// +// By default, if an environment variable is not set, and this option is not +// passed, no TLS credentials will be used. +// +// This option has no effect if WithGRPCConn is used. +func WithTLSCredentials(credential credentials.TransportCredentials) Option { + return fnOpt(func(c config) config { + c.gRPCCredentials = newSetting(credential) + return c + }) +} + +// WithServiceConfig defines the default gRPC service config used. +// +// This option has no effect if WithGRPCConn is used. +func WithServiceConfig(serviceConfig string) Option { + return fnOpt(func(c config) config { + c.serviceConfig = newSetting(serviceConfig) + return c + }) +} + +// WithDialOption sets explicit grpc.DialOptions to use when establishing a +// gRPC connection. The options here are appended to the internal grpc.DialOptions +// used so they will take precedence over any other internal grpc.DialOptions +// they might conflict with. +// The [grpc.WithBlock], [grpc.WithTimeout], and [grpc.WithReturnConnectionError] +// grpc.DialOptions are ignored. +// +// This option has no effect if WithGRPCConn is used. +func WithDialOption(opts ...grpc.DialOption) Option { + return fnOpt(func(c config) config { + c.dialOptions = newSetting(opts) + return c + }) +} + +// WithGRPCConn sets conn as the gRPC ClientConn used for all communication. +// +// This option takes precedence over any other option that relates to +// establishing or persisting a gRPC connection to a target endpoint. Any +// other option of those types passed will be ignored. +// +// It is the callers responsibility to close the passed conn. The Exporter +// Shutdown method will not close this connection. +func WithGRPCConn(conn *grpc.ClientConn) Option { + return fnOpt(func(c config) config { + c.gRPCConn = newSetting(conn) + return c + }) +} + +// WithTimeout sets the max amount of time an Exporter will attempt an export. +// +// This takes precedence over any retry settings defined by WithRetry. Once +// this time limit has been reached the export is abandoned and the log +// data is dropped. +// +// If the OTEL_EXPORTER_OTLP_TIMEOUT or OTEL_EXPORTER_OTLP_LOGS_TIMEOUT +// environment variable is set, and this option is not passed, that variable +// value will be used. The value will be parsed as an integer representing the +// timeout in milliseconds. If both are set, +// OTEL_EXPORTER_OTLP_LOGS_TIMEOUT will take precedence. 
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, a timeout of 10 seconds will be used.
+func WithTimeout(duration time.Duration) Option {
+	return fnOpt(func(c config) config {
+		c.timeout = newSetting(duration)
+		return c
+	})
+}
+
+// WithRetry sets the retry policy for transient retryable errors that are
+// returned by the target endpoint.
+//
+// If the target endpoint responds with not only a retryable error, but
+// explicitly returns a backoff time in the response, that time will take
+// precedence over these settings.
+//
+// These settings do not define any network retry strategy. That is entirely
+// handled by the gRPC ClientConn.
+//
+// If unset, the default retry policy will be used. It will retry the export
+// 5 seconds after receiving a retryable error and increase exponentially
+// after each error for no more than a total time of 1 minute.
+func WithRetry(rc RetryConfig) Option {
+	return fnOpt(func(c config) config {
+		c.retryCfg = newSetting(retry.Config(rc))
+		return c
+	})
+}
+
+// convCompression returns the parsed compression encoded in s. NoCompression
+// and an error are returned if s is unknown.
+func convCompression(s string) (Compression, error) {
+	switch s {
+	case "gzip":
+		return GzipCompression, nil
+	case "none", "":
+		return NoCompression, nil
+	}
+	return NoCompression, fmt.Errorf("unknown compression: %s", s)
+}
+
+// convEndpoint converts s from a URL string to an endpoint if s is a valid
+// URL. Otherwise, "" and an error are returned.
+func convEndpoint(s string) (string, error) {
+	u, err := url.Parse(s)
+	if err != nil {
+		return "", err
+	}
+	return u.Host, nil
+}
+
+// convInsecure converts s from string to bool without case sensitivity.
+// If s is not valid, an error is returned.
+func convInsecure(s string) (bool, error) {
+	s = strings.ToLower(s)
+	if s != "true" && s != "false" {
+		return false, fmt.Errorf("can't convert %q to bool", s)
+	}
+
+	return s == "true", nil
+}
+
+// loadInsecureFromEnvEndpoint returns a resolver that fetches the
+// insecure setting from envEndpoint if possible.
+func loadInsecureFromEnvEndpoint(envEndpoint []string) resolver[bool] {
+	return func(s setting[bool]) setting[bool] {
+		if s.Set {
+			// Passed, valid, options have precedence.
+			return s
+		}
+
+		for _, key := range envEndpoint {
+			if vStr := os.Getenv(key); vStr != "" {
+				u, err := url.Parse(vStr)
+				if err != nil {
+					otel.Handle(fmt.Errorf("invalid %s value %s: %w", key, vStr, err))
+					continue
+				}
+
+				return insecureFromScheme(s, u.Scheme)
+			}
+		}
+		return s
+	}
+}
+
+// convHeaders converts the OTel environment variable header value s into a
+// mapping of header key to value. If s is invalid, a partial result and an
+// error are returned.
+func convHeaders(s string) (map[string]string, error) {
+	out := make(map[string]string)
+	var err error
+	for _, header := range strings.Split(s, ",") {
+		rawKey, rawVal, found := strings.Cut(header, "=")
+		if !found {
+			err = errors.Join(err, fmt.Errorf("invalid header: %s", header))
+			continue
+		}
+
+		escKey, e := url.PathUnescape(rawKey)
+		if e != nil {
+			err = errors.Join(err, fmt.Errorf("invalid header key: %s", rawKey))
+			continue
+		}
+		key := strings.TrimSpace(escKey)
+
+		escVal, e := url.PathUnescape(rawVal)
+		if e != nil {
+			err = errors.Join(err, fmt.Errorf("invalid header value: %s", rawVal))
+			continue
+		}
+		val := strings.TrimSpace(escVal)
+
+		out[key] = val
+	}
+	return out, err
+}
+
+// convDuration converts s into a duration of milliseconds.
If s does not +// contain an integer, 0 and an error are returned. +func convDuration(s string) (time.Duration, error) { + d, err := strconv.Atoi(s) + if err != nil { + return 0, err + } + // OTel durations are defined in milliseconds. + return time.Duration(d) * time.Millisecond, nil +} + +// loadEnvTLS returns a resolver that loads a *tls.Config from files defined by +// the OTLP TLS environment variables. This will load both the rootCAs and +// certificates used for mTLS. +// +// If the filepath defined is invalid or does not contain valid TLS files, an +// error is passed to the OTel ErrorHandler and no TLS configuration is +// provided. +func loadEnvTLS[T *tls.Config]() resolver[T] { + return func(s setting[T]) setting[T] { + if s.Set { + // Passed, valid, options have precedence. + return s + } + + var rootCAs *x509.CertPool + var err error + for _, key := range envTLSCert { + if v := os.Getenv(key); v != "" { + rootCAs, err = loadCertPool(v) + break + } + } + + var certs []tls.Certificate + for _, pair := range envTLSClient { + cert := os.Getenv(pair.Certificate) + key := os.Getenv(pair.Key) + if cert != "" && key != "" { + var e error + certs, e = loadCertificates(cert, key) + err = errors.Join(err, e) + break + } + } + + if err != nil { + err = fmt.Errorf("failed to load TLS: %w", err) + otel.Handle(err) + } else if rootCAs != nil || certs != nil { + s.Set = true + s.Value = &tls.Config{RootCAs: rootCAs, Certificates: certs} + } + return s + } +} + +// readFile is used for testing. +var readFile = os.ReadFile + +// loadCertPool loads and returns the *x509.CertPool found at path if it exists +// and is valid. Otherwise, nil and an error is returned. +func loadCertPool(path string) (*x509.CertPool, error) { + b, err := readFile(path) + if err != nil { + return nil, err + } + cp := x509.NewCertPool() + if ok := cp.AppendCertsFromPEM(b); !ok { + return nil, errors.New("certificate not added") + } + return cp, nil +} + +// loadCertificates loads and returns the tls.Certificate found at path if it +// exists and is valid. Otherwise, nil and an error is returned. +func loadCertificates(certPath, keyPath string) ([]tls.Certificate, error) { + cert, err := readFile(certPath) + if err != nil { + return nil, err + } + key, err := readFile(keyPath) + if err != nil { + return nil, err + } + crt, err := tls.X509KeyPair(cert, key) + if err != nil { + return nil, err + } + return []tls.Certificate{crt}, nil +} + +// insecureFromScheme return setting if the connection should +// use client transport security or not. +// Empty scheme doesn't force insecure setting. +func insecureFromScheme(prev setting[bool], scheme string) setting[bool] { + if scheme == "https" { + return newSetting(false) + } else if len(scheme) > 0 { + return newSetting(true) + } + + return prev +} + +func compressorToCompression(compressor string) Compression { + c, err := convCompression(compressor) + if err != nil { + otel.Handle(fmt.Errorf("%w, using no compression as default", err)) + return NoCompression + } + + return c +} + +// setting is a configuration setting value. +type setting[T any] struct { + Value T + Set bool +} + +// newSetting returns a new setting with the value set. +func newSetting[T any](value T) setting[T] { + return setting[T]{Value: value, Set: true} +} + +// resolver returns an updated setting after applying an resolution operation. +type resolver[T any] func(setting[T]) setting[T] + +// Resolve returns a resolved version of s. 
+// +// It will apply all the passed fn in the order provided, chaining together the +// return setting to the next input. The setting s is used as the initial +// argument to the first fn. +// +// Each fn needs to validate if it should apply given the Set state of the +// setting. This will not perform any checks on the set state when chaining +// function. +func (s setting[T]) Resolve(fn ...resolver[T]) setting[T] { + for _, f := range fn { + s = f(s) + } + return s +} + +// getEnv returns a resolver that will apply an environment variable value +// associated with the first set key to a setting value. The conv function is +// used to convert between the environment variable value and the setting type. +// +// If the input setting to the resolver is set, the environment variable will +// not be applied. +// +// Any error returned from conv is sent to the OTel ErrorHandler and the +// setting will not be updated. +func getEnv[T any](keys []string, conv func(string) (T, error)) resolver[T] { + return func(s setting[T]) setting[T] { + if s.Set { + // Passed, valid, options have precedence. + return s + } + + for _, key := range keys { + if vStr := os.Getenv(key); vStr != "" { + v, err := conv(vStr) + if err == nil { + s.Value = v + s.Set = true + break + } + otel.Handle(fmt.Errorf("invalid %s value %s: %w", key, vStr, err)) + } + } + return s + } +} + +// fallback returns a resolve that will set a setting value to val if it is not +// already set. +// +// This is usually passed at the end of a resolver chain to ensure a default is +// applied if the setting has not already been set. +func fallback[T any](val T) resolver[T] { + return func(s setting[T]) setting[T] { + if !s.Set { + s.Value = val + s.Set = true + } + return s + } +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/doc.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/doc.go new file mode 100644 index 00000000000..67cb814342d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/doc.go @@ -0,0 +1,63 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +/* +Package otlploggrpc provides an OTLP log exporter using gRPC. The exporter uses gRPC to +transport OTLP protobuf payloads. + +All Exporters must be created with [New]. + +The environment variables described below can be used for configuration. + +OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_LOGS_ENDPOINT (default: "https://localhost:4317") - +target to which the exporter sends telemetry. +The target syntax is defined in https://github.com/grpc/grpc/blob/master/doc/naming.md. +The value must contain a scheme ("http" or "https") and host. +The value may additionally contain a port, and a path. +The value should not contain a query string or fragment. +OTEL_EXPORTER_OTLP_LOGS_ENDPOINT takes precedence over OTEL_EXPORTER_OTLP_ENDPOINT. +The configuration can be overridden by [WithEndpoint], [WithEndpointURL], [WithInsecure], and [WithGRPCConn] options. + +OTEL_EXPORTER_OTLP_INSECURE, OTEL_EXPORTER_OTLP_LOGS_INSECURE (default: "false") - +setting "true" disables client transport security for the exporter's gRPC connection. +You can use this only when an endpoint is provided without scheme. +OTEL_EXPORTER_OTLP_LOGS_INSECURE takes precedence over OTEL_EXPORTER_OTLP_INSECURE. +The configuration can be overridden by [WithInsecure], [WithGRPCConn] options. 
+
+OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_LOGS_HEADERS (default: none) -
+key-value pairs used as gRPC metadata associated with gRPC requests.
+The value is expected to be represented in a format matching the [W3C Baggage HTTP Header Content Format],
+except that additional semi-colon delimited metadata is not supported.
+Example value: "key1=value1,key2=value2".
+OTEL_EXPORTER_OTLP_LOGS_HEADERS takes precedence over OTEL_EXPORTER_OTLP_HEADERS.
+The configuration can be overridden by the [WithHeaders] option.
+
+OTEL_EXPORTER_OTLP_TIMEOUT, OTEL_EXPORTER_OTLP_LOGS_TIMEOUT (default: "10000") -
+maximum time in milliseconds the OTLP exporter waits for each batch export.
+OTEL_EXPORTER_OTLP_LOGS_TIMEOUT takes precedence over OTEL_EXPORTER_OTLP_TIMEOUT.
+The configuration can be overridden by the [WithTimeout] option.
+
+OTEL_EXPORTER_OTLP_COMPRESSION, OTEL_EXPORTER_OTLP_LOGS_COMPRESSION (default: none) -
+the gRPC compressor the exporter uses.
+Supported value: "gzip".
+OTEL_EXPORTER_OTLP_LOGS_COMPRESSION takes precedence over OTEL_EXPORTER_OTLP_COMPRESSION.
+The configuration can be overridden by the [WithCompressor], [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_CERTIFICATE, OTEL_EXPORTER_OTLP_LOGS_CERTIFICATE (default: none) -
+the filepath to the trusted certificate to use when verifying a server's TLS credentials.
+OTEL_EXPORTER_OTLP_LOGS_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CERTIFICATE.
+The configuration can be overridden by the [WithTLSCredentials], [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_LOGS_CLIENT_CERTIFICATE (default: none) -
+the filepath to the client certificate/chain trust for the client's private key to use in mTLS communication in PEM format.
+OTEL_EXPORTER_OTLP_LOGS_CLIENT_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE.
+The configuration can be overridden by the [WithTLSCredentials], [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_CLIENT_KEY, OTEL_EXPORTER_OTLP_LOGS_CLIENT_KEY (default: none) -
+the filepath to the client's private key to use in mTLS communication in PEM format.
+OTEL_EXPORTER_OTLP_LOGS_CLIENT_KEY takes precedence over OTEL_EXPORTER_OTLP_CLIENT_KEY.
+The configuration can be overridden by the [WithTLSCredentials], [WithGRPCConn] options.
+
+[W3C Baggage HTTP Header Content Format]: https://www.w3.org/TR/baggage/#header-content
+*/
+package otlploggrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc"
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/exporter.go
new file mode 100644
index 00000000000..66895c3a1a0
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/exporter.go
@@ -0,0 +1,93 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otlploggrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc"
+
+import (
+	"context"
+	"sync"
+	"sync/atomic"
+
+	"go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/transform"
+	"go.opentelemetry.io/otel/sdk/log"
+	logpb "go.opentelemetry.io/proto/otlp/logs/v1"
+)
+
+type logClient interface {
+	UploadLogs(ctx context.Context, rl []*logpb.ResourceLogs) error
+	Shutdown(context.Context) error
+}
+
+// Exporter is an OpenTelemetry log Exporter. It transports log data encoded as
+// OTLP protobufs using gRPC.
+// All Exporters must be created with [New].
+type Exporter struct {
+	// Ensure synchronous access to the client across all functionality.
+	clientMu sync.Mutex
+	client   logClient
+
+	stopped atomic.Bool
+}
+
+// Compile-time check Exporter implements [log.Exporter].
+var _ log.Exporter = (*Exporter)(nil)
+
+// New returns a new [Exporter].
+//
+// It is recommended to use it with a [BatchProcessor]
+// or other processor exporting records asynchronously.
+func New(_ context.Context, options ...Option) (*Exporter, error) {
+	cfg := newConfig(options)
+	c, err := newClient(cfg)
+	if err != nil {
+		return nil, err
+	}
+	return newExporter(c), nil
+}
+
+func newExporter(c logClient) *Exporter {
+	var e Exporter
+	e.client = c
+	return &e
+}
+
+var transformResourceLogs = transform.ResourceLogs
+
+// Export transforms and transmits log records to an OTLP receiver.
+//
+// This method returns nil and drops records if called after Shutdown.
+// This method returns an error if the export is canceled by the passed context.
+func (e *Exporter) Export(ctx context.Context, records []log.Record) error {
+	if e.stopped.Load() {
+		return nil
+	}
+
+	otlp := transformResourceLogs(records)
+	if otlp == nil {
+		return nil
+	}
+
+	e.clientMu.Lock()
+	defer e.clientMu.Unlock()
+	return e.client.UploadLogs(ctx, otlp)
+}
+
+// Shutdown shuts down the Exporter. Calls to Export or ForceFlush will perform
+// no operation after this is called.
+func (e *Exporter) Shutdown(ctx context.Context) error {
+	if e.stopped.Swap(true) {
+		return nil
+	}
+
+	e.clientMu.Lock()
+	defer e.clientMu.Unlock()
+
+	err := e.client.Shutdown(ctx)
+	e.client = newNoopClient()
+	return err
+}
+
+// ForceFlush does nothing. The Exporter holds no state.
+func (e *Exporter) ForceFlush(ctx context.Context) error {
+	return nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/retry/retry.go
new file mode 100644
index 00000000000..f2da12382a0
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/retry/retry.go
@@ -0,0 +1,145 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/retry/retry.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package retry provides request retry functionality that can perform
+// configurable exponential backoff for transient errors and honor any
+// explicit throttle responses received.
+package retry // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/retry"
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/cenkalti/backoff/v4"
+)
+
+// DefaultConfig is the recommended default configuration to use.
+var DefaultConfig = Config{
+	Enabled:         true,
+	InitialInterval: 5 * time.Second,
+	MaxInterval:     30 * time.Second,
+	MaxElapsedTime:  time.Minute,
+}
+
+// Config defines configuration for retrying batches in case of export failure
+// using an exponential backoff.
+type Config struct {
+	// Enabled indicates whether to retry sending batches in case of
+	// export failure.
+	Enabled bool
+	// InitialInterval is the time to wait after the first failure before
+	// retrying.
+	InitialInterval time.Duration
+	// MaxInterval is the upper bound on backoff interval. Once this value is
+	// reached, the delay between consecutive retries will always be
+	// `MaxInterval`.
+ MaxInterval time.Duration + // MaxElapsedTime is the maximum amount of time (including retries) spent + // trying to send a request/batch. Once this value is reached, the data + // is discarded. + MaxElapsedTime time.Duration +} + +// RequestFunc wraps a request with retry logic. +type RequestFunc func(context.Context, func(context.Context) error) error + +// EvaluateFunc returns if an error is retry-able and if an explicit throttle +// duration should be honored that was included in the error. +// +// The function must return true if the error argument is retry-able, +// otherwise it must return false for the first return parameter. +// +// The function must return a non-zero time.Duration if the error contains +// explicit throttle duration that should be honored, otherwise it must return +// a zero valued time.Duration. +type EvaluateFunc func(error) (bool, time.Duration) + +// RequestFunc returns a RequestFunc using the evaluate function to determine +// if requests can be retried and based on the exponential backoff +// configuration of c. +func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc { + if !c.Enabled { + return func(ctx context.Context, fn func(context.Context) error) error { + return fn(ctx) + } + } + + return func(ctx context.Context, fn func(context.Context) error) error { + // Do not use NewExponentialBackOff since it calls Reset and the code here + // must call Reset after changing the InitialInterval (this saves an + // unnecessary call to Now). + b := &backoff.ExponentialBackOff{ + InitialInterval: c.InitialInterval, + RandomizationFactor: backoff.DefaultRandomizationFactor, + Multiplier: backoff.DefaultMultiplier, + MaxInterval: c.MaxInterval, + MaxElapsedTime: c.MaxElapsedTime, + Stop: backoff.Stop, + Clock: backoff.SystemClock, + } + b.Reset() + + for { + err := fn(ctx) + if err == nil { + return nil + } + + retryable, throttle := evaluate(err) + if !retryable { + return err + } + + bOff := b.NextBackOff() + if bOff == backoff.Stop { + return fmt.Errorf("max retry time elapsed: %w", err) + } + + // Wait for the greater of the backoff or throttle delay. + var delay time.Duration + if bOff > throttle { + delay = bOff + } else { + elapsed := b.GetElapsedTime() + if b.MaxElapsedTime != 0 && elapsed+throttle > b.MaxElapsedTime { + return fmt.Errorf("max retry time would elapse: %w", err) + } + delay = throttle + } + + if ctxErr := waitFunc(ctx, delay); ctxErr != nil { + return fmt.Errorf("%w: %w", ctxErr, err) + } + } + } +} + +// Allow override for testing. +var waitFunc = wait + +// wait takes the caller's context, and the amount of time to wait. It will +// return nil if the timer fires before or at the same time as the context's +// deadline. This indicates that the call can be retried. +func wait(ctx context.Context, delay time.Duration) error { + timer := time.NewTimer(delay) + defer timer.Stop() + + select { + case <-ctx.Done(): + // Handle the case where the timer and context deadline end + // simultaneously by prioritizing the timer expiration nil value + // response. 
+ select { + case <-timer.C: + default: + return ctx.Err() + } + case <-timer.C: + } + + return nil +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/transform/log.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/transform/log.go new file mode 100644 index 00000000000..03e9d8c71cd --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/transform/log.go @@ -0,0 +1,390 @@ +// Code created by gotmpl. DO NOT MODIFY. +// source: internal/shared/otlp/otlplog/transform/log.go.tmpl + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package transform provides transformation functionality from the +// sdk/log data-types into OTLP data-types. +package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/transform" + +import ( + "time" + + cpb "go.opentelemetry.io/proto/otlp/common/v1" + lpb "go.opentelemetry.io/proto/otlp/logs/v1" + rpb "go.opentelemetry.io/proto/otlp/resource/v1" + + "go.opentelemetry.io/otel/attribute" + api "go.opentelemetry.io/otel/log" + "go.opentelemetry.io/otel/sdk/instrumentation" + "go.opentelemetry.io/otel/sdk/log" +) + +// ResourceLogs returns an slice of OTLP ResourceLogs generated from records. +func ResourceLogs(records []log.Record) []*lpb.ResourceLogs { + if len(records) == 0 { + return nil + } + + resMap := make(map[attribute.Distinct]*lpb.ResourceLogs) + + type key struct { + r attribute.Distinct + is instrumentation.Scope + } + scopeMap := make(map[key]*lpb.ScopeLogs) + + var resources int + for _, r := range records { + res := r.Resource() + rKey := res.Equivalent() + scope := r.InstrumentationScope() + k := key{ + r: rKey, + is: scope, + } + sl, iOk := scopeMap[k] + if !iOk { + sl = new(lpb.ScopeLogs) + var emptyScope instrumentation.Scope + if scope != emptyScope { + sl.Scope = &cpb.InstrumentationScope{ + Name: scope.Name, + Version: scope.Version, + Attributes: AttrIter(scope.Attributes.Iter()), + } + sl.SchemaUrl = scope.SchemaURL + } + scopeMap[k] = sl + } + + sl.LogRecords = append(sl.LogRecords, LogRecord(r)) + rl, rOk := resMap[rKey] + if !rOk { + resources++ + rl = new(lpb.ResourceLogs) + if res.Len() > 0 { + rl.Resource = &rpb.Resource{ + Attributes: AttrIter(res.Iter()), + } + } + rl.SchemaUrl = res.SchemaURL() + resMap[rKey] = rl + } + if !iOk { + rl.ScopeLogs = append(rl.ScopeLogs, sl) + } + } + + // Transform the categorized map into a slice + resLogs := make([]*lpb.ResourceLogs, 0, resources) + for _, rl := range resMap { + resLogs = append(resLogs, rl) + } + + return resLogs +} + +// LogRecord returns an OTLP LogRecord generated from record. +func LogRecord(record log.Record) *lpb.LogRecord { + r := &lpb.LogRecord{ + TimeUnixNano: timeUnixNano(record.Timestamp()), + ObservedTimeUnixNano: timeUnixNano(record.ObservedTimestamp()), + SeverityNumber: SeverityNumber(record.Severity()), + SeverityText: record.SeverityText(), + Body: LogAttrValue(record.Body()), + Attributes: make([]*cpb.KeyValue, 0, record.AttributesLen()), + Flags: uint32(record.TraceFlags()), + // TODO: DroppedAttributesCount: /* ... 
*/, + } + record.WalkAttributes(func(kv api.KeyValue) bool { + r.Attributes = append(r.Attributes, LogAttr(kv)) + return true + }) + if tID := record.TraceID(); tID.IsValid() { + r.TraceId = tID[:] + } + if sID := record.SpanID(); sID.IsValid() { + r.SpanId = sID[:] + } + return r +} + +// timeUnixNano returns t as a Unix time, the number of nanoseconds elapsed +// since January 1, 1970 UTC as uint64. The result is undefined if the Unix +// time in nanoseconds cannot be represented by an int64 (a date before the +// year 1678 or after 2262). timeUnixNano on the zero Time returns 0. The +// result does not depend on the location associated with t. +func timeUnixNano(t time.Time) uint64 { + nano := t.UnixNano() + if nano < 0 { + return 0 + } + return uint64(nano) // nolint:gosec // Overflow checked. +} + +// AttrIter transforms an [attribute.Iterator] into OTLP key-values. +func AttrIter(iter attribute.Iterator) []*cpb.KeyValue { + l := iter.Len() + if l == 0 { + return nil + } + + out := make([]*cpb.KeyValue, 0, l) + for iter.Next() { + out = append(out, Attr(iter.Attribute())) + } + return out +} + +// Attrs transforms a slice of [attribute.KeyValue] into OTLP key-values. +func Attrs(attrs []attribute.KeyValue) []*cpb.KeyValue { + if len(attrs) == 0 { + return nil + } + + out := make([]*cpb.KeyValue, 0, len(attrs)) + for _, kv := range attrs { + out = append(out, Attr(kv)) + } + return out +} + +// Attr transforms an [attribute.KeyValue] into an OTLP key-value. +func Attr(kv attribute.KeyValue) *cpb.KeyValue { + return &cpb.KeyValue{Key: string(kv.Key), Value: AttrValue(kv.Value)} +} + +// AttrValue transforms an [attribute.Value] into an OTLP AnyValue. +func AttrValue(v attribute.Value) *cpb.AnyValue { + av := new(cpb.AnyValue) + switch v.Type() { + case attribute.BOOL: + av.Value = &cpb.AnyValue_BoolValue{ + BoolValue: v.AsBool(), + } + case attribute.BOOLSLICE: + av.Value = &cpb.AnyValue_ArrayValue{ + ArrayValue: &cpb.ArrayValue{ + Values: boolSliceValues(v.AsBoolSlice()), + }, + } + case attribute.INT64: + av.Value = &cpb.AnyValue_IntValue{ + IntValue: v.AsInt64(), + } + case attribute.INT64SLICE: + av.Value = &cpb.AnyValue_ArrayValue{ + ArrayValue: &cpb.ArrayValue{ + Values: int64SliceValues(v.AsInt64Slice()), + }, + } + case attribute.FLOAT64: + av.Value = &cpb.AnyValue_DoubleValue{ + DoubleValue: v.AsFloat64(), + } + case attribute.FLOAT64SLICE: + av.Value = &cpb.AnyValue_ArrayValue{ + ArrayValue: &cpb.ArrayValue{ + Values: float64SliceValues(v.AsFloat64Slice()), + }, + } + case attribute.STRING: + av.Value = &cpb.AnyValue_StringValue{ + StringValue: v.AsString(), + } + case attribute.STRINGSLICE: + av.Value = &cpb.AnyValue_ArrayValue{ + ArrayValue: &cpb.ArrayValue{ + Values: stringSliceValues(v.AsStringSlice()), + }, + } + default: + av.Value = &cpb.AnyValue_StringValue{ + StringValue: "INVALID", + } + } + return av +} + +func boolSliceValues(vals []bool) []*cpb.AnyValue { + converted := make([]*cpb.AnyValue, len(vals)) + for i, v := range vals { + converted[i] = &cpb.AnyValue{ + Value: &cpb.AnyValue_BoolValue{ + BoolValue: v, + }, + } + } + return converted +} + +func int64SliceValues(vals []int64) []*cpb.AnyValue { + converted := make([]*cpb.AnyValue, len(vals)) + for i, v := range vals { + converted[i] = &cpb.AnyValue{ + Value: &cpb.AnyValue_IntValue{ + IntValue: v, + }, + } + } + return converted +} + +func float64SliceValues(vals []float64) []*cpb.AnyValue { + converted := make([]*cpb.AnyValue, len(vals)) + for i, v := range vals { + converted[i] = &cpb.AnyValue{ + Value: 
&cpb.AnyValue_DoubleValue{ + DoubleValue: v, + }, + } + } + return converted +} + +func stringSliceValues(vals []string) []*cpb.AnyValue { + converted := make([]*cpb.AnyValue, len(vals)) + for i, v := range vals { + converted[i] = &cpb.AnyValue{ + Value: &cpb.AnyValue_StringValue{ + StringValue: v, + }, + } + } + return converted +} + +// Attrs transforms a slice of [api.KeyValue] into OTLP key-values. +func LogAttrs(attrs []api.KeyValue) []*cpb.KeyValue { + if len(attrs) == 0 { + return nil + } + + out := make([]*cpb.KeyValue, 0, len(attrs)) + for _, kv := range attrs { + out = append(out, LogAttr(kv)) + } + return out +} + +// LogAttr transforms an [api.KeyValue] into an OTLP key-value. +func LogAttr(attr api.KeyValue) *cpb.KeyValue { + return &cpb.KeyValue{ + Key: attr.Key, + Value: LogAttrValue(attr.Value), + } +} + +// LogAttrValues transforms a slice of [api.Value] into an OTLP []AnyValue. +func LogAttrValues(vals []api.Value) []*cpb.AnyValue { + if len(vals) == 0 { + return nil + } + + out := make([]*cpb.AnyValue, 0, len(vals)) + for _, v := range vals { + out = append(out, LogAttrValue(v)) + } + return out +} + +// LogAttrValue transforms an [api.Value] into an OTLP AnyValue. +func LogAttrValue(v api.Value) *cpb.AnyValue { + av := new(cpb.AnyValue) + switch v.Kind() { + case api.KindBool: + av.Value = &cpb.AnyValue_BoolValue{ + BoolValue: v.AsBool(), + } + case api.KindInt64: + av.Value = &cpb.AnyValue_IntValue{ + IntValue: v.AsInt64(), + } + case api.KindFloat64: + av.Value = &cpb.AnyValue_DoubleValue{ + DoubleValue: v.AsFloat64(), + } + case api.KindString: + av.Value = &cpb.AnyValue_StringValue{ + StringValue: v.AsString(), + } + case api.KindBytes: + av.Value = &cpb.AnyValue_BytesValue{ + BytesValue: v.AsBytes(), + } + case api.KindSlice: + av.Value = &cpb.AnyValue_ArrayValue{ + ArrayValue: &cpb.ArrayValue{ + Values: LogAttrValues(v.AsSlice()), + }, + } + case api.KindMap: + av.Value = &cpb.AnyValue_KvlistValue{ + KvlistValue: &cpb.KeyValueList{ + Values: LogAttrs(v.AsMap()), + }, + } + default: + av.Value = &cpb.AnyValue_StringValue{ + StringValue: "INVALID", + } + } + return av +} + +// SeverityNumber transforms a [log.Severity] into an OTLP SeverityNumber. 
+func SeverityNumber(s api.Severity) lpb.SeverityNumber { + switch s { + case api.SeverityTrace: + return lpb.SeverityNumber_SEVERITY_NUMBER_TRACE + case api.SeverityTrace2: + return lpb.SeverityNumber_SEVERITY_NUMBER_TRACE2 + case api.SeverityTrace3: + return lpb.SeverityNumber_SEVERITY_NUMBER_TRACE3 + case api.SeverityTrace4: + return lpb.SeverityNumber_SEVERITY_NUMBER_TRACE4 + case api.SeverityDebug: + return lpb.SeverityNumber_SEVERITY_NUMBER_DEBUG + case api.SeverityDebug2: + return lpb.SeverityNumber_SEVERITY_NUMBER_DEBUG2 + case api.SeverityDebug3: + return lpb.SeverityNumber_SEVERITY_NUMBER_DEBUG3 + case api.SeverityDebug4: + return lpb.SeverityNumber_SEVERITY_NUMBER_DEBUG4 + case api.SeverityInfo: + return lpb.SeverityNumber_SEVERITY_NUMBER_INFO + case api.SeverityInfo2: + return lpb.SeverityNumber_SEVERITY_NUMBER_INFO2 + case api.SeverityInfo3: + return lpb.SeverityNumber_SEVERITY_NUMBER_INFO3 + case api.SeverityInfo4: + return lpb.SeverityNumber_SEVERITY_NUMBER_INFO4 + case api.SeverityWarn: + return lpb.SeverityNumber_SEVERITY_NUMBER_WARN + case api.SeverityWarn2: + return lpb.SeverityNumber_SEVERITY_NUMBER_WARN2 + case api.SeverityWarn3: + return lpb.SeverityNumber_SEVERITY_NUMBER_WARN3 + case api.SeverityWarn4: + return lpb.SeverityNumber_SEVERITY_NUMBER_WARN4 + case api.SeverityError: + return lpb.SeverityNumber_SEVERITY_NUMBER_ERROR + case api.SeverityError2: + return lpb.SeverityNumber_SEVERITY_NUMBER_ERROR2 + case api.SeverityError3: + return lpb.SeverityNumber_SEVERITY_NUMBER_ERROR3 + case api.SeverityError4: + return lpb.SeverityNumber_SEVERITY_NUMBER_ERROR4 + case api.SeverityFatal: + return lpb.SeverityNumber_SEVERITY_NUMBER_FATAL + case api.SeverityFatal2: + return lpb.SeverityNumber_SEVERITY_NUMBER_FATAL2 + case api.SeverityFatal3: + return lpb.SeverityNumber_SEVERITY_NUMBER_FATAL3 + case api.SeverityFatal4: + return lpb.SeverityNumber_SEVERITY_NUMBER_FATAL4 + } + return lpb.SeverityNumber_SEVERITY_NUMBER_UNSPECIFIED +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/version.go new file mode 100644 index 00000000000..f4079072de2 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/version.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otlploggrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc" + +// Version is the current release version of the OpenTelemetry OTLP over gRPC logs exporter in use. 
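The switch above is a direct one-to-one mapping from the log API severities to the OTLP enum; anything it does not recognize, including the zero value, falls through to UNSPECIFIED. An in-package illustration, assuming the api and lpb import aliases used above and a surrounding function body:

    // Named levels and their fine-grained variants map directly:
    _ = SeverityNumber(api.SeverityWarn) == lpb.SeverityNumber_SEVERITY_NUMBER_WARN     // true
    _ = SeverityNumber(api.SeverityDebug3) == lpb.SeverityNumber_SEVERITY_NUMBER_DEBUG3 // true
    // The zero Severity matches no case and is reported as unspecified:
    _ = SeverityNumber(api.Severity(0)) == lpb.SeverityNumber_SEVERITY_NUMBER_UNSPECIFIED // true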
+func Version() string { + return "0.9.0" +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/client.go index 04d46d42ea0..279b4be4f60 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/client.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/client.go @@ -14,6 +14,7 @@ import ( "net/http" "net/url" "strconv" + "strings" "sync" "time" @@ -143,15 +144,20 @@ func (c *httpClient) uploadLogs(ctx context.Context, data []*logpb.ResourceLogs) resp, err := c.client.Do(request.Request) var urlErr *url.Error if errors.As(err, &urlErr) && urlErr.Temporary() { - return newResponseError(http.Header{}) + return newResponseError(http.Header{}, err) } if err != nil { return err } + if resp != nil && resp.Body != nil { + defer func() { + if err := resp.Body.Close(); err != nil { + otel.Handle(err) + } + }() + } - var rErr error - switch sc := resp.StatusCode; { - case sc >= 200 && sc <= 299: + if sc := resp.StatusCode; sc >= 200 && sc <= 299 { // Success, do not retry. // Read the partial success message, if any. @@ -179,26 +185,34 @@ func (c *httpClient) uploadLogs(ctx context.Context, data []*logpb.ResourceLogs) } } return nil - case sc == http.StatusTooManyRequests, - sc == http.StatusBadGateway, - sc == http.StatusServiceUnavailable, - sc == http.StatusGatewayTimeout: - // Retry-able failure. - rErr = newResponseError(resp.Header) - - // Going to retry, drain the body to reuse the connection. - if _, err := io.Copy(io.Discard, resp.Body); err != nil { - _ = resp.Body.Close() - return err - } - default: - rErr = fmt.Errorf("failed to send logs to %s: %s", request.URL, resp.Status) } - - if err := resp.Body.Close(); err != nil { + // Error cases. + + // server may return a message with the response + // body, so we read it to include in the error + // message to be returned. It will help in + // debugging the actual issue. + var respData bytes.Buffer + if _, err := io.Copy(&respData, resp.Body); err != nil { return err } - return rErr + respStr := strings.TrimSpace(respData.String()) + if len(respStr) == 0 { + respStr = "(empty)" + } + bodyErr := fmt.Errorf("body: %s", respStr) + + switch resp.StatusCode { + case http.StatusTooManyRequests, + http.StatusBadGateway, + http.StatusServiceUnavailable, + http.StatusGatewayTimeout: + // Retryable failure. + return newResponseError(resp.Header, bodyErr) + default: + // Non-retryable failure. + return fmt.Errorf("failed to send logs to %s: %s (%w)", request.URL, resp.Status, bodyErr) + } }) } @@ -266,24 +280,50 @@ func (r *request) reset(ctx context.Context) { // retryableError represents a request failure that can be retried. type retryableError struct { throttle int64 + err error } // newResponseError returns a retryableError and will extract any explicit -// throttle delay contained in headers. -func newResponseError(header http.Header) error { +// throttle delay contained in headers. The returned error wraps wrapped +// if it is not nil. 
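A consequence of the body-wrapping in uploadLogs above: a non-retryable failure now reads as a single error carrying both the status line and the server's message, and the chain stays inspectable with the standard errors package. A hedged sketch of the shape produced (values illustrative, fmt and errors imports assumed):

    bodyErr := fmt.Errorf("body: %s", "invalid resource attributes")
    err := fmt.Errorf("failed to send logs to %s: %s (%w)",
        "https://collector:4318/v1/logs", "400 Bad Request", bodyErr)

    fmt.Println(err)
    // failed to send logs to https://collector:4318/v1/logs: 400 Bad Request (body: invalid resource attributes)
    fmt.Println(errors.Is(err, bodyErr)) // true: the body error stays in the chain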
+func newResponseError(header http.Header, wrapped error) error { var rErr retryableError if v := header.Get("Retry-After"); v != "" { if t, err := strconv.ParseInt(v, 10, 64); err == nil { rErr.throttle = t } } + + rErr.err = wrapped return rErr } func (e retryableError) Error() string { + if e.err != nil { + return fmt.Sprintf("retry-able request failure: %v", e.err.Error()) + } + return "retry-able request failure" } +func (e retryableError) Unwrap() error { + return e.err +} + +func (e retryableError) As(target interface{}) bool { + if e.err == nil { + return false + } + + switch v := target.(type) { + case **retryableError: + *v = &e + return true + default: + return false + } +} + // evaluate returns if err is retry-able. If it is and it includes an explicit // throttling delay, that delay is also returned. func evaluate(err error) (bool, time.Duration) { diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/config.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/config.go index 383890d91d5..bfe768091e3 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/config.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/config.go @@ -143,8 +143,12 @@ func newConfig(options []Option) config { // // If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_LOGS_ENDPOINT // environment variable is set, and this option is not passed, that variable -// value will be used. If both are set, OTEL_EXPORTER_OTLP_LOGS_ENDPOINT -// will take precedence. +// value will be used. If both environment variables are set, +// OTEL_EXPORTER_OTLP_LOGS_ENDPOINT will take precedence. If an environment +// variable is set, and this option is passed, this option will take precedence. +// +// If both this option and WithEndpointURL are used, the last used option will +// take precedence. // // By default, if an environment variable is not set, and this option is not // passed, "localhost:4318" will be used. @@ -159,8 +163,9 @@ func WithEndpoint(endpoint string) Option { // // If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_LOGS_ENDPOINT // environment variable is set, and this option is not passed, that variable -// value will be used. If both are set, OTEL_EXPORTER_OTLP_LOGS_ENDPOINT -// will take precedence. +// value will be used. If both environment variables are set, +// OTEL_EXPORTER_OTLP_LOGS_ENDPOINT will take precedence. If an environment +// variable is set, and this option is passed, this option will take precedence. // // If both this option and WithEndpoint are used, the last used option will // take precedence. @@ -178,11 +183,7 @@ func WithEndpointURL(rawURL string) Option { return fnOpt(func(c config) config { c.endpoint = newSetting(u.Host) c.path = newSetting(u.Path) - if u.Scheme != "https" { - c.insecure = newSetting(true) - } else { - c.insecure = newSetting(false) - } + c.insecure = newSetting(u.Scheme != "https") return c }) } @@ -372,7 +373,7 @@ func (s setting[T]) Resolve(fn ...resolver[T]) setting[T] { return s } -// loadEnvTLS returns a resolver that loads a *tls.Config from files defeind by +// loadEnvTLS returns a resolver that loads a *tls.Config from files defined by // the OTLP TLS environment variables. This will load both the rootCAs and // certificates used for mTLS. 
// diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/doc.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/doc.go index 87e55efe091..2607e3b9b68 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/doc.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/doc.go @@ -22,7 +22,7 @@ target URL to which the exporter sends telemetry. The value must contain a scheme ("http" or "https") and host. The value may additionally contain a port and a path. The value should not contain a query string or fragment. -The configuration can be overridden by [WithEndpoint], [WithEndpointURL], [WitnInsecure], and [WithURLPath] options. +The configuration can be overridden by [WithEndpoint], [WithEndpointURL], [WithInsecure], and [WithURLPath] options. OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_LOGS_HEADERS (default: none) - key-value pairs used as headers associated with HTTP requests. diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/exporter.go index deee36c001c..f1c8d3ae0a7 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/exporter.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/exporter.go @@ -23,6 +23,9 @@ type Exporter struct { var _ log.Exporter = (*Exporter)(nil) // New returns a new [Exporter]. +// +// It is recommended to use it with a [BatchProcessor] +// or other processor exporting records asynchronously. func New(_ context.Context, options ...Option) (*Exporter, error) { cfg := newConfig(options) c, err := newHTTPClient(cfg) diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/transform/log.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/transform/log.go index 232172eb6b9..4b52a14ebc6 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/transform/log.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/transform/log.go @@ -9,7 +9,6 @@ package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/transform" import ( - "sync" "time" cpb "go.opentelemetry.io/proto/otlp/common/v1" @@ -28,31 +27,42 @@ func ResourceLogs(records []log.Record) []*lpb.ResourceLogs { return nil } - resMap := resourceLogsMapPool.Get().(map[attribute.Distinct]*lpb.ResourceLogs) - defer func() { - clear(resMap) - resourceLogsMapPool.Put(resMap) - }() - resourceLogsMap(&resMap, records) + resMap := make(map[attribute.Distinct]*lpb.ResourceLogs) - out := make([]*lpb.ResourceLogs, 0, len(resMap)) - for _, rl := range resMap { - out = append(out, rl) + type key struct { + r attribute.Distinct + is instrumentation.Scope } - return out -} - -var resourceLogsMapPool = sync.Pool{ - New: func() any { - return make(map[attribute.Distinct]*lpb.ResourceLogs) - }, -} + scopeMap := make(map[key]*lpb.ScopeLogs) -func resourceLogsMap(dst *map[attribute.Distinct]*lpb.ResourceLogs, records []log.Record) { + var resources int for _, r := range records { res := r.Resource() - rl, ok := (*dst)[res.Equivalent()] - if !ok { + rKey := res.Equivalent() + scope := r.InstrumentationScope() + k := key{ + r: rKey, + is: scope, + } + sl, iOk := scopeMap[k] + if !iOk { + sl = new(lpb.ScopeLogs) + var emptyScope instrumentation.Scope + if scope != emptyScope { + sl.Scope = &cpb.InstrumentationScope{ + Name: scope.Name, 
+ Version: scope.Version, + Attributes: AttrIter(scope.Attributes.Iter()), + } + sl.SchemaUrl = scope.SchemaURL + } + scopeMap[k] = sl + } + + sl.LogRecords = append(sl.LogRecords, LogRecord(r)) + rl, rOk := resMap[rKey] + if !rOk { + resources++ rl = new(lpb.ResourceLogs) if res.Len() > 0 { rl.Resource = &rpb.Resource{ @@ -60,52 +70,20 @@ func resourceLogsMap(dst *map[attribute.Distinct]*lpb.ResourceLogs, records []lo } } rl.SchemaUrl = res.SchemaURL() - (*dst)[res.Equivalent()] = rl + resMap[rKey] = rl + } + if !iOk { + rl.ScopeLogs = append(rl.ScopeLogs, sl) } - rl.ScopeLogs = ScopeLogs(records) } -} -// ScopeLogs returns a slice of OTLP ScopeLogs generated from recoreds. -func ScopeLogs(records []log.Record) []*lpb.ScopeLogs { - scopeMap := scopeLogsMapPool.Get().(map[instrumentation.Scope]*lpb.ScopeLogs) - defer func() { - clear(scopeMap) - scopeLogsMapPool.Put(scopeMap) - }() - scopeLogsMap(&scopeMap, records) - - out := make([]*lpb.ScopeLogs, 0, len(scopeMap)) - for _, sl := range scopeMap { - out = append(out, sl) + // Transform the categorized map into a slice + resLogs := make([]*lpb.ResourceLogs, 0, resources) + for _, rl := range resMap { + resLogs = append(resLogs, rl) } - return out -} -var scopeLogsMapPool = sync.Pool{ - New: func() any { - return make(map[instrumentation.Scope]*lpb.ScopeLogs) - }, -} - -func scopeLogsMap(dst *map[instrumentation.Scope]*lpb.ScopeLogs, records []log.Record) { - for _, r := range records { - scope := r.InstrumentationScope() - sl, ok := (*dst)[scope] - if !ok { - sl = new(lpb.ScopeLogs) - var emptyScope instrumentation.Scope - if scope != emptyScope { - sl.Scope = &cpb.InstrumentationScope{ - Name: scope.Name, - Version: scope.Version, - } - sl.SchemaUrl = scope.SchemaURL - } - (*dst)[scope] = sl - } - sl.LogRecords = append(sl.LogRecords, LogRecord(r)) - } + return resLogs } // LogRecord returns an OTLP LogRecord generated from record. @@ -139,10 +117,11 @@ func LogRecord(record log.Record) *lpb.LogRecord { // year 1678 or after 2262). timeUnixNano on the zero Time returns 0. The // result does not depend on the location associated with t. func timeUnixNano(t time.Time) uint64 { - if t.IsZero() { + nano := t.UnixNano() + if nano < 0 { return 0 } - return uint64(t.UnixNano()) + return uint64(nano) // nolint:gosec // Overflow checked. } // AttrIter transforms an [attribute.Iterator] into OTLP key-values. diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/version.go index 48deb18678c..c83f73e02ba 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/version.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/version.go @@ -5,5 +5,5 @@ package otlploghttp // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/o // Version is the current release version of the OpenTelemetry OTLP over HTTP/protobuf logs exporter in use. 
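The refactor above trades the pooled two-pass grouping for a single pass keyed on the (resource, scope) pair, attaching each ScopeLogs to its ResourceLogs exactly once, the first time the pair is seen. The pattern in isolation, as a runnable sketch with stand-in types rather than the SDK ones:

    package main

    import "fmt"

    // group is the composite key, playing the role of the
    // {attribute.Distinct, instrumentation.Scope} key above.
    type group struct{ res, scope string }

    type record struct{ res, scope, body string }

    func main() {
        records := []record{
            {"svcA", "lib1", "a"}, {"svcA", "lib1", "b"},
            {"svcA", "lib2", "c"}, {"svcB", "lib1", "d"},
        }

        byRes := map[string][]string{}  // resource -> its scopes (ResourceLogs.ScopeLogs)
        byScope := map[group][]string{} // (resource, scope) -> bodies (ScopeLogs.LogRecords)
        for _, r := range records {
            k := group{r.res, r.scope}
            if _, ok := byScope[k]; !ok {
                // First sighting of this pair: attach the scope bucket to its
                // resource exactly once, as the `if !iOk` branch does above.
                byRes[r.res] = append(byRes[r.res], r.scope)
            }
            byScope[k] = append(byScope[k], r.body)
        }

        fmt.Println(byRes)   // map[svcA:[lib1 lib2] svcB:[lib1]]
        fmt.Println(byScope) // map[{svcA lib1}:[a b] {svcA lib2}:[c] {svcB lib1}:[d]]
    }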
func Version() string { - return "0.4.0" + return "0.9.0" } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go index 428cfea2334..e0fa0570a81 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go @@ -155,7 +155,12 @@ func (c *client) exportContext(parent context.Context) (context.Context, context } if c.metadata.Len() > 0 { - ctx = metadata.NewOutgoingContext(ctx, c.metadata) + md := c.metadata + if outMD, ok := metadata.FromOutgoingContext(ctx); ok { + md = metadata.Join(md, outMD) + } + + ctx = metadata.NewOutgoingContext(ctx, md) } return ctx, cancel diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/config.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/config.go index 38d7d60d403..db6e3714b3b 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/config.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/config.go @@ -66,8 +66,9 @@ func WithInsecure() Option { // // If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT // environment variable is set, and this option is not passed, that variable -// value will be used. If both are set, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT -// will take precedence. +// value will be used. If both environment variables are set, +// OTEL_EXPORTER_OTLP_METRICS_ENDPOINT will take precedence. If an environment +// variable is set, and this option is passed, this option will take precedence. // // If both this option and WithEndpointURL are used, the last used option will // take precedence. @@ -84,8 +85,9 @@ func WithEndpoint(endpoint string) Option { // // If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT // environment variable is set, and this option is not passed, that variable -// value will be used. If both are set, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT -// will take precedence. +// value will be used. If both environment variables are set, +// OTEL_EXPORTER_OTLP_METRICS_ENDPOINT will take precedence. If an environment +// variable is set, and this option is passed, this option will take precedence. // // If both this option and WithEndpoint are used, the last used option will // take precedence. diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/doc.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/doc.go index 3d74ef1a01d..dcd8de5df4e 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/doc.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/doc.go @@ -12,9 +12,8 @@ The environment variables described below can be used for configuration. OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT (default: "https://localhost:4317") - target to which the exporter sends telemetry. The target syntax is defined in https://github.com/grpc/grpc/blob/master/doc/naming.md. -The value must contain a host. -The value may additionally a port, a scheme, and a path. -The value accepts "http" and "https" scheme. +The value must contain a scheme ("http" or "https") and host. +The value may additionally contain a port, and a path. The value should not contain a query string or fragment. 
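In terms of the precedence spelled out here, a programmatic option always beats the environment, and the URL's scheme decides transport security. A short sketch (endpoint values illustrative; ctx and the otlpmetricgrpc import assumed):

    // Even with OTEL_EXPORTER_OTLP_ENDPOINT=https://env-collector:4317 set,
    // the option below wins, and the "http" scheme implies an insecure,
    // plaintext connection (Insecure = u.Scheme != "https", per the
    // oconf.WithEndpointURL change later in this patch).
    exp, err := otlpmetricgrpc.New(ctx,
        otlpmetricgrpc.WithEndpointURL("http://collector.internal:4317"),
    )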
OTEL_EXPORTER_OTLP_METRICS_ENDPOINT takes precedence over OTEL_EXPORTER_OTLP_ENDPOINT. The configuration can be overridden by [WithEndpoint], [WithEndpointURL], [WithInsecure], and [WithGRPCConn] options. diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/exporter.go index 98afc0b1e9d..3977c1f8a6c 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/exporter.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/exporter.go @@ -5,6 +5,7 @@ package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpme import ( "context" + "errors" "fmt" "sync" @@ -114,7 +115,7 @@ func (e *Exporter) Shutdown(ctx context.Context) error { return err } -var errShutdown = fmt.Errorf("gRPC exporter is shutdown") +var errShutdown = errors.New("gRPC exporter is shutdown") type shutdownClient struct{} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig/envconfig.go index b2735ba923c..261f5502682 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig/envconfig.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig/envconfig.go @@ -15,6 +15,7 @@ import ( "strconv" "strings" "time" + "unicode" "go.opentelemetry.io/otel/internal/global" ) @@ -163,12 +164,16 @@ func stringToHeader(value string) map[string]string { global.Error(errors.New("missing '="), "parse headers", "input", header) continue } - name, err := url.PathUnescape(n) - if err != nil { - global.Error(err, "escape header key", "key", n) + + trimmedName := strings.TrimSpace(n) + + // Validate the key. + if !isValidHeaderKey(trimmedName) { + global.Error(errors.New("invalid header key"), "parse headers", "key", trimmedName) continue } - trimmedName := strings.TrimSpace(name) + + // Only decode the value. value, err := url.PathUnescape(v) if err != nil { global.Error(err, "escape header value", "value", v) @@ -189,3 +194,22 @@ func createCertPool(certBytes []byte) (*x509.CertPool, error) { } return cp, nil } + +func isValidHeaderKey(key string) bool { + if key == "" { + return false + } + for _, c := range key { + if !isTokenChar(c) { + return false + } + } + return true +} + +func isTokenChar(c rune) bool { + return c <= unicode.MaxASCII && (unicode.IsLetter(c) || + unicode.IsDigit(c) || + c == '!' || c == '#' || c == '$' || c == '%' || c == '&' || c == '\'' || c == '*' || + c == '+' || c == '-' || c == '.' 
|| c == '^' || c == '_' || c == '`' || c == '|' || c == '~') +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/options.go index b6ed9a2bb65..2ac8db5a887 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/options.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/options.go @@ -111,7 +111,7 @@ func cleanPath(urlPath string, defaultPath string) string { return defaultPath } if !path.IsAbs(tmp) { - tmp = fmt.Sprintf("/%s", tmp) + tmp = "/" + tmp } return tmp } @@ -139,7 +139,7 @@ func NewGRPCConfig(opts ...GRPCOption) Config { if cfg.ServiceConfig != "" { cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultServiceConfig(cfg.ServiceConfig)) } - // Priroritize GRPCCredentials over Insecure (passing both is an error). + // Prioritize GRPCCredentials over Insecure (passing both is an error). if cfg.Metrics.GRPCCredentials != nil { cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Metrics.GRPCCredentials)) } else if cfg.Metrics.Insecure { @@ -287,9 +287,7 @@ func WithEndpointURL(v string) GenericOption { cfg.Metrics.Endpoint = u.Host cfg.Metrics.URLPath = u.Path - if u.Scheme != "https" { - cfg.Metrics.Insecure = true - } + cfg.Metrics.Insecure = u.Scheme != "https" return cfg }) diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/tls.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/tls.go index 0229ac80bef..03e7fbcdfb5 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/tls.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/tls.go @@ -14,7 +14,7 @@ import ( ) // ReadTLSConfigFromFile reads a PEM certificate file and creates -// a tls.Config that will use this certifate to verify a server certificate. +// a tls.Config that will use this certificate to verify a server certificate. 
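The envconfig change above validates header keys as RFC 7230 tokens instead of percent-decoding them; only header values are still unescaped. Restated as a runnable check, mirroring isValidHeaderKey/isTokenChar:

    package main

    import (
        "fmt"
        "unicode"
    )

    // validKey restates the token rule from isValidHeaderKey/isTokenChar above.
    func validKey(key string) bool {
        if key == "" {
            return false
        }
        for _, c := range key {
            ok := c <= unicode.MaxASCII && (unicode.IsLetter(c) || unicode.IsDigit(c) ||
                c == '!' || c == '#' || c == '$' || c == '%' || c == '&' || c == '\'' ||
                c == '*' || c == '+' || c == '-' || c == '.' || c == '^' || c == '_' ||
                c == '`' || c == '|' || c == '~')
            if !ok {
                return false
            }
        }
        return true
    }

    func main() {
        fmt.Println(validKey("x-api-key")) // true
        fmt.Println(validKey("api key"))   // false: space is not a token character
        fmt.Println(validKey("api%20key")) // true as a token; keys are no longer percent-decoded
    }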
func ReadTLSConfigFromFile(path string) (*tls.Config, error) { b, err := os.ReadFile(path) if err != nil { diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/metricdata.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/metricdata.go index 975e3b7aa1a..abf7f021960 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/metricdata.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/metricdata.go @@ -46,8 +46,9 @@ func ScopeMetrics(sms []metricdata.ScopeMetrics) ([]*mpb.ScopeMetrics, error) { out = append(out, &mpb.ScopeMetrics{ Scope: &cpb.InstrumentationScope{ - Name: sm.Scope.Name, - Version: sm.Scope.Version, + Name: sm.Scope.Name, + Version: sm.Scope.Version, + Attributes: AttrIter(sm.Scope.Attributes.Iter()), }, Metrics: ms, SchemaUrl: sm.Scope.SchemaURL, @@ -83,13 +84,13 @@ func metric(m metricdata.Metrics) (*mpb.Metric, error) { } switch a := m.Data.(type) { case metricdata.Gauge[int64]: - out.Data = Gauge[int64](a) + out.Data = Gauge(a) case metricdata.Gauge[float64]: - out.Data = Gauge[float64](a) + out.Data = Gauge(a) case metricdata.Sum[int64]: - out.Data, err = Sum[int64](a) + out.Data, err = Sum(a) case metricdata.Sum[float64]: - out.Data, err = Sum[float64](a) + out.Data, err = Sum(a) case metricdata.Histogram[int64]: out.Data, err = Histogram(a) case metricdata.Histogram[float64]: @@ -279,10 +280,7 @@ func Temporality(t metricdata.Temporality) (mpb.AggregationTemporality, error) { // timeUnixNano on the zero Time returns 0. // The result does not depend on the location associated with t. func timeUnixNano(t time.Time) uint64 { - if t.IsZero() { - return 0 - } - return uint64(t.UnixNano()) + return uint64(max(0, t.UnixNano())) // nolint:gosec // Overflow checked. } // Exemplars returns a slice of OTLP Exemplars generated from exemplars. diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go index a731860f5c0..d45d6981f8a 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go @@ -5,5 +5,5 @@ package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpme // Version is the current release version of the OpenTelemetry OTLP over gRPC metrics exporter in use. 
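The metricdata transform above now clamps negative UnixNano values with the max builtin rather than special-casing the zero Time; any pre-epoch or zero timestamp maps to 0, while valid nanosecond values convert losslessly. A quick check (Go 1.21+ for the built-in max):

    package main

    import (
        "fmt"
        "time"
    )

    // Same clamp as the vendored timeUnixNano.
    func toUnixNano(t time.Time) uint64 {
        return uint64(max(0, t.UnixNano()))
    }

    func main() {
        fmt.Println(toUnixNano(time.Time{}))      // 0: the zero Time's UnixNano is negative in practice
        fmt.Println(toUnixNano(time.Unix(0, 42))) // 42
        fmt.Println(toUnixNano(time.Date(1960, 1, 1, 0, 0, 0, 0, time.UTC))) // 0: pre-epoch clamps
    }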
func Version() string { - return "1.28.0" + return "1.33.0" } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/client.go index 205594b7f34..86da30e3754 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/client.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/client.go @@ -14,6 +14,7 @@ import ( "net/http" "net/url" "strconv" + "strings" "sync" "time" @@ -146,15 +147,20 @@ func (c *client) UploadMetrics(ctx context.Context, protoMetrics *metricpb.Resou resp, err := c.httpClient.Do(request.Request) var urlErr *url.Error if errors.As(err, &urlErr) && urlErr.Temporary() { - return newResponseError(http.Header{}) + return newResponseError(http.Header{}, err) } if err != nil { return err } + if resp != nil && resp.Body != nil { + defer func() { + if err := resp.Body.Close(); err != nil { + otel.Handle(err) + } + }() + } - var rErr error - switch sc := resp.StatusCode; { - case sc >= 200 && sc <= 299: + if sc := resp.StatusCode; sc >= 200 && sc <= 299 { // Success, do not retry. // Read the partial success message, if any. @@ -182,26 +188,34 @@ func (c *client) UploadMetrics(ctx context.Context, protoMetrics *metricpb.Resou } } return nil - case sc == http.StatusTooManyRequests, - sc == http.StatusBadGateway, - sc == http.StatusServiceUnavailable, - sc == http.StatusGatewayTimeout: - // Retry-able failure. - rErr = newResponseError(resp.Header) - - // Going to retry, drain the body to reuse the connection. - if _, err := io.Copy(io.Discard, resp.Body); err != nil { - _ = resp.Body.Close() - return err - } - default: - rErr = fmt.Errorf("failed to send metrics to %s: %s", request.URL, resp.Status) } - - if err := resp.Body.Close(); err != nil { + // Error cases. + + // server may return a message with the response + // body, so we read it to include in the error + // message to be returned. It will help in + // debugging the actual issue. + var respData bytes.Buffer + if _, err := io.Copy(&respData, resp.Body); err != nil { return err } - return rErr + respStr := strings.TrimSpace(respData.String()) + if len(respStr) == 0 { + respStr = "(empty)" + } + bodyErr := fmt.Errorf("body: %s", respStr) + + switch resp.StatusCode { + case http.StatusTooManyRequests, + http.StatusBadGateway, + http.StatusServiceUnavailable, + http.StatusGatewayTimeout: + // Retryable failure. + return newResponseError(resp.Header, bodyErr) + default: + // Non-retryable failure. + return fmt.Errorf("failed to send metrics to %s: %s (%w)", request.URL, resp.Status, bodyErr) + } }) } @@ -269,24 +283,50 @@ func (r *request) reset(ctx context.Context) { // retryableError represents a request failure that can be retried. type retryableError struct { throttle int64 + err error } // newResponseError returns a retryableError and will extract any explicit -// throttle delay contained in headers. -func newResponseError(header http.Header) error { +// throttle delay contained in headers. The returned error wraps wrapped +// if it is not nil. 
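This retryableError gains the same Unwrap and As support as the log client earlier in the patch (the methods follow just below). In-package, a wrapped retry failure can then be recovered with the standard helper; note the custom As only matches when a wrapped error is present:

    // Illustrative, in-package sketch; fields are unexported.
    inner := errors.New("body: rate limited")
    err := fmt.Errorf("export attempt: %w",
        newResponseError(http.Header{"Retry-After": {"7"}}, inner))

    var rErr *retryableError
    if errors.As(err, &rErr) { // satisfied via the custom As method below
        fmt.Println(rErr.throttle) // 7, parsed from the Retry-After header
    }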
+func newResponseError(header http.Header, wrapped error) error { var rErr retryableError if v := header.Get("Retry-After"); v != "" { if t, err := strconv.ParseInt(v, 10, 64); err == nil { rErr.throttle = t } } + + rErr.err = wrapped return rErr } func (e retryableError) Error() string { + if e.err != nil { + return "retry-able request failure: " + e.err.Error() + } + return "retry-able request failure" } +func (e retryableError) Unwrap() error { + return e.err +} + +func (e retryableError) As(target interface{}) bool { + if e.err == nil { + return false + } + + switch v := target.(type) { + case **retryableError: + *v = &e + return true + default: + return false + } +} + // evaluate returns if err is retry-able. If it is and it includes an explicit // throttling delay, that delay is also returned. func evaluate(err error) (bool, time.Duration) { diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/config.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/config.go index 4e08d9293da..bf05adcf1b1 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/config.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/config.go @@ -63,8 +63,9 @@ func (w wrappedOption) applyHTTPOption(cfg oconf.Config) oconf.Config { // // If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT // environment variable is set, and this option is not passed, that variable -// value will be used. If both are set, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT -// will take precedence. +// value will be used. If both environment variables are set, +// OTEL_EXPORTER_OTLP_METRICS_ENDPOINT will take precedence. If an environment +// variable is set, and this option is passed, this option will take precedence. // // By default, if an environment variable is not set, and this option is not // passed, "localhost:4318" will be used. @@ -76,8 +77,9 @@ func WithEndpoint(endpoint string) Option { // // If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT // environment variable is set, and this option is not passed, that variable -// value will be used. If both are set, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT -// will take precedence. +// value will be used. If both environment variables are set, +// OTEL_EXPORTER_OTLP_METRICS_ENDPOINT will take precedence. If an environment +// variable is set, and this option is passed, this option will take precedence. // // If both this option and WithEndpoint are used, the last used option will // take precedence. diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/doc.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/doc.go index eabb82b9847..de9e71a6e35 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/doc.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/doc.go @@ -22,7 +22,7 @@ target URL to which the exporter sends telemetry. The value must contain a scheme ("http" or "https") and host. The value may additionally contain a port and a path. The value should not contain a query string or fragment. -The configuration can be overridden by [WithEndpoint], [WithEndpointURL], [WitnInsecure], and [WithURLPath] options. +The configuration can be overridden by [WithEndpoint], [WithEndpointURL], [WithInsecure], and [WithURLPath] options. 
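Tying the pieces together: when one of the retryable statuses above arrives with a Retry-After header, the shared retry loop from earlier in this patch sleeps for whichever is larger, its exponential backoff or the server's throttle, and gives up early if honoring the throttle would exceed the elapsed-time budget. The decision rule restated on its own (time and errors imports assumed):

    // Delay selection as in retry.Config.RequestFunc.
    func pickDelay(backoff, throttle, elapsed, maxElapsed time.Duration) (time.Duration, error) {
        if backoff > throttle {
            return backoff, nil
        }
        if maxElapsed != 0 && elapsed+throttle > maxElapsed {
            return 0, errors.New("max retry time would elapse")
        }
        return throttle, nil
    }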
OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_METRICS_HEADERS (default: none) - key-value pairs used as headers associated with HTTP requests. diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/exporter.go index 701deb6d390..50ac8f86ea3 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/exporter.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/exporter.go @@ -5,6 +5,7 @@ package otlpmetrichttp // import "go.opentelemetry.io/otel/exporters/otlp/otlpme import ( "context" + "errors" "fmt" "sync" @@ -114,7 +115,7 @@ func (e *Exporter) Shutdown(ctx context.Context) error { return err } -var errShutdown = fmt.Errorf("HTTP exporter is shutdown") +var errShutdown = errors.New("HTTP exporter is shutdown") type shutdownClient struct{} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig/envconfig.go index 35885ba8a72..7ac42759f6c 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig/envconfig.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig/envconfig.go @@ -15,6 +15,7 @@ import ( "strconv" "strings" "time" + "unicode" "go.opentelemetry.io/otel/internal/global" ) @@ -163,12 +164,16 @@ func stringToHeader(value string) map[string]string { global.Error(errors.New("missing '="), "parse headers", "input", header) continue } - name, err := url.PathUnescape(n) - if err != nil { - global.Error(err, "escape header key", "key", n) + + trimmedName := strings.TrimSpace(n) + + // Validate the key. + if !isValidHeaderKey(trimmedName) { + global.Error(errors.New("invalid header key"), "parse headers", "key", trimmedName) continue } - trimmedName := strings.TrimSpace(name) + + // Only decode the value. value, err := url.PathUnescape(v) if err != nil { global.Error(err, "escape header value", "value", v) @@ -189,3 +194,22 @@ func createCertPool(certBytes []byte) (*x509.CertPool, error) { } return cp, nil } + +func isValidHeaderKey(key string) bool { + if key == "" { + return false + } + for _, c := range key { + if !isTokenChar(c) { + return false + } + } + return true +} + +func isTokenChar(c rune) bool { + return c <= unicode.MaxASCII && (unicode.IsLetter(c) || + unicode.IsDigit(c) || + c == '!' || c == '#' || c == '$' || c == '%' || c == '&' || c == '\'' || c == '*' || + c == '+' || c == '-' || c == '.' 
|| c == '^' || c == '_' || c == '`' || c == '|' || c == '~') +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/options.go index 9bbf0941f94..db595e49ec2 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/options.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/options.go @@ -111,7 +111,7 @@ func cleanPath(urlPath string, defaultPath string) string { return defaultPath } if !path.IsAbs(tmp) { - tmp = fmt.Sprintf("/%s", tmp) + tmp = "/" + tmp } return tmp } @@ -139,7 +139,7 @@ func NewGRPCConfig(opts ...GRPCOption) Config { if cfg.ServiceConfig != "" { cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultServiceConfig(cfg.ServiceConfig)) } - // Priroritize GRPCCredentials over Insecure (passing both is an error). + // Prioritize GRPCCredentials over Insecure (passing both is an error). if cfg.Metrics.GRPCCredentials != nil { cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Metrics.GRPCCredentials)) } else if cfg.Metrics.Insecure { @@ -287,9 +287,7 @@ func WithEndpointURL(v string) GenericOption { cfg.Metrics.Endpoint = u.Host cfg.Metrics.URLPath = u.Path - if u.Scheme != "https" { - cfg.Metrics.Insecure = true - } + cfg.Metrics.Insecure = u.Scheme != "https" return cfg }) diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/tls.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/tls.go index ae09ad57e16..f603dc605eb 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/tls.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/tls.go @@ -14,7 +14,7 @@ import ( ) // ReadTLSConfigFromFile reads a PEM certificate file and creates -// a tls.Config that will use this certifate to verify a server certificate. +// a tls.Config that will use this certificate to verify a server certificate. 
func ReadTLSConfigFromFile(path string) (*tls.Config, error) { b, err := os.ReadFile(path) if err != nil { diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/metricdata.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/metricdata.go index 0a1a65c44d2..8207b15a421 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/metricdata.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/metricdata.go @@ -46,8 +46,9 @@ func ScopeMetrics(sms []metricdata.ScopeMetrics) ([]*mpb.ScopeMetrics, error) { out = append(out, &mpb.ScopeMetrics{ Scope: &cpb.InstrumentationScope{ - Name: sm.Scope.Name, - Version: sm.Scope.Version, + Name: sm.Scope.Name, + Version: sm.Scope.Version, + Attributes: AttrIter(sm.Scope.Attributes.Iter()), }, Metrics: ms, SchemaUrl: sm.Scope.SchemaURL, @@ -83,13 +84,13 @@ func metric(m metricdata.Metrics) (*mpb.Metric, error) { } switch a := m.Data.(type) { case metricdata.Gauge[int64]: - out.Data = Gauge[int64](a) + out.Data = Gauge(a) case metricdata.Gauge[float64]: - out.Data = Gauge[float64](a) + out.Data = Gauge(a) case metricdata.Sum[int64]: - out.Data, err = Sum[int64](a) + out.Data, err = Sum(a) case metricdata.Sum[float64]: - out.Data, err = Sum[float64](a) + out.Data, err = Sum(a) case metricdata.Histogram[int64]: out.Data, err = Histogram(a) case metricdata.Histogram[float64]: @@ -279,10 +280,7 @@ func Temporality(t metricdata.Temporality) (mpb.AggregationTemporality, error) { // timeUnixNano on the zero Time returns 0. // The result does not depend on the location associated with t. func timeUnixNano(t time.Time) uint64 { - if t.IsZero() { - return 0 - } - return uint64(t.UnixNano()) + return uint64(max(0, t.UnixNano())) // nolint:gosec // Overflow checked. } // Exemplars returns a slice of OTLP Exemplars generated from exemplars. diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/version.go index 88e84ca892d..52247fc6405 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/version.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/version.go @@ -5,5 +5,5 @@ package otlpmetrichttp // import "go.opentelemetry.io/otel/exporters/otlp/otlpme // Version is the current release version of the OpenTelemetry OTLP over HTTP/protobuf metrics exporter in use. 
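The Attributes field added to the scope transform above means attributes supplied when a meter is created are no longer dropped on export. On the producing side, under the standard metric API (provider, metric, and attribute imports assumed):

    meter := provider.Meter(
        "example.com/instrumentation",
        metric.WithInstrumentationVersion("1.2.3"),
        // These now arrive in OTLP as InstrumentationScope.Attributes.
        metric.WithInstrumentationAttributes(attribute.String("tenant", "a")),
    )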
func Version() string { - return "1.28.0" + return "1.33.0" } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go index f6dd3decc90..2e7690e43a2 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go @@ -13,7 +13,8 @@ func InstrumentationScope(il instrumentation.Scope) *commonpb.InstrumentationSco return nil } return &commonpb.InstrumentationScope{ - Name: il.Name, - Version: il.Version, + Name: il.Name, + Version: il.Version, + Attributes: Iterator(il.Attributes.Iter()), } } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go index c3c69c5a0d6..bf27ef0220e 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go @@ -4,6 +4,8 @@ package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform" import ( + "math" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/sdk/instrumentation" @@ -95,16 +97,16 @@ func span(sd tracesdk.ReadOnlySpan) *tracepb.Span { SpanId: sid[:], TraceState: sd.SpanContext().TraceState().String(), Status: status(sd.Status().Code, sd.Status().Description), - StartTimeUnixNano: uint64(sd.StartTime().UnixNano()), - EndTimeUnixNano: uint64(sd.EndTime().UnixNano()), + StartTimeUnixNano: uint64(max(0, sd.StartTime().UnixNano())), // nolint:gosec // Overflow checked. + EndTimeUnixNano: uint64(max(0, sd.EndTime().UnixNano())), // nolint:gosec // Overflow checked. Links: links(sd.Links()), Kind: spanKind(sd.SpanKind()), Name: sd.Name(), Attributes: KeyValues(sd.Attributes()), Events: spanEvents(sd.Events()), - DroppedAttributesCount: uint32(sd.DroppedAttributes()), - DroppedEventsCount: uint32(sd.DroppedEvents()), - DroppedLinksCount: uint32(sd.DroppedLinks()), + DroppedAttributesCount: clampUint32(sd.DroppedAttributes()), + DroppedEventsCount: clampUint32(sd.DroppedEvents()), + DroppedLinksCount: clampUint32(sd.DroppedLinks()), } if psid := sd.Parent().SpanID(); psid.IsValid() { @@ -115,6 +117,16 @@ func span(sd tracesdk.ReadOnlySpan) *tracepb.Span { return s } +func clampUint32(v int) uint32 { + if v < 0 { + return 0 + } + if int64(v) > math.MaxUint32 { + return math.MaxUint32 + } + return uint32(v) // nolint: gosec // Overflow/Underflow checked. +} + // status transform a span code and message into an OTLP span status. 
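clampUint32 above saturates at both ends instead of letting the uint32 conversion wrap, so a negative or oversized dropped-count can no longer corrupt the exported value. Illustratively (in-package; 64-bit int assumed for the last line):

    fmt.Println(clampUint32(-1))      // 0: negative counts clamp low
    fmt.Println(clampUint32(42))      // 42: in-range values pass through
    fmt.Println(clampUint32(1 << 40)) // 4294967295: saturates at math.MaxUint32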
func status(status codes.Code, message string) *tracepb.Status { var c tracepb.Status_StatusCode @@ -153,7 +165,7 @@ func links(links []tracesdk.Link) []*tracepb.Span_Link { TraceId: tid[:], SpanId: sid[:], Attributes: KeyValues(otLink.Attributes), - DroppedAttributesCount: uint32(otLink.DroppedAttributeCount), + DroppedAttributesCount: clampUint32(otLink.DroppedAttributeCount), Flags: flags, }) } @@ -166,7 +178,7 @@ func buildSpanFlags(sc trace.SpanContext) uint32 { flags |= tracepb.SpanFlags_SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK } - return uint32(flags) + return uint32(flags) // nolint:gosec // Flags is a bitmask and can't be negative } // spanEvents transforms span Events to an OTLP span events. @@ -180,9 +192,9 @@ func spanEvents(es []tracesdk.Event) []*tracepb.Span_Event { for i := 0; i < len(es); i++ { events[i] = &tracepb.Span_Event{ Name: es[i].Name, - TimeUnixNano: uint64(es[i].Time.UnixNano()), + TimeUnixNano: uint64(max(0, es[i].Time.UnixNano())), // nolint:gosec // Overflow checked. Attributes: KeyValues(es[i].Attributes), - DroppedAttributesCount: uint32(es[i].DroppedAttributeCount), + DroppedAttributesCount: clampUint32(es[i].DroppedAttributeCount), } } return events diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go index 3993df927de..2171bee3c84 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go @@ -229,7 +229,12 @@ func (c *client) exportContext(parent context.Context) (context.Context, context } if c.metadata.Len() > 0 { - ctx = metadata.NewOutgoingContext(ctx, c.metadata) + md := c.metadata + if outMD, ok := metadata.FromOutgoingContext(ctx); ok { + md = metadata.Join(md, outMD) + } + + ctx = metadata.NewOutgoingContext(ctx, md) } // Unify the client stopCtx with the parent. diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go index e783b57ac4b..b7bd429ffdf 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go @@ -12,9 +12,8 @@ The environment variables described below can be used for configuration. OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT (default: "https://localhost:4317") - target to which the exporter sends telemetry. The target syntax is defined in https://github.com/grpc/grpc/blob/master/doc/naming.md. -The value must contain a host. -The value may additionally a port, a scheme, and a path. -The value accepts "http" and "https" scheme. +The value must contain a scheme ("http" or "https") and host. +The value may additionally contain a port, and a path. The value should not contain a query string or fragment. OTEL_EXPORTER_OTLP_TRACES_ENDPOINT takes precedence over OTEL_EXPORTER_OTLP_ENDPOINT. The configuration can be overridden by [WithEndpoint], [WithEndpointURL], [WithInsecure], and [WithGRPCConn] options. 
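The exportContext change above (the metrics gRPC client got the same treatment earlier in this patch) joins the exporter's configured headers with whatever metadata is already on the caller's outgoing context, instead of silently replacing it. What that enables, sketched with a hypothetical key (grpc metadata and context imports assumed):

    // Per-call metadata now survives alongside WithHeaders-configured values.
    ctx := metadata.AppendToOutgoingContext(context.Background(),
        "x-request-id", "abc123",
    )
    // Passing ctx to the exporter: exportContext runs
    // metadata.Join(c.metadata, outMD), so the upload carries both the
    // configured headers and x-request-id.
    _ = ctx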
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go index 9513c0a57ca..4abf48d1f62 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go @@ -15,6 +15,7 @@ import ( "strconv" "strings" "time" + "unicode" "go.opentelemetry.io/otel/internal/global" ) @@ -163,12 +164,16 @@ func stringToHeader(value string) map[string]string { global.Error(errors.New("missing '="), "parse headers", "input", header) continue } - name, err := url.PathUnescape(n) - if err != nil { - global.Error(err, "escape header key", "key", n) + + trimmedName := strings.TrimSpace(n) + + // Validate the key. + if !isValidHeaderKey(trimmedName) { + global.Error(errors.New("invalid header key"), "parse headers", "key", trimmedName) continue } - trimmedName := strings.TrimSpace(name) + + // Only decode the value. value, err := url.PathUnescape(v) if err != nil { global.Error(err, "escape header value", "value", v) @@ -189,3 +194,22 @@ func createCertPool(certBytes []byte) (*x509.CertPool, error) { } return cp, nil } + +func isValidHeaderKey(key string) bool { + if key == "" { + return false + } + for _, c := range key { + if !isTokenChar(c) { + return false + } + } + return true +} + +func isTokenChar(c rune) bool { + return c <= unicode.MaxASCII && (unicode.IsLetter(c) || + unicode.IsDigit(c) || + c == '!' || c == '#' || c == '$' || c == '%' || c == '&' || c == '\'' || c == '*' || + c == '+' || c == '-' || c == '.' || c == '^' || c == '_' || c == '`' || c == '|' || c == '~') +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go index 8f84a799632..0a317d92637 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go @@ -98,7 +98,7 @@ func cleanPath(urlPath string, defaultPath string) string { return defaultPath } if !path.IsAbs(tmp) { - tmp = fmt.Sprintf("/%s", tmp) + tmp = "/" + tmp } return tmp } @@ -125,7 +125,7 @@ func NewGRPCConfig(opts ...GRPCOption) Config { if cfg.ServiceConfig != "" { cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultServiceConfig(cfg.ServiceConfig)) } - // Priroritize GRPCCredentials over Insecure (passing both is an error). + // Prioritize GRPCCredentials over Insecure (passing both is an error). 
if cfg.Traces.GRPCCredentials != nil { cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Traces.GRPCCredentials)) } else if cfg.Traces.Insecure { @@ -278,9 +278,7 @@ func WithEndpointURL(v string) GenericOption { cfg.Traces.Endpoint = u.Host cfg.Traces.URLPath = u.Path - if u.Scheme != "https" { - cfg.Traces.Insecure = true - } + cfg.Traces.Insecure = u.Scheme != "https" return cfg }) diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go index 1e59ff23932..ef1cf330567 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go @@ -14,6 +14,7 @@ import ( "net/http" "net/url" "strconv" + "strings" "sync" "time" @@ -151,7 +152,7 @@ func (d *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.Resourc resp, err := d.client.Do(request.Request) var urlErr *url.Error if errors.As(err, &urlErr) && urlErr.Temporary() { - return newResponseError(http.Header{}) + return newResponseError(http.Header{}, err) } if err != nil { return err @@ -165,8 +166,7 @@ func (d *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.Resourc }() } - switch sc := resp.StatusCode; { - case sc >= 200 && sc <= 299: + if sc := resp.StatusCode; sc >= 200 && sc <= 299 { // Success, do not retry. // Read the partial success message, if any. var respData bytes.Buffer @@ -193,18 +193,33 @@ func (d *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.Resourc } } return nil - - case sc == http.StatusTooManyRequests, - sc == http.StatusBadGateway, - sc == http.StatusServiceUnavailable, - sc == http.StatusGatewayTimeout: - // Retry-able failures. Drain the body to reuse the connection. - if _, err := io.Copy(io.Discard, resp.Body); err != nil { - otel.Handle(err) - } - return newResponseError(resp.Header) + } + // Error cases. + + // server may return a message with the response + // body, so we read it to include in the error + // message to be returned. It will help in + // debugging the actual issue. + var respData bytes.Buffer + if _, err := io.Copy(&respData, resp.Body); err != nil { + return err + } + respStr := strings.TrimSpace(respData.String()) + if len(respStr) == 0 { + respStr = "(empty)" + } + bodyErr := fmt.Errorf("body: %s", respStr) + + switch resp.StatusCode { + case http.StatusTooManyRequests, + http.StatusBadGateway, + http.StatusServiceUnavailable, + http.StatusGatewayTimeout: + // Retryable failure. + return newResponseError(resp.Header, bodyErr) default: - return fmt.Errorf("failed to send to %s: %s", request.URL, resp.Status) + // Non-retryable failure. + return fmt.Errorf("failed to send to %s: %s (%w)", request.URL, resp.Status, bodyErr) } }) } @@ -291,24 +306,50 @@ func (r *request) reset(ctx context.Context) { // retryableError represents a request failure that can be retried. type retryableError struct { throttle int64 + err error } // newResponseError returns a retryableError and will extract any explicit -// throttle delay contained in headers. -func newResponseError(header http.Header) error { +// throttle delay contained in headers. The returned error wraps wrapped +// if it is not nil. 
+func newResponseError(header http.Header, wrapped error) error { var rErr retryableError if s, ok := header["Retry-After"]; ok { if t, err := strconv.ParseInt(s[0], 10, 64); err == nil { rErr.throttle = t } } + + rErr.err = wrapped return rErr } func (e retryableError) Error() string { + if e.err != nil { + return "retry-able request failure: " + e.err.Error() + } + return "retry-able request failure" } +func (e retryableError) Unwrap() error { + return e.err +} + +func (e retryableError) As(target interface{}) bool { + if e.err == nil { + return false + } + + switch v := target.(type) { + case **retryableError: + *v = &e + return true + default: + return false + } +} + // evaluate returns if err is retry-able. If it is and it includes an explicit // throttling delay, that delay is also returned. func evaluate(err error) (bool, time.Duration) { diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/doc.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/doc.go index 43534cbfba4..9fea75ad19c 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/doc.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/doc.go @@ -22,7 +22,7 @@ target URL to which the exporter sends telemetry. The value must contain a scheme ("http" or "https") and host. The value may additionally contain a port and a path. The value should not contain a query string or fragment. -The configuration can be overridden by [WithEndpoint], [WithEndpointURL], [WitnInsecure], and [WithURLPath] options. +The configuration can be overridden by [WithEndpoint], [WithEndpointURL], [WithInsecure], and [WithURLPath] options. OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_TRACES_HEADERS (default: none) - key-value pairs used as headers associated with HTTP requests. diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig/envconfig.go index 26a316d003d..f30bb66aeda 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig/envconfig.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig/envconfig.go @@ -15,6 +15,7 @@ import ( "strconv" "strings" "time" + "unicode" "go.opentelemetry.io/otel/internal/global" ) @@ -163,12 +164,16 @@ func stringToHeader(value string) map[string]string { global.Error(errors.New("missing '="), "parse headers", "input", header) continue } - name, err := url.PathUnescape(n) - if err != nil { - global.Error(err, "escape header key", "key", n) + + trimmedName := strings.TrimSpace(n) + + // Validate the key. + if !isValidHeaderKey(trimmedName) { + global.Error(errors.New("invalid header key"), "parse headers", "key", trimmedName) continue } - trimmedName := strings.TrimSpace(name) + + // Only decode the value. value, err := url.PathUnescape(v) if err != nil { global.Error(err, "escape header value", "value", v) @@ -189,3 +194,22 @@ func createCertPool(certBytes []byte) (*x509.CertPool, error) { } return cp, nil } + +func isValidHeaderKey(key string) bool { + if key == "" { + return false + } + for _, c := range key { + if !isTokenChar(c) { + return false + } + } + return true +} + +func isTokenChar(c rune) bool { + return c <= unicode.MaxASCII && (unicode.IsLetter(c) || + unicode.IsDigit(c) || + c == '!' 
|| c == '#' || c == '$' || c == '%' || c == '&' || c == '\'' || c == '*' || + c == '+' || c == '-' || c == '.' || c == '^' || c == '_' || c == '`' || c == '|' || c == '~') +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/options.go index 2ebbc752f4b..6a9c4d3a652 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/options.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/options.go @@ -98,7 +98,7 @@ func cleanPath(urlPath string, defaultPath string) string { return defaultPath } if !path.IsAbs(tmp) { - tmp = fmt.Sprintf("/%s", tmp) + tmp = "/" + tmp } return tmp } @@ -125,7 +125,7 @@ func NewGRPCConfig(opts ...GRPCOption) Config { if cfg.ServiceConfig != "" { cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultServiceConfig(cfg.ServiceConfig)) } - // Priroritize GRPCCredentials over Insecure (passing both is an error). + // Prioritize GRPCCredentials over Insecure (passing both is an error). if cfg.Traces.GRPCCredentials != nil { cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Traces.GRPCCredentials)) } else if cfg.Traces.Insecure { @@ -278,9 +278,7 @@ func WithEndpointURL(v string) GenericOption { cfg.Traces.Endpoint = u.Host cfg.Traces.URLPath = u.Path - if u.Scheme != "https" { - cfg.Traces.Insecure = true - } + cfg.Traces.Insecure = u.Scheme != "https" return cfg }) diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/options.go index 6497f3ccdd0..3559c5664f4 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/options.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/options.go @@ -62,8 +62,10 @@ func (w wrappedOption) applyHTTPOption(cfg otlpconfig.Config) otlpconfig.Config // // If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_TRACES_ENDPOINT // environment variable is set, and this option is not passed, that variable -// value will be used. If both are set, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT -// will take precedence. Note, both environment variables include the full +// value will be used. If both environment variables are set, +// OTEL_EXPORTER_OTLP_TRACES_ENDPOINT will take precedence. If an environment +// variable is set, and this option is passed, this option will take precedence. +// Note, both environment variables include the full // scheme and path, while WithEndpoint sets only the host and port. // // If both this option and WithEndpointURL are used, the last used option will @@ -82,8 +84,9 @@ func WithEndpoint(endpoint string) Option { // // If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_TRACES_ENDPOINT // environment variable is set, and this option is not passed, that variable -// value will be used. If both are set, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT -// will take precedence. +// value will be used. If both environment variables are set, +// OTEL_EXPORTER_OTLP_TRACES_ENDPOINT will take precedence. If an environment +// variable is set, and this option is passed, this option will take precedence. // // If both this option and WithEndpoint are used, the last used option will // take precedence. 
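The precedence rules spelled out in the doc comments above compose as follows. A sketch with illustrative endpoints, assuming only the public constructors of this release:

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
)

func main() {
	// Even if OTEL_EXPORTER_OTLP_TRACES_ENDPOINT is set in the environment,
	// explicitly passed options win. Between the two options below, the last
	// one used takes precedence, so this exporter targets collector-b.
	exp, err := otlptracehttp.New(context.Background(),
		otlptracehttp.WithEndpoint("collector-a:4318"),            // host and port only
		otlptracehttp.WithEndpointURL("https://collector-b:4318"), // full URL, wins
	)
	if err != nil {
		panic(err)
	}
	defer func() { _ = exp.Shutdown(context.Background()) }()
}
```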
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go index 14ad8c33b48..8ea156a0985 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go @@ -5,5 +5,5 @@ package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace" // Version is the current release version of the OpenTelemetry OTLP trace exporter in use. func Version() string { - return "1.28.0" + return "1.33.0" } diff --git a/vendor/go.opentelemetry.io/otel/exporters/prometheus/config.go b/vendor/go.opentelemetry.io/otel/exporters/prometheus/config.go index a10ab7f1df8..660675dd620 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/prometheus/config.go +++ b/vendor/go.opentelemetry.io/otel/exporters/prometheus/config.go @@ -7,6 +7,7 @@ import ( "strings" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/model" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk/metric" @@ -131,7 +132,10 @@ func WithoutScopeInfo() Option { // have special behavior based on their name. func WithNamespace(ns string) Option { return optionFunc(func(cfg config) config { - ns = sanitizeName(ns) + if model.NameValidationScheme != model.UTF8Validation { + // Only sanitize if prometheus does not support UTF-8. + ns = model.EscapeName(ns, model.NameEscapingScheme) + } if !strings.HasSuffix(ns, "_") { // namespace and metric names should be separated with an underscore, // adds a trailing underscore if there is not one already. diff --git a/vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter.go index d2e387e607c..50c95a16f7f 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter.go +++ b/vendor/go.opentelemetry.io/otel/exporters/prometheus/exporter.go @@ -11,11 +11,10 @@ import ( "slices" "strings" "sync" - "unicode" - "unicode/utf8" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" + "github.com/prometheus/common/model" "google.golang.org/protobuf/proto" "go.opentelemetry.io/otel" @@ -34,15 +33,14 @@ const ( scopeInfoMetricName = "otel_scope_info" scopeInfoDescription = "Instrumentation Scope metadata" + scopeNameLabel = "otel_scope_name" + scopeVersionLabel = "otel_scope_version" + traceIDExemplarKey = "trace_id" spanIDExemplarKey = "span_id" ) -var ( - scopeInfoKeys = [2]string{"otel_scope_name", "otel_scope_version"} - - errScopeInvalid = errors.New("invalid scope") -) +var errScopeInvalid = errors.New("invalid scope") // Exporter is a Prometheus Exporter that embeds the OTel metric.Reader // interface for easy instantiation with a MeterProvider. 
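The WithNamespace hunk above makes escaping conditional on prometheus/common's global validation scheme. A sketch of the effect on a dotted namespace (the namespace value is illustrative):

```go
package main

import (
	"go.opentelemetry.io/otel/exporters/prometheus"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

func main() {
	// Under the legacy validation scheme, "acme.app" is escaped (for example
	// to "acme_app"); under UTF-8 validation it is kept as-is. Either way a
	// trailing "_" separator is appended if missing.
	exporter, err := prometheus.New(prometheus.WithNamespace("acme.app"))
	if err != nil {
		panic(err)
	}
	_ = sdkmetric.NewMeterProvider(sdkmetric.WithReader(exporter))
}
```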
@@ -188,7 +186,11 @@ func (c *collector) Collect(ch chan<- prometheus.Metric) { } for _, scopeMetrics := range metrics.ScopeMetrics { - var keys, values [2]string + n := len(c.resourceKeyVals.keys) + 2 // resource attrs + scope name + scope version + kv := keyVals{ + keys: make([]string, 0, n), + vals: make([]string, 0, n), + } if !c.disableScopeInfo { scopeInfo, err := c.scopeInfo(scopeMetrics.Scope) @@ -203,10 +205,13 @@ func (c *collector) Collect(ch chan<- prometheus.Metric) { ch <- scopeInfo - keys = scopeInfoKeys - values = [2]string{scopeMetrics.Scope.Name, scopeMetrics.Scope.Version} + kv.keys = append(kv.keys, scopeNameLabel, scopeVersionLabel) + kv.vals = append(kv.vals, scopeMetrics.Scope.Name, scopeMetrics.Scope.Version) } + kv.keys = append(kv.keys, c.resourceKeyVals.keys...) + kv.vals = append(kv.vals, c.resourceKeyVals.vals...) + for _, m := range scopeMetrics.Metrics { typ := c.metricType(m) if typ == nil { @@ -225,25 +230,27 @@ func (c *collector) Collect(ch chan<- prometheus.Metric) { switch v := m.Data.(type) { case metricdata.Histogram[int64]: - addHistogramMetric(ch, v, m, keys, values, name, c.resourceKeyVals) + addHistogramMetric(ch, v, m, name, kv) case metricdata.Histogram[float64]: - addHistogramMetric(ch, v, m, keys, values, name, c.resourceKeyVals) + addHistogramMetric(ch, v, m, name, kv) case metricdata.Sum[int64]: - addSumMetric(ch, v, m, keys, values, name, c.resourceKeyVals) + addSumMetric(ch, v, m, name, kv) case metricdata.Sum[float64]: - addSumMetric(ch, v, m, keys, values, name, c.resourceKeyVals) + addSumMetric(ch, v, m, name, kv) case metricdata.Gauge[int64]: - addGaugeMetric(ch, v, m, keys, values, name, c.resourceKeyVals) + addGaugeMetric(ch, v, m, name, kv) case metricdata.Gauge[float64]: - addGaugeMetric(ch, v, m, keys, values, name, c.resourceKeyVals) + addGaugeMetric(ch, v, m, name, kv) } } } } -func addHistogramMetric[N int64 | float64](ch chan<- prometheus.Metric, histogram metricdata.Histogram[N], m metricdata.Metrics, ks, vs [2]string, name string, resourceKV keyVals) { +func addHistogramMetric[N int64 | float64](ch chan<- prometheus.Metric, histogram metricdata.Histogram[N], m metricdata.Metrics, name string, kv keyVals) { for _, dp := range histogram.DataPoints { - keys, values := getAttrs(dp.Attributes, ks, vs, resourceKV) + keys, values := getAttrs(dp.Attributes) + keys = append(keys, kv.keys...) + values = append(values, kv.vals...) desc := prometheus.NewDesc(name, m.Description, keys, nil) buckets := make(map[float64]uint64, len(dp.Bounds)) @@ -263,14 +270,16 @@ func addHistogramMetric[N int64 | float64](ch chan<- prometheus.Metric, histogra } } -func addSumMetric[N int64 | float64](ch chan<- prometheus.Metric, sum metricdata.Sum[N], m metricdata.Metrics, ks, vs [2]string, name string, resourceKV keyVals) { +func addSumMetric[N int64 | float64](ch chan<- prometheus.Metric, sum metricdata.Sum[N], m metricdata.Metrics, name string, kv keyVals) { valueType := prometheus.CounterValue if !sum.IsMonotonic { valueType = prometheus.GaugeValue } for _, dp := range sum.DataPoints { - keys, values := getAttrs(dp.Attributes, ks, vs, resourceKV) + keys, values := getAttrs(dp.Attributes) + keys = append(keys, kv.keys...) + values = append(values, kv.vals...) desc := prometheus.NewDesc(name, m.Description, keys, nil) m, err := prometheus.NewConstMetric(desc, valueType, float64(dp.Value), values...) 
@@ -278,14 +287,20 @@ func addSumMetric[N int64 | float64](ch chan<- prometheus.Metric, sum metricdata otel.Handle(err) continue } - m = addExemplars(m, dp.Exemplars) + // GaugeValues don't support Exemplars at this time + // https://github.com/prometheus/client_golang/blob/aef8aedb4b6e1fb8ac1c90790645169125594096/prometheus/metric.go#L199 + if valueType != prometheus.GaugeValue { + m = addExemplars(m, dp.Exemplars) + } ch <- m } } -func addGaugeMetric[N int64 | float64](ch chan<- prometheus.Metric, gauge metricdata.Gauge[N], m metricdata.Metrics, ks, vs [2]string, name string, resourceKV keyVals) { +func addGaugeMetric[N int64 | float64](ch chan<- prometheus.Metric, gauge metricdata.Gauge[N], m metricdata.Metrics, name string, kv keyVals) { for _, dp := range gauge.DataPoints { - keys, values := getAttrs(dp.Attributes, ks, vs, resourceKV) + keys, values := getAttrs(dp.Attributes) + keys = append(keys, kv.keys...) + values = append(values, kv.vals...) desc := prometheus.NewDesc(name, m.Description, keys, nil) m, err := prometheus.NewConstMetric(desc, prometheus.GaugeValue, float64(dp.Value), values...) @@ -297,61 +312,58 @@ func addGaugeMetric[N int64 | float64](ch chan<- prometheus.Metric, gauge metric } } -// getAttrs parses the attribute.Set to two lists of matching Prometheus-style -// keys and values. It sanitizes invalid characters and handles duplicate keys -// (due to sanitization) by sorting and concatenating the values following the spec. -func getAttrs(attrs attribute.Set, ks, vs [2]string, resourceKV keyVals) ([]string, []string) { - keysMap := make(map[string][]string) - itr := attrs.Iter() - for itr.Next() { - kv := itr.Attribute() - key := strings.Map(sanitizeRune, string(kv.Key)) - if _, ok := keysMap[key]; !ok { - keysMap[key] = []string{kv.Value.Emit()} - } else { - // if the sanitized key is a duplicate, append to the list of keys - keysMap[key] = append(keysMap[key], kv.Value.Emit()) - } - } - +// getAttrs converts the attribute.Set to two lists of matching Prometheus-style +// keys and values. +func getAttrs(attrs attribute.Set) ([]string, []string) { keys := make([]string, 0, attrs.Len()) values := make([]string, 0, attrs.Len()) - for key, vals := range keysMap { - keys = append(keys, key) - slices.Sort(vals) - values = append(values, strings.Join(vals, ";")) - } - - if ks[0] != "" { - keys = append(keys, ks[:]...) - values = append(values, vs[:]...) - } + itr := attrs.Iter() - for idx := range resourceKV.keys { - keys = append(keys, resourceKV.keys[idx]) - values = append(values, resourceKV.vals[idx]) + if model.NameValidationScheme == model.UTF8Validation { + // Do not perform sanitization if prometheus supports UTF-8. + for itr.Next() { + kv := itr.Attribute() + keys = append(keys, string(kv.Key)) + values = append(values, kv.Value.Emit()) + } + } else { + // It sanitizes invalid characters and handles duplicate keys + // (due to sanitization) by sorting and concatenating the values following the spec. 
+ keysMap := make(map[string][]string) + for itr.Next() { + kv := itr.Attribute() + key := model.EscapeName(string(kv.Key), model.NameEscapingScheme) + if _, ok := keysMap[key]; !ok { + keysMap[key] = []string{kv.Value.Emit()} + } else { + // if the sanitized key is a duplicate, append to the list of keys + keysMap[key] = append(keysMap[key], kv.Value.Emit()) + } + } + for key, vals := range keysMap { + keys = append(keys, key) + slices.Sort(vals) + values = append(values, strings.Join(vals, ";")) + } } - return keys, values } func createInfoMetric(name, description string, res *resource.Resource) (prometheus.Metric, error) { - keys, values := getAttrs(*res.Set(), [2]string{}, [2]string{}, keyVals{}) + keys, values := getAttrs(*res.Set()) desc := prometheus.NewDesc(name, description, keys, nil) return prometheus.NewConstMetric(desc, prometheus.GaugeValue, float64(1), values...) } func createScopeInfoMetric(scope instrumentation.Scope) (prometheus.Metric, error) { - keys := scopeInfoKeys[:] - desc := prometheus.NewDesc(scopeInfoMetricName, scopeInfoDescription, keys, nil) - return prometheus.NewConstMetric(desc, prometheus.GaugeValue, float64(1), scope.Name, scope.Version) -} + attrs := make([]attribute.KeyValue, 0, scope.Attributes.Len()+2) // resource attrs + scope name + scope version + attrs = append(attrs, scope.Attributes.ToSlice()...) + attrs = append(attrs, attribute.String(scopeNameLabel, scope.Name)) + attrs = append(attrs, attribute.String(scopeVersionLabel, scope.Version)) -func sanitizeRune(r rune) rune { - if unicode.IsLetter(r) || unicode.IsDigit(r) || r == ':' || r == '_' { - return r - } - return '_' + keys, values := getAttrs(attribute.NewSet(attrs...)) + desc := prometheus.NewDesc(scopeInfoMetricName, scopeInfoDescription, keys, nil) + return prometheus.NewConstMetric(desc, prometheus.GaugeValue, float64(1), values...) } var unitSuffixes = map[string]string{ @@ -392,7 +404,11 @@ var unitSuffixes = map[string]string{ // getName returns the sanitized name, prefixed with the namespace and suffixed with unit. func (c *collector) getName(m metricdata.Metrics, typ *dto.MetricType) string { - name := sanitizeName(m.Name) + name := m.Name + if model.NameValidationScheme != model.UTF8Validation { + // Only sanitize if prometheus does not support UTF-8. + name = model.EscapeName(name, model.NameEscapingScheme) + } addCounterSuffix := !c.withoutCounterSuffixes && *typ == dto.MetricType_COUNTER if addCounterSuffix { // Remove the _total suffix here, as we will re-add the total suffix @@ -411,59 +427,6 @@ func (c *collector) getName(m metricdata.Metrics, typ *dto.MetricType) string { return name } -func sanitizeName(n string) string { - // This algorithm is based on strings.Map from Go 1.19. - const replacement = '_' - - valid := func(i int, r rune) bool { - // Taken from - // https://github.com/prometheus/common/blob/dfbc25bd00225c70aca0d94c3c4bb7744f28ace0/model/metric.go#L92-L102 - if (r >= 'a' && r <= 'z') || (r >= 'A' && r <= 'Z') || r == '_' || r == ':' || (r >= '0' && r <= '9' && i > 0) { - return true - } - return false - } - - // This output buffer b is initialized on demand, the first time a - // character needs to be replaced. - var b strings.Builder - for i, c := range n { - if valid(i, c) { - continue - } - - if i == 0 && c >= '0' && c <= '9' { - // Prefix leading number with replacement character. 
- b.Grow(len(n) + 1) - _ = b.WriteByte(byte(replacement)) - break - } - b.Grow(len(n)) - _, _ = b.WriteString(n[:i]) - _ = b.WriteByte(byte(replacement)) - width := utf8.RuneLen(c) - n = n[i+width:] - break - } - - // Fast path for unchanged input. - if b.Cap() == 0 { // b.Grow was not called above. - return n - } - - for _, c := range n { - // Due to inlining, it is more performant to invoke WriteByte rather then - // WriteRune. - if valid(1, c) { // We are guaranteed to not be at the start. - _ = b.WriteByte(byte(c)) - } else { - _ = b.WriteByte(byte(replacement)) - } - } - - return b.String() -} - func (c *collector) metricType(m metricdata.Metrics) *dto.MetricType { switch v := m.Data.(type) { case metricdata.Histogram[int64], metricdata.Histogram[float64]: @@ -489,7 +452,7 @@ func (c *collector) createResourceAttributes(res *resource.Resource) { defer c.mu.Unlock() resourceAttrs, _ := res.Set().Filter(c.resourceAttributesFilter) - resourceKeys, resourceValues := getAttrs(resourceAttrs, [2]string{}, [2]string{}, keyVals{}) + resourceKeys, resourceValues := getAttrs(resourceAttrs) c.resourceKeyVals = keyVals{keys: resourceKeys, vals: resourceValues} } @@ -584,7 +547,8 @@ func addExemplars[N int64 | float64](m prometheus.Metric, exemplars []metricdata func attributesToLabels(attrs []attribute.KeyValue) prometheus.Labels { labels := make(map[string]string) for _, attr := range attrs { - labels[string(attr.Key)] = attr.Value.Emit() + key := model.EscapeName(string(attr.Key), model.NameEscapingScheme) + labels[key] = attr.Value.Emit() } return labels } diff --git a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go index 822d8479474..691d96c7554 100644 --- a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go +++ b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go @@ -49,12 +49,11 @@ func AsBoolSlice(v interface{}) []bool { if rv.Type().Kind() != reflect.Array { return nil } - var zero bool - correctLen := rv.Len() - correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) - cpy := reflect.New(correctType) - _ = reflect.Copy(cpy.Elem(), rv) - return cpy.Elem().Slice(0, correctLen).Interface().([]bool) + cpy := make([]bool, rv.Len()) + if len(cpy) > 0 { + _ = reflect.Copy(reflect.ValueOf(cpy), rv) + } + return cpy } // AsInt64Slice converts an int64 array into a slice into with same elements as array. @@ -63,12 +62,11 @@ func AsInt64Slice(v interface{}) []int64 { if rv.Type().Kind() != reflect.Array { return nil } - var zero int64 - correctLen := rv.Len() - correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) - cpy := reflect.New(correctType) - _ = reflect.Copy(cpy.Elem(), rv) - return cpy.Elem().Slice(0, correctLen).Interface().([]int64) + cpy := make([]int64, rv.Len()) + if len(cpy) > 0 { + _ = reflect.Copy(reflect.ValueOf(cpy), rv) + } + return cpy } // AsFloat64Slice converts a float64 array into a slice into with same elements as array. 
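The attribute.go hunks above and below all switch from materializing an intermediate array type via reflect.ArrayOf and reflect.New to copying straight into a freshly made slice. The pattern in isolation:

```go
package main

import (
	"fmt"
	"reflect"
)

// asInt64Slice mirrors the simplified conversion: copy a reflect-wrapped
// [N]int64 array into a plain []int64 with make + reflect.Copy.
func asInt64Slice(v interface{}) []int64 {
	rv := reflect.ValueOf(v)
	if rv.Type().Kind() != reflect.Array {
		return nil
	}
	cpy := make([]int64, rv.Len())
	if len(cpy) > 0 {
		_ = reflect.Copy(reflect.ValueOf(cpy), rv)
	}
	return cpy
}

func main() {
	fmt.Println(asInt64Slice([3]int64{1, 2, 3})) // [1 2 3]
}
```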
@@ -77,12 +75,11 @@ func AsFloat64Slice(v interface{}) []float64 { if rv.Type().Kind() != reflect.Array { return nil } - var zero float64 - correctLen := rv.Len() - correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) - cpy := reflect.New(correctType) - _ = reflect.Copy(cpy.Elem(), rv) - return cpy.Elem().Slice(0, correctLen).Interface().([]float64) + cpy := make([]float64, rv.Len()) + if len(cpy) > 0 { + _ = reflect.Copy(reflect.ValueOf(cpy), rv) + } + return cpy } // AsStringSlice converts a string array into a slice into with same elements as array. @@ -91,10 +88,9 @@ func AsStringSlice(v interface{}) []string { if rv.Type().Kind() != reflect.Array { return nil } - var zero string - correctLen := rv.Len() - correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) - cpy := reflect.New(correctType) - _ = reflect.Copy(cpy.Elem(), rv) - return cpy.Elem().Slice(0, correctLen).Interface().([]string) + cpy := make([]string, rv.Len()) + if len(cpy) > 0 { + _ = reflect.Copy(reflect.ValueOf(cpy), rv) + } + return cpy } diff --git a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go index 3a0cc42f6a4..ae92a425166 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go @@ -13,7 +13,7 @@ import ( // unwrapper unwraps to return the underlying instrument implementation. type unwrapper interface { - Unwrap() metric.Observable + unwrap() metric.Observable } type afCounter struct { @@ -40,7 +40,7 @@ func (i *afCounter) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -func (i *afCounter) Unwrap() metric.Observable { +func (i *afCounter) unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Float64ObservableCounter) } @@ -71,7 +71,7 @@ func (i *afUpDownCounter) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -func (i *afUpDownCounter) Unwrap() metric.Observable { +func (i *afUpDownCounter) unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Float64ObservableUpDownCounter) } @@ -102,7 +102,7 @@ func (i *afGauge) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -func (i *afGauge) Unwrap() metric.Observable { +func (i *afGauge) unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Float64ObservableGauge) } @@ -133,7 +133,7 @@ func (i *aiCounter) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -func (i *aiCounter) Unwrap() metric.Observable { +func (i *aiCounter) unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Int64ObservableCounter) } @@ -164,7 +164,7 @@ func (i *aiUpDownCounter) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -func (i *aiUpDownCounter) Unwrap() metric.Observable { +func (i *aiUpDownCounter) unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Int64ObservableUpDownCounter) } @@ -195,7 +195,7 @@ func (i *aiGauge) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -func (i *aiGauge) Unwrap() metric.Observable { +func (i *aiGauge) unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Int64ObservableGauge) } diff --git a/vendor/go.opentelemetry.io/otel/internal/global/meter.go b/vendor/go.opentelemetry.io/otel/internal/global/meter.go index e3db438a09f..a6acd8dca66 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/meter.go +++ 
b/vendor/go.opentelemetry.io/otel/internal/global/meter.go @@ -5,6 +5,7 @@ package global // import "go.opentelemetry.io/otel/internal/global" import ( "container/list" + "context" "reflect" "sync" @@ -66,6 +67,7 @@ func (p *meterProvider) Meter(name string, opts ...metric.MeterOption) metric.Me name: name, version: c.InstrumentationVersion(), schema: c.SchemaURL(), + attrs: c.InstrumentationAttributes(), } if p.meters == nil { @@ -472,8 +474,7 @@ func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) defer m.mtx.Unlock() if m.delegate != nil { - insts = unwrapInstruments(insts) - return m.delegate.RegisterCallback(f, insts...) + return m.delegate.RegisterCallback(unwrapCallback(f), unwrapInstruments(insts)...) } reg := ®istration{instruments: insts, function: f} @@ -487,15 +488,11 @@ func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) return reg, nil } -type wrapped interface { - unwrap() metric.Observable -} - func unwrapInstruments(instruments []metric.Observable) []metric.Observable { out := make([]metric.Observable, 0, len(instruments)) for _, inst := range instruments { - if in, ok := inst.(wrapped); ok { + if in, ok := inst.(unwrapper); ok { out = append(out, in.unwrap()) } else { out = append(out, inst) @@ -515,9 +512,61 @@ type registration struct { unregMu sync.Mutex } -func (c *registration) setDelegate(m metric.Meter) { - insts := unwrapInstruments(c.instruments) +type unwrapObs struct { + embedded.Observer + obs metric.Observer +} + +// unwrapFloat64Observable returns an expected metric.Float64Observable after +// unwrapping the global object. +func unwrapFloat64Observable(inst metric.Float64Observable) metric.Float64Observable { + if unwrapped, ok := inst.(unwrapper); ok { + if floatObs, ok := unwrapped.unwrap().(metric.Float64Observable); ok { + // Note: if the unwrapped object does not + // unwrap as an observable for either of the + // predicates here, it means an internal bug in + // this package. We avoid logging an error in + // this case, because the SDK has to try its + // own type conversion on the object. The SDK + // will see this and be forced to respond with + // its own error. + // + // This code uses a double-nested if statement + // to avoid creating a branch that is + // impossible to cover. + inst = floatObs + } + } + return inst +} + +// unwrapInt64Observable returns an expected metric.Int64Observable after +// unwrapping the global object. +func unwrapInt64Observable(inst metric.Int64Observable) metric.Int64Observable { + if unwrapped, ok := inst.(unwrapper); ok { + if unint, ok := unwrapped.unwrap().(metric.Int64Observable); ok { + // See the comment in unwrapFloat64Observable(). + inst = unint + } + } + return inst +} + +func (uo *unwrapObs) ObserveFloat64(inst metric.Float64Observable, value float64, opts ...metric.ObserveOption) { + uo.obs.ObserveFloat64(unwrapFloat64Observable(inst), value, opts...) +} + +func (uo *unwrapObs) ObserveInt64(inst metric.Int64Observable, value int64, opts ...metric.ObserveOption) { + uo.obs.ObserveInt64(unwrapInt64Observable(inst), value, opts...) +} +func unwrapCallback(f metric.Callback) metric.Callback { + return func(ctx context.Context, obs metric.Observer) error { + return f(ctx, &unwrapObs{obs: obs}) + } +} + +func (c *registration) setDelegate(m metric.Meter) { c.unregMu.Lock() defer c.unregMu.Unlock() @@ -526,7 +575,7 @@ func (c *registration) setDelegate(m metric.Meter) { return } - reg, err := m.RegisterCallback(c.function, insts...) 
+ reg, err := m.RegisterCallback(unwrapCallback(c.function), unwrapInstruments(c.instruments)...) if err != nil { GetErrorHandler().Handle(err) return diff --git a/vendor/go.opentelemetry.io/otel/internal/global/trace.go b/vendor/go.opentelemetry.io/otel/internal/global/trace.go index e31f442b48f..8982aa0dc56 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/trace.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/trace.go @@ -25,6 +25,7 @@ import ( "sync" "sync/atomic" + "go.opentelemetry.io/auto/sdk" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/trace" @@ -87,6 +88,7 @@ func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T name: name, version: c.InstrumentationVersion(), schema: c.SchemaURL(), + attrs: c.InstrumentationAttributes(), } if p.tracers == nil { @@ -102,7 +104,12 @@ func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T return t } -type il struct{ name, version, schema string } +type il struct { + name string + version string + schema string + attrs attribute.Set +} // tracer is a placeholder for a trace.Tracer. // @@ -139,6 +146,30 @@ func (t *tracer) Start(ctx context.Context, name string, opts ...trace.SpanStart return delegate.(trace.Tracer).Start(ctx, name, opts...) } + return t.newSpan(ctx, autoInstEnabled, name, opts) +} + +// autoInstEnabled determines if the auto-instrumentation SDK span is returned +// from the tracer when not backed by a delegate and auto-instrumentation has +// attached to this process. +// +// The auto-instrumentation is expected to overwrite this value to true when it +// attaches. By default, this will point to false and mean a tracer will return +// a nonRecordingSpan by default. +var autoInstEnabled = new(bool) + +func (t *tracer) newSpan(ctx context.Context, autoSpan *bool, name string, opts []trace.SpanStartOption) (context.Context, trace.Span) { + // autoInstEnabled is passed to newSpan via the autoSpan parameter. This is + // so the auto-instrumentation can define a uprobe for (*t).newSpan and be + // provided with the address of the bool autoInstEnabled points to. It + // needs to be a parameter so that pointer can be reliably determined, it + // should not be read from the global. + + if *autoSpan { + tracer := sdk.TracerProvider().Tracer(t.name, t.opts...) + return tracer.Start(ctx, name, opts...) + } + s := nonRecordingSpan{sc: trace.SpanContextFromContext(ctx), tracer: t} ctx = trace.ContextWithSpan(ctx, s) return ctx, s diff --git a/vendor/go.opentelemetry.io/otel/log/DESIGN.md b/vendor/go.opentelemetry.io/otel/log/DESIGN.md index 2bb8c3a643f..568df49d96e 100644 --- a/vendor/go.opentelemetry.io/otel/log/DESIGN.md +++ b/vendor/go.opentelemetry.io/otel/log/DESIGN.md @@ -26,14 +26,12 @@ This proposed design aims to: The API is published as a single `go.opentelemetry.io/otel/log` Go module. -The module name is compliant with -[Artifact Naming](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/logs/bridge-api.md#artifact-naming) -and the package structure is the same as for Trace API and Metrics API. - +The package structure is similar to Trace API and Metrics API. The Go module consists of the following packages: - `go.opentelemetry.io/otel/log` - `go.opentelemetry.io/otel/log/embedded` +- `go.opentelemetry.io/otel/log/logtest` - `go.opentelemetry.io/otel/log/noop` Rejected alternative: @@ -105,16 +103,16 @@ is defined as `Record` struct in [record.go](record.go). 
is accessed using following methods: ```go -func (r *Record) Timestamp() time.Time -func (r *Record) SetTimestamp(t time.Time) +func (r *Record) Timestamp() time.Time +func (r *Record) SetTimestamp(t time.Time) ``` [`ObservedTimestamp`](https://opentelemetry.io/docs/specs/otel/logs/data-model/#field-observedtimestamp) is accessed using following methods: ```go -func (r *Record) ObservedTimestamp() time.Time -func (r *Record) SetObservedTimestamp(t time.Time) +func (r *Record) ObservedTimestamp() time.Time +func (r *Record) SetObservedTimestamp(t time.Time) ``` [`SeverityNumber`](https://opentelemetry.io/docs/specs/otel/logs/data-model/#field-severitynumber) @@ -253,6 +251,23 @@ Rejected alternatives: - [Add XYZ method to Logger](#add-xyz-method-to-logger) - [Rename KeyValue to Attr](#rename-keyvalue-to-attr) +### Logger.Enabled + +The `Enabled` method implements the [`Enabled` operation](https://opentelemetry.io/docs/specs/otel/logs/bridge-api/#enabled). + +[`Context` associated with the `LogRecord`](https://opentelemetry.io/docs/specs/otel/context/) +is accepted as a `context.Context` method argument. + +Calls to `Enabled` are supposed to be on the hot path and the list of arguments +can be extended in the future. Therefore, in order to reduce the number of heap +allocations and make it possible to handle new arguments, `Enabled` accepts +an `EnabledParameters` struct, defined in [logger.go](logger.go), as the second +method argument. + +The `EnabledParameters` struct uses fields, instead of getters and setters, to allow +simpler usage, such as configuring the `EnabledParameters` in the same line +where `Enabled` is called. + ### noop package The `go.opentelemetry.io/otel/log/noop` package provides @@ -307,7 +322,7 @@ The API needs to evolve orthogonally to `slog`. `slog` is not compliant with the [Logs Bridge API](https://opentelemetry.io/docs/specs/otel/logs/bridge-api/). and we cannot expect the Go team to make `slog` compliant with it. -The interoperabilty can be achieved using [a log bridge](https://opentelemetry.io/docs/specs/otel/glossary/#log-appender--bridge). +The interoperability can be achieved using [a log bridge](https://opentelemetry.io/docs/specs/otel/glossary/#log-appender--bridge). You can read more about OpenTelemetry Logs design on [opentelemetry.io](https://opentelemetry.io/docs/concepts/signals/logs/). diff --git a/vendor/go.opentelemetry.io/otel/log/doc.go b/vendor/go.opentelemetry.io/otel/log/doc.go index 420b6898e89..18cbd1cb2e5 100644 --- a/vendor/go.opentelemetry.io/otel/log/doc.go +++ b/vendor/go.opentelemetry.io/otel/log/doc.go @@ -2,10 +2,12 @@ // SPDX-License-Identifier: Apache-2.0 /* -Package log provides the OpenTelemetry Logs Bridge API. +Package log provides the OpenTelemetry Logs API. -This package is intended to be a bridge between existing logging libraries and -OpenTelemetry. It is not designed to be a logging API itself. +This package is intended to be used by bridges between existing logging +libraries and OpenTelemetry. Users should not directly use this package as a +logging library. Instead, install one of the bridges listed in the +[registry], and use the associated logging library. # API Implementations @@ -68,5 +70,7 @@ It is strongly recommended that authors only embed go.opentelemetry.io/otel/log/noop if they choose this default behavior. That implementation is the only one OpenTelemetry authors can guarantee will fully implement all the API interfaces when a user updates their API.
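The Logger.Enabled section above translates into the following bridge-side pattern. A sketch assuming the API surface shown in this patch (EnabledParameters with a plain Severity field):

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel/log"
	"go.opentelemetry.io/otel/log/global"
)

func emitIfEnabled(ctx context.Context, logger log.Logger, body string) {
	// EnabledParameters exposes plain fields, so it can be built inline on
	// the hot path without extra allocations.
	if !logger.Enabled(ctx, log.EnabledParameters{Severity: log.SeverityInfo}) {
		return
	}
	var r log.Record
	r.SetSeverity(log.SeverityInfo)
	r.SetBody(log.StringValue(body))
	logger.Emit(ctx, r)
}

func main() {
	emitIfEnabled(context.Background(), global.Logger("example/bridge"), "hello")
}
```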
+ +[registry]: https://opentelemetry.io/ecosystem/registry/?language=go&component=log-bridge */ package log // import "go.opentelemetry.io/otel/log" diff --git a/vendor/go.opentelemetry.io/otel/log/global/README.md b/vendor/go.opentelemetry.io/otel/log/global/README.md new file mode 100644 index 00000000000..11e5afefc01 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/log/global/README.md @@ -0,0 +1,3 @@ +# Log Global + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/log/global)](https://pkg.go.dev/go.opentelemetry.io/otel/log/global) diff --git a/vendor/go.opentelemetry.io/otel/log/global/log.go b/vendor/go.opentelemetry.io/otel/log/global/log.go new file mode 100644 index 00000000000..71ec577986d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/log/global/log.go @@ -0,0 +1,49 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +/* +Package global provides access to a global implementation of the OpenTelemetry +Logs Bridge API. + +This package is experimental. It will be deprecated and removed when the [log] +package becomes stable. Its functionality will be migrated to +go.opentelemetry.io/otel. +*/ +package global // import "go.opentelemetry.io/otel/log/global" + +import ( + "go.opentelemetry.io/otel/log" + "go.opentelemetry.io/otel/log/internal/global" +) + +// Logger returns a [log.Logger] configured with the provided name and options +// from the globally configured [log.LoggerProvider]. +// +// If this is called before a global LoggerProvider is configured, the returned +// Logger will be a No-Op implementation of a Logger. When a global +// LoggerProvider is registered for the first time, the returned Logger is +// updated in-place to report to this new LoggerProvider. There is no need to +// call this function again for an updated instance. +// +// This is a convenience function. It is equivalent to: +// +// GetLoggerProvider().Logger(name, options...) +func Logger(name string, options ...log.LoggerOption) log.Logger { + return GetLoggerProvider().Logger(name, options...) +} + +// GetLoggerProvider returns the globally configured [log.LoggerProvider]. +// +// If a global LoggerProvider has not been configured with [SetLoggerProvider], +// the returned LoggerProvider will be a No-Op implementation. When +// a global LoggerProvider is registered for the first time, the returned +// LoggerProvider and all of its created Loggers are updated in-place. There is +// no need to call this function again for an updated instance. +func GetLoggerProvider() log.LoggerProvider { + return global.GetLoggerProvider() +} + +// SetLoggerProvider configures provider as the global [log.LoggerProvider]. +func SetLoggerProvider(provider log.LoggerProvider) { + global.SetLoggerProvider(provider) +} diff --git a/vendor/go.opentelemetry.io/otel/log/internal/global/log.go b/vendor/go.opentelemetry.io/otel/log/internal/global/log.go new file mode 100644 index 00000000000..d97ee966350 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/log/internal/global/log.go @@ -0,0 +1,107 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package global // import "go.opentelemetry.io/otel/log/internal/global" + +import ( + "context" + "sync" + "sync/atomic" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/log" + "go.opentelemetry.io/otel/log/embedded" +) + +// instLib defines the instrumentation library a logger is created for. +// +// Do not use sdk/instrumentation (API cannot depend on the SDK).
+type instLib struct { + name string + version string + schemaURL string + attrs attribute.Set +} + +type loggerProvider struct { + embedded.LoggerProvider + + mu sync.Mutex + loggers map[instLib]*logger + delegate log.LoggerProvider +} + +// Compile-time guarantee loggerProvider implements LoggerProvider. +var _ log.LoggerProvider = (*loggerProvider)(nil) + +func (p *loggerProvider) Logger(name string, options ...log.LoggerOption) log.Logger { + p.mu.Lock() + defer p.mu.Unlock() + + if p.delegate != nil { + return p.delegate.Logger(name, options...) + } + + cfg := log.NewLoggerConfig(options...) + key := instLib{ + name: name, + version: cfg.InstrumentationVersion(), + schemaURL: cfg.SchemaURL(), + attrs: cfg.InstrumentationAttributes(), + } + + if p.loggers == nil { + l := &logger{name: name, options: options} + p.loggers = map[instLib]*logger{key: l} + return l + } + + if l, ok := p.loggers[key]; ok { + return l + } + + l := &logger{name: name, options: options} + p.loggers[key] = l + return l +} + +func (p *loggerProvider) setDelegate(provider log.LoggerProvider) { + p.mu.Lock() + defer p.mu.Unlock() + + p.delegate = provider + for _, l := range p.loggers { + l.setDelegate(provider) + } + p.loggers = nil // Only set logger delegates once. +} + +type logger struct { + embedded.Logger + + name string + options []log.LoggerOption + + delegate atomic.Value // log.Logger +} + +// Compile-time guarantee logger implements Logger. +var _ log.Logger = (*logger)(nil) + +func (l *logger) Emit(ctx context.Context, r log.Record) { + if del, ok := l.delegate.Load().(log.Logger); ok { + del.Emit(ctx, r) + } +} + +func (l *logger) Enabled(ctx context.Context, param log.EnabledParameters) bool { + var enabled bool + if del, ok := l.delegate.Load().(log.Logger); ok { + enabled = del.Enabled(ctx, param) + } + return enabled +} + +func (l *logger) setDelegate(provider log.LoggerProvider) { + l.delegate.Store(provider.Logger(l.name, l.options...)) +} diff --git a/vendor/go.opentelemetry.io/otel/log/internal/global/state.go b/vendor/go.opentelemetry.io/otel/log/internal/global/state.go new file mode 100644 index 00000000000..dbe1c2fbfb6 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/log/internal/global/state.go @@ -0,0 +1,53 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package global // import "go.opentelemetry.io/otel/log/internal/global" + +import ( + "errors" + "sync" + "sync/atomic" + + "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/log" +) + +var ( + globalLoggerProvider = defaultLoggerProvider() + + delegateLoggerOnce sync.Once +) + +func defaultLoggerProvider() *atomic.Value { + v := &atomic.Value{} + v.Store(loggerProviderHolder{provider: &loggerProvider{}}) + return v +} + +type loggerProviderHolder struct { + provider log.LoggerProvider +} + +// GetLoggerProvider returns the global LoggerProvider. +func GetLoggerProvider() log.LoggerProvider { + return globalLoggerProvider.Load().(loggerProviderHolder).provider +} + +// SetLoggerProvider sets the global LoggerProvider. 
+func SetLoggerProvider(provider log.LoggerProvider) { + current := GetLoggerProvider() + if _, cOk := current.(*loggerProvider); cOk { + if _, mpOk := provider.(*loggerProvider); mpOk && current == provider { + err := errors.New("invalid delegation: LoggerProvider self-delegation") + global.Error(err, "No delegate will be configured") + return + } + } + + delegateLoggerOnce.Do(func() { + if def, ok := current.(*loggerProvider); ok { + def.setDelegate(provider) + } + }) + globalLoggerProvider.Store(loggerProviderHolder{provider: provider}) +} diff --git a/vendor/go.opentelemetry.io/otel/log/keyvalue.go b/vendor/go.opentelemetry.io/otel/log/keyvalue.go index 8258defe360..2e1d30c1b88 100644 --- a/vendor/go.opentelemetry.io/otel/log/keyvalue.go +++ b/vendor/go.opentelemetry.io/otel/log/keyvalue.go @@ -76,7 +76,8 @@ func IntValue(v int) Value { return Int64Value(int64(v)) } // Int64Value returns a [Value] for an int64. func Int64Value(v int64) Value { - return Value{num: uint64(v), any: KindInt64} + // This can be later converted back to int64 (overflow not checked). + return Value{num: uint64(v), any: KindInt64} // nolint:gosec } // Float64Value returns a [Value] for a float64. @@ -146,7 +147,10 @@ func (v Value) AsInt64() int64 { // asInt64 returns the value held by v as an int64. If v is not of KindInt64, // this will return garbage. -func (v Value) asInt64() int64 { return int64(v.num) } +func (v Value) asInt64() int64 { + // Assumes v.num was a valid int64 (overflow not checked). + return int64(v.num) // nolint: gosec +} // AsBool returns the value held by v as a bool. func (v Value) AsBool() bool { @@ -289,7 +293,8 @@ func (v Value) String() string { case KindString: return v.asString() case KindInt64: - return strconv.FormatInt(int64(v.num), 10) + // Assumes v.num was a valid int64 (overflow not checked). + return strconv.FormatInt(int64(v.num), 10) // nolint: gosec case KindFloat64: return strconv.FormatFloat(v.asFloat64(), 'g', -1, 64) case KindBool: @@ -351,16 +356,19 @@ func Bool(key string, value bool) KeyValue { } // Bytes returns a KeyValue for a []byte value. +// The passed slice must not be changed after it is passed. func Bytes(key string, value []byte) KeyValue { return KeyValue{key, BytesValue(value)} } // Slice returns a KeyValue for a []Value value. +// The passed slice must not be changed after it is passed. func Slice(key string, value ...Value) KeyValue { return KeyValue{key, SliceValue(value...)} } // Map returns a KeyValue for a map value. +// The passed slice must not be changed after it is passed. func Map(key string, value ...KeyValue) KeyValue { return KeyValue{key, MapValue(value...)} } diff --git a/vendor/go.opentelemetry.io/otel/log/logger.go b/vendor/go.opentelemetry.io/otel/log/logger.go index df2e88ea6b2..0773a49b608 100644 --- a/vendor/go.opentelemetry.io/otel/log/logger.go +++ b/vendor/go.opentelemetry.io/otel/log/logger.go @@ -28,29 +28,35 @@ type Logger interface { // // Implementations of this method need to be safe for a user to call // concurrently. + // + // Notice: Emit is intended to be used by log bridges. + // It should not be used for writing instrumentation. Emit(ctx context.Context, record Record) // Enabled returns whether the Logger emits for the given context and - // record. + // param.
// - // The passed record is likely to be a partial record with only the - // bridge-relevant information being provided (e.g a record with only the + // The passed param is likely to be a partial record with only the + // bridge-relevant information being provided (e.g. a param with only the // Severity set). If a Logger needs more information than is provided, it // is said to be in an indeterminate state (see below). // // The returned value will be true when the Logger will emit for the - // provided context and record, and will be false if the Logger will not + // provided context and param, and will be false if the Logger will not // emit. The returned value may be true or false in an indeterminate state. // An implementation should default to returning true for an indeterminate // state, but may return false if valid reasons in particular circumstances // exist (e.g. performance, correctness). // - // The record should not be held by the implementation. A copy should be + // The param should not be held by the implementation. A copy should be // made if the record needs to be held after the call returns. // // Implementations of this method need to be safe for a user to call // concurrently. - Enabled(ctx context.Context, record Record) bool + // + // Notice: Enabled is intended to be used by log bridges. + // It should not be used for writing instrumentation. + Enabled(ctx context.Context, param EnabledParameters) bool } // LoggerOption applies configuration options to a [Logger]. @@ -129,3 +135,8 @@ func WithSchemaURL(schemaURL string) LoggerOption { return config }) } + +// EnabledParameters represents the payload for [Logger]'s Enabled method. +type EnabledParameters struct { + Severity Severity +} diff --git a/vendor/go.opentelemetry.io/otel/log/noop/noop.go b/vendor/go.opentelemetry.io/otel/log/noop/noop.go index d2e21edba66..f45a7c7e0b3 100644 --- a/vendor/go.opentelemetry.io/otel/log/noop/noop.go +++ b/vendor/go.opentelemetry.io/otel/log/noop/noop.go @@ -47,4 +47,4 @@ type Logger struct{ embedded.Logger } func (Logger) Emit(context.Context, log.Record) {} // Enabled returns false. No log records are ever emitted. -func (Logger) Enabled(context.Context, log.Record) bool { return false } +func (Logger) Enabled(context.Context, log.EnabledParameters) bool { return false } diff --git a/vendor/go.opentelemetry.io/otel/log/provider.go b/vendor/go.opentelemetry.io/otel/log/provider.go index caa89ac024e..5c8ca328f87 100644 --- a/vendor/go.opentelemetry.io/otel/log/provider.go +++ b/vendor/go.opentelemetry.io/otel/log/provider.go @@ -18,8 +18,19 @@ type LoggerProvider interface { // Logger returns a new [Logger] with the provided name and configuration. // + // The name needs to uniquely identify the source of logged code. It is + // recommended that name is the Go package name of the library using a log + // bridge (note: this is not the name of the bridge package). Most + // commonly, this means a bridge will need to accept this value from its + // users. + // // If name is empty, implementations need to provide a default name. + // + // The version of the packages using a bridge can be critical information + // to include when logging. The bridge should accept this version + // information and use the [WithInstrumentationVersion] option to configure + // the Logger appropriately. + // // Implementations of this method need to be safe for a user to call // concurrently.
Logger(name string, options ...LoggerOption) Logger } diff --git a/vendor/go.opentelemetry.io/otel/log/record.go b/vendor/go.opentelemetry.io/otel/log/record.go index 96302f00624..7cf5446a041 100644 --- a/vendor/go.opentelemetry.io/otel/log/record.go +++ b/vendor/go.opentelemetry.io/otel/log/record.go @@ -16,6 +16,9 @@ const attributesInlineCount = 5 // Record represents a log record. type Record struct { + // Ensure forward compatibility by explicitly making this not comparable. + noCmp [0]func() //nolint: unused // This is indeed used. + timestamp time.Time observedTimestamp time.Time severity Severity diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go index 728115045bb..34852a47b21 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go +++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go @@ -3,6 +3,8 @@ package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation" +import "go.opentelemetry.io/otel/attribute" + // Scope represents the instrumentation scope. type Scope struct { // Name is the name of the instrumentation scope. This should be the @@ -12,4 +14,6 @@ type Scope struct { Version string // SchemaURL of the telemetry emitted by the scope. SchemaURL string + // Attributes of the telemetry emitted by the scope. + Attributes attribute.Set } diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/DESIGN.md b/vendor/go.opentelemetry.io/otel/sdk/log/DESIGN.md index 6d73150db5d..2e0fb15e29c 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/log/DESIGN.md +++ b/vendor/go.opentelemetry.io/otel/sdk/log/DESIGN.md @@ -5,13 +5,28 @@ `go.opentelemetry.io/otel/sdk/log` provides Logs SDK compliant with the [specification](https://opentelemetry.io/docs/specs/otel/logs/sdk/). -The main and recommended use case is to configure the SDK to use an OTLP -exporter with a batch processor.[^1] Therefore, the design aims to be -high-performant in this scenario. - The prototype was created in [#4955](https://github.com/open-telemetry/opentelemetry-go/pull/4955). +## Background + +The goal is to design the exported API of the SDK to have low performance +overhead. Most importantly, the design should reduce the number of heap +allocations and even make a zero-allocation implementation possible. +Reducing the number of heap allocations lowers GC pressure, which can +produce some of the largest improvements in performance.[^1] + +The main and recommended use case is to configure the SDK to use an OTLP +exporter with a batch processor.[^2] Therefore, the implementation aims to be +high-performant in this scenario. Some users that require high throughput may +also want to use e.g. a [user_events](https://docs.kernel.org/trace/user_events.html), +[LTTng](https://lttng.org/docs/v2.13/#doc-tracing-your-own-user-application) +or [ETW](https://learn.microsoft.com/en-us/windows/win32/etw/about-event-tracing) +exporter with a simple processor. Users may also want to use an +[OTLP File](https://opentelemetry.io/docs/specs/otel/protocol/file-exporter/) +or [Standard Output](https://opentelemetry.io/docs/specs/otel/logs/sdk_exporters/stdout/) +exporter in order to emit logs to standard output/error or files. + ## Modules structure The SDK is published as a single `go.opentelemetry.io/otel/sdk/log` Go module.
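The recommended wiring described in the Background section (an exporter behind a batch processor) looks like this in practice. A sketch in which stdoutlog stands in for the usual OTLP exporter:

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel/exporters/stdout/stdoutlog"
	"go.opentelemetry.io/otel/log/global"
	sdklog "go.opentelemetry.io/otel/sdk/log"
)

func main() {
	exporter, err := stdoutlog.New()
	if err != nil {
		panic(err)
	}
	provider := sdklog.NewLoggerProvider(
		sdklog.WithProcessor(sdklog.NewBatchProcessor(exporter)),
	)
	defer func() { _ = provider.Shutdown(context.Background()) }()

	// Bridges obtain Loggers through the global provider.
	global.SetLoggerProvider(provider)
}
```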
@@ -107,15 +122,15 @@ The benchmark results can be found in [the prototype](https://github.com/open-te ## Rejected alternatives -### Represent both LogRecordProcessor and LogRecordExporter as Expoter +### Represent both LogRecordProcessor and LogRecordExporter as Exporter Because the [LogRecordProcessor](https://opentelemetry.io/docs/specs/otel/logs/sdk/#logrecordprocessor) and the [LogRecordProcessor](https://opentelemetry.io/docs/specs/otel/logs/sdk/#logrecordexporter) abstractions are so similar, there was a proposal to unify them under -single `Expoter` interface.[^2] +single `Exporter` interface.[^3] However, introducing a `Processor` interface makes it easier -to create custom processor decorators[^3] +to create custom processor decorators[^4] and makes the design more aligned with the specification. ### Embed log.Record @@ -131,6 +146,31 @@ provided via API. Moreover it is safer to have these abstraction decoupled. E.g. there can be a need for some fields that can be set via API and cannot be modified by the processors. -[^1]: [OpenTelemetry Logging](https://opentelemetry.io/docs/specs/otel/logs) -[^2]: [Conversation on representing LogRecordProcessor and LogRecordExporter via a single Expoter interface](https://github.com/open-telemetry/opentelemetry-go/pull/4954#discussion_r1515050480) -[^3]: [Introduce Processor](https://github.com/pellared/opentelemetry-go/pull/9) +### Processor.OnEmit to accept Record values + +There was a proposal to make the [Processor](#processor)'s `OnEmit` +to accept a [Record](#record) value instead of a pointer to reduce allocations +as well as to have design similar to [`slog.Handler`](https://pkg.go.dev/log/slog#Handler). + +There have been long discussions within the OpenTelemetry Specification SIG[^5] +about whether such a design would comply with the specification. The summary +was that the current processor design flaws are present in other languages as +well. Therefore, it would be favorable to introduce new processing concepts +(e.g. chaining processors) in the specification that would coexist with the +current "mutable" processor design. + +The performance disadvantages caused by using a pointer (which at the time of +writing causes an additional heap allocation) may be mitigated by future +versions of the Go compiler, thanks to improved escape analysis and +profile-guided optimization (PGO)[^6]. + +On the other hand, [Processor](#processor)'s `Enabled` is fine to accept +a [Record](#record) value as the processors should not mutate the passed +parameters. 
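The pointer-based OnEmit design settled on above is what custom processors implement. A minimal mutating-processor sketch, assuming the three-method Processor interface (OnEmit, Shutdown, ForceFlush) of this sdk/log release:

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel/log"
	sdklog "go.opentelemetry.io/otel/sdk/log"
)

// stampProcessor mutates records in place; because OnEmit receives a
// *Record, the change is visible to processors registered after it.
type stampProcessor struct{}

func (stampProcessor) OnEmit(_ context.Context, r *sdklog.Record) error {
	r.AddAttributes(log.String("pipeline.stage", "stamped"))
	return nil
}

func (stampProcessor) Shutdown(context.Context) error   { return nil }
func (stampProcessor) ForceFlush(context.Context) error { return nil }

var _ sdklog.Processor = stampProcessor{}

func main() {
	provider := sdklog.NewLoggerProvider(sdklog.WithProcessor(stampProcessor{}))
	defer func() { _ = provider.Shutdown(context.Background()) }()
}
```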
+ +[^1]: [A Guide to the Go Garbage Collector](https://tip.golang.org/doc/gc-guide) +[^2]: [OpenTelemetry Logging](https://opentelemetry.io/docs/specs/otel/logs) +[^3]: [Conversation on representing LogRecordProcessor and LogRecordExporter via a single Exporter interface](https://github.com/open-telemetry/opentelemetry-go/pull/4954#discussion_r1515050480) +[^4]: [Introduce Processor](https://github.com/pellared/opentelemetry-go/pull/9) +[^5]: [Log record mutations do not have to be visible in next registered processors](https://github.com/open-telemetry/opentelemetry-specification/pull/4067) +[^6]: [Profile-guided optimization](https://go.dev/doc/pgo) diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/batch.go b/vendor/go.opentelemetry.io/otel/sdk/log/batch.go index 8e43b0e8f75..28c969262b4 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/log/batch.go +++ b/vendor/go.opentelemetry.io/otel/sdk/log/batch.go @@ -19,6 +19,7 @@ const ( dfltExpInterval = time.Second dfltExpTimeout = 30 * time.Second dfltExpMaxBatchSize = 512 + dfltExpBufferSize = 1 envarMaxQSize = "OTEL_BLRP_MAX_QUEUE_SIZE" envarExpInterval = "OTEL_BLRP_SCHEDULE_DELAY" @@ -96,6 +97,8 @@ type BatchProcessor struct { // stopped holds the stopped state of the BatchProcessor. stopped atomic.Bool + + noCmp [0]func() //nolint: unused // This is indeed used. } // NewBatchProcessor decorates the provided exporter @@ -117,8 +120,7 @@ func NewBatchProcessor(exporter Exporter, opts ...BatchProcessorOption) *BatchPr exporter = newChunkExporter(exporter, cfg.expMaxBatchSize.Value) b := &BatchProcessor{ - // TODO: explore making the size of this configurable. - exporter: newBufferExporter(exporter, 1), + exporter: newBufferExporter(exporter, cfg.expBufferSize.Value), q: newQueue(cfg.maxQSize.Value), batchSize: cfg.expMaxBatchSize.Value, @@ -176,11 +178,13 @@ func (b *BatchProcessor) poll(interval time.Duration) (done chan struct{}) { } // OnEmit batches provided log record. -func (b *BatchProcessor) OnEmit(_ context.Context, r Record) error { +func (b *BatchProcessor) OnEmit(_ context.Context, r *Record) error { if b.stopped.Load() || b.q == nil { return nil } - if n := b.q.Enqueue(r); n >= b.batchSize { + // The record is cloned so that changes done by subsequent processors + // are not going to lead to a data race. + if n := b.q.Enqueue(r.Clone()); n >= b.batchSize { select { case b.pollTrigger <- struct{}{}: default: @@ -192,11 +196,6 @@ func (b *BatchProcessor) OnEmit(_ context.Context, r Record) error { return nil } -// Enabled returns if b is enabled. -func (b *BatchProcessor) Enabled(context.Context, Record) bool { - return !b.stopped.Load() && b.q != nil -} - // Shutdown flushes queued log records and shuts down the decorated exporter. func (b *BatchProcessor) Shutdown(ctx context.Context) error { if b.stopped.Swap(true) || b.q == nil { @@ -350,6 +349,7 @@ type batchConfig struct { expInterval setting[time.Duration] expTimeout setting[time.Duration] expMaxBatchSize setting[int] + expBufferSize setting[int] } func newBatchConfig(options []BatchProcessorOption) batchConfig { @@ -383,6 +383,10 @@ func newBatchConfig(options []BatchProcessorOption) batchConfig { clampMax[int](c.maxQSize.Value), fallback[int](dfltExpMaxBatchSize), ) + c.expBufferSize = c.expBufferSize.Resolve( + clearLessThanOne[int](), + fallback[int](dfltExpBufferSize), + ) return c } @@ -459,3 +463,15 @@ func WithExportMaxBatchSize(size int) BatchProcessorOption { return cfg }) } + +// WithExportBufferSize sets the batch buffer size. 
+// Batches will be temporarily kept in a memory buffer until they are exported.
+//
+// By default, a value of 1 will be used.
+// The default value is also used when the provided value is less than one.
+func WithExportBufferSize(size int) BatchProcessorOption {
+	return batchOptionFunc(func(cfg batchConfig) batchConfig {
+		cfg.expBufferSize = newSetting(size)
+		return cfg
+	})
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/doc.go b/vendor/go.opentelemetry.io/otel/sdk/log/doc.go
index 10704030aba..14a581db6b6 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/log/doc.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/log/doc.go
@@ -1,9 +1,39 @@
 // Copyright The OpenTelemetry Authors
 // SPDX-License-Identifier: Apache-2.0
 
-// TODO (#5065): Expand documentation stub.
-
 /*
 Package log provides the OpenTelemetry Logs SDK.
+
+See https://opentelemetry.io/docs/concepts/signals/logs/ for information
+about the concept of OpenTelemetry Logs and
+https://opentelemetry.io/docs/concepts/components/ for more information
+about OpenTelemetry SDKs.
+
+The entry point for the log package is [NewLoggerProvider].
+[LoggerProvider] is the object that all Bridge API calls use to create
+Loggers, and ultimately emit log records.
+It is also the object that should be used to
+control the life-cycle (start, flush, and shutdown) of the Logs SDK.
+
+A LoggerProvider needs to be configured to process the log records; this is
+done by configuring it with a [Processor] implementation using [WithProcessor].
+The log package provides the [BatchProcessor] and [SimpleProcessor]
+that are configured with an [Exporter] implementation which
+exports the log records to a given destination. See
+[go.opentelemetry.io/otel/exporters] for exporters that can be used with these
+Processors.
+
+The data generated by a LoggerProvider needs to include information about its
+origin. A LoggerProvider needs to be configured with a Resource, by using
+[WithResource], to include this information. This Resource
+should be used to describe the unique runtime environment instrumented code
+is being run on. That way when multiple instances of the code are collected
+at a single endpoint their origin is decipherable.
+
+See [go.opentelemetry.io/otel/log] for more information about
+the OpenTelemetry Logs Bridge API.
+
+See [go.opentelemetry.io/otel/sdk/log/internal/x] for information about the
+experimental features.
 */
 package log // import "go.opentelemetry.io/otel/sdk/log"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/exporter.go b/vendor/go.opentelemetry.io/otel/sdk/log/exporter.go
index 1cdddc03e39..e4e3c5402bf 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/log/exporter.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/log/exporter.go
@@ -15,10 +15,6 @@ import (
 )
 
 // Exporter handles the delivery of log records to external receivers.
-//
-// Any of the Exporter's methods may be called concurrently with itself
-// or with other methods. It is the responsibility of the Exporter to manage
-// this concurrency.
 type Exporter interface {
 	// Export transmits log records to a receiver.
 	//
@@ -34,7 +30,11 @@ type Exporter interface {
 	//
 	// Before modifying a Record, the implementation must use Record.Clone
 	// to create a copy that shares no state with the original.
+	//
+	// Export should never be called concurrently with other Export calls.
+	// However, it may be called concurrently with other methods.
 	Export(ctx context.Context, records []Record) error
+
 	// Shutdown is called when the SDK shuts down. Any cleanup or release of
 	// resources held by the exporter should be done in this call.
 	//
@@ -43,12 +43,17 @@ type Exporter interface {
 	//
 	// After Shutdown is called, calls to Export, Shutdown, or ForceFlush
 	// should perform no operation and return nil error.
+	//
+	// Shutdown may be called concurrently with itself or with other methods.
 	Shutdown(ctx context.Context) error
+
 	// ForceFlush exports log records to the configured Exporter that have not yet
 	// been exported.
 	//
 	// The deadline or cancellation of the passed context must be honored. An
 	// appropriate error should be returned in these situations.
+	//
+	// ForceFlush may be called concurrently with itself or with other methods.
 	ForceFlush(ctx context.Context) error
 }
diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/internal/x/README.md b/vendor/go.opentelemetry.io/otel/sdk/log/internal/x/README.md
new file mode 100644
index 00000000000..73f4db626af
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/log/internal/x/README.md
@@ -0,0 +1,35 @@
+# Experimental Features
+
+The Logs SDK contains features that have not yet stabilized.
+These features are added to the OpenTelemetry Go Logs SDK prior to stabilization so that users can start experimenting with them and provide feedback.
+
+These features may change in backwards-incompatible ways as feedback is applied.
+See the [Compatibility and Stability](#compatibility-and-stability) section for more information.
+
+## Features
+
+- [Filter Processors](#filter-processor)
+
+### Filter Processor
+
+Users of logging libraries often want to know if a log `Record` will be processed or dropped before they perform complex operations to construct the `Record`.
+The [`Logger`] in the Logs Bridge API provides the `Enabled` method for just this use-case.
+In order for the Logs Bridge SDK to effectively implement this API, it needs to be known if the registered [`Processor`]s are enabled for the `Record` within a context.
+A [`Processor`] that knows, and can identify, what `Record` it will process or drop when it is passed to `OnEmit` can communicate this to the SDK `Logger` by implementing the `FilterProcessor`.
+
+By default, the SDK `Logger.Enabled` will return true when called.
+Only if all the registered [`Processor`]s implement `FilterProcessor` and they all return `false` will `Logger.Enabled` return `false`.
+
+See the [`minsev`] [`Processor`] for an example use-case.
+It is used to filter out `Record`s that have a `Severity` below a threshold.
+
+[`Logger`]: https://pkg.go.dev/go.opentelemetry.io/otel/log#Logger
+[`Processor`]: https://pkg.go.dev/go.opentelemetry.io/otel/sdk/log#Processor
+[`minsev`]: https://pkg.go.dev/go.opentelemetry.io/contrib/processors/minsev
+
+## Compatibility and Stability
+
+Experimental features do not fall within the scope of the OpenTelemetry Go versioning and stability [policy](../../../../VERSIONING.md).
+These features may be removed or modified in successive version releases, including patch versions.
+
+When an experimental feature is promoted to a stable feature, a migration path will be included in the changelog entry of the release.
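Editor's note: a minimal sketch of a processor that opts in to this contract. Implementing `Enabled` with the signature defined in `x.go` below is what lets the SDK discover it; the toggle stands in for any reliable, dynamic decision, and the type is invented for illustration.

```go
package example

import (
	"context"
	"sync/atomic"

	"go.opentelemetry.io/otel/log"
	sdklog "go.opentelemetry.io/otel/sdk/log"
)

// toggleProcessor is an invented processor that can be switched on and off at
// runtime, e.g. &toggleProcessor{Processor: sdklog.NewBatchProcessor(exp)}.
type toggleProcessor struct {
	sdklog.Processor // the wrapped processor that does the real work
	on               atomic.Bool
}

// OnEmit re-evaluates the decision, as the filter-processor contract requires.
func (p *toggleProcessor) OnEmit(ctx context.Context, r *sdklog.Record) error {
	if !p.on.Load() {
		return nil
	}
	return p.Processor.OnEmit(ctx, r)
}

// Enabled gives the SDK a reliable, dynamic answer; implementing this method
// is what makes the processor a FilterProcessor in the SDK's eyes.
func (p *toggleProcessor) Enabled(context.Context, log.EnabledParameters) bool {
	return p.on.Load()
}
```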
diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/internal/x/x.go b/vendor/go.opentelemetry.io/otel/sdk/log/internal/x/x.go
new file mode 100644
index 00000000000..ca78d109778
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/log/internal/x/x.go
@@ -0,0 +1,47 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package x contains support for Logs SDK experimental features.
+package x // import "go.opentelemetry.io/otel/sdk/log/internal/x"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/log"
+)
+
+// FilterProcessor is a [go.opentelemetry.io/otel/sdk/log.Processor] that knows,
+// and can identify, what [log.Record] it will process or drop when it is
+// passed to OnEmit.
+//
+// This is useful for users of logging libraries that want to know if a [log.Record]
+// will be processed or dropped before they perform complex operations to
+// construct the [log.Record].
+//
+// Processor implementations that choose to support this by satisfying this
+// interface are expected to re-evaluate the [log.Record]s passed to OnEmit; it is
+// not expected that the caller of OnEmit will use the functionality from this
+// interface prior to calling OnEmit.
+//
+// This should only be implemented for Processors that can make a reliable
+// enough determination of this prior to processing a [log.Record] and where
+// the result is dynamic.
+//
+// [Processor]: https://pkg.go.dev/go.opentelemetry.io/otel/sdk/log#Processor
+type FilterProcessor interface {
+	// Enabled returns whether the Processor will process for the given context
+	// and param.
+	//
+	// The passed param is likely to be a partial record with only the
+	// bridge-relevant information being provided (e.g. a record with only the
+	// Severity set). If a Logger needs more information than is provided, it
+	// is said to be in an indeterminate state (see below).
+	//
+	// The returned value will be true when the Processor will process for the
+	// provided context and param, and will be false if the Processor will not
+	// process. An implementation should default to returning true for an
+	// indeterminate state.
+	//
+	// Implementations should not modify the param.
+	Enabled(ctx context.Context, param log.EnabledParameters) bool
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/logger.go b/vendor/go.opentelemetry.io/otel/sdk/log/logger.go
index 245867f3fd6..d6ca2ea41aa 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/log/logger.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/log/logger.go
@@ -11,6 +11,7 @@ import (
 	"go.opentelemetry.io/otel/log"
 	"go.opentelemetry.io/otel/log/embedded"
 	"go.opentelemetry.io/otel/sdk/instrumentation"
+	"go.opentelemetry.io/otel/sdk/log/internal/x"
 	"go.opentelemetry.io/otel/trace"
 )
 
@@ -36,19 +37,35 @@ func newLogger(p *LoggerProvider, scope instrumentation.Scope) *logger {
 func (l *logger) Emit(ctx context.Context, r log.Record) {
 	newRecord := l.newRecord(ctx, r)
 	for _, p := range l.provider.processors {
-		if err := p.OnEmit(ctx, newRecord); err != nil {
+		if err := p.OnEmit(ctx, &newRecord); err != nil {
 			otel.Handle(err)
 		}
 	}
 }
 
-func (l *logger) Enabled(ctx context.Context, r log.Record) bool {
-	newRecord := l.newRecord(ctx, r)
-	for _, p := range l.provider.processors {
-		if enabled := p.Enabled(ctx, newRecord); enabled {
+// Enabled returns true if at least one Processor held by the LoggerProvider
+// that created the logger will process the given param for the provided context.
+// +// If it is not possible to definitively determine the param will be +// processed, true will be returned by default. A value of false will only be +// returned if it can be positively verified that no Processor will process. +func (l *logger) Enabled(ctx context.Context, param log.EnabledParameters) bool { + fltrs := l.provider.filterProcessors() + // If there are more Processors than FilterProcessors we cannot be sure + // that all Processors will drop the record. Therefore, return true. + // + // If all Processors are FilterProcessors, check if any is enabled. + return len(l.provider.processors) > len(fltrs) || anyEnabled(ctx, param, fltrs) +} + +func anyEnabled(ctx context.Context, param log.EnabledParameters, fltrs []x.FilterProcessor) bool { + for _, f := range fltrs { + if f.Enabled(ctx, param) { + // At least one Processor will process the Record. return true } } + // No Processor will process the record return false } diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/processor.go b/vendor/go.opentelemetry.io/otel/sdk/log/processor.go index f95ea949027..fcab34c7a48 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/log/processor.go +++ b/vendor/go.opentelemetry.io/otel/sdk/log/processor.go @@ -12,6 +12,9 @@ import ( // Any of the Processor's methods may be called concurrently with itself // or with other methods. It is the responsibility of the Processor to manage // this concurrency. +// +// See [go.opentelemetry.io/otel/sdk/log/internal/x] for information about how +// a Processor can be extended to support experimental features. type Processor interface { // OnEmit is called when a Record is emitted. // @@ -26,27 +29,15 @@ type Processor interface { // considered unrecoverable and will be reported to a configured error // Handler. // - // Before modifying a Record, the implementation must use Record.Clone + // The SDK invokes the processors sequentially in the same order as + // they were registered using [WithProcessor]. + // Implementations may synchronously modify the record so that the changes + // are visible in the next registered processor. + // Notice that [Record] is not concurrent safe. Therefore, asynchronous + // processing may cause race conditions. Use [Record.Clone] // to create a copy that shares no state with the original. - OnEmit(ctx context.Context, record Record) error - // Enabled returns whether the Processor will process for the given context - // and record. - // - // The passed record is likely to be a partial record with only the - // bridge-relevant information being provided (e.g a record with only the - // Severity set). If a Logger needs more information than is provided, it - // is said to be in an indeterminate state (see below). - // - // The returned value will be true when the Processor will process for the - // provided context and record, and will be false if the Processor will not - // process. The returned value may be true or false in an indeterminate - // state. An implementation should default to returning true for an - // indeterminate state, but may return false if valid reasons in particular - // circumstances exist (e.g. performance, correctness). - // - // Before modifying a Record, the implementation must use Record.Clone - // to create a copy that shares no state with the original. - Enabled(ctx context.Context, record Record) bool + OnEmit(ctx context.Context, record *Record) error + // Shutdown is called when the SDK shuts down. Any cleanup or release of // resources held by the exporter should be done in this call. 
// @@ -56,6 +47,7 @@ type Processor interface { // After Shutdown is called, calls to Export, Shutdown, or ForceFlush // should perform no operation and return nil error. Shutdown(ctx context.Context) error + // ForceFlush exports log records to the configured Exporter that have not yet // been exported. // diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/provider.go b/vendor/go.opentelemetry.io/otel/sdk/log/provider.go index 84bb14c5ec7..8c825e6ab79 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/log/provider.go +++ b/vendor/go.opentelemetry.io/otel/sdk/log/provider.go @@ -9,11 +9,13 @@ import ( "sync" "sync/atomic" + "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/internal/global" "go.opentelemetry.io/otel/log" "go.opentelemetry.io/otel/log/embedded" "go.opentelemetry.io/otel/log/noop" "go.opentelemetry.io/otel/sdk/instrumentation" + "go.opentelemetry.io/otel/sdk/log/internal/x" "go.opentelemetry.io/otel/sdk/resource" ) @@ -65,10 +67,15 @@ type LoggerProvider struct { attributeCountLimit int attributeValueLengthLimit int + fltrProcessorsOnce sync.Once + fltrProcessors []x.FilterProcessor + loggersMu sync.Mutex loggers map[instrumentation.Scope]*logger stopped atomic.Bool + + noCmp [0]func() //nolint: unused // This is indeed used. } // Compile-time check LoggerProvider implements log.LoggerProvider. @@ -90,6 +97,17 @@ func NewLoggerProvider(opts ...LoggerProviderOption) *LoggerProvider { } } +func (p *LoggerProvider) filterProcessors() []x.FilterProcessor { + p.fltrProcessorsOnce.Do(func() { + for _, proc := range p.processors { + if f, ok := proc.(x.FilterProcessor); ok { + p.fltrProcessors = append(p.fltrProcessors, f) + } + } + }) + return p.fltrProcessors +} + // Logger returns a new [log.Logger] with the provided name and configuration. // // If p is shut down, a [noop.Logger] instance is returned. @@ -106,9 +124,10 @@ func (p *LoggerProvider) Logger(name string, opts ...log.LoggerOption) log.Logge cfg := log.NewLoggerConfig(opts...) scope := instrumentation.Scope{ - Name: name, - Version: cfg.InstrumentationVersion(), - SchemaURL: cfg.SchemaURL(), + Name: name, + Version: cfg.InstrumentationVersion(), + SchemaURL: cfg.SchemaURL(), + Attributes: cfg.InstrumentationAttributes(), } p.loggersMu.Lock() @@ -179,7 +198,11 @@ func (fn loggerProviderOptionFunc) apply(c providerConfig) providerConfig { // go.opentelemetry.io/otel/sdk/resource package will be used. func WithResource(res *resource.Resource) LoggerProviderOption { return loggerProviderOptionFunc(func(cfg providerConfig) providerConfig { - cfg.resource = res + var err error + cfg.resource, err = resource.Merge(resource.Environment(), res) + if err != nil { + otel.Handle(err) + } return cfg }) } @@ -189,8 +212,8 @@ func WithResource(res *resource.Resource) LoggerProviderOption { // By default, if this option is not used, the LoggerProvider will perform no // operations; no data will be exported without a processor. // -// Each WithProcessor creates a separate pipeline. Use custom decorators -// for advanced scenarios such as enriching with attributes. +// The SDK invokes the processors sequentially in the same order as they were +// registered. // // For production, use [NewBatchProcessor] to batch log records before they are exported. // For testing and debugging, use [NewSimpleProcessor] to synchronously export log records. 
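Editor's note: given the registration-order guarantee documented above, a provider wiring sketch; it assumes the `go.opentelemetry.io/otel/exporters/stdout/stdoutlog` module and the invented `enrichProcessor` type from the earlier sketch being in scope.

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel/exporters/stdout/stdoutlog"
	sdklog "go.opentelemetry.io/otel/sdk/log"
)

func main() {
	exp, err := stdoutlog.New()
	if err != nil {
		panic(err)
	}

	// Processors run in registration order: the enriching processor runs
	// first, so the exporting processor sees (and exports) the enriched
	// record.
	provider := sdklog.NewLoggerProvider(
		sdklog.WithProcessor(enrichProcessor{}),
		sdklog.WithProcessor(sdklog.NewSimpleProcessor(exp)),
	)
	defer func() { _ = provider.Shutdown(context.Background()) }()
}
```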
diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/record.go b/vendor/go.opentelemetry.io/otel/sdk/log/record.go
index a6e50df7782..f04e5b28f95 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/log/record.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/log/record.go
@@ -42,6 +42,10 @@ func putIndex(index map[string]int) {
 }
 
 // Record is a log record emitted by the Logger.
+//
+// Do not create instances of Record on your own in production code.
+// You can use [go.opentelemetry.io/otel/sdk/log/logtest.RecordFactory]
+// for testing purposes.
 type Record struct {
 	// Do not embed the log.Record. Attributes need to be overwrite-able and
 	// deep-copying needs to be possible.
@@ -86,6 +90,8 @@ type Record struct {
 
 	attributeValueLengthLimit int
 	attributeCountLimit       int
+
+	noCmp [0]func() //nolint: unused  // This is indeed used.
 }
 
 func (r *Record) addDropped(n int) {
@@ -228,7 +234,7 @@ func (r *Record) AddAttributes(attrs ...log.KeyValue) {
 		//
 		// Do not use head(attrs, r.attributeCountLimit - n) here. If
 		// (r.attributeCountLimit - n) <= 0 attrs needs to be emptied.
-		last := max(0, (r.attributeCountLimit - n))
+		last := max(0, r.attributeCountLimit-n)
 		r.addDropped(len(attrs) - last)
 		attrs = attrs[:last]
 	}
@@ -400,7 +406,7 @@ func (r *Record) applyValueLimits(val log.Value) log.Value {
 	case log.KindString:
 		s := val.AsString()
 		if len(s) > r.attributeValueLengthLimit {
-			val = log.StringValue(truncate(s, r.attributeValueLengthLimit))
+			val = log.StringValue(truncate(r.attributeValueLengthLimit, s))
 		}
 	case log.KindSlice:
 		sl := val.AsSlice()
@@ -421,40 +427,78 @@ func (r *Record) applyValueLimits(val log.Value) log.Value {
 	return val
 }
 
-// truncate returns a copy of str truncated to have a length of at most n
-// characters. If the length of str is less than n, str itself is returned.
+// truncate returns a truncated version of s such that it contains at most
+// the limit number of characters. Truncation is applied by returning the limit
+// number of valid characters contained in s.
+//
+// If limit is negative, it returns the original string.
 //
-// The truncate of str ensures that no valid UTF-8 code point is split. The
-// copy returned will be less than n if a characters straddles the length
-// limit.
+// UTF-8 is supported. When truncating, all invalid characters are dropped
+// before applying truncation.
 //
-// No truncation is performed if n is less than zero.
-func truncate(str string, n int) string {
-	if n < 0 {
-		return str
+// If s already contains no more than the limit number of bytes, it is returned
+// unchanged. No invalid characters are removed.
+func truncate(limit int, s string) string {
+	// This prioritizes performance in the following order based on the most
+	// common expected use-cases.
+	//
+	//  - Short values less than the default limit (128).
+	//  - Strings with valid encodings that exceed the limit.
+	//  - No limit.
+	//  - Strings with invalid encodings that exceed the limit.
+	if limit < 0 || len(s) <= limit {
+		return s
 	}
 
-	// cut returns a copy of the s truncated to not exceed a length of n. If
-	// invalid UTF-8 is encountered, s is returned with false. Otherwise, the
-	// truncated copy will be returned with true.
-	cut := func(s string) (string, bool) {
-		var i int
-		for i = 0; i < n; {
-			r, size := utf8.DecodeRuneInString(s[i:])
-			if r == utf8.RuneError {
-				return s, false
-			}
-			if i+size > n {
-				break
+	// Optimistically, assume all valid UTF-8.
+ var b strings.Builder + count := 0 + for i, c := range s { + if c != utf8.RuneError { + count++ + if count > limit { + return s[:i] } - i += size + continue + } + + _, size := utf8.DecodeRuneInString(s[i:]) + if size == 1 { + // Invalid encoding. + b.Grow(len(s) - 1) + _, _ = b.WriteString(s[:i]) + s = s[i:] + break } - return s[:i], true } - cp, ok := cut(str) - if !ok { - cp, _ = cut(strings.ToValidUTF8(str, "")) + // Fast-path, no invalid input. + if b.Cap() == 0 { + return s } - return cp + + // Truncate while validating UTF-8. + for i := 0; i < len(s) && count < limit; { + c := s[i] + if c < utf8.RuneSelf { + // Optimization for single byte runes (common case). + _ = b.WriteByte(c) + i++ + count++ + continue + } + + _, size := utf8.DecodeRuneInString(s[i:]) + if size == 1 { + // We checked for all 1-byte runes above, this is a RuneError. + i++ + continue + } + + _, _ = b.WriteString(s[i : i+size]) + i += size + count++ + } + + return b.String() } diff --git a/vendor/go.opentelemetry.io/otel/sdk/log/simple.go b/vendor/go.opentelemetry.io/otel/sdk/log/simple.go index fc5690b22d5..002e52cae66 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/log/simple.go +++ b/vendor/go.opentelemetry.io/otel/sdk/log/simple.go @@ -12,22 +12,24 @@ import ( var _ Processor = (*SimpleProcessor)(nil) // SimpleProcessor is an processor that synchronously exports log records. +// +// Use [NewSimpleProcessor] to create a SimpleProcessor. type SimpleProcessor struct { + mu sync.Mutex exporter Exporter + + noCmp [0]func() //nolint: unused // This is indeed used. } // NewSimpleProcessor is a simple Processor adapter. // -// This Processor is not recommended for production use. The synchronous -// nature of this Processor make it good for testing, debugging, or -// showing examples of other features, but it can be slow and have a high -// computation resource usage overhead. [NewBatchProcessor] is recommended -// for production use instead. +// This Processor is not recommended for production use due to its synchronous +// nature, which makes it suitable for testing, debugging, or demonstrating +// other features, but can lead to slow performance and high computational +// overhead. For production environments, it is recommended to use +// [NewBatchProcessor] instead. However, there may be exceptions where certain +// [Exporter] implementations perform better with this Processor. func NewSimpleProcessor(exporter Exporter, _ ...SimpleProcessorOption) *SimpleProcessor { - if exporter == nil { - // Do not panic on nil exporter. - exporter = defaultNoopExporter - } return &SimpleProcessor{exporter: exporter} } @@ -39,9 +41,16 @@ var simpleProcRecordsPool = sync.Pool{ } // OnEmit batches provided log record. -func (s *SimpleProcessor) OnEmit(ctx context.Context, r Record) error { +func (s *SimpleProcessor) OnEmit(ctx context.Context, r *Record) error { + if s.exporter == nil { + return nil + } + + s.mu.Lock() + defer s.mu.Unlock() + records := simpleProcRecordsPool.Get().(*[]Record) - (*records)[0] = r + (*records)[0] = *r defer func() { simpleProcRecordsPool.Put(records) }() @@ -49,18 +58,21 @@ func (s *SimpleProcessor) OnEmit(ctx context.Context, r Record) error { return s.exporter.Export(ctx, *records) } -// Enabled returns true. -func (s *SimpleProcessor) Enabled(context.Context, Record) bool { - return true -} - -// Shutdown shuts down the expoter. +// Shutdown shuts down the exporter. 
 func (s *SimpleProcessor) Shutdown(ctx context.Context) error {
+	if s.exporter == nil {
+		return nil
+	}
+
 	return s.exporter.Shutdown(ctx)
 }
 
 // ForceFlush flushes the exporter.
 func (s *SimpleProcessor) ForceFlush(ctx context.Context) error {
+	if s.exporter == nil {
+		return nil
+	}
+
 	return s.exporter.ForceFlush(ctx)
 }
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/config.go b/vendor/go.opentelemetry.io/otel/sdk/metric/config.go
index bbe7bf671fd..203cd9d6508 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/config.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/config.go
@@ -5,17 +5,22 @@ package metric // import "go.opentelemetry.io/otel/sdk/metric"
 
 import (
 	"context"
-	"fmt"
+	"errors"
+	"os"
+	"strings"
 	"sync"
 
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/sdk/metric/exemplar"
 	"go.opentelemetry.io/otel/sdk/resource"
 )
 
 // config contains configuration options for a MeterProvider.
 type config struct {
-	res     *resource.Resource
-	readers []Reader
-	views   []View
+	res            *resource.Resource
+	readers        []Reader
+	views          []View
+	exemplarFilter exemplar.Filter
 }
 
 // readerSignals returns a force-flush and shutdown function for a
@@ -39,25 +44,13 @@ func (c config) readerSignals() (forceFlush, shutdown func(context.Context) erro
 // value.
 func unify(funcs []func(context.Context) error) func(context.Context) error {
 	return func(ctx context.Context) error {
-		var errs []error
+		var err error
 		for _, f := range funcs {
-			if err := f(ctx); err != nil {
-				errs = append(errs, err)
+			if e := f(ctx); e != nil {
+				err = errors.Join(err, e)
 			}
 		}
-		return unifyErrors(errs)
-	}
-}
-
-// unifyErrors combines multiple errors into a single error.
-func unifyErrors(errs []error) error {
-	switch len(errs) {
-	case 0:
-		return nil
-	case 1:
-		return errs[0]
-	default:
-		return fmt.Errorf("%v", errs)
+		return err
 	}
 }
 
@@ -75,7 +68,13 @@ func unifyShutdown(funcs []func(context.Context) error) func(context.Context) er
 
 // newConfig returns a config configured with options.
 func newConfig(options []Option) config {
-	conf := config{res: resource.Default()}
+	conf := config{
+		res:            resource.Default(),
+		exemplarFilter: exemplar.TraceBasedFilter,
+	}
+	for _, o := range meterProviderOptionsFromEnv() {
+		conf = o.apply(conf)
+	}
 	for _, o := range options {
 		conf = o.apply(conf)
 	}
@@ -103,7 +102,11 @@ func (o optionFunc) apply(conf config) config {
 // go.opentelemetry.io/otel/sdk/resource package will be used.
 func WithResource(res *resource.Resource) Option {
 	return optionFunc(func(conf config) config {
-		conf.res = res
+		var err error
+		conf.res, err = resource.Merge(resource.Environment(), res)
+		if err != nil {
+			otel.Handle(err)
+		}
 		return conf
 	})
 }
@@ -135,3 +138,35 @@ func WithView(views ...View) Option {
 		return cfg
 	})
 }
+
+// WithExemplarFilter configures the exemplar filter.
+//
+// The exemplar filter determines which measurements are offered to the
+// exemplar reservoir, but the exemplar reservoir makes the final decision of
+// whether to store an exemplar.
+//
+// By default, the [exemplar.TraceBasedFilter]
+// is used. Exemplars can be entirely disabled by providing the
+// [exemplar.AlwaysOffFilter].
+func WithExemplarFilter(filter exemplar.Filter) Option { + return optionFunc(func(cfg config) config { + cfg.exemplarFilter = filter + return cfg + }) +} + +func meterProviderOptionsFromEnv() []Option { + var opts []Option + // https://github.com/open-telemetry/opentelemetry-specification/blob/d4b241f451674e8f611bb589477680341006ad2b/specification/configuration/sdk-environment-variables.md#exemplar + const filterEnvKey = "OTEL_METRICS_EXEMPLAR_FILTER" + + switch strings.ToLower(strings.TrimSpace(os.Getenv(filterEnvKey))) { + case "always_on": + opts = append(opts, WithExemplarFilter(exemplar.AlwaysOnFilter)) + case "always_off": + opts = append(opts, WithExemplarFilter(exemplar.AlwaysOffFilter)) + case "trace_based": + opts = append(opts, WithExemplarFilter(exemplar.TraceBasedFilter)) + } + return opts +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/doc.go b/vendor/go.opentelemetry.io/otel/sdk/metric/doc.go index 4f553a57153..90a4ae16c1a 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/doc.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/doc.go @@ -31,6 +31,14 @@ // is being run on. That way when multiple instances of the code are collected // at a single endpoint their origin is decipherable. // +// To avoid leaking memory, the SDK returns the same instrument for calls to +// create new instruments with the same Name, Unit, and Description. +// Importantly, callbacks provided using metric.WithFloat64Callback or +// metric.WithInt64Callback will only apply for the first instrument created +// with a given Name, Unit, and Description. Instead, use +// Meter.RegisterCallback and Registration.Unregister to add and remove +// callbacks without leaking memory. +// // See [go.opentelemetry.io/otel/metric] for more information about // the metric API. // diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go index 82619da78ec..0335b8ae48e 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar.go @@ -4,51 +4,49 @@ package metric // import "go.opentelemetry.io/otel/sdk/metric" import ( - "os" "runtime" - "slices" - "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" - "go.opentelemetry.io/otel/sdk/metric/internal/x" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/metric/exemplar" + "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" ) -// reservoirFunc returns the appropriately configured exemplar reservoir -// creation func based on the passed InstrumentKind and user defined -// environment variables. -// -// Note: This will only return non-nil values when the experimental exemplar -// feature is enabled and the OTEL_METRICS_EXEMPLAR_FILTER environment variable -// is not set to always_off. -func reservoirFunc[N int64 | float64](agg Aggregation) func() exemplar.FilteredReservoir[N] { - if !x.Exemplars.Enabled() { - return nil - } - // https://github.com/open-telemetry/opentelemetry-specification/blob/d4b241f451674e8f611bb589477680341006ad2b/specification/configuration/sdk-environment-variables.md#exemplar - const filterEnvKey = "OTEL_METRICS_EXEMPLAR_FILTER" +// ExemplarReservoirProviderSelector selects the +// [exemplar.ReservoirProvider] to use +// based on the [Aggregation] of the metric. 
+type ExemplarReservoirProviderSelector func(Aggregation) exemplar.ReservoirProvider
 
-	var filter exemplar.Filter
-
-	switch os.Getenv(filterEnvKey) {
-	case "always_on":
-		filter = exemplar.AlwaysOnFilter
-	case "always_off":
-		return exemplar.Drop
-	case "trace_based":
-		fallthrough
-	default:
-		filter = exemplar.SampledFilter
+// reservoirFunc returns the appropriately configured exemplar reservoir
+// creation func based on the passed reservoir provider and filter configuration.
+func reservoirFunc[N int64 | float64](provider exemplar.ReservoirProvider, filter exemplar.Filter) func(attribute.Set) aggregate.FilteredExemplarReservoir[N] {
+	return func(attrs attribute.Set) aggregate.FilteredExemplarReservoir[N] {
+		return aggregate.NewFilteredExemplarReservoir[N](filter, provider(attrs))
 	}
+}
 
+// DefaultExemplarReservoirProviderSelector returns the default
+// [exemplar.ReservoirProvider] for the
+// provided [Aggregation].
+//
+// For explicit bucket histograms with more than 1 bucket, it uses the
+// [exemplar.HistogramReservoirProvider].
+// For exponential histograms, it uses the
+// [exemplar.FixedSizeReservoirProvider]
+// with a size of min(20, max_buckets).
+// For all other aggregations, it uses the
+// [exemplar.FixedSizeReservoirProvider]
+// with a size equal to the number of CPUs.
+//
+// Exemplar default reservoirs MAY change in a minor version bump. No
+// guarantees are made on the shape or statistical properties of returned
+// exemplars.
+func DefaultExemplarReservoirProviderSelector(agg Aggregation) exemplar.ReservoirProvider {
 	// https://github.com/open-telemetry/opentelemetry-specification/blob/d4b241f451674e8f611bb589477680341006ad2b/specification/metrics/sdk.md#exemplar-defaults
 	// Explicit bucket histogram aggregation with more than 1 bucket will
 	// use AlignedHistogramBucketExemplarReservoir.
a, ok := agg.(AggregationExplicitBucketHistogram) if ok && len(a.Boundaries) > 0 { - cp := slices.Clone(a.Boundaries) - return func() exemplar.FilteredReservoir[N] { - bounds := cp - return exemplar.NewFilteredReservoir[N](filter, exemplar.Histogram(bounds)) - } + return exemplar.HistogramReservoirProvider(a.Boundaries) } var n int @@ -75,7 +73,5 @@ func reservoirFunc[N int64 | float64](agg Aggregation) func() exemplar.FilteredR } } - return func() exemplar.FilteredReservoir[N] { - return exemplar.NewFilteredReservoir[N](filter, exemplar.FixedSize(n)) - } + return exemplar.FixedSizeReservoirProvider(n) } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/README.md b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/README.md new file mode 100644 index 00000000000..d1025f5eb89 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/README.md @@ -0,0 +1,3 @@ +# Metric SDK Exemplars + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk/metric/exemplar)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric/exemplar) diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/doc.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/doc.go similarity index 93% rename from vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/doc.go rename to vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/doc.go index 5394f48e0df..9f238937688 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/doc.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/doc.go @@ -3,4 +3,4 @@ // Package exemplar provides an implementation of the OpenTelemetry exemplar // reservoir to be used in metric collection pipelines. -package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" +package exemplar // import "go.opentelemetry.io/otel/sdk/metric/exemplar" diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/exemplar.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/exemplar.go similarity index 98% rename from vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/exemplar.go rename to vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/exemplar.go index fcaa6a4697c..1ab69467868 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/exemplar.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/exemplar.go @@ -1,7 +1,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" +package exemplar // import "go.opentelemetry.io/otel/sdk/metric/exemplar" import ( "time" diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filter.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/filter.go similarity index 75% rename from vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filter.go rename to vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/filter.go index 152a069a09e..b595e2acef3 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filter.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/filter.go @@ -1,7 +1,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" +package exemplar // import "go.opentelemetry.io/otel/sdk/metric/exemplar" import ( "context" @@ -16,10 +16,10 @@ import ( // Reservoir in making a sampling decision. 
 type Filter func(context.Context) bool
 
-// SampledFilter is a [Filter] that will only offer measurements
+// TraceBasedFilter is a [Filter] that will only offer measurements
 // if the passed context associated with the measurement contains a sampled
 // [go.opentelemetry.io/otel/trace.SpanContext].
-func SampledFilter(ctx context.Context) bool {
+func TraceBasedFilter(ctx context.Context) bool {
 	return trace.SpanContextFromContext(ctx).IsSampled()
 }
 
@@ -27,3 +27,8 @@ func SampledFilter(ctx context.Context) bool {
 func AlwaysOnFilter(ctx context.Context) bool {
 	return true
 }
+
+// AlwaysOffFilter is a [Filter] that never offers measurements.
+func AlwaysOffFilter(ctx context.Context) bool {
+	return false
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/rand.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/fixed_size_reservoir.go
similarity index 73%
rename from vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/rand.go
rename to vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/fixed_size_reservoir.go
index 199a2608f71..d4aab0aad4f 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/rand.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/fixed_size_reservoir.go
@@ -1,31 +1,69 @@
 // Copyright The OpenTelemetry Authors
 // SPDX-License-Identifier: Apache-2.0
 
-package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar"
+package exemplar // import "go.opentelemetry.io/otel/sdk/metric/exemplar"
 
 import (
 	"context"
 	"math"
 	"math/rand"
-	"sync"
 	"time"
 
 	"go.opentelemetry.io/otel/attribute"
 )
 
-var (
+// FixedSizeReservoirProvider returns a provider of [FixedSizeReservoir].
+func FixedSizeReservoirProvider(k int) ReservoirProvider {
+	return func(_ attribute.Set) Reservoir {
+		return NewFixedSizeReservoir(k)
+	}
+}
+
+// NewFixedSizeReservoir returns a [FixedSizeReservoir] that samples at most
+// k exemplars. If there are k or fewer measurements made, the Reservoir will
+// sample each one. If there are more than k, the Reservoir will then randomly
+// sample all additional measurements with a decreasing probability.
+func NewFixedSizeReservoir(k int) *FixedSizeReservoir {
+	return newFixedSizeReservoir(newStorage(k))
+}
+
+var _ Reservoir = &FixedSizeReservoir{}
+
+// FixedSizeReservoir is a [Reservoir] that samples at most k exemplars. If
+// there are k or fewer measurements made, the Reservoir will sample each one.
+// If there are more than k, the Reservoir will then randomly sample all
+// additional measurements with a decreasing probability.
+type FixedSizeReservoir struct {
+	*storage
+
+	// count is the number of measurements seen.
+	count int64
+	// next is the next count that will store a measurement at a random index
+	// once the reservoir has been filled.
+	next int64
+	// w is the largest random number in a distribution that is used to compute
+	// the next next.
+	w float64
+
+	// rng is used to make sampling decisions.
 	//
 	// Do not use crypto/rand. There is no reason for the decrease in performance
 	// given this is not a security sensitive decision.
-	rng = rand.New(rand.NewSource(time.Now().UnixNano()))
-	// Ensure concurrent safe accecess to rng and its underlying source.
-	rngMu sync.Mutex
-)
+	rng *rand.Rand
+}
 
-// random returns, as a float64, a uniform pseudo-random number in the open
-// interval (0.0,1.0).
-func random() float64 { +func newFixedSizeReservoir(s *storage) *FixedSizeReservoir { + r := &FixedSizeReservoir{ + storage: s, + rng: rand.New(rand.NewSource(time.Now().UnixNano())), + } + r.reset() + return r +} + +// randomFloat64 returns, as a float64, a uniform pseudo-random number in the +// open interval (0.0,1.0). +func (r *FixedSizeReservoir) randomFloat64() float64 { // TODO: This does not return a uniform number. rng.Float64 returns a // uniformly random int in [0,2^53) that is divided by 2^53. Meaning it // returns multiples of 2^-53, and not all floating point numbers between 0 @@ -43,40 +81,25 @@ func random() float64 { // // There are likely many other methods to explore here as well. - rngMu.Lock() - defer rngMu.Unlock() - - f := rng.Float64() + f := r.rng.Float64() for f == 0 { - f = rng.Float64() + f = r.rng.Float64() } return f } -// FixedSize returns a [Reservoir] that samples at most k exemplars. If there -// are k or less measurements made, the Reservoir will sample each one. If -// there are more than k, the Reservoir will then randomly sample all -// additional measurement with a decreasing probability. -func FixedSize(k int) Reservoir { - r := &randRes{storage: newStorage(k)} - r.reset() - return r -} - -type randRes struct { - *storage - - // count is the number of measurement seen. - count int64 - // next is the next count that will store a measurement at a random index - // once the reservoir has been filled. - next int64 - // w is the largest random number in a distribution that is used to compute - // the next next. - w float64 -} - -func (r *randRes) Offer(ctx context.Context, t time.Time, n Value, a []attribute.KeyValue) { +// Offer accepts the parameters associated with a measurement. The +// parameters will be stored as an exemplar if the Reservoir decides to +// sample the measurement. +// +// The passed ctx needs to contain any baggage or span that were active +// when the measurement was made. This information may be used by the +// Reservoir in making a sampling decision. +// +// The time t is the time when the measurement was made. The v and a +// parameters are the value and dropped (filtered) attributes of the +// measurement respectively. +func (r *FixedSizeReservoir) Offer(ctx context.Context, t time.Time, n Value, a []attribute.KeyValue) { // The following algorithm is "Algorithm L" from Li, Kim-Hung (4 December // 1994). "Reservoir-Sampling Algorithms of Time Complexity // O(n(1+log(N/n)))". ACM Transactions on Mathematical Software. 20 (4): @@ -123,7 +146,7 @@ func (r *randRes) Offer(ctx context.Context, t time.Time, n Value, a []attribute } else { if r.count == r.next { // Overwrite a random existing measurement with the one offered. - idx := int(rng.Int63n(int64(cap(r.store)))) + idx := int(r.rng.Int63n(int64(cap(r.store)))) r.store[idx] = newMeasurement(ctx, t, n, a) r.advance() } @@ -132,7 +155,7 @@ func (r *randRes) Offer(ctx context.Context, t time.Time, n Value, a []attribute } // reset resets r to the initial state. -func (r *randRes) reset() { +func (r *FixedSizeReservoir) reset() { // This resets the number of exemplars known. r.count = 0 // Random index inserts should only happen after the storage is full. @@ -147,14 +170,14 @@ func (r *randRes) reset() { // This maps the uniform random number in (0,1) to a geometric distribution // over the same interval. The mean of the distribution is inversely // proportional to the storage capacity. 
-	r.w = math.Exp(math.Log(random()) / float64(cap(r.store)))
+	r.w = math.Exp(math.Log(r.randomFloat64()) / float64(cap(r.store)))
 
 	r.advance()
 }
 
 // advance updates the count at which the offered measurement will overwrite an
 // existing exemplar.
-func (r *randRes) advance() {
+func (r *FixedSizeReservoir) advance() {
 	// Calculate the next value in the random number series.
 	//
 	// The current value of r.w is based on the max of a distribution of random
@@ -167,7 +190,7 @@ func (r *randRes) advance() {
 	// therefore the next r.w will be based on the same distribution (i.e.
 	// `max(u_1,u_2,...,u_k)`). Therefore, we can sample the next r.w by
 	// computing the next random number `u` and take r.w as `w * u^(1/k)`.
-	r.w *= math.Exp(math.Log(random()) / float64(cap(r.store)))
+	r.w *= math.Exp(math.Log(r.randomFloat64()) / float64(cap(r.store)))
 
 	// Use the new random number in the series to calculate the count of the
 	// next measurement that will be stored.
@@ -178,10 +201,13 @@ func (r *randRes) advance() {
 	//
 	// Important to note, the new r.next will always be at least 1 more than
 	// the last r.next.
-	r.next += int64(math.Log(random())/math.Log(1-r.w)) + 1
+	r.next += int64(math.Log(r.randomFloat64())/math.Log(1-r.w)) + 1
 }
 
-func (r *randRes) Collect(dest *[]Exemplar) {
+// Collect returns all the held exemplars.
+//
+// The Reservoir state is preserved after this call.
+func (r *FixedSizeReservoir) Collect(dest *[]Exemplar) {
 	r.storage.Collect(dest)
 	// Call reset here even though it will reset r.count and restart the random
 	// number series. This will persist any old exemplars as long as no new
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/histogram_reservoir.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/histogram_reservoir.go
new file mode 100644
index 00000000000..3b76cf305a4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/histogram_reservoir.go
@@ -0,0 +1,70 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package exemplar // import "go.opentelemetry.io/otel/sdk/metric/exemplar"
+
+import (
+	"context"
+	"slices"
+	"sort"
+	"time"
+
+	"go.opentelemetry.io/otel/attribute"
+)
+
+// HistogramReservoirProvider is a provider of [HistogramReservoir].
+func HistogramReservoirProvider(bounds []float64) ReservoirProvider {
+	cp := slices.Clone(bounds)
+	slices.Sort(cp)
+	return func(_ attribute.Set) Reservoir {
+		return NewHistogramReservoir(cp)
+	}
+}
+
+// NewHistogramReservoir returns a [HistogramReservoir] that samples the last
+// measurement that falls within a histogram bucket. The histogram bucket
+// upper-boundaries are defined by bounds.
+//
+// The passed bounds must be sorted before calling this function.
+func NewHistogramReservoir(bounds []float64) *HistogramReservoir {
+	return &HistogramReservoir{
+		bounds:  bounds,
+		storage: newStorage(len(bounds) + 1),
+	}
+}
+
+var _ Reservoir = &HistogramReservoir{}
+
+// HistogramReservoir is a [Reservoir] that samples the last measurement that
+// falls within a histogram bucket. The histogram bucket upper-boundaries are
+// defined by bounds.
+type HistogramReservoir struct {
+	*storage
+
+	// bounds are bucket bounds in ascending order.
+	bounds []float64
+}
+
+// Offer accepts the parameters associated with a measurement. The
+// parameters will be stored as an exemplar if the Reservoir decides to
+// sample the measurement.
+//
+// The passed ctx needs to contain any baggage or span that were active
+// when the measurement was made.
This information may be used by the +// Reservoir in making a sampling decision. +// +// The time t is the time when the measurement was made. The v and a +// parameters are the value and dropped (filtered) attributes of the +// measurement respectively. +func (r *HistogramReservoir) Offer(ctx context.Context, t time.Time, v Value, a []attribute.KeyValue) { + var x float64 + switch v.Type() { + case Int64ValueType: + x = float64(v.Int64()) + case Float64ValueType: + x = v.Float64() + default: + panic("unknown value type") + } + r.store[sort.SearchFloat64s(r.bounds, x)] = newMeasurement(ctx, t, v, a) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/reservoir.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/reservoir.go similarity index 73% rename from vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/reservoir.go rename to vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/reservoir.go index 80fa59554f2..ba5cd1a6b3d 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/reservoir.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/reservoir.go @@ -1,7 +1,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" +package exemplar // import "go.opentelemetry.io/otel/sdk/metric/exemplar" import ( "context" @@ -30,3 +30,11 @@ type Reservoir interface { // The Reservoir state is preserved after this call. Collect(dest *[]Exemplar) } + +// ReservoirProvider creates new [Reservoir]s. +// +// The attributes provided are attributes which are kept by the aggregation, and +// are exclusive with attributes passed to Offer. The combination of these +// attributes and the attributes passed to Offer is the complete set of +// attributes a measurement was made with. +type ReservoirProvider func(attr attribute.Set) Reservoir diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/storage.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/storage.go similarity index 94% rename from vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/storage.go rename to vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/storage.go index 10b2976f796..0e2e26dfb18 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/storage.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/storage.go @@ -1,7 +1,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" +package exemplar // import "go.opentelemetry.io/otel/sdk/metric/exemplar" import ( "context" @@ -35,7 +35,7 @@ func (r *storage) Collect(dest *[]Exemplar) { continue } - m.Exemplar(&(*dest)[n]) + m.exemplar(&(*dest)[n]) n++ } *dest = (*dest)[:n] @@ -66,8 +66,8 @@ func newMeasurement(ctx context.Context, ts time.Time, v Value, droppedAttr []at } } -// Exemplar returns m as an [Exemplar]. -func (m measurement) Exemplar(dest *Exemplar) { +// exemplar returns m as an [Exemplar]. 
+func (m measurement) exemplar(dest *Exemplar) { dest.FilteredAttributes = m.FilteredAttributes dest.Time = m.Time dest.Value = m.Value diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/value.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/value.go similarity index 84% rename from vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/value.go rename to vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/value.go index 9daf27dc006..590b089a806 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/value.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exemplar/value.go @@ -1,7 +1,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" +package exemplar // import "go.opentelemetry.io/otel/sdk/metric/exemplar" import "math" @@ -28,7 +28,8 @@ type Value struct { func NewValue[N int64 | float64](value N) Value { switch v := any(value).(type) { case int64: - return Value{t: Int64ValueType, val: uint64(v)} + // This can be later converted back to int64 (overflow not checked). + return Value{t: Int64ValueType, val: uint64(v)} // nolint:gosec case float64: return Value{t: Float64ValueType, val: math.Float64bits(v)} } @@ -42,7 +43,8 @@ func (v Value) Type() ValueType { return v.t } // Int64ValueType, 0 is returned. func (v Value) Int64() int64 { if v.t == Int64ValueType { - return int64(v.val) + // Assumes the correct int64 was stored in v.val based on type. + return int64(v.val) // nolint: gosec } return 0 } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/exporter.go b/vendor/go.opentelemetry.io/otel/sdk/metric/exporter.go index 1a3cccb6775..1969cb42cf4 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/exporter.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/exporter.go @@ -5,14 +5,14 @@ package metric // import "go.opentelemetry.io/otel/sdk/metric" import ( "context" - "fmt" + "errors" "go.opentelemetry.io/otel/sdk/metric/metricdata" ) // ErrExporterShutdown is returned if Export or Shutdown are called after an // Exporter has been Shutdown. -var ErrExporterShutdown = fmt.Errorf("exporter is shutdown") +var ErrExporterShutdown = errors.New("exporter is shutdown") // Exporter handles the delivery of metric data to external receivers. This is // the final component in the metric push pipeline. diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go b/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go index b52a330b3bc..c33e1a28cb4 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/instrument.go @@ -16,6 +16,7 @@ import ( "go.opentelemetry.io/otel/metric/embedded" "go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" + "go.opentelemetry.io/otel/sdk/metric/internal/x" ) var zeroScope instrumentation.Scope @@ -144,6 +145,12 @@ type Stream struct { // Use NewAllowKeysFilter from "go.opentelemetry.io/otel/attribute" to // provide an allow-list of attribute keys here. AttributeFilter attribute.Filter + // ExemplarReservoirProvider selects the + // [go.opentelemetry.io/otel/sdk/metric/exemplar.ReservoirProvider] based + // on the [Aggregation]. + // + // If unspecified, [DefaultExemplarReservoirProviderSelector] is used. + ExemplarReservoirProviderSelector ExemplarReservoirProviderSelector } // instID are the identifying properties of a instrument. 
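Editor's note: the new `Stream.ExemplarReservoirProviderSelector` field composes with the exemplar options added in `config.go` above. A sketch of how a user might wire them together; "request.duration" is a hypothetical instrument name, not part of the patch.

```go
package example

import (
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
	"go.opentelemetry.io/otel/sdk/metric/exemplar"
)

func newMeterProvider() *sdkmetric.MeterProvider {
	// Pin an invented instrument to a fixed-size exemplar reservoir of 4,
	// regardless of which aggregation the instrument ends up with.
	view := sdkmetric.NewView(
		sdkmetric.Instrument{Name: "request.duration"},
		sdkmetric.Stream{
			ExemplarReservoirProviderSelector: func(sdkmetric.Aggregation) exemplar.ReservoirProvider {
				return exemplar.FixedSizeReservoirProvider(4)
			},
		},
	)

	return sdkmetric.NewMeterProvider(
		sdkmetric.WithView(view),
		// The filter decides what is offered to the reservoir; the reservoir
		// decides what is kept.
		sdkmetric.WithExemplarFilter(exemplar.AlwaysOnFilter),
	)
}
```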
@@ -184,6 +191,7 @@ var ( _ metric.Int64UpDownCounter = (*int64Inst)(nil) _ metric.Int64Histogram = (*int64Inst)(nil) _ metric.Int64Gauge = (*int64Inst)(nil) + _ x.EnabledInstrument = (*int64Inst)(nil) ) func (i *int64Inst) Add(ctx context.Context, val int64, opts ...metric.AddOption) { @@ -196,6 +204,10 @@ func (i *int64Inst) Record(ctx context.Context, val int64, opts ...metric.Record i.aggregate(ctx, val, c.Attributes()) } +func (i *int64Inst) Enabled(_ context.Context) bool { + return len(i.measures) != 0 +} + func (i *int64Inst) aggregate(ctx context.Context, val int64, s attribute.Set) { // nolint:revive // okay to shadow pkg with method. for _, in := range i.measures { in(ctx, val, s) @@ -216,6 +228,7 @@ var ( _ metric.Float64UpDownCounter = (*float64Inst)(nil) _ metric.Float64Histogram = (*float64Inst)(nil) _ metric.Float64Gauge = (*float64Inst)(nil) + _ x.EnabledInstrument = (*float64Inst)(nil) ) func (i *float64Inst) Add(ctx context.Context, val float64, opts ...metric.AddOption) { @@ -228,14 +241,18 @@ func (i *float64Inst) Record(ctx context.Context, val float64, opts ...metric.Re i.aggregate(ctx, val, c.Attributes()) } +func (i *float64Inst) Enabled(_ context.Context) bool { + return len(i.measures) != 0 +} + func (i *float64Inst) aggregate(ctx context.Context, val float64, s attribute.Set) { for _, in := range i.measures { in(ctx, val, s) } } -// observablID is a comparable unique identifier of an observable. -type observablID[N int64 | float64] struct { +// observableID is a comparable unique identifier of an observable. +type observableID[N int64 | float64] struct { name string description string kind InstrumentKind @@ -287,7 +304,7 @@ func newInt64Observable(m *meter, kind InstrumentKind, name, desc, u string) int type observable[N int64 | float64] struct { metric.Observable - observablID[N] + observableID[N] meter *meter measures measures[N] @@ -296,7 +313,7 @@ type observable[N int64 | float64] struct { func newObservable[N int64 | float64](m *meter, kind InstrumentKind, name, desc, u string) *observable[N] { return &observable[N]{ - observablID: observablID[N]{ + observableID: observableID[N]{ name: name, description: desc, kind: kind, diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go index b18ee719bd1..fde21933389 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/aggregate.go @@ -8,7 +8,6 @@ import ( "time" "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" "go.opentelemetry.io/otel/sdk/metric/metricdata" ) @@ -38,8 +37,8 @@ type Builder[N int64 | float64] struct { // create new exemplar reservoirs for a new seen attribute set. // // If this is not provided a default factory function that returns an - // exemplar.Drop reservoir will be used. - ReservoirFunc func() exemplar.FilteredReservoir[N] + // dropReservoir reservoir will be used. + ReservoirFunc func(attribute.Set) FilteredExemplarReservoir[N] // AggregationLimit is the cardinality limit of measurement attributes. 
Any // measurement for new attributes once the limit has been reached will be // aggregated into a single aggregate for the "otel.metric.overflow" @@ -50,12 +49,12 @@ type Builder[N int64 | float64] struct { AggregationLimit int } -func (b Builder[N]) resFunc() func() exemplar.FilteredReservoir[N] { +func (b Builder[N]) resFunc() func(attribute.Set) FilteredExemplarReservoir[N] { if b.ReservoirFunc != nil { return b.ReservoirFunc } - return exemplar.Drop + return dropReservoir } type fltrMeasure[N int64 | float64] func(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/drop.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/drop.go new file mode 100644 index 00000000000..8396faaa4ae --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/drop.go @@ -0,0 +1,27 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/metric/exemplar" +) + +// dropReservoir returns a [FilteredExemplarReservoir] that drops all measurements it is offered. +func dropReservoir[N int64 | float64](attribute.Set) FilteredExemplarReservoir[N] { + return &dropRes[N]{} +} + +type dropRes[N int64 | float64] struct{} + +// Offer does nothing, all measurements offered will be dropped. +func (r *dropRes[N]) Offer(context.Context, N, []attribute.KeyValue) {} + +// Collect resets dest. No exemplars will ever be returned. +func (r *dropRes[N]) Collect(dest *[]exemplar.Exemplar) { + clear(*dest) // Erase elements to let GC collect objects + *dest = (*dest)[:0] +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exemplar.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exemplar.go index 170ae8e58e2..25d709948e9 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exemplar.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exemplar.go @@ -6,7 +6,7 @@ package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggreg import ( "sync" - "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" + "go.opentelemetry.io/otel/sdk/metric/exemplar" "go.opentelemetry.io/otel/sdk/metric/metricdata" ) @@ -17,6 +17,7 @@ var exemplarPool = sync.Pool{ func collectExemplars[N int64 | float64](out *[]metricdata.Exemplar[N], f func(*[]exemplar.Exemplar)) { dest := exemplarPool.Get().(*[]exemplar.Exemplar) defer func() { + clear(*dest) // Erase elements to let GC collect objects. *dest = (*dest)[:0] exemplarPool.Put(dest) }() diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go index c9c7e8f62a9..336ea91d1bf 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go @@ -12,7 +12,6 @@ import ( "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" "go.opentelemetry.io/otel/sdk/metric/metricdata" ) @@ -31,7 +30,7 @@ const ( // expoHistogramDataPoint is a single data point in an exponential histogram.
type expoHistogramDataPoint[N int64 | float64] struct { attrs attribute.Set - res exemplar.FilteredReservoir[N] + res FilteredExemplarReservoir[N] count uint64 min N @@ -42,25 +41,25 @@ type expoHistogramDataPoint[N int64 | float64] struct { noMinMax bool noSum bool - scale int + scale int32 posBuckets expoBuckets negBuckets expoBuckets zeroCount uint64 } -func newExpoHistogramDataPoint[N int64 | float64](attrs attribute.Set, maxSize, maxScale int, noMinMax, noSum bool) *expoHistogramDataPoint[N] { +func newExpoHistogramDataPoint[N int64 | float64](attrs attribute.Set, maxSize int, maxScale int32, noMinMax, noSum bool) *expoHistogramDataPoint[N] { f := math.MaxFloat64 - max := N(f) // if N is int64, max will overflow to -9223372036854775808 - min := N(-f) + ma := N(f) // if N is int64, max will overflow to -9223372036854775808 + mi := N(-f) if N(maxInt64) > N(f) { - max = N(maxInt64) - min = N(minInt64) + ma = N(maxInt64) + mi = N(minInt64) } return &expoHistogramDataPoint[N]{ attrs: attrs, - min: max, - max: min, + min: ma, + max: mi, maxSize: maxSize, noMinMax: noMinMax, noSum: noSum, @@ -119,11 +118,13 @@ func (p *expoHistogramDataPoint[N]) record(v N) { } // getBin returns the bin v should be recorded into. -func (p *expoHistogramDataPoint[N]) getBin(v float64) int { - frac, exp := math.Frexp(v) +func (p *expoHistogramDataPoint[N]) getBin(v float64) int32 { + frac, expInt := math.Frexp(v) + // 11-bit exponential. + exp := int32(expInt) // nolint: gosec if p.scale <= 0 { // Because of the choice of fraction is always 1 power of two higher than we want. - correction := 1 + var correction int32 = 1 if frac == .5 { // If v is an exact power of two the frac will be .5 and the exp // will be one higher than we want. @@ -131,25 +132,25 @@ func (p *expoHistogramDataPoint[N]) getBin(v float64) int { } return (exp - correction) >> (-p.scale) } - return exp<<p.scale - 1 + return exp<<p.scale - 1 // nolint: gosec } // scaleChange returns the magnitude of the scale change needed to fit bin in // the bucket. If no scale change is needed 0 is returned. -func (p *expoHistogramDataPoint[N]) scaleChange(bin, startBin, length int) int { +func (p *expoHistogramDataPoint[N]) scaleChange(bin, startBin int32, length int) int32 { if length == 0 { // No need to rescale if there are no buckets. return 0 } - low := startBin - high := bin + low := int(startBin) + high := int(bin) if startBin >= bin { - low = int(bin) wait + low = int(bin) + high = int(startBin) + length - 1 } - count := 0 + var count int32 for high-low >= p.maxSize { low = low >> 1 high = high >> 1 @@ -189,39 +190,39 @@ func (p *expoHistogramDataPoint[N]) scaleChange(bin, startBin, length int) int { // expoBuckets is a set of buckets in an exponential histogram. type expoBuckets struct { - startBin int + startBin int32 counts []uint64 } // record increments the count for the given bin, and expands the buckets if needed. // Size changes must be done before calling this function. -func (b *expoBuckets) record(bin int) { +func (b *expoBuckets) record(bin int32) { if len(b.counts) == 0 { b.counts = []uint64{1} b.startBin = bin return } - endBin := b.startBin + len(b.counts) - 1 + endBin := int(b.startBin) + len(b.counts) - 1 // if the new bin is inside the current range - if bin >= b.startBin && bin <= endBin { + if bin >= b.startBin && int(bin) <= endBin { b.counts[bin-b.startBin]++ return } // if the new bin is before the current start add spaces to the counts if bin < b.startBin { origLen := len(b.counts) - newLength := endBin - bin + 1 + newLength := endBin - int(bin) + 1 shift := b.startBin - bin if newLength > cap(b.counts) { b.counts = append(b.counts, make([]uint64, newLength-len(b.counts))...)
} - copy(b.counts[shift:origLen+shift], b.counts[:]) + copy(b.counts[shift:origLen+int(shift)], b.counts[:]) b.counts = b.counts[:newLength] - for i := 1; i < shift; i++ { + for i := 1; i < int(shift); i++ { b.counts[i] = 0 } b.startBin = bin @@ -229,17 +230,17 @@ func (b *expoBuckets) record(bin int) { return } // if the new is after the end add spaces to the end - if bin > endBin { - if bin-b.startBin < cap(b.counts) { + if int(bin) > endBin { + if int(bin-b.startBin) < cap(b.counts) { b.counts = b.counts[:bin-b.startBin+1] - for i := endBin + 1 - b.startBin; i < len(b.counts); i++ { + for i := endBin + 1 - int(b.startBin); i < len(b.counts); i++ { b.counts[i] = 0 } b.counts[bin-b.startBin] = 1 return } - end := make([]uint64, bin-b.startBin-len(b.counts)+1) + end := make([]uint64, int(bin-b.startBin)-len(b.counts)+1) b.counts = append(b.counts, end...) b.counts[bin-b.startBin] = 1 } @@ -247,7 +248,7 @@ func (b *expoBuckets) record(bin int) { // downscale shrinks a bucket by a factor of 2*s. It will sum counts into the // correct lower resolution bucket. -func (b *expoBuckets) downscale(delta int) { +func (b *expoBuckets) downscale(delta int32) { // Example // delta = 2 // Original offset: -6 @@ -262,19 +263,19 @@ func (b *expoBuckets) downscale(delta int) { return } - steps := 1 << delta + steps := int32(1) << delta offset := b.startBin % steps offset = (offset + steps) % steps // to make offset positive for i := 1; i < len(b.counts); i++ { - idx := i + offset - if idx%steps == 0 { - b.counts[idx/steps] = b.counts[i] + idx := i + int(offset) + if idx%int(steps) == 0 { + b.counts[idx/int(steps)] = b.counts[i] continue } - b.counts[idx/steps] += b.counts[i] + b.counts[idx/int(steps)] += b.counts[i] } - lastIdx := (len(b.counts) - 1 + offset) / steps + lastIdx := (len(b.counts) - 1 + int(offset)) / int(steps) b.counts = b.counts[:lastIdx+1] b.startBin = b.startBin >> delta } @@ -282,12 +283,12 @@ func (b *expoBuckets) downscale(delta int) { // newExponentialHistogram returns an Aggregator that summarizes a set of // measurements as an exponential histogram. Each histogram is scoped by attributes // and the aggregation cycle the measurements were made in. 
-func newExponentialHistogram[N int64 | float64](maxSize, maxScale int32, noMinMax, noSum bool, limit int, r func() exemplar.FilteredReservoir[N]) *expoHistogram[N] { +func newExponentialHistogram[N int64 | float64](maxSize, maxScale int32, noMinMax, noSum bool, limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *expoHistogram[N] { return &expoHistogram[N]{ noSum: noSum, noMinMax: noMinMax, maxSize: int(maxSize), - maxScale: int(maxScale), + maxScale: maxScale, newRes: r, limit: newLimiter[*expoHistogramDataPoint[N]](limit), @@ -303,9 +304,9 @@ type expoHistogram[N int64 | float64] struct { noSum bool noMinMax bool maxSize int - maxScale int + maxScale int32 - newRes func() exemplar.FilteredReservoir[N] + newRes func(attribute.Set) FilteredExemplarReservoir[N] limit limiter[*expoHistogramDataPoint[N]] values map[attribute.Distinct]*expoHistogramDataPoint[N] valuesMu sync.Mutex @@ -326,7 +327,7 @@ func (e *expoHistogram[N]) measure(ctx context.Context, value N, fltrAttr attrib v, ok := e.values[attr.Equivalent()] if !ok { v = newExpoHistogramDataPoint[N](attr, e.maxSize, e.maxScale, e.noMinMax, e.noSum) - v.res = e.newRes() + v.res = e.newRes(attr) e.values[attr.Equivalent()] = v } @@ -354,15 +355,15 @@ func (e *expoHistogram[N]) delta(dest *metricdata.Aggregation) int { hDPts[i].StartTime = e.start hDPts[i].Time = t hDPts[i].Count = val.count - hDPts[i].Scale = int32(val.scale) + hDPts[i].Scale = val.scale hDPts[i].ZeroCount = val.zeroCount hDPts[i].ZeroThreshold = 0.0 - hDPts[i].PositiveBucket.Offset = int32(val.posBuckets.startBin) + hDPts[i].PositiveBucket.Offset = val.posBuckets.startBin hDPts[i].PositiveBucket.Counts = reset(hDPts[i].PositiveBucket.Counts, len(val.posBuckets.counts), len(val.posBuckets.counts)) copy(hDPts[i].PositiveBucket.Counts, val.posBuckets.counts) - hDPts[i].NegativeBucket.Offset = int32(val.negBuckets.startBin) + hDPts[i].NegativeBucket.Offset = val.negBuckets.startBin hDPts[i].NegativeBucket.Counts = reset(hDPts[i].NegativeBucket.Counts, len(val.negBuckets.counts), len(val.negBuckets.counts)) copy(hDPts[i].NegativeBucket.Counts, val.negBuckets.counts) @@ -407,15 +408,15 @@ func (e *expoHistogram[N]) cumulative(dest *metricdata.Aggregation) int { hDPts[i].StartTime = e.start hDPts[i].Time = t hDPts[i].Count = val.count - hDPts[i].Scale = int32(val.scale) + hDPts[i].Scale = val.scale hDPts[i].ZeroCount = val.zeroCount hDPts[i].ZeroThreshold = 0.0 - hDPts[i].PositiveBucket.Offset = int32(val.posBuckets.startBin) + hDPts[i].PositiveBucket.Offset = val.posBuckets.startBin hDPts[i].PositiveBucket.Counts = reset(hDPts[i].PositiveBucket.Counts, len(val.posBuckets.counts), len(val.posBuckets.counts)) copy(hDPts[i].PositiveBucket.Counts, val.posBuckets.counts) - hDPts[i].NegativeBucket.Offset = int32(val.negBuckets.startBin) + hDPts[i].NegativeBucket.Offset = val.negBuckets.startBin hDPts[i].NegativeBucket.Counts = reset(hDPts[i].NegativeBucket.Counts, len(val.negBuckets.counts), len(val.negBuckets.counts)) copy(hDPts[i].NegativeBucket.Counts, val.negBuckets.counts) diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/filtered_reservoir.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/filtered_reservoir.go new file mode 100644 index 00000000000..691a910608d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/filtered_reservoir.go @@ -0,0 +1,50 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package aggregate // import 
"go.opentelemetry.io/otel/sdk/metric/internal/aggregate" + +import ( + "context" + "time" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/metric/exemplar" +) + +// FilteredExemplarReservoir wraps a [exemplar.Reservoir] with a filter. +type FilteredExemplarReservoir[N int64 | float64] interface { + // Offer accepts the parameters associated with a measurement. The + // parameters will be stored as an exemplar if the filter decides to + // sample the measurement. + // + // The passed ctx needs to contain any baggage or span that were active + // when the measurement was made. This information may be used by the + // Reservoir in making a sampling decision. + Offer(ctx context.Context, val N, attr []attribute.KeyValue) + // Collect returns all the held exemplars in the reservoir. + Collect(dest *[]exemplar.Exemplar) +} + +// filteredExemplarReservoir handles the pre-sampled exemplar of measurements made. +type filteredExemplarReservoir[N int64 | float64] struct { + filter exemplar.Filter + reservoir exemplar.Reservoir +} + +// NewFilteredExemplarReservoir creates a [FilteredExemplarReservoir] which only offers values +// that are allowed by the filter. +func NewFilteredExemplarReservoir[N int64 | float64](f exemplar.Filter, r exemplar.Reservoir) FilteredExemplarReservoir[N] { + return &filteredExemplarReservoir[N]{ + filter: f, + reservoir: r, + } +} + +func (f *filteredExemplarReservoir[N]) Offer(ctx context.Context, val N, attr []attribute.KeyValue) { + if f.filter(ctx) { + // only record the current time if we are sampling this measurement. + f.reservoir.Offer(ctx, time.Now(), exemplar.NewValue(val), attr) + } +} + +func (f *filteredExemplarReservoir[N]) Collect(dest *[]exemplar.Exemplar) { f.reservoir.Collect(dest) } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go index ade0941f5f5..d577ae2c198 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/histogram.go @@ -11,13 +11,12 @@ import ( "time" "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" "go.opentelemetry.io/otel/sdk/metric/metricdata" ) type buckets[N int64 | float64] struct { attrs attribute.Set - res exemplar.FilteredReservoir[N] + res FilteredExemplarReservoir[N] counts []uint64 count uint64 @@ -48,13 +47,13 @@ type histValues[N int64 | float64] struct { noSum bool bounds []float64 - newRes func() exemplar.FilteredReservoir[N] + newRes func(attribute.Set) FilteredExemplarReservoir[N] limit limiter[*buckets[N]] values map[attribute.Distinct]*buckets[N] valuesMu sync.Mutex } -func newHistValues[N int64 | float64](bounds []float64, noSum bool, limit int, r func() exemplar.FilteredReservoir[N]) *histValues[N] { +func newHistValues[N int64 | float64](bounds []float64, noSum bool, limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *histValues[N] { // The responsibility of keeping all buckets correctly associated with the // passed boundaries is ultimately this type's responsibility. Make a copy // here so we can always guarantee this. 
Or, in the case of failure, have @@ -94,7 +93,7 @@ func (s *histValues[N]) measure(ctx context.Context, value N, fltrAttr attribute // // buckets = (-∞, 0], (0, 5.0], (5.0, 10.0], (10.0, +∞) b = newBuckets[N](attr, len(s.bounds)+1) - b.res = s.newRes() + b.res = s.newRes(attr) // Ensure min and max are recorded values (not zero), for new buckets. b.min, b.max = value, value @@ -109,7 +108,7 @@ func (s *histValues[N]) measure(ctx context.Context, value N, fltrAttr attribute // newHistogram returns an Aggregator that summarizes a set of measurements as // an histogram. -func newHistogram[N int64 | float64](boundaries []float64, noMinMax, noSum bool, limit int, r func() exemplar.FilteredReservoir[N]) *histogram[N] { +func newHistogram[N int64 | float64](boundaries []float64, noMinMax, noSum bool, limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *histogram[N] { return &histogram[N]{ histValues: newHistValues[N](boundaries, noSum, limit, r), noMinMax: noMinMax, diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go index c359368403e..d3a93f085c9 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/lastvalue.go @@ -9,7 +9,6 @@ import ( "time" "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" "go.opentelemetry.io/otel/sdk/metric/metricdata" ) @@ -17,10 +16,10 @@ import ( type datapoint[N int64 | float64] struct { attrs attribute.Set value N - res exemplar.FilteredReservoir[N] + res FilteredExemplarReservoir[N] } -func newLastValue[N int64 | float64](limit int, r func() exemplar.FilteredReservoir[N]) *lastValue[N] { +func newLastValue[N int64 | float64](limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *lastValue[N] { return &lastValue[N]{ newRes: r, limit: newLimiter[datapoint[N]](limit), @@ -33,7 +32,7 @@ func newLastValue[N int64 | float64](limit int, r func() exemplar.FilteredReserv type lastValue[N int64 | float64] struct { sync.Mutex - newRes func() exemplar.FilteredReservoir[N] + newRes func(attribute.Set) FilteredExemplarReservoir[N] limit limiter[datapoint[N]] values map[attribute.Distinct]datapoint[N] start time.Time @@ -46,7 +45,7 @@ func (s *lastValue[N]) measure(ctx context.Context, value N, fltrAttr attribute. attr := s.limit.Attributes(fltrAttr, s.values) d, ok := s.values[attr.Equivalent()] if !ok { - d.res = s.newRes() + d.res = s.newRes(attr) } d.attrs = attr @@ -115,7 +114,7 @@ func (s *lastValue[N]) copyDpts(dest *[]metricdata.DataPoint[N], t time.Time) in // newPrecomputedLastValue returns an aggregator that summarizes a set of // observations as the last one made. 
-func newPrecomputedLastValue[N int64 | float64](limit int, r func() exemplar.FilteredReservoir[N]) *precomputedLastValue[N] { +func newPrecomputedLastValue[N int64 | float64](limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *precomputedLastValue[N] { return &precomputedLastValue[N]{lastValue: newLastValue[N](limit, r)} } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go index 89136692260..8e132ad6181 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/sum.go @@ -9,25 +9,24 @@ import ( "time" "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" "go.opentelemetry.io/otel/sdk/metric/metricdata" ) type sumValue[N int64 | float64] struct { n N - res exemplar.FilteredReservoir[N] + res FilteredExemplarReservoir[N] attrs attribute.Set } // valueMap is the storage for sums. type valueMap[N int64 | float64] struct { sync.Mutex - newRes func() exemplar.FilteredReservoir[N] + newRes func(attribute.Set) FilteredExemplarReservoir[N] limit limiter[sumValue[N]] values map[attribute.Distinct]sumValue[N] } -func newValueMap[N int64 | float64](limit int, r func() exemplar.FilteredReservoir[N]) *valueMap[N] { +func newValueMap[N int64 | float64](limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *valueMap[N] { return &valueMap[N]{ newRes: r, limit: newLimiter[sumValue[N]](limit), @@ -42,7 +41,7 @@ func (s *valueMap[N]) measure(ctx context.Context, value N, fltrAttr attribute.S attr := s.limit.Attributes(fltrAttr, s.values) v, ok := s.values[attr.Equivalent()] if !ok { - v.res = s.newRes() + v.res = s.newRes(attr) } v.attrs = attr @@ -55,7 +54,7 @@ func (s *valueMap[N]) measure(ctx context.Context, value N, fltrAttr attribute.S // newSum returns an aggregator that summarizes a set of measurements as their // arithmetic sum. Each sum is scoped by attributes and the aggregation cycle // the measurements were made in. -func newSum[N int64 | float64](monotonic bool, limit int, r func() exemplar.FilteredReservoir[N]) *sum[N] { +func newSum[N int64 | float64](monotonic bool, limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *sum[N] { return &sum[N]{ valueMap: newValueMap[N](limit, r), monotonic: monotonic, @@ -142,9 +141,9 @@ func (s *sum[N]) cumulative(dest *metricdata.Aggregation) int { } // newPrecomputedSum returns an aggregator that summarizes a set of -// observatrions as their arithmetic sum. Each sum is scoped by attributes and +// observations as their arithmetic sum. Each sum is scoped by attributes and // the aggregation cycle the measurements were made in. -func newPrecomputedSum[N int64 | float64](monotonic bool, limit int, r func() exemplar.FilteredReservoir[N]) *precomputedSum[N] { +func newPrecomputedSum[N int64 | float64](monotonic bool, limit int, r func(attribute.Set) FilteredExemplarReservoir[N]) *precomputedSum[N] { return &precomputedSum[N]{ valueMap: newValueMap[N](limit, r), monotonic: monotonic, @@ -152,7 +151,7 @@ func newPrecomputedSum[N int64 | float64](monotonic bool, limit int, r func() ex } } -// precomputedSum summarizes a set of observatrions as their arithmetic sum. +// precomputedSum summarizes a set of observations as their arithmetic sum. 
type precomputedSum[N int64 | float64] struct { *valueMap[N] diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/drop.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/drop.go deleted file mode 100644 index 5a0f39ae147..00000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/drop.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" - -import ( - "context" - - "go.opentelemetry.io/otel/attribute" -) - -// Drop returns a [FilteredReservoir] that drops all measurements it is offered. -func Drop[N int64 | float64]() FilteredReservoir[N] { return &dropRes[N]{} } - -type dropRes[N int64 | float64] struct{} - -// Offer does nothing, all measurements offered will be dropped. -func (r *dropRes[N]) Offer(context.Context, N, []attribute.KeyValue) {} - -// Collect resets dest. No exemplars will ever be returned. -func (r *dropRes[N]) Collect(dest *[]Exemplar) { - *dest = (*dest)[:0] -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filtered_reservoir.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filtered_reservoir.go deleted file mode 100644 index 9fedfa4be68..00000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/filtered_reservoir.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" - -import ( - "context" - "time" - - "go.opentelemetry.io/otel/attribute" -) - -// FilteredReservoir wraps a [Reservoir] with a filter. -type FilteredReservoir[N int64 | float64] interface { - // Offer accepts the parameters associated with a measurement. The - // parameters will be stored as an exemplar if the filter decides to - // sample the measurement. - // - // The passed ctx needs to contain any baggage or span that were active - // when the measurement was made. This information may be used by the - // Reservoir in making a sampling decision. - Offer(ctx context.Context, val N, attr []attribute.KeyValue) - // Collect returns all the held exemplars in the reservoir. - Collect(dest *[]Exemplar) -} - -// filteredReservoir handles the pre-sampled exemplar of measurements made. -type filteredReservoir[N int64 | float64] struct { - filter Filter - reservoir Reservoir -} - -// NewFilteredReservoir creates a [FilteredReservoir] which only offers values -// that are allowed by the filter. -func NewFilteredReservoir[N int64 | float64](f Filter, r Reservoir) FilteredReservoir[N] { - return &filteredReservoir[N]{ - filter: f, - reservoir: r, - } -} - -func (f *filteredReservoir[N]) Offer(ctx context.Context, val N, attr []attribute.KeyValue) { - if f.filter(ctx) { - // only record the current time if we are sampling this measurment. 
- f.reservoir.Offer(ctx, time.Now(), NewValue(val), attr) - } -} - -func (f *filteredReservoir[N]) Collect(dest *[]Exemplar) { f.reservoir.Collect(dest) } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/hist.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/hist.go deleted file mode 100644 index a6ff86d0271..00000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/hist.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package exemplar // import "go.opentelemetry.io/otel/sdk/metric/internal/exemplar" - -import ( - "context" - "slices" - "sort" - "time" - - "go.opentelemetry.io/otel/attribute" -) - -// Histogram returns a [Reservoir] that samples the last measurement that falls -// within a histogram bucket. The histogram bucket upper-boundaries are define -// by bounds. -// -// The passed bounds will be sorted by this function. -func Histogram(bounds []float64) Reservoir { - slices.Sort(bounds) - return &histRes{ - bounds: bounds, - storage: newStorage(len(bounds) + 1), - } -} - -type histRes struct { - *storage - - // bounds are bucket bounds in ascending order. - bounds []float64 -} - -func (r *histRes) Offer(ctx context.Context, t time.Time, v Value, a []attribute.KeyValue) { - var x float64 - switch v.Type() { - case Int64ValueType: - x = float64(v.Int64()) - case Float64ValueType: - x = v.Float64() - default: - panic("unknown value type") - } - r.store[sort.SearchFloat64s(r.bounds, x)] = newMeasurement(ctx, t, v, a) -} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/README.md b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/README.md index aba69d65471..59f736b733f 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/README.md +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/README.md @@ -10,6 +10,7 @@ See the [Compatibility and Stability](#compatibility-and-stability) section for - [Cardinality Limit](#cardinality-limit) - [Exemplars](#exemplars) +- [Instrument Enabled](#instrument-enabled) ### Cardinality Limit @@ -102,6 +103,24 @@ Revert to the default exemplar filter (`"trace_based"`) unset OTEL_METRICS_EXEMPLAR_FILTER ``` +### Instrument Enabled + +To help users avoid performing computationally expensive operations when recording measurements, synchronous instruments provide an `Enabled` method. + +#### Examples + +The following code shows an example of how to check if an instrument implements the `EnabledInstrument` interface before using the `Enabled` function to avoid doing an expensive computation: + +```go +type enabledInstrument interface { Enabled(context.Context) bool } + +ctr, err := m.Int64Counter("expensive-counter") +c, ok := ctr.(enabledInstrument) +if !ok || c.Enabled(context.Background()) { + c.Add(expensiveComputation()) +} +``` + ## Compatibility and Stability Experimental features do not fall within the scope of the OpenTelemetry Go versioning and stability [policy](../../../../VERSIONING.md). 
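The README example just added is illustrative rather than compilable: `c` only exposes `Enabled`, and a counter's `Add` takes a context and a value. A self-contained variant of the same guard, assuming a `metric.Meter` `m` and a caller-supplied `expensiveComputation` (both hypothetical names):

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
)

// enabledInstrument mirrors the experimental x.EnabledInstrument interface
// without importing the internal package.
type enabledInstrument interface {
	Enabled(context.Context) bool
}

// recordExpensive only pays for expensiveComputation when at least one
// pipeline will actually aggregate the measurement.
func recordExpensive(ctx context.Context, m metric.Meter, expensiveComputation func() int64) error {
	ctr, err := m.Int64Counter("expensive-counter")
	if err != nil {
		return err
	}
	if c, ok := ctr.(enabledInstrument); !ok || c.Enabled(ctx) {
		ctr.Add(ctx, expensiveComputation())
	}
	return nil
}

func main() {
	m := otel.GetMeterProvider().Meter("example")
	_ = recordExpensive(context.Background(), m, func() int64 { return 42 })
}
```

Note that the `!ok` branch still records: an implementation that does not expose `Enabled` gives no signal either way, so the measurement proceeds.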
diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/x.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/x.go index 8cd2f37417b..a98606238ad 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/x.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/x/x.go @@ -8,41 +8,26 @@ package x // import "go.opentelemetry.io/otel/sdk/metric/internal/x" import ( + "context" "os" "strconv" - "strings" ) -var ( - // Exemplars is an experimental feature flag that defines if exemplars - // should be recorded for metric data-points. - // - // To enable this feature set the OTEL_GO_X_EXEMPLAR environment variable - // to the case-insensitive string value of "true" (i.e. "True" and "TRUE" - // will also enable this). - Exemplars = newFeature("EXEMPLAR", func(v string) (string, bool) { - if strings.ToLower(v) == "true" { - return v, true - } - return "", false - }) - - // CardinalityLimit is an experimental feature flag that defines if - // cardinality limits should be applied to the recorded metric data-points. - // - // To enable this feature set the OTEL_GO_X_CARDINALITY_LIMIT environment - // variable to the integer limit value you want to use. - // - // Setting OTEL_GO_X_CARDINALITY_LIMIT to a value less than or equal to 0 - // will disable the cardinality limits. - CardinalityLimit = newFeature("CARDINALITY_LIMIT", func(v string) (int, bool) { - n, err := strconv.Atoi(v) - if err != nil { - return 0, false - } - return n, true - }) -) +// CardinalityLimit is an experimental feature flag that defines if +// cardinality limits should be applied to the recorded metric data-points. +// +// To enable this feature set the OTEL_GO_X_CARDINALITY_LIMIT environment +// variable to the integer limit value you want to use. +// +// Setting OTEL_GO_X_CARDINALITY_LIMIT to a value less than or equal to 0 +// will disable the cardinality limits. +var CardinalityLimit = newFeature("CARDINALITY_LIMIT", func(v string) (int, bool) { + n, err := strconv.Atoi(v) + if err != nil { + return 0, false + } + return n, true +}) // Feature is an experimental feature control flag. It provides a uniform way // to interact with these feature flags and parse their values. @@ -83,3 +68,14 @@ func (f Feature[T]) Enabled() bool { _, ok := f.Lookup() return ok } + +// EnabledInstrument informs whether the instrument is enabled. +// +// EnabledInstrument interface is implemented by synchronous instruments. +type EnabledInstrument interface { + // Enabled returns whether the instrument will process measurements for the given context. + // + // This function can be used in places where measuring an instrument + // would result in computationally expensive operations. + Enabled(context.Context) bool +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go b/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go index e0fd86ca78d..c495985bc28 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/manual_reader.go @@ -113,18 +113,17 @@ func (mr *ManualReader) Collect(ctx context.Context, rm *metricdata.ResourceMetr if err != nil { return err } - var errs []error for _, producer := range mr.externalProducers.Load().([]Producer) { - externalMetrics, err := producer.Produce(ctx) - if err != nil { - errs = append(errs, err) + externalMetrics, e := producer.Produce(ctx) + if e != nil { + err = errors.Join(err, e) } rm.ScopeMetrics = append(rm.ScopeMetrics, externalMetrics...) 
} global.Debug("ManualReader collection", "Data", rm) - return unifyErrors(errs) + return err } // MarshalLog returns logging data about the ManualReader. diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go b/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go index 479b7610eb1..a6ccd117b80 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go @@ -150,6 +150,11 @@ func (m *meter) int64ObservableInstrument(id Instrument, callbacks []metric.Int6 continue } inst.appendMeasures(in) + + // Add the measures to the pipeline. It is required to maintain + // measures per pipeline to avoid calling the measure that + // is not part of the pipeline. + insert.pipeline.addInt64Measure(inst.observableID, in) for _, cback := range callbacks { inst := int64Observer{measures: in} fn := cback @@ -185,6 +190,11 @@ func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64Obser // configured with options. The instrument is used to asynchronously record // int64 measurements once per a measurement collection cycle. Only the // measurements recorded during the collection cycle are exported. +// +// If Int64ObservableUpDownCounter is invoked repeatedly with the same Name, +// Description, and Unit, only the first set of callbacks provided are used. +// Use meter.RegisterCallback and Registration.Unregister to manage callbacks +// if instrumentation can be created multiple times with different callbacks. func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) { cfg := metric.NewInt64ObservableUpDownCounterConfig(options...) id := Instrument{ @@ -201,6 +211,11 @@ func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int6 // configured with options. The instrument is used to asynchronously record // instantaneous int64 measurements once per a measurement collection cycle. // Only the measurements recorded during the collection cycle are exported. +// +// If Int64ObservableGauge is invoked repeatedly with the same Name, +// Description, and Unit, only the first set of callbacks provided are used. +// Use meter.RegisterCallback and Registration.Unregister to manage callbacks +// if instrumentation can be created multiple times with different callbacks. func (m *meter) Int64ObservableGauge(name string, options ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) { cfg := metric.NewInt64ObservableGaugeConfig(options...) id := Instrument{ @@ -299,6 +314,11 @@ func (m *meter) float64ObservableInstrument(id Instrument, callbacks []metric.Fl continue } inst.appendMeasures(in) + + // Add the measures to the pipeline. It is required to maintain + // measures per pipeline to avoid calling the measure that + // is not part of the pipeline. + insert.pipeline.addFloat64Measure(inst.observableID, in) for _, cback := range callbacks { inst := float64Observer{measures: in} fn := cback @@ -334,6 +354,11 @@ func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64O // and configured with options. The instrument is used to asynchronously record // float64 measurements once per a measurement collection cycle. Only the // measurements recorded during the collection cycle are exported. +// +// If Float64ObservableUpDownCounter is invoked repeatedly with the same Name, +// Description, and Unit, only the first set of callbacks provided are used. 
+// Use meter.RegisterCallback and Registration.Unregister to manage callbacks +// if instrumentation can be created multiple times with different callbacks. func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) { cfg := metric.NewFloat64ObservableUpDownCounterConfig(options...) id := Instrument{ @@ -350,6 +375,11 @@ func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Fl // configured with options. The instrument is used to asynchronously record // instantaneous float64 measurements once per a measurement collection cycle. // Only the measurements recorded during the collection cycle are exported. +// +// If Float64ObservableGauge is invoked repeatedly with the same Name, +// Description, and Unit, only the first set of callbacks provided are used. +// Use meter.RegisterCallback and Registration.Unregister to manage callbacks +// if instrumentation can be created multiple times with different callbacks. func (m *meter) Float64ObservableGauge(name string, options ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) { cfg := metric.NewFloat64ObservableGaugeConfig(options...) id := Instrument{ @@ -421,73 +451,80 @@ func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) return noopRegister{}, nil } - reg := newObserver() - var errs multierror + var err error + validInstruments := make([]metric.Observable, 0, len(insts)) for _, inst := range insts { - // Unwrap any global. - if u, ok := inst.(interface { - Unwrap() metric.Observable - }); ok { - inst = u.Unwrap() - } - switch o := inst.(type) { case int64Observable: - if err := o.registerable(m); err != nil { - if !errors.Is(err, errEmptyAgg) { - errs.append(err) + if e := o.registerable(m); e != nil { + if !errors.Is(e, errEmptyAgg) { + err = errors.Join(err, e) } continue } - reg.registerInt64(o.observablID) + + validInstruments = append(validInstruments, inst) case float64Observable: - if err := o.registerable(m); err != nil { - if !errors.Is(err, errEmptyAgg) { - errs.append(err) + if e := o.registerable(m); e != nil { + if !errors.Is(e, errEmptyAgg) { + err = errors.Join(err, e) } continue } - reg.registerFloat64(o.observablID) + + validInstruments = append(validInstruments, inst) default: // Instrument external to the SDK. - return nil, fmt.Errorf("invalid observable: from different implementation") + return nil, errors.New("invalid observable: from different implementation") } } - err := errs.errorOrNil() - if reg.len() == 0 { + if len(validInstruments) == 0 { // All insts use drop aggregation or are invalid. return noopRegister{}, err } - // Some or all instruments were valid. - cback := func(ctx context.Context) error { return f(ctx, reg) } - return m.pipes.registerMultiCallback(cback), err + unregs := make([]func(), len(m.pipes)) + for ix, pipe := range m.pipes { + reg := newObserver(pipe) + for _, inst := range validInstruments { + switch o := inst.(type) { + case int64Observable: + reg.registerInt64(o.observableID) + case float64Observable: + reg.registerFloat64(o.observableID) + } + } + + // Some or all instruments were valid. 
+ cBack := func(ctx context.Context) error { return f(ctx, reg) } + unregs[ix] = pipe.addMultiCallback(cBack) + } + + return unregisterFuncs{f: unregs}, err } type observer struct { embedded.Observer - float64 map[observablID[float64]]struct{} - int64 map[observablID[int64]]struct{} + pipe *pipeline + float64 map[observableID[float64]]struct{} + int64 map[observableID[int64]]struct{} } -func newObserver() observer { return observer{ - float64: make(map[observablID[float64]]struct{}), - int64: make(map[observablID[int64]]struct{}), +func newObserver(p *pipeline) observer { return observer{ + pipe: p, + float64: make(map[observableID[float64]]struct{}), + int64: make(map[observableID[int64]]struct{}), } } -func (r observer) len() int { - return len(r.float64) + len(r.int64) -} - -func (r observer) registerFloat64(id observablID[float64]) { +func (r observer) registerFloat64(id observableID[float64]) { r.float64[id] = struct{}{} } -func (r observer) registerInt64(id observablID[int64]) { +func (r observer) registerInt64(id observableID[int64]) { r.int64[id] = struct{}{} } @@ -501,22 +538,12 @@ func (r observer) ObserveFloat64(o metric.Float64Observable, v float64, opts ... switch conv := o.(type) { case float64Observable: oImpl = conv - case interface { - Unwrap() metric.Observable - }: - // Unwrap any global. - async := conv.Unwrap() - var ok bool - if oImpl, ok = async.(float64Observable); !ok { - global.Error(errUnknownObserver, "failed to record asynchronous") - return - } default: global.Error(errUnknownObserver, "failed to record") return } - if _, registered := r.float64[oImpl.observablID]; !registered { + if _, registered := r.float64[oImpl.observableID]; !registered { if !oImpl.dropAggregation { global.Error(errUnregObserver, "failed to record", "name", oImpl.name, @@ -528,7 +555,12 @@ func (r observer) ObserveFloat64(o metric.Float64Observable, v float64, opts ... return } c := metric.NewObserveConfig(opts) - oImpl.observe(v, c.Attributes()) + // Access to r.pipe.float64Measures is already guarded by a lock in pipeline.produce. + // TODO (#5946): Refactor pipeline and observable measures. + measures := r.pipe.float64Measures[oImpl.observableID] + for _, m := range measures { + m(context.Background(), v, c.Attributes()) + } } func (r observer) ObserveInt64(o metric.Int64Observable, v int64, opts ...metric.ObserveOption) { @@ -536,22 +568,12 @@ func (r observer) ObserveInt64(o metric.Int64Observable, v int64, opts ...metric switch conv := o.(type) { case int64Observable: oImpl = conv - case interface { - Unwrap() metric.Observable - }: - // Unwrap any global. - async := conv.Unwrap() - var ok bool - if oImpl, ok = async.(int64Observable); !ok { - global.Error(errUnknownObserver, "failed to record asynchronous") - return - } default: global.Error(errUnknownObserver, "failed to record") return } - if _, registered := r.int64[oImpl.observablID]; !registered { + if _, registered := r.int64[oImpl.observableID]; !registered { if !oImpl.dropAggregation { global.Error(errUnregObserver, "failed to record", "name", oImpl.name, @@ -563,7 +585,12 @@ func (r observer) ObserveInt64(o metric.Int64Observable, v int64, opts ...metric return } c := metric.NewObserveConfig(opts) - oImpl.observe(v, c.Attributes()) + // Access to r.pipe.int64Measures is already guarded by a lock in pipeline.produce. + // TODO (#5946): Refactor pipeline and observable measures.
+ measures := r.pipe.int64Measures[oImpl.observableID] + for _, m := range measures { + m(context.Background(), v, c.Attributes()) + } } type noopRegister struct{ embedded.Registration } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go b/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go index 67ee1b11a2e..dcd2182d9a1 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/periodic_reader.go @@ -251,18 +251,17 @@ func (r *PeriodicReader) collect(ctx context.Context, p interface{}, rm *metricd if err != nil { return err } - var errs []error for _, producer := range r.externalProducers.Load().([]Producer) { - externalMetrics, err := producer.Produce(ctx) - if err != nil { - errs = append(errs, err) + externalMetrics, e := producer.Produce(ctx) + if e != nil { + err = errors.Join(err, e) } rm.ScopeMetrics = append(rm.ScopeMetrics, externalMetrics...) } global.Debug("PeriodicReader collection", "Data", rm) - return unifyErrors(errs) + return err } // export exports metric data m using r's exporter. diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go b/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go index 823bf2fe3d2..775e2452619 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/pipeline.go @@ -8,14 +8,13 @@ import ( "context" "errors" "fmt" - "strings" "sync" "sync/atomic" "go.opentelemetry.io/otel/internal/global" - "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/metric/embedded" "go.opentelemetry.io/otel/sdk/instrumentation" + "go.opentelemetry.io/otel/sdk/metric/exemplar" "go.opentelemetry.io/otel/sdk/metric/internal" "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" "go.opentelemetry.io/otel/sdk/metric/internal/x" @@ -38,14 +37,17 @@ type instrumentSync struct { compAgg aggregate.ComputeAggregation } -func newPipeline(res *resource.Resource, reader Reader, views []View) *pipeline { +func newPipeline(res *resource.Resource, reader Reader, views []View, exemplarFilter exemplar.Filter) *pipeline { if res == nil { res = resource.Empty() } return &pipeline{ - resource: res, - reader: reader, - views: views, + resource: res, + reader: reader, + views: views, + int64Measures: map[observableID[int64]][]aggregate.Measure[int64]{}, + float64Measures: map[observableID[float64]][]aggregate.Measure[float64]{}, + exemplarFilter: exemplarFilter, // aggregations is lazy allocated when needed. } } @@ -63,9 +65,26 @@ type pipeline struct { views []View sync.Mutex - aggregations map[instrumentation.Scope][]instrumentSync - callbacks []func(context.Context) error - multiCallbacks list.List + int64Measures map[observableID[int64]][]aggregate.Measure[int64] + float64Measures map[observableID[float64]][]aggregate.Measure[float64] + aggregations map[instrumentation.Scope][]instrumentSync + callbacks []func(context.Context) error + multiCallbacks list.List + exemplarFilter exemplar.Filter +} + +// addInt64Measure adds a new int64 measure to the pipeline for each observer. +func (p *pipeline) addInt64Measure(id observableID[int64], m []aggregate.Measure[int64]) { + p.Lock() + defer p.Unlock() + p.int64Measures[id] = m +} + +// addFloat64Measure adds a new float64 measure to the pipeline for each observer. 
+func (p *pipeline) addFloat64Measure(id observableID[float64], m []aggregate.Measure[float64]) { + p.Lock() + defer p.Unlock() + p.float64Measures[id] = m } // addSync adds the instrumentSync to pipeline p with scope. This method is not @@ -105,14 +124,15 @@ func (p *pipeline) produce(ctx context.Context, rm *metricdata.ResourceMetrics) p.Lock() defer p.Unlock() - var errs multierror + var err error for _, c := range p.callbacks { // TODO make the callbacks parallel. ( #3034 ) - if err := c(ctx); err != nil { - errs.append(err) + if e := c(ctx); e != nil { + err = errors.Join(err, e) } if err := ctx.Err(); err != nil { rm.Resource = nil + clear(rm.ScopeMetrics) // Erase elements to let GC collect objects. rm.ScopeMetrics = rm.ScopeMetrics[:0] return err } @@ -120,12 +140,13 @@ func (p *pipeline) produce(ctx context.Context, rm *metricdata.ResourceMetrics) for e := p.multiCallbacks.Front(); e != nil; e = e.Next() { // TODO make the callbacks parallel. ( #3034 ) f := e.Value.(multiCallback) - if err := f(ctx); err != nil { - errs.append(err) + if e := f(ctx); e != nil { + err = errors.Join(err, e) } if err := ctx.Err(); err != nil { // This means the context expired before we finished running callbacks. rm.Resource = nil + clear(rm.ScopeMetrics) // Erase elements to let GC collect objects. rm.ScopeMetrics = rm.ScopeMetrics[:0] return err } @@ -157,7 +178,7 @@ func (p *pipeline) produce(ctx context.Context, rm *metricdata.ResourceMetrics) rm.ScopeMetrics = rm.ScopeMetrics[:i] - return errs.errorOrNil() + return err } // inserter facilitates inserting of new instruments from a single scope into a @@ -219,7 +240,7 @@ func (i *inserter[N]) Instrument(inst Instrument, readerAggregation Aggregation) measures []aggregate.Measure[N] ) - errs := &multierror{wrapped: errCreatingAggregators} + var err error seen := make(map[uint64]struct{}) for _, v := range i.pipeline.views { stream, match := v(inst) @@ -227,9 +248,9 @@ func (i *inserter[N]) Instrument(inst Instrument, readerAggregation Aggregation) continue } matched = true - in, id, err := i.cachedAggregator(inst.Scope, inst.Kind, stream, readerAggregation) - if err != nil { - errs.append(err) + in, id, e := i.cachedAggregator(inst.Scope, inst.Kind, stream, readerAggregation) + if e != nil { + err = errors.Join(err, e) } if in == nil { // Drop aggregation. continue @@ -242,8 +263,12 @@ func (i *inserter[N]) Instrument(inst Instrument, readerAggregation Aggregation) measures = append(measures, in) } + if err != nil { + err = errors.Join(errCreatingAggregators, err) + } + if matched { - return measures, errs.errorOrNil() + return measures, err } // Apply implicit default view if no explicit matched. @@ -252,15 +277,18 @@ func (i *inserter[N]) Instrument(inst Instrument, readerAggregation Aggregation) Description: inst.Description, Unit: inst.Unit, } - in, _, err := i.cachedAggregator(inst.Scope, inst.Kind, stream, readerAggregation) - if err != nil { - errs.append(err) + in, _, e := i.cachedAggregator(inst.Scope, inst.Kind, stream, readerAggregation) + if e != nil { + if err == nil { + err = errCreatingAggregators + } + err = errors.Join(err, e) } if in != nil { // Ensured to have not seen given matched was false. measures = append(measures, in) } - return measures, errs.errorOrNil() + return measures, err } // addCallback registers a single instrument callback to be run when @@ -329,6 +357,9 @@ func (i *inserter[N]) cachedAggregator(scope instrumentation.Scope, kind Instrum // The view explicitly requested the default aggregation. 
stream.Aggregation = DefaultAggregationSelector(kind) } + if stream.ExemplarReservoirProviderSelector == nil { + stream.ExemplarReservoirProviderSelector = DefaultExemplarReservoirProviderSelector + } if err := isAggregatorCompatible(kind, stream.Aggregation); err != nil { return nil, 0, fmt.Errorf( @@ -349,7 +380,7 @@ func (i *inserter[N]) cachedAggregator(scope instrumentation.Scope, kind Instrum cv := i.aggregators.Lookup(normID, func() aggVal[N] { b := aggregate.Builder[N]{ Temporality: i.pipeline.reader.temporality(kind), - ReservoirFunc: reservoirFunc[N](stream.Aggregation), + ReservoirFunc: reservoirFunc[N](stream.ExemplarReservoirProviderSelector(stream.Aggregation), i.pipeline.exemplarFilter), } b.Filter = stream.AttributeFilter // A value less than or equal to zero will disable the aggregation @@ -552,24 +583,16 @@ func isAggregatorCompatible(kind InstrumentKind, agg Aggregation) error { // measurement. type pipelines []*pipeline -func newPipelines(res *resource.Resource, readers []Reader, views []View) pipelines { +func newPipelines(res *resource.Resource, readers []Reader, views []View, exemplarFilter exemplar.Filter) pipelines { pipes := make([]*pipeline, 0, len(readers)) for _, r := range readers { - p := newPipeline(res, r, views) + p := newPipeline(res, r, views, exemplarFilter) r.register(p) pipes = append(pipes, p) } return pipes } -func (p pipelines) registerMultiCallback(c multiCallback) metric.Registration { - unregs := make([]func(), len(p)) - for i, pipe := range p { - unregs[i] = pipe.addMultiCallback(c) - } - return unregisterFuncs{f: unregs} -} - type unregisterFuncs struct { embedded.Registration f []func() @@ -602,15 +625,15 @@ func newResolver[N int64 | float64](p pipelines, vc *cache[string, instID]) reso func (r resolver[N]) Aggregators(id Instrument) ([]aggregate.Measure[N], error) { var measures []aggregate.Measure[N] - errs := &multierror{} + var err error for _, i := range r.inserters { - in, err := i.Instrument(id, i.readerDefaultAggregation(id.Kind)) - if err != nil { - errs.append(err) + in, e := i.Instrument(id, i.readerDefaultAggregation(id.Kind)) + if e != nil { + err = errors.Join(err, e) } measures = append(measures, in...) } - return measures, errs.errorOrNil() + return measures, err } // HistogramAggregators returns the histogram Aggregators that must be updated by the instrument @@ -619,37 +642,18 @@ func (r resolver[N]) Aggregators(id Instrument) ([]aggregate.Measure[N], error) func (r resolver[N]) HistogramAggregators(id Instrument, boundaries []float64) ([]aggregate.Measure[N], error) { var measures []aggregate.Measure[N] - errs := &multierror{} + var err error for _, i := range r.inserters { agg := i.readerDefaultAggregation(id.Kind) if histAgg, ok := agg.(AggregationExplicitBucketHistogram); ok && len(boundaries) > 0 { histAgg.Boundaries = boundaries agg = histAgg } - in, err := i.Instrument(id, agg) - if err != nil { - errs.append(err) + in, e := i.Instrument(id, agg) + if e != nil { + err = errors.Join(err, e) } measures = append(measures, in...) 
} - return measures, errs.errorOrNil() -} - -type multierror struct { - wrapped error - errors []string -} - -func (m *multierror) errorOrNil() error { - if len(m.errors) == 0 { - return nil - } - if m.wrapped == nil { - return errors.New(strings.Join(m.errors, "; ")) - } - return fmt.Errorf("%w: %s", m.wrapped, strings.Join(m.errors, "; ")) -} - -func (m *multierror) append(err error) { - m.errors = append(m.errors, err.Error()) + return measures, err } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/provider.go b/vendor/go.opentelemetry.io/otel/sdk/metric/provider.go index a82af538e67..2fca89e5a8e 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/provider.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/provider.go @@ -42,7 +42,7 @@ func NewMeterProvider(options ...Option) *MeterProvider { flush, sdown := conf.readerSignals() mp := &MeterProvider{ - pipes: newPipelines(conf.res, conf.readers, conf.views), + pipes: newPipelines(conf.res, conf.readers, conf.views, conf.exemplarFilter), forceFlush: flush, shutdown: sdown, } @@ -76,15 +76,17 @@ func (mp *MeterProvider) Meter(name string, options ...metric.MeterOption) metri c := metric.NewMeterConfig(options...) s := instrumentation.Scope{ - Name: name, - Version: c.InstrumentationVersion(), - SchemaURL: c.SchemaURL(), + Name: name, + Version: c.InstrumentationVersion(), + SchemaURL: c.SchemaURL(), + Attributes: c.InstrumentationAttributes(), } global.Info("Meter created", "Name", s.Name, "Version", s.Version, "SchemaURL", s.SchemaURL, + "Attributes", s.Attributes, ) return mp.meters.Lookup(s, func() *meter { diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go b/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go index a55f9a5372c..d13a7069788 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go @@ -5,26 +5,26 @@ package metric // import "go.opentelemetry.io/otel/sdk/metric" import ( "context" - "fmt" + "errors" "go.opentelemetry.io/otel/sdk/metric/metricdata" ) // errDuplicateRegister is logged by a Reader when an attempt to registered it // more than once occurs. -var errDuplicateRegister = fmt.Errorf("duplicate reader registration") +var errDuplicateRegister = errors.New("duplicate reader registration") // ErrReaderNotRegistered is returned if Collect or Shutdown are called before // the reader is registered with a MeterProvider. -var ErrReaderNotRegistered = fmt.Errorf("reader is not registered") +var ErrReaderNotRegistered = errors.New("reader is not registered") // ErrReaderShutdown is returned if Collect or Shutdown are called after a // reader has been Shutdown once. -var ErrReaderShutdown = fmt.Errorf("reader is shutdown") +var ErrReaderShutdown = errors.New("reader is shutdown") // errNonPositiveDuration is logged when an environmental variable // has non-positive value. -var errNonPositiveDuration = fmt.Errorf("non-positive duration") +var errNonPositiveDuration = errors.New("non-positive duration") // Reader is the interface used between the SDK and an // exporter. Control flow is bi-directional through the @@ -34,7 +34,7 @@ var errNonPositiveDuration = fmt.Errorf("non-positive duration") // start of bi-directional control flow. // // Typically, push-based exporters that are periodic will -// implement PeroidicExporter themselves and construct a +// implement PeriodicExporter themselves and construct a // PeriodicReader to satisfy this interface. 
// // Pull-based exporters will typically implement Register @@ -60,8 +60,8 @@ type Reader interface { aggregation(InstrumentKind) Aggregation // nolint:revive // import-shadow for method scoped by type. // Collect gathers and returns all metric data related to the Reader from - // the SDK and stores it in out. An error is returned if this is called - // after Shutdown or if out is nil. + // the SDK and stores it in rm. An error is returned if this is called + // after Shutdown or if rm is nil. // // This method needs to be concurrent safe, and the cancellation of the // passed context is expected to be honored. diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/version.go b/vendor/go.opentelemetry.io/otel/sdk/metric/version.go index dade0a19a2b..1cd181626d3 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/version.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/version.go @@ -5,5 +5,5 @@ package metric // import "go.opentelemetry.io/otel/sdk/metric" // version is the current release version of the metric SDK in use. func version() string { - return "1.28.0" + return "1.33.0" } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/view.go b/vendor/go.opentelemetry.io/otel/sdk/metric/view.go index cd08c673248..630890f4263 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/view.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/view.go @@ -96,11 +96,12 @@ func NewView(criteria Instrument, mask Stream) View { return func(i Instrument) (Stream, bool) { if matchFunc(i) { return Stream{ - Name: nonZero(mask.Name, i.Name), - Description: nonZero(mask.Description, i.Description), - Unit: nonZero(mask.Unit, i.Unit), - Aggregation: agg, - AttributeFilter: mask.AttributeFilter, + Name: nonZero(mask.Name, i.Name), + Description: nonZero(mask.Description, i.Description), + Unit: nonZero(mask.Unit, i.Unit), + Aggregation: agg, + AttributeFilter: mask.AttributeFilter, + ExemplarReservoirProviderSelector: mask.ExemplarReservoirProviderSelector, }, true } return Stream{}, false diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go b/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go index 95a61d61d49..c02aeefdde5 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go @@ -7,7 +7,6 @@ import ( "context" "errors" "fmt" - "strings" ) // ErrPartialResource is returned by a detector when complete source @@ -57,62 +56,37 @@ func Detect(ctx context.Context, detectors ...Detector) (*Resource, error) { // these errors will be returned. Otherwise, nil is returned. func detect(ctx context.Context, res *Resource, detectors []Detector) error { var ( - r *Resource - errs detectErrs - err error + r *Resource + err error + e error ) for _, detector := range detectors { if detector == nil { continue } - r, err = detector.Detect(ctx) - if err != nil { - errs = append(errs, err) - if !errors.Is(err, ErrPartialResource) { + r, e = detector.Detect(ctx) + if e != nil { + err = errors.Join(err, e) + if !errors.Is(e, ErrPartialResource) { continue } } - r, err = Merge(res, r) - if err != nil { - errs = append(errs, err) + r, e = Merge(res, r) + if e != nil { + err = errors.Join(err, e) } *res = *r } - if len(errs) == 0 { - return nil - } - if errors.Is(errs, ErrSchemaURLConflict) { - // If there has been a merge conflict, ensure the resource has no - // schema URL. 
- res.schemaURL = "" - } - return errs -} - -type detectErrs []error - -func (e detectErrs) Error() string { - errStr := make([]string, len(e)) - for i, err := range e { - errStr[i] = fmt.Sprintf("* %s", err) - } - - format := "%d errors occurred detecting resource:\n\t%s" - return fmt.Sprintf(format, len(e), strings.Join(errStr, "\n\t")) -} + if err != nil { + if errors.Is(err, ErrSchemaURLConflict) { + // If there has been a merge conflict, ensure the resource has no + // schema URL. + res.schemaURL = "" + } -func (e detectErrs) Unwrap() error { - switch len(e) { - case 0: - return nil - case 1: - return e[0] + err = fmt.Errorf("error detecting resource: %w", err) } - return e[1:] -} - -func (e detectErrs) Is(target error) bool { - return len(e) != 0 && errors.Is(e[0], target) + return err } diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go index 6ac1cdbf7b4..cf3c88e15cd 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go @@ -20,15 +20,13 @@ type ( // telemetrySDK is a Detector that provides information about // the OpenTelemetry SDK used. This Detector is included as a // builtin. If these resource attributes are not wanted, use - // the WithTelemetrySDK(nil) or WithoutBuiltin() options to - // explicitly disable them. + // resource.New() to explicitly disable them. telemetrySDK struct{} // host is a Detector that provides information about the host // being run on. This Detector is included as a builtin. If // these resource attributes are not wanted, use the - // WithHost(nil) or WithoutBuiltin() options to explicitly - // disable them. + // resource.New() to explicitly disable them. host struct{} stringDetector struct { diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go index 4ce757dfd6b..ccc97e1b662 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go @@ -280,6 +280,7 @@ func (bsp *batchSpanProcessor) exportSpans(ctx context.Context) error { // // It is up to the exporter to implement any type of retry logic if a batch is failing // to be exported, since it is specific to the protocol and backend being sent to. + clear(bsp.batch) // Erase elements to let GC collect objects bsp.batch = bsp.batch[:0] if err != nil { diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go index 14c2e5bebda..185aa7c08f7 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go @@ -139,9 +139,10 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T name = defaultTracerName } is := instrumentation.Scope{ - Name: name, - Version: c.InstrumentationVersion(), - SchemaURL: c.SchemaURL(), + Name: name, + Version: c.InstrumentationVersion(), + SchemaURL: c.SchemaURL(), + Attributes: c.InstrumentationAttributes(), } t, ok := func() (trace.Tracer, bool) { @@ -168,7 +169,7 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T // slowing down all tracing consumers. // - Logging code may be instrumented with tracing and deadlock because it could try // acquiring the same non-reentrant mutex. 
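The detect rewrite above drops the hand-rolled detectErrs slice in favor of errors.Join (Go 1.20+), which keeps every accumulated error reachable through errors.Is and errors.As without custom Unwrap plumbing. The same accumulation pattern in isolation, with hypothetical sentinels standing in for ErrPartialResource and ErrSchemaURLConflict:

package main

import (
	"errors"
	"fmt"
)

// Hypothetical sentinels standing in for the SDK's ErrPartialResource and
// ErrSchemaURLConflict.
var (
	errPartial  = errors.New("partial result")
	errConflict = errors.New("schema URL conflict")
)

func runAll(steps []func() error) error {
	var err error
	for _, step := range steps {
		if e := step(); e != nil {
			// errors.Join accumulates without losing Is/As matching,
			// replacing the detectErrs slice type removed above.
			err = errors.Join(err, e)
		}
	}
	return err
}

func main() {
	err := runAll([]func() error{
		func() error { return errPartial },
		func() error { return nil },
		func() error { return fmt.Errorf("merge: %w", errConflict) },
	})
	fmt.Println(err)                         // both messages, one per line
	fmt.Println(errors.Is(err, errConflict)) // true
}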
- global.Info("Tracer created", "name", name, "version", is.Version, "schemaURL", is.SchemaURL) + global.Info("Tracer created", "name", name, "version", is.Version, "schemaURL", is.SchemaURL, "attributes", is.Attributes) } return t } diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go b/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go index d2d1f72466b..9b672a1d70d 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go @@ -5,7 +5,6 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace" import ( "errors" - "fmt" "os" "strconv" "strings" @@ -26,7 +25,7 @@ const ( type errUnsupportedSampler string func (e errUnsupportedSampler) Error() string { - return fmt.Sprintf("unsupported sampler: %s", string(e)) + return "unsupported sampler: " + string(e) } var ( @@ -39,7 +38,7 @@ type samplerArgParseError struct { } func (e samplerArgParseError) Error() string { - return fmt.Sprintf("parsing sampler argument: %s", e.parseErr.Error()) + return "parsing sampler argument: " + e.parseErr.Error() } func (e samplerArgParseError) Unwrap() error { diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go index 730fb85c3ef..8f4fc385082 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go @@ -347,54 +347,99 @@ func truncateAttr(limit int, attr attribute.KeyValue) attribute.KeyValue { } switch attr.Value.Type() { case attribute.STRING: - if v := attr.Value.AsString(); len(v) > limit { - return attr.Key.String(safeTruncate(v, limit)) - } + v := attr.Value.AsString() + return attr.Key.String(truncate(limit, v)) case attribute.STRINGSLICE: v := attr.Value.AsStringSlice() for i := range v { - if len(v[i]) > limit { - v[i] = safeTruncate(v[i], limit) - } + v[i] = truncate(limit, v[i]) } return attr.Key.StringSlice(v) } return attr } -// safeTruncate truncates the string and guarantees valid UTF-8 is returned. -func safeTruncate(input string, limit int) string { - if trunc, ok := safeTruncateValidUTF8(input, limit); ok { - return trunc +// truncate returns a truncated version of s such that it contains less than +// the limit number of characters. Truncation is applied by returning the limit +// number of valid characters contained in s. +// +// If limit is negative, it returns the original string. +// +// UTF-8 is supported. When truncating, all invalid characters are dropped +// before applying truncation. +// +// If s already contains less than the limit number of bytes, it is returned +// unchanged. No invalid characters are removed. +func truncate(limit int, s string) string { + // This prioritize performance in the following order based on the most + // common expected use-cases. + // + // - Short values less than the default limit (128). + // - Strings with valid encodings that exceed the limit. + // - No limit. + // - Strings with invalid encodings that exceed the limit. + if limit < 0 || len(s) <= limit { + return s + } + + // Optimistically, assume all valid UTF-8. + var b strings.Builder + count := 0 + for i, c := range s { + if c != utf8.RuneError { + count++ + if count > limit { + return s[:i] + } + continue + } + + _, size := utf8.DecodeRuneInString(s[i:]) + if size == 1 { + // Invalid encoding. + b.Grow(len(s) - 1) + _, _ = b.WriteString(s[:i]) + s = s[i:] + break + } + } + + // Fast-path, no invalid input. 
+ if b.Cap() == 0 { + return s } - trunc, _ := safeTruncateValidUTF8(strings.ToValidUTF8(input, ""), limit) - return trunc -} -// safeTruncateValidUTF8 returns a copy of the input string safely truncated to -// limit. The truncation is ensured to occur at the bounds of complete UTF-8 -// characters. If invalid encoding of UTF-8 is encountered, input is returned -// with false, otherwise, the truncated input will be returned with true. -func safeTruncateValidUTF8(input string, limit int) (string, bool) { - for cnt := 0; cnt <= limit; { - r, size := utf8.DecodeRuneInString(input[cnt:]) - if r == utf8.RuneError { - return input, false + // Truncate while validating UTF-8. + for i := 0; i < len(s) && count < limit; { + c := s[i] + if c < utf8.RuneSelf { + // Optimization for single byte runes (common case). + _ = b.WriteByte(c) + i++ + count++ + continue } - if cnt+size > limit { - return input[:cnt], true + _, size := utf8.DecodeRuneInString(s[i:]) + if size == 1 { + // We checked for all 1-byte runes above, this is a RuneError. + i++ + continue } - cnt += size + + _, _ = b.WriteString(s[i : i+size]) + i += size + count++ } - return input, true + + return b.String() } // End ends the span. This method does nothing if the span is already ended or // is not being recorded. // -// The only SpanOption currently supported is WithTimestamp which will set the -// end time for a Span's life-cycle. +// The only SpanEndOption currently supported are [trace.WithTimestamp], and +// [trace.WithStackTrace]. // // If this method is called while panicking an error event is added to the // Span before ending it and the panic is continued. @@ -639,10 +684,7 @@ func (s *recordingSpan) dedupeAttrsFromRecord(record map[attribute.Key]int) { record[a.Key] = len(unique) - 1 } } - // s.attributes have element types of attribute.KeyValue. These types are - // not pointers and they themselves do not contain pointer fields, - // therefore the duplicate values do not need to be zeroed for them to be - // garbage collected. + clear(s.attributes[len(unique):]) // Erase unneeded elements to let GC collect objects. s.attributes = unique } diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/recorder.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/recorder.go index 7aababbbf2f..732669a17ad 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/recorder.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/recorder.go @@ -69,6 +69,19 @@ func (sr *SpanRecorder) Started() []sdktrace.ReadWriteSpan { return dst } +// Reset clears the recorded spans. +// +// This method is safe to be called concurrently. +func (sr *SpanRecorder) Reset() { + sr.startedMu.Lock() + sr.endedMu.Lock() + defer sr.startedMu.Unlock() + defer sr.endedMu.Unlock() + + sr.started = nil + sr.ended = nil +} + // Ended returns a copy of all ended spans that have been recorded. // // This method is safe to be called concurrently. diff --git a/vendor/go.opentelemetry.io/otel/sdk/version.go b/vendor/go.opentelemetry.io/otel/sdk/version.go index dc1eaa8e9d0..ba7db488950 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/version.go +++ b/vendor/go.opentelemetry.io/otel/sdk/version.go @@ -5,5 +5,5 @@ package sdk // import "go.opentelemetry.io/otel/sdk" // Version is the current release version of the OpenTelemetry SDK in use. 
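The new truncate above folds UTF-8 validation and truncation into one pass, with fast paths for short input and for all-valid encodings. Its core invariant, cut on rune boundaries and never mid-encoding, can be shown with a much smaller sketch (no fast paths, no invalid-byte dropping; this is a simplified cousin, not the SDK implementation):

package main

import (
	"fmt"
	"unicode/utf8"
)

// truncateRunes keeps at most limit runes of s, never splitting a rune's
// byte sequence.
func truncateRunes(limit int, s string) string {
	if limit < 0 || utf8.RuneCountInString(s) <= limit {
		return s
	}
	i, count := 0, 0
	for i < len(s) && count < limit {
		_, size := utf8.DecodeRuneInString(s[i:])
		i += size
		count++
	}
	return s[:i]
}

func main() {
	fmt.Println(truncateRunes(4, "héllo, wörld")) // "héll", no broken bytes
	fmt.Println(truncateRunes(100, "short"))      // unchanged
}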
func Version() string { - return "1.31.0" + return "1.33.0" } diff --git a/vendor/go.opentelemetry.io/otel/trace/config.go b/vendor/go.opentelemetry.io/otel/trace/config.go index 273d58e0014..9c0b720a4d6 100644 --- a/vendor/go.opentelemetry.io/otel/trace/config.go +++ b/vendor/go.opentelemetry.io/otel/trace/config.go @@ -213,7 +213,7 @@ var _ SpanStartEventOption = attributeOption{} // WithAttributes adds the attributes related to a span life-cycle event. // These attributes are used to describe the work a Span represents when this -// option is provided to a Span's start or end events. Otherwise, these +// option is provided to a Span's start event. Otherwise, these // attributes provide additional information about the event being recorded // (e.g. error, state change, processing progress, system event). // diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go index 6d3c7b1f40e..fb7d12673eb 100644 --- a/vendor/go.opentelemetry.io/otel/version.go +++ b/vendor/go.opentelemetry.io/otel/version.go @@ -5,5 +5,5 @@ package otel // import "go.opentelemetry.io/otel" // Version is the current release version of OpenTelemetry in use. func Version() string { - return "1.31.0" + return "1.33.0" } diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml index cdebdb5eb78..9f878cd1fe7 100644 --- a/vendor/go.opentelemetry.io/otel/versions.yaml +++ b/vendor/go.opentelemetry.io/otel/versions.yaml @@ -3,19 +3,13 @@ module-sets: stable-v1: - version: v1.31.0 + version: v1.33.0 modules: - go.opentelemetry.io/otel - go.opentelemetry.io/otel/bridge/opencensus - go.opentelemetry.io/otel/bridge/opencensus/test - go.opentelemetry.io/otel/bridge/opentracing - go.opentelemetry.io/otel/bridge/opentracing/test - - go.opentelemetry.io/otel/example/dice - - go.opentelemetry.io/otel/example/namedtracer - - go.opentelemetry.io/otel/example/opencensus - - go.opentelemetry.io/otel/example/otel-collector - - go.opentelemetry.io/otel/example/passthrough - - go.opentelemetry.io/otel/example/zipkin - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp - go.opentelemetry.io/otel/exporters/otlp/otlptrace @@ -29,12 +23,11 @@ module-sets: - go.opentelemetry.io/otel/sdk/metric - go.opentelemetry.io/otel/trace experimental-metrics: - version: v0.53.0 + version: v0.55.0 modules: - - go.opentelemetry.io/otel/example/prometheus - go.opentelemetry.io/otel/exporters/prometheus experimental-logs: - version: v0.7.0 + version: v0.9.0 modules: - go.opentelemetry.io/otel/log - go.opentelemetry.io/otel/sdk/log @@ -42,7 +35,7 @@ module-sets: - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp - go.opentelemetry.io/otel/exporters/stdout/stdoutlog experimental-schema: - version: v0.0.10 + version: v0.0.12 modules: - go.opentelemetry.io/otel/schema excluded-modules: diff --git a/vendor/go.opentelemetry.io/proto/otlp/metrics/v1/metrics.pb.go b/vendor/go.opentelemetry.io/proto/otlp/metrics/v1/metrics.pb.go index bca86dc4422..541664bd4ec 100644 --- a/vendor/go.opentelemetry.io/proto/otlp/metrics/v1/metrics.pb.go +++ b/vendor/go.opentelemetry.io/proto/otlp/metrics/v1/metrics.pb.go @@ -210,6 +210,24 @@ func (DataPointFlags) EnumDescriptor() ([]byte, []int) { // storage, OR can be embedded by other protocols that transfer OTLP metrics // data but do not implement the OTLP protocol. 
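The trace/config.go doc fix above narrows WithAttributes to span start events (span end never carried attributes through this option). Both remaining uses, on Start to describe the span's work and on AddEvent to describe the event itself, look like this sketch; the attribute keys and span names are illustrative:

package main

import (
	"context"

	"go.opentelemetry.io/otel/attribute"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
	"go.opentelemetry.io/otel/trace"
)

func main() {
	tp := sdktrace.NewTracerProvider()
	defer func() { _ = tp.Shutdown(context.Background()) }()

	tr := tp.Tracer("example/tracer")

	// On Start, WithAttributes describes the work the span represents.
	_, span := tr.Start(context.Background(), "handle-request",
		trace.WithAttributes(attribute.String("http.route", "/items")))

	// On an event, it describes the event being recorded, not the span.
	span.AddEvent("cache-miss",
		trace.WithAttributes(attribute.String("cache.key", "items:42")))
	span.End()
}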
// +// MetricsData +// └─── ResourceMetrics +// ├── Resource +// ├── SchemaURL +// └── ScopeMetrics +// ├── Scope +// ├── SchemaURL +// └── Metric +// ├── Name +// ├── Description +// ├── Unit +// └── data +// ├── Gauge +// ├── Sum +// ├── Histogram +// ├── ExponentialHistogram +// └── Summary +// // The main difference between this message and collector protocol is that // in this message there will not be any "control" or "metadata" specific to // OTLP protocol. @@ -417,7 +435,6 @@ func (x *ScopeMetrics) GetSchemaUrl() string { // // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md // -// // The data model and relation between entities is shown in the // diagram below. Here, "DataPoint" is the term used to refer to any // one of the specific data point value types, and "points" is the term used @@ -429,7 +446,7 @@ func (x *ScopeMetrics) GetSchemaUrl() string { // - DataPoint contains timestamps, attributes, and one of the possible value type // fields. // -// Metric +// Metric // +------------+ // |name | // |description | @@ -914,6 +931,9 @@ func (x *ExponentialHistogram) GetAggregationTemporality() AggregationTemporalit // data type. These data points cannot always be merged in a meaningful way. // While they can be useful in some applications, histogram data points are // recommended for new applications. +// Summary metrics do not have an aggregation temporality field. This is +// because the count and sum fields of a SummaryDataPoint are assumed to be +// cumulative values. type Summary struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1508,7 +1528,8 @@ func (x *ExponentialHistogramDataPoint) GetZeroThreshold() float64 { } // SummaryDataPoint is a single data point in a timeseries that describes the -// time-varying values of a Summary metric. +// time-varying values of a Summary metric. The count and sum fields represent +// cumulative values. type SummaryDataPoint struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache diff --git a/vendor/golang.org/x/exp/LICENSE b/vendor/golang.org/x/exp/LICENSE index 6a66aea5eaf..2a7cf70da6e 100644 --- a/vendor/golang.org/x/exp/LICENSE +++ b/vendor/golang.org/x/exp/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/exp/slices/sort.go b/vendor/golang.org/x/exp/slices/sort.go index b67897f76b5..f58bbc7ba4d 100644 --- a/vendor/golang.org/x/exp/slices/sort.go +++ b/vendor/golang.org/x/exp/slices/sort.go @@ -22,10 +22,12 @@ func Sort[S ~[]E, E constraints.Ordered](x S) { // SortFunc sorts the slice x in ascending order as determined by the cmp // function. This sort is not guaranteed to be stable. // cmp(a, b) should return a negative number when a < b, a positive number when -// a > b and zero when a == b. 
+// a > b and zero when a == b or when a is not comparable to b in the sense +// of the formal definition of Strict Weak Ordering. // // SortFunc requires that cmp is a strict weak ordering. // See https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings. +// To indicate 'uncomparable', return 0 from the function. func SortFunc[S ~[]E, E any](x S, cmp func(a, b E) int) { n := len(x) pdqsortCmpFunc(x, 0, n, bits.Len(uint(n)), cmp) diff --git a/vendor/golang.org/x/mod/LICENSE b/vendor/golang.org/x/mod/LICENSE index 6a66aea5eaf..2a7cf70da6e 100644 --- a/vendor/golang.org/x/mod/LICENSE +++ b/vendor/golang.org/x/mod/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go new file mode 100644 index 00000000000..cf66309c4a8 --- /dev/null +++ b/vendor/golang.org/x/net/context/context.go @@ -0,0 +1,56 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package context defines the Context type, which carries deadlines, +// cancelation signals, and other request-scoped values across API boundaries +// and between processes. +// As of Go 1.7 this package is available in the standard library under the +// name context. https://golang.org/pkg/context. +// +// Incoming requests to a server should create a Context, and outgoing calls to +// servers should accept a Context. The chain of function calls between must +// propagate the Context, optionally replacing it with a modified copy created +// using WithDeadline, WithTimeout, WithCancel, or WithValue. +// +// Programs that use Contexts should follow these rules to keep interfaces +// consistent across packages and enable static analysis tools to check context +// propagation: +// +// Do not store Contexts inside a struct type; instead, pass a Context +// explicitly to each function that needs it. The Context should be the first +// parameter, typically named ctx: +// +// func DoSomething(ctx context.Context, arg Arg) error { +// // ... use ctx ... +// } +// +// Do not pass a nil Context, even if a function permits it. Pass context.TODO +// if you are unsure about which Context to use. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +// +// The same Context may be passed to functions running in different goroutines; +// Contexts are safe for simultaneous use by multiple goroutines. +// +// See http://blog.golang.org/context for example code for a server that uses +// Contexts. +package context // import "golang.org/x/net/context" + +// Background returns a non-nil, empty Context. It is never canceled, has no +// values, and has no deadline. 
It is typically used by the main function, +// initialization, and tests, and as the top-level Context for incoming +// requests. +func Background() Context { + return background +} + +// TODO returns a non-nil, empty Context. Code should use context.TODO when +// it's unclear which Context to use or it is not yet available (because the +// surrounding function has not yet been extended to accept a Context +// parameter). TODO is recognized by static analysis tools that determine +// whether Contexts are propagated correctly in a program. +func TODO() Context { + return todo +} diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go new file mode 100644 index 00000000000..0c1b8679376 --- /dev/null +++ b/vendor/golang.org/x/net/context/go17.go @@ -0,0 +1,72 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.7 + +package context + +import ( + "context" // standard library's context, as of Go 1.7 + "time" +) + +var ( + todo = context.TODO() + background = context.Background() +) + +// Canceled is the error returned by Context.Err when the context is canceled. +var Canceled = context.Canceled + +// DeadlineExceeded is the error returned by Context.Err when the context's +// deadline passes. +var DeadlineExceeded = context.DeadlineExceeded + +// WithCancel returns a copy of parent with a new Done channel. The returned +// context's Done channel is closed when the returned cancel function is called +// or when the parent context's Done channel is closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { + ctx, f := context.WithCancel(parent) + return ctx, f +} + +// WithDeadline returns a copy of the parent context with the deadline adjusted +// to be no later than d. If the parent's deadline is already earlier than d, +// WithDeadline(parent, d) is semantically equivalent to parent. The returned +// context's Done channel is closed when the deadline expires, when the returned +// cancel function is called, or when the parent context's Done channel is +// closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { + ctx, f := context.WithDeadline(parent, deadline) + return ctx, f +} + +// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete: +// +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } +func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { + return WithDeadline(parent, time.Now().Add(timeout)) +} + +// WithValue returns a copy of parent in which the value associated with key is +// val. 
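On any modern Go this vendored golang.org/x/net/context package is purely a compatibility shim: the go1.7+ build forwards to the standard library, and the go19.go file added just below makes Context a true type alias, so values flow freely between the two import paths. A sketch of that interchangeability, assuming Go 1.9 or newer:

package main

import (
	stdctx "context"
	"fmt"
	"time"

	xctx "golang.org/x/net/context"
)

// wait accepts the x/net/context type; since Go 1.9 it is an alias for the
// standard library's context.Context, so std contexts pass straight through.
func wait(ctx xctx.Context) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	case <-time.After(time.Second):
		return nil
	}
}

func main() {
	ctx, cancel := stdctx.WithTimeout(stdctx.Background(), 10*time.Millisecond)
	defer cancel()
	fmt.Println(wait(ctx)) // context deadline exceeded
}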
+// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +func WithValue(parent Context, key interface{}, val interface{}) Context { + return context.WithValue(parent, key, val) +} diff --git a/vendor/golang.org/x/net/context/go19.go b/vendor/golang.org/x/net/context/go19.go new file mode 100644 index 00000000000..e31e35a9045 --- /dev/null +++ b/vendor/golang.org/x/net/context/go19.go @@ -0,0 +1,20 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.9 + +package context + +import "context" // standard library's context, as of Go 1.7 + +// A Context carries a deadline, a cancelation signal, and other values across +// API boundaries. +// +// Context's methods may be called by multiple goroutines simultaneously. +type Context = context.Context + +// A CancelFunc tells an operation to abandon its work. +// A CancelFunc does not wait for the work to stop. +// After the first call, subsequent calls to a CancelFunc do nothing. +type CancelFunc = context.CancelFunc diff --git a/vendor/golang.org/x/net/context/pre_go17.go b/vendor/golang.org/x/net/context/pre_go17.go new file mode 100644 index 00000000000..065ff3dfa52 --- /dev/null +++ b/vendor/golang.org/x/net/context/pre_go17.go @@ -0,0 +1,300 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.7 + +package context + +import ( + "errors" + "fmt" + "sync" + "time" +) + +// An emptyCtx is never canceled, has no values, and has no deadline. It is not +// struct{}, since vars of this type must have distinct addresses. +type emptyCtx int + +func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { + return +} + +func (*emptyCtx) Done() <-chan struct{} { + return nil +} + +func (*emptyCtx) Err() error { + return nil +} + +func (*emptyCtx) Value(key interface{}) interface{} { + return nil +} + +func (e *emptyCtx) String() string { + switch e { + case background: + return "context.Background" + case todo: + return "context.TODO" + } + return "unknown empty Context" +} + +var ( + background = new(emptyCtx) + todo = new(emptyCtx) +) + +// Canceled is the error returned by Context.Err when the context is canceled. +var Canceled = errors.New("context canceled") + +// DeadlineExceeded is the error returned by Context.Err when the context's +// deadline passes. +var DeadlineExceeded = errors.New("context deadline exceeded") + +// WithCancel returns a copy of parent with a new Done channel. The returned +// context's Done channel is closed when the returned cancel function is called +// or when the parent context's Done channel is closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { + c := newCancelCtx(parent) + propagateCancel(parent, c) + return c, func() { c.cancel(true, Canceled) } +} + +// newCancelCtx returns an initialized cancelCtx. +func newCancelCtx(parent Context) *cancelCtx { + return &cancelCtx{ + Context: parent, + done: make(chan struct{}), + } +} + +// propagateCancel arranges for child to be canceled when parent is. 
+func propagateCancel(parent Context, child canceler) { + if parent.Done() == nil { + return // parent is never canceled + } + if p, ok := parentCancelCtx(parent); ok { + p.mu.Lock() + if p.err != nil { + // parent has already been canceled + child.cancel(false, p.err) + } else { + if p.children == nil { + p.children = make(map[canceler]bool) + } + p.children[child] = true + } + p.mu.Unlock() + } else { + go func() { + select { + case <-parent.Done(): + child.cancel(false, parent.Err()) + case <-child.Done(): + } + }() + } +} + +// parentCancelCtx follows a chain of parent references until it finds a +// *cancelCtx. This function understands how each of the concrete types in this +// package represents its parent. +func parentCancelCtx(parent Context) (*cancelCtx, bool) { + for { + switch c := parent.(type) { + case *cancelCtx: + return c, true + case *timerCtx: + return c.cancelCtx, true + case *valueCtx: + parent = c.Context + default: + return nil, false + } + } +} + +// removeChild removes a context from its parent. +func removeChild(parent Context, child canceler) { + p, ok := parentCancelCtx(parent) + if !ok { + return + } + p.mu.Lock() + if p.children != nil { + delete(p.children, child) + } + p.mu.Unlock() +} + +// A canceler is a context type that can be canceled directly. The +// implementations are *cancelCtx and *timerCtx. +type canceler interface { + cancel(removeFromParent bool, err error) + Done() <-chan struct{} +} + +// A cancelCtx can be canceled. When canceled, it also cancels any children +// that implement canceler. +type cancelCtx struct { + Context + + done chan struct{} // closed by the first cancel call. + + mu sync.Mutex + children map[canceler]bool // set to nil by the first cancel call + err error // set to non-nil by the first cancel call +} + +func (c *cancelCtx) Done() <-chan struct{} { + return c.done +} + +func (c *cancelCtx) Err() error { + c.mu.Lock() + defer c.mu.Unlock() + return c.err +} + +func (c *cancelCtx) String() string { + return fmt.Sprintf("%v.WithCancel", c.Context) +} + +// cancel closes c.done, cancels each of c's children, and, if +// removeFromParent is true, removes c from its parent's children. +func (c *cancelCtx) cancel(removeFromParent bool, err error) { + if err == nil { + panic("context: internal error: missing cancel error") + } + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return // already canceled + } + c.err = err + close(c.done) + for child := range c.children { + // NOTE: acquiring the child's lock while holding parent's lock. + child.cancel(false, err) + } + c.children = nil + c.mu.Unlock() + + if removeFromParent { + removeChild(c.Context, c) + } +} + +// WithDeadline returns a copy of the parent context with the deadline adjusted +// to be no later than d. If the parent's deadline is already earlier than d, +// WithDeadline(parent, d) is semantically equivalent to parent. The returned +// context's Done channel is closed when the deadline expires, when the returned +// cancel function is called, or when the parent context's Done channel is +// closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { + if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { + // The current deadline is already sooner than the new one. 
+ return WithCancel(parent) + } + c := &timerCtx{ + cancelCtx: newCancelCtx(parent), + deadline: deadline, + } + propagateCancel(parent, c) + d := deadline.Sub(time.Now()) + if d <= 0 { + c.cancel(true, DeadlineExceeded) // deadline has already passed + return c, func() { c.cancel(true, Canceled) } + } + c.mu.Lock() + defer c.mu.Unlock() + if c.err == nil { + c.timer = time.AfterFunc(d, func() { + c.cancel(true, DeadlineExceeded) + }) + } + return c, func() { c.cancel(true, Canceled) } +} + +// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to +// implement Done and Err. It implements cancel by stopping its timer then +// delegating to cancelCtx.cancel. +type timerCtx struct { + *cancelCtx + timer *time.Timer // Under cancelCtx.mu. + + deadline time.Time +} + +func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { + return c.deadline, true +} + +func (c *timerCtx) String() string { + return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) +} + +func (c *timerCtx) cancel(removeFromParent bool, err error) { + c.cancelCtx.cancel(false, err) + if removeFromParent { + // Remove this timerCtx from its parent cancelCtx's children. + removeChild(c.cancelCtx.Context, c) + } + c.mu.Lock() + if c.timer != nil { + c.timer.Stop() + c.timer = nil + } + c.mu.Unlock() +} + +// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete: +// +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } +func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { + return WithDeadline(parent, time.Now().Add(timeout)) +} + +// WithValue returns a copy of parent in which the value associated with key is +// val. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +func WithValue(parent Context, key interface{}, val interface{}) Context { + return &valueCtx{parent, key, val} +} + +// A valueCtx carries a key-value pair. It implements Value for that key and +// delegates all other calls to the embedded Context. +type valueCtx struct { + Context + key, val interface{} +} + +func (c *valueCtx) String() string { + return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) +} + +func (c *valueCtx) Value(key interface{}) interface{} { + if c.key == key { + return c.val + } + return c.Context.Value(key) +} diff --git a/vendor/golang.org/x/net/context/pre_go19.go b/vendor/golang.org/x/net/context/pre_go19.go new file mode 100644 index 00000000000..ec5a6380335 --- /dev/null +++ b/vendor/golang.org/x/net/context/pre_go19.go @@ -0,0 +1,109 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.9 + +package context + +import "time" + +// A Context carries a deadline, a cancelation signal, and other values across +// API boundaries. +// +// Context's methods may be called by multiple goroutines simultaneously. +type Context interface { + // Deadline returns the time when work done on behalf of this context + // should be canceled. 
Deadline returns ok==false when no deadline is + // set. Successive calls to Deadline return the same results. + Deadline() (deadline time.Time, ok bool) + + // Done returns a channel that's closed when work done on behalf of this + // context should be canceled. Done may return nil if this context can + // never be canceled. Successive calls to Done return the same value. + // + // WithCancel arranges for Done to be closed when cancel is called; + // WithDeadline arranges for Done to be closed when the deadline + // expires; WithTimeout arranges for Done to be closed when the timeout + // elapses. + // + // Done is provided for use in select statements: + // + // // Stream generates values with DoSomething and sends them to out + // // until DoSomething returns an error or ctx.Done is closed. + // func Stream(ctx context.Context, out chan<- Value) error { + // for { + // v, err := DoSomething(ctx) + // if err != nil { + // return err + // } + // select { + // case <-ctx.Done(): + // return ctx.Err() + // case out <- v: + // } + // } + // } + // + // See http://blog.golang.org/pipelines for more examples of how to use + // a Done channel for cancelation. + Done() <-chan struct{} + + // Err returns a non-nil error value after Done is closed. Err returns + // Canceled if the context was canceled or DeadlineExceeded if the + // context's deadline passed. No other values for Err are defined. + // After Done is closed, successive calls to Err return the same value. + Err() error + + // Value returns the value associated with this context for key, or nil + // if no value is associated with key. Successive calls to Value with + // the same key returns the same result. + // + // Use context values only for request-scoped data that transits + // processes and API boundaries, not for passing optional parameters to + // functions. + // + // A key identifies a specific value in a Context. Functions that wish + // to store values in Context typically allocate a key in a global + // variable then use that key as the argument to context.WithValue and + // Context.Value. A key can be any type that supports equality; + // packages should define keys as an unexported type to avoid + // collisions. + // + // Packages that define a Context key should provide type-safe accessors + // for the values stores using that key: + // + // // Package user defines a User type that's stored in Contexts. + // package user + // + // import "golang.org/x/net/context" + // + // // User is the type of value stored in the Contexts. + // type User struct {...} + // + // // key is an unexported type for keys defined in this package. + // // This prevents collisions with keys defined in other packages. + // type key int + // + // // userKey is the key for user.User values in Contexts. It is + // // unexported; clients use user.NewContext and user.FromContext + // // instead of using this key directly. + // var userKey key = 0 + // + // // NewContext returns a new Context that carries value u. + // func NewContext(ctx context.Context, u *User) context.Context { + // return context.WithValue(ctx, userKey, u) + // } + // + // // FromContext returns the User value stored in ctx, if any. + // func FromContext(ctx context.Context) (*User, bool) { + // u, ok := ctx.Value(userKey).(*User) + // return u, ok + // } + Value(key interface{}) interface{} +} + +// A CancelFunc tells an operation to abandon its work. +// A CancelFunc does not wait for the work to stop. +// After the first call, subsequent calls to a CancelFunc do nothing. 
+type CancelFunc func() diff --git a/vendor/golang.org/x/net/html/atom/atom.go b/vendor/golang.org/x/net/html/atom/atom.go new file mode 100644 index 00000000000..cd0a8ac1545 --- /dev/null +++ b/vendor/golang.org/x/net/html/atom/atom.go @@ -0,0 +1,78 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package atom provides integer codes (also known as atoms) for a fixed set of +// frequently occurring HTML strings: tag names and attribute keys such as "p" +// and "id". +// +// Sharing an atom's name between all elements with the same tag can result in +// fewer string allocations when tokenizing and parsing HTML. Integer +// comparisons are also generally faster than string comparisons. +// +// The value of an atom's particular code is not guaranteed to stay the same +// between versions of this package. Neither is any ordering guaranteed: +// whether atom.H1 < atom.H2 may also change. The codes are not guaranteed to +// be dense. The only guarantees are that e.g. looking up "div" will yield +// atom.Div, calling atom.Div.String will return "div", and atom.Div != 0. +package atom // import "golang.org/x/net/html/atom" + +// Atom is an integer code for a string. The zero value maps to "". +type Atom uint32 + +// String returns the atom's name. +func (a Atom) String() string { + start := uint32(a >> 8) + n := uint32(a & 0xff) + if start+n > uint32(len(atomText)) { + return "" + } + return atomText[start : start+n] +} + +func (a Atom) string() string { + return atomText[a>>8 : a>>8+a&0xff] +} + +// fnv computes the FNV hash with an arbitrary starting value h. +func fnv(h uint32, s []byte) uint32 { + for i := range s { + h ^= uint32(s[i]) + h *= 16777619 + } + return h +} + +func match(s string, t []byte) bool { + for i, c := range t { + if s[i] != c { + return false + } + } + return true +} + +// Lookup returns the atom whose name is s. It returns zero if there is no +// such atom. The lookup is case sensitive. +func Lookup(s []byte) Atom { + if len(s) == 0 || len(s) > maxAtomLen { + return 0 + } + h := fnv(hash0, s) + if a := table[h&uint32(len(table)-1)]; int(a&0xff) == len(s) && match(a.string(), s) { + return a + } + if a := table[(h>>16)&uint32(len(table)-1)]; int(a&0xff) == len(s) && match(a.string(), s) { + return a + } + return 0 +} + +// String returns a string whose contents are equal to s. In that sense, it is +// equivalent to string(s) but may be more efficient. +func String(s []byte) string { + if a := Lookup(s); a != 0 { + return a.String() + } + return string(s) +} diff --git a/vendor/golang.org/x/net/html/atom/table.go b/vendor/golang.org/x/net/html/atom/table.go new file mode 100644 index 00000000000..2a938864cb9 --- /dev/null +++ b/vendor/golang.org/x/net/html/atom/table.go @@ -0,0 +1,783 @@ +// Code generated by go generate gen.go; DO NOT EDIT. 
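An Atom packs an offset into atomText in its high bits and the name's length in its low 8 bits, so String is a single slice of one shared string and Lookup is at most two probes of an FNV-hashed table. The packing itself can be demonstrated in isolation with a toy text and hand-built codes; the real table below is generated and additionally folds the hash-slot scheme on top:

package main

import "fmt"

// Toy version of the scheme: high bits = offset into text, low 8 bits = length.
const text = "divspanbody"

type atom uint32

func mk(offset, length int) atom { return atom(offset<<8 | length) }

func (a atom) String() string {
	start := uint32(a >> 8)
	n := uint32(a & 0xff)
	if start+n > uint32(len(text)) {
		return ""
	}
	return text[start : start+n]
}

func main() {
	div, span, body := mk(0, 3), mk(3, 4), mk(7, 4)
	fmt.Println(div, span, body)                             // packed codes
	fmt.Println(div.String(), span.String(), body.String()) // div span body
}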
+ +//go:generate go run gen.go + +package atom + +const ( + A Atom = 0x1 + Abbr Atom = 0x4 + Accept Atom = 0x1a06 + AcceptCharset Atom = 0x1a0e + Accesskey Atom = 0x2c09 + Acronym Atom = 0xaa07 + Action Atom = 0x27206 + Address Atom = 0x6f307 + Align Atom = 0xb105 + Allowfullscreen Atom = 0x2080f + Allowpaymentrequest Atom = 0xc113 + Allowusermedia Atom = 0xdd0e + Alt Atom = 0xf303 + Annotation Atom = 0x1c90a + AnnotationXml Atom = 0x1c90e + Applet Atom = 0x31906 + Area Atom = 0x35604 + Article Atom = 0x3fc07 + As Atom = 0x3c02 + Aside Atom = 0x10705 + Async Atom = 0xff05 + Audio Atom = 0x11505 + Autocomplete Atom = 0x2780c + Autofocus Atom = 0x12109 + Autoplay Atom = 0x13c08 + B Atom = 0x101 + Base Atom = 0x3b04 + Basefont Atom = 0x3b08 + Bdi Atom = 0xba03 + Bdo Atom = 0x14b03 + Bgsound Atom = 0x15e07 + Big Atom = 0x17003 + Blink Atom = 0x17305 + Blockquote Atom = 0x1870a + Body Atom = 0x2804 + Br Atom = 0x202 + Button Atom = 0x19106 + Canvas Atom = 0x10306 + Caption Atom = 0x23107 + Center Atom = 0x22006 + Challenge Atom = 0x29b09 + Charset Atom = 0x2107 + Checked Atom = 0x47907 + Cite Atom = 0x19c04 + Class Atom = 0x56405 + Code Atom = 0x5c504 + Col Atom = 0x1ab03 + Colgroup Atom = 0x1ab08 + Color Atom = 0x1bf05 + Cols Atom = 0x1c404 + Colspan Atom = 0x1c407 + Command Atom = 0x1d707 + Content Atom = 0x58b07 + Contenteditable Atom = 0x58b0f + Contextmenu Atom = 0x3800b + Controls Atom = 0x1de08 + Coords Atom = 0x1ea06 + Crossorigin Atom = 0x1fb0b + Data Atom = 0x4a504 + Datalist Atom = 0x4a508 + Datetime Atom = 0x2b808 + Dd Atom = 0x2d702 + Default Atom = 0x10a07 + Defer Atom = 0x5c705 + Del Atom = 0x45203 + Desc Atom = 0x56104 + Details Atom = 0x7207 + Dfn Atom = 0x8703 + Dialog Atom = 0xbb06 + Dir Atom = 0x9303 + Dirname Atom = 0x9307 + Disabled Atom = 0x16408 + Div Atom = 0x16b03 + Dl Atom = 0x5e602 + Download Atom = 0x46308 + Draggable Atom = 0x17a09 + Dropzone Atom = 0x40508 + Dt Atom = 0x64b02 + Em Atom = 0x6e02 + Embed Atom = 0x6e05 + Enctype Atom = 0x28d07 + Face Atom = 0x21e04 + Fieldset Atom = 0x22608 + Figcaption Atom = 0x22e0a + Figure Atom = 0x24806 + Font Atom = 0x3f04 + Footer Atom = 0xf606 + For Atom = 0x25403 + ForeignObject Atom = 0x2540d + Foreignobject Atom = 0x2610d + Form Atom = 0x26e04 + Formaction Atom = 0x26e0a + Formenctype Atom = 0x2890b + Formmethod Atom = 0x2a40a + Formnovalidate Atom = 0x2ae0e + Formtarget Atom = 0x2c00a + Frame Atom = 0x8b05 + Frameset Atom = 0x8b08 + H1 Atom = 0x15c02 + H2 Atom = 0x2de02 + H3 Atom = 0x30d02 + H4 Atom = 0x34502 + H5 Atom = 0x34f02 + H6 Atom = 0x64d02 + Head Atom = 0x33104 + Header Atom = 0x33106 + Headers Atom = 0x33107 + Height Atom = 0x5206 + Hgroup Atom = 0x2ca06 + Hidden Atom = 0x2d506 + High Atom = 0x2db04 + Hr Atom = 0x15702 + Href Atom = 0x2e004 + Hreflang Atom = 0x2e008 + Html Atom = 0x5604 + HttpEquiv Atom = 0x2e80a + I Atom = 0x601 + Icon Atom = 0x58a04 + Id Atom = 0x10902 + Iframe Atom = 0x2fc06 + Image Atom = 0x30205 + Img Atom = 0x30703 + Input Atom = 0x44b05 + Inputmode Atom = 0x44b09 + Ins Atom = 0x20403 + Integrity Atom = 0x23f09 + Is Atom = 0x16502 + Isindex Atom = 0x30f07 + Ismap Atom = 0x31605 + Itemid Atom = 0x38b06 + Itemprop Atom = 0x19d08 + Itemref Atom = 0x3cd07 + Itemscope Atom = 0x67109 + Itemtype Atom = 0x31f08 + Kbd Atom = 0xb903 + Keygen Atom = 0x3206 + Keytype Atom = 0xd607 + Kind Atom = 0x17704 + Label Atom = 0x5905 + Lang Atom = 0x2e404 + Legend Atom = 0x18106 + Li Atom = 0xb202 + Link Atom = 0x17404 + List Atom = 0x4a904 + Listing Atom = 0x4a907 + Loop Atom = 0x5d04 + Low Atom = 0xc303 + 
Main Atom = 0x1004 + Malignmark Atom = 0xb00a + Manifest Atom = 0x6d708 + Map Atom = 0x31803 + Mark Atom = 0xb604 + Marquee Atom = 0x32707 + Math Atom = 0x32e04 + Max Atom = 0x33d03 + Maxlength Atom = 0x33d09 + Media Atom = 0xe605 + Mediagroup Atom = 0xe60a + Menu Atom = 0x38704 + Menuitem Atom = 0x38708 + Meta Atom = 0x4b804 + Meter Atom = 0x9805 + Method Atom = 0x2a806 + Mglyph Atom = 0x30806 + Mi Atom = 0x34702 + Min Atom = 0x34703 + Minlength Atom = 0x34709 + Mn Atom = 0x2b102 + Mo Atom = 0xa402 + Ms Atom = 0x67402 + Mtext Atom = 0x35105 + Multiple Atom = 0x35f08 + Muted Atom = 0x36705 + Name Atom = 0x9604 + Nav Atom = 0x1303 + Nobr Atom = 0x3704 + Noembed Atom = 0x6c07 + Noframes Atom = 0x8908 + Nomodule Atom = 0xa208 + Nonce Atom = 0x1a605 + Noscript Atom = 0x21608 + Novalidate Atom = 0x2b20a + Object Atom = 0x26806 + Ol Atom = 0x13702 + Onabort Atom = 0x19507 + Onafterprint Atom = 0x2360c + Onautocomplete Atom = 0x2760e + Onautocompleteerror Atom = 0x27613 + Onauxclick Atom = 0x61f0a + Onbeforeprint Atom = 0x69e0d + Onbeforeunload Atom = 0x6e70e + Onblur Atom = 0x56d06 + Oncancel Atom = 0x11908 + Oncanplay Atom = 0x14d09 + Oncanplaythrough Atom = 0x14d10 + Onchange Atom = 0x41b08 + Onclick Atom = 0x2f507 + Onclose Atom = 0x36c07 + Oncontextmenu Atom = 0x37e0d + Oncopy Atom = 0x39106 + Oncuechange Atom = 0x3970b + Oncut Atom = 0x3a205 + Ondblclick Atom = 0x3a70a + Ondrag Atom = 0x3b106 + Ondragend Atom = 0x3b109 + Ondragenter Atom = 0x3ba0b + Ondragexit Atom = 0x3c50a + Ondragleave Atom = 0x3df0b + Ondragover Atom = 0x3ea0a + Ondragstart Atom = 0x3f40b + Ondrop Atom = 0x40306 + Ondurationchange Atom = 0x41310 + Onemptied Atom = 0x40a09 + Onended Atom = 0x42307 + Onerror Atom = 0x42a07 + Onfocus Atom = 0x43107 + Onhashchange Atom = 0x43d0c + Oninput Atom = 0x44907 + Oninvalid Atom = 0x45509 + Onkeydown Atom = 0x45e09 + Onkeypress Atom = 0x46b0a + Onkeyup Atom = 0x48007 + Onlanguagechange Atom = 0x48d10 + Onload Atom = 0x49d06 + Onloadeddata Atom = 0x49d0c + Onloadedmetadata Atom = 0x4b010 + Onloadend Atom = 0x4c609 + Onloadstart Atom = 0x4cf0b + Onmessage Atom = 0x4da09 + Onmessageerror Atom = 0x4da0e + Onmousedown Atom = 0x4e80b + Onmouseenter Atom = 0x4f30c + Onmouseleave Atom = 0x4ff0c + Onmousemove Atom = 0x50b0b + Onmouseout Atom = 0x5160a + Onmouseover Atom = 0x5230b + Onmouseup Atom = 0x52e09 + Onmousewheel Atom = 0x53c0c + Onoffline Atom = 0x54809 + Ononline Atom = 0x55108 + Onpagehide Atom = 0x5590a + Onpageshow Atom = 0x5730a + Onpaste Atom = 0x57f07 + Onpause Atom = 0x59a07 + Onplay Atom = 0x5a406 + Onplaying Atom = 0x5a409 + Onpopstate Atom = 0x5ad0a + Onprogress Atom = 0x5b70a + Onratechange Atom = 0x5cc0c + Onrejectionhandled Atom = 0x5d812 + Onreset Atom = 0x5ea07 + Onresize Atom = 0x5f108 + Onscroll Atom = 0x60008 + Onsecuritypolicyviolation Atom = 0x60819 + Onseeked Atom = 0x62908 + Onseeking Atom = 0x63109 + Onselect Atom = 0x63a08 + Onshow Atom = 0x64406 + Onsort Atom = 0x64f06 + Onstalled Atom = 0x65909 + Onstorage Atom = 0x66209 + Onsubmit Atom = 0x66b08 + Onsuspend Atom = 0x67b09 + Ontimeupdate Atom = 0x400c + Ontoggle Atom = 0x68408 + Onunhandledrejection Atom = 0x68c14 + Onunload Atom = 0x6ab08 + Onvolumechange Atom = 0x6b30e + Onwaiting Atom = 0x6c109 + Onwheel Atom = 0x6ca07 + Open Atom = 0x1a304 + Optgroup Atom = 0x5f08 + Optimum Atom = 0x6d107 + Option Atom = 0x6e306 + Output Atom = 0x51d06 + P Atom = 0xc01 + Param Atom = 0xc05 + Pattern Atom = 0x6607 + Picture Atom = 0x7b07 + Ping Atom = 0xef04 + Placeholder Atom = 0x1310b + Plaintext Atom = 0x1b209 + 
Playsinline Atom = 0x1400b + Poster Atom = 0x2cf06 + Pre Atom = 0x47003 + Preload Atom = 0x48607 + Progress Atom = 0x5b908 + Prompt Atom = 0x53606 + Public Atom = 0x58606 + Q Atom = 0xcf01 + Radiogroup Atom = 0x30a + Rb Atom = 0x3a02 + Readonly Atom = 0x35708 + Referrerpolicy Atom = 0x3d10e + Rel Atom = 0x48703 + Required Atom = 0x24c08 + Reversed Atom = 0x8008 + Rows Atom = 0x9c04 + Rowspan Atom = 0x9c07 + Rp Atom = 0x23c02 + Rt Atom = 0x19a02 + Rtc Atom = 0x19a03 + Ruby Atom = 0xfb04 + S Atom = 0x2501 + Samp Atom = 0x7804 + Sandbox Atom = 0x12907 + Scope Atom = 0x67505 + Scoped Atom = 0x67506 + Script Atom = 0x21806 + Seamless Atom = 0x37108 + Section Atom = 0x56807 + Select Atom = 0x63c06 + Selected Atom = 0x63c08 + Shape Atom = 0x1e505 + Size Atom = 0x5f504 + Sizes Atom = 0x5f505 + Slot Atom = 0x1ef04 + Small Atom = 0x20605 + Sortable Atom = 0x65108 + Sorted Atom = 0x33706 + Source Atom = 0x37806 + Spacer Atom = 0x43706 + Span Atom = 0x9f04 + Spellcheck Atom = 0x4740a + Src Atom = 0x5c003 + Srcdoc Atom = 0x5c006 + Srclang Atom = 0x5f907 + Srcset Atom = 0x6f906 + Start Atom = 0x3fa05 + Step Atom = 0x58304 + Strike Atom = 0xd206 + Strong Atom = 0x6dd06 + Style Atom = 0x6ff05 + Sub Atom = 0x66d03 + Summary Atom = 0x70407 + Sup Atom = 0x70b03 + Svg Atom = 0x70e03 + System Atom = 0x71106 + Tabindex Atom = 0x4be08 + Table Atom = 0x59505 + Target Atom = 0x2c406 + Tbody Atom = 0x2705 + Td Atom = 0x9202 + Template Atom = 0x71408 + Textarea Atom = 0x35208 + Tfoot Atom = 0xf505 + Th Atom = 0x15602 + Thead Atom = 0x33005 + Time Atom = 0x4204 + Title Atom = 0x11005 + Tr Atom = 0xcc02 + Track Atom = 0x1ba05 + Translate Atom = 0x1f209 + Tt Atom = 0x6802 + Type Atom = 0xd904 + Typemustmatch Atom = 0x2900d + U Atom = 0xb01 + Ul Atom = 0xa702 + Updateviacache Atom = 0x460e + Usemap Atom = 0x59e06 + Value Atom = 0x1505 + Var Atom = 0x16d03 + Video Atom = 0x2f105 + Wbr Atom = 0x57c03 + Width Atom = 0x64905 + Workertype Atom = 0x71c0a + Wrap Atom = 0x72604 + Xmp Atom = 0x12f03 +) + +const hash0 = 0x81cdf10e + +const maxAtomLen = 25 + +var table = [1 << 9]Atom{ + 0x1: 0xe60a, // mediagroup + 0x2: 0x2e404, // lang + 0x4: 0x2c09, // accesskey + 0x5: 0x8b08, // frameset + 0x7: 0x63a08, // onselect + 0x8: 0x71106, // system + 0xa: 0x64905, // width + 0xc: 0x2890b, // formenctype + 0xd: 0x13702, // ol + 0xe: 0x3970b, // oncuechange + 0x10: 0x14b03, // bdo + 0x11: 0x11505, // audio + 0x12: 0x17a09, // draggable + 0x14: 0x2f105, // video + 0x15: 0x2b102, // mn + 0x16: 0x38704, // menu + 0x17: 0x2cf06, // poster + 0x19: 0xf606, // footer + 0x1a: 0x2a806, // method + 0x1b: 0x2b808, // datetime + 0x1c: 0x19507, // onabort + 0x1d: 0x460e, // updateviacache + 0x1e: 0xff05, // async + 0x1f: 0x49d06, // onload + 0x21: 0x11908, // oncancel + 0x22: 0x62908, // onseeked + 0x23: 0x30205, // image + 0x24: 0x5d812, // onrejectionhandled + 0x26: 0x17404, // link + 0x27: 0x51d06, // output + 0x28: 0x33104, // head + 0x29: 0x4ff0c, // onmouseleave + 0x2a: 0x57f07, // onpaste + 0x2b: 0x5a409, // onplaying + 0x2c: 0x1c407, // colspan + 0x2f: 0x1bf05, // color + 0x30: 0x5f504, // size + 0x31: 0x2e80a, // http-equiv + 0x33: 0x601, // i + 0x34: 0x5590a, // onpagehide + 0x35: 0x68c14, // onunhandledrejection + 0x37: 0x42a07, // onerror + 0x3a: 0x3b08, // basefont + 0x3f: 0x1303, // nav + 0x40: 0x17704, // kind + 0x41: 0x35708, // readonly + 0x42: 0x30806, // mglyph + 0x44: 0xb202, // li + 0x46: 0x2d506, // hidden + 0x47: 0x70e03, // svg + 0x48: 0x58304, // step + 0x49: 0x23f09, // integrity + 0x4a: 0x58606, // public + 0x4c: 0x1ab03, 
// col + 0x4d: 0x1870a, // blockquote + 0x4e: 0x34f02, // h5 + 0x50: 0x5b908, // progress + 0x51: 0x5f505, // sizes + 0x52: 0x34502, // h4 + 0x56: 0x33005, // thead + 0x57: 0xd607, // keytype + 0x58: 0x5b70a, // onprogress + 0x59: 0x44b09, // inputmode + 0x5a: 0x3b109, // ondragend + 0x5d: 0x3a205, // oncut + 0x5e: 0x43706, // spacer + 0x5f: 0x1ab08, // colgroup + 0x62: 0x16502, // is + 0x65: 0x3c02, // as + 0x66: 0x54809, // onoffline + 0x67: 0x33706, // sorted + 0x69: 0x48d10, // onlanguagechange + 0x6c: 0x43d0c, // onhashchange + 0x6d: 0x9604, // name + 0x6e: 0xf505, // tfoot + 0x6f: 0x56104, // desc + 0x70: 0x33d03, // max + 0x72: 0x1ea06, // coords + 0x73: 0x30d02, // h3 + 0x74: 0x6e70e, // onbeforeunload + 0x75: 0x9c04, // rows + 0x76: 0x63c06, // select + 0x77: 0x9805, // meter + 0x78: 0x38b06, // itemid + 0x79: 0x53c0c, // onmousewheel + 0x7a: 0x5c006, // srcdoc + 0x7d: 0x1ba05, // track + 0x7f: 0x31f08, // itemtype + 0x82: 0xa402, // mo + 0x83: 0x41b08, // onchange + 0x84: 0x33107, // headers + 0x85: 0x5cc0c, // onratechange + 0x86: 0x60819, // onsecuritypolicyviolation + 0x88: 0x4a508, // datalist + 0x89: 0x4e80b, // onmousedown + 0x8a: 0x1ef04, // slot + 0x8b: 0x4b010, // onloadedmetadata + 0x8c: 0x1a06, // accept + 0x8d: 0x26806, // object + 0x91: 0x6b30e, // onvolumechange + 0x92: 0x2107, // charset + 0x93: 0x27613, // onautocompleteerror + 0x94: 0xc113, // allowpaymentrequest + 0x95: 0x2804, // body + 0x96: 0x10a07, // default + 0x97: 0x63c08, // selected + 0x98: 0x21e04, // face + 0x99: 0x1e505, // shape + 0x9b: 0x68408, // ontoggle + 0x9e: 0x64b02, // dt + 0x9f: 0xb604, // mark + 0xa1: 0xb01, // u + 0xa4: 0x6ab08, // onunload + 0xa5: 0x5d04, // loop + 0xa6: 0x16408, // disabled + 0xaa: 0x42307, // onended + 0xab: 0xb00a, // malignmark + 0xad: 0x67b09, // onsuspend + 0xae: 0x35105, // mtext + 0xaf: 0x64f06, // onsort + 0xb0: 0x19d08, // itemprop + 0xb3: 0x67109, // itemscope + 0xb4: 0x17305, // blink + 0xb6: 0x3b106, // ondrag + 0xb7: 0xa702, // ul + 0xb8: 0x26e04, // form + 0xb9: 0x12907, // sandbox + 0xba: 0x8b05, // frame + 0xbb: 0x1505, // value + 0xbc: 0x66209, // onstorage + 0xbf: 0xaa07, // acronym + 0xc0: 0x19a02, // rt + 0xc2: 0x202, // br + 0xc3: 0x22608, // fieldset + 0xc4: 0x2900d, // typemustmatch + 0xc5: 0xa208, // nomodule + 0xc6: 0x6c07, // noembed + 0xc7: 0x69e0d, // onbeforeprint + 0xc8: 0x19106, // button + 0xc9: 0x2f507, // onclick + 0xca: 0x70407, // summary + 0xcd: 0xfb04, // ruby + 0xce: 0x56405, // class + 0xcf: 0x3f40b, // ondragstart + 0xd0: 0x23107, // caption + 0xd4: 0xdd0e, // allowusermedia + 0xd5: 0x4cf0b, // onloadstart + 0xd9: 0x16b03, // div + 0xda: 0x4a904, // list + 0xdb: 0x32e04, // math + 0xdc: 0x44b05, // input + 0xdf: 0x3ea0a, // ondragover + 0xe0: 0x2de02, // h2 + 0xe2: 0x1b209, // plaintext + 0xe4: 0x4f30c, // onmouseenter + 0xe7: 0x47907, // checked + 0xe8: 0x47003, // pre + 0xea: 0x35f08, // multiple + 0xeb: 0xba03, // bdi + 0xec: 0x33d09, // maxlength + 0xed: 0xcf01, // q + 0xee: 0x61f0a, // onauxclick + 0xf0: 0x57c03, // wbr + 0xf2: 0x3b04, // base + 0xf3: 0x6e306, // option + 0xf5: 0x41310, // ondurationchange + 0xf7: 0x8908, // noframes + 0xf9: 0x40508, // dropzone + 0xfb: 0x67505, // scope + 0xfc: 0x8008, // reversed + 0xfd: 0x3ba0b, // ondragenter + 0xfe: 0x3fa05, // start + 0xff: 0x12f03, // xmp + 0x100: 0x5f907, // srclang + 0x101: 0x30703, // img + 0x104: 0x101, // b + 0x105: 0x25403, // for + 0x106: 0x10705, // aside + 0x107: 0x44907, // oninput + 0x108: 0x35604, // area + 0x109: 0x2a40a, // formmethod + 0x10a: 0x72604, 
// wrap + 0x10c: 0x23c02, // rp + 0x10d: 0x46b0a, // onkeypress + 0x10e: 0x6802, // tt + 0x110: 0x34702, // mi + 0x111: 0x36705, // muted + 0x112: 0xf303, // alt + 0x113: 0x5c504, // code + 0x114: 0x6e02, // em + 0x115: 0x3c50a, // ondragexit + 0x117: 0x9f04, // span + 0x119: 0x6d708, // manifest + 0x11a: 0x38708, // menuitem + 0x11b: 0x58b07, // content + 0x11d: 0x6c109, // onwaiting + 0x11f: 0x4c609, // onloadend + 0x121: 0x37e0d, // oncontextmenu + 0x123: 0x56d06, // onblur + 0x124: 0x3fc07, // article + 0x125: 0x9303, // dir + 0x126: 0xef04, // ping + 0x127: 0x24c08, // required + 0x128: 0x45509, // oninvalid + 0x129: 0xb105, // align + 0x12b: 0x58a04, // icon + 0x12c: 0x64d02, // h6 + 0x12d: 0x1c404, // cols + 0x12e: 0x22e0a, // figcaption + 0x12f: 0x45e09, // onkeydown + 0x130: 0x66b08, // onsubmit + 0x131: 0x14d09, // oncanplay + 0x132: 0x70b03, // sup + 0x133: 0xc01, // p + 0x135: 0x40a09, // onemptied + 0x136: 0x39106, // oncopy + 0x137: 0x19c04, // cite + 0x138: 0x3a70a, // ondblclick + 0x13a: 0x50b0b, // onmousemove + 0x13c: 0x66d03, // sub + 0x13d: 0x48703, // rel + 0x13e: 0x5f08, // optgroup + 0x142: 0x9c07, // rowspan + 0x143: 0x37806, // source + 0x144: 0x21608, // noscript + 0x145: 0x1a304, // open + 0x146: 0x20403, // ins + 0x147: 0x2540d, // foreignObject + 0x148: 0x5ad0a, // onpopstate + 0x14a: 0x28d07, // enctype + 0x14b: 0x2760e, // onautocomplete + 0x14c: 0x35208, // textarea + 0x14e: 0x2780c, // autocomplete + 0x14f: 0x15702, // hr + 0x150: 0x1de08, // controls + 0x151: 0x10902, // id + 0x153: 0x2360c, // onafterprint + 0x155: 0x2610d, // foreignobject + 0x156: 0x32707, // marquee + 0x157: 0x59a07, // onpause + 0x158: 0x5e602, // dl + 0x159: 0x5206, // height + 0x15a: 0x34703, // min + 0x15b: 0x9307, // dirname + 0x15c: 0x1f209, // translate + 0x15d: 0x5604, // html + 0x15e: 0x34709, // minlength + 0x15f: 0x48607, // preload + 0x160: 0x71408, // template + 0x161: 0x3df0b, // ondragleave + 0x162: 0x3a02, // rb + 0x164: 0x5c003, // src + 0x165: 0x6dd06, // strong + 0x167: 0x7804, // samp + 0x168: 0x6f307, // address + 0x169: 0x55108, // ononline + 0x16b: 0x1310b, // placeholder + 0x16c: 0x2c406, // target + 0x16d: 0x20605, // small + 0x16e: 0x6ca07, // onwheel + 0x16f: 0x1c90a, // annotation + 0x170: 0x4740a, // spellcheck + 0x171: 0x7207, // details + 0x172: 0x10306, // canvas + 0x173: 0x12109, // autofocus + 0x174: 0xc05, // param + 0x176: 0x46308, // download + 0x177: 0x45203, // del + 0x178: 0x36c07, // onclose + 0x179: 0xb903, // kbd + 0x17a: 0x31906, // applet + 0x17b: 0x2e004, // href + 0x17c: 0x5f108, // onresize + 0x17e: 0x49d0c, // onloadeddata + 0x180: 0xcc02, // tr + 0x181: 0x2c00a, // formtarget + 0x182: 0x11005, // title + 0x183: 0x6ff05, // style + 0x184: 0xd206, // strike + 0x185: 0x59e06, // usemap + 0x186: 0x2fc06, // iframe + 0x187: 0x1004, // main + 0x189: 0x7b07, // picture + 0x18c: 0x31605, // ismap + 0x18e: 0x4a504, // data + 0x18f: 0x5905, // label + 0x191: 0x3d10e, // referrerpolicy + 0x192: 0x15602, // th + 0x194: 0x53606, // prompt + 0x195: 0x56807, // section + 0x197: 0x6d107, // optimum + 0x198: 0x2db04, // high + 0x199: 0x15c02, // h1 + 0x19a: 0x65909, // onstalled + 0x19b: 0x16d03, // var + 0x19c: 0x4204, // time + 0x19e: 0x67402, // ms + 0x19f: 0x33106, // header + 0x1a0: 0x4da09, // onmessage + 0x1a1: 0x1a605, // nonce + 0x1a2: 0x26e0a, // formaction + 0x1a3: 0x22006, // center + 0x1a4: 0x3704, // nobr + 0x1a5: 0x59505, // table + 0x1a6: 0x4a907, // listing + 0x1a7: 0x18106, // legend + 0x1a9: 0x29b09, // challenge + 0x1aa: 0x24806, // 
figure + 0x1ab: 0xe605, // media + 0x1ae: 0xd904, // type + 0x1af: 0x3f04, // font + 0x1b0: 0x4da0e, // onmessageerror + 0x1b1: 0x37108, // seamless + 0x1b2: 0x8703, // dfn + 0x1b3: 0x5c705, // defer + 0x1b4: 0xc303, // low + 0x1b5: 0x19a03, // rtc + 0x1b6: 0x5230b, // onmouseover + 0x1b7: 0x2b20a, // novalidate + 0x1b8: 0x71c0a, // workertype + 0x1ba: 0x3cd07, // itemref + 0x1bd: 0x1, // a + 0x1be: 0x31803, // map + 0x1bf: 0x400c, // ontimeupdate + 0x1c0: 0x15e07, // bgsound + 0x1c1: 0x3206, // keygen + 0x1c2: 0x2705, // tbody + 0x1c5: 0x64406, // onshow + 0x1c7: 0x2501, // s + 0x1c8: 0x6607, // pattern + 0x1cc: 0x14d10, // oncanplaythrough + 0x1ce: 0x2d702, // dd + 0x1cf: 0x6f906, // srcset + 0x1d0: 0x17003, // big + 0x1d2: 0x65108, // sortable + 0x1d3: 0x48007, // onkeyup + 0x1d5: 0x5a406, // onplay + 0x1d7: 0x4b804, // meta + 0x1d8: 0x40306, // ondrop + 0x1da: 0x60008, // onscroll + 0x1db: 0x1fb0b, // crossorigin + 0x1dc: 0x5730a, // onpageshow + 0x1dd: 0x4, // abbr + 0x1de: 0x9202, // td + 0x1df: 0x58b0f, // contenteditable + 0x1e0: 0x27206, // action + 0x1e1: 0x1400b, // playsinline + 0x1e2: 0x43107, // onfocus + 0x1e3: 0x2e008, // hreflang + 0x1e5: 0x5160a, // onmouseout + 0x1e6: 0x5ea07, // onreset + 0x1e7: 0x13c08, // autoplay + 0x1e8: 0x63109, // onseeking + 0x1ea: 0x67506, // scoped + 0x1ec: 0x30a, // radiogroup + 0x1ee: 0x3800b, // contextmenu + 0x1ef: 0x52e09, // onmouseup + 0x1f1: 0x2ca06, // hgroup + 0x1f2: 0x2080f, // allowfullscreen + 0x1f3: 0x4be08, // tabindex + 0x1f6: 0x30f07, // isindex + 0x1f7: 0x1a0e, // accept-charset + 0x1f8: 0x2ae0e, // formnovalidate + 0x1fb: 0x1c90e, // annotation-xml + 0x1fc: 0x6e05, // embed + 0x1fd: 0x21806, // script + 0x1fe: 0xbb06, // dialog + 0x1ff: 0x1d707, // command +} + +const atomText = "abbradiogrouparamainavalueaccept-charsetbodyaccesskeygenobrb" + + "asefontimeupdateviacacheightmlabelooptgroupatternoembedetail" + + "sampictureversedfnoframesetdirnameterowspanomoduleacronymali" + + "gnmarkbdialogallowpaymentrequestrikeytypeallowusermediagroup" + + "ingaltfooterubyasyncanvasidefaultitleaudioncancelautofocusan" + + "dboxmplaceholderautoplaysinlinebdoncanplaythrough1bgsoundisa" + + "bledivarbigblinkindraggablegendblockquotebuttonabortcitempro" + + "penoncecolgrouplaintextrackcolorcolspannotation-xmlcommandco" + + "ntrolshapecoordslotranslatecrossoriginsmallowfullscreenoscri" + + "ptfacenterfieldsetfigcaptionafterprintegrityfigurequiredfore" + + "ignObjectforeignobjectformactionautocompleteerrorformenctype" + + "mustmatchallengeformmethodformnovalidatetimeformtargethgroup" + + "osterhiddenhigh2hreflanghttp-equivideonclickiframeimageimgly" + + "ph3isindexismappletitemtypemarqueematheadersortedmaxlength4m" + + "inlength5mtextareadonlymultiplemutedoncloseamlessourceoncont" + + "extmenuitemidoncopyoncuechangeoncutondblclickondragendondrag" + + "enterondragexitemreferrerpolicyondragleaveondragoverondragst" + + "articleondropzonemptiedondurationchangeonendedonerroronfocus" + + "paceronhashchangeoninputmodeloninvalidonkeydownloadonkeypres" + + "spellcheckedonkeyupreloadonlanguagechangeonloadeddatalisting" + + "onloadedmetadatabindexonloadendonloadstartonmessageerroronmo" + + "usedownonmouseenteronmouseleaveonmousemoveonmouseoutputonmou" + + "seoveronmouseupromptonmousewheelonofflineononlineonpagehides" + + "classectionbluronpageshowbronpastepublicontenteditableonpaus" + + "emaponplayingonpopstateonprogressrcdocodeferonratechangeonre" + + "jectionhandledonresetonresizesrclangonscrollonsecuritypolicy" + + 
"violationauxclickonseekedonseekingonselectedonshowidth6onsor" + + "tableonstalledonstorageonsubmitemscopedonsuspendontoggleonun" + + "handledrejectionbeforeprintonunloadonvolumechangeonwaitingon" + + "wheeloptimumanifestrongoptionbeforeunloaddressrcsetstylesumm" + + "arysupsvgsystemplateworkertypewrap" diff --git a/vendor/golang.org/x/net/html/charset/charset.go b/vendor/golang.org/x/net/html/charset/charset.go new file mode 100644 index 00000000000..13bed1599f7 --- /dev/null +++ b/vendor/golang.org/x/net/html/charset/charset.go @@ -0,0 +1,257 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package charset provides common text encodings for HTML documents. +// +// The mapping from encoding labels to encodings is defined at +// https://encoding.spec.whatwg.org/. +package charset // import "golang.org/x/net/html/charset" + +import ( + "bytes" + "fmt" + "io" + "mime" + "strings" + "unicode/utf8" + + "golang.org/x/net/html" + "golang.org/x/text/encoding" + "golang.org/x/text/encoding/charmap" + "golang.org/x/text/encoding/htmlindex" + "golang.org/x/text/transform" +) + +// Lookup returns the encoding with the specified label, and its canonical +// name. It returns nil and the empty string if label is not one of the +// standard encodings for HTML. Matching is case-insensitive and ignores +// leading and trailing whitespace. Encoders will use HTML escape sequences for +// runes that are not supported by the character set. +func Lookup(label string) (e encoding.Encoding, name string) { + e, err := htmlindex.Get(label) + if err != nil { + return nil, "" + } + name, _ = htmlindex.Name(e) + return &htmlEncoding{e}, name +} + +type htmlEncoding struct{ encoding.Encoding } + +func (h *htmlEncoding) NewEncoder() *encoding.Encoder { + // HTML requires a non-terminating legacy encoder. We use HTML escapes to + // substitute unsupported code points. + return encoding.HTMLEscapeUnsupported(h.Encoding.NewEncoder()) +} + +// DetermineEncoding determines the encoding of an HTML document by examining +// up to the first 1024 bytes of content and the declared Content-Type. +// +// See http://www.whatwg.org/specs/web-apps/current-work/multipage/parsing.html#determining-the-character-encoding +func DetermineEncoding(content []byte, contentType string) (e encoding.Encoding, name string, certain bool) { + if len(content) > 1024 { + content = content[:1024] + } + + for _, b := range boms { + if bytes.HasPrefix(content, b.bom) { + e, name = Lookup(b.enc) + return e, name, true + } + } + + if _, params, err := mime.ParseMediaType(contentType); err == nil { + if cs, ok := params["charset"]; ok { + if e, name = Lookup(cs); e != nil { + return e, name, true + } + } + } + + if len(content) > 0 { + e, name = prescan(content) + if e != nil { + return e, name, false + } + } + + // Try to detect UTF-8. + // First eliminate any partial rune at the end. + for i := len(content) - 1; i >= 0 && i > len(content)-4; i-- { + b := content[i] + if b < 0x80 { + break + } + if utf8.RuneStart(b) { + content = content[:i] + break + } + } + hasHighBit := false + for _, c := range content { + if c >= 0x80 { + hasHighBit = true + break + } + } + if hasHighBit && utf8.Valid(content) { + return encoding.Nop, "utf-8", false + } + + // TODO: change default depending on user's locale? + return charmap.Windows1252, "windows-1252", false +} + +// NewReader returns an io.Reader that converts the content of r to UTF-8. 
+// It calls DetermineEncoding to find out what r's encoding is. +func NewReader(r io.Reader, contentType string) (io.Reader, error) { + preview := make([]byte, 1024) + n, err := io.ReadFull(r, preview) + switch { + case err == io.ErrUnexpectedEOF: + preview = preview[:n] + r = bytes.NewReader(preview) + case err != nil: + return nil, err + default: + r = io.MultiReader(bytes.NewReader(preview), r) + } + + if e, _, _ := DetermineEncoding(preview, contentType); e != encoding.Nop { + r = transform.NewReader(r, e.NewDecoder()) + } + return r, nil +} + +// NewReaderLabel returns a reader that converts from the specified charset to +// UTF-8. It uses Lookup to find the encoding that corresponds to label, and +// returns an error if Lookup returns nil. It is suitable for use as +// encoding/xml.Decoder's CharsetReader function. +func NewReaderLabel(label string, input io.Reader) (io.Reader, error) { + e, _ := Lookup(label) + if e == nil { + return nil, fmt.Errorf("unsupported charset: %q", label) + } + return transform.NewReader(input, e.NewDecoder()), nil +} + +func prescan(content []byte) (e encoding.Encoding, name string) { + z := html.NewTokenizer(bytes.NewReader(content)) + for { + switch z.Next() { + case html.ErrorToken: + return nil, "" + + case html.StartTagToken, html.SelfClosingTagToken: + tagName, hasAttr := z.TagName() + if !bytes.Equal(tagName, []byte("meta")) { + continue + } + attrList := make(map[string]bool) + gotPragma := false + + const ( + dontKnow = iota + doNeedPragma + doNotNeedPragma + ) + needPragma := dontKnow + + name = "" + e = nil + for hasAttr { + var key, val []byte + key, val, hasAttr = z.TagAttr() + ks := string(key) + if attrList[ks] { + continue + } + attrList[ks] = true + for i, c := range val { + if 'A' <= c && c <= 'Z' { + val[i] = c + 0x20 + } + } + + switch ks { + case "http-equiv": + if bytes.Equal(val, []byte("content-type")) { + gotPragma = true + } + + case "content": + if e == nil { + name = fromMetaElement(string(val)) + if name != "" { + e, name = Lookup(name) + if e != nil { + needPragma = doNeedPragma + } + } + } + + case "charset": + e, name = Lookup(string(val)) + needPragma = doNotNeedPragma + } + } + + if needPragma == dontKnow || needPragma == doNeedPragma && !gotPragma { + continue + } + + if strings.HasPrefix(name, "utf-16") { + name = "utf-8" + e = encoding.Nop + } + + if e != nil { + return e, name + } + } + } +} + +func fromMetaElement(s string) string { + for s != "" { + csLoc := strings.Index(s, "charset") + if csLoc == -1 { + return "" + } + s = s[csLoc+len("charset"):] + s = strings.TrimLeft(s, " \t\n\f\r") + if !strings.HasPrefix(s, "=") { + continue + } + s = s[1:] + s = strings.TrimLeft(s, " \t\n\f\r") + if s == "" { + return "" + } + if q := s[0]; q == '"' || q == '\'' { + s = s[1:] + closeQuote := strings.IndexRune(s, rune(q)) + if closeQuote == -1 { + return "" + } + return s[:closeQuote] + } + + end := strings.IndexAny(s, "; \t\n\f\r") + if end == -1 { + end = len(s) + } + return s[:end] + } + return "" +} + +var boms = []struct { + bom []byte + enc string +}{ + {[]byte{0xfe, 0xff}, "utf-16be"}, + {[]byte{0xff, 0xfe}, "utf-16le"}, + {[]byte{0xef, 0xbb, 0xbf}, "utf-8"}, +} diff --git a/vendor/golang.org/x/net/html/const.go b/vendor/golang.org/x/net/html/const.go new file mode 100644 index 00000000000..ff7acf2d5b4 --- /dev/null +++ b/vendor/golang.org/x/net/html/const.go @@ -0,0 +1,111 @@ +// Copyright 2011 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +// Section 12.2.4.2 of the HTML5 specification says "The following elements +// have varying levels of special parsing rules". +// https://html.spec.whatwg.org/multipage/syntax.html#the-stack-of-open-elements +var isSpecialElementMap = map[string]bool{ + "address": true, + "applet": true, + "area": true, + "article": true, + "aside": true, + "base": true, + "basefont": true, + "bgsound": true, + "blockquote": true, + "body": true, + "br": true, + "button": true, + "caption": true, + "center": true, + "col": true, + "colgroup": true, + "dd": true, + "details": true, + "dir": true, + "div": true, + "dl": true, + "dt": true, + "embed": true, + "fieldset": true, + "figcaption": true, + "figure": true, + "footer": true, + "form": true, + "frame": true, + "frameset": true, + "h1": true, + "h2": true, + "h3": true, + "h4": true, + "h5": true, + "h6": true, + "head": true, + "header": true, + "hgroup": true, + "hr": true, + "html": true, + "iframe": true, + "img": true, + "input": true, + "keygen": true, // "keygen" has been removed from the spec, but are kept here for backwards compatibility. + "li": true, + "link": true, + "listing": true, + "main": true, + "marquee": true, + "menu": true, + "meta": true, + "nav": true, + "noembed": true, + "noframes": true, + "noscript": true, + "object": true, + "ol": true, + "p": true, + "param": true, + "plaintext": true, + "pre": true, + "script": true, + "section": true, + "select": true, + "source": true, + "style": true, + "summary": true, + "table": true, + "tbody": true, + "td": true, + "template": true, + "textarea": true, + "tfoot": true, + "th": true, + "thead": true, + "title": true, + "tr": true, + "track": true, + "ul": true, + "wbr": true, + "xmp": true, +} + +func isSpecialElement(element *Node) bool { + switch element.Namespace { + case "", "html": + return isSpecialElementMap[element.Data] + case "math": + switch element.Data { + case "mi", "mo", "mn", "ms", "mtext", "annotation-xml": + return true + } + case "svg": + switch element.Data { + case "foreignObject", "desc", "title": + return true + } + } + return false +} diff --git a/vendor/golang.org/x/net/html/doc.go b/vendor/golang.org/x/net/html/doc.go new file mode 100644 index 00000000000..885c4c5936b --- /dev/null +++ b/vendor/golang.org/x/net/html/doc.go @@ -0,0 +1,122 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package html implements an HTML5-compliant tokenizer and parser. + +Tokenization is done by creating a Tokenizer for an io.Reader r. It is the +caller's responsibility to ensure that r provides UTF-8 encoded HTML. + + z := html.NewTokenizer(r) + +Given a Tokenizer z, the HTML is tokenized by repeatedly calling z.Next(), +which parses the next token and returns its type, or an error: + + for { + tt := z.Next() + if tt == html.ErrorToken { + // ... + return ... + } + // Process the current token. + } + +There are two APIs for retrieving the current token. The high-level API is to +call Token; the low-level API is to call Text or TagName / TagAttr. Both APIs +allow optionally calling Raw after Next but before Token, Text, TagName, or +TagAttr. In EBNF notation, the valid call sequence per token is: + + Next {Raw} [ Token | Text | TagName {TagAttr} ] + +Token returns an independent data structure that completely describes a token. 
+Entities (such as "<") are unescaped, tag names and attribute keys are +lower-cased, and attributes are collected into a []Attribute. For example: + + for { + if z.Next() == html.ErrorToken { + // Returning io.EOF indicates success. + return z.Err() + } + emitToken(z.Token()) + } + +The low-level API performs fewer allocations and copies, but the contents of +the []byte values returned by Text, TagName and TagAttr may change on the next +call to Next. For example, to extract an HTML page's anchor text: + + depth := 0 + for { + tt := z.Next() + switch tt { + case html.ErrorToken: + return z.Err() + case html.TextToken: + if depth > 0 { + // emitBytes should copy the []byte it receives, + // if it doesn't process it immediately. + emitBytes(z.Text()) + } + case html.StartTagToken, html.EndTagToken: + tn, _ := z.TagName() + if len(tn) == 1 && tn[0] == 'a' { + if tt == html.StartTagToken { + depth++ + } else { + depth-- + } + } + } + } + +Parsing is done by calling Parse with an io.Reader, which returns the root of +the parse tree (the document element) as a *Node. It is the caller's +responsibility to ensure that the Reader provides UTF-8 encoded HTML. For +example, to process each anchor node in depth-first order: + + doc, err := html.Parse(r) + if err != nil { + // ... + } + for n := range doc.Descendants() { + if n.Type == html.ElementNode && n.Data == "a" { + // Do something with n... + } + } + +The relevant specifications include: +https://html.spec.whatwg.org/multipage/syntax.html and +https://html.spec.whatwg.org/multipage/syntax.html#tokenization + +# Security Considerations + +Care should be taken when parsing and interpreting HTML, whether full documents +or fragments, within the framework of the HTML specification, especially with +regard to untrusted inputs. + +This package provides both a tokenizer and a parser, which implement the +tokenization, and tokenization and tree construction stages of the WHATWG HTML +parsing specification respectively. While the tokenizer parses and normalizes +individual HTML tokens, only the parser constructs the DOM tree from the +tokenized HTML, as described in the tree construction stage of the +specification, dynamically modifying or extending the document's DOM tree. + +If your use case requires semantically well-formed HTML documents, as defined by +the WHATWG specification, the parser should be used rather than the tokenizer. + +In security contexts, if trust decisions are being made using the tokenized or +parsed content, the input must be re-serialized (for instance by using Render or +Token.String) in order for those trust decisions to hold, as the process of +tokenization or parsing may alter the content. +*/ +package html // import "golang.org/x/net/html" + +// The tokenization algorithm implemented by this package is not a line-by-line +// transliteration of the relatively verbose state-machine in the WHATWG +// specification. A more direct approach is used instead, where the program +// counter implies the state, such as whether it is tokenizing a tag or a text +// node. Specification compliance is verified by checking expected and actual +// outputs over a test suite rather than aiming for algorithmic fidelity. + +// TODO(nigeltao): Does a DOM API belong in this package or a separate one? +// TODO(nigeltao): How does parsing interact with a JavaScript engine? 
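As a minimal sketch of how the two vendored packages above compose (assuming only the APIs visible in this diff: charset.NewReader and html.Parse; the URL and variable names are illustrative, not part of the patch):

	package main

	import (
		"fmt"
		"log"
		"net/http"

		"golang.org/x/net/html"
		"golang.org/x/net/html/charset"
	)

	func main() {
		resp, err := http.Get("https://example.com/") // illustrative URL
		if err != nil {
			log.Fatal(err)
		}
		defer resp.Body.Close()

		// charset.NewReader applies DetermineEncoding (BOM, then the
		// Content-Type charset parameter, then the <meta> prescan) and
		// returns a UTF-8 reader, which is what html.Parse requires of
		// its input.
		r, err := charset.NewReader(resp.Body, resp.Header.Get("Content-Type"))
		if err != nil {
			log.Fatal(err)
		}

		doc, err := html.Parse(r)
		if err != nil {
			log.Fatal(err)
		}

		// Count anchor elements with a depth-first walk over the parse
		// tree, mirroring the anchor example in the package doc above.
		var anchors int
		var walk func(n *html.Node)
		walk = func(n *html.Node) {
			if n.Type == html.ElementNode && n.Data == "a" {
				anchors++
			}
			for c := n.FirstChild; c != nil; c = c.NextSibling {
				walk(c)
			}
		}
		walk(doc)
		fmt.Println("anchors:", anchors)
	}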
diff --git a/vendor/golang.org/x/net/html/doctype.go b/vendor/golang.org/x/net/html/doctype.go new file mode 100644 index 00000000000..c484e5a94fb --- /dev/null +++ b/vendor/golang.org/x/net/html/doctype.go @@ -0,0 +1,156 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +import ( + "strings" +) + +// parseDoctype parses the data from a DoctypeToken into a name, +// public identifier, and system identifier. It returns a Node whose Type +// is DoctypeNode, whose Data is the name, and which has attributes +// named "system" and "public" for the two identifiers if they were present. +// quirks is whether the document should be parsed in "quirks mode". +func parseDoctype(s string) (n *Node, quirks bool) { + n = &Node{Type: DoctypeNode} + + // Find the name. + space := strings.IndexAny(s, whitespace) + if space == -1 { + space = len(s) + } + n.Data = s[:space] + // The comparison to "html" is case-sensitive. + if n.Data != "html" { + quirks = true + } + n.Data = strings.ToLower(n.Data) + s = strings.TrimLeft(s[space:], whitespace) + + if len(s) < 6 { + // It can't start with "PUBLIC" or "SYSTEM". + // Ignore the rest of the string. + return n, quirks || s != "" + } + + key := strings.ToLower(s[:6]) + s = s[6:] + for key == "public" || key == "system" { + s = strings.TrimLeft(s, whitespace) + if s == "" { + break + } + quote := s[0] + if quote != '"' && quote != '\'' { + break + } + s = s[1:] + q := strings.IndexRune(s, rune(quote)) + var id string + if q == -1 { + id = s + s = "" + } else { + id = s[:q] + s = s[q+1:] + } + n.Attr = append(n.Attr, Attribute{Key: key, Val: id}) + if key == "public" { + key = "system" + } else { + key = "" + } + } + + if key != "" || s != "" { + quirks = true + } else if len(n.Attr) > 0 { + if n.Attr[0].Key == "public" { + public := strings.ToLower(n.Attr[0].Val) + switch public { + case "-//w3o//dtd w3 html strict 3.0//en//", "-/w3d/dtd html 4.0 transitional/en", "html": + quirks = true + default: + for _, q := range quirkyIDs { + if strings.HasPrefix(public, q) { + quirks = true + break + } + } + } + // The following two public IDs only cause quirks mode if there is no system ID. + if len(n.Attr) == 1 && (strings.HasPrefix(public, "-//w3c//dtd html 4.01 frameset//") || + strings.HasPrefix(public, "-//w3c//dtd html 4.01 transitional//")) { + quirks = true + } + } + if lastAttr := n.Attr[len(n.Attr)-1]; lastAttr.Key == "system" && + strings.ToLower(lastAttr.Val) == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd" { + quirks = true + } + } + + return n, quirks +} + +// quirkyIDs is a list of public doctype identifiers that cause a document +// to be interpreted in quirks mode. The identifiers should be in lower case. 
+var quirkyIDs = []string{ + "+//silmaril//dtd html pro v0r11 19970101//", + "-//advasoft ltd//dtd html 3.0 aswedit + extensions//", + "-//as//dtd html 3.0 aswedit + extensions//", + "-//ietf//dtd html 2.0 level 1//", + "-//ietf//dtd html 2.0 level 2//", + "-//ietf//dtd html 2.0 strict level 1//", + "-//ietf//dtd html 2.0 strict level 2//", + "-//ietf//dtd html 2.0 strict//", + "-//ietf//dtd html 2.0//", + "-//ietf//dtd html 2.1e//", + "-//ietf//dtd html 3.0//", + "-//ietf//dtd html 3.2 final//", + "-//ietf//dtd html 3.2//", + "-//ietf//dtd html 3//", + "-//ietf//dtd html level 0//", + "-//ietf//dtd html level 1//", + "-//ietf//dtd html level 2//", + "-//ietf//dtd html level 3//", + "-//ietf//dtd html strict level 0//", + "-//ietf//dtd html strict level 1//", + "-//ietf//dtd html strict level 2//", + "-//ietf//dtd html strict level 3//", + "-//ietf//dtd html strict//", + "-//ietf//dtd html//", + "-//metrius//dtd metrius presentational//", + "-//microsoft//dtd internet explorer 2.0 html strict//", + "-//microsoft//dtd internet explorer 2.0 html//", + "-//microsoft//dtd internet explorer 2.0 tables//", + "-//microsoft//dtd internet explorer 3.0 html strict//", + "-//microsoft//dtd internet explorer 3.0 html//", + "-//microsoft//dtd internet explorer 3.0 tables//", + "-//netscape comm. corp.//dtd html//", + "-//netscape comm. corp.//dtd strict html//", + "-//o'reilly and associates//dtd html 2.0//", + "-//o'reilly and associates//dtd html extended 1.0//", + "-//o'reilly and associates//dtd html extended relaxed 1.0//", + "-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//", + "-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//", + "-//spyglass//dtd html 2.0 extended//", + "-//sq//dtd html 2.0 hotmetal + extensions//", + "-//sun microsystems corp.//dtd hotjava html//", + "-//sun microsystems corp.//dtd hotjava strict html//", + "-//w3c//dtd html 3 1995-03-24//", + "-//w3c//dtd html 3.2 draft//", + "-//w3c//dtd html 3.2 final//", + "-//w3c//dtd html 3.2//", + "-//w3c//dtd html 3.2s draft//", + "-//w3c//dtd html 4.0 frameset//", + "-//w3c//dtd html 4.0 transitional//", + "-//w3c//dtd html experimental 19960712//", + "-//w3c//dtd html experimental 970421//", + "-//w3c//dtd w3 html//", + "-//w3o//dtd w3 html 3.0//", + "-//webtechs//dtd mozilla html 2.0//", + "-//webtechs//dtd mozilla html//", +} diff --git a/vendor/golang.org/x/net/html/entity.go b/vendor/golang.org/x/net/html/entity.go new file mode 100644 index 00000000000..b628880a014 --- /dev/null +++ b/vendor/golang.org/x/net/html/entity.go @@ -0,0 +1,2253 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +// All entities that do not end with ';' are 6 or fewer bytes long. +const longestEntityWithoutSemicolon = 6 + +// entity is a map from HTML entity names to their values. The semicolon matters: +// https://html.spec.whatwg.org/multipage/syntax.html#named-character-references +// lists both "amp" and "amp;" as two separate entries. 
+// +// Note that the HTML5 list is larger than the HTML4 list at +// http://www.w3.org/TR/html4/sgml/entities.html +var entity = map[string]rune{ + "AElig;": '\U000000C6', + "AMP;": '\U00000026', + "Aacute;": '\U000000C1', + "Abreve;": '\U00000102', + "Acirc;": '\U000000C2', + "Acy;": '\U00000410', + "Afr;": '\U0001D504', + "Agrave;": '\U000000C0', + "Alpha;": '\U00000391', + "Amacr;": '\U00000100', + "And;": '\U00002A53', + "Aogon;": '\U00000104', + "Aopf;": '\U0001D538', + "ApplyFunction;": '\U00002061', + "Aring;": '\U000000C5', + "Ascr;": '\U0001D49C', + "Assign;": '\U00002254', + "Atilde;": '\U000000C3', + "Auml;": '\U000000C4', + "Backslash;": '\U00002216', + "Barv;": '\U00002AE7', + "Barwed;": '\U00002306', + "Bcy;": '\U00000411', + "Because;": '\U00002235', + "Bernoullis;": '\U0000212C', + "Beta;": '\U00000392', + "Bfr;": '\U0001D505', + "Bopf;": '\U0001D539', + "Breve;": '\U000002D8', + "Bscr;": '\U0000212C', + "Bumpeq;": '\U0000224E', + "CHcy;": '\U00000427', + "COPY;": '\U000000A9', + "Cacute;": '\U00000106', + "Cap;": '\U000022D2', + "CapitalDifferentialD;": '\U00002145', + "Cayleys;": '\U0000212D', + "Ccaron;": '\U0000010C', + "Ccedil;": '\U000000C7', + "Ccirc;": '\U00000108', + "Cconint;": '\U00002230', + "Cdot;": '\U0000010A', + "Cedilla;": '\U000000B8', + "CenterDot;": '\U000000B7', + "Cfr;": '\U0000212D', + "Chi;": '\U000003A7', + "CircleDot;": '\U00002299', + "CircleMinus;": '\U00002296', + "CirclePlus;": '\U00002295', + "CircleTimes;": '\U00002297', + "ClockwiseContourIntegral;": '\U00002232', + "CloseCurlyDoubleQuote;": '\U0000201D', + "CloseCurlyQuote;": '\U00002019', + "Colon;": '\U00002237', + "Colone;": '\U00002A74', + "Congruent;": '\U00002261', + "Conint;": '\U0000222F', + "ContourIntegral;": '\U0000222E', + "Copf;": '\U00002102', + "Coproduct;": '\U00002210', + "CounterClockwiseContourIntegral;": '\U00002233', + "Cross;": '\U00002A2F', + "Cscr;": '\U0001D49E', + "Cup;": '\U000022D3', + "CupCap;": '\U0000224D', + "DD;": '\U00002145', + "DDotrahd;": '\U00002911', + "DJcy;": '\U00000402', + "DScy;": '\U00000405', + "DZcy;": '\U0000040F', + "Dagger;": '\U00002021', + "Darr;": '\U000021A1', + "Dashv;": '\U00002AE4', + "Dcaron;": '\U0000010E', + "Dcy;": '\U00000414', + "Del;": '\U00002207', + "Delta;": '\U00000394', + "Dfr;": '\U0001D507', + "DiacriticalAcute;": '\U000000B4', + "DiacriticalDot;": '\U000002D9', + "DiacriticalDoubleAcute;": '\U000002DD', + "DiacriticalGrave;": '\U00000060', + "DiacriticalTilde;": '\U000002DC', + "Diamond;": '\U000022C4', + "DifferentialD;": '\U00002146', + "Dopf;": '\U0001D53B', + "Dot;": '\U000000A8', + "DotDot;": '\U000020DC', + "DotEqual;": '\U00002250', + "DoubleContourIntegral;": '\U0000222F', + "DoubleDot;": '\U000000A8', + "DoubleDownArrow;": '\U000021D3', + "DoubleLeftArrow;": '\U000021D0', + "DoubleLeftRightArrow;": '\U000021D4', + "DoubleLeftTee;": '\U00002AE4', + "DoubleLongLeftArrow;": '\U000027F8', + "DoubleLongLeftRightArrow;": '\U000027FA', + "DoubleLongRightArrow;": '\U000027F9', + "DoubleRightArrow;": '\U000021D2', + "DoubleRightTee;": '\U000022A8', + "DoubleUpArrow;": '\U000021D1', + "DoubleUpDownArrow;": '\U000021D5', + "DoubleVerticalBar;": '\U00002225', + "DownArrow;": '\U00002193', + "DownArrowBar;": '\U00002913', + "DownArrowUpArrow;": '\U000021F5', + "DownBreve;": '\U00000311', + "DownLeftRightVector;": '\U00002950', + "DownLeftTeeVector;": '\U0000295E', + "DownLeftVector;": '\U000021BD', + "DownLeftVectorBar;": '\U00002956', + "DownRightTeeVector;": '\U0000295F', + "DownRightVector;": '\U000021C1', + 
"DownRightVectorBar;": '\U00002957', + "DownTee;": '\U000022A4', + "DownTeeArrow;": '\U000021A7', + "Downarrow;": '\U000021D3', + "Dscr;": '\U0001D49F', + "Dstrok;": '\U00000110', + "ENG;": '\U0000014A', + "ETH;": '\U000000D0', + "Eacute;": '\U000000C9', + "Ecaron;": '\U0000011A', + "Ecirc;": '\U000000CA', + "Ecy;": '\U0000042D', + "Edot;": '\U00000116', + "Efr;": '\U0001D508', + "Egrave;": '\U000000C8', + "Element;": '\U00002208', + "Emacr;": '\U00000112', + "EmptySmallSquare;": '\U000025FB', + "EmptyVerySmallSquare;": '\U000025AB', + "Eogon;": '\U00000118', + "Eopf;": '\U0001D53C', + "Epsilon;": '\U00000395', + "Equal;": '\U00002A75', + "EqualTilde;": '\U00002242', + "Equilibrium;": '\U000021CC', + "Escr;": '\U00002130', + "Esim;": '\U00002A73', + "Eta;": '\U00000397', + "Euml;": '\U000000CB', + "Exists;": '\U00002203', + "ExponentialE;": '\U00002147', + "Fcy;": '\U00000424', + "Ffr;": '\U0001D509', + "FilledSmallSquare;": '\U000025FC', + "FilledVerySmallSquare;": '\U000025AA', + "Fopf;": '\U0001D53D', + "ForAll;": '\U00002200', + "Fouriertrf;": '\U00002131', + "Fscr;": '\U00002131', + "GJcy;": '\U00000403', + "GT;": '\U0000003E', + "Gamma;": '\U00000393', + "Gammad;": '\U000003DC', + "Gbreve;": '\U0000011E', + "Gcedil;": '\U00000122', + "Gcirc;": '\U0000011C', + "Gcy;": '\U00000413', + "Gdot;": '\U00000120', + "Gfr;": '\U0001D50A', + "Gg;": '\U000022D9', + "Gopf;": '\U0001D53E', + "GreaterEqual;": '\U00002265', + "GreaterEqualLess;": '\U000022DB', + "GreaterFullEqual;": '\U00002267', + "GreaterGreater;": '\U00002AA2', + "GreaterLess;": '\U00002277', + "GreaterSlantEqual;": '\U00002A7E', + "GreaterTilde;": '\U00002273', + "Gscr;": '\U0001D4A2', + "Gt;": '\U0000226B', + "HARDcy;": '\U0000042A', + "Hacek;": '\U000002C7', + "Hat;": '\U0000005E', + "Hcirc;": '\U00000124', + "Hfr;": '\U0000210C', + "HilbertSpace;": '\U0000210B', + "Hopf;": '\U0000210D', + "HorizontalLine;": '\U00002500', + "Hscr;": '\U0000210B', + "Hstrok;": '\U00000126', + "HumpDownHump;": '\U0000224E', + "HumpEqual;": '\U0000224F', + "IEcy;": '\U00000415', + "IJlig;": '\U00000132', + "IOcy;": '\U00000401', + "Iacute;": '\U000000CD', + "Icirc;": '\U000000CE', + "Icy;": '\U00000418', + "Idot;": '\U00000130', + "Ifr;": '\U00002111', + "Igrave;": '\U000000CC', + "Im;": '\U00002111', + "Imacr;": '\U0000012A', + "ImaginaryI;": '\U00002148', + "Implies;": '\U000021D2', + "Int;": '\U0000222C', + "Integral;": '\U0000222B', + "Intersection;": '\U000022C2', + "InvisibleComma;": '\U00002063', + "InvisibleTimes;": '\U00002062', + "Iogon;": '\U0000012E', + "Iopf;": '\U0001D540', + "Iota;": '\U00000399', + "Iscr;": '\U00002110', + "Itilde;": '\U00000128', + "Iukcy;": '\U00000406', + "Iuml;": '\U000000CF', + "Jcirc;": '\U00000134', + "Jcy;": '\U00000419', + "Jfr;": '\U0001D50D', + "Jopf;": '\U0001D541', + "Jscr;": '\U0001D4A5', + "Jsercy;": '\U00000408', + "Jukcy;": '\U00000404', + "KHcy;": '\U00000425', + "KJcy;": '\U0000040C', + "Kappa;": '\U0000039A', + "Kcedil;": '\U00000136', + "Kcy;": '\U0000041A', + "Kfr;": '\U0001D50E', + "Kopf;": '\U0001D542', + "Kscr;": '\U0001D4A6', + "LJcy;": '\U00000409', + "LT;": '\U0000003C', + "Lacute;": '\U00000139', + "Lambda;": '\U0000039B', + "Lang;": '\U000027EA', + "Laplacetrf;": '\U00002112', + "Larr;": '\U0000219E', + "Lcaron;": '\U0000013D', + "Lcedil;": '\U0000013B', + "Lcy;": '\U0000041B', + "LeftAngleBracket;": '\U000027E8', + "LeftArrow;": '\U00002190', + "LeftArrowBar;": '\U000021E4', + "LeftArrowRightArrow;": '\U000021C6', + "LeftCeiling;": '\U00002308', + "LeftDoubleBracket;": '\U000027E6', 
+ "LeftDownTeeVector;": '\U00002961', + "LeftDownVector;": '\U000021C3', + "LeftDownVectorBar;": '\U00002959', + "LeftFloor;": '\U0000230A', + "LeftRightArrow;": '\U00002194', + "LeftRightVector;": '\U0000294E', + "LeftTee;": '\U000022A3', + "LeftTeeArrow;": '\U000021A4', + "LeftTeeVector;": '\U0000295A', + "LeftTriangle;": '\U000022B2', + "LeftTriangleBar;": '\U000029CF', + "LeftTriangleEqual;": '\U000022B4', + "LeftUpDownVector;": '\U00002951', + "LeftUpTeeVector;": '\U00002960', + "LeftUpVector;": '\U000021BF', + "LeftUpVectorBar;": '\U00002958', + "LeftVector;": '\U000021BC', + "LeftVectorBar;": '\U00002952', + "Leftarrow;": '\U000021D0', + "Leftrightarrow;": '\U000021D4', + "LessEqualGreater;": '\U000022DA', + "LessFullEqual;": '\U00002266', + "LessGreater;": '\U00002276', + "LessLess;": '\U00002AA1', + "LessSlantEqual;": '\U00002A7D', + "LessTilde;": '\U00002272', + "Lfr;": '\U0001D50F', + "Ll;": '\U000022D8', + "Lleftarrow;": '\U000021DA', + "Lmidot;": '\U0000013F', + "LongLeftArrow;": '\U000027F5', + "LongLeftRightArrow;": '\U000027F7', + "LongRightArrow;": '\U000027F6', + "Longleftarrow;": '\U000027F8', + "Longleftrightarrow;": '\U000027FA', + "Longrightarrow;": '\U000027F9', + "Lopf;": '\U0001D543', + "LowerLeftArrow;": '\U00002199', + "LowerRightArrow;": '\U00002198', + "Lscr;": '\U00002112', + "Lsh;": '\U000021B0', + "Lstrok;": '\U00000141', + "Lt;": '\U0000226A', + "Map;": '\U00002905', + "Mcy;": '\U0000041C', + "MediumSpace;": '\U0000205F', + "Mellintrf;": '\U00002133', + "Mfr;": '\U0001D510', + "MinusPlus;": '\U00002213', + "Mopf;": '\U0001D544', + "Mscr;": '\U00002133', + "Mu;": '\U0000039C', + "NJcy;": '\U0000040A', + "Nacute;": '\U00000143', + "Ncaron;": '\U00000147', + "Ncedil;": '\U00000145', + "Ncy;": '\U0000041D', + "NegativeMediumSpace;": '\U0000200B', + "NegativeThickSpace;": '\U0000200B', + "NegativeThinSpace;": '\U0000200B', + "NegativeVeryThinSpace;": '\U0000200B', + "NestedGreaterGreater;": '\U0000226B', + "NestedLessLess;": '\U0000226A', + "NewLine;": '\U0000000A', + "Nfr;": '\U0001D511', + "NoBreak;": '\U00002060', + "NonBreakingSpace;": '\U000000A0', + "Nopf;": '\U00002115', + "Not;": '\U00002AEC', + "NotCongruent;": '\U00002262', + "NotCupCap;": '\U0000226D', + "NotDoubleVerticalBar;": '\U00002226', + "NotElement;": '\U00002209', + "NotEqual;": '\U00002260', + "NotExists;": '\U00002204', + "NotGreater;": '\U0000226F', + "NotGreaterEqual;": '\U00002271', + "NotGreaterLess;": '\U00002279', + "NotGreaterTilde;": '\U00002275', + "NotLeftTriangle;": '\U000022EA', + "NotLeftTriangleEqual;": '\U000022EC', + "NotLess;": '\U0000226E', + "NotLessEqual;": '\U00002270', + "NotLessGreater;": '\U00002278', + "NotLessTilde;": '\U00002274', + "NotPrecedes;": '\U00002280', + "NotPrecedesSlantEqual;": '\U000022E0', + "NotReverseElement;": '\U0000220C', + "NotRightTriangle;": '\U000022EB', + "NotRightTriangleEqual;": '\U000022ED', + "NotSquareSubsetEqual;": '\U000022E2', + "NotSquareSupersetEqual;": '\U000022E3', + "NotSubsetEqual;": '\U00002288', + "NotSucceeds;": '\U00002281', + "NotSucceedsSlantEqual;": '\U000022E1', + "NotSupersetEqual;": '\U00002289', + "NotTilde;": '\U00002241', + "NotTildeEqual;": '\U00002244', + "NotTildeFullEqual;": '\U00002247', + "NotTildeTilde;": '\U00002249', + "NotVerticalBar;": '\U00002224', + "Nscr;": '\U0001D4A9', + "Ntilde;": '\U000000D1', + "Nu;": '\U0000039D', + "OElig;": '\U00000152', + "Oacute;": '\U000000D3', + "Ocirc;": '\U000000D4', + "Ocy;": '\U0000041E', + "Odblac;": '\U00000150', + "Ofr;": '\U0001D512', + "Ograve;": '\U000000D2', + 
"Omacr;": '\U0000014C', + "Omega;": '\U000003A9', + "Omicron;": '\U0000039F', + "Oopf;": '\U0001D546', + "OpenCurlyDoubleQuote;": '\U0000201C', + "OpenCurlyQuote;": '\U00002018', + "Or;": '\U00002A54', + "Oscr;": '\U0001D4AA', + "Oslash;": '\U000000D8', + "Otilde;": '\U000000D5', + "Otimes;": '\U00002A37', + "Ouml;": '\U000000D6', + "OverBar;": '\U0000203E', + "OverBrace;": '\U000023DE', + "OverBracket;": '\U000023B4', + "OverParenthesis;": '\U000023DC', + "PartialD;": '\U00002202', + "Pcy;": '\U0000041F', + "Pfr;": '\U0001D513', + "Phi;": '\U000003A6', + "Pi;": '\U000003A0', + "PlusMinus;": '\U000000B1', + "Poincareplane;": '\U0000210C', + "Popf;": '\U00002119', + "Pr;": '\U00002ABB', + "Precedes;": '\U0000227A', + "PrecedesEqual;": '\U00002AAF', + "PrecedesSlantEqual;": '\U0000227C', + "PrecedesTilde;": '\U0000227E', + "Prime;": '\U00002033', + "Product;": '\U0000220F', + "Proportion;": '\U00002237', + "Proportional;": '\U0000221D', + "Pscr;": '\U0001D4AB', + "Psi;": '\U000003A8', + "QUOT;": '\U00000022', + "Qfr;": '\U0001D514', + "Qopf;": '\U0000211A', + "Qscr;": '\U0001D4AC', + "RBarr;": '\U00002910', + "REG;": '\U000000AE', + "Racute;": '\U00000154', + "Rang;": '\U000027EB', + "Rarr;": '\U000021A0', + "Rarrtl;": '\U00002916', + "Rcaron;": '\U00000158', + "Rcedil;": '\U00000156', + "Rcy;": '\U00000420', + "Re;": '\U0000211C', + "ReverseElement;": '\U0000220B', + "ReverseEquilibrium;": '\U000021CB', + "ReverseUpEquilibrium;": '\U0000296F', + "Rfr;": '\U0000211C', + "Rho;": '\U000003A1', + "RightAngleBracket;": '\U000027E9', + "RightArrow;": '\U00002192', + "RightArrowBar;": '\U000021E5', + "RightArrowLeftArrow;": '\U000021C4', + "RightCeiling;": '\U00002309', + "RightDoubleBracket;": '\U000027E7', + "RightDownTeeVector;": '\U0000295D', + "RightDownVector;": '\U000021C2', + "RightDownVectorBar;": '\U00002955', + "RightFloor;": '\U0000230B', + "RightTee;": '\U000022A2', + "RightTeeArrow;": '\U000021A6', + "RightTeeVector;": '\U0000295B', + "RightTriangle;": '\U000022B3', + "RightTriangleBar;": '\U000029D0', + "RightTriangleEqual;": '\U000022B5', + "RightUpDownVector;": '\U0000294F', + "RightUpTeeVector;": '\U0000295C', + "RightUpVector;": '\U000021BE', + "RightUpVectorBar;": '\U00002954', + "RightVector;": '\U000021C0', + "RightVectorBar;": '\U00002953', + "Rightarrow;": '\U000021D2', + "Ropf;": '\U0000211D', + "RoundImplies;": '\U00002970', + "Rrightarrow;": '\U000021DB', + "Rscr;": '\U0000211B', + "Rsh;": '\U000021B1', + "RuleDelayed;": '\U000029F4', + "SHCHcy;": '\U00000429', + "SHcy;": '\U00000428', + "SOFTcy;": '\U0000042C', + "Sacute;": '\U0000015A', + "Sc;": '\U00002ABC', + "Scaron;": '\U00000160', + "Scedil;": '\U0000015E', + "Scirc;": '\U0000015C', + "Scy;": '\U00000421', + "Sfr;": '\U0001D516', + "ShortDownArrow;": '\U00002193', + "ShortLeftArrow;": '\U00002190', + "ShortRightArrow;": '\U00002192', + "ShortUpArrow;": '\U00002191', + "Sigma;": '\U000003A3', + "SmallCircle;": '\U00002218', + "Sopf;": '\U0001D54A', + "Sqrt;": '\U0000221A', + "Square;": '\U000025A1', + "SquareIntersection;": '\U00002293', + "SquareSubset;": '\U0000228F', + "SquareSubsetEqual;": '\U00002291', + "SquareSuperset;": '\U00002290', + "SquareSupersetEqual;": '\U00002292', + "SquareUnion;": '\U00002294', + "Sscr;": '\U0001D4AE', + "Star;": '\U000022C6', + "Sub;": '\U000022D0', + "Subset;": '\U000022D0', + "SubsetEqual;": '\U00002286', + "Succeeds;": '\U0000227B', + "SucceedsEqual;": '\U00002AB0', + "SucceedsSlantEqual;": '\U0000227D', + "SucceedsTilde;": '\U0000227F', + "SuchThat;": '\U0000220B', + "Sum;": 
'\U00002211', + "Sup;": '\U000022D1', + "Superset;": '\U00002283', + "SupersetEqual;": '\U00002287', + "Supset;": '\U000022D1', + "THORN;": '\U000000DE', + "TRADE;": '\U00002122', + "TSHcy;": '\U0000040B', + "TScy;": '\U00000426', + "Tab;": '\U00000009', + "Tau;": '\U000003A4', + "Tcaron;": '\U00000164', + "Tcedil;": '\U00000162', + "Tcy;": '\U00000422', + "Tfr;": '\U0001D517', + "Therefore;": '\U00002234', + "Theta;": '\U00000398', + "ThinSpace;": '\U00002009', + "Tilde;": '\U0000223C', + "TildeEqual;": '\U00002243', + "TildeFullEqual;": '\U00002245', + "TildeTilde;": '\U00002248', + "Topf;": '\U0001D54B', + "TripleDot;": '\U000020DB', + "Tscr;": '\U0001D4AF', + "Tstrok;": '\U00000166', + "Uacute;": '\U000000DA', + "Uarr;": '\U0000219F', + "Uarrocir;": '\U00002949', + "Ubrcy;": '\U0000040E', + "Ubreve;": '\U0000016C', + "Ucirc;": '\U000000DB', + "Ucy;": '\U00000423', + "Udblac;": '\U00000170', + "Ufr;": '\U0001D518', + "Ugrave;": '\U000000D9', + "Umacr;": '\U0000016A', + "UnderBar;": '\U0000005F', + "UnderBrace;": '\U000023DF', + "UnderBracket;": '\U000023B5', + "UnderParenthesis;": '\U000023DD', + "Union;": '\U000022C3', + "UnionPlus;": '\U0000228E', + "Uogon;": '\U00000172', + "Uopf;": '\U0001D54C', + "UpArrow;": '\U00002191', + "UpArrowBar;": '\U00002912', + "UpArrowDownArrow;": '\U000021C5', + "UpDownArrow;": '\U00002195', + "UpEquilibrium;": '\U0000296E', + "UpTee;": '\U000022A5', + "UpTeeArrow;": '\U000021A5', + "Uparrow;": '\U000021D1', + "Updownarrow;": '\U000021D5', + "UpperLeftArrow;": '\U00002196', + "UpperRightArrow;": '\U00002197', + "Upsi;": '\U000003D2', + "Upsilon;": '\U000003A5', + "Uring;": '\U0000016E', + "Uscr;": '\U0001D4B0', + "Utilde;": '\U00000168', + "Uuml;": '\U000000DC', + "VDash;": '\U000022AB', + "Vbar;": '\U00002AEB', + "Vcy;": '\U00000412', + "Vdash;": '\U000022A9', + "Vdashl;": '\U00002AE6', + "Vee;": '\U000022C1', + "Verbar;": '\U00002016', + "Vert;": '\U00002016', + "VerticalBar;": '\U00002223', + "VerticalLine;": '\U0000007C', + "VerticalSeparator;": '\U00002758', + "VerticalTilde;": '\U00002240', + "VeryThinSpace;": '\U0000200A', + "Vfr;": '\U0001D519', + "Vopf;": '\U0001D54D', + "Vscr;": '\U0001D4B1', + "Vvdash;": '\U000022AA', + "Wcirc;": '\U00000174', + "Wedge;": '\U000022C0', + "Wfr;": '\U0001D51A', + "Wopf;": '\U0001D54E', + "Wscr;": '\U0001D4B2', + "Xfr;": '\U0001D51B', + "Xi;": '\U0000039E', + "Xopf;": '\U0001D54F', + "Xscr;": '\U0001D4B3', + "YAcy;": '\U0000042F', + "YIcy;": '\U00000407', + "YUcy;": '\U0000042E', + "Yacute;": '\U000000DD', + "Ycirc;": '\U00000176', + "Ycy;": '\U0000042B', + "Yfr;": '\U0001D51C', + "Yopf;": '\U0001D550', + "Yscr;": '\U0001D4B4', + "Yuml;": '\U00000178', + "ZHcy;": '\U00000416', + "Zacute;": '\U00000179', + "Zcaron;": '\U0000017D', + "Zcy;": '\U00000417', + "Zdot;": '\U0000017B', + "ZeroWidthSpace;": '\U0000200B', + "Zeta;": '\U00000396', + "Zfr;": '\U00002128', + "Zopf;": '\U00002124', + "Zscr;": '\U0001D4B5', + "aacute;": '\U000000E1', + "abreve;": '\U00000103', + "ac;": '\U0000223E', + "acd;": '\U0000223F', + "acirc;": '\U000000E2', + "acute;": '\U000000B4', + "acy;": '\U00000430', + "aelig;": '\U000000E6', + "af;": '\U00002061', + "afr;": '\U0001D51E', + "agrave;": '\U000000E0', + "alefsym;": '\U00002135', + "aleph;": '\U00002135', + "alpha;": '\U000003B1', + "amacr;": '\U00000101', + "amalg;": '\U00002A3F', + "amp;": '\U00000026', + "and;": '\U00002227', + "andand;": '\U00002A55', + "andd;": '\U00002A5C', + "andslope;": '\U00002A58', + "andv;": '\U00002A5A', + "ang;": '\U00002220', + "ange;": '\U000029A4', + 
"angle;": '\U00002220', + "angmsd;": '\U00002221', + "angmsdaa;": '\U000029A8', + "angmsdab;": '\U000029A9', + "angmsdac;": '\U000029AA', + "angmsdad;": '\U000029AB', + "angmsdae;": '\U000029AC', + "angmsdaf;": '\U000029AD', + "angmsdag;": '\U000029AE', + "angmsdah;": '\U000029AF', + "angrt;": '\U0000221F', + "angrtvb;": '\U000022BE', + "angrtvbd;": '\U0000299D', + "angsph;": '\U00002222', + "angst;": '\U000000C5', + "angzarr;": '\U0000237C', + "aogon;": '\U00000105', + "aopf;": '\U0001D552', + "ap;": '\U00002248', + "apE;": '\U00002A70', + "apacir;": '\U00002A6F', + "ape;": '\U0000224A', + "apid;": '\U0000224B', + "apos;": '\U00000027', + "approx;": '\U00002248', + "approxeq;": '\U0000224A', + "aring;": '\U000000E5', + "ascr;": '\U0001D4B6', + "ast;": '\U0000002A', + "asymp;": '\U00002248', + "asympeq;": '\U0000224D', + "atilde;": '\U000000E3', + "auml;": '\U000000E4', + "awconint;": '\U00002233', + "awint;": '\U00002A11', + "bNot;": '\U00002AED', + "backcong;": '\U0000224C', + "backepsilon;": '\U000003F6', + "backprime;": '\U00002035', + "backsim;": '\U0000223D', + "backsimeq;": '\U000022CD', + "barvee;": '\U000022BD', + "barwed;": '\U00002305', + "barwedge;": '\U00002305', + "bbrk;": '\U000023B5', + "bbrktbrk;": '\U000023B6', + "bcong;": '\U0000224C', + "bcy;": '\U00000431', + "bdquo;": '\U0000201E', + "becaus;": '\U00002235', + "because;": '\U00002235', + "bemptyv;": '\U000029B0', + "bepsi;": '\U000003F6', + "bernou;": '\U0000212C', + "beta;": '\U000003B2', + "beth;": '\U00002136', + "between;": '\U0000226C', + "bfr;": '\U0001D51F', + "bigcap;": '\U000022C2', + "bigcirc;": '\U000025EF', + "bigcup;": '\U000022C3', + "bigodot;": '\U00002A00', + "bigoplus;": '\U00002A01', + "bigotimes;": '\U00002A02', + "bigsqcup;": '\U00002A06', + "bigstar;": '\U00002605', + "bigtriangledown;": '\U000025BD', + "bigtriangleup;": '\U000025B3', + "biguplus;": '\U00002A04', + "bigvee;": '\U000022C1', + "bigwedge;": '\U000022C0', + "bkarow;": '\U0000290D', + "blacklozenge;": '\U000029EB', + "blacksquare;": '\U000025AA', + "blacktriangle;": '\U000025B4', + "blacktriangledown;": '\U000025BE', + "blacktriangleleft;": '\U000025C2', + "blacktriangleright;": '\U000025B8', + "blank;": '\U00002423', + "blk12;": '\U00002592', + "blk14;": '\U00002591', + "blk34;": '\U00002593', + "block;": '\U00002588', + "bnot;": '\U00002310', + "bopf;": '\U0001D553', + "bot;": '\U000022A5', + "bottom;": '\U000022A5', + "bowtie;": '\U000022C8', + "boxDL;": '\U00002557', + "boxDR;": '\U00002554', + "boxDl;": '\U00002556', + "boxDr;": '\U00002553', + "boxH;": '\U00002550', + "boxHD;": '\U00002566', + "boxHU;": '\U00002569', + "boxHd;": '\U00002564', + "boxHu;": '\U00002567', + "boxUL;": '\U0000255D', + "boxUR;": '\U0000255A', + "boxUl;": '\U0000255C', + "boxUr;": '\U00002559', + "boxV;": '\U00002551', + "boxVH;": '\U0000256C', + "boxVL;": '\U00002563', + "boxVR;": '\U00002560', + "boxVh;": '\U0000256B', + "boxVl;": '\U00002562', + "boxVr;": '\U0000255F', + "boxbox;": '\U000029C9', + "boxdL;": '\U00002555', + "boxdR;": '\U00002552', + "boxdl;": '\U00002510', + "boxdr;": '\U0000250C', + "boxh;": '\U00002500', + "boxhD;": '\U00002565', + "boxhU;": '\U00002568', + "boxhd;": '\U0000252C', + "boxhu;": '\U00002534', + "boxminus;": '\U0000229F', + "boxplus;": '\U0000229E', + "boxtimes;": '\U000022A0', + "boxuL;": '\U0000255B', + "boxuR;": '\U00002558', + "boxul;": '\U00002518', + "boxur;": '\U00002514', + "boxv;": '\U00002502', + "boxvH;": '\U0000256A', + "boxvL;": '\U00002561', + "boxvR;": '\U0000255E', + "boxvh;": '\U0000253C', + "boxvl;": 
'\U00002524', + "boxvr;": '\U0000251C', + "bprime;": '\U00002035', + "breve;": '\U000002D8', + "brvbar;": '\U000000A6', + "bscr;": '\U0001D4B7', + "bsemi;": '\U0000204F', + "bsim;": '\U0000223D', + "bsime;": '\U000022CD', + "bsol;": '\U0000005C', + "bsolb;": '\U000029C5', + "bsolhsub;": '\U000027C8', + "bull;": '\U00002022', + "bullet;": '\U00002022', + "bump;": '\U0000224E', + "bumpE;": '\U00002AAE', + "bumpe;": '\U0000224F', + "bumpeq;": '\U0000224F', + "cacute;": '\U00000107', + "cap;": '\U00002229', + "capand;": '\U00002A44', + "capbrcup;": '\U00002A49', + "capcap;": '\U00002A4B', + "capcup;": '\U00002A47', + "capdot;": '\U00002A40', + "caret;": '\U00002041', + "caron;": '\U000002C7', + "ccaps;": '\U00002A4D', + "ccaron;": '\U0000010D', + "ccedil;": '\U000000E7', + "ccirc;": '\U00000109', + "ccups;": '\U00002A4C', + "ccupssm;": '\U00002A50', + "cdot;": '\U0000010B', + "cedil;": '\U000000B8', + "cemptyv;": '\U000029B2', + "cent;": '\U000000A2', + "centerdot;": '\U000000B7', + "cfr;": '\U0001D520', + "chcy;": '\U00000447', + "check;": '\U00002713', + "checkmark;": '\U00002713', + "chi;": '\U000003C7', + "cir;": '\U000025CB', + "cirE;": '\U000029C3', + "circ;": '\U000002C6', + "circeq;": '\U00002257', + "circlearrowleft;": '\U000021BA', + "circlearrowright;": '\U000021BB', + "circledR;": '\U000000AE', + "circledS;": '\U000024C8', + "circledast;": '\U0000229B', + "circledcirc;": '\U0000229A', + "circleddash;": '\U0000229D', + "cire;": '\U00002257', + "cirfnint;": '\U00002A10', + "cirmid;": '\U00002AEF', + "cirscir;": '\U000029C2', + "clubs;": '\U00002663', + "clubsuit;": '\U00002663', + "colon;": '\U0000003A', + "colone;": '\U00002254', + "coloneq;": '\U00002254', + "comma;": '\U0000002C', + "commat;": '\U00000040', + "comp;": '\U00002201', + "compfn;": '\U00002218', + "complement;": '\U00002201', + "complexes;": '\U00002102', + "cong;": '\U00002245', + "congdot;": '\U00002A6D', + "conint;": '\U0000222E', + "copf;": '\U0001D554', + "coprod;": '\U00002210', + "copy;": '\U000000A9', + "copysr;": '\U00002117', + "crarr;": '\U000021B5', + "cross;": '\U00002717', + "cscr;": '\U0001D4B8', + "csub;": '\U00002ACF', + "csube;": '\U00002AD1', + "csup;": '\U00002AD0', + "csupe;": '\U00002AD2', + "ctdot;": '\U000022EF', + "cudarrl;": '\U00002938', + "cudarrr;": '\U00002935', + "cuepr;": '\U000022DE', + "cuesc;": '\U000022DF', + "cularr;": '\U000021B6', + "cularrp;": '\U0000293D', + "cup;": '\U0000222A', + "cupbrcap;": '\U00002A48', + "cupcap;": '\U00002A46', + "cupcup;": '\U00002A4A', + "cupdot;": '\U0000228D', + "cupor;": '\U00002A45', + "curarr;": '\U000021B7', + "curarrm;": '\U0000293C', + "curlyeqprec;": '\U000022DE', + "curlyeqsucc;": '\U000022DF', + "curlyvee;": '\U000022CE', + "curlywedge;": '\U000022CF', + "curren;": '\U000000A4', + "curvearrowleft;": '\U000021B6', + "curvearrowright;": '\U000021B7', + "cuvee;": '\U000022CE', + "cuwed;": '\U000022CF', + "cwconint;": '\U00002232', + "cwint;": '\U00002231', + "cylcty;": '\U0000232D', + "dArr;": '\U000021D3', + "dHar;": '\U00002965', + "dagger;": '\U00002020', + "daleth;": '\U00002138', + "darr;": '\U00002193', + "dash;": '\U00002010', + "dashv;": '\U000022A3', + "dbkarow;": '\U0000290F', + "dblac;": '\U000002DD', + "dcaron;": '\U0000010F', + "dcy;": '\U00000434', + "dd;": '\U00002146', + "ddagger;": '\U00002021', + "ddarr;": '\U000021CA', + "ddotseq;": '\U00002A77', + "deg;": '\U000000B0', + "delta;": '\U000003B4', + "demptyv;": '\U000029B1', + "dfisht;": '\U0000297F', + "dfr;": '\U0001D521', + "dharl;": '\U000021C3', + "dharr;": '\U000021C2', + 
"diam;": '\U000022C4', + "diamond;": '\U000022C4', + "diamondsuit;": '\U00002666', + "diams;": '\U00002666', + "die;": '\U000000A8', + "digamma;": '\U000003DD', + "disin;": '\U000022F2', + "div;": '\U000000F7', + "divide;": '\U000000F7', + "divideontimes;": '\U000022C7', + "divonx;": '\U000022C7', + "djcy;": '\U00000452', + "dlcorn;": '\U0000231E', + "dlcrop;": '\U0000230D', + "dollar;": '\U00000024', + "dopf;": '\U0001D555', + "dot;": '\U000002D9', + "doteq;": '\U00002250', + "doteqdot;": '\U00002251', + "dotminus;": '\U00002238', + "dotplus;": '\U00002214', + "dotsquare;": '\U000022A1', + "doublebarwedge;": '\U00002306', + "downarrow;": '\U00002193', + "downdownarrows;": '\U000021CA', + "downharpoonleft;": '\U000021C3', + "downharpoonright;": '\U000021C2', + "drbkarow;": '\U00002910', + "drcorn;": '\U0000231F', + "drcrop;": '\U0000230C', + "dscr;": '\U0001D4B9', + "dscy;": '\U00000455', + "dsol;": '\U000029F6', + "dstrok;": '\U00000111', + "dtdot;": '\U000022F1', + "dtri;": '\U000025BF', + "dtrif;": '\U000025BE', + "duarr;": '\U000021F5', + "duhar;": '\U0000296F', + "dwangle;": '\U000029A6', + "dzcy;": '\U0000045F', + "dzigrarr;": '\U000027FF', + "eDDot;": '\U00002A77', + "eDot;": '\U00002251', + "eacute;": '\U000000E9', + "easter;": '\U00002A6E', + "ecaron;": '\U0000011B', + "ecir;": '\U00002256', + "ecirc;": '\U000000EA', + "ecolon;": '\U00002255', + "ecy;": '\U0000044D', + "edot;": '\U00000117', + "ee;": '\U00002147', + "efDot;": '\U00002252', + "efr;": '\U0001D522', + "eg;": '\U00002A9A', + "egrave;": '\U000000E8', + "egs;": '\U00002A96', + "egsdot;": '\U00002A98', + "el;": '\U00002A99', + "elinters;": '\U000023E7', + "ell;": '\U00002113', + "els;": '\U00002A95', + "elsdot;": '\U00002A97', + "emacr;": '\U00000113', + "empty;": '\U00002205', + "emptyset;": '\U00002205', + "emptyv;": '\U00002205', + "emsp;": '\U00002003', + "emsp13;": '\U00002004', + "emsp14;": '\U00002005', + "eng;": '\U0000014B', + "ensp;": '\U00002002', + "eogon;": '\U00000119', + "eopf;": '\U0001D556', + "epar;": '\U000022D5', + "eparsl;": '\U000029E3', + "eplus;": '\U00002A71', + "epsi;": '\U000003B5', + "epsilon;": '\U000003B5', + "epsiv;": '\U000003F5', + "eqcirc;": '\U00002256', + "eqcolon;": '\U00002255', + "eqsim;": '\U00002242', + "eqslantgtr;": '\U00002A96', + "eqslantless;": '\U00002A95', + "equals;": '\U0000003D', + "equest;": '\U0000225F', + "equiv;": '\U00002261', + "equivDD;": '\U00002A78', + "eqvparsl;": '\U000029E5', + "erDot;": '\U00002253', + "erarr;": '\U00002971', + "escr;": '\U0000212F', + "esdot;": '\U00002250', + "esim;": '\U00002242', + "eta;": '\U000003B7', + "eth;": '\U000000F0', + "euml;": '\U000000EB', + "euro;": '\U000020AC', + "excl;": '\U00000021', + "exist;": '\U00002203', + "expectation;": '\U00002130', + "exponentiale;": '\U00002147', + "fallingdotseq;": '\U00002252', + "fcy;": '\U00000444', + "female;": '\U00002640', + "ffilig;": '\U0000FB03', + "fflig;": '\U0000FB00', + "ffllig;": '\U0000FB04', + "ffr;": '\U0001D523', + "filig;": '\U0000FB01', + "flat;": '\U0000266D', + "fllig;": '\U0000FB02', + "fltns;": '\U000025B1', + "fnof;": '\U00000192', + "fopf;": '\U0001D557', + "forall;": '\U00002200', + "fork;": '\U000022D4', + "forkv;": '\U00002AD9', + "fpartint;": '\U00002A0D', + "frac12;": '\U000000BD', + "frac13;": '\U00002153', + "frac14;": '\U000000BC', + "frac15;": '\U00002155', + "frac16;": '\U00002159', + "frac18;": '\U0000215B', + "frac23;": '\U00002154', + "frac25;": '\U00002156', + "frac34;": '\U000000BE', + "frac35;": '\U00002157', + "frac38;": '\U0000215C', + "frac45;": 
'\U00002158', + "frac56;": '\U0000215A', + "frac58;": '\U0000215D', + "frac78;": '\U0000215E', + "frasl;": '\U00002044', + "frown;": '\U00002322', + "fscr;": '\U0001D4BB', + "gE;": '\U00002267', + "gEl;": '\U00002A8C', + "gacute;": '\U000001F5', + "gamma;": '\U000003B3', + "gammad;": '\U000003DD', + "gap;": '\U00002A86', + "gbreve;": '\U0000011F', + "gcirc;": '\U0000011D', + "gcy;": '\U00000433', + "gdot;": '\U00000121', + "ge;": '\U00002265', + "gel;": '\U000022DB', + "geq;": '\U00002265', + "geqq;": '\U00002267', + "geqslant;": '\U00002A7E', + "ges;": '\U00002A7E', + "gescc;": '\U00002AA9', + "gesdot;": '\U00002A80', + "gesdoto;": '\U00002A82', + "gesdotol;": '\U00002A84', + "gesles;": '\U00002A94', + "gfr;": '\U0001D524', + "gg;": '\U0000226B', + "ggg;": '\U000022D9', + "gimel;": '\U00002137', + "gjcy;": '\U00000453', + "gl;": '\U00002277', + "glE;": '\U00002A92', + "gla;": '\U00002AA5', + "glj;": '\U00002AA4', + "gnE;": '\U00002269', + "gnap;": '\U00002A8A', + "gnapprox;": '\U00002A8A', + "gne;": '\U00002A88', + "gneq;": '\U00002A88', + "gneqq;": '\U00002269', + "gnsim;": '\U000022E7', + "gopf;": '\U0001D558', + "grave;": '\U00000060', + "gscr;": '\U0000210A', + "gsim;": '\U00002273', + "gsime;": '\U00002A8E', + "gsiml;": '\U00002A90', + "gt;": '\U0000003E', + "gtcc;": '\U00002AA7', + "gtcir;": '\U00002A7A', + "gtdot;": '\U000022D7', + "gtlPar;": '\U00002995', + "gtquest;": '\U00002A7C', + "gtrapprox;": '\U00002A86', + "gtrarr;": '\U00002978', + "gtrdot;": '\U000022D7', + "gtreqless;": '\U000022DB', + "gtreqqless;": '\U00002A8C', + "gtrless;": '\U00002277', + "gtrsim;": '\U00002273', + "hArr;": '\U000021D4', + "hairsp;": '\U0000200A', + "half;": '\U000000BD', + "hamilt;": '\U0000210B', + "hardcy;": '\U0000044A', + "harr;": '\U00002194', + "harrcir;": '\U00002948', + "harrw;": '\U000021AD', + "hbar;": '\U0000210F', + "hcirc;": '\U00000125', + "hearts;": '\U00002665', + "heartsuit;": '\U00002665', + "hellip;": '\U00002026', + "hercon;": '\U000022B9', + "hfr;": '\U0001D525', + "hksearow;": '\U00002925', + "hkswarow;": '\U00002926', + "hoarr;": '\U000021FF', + "homtht;": '\U0000223B', + "hookleftarrow;": '\U000021A9', + "hookrightarrow;": '\U000021AA', + "hopf;": '\U0001D559', + "horbar;": '\U00002015', + "hscr;": '\U0001D4BD', + "hslash;": '\U0000210F', + "hstrok;": '\U00000127', + "hybull;": '\U00002043', + "hyphen;": '\U00002010', + "iacute;": '\U000000ED', + "ic;": '\U00002063', + "icirc;": '\U000000EE', + "icy;": '\U00000438', + "iecy;": '\U00000435', + "iexcl;": '\U000000A1', + "iff;": '\U000021D4', + "ifr;": '\U0001D526', + "igrave;": '\U000000EC', + "ii;": '\U00002148', + "iiiint;": '\U00002A0C', + "iiint;": '\U0000222D', + "iinfin;": '\U000029DC', + "iiota;": '\U00002129', + "ijlig;": '\U00000133', + "imacr;": '\U0000012B', + "image;": '\U00002111', + "imagline;": '\U00002110', + "imagpart;": '\U00002111', + "imath;": '\U00000131', + "imof;": '\U000022B7', + "imped;": '\U000001B5', + "in;": '\U00002208', + "incare;": '\U00002105', + "infin;": '\U0000221E', + "infintie;": '\U000029DD', + "inodot;": '\U00000131', + "int;": '\U0000222B', + "intcal;": '\U000022BA', + "integers;": '\U00002124', + "intercal;": '\U000022BA', + "intlarhk;": '\U00002A17', + "intprod;": '\U00002A3C', + "iocy;": '\U00000451', + "iogon;": '\U0000012F', + "iopf;": '\U0001D55A', + "iota;": '\U000003B9', + "iprod;": '\U00002A3C', + "iquest;": '\U000000BF', + "iscr;": '\U0001D4BE', + "isin;": '\U00002208', + "isinE;": '\U000022F9', + "isindot;": '\U000022F5', + "isins;": '\U000022F4', + "isinsv;": '\U000022F3', + 
"isinv;": '\U00002208', + "it;": '\U00002062', + "itilde;": '\U00000129', + "iukcy;": '\U00000456', + "iuml;": '\U000000EF', + "jcirc;": '\U00000135', + "jcy;": '\U00000439', + "jfr;": '\U0001D527', + "jmath;": '\U00000237', + "jopf;": '\U0001D55B', + "jscr;": '\U0001D4BF', + "jsercy;": '\U00000458', + "jukcy;": '\U00000454', + "kappa;": '\U000003BA', + "kappav;": '\U000003F0', + "kcedil;": '\U00000137', + "kcy;": '\U0000043A', + "kfr;": '\U0001D528', + "kgreen;": '\U00000138', + "khcy;": '\U00000445', + "kjcy;": '\U0000045C', + "kopf;": '\U0001D55C', + "kscr;": '\U0001D4C0', + "lAarr;": '\U000021DA', + "lArr;": '\U000021D0', + "lAtail;": '\U0000291B', + "lBarr;": '\U0000290E', + "lE;": '\U00002266', + "lEg;": '\U00002A8B', + "lHar;": '\U00002962', + "lacute;": '\U0000013A', + "laemptyv;": '\U000029B4', + "lagran;": '\U00002112', + "lambda;": '\U000003BB', + "lang;": '\U000027E8', + "langd;": '\U00002991', + "langle;": '\U000027E8', + "lap;": '\U00002A85', + "laquo;": '\U000000AB', + "larr;": '\U00002190', + "larrb;": '\U000021E4', + "larrbfs;": '\U0000291F', + "larrfs;": '\U0000291D', + "larrhk;": '\U000021A9', + "larrlp;": '\U000021AB', + "larrpl;": '\U00002939', + "larrsim;": '\U00002973', + "larrtl;": '\U000021A2', + "lat;": '\U00002AAB', + "latail;": '\U00002919', + "late;": '\U00002AAD', + "lbarr;": '\U0000290C', + "lbbrk;": '\U00002772', + "lbrace;": '\U0000007B', + "lbrack;": '\U0000005B', + "lbrke;": '\U0000298B', + "lbrksld;": '\U0000298F', + "lbrkslu;": '\U0000298D', + "lcaron;": '\U0000013E', + "lcedil;": '\U0000013C', + "lceil;": '\U00002308', + "lcub;": '\U0000007B', + "lcy;": '\U0000043B', + "ldca;": '\U00002936', + "ldquo;": '\U0000201C', + "ldquor;": '\U0000201E', + "ldrdhar;": '\U00002967', + "ldrushar;": '\U0000294B', + "ldsh;": '\U000021B2', + "le;": '\U00002264', + "leftarrow;": '\U00002190', + "leftarrowtail;": '\U000021A2', + "leftharpoondown;": '\U000021BD', + "leftharpoonup;": '\U000021BC', + "leftleftarrows;": '\U000021C7', + "leftrightarrow;": '\U00002194', + "leftrightarrows;": '\U000021C6', + "leftrightharpoons;": '\U000021CB', + "leftrightsquigarrow;": '\U000021AD', + "leftthreetimes;": '\U000022CB', + "leg;": '\U000022DA', + "leq;": '\U00002264', + "leqq;": '\U00002266', + "leqslant;": '\U00002A7D', + "les;": '\U00002A7D', + "lescc;": '\U00002AA8', + "lesdot;": '\U00002A7F', + "lesdoto;": '\U00002A81', + "lesdotor;": '\U00002A83', + "lesges;": '\U00002A93', + "lessapprox;": '\U00002A85', + "lessdot;": '\U000022D6', + "lesseqgtr;": '\U000022DA', + "lesseqqgtr;": '\U00002A8B', + "lessgtr;": '\U00002276', + "lesssim;": '\U00002272', + "lfisht;": '\U0000297C', + "lfloor;": '\U0000230A', + "lfr;": '\U0001D529', + "lg;": '\U00002276', + "lgE;": '\U00002A91', + "lhard;": '\U000021BD', + "lharu;": '\U000021BC', + "lharul;": '\U0000296A', + "lhblk;": '\U00002584', + "ljcy;": '\U00000459', + "ll;": '\U0000226A', + "llarr;": '\U000021C7', + "llcorner;": '\U0000231E', + "llhard;": '\U0000296B', + "lltri;": '\U000025FA', + "lmidot;": '\U00000140', + "lmoust;": '\U000023B0', + "lmoustache;": '\U000023B0', + "lnE;": '\U00002268', + "lnap;": '\U00002A89', + "lnapprox;": '\U00002A89', + "lne;": '\U00002A87', + "lneq;": '\U00002A87', + "lneqq;": '\U00002268', + "lnsim;": '\U000022E6', + "loang;": '\U000027EC', + "loarr;": '\U000021FD', + "lobrk;": '\U000027E6', + "longleftarrow;": '\U000027F5', + "longleftrightarrow;": '\U000027F7', + "longmapsto;": '\U000027FC', + "longrightarrow;": '\U000027F6', + "looparrowleft;": '\U000021AB', + "looparrowright;": '\U000021AC', + "lopar;": 
'\U00002985', + "lopf;": '\U0001D55D', + "loplus;": '\U00002A2D', + "lotimes;": '\U00002A34', + "lowast;": '\U00002217', + "lowbar;": '\U0000005F', + "loz;": '\U000025CA', + "lozenge;": '\U000025CA', + "lozf;": '\U000029EB', + "lpar;": '\U00000028', + "lparlt;": '\U00002993', + "lrarr;": '\U000021C6', + "lrcorner;": '\U0000231F', + "lrhar;": '\U000021CB', + "lrhard;": '\U0000296D', + "lrm;": '\U0000200E', + "lrtri;": '\U000022BF', + "lsaquo;": '\U00002039', + "lscr;": '\U0001D4C1', + "lsh;": '\U000021B0', + "lsim;": '\U00002272', + "lsime;": '\U00002A8D', + "lsimg;": '\U00002A8F', + "lsqb;": '\U0000005B', + "lsquo;": '\U00002018', + "lsquor;": '\U0000201A', + "lstrok;": '\U00000142', + "lt;": '\U0000003C', + "ltcc;": '\U00002AA6', + "ltcir;": '\U00002A79', + "ltdot;": '\U000022D6', + "lthree;": '\U000022CB', + "ltimes;": '\U000022C9', + "ltlarr;": '\U00002976', + "ltquest;": '\U00002A7B', + "ltrPar;": '\U00002996', + "ltri;": '\U000025C3', + "ltrie;": '\U000022B4', + "ltrif;": '\U000025C2', + "lurdshar;": '\U0000294A', + "luruhar;": '\U00002966', + "mDDot;": '\U0000223A', + "macr;": '\U000000AF', + "male;": '\U00002642', + "malt;": '\U00002720', + "maltese;": '\U00002720', + "map;": '\U000021A6', + "mapsto;": '\U000021A6', + "mapstodown;": '\U000021A7', + "mapstoleft;": '\U000021A4', + "mapstoup;": '\U000021A5', + "marker;": '\U000025AE', + "mcomma;": '\U00002A29', + "mcy;": '\U0000043C', + "mdash;": '\U00002014', + "measuredangle;": '\U00002221', + "mfr;": '\U0001D52A', + "mho;": '\U00002127', + "micro;": '\U000000B5', + "mid;": '\U00002223', + "midast;": '\U0000002A', + "midcir;": '\U00002AF0', + "middot;": '\U000000B7', + "minus;": '\U00002212', + "minusb;": '\U0000229F', + "minusd;": '\U00002238', + "minusdu;": '\U00002A2A', + "mlcp;": '\U00002ADB', + "mldr;": '\U00002026', + "mnplus;": '\U00002213', + "models;": '\U000022A7', + "mopf;": '\U0001D55E', + "mp;": '\U00002213', + "mscr;": '\U0001D4C2', + "mstpos;": '\U0000223E', + "mu;": '\U000003BC', + "multimap;": '\U000022B8', + "mumap;": '\U000022B8', + "nLeftarrow;": '\U000021CD', + "nLeftrightarrow;": '\U000021CE', + "nRightarrow;": '\U000021CF', + "nVDash;": '\U000022AF', + "nVdash;": '\U000022AE', + "nabla;": '\U00002207', + "nacute;": '\U00000144', + "nap;": '\U00002249', + "napos;": '\U00000149', + "napprox;": '\U00002249', + "natur;": '\U0000266E', + "natural;": '\U0000266E', + "naturals;": '\U00002115', + "nbsp;": '\U000000A0', + "ncap;": '\U00002A43', + "ncaron;": '\U00000148', + "ncedil;": '\U00000146', + "ncong;": '\U00002247', + "ncup;": '\U00002A42', + "ncy;": '\U0000043D', + "ndash;": '\U00002013', + "ne;": '\U00002260', + "neArr;": '\U000021D7', + "nearhk;": '\U00002924', + "nearr;": '\U00002197', + "nearrow;": '\U00002197', + "nequiv;": '\U00002262', + "nesear;": '\U00002928', + "nexist;": '\U00002204', + "nexists;": '\U00002204', + "nfr;": '\U0001D52B', + "nge;": '\U00002271', + "ngeq;": '\U00002271', + "ngsim;": '\U00002275', + "ngt;": '\U0000226F', + "ngtr;": '\U0000226F', + "nhArr;": '\U000021CE', + "nharr;": '\U000021AE', + "nhpar;": '\U00002AF2', + "ni;": '\U0000220B', + "nis;": '\U000022FC', + "nisd;": '\U000022FA', + "niv;": '\U0000220B', + "njcy;": '\U0000045A', + "nlArr;": '\U000021CD', + "nlarr;": '\U0000219A', + "nldr;": '\U00002025', + "nle;": '\U00002270', + "nleftarrow;": '\U0000219A', + "nleftrightarrow;": '\U000021AE', + "nleq;": '\U00002270', + "nless;": '\U0000226E', + "nlsim;": '\U00002274', + "nlt;": '\U0000226E', + "nltri;": '\U000022EA', + "nltrie;": '\U000022EC', + "nmid;": '\U00002224', + 
"nopf;": '\U0001D55F', + "not;": '\U000000AC', + "notin;": '\U00002209', + "notinva;": '\U00002209', + "notinvb;": '\U000022F7', + "notinvc;": '\U000022F6', + "notni;": '\U0000220C', + "notniva;": '\U0000220C', + "notnivb;": '\U000022FE', + "notnivc;": '\U000022FD', + "npar;": '\U00002226', + "nparallel;": '\U00002226', + "npolint;": '\U00002A14', + "npr;": '\U00002280', + "nprcue;": '\U000022E0', + "nprec;": '\U00002280', + "nrArr;": '\U000021CF', + "nrarr;": '\U0000219B', + "nrightarrow;": '\U0000219B', + "nrtri;": '\U000022EB', + "nrtrie;": '\U000022ED', + "nsc;": '\U00002281', + "nsccue;": '\U000022E1', + "nscr;": '\U0001D4C3', + "nshortmid;": '\U00002224', + "nshortparallel;": '\U00002226', + "nsim;": '\U00002241', + "nsime;": '\U00002244', + "nsimeq;": '\U00002244', + "nsmid;": '\U00002224', + "nspar;": '\U00002226', + "nsqsube;": '\U000022E2', + "nsqsupe;": '\U000022E3', + "nsub;": '\U00002284', + "nsube;": '\U00002288', + "nsubseteq;": '\U00002288', + "nsucc;": '\U00002281', + "nsup;": '\U00002285', + "nsupe;": '\U00002289', + "nsupseteq;": '\U00002289', + "ntgl;": '\U00002279', + "ntilde;": '\U000000F1', + "ntlg;": '\U00002278', + "ntriangleleft;": '\U000022EA', + "ntrianglelefteq;": '\U000022EC', + "ntriangleright;": '\U000022EB', + "ntrianglerighteq;": '\U000022ED', + "nu;": '\U000003BD', + "num;": '\U00000023', + "numero;": '\U00002116', + "numsp;": '\U00002007', + "nvDash;": '\U000022AD', + "nvHarr;": '\U00002904', + "nvdash;": '\U000022AC', + "nvinfin;": '\U000029DE', + "nvlArr;": '\U00002902', + "nvrArr;": '\U00002903', + "nwArr;": '\U000021D6', + "nwarhk;": '\U00002923', + "nwarr;": '\U00002196', + "nwarrow;": '\U00002196', + "nwnear;": '\U00002927', + "oS;": '\U000024C8', + "oacute;": '\U000000F3', + "oast;": '\U0000229B', + "ocir;": '\U0000229A', + "ocirc;": '\U000000F4', + "ocy;": '\U0000043E', + "odash;": '\U0000229D', + "odblac;": '\U00000151', + "odiv;": '\U00002A38', + "odot;": '\U00002299', + "odsold;": '\U000029BC', + "oelig;": '\U00000153', + "ofcir;": '\U000029BF', + "ofr;": '\U0001D52C', + "ogon;": '\U000002DB', + "ograve;": '\U000000F2', + "ogt;": '\U000029C1', + "ohbar;": '\U000029B5', + "ohm;": '\U000003A9', + "oint;": '\U0000222E', + "olarr;": '\U000021BA', + "olcir;": '\U000029BE', + "olcross;": '\U000029BB', + "oline;": '\U0000203E', + "olt;": '\U000029C0', + "omacr;": '\U0000014D', + "omega;": '\U000003C9', + "omicron;": '\U000003BF', + "omid;": '\U000029B6', + "ominus;": '\U00002296', + "oopf;": '\U0001D560', + "opar;": '\U000029B7', + "operp;": '\U000029B9', + "oplus;": '\U00002295', + "or;": '\U00002228', + "orarr;": '\U000021BB', + "ord;": '\U00002A5D', + "order;": '\U00002134', + "orderof;": '\U00002134', + "ordf;": '\U000000AA', + "ordm;": '\U000000BA', + "origof;": '\U000022B6', + "oror;": '\U00002A56', + "orslope;": '\U00002A57', + "orv;": '\U00002A5B', + "oscr;": '\U00002134', + "oslash;": '\U000000F8', + "osol;": '\U00002298', + "otilde;": '\U000000F5', + "otimes;": '\U00002297', + "otimesas;": '\U00002A36', + "ouml;": '\U000000F6', + "ovbar;": '\U0000233D', + "par;": '\U00002225', + "para;": '\U000000B6', + "parallel;": '\U00002225', + "parsim;": '\U00002AF3', + "parsl;": '\U00002AFD', + "part;": '\U00002202', + "pcy;": '\U0000043F', + "percnt;": '\U00000025', + "period;": '\U0000002E', + "permil;": '\U00002030', + "perp;": '\U000022A5', + "pertenk;": '\U00002031', + "pfr;": '\U0001D52D', + "phi;": '\U000003C6', + "phiv;": '\U000003D5', + "phmmat;": '\U00002133', + "phone;": '\U0000260E', + "pi;": '\U000003C0', + "pitchfork;": '\U000022D4', + 
"piv;": '\U000003D6', + "planck;": '\U0000210F', + "planckh;": '\U0000210E', + "plankv;": '\U0000210F', + "plus;": '\U0000002B', + "plusacir;": '\U00002A23', + "plusb;": '\U0000229E', + "pluscir;": '\U00002A22', + "plusdo;": '\U00002214', + "plusdu;": '\U00002A25', + "pluse;": '\U00002A72', + "plusmn;": '\U000000B1', + "plussim;": '\U00002A26', + "plustwo;": '\U00002A27', + "pm;": '\U000000B1', + "pointint;": '\U00002A15', + "popf;": '\U0001D561', + "pound;": '\U000000A3', + "pr;": '\U0000227A', + "prE;": '\U00002AB3', + "prap;": '\U00002AB7', + "prcue;": '\U0000227C', + "pre;": '\U00002AAF', + "prec;": '\U0000227A', + "precapprox;": '\U00002AB7', + "preccurlyeq;": '\U0000227C', + "preceq;": '\U00002AAF', + "precnapprox;": '\U00002AB9', + "precneqq;": '\U00002AB5', + "precnsim;": '\U000022E8', + "precsim;": '\U0000227E', + "prime;": '\U00002032', + "primes;": '\U00002119', + "prnE;": '\U00002AB5', + "prnap;": '\U00002AB9', + "prnsim;": '\U000022E8', + "prod;": '\U0000220F', + "profalar;": '\U0000232E', + "profline;": '\U00002312', + "profsurf;": '\U00002313', + "prop;": '\U0000221D', + "propto;": '\U0000221D', + "prsim;": '\U0000227E', + "prurel;": '\U000022B0', + "pscr;": '\U0001D4C5', + "psi;": '\U000003C8', + "puncsp;": '\U00002008', + "qfr;": '\U0001D52E', + "qint;": '\U00002A0C', + "qopf;": '\U0001D562', + "qprime;": '\U00002057', + "qscr;": '\U0001D4C6', + "quaternions;": '\U0000210D', + "quatint;": '\U00002A16', + "quest;": '\U0000003F', + "questeq;": '\U0000225F', + "quot;": '\U00000022', + "rAarr;": '\U000021DB', + "rArr;": '\U000021D2', + "rAtail;": '\U0000291C', + "rBarr;": '\U0000290F', + "rHar;": '\U00002964', + "racute;": '\U00000155', + "radic;": '\U0000221A', + "raemptyv;": '\U000029B3', + "rang;": '\U000027E9', + "rangd;": '\U00002992', + "range;": '\U000029A5', + "rangle;": '\U000027E9', + "raquo;": '\U000000BB', + "rarr;": '\U00002192', + "rarrap;": '\U00002975', + "rarrb;": '\U000021E5', + "rarrbfs;": '\U00002920', + "rarrc;": '\U00002933', + "rarrfs;": '\U0000291E', + "rarrhk;": '\U000021AA', + "rarrlp;": '\U000021AC', + "rarrpl;": '\U00002945', + "rarrsim;": '\U00002974', + "rarrtl;": '\U000021A3', + "rarrw;": '\U0000219D', + "ratail;": '\U0000291A', + "ratio;": '\U00002236', + "rationals;": '\U0000211A', + "rbarr;": '\U0000290D', + "rbbrk;": '\U00002773', + "rbrace;": '\U0000007D', + "rbrack;": '\U0000005D', + "rbrke;": '\U0000298C', + "rbrksld;": '\U0000298E', + "rbrkslu;": '\U00002990', + "rcaron;": '\U00000159', + "rcedil;": '\U00000157', + "rceil;": '\U00002309', + "rcub;": '\U0000007D', + "rcy;": '\U00000440', + "rdca;": '\U00002937', + "rdldhar;": '\U00002969', + "rdquo;": '\U0000201D', + "rdquor;": '\U0000201D', + "rdsh;": '\U000021B3', + "real;": '\U0000211C', + "realine;": '\U0000211B', + "realpart;": '\U0000211C', + "reals;": '\U0000211D', + "rect;": '\U000025AD', + "reg;": '\U000000AE', + "rfisht;": '\U0000297D', + "rfloor;": '\U0000230B', + "rfr;": '\U0001D52F', + "rhard;": '\U000021C1', + "rharu;": '\U000021C0', + "rharul;": '\U0000296C', + "rho;": '\U000003C1', + "rhov;": '\U000003F1', + "rightarrow;": '\U00002192', + "rightarrowtail;": '\U000021A3', + "rightharpoondown;": '\U000021C1', + "rightharpoonup;": '\U000021C0', + "rightleftarrows;": '\U000021C4', + "rightleftharpoons;": '\U000021CC', + "rightrightarrows;": '\U000021C9', + "rightsquigarrow;": '\U0000219D', + "rightthreetimes;": '\U000022CC', + "ring;": '\U000002DA', + "risingdotseq;": '\U00002253', + "rlarr;": '\U000021C4', + "rlhar;": '\U000021CC', + "rlm;": '\U0000200F', + "rmoust;": 
'\U000023B1', + "rmoustache;": '\U000023B1', + "rnmid;": '\U00002AEE', + "roang;": '\U000027ED', + "roarr;": '\U000021FE', + "robrk;": '\U000027E7', + "ropar;": '\U00002986', + "ropf;": '\U0001D563', + "roplus;": '\U00002A2E', + "rotimes;": '\U00002A35', + "rpar;": '\U00000029', + "rpargt;": '\U00002994', + "rppolint;": '\U00002A12', + "rrarr;": '\U000021C9', + "rsaquo;": '\U0000203A', + "rscr;": '\U0001D4C7', + "rsh;": '\U000021B1', + "rsqb;": '\U0000005D', + "rsquo;": '\U00002019', + "rsquor;": '\U00002019', + "rthree;": '\U000022CC', + "rtimes;": '\U000022CA', + "rtri;": '\U000025B9', + "rtrie;": '\U000022B5', + "rtrif;": '\U000025B8', + "rtriltri;": '\U000029CE', + "ruluhar;": '\U00002968', + "rx;": '\U0000211E', + "sacute;": '\U0000015B', + "sbquo;": '\U0000201A', + "sc;": '\U0000227B', + "scE;": '\U00002AB4', + "scap;": '\U00002AB8', + "scaron;": '\U00000161', + "sccue;": '\U0000227D', + "sce;": '\U00002AB0', + "scedil;": '\U0000015F', + "scirc;": '\U0000015D', + "scnE;": '\U00002AB6', + "scnap;": '\U00002ABA', + "scnsim;": '\U000022E9', + "scpolint;": '\U00002A13', + "scsim;": '\U0000227F', + "scy;": '\U00000441', + "sdot;": '\U000022C5', + "sdotb;": '\U000022A1', + "sdote;": '\U00002A66', + "seArr;": '\U000021D8', + "searhk;": '\U00002925', + "searr;": '\U00002198', + "searrow;": '\U00002198', + "sect;": '\U000000A7', + "semi;": '\U0000003B', + "seswar;": '\U00002929', + "setminus;": '\U00002216', + "setmn;": '\U00002216', + "sext;": '\U00002736', + "sfr;": '\U0001D530', + "sfrown;": '\U00002322', + "sharp;": '\U0000266F', + "shchcy;": '\U00000449', + "shcy;": '\U00000448', + "shortmid;": '\U00002223', + "shortparallel;": '\U00002225', + "shy;": '\U000000AD', + "sigma;": '\U000003C3', + "sigmaf;": '\U000003C2', + "sigmav;": '\U000003C2', + "sim;": '\U0000223C', + "simdot;": '\U00002A6A', + "sime;": '\U00002243', + "simeq;": '\U00002243', + "simg;": '\U00002A9E', + "simgE;": '\U00002AA0', + "siml;": '\U00002A9D', + "simlE;": '\U00002A9F', + "simne;": '\U00002246', + "simplus;": '\U00002A24', + "simrarr;": '\U00002972', + "slarr;": '\U00002190', + "smallsetminus;": '\U00002216', + "smashp;": '\U00002A33', + "smeparsl;": '\U000029E4', + "smid;": '\U00002223', + "smile;": '\U00002323', + "smt;": '\U00002AAA', + "smte;": '\U00002AAC', + "softcy;": '\U0000044C', + "sol;": '\U0000002F', + "solb;": '\U000029C4', + "solbar;": '\U0000233F', + "sopf;": '\U0001D564', + "spades;": '\U00002660', + "spadesuit;": '\U00002660', + "spar;": '\U00002225', + "sqcap;": '\U00002293', + "sqcup;": '\U00002294', + "sqsub;": '\U0000228F', + "sqsube;": '\U00002291', + "sqsubset;": '\U0000228F', + "sqsubseteq;": '\U00002291', + "sqsup;": '\U00002290', + "sqsupe;": '\U00002292', + "sqsupset;": '\U00002290', + "sqsupseteq;": '\U00002292', + "squ;": '\U000025A1', + "square;": '\U000025A1', + "squarf;": '\U000025AA', + "squf;": '\U000025AA', + "srarr;": '\U00002192', + "sscr;": '\U0001D4C8', + "ssetmn;": '\U00002216', + "ssmile;": '\U00002323', + "sstarf;": '\U000022C6', + "star;": '\U00002606', + "starf;": '\U00002605', + "straightepsilon;": '\U000003F5', + "straightphi;": '\U000003D5', + "strns;": '\U000000AF', + "sub;": '\U00002282', + "subE;": '\U00002AC5', + "subdot;": '\U00002ABD', + "sube;": '\U00002286', + "subedot;": '\U00002AC3', + "submult;": '\U00002AC1', + "subnE;": '\U00002ACB', + "subne;": '\U0000228A', + "subplus;": '\U00002ABF', + "subrarr;": '\U00002979', + "subset;": '\U00002282', + "subseteq;": '\U00002286', + "subseteqq;": '\U00002AC5', + "subsetneq;": '\U0000228A', + "subsetneqq;": 
'\U00002ACB', + "subsim;": '\U00002AC7', + "subsub;": '\U00002AD5', + "subsup;": '\U00002AD3', + "succ;": '\U0000227B', + "succapprox;": '\U00002AB8', + "succcurlyeq;": '\U0000227D', + "succeq;": '\U00002AB0', + "succnapprox;": '\U00002ABA', + "succneqq;": '\U00002AB6', + "succnsim;": '\U000022E9', + "succsim;": '\U0000227F', + "sum;": '\U00002211', + "sung;": '\U0000266A', + "sup;": '\U00002283', + "sup1;": '\U000000B9', + "sup2;": '\U000000B2', + "sup3;": '\U000000B3', + "supE;": '\U00002AC6', + "supdot;": '\U00002ABE', + "supdsub;": '\U00002AD8', + "supe;": '\U00002287', + "supedot;": '\U00002AC4', + "suphsol;": '\U000027C9', + "suphsub;": '\U00002AD7', + "suplarr;": '\U0000297B', + "supmult;": '\U00002AC2', + "supnE;": '\U00002ACC', + "supne;": '\U0000228B', + "supplus;": '\U00002AC0', + "supset;": '\U00002283', + "supseteq;": '\U00002287', + "supseteqq;": '\U00002AC6', + "supsetneq;": '\U0000228B', + "supsetneqq;": '\U00002ACC', + "supsim;": '\U00002AC8', + "supsub;": '\U00002AD4', + "supsup;": '\U00002AD6', + "swArr;": '\U000021D9', + "swarhk;": '\U00002926', + "swarr;": '\U00002199', + "swarrow;": '\U00002199', + "swnwar;": '\U0000292A', + "szlig;": '\U000000DF', + "target;": '\U00002316', + "tau;": '\U000003C4', + "tbrk;": '\U000023B4', + "tcaron;": '\U00000165', + "tcedil;": '\U00000163', + "tcy;": '\U00000442', + "tdot;": '\U000020DB', + "telrec;": '\U00002315', + "tfr;": '\U0001D531', + "there4;": '\U00002234', + "therefore;": '\U00002234', + "theta;": '\U000003B8', + "thetasym;": '\U000003D1', + "thetav;": '\U000003D1', + "thickapprox;": '\U00002248', + "thicksim;": '\U0000223C', + "thinsp;": '\U00002009', + "thkap;": '\U00002248', + "thksim;": '\U0000223C', + "thorn;": '\U000000FE', + "tilde;": '\U000002DC', + "times;": '\U000000D7', + "timesb;": '\U000022A0', + "timesbar;": '\U00002A31', + "timesd;": '\U00002A30', + "tint;": '\U0000222D', + "toea;": '\U00002928', + "top;": '\U000022A4', + "topbot;": '\U00002336', + "topcir;": '\U00002AF1', + "topf;": '\U0001D565', + "topfork;": '\U00002ADA', + "tosa;": '\U00002929', + "tprime;": '\U00002034', + "trade;": '\U00002122', + "triangle;": '\U000025B5', + "triangledown;": '\U000025BF', + "triangleleft;": '\U000025C3', + "trianglelefteq;": '\U000022B4', + "triangleq;": '\U0000225C', + "triangleright;": '\U000025B9', + "trianglerighteq;": '\U000022B5', + "tridot;": '\U000025EC', + "trie;": '\U0000225C', + "triminus;": '\U00002A3A', + "triplus;": '\U00002A39', + "trisb;": '\U000029CD', + "tritime;": '\U00002A3B', + "trpezium;": '\U000023E2', + "tscr;": '\U0001D4C9', + "tscy;": '\U00000446', + "tshcy;": '\U0000045B', + "tstrok;": '\U00000167', + "twixt;": '\U0000226C', + "twoheadleftarrow;": '\U0000219E', + "twoheadrightarrow;": '\U000021A0', + "uArr;": '\U000021D1', + "uHar;": '\U00002963', + "uacute;": '\U000000FA', + "uarr;": '\U00002191', + "ubrcy;": '\U0000045E', + "ubreve;": '\U0000016D', + "ucirc;": '\U000000FB', + "ucy;": '\U00000443', + "udarr;": '\U000021C5', + "udblac;": '\U00000171', + "udhar;": '\U0000296E', + "ufisht;": '\U0000297E', + "ufr;": '\U0001D532', + "ugrave;": '\U000000F9', + "uharl;": '\U000021BF', + "uharr;": '\U000021BE', + "uhblk;": '\U00002580', + "ulcorn;": '\U0000231C', + "ulcorner;": '\U0000231C', + "ulcrop;": '\U0000230F', + "ultri;": '\U000025F8', + "umacr;": '\U0000016B', + "uml;": '\U000000A8', + "uogon;": '\U00000173', + "uopf;": '\U0001D566', + "uparrow;": '\U00002191', + "updownarrow;": '\U00002195', + "upharpoonleft;": '\U000021BF', + "upharpoonright;": '\U000021BE', + "uplus;": '\U0000228E', + 
"upsi;": '\U000003C5', + "upsih;": '\U000003D2', + "upsilon;": '\U000003C5', + "upuparrows;": '\U000021C8', + "urcorn;": '\U0000231D', + "urcorner;": '\U0000231D', + "urcrop;": '\U0000230E', + "uring;": '\U0000016F', + "urtri;": '\U000025F9', + "uscr;": '\U0001D4CA', + "utdot;": '\U000022F0', + "utilde;": '\U00000169', + "utri;": '\U000025B5', + "utrif;": '\U000025B4', + "uuarr;": '\U000021C8', + "uuml;": '\U000000FC', + "uwangle;": '\U000029A7', + "vArr;": '\U000021D5', + "vBar;": '\U00002AE8', + "vBarv;": '\U00002AE9', + "vDash;": '\U000022A8', + "vangrt;": '\U0000299C', + "varepsilon;": '\U000003F5', + "varkappa;": '\U000003F0', + "varnothing;": '\U00002205', + "varphi;": '\U000003D5', + "varpi;": '\U000003D6', + "varpropto;": '\U0000221D', + "varr;": '\U00002195', + "varrho;": '\U000003F1', + "varsigma;": '\U000003C2', + "vartheta;": '\U000003D1', + "vartriangleleft;": '\U000022B2', + "vartriangleright;": '\U000022B3', + "vcy;": '\U00000432', + "vdash;": '\U000022A2', + "vee;": '\U00002228', + "veebar;": '\U000022BB', + "veeeq;": '\U0000225A', + "vellip;": '\U000022EE', + "verbar;": '\U0000007C', + "vert;": '\U0000007C', + "vfr;": '\U0001D533', + "vltri;": '\U000022B2', + "vopf;": '\U0001D567', + "vprop;": '\U0000221D', + "vrtri;": '\U000022B3', + "vscr;": '\U0001D4CB', + "vzigzag;": '\U0000299A', + "wcirc;": '\U00000175', + "wedbar;": '\U00002A5F', + "wedge;": '\U00002227', + "wedgeq;": '\U00002259', + "weierp;": '\U00002118', + "wfr;": '\U0001D534', + "wopf;": '\U0001D568', + "wp;": '\U00002118', + "wr;": '\U00002240', + "wreath;": '\U00002240', + "wscr;": '\U0001D4CC', + "xcap;": '\U000022C2', + "xcirc;": '\U000025EF', + "xcup;": '\U000022C3', + "xdtri;": '\U000025BD', + "xfr;": '\U0001D535', + "xhArr;": '\U000027FA', + "xharr;": '\U000027F7', + "xi;": '\U000003BE', + "xlArr;": '\U000027F8', + "xlarr;": '\U000027F5', + "xmap;": '\U000027FC', + "xnis;": '\U000022FB', + "xodot;": '\U00002A00', + "xopf;": '\U0001D569', + "xoplus;": '\U00002A01', + "xotime;": '\U00002A02', + "xrArr;": '\U000027F9', + "xrarr;": '\U000027F6', + "xscr;": '\U0001D4CD', + "xsqcup;": '\U00002A06', + "xuplus;": '\U00002A04', + "xutri;": '\U000025B3', + "xvee;": '\U000022C1', + "xwedge;": '\U000022C0', + "yacute;": '\U000000FD', + "yacy;": '\U0000044F', + "ycirc;": '\U00000177', + "ycy;": '\U0000044B', + "yen;": '\U000000A5', + "yfr;": '\U0001D536', + "yicy;": '\U00000457', + "yopf;": '\U0001D56A', + "yscr;": '\U0001D4CE', + "yucy;": '\U0000044E', + "yuml;": '\U000000FF', + "zacute;": '\U0000017A', + "zcaron;": '\U0000017E', + "zcy;": '\U00000437', + "zdot;": '\U0000017C', + "zeetrf;": '\U00002128', + "zeta;": '\U000003B6', + "zfr;": '\U0001D537', + "zhcy;": '\U00000436', + "zigrarr;": '\U000021DD', + "zopf;": '\U0001D56B', + "zscr;": '\U0001D4CF', + "zwj;": '\U0000200D', + "zwnj;": '\U0000200C', + "AElig": '\U000000C6', + "AMP": '\U00000026', + "Aacute": '\U000000C1', + "Acirc": '\U000000C2', + "Agrave": '\U000000C0', + "Aring": '\U000000C5', + "Atilde": '\U000000C3', + "Auml": '\U000000C4', + "COPY": '\U000000A9', + "Ccedil": '\U000000C7', + "ETH": '\U000000D0', + "Eacute": '\U000000C9', + "Ecirc": '\U000000CA', + "Egrave": '\U000000C8', + "Euml": '\U000000CB', + "GT": '\U0000003E', + "Iacute": '\U000000CD', + "Icirc": '\U000000CE', + "Igrave": '\U000000CC', + "Iuml": '\U000000CF', + "LT": '\U0000003C', + "Ntilde": '\U000000D1', + "Oacute": '\U000000D3', + "Ocirc": '\U000000D4', + "Ograve": '\U000000D2', + "Oslash": '\U000000D8', + "Otilde": '\U000000D5', + "Ouml": '\U000000D6', + "QUOT": '\U00000022', + 
"REG": '\U000000AE', + "THORN": '\U000000DE', + "Uacute": '\U000000DA', + "Ucirc": '\U000000DB', + "Ugrave": '\U000000D9', + "Uuml": '\U000000DC', + "Yacute": '\U000000DD', + "aacute": '\U000000E1', + "acirc": '\U000000E2', + "acute": '\U000000B4', + "aelig": '\U000000E6', + "agrave": '\U000000E0', + "amp": '\U00000026', + "aring": '\U000000E5', + "atilde": '\U000000E3', + "auml": '\U000000E4', + "brvbar": '\U000000A6', + "ccedil": '\U000000E7', + "cedil": '\U000000B8', + "cent": '\U000000A2', + "copy": '\U000000A9', + "curren": '\U000000A4', + "deg": '\U000000B0', + "divide": '\U000000F7', + "eacute": '\U000000E9', + "ecirc": '\U000000EA', + "egrave": '\U000000E8', + "eth": '\U000000F0', + "euml": '\U000000EB', + "frac12": '\U000000BD', + "frac14": '\U000000BC', + "frac34": '\U000000BE', + "gt": '\U0000003E', + "iacute": '\U000000ED', + "icirc": '\U000000EE', + "iexcl": '\U000000A1', + "igrave": '\U000000EC', + "iquest": '\U000000BF', + "iuml": '\U000000EF', + "laquo": '\U000000AB', + "lt": '\U0000003C', + "macr": '\U000000AF', + "micro": '\U000000B5', + "middot": '\U000000B7', + "nbsp": '\U000000A0', + "not": '\U000000AC', + "ntilde": '\U000000F1', + "oacute": '\U000000F3', + "ocirc": '\U000000F4', + "ograve": '\U000000F2', + "ordf": '\U000000AA', + "ordm": '\U000000BA', + "oslash": '\U000000F8', + "otilde": '\U000000F5', + "ouml": '\U000000F6', + "para": '\U000000B6', + "plusmn": '\U000000B1', + "pound": '\U000000A3', + "quot": '\U00000022', + "raquo": '\U000000BB', + "reg": '\U000000AE', + "sect": '\U000000A7', + "shy": '\U000000AD', + "sup1": '\U000000B9', + "sup2": '\U000000B2', + "sup3": '\U000000B3', + "szlig": '\U000000DF', + "thorn": '\U000000FE', + "times": '\U000000D7', + "uacute": '\U000000FA', + "ucirc": '\U000000FB', + "ugrave": '\U000000F9', + "uml": '\U000000A8', + "uuml": '\U000000FC', + "yacute": '\U000000FD', + "yen": '\U000000A5', + "yuml": '\U000000FF', +} + +// HTML entities that are two unicode codepoints. +var entity2 = map[string][2]rune{ + // TODO(nigeltao): Handle replacements that are wider than their names. 
+ // "nLt;": {'\u226A', '\u20D2'}, + // "nGt;": {'\u226B', '\u20D2'}, + "NotEqualTilde;": {'\u2242', '\u0338'}, + "NotGreaterFullEqual;": {'\u2267', '\u0338'}, + "NotGreaterGreater;": {'\u226B', '\u0338'}, + "NotGreaterSlantEqual;": {'\u2A7E', '\u0338'}, + "NotHumpDownHump;": {'\u224E', '\u0338'}, + "NotHumpEqual;": {'\u224F', '\u0338'}, + "NotLeftTriangleBar;": {'\u29CF', '\u0338'}, + "NotLessLess;": {'\u226A', '\u0338'}, + "NotLessSlantEqual;": {'\u2A7D', '\u0338'}, + "NotNestedGreaterGreater;": {'\u2AA2', '\u0338'}, + "NotNestedLessLess;": {'\u2AA1', '\u0338'}, + "NotPrecedesEqual;": {'\u2AAF', '\u0338'}, + "NotRightTriangleBar;": {'\u29D0', '\u0338'}, + "NotSquareSubset;": {'\u228F', '\u0338'}, + "NotSquareSuperset;": {'\u2290', '\u0338'}, + "NotSubset;": {'\u2282', '\u20D2'}, + "NotSucceedsEqual;": {'\u2AB0', '\u0338'}, + "NotSucceedsTilde;": {'\u227F', '\u0338'}, + "NotSuperset;": {'\u2283', '\u20D2'}, + "ThickSpace;": {'\u205F', '\u200A'}, + "acE;": {'\u223E', '\u0333'}, + "bne;": {'\u003D', '\u20E5'}, + "bnequiv;": {'\u2261', '\u20E5'}, + "caps;": {'\u2229', '\uFE00'}, + "cups;": {'\u222A', '\uFE00'}, + "fjlig;": {'\u0066', '\u006A'}, + "gesl;": {'\u22DB', '\uFE00'}, + "gvertneqq;": {'\u2269', '\uFE00'}, + "gvnE;": {'\u2269', '\uFE00'}, + "lates;": {'\u2AAD', '\uFE00'}, + "lesg;": {'\u22DA', '\uFE00'}, + "lvertneqq;": {'\u2268', '\uFE00'}, + "lvnE;": {'\u2268', '\uFE00'}, + "nGg;": {'\u22D9', '\u0338'}, + "nGtv;": {'\u226B', '\u0338'}, + "nLl;": {'\u22D8', '\u0338'}, + "nLtv;": {'\u226A', '\u0338'}, + "nang;": {'\u2220', '\u20D2'}, + "napE;": {'\u2A70', '\u0338'}, + "napid;": {'\u224B', '\u0338'}, + "nbump;": {'\u224E', '\u0338'}, + "nbumpe;": {'\u224F', '\u0338'}, + "ncongdot;": {'\u2A6D', '\u0338'}, + "nedot;": {'\u2250', '\u0338'}, + "nesim;": {'\u2242', '\u0338'}, + "ngE;": {'\u2267', '\u0338'}, + "ngeqq;": {'\u2267', '\u0338'}, + "ngeqslant;": {'\u2A7E', '\u0338'}, + "nges;": {'\u2A7E', '\u0338'}, + "nlE;": {'\u2266', '\u0338'}, + "nleqq;": {'\u2266', '\u0338'}, + "nleqslant;": {'\u2A7D', '\u0338'}, + "nles;": {'\u2A7D', '\u0338'}, + "notinE;": {'\u22F9', '\u0338'}, + "notindot;": {'\u22F5', '\u0338'}, + "nparsl;": {'\u2AFD', '\u20E5'}, + "npart;": {'\u2202', '\u0338'}, + "npre;": {'\u2AAF', '\u0338'}, + "npreceq;": {'\u2AAF', '\u0338'}, + "nrarrc;": {'\u2933', '\u0338'}, + "nrarrw;": {'\u219D', '\u0338'}, + "nsce;": {'\u2AB0', '\u0338'}, + "nsubE;": {'\u2AC5', '\u0338'}, + "nsubset;": {'\u2282', '\u20D2'}, + "nsubseteqq;": {'\u2AC5', '\u0338'}, + "nsucceq;": {'\u2AB0', '\u0338'}, + "nsupE;": {'\u2AC6', '\u0338'}, + "nsupset;": {'\u2283', '\u20D2'}, + "nsupseteqq;": {'\u2AC6', '\u0338'}, + "nvap;": {'\u224D', '\u20D2'}, + "nvge;": {'\u2265', '\u20D2'}, + "nvgt;": {'\u003E', '\u20D2'}, + "nvle;": {'\u2264', '\u20D2'}, + "nvlt;": {'\u003C', '\u20D2'}, + "nvltrie;": {'\u22B4', '\u20D2'}, + "nvrtrie;": {'\u22B5', '\u20D2'}, + "nvsim;": {'\u223C', '\u20D2'}, + "race;": {'\u223D', '\u0331'}, + "smtes;": {'\u2AAC', '\uFE00'}, + "sqcaps;": {'\u2293', '\uFE00'}, + "sqcups;": {'\u2294', '\uFE00'}, + "varsubsetneq;": {'\u228A', '\uFE00'}, + "varsubsetneqq;": {'\u2ACB', '\uFE00'}, + "varsupsetneq;": {'\u228B', '\uFE00'}, + "varsupsetneqq;": {'\u2ACC', '\uFE00'}, + "vnsub;": {'\u2282', '\u20D2'}, + "vnsup;": {'\u2283', '\u20D2'}, + "vsubnE;": {'\u2ACB', '\uFE00'}, + "vsubne;": {'\u228A', '\uFE00'}, + "vsupnE;": {'\u2ACC', '\uFE00'}, + "vsupne;": {'\u228B', '\uFE00'}, +} diff --git a/vendor/golang.org/x/net/html/escape.go b/vendor/golang.org/x/net/html/escape.go new file mode 100644 index 
00000000000..04c6bec2107 --- /dev/null +++ b/vendor/golang.org/x/net/html/escape.go @@ -0,0 +1,339 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +import ( + "bytes" + "strings" + "unicode/utf8" +) + +// These replacements permit compatibility with old numeric entities that +// assumed Windows-1252 encoding. +// https://html.spec.whatwg.org/multipage/syntax.html#consume-a-character-reference +var replacementTable = [...]rune{ + '\u20AC', // First entry is what 0x80 should be replaced with. + '\u0081', + '\u201A', + '\u0192', + '\u201E', + '\u2026', + '\u2020', + '\u2021', + '\u02C6', + '\u2030', + '\u0160', + '\u2039', + '\u0152', + '\u008D', + '\u017D', + '\u008F', + '\u0090', + '\u2018', + '\u2019', + '\u201C', + '\u201D', + '\u2022', + '\u2013', + '\u2014', + '\u02DC', + '\u2122', + '\u0161', + '\u203A', + '\u0153', + '\u009D', + '\u017E', + '\u0178', // Last entry is 0x9F. + // 0x00->'\uFFFD' is handled programmatically. + // 0x0D->'\u000D' is a no-op. +} + +// unescapeEntity reads an entity like "&lt;" from b[src:] and writes the +// corresponding "<" to b[dst:], returning the incremented dst and src cursors. +// Precondition: b[src] == '&' && dst <= src. +// attribute should be true if parsing an attribute value. +func unescapeEntity(b []byte, dst, src int, attribute bool) (dst1, src1 int) { + // https://html.spec.whatwg.org/multipage/syntax.html#consume-a-character-reference + + // i starts at 1 because we already know that s[0] == '&'. + i, s := 1, b[src:] + + if len(s) <= 1 { + b[dst] = b[src] + return dst + 1, src + 1 + } + + if s[i] == '#' { + if len(s) <= 3 { // We need to have at least "&#.". + b[dst] = b[src] + return dst + 1, src + 1 + } + i++ + c := s[i] + hex := false + if c == 'x' || c == 'X' { + hex = true + i++ + } + + x := '\x00' + for i < len(s) { + c = s[i] + i++ + if hex { + if '0' <= c && c <= '9' { + x = 16*x + rune(c) - '0' + continue + } else if 'a' <= c && c <= 'f' { + x = 16*x + rune(c) - 'a' + 10 + continue + } else if 'A' <= c && c <= 'F' { + x = 16*x + rune(c) - 'A' + 10 + continue + } + } else if '0' <= c && c <= '9' { + x = 10*x + rune(c) - '0' + continue + } + if c != ';' { + i-- + } + break + } + + if i <= 3 { // No characters matched. + b[dst] = b[src] + return dst + 1, src + 1 + } + + if 0x80 <= x && x <= 0x9F { + // Replace characters from Windows-1252 with UTF-8 equivalents. + x = replacementTable[x-0x80] + } else if x == 0 || (0xD800 <= x && x <= 0xDFFF) || x > 0x10FFFF { + // Replace invalid characters with the replacement character. + x = '\uFFFD' + } + + return dst + utf8.EncodeRune(b[dst:], x), src + i + } + + // Consume the maximum number of characters possible, with the + // consumed characters matching one of the named references. + + for i < len(s) { + c := s[i] + i++ + // Lower-cased characters are more common in entities, so we check for them first. + if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' { + continue + } + if c != ';' { + i-- + } + break + } + + entityName := string(s[1:i]) + if entityName == "" { + // No-op. + } else if attribute && entityName[len(entityName)-1] != ';' && len(s) > i && s[i] == '=' { + // No-op.
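+ // Per the WHATWG character-reference rules, a named reference in an + // attribute value whose last matched character is not a semicolon is not + // expanded when the next character is '=' (kept, for historical reasons, + // as literal text), so the input bytes are left untouched here.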
+ } else if x := entity[entityName]; x != 0 { + return dst + utf8.EncodeRune(b[dst:], x), src + i + } else if x := entity2[entityName]; x[0] != 0 { + dst1 := dst + utf8.EncodeRune(b[dst:], x[0]) + return dst1 + utf8.EncodeRune(b[dst1:], x[1]), src + i + } else if !attribute { + maxLen := len(entityName) - 1 + if maxLen > longestEntityWithoutSemicolon { + maxLen = longestEntityWithoutSemicolon + } + for j := maxLen; j > 1; j-- { + if x := entity[entityName[:j]]; x != 0 { + return dst + utf8.EncodeRune(b[dst:], x), src + j + 1 + } + } + } + + dst1, src1 = dst+i, src+i + copy(b[dst:dst1], b[src:src1]) + return dst1, src1 +} + +// unescape unescapes b's entities in-place, so that "a&lt;b" becomes "a<b". +// attribute should be true if parsing an attribute value. +func unescape(b []byte, attribute bool) []byte { + for i, c := range b { + if c == '&' { + dst, src := unescapeEntity(b, i, i, attribute) + for src < len(b) { + c := b[src] + if c == '&' { + dst, src = unescapeEntity(b, dst, src, attribute) + } else { + b[dst] = c + dst, src = dst+1, src+1 + } + } + return b[0:dst] + } + } + return b +} + +// escapeComment is like func escape but escapes its input bytes less often. +// Beyond '&', the only byte it ever escapes is the '>' byte that, per above, we'd like to avoid escaping unless we have to. +// +// Studying the summary table (and T actions in its '>' column) closely, we +// only need to escape in states 43, 44, 49, 51 and 52. State 43 is at the +// start of the comment data. State 52 is after a '!'. The other three states +// are after a '-'. +// +// Our algorithm is thus to escape every '&' and to escape '>' if and only if: +// - The '>' is after a '!' or '-' (in the unescaped data) or +// - The '>' is at the start of the comment data (after the opening "<!--"). + case CommentNode: + if _, err := w.WriteString("<!--"); err != nil { + return err + } + if err := escapeComment(w, n.Data); err != nil { + return err + } + if _, err := w.WriteString("-->"); err != nil { + return err + } + return nil + case DoctypeNode: + if _, err := w.WriteString("<!DOCTYPE "); err != nil { + return err + } + if _, err := w.WriteString(n.Data); err != nil { + return err + } + if n.Attr != nil { + var p, s string + for _, a := range n.Attr { + switch a.Key { + case "public": + p = a.Val + case "system": + s = a.Val + } + } + if p != "" { + if _, err := w.WriteString(" PUBLIC "); err != nil { + return err + } + if err := writeQuoted(w, p); err != nil { + return err + } + if s != "" { + if err := w.WriteByte(' '); err != nil { + return err + } + if err := writeQuoted(w, s); err != nil { + return err + } + } + } else if s != "" { + if _, err := w.WriteString(" SYSTEM "); err != nil { + return err + } + if err := writeQuoted(w, s); err != nil { + return err + } + } + } + return w.WriteByte('>') + case RawNode: + _, err := w.WriteString(n.Data) + return err + default: + return errors.New("html: unknown node type") + } + + // Render the opening tag. + if err := w.WriteByte('<'); err != nil { + return err + } + if _, err := w.WriteString(n.Data); err != nil { + return err + } + for _, a := range n.Attr { + if err := w.WriteByte(' '); err != nil { + return err + } + if a.Namespace != "" { + if _, err := w.WriteString(a.Namespace); err != nil { + return err + } + if err := w.WriteByte(':'); err != nil { + return err + } + } + if _, err := w.WriteString(a.Key); err != nil { + return err + } + if _, err := w.WriteString(`="`); err != nil { + return err + } + if err := escape(w, a.Val); err != nil { + return err + } + if err := w.WriteByte('"'); err != nil { + return err + } + } + if voidElements[n.Data] { + if n.FirstChild != nil { + return fmt.Errorf("html: void element <%s> has child nodes", n.Data) + } + _, err := w.WriteString("/>") + return err + } + if err := w.WriteByte('>'); err != nil { + return err + } + + // Add initial newline where there is danger of a newline being ignored. + if c := n.FirstChild; c != nil && c.Type == TextNode && strings.HasPrefix(c.Data, "\n") { + switch n.Data { + case "pre", "listing", "textarea": + if err := w.WriteByte('\n'); err != nil { + return err + } + } + } + + // Render any child nodes + if childTextNodesAreLiteral(n) { + for c := n.FirstChild; c != nil; c = c.NextSibling { + if c.Type == TextNode { + if _, err := w.WriteString(c.Data); err != nil { + return err + } + } else { + if err := render1(w, c); err != nil { + return err + } + } + } + if n.Data == "plaintext" { + // Don't render anything else. <plaintext> must be the + // last element in the file, with no closing tag. + return plaintextAbort + } + } else { + for c := n.FirstChild; c != nil; c = c.NextSibling { + if err := render1(w, c); err != nil { + return err + } + } + } + + // Render the </xxx> closing tag.
+ if _, err := w.WriteString("</"); err != nil { + return err + } + if _, err := w.WriteString(n.Data); err != nil { + return err + } + return w.WriteByte('>') +} + +func childTextNodesAreLiteral(n *Node) bool { + // Per WHATWG HTML 13.3, if the parent of the current node is a style, + // script, xmp, iframe, noembed, noframes, or plaintext element, and the + // current node is a text node, append the value of the node's data + // literally. The specification is not explicit about it, but we only + // enforce this if we are in the HTML namespace (i.e. when the namespace is + // ""). + // NOTE: we also always include noscript elements, although the + // specification states that they should only be rendered as such if + // scripting is enabled for the node (which is not something we track). + if n.Namespace != "" { + return false + } + switch n.Data { + case "iframe", "noembed", "noframes", "noscript", "plaintext", "script", "style", "xmp": + return true + default: + return false + } +} + +// writeQuoted writes s to w surrounded by quotes. Normally it will use double +// quotes, but if s contains a double quote, it will use single quotes. +// It is used for writing the identifiers in a doctype declaration. +// In valid HTML, they can't contain both types of quotes. +func writeQuoted(w writer, s string) error { + var q byte = '"' + if strings.Contains(s, `"`) { + q = '\'' + } + if err := w.WriteByte(q); err != nil { + return err + } + if _, err := w.WriteString(s); err != nil { + return err + } + if err := w.WriteByte(q); err != nil { + return err + } + return nil +} + +// Section 12.1.2, "Elements", gives this list of void elements. Void elements +// are those that can't have any contents. +var voidElements = map[string]bool{ + "area": true, + "base": true, + "br": true, + "col": true, + "embed": true, + "hr": true, + "img": true, + "input": true, + "keygen": true, // "keygen" has been removed from the spec, but is kept here for backwards compatibility. + "link": true, + "meta": true, + "param": true, + "source": true, + "track": true, + "wbr": true, +} diff --git a/vendor/golang.org/x/net/html/token.go b/vendor/golang.org/x/net/html/token.go new file mode 100644 index 00000000000..3c57880d697 --- /dev/null +++ b/vendor/golang.org/x/net/html/token.go @@ -0,0 +1,1272 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +import ( + "bytes" + "errors" + "io" + "strconv" + "strings" + + "golang.org/x/net/html/atom" +) + +// A TokenType is the type of a Token. +type TokenType uint32 + +const ( + // ErrorToken means that an error occurred during tokenization. + ErrorToken TokenType = iota + // TextToken means a text node. + TextToken + // A StartTagToken looks like <a>. + StartTagToken + // An EndTagToken looks like </a>. + EndTagToken + // A SelfClosingTagToken looks like <br/>. + SelfClosingTagToken + // A CommentToken looks like <!--x-->. + CommentToken + // A DoctypeToken looks like <!DOCTYPE x>. + DoctypeToken +) + +// ErrBufferExceeded means that the buffering limit was exceeded. +var ErrBufferExceeded = errors.New("max buffer exceeded") + +// String returns a string representation of the TokenType.
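+// For example, TextToken.String() returns "Text", and an out-of-range value +// such as TokenType(99) formats as "Invalid(99)".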
+func (t TokenType) String() string { + switch t { + case ErrorToken: + return "Error" + case TextToken: + return "Text" + case StartTagToken: + return "StartTag" + case EndTagToken: + return "EndTag" + case SelfClosingTagToken: + return "SelfClosingTag" + case CommentToken: + return "Comment" + case DoctypeToken: + return "Doctype" + } + return "Invalid(" + strconv.Itoa(int(t)) + ")" +} + +// An Attribute is an attribute namespace-key-value triple. Namespace is +// non-empty for foreign attributes like xlink, Key is alphabetic (and hence +// does not contain escapable characters like '&', '<' or '>'), and Val is +// unescaped (it looks like "a<b" rather than "a&lt;b"). +// +// Namespace is only used by the parser, not the tokenizer. +type Attribute struct { + Namespace, Key, Val string +} + +// A Token consists of a TokenType and some Data (tag name for start and end +// tags, content for text, comments and doctypes). A tag Token may also contain +// a slice of Attributes. Data is unescaped for all Tokens (it looks like "a<b" +// rather than "a&lt;b"). For tag Tokens, DataAtom is the atom for Data, or +// zero if Data is not a known tag name. +type Token struct { + Type TokenType + DataAtom atom.Atom + Data string + Attr []Attribute +} + +// tagString returns a string representation of a tag Token's Data and Attr. +func (t Token) tagString() string { + if len(t.Attr) == 0 { + return t.Data + } + buf := bytes.NewBufferString(t.Data) + for _, a := range t.Attr { + buf.WriteByte(' ') + buf.WriteString(a.Key) + buf.WriteString(`="`) + escape(buf, a.Val) + buf.WriteByte('"') + } + return buf.String() +} + +// String returns a string representation of the Token. +func (t Token) String() string { + switch t.Type { + case ErrorToken: + return "" + case TextToken: + return EscapeString(t.Data) + case StartTagToken: + return "<" + t.tagString() + ">" + case EndTagToken: + return "</" + t.tagString() + ">" + case SelfClosingTagToken: + return "<" + t.tagString() + "/>" + case CommentToken: + return "<!--" + escapeCommentString(t.Data) + "-->" + case DoctypeToken: + return "<!DOCTYPE " + EscapeString(t.Data) + ">" + } + return "Invalid(" + strconv.Itoa(int(t.Type)) + ")" +} + +// span is a range of bytes in a Tokenizer's buffer. The start is inclusive, +// the end is exclusive. +type span struct { + start, end int +} + +// A Tokenizer returns a stream of HTML Tokens. +type Tokenizer struct { + // r is the source of the HTML text. + r io.Reader + // tt is the TokenType of the current token. + tt TokenType + // err is the first error encountered during tokenization. It is possible + // for tt != Error && err != nil to hold: this means that Next returned a + // valid token but the subsequent Next call will return an error token. + // For example, if the HTML text input was just "plain", then the first + // Next call would set z.err to io.EOF but return a TextToken, and all + // subsequent Next calls would return an ErrorToken. + // err is never reset. Once it becomes non-nil, it stays non-nil. + err error + // readErr is the error returned by the io.Reader r. It is separate from + // err because it is valid for an io.Reader to return (n int, err1 error) + // such that n > 0 && err1 != nil, and callers should always process the + // n > 0 bytes before considering the error err1. + readErr error + // buf[raw.start:raw.end] holds the raw bytes of the current token. + // buf[raw.end:] is buffered input that will yield future tokens. + raw span + buf []byte + // maxBuf limits the data buffered in buf. 
A value of 0 means unlimited. + maxBuf int + // buf[data.start:data.end] holds the raw bytes of the current token's data: + // a text token's text, a tag token's tag name, etc. + data span + // pendingAttr is the attribute key and value currently being tokenized. + // When complete, pendingAttr is pushed onto attr. nAttrReturned is + // incremented on each call to TagAttr. + pendingAttr [2]span + attr [][2]span + nAttrReturned int + // rawTag is the "script" in "</script>" that closes the next token. If + // non-empty, the subsequent call to Next will return a raw or RCDATA text + // token: one that treats "<p>" as text instead of an element. + // rawTag's contents are lower-cased. + rawTag string + // textIsRaw is whether the current text token's data is not escaped. + textIsRaw bool + // convertNUL is whether NUL bytes in the current token's data should + // be converted into \ufffd replacement characters. + convertNUL bool + // allowCDATA is whether CDATA sections are allowed in the current context. + allowCDATA bool +} + +// AllowCDATA sets whether or not the tokenizer recognizes <![CDATA[foo]]> as +// the text "foo". The default value is false, which means to recognize it as +// a bogus comment "<!-- [CDATA[foo]] -->" instead. +// +// Strictly speaking, an HTML5 compliant tokenizer should allow CDATA if and +// only if tokenizing foreign content, such as MathML and SVG. However, +// tracking foreign-contentness is difficult to do purely in the tokenizer, +// as opposed to the parser, due to HTML integration points: an <svg> element +// can contain a <foreignObject> that is foreign-to-SVG but not foreign-to- +// HTML. For strict compliance with the HTML5 tokenization algorithm, it is the +// responsibility of the user of a tokenizer to call AllowCDATA as appropriate. +// In practice, if using the tokenizer without caring whether MathML or SVG +// CDATA is text or comments, such as tokenizing HTML to find all the anchor +// text, it is acceptable to ignore this responsibility. +func (z *Tokenizer) AllowCDATA(allowCDATA bool) { + z.allowCDATA = allowCDATA +} + +// NextIsNotRawText instructs the tokenizer that the next token should not be +// considered as 'raw text'. Some elements, such as script and title elements, +// normally require the next token after the opening tag to be 'raw text' that +// has no child elements. For example, tokenizing "<title>a<b>c</b>d</title>" +// yields a start tag token for "<title>", a text token for "a<b>c</b>d", and +// an end tag token for "</title>". There are no distinct start tag or end tag +// tokens for the "<b>" and "</b>". +// +// This tokenizer implementation will generally look for raw text at the right +// times. Strictly speaking, an HTML5 compliant tokenizer should not look for +// raw text if in foreign content: <title> generally needs raw text, but a +// <title> inside an <svg> does not. Another example is that a <textarea> +// generally needs raw text, but a <textarea> is not allowed as an immediate +// child of a <select>; in normal parsing, a <textarea> implies </select>, but +// one cannot close the implicit element when parsing a <select>'s InnerHTML. +// Similarly to AllowCDATA, tracking the correct moment to override raw-text- +// ness is difficult to do purely in the tokenizer, as opposed to the parser. +// For strict compliance with the HTML5 tokenization algorithm, it is the +// responsibility of the user of a tokenizer to call NextIsNotRawText as +// appropriate. 
In practice, like AllowCDATA, it is acceptable to ignore this +// responsibility for basic usage. +// +// Note that this 'raw text' concept is different from the one offered by the +// Tokenizer.Raw method. +func (z *Tokenizer) NextIsNotRawText() { + z.rawTag = "" +} + +// Err returns the error associated with the most recent ErrorToken token. +// This is typically io.EOF, meaning the end of tokenization. +func (z *Tokenizer) Err() error { + if z.tt != ErrorToken { + return nil + } + return z.err +} + +// readByte returns the next byte from the input stream, doing a buffered read +// from z.r into z.buf if necessary. z.buf[z.raw.start:z.raw.end] remains a contiguous byte +// slice that holds all the bytes read so far for the current token. +// It sets z.err if the underlying reader returns an error. +// Pre-condition: z.err == nil. +func (z *Tokenizer) readByte() byte { + if z.raw.end >= len(z.buf) { + // Our buffer is exhausted and we have to read from z.r. Check if the + // previous read resulted in an error. + if z.readErr != nil { + z.err = z.readErr + return 0 + } + // We copy z.buf[z.raw.start:z.raw.end] to the beginning of z.buf. If the length + // z.raw.end - z.raw.start is more than half the capacity of z.buf, then we + // allocate a new buffer before the copy. + c := cap(z.buf) + d := z.raw.end - z.raw.start + var buf1 []byte + if 2*d > c { + buf1 = make([]byte, d, 2*c) + } else { + buf1 = z.buf[:d] + } + copy(buf1, z.buf[z.raw.start:z.raw.end]) + if x := z.raw.start; x != 0 { + // Adjust the data/attr spans to refer to the same contents after the copy. + z.data.start -= x + z.data.end -= x + z.pendingAttr[0].start -= x + z.pendingAttr[0].end -= x + z.pendingAttr[1].start -= x + z.pendingAttr[1].end -= x + for i := range z.attr { + z.attr[i][0].start -= x + z.attr[i][0].end -= x + z.attr[i][1].start -= x + z.attr[i][1].end -= x + } + } + z.raw.start, z.raw.end, z.buf = 0, d, buf1[:d] + // Now that we have copied the live bytes to the start of the buffer, + // we read from z.r into the remainder. + var n int + n, z.readErr = readAtLeastOneByte(z.r, buf1[d:cap(buf1)]) + if n == 0 { + z.err = z.readErr + return 0 + } + z.buf = buf1[:d+n] + } + x := z.buf[z.raw.end] + z.raw.end++ + if z.maxBuf > 0 && z.raw.end-z.raw.start >= z.maxBuf { + z.err = ErrBufferExceeded + return 0 + } + return x +} + +// Buffered returns a slice containing data buffered but not yet tokenized. +func (z *Tokenizer) Buffered() []byte { + return z.buf[z.raw.end:] +} + +// readAtLeastOneByte wraps an io.Reader so that reading cannot return (0, nil). +// It returns io.ErrNoProgress if the underlying r.Read method returns (0, nil) +// too many times in succession. +func readAtLeastOneByte(r io.Reader, b []byte) (int, error) { + for i := 0; i < 100; i++ { + if n, err := r.Read(b); n != 0 || err != nil { + return n, err + } + } + return 0, io.ErrNoProgress +} + +// skipWhiteSpace skips past any white space. +func (z *Tokenizer) skipWhiteSpace() { + if z.err != nil { + return + } + for { + c := z.readByte() + if z.err != nil { + return + } + switch c { + case ' ', '\n', '\r', '\t', '\f': + // No-op. + default: + z.raw.end-- + return + } + } +} + +// readRawOrRCDATA reads until the next "</foo>", where "foo" is z.rawTag and +// is typically something like "script" or "textarea". 
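+// For example, tokenizing "<title>a&amp;b</title>" yields a text token whose +// Token().Data is "a&b", since title is RCDATA and its entities are decoded, +// while the same text inside a <script> element is returned raw, as "a&amp;b".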
+func (z *Tokenizer) readRawOrRCDATA() { + if z.rawTag == "script" { + z.readScript() + z.textIsRaw = true + z.rawTag = "" + return + } +loop: + for { + c := z.readByte() + if z.err != nil { + break loop + } + if c != '<' { + continue loop + } + c = z.readByte() + if z.err != nil { + break loop + } + if c != '/' { + z.raw.end-- + continue loop + } + if z.readRawEndTag() || z.err != nil { + break loop + } + } + z.data.end = z.raw.end + // A textarea's or title's RCDATA can contain escaped entities. + z.textIsRaw = z.rawTag != "textarea" && z.rawTag != "title" + z.rawTag = "" +} + +// readRawEndTag attempts to read a tag like "</foo>", where "foo" is z.rawTag. +// If it succeeds, it backs up the input position to reconsume the tag and +// returns true. Otherwise it returns false. The opening "</" has already been +// consumed. +func (z *Tokenizer) readRawEndTag() bool { + for i := 0; i < len(z.rawTag); i++ { + c := z.readByte() + if z.err != nil { + return false + } + if c != z.rawTag[i] && c != z.rawTag[i]-('a'-'A') { + z.raw.end-- + return false + } + } + c := z.readByte() + if z.err != nil { + return false + } + switch c { + case ' ', '\n', '\r', '\t', '\f', '/', '>': + // The 3 is 2 for the leading "</" plus 1 for the trailing character c. + z.raw.end -= 3 + len(z.rawTag) + return true + } + z.raw.end-- + return false +} + +// readScript reads until the next </script> tag, following the byzantine +// rules for escaping/hiding the closing tag. +func (z *Tokenizer) readScript() { + defer func() { + z.data.end = z.raw.end + }() + var c byte + +scriptData: + c = z.readByte() + if z.err != nil { + return + } + if c == '<' { + goto scriptDataLessThanSign + } + goto scriptData + +scriptDataLessThanSign: + c = z.readByte() + if z.err != nil { + return + } + switch c { + case '/': + goto scriptDataEndTagOpen + case '!': + goto scriptDataEscapeStart + } + z.raw.end-- + goto scriptData + +scriptDataEndTagOpen: + if z.readRawEndTag() || z.err != nil { + return + } + goto scriptData + +scriptDataEscapeStart: + c = z.readByte() + if z.err != nil { + return + } + if c == '-' { + goto scriptDataEscapeStartDash + } + z.raw.end-- + goto scriptData + +scriptDataEscapeStartDash: + c = z.readByte() + if z.err != nil { + return + } + if c == '-' { + goto scriptDataEscapedDashDash + } + z.raw.end-- + goto scriptData + +scriptDataEscaped: + c = z.readByte() + if z.err != nil { + return + } + switch c { + case '-': + goto scriptDataEscapedDash + case '<': + goto scriptDataEscapedLessThanSign + } + goto scriptDataEscaped + +scriptDataEscapedDash: + c = z.readByte() + if z.err != nil { + return + } + switch c { + case '-': + goto scriptDataEscapedDashDash + case '<': + goto scriptDataEscapedLessThanSign + } + goto scriptDataEscaped + +scriptDataEscapedDashDash: + c = z.readByte() + if z.err != nil { + return + } + switch c { + case '-': + goto scriptDataEscapedDashDash + case '<': + goto scriptDataEscapedLessThanSign + case '>': + goto scriptData + } + goto scriptDataEscaped + +scriptDataEscapedLessThanSign: + c = z.readByte() + if z.err != nil { + return + } + if c == '/' { + goto scriptDataEscapedEndTagOpen + } + if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' { + goto scriptDataDoubleEscapeStart + } + z.raw.end-- + goto scriptData + +scriptDataEscapedEndTagOpen: + if z.readRawEndTag() || z.err != nil { + return + } + goto scriptDataEscaped + +scriptDataDoubleEscapeStart: + z.raw.end-- + for i := 0; i < len("script"); i++ { + c = z.readByte() + if z.err != nil { + return + } + if c != "script"[i] && c != 
"SCRIPT"[i] { + z.raw.end-- + goto scriptDataEscaped + } + } + c = z.readByte() + if z.err != nil { + return + } + switch c { + case ' ', '\n', '\r', '\t', '\f', '/', '>': + goto scriptDataDoubleEscaped + } + z.raw.end-- + goto scriptDataEscaped + +scriptDataDoubleEscaped: + c = z.readByte() + if z.err != nil { + return + } + switch c { + case '-': + goto scriptDataDoubleEscapedDash + case '<': + goto scriptDataDoubleEscapedLessThanSign + } + goto scriptDataDoubleEscaped + +scriptDataDoubleEscapedDash: + c = z.readByte() + if z.err != nil { + return + } + switch c { + case '-': + goto scriptDataDoubleEscapedDashDash + case '<': + goto scriptDataDoubleEscapedLessThanSign + } + goto scriptDataDoubleEscaped + +scriptDataDoubleEscapedDashDash: + c = z.readByte() + if z.err != nil { + return + } + switch c { + case '-': + goto scriptDataDoubleEscapedDashDash + case '<': + goto scriptDataDoubleEscapedLessThanSign + case '>': + goto scriptData + } + goto scriptDataDoubleEscaped + +scriptDataDoubleEscapedLessThanSign: + c = z.readByte() + if z.err != nil { + return + } + if c == '/' { + goto scriptDataDoubleEscapeEnd + } + z.raw.end-- + goto scriptDataDoubleEscaped + +scriptDataDoubleEscapeEnd: + if z.readRawEndTag() { + z.raw.end += len("</script>") + goto scriptDataEscaped + } + if z.err != nil { + return + } + goto scriptDataDoubleEscaped +} + +// readComment reads the next comment token starting with "<!--". The opening +// "<!--" has already been consumed. +func (z *Tokenizer) readComment() { + // When modifying this function, consider manually increasing the + // maxSuffixLen constant in func TestComments, from 6 to e.g. 9 or more. + // That increase should only be temporary, not committed, as it + // exponentially affects the test running time. + + z.data.start = z.raw.end + defer func() { + if z.data.end < z.data.start { + // It's a comment with no data, like <!-->. + z.data.end = z.data.start + } + }() + + var dashCount int + beginning := true + for { + c := z.readByte() + if z.err != nil { + z.data.end = z.calculateAbruptCommentDataEnd() + return + } + switch c { + case '-': + dashCount++ + continue + case '>': + if dashCount >= 2 || beginning { + z.data.end = z.raw.end - len("-->") + return + } + case '!': + if dashCount >= 2 { + c = z.readByte() + if z.err != nil { + z.data.end = z.calculateAbruptCommentDataEnd() + return + } else if c == '>' { + z.data.end = z.raw.end - len("--!>") + return + } else if c == '-' { + dashCount = 1 + beginning = false + continue + } + } + } + dashCount = 0 + beginning = false + } +} + +func (z *Tokenizer) calculateAbruptCommentDataEnd() int { + raw := z.Raw() + const prefixLen = len("<!--") + if len(raw) >= prefixLen { + raw = raw[prefixLen:] + if hasSuffix(raw, "--!") { + return z.raw.end - 3 + } else if hasSuffix(raw, "--") { + return z.raw.end - 2 + } else if hasSuffix(raw, "-") { + return z.raw.end - 1 + } + } + return z.raw.end +} + +func hasSuffix(b []byte, suffix string) bool { + if len(b) < len(suffix) { + return false + } + b = b[len(b)-len(suffix):] + for i := range b { + if b[i] != suffix[i] { + return false + } + } + return true +} + +// readUntilCloseAngle reads until the next ">". +func (z *Tokenizer) readUntilCloseAngle() { + z.data.start = z.raw.end + for { + c := z.readByte() + if z.err != nil { + z.data.end = z.raw.end + return + } + if c == '>' { + z.data.end = z.raw.end - len(">") + return + } + } +} + +// readMarkupDeclaration reads the next token starting with "<!". 
It might be +// a "<!--comment-->", a "<!DOCTYPE foo>", a "<![CDATA[section]]>" or +// "<!a bogus comment". The opening "<!" has already been consumed. +func (z *Tokenizer) readMarkupDeclaration() TokenType { + z.data.start = z.raw.end + var c [2]byte + for i := 0; i < 2; i++ { + c[i] = z.readByte() + if z.err != nil { + z.data.end = z.raw.end + return CommentToken + } + } + if c[0] == '-' && c[1] == '-' { + z.readComment() + return CommentToken + } + z.raw.end -= 2 + if z.readDoctype() { + return DoctypeToken + } + if z.allowCDATA && z.readCDATA() { + z.convertNUL = true + return TextToken + } + // It's a bogus comment. + z.readUntilCloseAngle() + return CommentToken +} + +// readDoctype attempts to read a doctype declaration and returns true if +// successful. The opening "<!" has already been consumed. +func (z *Tokenizer) readDoctype() bool { + const s = "DOCTYPE" + for i := 0; i < len(s); i++ { + c := z.readByte() + if z.err != nil { + z.data.end = z.raw.end + return false + } + if c != s[i] && c != s[i]+('a'-'A') { + // Back up to read the fragment of "DOCTYPE" again. + z.raw.end = z.data.start + return false + } + } + if z.skipWhiteSpace(); z.err != nil { + z.data.start = z.raw.end + z.data.end = z.raw.end + return true + } + z.readUntilCloseAngle() + return true +} + +// readCDATA attempts to read a CDATA section and returns true if +// successful. The opening "<!" has already been consumed. +func (z *Tokenizer) readCDATA() bool { + const s = "[CDATA[" + for i := 0; i < len(s); i++ { + c := z.readByte() + if z.err != nil { + z.data.end = z.raw.end + return false + } + if c != s[i] { + // Back up to read the fragment of "[CDATA[" again. + z.raw.end = z.data.start + return false + } + } + z.data.start = z.raw.end + brackets := 0 + for { + c := z.readByte() + if z.err != nil { + z.data.end = z.raw.end + return true + } + switch c { + case ']': + brackets++ + case '>': + if brackets >= 2 { + z.data.end = z.raw.end - len("]]>") + return true + } + brackets = 0 + default: + brackets = 0 + } + } +} + +// startTagIn returns whether the start tag in z.buf[z.data.start:z.data.end] +// case-insensitively matches any element of ss. +func (z *Tokenizer) startTagIn(ss ...string) bool { +loop: + for _, s := range ss { + if z.data.end-z.data.start != len(s) { + continue loop + } + for i := 0; i < len(s); i++ { + c := z.buf[z.data.start+i] + if 'A' <= c && c <= 'Z' { + c += 'a' - 'A' + } + if c != s[i] { + continue loop + } + } + return true + } + return false +} + +// readStartTag reads the next start tag token. The opening "<a" has already +// been consumed, where 'a' means anything in [A-Za-z]. +func (z *Tokenizer) readStartTag() TokenType { + z.readTag(true) + if z.err != nil { + return ErrorToken + } + // Several tags flag the tokenizer's next token as raw. + c, raw := z.buf[z.data.start], false + if 'A' <= c && c <= 'Z' { + c += 'a' - 'A' + } + switch c { + case 'i': + raw = z.startTagIn("iframe") + case 'n': + raw = z.startTagIn("noembed", "noframes", "noscript") + case 'p': + raw = z.startTagIn("plaintext") + case 's': + raw = z.startTagIn("script", "style") + case 't': + raw = z.startTagIn("textarea", "title") + case 'x': + raw = z.startTagIn("xmp") + } + if raw { + z.rawTag = strings.ToLower(string(z.buf[z.data.start:z.data.end])) + } + // Look for a self-closing token like "<br/>". + if z.err == nil && z.buf[z.raw.end-2] == '/' { + return SelfClosingTagToken + } + return StartTagToken +} + +// readTag reads the next tag token and its attributes. 
If saveAttr, those +// attributes are saved in z.attr, otherwise z.attr is set to an empty slice. +// The opening "<a" or "</a" has already been consumed, where 'a' means anything +// in [A-Za-z]. +func (z *Tokenizer) readTag(saveAttr bool) { + z.attr = z.attr[:0] + z.nAttrReturned = 0 + // Read the tag name and attribute key/value pairs. + z.readTagName() + if z.skipWhiteSpace(); z.err != nil { + return + } + for { + c := z.readByte() + if z.err != nil || c == '>' { + break + } + z.raw.end-- + z.readTagAttrKey() + z.readTagAttrVal() + // Save pendingAttr if saveAttr and that attribute has a non-empty key. + if saveAttr && z.pendingAttr[0].start != z.pendingAttr[0].end { + z.attr = append(z.attr, z.pendingAttr) + } + if z.skipWhiteSpace(); z.err != nil { + break + } + } +} + +// readTagName sets z.data to the "div" in "<div k=v>". The reader (z.raw.end) +// is positioned such that the first byte of the tag name (the "d" in "<div") +// has already been consumed. +func (z *Tokenizer) readTagName() { + z.data.start = z.raw.end - 1 + for { + c := z.readByte() + if z.err != nil { + z.data.end = z.raw.end + return + } + switch c { + case ' ', '\n', '\r', '\t', '\f': + z.data.end = z.raw.end - 1 + return + case '/', '>': + z.raw.end-- + z.data.end = z.raw.end + return + } + } +} + +// readTagAttrKey sets z.pendingAttr[0] to the "k" in "<div k=v>". +// Precondition: z.err == nil. +func (z *Tokenizer) readTagAttrKey() { + z.pendingAttr[0].start = z.raw.end + for { + c := z.readByte() + if z.err != nil { + z.pendingAttr[0].end = z.raw.end + return + } + switch c { + case '=': + if z.pendingAttr[0].start+1 == z.raw.end { + // WHATWG 13.2.5.32, if we see an equals sign before the attribute name + // begins, we treat it as a character in the attribute name and continue. + continue + } + fallthrough + case ' ', '\n', '\r', '\t', '\f', '/', '>': + // WHATWG 13.2.5.33 Attribute name state + // We need to reconsume the char in the after attribute name state to support the / character + z.raw.end-- + z.pendingAttr[0].end = z.raw.end + return + } + } +} + +// readTagAttrVal sets z.pendingAttr[1] to the "v" in "<div k=v>". +func (z *Tokenizer) readTagAttrVal() { + z.pendingAttr[1].start = z.raw.end + z.pendingAttr[1].end = z.raw.end + if z.skipWhiteSpace(); z.err != nil { + return + } + c := z.readByte() + if z.err != nil { + return + } + if c == '/' { + // WHATWG 13.2.5.34 After attribute name state + // U+002F SOLIDUS (/) - Switch to the self-closing start tag state. + return + } + if c != '=' { + z.raw.end-- + return + } + if z.skipWhiteSpace(); z.err != nil { + return + } + quote := z.readByte() + if z.err != nil { + return + } + switch quote { + case '>': + z.raw.end-- + return + + case '\'', '"': + z.pendingAttr[1].start = z.raw.end + for { + c := z.readByte() + if z.err != nil { + z.pendingAttr[1].end = z.raw.end + return + } + if c == quote { + z.pendingAttr[1].end = z.raw.end - 1 + return + } + } + + default: + z.pendingAttr[1].start = z.raw.end - 1 + for { + c := z.readByte() + if z.err != nil { + z.pendingAttr[1].end = z.raw.end + return + } + switch c { + case ' ', '\n', '\r', '\t', '\f': + z.pendingAttr[1].end = z.raw.end - 1 + return + case '>': + z.raw.end-- + z.pendingAttr[1].end = z.raw.end + return + } + } + } +} + +// Next scans the next token and returns its type. 
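+//
+// A minimal usage sketch (illustrative; r is assumed to be the caller's
+// io.Reader): loop until ErrorToken, then use z.Err() to tell io.EOF from a
+// real read error:
+//
+//	z := NewTokenizer(r)
+//	for {
+//		if z.Next() == ErrorToken {
+//			break // z.Err() is io.EOF at end of input
+//		}
+//		tok := z.Token()
+//		fmt.Printf("%v: %q\n", tok.Type, tok.Data)
+//	}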
+func (z *Tokenizer) Next() TokenType { + z.raw.start = z.raw.end + z.data.start = z.raw.end + z.data.end = z.raw.end + if z.err != nil { + z.tt = ErrorToken + return z.tt + } + if z.rawTag != "" { + if z.rawTag == "plaintext" { + // Read everything up to EOF. + for z.err == nil { + z.readByte() + } + z.data.end = z.raw.end + z.textIsRaw = true + } else { + z.readRawOrRCDATA() + } + if z.data.end > z.data.start { + z.tt = TextToken + z.convertNUL = true + return z.tt + } + } + z.textIsRaw = false + z.convertNUL = false + +loop: + for { + c := z.readByte() + if z.err != nil { + break loop + } + if c != '<' { + continue loop + } + + // Check if the '<' we have just read is part of a tag, comment + // or doctype. If not, it's part of the accumulated text token. + c = z.readByte() + if z.err != nil { + break loop + } + var tokenType TokenType + switch { + case 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z': + tokenType = StartTagToken + case c == '/': + tokenType = EndTagToken + case c == '!' || c == '?': + // We use CommentToken to mean any of "<!--actual comments-->", + // "<!DOCTYPE declarations>" and "<?xml processing instructions?>". + tokenType = CommentToken + default: + // Reconsume the current character. + z.raw.end-- + continue + } + + // We have a non-text token, but we might have accumulated some text + // before that. If so, we return the text first, and return the non- + // text token on the subsequent call to Next. + if x := z.raw.end - len("<a"); z.raw.start < x { + z.raw.end = x + z.data.end = x + z.tt = TextToken + return z.tt + } + switch tokenType { + case StartTagToken: + z.tt = z.readStartTag() + return z.tt + case EndTagToken: + c = z.readByte() + if z.err != nil { + break loop + } + if c == '>' { + // "</>" does not generate a token at all. Generate an empty comment + // to allow passthrough clients to pick up the data using Raw. + // Reset the tokenizer state and start again. + z.tt = CommentToken + return z.tt + } + if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' { + z.readTag(false) + if z.err != nil { + z.tt = ErrorToken + } else { + z.tt = EndTagToken + } + return z.tt + } + z.raw.end-- + z.readUntilCloseAngle() + z.tt = CommentToken + return z.tt + case CommentToken: + if c == '!' { + z.tt = z.readMarkupDeclaration() + return z.tt + } + z.raw.end-- + z.readUntilCloseAngle() + z.tt = CommentToken + return z.tt + } + } + if z.raw.start < z.raw.end { + z.data.end = z.raw.end + z.tt = TextToken + return z.tt + } + z.tt = ErrorToken + return z.tt +} + +// Raw returns the unmodified text of the current token. Calling Next, Token, +// Text, TagName or TagAttr may change the contents of the returned slice. +// +// The token stream's raw bytes partition the byte stream (up until an +// ErrorToken). There are no overlaps or gaps between two consecutive token's +// raw bytes. One implication is that the byte offset of the current token is +// the sum of the lengths of all previous tokens' raw bytes. +func (z *Tokenizer) Raw() []byte { + return z.buf[z.raw.start:z.raw.end] +} + +// convertNewlines converts "\r" and "\r\n" in s to "\n". +// The conversion happens in place, but the resulting slice may be shorter. 
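+// For example (illustrative), the six bytes "a\r\nb\rc" become the five
+// bytes "a\nb\nc".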
+func convertNewlines(s []byte) []byte { + for i, c := range s { + if c != '\r' { + continue + } + + src := i + 1 + if src >= len(s) || s[src] != '\n' { + s[i] = '\n' + continue + } + + dst := i + for src < len(s) { + if s[src] == '\r' { + if src+1 < len(s) && s[src+1] == '\n' { + src++ + } + s[dst] = '\n' + } else { + s[dst] = s[src] + } + src++ + dst++ + } + return s[:dst] + } + return s +} + +var ( + nul = []byte("\x00") + replacement = []byte("\ufffd") +) + +// Text returns the unescaped text of a text, comment or doctype token. The +// contents of the returned slice may change on the next call to Next. +func (z *Tokenizer) Text() []byte { + switch z.tt { + case TextToken, CommentToken, DoctypeToken: + s := z.buf[z.data.start:z.data.end] + z.data.start = z.raw.end + z.data.end = z.raw.end + s = convertNewlines(s) + if (z.convertNUL || z.tt == CommentToken) && bytes.Contains(s, nul) { + s = bytes.Replace(s, nul, replacement, -1) + } + if !z.textIsRaw { + s = unescape(s, false) + } + return s + } + return nil +} + +// TagName returns the lower-cased name of a tag token (the `img` out of +// `<IMG SRC="foo">`) and whether the tag has attributes. +// The contents of the returned slice may change on the next call to Next. +func (z *Tokenizer) TagName() (name []byte, hasAttr bool) { + if z.data.start < z.data.end { + switch z.tt { + case StartTagToken, EndTagToken, SelfClosingTagToken: + s := z.buf[z.data.start:z.data.end] + z.data.start = z.raw.end + z.data.end = z.raw.end + return lower(s), z.nAttrReturned < len(z.attr) + } + } + return nil, false +} + +// TagAttr returns the lower-cased key and unescaped value of the next unparsed +// attribute for the current tag token and whether there are more attributes. +// The contents of the returned slices may change on the next call to Next. +func (z *Tokenizer) TagAttr() (key, val []byte, moreAttr bool) { + if z.nAttrReturned < len(z.attr) { + switch z.tt { + case StartTagToken, SelfClosingTagToken: + x := z.attr[z.nAttrReturned] + z.nAttrReturned++ + key = z.buf[x[0].start:x[0].end] + val = z.buf[x[1].start:x[1].end] + return lower(key), unescape(convertNewlines(val), true), z.nAttrReturned < len(z.attr) + } + } + return nil, nil, false +} + +// Token returns the current Token. The result's Data and Attr values remain +// valid after subsequent Next calls. +func (z *Tokenizer) Token() Token { + t := Token{Type: z.tt} + switch z.tt { + case TextToken, CommentToken, DoctypeToken: + t.Data = string(z.Text()) + case StartTagToken, SelfClosingTagToken, EndTagToken: + name, moreAttr := z.TagName() + for moreAttr { + var key, val []byte + key, val, moreAttr = z.TagAttr() + t.Attr = append(t.Attr, Attribute{"", atom.String(key), string(val)}) + } + if a := atom.Lookup(name); a != 0 { + t.DataAtom, t.Data = a, a.String() + } else { + t.DataAtom, t.Data = 0, string(name) + } + } + return t +} + +// SetMaxBuf sets a limit on the amount of data buffered during tokenization. +// A value of 0 means unlimited. +func (z *Tokenizer) SetMaxBuf(n int) { + z.maxBuf = n +} + +// NewTokenizer returns a new HTML Tokenizer for the given Reader. +// The input is assumed to be UTF-8 encoded. +func NewTokenizer(r io.Reader) *Tokenizer { + return NewTokenizerFragment(r, "") +} + +// NewTokenizerFragment returns a new HTML Tokenizer for the given Reader, for +// tokenizing an existing element's InnerHTML fragment. contextTag is that +// element's tag, such as "div" or "iframe". 
+// +// For example, how the InnerHTML "a<b" is tokenized depends on whether it is +// for a <p> tag or a <script> tag. +// +// The input is assumed to be UTF-8 encoded. +func NewTokenizerFragment(r io.Reader, contextTag string) *Tokenizer { + z := &Tokenizer{ + r: r, + buf: make([]byte, 0, 4096), + } + if contextTag != "" { + switch s := strings.ToLower(contextTag); s { + case "iframe", "noembed", "noframes", "noscript", "plaintext", "script", "style", "title", "textarea", "xmp": + z.rawTag = s + } + } + return z +} diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go index 105c3b279c0..81faec7e75d 100644 --- a/vendor/golang.org/x/net/http2/frame.go +++ b/vendor/golang.org/x/net/http2/frame.go @@ -1490,7 +1490,7 @@ func (mh *MetaHeadersFrame) checkPseudos() error { pf := mh.PseudoFields() for i, hf := range pf { switch hf.Name { - case ":method", ":path", ":scheme", ":authority": + case ":method", ":path", ":scheme", ":authority", ":protocol": isRequest = true case ":status": isResponse = true @@ -1498,7 +1498,7 @@ func (mh *MetaHeadersFrame) checkPseudos() error { return pseudoHeaderError(hf.Name) } // Check for duplicates. - // This would be a bad algorithm, but N is 4. + // This would be a bad algorithm, but N is 5. // And this doesn't allocate. for _, hf2 := range pf[:i] { if hf.Name == hf2.Name { diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go index 7688c356b7c..c7601c909ff 100644 --- a/vendor/golang.org/x/net/http2/http2.go +++ b/vendor/golang.org/x/net/http2/http2.go @@ -34,10 +34,11 @@ import ( ) var ( - VerboseLogs bool - logFrameWrites bool - logFrameReads bool - inTests bool + VerboseLogs bool + logFrameWrites bool + logFrameReads bool + inTests bool + disableExtendedConnectProtocol bool ) func init() { @@ -50,6 +51,9 @@ func init() { logFrameWrites = true logFrameReads = true } + if strings.Contains(e, "http2xconnect=0") { + disableExtendedConnectProtocol = true + } } const ( @@ -141,6 +145,10 @@ func (s Setting) Valid() error { if s.Val < 16384 || s.Val > 1<<24-1 { return ConnectionError(ErrCodeProtocol) } + case SettingEnableConnectProtocol: + if s.Val != 1 && s.Val != 0 { + return ConnectionError(ErrCodeProtocol) + } } return nil } @@ -150,21 +158,23 @@ func (s Setting) Valid() error { type SettingID uint16 const ( - SettingHeaderTableSize SettingID = 0x1 - SettingEnablePush SettingID = 0x2 - SettingMaxConcurrentStreams SettingID = 0x3 - SettingInitialWindowSize SettingID = 0x4 - SettingMaxFrameSize SettingID = 0x5 - SettingMaxHeaderListSize SettingID = 0x6 + SettingHeaderTableSize SettingID = 0x1 + SettingEnablePush SettingID = 0x2 + SettingMaxConcurrentStreams SettingID = 0x3 + SettingInitialWindowSize SettingID = 0x4 + SettingMaxFrameSize SettingID = 0x5 + SettingMaxHeaderListSize SettingID = 0x6 + SettingEnableConnectProtocol SettingID = 0x8 ) var settingName = map[SettingID]string{ - SettingHeaderTableSize: "HEADER_TABLE_SIZE", - SettingEnablePush: "ENABLE_PUSH", - SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS", - SettingInitialWindowSize: "INITIAL_WINDOW_SIZE", - SettingMaxFrameSize: "MAX_FRAME_SIZE", - SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE", + SettingHeaderTableSize: "HEADER_TABLE_SIZE", + SettingEnablePush: "ENABLE_PUSH", + SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS", + SettingInitialWindowSize: "INITIAL_WINDOW_SIZE", + SettingMaxFrameSize: "MAX_FRAME_SIZE", + SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE", + SettingEnableConnectProtocol: 
"ENABLE_CONNECT_PROTOCOL", } func (s SettingID) String() string { diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index 832414b450c..b55547aec64 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -932,14 +932,18 @@ func (sc *serverConn) serve(conf http2Config) { sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs) } + settings := writeSettings{ + {SettingMaxFrameSize, conf.MaxReadFrameSize}, + {SettingMaxConcurrentStreams, sc.advMaxStreams}, + {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, + {SettingHeaderTableSize, conf.MaxDecoderHeaderTableSize}, + {SettingInitialWindowSize, uint32(sc.initialStreamRecvWindowSize)}, + } + if !disableExtendedConnectProtocol { + settings = append(settings, Setting{SettingEnableConnectProtocol, 1}) + } sc.writeFrame(FrameWriteRequest{ - write: writeSettings{ - {SettingMaxFrameSize, conf.MaxReadFrameSize}, - {SettingMaxConcurrentStreams, sc.advMaxStreams}, - {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, - {SettingHeaderTableSize, conf.MaxDecoderHeaderTableSize}, - {SettingInitialWindowSize, uint32(sc.initialStreamRecvWindowSize)}, - }, + write: settings, }) sc.unackedSettings++ @@ -1801,6 +1805,9 @@ func (sc *serverConn) processSetting(s Setting) error { sc.maxFrameSize = int32(s.Val) // the maximum valid s.Val is < 2^31 case SettingMaxHeaderListSize: sc.peerMaxHeaderListSize = s.Val + case SettingEnableConnectProtocol: + // Receipt of this parameter by a server does not + // have any impact default: // Unknown setting: "An endpoint that receives a SETTINGS // frame with any unknown or unsupported identifier MUST @@ -2231,11 +2238,17 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res scheme: f.PseudoValue("scheme"), authority: f.PseudoValue("authority"), path: f.PseudoValue("path"), + protocol: f.PseudoValue("protocol"), + } + + // extended connect is disabled, so we should not see :protocol + if disableExtendedConnectProtocol && rp.protocol != "" { + return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol)) } isConnect := rp.method == "CONNECT" if isConnect { - if rp.path != "" || rp.scheme != "" || rp.authority == "" { + if rp.protocol == "" && (rp.path != "" || rp.scheme != "" || rp.authority == "") { return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol)) } } else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") { @@ -2259,6 +2272,9 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res if rp.authority == "" { rp.authority = rp.header.Get("Host") } + if rp.protocol != "" { + rp.header.Set(":protocol", rp.protocol) + } rw, req, err := sc.newWriterAndRequestNoBody(st, rp) if err != nil { @@ -2285,6 +2301,7 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res type requestParam struct { method string scheme, authority, path string + protocol string header http.Header } @@ -2326,7 +2343,7 @@ func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*r var url_ *url.URL var requestURI string - if rp.method == "CONNECT" { + if rp.method == "CONNECT" && rp.protocol == "" { url_ = &url.URL{Host: rp.authority} requestURI = rp.authority // mimic HTTP/1 server behavior } else { diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index f5968f44071..090d0e1bdb5 100644 --- 
a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -368,25 +368,26 @@ type ClientConn struct { idleTimeout time.Duration // or 0 for never idleTimer timer - mu sync.Mutex // guards following - cond *sync.Cond // hold mu; broadcast on flow/closed changes - flow outflow // our conn-level flow control quota (cs.outflow is per stream) - inflow inflow // peer's conn-level flow control - doNotReuse bool // whether conn is marked to not be reused for any future requests - closing bool - closed bool - seenSettings bool // true if we've seen a settings frame, false otherwise - wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back - goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received - goAwayDebug string // goAway frame's debug data, retained as a string - streams map[uint32]*clientStream // client-initiated - streamsReserved int // incr by ReserveNewRequest; decr on RoundTrip - nextStreamID uint32 - pendingRequests int // requests blocked and waiting to be sent because len(streams) == maxConcurrentStreams - pings map[[8]byte]chan struct{} // in flight ping data to notification channel - br *bufio.Reader - lastActive time.Time - lastIdle time.Time // time last idle + mu sync.Mutex // guards following + cond *sync.Cond // hold mu; broadcast on flow/closed changes + flow outflow // our conn-level flow control quota (cs.outflow is per stream) + inflow inflow // peer's conn-level flow control + doNotReuse bool // whether conn is marked to not be reused for any future requests + closing bool + closed bool + seenSettings bool // true if we've seen a settings frame, false otherwise + seenSettingsChan chan struct{} // closed when seenSettings is true or frame reading fails + wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back + goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received + goAwayDebug string // goAway frame's debug data, retained as a string + streams map[uint32]*clientStream // client-initiated + streamsReserved int // incr by ReserveNewRequest; decr on RoundTrip + nextStreamID uint32 + pendingRequests int // requests blocked and waiting to be sent because len(streams) == maxConcurrentStreams + pings map[[8]byte]chan struct{} // in flight ping data to notification channel + br *bufio.Reader + lastActive time.Time + lastIdle time.Time // time last idle // Settings from peer: (also guarded by wmu) maxFrameSize uint32 maxConcurrentStreams uint32 @@ -396,6 +397,17 @@ type ClientConn struct { initialStreamRecvWindowSize int32 readIdleTimeout time.Duration pingTimeout time.Duration + extendedConnectAllowed bool + + // rstStreamPingsBlocked works around an unfortunate gRPC behavior. + // gRPC strictly limits the number of PING frames that it will receive. + // The default is two pings per two hours, but the limit resets every time + // the gRPC endpoint sends a HEADERS or DATA frame. See golang/go#70575. + // + // rstStreamPingsBlocked is set after receiving a response to a PING frame + // bundled with an RST_STREAM (see pendingResets below), and cleared after + // receiving a HEADERS or DATA frame. + rstStreamPingsBlocked bool // pendingResets is the number of RST_STREAM frames we have sent to the peer, // without confirming that the peer has received them. When we send a RST_STREAM, @@ -819,6 +831,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. 
streams: make(map[uint32]*clientStream), singleUse: singleUse, + seenSettingsChan: make(chan struct{}), wantSettingsAck: true, readIdleTimeout: conf.SendPingTimeout, pingTimeout: conf.PingTimeout, @@ -1466,6 +1479,8 @@ func (cs *clientStream) doRequest(req *http.Request, streamf func(*clientStream) cs.cleanupWriteRequest(err) } +var errExtendedConnectNotSupported = errors.New("net/http: extended connect not supported by peer") + // writeRequest sends a request. // // It returns nil after the request is written, the response read, @@ -1481,12 +1496,31 @@ func (cs *clientStream) writeRequest(req *http.Request, streamf func(*clientStre return err } + // wait for setting frames to be received, a server can change this value later, + // but we just wait for the first settings frame + var isExtendedConnect bool + if req.Method == "CONNECT" && req.Header.Get(":protocol") != "" { + isExtendedConnect = true + } + // Acquire the new-request lock by writing to reqHeaderMu. // This lock guards the critical section covering allocating a new stream ID // (requires mu) and creating the stream (requires wmu). if cc.reqHeaderMu == nil { panic("RoundTrip on uninitialized ClientConn") // for tests } + if isExtendedConnect { + select { + case <-cs.reqCancel: + return errRequestCanceled + case <-ctx.Done(): + return ctx.Err() + case <-cc.seenSettingsChan: + if !cc.extendedConnectAllowed { + return errExtendedConnectNotSupported + } + } + } select { case cc.reqHeaderMu <- struct{}{}: case <-cs.reqCancel: @@ -1714,10 +1748,14 @@ func (cs *clientStream) cleanupWriteRequest(err error) { ping := false if !closeOnIdle { cc.mu.Lock() - if cc.pendingResets == 0 { - ping = true + // rstStreamPingsBlocked works around a gRPC behavior: + // see comment on the field for details. + if !cc.rstStreamPingsBlocked { + if cc.pendingResets == 0 { + ping = true + } + cc.pendingResets++ } - cc.pendingResets++ cc.mu.Unlock() } cc.writeStreamReset(cs.ID, ErrCodeCancel, ping, err) @@ -2030,7 +2068,7 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) func validateHeaders(hdrs http.Header) string { for k, vv := range hdrs { - if !httpguts.ValidHeaderFieldName(k) { + if !httpguts.ValidHeaderFieldName(k) && k != ":protocol" { return fmt.Sprintf("name %q", k) } for _, v := range vv { @@ -2046,6 +2084,10 @@ func validateHeaders(hdrs http.Header) string { var errNilRequestURL = errors.New("http2: Request.URI is nil") +func isNormalConnect(req *http.Request) bool { + return req.Method == "CONNECT" && req.Header.Get(":protocol") == "" +} + // requires cc.wmu be held. 
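// As a summary of the pseudo-header rules applied below (per RFC 7540 §8.3
// and RFC 8441 §4): a normal CONNECT carries only :method and :authority,
// while an extended CONNECT (a CONNECT with a :protocol pseudo-header) must
// also carry :scheme and :path. That is why the code checks isNormalConnect
// rather than comparing req.Method directly.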
func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) { cc.hbuf.Reset() @@ -2066,7 +2108,7 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail } var path string - if req.Method != "CONNECT" { + if !isNormalConnect(req) { path = req.URL.RequestURI() if !validPseudoPath(path) { orig := path @@ -2103,7 +2145,7 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail m = http.MethodGet } f(":method", m) - if req.Method != "CONNECT" { + if !isNormalConnect(req) { f(":path", path) f(":scheme", req.URL.Scheme) } @@ -2461,7 +2503,7 @@ func (rl *clientConnReadLoop) run() error { cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err) } if se, ok := err.(StreamError); ok { - if cs := rl.streamByID(se.StreamID); cs != nil { + if cs := rl.streamByID(se.StreamID, notHeaderOrDataFrame); cs != nil { if se.Cause == nil { se.Cause = cc.fr.errDetail } @@ -2507,13 +2549,16 @@ func (rl *clientConnReadLoop) run() error { if VerboseLogs { cc.vlogf("http2: Transport conn %p received error from processing frame %v: %v", cc, summarizeFrame(f), err) } + if !cc.seenSettings { + close(cc.seenSettingsChan) + } return err } } } func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error { - cs := rl.streamByID(f.StreamID) + cs := rl.streamByID(f.StreamID, headerOrDataFrame) if cs == nil { // We'd get here if we canceled a request while the // server had its response still in flight. So if this @@ -2842,7 +2887,7 @@ func (b transportResponseBody) Close() error { func (rl *clientConnReadLoop) processData(f *DataFrame) error { cc := rl.cc - cs := rl.streamByID(f.StreamID) + cs := rl.streamByID(f.StreamID, headerOrDataFrame) data := f.Data() if cs == nil { cc.mu.Lock() @@ -2977,9 +3022,22 @@ func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) { cs.abortStream(err) } -func (rl *clientConnReadLoop) streamByID(id uint32) *clientStream { +// Constants passed to streamByID for documentation purposes. +const ( + headerOrDataFrame = true + notHeaderOrDataFrame = false +) + +// streamByID returns the stream with the given id, or nil if no stream has that id. +// If headerOrData is true, it clears rst.StreamPingsBlocked. +func (rl *clientConnReadLoop) streamByID(id uint32, headerOrData bool) *clientStream { rl.cc.mu.Lock() defer rl.cc.mu.Unlock() + if headerOrData { + // Work around an unfortunate gRPC behavior. + // See comment on ClientConn.rstStreamPingsBlocked for details. + rl.cc.rstStreamPingsBlocked = false + } cs := rl.cc.streams[id] if cs != nil && !cs.readAborted { return cs @@ -3073,6 +3131,21 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { case SettingHeaderTableSize: cc.henc.SetMaxDynamicTableSize(s.Val) cc.peerMaxHeaderTableSize = s.Val + case SettingEnableConnectProtocol: + if err := s.Valid(); err != nil { + return err + } + // If the peer wants to send us SETTINGS_ENABLE_CONNECT_PROTOCOL, + // we require that it do so in the first SETTINGS frame. + // + // When we attempt to use extended CONNECT, we wait for the first + // SETTINGS frame to see if the server supports it. If we let the + // server enable the feature with a later SETTINGS frame, then + // users will see inconsistent results depending on whether we've + // seen that frame or not. 
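+			//
+			// For reference, a client reaches the extended CONNECT path
+			// with a request like the following sketch (the URL and
+			// protocol value are illustrative):
+			//
+			//	req, _ := http.NewRequest("CONNECT", "https://example.com/chat", nil)
+			//	req.Header.Set(":protocol", "websocket")
+			//
+			// writeRequest then waits on seenSettingsChan and fails with
+			// errExtendedConnectNotSupported unless this setting arrived
+			// with value 1 in that first SETTINGS frame.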
+ if !cc.seenSettings { + cc.extendedConnectAllowed = s.Val == 1 + } default: cc.vlogf("Unhandled Setting: %v", s) } @@ -3090,6 +3163,7 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { // connection can establish to our default. cc.maxConcurrentStreams = defaultMaxConcurrentStreams } + close(cc.seenSettingsChan) cc.seenSettings = true } @@ -3098,7 +3172,7 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { cc := rl.cc - cs := rl.streamByID(f.StreamID) + cs := rl.streamByID(f.StreamID, notHeaderOrDataFrame) if f.StreamID != 0 && cs == nil { return nil } @@ -3127,7 +3201,7 @@ func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { } func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error { - cs := rl.streamByID(f.StreamID) + cs := rl.streamByID(f.StreamID, notHeaderOrDataFrame) if cs == nil { // TODO: return error if server tries to RST_STREAM an idle stream return nil @@ -3205,6 +3279,7 @@ func (rl *clientConnReadLoop) processPing(f *PingFrame) error { if cc.pendingResets > 0 { // See clientStream.cleanupWriteRequest. cc.pendingResets = 0 + cc.rstStreamPingsBlocked = true cc.cond.Broadcast() } return nil diff --git a/vendor/golang.org/x/oauth2/LICENSE b/vendor/golang.org/x/oauth2/LICENSE index 6a66aea5eaf..2a7cf70da6e 100644 --- a/vendor/golang.org/x/oauth2/LICENSE +++ b/vendor/golang.org/x/oauth2/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/oauth2/README.md b/vendor/golang.org/x/oauth2/README.md index 781770c2046..48dbb9d84c8 100644 --- a/vendor/golang.org/x/oauth2/README.md +++ b/vendor/golang.org/x/oauth2/README.md @@ -5,15 +5,6 @@ oauth2 package contains a client implementation for OAuth 2.0 spec. -## Installation - -~~~~ -go get golang.org/x/oauth2 -~~~~ - -Or you can manually git clone the repository to -`$(go env GOPATH)/src/golang.org/x/oauth2`. - See pkg.go.dev for further documentation and examples. * [pkg.go.dev/golang.org/x/oauth2](https://pkg.go.dev/golang.org/x/oauth2) @@ -33,7 +24,11 @@ The main issue tracker for the oauth2 repository is located at https://github.com/golang/oauth2/issues. This repository uses Gerrit for code changes. To learn how to submit changes to -this repository, see https://golang.org/doc/contribute.html. In particular: +this repository, see https://go.dev/doc/contribute. + +The git repository is https://go.googlesource.com/oauth2. + +Note: * Excluding trivial changes, all contributions should be connected to an existing issue. * API changes must go through the [change proposal process](https://go.dev/s/proposal-process) before they can be accepted. 
diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go index 5bbb3321748..109997d77ce 100644 --- a/vendor/golang.org/x/oauth2/token.go +++ b/vendor/golang.org/x/oauth2/token.go @@ -49,6 +49,13 @@ type Token struct { // mechanisms for that TokenSource will not be used. Expiry time.Time `json:"expiry,omitempty"` + // ExpiresIn is the OAuth2 wire format "expires_in" field, + // which specifies how many seconds later the token expires, + // relative to an unknown time base approximately around "now". + // It is the application's responsibility to populate + // `Expiry` from `ExpiresIn` when required. + ExpiresIn int64 `json:"expires_in,omitempty"` + // raw optionally contains extra metadata from the server // when updating a token. raw interface{} diff --git a/vendor/golang.org/x/text/encoding/htmlindex/htmlindex.go b/vendor/golang.org/x/text/encoding/htmlindex/htmlindex.go new file mode 100644 index 00000000000..bdc7d15dda4 --- /dev/null +++ b/vendor/golang.org/x/text/encoding/htmlindex/htmlindex.go @@ -0,0 +1,86 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run gen.go + +// Package htmlindex maps character set encoding names to Encodings as +// recommended by the W3C for use in HTML 5. See http://www.w3.org/TR/encoding. +package htmlindex + +// TODO: perhaps have a "bare" version of the index (used by this package) that +// is not pre-loaded with all encodings. Global variables in encodings prevent +// the linker from being able to purge unneeded tables. This means that +// referencing all encodings, as this package does for the default index, links +// in all encodings unconditionally. +// +// This issue can be solved by either solving the linking issue (see +// https://github.com/golang/go/issues/6330) or refactoring the encoding tables +// (e.g. moving the tables to internal packages that do not use global +// variables). + +// TODO: allow canonicalizing names + +import ( + "errors" + "strings" + "sync" + + "golang.org/x/text/encoding" + "golang.org/x/text/encoding/internal/identifier" + "golang.org/x/text/language" +) + +var ( + errInvalidName = errors.New("htmlindex: invalid encoding name") + errUnknown = errors.New("htmlindex: unknown Encoding") + errUnsupported = errors.New("htmlindex: this encoding is not supported") +) + +var ( + matcherOnce sync.Once + matcher language.Matcher +) + +// LanguageDefault returns the canonical name of the default encoding for a +// given language. +func LanguageDefault(tag language.Tag) string { + matcherOnce.Do(func() { + tags := []language.Tag{} + for _, t := range strings.Split(locales, " ") { + tags = append(tags, language.MustParse(t)) + } + matcher = language.NewMatcher(tags, language.PreferSameScript(true)) + }) + _, i, _ := matcher.Match(tag) + return canonical[localeMap[i]] // Default is Windows-1252. +} + +// Get returns an Encoding for one of the names listed in +// http://www.w3.org/TR/encoding using the Default Index. Matching is case- +// insensitive. +func Get(name string) (encoding.Encoding, error) { + x, ok := nameMap[strings.ToLower(strings.TrimSpace(name))] + if !ok { + return nil, errInvalidName + } + return encodings[x], nil +} + +// Name reports the canonical name of the given Encoding. It will return +// an error if e is not associated with a supported encoding scheme. 
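+//
+// A small sketch of both lookups (the "latin1" label and the charmap import
+// are illustrative):
+//
+//	e, _ := Get("latin1")                // the label resolves to windows-1252
+//	name, _ := Name(charmap.Windows1252) // "windows-1252"
+//	_, _ = e, name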
+func Name(e encoding.Encoding) (string, error) { + id, ok := e.(identifier.Interface) + if !ok { + return "", errUnknown + } + mib, _ := id.ID() + if mib == 0 { + return "", errUnknown + } + v, ok := mibMap[mib] + if !ok { + return "", errUnsupported + } + return canonical[v], nil +} diff --git a/vendor/golang.org/x/text/encoding/htmlindex/map.go b/vendor/golang.org/x/text/encoding/htmlindex/map.go new file mode 100644 index 00000000000..c61439045d0 --- /dev/null +++ b/vendor/golang.org/x/text/encoding/htmlindex/map.go @@ -0,0 +1,105 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package htmlindex + +import ( + "golang.org/x/text/encoding" + "golang.org/x/text/encoding/charmap" + "golang.org/x/text/encoding/internal/identifier" + "golang.org/x/text/encoding/japanese" + "golang.org/x/text/encoding/korean" + "golang.org/x/text/encoding/simplifiedchinese" + "golang.org/x/text/encoding/traditionalchinese" + "golang.org/x/text/encoding/unicode" +) + +// mibMap maps a MIB identifier to an htmlEncoding index. +var mibMap = map[identifier.MIB]htmlEncoding{ + identifier.UTF8: utf8, + identifier.UTF16BE: utf16be, + identifier.UTF16LE: utf16le, + identifier.IBM866: ibm866, + identifier.ISOLatin2: iso8859_2, + identifier.ISOLatin3: iso8859_3, + identifier.ISOLatin4: iso8859_4, + identifier.ISOLatinCyrillic: iso8859_5, + identifier.ISOLatinArabic: iso8859_6, + identifier.ISOLatinGreek: iso8859_7, + identifier.ISOLatinHebrew: iso8859_8, + identifier.ISO88598I: iso8859_8I, + identifier.ISOLatin6: iso8859_10, + identifier.ISO885913: iso8859_13, + identifier.ISO885914: iso8859_14, + identifier.ISO885915: iso8859_15, + identifier.ISO885916: iso8859_16, + identifier.KOI8R: koi8r, + identifier.KOI8U: koi8u, + identifier.Macintosh: macintosh, + identifier.MacintoshCyrillic: macintoshCyrillic, + identifier.Windows874: windows874, + identifier.Windows1250: windows1250, + identifier.Windows1251: windows1251, + identifier.Windows1252: windows1252, + identifier.Windows1253: windows1253, + identifier.Windows1254: windows1254, + identifier.Windows1255: windows1255, + identifier.Windows1256: windows1256, + identifier.Windows1257: windows1257, + identifier.Windows1258: windows1258, + identifier.XUserDefined: xUserDefined, + identifier.GBK: gbk, + identifier.GB18030: gb18030, + identifier.Big5: big5, + identifier.EUCPkdFmtJapanese: eucjp, + identifier.ISO2022JP: iso2022jp, + identifier.ShiftJIS: shiftJIS, + identifier.EUCKR: euckr, + identifier.Replacement: replacement, +} + +// encodings maps the internal htmlEncoding to an Encoding. +// TODO: consider using a reusable index in encoding/internal. 
+var encodings = [numEncodings]encoding.Encoding{ + utf8: unicode.UTF8, + ibm866: charmap.CodePage866, + iso8859_2: charmap.ISO8859_2, + iso8859_3: charmap.ISO8859_3, + iso8859_4: charmap.ISO8859_4, + iso8859_5: charmap.ISO8859_5, + iso8859_6: charmap.ISO8859_6, + iso8859_7: charmap.ISO8859_7, + iso8859_8: charmap.ISO8859_8, + iso8859_8I: charmap.ISO8859_8I, + iso8859_10: charmap.ISO8859_10, + iso8859_13: charmap.ISO8859_13, + iso8859_14: charmap.ISO8859_14, + iso8859_15: charmap.ISO8859_15, + iso8859_16: charmap.ISO8859_16, + koi8r: charmap.KOI8R, + koi8u: charmap.KOI8U, + macintosh: charmap.Macintosh, + windows874: charmap.Windows874, + windows1250: charmap.Windows1250, + windows1251: charmap.Windows1251, + windows1252: charmap.Windows1252, + windows1253: charmap.Windows1253, + windows1254: charmap.Windows1254, + windows1255: charmap.Windows1255, + windows1256: charmap.Windows1256, + windows1257: charmap.Windows1257, + windows1258: charmap.Windows1258, + macintoshCyrillic: charmap.MacintoshCyrillic, + gbk: simplifiedchinese.GBK, + gb18030: simplifiedchinese.GB18030, + big5: traditionalchinese.Big5, + eucjp: japanese.EUCJP, + iso2022jp: japanese.ISO2022JP, + shiftJIS: japanese.ShiftJIS, + euckr: korean.EUCKR, + replacement: encoding.Replacement, + utf16be: unicode.UTF16(unicode.BigEndian, unicode.IgnoreBOM), + utf16le: unicode.UTF16(unicode.LittleEndian, unicode.IgnoreBOM), + xUserDefined: charmap.XUserDefined, +} diff --git a/vendor/golang.org/x/text/encoding/htmlindex/tables.go b/vendor/golang.org/x/text/encoding/htmlindex/tables.go new file mode 100644 index 00000000000..9e6daa8965c --- /dev/null +++ b/vendor/golang.org/x/text/encoding/htmlindex/tables.go @@ -0,0 +1,362 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. 
+ +package htmlindex + +type htmlEncoding byte + +const ( + utf8 htmlEncoding = iota + ibm866 + iso8859_2 + iso8859_3 + iso8859_4 + iso8859_5 + iso8859_6 + iso8859_7 + iso8859_8 + iso8859_8I + iso8859_10 + iso8859_13 + iso8859_14 + iso8859_15 + iso8859_16 + koi8r + koi8u + macintosh + windows874 + windows1250 + windows1251 + windows1252 + windows1253 + windows1254 + windows1255 + windows1256 + windows1257 + windows1258 + macintoshCyrillic + gbk + gb18030 + big5 + eucjp + iso2022jp + shiftJIS + euckr + replacement + utf16be + utf16le + xUserDefined + numEncodings +) + +var canonical = [numEncodings]string{ + "utf-8", + "ibm866", + "iso-8859-2", + "iso-8859-3", + "iso-8859-4", + "iso-8859-5", + "iso-8859-6", + "iso-8859-7", + "iso-8859-8", + "iso-8859-8-i", + "iso-8859-10", + "iso-8859-13", + "iso-8859-14", + "iso-8859-15", + "iso-8859-16", + "koi8-r", + "koi8-u", + "macintosh", + "windows-874", + "windows-1250", + "windows-1251", + "windows-1252", + "windows-1253", + "windows-1254", + "windows-1255", + "windows-1256", + "windows-1257", + "windows-1258", + "x-mac-cyrillic", + "gbk", + "gb18030", + "big5", + "euc-jp", + "iso-2022-jp", + "shift_jis", + "euc-kr", + "replacement", + "utf-16be", + "utf-16le", + "x-user-defined", +} + +var nameMap = map[string]htmlEncoding{ + "unicode-1-1-utf-8": utf8, + "unicode11utf8": utf8, + "unicode20utf8": utf8, + "utf-8": utf8, + "utf8": utf8, + "x-unicode20utf8": utf8, + "866": ibm866, + "cp866": ibm866, + "csibm866": ibm866, + "ibm866": ibm866, + "csisolatin2": iso8859_2, + "iso-8859-2": iso8859_2, + "iso-ir-101": iso8859_2, + "iso8859-2": iso8859_2, + "iso88592": iso8859_2, + "iso_8859-2": iso8859_2, + "iso_8859-2:1987": iso8859_2, + "l2": iso8859_2, + "latin2": iso8859_2, + "csisolatin3": iso8859_3, + "iso-8859-3": iso8859_3, + "iso-ir-109": iso8859_3, + "iso8859-3": iso8859_3, + "iso88593": iso8859_3, + "iso_8859-3": iso8859_3, + "iso_8859-3:1988": iso8859_3, + "l3": iso8859_3, + "latin3": iso8859_3, + "csisolatin4": iso8859_4, + "iso-8859-4": iso8859_4, + "iso-ir-110": iso8859_4, + "iso8859-4": iso8859_4, + "iso88594": iso8859_4, + "iso_8859-4": iso8859_4, + "iso_8859-4:1988": iso8859_4, + "l4": iso8859_4, + "latin4": iso8859_4, + "csisolatincyrillic": iso8859_5, + "cyrillic": iso8859_5, + "iso-8859-5": iso8859_5, + "iso-ir-144": iso8859_5, + "iso8859-5": iso8859_5, + "iso88595": iso8859_5, + "iso_8859-5": iso8859_5, + "iso_8859-5:1988": iso8859_5, + "arabic": iso8859_6, + "asmo-708": iso8859_6, + "csiso88596e": iso8859_6, + "csiso88596i": iso8859_6, + "csisolatinarabic": iso8859_6, + "ecma-114": iso8859_6, + "iso-8859-6": iso8859_6, + "iso-8859-6-e": iso8859_6, + "iso-8859-6-i": iso8859_6, + "iso-ir-127": iso8859_6, + "iso8859-6": iso8859_6, + "iso88596": iso8859_6, + "iso_8859-6": iso8859_6, + "iso_8859-6:1987": iso8859_6, + "csisolatingreek": iso8859_7, + "ecma-118": iso8859_7, + "elot_928": iso8859_7, + "greek": iso8859_7, + "greek8": iso8859_7, + "iso-8859-7": iso8859_7, + "iso-ir-126": iso8859_7, + "iso8859-7": iso8859_7, + "iso88597": iso8859_7, + "iso_8859-7": iso8859_7, + "iso_8859-7:1987": iso8859_7, + "sun_eu_greek": iso8859_7, + "csiso88598e": iso8859_8, + "csisolatinhebrew": iso8859_8, + "hebrew": iso8859_8, + "iso-8859-8": iso8859_8, + "iso-8859-8-e": iso8859_8, + "iso-ir-138": iso8859_8, + "iso8859-8": iso8859_8, + "iso88598": iso8859_8, + "iso_8859-8": iso8859_8, + "iso_8859-8:1988": iso8859_8, + "visual": iso8859_8, + "csiso88598i": iso8859_8I, + "iso-8859-8-i": iso8859_8I, + "logical": iso8859_8I, + "csisolatin6": iso8859_10, + 
"iso-8859-10": iso8859_10, + "iso-ir-157": iso8859_10, + "iso8859-10": iso8859_10, + "iso885910": iso8859_10, + "l6": iso8859_10, + "latin6": iso8859_10, + "iso-8859-13": iso8859_13, + "iso8859-13": iso8859_13, + "iso885913": iso8859_13, + "iso-8859-14": iso8859_14, + "iso8859-14": iso8859_14, + "iso885914": iso8859_14, + "csisolatin9": iso8859_15, + "iso-8859-15": iso8859_15, + "iso8859-15": iso8859_15, + "iso885915": iso8859_15, + "iso_8859-15": iso8859_15, + "l9": iso8859_15, + "iso-8859-16": iso8859_16, + "cskoi8r": koi8r, + "koi": koi8r, + "koi8": koi8r, + "koi8-r": koi8r, + "koi8_r": koi8r, + "koi8-ru": koi8u, + "koi8-u": koi8u, + "csmacintosh": macintosh, + "mac": macintosh, + "macintosh": macintosh, + "x-mac-roman": macintosh, + "dos-874": windows874, + "iso-8859-11": windows874, + "iso8859-11": windows874, + "iso885911": windows874, + "tis-620": windows874, + "windows-874": windows874, + "cp1250": windows1250, + "windows-1250": windows1250, + "x-cp1250": windows1250, + "cp1251": windows1251, + "windows-1251": windows1251, + "x-cp1251": windows1251, + "ansi_x3.4-1968": windows1252, + "ascii": windows1252, + "cp1252": windows1252, + "cp819": windows1252, + "csisolatin1": windows1252, + "ibm819": windows1252, + "iso-8859-1": windows1252, + "iso-ir-100": windows1252, + "iso8859-1": windows1252, + "iso88591": windows1252, + "iso_8859-1": windows1252, + "iso_8859-1:1987": windows1252, + "l1": windows1252, + "latin1": windows1252, + "us-ascii": windows1252, + "windows-1252": windows1252, + "x-cp1252": windows1252, + "cp1253": windows1253, + "windows-1253": windows1253, + "x-cp1253": windows1253, + "cp1254": windows1254, + "csisolatin5": windows1254, + "iso-8859-9": windows1254, + "iso-ir-148": windows1254, + "iso8859-9": windows1254, + "iso88599": windows1254, + "iso_8859-9": windows1254, + "iso_8859-9:1989": windows1254, + "l5": windows1254, + "latin5": windows1254, + "windows-1254": windows1254, + "x-cp1254": windows1254, + "cp1255": windows1255, + "windows-1255": windows1255, + "x-cp1255": windows1255, + "cp1256": windows1256, + "windows-1256": windows1256, + "x-cp1256": windows1256, + "cp1257": windows1257, + "windows-1257": windows1257, + "x-cp1257": windows1257, + "cp1258": windows1258, + "windows-1258": windows1258, + "x-cp1258": windows1258, + "x-mac-cyrillic": macintoshCyrillic, + "x-mac-ukrainian": macintoshCyrillic, + "chinese": gbk, + "csgb2312": gbk, + "csiso58gb231280": gbk, + "gb2312": gbk, + "gb_2312": gbk, + "gb_2312-80": gbk, + "gbk": gbk, + "iso-ir-58": gbk, + "x-gbk": gbk, + "gb18030": gb18030, + "big5": big5, + "big5-hkscs": big5, + "cn-big5": big5, + "csbig5": big5, + "x-x-big5": big5, + "cseucpkdfmtjapanese": eucjp, + "euc-jp": eucjp, + "x-euc-jp": eucjp, + "csiso2022jp": iso2022jp, + "iso-2022-jp": iso2022jp, + "csshiftjis": shiftJIS, + "ms932": shiftJIS, + "ms_kanji": shiftJIS, + "shift-jis": shiftJIS, + "shift_jis": shiftJIS, + "sjis": shiftJIS, + "windows-31j": shiftJIS, + "x-sjis": shiftJIS, + "cseuckr": euckr, + "csksc56011987": euckr, + "euc-kr": euckr, + "iso-ir-149": euckr, + "korean": euckr, + "ks_c_5601-1987": euckr, + "ks_c_5601-1989": euckr, + "ksc5601": euckr, + "ksc_5601": euckr, + "windows-949": euckr, + "csiso2022kr": replacement, + "hz-gb-2312": replacement, + "iso-2022-cn": replacement, + "iso-2022-cn-ext": replacement, + "iso-2022-kr": replacement, + "replacement": replacement, + "unicodefffe": utf16be, + "utf-16be": utf16be, + "csunicode": utf16le, + "iso-10646-ucs-2": utf16le, + "ucs-2": utf16le, + "unicode": utf16le, + "unicodefeff": utf16le, 
+ "utf-16": utf16le, + "utf-16le": utf16le, + "x-user-defined": xUserDefined, +} + +var localeMap = []htmlEncoding{ + windows1252, // und_Latn + windows1256, // ar + windows1251, // ba + windows1251, // be + windows1251, // bg + windows1250, // cs + iso8859_7, // el + windows1257, // et + windows1256, // fa + windows1255, // he + windows1250, // hr + iso8859_2, // hu + shiftJIS, // ja + windows1251, // kk + euckr, // ko + windows1254, // ku + windows1251, // ky + windows1257, // lt + windows1257, // lv + windows1251, // mk + iso8859_2, // pl + windows1251, // ru + windows1251, // sah + windows1250, // sk + iso8859_2, // sl + windows1251, // sr + windows1251, // tg + windows874, // th + windows1254, // tr + windows1251, // tt + windows1251, // uk + windows1258, // vi + gb18030, // zh-hans + big5, // zh-hant +} + +const locales = "und_Latn ar ba be bg cs el et fa he hr hu ja kk ko ku ky lt lv mk pl ru sah sk sl sr tg th tr tt uk vi zh-hans zh-hant" diff --git a/vendor/golang.org/x/text/internal/language/common.go b/vendor/golang.org/x/text/internal/language/common.go new file mode 100644 index 00000000000..cdfdb749718 --- /dev/null +++ b/vendor/golang.org/x/text/internal/language/common.go @@ -0,0 +1,16 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +package language + +// This file contains code common to the maketables.go and the package code. + +// AliasType is the type of an alias in AliasMap. +type AliasType int8 + +const ( + Deprecated AliasType = iota + Macro + Legacy + + AliasTypeUnknown AliasType = -1 +) diff --git a/vendor/golang.org/x/text/internal/language/compact.go b/vendor/golang.org/x/text/internal/language/compact.go new file mode 100644 index 00000000000..46a0015074f --- /dev/null +++ b/vendor/golang.org/x/text/internal/language/compact.go @@ -0,0 +1,29 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package language + +// CompactCoreInfo is a compact integer with the three core tags encoded. +type CompactCoreInfo uint32 + +// GetCompactCore generates a uint32 value that is guaranteed to be unique for +// different language, region, and script values. +func GetCompactCore(t Tag) (cci CompactCoreInfo, ok bool) { + if t.LangID > langNoIndexOffset { + return 0, false + } + cci |= CompactCoreInfo(t.LangID) << (8 + 12) + cci |= CompactCoreInfo(t.ScriptID) << 12 + cci |= CompactCoreInfo(t.RegionID) + return cci, true +} + +// Tag generates a tag from c. +func (c CompactCoreInfo) Tag() Tag { + return Tag{ + LangID: Language(c >> 20), + RegionID: Region(c & 0x3ff), + ScriptID: Script(c>>12) & 0xff, + } +} diff --git a/vendor/golang.org/x/text/internal/language/compact/compact.go b/vendor/golang.org/x/text/internal/language/compact/compact.go new file mode 100644 index 00000000000..1b36935ef7b --- /dev/null +++ b/vendor/golang.org/x/text/internal/language/compact/compact.go @@ -0,0 +1,61 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package compact defines a compact representation of language tags. +// +// Common language tags (at least all for which locale information is defined +// in CLDR) are assigned a unique index. 
Each Tag is associated with such an +// ID for selecting language-related resources (such as translations) as well +// as one for selecting regional defaults (currency, number formatting, etc.) +// +// It may want to export this functionality at some point, but at this point +// this is only available for use within x/text. +package compact // import "golang.org/x/text/internal/language/compact" + +import ( + "sort" + "strings" + + "golang.org/x/text/internal/language" +) + +// ID is an integer identifying a single tag. +type ID uint16 + +func getCoreIndex(t language.Tag) (id ID, ok bool) { + cci, ok := language.GetCompactCore(t) + if !ok { + return 0, false + } + i := sort.Search(len(coreTags), func(i int) bool { + return cci <= coreTags[i] + }) + if i == len(coreTags) || coreTags[i] != cci { + return 0, false + } + return ID(i), true +} + +// Parent returns the ID of the parent or the root ID if id is already the root. +func (id ID) Parent() ID { + return parents[id] +} + +// Tag converts id to an internal language Tag. +func (id ID) Tag() language.Tag { + if int(id) >= len(coreTags) { + return specialTags[int(id)-len(coreTags)] + } + return coreTags[id].Tag() +} + +var specialTags []language.Tag + +func init() { + tags := strings.Split(specialTagsStr, " ") + specialTags = make([]language.Tag, len(tags)) + for i, t := range tags { + specialTags[i] = language.MustParse(t) + } +} diff --git a/vendor/golang.org/x/text/internal/language/compact/language.go b/vendor/golang.org/x/text/internal/language/compact/language.go new file mode 100644 index 00000000000..8c1b6666fb8 --- /dev/null +++ b/vendor/golang.org/x/text/internal/language/compact/language.go @@ -0,0 +1,260 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run gen.go gen_index.go -output tables.go +//go:generate go run gen_parents.go + +package compact + +// TODO: Remove above NOTE after: +// - verifying that tables are dropped correctly (most notably matcher tables). + +import ( + "strings" + + "golang.org/x/text/internal/language" +) + +// Tag represents a BCP 47 language tag. It is used to specify an instance of a +// specific language or locale. All language tag values are guaranteed to be +// well-formed. +type Tag struct { + // NOTE: exported tags will become part of the public API. + language ID + locale ID + full fullTag // always a language.Tag for now. +} + +const _und = 0 + +type fullTag interface { + IsRoot() bool + Parent() language.Tag +} + +// Make a compact Tag from a fully specified internal language Tag. +func Make(t language.Tag) (tag Tag) { + if region := t.TypeForKey("rg"); len(region) == 6 && region[2:] == "zzzz" { + if r, err := language.ParseRegion(region[:2]); err == nil { + tFull := t + t, _ = t.SetTypeForKey("rg", "") + // TODO: should we not consider "va" for the language tag? + var exact1, exact2 bool + tag.language, exact1 = FromTag(t) + t.RegionID = r + tag.locale, exact2 = FromTag(t) + if !exact1 || !exact2 { + tag.full = tFull + } + return tag + } + } + lang, ok := FromTag(t) + tag.language = lang + tag.locale = lang + if !ok { + tag.full = t + } + return tag +} + +// Tag returns an internal language Tag version of this tag. 
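+//
+// For example (a sketch; assumes both lookups resolve exactly): a Tag built
+// by Make from the internal tag "en-u-rg-gbzzzz" stores the language ID for
+// "en" and the locale ID for "en-GB", and Tag reassembles "en-u-rg-gbzzzz"
+// from those two IDs.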
+func (t Tag) Tag() language.Tag {
+	if t.full != nil {
+		return t.full.(language.Tag)
+	}
+	tag := t.language.Tag()
+	if t.language != t.locale {
+		loc := t.locale.Tag()
+		tag, _ = tag.SetTypeForKey("rg", strings.ToLower(loc.RegionID.String())+"zzzz")
+	}
+	return tag
+}
+
+// IsCompact reports whether this tag is fully defined in terms of ID.
+func (t *Tag) IsCompact() bool {
+	return t.full == nil
+}
+
+// MayHaveVariants reports whether a tag may have variants. If it returns false
+// it is guaranteed the tag does not have variants.
+func (t Tag) MayHaveVariants() bool {
+	return t.full != nil || int(t.language) >= len(coreTags)
+}
+
+// MayHaveExtensions reports whether a tag may have extensions. If it returns
+// false it is guaranteed the tag does not have them.
+func (t Tag) MayHaveExtensions() bool {
+	return t.full != nil ||
+		int(t.language) >= len(coreTags) ||
+		t.language != t.locale
+}
+
+// IsRoot returns true if t is equal to language "und".
+func (t Tag) IsRoot() bool {
+	if t.full != nil {
+		return t.full.IsRoot()
+	}
+	return t.language == _und
+}
+
+// Parent returns the CLDR parent of t. In CLDR, missing fields in data for a
+// specific language are substituted with fields from the parent language.
+// The parent for a language may change for newer versions of CLDR.
+func (t Tag) Parent() Tag {
+	if t.full != nil {
+		return Make(t.full.Parent())
+	}
+	if t.language != t.locale {
+		// Simulate stripping -u-rg-xxxxxx
+		return Tag{language: t.language, locale: t.language}
+	}
+	// TODO: use parent lookup table once cycle from internal package is
+	// removed. Probably by internalizing the table and declaring this fast
+	// enough.
+	// lang := compactID(internal.Parent(uint16(t.language)))
+	lang, _ := FromTag(t.language.Tag().Parent())
+	return Tag{language: lang, locale: lang}
+}
+
+// nextToken returns token t and the rest of the string.
+func nextToken(s string) (t, tail string) {
+	p := strings.Index(s[1:], "-")
+	if p == -1 {
+		return s[1:], ""
+	}
+	p++
+	return s[1:p], s[p:]
+}
+
+// LanguageID returns an index, where 0 <= index < NumCompactTags, for tags
+// for which data exists in the text repository. The index will change over time
+// and should not be stored in persistent storage. If t does not match a compact
+// index, exact will be false and the compact index will be returned for the
+// first match after repeatedly taking the Parent of t.
+func LanguageID(t Tag) (id ID, exact bool) {
+	return t.language, t.full == nil
+}
+
+// RegionalID returns the ID for the regional variant of this tag. This index is
+// used to indicate region-specific overrides, such as default currency, default
+// calendar and week data, default time cycle, and default measurement system
+// and unit preferences.
+//
+// For instance, the tag en-GB-u-rg-uszzzz specifies British English with US
+// settings for currency, number formatting, etc. The CompactIndex for this tag
+// will be that for en-GB, while the RegionalID will be the one corresponding to
+// en-US.
+func RegionalID(t Tag) (id ID, exact bool) {
+	return t.locale, t.full == nil
+}
+
+// LanguageTag returns t stripped of regional variant indicators.
+//
+// At the moment this means it is stripped of a regional and variant subtag "rg"
+// and "va" in the "u" extension.
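+//
+// For example, "en-GB-u-rg-uszzzz" (British English with US regional
+// settings) maps to plain "en-GB".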
+func (t Tag) LanguageTag() Tag { + if t.full == nil { + return Tag{language: t.language, locale: t.language} + } + tt := t.Tag() + tt.SetTypeForKey("rg", "") + tt.SetTypeForKey("va", "") + return Make(tt) +} + +// RegionalTag returns the regional variant of the tag. +// +// At the moment this means that the region is set from the regional subtag +// "rg" in the "u" extension. +func (t Tag) RegionalTag() Tag { + rt := Tag{language: t.locale, locale: t.locale} + if t.full == nil { + return rt + } + b := language.Builder{} + tag := t.Tag() + // tag, _ = tag.SetTypeForKey("rg", "") + b.SetTag(t.locale.Tag()) + if v := tag.Variants(); v != "" { + for _, v := range strings.Split(v, "-") { + b.AddVariant(v) + } + } + for _, e := range tag.Extensions() { + b.AddExt(e) + } + return t +} + +// FromTag reports closest matching ID for an internal language Tag. +func FromTag(t language.Tag) (id ID, exact bool) { + // TODO: perhaps give more frequent tags a lower index. + // TODO: we could make the indexes stable. This will excluded some + // possibilities for optimization, so don't do this quite yet. + exact = true + + b, s, r := t.Raw() + if t.HasString() { + if t.IsPrivateUse() { + // We have no entries for user-defined tags. + return 0, false + } + hasExtra := false + if t.HasVariants() { + if t.HasExtensions() { + build := language.Builder{} + build.SetTag(language.Tag{LangID: b, ScriptID: s, RegionID: r}) + build.AddVariant(t.Variants()) + exact = false + t = build.Make() + } + hasExtra = true + } else if _, ok := t.Extension('u'); ok { + // TODO: va may mean something else. Consider not considering it. + // Strip all but the 'va' entry. + old := t + variant := t.TypeForKey("va") + t = language.Tag{LangID: b, ScriptID: s, RegionID: r} + if variant != "" { + t, _ = t.SetTypeForKey("va", variant) + hasExtra = true + } + exact = old == t + } else { + exact = false + } + if hasExtra { + // We have some variants. + for i, s := range specialTags { + if s == t { + return ID(i + len(coreTags)), exact + } + } + exact = false + } + } + if x, ok := getCoreIndex(t); ok { + return x, exact + } + exact = false + if r != 0 && s == 0 { + // Deal with cases where an extra script is inserted for the region. + t, _ := t.Maximize() + if x, ok := getCoreIndex(t); ok { + return x, exact + } + } + for t = t.Parent(); t != root; t = t.Parent() { + // No variants specified: just compare core components. + // The key has the form lllssrrr, where l, s, and r are nibbles for + // respectively the langID, scriptID, and regionID. + if x, ok := getCoreIndex(t); ok { + return x, exact + } + } + return 0, exact +} + +var root = language.Tag{} diff --git a/vendor/golang.org/x/text/internal/language/compact/parents.go b/vendor/golang.org/x/text/internal/language/compact/parents.go new file mode 100644 index 00000000000..8d810723c75 --- /dev/null +++ b/vendor/golang.org/x/text/internal/language/compact/parents.go @@ -0,0 +1,120 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +package compact + +// parents maps a compact index of a tag to the compact index of the parent of +// this tag. 
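+// The root tag "und" has ID 0 and is mapped to itself, matching Parent's
+// contract of returning the root ID for the root.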
+var parents = []ID{ // 775 elements + // Entry 0 - 3F + 0x0000, 0x0000, 0x0001, 0x0001, 0x0000, 0x0004, 0x0000, 0x0006, + 0x0000, 0x0008, 0x0000, 0x000a, 0x000a, 0x000a, 0x000a, 0x000a, + 0x000a, 0x000a, 0x000a, 0x000a, 0x000a, 0x000a, 0x000a, 0x000a, + 0x000a, 0x000a, 0x000a, 0x000a, 0x000a, 0x000a, 0x000a, 0x000a, + 0x000a, 0x000a, 0x000a, 0x000a, 0x000a, 0x000a, 0x000a, 0x0000, + 0x0000, 0x0028, 0x0000, 0x002a, 0x0000, 0x002c, 0x0000, 0x0000, + 0x002f, 0x002e, 0x002e, 0x0000, 0x0033, 0x0000, 0x0035, 0x0000, + 0x0037, 0x0000, 0x0039, 0x0000, 0x003b, 0x0000, 0x0000, 0x003e, + // Entry 40 - 7F + 0x0000, 0x0040, 0x0040, 0x0000, 0x0043, 0x0043, 0x0000, 0x0046, + 0x0000, 0x0048, 0x0000, 0x0000, 0x004b, 0x004a, 0x004a, 0x0000, + 0x004f, 0x004f, 0x004f, 0x004f, 0x0000, 0x0054, 0x0054, 0x0000, + 0x0057, 0x0000, 0x0059, 0x0000, 0x005b, 0x0000, 0x005d, 0x005d, + 0x0000, 0x0060, 0x0000, 0x0062, 0x0000, 0x0064, 0x0000, 0x0066, + 0x0066, 0x0000, 0x0069, 0x0000, 0x006b, 0x006b, 0x006b, 0x006b, + 0x006b, 0x006b, 0x006b, 0x0000, 0x0073, 0x0000, 0x0075, 0x0000, + 0x0077, 0x0000, 0x0000, 0x007a, 0x0000, 0x007c, 0x0000, 0x007e, + // Entry 80 - BF + 0x0000, 0x0080, 0x0080, 0x0000, 0x0083, 0x0083, 0x0000, 0x0086, + 0x0087, 0x0087, 0x0087, 0x0086, 0x0088, 0x0087, 0x0087, 0x0087, + 0x0086, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0088, + 0x0087, 0x0087, 0x0087, 0x0087, 0x0088, 0x0087, 0x0088, 0x0087, + 0x0087, 0x0088, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, + 0x0087, 0x0087, 0x0087, 0x0086, 0x0087, 0x0087, 0x0087, 0x0087, + 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, + 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0086, 0x0087, 0x0086, + // Entry C0 - FF + 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, + 0x0088, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, + 0x0086, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0088, 0x0087, + 0x0087, 0x0088, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, + 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0086, 0x0086, 0x0087, + 0x0087, 0x0086, 0x0087, 0x0087, 0x0087, 0x0087, 0x0087, 0x0000, + 0x00ef, 0x0000, 0x00f1, 0x00f2, 0x00f2, 0x00f2, 0x00f2, 0x00f2, + 0x00f2, 0x00f2, 0x00f2, 0x00f2, 0x00f1, 0x00f2, 0x00f1, 0x00f1, + // Entry 100 - 13F + 0x00f2, 0x00f2, 0x00f1, 0x00f2, 0x00f2, 0x00f2, 0x00f2, 0x00f1, + 0x00f2, 0x00f2, 0x00f2, 0x00f2, 0x00f2, 0x00f2, 0x0000, 0x010e, + 0x0000, 0x0110, 0x0000, 0x0112, 0x0000, 0x0114, 0x0114, 0x0000, + 0x0117, 0x0117, 0x0117, 0x0117, 0x0000, 0x011c, 0x0000, 0x011e, + 0x0000, 0x0120, 0x0120, 0x0000, 0x0123, 0x0123, 0x0123, 0x0123, + 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, + 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, + 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, + // Entry 140 - 17F + 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, + 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, 0x0123, + 0x0123, 0x0123, 0x0000, 0x0152, 0x0000, 0x0154, 0x0000, 0x0156, + 0x0000, 0x0158, 0x0000, 0x015a, 0x0000, 0x015c, 0x015c, 0x015c, + 0x0000, 0x0160, 0x0000, 0x0000, 0x0163, 0x0000, 0x0165, 0x0000, + 0x0167, 0x0167, 0x0167, 0x0000, 0x016b, 0x0000, 0x016d, 0x0000, + 0x016f, 0x0000, 0x0171, 0x0171, 0x0000, 0x0174, 0x0000, 0x0176, + 0x0000, 0x0178, 0x0000, 0x017a, 0x0000, 0x017c, 0x0000, 0x017e, + // Entry 180 - 1BF + 0x0000, 0x0000, 0x0000, 0x0182, 0x0000, 0x0184, 0x0184, 0x0184, + 0x0184, 0x0000, 0x0000, 0x0000, 0x018b, 0x0000, 0x0000, 0x018e, + 0x0000, 0x0000, 0x0191, 0x0000, 0x0000, 0x0000, 0x0195, 0x0000, + 0x0197, 
0x0000, 0x0000, 0x019a, 0x0000, 0x0000, 0x019d, 0x0000, + 0x019f, 0x0000, 0x01a1, 0x0000, 0x01a3, 0x0000, 0x01a5, 0x0000, + 0x01a7, 0x0000, 0x01a9, 0x0000, 0x01ab, 0x0000, 0x01ad, 0x0000, + 0x01af, 0x0000, 0x01b1, 0x01b1, 0x0000, 0x01b4, 0x0000, 0x01b6, + 0x0000, 0x01b8, 0x0000, 0x01ba, 0x0000, 0x01bc, 0x0000, 0x0000, + // Entry 1C0 - 1FF + 0x01bf, 0x0000, 0x01c1, 0x0000, 0x01c3, 0x0000, 0x01c5, 0x0000, + 0x01c7, 0x0000, 0x01c9, 0x0000, 0x01cb, 0x01cb, 0x01cb, 0x01cb, + 0x0000, 0x01d0, 0x0000, 0x01d2, 0x01d2, 0x0000, 0x01d5, 0x0000, + 0x01d7, 0x0000, 0x01d9, 0x0000, 0x01db, 0x0000, 0x01dd, 0x0000, + 0x01df, 0x01df, 0x0000, 0x01e2, 0x0000, 0x01e4, 0x0000, 0x01e6, + 0x0000, 0x01e8, 0x0000, 0x01ea, 0x0000, 0x01ec, 0x0000, 0x01ee, + 0x0000, 0x01f0, 0x0000, 0x0000, 0x01f3, 0x0000, 0x01f5, 0x01f5, + 0x01f5, 0x0000, 0x01f9, 0x0000, 0x01fb, 0x0000, 0x01fd, 0x0000, + // Entry 200 - 23F + 0x01ff, 0x0000, 0x0000, 0x0202, 0x0000, 0x0204, 0x0204, 0x0000, + 0x0207, 0x0000, 0x0209, 0x0209, 0x0000, 0x020c, 0x020c, 0x0000, + 0x020f, 0x020f, 0x020f, 0x020f, 0x020f, 0x020f, 0x020f, 0x0000, + 0x0217, 0x0000, 0x0219, 0x0000, 0x021b, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0221, 0x0000, 0x0000, 0x0224, 0x0000, 0x0226, + 0x0226, 0x0000, 0x0229, 0x0000, 0x022b, 0x022b, 0x0000, 0x0000, + 0x022f, 0x022e, 0x022e, 0x0000, 0x0000, 0x0234, 0x0000, 0x0236, + 0x0000, 0x0238, 0x0000, 0x0244, 0x023a, 0x0244, 0x0244, 0x0244, + // Entry 240 - 27F + 0x0244, 0x0244, 0x0244, 0x0244, 0x023a, 0x0244, 0x0244, 0x0000, + 0x0247, 0x0247, 0x0247, 0x0000, 0x024b, 0x0000, 0x024d, 0x0000, + 0x024f, 0x024f, 0x0000, 0x0252, 0x0000, 0x0254, 0x0254, 0x0254, + 0x0254, 0x0254, 0x0254, 0x0000, 0x025b, 0x0000, 0x025d, 0x0000, + 0x025f, 0x0000, 0x0261, 0x0000, 0x0263, 0x0000, 0x0265, 0x0000, + 0x0000, 0x0268, 0x0268, 0x0268, 0x0000, 0x026c, 0x0000, 0x026e, + 0x0000, 0x0270, 0x0000, 0x0000, 0x0000, 0x0274, 0x0273, 0x0273, + 0x0000, 0x0278, 0x0000, 0x027a, 0x0000, 0x027c, 0x0000, 0x0000, + // Entry 280 - 2BF + 0x0000, 0x0000, 0x0281, 0x0000, 0x0000, 0x0284, 0x0000, 0x0286, + 0x0286, 0x0286, 0x0286, 0x0000, 0x028b, 0x028b, 0x028b, 0x0000, + 0x028f, 0x028f, 0x028f, 0x028f, 0x028f, 0x0000, 0x0295, 0x0295, + 0x0295, 0x0295, 0x0000, 0x0000, 0x0000, 0x0000, 0x029d, 0x029d, + 0x029d, 0x0000, 0x02a1, 0x02a1, 0x02a1, 0x02a1, 0x0000, 0x0000, + 0x02a7, 0x02a7, 0x02a7, 0x02a7, 0x0000, 0x02ac, 0x0000, 0x02ae, + 0x02ae, 0x0000, 0x02b1, 0x0000, 0x02b3, 0x0000, 0x02b5, 0x02b5, + 0x0000, 0x0000, 0x02b9, 0x0000, 0x0000, 0x0000, 0x02bd, 0x0000, + // Entry 2C0 - 2FF + 0x02bf, 0x02bf, 0x0000, 0x0000, 0x02c3, 0x0000, 0x02c5, 0x0000, + 0x02c7, 0x0000, 0x02c9, 0x0000, 0x02cb, 0x0000, 0x02cd, 0x02cd, + 0x0000, 0x0000, 0x02d1, 0x0000, 0x02d3, 0x02d0, 0x02d0, 0x0000, + 0x0000, 0x02d8, 0x02d7, 0x02d7, 0x0000, 0x0000, 0x02dd, 0x0000, + 0x02df, 0x0000, 0x02e1, 0x0000, 0x0000, 0x02e4, 0x0000, 0x02e6, + 0x0000, 0x0000, 0x02e9, 0x0000, 0x02eb, 0x0000, 0x02ed, 0x0000, + 0x02ef, 0x02ef, 0x0000, 0x0000, 0x02f3, 0x02f2, 0x02f2, 0x0000, + 0x02f7, 0x0000, 0x02f9, 0x02f9, 0x02f9, 0x02f9, 0x02f9, 0x0000, + // Entry 300 - 33F + 0x02ff, 0x0300, 0x02ff, 0x0000, 0x0303, 0x0051, 0x00e6, +} // Size: 1574 bytes + +// Total table size 1574 bytes (1KiB); checksum: 895AAF0B diff --git a/vendor/golang.org/x/text/internal/language/compact/tables.go b/vendor/golang.org/x/text/internal/language/compact/tables.go new file mode 100644 index 00000000000..a09ed198a5d --- /dev/null +++ b/vendor/golang.org/x/text/internal/language/compact/tables.go @@ -0,0 +1,1015 @@ +// Code generated by running "go 
generate" in golang.org/x/text. DO NOT EDIT. + +package compact + +import "golang.org/x/text/internal/language" + +// CLDRVersion is the CLDR version from which the tables in this package are derived. +const CLDRVersion = "32" + +// NumCompactTags is the number of common tags. The maximum tag is +// NumCompactTags-1. +const NumCompactTags = 775 +const ( + undIndex ID = 0 + afIndex ID = 1 + afNAIndex ID = 2 + afZAIndex ID = 3 + agqIndex ID = 4 + agqCMIndex ID = 5 + akIndex ID = 6 + akGHIndex ID = 7 + amIndex ID = 8 + amETIndex ID = 9 + arIndex ID = 10 + ar001Index ID = 11 + arAEIndex ID = 12 + arBHIndex ID = 13 + arDJIndex ID = 14 + arDZIndex ID = 15 + arEGIndex ID = 16 + arEHIndex ID = 17 + arERIndex ID = 18 + arILIndex ID = 19 + arIQIndex ID = 20 + arJOIndex ID = 21 + arKMIndex ID = 22 + arKWIndex ID = 23 + arLBIndex ID = 24 + arLYIndex ID = 25 + arMAIndex ID = 26 + arMRIndex ID = 27 + arOMIndex ID = 28 + arPSIndex ID = 29 + arQAIndex ID = 30 + arSAIndex ID = 31 + arSDIndex ID = 32 + arSOIndex ID = 33 + arSSIndex ID = 34 + arSYIndex ID = 35 + arTDIndex ID = 36 + arTNIndex ID = 37 + arYEIndex ID = 38 + arsIndex ID = 39 + asIndex ID = 40 + asINIndex ID = 41 + asaIndex ID = 42 + asaTZIndex ID = 43 + astIndex ID = 44 + astESIndex ID = 45 + azIndex ID = 46 + azCyrlIndex ID = 47 + azCyrlAZIndex ID = 48 + azLatnIndex ID = 49 + azLatnAZIndex ID = 50 + basIndex ID = 51 + basCMIndex ID = 52 + beIndex ID = 53 + beBYIndex ID = 54 + bemIndex ID = 55 + bemZMIndex ID = 56 + bezIndex ID = 57 + bezTZIndex ID = 58 + bgIndex ID = 59 + bgBGIndex ID = 60 + bhIndex ID = 61 + bmIndex ID = 62 + bmMLIndex ID = 63 + bnIndex ID = 64 + bnBDIndex ID = 65 + bnINIndex ID = 66 + boIndex ID = 67 + boCNIndex ID = 68 + boINIndex ID = 69 + brIndex ID = 70 + brFRIndex ID = 71 + brxIndex ID = 72 + brxINIndex ID = 73 + bsIndex ID = 74 + bsCyrlIndex ID = 75 + bsCyrlBAIndex ID = 76 + bsLatnIndex ID = 77 + bsLatnBAIndex ID = 78 + caIndex ID = 79 + caADIndex ID = 80 + caESIndex ID = 81 + caFRIndex ID = 82 + caITIndex ID = 83 + ccpIndex ID = 84 + ccpBDIndex ID = 85 + ccpINIndex ID = 86 + ceIndex ID = 87 + ceRUIndex ID = 88 + cggIndex ID = 89 + cggUGIndex ID = 90 + chrIndex ID = 91 + chrUSIndex ID = 92 + ckbIndex ID = 93 + ckbIQIndex ID = 94 + ckbIRIndex ID = 95 + csIndex ID = 96 + csCZIndex ID = 97 + cuIndex ID = 98 + cuRUIndex ID = 99 + cyIndex ID = 100 + cyGBIndex ID = 101 + daIndex ID = 102 + daDKIndex ID = 103 + daGLIndex ID = 104 + davIndex ID = 105 + davKEIndex ID = 106 + deIndex ID = 107 + deATIndex ID = 108 + deBEIndex ID = 109 + deCHIndex ID = 110 + deDEIndex ID = 111 + deITIndex ID = 112 + deLIIndex ID = 113 + deLUIndex ID = 114 + djeIndex ID = 115 + djeNEIndex ID = 116 + dsbIndex ID = 117 + dsbDEIndex ID = 118 + duaIndex ID = 119 + duaCMIndex ID = 120 + dvIndex ID = 121 + dyoIndex ID = 122 + dyoSNIndex ID = 123 + dzIndex ID = 124 + dzBTIndex ID = 125 + ebuIndex ID = 126 + ebuKEIndex ID = 127 + eeIndex ID = 128 + eeGHIndex ID = 129 + eeTGIndex ID = 130 + elIndex ID = 131 + elCYIndex ID = 132 + elGRIndex ID = 133 + enIndex ID = 134 + en001Index ID = 135 + en150Index ID = 136 + enAGIndex ID = 137 + enAIIndex ID = 138 + enASIndex ID = 139 + enATIndex ID = 140 + enAUIndex ID = 141 + enBBIndex ID = 142 + enBEIndex ID = 143 + enBIIndex ID = 144 + enBMIndex ID = 145 + enBSIndex ID = 146 + enBWIndex ID = 147 + enBZIndex ID = 148 + enCAIndex ID = 149 + enCCIndex ID = 150 + enCHIndex ID = 151 + enCKIndex ID = 152 + enCMIndex ID = 153 + enCXIndex ID = 154 + enCYIndex ID = 155 + enDEIndex ID = 156 + enDGIndex ID = 157 + enDKIndex ID 
= 158 + enDMIndex ID = 159 + enERIndex ID = 160 + enFIIndex ID = 161 + enFJIndex ID = 162 + enFKIndex ID = 163 + enFMIndex ID = 164 + enGBIndex ID = 165 + enGDIndex ID = 166 + enGGIndex ID = 167 + enGHIndex ID = 168 + enGIIndex ID = 169 + enGMIndex ID = 170 + enGUIndex ID = 171 + enGYIndex ID = 172 + enHKIndex ID = 173 + enIEIndex ID = 174 + enILIndex ID = 175 + enIMIndex ID = 176 + enINIndex ID = 177 + enIOIndex ID = 178 + enJEIndex ID = 179 + enJMIndex ID = 180 + enKEIndex ID = 181 + enKIIndex ID = 182 + enKNIndex ID = 183 + enKYIndex ID = 184 + enLCIndex ID = 185 + enLRIndex ID = 186 + enLSIndex ID = 187 + enMGIndex ID = 188 + enMHIndex ID = 189 + enMOIndex ID = 190 + enMPIndex ID = 191 + enMSIndex ID = 192 + enMTIndex ID = 193 + enMUIndex ID = 194 + enMWIndex ID = 195 + enMYIndex ID = 196 + enNAIndex ID = 197 + enNFIndex ID = 198 + enNGIndex ID = 199 + enNLIndex ID = 200 + enNRIndex ID = 201 + enNUIndex ID = 202 + enNZIndex ID = 203 + enPGIndex ID = 204 + enPHIndex ID = 205 + enPKIndex ID = 206 + enPNIndex ID = 207 + enPRIndex ID = 208 + enPWIndex ID = 209 + enRWIndex ID = 210 + enSBIndex ID = 211 + enSCIndex ID = 212 + enSDIndex ID = 213 + enSEIndex ID = 214 + enSGIndex ID = 215 + enSHIndex ID = 216 + enSIIndex ID = 217 + enSLIndex ID = 218 + enSSIndex ID = 219 + enSXIndex ID = 220 + enSZIndex ID = 221 + enTCIndex ID = 222 + enTKIndex ID = 223 + enTOIndex ID = 224 + enTTIndex ID = 225 + enTVIndex ID = 226 + enTZIndex ID = 227 + enUGIndex ID = 228 + enUMIndex ID = 229 + enUSIndex ID = 230 + enVCIndex ID = 231 + enVGIndex ID = 232 + enVIIndex ID = 233 + enVUIndex ID = 234 + enWSIndex ID = 235 + enZAIndex ID = 236 + enZMIndex ID = 237 + enZWIndex ID = 238 + eoIndex ID = 239 + eo001Index ID = 240 + esIndex ID = 241 + es419Index ID = 242 + esARIndex ID = 243 + esBOIndex ID = 244 + esBRIndex ID = 245 + esBZIndex ID = 246 + esCLIndex ID = 247 + esCOIndex ID = 248 + esCRIndex ID = 249 + esCUIndex ID = 250 + esDOIndex ID = 251 + esEAIndex ID = 252 + esECIndex ID = 253 + esESIndex ID = 254 + esGQIndex ID = 255 + esGTIndex ID = 256 + esHNIndex ID = 257 + esICIndex ID = 258 + esMXIndex ID = 259 + esNIIndex ID = 260 + esPAIndex ID = 261 + esPEIndex ID = 262 + esPHIndex ID = 263 + esPRIndex ID = 264 + esPYIndex ID = 265 + esSVIndex ID = 266 + esUSIndex ID = 267 + esUYIndex ID = 268 + esVEIndex ID = 269 + etIndex ID = 270 + etEEIndex ID = 271 + euIndex ID = 272 + euESIndex ID = 273 + ewoIndex ID = 274 + ewoCMIndex ID = 275 + faIndex ID = 276 + faAFIndex ID = 277 + faIRIndex ID = 278 + ffIndex ID = 279 + ffCMIndex ID = 280 + ffGNIndex ID = 281 + ffMRIndex ID = 282 + ffSNIndex ID = 283 + fiIndex ID = 284 + fiFIIndex ID = 285 + filIndex ID = 286 + filPHIndex ID = 287 + foIndex ID = 288 + foDKIndex ID = 289 + foFOIndex ID = 290 + frIndex ID = 291 + frBEIndex ID = 292 + frBFIndex ID = 293 + frBIIndex ID = 294 + frBJIndex ID = 295 + frBLIndex ID = 296 + frCAIndex ID = 297 + frCDIndex ID = 298 + frCFIndex ID = 299 + frCGIndex ID = 300 + frCHIndex ID = 301 + frCIIndex ID = 302 + frCMIndex ID = 303 + frDJIndex ID = 304 + frDZIndex ID = 305 + frFRIndex ID = 306 + frGAIndex ID = 307 + frGFIndex ID = 308 + frGNIndex ID = 309 + frGPIndex ID = 310 + frGQIndex ID = 311 + frHTIndex ID = 312 + frKMIndex ID = 313 + frLUIndex ID = 314 + frMAIndex ID = 315 + frMCIndex ID = 316 + frMFIndex ID = 317 + frMGIndex ID = 318 + frMLIndex ID = 319 + frMQIndex ID = 320 + frMRIndex ID = 321 + frMUIndex ID = 322 + frNCIndex ID = 323 + frNEIndex ID = 324 + frPFIndex ID = 325 + frPMIndex ID = 326 + frREIndex ID = 327 + frRWIndex ID 
= 328 + frSCIndex ID = 329 + frSNIndex ID = 330 + frSYIndex ID = 331 + frTDIndex ID = 332 + frTGIndex ID = 333 + frTNIndex ID = 334 + frVUIndex ID = 335 + frWFIndex ID = 336 + frYTIndex ID = 337 + furIndex ID = 338 + furITIndex ID = 339 + fyIndex ID = 340 + fyNLIndex ID = 341 + gaIndex ID = 342 + gaIEIndex ID = 343 + gdIndex ID = 344 + gdGBIndex ID = 345 + glIndex ID = 346 + glESIndex ID = 347 + gswIndex ID = 348 + gswCHIndex ID = 349 + gswFRIndex ID = 350 + gswLIIndex ID = 351 + guIndex ID = 352 + guINIndex ID = 353 + guwIndex ID = 354 + guzIndex ID = 355 + guzKEIndex ID = 356 + gvIndex ID = 357 + gvIMIndex ID = 358 + haIndex ID = 359 + haGHIndex ID = 360 + haNEIndex ID = 361 + haNGIndex ID = 362 + hawIndex ID = 363 + hawUSIndex ID = 364 + heIndex ID = 365 + heILIndex ID = 366 + hiIndex ID = 367 + hiINIndex ID = 368 + hrIndex ID = 369 + hrBAIndex ID = 370 + hrHRIndex ID = 371 + hsbIndex ID = 372 + hsbDEIndex ID = 373 + huIndex ID = 374 + huHUIndex ID = 375 + hyIndex ID = 376 + hyAMIndex ID = 377 + idIndex ID = 378 + idIDIndex ID = 379 + igIndex ID = 380 + igNGIndex ID = 381 + iiIndex ID = 382 + iiCNIndex ID = 383 + inIndex ID = 384 + ioIndex ID = 385 + isIndex ID = 386 + isISIndex ID = 387 + itIndex ID = 388 + itCHIndex ID = 389 + itITIndex ID = 390 + itSMIndex ID = 391 + itVAIndex ID = 392 + iuIndex ID = 393 + iwIndex ID = 394 + jaIndex ID = 395 + jaJPIndex ID = 396 + jboIndex ID = 397 + jgoIndex ID = 398 + jgoCMIndex ID = 399 + jiIndex ID = 400 + jmcIndex ID = 401 + jmcTZIndex ID = 402 + jvIndex ID = 403 + jwIndex ID = 404 + kaIndex ID = 405 + kaGEIndex ID = 406 + kabIndex ID = 407 + kabDZIndex ID = 408 + kajIndex ID = 409 + kamIndex ID = 410 + kamKEIndex ID = 411 + kcgIndex ID = 412 + kdeIndex ID = 413 + kdeTZIndex ID = 414 + keaIndex ID = 415 + keaCVIndex ID = 416 + khqIndex ID = 417 + khqMLIndex ID = 418 + kiIndex ID = 419 + kiKEIndex ID = 420 + kkIndex ID = 421 + kkKZIndex ID = 422 + kkjIndex ID = 423 + kkjCMIndex ID = 424 + klIndex ID = 425 + klGLIndex ID = 426 + klnIndex ID = 427 + klnKEIndex ID = 428 + kmIndex ID = 429 + kmKHIndex ID = 430 + knIndex ID = 431 + knINIndex ID = 432 + koIndex ID = 433 + koKPIndex ID = 434 + koKRIndex ID = 435 + kokIndex ID = 436 + kokINIndex ID = 437 + ksIndex ID = 438 + ksINIndex ID = 439 + ksbIndex ID = 440 + ksbTZIndex ID = 441 + ksfIndex ID = 442 + ksfCMIndex ID = 443 + kshIndex ID = 444 + kshDEIndex ID = 445 + kuIndex ID = 446 + kwIndex ID = 447 + kwGBIndex ID = 448 + kyIndex ID = 449 + kyKGIndex ID = 450 + lagIndex ID = 451 + lagTZIndex ID = 452 + lbIndex ID = 453 + lbLUIndex ID = 454 + lgIndex ID = 455 + lgUGIndex ID = 456 + lktIndex ID = 457 + lktUSIndex ID = 458 + lnIndex ID = 459 + lnAOIndex ID = 460 + lnCDIndex ID = 461 + lnCFIndex ID = 462 + lnCGIndex ID = 463 + loIndex ID = 464 + loLAIndex ID = 465 + lrcIndex ID = 466 + lrcIQIndex ID = 467 + lrcIRIndex ID = 468 + ltIndex ID = 469 + ltLTIndex ID = 470 + luIndex ID = 471 + luCDIndex ID = 472 + luoIndex ID = 473 + luoKEIndex ID = 474 + luyIndex ID = 475 + luyKEIndex ID = 476 + lvIndex ID = 477 + lvLVIndex ID = 478 + masIndex ID = 479 + masKEIndex ID = 480 + masTZIndex ID = 481 + merIndex ID = 482 + merKEIndex ID = 483 + mfeIndex ID = 484 + mfeMUIndex ID = 485 + mgIndex ID = 486 + mgMGIndex ID = 487 + mghIndex ID = 488 + mghMZIndex ID = 489 + mgoIndex ID = 490 + mgoCMIndex ID = 491 + mkIndex ID = 492 + mkMKIndex ID = 493 + mlIndex ID = 494 + mlINIndex ID = 495 + mnIndex ID = 496 + mnMNIndex ID = 497 + moIndex ID = 498 + mrIndex ID = 499 + mrINIndex ID = 500 + msIndex ID = 501 + msBNIndex ID 
= 502 + msMYIndex ID = 503 + msSGIndex ID = 504 + mtIndex ID = 505 + mtMTIndex ID = 506 + muaIndex ID = 507 + muaCMIndex ID = 508 + myIndex ID = 509 + myMMIndex ID = 510 + mznIndex ID = 511 + mznIRIndex ID = 512 + nahIndex ID = 513 + naqIndex ID = 514 + naqNAIndex ID = 515 + nbIndex ID = 516 + nbNOIndex ID = 517 + nbSJIndex ID = 518 + ndIndex ID = 519 + ndZWIndex ID = 520 + ndsIndex ID = 521 + ndsDEIndex ID = 522 + ndsNLIndex ID = 523 + neIndex ID = 524 + neINIndex ID = 525 + neNPIndex ID = 526 + nlIndex ID = 527 + nlAWIndex ID = 528 + nlBEIndex ID = 529 + nlBQIndex ID = 530 + nlCWIndex ID = 531 + nlNLIndex ID = 532 + nlSRIndex ID = 533 + nlSXIndex ID = 534 + nmgIndex ID = 535 + nmgCMIndex ID = 536 + nnIndex ID = 537 + nnNOIndex ID = 538 + nnhIndex ID = 539 + nnhCMIndex ID = 540 + noIndex ID = 541 + nqoIndex ID = 542 + nrIndex ID = 543 + nsoIndex ID = 544 + nusIndex ID = 545 + nusSSIndex ID = 546 + nyIndex ID = 547 + nynIndex ID = 548 + nynUGIndex ID = 549 + omIndex ID = 550 + omETIndex ID = 551 + omKEIndex ID = 552 + orIndex ID = 553 + orINIndex ID = 554 + osIndex ID = 555 + osGEIndex ID = 556 + osRUIndex ID = 557 + paIndex ID = 558 + paArabIndex ID = 559 + paArabPKIndex ID = 560 + paGuruIndex ID = 561 + paGuruINIndex ID = 562 + papIndex ID = 563 + plIndex ID = 564 + plPLIndex ID = 565 + prgIndex ID = 566 + prg001Index ID = 567 + psIndex ID = 568 + psAFIndex ID = 569 + ptIndex ID = 570 + ptAOIndex ID = 571 + ptBRIndex ID = 572 + ptCHIndex ID = 573 + ptCVIndex ID = 574 + ptGQIndex ID = 575 + ptGWIndex ID = 576 + ptLUIndex ID = 577 + ptMOIndex ID = 578 + ptMZIndex ID = 579 + ptPTIndex ID = 580 + ptSTIndex ID = 581 + ptTLIndex ID = 582 + quIndex ID = 583 + quBOIndex ID = 584 + quECIndex ID = 585 + quPEIndex ID = 586 + rmIndex ID = 587 + rmCHIndex ID = 588 + rnIndex ID = 589 + rnBIIndex ID = 590 + roIndex ID = 591 + roMDIndex ID = 592 + roROIndex ID = 593 + rofIndex ID = 594 + rofTZIndex ID = 595 + ruIndex ID = 596 + ruBYIndex ID = 597 + ruKGIndex ID = 598 + ruKZIndex ID = 599 + ruMDIndex ID = 600 + ruRUIndex ID = 601 + ruUAIndex ID = 602 + rwIndex ID = 603 + rwRWIndex ID = 604 + rwkIndex ID = 605 + rwkTZIndex ID = 606 + sahIndex ID = 607 + sahRUIndex ID = 608 + saqIndex ID = 609 + saqKEIndex ID = 610 + sbpIndex ID = 611 + sbpTZIndex ID = 612 + sdIndex ID = 613 + sdPKIndex ID = 614 + sdhIndex ID = 615 + seIndex ID = 616 + seFIIndex ID = 617 + seNOIndex ID = 618 + seSEIndex ID = 619 + sehIndex ID = 620 + sehMZIndex ID = 621 + sesIndex ID = 622 + sesMLIndex ID = 623 + sgIndex ID = 624 + sgCFIndex ID = 625 + shIndex ID = 626 + shiIndex ID = 627 + shiLatnIndex ID = 628 + shiLatnMAIndex ID = 629 + shiTfngIndex ID = 630 + shiTfngMAIndex ID = 631 + siIndex ID = 632 + siLKIndex ID = 633 + skIndex ID = 634 + skSKIndex ID = 635 + slIndex ID = 636 + slSIIndex ID = 637 + smaIndex ID = 638 + smiIndex ID = 639 + smjIndex ID = 640 + smnIndex ID = 641 + smnFIIndex ID = 642 + smsIndex ID = 643 + snIndex ID = 644 + snZWIndex ID = 645 + soIndex ID = 646 + soDJIndex ID = 647 + soETIndex ID = 648 + soKEIndex ID = 649 + soSOIndex ID = 650 + sqIndex ID = 651 + sqALIndex ID = 652 + sqMKIndex ID = 653 + sqXKIndex ID = 654 + srIndex ID = 655 + srCyrlIndex ID = 656 + srCyrlBAIndex ID = 657 + srCyrlMEIndex ID = 658 + srCyrlRSIndex ID = 659 + srCyrlXKIndex ID = 660 + srLatnIndex ID = 661 + srLatnBAIndex ID = 662 + srLatnMEIndex ID = 663 + srLatnRSIndex ID = 664 + srLatnXKIndex ID = 665 + ssIndex ID = 666 + ssyIndex ID = 667 + stIndex ID = 668 + svIndex ID = 669 + svAXIndex ID = 670 + svFIIndex ID = 671 + svSEIndex ID = 
672 + swIndex ID = 673 + swCDIndex ID = 674 + swKEIndex ID = 675 + swTZIndex ID = 676 + swUGIndex ID = 677 + syrIndex ID = 678 + taIndex ID = 679 + taINIndex ID = 680 + taLKIndex ID = 681 + taMYIndex ID = 682 + taSGIndex ID = 683 + teIndex ID = 684 + teINIndex ID = 685 + teoIndex ID = 686 + teoKEIndex ID = 687 + teoUGIndex ID = 688 + tgIndex ID = 689 + tgTJIndex ID = 690 + thIndex ID = 691 + thTHIndex ID = 692 + tiIndex ID = 693 + tiERIndex ID = 694 + tiETIndex ID = 695 + tigIndex ID = 696 + tkIndex ID = 697 + tkTMIndex ID = 698 + tlIndex ID = 699 + tnIndex ID = 700 + toIndex ID = 701 + toTOIndex ID = 702 + trIndex ID = 703 + trCYIndex ID = 704 + trTRIndex ID = 705 + tsIndex ID = 706 + ttIndex ID = 707 + ttRUIndex ID = 708 + twqIndex ID = 709 + twqNEIndex ID = 710 + tzmIndex ID = 711 + tzmMAIndex ID = 712 + ugIndex ID = 713 + ugCNIndex ID = 714 + ukIndex ID = 715 + ukUAIndex ID = 716 + urIndex ID = 717 + urINIndex ID = 718 + urPKIndex ID = 719 + uzIndex ID = 720 + uzArabIndex ID = 721 + uzArabAFIndex ID = 722 + uzCyrlIndex ID = 723 + uzCyrlUZIndex ID = 724 + uzLatnIndex ID = 725 + uzLatnUZIndex ID = 726 + vaiIndex ID = 727 + vaiLatnIndex ID = 728 + vaiLatnLRIndex ID = 729 + vaiVaiiIndex ID = 730 + vaiVaiiLRIndex ID = 731 + veIndex ID = 732 + viIndex ID = 733 + viVNIndex ID = 734 + voIndex ID = 735 + vo001Index ID = 736 + vunIndex ID = 737 + vunTZIndex ID = 738 + waIndex ID = 739 + waeIndex ID = 740 + waeCHIndex ID = 741 + woIndex ID = 742 + woSNIndex ID = 743 + xhIndex ID = 744 + xogIndex ID = 745 + xogUGIndex ID = 746 + yavIndex ID = 747 + yavCMIndex ID = 748 + yiIndex ID = 749 + yi001Index ID = 750 + yoIndex ID = 751 + yoBJIndex ID = 752 + yoNGIndex ID = 753 + yueIndex ID = 754 + yueHansIndex ID = 755 + yueHansCNIndex ID = 756 + yueHantIndex ID = 757 + yueHantHKIndex ID = 758 + zghIndex ID = 759 + zghMAIndex ID = 760 + zhIndex ID = 761 + zhHansIndex ID = 762 + zhHansCNIndex ID = 763 + zhHansHKIndex ID = 764 + zhHansMOIndex ID = 765 + zhHansSGIndex ID = 766 + zhHantIndex ID = 767 + zhHantHKIndex ID = 768 + zhHantMOIndex ID = 769 + zhHantTWIndex ID = 770 + zuIndex ID = 771 + zuZAIndex ID = 772 + caESvalenciaIndex ID = 773 + enUSuvaposixIndex ID = 774 +) + +var coreTags = []language.CompactCoreInfo{ // 773 elements + // Entry 0 - 1F + 0x00000000, 0x01600000, 0x016000d3, 0x01600162, + 0x01c00000, 0x01c00052, 0x02100000, 0x02100081, + 0x02700000, 0x02700070, 0x03a00000, 0x03a00001, + 0x03a00023, 0x03a00039, 0x03a00063, 0x03a00068, + 0x03a0006c, 0x03a0006d, 0x03a0006e, 0x03a00098, + 0x03a0009c, 0x03a000a2, 0x03a000a9, 0x03a000ad, + 0x03a000b1, 0x03a000ba, 0x03a000bb, 0x03a000ca, + 0x03a000e2, 0x03a000ee, 0x03a000f4, 0x03a00109, + // Entry 20 - 3F + 0x03a0010c, 0x03a00116, 0x03a00118, 0x03a0011d, + 0x03a00121, 0x03a00129, 0x03a0015f, 0x04000000, + 0x04300000, 0x0430009a, 0x04400000, 0x04400130, + 0x04800000, 0x0480006f, 0x05800000, 0x05820000, + 0x05820032, 0x0585b000, 0x0585b032, 0x05e00000, + 0x05e00052, 0x07100000, 0x07100047, 0x07500000, + 0x07500163, 0x07900000, 0x07900130, 0x07e00000, + 0x07e00038, 0x08200000, 0x0a000000, 0x0a0000c4, + // Entry 40 - 5F + 0x0a500000, 0x0a500035, 0x0a50009a, 0x0a900000, + 0x0a900053, 0x0a90009a, 0x0b200000, 0x0b200079, + 0x0b500000, 0x0b50009a, 0x0b700000, 0x0b720000, + 0x0b720033, 0x0b75b000, 0x0b75b033, 0x0d700000, + 0x0d700022, 0x0d70006f, 0x0d700079, 0x0d70009f, + 0x0db00000, 0x0db00035, 0x0db0009a, 0x0dc00000, + 0x0dc00107, 0x0df00000, 0x0df00132, 0x0e500000, + 0x0e500136, 0x0e900000, 0x0e90009c, 0x0e90009d, + // Entry 60 - 7F + 0x0fa00000, 0x0fa0005f, 
0x0fe00000, 0x0fe00107, + 0x10000000, 0x1000007c, 0x10100000, 0x10100064, + 0x10100083, 0x10800000, 0x108000a5, 0x10d00000, + 0x10d0002e, 0x10d00036, 0x10d0004e, 0x10d00061, + 0x10d0009f, 0x10d000b3, 0x10d000b8, 0x11700000, + 0x117000d5, 0x11f00000, 0x11f00061, 0x12400000, + 0x12400052, 0x12800000, 0x12b00000, 0x12b00115, + 0x12d00000, 0x12d00043, 0x12f00000, 0x12f000a5, + // Entry 80 - 9F + 0x13000000, 0x13000081, 0x13000123, 0x13600000, + 0x1360005e, 0x13600088, 0x13900000, 0x13900001, + 0x1390001a, 0x13900025, 0x13900026, 0x1390002d, + 0x1390002e, 0x1390002f, 0x13900034, 0x13900036, + 0x1390003a, 0x1390003d, 0x13900042, 0x13900046, + 0x13900048, 0x13900049, 0x1390004a, 0x1390004e, + 0x13900050, 0x13900052, 0x1390005d, 0x1390005e, + 0x13900061, 0x13900062, 0x13900064, 0x13900065, + // Entry A0 - BF + 0x1390006e, 0x13900073, 0x13900074, 0x13900075, + 0x13900076, 0x1390007c, 0x1390007d, 0x13900080, + 0x13900081, 0x13900082, 0x13900084, 0x1390008b, + 0x1390008d, 0x1390008e, 0x13900097, 0x13900098, + 0x13900099, 0x1390009a, 0x1390009b, 0x139000a0, + 0x139000a1, 0x139000a5, 0x139000a8, 0x139000aa, + 0x139000ae, 0x139000b2, 0x139000b5, 0x139000b6, + 0x139000c0, 0x139000c1, 0x139000c7, 0x139000c8, + // Entry C0 - DF + 0x139000cb, 0x139000cc, 0x139000cd, 0x139000cf, + 0x139000d1, 0x139000d3, 0x139000d6, 0x139000d7, + 0x139000da, 0x139000de, 0x139000e0, 0x139000e1, + 0x139000e7, 0x139000e8, 0x139000e9, 0x139000ec, + 0x139000ed, 0x139000f1, 0x13900108, 0x1390010a, + 0x1390010b, 0x1390010c, 0x1390010d, 0x1390010e, + 0x1390010f, 0x13900110, 0x13900113, 0x13900118, + 0x1390011c, 0x1390011e, 0x13900120, 0x13900126, + // Entry E0 - FF + 0x1390012a, 0x1390012d, 0x1390012e, 0x13900130, + 0x13900132, 0x13900134, 0x13900136, 0x1390013a, + 0x1390013d, 0x1390013e, 0x13900140, 0x13900143, + 0x13900162, 0x13900163, 0x13900165, 0x13c00000, + 0x13c00001, 0x13e00000, 0x13e0001f, 0x13e0002c, + 0x13e0003f, 0x13e00041, 0x13e00048, 0x13e00051, + 0x13e00054, 0x13e00057, 0x13e0005a, 0x13e00066, + 0x13e00069, 0x13e0006a, 0x13e0006f, 0x13e00087, + // Entry 100 - 11F + 0x13e0008a, 0x13e00090, 0x13e00095, 0x13e000d0, + 0x13e000d9, 0x13e000e3, 0x13e000e5, 0x13e000e8, + 0x13e000ed, 0x13e000f2, 0x13e0011b, 0x13e00136, + 0x13e00137, 0x13e0013c, 0x14000000, 0x1400006b, + 0x14500000, 0x1450006f, 0x14600000, 0x14600052, + 0x14800000, 0x14800024, 0x1480009d, 0x14e00000, + 0x14e00052, 0x14e00085, 0x14e000ca, 0x14e00115, + 0x15100000, 0x15100073, 0x15300000, 0x153000e8, + // Entry 120 - 13F + 0x15800000, 0x15800064, 0x15800077, 0x15e00000, + 0x15e00036, 0x15e00037, 0x15e0003a, 0x15e0003b, + 0x15e0003c, 0x15e00049, 0x15e0004b, 0x15e0004c, + 0x15e0004d, 0x15e0004e, 0x15e0004f, 0x15e00052, + 0x15e00063, 0x15e00068, 0x15e00079, 0x15e0007b, + 0x15e0007f, 0x15e00085, 0x15e00086, 0x15e00087, + 0x15e00092, 0x15e000a9, 0x15e000b8, 0x15e000bb, + 0x15e000bc, 0x15e000bf, 0x15e000c0, 0x15e000c4, + // Entry 140 - 15F + 0x15e000c9, 0x15e000ca, 0x15e000cd, 0x15e000d4, + 0x15e000d5, 0x15e000e6, 0x15e000eb, 0x15e00103, + 0x15e00108, 0x15e0010b, 0x15e00115, 0x15e0011d, + 0x15e00121, 0x15e00123, 0x15e00129, 0x15e00140, + 0x15e00141, 0x15e00160, 0x16900000, 0x1690009f, + 0x16d00000, 0x16d000da, 0x16e00000, 0x16e00097, + 0x17e00000, 0x17e0007c, 0x19000000, 0x1900006f, + 0x1a300000, 0x1a30004e, 0x1a300079, 0x1a3000b3, + // Entry 160 - 17F + 0x1a400000, 0x1a40009a, 0x1a900000, 0x1ab00000, + 0x1ab000a5, 0x1ac00000, 0x1ac00099, 0x1b400000, + 0x1b400081, 0x1b4000d5, 0x1b4000d7, 0x1b800000, + 0x1b800136, 0x1bc00000, 0x1bc00098, 0x1be00000, + 0x1be0009a, 
0x1d100000, 0x1d100033, 0x1d100091, + 0x1d200000, 0x1d200061, 0x1d500000, 0x1d500093, + 0x1d700000, 0x1d700028, 0x1e100000, 0x1e100096, + 0x1e700000, 0x1e7000d7, 0x1ea00000, 0x1ea00053, + // Entry 180 - 19F + 0x1f300000, 0x1f500000, 0x1f800000, 0x1f80009e, + 0x1f900000, 0x1f90004e, 0x1f90009f, 0x1f900114, + 0x1f900139, 0x1fa00000, 0x1fb00000, 0x20000000, + 0x200000a3, 0x20300000, 0x20700000, 0x20700052, + 0x20800000, 0x20a00000, 0x20a00130, 0x20e00000, + 0x20f00000, 0x21000000, 0x2100007e, 0x21200000, + 0x21200068, 0x21600000, 0x21700000, 0x217000a5, + 0x21f00000, 0x22300000, 0x22300130, 0x22700000, + // Entry 1A0 - 1BF + 0x2270005b, 0x23400000, 0x234000c4, 0x23900000, + 0x239000a5, 0x24200000, 0x242000af, 0x24400000, + 0x24400052, 0x24500000, 0x24500083, 0x24600000, + 0x246000a5, 0x24a00000, 0x24a000a7, 0x25100000, + 0x2510009a, 0x25400000, 0x254000ab, 0x254000ac, + 0x25600000, 0x2560009a, 0x26a00000, 0x26a0009a, + 0x26b00000, 0x26b00130, 0x26d00000, 0x26d00052, + 0x26e00000, 0x26e00061, 0x27400000, 0x28100000, + // Entry 1C0 - 1DF + 0x2810007c, 0x28a00000, 0x28a000a6, 0x29100000, + 0x29100130, 0x29500000, 0x295000b8, 0x2a300000, + 0x2a300132, 0x2af00000, 0x2af00136, 0x2b500000, + 0x2b50002a, 0x2b50004b, 0x2b50004c, 0x2b50004d, + 0x2b800000, 0x2b8000b0, 0x2bf00000, 0x2bf0009c, + 0x2bf0009d, 0x2c000000, 0x2c0000b7, 0x2c200000, + 0x2c20004b, 0x2c400000, 0x2c4000a5, 0x2c500000, + 0x2c5000a5, 0x2c700000, 0x2c7000b9, 0x2d100000, + // Entry 1E0 - 1FF + 0x2d1000a5, 0x2d100130, 0x2e900000, 0x2e9000a5, + 0x2ed00000, 0x2ed000cd, 0x2f100000, 0x2f1000c0, + 0x2f200000, 0x2f2000d2, 0x2f400000, 0x2f400052, + 0x2ff00000, 0x2ff000c3, 0x30400000, 0x3040009a, + 0x30b00000, 0x30b000c6, 0x31000000, 0x31b00000, + 0x31b0009a, 0x31f00000, 0x31f0003e, 0x31f000d1, + 0x31f0010e, 0x32000000, 0x320000cc, 0x32500000, + 0x32500052, 0x33100000, 0x331000c5, 0x33a00000, + // Entry 200 - 21F + 0x33a0009d, 0x34100000, 0x34500000, 0x345000d3, + 0x34700000, 0x347000db, 0x34700111, 0x34e00000, + 0x34e00165, 0x35000000, 0x35000061, 0x350000da, + 0x35100000, 0x3510009a, 0x351000dc, 0x36700000, + 0x36700030, 0x36700036, 0x36700040, 0x3670005c, + 0x367000da, 0x36700117, 0x3670011c, 0x36800000, + 0x36800052, 0x36a00000, 0x36a000db, 0x36c00000, + 0x36c00052, 0x36f00000, 0x37500000, 0x37600000, + // Entry 220 - 23F + 0x37a00000, 0x38000000, 0x38000118, 0x38700000, + 0x38900000, 0x38900132, 0x39000000, 0x39000070, + 0x390000a5, 0x39500000, 0x3950009a, 0x39800000, + 0x3980007e, 0x39800107, 0x39d00000, 0x39d05000, + 0x39d050e9, 0x39d36000, 0x39d3609a, 0x3a100000, + 0x3b300000, 0x3b3000ea, 0x3bd00000, 0x3bd00001, + 0x3be00000, 0x3be00024, 0x3c000000, 0x3c00002a, + 0x3c000041, 0x3c00004e, 0x3c00005b, 0x3c000087, + // Entry 240 - 25F + 0x3c00008c, 0x3c0000b8, 0x3c0000c7, 0x3c0000d2, + 0x3c0000ef, 0x3c000119, 0x3c000127, 0x3c400000, + 0x3c40003f, 0x3c40006a, 0x3c4000e5, 0x3d400000, + 0x3d40004e, 0x3d900000, 0x3d90003a, 0x3dc00000, + 0x3dc000bd, 0x3dc00105, 0x3de00000, 0x3de00130, + 0x3e200000, 0x3e200047, 0x3e2000a6, 0x3e2000af, + 0x3e2000bd, 0x3e200107, 0x3e200131, 0x3e500000, + 0x3e500108, 0x3e600000, 0x3e600130, 0x3eb00000, + // Entry 260 - 27F + 0x3eb00107, 0x3ec00000, 0x3ec000a5, 0x3f300000, + 0x3f300130, 0x3fa00000, 0x3fa000e9, 0x3fc00000, + 0x3fd00000, 0x3fd00073, 0x3fd000db, 0x3fd0010d, + 0x3ff00000, 0x3ff000d2, 0x40100000, 0x401000c4, + 0x40200000, 0x4020004c, 0x40700000, 0x40800000, + 0x4085b000, 0x4085b0bb, 0x408eb000, 0x408eb0bb, + 0x40c00000, 0x40c000b4, 0x41200000, 0x41200112, + 0x41600000, 0x41600110, 0x41c00000, 0x41d00000, 
+ // Entry 280 - 29F + 0x41e00000, 0x41f00000, 0x41f00073, 0x42200000, + 0x42300000, 0x42300165, 0x42900000, 0x42900063, + 0x42900070, 0x429000a5, 0x42900116, 0x43100000, + 0x43100027, 0x431000c3, 0x4310014e, 0x43200000, + 0x43220000, 0x43220033, 0x432200be, 0x43220106, + 0x4322014e, 0x4325b000, 0x4325b033, 0x4325b0be, + 0x4325b106, 0x4325b14e, 0x43700000, 0x43a00000, + 0x43b00000, 0x44400000, 0x44400031, 0x44400073, + // Entry 2A0 - 2BF + 0x4440010d, 0x44500000, 0x4450004b, 0x445000a5, + 0x44500130, 0x44500132, 0x44e00000, 0x45000000, + 0x4500009a, 0x450000b4, 0x450000d1, 0x4500010e, + 0x46100000, 0x4610009a, 0x46400000, 0x464000a5, + 0x46400132, 0x46700000, 0x46700125, 0x46b00000, + 0x46b00124, 0x46f00000, 0x46f0006e, 0x46f00070, + 0x47100000, 0x47600000, 0x47600128, 0x47a00000, + 0x48000000, 0x48200000, 0x4820012a, 0x48a00000, + // Entry 2C0 - 2DF + 0x48a0005e, 0x48a0012c, 0x48e00000, 0x49400000, + 0x49400107, 0x4a400000, 0x4a4000d5, 0x4a900000, + 0x4a9000bb, 0x4ac00000, 0x4ac00053, 0x4ae00000, + 0x4ae00131, 0x4b400000, 0x4b40009a, 0x4b4000e9, + 0x4bc00000, 0x4bc05000, 0x4bc05024, 0x4bc20000, + 0x4bc20138, 0x4bc5b000, 0x4bc5b138, 0x4be00000, + 0x4be5b000, 0x4be5b0b5, 0x4bef4000, 0x4bef40b5, + 0x4c000000, 0x4c300000, 0x4c30013f, 0x4c900000, + // Entry 2E0 - 2FF + 0x4c900001, 0x4cc00000, 0x4cc00130, 0x4ce00000, + 0x4cf00000, 0x4cf0004e, 0x4e500000, 0x4e500115, + 0x4f200000, 0x4fb00000, 0x4fb00132, 0x50900000, + 0x50900052, 0x51200000, 0x51200001, 0x51800000, + 0x5180003b, 0x518000d7, 0x51f00000, 0x51f3b000, + 0x51f3b053, 0x51f3c000, 0x51f3c08e, 0x52800000, + 0x528000bb, 0x52900000, 0x5293b000, 0x5293b053, + 0x5293b08e, 0x5293b0c7, 0x5293b10e, 0x5293c000, + // Entry 300 - 31F + 0x5293c08e, 0x5293c0c7, 0x5293c12f, 0x52f00000, + 0x52f00162, +} // Size: 3116 bytes + +const specialTagsStr string = "ca-ES-valencia en-US-u-va-posix" + +// Total table size 3147 bytes (3KiB); checksum: 5A8FFFA5 diff --git a/vendor/golang.org/x/text/internal/language/compact/tags.go b/vendor/golang.org/x/text/internal/language/compact/tags.go new file mode 100644 index 00000000000..ca135d295ae --- /dev/null +++ b/vendor/golang.org/x/text/internal/language/compact/tags.go @@ -0,0 +1,91 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package compact + +var ( + und = Tag{} + + Und Tag = Tag{} + + Afrikaans Tag = Tag{language: afIndex, locale: afIndex} + Amharic Tag = Tag{language: amIndex, locale: amIndex} + Arabic Tag = Tag{language: arIndex, locale: arIndex} + ModernStandardArabic Tag = Tag{language: ar001Index, locale: ar001Index} + Azerbaijani Tag = Tag{language: azIndex, locale: azIndex} + Bulgarian Tag = Tag{language: bgIndex, locale: bgIndex} + Bengali Tag = Tag{language: bnIndex, locale: bnIndex} + Catalan Tag = Tag{language: caIndex, locale: caIndex} + Czech Tag = Tag{language: csIndex, locale: csIndex} + Danish Tag = Tag{language: daIndex, locale: daIndex} + German Tag = Tag{language: deIndex, locale: deIndex} + Greek Tag = Tag{language: elIndex, locale: elIndex} + English Tag = Tag{language: enIndex, locale: enIndex} + AmericanEnglish Tag = Tag{language: enUSIndex, locale: enUSIndex} + BritishEnglish Tag = Tag{language: enGBIndex, locale: enGBIndex} + Spanish Tag = Tag{language: esIndex, locale: esIndex} + EuropeanSpanish Tag = Tag{language: esESIndex, locale: esESIndex} + LatinAmericanSpanish Tag = Tag{language: es419Index, locale: es419Index} + Estonian Tag = Tag{language: etIndex, locale: etIndex} + Persian Tag = Tag{language: faIndex, locale: faIndex} + Finnish Tag = Tag{language: fiIndex, locale: fiIndex} + Filipino Tag = Tag{language: filIndex, locale: filIndex} + French Tag = Tag{language: frIndex, locale: frIndex} + CanadianFrench Tag = Tag{language: frCAIndex, locale: frCAIndex} + Gujarati Tag = Tag{language: guIndex, locale: guIndex} + Hebrew Tag = Tag{language: heIndex, locale: heIndex} + Hindi Tag = Tag{language: hiIndex, locale: hiIndex} + Croatian Tag = Tag{language: hrIndex, locale: hrIndex} + Hungarian Tag = Tag{language: huIndex, locale: huIndex} + Armenian Tag = Tag{language: hyIndex, locale: hyIndex} + Indonesian Tag = Tag{language: idIndex, locale: idIndex} + Icelandic Tag = Tag{language: isIndex, locale: isIndex} + Italian Tag = Tag{language: itIndex, locale: itIndex} + Japanese Tag = Tag{language: jaIndex, locale: jaIndex} + Georgian Tag = Tag{language: kaIndex, locale: kaIndex} + Kazakh Tag = Tag{language: kkIndex, locale: kkIndex} + Khmer Tag = Tag{language: kmIndex, locale: kmIndex} + Kannada Tag = Tag{language: knIndex, locale: knIndex} + Korean Tag = Tag{language: koIndex, locale: koIndex} + Kirghiz Tag = Tag{language: kyIndex, locale: kyIndex} + Lao Tag = Tag{language: loIndex, locale: loIndex} + Lithuanian Tag = Tag{language: ltIndex, locale: ltIndex} + Latvian Tag = Tag{language: lvIndex, locale: lvIndex} + Macedonian Tag = Tag{language: mkIndex, locale: mkIndex} + Malayalam Tag = Tag{language: mlIndex, locale: mlIndex} + Mongolian Tag = Tag{language: mnIndex, locale: mnIndex} + Marathi Tag = Tag{language: mrIndex, locale: mrIndex} + Malay Tag = Tag{language: msIndex, locale: msIndex} + Burmese Tag = Tag{language: myIndex, locale: myIndex} + Nepali Tag = Tag{language: neIndex, locale: neIndex} + Dutch Tag = Tag{language: nlIndex, locale: nlIndex} + Norwegian Tag = Tag{language: noIndex, locale: noIndex} + Punjabi Tag = Tag{language: paIndex, locale: paIndex} + Polish Tag = Tag{language: plIndex, locale: plIndex} + Portuguese Tag = Tag{language: ptIndex, locale: ptIndex} + BrazilianPortuguese Tag = Tag{language: ptBRIndex, locale: ptBRIndex} + EuropeanPortuguese Tag = Tag{language: ptPTIndex, locale: ptPTIndex} + Romanian Tag = Tag{language: roIndex, locale: roIndex} + Russian Tag = Tag{language: ruIndex, locale: ruIndex} + Sinhala Tag = Tag{language: siIndex, locale: siIndex} 
+	Slovak               Tag = Tag{language: skIndex, locale: skIndex}
+	Slovenian            Tag = Tag{language: slIndex, locale: slIndex}
+	Albanian             Tag = Tag{language: sqIndex, locale: sqIndex}
+	Serbian              Tag = Tag{language: srIndex, locale: srIndex}
+	SerbianLatin         Tag = Tag{language: srLatnIndex, locale: srLatnIndex}
+	Swedish              Tag = Tag{language: svIndex, locale: svIndex}
+	Swahili              Tag = Tag{language: swIndex, locale: swIndex}
+	Tamil                Tag = Tag{language: taIndex, locale: taIndex}
+	Telugu               Tag = Tag{language: teIndex, locale: teIndex}
+	Thai                 Tag = Tag{language: thIndex, locale: thIndex}
+	Turkish              Tag = Tag{language: trIndex, locale: trIndex}
+	Ukrainian            Tag = Tag{language: ukIndex, locale: ukIndex}
+	Urdu                 Tag = Tag{language: urIndex, locale: urIndex}
+	Uzbek                Tag = Tag{language: uzIndex, locale: uzIndex}
+	Vietnamese           Tag = Tag{language: viIndex, locale: viIndex}
+	Chinese              Tag = Tag{language: zhIndex, locale: zhIndex}
+	SimplifiedChinese    Tag = Tag{language: zhHansIndex, locale: zhHansIndex}
+	TraditionalChinese   Tag = Tag{language: zhHantIndex, locale: zhHantIndex}
+	Zulu                 Tag = Tag{language: zuIndex, locale: zuIndex}
+)
diff --git a/vendor/golang.org/x/text/internal/language/compose.go b/vendor/golang.org/x/text/internal/language/compose.go
new file mode 100644
index 00000000000..4ae78e0fa5f
--- /dev/null
+++ b/vendor/golang.org/x/text/internal/language/compose.go
@@ -0,0 +1,167 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package language
+
+import (
+	"sort"
+	"strings"
+)
+
+// A Builder allows constructing a Tag from individual components.
+// Its main user is Compose in the top-level language package.
+type Builder struct {
+	Tag Tag
+
+	private    string // the x extension
+	variants   []string
+	extensions []string
+}
+
+// Make returns a new Tag from the current settings.
+func (b *Builder) Make() Tag {
+	t := b.Tag
+
+	if len(b.extensions) > 0 || len(b.variants) > 0 {
+		sort.Sort(sortVariants(b.variants))
+		sort.Strings(b.extensions)
+
+		if b.private != "" {
+			b.extensions = append(b.extensions, b.private)
+		}
+		n := maxCoreSize + tokenLen(b.variants...) + tokenLen(b.extensions...)
+		buf := make([]byte, n)
+		p := t.genCoreBytes(buf)
+		t.pVariant = byte(p)
+		p += appendTokens(buf[p:], b.variants...)
+		t.pExt = uint16(p)
+		p += appendTokens(buf[p:], b.extensions...)
+		t.str = string(buf[:p])
+		// We may not always need to remake the string, but when or when not
+		// to do so is rather tricky.
+		scan := makeScanner(buf[:p])
+		t, _ = parse(&scan, "")
+		return t
+
+	} else if b.private != "" {
+		t.str = b.private
+		t.RemakeString()
+	}
+	return t
+}
+
+// SetTag copies all the settings from a given Tag. Any previously set values
+// are discarded.
+func (b *Builder) SetTag(t Tag) {
+	b.Tag.LangID = t.LangID
+	b.Tag.RegionID = t.RegionID
+	b.Tag.ScriptID = t.ScriptID
+	// TODO: optimize
+	b.variants = b.variants[:0]
+	if variants := t.Variants(); variants != "" {
+		for _, vr := range strings.Split(variants[1:], "-") {
+			b.variants = append(b.variants, vr)
+		}
+	}
+	b.extensions, b.private = b.extensions[:0], ""
+	for _, e := range t.Extensions() {
+		b.AddExt(e)
+	}
+}
+
+// AddExt adds extension e to the tag. e must be a valid extension as returned
+// by Tag.Extension. If the extension already exists, it will be discarded,
+// except for a -u extension, where non-existing key-type pairs will be added.
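+//
+// Editor-added illustration (derived from the implementation below):
+//
+//	var b Builder
+//	b.AddExt("u-co-phonebk")
+//	b.AddExt("u-nu-latn") // merged: the single extension "u-co-phonebk-nu-latn"
+//	b.AddExt("x-priv")    // stored separately as the private use extension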
+func (b *Builder) AddExt(e string) { + if e[0] == 'x' { + if b.private == "" { + b.private = e + } + return + } + for i, s := range b.extensions { + if s[0] == e[0] { + if e[0] == 'u' { + b.extensions[i] += e[1:] + } + return + } + } + b.extensions = append(b.extensions, e) +} + +// SetExt sets the extension e to the tag. e must be a valid extension as +// returned by Tag.Extension. If the extension already exists, it will be +// overwritten, except for a -u extension, where the individual key-type pairs +// will be set. +func (b *Builder) SetExt(e string) { + if e[0] == 'x' { + b.private = e + return + } + for i, s := range b.extensions { + if s[0] == e[0] { + if e[0] == 'u' { + b.extensions[i] = e + s[1:] + } else { + b.extensions[i] = e + } + return + } + } + b.extensions = append(b.extensions, e) +} + +// AddVariant adds any number of variants. +func (b *Builder) AddVariant(v ...string) { + for _, v := range v { + if v != "" { + b.variants = append(b.variants, v) + } + } +} + +// ClearVariants removes any variants previously added, including those +// copied from a Tag in SetTag. +func (b *Builder) ClearVariants() { + b.variants = b.variants[:0] +} + +// ClearExtensions removes any extensions previously added, including those +// copied from a Tag in SetTag. +func (b *Builder) ClearExtensions() { + b.private = "" + b.extensions = b.extensions[:0] +} + +func tokenLen(token ...string) (n int) { + for _, t := range token { + n += len(t) + 1 + } + return +} + +func appendTokens(b []byte, token ...string) int { + p := 0 + for _, t := range token { + b[p] = '-' + copy(b[p+1:], t) + p += 1 + len(t) + } + return p +} + +type sortVariants []string + +func (s sortVariants) Len() int { + return len(s) +} + +func (s sortVariants) Swap(i, j int) { + s[j], s[i] = s[i], s[j] +} + +func (s sortVariants) Less(i, j int) bool { + return variantIndex[s[i]] < variantIndex[s[j]] +} diff --git a/vendor/golang.org/x/text/internal/language/coverage.go b/vendor/golang.org/x/text/internal/language/coverage.go new file mode 100644 index 00000000000..9b20b88feb8 --- /dev/null +++ b/vendor/golang.org/x/text/internal/language/coverage.go @@ -0,0 +1,28 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package language + +// BaseLanguages returns the list of all supported base languages. It generates +// the list by traversing the internal structures. +func BaseLanguages() []Language { + base := make([]Language, 0, NumLanguages) + for i := 0; i < langNoIndexOffset; i++ { + // We included "und" already for the value 0. + if i != nonCanonicalUnd { + base = append(base, Language(i)) + } + } + i := langNoIndexOffset + for _, v := range langNoIndex { + for k := 0; k < 8; k++ { + if v&1 == 1 { + base = append(base, Language(i)) + } + v >>= 1 + i++ + } + } + return base +} diff --git a/vendor/golang.org/x/text/internal/language/language.go b/vendor/golang.org/x/text/internal/language/language.go new file mode 100644 index 00000000000..09d41c73670 --- /dev/null +++ b/vendor/golang.org/x/text/internal/language/language.go @@ -0,0 +1,627 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+//go:generate go run gen.go gen_common.go -output tables.go
+
+package language // import "golang.org/x/text/internal/language"
+
+// TODO: Remove above NOTE after:
+// - verifying that tables are dropped correctly (most notably matcher tables).
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+)
+
+const (
+	// maxCoreSize is the maximum size of a BCP 47 tag without variants and
+	// extensions. Equals max lang (3) + script (4) + max reg (3) + 2 dashes.
+	maxCoreSize = 12
+
+	// max99thPercentileSize is a somewhat arbitrary buffer size that presumably
+	// is large enough to hold at least 99% of the BCP 47 tags.
+	max99thPercentileSize = 32
+
+	// maxSimpleUExtensionSize is the maximum size of a -u extension with one
+	// key-type pair. Equals len("-u-") + key (2) + dash + max value (8).
+	maxSimpleUExtensionSize = 14
+)
+
+// Tag represents a BCP 47 language tag. It is used to specify an instance of a
+// specific language or locale. All language tag values are guaranteed to be
+// well-formed. The zero value of Tag is Und.
+type Tag struct {
+	// TODO: the following fields have the form TagTypeID. This name is chosen
+	// to allow refactoring the public package without conflicting with its
+	// Base, Script, and Region methods. Once the transition is fully completed
+	// the ID can be stripped from the name.
+
+	LangID   Language
+	RegionID Region
+	// TODO: we will soon run out of positions for ScriptID. Idea: instead of
+	// storing lang, region, and ScriptID codes, store only the compact index
+	// and have a lookup table from this code to its expansion. This greatly
+	// speeds up table lookup and common variant cases.
+	// This will also immediately free up 3 extra bytes. Also, the pVariant
+	// field can now be moved to the lookup table, as the compact index uniquely
+	// determines the offset of a possible variant.
+	ScriptID Script
+	pVariant byte   // offset in str, includes preceding '-'
+	pExt     uint16 // offset of first extension, includes preceding '-'
+
+	// str is the string representation of the Tag. It will only be used if the
+	// tag has variants or extensions.
+	str string
+}
+
+// Make is a convenience wrapper for Parse that omits the error.
+// In case of an error, a sensible default is returned.
+func Make(s string) Tag {
+	t, _ := Parse(s)
+	return t
+}
+
+// Raw returns the raw base language, script and region, without making an
+// attempt to infer their values.
+// TODO: consider removing
+func (t Tag) Raw() (b Language, s Script, r Region) {
+	return t.LangID, t.ScriptID, t.RegionID
+}
+
+// equalTags compares language, script and region subtags only.
+func (t Tag) equalTags(a Tag) bool {
+	return t.LangID == a.LangID && t.ScriptID == a.ScriptID && t.RegionID == a.RegionID
+}
+
+// IsRoot returns true if t is equal to language "und".
+func (t Tag) IsRoot() bool {
+	if int(t.pVariant) < len(t.str) {
+		return false
+	}
+	return t.equalTags(Und)
+}
+
+// IsPrivateUse reports whether the Tag consists solely of a private use tag.
+func (t Tag) IsPrivateUse() bool {
+	return t.str != "" && t.pVariant == 0
+}
+
+// RemakeString is used to update t.str in case lang, script or region changed.
+// It is assumed that pExt and pVariant still point to the start of the
+// respective parts.
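+//
+// Editor-added sketch (assumes Make and ParseRegion from this package):
+//
+//	t := Make("sr-Latn-u-co-phonebk")
+//	t.RegionID, _ = ParseRegion("RS")
+//	t.RemakeString() // t.String() is now "sr-Latn-RS-u-co-phonebk"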
+func (t *Tag) RemakeString() { + if t.str == "" { + return + } + extra := t.str[t.pVariant:] + if t.pVariant > 0 { + extra = extra[1:] + } + if t.equalTags(Und) && strings.HasPrefix(extra, "x-") { + t.str = extra + t.pVariant = 0 + t.pExt = 0 + return + } + var buf [max99thPercentileSize]byte // avoid extra memory allocation in most cases. + b := buf[:t.genCoreBytes(buf[:])] + if extra != "" { + diff := len(b) - int(t.pVariant) + b = append(b, '-') + b = append(b, extra...) + t.pVariant = uint8(int(t.pVariant) + diff) + t.pExt = uint16(int(t.pExt) + diff) + } else { + t.pVariant = uint8(len(b)) + t.pExt = uint16(len(b)) + } + t.str = string(b) +} + +// genCoreBytes writes a string for the base languages, script and region tags +// to the given buffer and returns the number of bytes written. It will never +// write more than maxCoreSize bytes. +func (t *Tag) genCoreBytes(buf []byte) int { + n := t.LangID.StringToBuf(buf[:]) + if t.ScriptID != 0 { + n += copy(buf[n:], "-") + n += copy(buf[n:], t.ScriptID.String()) + } + if t.RegionID != 0 { + n += copy(buf[n:], "-") + n += copy(buf[n:], t.RegionID.String()) + } + return n +} + +// String returns the canonical string representation of the language tag. +func (t Tag) String() string { + if t.str != "" { + return t.str + } + if t.ScriptID == 0 && t.RegionID == 0 { + return t.LangID.String() + } + buf := [maxCoreSize]byte{} + return string(buf[:t.genCoreBytes(buf[:])]) +} + +// MarshalText implements encoding.TextMarshaler. +func (t Tag) MarshalText() (text []byte, err error) { + if t.str != "" { + text = append(text, t.str...) + } else if t.ScriptID == 0 && t.RegionID == 0 { + text = append(text, t.LangID.String()...) + } else { + buf := [maxCoreSize]byte{} + text = buf[:t.genCoreBytes(buf[:])] + } + return text, nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (t *Tag) UnmarshalText(text []byte) error { + tag, err := Parse(string(text)) + *t = tag + return err +} + +// Variants returns the part of the tag holding all variants or the empty string +// if there are no variants defined. +func (t Tag) Variants() string { + if t.pVariant == 0 { + return "" + } + return t.str[t.pVariant:t.pExt] +} + +// VariantOrPrivateUseTags returns variants or private use tags. +func (t Tag) VariantOrPrivateUseTags() string { + if t.pExt > 0 { + return t.str[t.pVariant:t.pExt] + } + return t.str[t.pVariant:] +} + +// HasString reports whether this tag defines more than just the raw +// components. +func (t Tag) HasString() bool { + return t.str != "" +} + +// Parent returns the CLDR parent of t. In CLDR, missing fields in data for a +// specific language are substituted with fields from the parent language. +// The parent for a language may change for newer versions of CLDR. +func (t Tag) Parent() Tag { + if t.str != "" { + // Strip the variants and extensions. 
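+		// Editor note (assumption): a tag with variants or extensions first
+		// parents to its bare lang-script-region core; only that core form
+		// takes part in the table-driven lookup further below.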
+		b, s, r := t.Raw()
+		t = Tag{LangID: b, ScriptID: s, RegionID: r}
+		if t.RegionID == 0 && t.ScriptID != 0 && t.LangID != 0 {
+			base, _ := addTags(Tag{LangID: t.LangID})
+			if base.ScriptID == t.ScriptID {
+				return Tag{LangID: t.LangID}
+			}
+		}
+		return t
+	}
+	if t.LangID != 0 {
+		if t.RegionID != 0 {
+			maxScript := t.ScriptID
+			if maxScript == 0 {
+				max, _ := addTags(t)
+				maxScript = max.ScriptID
+			}
+
+			for i := range parents {
+				if Language(parents[i].lang) == t.LangID && Script(parents[i].maxScript) == maxScript {
+					for _, r := range parents[i].fromRegion {
+						if Region(r) == t.RegionID {
+							return Tag{
+								LangID:   t.LangID,
+								ScriptID: Script(parents[i].script),
+								RegionID: Region(parents[i].toRegion),
+							}
+						}
+					}
+				}
+			}
+
+			// Strip the script if it is the default one.
+			base, _ := addTags(Tag{LangID: t.LangID})
+			if base.ScriptID != maxScript {
+				return Tag{LangID: t.LangID, ScriptID: maxScript}
+			}
+			return Tag{LangID: t.LangID}
+		} else if t.ScriptID != 0 {
+			// The parent for a base-script pair with a non-default script is
+			// "und" instead of the base language.
+			base, _ := addTags(Tag{LangID: t.LangID})
+			if base.ScriptID != t.ScriptID {
+				return Und
+			}
+			return Tag{LangID: t.LangID}
+		}
+	}
+	return Und
+}
+
+// ParseExtension parses s as an extension and returns it on success.
+func ParseExtension(s string) (ext string, err error) {
+	defer func() {
+		if recover() != nil {
+			ext = ""
+			err = ErrSyntax
+		}
+	}()
+
+	scan := makeScannerString(s)
+	var end int
+	if n := len(scan.token); n != 1 {
+		return "", ErrSyntax
+	}
+	scan.toLower(0, len(scan.b))
+	end = parseExtension(&scan)
+	if end != len(s) {
+		return "", ErrSyntax
+	}
+	return string(scan.b), nil
+}
+
+// HasVariants reports whether t has variants.
+func (t Tag) HasVariants() bool {
+	return uint16(t.pVariant) < t.pExt
+}
+
+// HasExtensions reports whether t has extensions.
+func (t Tag) HasExtensions() bool {
+	return int(t.pExt) < len(t.str)
+}
+
+// Extension returns the extension of type x for tag t. It will return
+// false for ok if t does not have the requested extension. The returned
+// extension will be invalid in this case.
+func (t Tag) Extension(x byte) (ext string, ok bool) {
+	for i := int(t.pExt); i < len(t.str)-1; {
+		var ext string
+		i, ext = getExtension(t.str, i)
+		if ext[0] == x {
+			return ext, true
+		}
+	}
+	return "", false
+}
+
+// Extensions returns all extensions of t.
+func (t Tag) Extensions() []string {
+	e := []string{}
+	for i := int(t.pExt); i < len(t.str)-1; {
+		var ext string
+		i, ext = getExtension(t.str, i)
+		e = append(e, ext)
+	}
+	return e
+}
+
+// TypeForKey returns the type associated with the given key, where key and type
+// are of the allowed values defined for the Unicode locale extension ('u') in
+// https://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers.
+// TypeForKey will traverse the inheritance chain to get the correct value.
+//
+// If there are multiple types associated with a key, only the first will be
+// returned. If there is no type associated with a key, it returns the empty
+// string.
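+//
+// Editor-added illustration:
+//
+//	Make("nl-u-co-phonebk").TypeForKey("co") // "phonebk"
+//	Make("nl-u-co-phonebk").TypeForKey("nu") // "" (no such key)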
+func (t Tag) TypeForKey(key string) string { + if _, start, end, _ := t.findTypeForKey(key); end != start { + s := t.str[start:end] + if p := strings.IndexByte(s, '-'); p >= 0 { + s = s[:p] + } + return s + } + return "" +} + +var ( + errPrivateUse = errors.New("cannot set a key on a private use tag") + errInvalidArguments = errors.New("invalid key or type") +) + +// SetTypeForKey returns a new Tag with the key set to type, where key and type +// are of the allowed values defined for the Unicode locale extension ('u') in +// https://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers. +// An empty value removes an existing pair with the same key. +func (t Tag) SetTypeForKey(key, value string) (Tag, error) { + if t.IsPrivateUse() { + return t, errPrivateUse + } + if len(key) != 2 { + return t, errInvalidArguments + } + + // Remove the setting if value is "". + if value == "" { + start, sep, end, _ := t.findTypeForKey(key) + if start != sep { + // Remove a possible empty extension. + switch { + case t.str[start-2] != '-': // has previous elements. + case end == len(t.str), // end of string + end+2 < len(t.str) && t.str[end+2] == '-': // end of extension + start -= 2 + } + if start == int(t.pVariant) && end == len(t.str) { + t.str = "" + t.pVariant, t.pExt = 0, 0 + } else { + t.str = fmt.Sprintf("%s%s", t.str[:start], t.str[end:]) + } + } + return t, nil + } + + if len(value) < 3 || len(value) > 8 { + return t, errInvalidArguments + } + + var ( + buf [maxCoreSize + maxSimpleUExtensionSize]byte + uStart int // start of the -u extension. + ) + + // Generate the tag string if needed. + if t.str == "" { + uStart = t.genCoreBytes(buf[:]) + buf[uStart] = '-' + uStart++ + } + + // Create new key-type pair and parse it to verify. + b := buf[uStart:] + copy(b, "u-") + copy(b[2:], key) + b[4] = '-' + b = b[:5+copy(b[5:], value)] + scan := makeScanner(b) + if parseExtensions(&scan); scan.err != nil { + return t, scan.err + } + + // Assemble the replacement string. + if t.str == "" { + t.pVariant, t.pExt = byte(uStart-1), uint16(uStart-1) + t.str = string(buf[:uStart+len(b)]) + } else { + s := t.str + start, sep, end, hasExt := t.findTypeForKey(key) + if start == sep { + if hasExt { + b = b[2:] + } + t.str = fmt.Sprintf("%s-%s%s", s[:sep], b, s[end:]) + } else { + t.str = fmt.Sprintf("%s-%s%s", s[:start+3], value, s[end:]) + } + } + return t, nil +} + +// findTypeForKey returns the start and end position for the type corresponding +// to key or the point at which to insert the key-value pair if the type +// wasn't found. The hasExt return value reports whether an -u extension was present. +// Note: the extensions are typically very small and are likely to contain +// only one key-type pair. +func (t Tag) findTypeForKey(key string) (start, sep, end int, hasExt bool) { + p := int(t.pExt) + if len(key) != 2 || p == len(t.str) || p == 0 { + return p, p, p, false + } + s := t.str + + // Find the correct extension. + for p++; s[p] != 'u'; p++ { + if s[p] > 'u' { + p-- + return p, p, p, false + } + if p = nextExtension(s, p); p == len(s) { + return len(s), len(s), len(s), false + } + } + // Proceed to the hyphen following the extension name. + p++ + + // curKey is the key currently being processed. + curKey := "" + + // Iterate over keys until we get the end of a section. 
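+	// Editor note (assumption): in the scan below, a 2-letter token starts a
+	// new key, longer tokens are type subtags extending curKey, and a token of
+	// length 0 or 1 (or the end of the string) closes the -u extension.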
+	for {
+		end = p
+		for p++; p < len(s) && s[p] != '-'; p++ {
+		}
+		n := p - end - 1
+		if n <= 2 && curKey == key {
+			if sep < end {
+				sep++
+			}
+			return start, sep, end, true
+		}
+		switch n {
+		case 0, // invalid string
+			1: // next extension
+			return end, end, end, true
+		case 2:
+			// next key
+			curKey = s[end+1 : p]
+			if curKey > key {
+				return end, end, end, true
+			}
+			start = end
+			sep = p
+		}
+	}
+}
+
+// ParseBase parses a 2- or 3-letter ISO 639 code.
+// It returns a ValueError if s is a well-formed but unknown language identifier
+// or another error if another error occurred.
+func ParseBase(s string) (l Language, err error) {
+	defer func() {
+		if recover() != nil {
+			l = 0
+			err = ErrSyntax
+		}
+	}()
+
+	if n := len(s); n < 2 || 3 < n {
+		return 0, ErrSyntax
+	}
+	var buf [3]byte
+	return getLangID(buf[:copy(buf[:], s)])
+}
+
+// ParseScript parses a 4-letter ISO 15924 code.
+// It returns a ValueError if s is a well-formed but unknown script identifier
+// or another error if another error occurred.
+func ParseScript(s string) (scr Script, err error) {
+	defer func() {
+		if recover() != nil {
+			scr = 0
+			err = ErrSyntax
+		}
+	}()
+
+	if len(s) != 4 {
+		return 0, ErrSyntax
+	}
+	var buf [4]byte
+	return getScriptID(script, buf[:copy(buf[:], s)])
+}
+
+// EncodeM49 returns the Region for the given UN M.49 code.
+// It returns an error if r is not a valid code.
+func EncodeM49(r int) (Region, error) {
+	return getRegionM49(r)
+}
+
+// ParseRegion parses a 2- or 3-letter ISO 3166-1 or a UN M.49 code.
+// It returns a ValueError if s is a well-formed but unknown region identifier
+// or another error if another error occurred.
+func ParseRegion(s string) (r Region, err error) {
+	defer func() {
+		if recover() != nil {
+			r = 0
+			err = ErrSyntax
+		}
+	}()
+
+	if n := len(s); n < 2 || 3 < n {
+		return 0, ErrSyntax
+	}
+	var buf [3]byte
+	return getRegionID(buf[:copy(buf[:], s)])
+}
+
+// IsCountry returns whether this region is a country or autonomous area. This
+// includes non-standard definitions from CLDR.
+func (r Region) IsCountry() bool {
+	if r == 0 || r.IsGroup() || r.IsPrivateUse() && r != _XK {
+		return false
+	}
+	return true
+}
+
+// IsGroup returns whether this region defines a collection of regions. This
+// includes non-standard definitions from CLDR.
+func (r Region) IsGroup() bool {
+	if r == 0 {
+		return false
+	}
+	return int(regionInclusion[r]) < len(regionContainment)
+}
+
+// Contains returns whether Region c is contained by Region r. It returns true
+// if c == r.
+func (r Region) Contains(c Region) bool {
+	if r == c {
+		return true
+	}
+	g := regionInclusion[r]
+	if g >= nRegionGroups {
+		return false
+	}
+	m := regionContainment[g]
+
+	d := regionInclusion[c]
+	b := regionInclusionBits[d]
+
+	// A contained country may belong to multiple disjoint groups. Matching any
+	// of these indicates containment. If the contained region is a group, it
+	// must strictly be a subset.
+	if d >= nRegionGroups {
+		return b&m != 0
+	}
+	return b&^m == 0
+}
+
+var errNoTLD = errors.New("language: region is not a valid ccTLD")
+
+// TLD returns the country code top-level domain (ccTLD). UK is returned for GB.
+// In all other cases it returns either the region itself or an error.
+//
+// This method may return an error for a region for which there exists a
+// canonical form with a ccTLD. To get that ccTLD canonicalize r first. The
+// region will already be canonicalized if it was obtained from a Tag that was
+// obtained using any of the default methods.
+func (r Region) TLD() (Region, error) { + // See http://en.wikipedia.org/wiki/Country_code_top-level_domain for the + // difference between ISO 3166-1 and IANA ccTLD. + if r == _GB { + r = _UK + } + if (r.typ() & ccTLD) == 0 { + return 0, errNoTLD + } + return r, nil +} + +// Canonicalize returns the region or a possible replacement if the region is +// deprecated. It will not return a replacement for deprecated regions that +// are split into multiple regions. +func (r Region) Canonicalize() Region { + if cr := normRegion(r); cr != 0 { + return cr + } + return r +} + +// Variant represents a registered variant of a language as defined by BCP 47. +type Variant struct { + ID uint8 + str string +} + +// ParseVariant parses and returns a Variant. An error is returned if s is not +// a valid variant. +func ParseVariant(s string) (v Variant, err error) { + defer func() { + if recover() != nil { + v = Variant{} + err = ErrSyntax + } + }() + + s = strings.ToLower(s) + if id, ok := variantIndex[s]; ok { + return Variant{id, s}, nil + } + return Variant{}, NewValueError([]byte(s)) +} + +// String returns the string representation of the variant. +func (v Variant) String() string { + return v.str +} diff --git a/vendor/golang.org/x/text/internal/language/lookup.go b/vendor/golang.org/x/text/internal/language/lookup.go new file mode 100644 index 00000000000..231b4fbdebf --- /dev/null +++ b/vendor/golang.org/x/text/internal/language/lookup.go @@ -0,0 +1,412 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package language + +import ( + "bytes" + "fmt" + "sort" + "strconv" + + "golang.org/x/text/internal/tag" +) + +// findIndex tries to find the given tag in idx and returns a standardized error +// if it could not be found. +func findIndex(idx tag.Index, key []byte, form string) (index int, err error) { + if !tag.FixCase(form, key) { + return 0, ErrSyntax + } + i := idx.Index(key) + if i == -1 { + return 0, NewValueError(key) + } + return i, nil +} + +func searchUint(imap []uint16, key uint16) int { + return sort.Search(len(imap), func(i int) bool { + return imap[i] >= key + }) +} + +type Language uint16 + +// getLangID returns the langID of s if s is a canonical subtag +// or langUnknown if s is not a canonical subtag. +func getLangID(s []byte) (Language, error) { + if len(s) == 2 { + return getLangISO2(s) + } + return getLangISO3(s) +} + +// TODO language normalization as well as the AliasMaps could be moved to the +// higher level package, but it is a bit tricky to separate the generation. + +func (id Language) Canonicalize() (Language, AliasType) { + return normLang(id) +} + +// normLang returns the mapped langID of id according to mapping m. +func normLang(id Language) (Language, AliasType) { + k := sort.Search(len(AliasMap), func(i int) bool { + return AliasMap[i].From >= uint16(id) + }) + if k < len(AliasMap) && AliasMap[k].From == uint16(id) { + return Language(AliasMap[k].To), AliasTypes[k] + } + return id, AliasTypeUnknown +} + +// getLangISO2 returns the langID for the given 2-letter ISO language code +// or unknownLang if this does not exist. 
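+// Entries in the lang index whose fourth byte is zero are 3-letter-only
+// codes, which is why the lookup below also requires Elem(i)[3] != 0.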
+func getLangISO2(s []byte) (Language, error) { + if !tag.FixCase("zz", s) { + return 0, ErrSyntax + } + if i := lang.Index(s); i != -1 && lang.Elem(i)[3] != 0 { + return Language(i), nil + } + return 0, NewValueError(s) +} + +const base = 'z' - 'a' + 1 + +func strToInt(s []byte) uint { + v := uint(0) + for i := 0; i < len(s); i++ { + v *= base + v += uint(s[i] - 'a') + } + return v +} + +// converts the given integer to the original ASCII string passed to strToInt. +// len(s) must match the number of characters obtained. +func intToStr(v uint, s []byte) { + for i := len(s) - 1; i >= 0; i-- { + s[i] = byte(v%base) + 'a' + v /= base + } +} + +// getLangISO3 returns the langID for the given 3-letter ISO language code +// or unknownLang if this does not exist. +func getLangISO3(s []byte) (Language, error) { + if tag.FixCase("und", s) { + // first try to match canonical 3-letter entries + for i := lang.Index(s[:2]); i != -1; i = lang.Next(s[:2], i) { + if e := lang.Elem(i); e[3] == 0 && e[2] == s[2] { + // We treat "und" as special and always translate it to "unspecified". + // Note that ZZ and Zzzz are private use and are not treated as + // unspecified by default. + id := Language(i) + if id == nonCanonicalUnd { + return 0, nil + } + return id, nil + } + } + if i := altLangISO3.Index(s); i != -1 { + return Language(altLangIndex[altLangISO3.Elem(i)[3]]), nil + } + n := strToInt(s) + if langNoIndex[n/8]&(1<<(n%8)) != 0 { + return Language(n) + langNoIndexOffset, nil + } + // Check for non-canonical uses of ISO3. + for i := lang.Index(s[:1]); i != -1; i = lang.Next(s[:1], i) { + if e := lang.Elem(i); e[2] == s[1] && e[3] == s[2] { + return Language(i), nil + } + } + return 0, NewValueError(s) + } + return 0, ErrSyntax +} + +// StringToBuf writes the string to b and returns the number of bytes +// written. cap(b) must be >= 3. +func (id Language) StringToBuf(b []byte) int { + if id >= langNoIndexOffset { + intToStr(uint(id)-langNoIndexOffset, b[:3]) + return 3 + } else if id == 0 { + return copy(b, "und") + } + l := lang[id<<2:] + if l[3] == 0 { + return copy(b, l[:3]) + } + return copy(b, l[:2]) +} + +// String returns the BCP 47 representation of the langID. +// Use b as variable name, instead of id, to ensure the variable +// used is consistent with that of Base in which this type is embedded. +func (b Language) String() string { + if b == 0 { + return "und" + } else if b >= langNoIndexOffset { + b -= langNoIndexOffset + buf := [3]byte{} + intToStr(uint(b), buf[:]) + return string(buf[:]) + } + l := lang.Elem(int(b)) + if l[3] == 0 { + return l[:3] + } + return l[:2] +} + +// ISO3 returns the ISO 639-3 language code. +func (b Language) ISO3() string { + if b == 0 || b >= langNoIndexOffset { + return b.String() + } + l := lang.Elem(int(b)) + if l[3] == 0 { + return l[:3] + } else if l[2] == 0 { + return altLangISO3.Elem(int(l[3]))[:3] + } + // This allocation will only happen for 3-letter ISO codes + // that are non-canonical BCP 47 language identifiers. + return l[0:1] + l[2:4] +} + +// IsPrivateUse reports whether this language code is reserved for private use. +func (b Language) IsPrivateUse() bool { + return langPrivateStart <= b && b <= langPrivateEnd +} + +// SuppressScript returns the script marked as SuppressScript in the IANA +// language tag repository, or 0 if there is no such script. 
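+// For example, the IANA registry marks Latn as the suppress-script for "en",
+// so "en-Latn" and plain "en" denote the same thing.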
+func (b Language) SuppressScript() Script { + if b < langNoIndexOffset { + return Script(suppressScript[b]) + } + return 0 +} + +type Region uint16 + +// getRegionID returns the region id for s if s is a valid 2-letter region code +// or unknownRegion. +func getRegionID(s []byte) (Region, error) { + if len(s) == 3 { + if isAlpha(s[0]) { + return getRegionISO3(s) + } + if i, err := strconv.ParseUint(string(s), 10, 10); err == nil { + return getRegionM49(int(i)) + } + } + return getRegionISO2(s) +} + +// getRegionISO2 returns the regionID for the given 2-letter ISO country code +// or unknownRegion if this does not exist. +func getRegionISO2(s []byte) (Region, error) { + i, err := findIndex(regionISO, s, "ZZ") + if err != nil { + return 0, err + } + return Region(i) + isoRegionOffset, nil +} + +// getRegionISO3 returns the regionID for the given 3-letter ISO country code +// or unknownRegion if this does not exist. +func getRegionISO3(s []byte) (Region, error) { + if tag.FixCase("ZZZ", s) { + for i := regionISO.Index(s[:1]); i != -1; i = regionISO.Next(s[:1], i) { + if e := regionISO.Elem(i); e[2] == s[1] && e[3] == s[2] { + return Region(i) + isoRegionOffset, nil + } + } + for i := 0; i < len(altRegionISO3); i += 3 { + if tag.Compare(altRegionISO3[i:i+3], s) == 0 { + return Region(altRegionIDs[i/3]), nil + } + } + return 0, NewValueError(s) + } + return 0, ErrSyntax +} + +func getRegionM49(n int) (Region, error) { + if 0 < n && n <= 999 { + const ( + searchBits = 7 + regionBits = 9 + regionMask = 1<<regionBits - 1 + ) + idx := n >> searchBits + buf := fromM49[m49Index[idx]:m49Index[idx+1]] + val := uint16(n) << regionBits // we rely on bits shifting out + i := sort.Search(len(buf), func(i int) bool { + return buf[i] >= val + }) + if r := fromM49[int(m49Index[idx])+i]; r&^regionMask == val { + return Region(r & regionMask), nil + } + } + var e ValueError + fmt.Fprint(bytes.NewBuffer([]byte(e.v[:])), n) + return 0, e +} + +// normRegion returns a region if r is deprecated or 0 otherwise. +// TODO: consider supporting BYS (-> BLR), CSK (-> 200 or CZ), PHI (-> PHL) and AFI (-> DJ). +// TODO: consider mapping split up regions to new most populous one (like CLDR). +func normRegion(r Region) Region { + m := regionOldMap + k := sort.Search(len(m), func(i int) bool { + return m[i].From >= uint16(r) + }) + if k < len(m) && m[k].From == uint16(r) { + return Region(m[k].To) + } + return 0 +} + +const ( + iso3166UserAssigned = 1 << iota + ccTLD + bcp47Region +) + +func (r Region) typ() byte { + return regionTypes[r] +} + +// String returns the BCP 47 representation for the region. +// It returns "ZZ" for an unspecified region. +func (r Region) String() string { + if r < isoRegionOffset { + if r == 0 { + return "ZZ" + } + return fmt.Sprintf("%03d", r.M49()) + } + r -= isoRegionOffset + return regionISO.Elem(int(r))[:2] +} + +// ISO3 returns the 3-letter ISO code of r. +// Note that not all regions have a 3-letter ISO code. +// In such cases this method returns "ZZZ". +func (r Region) ISO3() string { + if r < isoRegionOffset { + return "ZZZ" + } + r -= isoRegionOffset + reg := regionISO.Elem(int(r)) + switch reg[2] { + case 0: + return altRegionISO3[reg[3]:][:3] + case ' ': + return "ZZZ" + } + return reg[0:1] + reg[2:4] +} + +// M49 returns the UN M.49 encoding of r, or 0 if this encoding +// is not defined for r. +func (r Region) M49() int { + return int(m49[r]) +} + +// IsPrivateUse reports whether r has the ISO 3166 User-assigned status. 
This +// may include private-use tags that are assigned by CLDR and used in this +// implementation. So IsPrivateUse and IsCountry can be simultaneously true. +func (r Region) IsPrivateUse() bool { + return r.typ()&iso3166UserAssigned != 0 +} + +type Script uint16 + +// getScriptID returns the script id for string s. It assumes that s +// is of the format [A-Z][a-z]{3}. +func getScriptID(idx tag.Index, s []byte) (Script, error) { + i, err := findIndex(idx, s, "Zzzz") + return Script(i), err +} + +// String returns the script code in title case. +// It returns "Zzzz" for an unspecified script. +func (s Script) String() string { + if s == 0 { + return "Zzzz" + } + return script.Elem(int(s)) +} + +// IsPrivateUse reports whether this script code is reserved for private use. +func (s Script) IsPrivateUse() bool { + return _Qaaa <= s && s <= _Qabx +} + +const ( + maxAltTaglen = len("en-US-POSIX") + maxLen = maxAltTaglen +) + +var ( + // grandfatheredMap holds a mapping from legacy and grandfathered tags to + // their base language or index to more elaborate tag. + grandfatheredMap = map[[maxLen]byte]int16{ + [maxLen]byte{'a', 'r', 't', '-', 'l', 'o', 'j', 'b', 'a', 'n'}: _jbo, // art-lojban + [maxLen]byte{'i', '-', 'a', 'm', 'i'}: _ami, // i-ami + [maxLen]byte{'i', '-', 'b', 'n', 'n'}: _bnn, // i-bnn + [maxLen]byte{'i', '-', 'h', 'a', 'k'}: _hak, // i-hak + [maxLen]byte{'i', '-', 'k', 'l', 'i', 'n', 'g', 'o', 'n'}: _tlh, // i-klingon + [maxLen]byte{'i', '-', 'l', 'u', 'x'}: _lb, // i-lux + [maxLen]byte{'i', '-', 'n', 'a', 'v', 'a', 'j', 'o'}: _nv, // i-navajo + [maxLen]byte{'i', '-', 'p', 'w', 'n'}: _pwn, // i-pwn + [maxLen]byte{'i', '-', 't', 'a', 'o'}: _tao, // i-tao + [maxLen]byte{'i', '-', 't', 'a', 'y'}: _tay, // i-tay + [maxLen]byte{'i', '-', 't', 's', 'u'}: _tsu, // i-tsu + [maxLen]byte{'n', 'o', '-', 'b', 'o', 'k'}: _nb, // no-bok + [maxLen]byte{'n', 'o', '-', 'n', 'y', 'n'}: _nn, // no-nyn + [maxLen]byte{'s', 'g', 'n', '-', 'b', 'e', '-', 'f', 'r'}: _sfb, // sgn-BE-FR + [maxLen]byte{'s', 'g', 'n', '-', 'b', 'e', '-', 'n', 'l'}: _vgt, // sgn-BE-NL + [maxLen]byte{'s', 'g', 'n', '-', 'c', 'h', '-', 'd', 'e'}: _sgg, // sgn-CH-DE + [maxLen]byte{'z', 'h', '-', 'g', 'u', 'o', 'y', 'u'}: _cmn, // zh-guoyu + [maxLen]byte{'z', 'h', '-', 'h', 'a', 'k', 'k', 'a'}: _hak, // zh-hakka + [maxLen]byte{'z', 'h', '-', 'm', 'i', 'n', '-', 'n', 'a', 'n'}: _nan, // zh-min-nan + [maxLen]byte{'z', 'h', '-', 'x', 'i', 'a', 'n', 'g'}: _hsn, // zh-xiang + + // Grandfathered tags with no modern replacement will be converted as + // follows: + [maxLen]byte{'c', 'e', 'l', '-', 'g', 'a', 'u', 'l', 'i', 's', 'h'}: -1, // cel-gaulish + [maxLen]byte{'e', 'n', '-', 'g', 'b', '-', 'o', 'e', 'd'}: -2, // en-GB-oed + [maxLen]byte{'i', '-', 'd', 'e', 'f', 'a', 'u', 'l', 't'}: -3, // i-default + [maxLen]byte{'i', '-', 'e', 'n', 'o', 'c', 'h', 'i', 'a', 'n'}: -4, // i-enochian + [maxLen]byte{'i', '-', 'm', 'i', 'n', 'g', 'o'}: -5, // i-mingo + [maxLen]byte{'z', 'h', '-', 'm', 'i', 'n'}: -6, // zh-min + + // CLDR-specific tag. 
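+		// "root" maps to index 0 (i.e. "und"), while negative values pick an
+		// entry from altTags via the grandfathered function below.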
+ [maxLen]byte{'r', 'o', 'o', 't'}: 0, // root + [maxLen]byte{'e', 'n', '-', 'u', 's', '-', 'p', 'o', 's', 'i', 'x'}: -7, // en_US_POSIX" + } + + altTagIndex = [...]uint8{0, 17, 31, 45, 61, 74, 86, 102} + + altTags = "xtg-x-cel-gaulishen-GB-oxendicten-x-i-defaultund-x-i-enochiansee-x-i-mingonan-x-zh-minen-US-u-va-posix" +) + +func grandfathered(s [maxAltTaglen]byte) (t Tag, ok bool) { + if v, ok := grandfatheredMap[s]; ok { + if v < 0 { + return Make(altTags[altTagIndex[-v-1]:altTagIndex[-v]]), true + } + t.LangID = Language(v) + return t, true + } + return t, false +} diff --git a/vendor/golang.org/x/text/internal/language/match.go b/vendor/golang.org/x/text/internal/language/match.go new file mode 100644 index 00000000000..75a2dbca764 --- /dev/null +++ b/vendor/golang.org/x/text/internal/language/match.go @@ -0,0 +1,226 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package language + +import "errors" + +type scriptRegionFlags uint8 + +const ( + isList = 1 << iota + scriptInFrom + regionInFrom +) + +func (t *Tag) setUndefinedLang(id Language) { + if t.LangID == 0 { + t.LangID = id + } +} + +func (t *Tag) setUndefinedScript(id Script) { + if t.ScriptID == 0 { + t.ScriptID = id + } +} + +func (t *Tag) setUndefinedRegion(id Region) { + if t.RegionID == 0 || t.RegionID.Contains(id) { + t.RegionID = id + } +} + +// ErrMissingLikelyTagsData indicates no information was available +// to compute likely values of missing tags. +var ErrMissingLikelyTagsData = errors.New("missing likely tags data") + +// addLikelySubtags sets subtags to their most likely value, given the locale. +// In most cases this means setting fields for unknown values, but in some +// cases it may alter a value. It returns an ErrMissingLikelyTagsData error +// if the given locale cannot be expanded. +func (t Tag) addLikelySubtags() (Tag, error) { + id, err := addTags(t) + if err != nil { + return t, err + } else if id.equalTags(t) { + return t, nil + } + id.RemakeString() + return id, nil +} + +// specializeRegion attempts to specialize a group region. +func specializeRegion(t *Tag) bool { + if i := regionInclusion[t.RegionID]; i < nRegionGroups { + x := likelyRegionGroup[i] + if Language(x.lang) == t.LangID && Script(x.script) == t.ScriptID { + t.RegionID = Region(x.region) + } + return true + } + return false +} + +// Maximize returns a new tag with missing tags filled in. +func (t Tag) Maximize() (Tag, error) { + return addTags(t) +} + +func addTags(t Tag) (Tag, error) { + // We leave private use identifiers alone. + if t.IsPrivateUse() { + return t, nil + } + if t.ScriptID != 0 && t.RegionID != 0 { + if t.LangID != 0 { + // already fully specified + specializeRegion(&t) + return t, nil + } + // Search matches for und-script-region. Note that for these cases + // region will never be a group so there is no need to check for this. + list := likelyRegion[t.RegionID : t.RegionID+1] + if x := list[0]; x.flags&isList != 0 { + list = likelyRegionList[x.lang : x.lang+uint16(x.script)] + } + for _, x := range list { + // Deviating from the spec. See match_test.go for details. + if Script(x.script) == t.ScriptID { + t.setUndefinedLang(Language(x.lang)) + return t, nil + } + } + } + if t.LangID != 0 { + // Search matches for lang-script and lang-region, where lang != und. 
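+		// Only languages below langNoIndexOffset have an entry in the
+		// likelyLang table; IDs derived directly from 3-letter codes carry
+		// no likely-subtag data.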
+ if t.LangID < langNoIndexOffset { + x := likelyLang[t.LangID] + if x.flags&isList != 0 { + list := likelyLangList[x.region : x.region+uint16(x.script)] + if t.ScriptID != 0 { + for _, x := range list { + if Script(x.script) == t.ScriptID && x.flags&scriptInFrom != 0 { + t.setUndefinedRegion(Region(x.region)) + return t, nil + } + } + } else if t.RegionID != 0 { + count := 0 + goodScript := true + tt := t + for _, x := range list { + // We visit all entries for which the script was not + // defined, including the ones where the region was not + // defined. This allows for proper disambiguation within + // regions. + if x.flags&scriptInFrom == 0 && t.RegionID.Contains(Region(x.region)) { + tt.RegionID = Region(x.region) + tt.setUndefinedScript(Script(x.script)) + goodScript = goodScript && tt.ScriptID == Script(x.script) + count++ + } + } + if count == 1 { + return tt, nil + } + // Even if we fail to find a unique Region, we might have + // an unambiguous script. + if goodScript { + t.ScriptID = tt.ScriptID + } + } + } + } + } else { + // Search matches for und-script. + if t.ScriptID != 0 { + x := likelyScript[t.ScriptID] + if x.region != 0 { + t.setUndefinedRegion(Region(x.region)) + t.setUndefinedLang(Language(x.lang)) + return t, nil + } + } + // Search matches for und-region. If und-script-region exists, it would + // have been found earlier. + if t.RegionID != 0 { + if i := regionInclusion[t.RegionID]; i < nRegionGroups { + x := likelyRegionGroup[i] + if x.region != 0 { + t.setUndefinedLang(Language(x.lang)) + t.setUndefinedScript(Script(x.script)) + t.RegionID = Region(x.region) + } + } else { + x := likelyRegion[t.RegionID] + if x.flags&isList != 0 { + x = likelyRegionList[x.lang] + } + if x.script != 0 && x.flags != scriptInFrom { + t.setUndefinedLang(Language(x.lang)) + t.setUndefinedScript(Script(x.script)) + return t, nil + } + } + } + } + + // Search matches for lang. + if t.LangID < langNoIndexOffset { + x := likelyLang[t.LangID] + if x.flags&isList != 0 { + x = likelyLangList[x.region] + } + if x.region != 0 { + t.setUndefinedScript(Script(x.script)) + t.setUndefinedRegion(Region(x.region)) + } + specializeRegion(&t) + if t.LangID == 0 { + t.LangID = _en // default language + } + return t, nil + } + return t, ErrMissingLikelyTagsData +} + +func (t *Tag) setTagsFrom(id Tag) { + t.LangID = id.LangID + t.ScriptID = id.ScriptID + t.RegionID = id.RegionID +} + +// minimize removes the region or script subtags from t such that +// t.addLikelySubtags() == t.minimize().addLikelySubtags(). +func (t Tag) minimize() (Tag, error) { + t, err := minimizeTags(t) + if err != nil { + return t, err + } + t.RemakeString() + return t, nil +} + +// minimizeTags mimics the behavior of the ICU 51 C implementation. +func minimizeTags(t Tag) (Tag, error) { + if t.equalTags(Und) { + return t, nil + } + max, err := addTags(t) + if err != nil { + return t, err + } + for _, id := range [...]Tag{ + {LangID: t.LangID}, + {LangID: t.LangID, RegionID: t.RegionID}, + {LangID: t.LangID, ScriptID: t.ScriptID}, + } { + if x, err := addTags(id); err == nil && max.equalTags(x) { + t.setTagsFrom(id) + break + } + } + return t, nil +} diff --git a/vendor/golang.org/x/text/internal/language/parse.go b/vendor/golang.org/x/text/internal/language/parse.go new file mode 100644 index 00000000000..aad1e0acf77 --- /dev/null +++ b/vendor/golang.org/x/text/internal/language/parse.go @@ -0,0 +1,608 @@ +// Copyright 2013 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package language
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"sort"
+
+	"golang.org/x/text/internal/tag"
+)
+
+// isAlpha returns true if the byte is not a digit.
+// b must be an ASCII letter or digit.
+func isAlpha(b byte) bool {
+	return b > '9'
+}
+
+// isAlphaNum returns true if the string contains only ASCII letters or digits.
+func isAlphaNum(s []byte) bool {
+	for _, c := range s {
+		if !('a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9') {
+			return false
+		}
+	}
+	return true
+}
+
+// ErrSyntax is returned by any of the parsing functions when the
+// input is not well-formed, according to BCP 47.
+// TODO: return the position at which the syntax error occurred?
+var ErrSyntax = errors.New("language: tag is not well-formed")
+
+// ErrDuplicateKey is returned when a tag contains the same key twice with
+// different values in the -u section.
+var ErrDuplicateKey = errors.New("language: different values for same key in -u extension")
+
+// ValueError is returned by any of the parsing functions when the
+// input is well-formed but the respective subtag is not recognized
+// as a valid value.
+type ValueError struct {
+	v [8]byte
+}
+
+// NewValueError creates a new ValueError.
+func NewValueError(tag []byte) ValueError {
+	var e ValueError
+	copy(e.v[:], tag)
+	return e
+}
+
+func (e ValueError) tag() []byte {
+	n := bytes.IndexByte(e.v[:], 0)
+	if n == -1 {
+		n = 8
+	}
+	return e.v[:n]
+}
+
+// Error implements the error interface.
+func (e ValueError) Error() string {
+	return fmt.Sprintf("language: subtag %q is well-formed but unknown", e.tag())
+}
+
+// Subtag returns the subtag for which the error occurred.
+func (e ValueError) Subtag() string {
+	return string(e.tag())
+}
+
+// scanner is used to scan BCP 47 tokens, which are separated by _ or -.
+type scanner struct {
+	b     []byte
+	bytes [max99thPercentileSize]byte
+	token []byte
+	start int // start position of the current token
+	end   int // end position of the current token
+	next  int // next point for scan
+	err   error
+	done  bool
+}
+
+func makeScannerString(s string) scanner {
+	scan := scanner{}
+	if len(s) <= len(scan.bytes) {
+		scan.b = scan.bytes[:copy(scan.bytes[:], s)]
+	} else {
+		scan.b = []byte(s)
+	}
+	scan.init()
+	return scan
+}
+
+// makeScanner returns a scanner using b as the input buffer.
+// b is not copied and may be modified by the scanner routines.
+func makeScanner(b []byte) scanner {
+	scan := scanner{b: b}
+	scan.init()
+	return scan
+}
+
+func (s *scanner) init() {
+	for i, c := range s.b {
+		if c == '_' {
+			s.b[i] = '-'
+		}
+	}
+	s.scan()
+}
+
+// toLower converts the string between start and end to lower case.
+func (s *scanner) toLower(start, end int) {
+	for i := start; i < end; i++ {
+		c := s.b[i]
+		if 'A' <= c && c <= 'Z' {
+			s.b[i] += 'a' - 'A'
+		}
+	}
+}
+
+func (s *scanner) setError(e error) {
+	if s.err == nil || (e == ErrSyntax && s.err != ErrSyntax) {
+		s.err = e
+	}
+}
+
+// resizeRange shrinks or grows the array at position oldStart such that
+// a new string of size newSize can fit between oldStart and oldEnd.
+// Sets the scan point to after the resized range.
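+// The underlying buffer is reallocated if the resized contents no longer fit.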
+func (s *scanner) resizeRange(oldStart, oldEnd, newSize int) { + s.start = oldStart + if end := oldStart + newSize; end != oldEnd { + diff := end - oldEnd + var b []byte + if n := len(s.b) + diff; n > cap(s.b) { + b = make([]byte, n) + copy(b, s.b[:oldStart]) + } else { + b = s.b[:n] + } + copy(b[end:], s.b[oldEnd:]) + s.b = b + s.next = end + (s.next - s.end) + s.end = end + } +} + +// replace replaces the current token with repl. +func (s *scanner) replace(repl string) { + s.resizeRange(s.start, s.end, len(repl)) + copy(s.b[s.start:], repl) +} + +// gobble removes the current token from the input. +// Caller must call scan after calling gobble. +func (s *scanner) gobble(e error) { + s.setError(e) + if s.start == 0 { + s.b = s.b[:+copy(s.b, s.b[s.next:])] + s.end = 0 + } else { + s.b = s.b[:s.start-1+copy(s.b[s.start-1:], s.b[s.end:])] + s.end = s.start - 1 + } + s.next = s.start +} + +// deleteRange removes the given range from s.b before the current token. +func (s *scanner) deleteRange(start, end int) { + s.b = s.b[:start+copy(s.b[start:], s.b[end:])] + diff := end - start + s.next -= diff + s.start -= diff + s.end -= diff +} + +// scan parses the next token of a BCP 47 string. Tokens that are larger +// than 8 characters or include non-alphanumeric characters result in an error +// and are gobbled and removed from the output. +// It returns the end position of the last token consumed. +func (s *scanner) scan() (end int) { + end = s.end + s.token = nil + for s.start = s.next; s.next < len(s.b); { + i := bytes.IndexByte(s.b[s.next:], '-') + if i == -1 { + s.end = len(s.b) + s.next = len(s.b) + i = s.end - s.start + } else { + s.end = s.next + i + s.next = s.end + 1 + } + token := s.b[s.start:s.end] + if i < 1 || i > 8 || !isAlphaNum(token) { + s.gobble(ErrSyntax) + continue + } + s.token = token + return end + } + if n := len(s.b); n > 0 && s.b[n-1] == '-' { + s.setError(ErrSyntax) + s.b = s.b[:len(s.b)-1] + } + s.done = true + return end +} + +// acceptMinSize parses multiple tokens of the given size or greater. +// It returns the end position of the last token consumed. +func (s *scanner) acceptMinSize(min int) (end int) { + end = s.end + s.scan() + for ; len(s.token) >= min; s.scan() { + end = s.end + } + return end +} + +// Parse parses the given BCP 47 string and returns a valid Tag. If parsing +// failed it returns an error and any part of the tag that could be parsed. +// If parsing succeeded but an unknown value was found, it returns +// ValueError. The Tag returned in this case is just stripped of the unknown +// value. All other values are preserved. It accepts tags in the BCP 47 format +// and extensions to this standard defined in +// https://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers. +func Parse(s string) (t Tag, err error) { + // TODO: consider supporting old-style locale key-value pairs. + if s == "" { + return Und, ErrSyntax + } + defer func() { + if recover() != nil { + t = Und + err = ErrSyntax + return + } + }() + if len(s) <= maxAltTaglen { + b := [maxAltTaglen]byte{} + for i, c := range s { + // Generating invalid UTF-8 is okay as it won't match. 
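+			// A non-ASCII rune is truncated to a single byte here, which can
+			// never equal an all-ASCII grandfathered key.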
+			if 'A' <= c && c <= 'Z' {
+				c += 'a' - 'A'
+			} else if c == '_' {
+				c = '-'
+			}
+			b[i] = byte(c)
+		}
+		if t, ok := grandfathered(b); ok {
+			return t, nil
+		}
+	}
+	scan := makeScannerString(s)
+	return parse(&scan, s)
+}
+
+func parse(scan *scanner, s string) (t Tag, err error) {
+	t = Und
+	var end int
+	if n := len(scan.token); n <= 1 {
+		scan.toLower(0, len(scan.b))
+		if n == 0 || scan.token[0] != 'x' {
+			return t, ErrSyntax
+		}
+		end = parseExtensions(scan)
+	} else if n >= 4 {
+		return Und, ErrSyntax
+	} else { // the usual case
+		t, end = parseTag(scan, true)
+		if n := len(scan.token); n == 1 {
+			t.pExt = uint16(end)
+			end = parseExtensions(scan)
+		} else if end < len(scan.b) {
+			scan.setError(ErrSyntax)
+			scan.b = scan.b[:end]
+		}
+	}
+	if int(t.pVariant) < len(scan.b) {
+		if end < len(s) {
+			s = s[:end]
+		}
+		if len(s) > 0 && tag.Compare(s, scan.b) == 0 {
+			t.str = s
+		} else {
+			t.str = string(scan.b)
+		}
+	} else {
+		t.pVariant, t.pExt = 0, 0
+	}
+	return t, scan.err
+}
+
+// parseTag parses language, script, region and variants.
+// It returns a Tag and the end position in the input that was parsed.
+// If doNorm is true, then <lang>-<extlang> will be normalized to <extlang>.
+func parseTag(scan *scanner, doNorm bool) (t Tag, end int) {
+	var e error
+	// TODO: set an error if an unknown lang, script or region is encountered.
+	t.LangID, e = getLangID(scan.token)
+	scan.setError(e)
+	scan.replace(t.LangID.String())
+	langStart := scan.start
+	end = scan.scan()
+	for len(scan.token) == 3 && isAlpha(scan.token[0]) {
+		// From http://tools.ietf.org/html/bcp47, <lang>-<extlang> tags are equivalent
+		// to a tag of the form <extlang>.
+		if doNorm {
+			lang, e := getLangID(scan.token)
+			if lang != 0 {
+				t.LangID = lang
+				langStr := lang.String()
+				copy(scan.b[langStart:], langStr)
+				scan.b[langStart+len(langStr)] = '-'
+				scan.start = langStart + len(langStr) + 1
+			}
+			scan.gobble(e)
+		}
+		end = scan.scan()
+	}
+	if len(scan.token) == 4 && isAlpha(scan.token[0]) {
+		t.ScriptID, e = getScriptID(script, scan.token)
+		if t.ScriptID == 0 {
+			scan.gobble(e)
+		}
+		end = scan.scan()
+	}
+	if n := len(scan.token); n >= 2 && n <= 3 {
+		t.RegionID, e = getRegionID(scan.token)
+		if t.RegionID == 0 {
+			scan.gobble(e)
+		} else {
+			scan.replace(t.RegionID.String())
+		}
+		end = scan.scan()
+	}
+	scan.toLower(scan.start, len(scan.b))
+	t.pVariant = byte(end)
+	end = parseVariants(scan, end, t)
+	t.pExt = uint16(end)
+	return t, end
+}
+
+var separator = []byte{'-'}
+
+// parseVariants scans tokens as long as each token is a valid variant string.
+// Duplicate variants are removed.
+func parseVariants(scan *scanner, end int, t Tag) int {
+	start := scan.start
+	varIDBuf := [4]uint8{}
+	variantBuf := [4][]byte{}
+	varID := varIDBuf[:0]
+	variant := variantBuf[:0]
+	last := -1
+	needSort := false
+	for ; len(scan.token) >= 4; scan.scan() {
+		// TODO: measure the impact of needing this conversion and redesign
+		// the data structure if there is an issue.
+		v, ok := variantIndex[string(scan.token)]
+		if !ok {
+			// unknown variant
+			// TODO: allow user-defined variants?
+			scan.gobble(NewValueError(scan.token))
+			continue
+		}
+		varID = append(varID, v)
+		variant = append(variant, scan.token)
+		if !needSort {
+			if last < int(v) {
+				last = int(v)
+			} else {
+				needSort = true
+				// There are no legal combinations of more than 7 variants
+				// (and this is by no means a useful sequence).
+ const maxVariants = 8 + if len(varID) > maxVariants { + break + } + } + } + end = scan.end + } + if needSort { + sort.Sort(variantsSort{varID, variant}) + k, l := 0, -1 + for i, v := range varID { + w := int(v) + if l == w { + // Remove duplicates. + continue + } + varID[k] = varID[i] + variant[k] = variant[i] + k++ + l = w + } + if str := bytes.Join(variant[:k], separator); len(str) == 0 { + end = start - 1 + } else { + scan.resizeRange(start, end, len(str)) + copy(scan.b[scan.start:], str) + end = scan.end + } + } + return end +} + +type variantsSort struct { + i []uint8 + v [][]byte +} + +func (s variantsSort) Len() int { + return len(s.i) +} + +func (s variantsSort) Swap(i, j int) { + s.i[i], s.i[j] = s.i[j], s.i[i] + s.v[i], s.v[j] = s.v[j], s.v[i] +} + +func (s variantsSort) Less(i, j int) bool { + return s.i[i] < s.i[j] +} + +type bytesSort struct { + b [][]byte + n int // first n bytes to compare +} + +func (b bytesSort) Len() int { + return len(b.b) +} + +func (b bytesSort) Swap(i, j int) { + b.b[i], b.b[j] = b.b[j], b.b[i] +} + +func (b bytesSort) Less(i, j int) bool { + for k := 0; k < b.n; k++ { + if b.b[i][k] == b.b[j][k] { + continue + } + return b.b[i][k] < b.b[j][k] + } + return false +} + +// parseExtensions parses and normalizes the extensions in the buffer. +// It returns the last position of scan.b that is part of any extension. +// It also trims scan.b to remove excess parts accordingly. +func parseExtensions(scan *scanner) int { + start := scan.start + exts := [][]byte{} + private := []byte{} + end := scan.end + for len(scan.token) == 1 { + extStart := scan.start + ext := scan.token[0] + end = parseExtension(scan) + extension := scan.b[extStart:end] + if len(extension) < 3 || (ext != 'x' && len(extension) < 4) { + scan.setError(ErrSyntax) + end = extStart + continue + } else if start == extStart && (ext == 'x' || scan.start == len(scan.b)) { + scan.b = scan.b[:end] + return end + } else if ext == 'x' { + private = extension + break + } + exts = append(exts, extension) + } + sort.Sort(bytesSort{exts, 1}) + if len(private) > 0 { + exts = append(exts, private) + } + scan.b = scan.b[:start] + if len(exts) > 0 { + scan.b = append(scan.b, bytes.Join(exts, separator)...) + } else if start > 0 { + // Strip trailing '-'. + scan.b = scan.b[:start-1] + } + return end +} + +// parseExtension parses a single extension and returns the position of +// the extension end. +func parseExtension(scan *scanner) int { + start, end := scan.start, scan.end + switch scan.token[0] { + case 'u': // https://www.ietf.org/rfc/rfc6067.txt + attrStart := end + scan.scan() + for last := []byte{}; len(scan.token) > 2; scan.scan() { + if bytes.Compare(scan.token, last) != -1 { + // Attributes are unsorted. Start over from scratch. + p := attrStart + 1 + scan.next = p + attrs := [][]byte{} + for scan.scan(); len(scan.token) > 2; scan.scan() { + attrs = append(attrs, scan.token) + end = scan.end + } + sort.Sort(bytesSort{attrs, 3}) + copy(scan.b[p:], bytes.Join(attrs, separator)) + break + } + last = scan.token + end = scan.end + } + // Scan key-type sequences. A key is of length 2 and may be followed + // by 0 or more "type" subtags from 3 to the maximum of 8 letters. 
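+		// For example, in "u-ca-gregory-nu-latn", "ca" and "nu" are keys and
+		// "gregory" and "latn" are their types.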
+		var last, key []byte
+		for attrEnd := end; len(scan.token) == 2; last = key {
+			key = scan.token
+			end = scan.end
+			for scan.scan(); end < scan.end && len(scan.token) > 2; scan.scan() {
+				end = scan.end
+			}
+			// TODO: check key value validity
+			if bytes.Compare(key, last) != 1 || scan.err != nil {
+				// We have an invalid key or the keys are not sorted.
+				// Start scanning keys from scratch and reorder.
+				p := attrEnd + 1
+				scan.next = p
+				keys := [][]byte{}
+				for scan.scan(); len(scan.token) == 2; {
+					keyStart := scan.start
+					end = scan.end
+					for scan.scan(); end < scan.end && len(scan.token) > 2; scan.scan() {
+						end = scan.end
+					}
+					keys = append(keys, scan.b[keyStart:end])
+				}
+				sort.Stable(bytesSort{keys, 2})
+				if n := len(keys); n > 0 {
+					k := 0
+					for i := 1; i < n; i++ {
+						if !bytes.Equal(keys[k][:2], keys[i][:2]) {
+							k++
+							keys[k] = keys[i]
+						} else if !bytes.Equal(keys[k], keys[i]) {
+							scan.setError(ErrDuplicateKey)
+						}
+					}
+					keys = keys[:k+1]
+				}
+				reordered := bytes.Join(keys, separator)
+				if e := p + len(reordered); e < end {
+					scan.deleteRange(e, end)
+					end = e
+				}
+				copy(scan.b[p:], reordered)
+				break
+			}
+		}
+	case 't': // https://www.ietf.org/rfc/rfc6497.txt
+		scan.scan()
+		if n := len(scan.token); n >= 2 && n <= 3 && isAlpha(scan.token[1]) {
+			_, end = parseTag(scan, false)
+			scan.toLower(start, end)
+		}
+		for len(scan.token) == 2 && !isAlpha(scan.token[1]) {
+			end = scan.acceptMinSize(3)
+		}
+	case 'x':
+		end = scan.acceptMinSize(1)
+	default:
+		end = scan.acceptMinSize(2)
+	}
+	return end
+}
+
+// getExtension returns the name, body and end position of the extension.
+func getExtension(s string, p int) (end int, ext string) {
+	if s[p] == '-' {
+		p++
+	}
+	if s[p] == 'x' {
+		return len(s), s[p:]
+	}
+	end = nextExtension(s, p)
+	return end, s[p:end]
+}
+
+// nextExtension finds the next extension within the string, searching
+// for the -<char>- pattern from position p.
+// In the vast majority of cases, language tags will have at most
+// one extension and extensions tend to be small.
+func nextExtension(s string, p int) int {
+	for n := len(s) - 3; p < n; {
+		if s[p] == '-' {
+			if s[p+2] == '-' {
+				return p
+			}
+			p += 3
+		} else {
+			p++
+		}
+	}
+	return len(s)
+}
diff --git a/vendor/golang.org/x/text/internal/language/tables.go b/vendor/golang.org/x/text/internal/language/tables.go
new file mode 100644
index 00000000000..14167e74e40
--- /dev/null
+++ b/vendor/golang.org/x/text/internal/language/tables.go
@@ -0,0 +1,3494 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+package language
+
+import "golang.org/x/text/internal/tag"
+
+// CLDRVersion is the CLDR version from which the tables in this package are derived.
+const CLDRVersion = "32" + +const NumLanguages = 8798 + +const NumScripts = 261 + +const NumRegions = 358 + +type FromTo struct { + From uint16 + To uint16 +} + +const nonCanonicalUnd = 1201 +const ( + _af = 22 + _am = 39 + _ar = 58 + _az = 88 + _bg = 126 + _bn = 165 + _ca = 215 + _cs = 250 + _da = 257 + _de = 269 + _el = 310 + _en = 313 + _es = 318 + _et = 320 + _fa = 328 + _fi = 337 + _fil = 339 + _fr = 350 + _gu = 420 + _he = 444 + _hi = 446 + _hr = 465 + _hu = 469 + _hy = 471 + _id = 481 + _is = 504 + _it = 505 + _ja = 512 + _ka = 528 + _kk = 578 + _km = 586 + _kn = 593 + _ko = 596 + _ky = 650 + _lo = 696 + _lt = 704 + _lv = 711 + _mk = 767 + _ml = 772 + _mn = 779 + _mo = 784 + _mr = 795 + _ms = 799 + _mul = 806 + _my = 817 + _nb = 839 + _ne = 849 + _nl = 871 + _no = 879 + _pa = 925 + _pl = 947 + _pt = 960 + _ro = 988 + _ru = 994 + _sh = 1031 + _si = 1036 + _sk = 1042 + _sl = 1046 + _sq = 1073 + _sr = 1074 + _sv = 1092 + _sw = 1093 + _ta = 1104 + _te = 1121 + _th = 1131 + _tl = 1146 + _tn = 1152 + _tr = 1162 + _uk = 1198 + _ur = 1204 + _uz = 1212 + _vi = 1219 + _zh = 1321 + _zu = 1327 + _jbo = 515 + _ami = 1650 + _bnn = 2357 + _hak = 438 + _tlh = 14467 + _lb = 661 + _nv = 899 + _pwn = 12055 + _tao = 14188 + _tay = 14198 + _tsu = 14662 + _nn = 874 + _sfb = 13629 + _vgt = 15701 + _sgg = 13660 + _cmn = 3007 + _nan = 835 + _hsn = 467 +) + +const langPrivateStart = 0x2f72 + +const langPrivateEnd = 0x3179 + +// lang holds an alphabetically sorted list of ISO-639 language identifiers. +// All entries are 4 bytes. The index of the identifier (divided by 4) is the language tag. +// For 2-byte language identifiers, the two successive bytes have the following meaning: +// - if the first letter of the 2- and 3-letter ISO codes are the same: +// the second and third letter of the 3-letter ISO code. +// - otherwise: a 0 and a by 2 bits right-shifted index into altLangISO3. +// +// For 3-byte language identifiers the 4th byte is 0. 
+const lang tag.Index = "" + // Size: 5324 bytes + "---\x00aaaraai\x00aak\x00aau\x00abbkabi\x00abq\x00abr\x00abt\x00aby\x00a" + + "cd\x00ace\x00ach\x00ada\x00ade\x00adj\x00ady\x00adz\x00aeveaeb\x00aey" + + "\x00affragc\x00agd\x00agg\x00agm\x00ago\x00agq\x00aha\x00ahl\x00aho\x00a" + + "jg\x00akkaakk\x00ala\x00ali\x00aln\x00alt\x00ammhamm\x00amn\x00amo\x00am" + + "p\x00anrganc\x00ank\x00ann\x00any\x00aoj\x00aom\x00aoz\x00apc\x00apd\x00" + + "ape\x00apr\x00aps\x00apz\x00arraarc\x00arh\x00arn\x00aro\x00arq\x00ars" + + "\x00ary\x00arz\x00assmasa\x00ase\x00asg\x00aso\x00ast\x00ata\x00atg\x00a" + + "tj\x00auy\x00avvaavl\x00avn\x00avt\x00avu\x00awa\x00awb\x00awo\x00awx" + + "\x00ayymayb\x00azzebaakbal\x00ban\x00bap\x00bar\x00bas\x00bav\x00bax\x00" + + "bba\x00bbb\x00bbc\x00bbd\x00bbj\x00bbp\x00bbr\x00bcf\x00bch\x00bci\x00bc" + + "m\x00bcn\x00bco\x00bcq\x00bcu\x00bdd\x00beelbef\x00beh\x00bej\x00bem\x00" + + "bet\x00bew\x00bex\x00bez\x00bfd\x00bfq\x00bft\x00bfy\x00bgulbgc\x00bgn" + + "\x00bgx\x00bhihbhb\x00bhg\x00bhi\x00bhk\x00bhl\x00bho\x00bhy\x00biisbib" + + "\x00big\x00bik\x00bim\x00bin\x00bio\x00biq\x00bjh\x00bji\x00bjj\x00bjn" + + "\x00bjo\x00bjr\x00bjt\x00bjz\x00bkc\x00bkm\x00bkq\x00bku\x00bkv\x00blt" + + "\x00bmambmh\x00bmk\x00bmq\x00bmu\x00bnenbng\x00bnm\x00bnp\x00boodboj\x00" + + "bom\x00bon\x00bpy\x00bqc\x00bqi\x00bqp\x00bqv\x00brrebra\x00brh\x00brx" + + "\x00brz\x00bsosbsj\x00bsq\x00bss\x00bst\x00bto\x00btt\x00btv\x00bua\x00b" + + "uc\x00bud\x00bug\x00buk\x00bum\x00buo\x00bus\x00buu\x00bvb\x00bwd\x00bwr" + + "\x00bxh\x00bye\x00byn\x00byr\x00bys\x00byv\x00byx\x00bza\x00bze\x00bzf" + + "\x00bzh\x00bzw\x00caatcan\x00cbj\x00cch\x00ccp\x00ceheceb\x00cfa\x00cgg" + + "\x00chhachk\x00chm\x00cho\x00chp\x00chr\x00cja\x00cjm\x00cjv\x00ckb\x00c" + + "kl\x00cko\x00cky\x00cla\x00cme\x00cmg\x00cooscop\x00cps\x00crrecrh\x00cr" + + "j\x00crk\x00crl\x00crm\x00crs\x00csescsb\x00csw\x00ctd\x00cuhucvhvcyymda" + + "andad\x00daf\x00dag\x00dah\x00dak\x00dar\x00dav\x00dbd\x00dbq\x00dcc\x00" + + "ddn\x00deeuded\x00den\x00dga\x00dgh\x00dgi\x00dgl\x00dgr\x00dgz\x00dia" + + "\x00dje\x00dnj\x00dob\x00doi\x00dop\x00dow\x00dri\x00drs\x00dsb\x00dtm" + + "\x00dtp\x00dts\x00dty\x00dua\x00duc\x00dud\x00dug\x00dvivdva\x00dww\x00d" + + "yo\x00dyu\x00dzzodzg\x00ebu\x00eeweefi\x00egl\x00egy\x00eka\x00eky\x00el" + + "llema\x00emi\x00enngenn\x00enq\x00eopoeri\x00es\x00\x05esu\x00etstetr" + + "\x00ett\x00etu\x00etx\x00euusewo\x00ext\x00faasfaa\x00fab\x00fag\x00fai" + + "\x00fan\x00ffulffi\x00ffm\x00fiinfia\x00fil\x00fit\x00fjijflr\x00fmp\x00" + + "foaofod\x00fon\x00for\x00fpe\x00fqs\x00frrafrc\x00frp\x00frr\x00frs\x00f" + + "ub\x00fud\x00fue\x00fuf\x00fuh\x00fuq\x00fur\x00fuv\x00fuy\x00fvr\x00fyr" + + "ygalegaa\x00gaf\x00gag\x00gah\x00gaj\x00gam\x00gan\x00gaw\x00gay\x00gba" + + "\x00gbf\x00gbm\x00gby\x00gbz\x00gcr\x00gdlagde\x00gdn\x00gdr\x00geb\x00g" + + "ej\x00gel\x00gez\x00gfk\x00ggn\x00ghs\x00gil\x00gim\x00gjk\x00gjn\x00gju" + + "\x00gkn\x00gkp\x00gllgglk\x00gmm\x00gmv\x00gnrngnd\x00gng\x00god\x00gof" + + "\x00goi\x00gom\x00gon\x00gor\x00gos\x00got\x00grb\x00grc\x00grt\x00grw" + + "\x00gsw\x00guujgub\x00guc\x00gud\x00gur\x00guw\x00gux\x00guz\x00gvlvgvf" + + "\x00gvr\x00gvs\x00gwc\x00gwi\x00gwt\x00gyi\x00haauhag\x00hak\x00ham\x00h" + + "aw\x00haz\x00hbb\x00hdy\x00heebhhy\x00hiinhia\x00hif\x00hig\x00hih\x00hi" + + "l\x00hla\x00hlu\x00hmd\x00hmt\x00hnd\x00hne\x00hnj\x00hnn\x00hno\x00homo" + + "hoc\x00hoj\x00hot\x00hrrvhsb\x00hsn\x00htathuunhui\x00hyyehzerianaian" + + 
"\x00iar\x00iba\x00ibb\x00iby\x00ica\x00ich\x00idndidd\x00idi\x00idu\x00i" + + "eleife\x00igboigb\x00ige\x00iiiiijj\x00ikpkikk\x00ikt\x00ikw\x00ikx\x00i" + + "lo\x00imo\x00inndinh\x00iodoiou\x00iri\x00isslittaiukuiw\x00\x03iwm\x00i" + + "ws\x00izh\x00izi\x00japnjab\x00jam\x00jbo\x00jbu\x00jen\x00jgk\x00jgo" + + "\x00ji\x00\x06jib\x00jmc\x00jml\x00jra\x00jut\x00jvavjwavkaatkaa\x00kab" + + "\x00kac\x00kad\x00kai\x00kaj\x00kam\x00kao\x00kbd\x00kbm\x00kbp\x00kbq" + + "\x00kbx\x00kby\x00kcg\x00kck\x00kcl\x00kct\x00kde\x00kdh\x00kdl\x00kdt" + + "\x00kea\x00ken\x00kez\x00kfo\x00kfr\x00kfy\x00kgonkge\x00kgf\x00kgp\x00k" + + "ha\x00khb\x00khn\x00khq\x00khs\x00kht\x00khw\x00khz\x00kiikkij\x00kiu" + + "\x00kiw\x00kjuakjd\x00kjg\x00kjs\x00kjy\x00kkazkkc\x00kkj\x00klalkln\x00" + + "klq\x00klt\x00klx\x00kmhmkmb\x00kmh\x00kmo\x00kms\x00kmu\x00kmw\x00knank" + + "nf\x00knp\x00koorkoi\x00kok\x00kol\x00kos\x00koz\x00kpe\x00kpf\x00kpo" + + "\x00kpr\x00kpx\x00kqb\x00kqf\x00kqs\x00kqy\x00kraukrc\x00kri\x00krj\x00k" + + "rl\x00krs\x00kru\x00ksasksb\x00ksd\x00ksf\x00ksh\x00ksj\x00ksr\x00ktb" + + "\x00ktm\x00kto\x00kuurkub\x00kud\x00kue\x00kuj\x00kum\x00kun\x00kup\x00k" + + "us\x00kvomkvg\x00kvr\x00kvx\x00kw\x00\x01kwj\x00kwo\x00kxa\x00kxc\x00kxm" + + "\x00kxp\x00kxw\x00kxz\x00kyirkye\x00kyx\x00kzr\x00laatlab\x00lad\x00lag" + + "\x00lah\x00laj\x00las\x00lbtzlbe\x00lbu\x00lbw\x00lcm\x00lcp\x00ldb\x00l" + + "ed\x00lee\x00lem\x00lep\x00leq\x00leu\x00lez\x00lguglgg\x00liimlia\x00li" + + "d\x00lif\x00lig\x00lih\x00lij\x00lis\x00ljp\x00lki\x00lkt\x00lle\x00lln" + + "\x00lmn\x00lmo\x00lmp\x00lninlns\x00lnu\x00loaoloj\x00lok\x00lol\x00lor" + + "\x00los\x00loz\x00lrc\x00ltitltg\x00luublua\x00luo\x00luy\x00luz\x00lvav" + + "lwl\x00lzh\x00lzz\x00mad\x00maf\x00mag\x00mai\x00mak\x00man\x00mas\x00ma" + + "w\x00maz\x00mbh\x00mbo\x00mbq\x00mbu\x00mbw\x00mci\x00mcp\x00mcq\x00mcr" + + "\x00mcu\x00mda\x00mde\x00mdf\x00mdh\x00mdj\x00mdr\x00mdx\x00med\x00mee" + + "\x00mek\x00men\x00mer\x00met\x00meu\x00mfa\x00mfe\x00mfn\x00mfo\x00mfq" + + "\x00mglgmgh\x00mgl\x00mgo\x00mgp\x00mgy\x00mhahmhi\x00mhl\x00mirimif\x00" + + "min\x00mis\x00miw\x00mkkdmki\x00mkl\x00mkp\x00mkw\x00mlalmle\x00mlp\x00m" + + "ls\x00mmo\x00mmu\x00mmx\x00mnonmna\x00mnf\x00mni\x00mnw\x00moolmoa\x00mo" + + "e\x00moh\x00mos\x00mox\x00mpp\x00mps\x00mpt\x00mpx\x00mql\x00mrarmrd\x00" + + "mrj\x00mro\x00mssamtltmtc\x00mtf\x00mti\x00mtr\x00mua\x00mul\x00mur\x00m" + + "us\x00mva\x00mvn\x00mvy\x00mwk\x00mwr\x00mwv\x00mxc\x00mxm\x00myyamyk" + + "\x00mym\x00myv\x00myw\x00myx\x00myz\x00mzk\x00mzm\x00mzn\x00mzp\x00mzw" + + "\x00mzz\x00naaunac\x00naf\x00nah\x00nak\x00nan\x00nap\x00naq\x00nas\x00n" + + "bobnca\x00nce\x00ncf\x00nch\x00nco\x00ncu\x00nddendc\x00nds\x00neepneb" + + "\x00new\x00nex\x00nfr\x00ngdonga\x00ngb\x00ngl\x00nhb\x00nhe\x00nhw\x00n" + + "if\x00nii\x00nij\x00nin\x00niu\x00niy\x00niz\x00njo\x00nkg\x00nko\x00nll" + + "dnmg\x00nmz\x00nnnonnf\x00nnh\x00nnk\x00nnm\x00noornod\x00noe\x00non\x00" + + "nop\x00nou\x00nqo\x00nrblnrb\x00nsk\x00nsn\x00nso\x00nss\x00ntm\x00ntr" + + "\x00nui\x00nup\x00nus\x00nuv\x00nux\x00nvavnwb\x00nxq\x00nxr\x00nyyanym" + + "\x00nyn\x00nzi\x00occiogc\x00ojjiokr\x00okv\x00omrmong\x00onn\x00ons\x00" + + "opm\x00orrioro\x00oru\x00osssosa\x00ota\x00otk\x00ozm\x00paanpag\x00pal" + + "\x00pam\x00pap\x00pau\x00pbi\x00pcd\x00pcm\x00pdc\x00pdt\x00ped\x00peo" + + "\x00pex\x00pfl\x00phl\x00phn\x00pilipil\x00pip\x00pka\x00pko\x00plolpla" + + "\x00pms\x00png\x00pnn\x00pnt\x00pon\x00ppo\x00pra\x00prd\x00prg\x00psusp" + + 
"ss\x00ptorptp\x00puu\x00pwa\x00quuequc\x00qug\x00rai\x00raj\x00rao\x00rc" + + "f\x00rej\x00rel\x00res\x00rgn\x00rhg\x00ria\x00rif\x00rjs\x00rkt\x00rmoh" + + "rmf\x00rmo\x00rmt\x00rmu\x00rnunrna\x00rng\x00roonrob\x00rof\x00roo\x00r" + + "ro\x00rtm\x00ruusrue\x00rug\x00rw\x00\x04rwk\x00rwo\x00ryu\x00saansaf" + + "\x00sah\x00saq\x00sas\x00sat\x00sav\x00saz\x00sba\x00sbe\x00sbp\x00scrds" + + "ck\x00scl\x00scn\x00sco\x00scs\x00sdndsdc\x00sdh\x00semesef\x00seh\x00se" + + "i\x00ses\x00sgagsga\x00sgs\x00sgw\x00sgz\x00sh\x00\x02shi\x00shk\x00shn" + + "\x00shu\x00siinsid\x00sig\x00sil\x00sim\x00sjr\x00sklkskc\x00skr\x00sks" + + "\x00sllvsld\x00sli\x00sll\x00sly\x00smmosma\x00smi\x00smj\x00smn\x00smp" + + "\x00smq\x00sms\x00snnasnc\x00snk\x00snp\x00snx\x00sny\x00soomsok\x00soq" + + "\x00sou\x00soy\x00spd\x00spl\x00sps\x00sqqisrrpsrb\x00srn\x00srr\x00srx" + + "\x00ssswssd\x00ssg\x00ssy\x00stotstk\x00stq\x00suunsua\x00sue\x00suk\x00" + + "sur\x00sus\x00svweswwaswb\x00swc\x00swg\x00swp\x00swv\x00sxn\x00sxw\x00s" + + "yl\x00syr\x00szl\x00taamtaj\x00tal\x00tan\x00taq\x00tbc\x00tbd\x00tbf" + + "\x00tbg\x00tbo\x00tbw\x00tbz\x00tci\x00tcy\x00tdd\x00tdg\x00tdh\x00teelt" + + "ed\x00tem\x00teo\x00tet\x00tfi\x00tggktgc\x00tgo\x00tgu\x00thhathl\x00th" + + "q\x00thr\x00tiirtif\x00tig\x00tik\x00tim\x00tio\x00tiv\x00tkuktkl\x00tkr" + + "\x00tkt\x00tlgltlf\x00tlx\x00tly\x00tmh\x00tmy\x00tnsntnh\x00toontof\x00" + + "tog\x00toq\x00tpi\x00tpm\x00tpz\x00tqo\x00trurtru\x00trv\x00trw\x00tssot" + + "sd\x00tsf\x00tsg\x00tsj\x00tsw\x00ttatttd\x00tte\x00ttj\x00ttr\x00tts" + + "\x00ttt\x00tuh\x00tul\x00tum\x00tuq\x00tvd\x00tvl\x00tvu\x00twwitwh\x00t" + + "wq\x00txg\x00tyahtya\x00tyv\x00tzm\x00ubu\x00udm\x00ugiguga\x00ukkruli" + + "\x00umb\x00und\x00unr\x00unx\x00urrduri\x00urt\x00urw\x00usa\x00utr\x00u" + + "vh\x00uvl\x00uzzbvag\x00vai\x00van\x00veenvec\x00vep\x00viievic\x00viv" + + "\x00vls\x00vmf\x00vmw\x00voolvot\x00vro\x00vun\x00vut\x00walnwae\x00waj" + + "\x00wal\x00wan\x00war\x00wbp\x00wbq\x00wbr\x00wci\x00wer\x00wgi\x00whg" + + "\x00wib\x00wiu\x00wiv\x00wja\x00wji\x00wls\x00wmo\x00wnc\x00wni\x00wnu" + + "\x00woolwob\x00wos\x00wrs\x00wsk\x00wtm\x00wuu\x00wuv\x00wwa\x00xav\x00x" + + "bi\x00xcr\x00xes\x00xhhoxla\x00xlc\x00xld\x00xmf\x00xmn\x00xmr\x00xna" + + "\x00xnr\x00xog\x00xon\x00xpr\x00xrb\x00xsa\x00xsi\x00xsm\x00xsr\x00xwe" + + "\x00yam\x00yao\x00yap\x00yas\x00yat\x00yav\x00yay\x00yaz\x00yba\x00ybb" + + "\x00yby\x00yer\x00ygr\x00ygw\x00yiidyko\x00yle\x00ylg\x00yll\x00yml\x00y" + + "ooryon\x00yrb\x00yre\x00yrl\x00yss\x00yua\x00yue\x00yuj\x00yut\x00yuw" + + "\x00zahazag\x00zbl\x00zdj\x00zea\x00zgh\x00zhhozhx\x00zia\x00zlm\x00zmi" + + "\x00zne\x00zuulzxx\x00zza\x00\xff\xff\xff\xff" + +const langNoIndexOffset = 1330 + +// langNoIndex is a bit vector of all 3-letter language codes that are not used as an index +// in lookup tables. The language ids for these language codes are derived directly +// from the letters and are not consecutive. 
+// Size: 2197 bytes, 2197 elements +var langNoIndex = [2197]uint8{ + // Entry 0 - 3F + 0xff, 0xf8, 0xed, 0xfe, 0xeb, 0xd3, 0x3b, 0xd2, + 0xfb, 0xbf, 0x7a, 0xfa, 0x37, 0x1d, 0x3c, 0x57, + 0x6e, 0x97, 0x73, 0x38, 0xfb, 0xea, 0xbf, 0x70, + 0xad, 0x03, 0xff, 0xff, 0xcf, 0x05, 0x84, 0x72, + 0xe9, 0xbf, 0xfd, 0xbf, 0xbf, 0xf7, 0xfd, 0x77, + 0x0f, 0xff, 0xef, 0x6f, 0xff, 0xfb, 0xdf, 0xe2, + 0xc9, 0xf8, 0x7f, 0x7e, 0x4d, 0xbc, 0x0a, 0x6a, + 0x7c, 0xea, 0xe3, 0xfa, 0x7a, 0xbf, 0x67, 0xff, + // Entry 40 - 7F + 0xff, 0xff, 0xff, 0xdf, 0x2a, 0x54, 0x91, 0xc0, + 0x5d, 0xe3, 0x97, 0x14, 0x07, 0x20, 0xdd, 0xed, + 0x9f, 0x3f, 0xc9, 0x21, 0xf8, 0x3f, 0x94, 0x35, + 0x7c, 0x5f, 0xff, 0x5f, 0x8e, 0x6e, 0xdf, 0xff, + 0xff, 0xff, 0x55, 0x7c, 0xd3, 0xfd, 0xbf, 0xb5, + 0x7b, 0xdf, 0x7f, 0xf7, 0xca, 0xfe, 0xdb, 0xa3, + 0xa8, 0xff, 0x1f, 0x67, 0x7d, 0xeb, 0xef, 0xce, + 0xff, 0xff, 0x9f, 0xff, 0xb7, 0xef, 0xfe, 0xcf, + // Entry 80 - BF + 0xdb, 0xff, 0xf3, 0xcd, 0xfb, 0x7f, 0xff, 0xff, + 0xbb, 0xee, 0xf7, 0xbd, 0xdb, 0xff, 0x5f, 0xf7, + 0xfd, 0xf2, 0xfd, 0xff, 0x5e, 0x2f, 0x3b, 0xba, + 0x7e, 0xff, 0xff, 0xfe, 0xf7, 0xff, 0xdd, 0xff, + 0xfd, 0xdf, 0xfb, 0xfe, 0x9d, 0xb4, 0xd3, 0xff, + 0xef, 0xff, 0xdf, 0xf7, 0x7f, 0xb7, 0xfd, 0xd5, + 0xa5, 0x77, 0x40, 0xff, 0x9c, 0xc1, 0x41, 0x2c, + 0x08, 0x21, 0x41, 0x00, 0x50, 0x40, 0x00, 0x80, + // Entry C0 - FF + 0xfb, 0x4a, 0xf2, 0x9f, 0xb4, 0x42, 0x41, 0x96, + 0x1b, 0x14, 0x08, 0xf3, 0x2b, 0xe7, 0x17, 0x56, + 0x05, 0x7d, 0x0e, 0x1c, 0x37, 0x7f, 0xf3, 0xef, + 0x97, 0xff, 0x5d, 0x38, 0x64, 0x08, 0x00, 0x10, + 0xbc, 0x85, 0xaf, 0xdf, 0xff, 0xff, 0x7b, 0x35, + 0x3e, 0xc7, 0xc7, 0xdf, 0xff, 0x01, 0x81, 0x00, + 0xb0, 0x05, 0x80, 0x00, 0x20, 0x00, 0x00, 0x03, + 0x40, 0x00, 0x40, 0x92, 0x21, 0x50, 0xb1, 0x5d, + // Entry 100 - 13F + 0xfd, 0xdc, 0xbe, 0x5e, 0x00, 0x00, 0x02, 0x64, + 0x0d, 0x19, 0x41, 0xdf, 0x79, 0x22, 0x00, 0x00, + 0x00, 0x5e, 0x64, 0xdc, 0x24, 0xe5, 0xd9, 0xe3, + 0xfe, 0xff, 0xfd, 0xcb, 0x9f, 0x14, 0x41, 0x0c, + 0x86, 0x00, 0xd1, 0x00, 0xf0, 0xc7, 0x67, 0x5f, + 0x56, 0x99, 0x5e, 0xb5, 0x6c, 0xaf, 0x03, 0x00, + 0x02, 0x00, 0x00, 0x00, 0xc0, 0x37, 0xda, 0x56, + 0x90, 0x6d, 0x01, 0x2e, 0x96, 0x69, 0x20, 0xfb, + // Entry 140 - 17F + 0xff, 0x3f, 0x00, 0x00, 0x00, 0x01, 0x0c, 0x16, + 0x03, 0x00, 0x00, 0xb0, 0x14, 0x23, 0x50, 0x06, + 0x0a, 0x00, 0x01, 0x00, 0x00, 0x10, 0x11, 0x09, + 0x00, 0x00, 0x60, 0x10, 0x00, 0x00, 0x00, 0x10, + 0x00, 0x00, 0x44, 0x00, 0x00, 0x10, 0x00, 0x05, + 0x08, 0x00, 0x00, 0x05, 0x00, 0x80, 0x28, 0x04, + 0x00, 0x00, 0x40, 0xd5, 0x2d, 0x00, 0x64, 0x35, + 0x24, 0x52, 0xf4, 0xd5, 0xbf, 0x62, 0xc9, 0x03, + // Entry 180 - 1BF + 0x00, 0x80, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x04, 0x13, 0x39, 0x01, 0xdd, 0x57, 0x98, + 0x21, 0x18, 0x81, 0x08, 0x00, 0x01, 0x40, 0x82, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x01, 0x40, 0x00, 0x44, 0x00, 0x00, 0x80, 0xea, + 0xa9, 0x39, 0x00, 0x02, 0x00, 0x00, 0x00, 0x04, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, + // Entry 1C0 - 1FF + 0x00, 0x03, 0x28, 0x05, 0x00, 0x00, 0x00, 0x00, + 0x04, 0x20, 0x04, 0xa6, 0x00, 0x04, 0x00, 0x00, + 0x81, 0x50, 0x00, 0x00, 0x00, 0x11, 0x84, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x55, + 0x02, 0x10, 0x08, 0x04, 0x00, 0x00, 0x00, 0x40, + 0x30, 0x83, 0x01, 0x00, 0x00, 0x00, 0x11, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x1e, 0xcd, 0xbf, 0x7a, 0xbf, + // Entry 200 - 23F + 0xdf, 0xc3, 0x83, 0x82, 0xc0, 0xfb, 0x57, 0x27, + 0xed, 0x55, 0xe7, 0x01, 0x00, 0x20, 0xb2, 0xc5, + 
0xa4, 0x45, 0x25, 0x9b, 0x02, 0xdf, 0xe1, 0xdf, + 0x03, 0x44, 0x08, 0x90, 0x01, 0x04, 0x81, 0xe3, + 0x92, 0x54, 0xdb, 0x28, 0xd3, 0x5f, 0xfe, 0x6d, + 0x79, 0xed, 0x1c, 0x7f, 0x04, 0x08, 0x00, 0x01, + 0x21, 0x12, 0x64, 0x5f, 0xdd, 0x0e, 0x85, 0x4f, + 0x40, 0x40, 0x00, 0x04, 0xf1, 0xfd, 0x3d, 0x54, + // Entry 240 - 27F + 0xe8, 0x03, 0xb4, 0x27, 0x23, 0x0d, 0x00, 0x00, + 0x20, 0x7b, 0x78, 0x02, 0x07, 0x84, 0x00, 0xf0, + 0xbb, 0x7e, 0x5a, 0x00, 0x18, 0x04, 0x81, 0x00, + 0x00, 0x00, 0x80, 0x10, 0x90, 0x1c, 0x01, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x10, 0x40, 0x00, 0x04, + 0x08, 0xa0, 0x70, 0xa5, 0x0c, 0x40, 0x00, 0x00, + 0x91, 0x24, 0x04, 0x68, 0x00, 0x20, 0x70, 0xff, + 0x7b, 0x7f, 0x70, 0x00, 0x05, 0x9b, 0xdd, 0x66, + // Entry 280 - 2BF + 0x03, 0x00, 0x11, 0x00, 0x00, 0x00, 0x40, 0x05, + 0xb5, 0xb6, 0x80, 0x08, 0x04, 0x00, 0x04, 0x51, + 0xe2, 0xef, 0xfd, 0x3f, 0x05, 0x09, 0x08, 0x05, + 0x40, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, + 0x0c, 0x00, 0x00, 0x00, 0x00, 0x81, 0x00, 0x60, + 0xe7, 0x48, 0x00, 0x81, 0x20, 0xc0, 0x05, 0x80, + 0x03, 0x00, 0x00, 0x00, 0x8c, 0x50, 0x40, 0x04, + 0x84, 0x47, 0x84, 0x40, 0x20, 0x10, 0x00, 0x20, + // Entry 2C0 - 2FF + 0x02, 0x50, 0x80, 0x11, 0x00, 0x99, 0x6c, 0xe2, + 0x50, 0x27, 0x1d, 0x11, 0x29, 0x0e, 0x59, 0xe9, + 0x33, 0x08, 0x00, 0x20, 0x04, 0x40, 0x10, 0x00, + 0x00, 0x00, 0x50, 0x44, 0x92, 0x49, 0xd6, 0x5d, + 0xa7, 0x81, 0x47, 0x97, 0xfb, 0x00, 0x10, 0x00, + 0x08, 0x00, 0x80, 0x00, 0x40, 0x04, 0x00, 0x01, + 0x02, 0x00, 0x01, 0x40, 0x80, 0x00, 0x40, 0x08, + 0xd8, 0xeb, 0xf6, 0x39, 0xc4, 0x8d, 0x12, 0x00, + // Entry 300 - 33F + 0x00, 0x0c, 0x04, 0x01, 0x20, 0x20, 0xdd, 0xa0, + 0x01, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, + 0x04, 0x10, 0xd0, 0x9d, 0x95, 0x13, 0x04, 0x80, + 0x00, 0x01, 0xd0, 0x16, 0x40, 0x00, 0x10, 0xb0, + 0x10, 0x62, 0x4c, 0xd2, 0x02, 0x01, 0x4a, 0x00, + 0x46, 0x04, 0x00, 0x08, 0x02, 0x00, 0x20, 0x80, + 0x00, 0x80, 0x06, 0x00, 0x08, 0x00, 0x00, 0x00, + 0x00, 0xf0, 0xd8, 0x6f, 0x15, 0x02, 0x08, 0x00, + // Entry 340 - 37F + 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x10, 0x01, + 0x00, 0x10, 0x00, 0x00, 0x00, 0xf0, 0x84, 0xe3, + 0xdd, 0xbf, 0xf9, 0xf9, 0x3b, 0x7f, 0x7f, 0xdb, + 0xfd, 0xfc, 0xfe, 0xdf, 0xff, 0xfd, 0xff, 0xf6, + 0xfb, 0xfc, 0xf7, 0x1f, 0xff, 0xb3, 0x6c, 0xff, + 0xd9, 0xad, 0xdf, 0xfe, 0xef, 0xba, 0xdf, 0xff, + 0xff, 0xff, 0xb7, 0xdd, 0x7d, 0xbf, 0xab, 0x7f, + 0xfd, 0xfd, 0xdf, 0x2f, 0x9c, 0xdf, 0xf3, 0x6f, + // Entry 380 - 3BF + 0xdf, 0xdd, 0xff, 0xfb, 0xee, 0xd2, 0xab, 0x5f, + 0xd5, 0xdf, 0x7f, 0xff, 0xeb, 0xff, 0xe4, 0x4d, + 0xf9, 0xff, 0xfe, 0xf7, 0xfd, 0xdf, 0xfb, 0xbf, + 0xee, 0xdb, 0x6f, 0xef, 0xff, 0x7f, 0xff, 0xff, + 0xf7, 0x5f, 0xd3, 0x3b, 0xfd, 0xd9, 0xdf, 0xeb, + 0xbc, 0x08, 0x05, 0x24, 0xff, 0x07, 0x70, 0xfe, + 0xe6, 0x5e, 0x00, 0x08, 0x00, 0x83, 0x7d, 0x1f, + 0x06, 0xe6, 0x72, 0x60, 0xd1, 0x3c, 0x7f, 0x44, + // Entry 3C0 - 3FF + 0x02, 0x30, 0x9f, 0x7a, 0x16, 0xbd, 0x7f, 0x57, + 0xf2, 0xff, 0x31, 0xff, 0xf2, 0x1e, 0x90, 0xf7, + 0xf1, 0xf9, 0x45, 0x80, 0x01, 0x02, 0x00, 0x20, + 0x40, 0x54, 0x9f, 0x8a, 0xdf, 0xf9, 0x6e, 0x11, + 0x86, 0x51, 0xc0, 0xf3, 0xfb, 0x47, 0x40, 0x03, + 0x05, 0xd1, 0x50, 0x5c, 0x00, 0x40, 0x00, 0x10, + 0x04, 0x02, 0x00, 0x00, 0x0a, 0x00, 0x17, 0xd2, + 0xb9, 0xfd, 0xfc, 0xba, 0xfe, 0xef, 0xc7, 0xbe, + // Entry 400 - 43F + 0x53, 0x6f, 0xdf, 0xe7, 0xdb, 0x65, 0xbb, 0x7f, + 0xfa, 0xff, 0x77, 0xf3, 0xef, 0xbf, 0xfd, 0xf7, + 0xdf, 0xdf, 0x9b, 0x7f, 0xff, 0xff, 0x7f, 0x6f, + 0xf7, 0xfb, 0xeb, 0xdf, 0xbc, 0xff, 0xbf, 0x6b, + 0x7b, 0xfb, 0xff, 0xce, 0x76, 0xbd, 0xf7, 0xf7, + 0xdf, 0xdc, 0xf7, 0xf7, 0xff, 0xdf, 
0xf3, 0xfe, + 0xef, 0xff, 0xff, 0xff, 0xb6, 0x7f, 0x7f, 0xde, + 0xf7, 0xb9, 0xeb, 0x77, 0xff, 0xfb, 0xbf, 0xdf, + // Entry 440 - 47F + 0xfd, 0xfe, 0xfb, 0xff, 0xfe, 0xeb, 0x1f, 0x7d, + 0x2f, 0xfd, 0xb6, 0xb5, 0xa5, 0xfc, 0xff, 0xfd, + 0x7f, 0x4e, 0xbf, 0x8f, 0xae, 0xff, 0xee, 0xdf, + 0x7f, 0xf7, 0x73, 0x02, 0x02, 0x04, 0xfc, 0xf7, + 0xff, 0xb7, 0xd7, 0xef, 0xfe, 0xcd, 0xf5, 0xce, + 0xe2, 0x8e, 0xe7, 0xbf, 0xb7, 0xff, 0x56, 0xfd, + 0xcd, 0xff, 0xfb, 0xff, 0xdf, 0xd7, 0xea, 0xff, + 0xe5, 0x5f, 0x6d, 0x0f, 0xa7, 0x51, 0x06, 0xc4, + // Entry 480 - 4BF + 0x93, 0x50, 0x5d, 0xaf, 0xa6, 0xff, 0x99, 0xfb, + 0x63, 0x1d, 0x53, 0xff, 0xef, 0xb7, 0x35, 0x20, + 0x14, 0x00, 0x55, 0x51, 0xc2, 0x65, 0xf5, 0x41, + 0xe2, 0xff, 0xfc, 0xdf, 0x02, 0x85, 0xc5, 0x05, + 0x00, 0x22, 0x00, 0x74, 0x69, 0x10, 0x08, 0x05, + 0x41, 0x00, 0x01, 0x06, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x51, 0x20, 0x05, 0x04, 0x01, 0x00, 0x00, + 0x06, 0x11, 0x20, 0x00, 0x18, 0x01, 0x92, 0xf1, + // Entry 4C0 - 4FF + 0xfd, 0x47, 0x69, 0x06, 0x95, 0x06, 0x57, 0xed, + 0xfb, 0x4d, 0x1c, 0x6b, 0x83, 0x04, 0x62, 0x40, + 0x00, 0x11, 0x42, 0x00, 0x00, 0x00, 0x54, 0x83, + 0xb8, 0x4f, 0x10, 0x8e, 0x89, 0x46, 0xde, 0xf7, + 0x13, 0x31, 0x00, 0x20, 0x00, 0x00, 0x00, 0x90, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x10, 0x00, + 0x01, 0x00, 0x00, 0xf0, 0x5b, 0xf4, 0xbe, 0x3d, + 0xbe, 0xcf, 0xf7, 0xaf, 0x42, 0x04, 0x84, 0x41, + // Entry 500 - 53F + 0x30, 0xff, 0x79, 0x72, 0x04, 0x00, 0x00, 0x49, + 0x2d, 0x14, 0x27, 0x5f, 0xed, 0xf1, 0x3f, 0xe7, + 0x3f, 0x00, 0x00, 0x02, 0xc6, 0xa0, 0x1e, 0xf8, + 0xbb, 0xff, 0xfd, 0xfb, 0xb7, 0xfd, 0xe7, 0xf7, + 0xfd, 0xfc, 0xd5, 0xed, 0x47, 0xf4, 0x7e, 0x10, + 0x01, 0x01, 0x84, 0x6d, 0xff, 0xf7, 0xdd, 0xf9, + 0x5b, 0x05, 0x86, 0xed, 0xf5, 0x77, 0xbd, 0x3c, + 0x00, 0x00, 0x00, 0x42, 0x71, 0x42, 0x00, 0x40, + // Entry 540 - 57F + 0x00, 0x00, 0x01, 0x43, 0x19, 0x24, 0x08, 0x00, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + // Entry 580 - 5BF + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xab, 0xbd, 0xe7, 0x57, 0xee, 0x13, 0x5d, + 0x09, 0xc1, 0x40, 0x21, 0xfa, 0x17, 0x01, 0x80, + 0x00, 0x00, 0x00, 0x00, 0xf0, 0xce, 0xfb, 0xbf, + 0x00, 0x23, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, + 0x00, 0x30, 0x15, 0xa3, 0x10, 0x00, 0x00, 0x00, + 0x11, 0x04, 0x16, 0x00, 0x00, 0x02, 0x20, 0x81, + 0xa3, 0x01, 0x50, 0x00, 0x00, 0x83, 0x11, 0x40, + // Entry 5C0 - 5FF + 0x00, 0x00, 0x00, 0xf0, 0xdd, 0x7b, 0xbe, 0x02, + 0xaa, 0x10, 0x5d, 0x98, 0x52, 0x00, 0x80, 0x20, + 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x02, 0x02, + 0x3d, 0x40, 0x10, 0x02, 0x10, 0x61, 0x5a, 0x9d, + 0x31, 0x00, 0x00, 0x00, 0x01, 0x18, 0x02, 0x20, + 0x00, 0x00, 0x01, 0x00, 0x42, 0x00, 0x20, 0x00, + 0x00, 0x1f, 0xdf, 0xd2, 0xb9, 0xff, 0xfd, 0x3f, + 0x1f, 0x98, 0xcf, 0x9c, 0xff, 0xaf, 0x5f, 0xfe, + // Entry 600 - 63F + 0x7b, 0x4b, 0x40, 0x10, 0xe1, 0xfd, 0xaf, 0xd9, + 0xb7, 0xf6, 0xfb, 0xb3, 0xc7, 0xff, 0x6f, 0xf1, + 0x73, 0xb1, 0x7f, 0x9f, 0x7f, 0xbd, 0xfc, 0xb7, + 0xee, 0x1c, 0xfa, 0xcb, 0xef, 0xdd, 0xf9, 0xbd, + 0x6e, 0xae, 0x55, 0xfd, 0x6e, 0x81, 0x76, 0x9f, + 0xd4, 0x77, 0xf5, 0x7d, 0xfb, 0xff, 0xeb, 0xfe, + 0xbe, 0x5f, 0x46, 0x5b, 0xe9, 0x5f, 0x50, 0x18, + 0x02, 0xfa, 0xf7, 0x9d, 0x15, 0x97, 0x05, 0x0f, + // Entry 640 - 67F + 0x75, 0xc4, 0x7d, 0x81, 0x92, 0xf5, 0x57, 0x6c, + 
0xff, 0xe4, 0xef, 0x6f, 0xff, 0xfc, 0xdd, 0xde, + 0xfc, 0xfd, 0x76, 0x5f, 0x7a, 0x3f, 0x00, 0x98, + 0x02, 0xfb, 0xa3, 0xef, 0xf3, 0xd6, 0xf2, 0xff, + 0xb9, 0xda, 0x7d, 0xd0, 0x3e, 0x15, 0x7b, 0xb4, + 0xf5, 0x3e, 0xff, 0xff, 0xf1, 0xf7, 0xff, 0xe7, + 0x5f, 0xff, 0xff, 0x9e, 0xdf, 0xf6, 0xd7, 0xb9, + 0xef, 0x27, 0x80, 0xbb, 0xc5, 0xff, 0xff, 0xe3, + // Entry 680 - 6BF + 0x97, 0x9d, 0xbf, 0x9f, 0xf7, 0xc7, 0xfd, 0x37, + 0xce, 0x7f, 0x44, 0x1d, 0x73, 0x7f, 0xf8, 0xda, + 0x5d, 0xce, 0x7d, 0x06, 0xb9, 0xea, 0x79, 0xa0, + 0x1a, 0x20, 0x00, 0x30, 0x02, 0x04, 0x24, 0x08, + 0x04, 0x00, 0x00, 0x40, 0xd4, 0x02, 0x04, 0x00, + 0x00, 0x04, 0x00, 0x04, 0x00, 0x20, 0x09, 0x06, + 0x50, 0x00, 0x08, 0x00, 0x00, 0x00, 0x24, 0x00, + 0x04, 0x00, 0x10, 0xdc, 0x58, 0xd7, 0x0d, 0x0f, + // Entry 6C0 - 6FF + 0x54, 0x4d, 0xf1, 0x16, 0x44, 0xd5, 0x42, 0x08, + 0x40, 0x02, 0x00, 0x40, 0x00, 0x08, 0x00, 0x00, + 0x00, 0xdc, 0xfb, 0xcb, 0x0e, 0x58, 0x48, 0x41, + 0x24, 0x20, 0x04, 0x00, 0x30, 0x12, 0x40, 0x00, + 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x01, 0x00, 0x00, 0x00, 0x80, 0x10, 0x10, 0xab, + 0x6d, 0x93, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x80, 0x80, 0x25, 0x00, 0x00, + // Entry 700 - 73F + 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00, 0x00, 0x00, + 0x80, 0x86, 0xc2, 0x00, 0x00, 0x01, 0x00, 0x01, + 0xff, 0x18, 0x02, 0x00, 0x02, 0xf0, 0xfd, 0x79, + 0x3b, 0x00, 0x25, 0x00, 0x00, 0x00, 0x02, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, + 0x03, 0x00, 0x09, 0x20, 0x00, 0x00, 0x01, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 740 - 77F + 0x00, 0x00, 0x00, 0xef, 0xd5, 0xfd, 0xcf, 0x7e, + 0xb0, 0x11, 0x00, 0x00, 0x00, 0x92, 0x01, 0x46, + 0xcd, 0xf9, 0x5c, 0x00, 0x01, 0x00, 0x30, 0x04, + 0x04, 0x55, 0x00, 0x01, 0x04, 0xf4, 0x3f, 0x4a, + 0x01, 0x00, 0x00, 0xb0, 0x80, 0x20, 0x55, 0x75, + 0x97, 0x7c, 0xdf, 0x31, 0xcc, 0x68, 0xd1, 0x03, + 0xd5, 0x57, 0x27, 0x14, 0x01, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x2c, 0xf7, 0xcb, 0x1f, 0x14, 0x60, + // Entry 780 - 7BF + 0x83, 0x68, 0x01, 0x10, 0x8b, 0x38, 0x8a, 0x01, + 0x00, 0x00, 0x20, 0x00, 0x24, 0x44, 0x00, 0x00, + 0x10, 0x03, 0x31, 0x02, 0x01, 0x00, 0x00, 0xf0, + 0xf5, 0xff, 0xd5, 0x97, 0xbc, 0x70, 0xd6, 0x78, + 0x78, 0x15, 0x50, 0x05, 0xa4, 0x84, 0xa9, 0x41, + 0x00, 0x00, 0x00, 0x6b, 0x39, 0x52, 0x74, 0x40, + 0xe8, 0x30, 0x90, 0x6a, 0x92, 0x00, 0x00, 0x02, + 0xff, 0xef, 0xff, 0x4b, 0x85, 0x53, 0xf4, 0xed, + // Entry 7C0 - 7FF + 0xdd, 0xbf, 0xf2, 0x5d, 0xc7, 0x0c, 0xd5, 0x42, + 0xfc, 0xff, 0xf7, 0x1f, 0x00, 0x80, 0x40, 0x56, + 0xcc, 0x16, 0x9e, 0xea, 0x35, 0x7d, 0xef, 0xff, + 0xbd, 0xa4, 0xaf, 0x01, 0x44, 0x18, 0x01, 0x4d, + 0x4e, 0x4a, 0x08, 0x50, 0x28, 0x30, 0xe0, 0x80, + 0x10, 0x20, 0x24, 0x00, 0xff, 0x2f, 0xd3, 0x60, + 0xfe, 0x01, 0x02, 0x88, 0x2a, 0x40, 0x16, 0x01, + 0x01, 0x15, 0x2b, 0x3c, 0x01, 0x00, 0x00, 0x10, + // Entry 800 - 83F + 0x90, 0x49, 0x41, 0x02, 0x02, 0x01, 0xe1, 0xbf, + 0xbf, 0x03, 0x00, 0x00, 0x10, 0xdc, 0xa3, 0xd1, + 0x40, 0x9c, 0x44, 0xdf, 0xf5, 0x8f, 0x66, 0xb3, + 0x55, 0x20, 0xd4, 0xc1, 0xd8, 0x30, 0x3d, 0x80, + 0x00, 0x00, 0x00, 0x04, 0xd4, 0x11, 0xc5, 0x84, + 0x2f, 0x50, 0x00, 0x22, 0x50, 0x6e, 0xbd, 0x93, + 0x07, 0x00, 0x20, 0x10, 0x84, 0xb2, 0x45, 0x10, + 0x06, 0x44, 0x00, 0x00, 0x12, 0x02, 0x11, 0x00, + // Entry 840 - 87F + 0xf0, 0xfb, 0xfd, 0x7f, 0x05, 0x00, 0x16, 0x89, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x03, + 0x00, 0x00, 0x00, 0x00, 0x03, 0x30, 0x02, 0x28, + 0x84, 0x00, 0x21, 0xc0, 0x23, 0x24, 0x00, 0x00, + 0x00, 0xcb, 0xe4, 0x3a, 0x46, 0x88, 
0x54, 0xf1, + 0xef, 0xff, 0x7f, 0x12, 0x01, 0x01, 0x84, 0x50, + 0x07, 0xfc, 0xff, 0xff, 0x0f, 0x01, 0x00, 0x40, + 0x10, 0x38, 0x01, 0x01, 0x1c, 0x12, 0x40, 0xe1, + // Entry 880 - 8BF + 0x76, 0x16, 0x08, 0x03, 0x10, 0x00, 0x00, 0x00, + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x24, + 0x0a, 0x00, 0x80, 0x00, 0x00, +} + +// altLangISO3 holds an alphabetically sorted list of 3-letter language code alternatives +// to 2-letter language codes that cannot be derived using the method described above. +// Each 3-letter code is followed by its 1-byte langID. +const altLangISO3 tag.Index = "---\x00cor\x00hbs\x01heb\x02kin\x03spa\x04yid\x05\xff\xff\xff\xff" + +// altLangIndex is used to convert indexes in altLangISO3 to langIDs. +// Size: 12 bytes, 6 elements +var altLangIndex = [6]uint16{ + 0x0281, 0x0407, 0x01fb, 0x03e5, 0x013e, 0x0208, +} + +// AliasMap maps langIDs to their suggested replacements. +// Size: 772 bytes, 193 elements +var AliasMap = [193]FromTo{ + 0: {From: 0x82, To: 0x88}, + 1: {From: 0x187, To: 0x1ae}, + 2: {From: 0x1f3, To: 0x1e1}, + 3: {From: 0x1fb, To: 0x1bc}, + 4: {From: 0x208, To: 0x512}, + 5: {From: 0x20f, To: 0x20e}, + 6: {From: 0x310, To: 0x3dc}, + 7: {From: 0x347, To: 0x36f}, + 8: {From: 0x407, To: 0x432}, + 9: {From: 0x47a, To: 0x153}, + 10: {From: 0x490, To: 0x451}, + 11: {From: 0x4a2, To: 0x21}, + 12: {From: 0x53e, To: 0x544}, + 13: {From: 0x58f, To: 0x12d}, + 14: {From: 0x62b, To: 0x34}, + 15: {From: 0x62f, To: 0x14}, + 16: {From: 0x630, To: 0x1eb1}, + 17: {From: 0x651, To: 0x431}, + 18: {From: 0x662, To: 0x431}, + 19: {From: 0x6ed, To: 0x3a}, + 20: {From: 0x6f8, To: 0x1d7}, + 21: {From: 0x709, To: 0x3625}, + 22: {From: 0x73e, To: 0x21a1}, + 23: {From: 0x7b3, To: 0x56}, + 24: {From: 0x7b9, To: 0x299b}, + 25: {From: 0x7c5, To: 0x58}, + 26: {From: 0x7e6, To: 0x145}, + 27: {From: 0x80c, To: 0x5a}, + 28: {From: 0x815, To: 0x8d}, + 29: {From: 0x87e, To: 0x810}, + 30: {From: 0x8a8, To: 0x8b7}, + 31: {From: 0x8c3, To: 0xee3}, + 32: {From: 0x8fa, To: 0x1dc}, + 33: {From: 0x9ef, To: 0x331}, + 34: {From: 0xa36, To: 0x2c5}, + 35: {From: 0xa3d, To: 0xbf}, + 36: {From: 0xabe, To: 0x3322}, + 37: {From: 0xb38, To: 0x529}, + 38: {From: 0xb75, To: 0x265a}, + 39: {From: 0xb7e, To: 0xbc3}, + 40: {From: 0xb9b, To: 0x44e}, + 41: {From: 0xbbc, To: 0x4229}, + 42: {From: 0xbbf, To: 0x529}, + 43: {From: 0xbfe, To: 0x2da7}, + 44: {From: 0xc2e, To: 0x3181}, + 45: {From: 0xcb9, To: 0xf3}, + 46: {From: 0xd08, To: 0xfa}, + 47: {From: 0xdc8, To: 0x11a}, + 48: {From: 0xdd7, To: 0x32d}, + 49: {From: 0xdf8, To: 0xdfb}, + 50: {From: 0xdfe, To: 0x531}, + 51: {From: 0xe01, To: 0xdf3}, + 52: {From: 0xedf, To: 0x205a}, + 53: {From: 0xee9, To: 0x222e}, + 54: {From: 0xeee, To: 0x2e9a}, + 55: {From: 0xf39, To: 0x367}, + 56: {From: 0x10d0, To: 0x140}, + 57: {From: 0x1104, To: 0x2d0}, + 58: {From: 0x11a0, To: 0x1ec}, + 59: {From: 0x1279, To: 0x21}, + 60: {From: 0x1424, To: 0x15e}, + 61: {From: 0x1470, To: 0x14e}, + 62: {From: 0x151f, To: 0xd9b}, + 63: {From: 0x1523, To: 0x390}, + 64: {From: 0x1532, To: 0x19f}, + 65: {From: 0x1580, To: 0x210}, + 66: {From: 0x1583, To: 0x10d}, + 67: {From: 0x15a3, To: 0x3caf}, + 68: {From: 0x1630, To: 0x222e}, + 69: {From: 0x166a, To: 0x19b}, + 70: {From: 0x16c8, To: 0x136}, + 71: {From: 0x1700, To: 0x29f8}, + 72: {From: 0x1718, To: 0x194}, + 73: {From: 0x1727, To: 0xf3f}, + 74: {From: 0x177a, To: 0x178}, + 75: {From: 0x1809, To: 0x17b6}, + 76: {From: 0x1816, To: 0x18f3}, + 77: {From: 0x188a, To: 0x436}, + 78: {From: 0x1979, To: 0x1d01}, + 79: {From: 0x1a74, To: 0x2bb0}, + 80: 
{From: 0x1a8a, To: 0x1f8}, + 81: {From: 0x1b5a, To: 0x1fa}, + 82: {From: 0x1b86, To: 0x1515}, + 83: {From: 0x1d64, To: 0x2c9b}, + 84: {From: 0x2038, To: 0x37b1}, + 85: {From: 0x203d, To: 0x20dd}, + 86: {From: 0x2042, To: 0x2e00}, + 87: {From: 0x205a, To: 0x30b}, + 88: {From: 0x20e3, To: 0x274}, + 89: {From: 0x20ee, To: 0x263}, + 90: {From: 0x20f2, To: 0x22d}, + 91: {From: 0x20f9, To: 0x256}, + 92: {From: 0x210f, To: 0x21eb}, + 93: {From: 0x2135, To: 0x27d}, + 94: {From: 0x2160, To: 0x913}, + 95: {From: 0x2199, To: 0x121}, + 96: {From: 0x21ce, To: 0x1561}, + 97: {From: 0x21e6, To: 0x504}, + 98: {From: 0x21f4, To: 0x49f}, + 99: {From: 0x21fb, To: 0x269}, + 100: {From: 0x222d, To: 0x121}, + 101: {From: 0x2237, To: 0x121}, + 102: {From: 0x2248, To: 0x217d}, + 103: {From: 0x2262, To: 0x92a}, + 104: {From: 0x2316, To: 0x3226}, + 105: {From: 0x236a, To: 0x2835}, + 106: {From: 0x2382, To: 0x3365}, + 107: {From: 0x2472, To: 0x2c7}, + 108: {From: 0x24e4, To: 0x2ff}, + 109: {From: 0x24f0, To: 0x2fa}, + 110: {From: 0x24fa, To: 0x31f}, + 111: {From: 0x2550, To: 0xb5b}, + 112: {From: 0x25a9, To: 0xe2}, + 113: {From: 0x263e, To: 0x2d0}, + 114: {From: 0x26c9, To: 0x26b4}, + 115: {From: 0x26f9, To: 0x3c8}, + 116: {From: 0x2727, To: 0x3caf}, + 117: {From: 0x2755, To: 0x6a4}, + 118: {From: 0x2765, To: 0x26b4}, + 119: {From: 0x2789, To: 0x4358}, + 120: {From: 0x27c9, To: 0x2001}, + 121: {From: 0x28ea, To: 0x27b1}, + 122: {From: 0x28ef, To: 0x2837}, + 123: {From: 0x28fe, To: 0xaa5}, + 124: {From: 0x2914, To: 0x351}, + 125: {From: 0x2986, To: 0x2da7}, + 126: {From: 0x29f0, To: 0x96b}, + 127: {From: 0x2b1a, To: 0x38d}, + 128: {From: 0x2bfc, To: 0x395}, + 129: {From: 0x2c3f, To: 0x3caf}, + 130: {From: 0x2ce1, To: 0x2201}, + 131: {From: 0x2cfc, To: 0x3be}, + 132: {From: 0x2d13, To: 0x597}, + 133: {From: 0x2d47, To: 0x148}, + 134: {From: 0x2d48, To: 0x148}, + 135: {From: 0x2dff, To: 0x2f1}, + 136: {From: 0x2e08, To: 0x19cc}, + 137: {From: 0x2e10, To: 0xc45}, + 138: {From: 0x2e1a, To: 0x2d95}, + 139: {From: 0x2e21, To: 0x292}, + 140: {From: 0x2e54, To: 0x7d}, + 141: {From: 0x2e65, To: 0x2282}, + 142: {From: 0x2e97, To: 0x1a4}, + 143: {From: 0x2ea0, To: 0x2e9b}, + 144: {From: 0x2eef, To: 0x2ed7}, + 145: {From: 0x3193, To: 0x3c4}, + 146: {From: 0x3366, To: 0x338e}, + 147: {From: 0x342a, To: 0x3dc}, + 148: {From: 0x34ee, To: 0x18d0}, + 149: {From: 0x35c8, To: 0x2c9b}, + 150: {From: 0x35e6, To: 0x412}, + 151: {From: 0x35f5, To: 0x24b}, + 152: {From: 0x360d, To: 0x1dc}, + 153: {From: 0x3658, To: 0x246}, + 154: {From: 0x3676, To: 0x3f4}, + 155: {From: 0x36fd, To: 0x445}, + 156: {From: 0x3747, To: 0x3b42}, + 157: {From: 0x37c0, To: 0x121}, + 158: {From: 0x3816, To: 0x38f2}, + 159: {From: 0x382a, To: 0x2b48}, + 160: {From: 0x382b, To: 0x2c9b}, + 161: {From: 0x382f, To: 0xa9}, + 162: {From: 0x3832, To: 0x3228}, + 163: {From: 0x386c, To: 0x39a6}, + 164: {From: 0x3892, To: 0x3fc0}, + 165: {From: 0x38a0, To: 0x45f}, + 166: {From: 0x38a5, To: 0x39d7}, + 167: {From: 0x38b4, To: 0x1fa4}, + 168: {From: 0x38b5, To: 0x2e9a}, + 169: {From: 0x38fa, To: 0x38f1}, + 170: {From: 0x395c, To: 0x47e}, + 171: {From: 0x3b4e, To: 0xd91}, + 172: {From: 0x3b78, To: 0x137}, + 173: {From: 0x3c99, To: 0x4bc}, + 174: {From: 0x3fbd, To: 0x100}, + 175: {From: 0x4208, To: 0xa91}, + 176: {From: 0x42be, To: 0x573}, + 177: {From: 0x42f9, To: 0x3f60}, + 178: {From: 0x4378, To: 0x25a}, + 179: {From: 0x43b8, To: 0xe6c}, + 180: {From: 0x43cd, To: 0x10f}, + 181: {From: 0x43d4, To: 0x4848}, + 182: {From: 0x44af, To: 0x3322}, + 183: {From: 0x44e3, To: 0x512}, + 
184: {From: 0x45ca, To: 0x2409}, + 185: {From: 0x45dd, To: 0x26dc}, + 186: {From: 0x4610, To: 0x48ae}, + 187: {From: 0x46ae, To: 0x46a0}, + 188: {From: 0x473e, To: 0x4745}, + 189: {From: 0x4817, To: 0x3503}, + 190: {From: 0x483b, To: 0x208b}, + 191: {From: 0x4916, To: 0x31f}, + 192: {From: 0x49a7, To: 0x523}, +} + +// Size: 193 bytes, 193 elements +var AliasTypes = [193]AliasType{ + // Entry 0 - 3F + 1, 0, 0, 0, 0, 0, 0, 1, 2, 2, 0, 1, 0, 0, 0, 0, + 1, 2, 1, 1, 2, 0, 0, 1, 0, 1, 2, 1, 1, 0, 0, 0, + 0, 2, 1, 1, 0, 2, 0, 0, 1, 0, 1, 0, 0, 1, 2, 1, + 1, 1, 1, 0, 0, 0, 0, 2, 1, 1, 1, 1, 2, 1, 0, 1, + // Entry 40 - 7F + 1, 2, 2, 0, 0, 1, 2, 0, 1, 0, 1, 1, 1, 1, 0, 0, + 2, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 2, 2, 2, 0, + 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, + // Entry 80 - BF + 1, 0, 0, 1, 0, 2, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, + 0, 1, 1, 2, 0, 0, 2, 0, 0, 1, 1, 1, 0, 0, 0, 0, + 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 2, 0, + 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, + // Entry C0 - FF + 1, +} + +const ( + _Latn = 91 + _Hani = 57 + _Hans = 59 + _Hant = 60 + _Qaaa = 149 + _Qaai = 157 + _Qabx = 198 + _Zinh = 255 + _Zyyy = 260 + _Zzzz = 261 +) + +// script is an alphabetically sorted list of ISO 15924 codes. The index +// of the script in the string, divided by 4, is the internal scriptID. +const script tag.Index = "" + // Size: 1052 bytes + "----AdlmAfakAghbAhomArabAranArmiArmnAvstBaliBamuBassBatkBengBhksBlisBopo" + + "BrahBraiBugiBuhdCakmCansCariChamCherChrsCirtCoptCpmnCprtCyrlCyrsDevaDiak" + + "DogrDsrtDuplEgydEgyhEgypElbaElymEthiGeokGeorGlagGongGonmGothGranGrekGujr" + + "GuruHanbHangHaniHanoHansHantHatrHebrHiraHluwHmngHmnpHrktHungIndsItalJamo" + + "JavaJpanJurcKaliKanaKawiKharKhmrKhojKitlKitsKndaKoreKpelKthiLanaLaooLatf" + + "LatgLatnLekeLepcLimbLinaLinbLisuLomaLyciLydiMahjMakaMandManiMarcMayaMedf" + + "MendMercMeroMlymModiMongMoonMrooMteiMultMymrNagmNandNarbNbatNewaNkdbNkgb" + + "NkooNshuOgamOlckOrkhOryaOsgeOsmaOugrPalmPaucPcunPelmPermPhagPhliPhlpPhlv" + + "PhnxPiqdPlrdPrtiPsinQaaaQaabQaacQaadQaaeQaafQaagQaahQaaiQaajQaakQaalQaam" + + "QaanQaaoQaapQaaqQaarQaasQaatQaauQaavQaawQaaxQaayQaazQabaQabbQabcQabdQabe" + + "QabfQabgQabhQabiQabjQabkQablQabmQabnQaboQabpQabqQabrQabsQabtQabuQabvQabw" + + "QabxRanjRjngRohgRoroRunrSamrSaraSarbSaurSgnwShawShrdShuiSiddSindSinhSogd" + + "SogoSoraSoyoSundSunuSyloSyrcSyreSyrjSyrnTagbTakrTaleTaluTamlTangTavtTelu" + + "TengTfngTglgThaaThaiTibtTirhTnsaTotoUgarVaiiVispVithWaraWchoWoleXpeoXsux" + + "YeziYiiiZanbZinhZmthZsyeZsymZxxxZyyyZzzz\xff\xff\xff\xff" + +// suppressScript is an index from langID to the dominant script for that language, +// if it exists. If a script is given, it should be suppressed from the language tag. 
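+//
+// A minimal sketch of the intended lookup, taking the zero byte to mean "no
+// single dominant script" (an assumption; only the indexing by langID is
+// stated above):
+//
+//	func dominantScript(langID uint16) (scriptID uint8, ok bool) {
+//		sid := suppressScript[langID] // 0x00 entries carry no suppression hint
+//		return sid, sid != 0
+//	}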
+// Size: 1330 bytes, 1330 elements +var suppressScript = [1330]uint8{ + // Entry 0 - 3F + 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2c, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 40 - 7F + 0x00, 0x00, 0x00, 0x0e, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, + // Entry 80 - BF + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry C0 - FF + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 100 - 13F + 0x5b, 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xed, 0x00, 0x00, 0x00, 0x00, 0xef, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x34, 0x00, + 0x00, 0x5b, 0x00, 0x00, 0x5b, 0x00, 0x5b, 0x00, + // Entry 140 - 17F + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x5b, 0x00, 0x00, + 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x5b, 0x00, 0x00, 0x00, 0x5b, 0x00, 0x00, + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b, 0x00, + 0x00, 0x5b, 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b, 0x5b, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 180 - 1BF + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x5b, 0x00, 0x00, 0x00, 0x5b, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x5b, 0x35, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x5b, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x3e, 0x00, 0x22, 0x00, + // Entry 1C0 - 1FF + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x5b, 0x5b, 0x00, 0x5b, 0x5b, 0x00, 0x08, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x5b, 0x00, 0x00, 0x00, 0x00, + 0x5b, 0x5b, 0x00, 0x3e, 0x00, 0x00, 0x00, 0x00, + // Entry 200 - 23F + 0x49, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 
0x2e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 240 - 27F + 0x00, 0x00, 0x20, 0x00, 0x00, 0x5b, 0x00, 0x00, + 0x00, 0x00, 0x4f, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x53, 0x00, 0x00, 0x54, 0x00, 0x22, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 280 - 2BF + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b, 0x00, 0x00, + 0x58, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 2C0 - 2FF + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x22, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, + // Entry 300 - 33F + 0x00, 0x00, 0x00, 0x00, 0x6f, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, 0x00, 0x5b, + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b, 0x00, + // Entry 340 - 37F + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b, 0x00, + 0x5b, 0x22, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b, + 0x00, 0x00, 0x5b, 0x00, 0x00, 0x00, 0x00, 0x5b, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x5b, 0x00, + 0x00, 0x00, 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 380 - 3BF + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x83, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x36, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x5b, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, + // Entry 3C0 - 3FF + 0x5b, 0x00, 0x00, 0x00, 0x5b, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x5b, 0x00, 0x00, 0x00, + 0x00, 0x5b, 0x00, 0x00, 0x5b, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x20, 0x00, 0x00, 0x5b, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 400 - 43F + 0x00, 0x00, 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0xd6, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x5b, 0x00, 0x00, 0x00, 0x5b, 0x00, + 0x00, 0x00, 0x00, 0x5b, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x5b, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, + 0x00, 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b, + 0x00, 0x00, 0x00, 0x5b, 0x00, 0x00, 0x00, 0x00, + // Entry 440 - 47F + 0x00, 0x00, 0x00, 0x00, 0x5b, 0x5b, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xe6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0xe9, 0x00, 0x5b, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0xee, 0x00, 0x00, 0x00, 0x2c, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b, + 0x00, 0x00, 0x5b, 0x00, 0x00, 0x00, 0x5b, 0x00, + // Entry 480 - 4BF + 0x5b, 0x00, 0x5b, 0x00, 0x00, 0x00, 0x5b, 0x00, + 0x00, 0x00, 0x5b, 0x00, 0x00, 0x00, 0x5b, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 4C0 - 4FF + 0x5b, 0x00, 0x00, 0x5b, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 500 - 53F + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x3e, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b, + 0x00, 0x00, +} + +const ( + _001 = 1 + _419 = 31 + _BR = 65 + _CA = 73 + _ES = 111 + _GB = 124 + _MD = 189 + _PT = 239 + _UK = 307 + _US = 310 + _ZZ = 358 + _XA = 324 + _XC = 326 + _XK = 334 +) + +// isoRegionOffset needs to be added to the index of regionISO to obtain the regionID +// for 2-letter ISO codes. (The first isoRegionOffset regionIDs are reserved for +// the UN.M49 codes used for groups.) +const isoRegionOffset = 32 + +// regionTypes defines the status of a region for various standards. 
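+//
+// The bytes read as bit flags. A hedged sketch; the flag names and bit layout
+// below are assumptions chosen for illustration, not taken from this file:
+//
+//	const (
+//		iso3166UserAssigned = 1 << iota // assumed bit 0
+//		ccTLD                           // assumed bit 1
+//		bcp47Region                     // assumed bit 2
+//	)
+//
+//	func isBCP47Region(regionID uint16) bool {
+//		return regionTypes[regionID]&bcp47Region != 0
+//	}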
+// Size: 359 bytes, 359 elements +var regionTypes = [359]uint8{ + // Entry 0 - 3F + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x05, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + // Entry 40 - 7F + 0x06, 0x06, 0x06, 0x06, 0x04, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x04, 0x04, 0x06, + 0x04, 0x00, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x04, 0x06, 0x04, 0x06, 0x06, 0x06, 0x06, 0x00, + 0x06, 0x04, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x04, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x00, 0x06, 0x04, 0x06, 0x06, 0x06, 0x06, 0x06, + // Entry 80 - BF + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x00, 0x04, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x00, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + // Entry C0 - FF + 0x06, 0x06, 0x00, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x00, 0x06, 0x06, 0x06, 0x06, 0x00, 0x06, 0x04, + 0x06, 0x06, 0x06, 0x06, 0x00, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x00, 0x06, 0x06, 0x00, 0x06, 0x05, 0x05, 0x05, + 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, + // Entry 100 - 13F + 0x05, 0x05, 0x05, 0x06, 0x00, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x04, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x02, 0x06, 0x04, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x00, 0x06, 0x06, 0x06, 0x06, + // Entry 140 - 17F + 0x06, 0x06, 0x00, 0x06, 0x05, 0x05, 0x05, 0x05, + 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, + 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, + 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x04, 0x06, + 0x06, 0x04, 0x06, 0x06, 0x04, 0x06, 0x05, +} + +// regionISO holds a list of alphabetically sorted 2-letter ISO region codes. +// Each 2-letter codes is followed by two bytes with the following meaning: +// - [A-Z}{2}: the first letter of the 2-letter code plus these two +// letters form the 3-letter ISO code. +// - 0, n: index into altRegionISO3. 
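+//
+// A sketch of the derivation just described (bounds checks omitted; the
+// helper name is illustrative):
+//
+//	func toISO3(regionID uint16) string {
+//		i := (int(regionID) - isoRegionOffset) * 4
+//		if regionISO[i+2] == 0 { // "0, n" form: code stored out of line
+//			n := int(regionISO[i+3])
+//			return altRegionISO3[n : n+3]
+//		}
+//		// first letter of the 2-letter code plus the two stored letters
+//		return string(regionISO[i:i+1]) + string(regionISO[i+2:i+4])
+//	}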
+const regionISO tag.Index = "" + // Size: 1312 bytes + "AAAAACSCADNDAEREAFFGAGTGAIIAALLBAMRMANNTAOGOAQTAARRGASSMATUTAUUSAWBWAXLA" + + "AZZEBAIHBBRBBDGDBEELBFFABGGRBHHRBIDIBJENBLLMBMMUBNRNBOOLBQESBRRABSHSBTTN" + + "BUURBVVTBWWABYLRBZLZCAANCCCKCDODCFAFCGOGCHHECIIVCKOKCLHLCMMRCNHNCOOLCPPT" + + "CQ CRRICS\x00\x00CTTECUUBCVPVCWUWCXXRCYYPCZZEDDDRDEEUDGGADJJIDKNKDMMADO" + + "OMDYHYDZZAEA ECCUEESTEGGYEHSHERRIESSPETTHEU\x00\x03EZ FIINFJJIFKLKFMSM" + + "FOROFQ\x00\x18FRRAFXXXGAABGBBRGDRDGEEOGFUFGGGYGHHAGIIBGLRLGMMBGNINGPLPGQ" + + "NQGRRCGS\x00\x06GTTMGUUMGWNBGYUYHKKGHMMDHNNDHRRVHTTIHUUNHVVOIC IDDNIERL" + + "ILSRIMMNINNDIOOTIQRQIRRNISSLITTAJEEYJMAMJOORJPPNJTTNKEENKGGZKHHMKIIRKM" + + "\x00\x09KNNAKP\x00\x0cKRORKWWTKY\x00\x0fKZAZLAAOLBBNLCCALIIELKKALRBRLSSO" + + "LTTULUUXLVVALYBYMAARMCCOMDDAMENEMFAFMGDGMHHLMIIDMKKDMLLIMMMRMNNGMOACMPNP" + + "MQTQMRRTMSSRMTLTMUUSMVDVMWWIMXEXMYYSMZOZNAAMNCCLNEERNFFKNGGANHHBNIICNLLD" + + "NOORNPPLNQ\x00\x1eNRRUNTTZNUIUNZZLOMMNPAANPCCIPEERPFYFPGNGPHHLPKAKPLOLPM" + + "\x00\x12PNCNPRRIPSSEPTRTPUUSPWLWPYRYPZCZQAATQMMMQNNNQOOOQPPPQQQQQRRRQSSS" + + "QTTTQU\x00\x03QVVVQWWWQXXXQYYYQZZZREEURHHOROOURS\x00\x15RUUSRWWASAAUSBLB" + + "SCYCSDDNSEWESGGPSHHNSIVNSJJMSKVKSLLESMMRSNENSOOMSRURSSSDSTTPSUUNSVLVSXXM" + + "SYYRSZWZTAAATCCATDCDTF\x00\x18TGGOTHHATJJKTKKLTLLSTMKMTNUNTOONTPMPTRURTT" + + "TOTVUVTWWNTZZAUAKRUGGAUK UMMIUN USSAUYRYUZZBVAATVCCTVDDRVEENVGGBVIIRVN" + + "NMVUUTWFLFWKAKWSSMXAAAXBBBXCCCXDDDXEEEXFFFXGGGXHHHXIIIXJJJXKKKXLLLXMMMXN" + + "NNXOOOXPPPXQQQXRRRXSSSXTTTXUUUXVVVXWWWXXXXXYYYXZZZYDMDYEEMYT\x00\x1bYUUG" + + "ZAAFZMMBZRARZWWEZZZZ\xff\xff\xff\xff" + +// altRegionISO3 holds a list of 3-letter region codes that cannot be +// mapped to 2-letter codes using the default algorithm. This is a short list. +const altRegionISO3 string = "SCGQUUSGSCOMPRKCYMSPMSRBATFMYTATN" + +// altRegionIDs holds a list of regionIDs the positions of which match those +// of the 3-letter ISO codes in altRegionISO3. +// Size: 22 bytes, 11 elements +var altRegionIDs = [11]uint16{ + 0x0058, 0x0071, 0x0089, 0x00a9, 0x00ab, 0x00ae, 0x00eb, 0x0106, + 0x0122, 0x0160, 0x00dd, +} + +// Size: 80 bytes, 20 elements +var regionOldMap = [20]FromTo{ + 0: {From: 0x44, To: 0xc5}, + 1: {From: 0x59, To: 0xa8}, + 2: {From: 0x60, To: 0x61}, + 3: {From: 0x67, To: 0x3b}, + 4: {From: 0x7a, To: 0x79}, + 5: {From: 0x94, To: 0x37}, + 6: {From: 0xa4, To: 0x134}, + 7: {From: 0xc2, To: 0x134}, + 8: {From: 0xd8, To: 0x140}, + 9: {From: 0xdd, To: 0x2b}, + 10: {From: 0xf0, To: 0x134}, + 11: {From: 0xf3, To: 0xe3}, + 12: {From: 0xfd, To: 0x71}, + 13: {From: 0x104, To: 0x165}, + 14: {From: 0x12b, To: 0x127}, + 15: {From: 0x133, To: 0x7c}, + 16: {From: 0x13b, To: 0x13f}, + 17: {From: 0x142, To: 0x134}, + 18: {From: 0x15e, To: 0x15f}, + 19: {From: 0x164, To: 0x4b}, +} + +// m49 maps regionIDs to UN.M49 codes. The first isoRegionOffset entries are +// codes indicating collections of regions. 
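+//
+// The forward direction is a plain array index (sketch; 0 is assumed to mean
+// "no M49 code assigned"):
+//
+//	func toM49(regionID uint16) int16 { return m49[regionID] }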
+// Size: 718 bytes, 359 elements
+var m49 = [359]int16{
+	// Entry 0 - 3F
+	0, 1, 2, 3, 5, 9, 11, 13,
+	14, 15, 17, 18, 19, 21, 29, 30,
+	34, 35, 39, 53, 54, 57, 61, 142,
+	143, 145, 150, 151, 154, 155, 202, 419,
+	958, 0, 20, 784, 4, 28, 660, 8,
+	51, 530, 24, 10, 32, 16, 40, 36,
+	533, 248, 31, 70, 52, 50, 56, 854,
+	100, 48, 108, 204, 652, 60, 96, 68,
+	// Entry 40 - 7F
+	535, 76, 44, 64, 104, 74, 72, 112,
+	84, 124, 166, 180, 140, 178, 756, 384,
+	184, 152, 120, 156, 170, 0, 0, 188,
+	891, 296, 192, 132, 531, 162, 196, 203,
+	278, 276, 0, 262, 208, 212, 214, 204,
+	12, 0, 218, 233, 818, 732, 232, 724,
+	231, 967, 0, 246, 242, 238, 583, 234,
+	0, 250, 249, 266, 826, 308, 268, 254,
+	// Entry 80 - BF
+	831, 288, 292, 304, 270, 324, 312, 226,
+	300, 239, 320, 316, 624, 328, 344, 334,
+	340, 191, 332, 348, 854, 0, 360, 372,
+	376, 833, 356, 86, 368, 364, 352, 380,
+	832, 388, 400, 392, 581, 404, 417, 116,
+	296, 174, 659, 408, 410, 414, 136, 398,
+	418, 422, 662, 438, 144, 430, 426, 440,
+	442, 428, 434, 504, 492, 498, 499, 663,
+	// Entry C0 - FF
+	450, 584, 581, 807, 466, 104, 496, 446,
+	580, 474, 478, 500, 470, 480, 462, 454,
+	484, 458, 508, 516, 540, 562, 574, 566,
+	548, 558, 528, 578, 524, 10, 520, 536,
+	570, 554, 512, 591, 0, 604, 258, 598,
+	608, 586, 616, 666, 612, 630, 275, 620,
+	581, 585, 600, 591, 634, 959, 960, 961,
+	962, 963, 964, 965, 966, 967, 968, 969,
+	// Entry 100 - 13F
+	970, 971, 972, 638, 716, 642, 688, 643,
+	646, 682, 90, 690, 729, 752, 702, 654,
+	705, 744, 703, 694, 674, 686, 706, 740,
+	728, 678, 810, 222, 534, 760, 748, 0,
+	796, 148, 260, 768, 764, 762, 772, 626,
+	795, 788, 776, 626, 792, 780, 798, 158,
+	834, 804, 800, 826, 581, 0, 840, 858,
+	860, 336, 670, 704, 862, 92, 850, 704,
+	// Entry 140 - 17F
+	548, 876, 581, 882, 973, 974, 975, 976,
+	977, 978, 979, 980, 981, 982, 983, 984,
+	985, 986, 987, 988, 989, 990, 991, 992,
+	993, 994, 995, 996, 997, 998, 720, 887,
+	175, 891, 710, 894, 180, 716, 999,
+}
+
+// m49Index gives indexes into fromM49 based on the three most significant bits
+// of a 10-bit UN.M49 code. To search an UN.M49 code in fromM49, search in
+//
+//	fromM49[m49Index[msb3(code)]:m49Index[msb3(code)+1]]
+//
+// for an entry where the first 7 bits match the 7 lsb of the UN.M49 code.
+// The region code is stored in the 9 lsb of the indexed value.
+// Size: 18 bytes, 9 elements
+var m49Index = [9]int16{
+	0, 59, 108, 143, 181, 220, 259, 291,
+	333,
+}
+
+// fromM49 contains entries to map UN.M49 codes to regions. See m49Index for details.
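+//
+// Putting the two tables together, the reverse lookup sketched from the
+// comment above (field widths inferred from "7 bits" and "9 lsb"):
+//
+//	func regionFromM49(code uint16) (regionID uint16, ok bool) {
+//		lo, hi := m49Index[code>>7], m49Index[code>>7+1]
+//		for _, v := range fromM49[lo:hi] {
+//			if v>>9 == code&0x7f { // top 7 bits hold the code's 7 lsb
+//				return v & 0x1ff, true // region in the 9 lsb
+//			}
+//		}
+//		return 0, false
+//	}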
+// Size: 666 bytes, 333 elements +var fromM49 = [333]uint16{ + // Entry 0 - 3F + 0x0201, 0x0402, 0x0603, 0x0824, 0x0a04, 0x1027, 0x1205, 0x142b, + 0x1606, 0x1868, 0x1a07, 0x1c08, 0x1e09, 0x202d, 0x220a, 0x240b, + 0x260c, 0x2822, 0x2a0d, 0x302a, 0x3825, 0x3a0e, 0x3c0f, 0x3e32, + 0x402c, 0x4410, 0x4611, 0x482f, 0x4e12, 0x502e, 0x5842, 0x6039, + 0x6435, 0x6628, 0x6834, 0x6a13, 0x6c14, 0x7036, 0x7215, 0x783d, + 0x7a16, 0x8043, 0x883f, 0x8c33, 0x9046, 0x9445, 0x9841, 0xa848, + 0xac9b, 0xb50a, 0xb93d, 0xc03e, 0xc838, 0xd0c5, 0xd83a, 0xe047, + 0xe8a7, 0xf052, 0xf849, 0x085b, 0x10ae, 0x184c, 0x1c17, 0x1e18, + // Entry 40 - 7F + 0x20b4, 0x2219, 0x2921, 0x2c1a, 0x2e1b, 0x3051, 0x341c, 0x361d, + 0x3853, 0x3d2f, 0x445d, 0x4c4a, 0x5454, 0x5ca9, 0x5f60, 0x644d, + 0x684b, 0x7050, 0x7857, 0x7e91, 0x805a, 0x885e, 0x941e, 0x965f, + 0x983b, 0xa064, 0xa865, 0xac66, 0xb46a, 0xbd1b, 0xc487, 0xcc70, + 0xce70, 0xd06e, 0xd26b, 0xd477, 0xdc75, 0xde89, 0xe474, 0xec73, + 0xf031, 0xf27a, 0xf479, 0xfc7f, 0x04e6, 0x0922, 0x0c63, 0x147b, + 0x187e, 0x1c84, 0x26ee, 0x2861, 0x2c60, 0x3061, 0x4081, 0x4882, + 0x50a8, 0x5888, 0x6083, 0x687d, 0x7086, 0x788b, 0x808a, 0x8885, + // Entry 80 - BF + 0x908d, 0x9892, 0x9c8f, 0xa139, 0xa890, 0xb08e, 0xb893, 0xc09e, + 0xc89a, 0xd096, 0xd89d, 0xe09c, 0xe897, 0xf098, 0xf89f, 0x004f, + 0x08a1, 0x10a3, 0x1caf, 0x20a2, 0x28a5, 0x30ab, 0x34ac, 0x3cad, + 0x42a6, 0x44b0, 0x461f, 0x4cb1, 0x54b6, 0x58b9, 0x5cb5, 0x64ba, + 0x6cb3, 0x70b7, 0x74b8, 0x7cc7, 0x84c0, 0x8ccf, 0x94d1, 0x9cce, + 0xa4c4, 0xaccc, 0xb4c9, 0xbcca, 0xc0cd, 0xc8d0, 0xd8bc, 0xe0c6, + 0xe4bd, 0xe6be, 0xe8cb, 0xf0bb, 0xf8d2, 0x00e2, 0x08d3, 0x10de, + 0x18dc, 0x20da, 0x2429, 0x265c, 0x2a30, 0x2d1c, 0x2e40, 0x30df, + // Entry C0 - FF + 0x38d4, 0x4940, 0x54e1, 0x5cd9, 0x64d5, 0x6cd7, 0x74e0, 0x7cd6, + 0x84db, 0x88c8, 0x8b34, 0x8e76, 0x90c1, 0x92f1, 0x94e9, 0x9ee3, + 0xace7, 0xb0f2, 0xb8e5, 0xc0e8, 0xc8ec, 0xd0ea, 0xd8ef, 0xe08c, + 0xe527, 0xeced, 0xf4f4, 0xfd03, 0x0505, 0x0707, 0x0d08, 0x183c, + 0x1d0f, 0x26aa, 0x2826, 0x2cb2, 0x2ebf, 0x34eb, 0x3d3a, 0x4514, + 0x4d19, 0x5509, 0x5d15, 0x6106, 0x650b, 0x6d13, 0x7d0e, 0x7f12, + 0x813f, 0x8310, 0x8516, 0x8d62, 0x9965, 0xa15e, 0xa86f, 0xb118, + 0xb30c, 0xb86d, 0xc10c, 0xc917, 0xd111, 0xd91e, 0xe10d, 0xe84e, + // Entry 100 - 13F + 0xf11d, 0xf525, 0xf924, 0x0123, 0x0926, 0x112a, 0x192d, 0x2023, + 0x2929, 0x312c, 0x3728, 0x3920, 0x3d2e, 0x4132, 0x4931, 0x4ec3, + 0x551a, 0x646c, 0x747c, 0x7e80, 0x80a0, 0x8299, 0x8530, 0x9136, + 0xa53e, 0xac37, 0xb537, 0xb938, 0xbd3c, 0xd941, 0xe543, 0xed5f, + 0xef5f, 0xf658, 0xfd63, 0x7c20, 0x7ef5, 0x80f6, 0x82f7, 0x84f8, + 0x86f9, 0x88fa, 0x8afb, 0x8cfc, 0x8e71, 0x90fe, 0x92ff, 0x9500, + 0x9701, 0x9902, 0x9b44, 0x9d45, 0x9f46, 0xa147, 0xa348, 0xa549, + 0xa74a, 0xa94b, 0xab4c, 0xad4d, 0xaf4e, 0xb14f, 0xb350, 0xb551, + // Entry 140 - 17F + 0xb752, 0xb953, 0xbb54, 0xbd55, 0xbf56, 0xc157, 0xc358, 0xc559, + 0xc75a, 0xc95b, 0xcb5c, 0xcd5d, 0xcf66, +} + +// Size: 2128 bytes +var variantIndex = map[string]uint8{ + "1606nict": 0x0, + "1694acad": 0x1, + "1901": 0x2, + "1959acad": 0x3, + "1994": 0x67, + "1996": 0x4, + "abl1943": 0x5, + "akuapem": 0x6, + "alalc97": 0x69, + "aluku": 0x7, + "ao1990": 0x8, + "aranes": 0x9, + "arevela": 0xa, + "arevmda": 0xb, + "arkaika": 0xc, + "asante": 0xd, + "auvern": 0xe, + "baku1926": 0xf, + "balanka": 0x10, + "barla": 0x11, + "basiceng": 0x12, + "bauddha": 0x13, + "bciav": 0x14, + "bcizbl": 0x15, + "biscayan": 0x16, + "biske": 0x62, + "bohoric": 0x17, + "boont": 0x18, + "bornholm": 0x19, + "cisaup": 0x1a, + "colb1945": 0x1b, + 
"cornu": 0x1c, + "creiss": 0x1d, + "dajnko": 0x1e, + "ekavsk": 0x1f, + "emodeng": 0x20, + "fonipa": 0x6a, + "fonkirsh": 0x6b, + "fonnapa": 0x6c, + "fonupa": 0x6d, + "fonxsamp": 0x6e, + "gallo": 0x21, + "gascon": 0x22, + "grclass": 0x23, + "grital": 0x24, + "grmistr": 0x25, + "hepburn": 0x26, + "heploc": 0x68, + "hognorsk": 0x27, + "hsistemo": 0x28, + "ijekavsk": 0x29, + "itihasa": 0x2a, + "ivanchov": 0x2b, + "jauer": 0x2c, + "jyutping": 0x2d, + "kkcor": 0x2e, + "kociewie": 0x2f, + "kscor": 0x30, + "laukika": 0x31, + "lemosin": 0x32, + "lengadoc": 0x33, + "lipaw": 0x63, + "ltg1929": 0x34, + "ltg2007": 0x35, + "luna1918": 0x36, + "metelko": 0x37, + "monoton": 0x38, + "ndyuka": 0x39, + "nedis": 0x3a, + "newfound": 0x3b, + "nicard": 0x3c, + "njiva": 0x64, + "nulik": 0x3d, + "osojs": 0x65, + "oxendict": 0x3e, + "pahawh2": 0x3f, + "pahawh3": 0x40, + "pahawh4": 0x41, + "pamaka": 0x42, + "peano": 0x43, + "petr1708": 0x44, + "pinyin": 0x45, + "polyton": 0x46, + "provenc": 0x47, + "puter": 0x48, + "rigik": 0x49, + "rozaj": 0x4a, + "rumgr": 0x4b, + "scotland": 0x4c, + "scouse": 0x4d, + "simple": 0x6f, + "solba": 0x66, + "sotav": 0x4e, + "spanglis": 0x4f, + "surmiran": 0x50, + "sursilv": 0x51, + "sutsilv": 0x52, + "synnejyl": 0x53, + "tarask": 0x54, + "tongyong": 0x55, + "tunumiit": 0x56, + "uccor": 0x57, + "ucrcor": 0x58, + "ulster": 0x59, + "unifon": 0x5a, + "vaidika": 0x5b, + "valencia": 0x5c, + "vallader": 0x5d, + "vecdruka": 0x5e, + "vivaraup": 0x5f, + "wadegile": 0x60, + "xsistemo": 0x61, +} + +// variantNumSpecialized is the number of specialized variants in variants. +const variantNumSpecialized = 105 + +// nRegionGroups is the number of region groups. +const nRegionGroups = 33 + +type likelyLangRegion struct { + lang uint16 + region uint16 +} + +// likelyScript is a lookup table, indexed by scriptID, for the most likely +// languages and regions given a script. 
+// Size: 1052 bytes, 263 elements +var likelyScript = [263]likelyLangRegion{ + 1: {lang: 0x14e, region: 0x85}, + 3: {lang: 0x2a2, region: 0x107}, + 4: {lang: 0x1f, region: 0x9a}, + 5: {lang: 0x3a, region: 0x6c}, + 7: {lang: 0x3b, region: 0x9d}, + 8: {lang: 0x1d7, region: 0x28}, + 9: {lang: 0x13, region: 0x9d}, + 10: {lang: 0x5b, region: 0x96}, + 11: {lang: 0x60, region: 0x52}, + 12: {lang: 0xb9, region: 0xb5}, + 13: {lang: 0x63, region: 0x96}, + 14: {lang: 0xa5, region: 0x35}, + 15: {lang: 0x3e9, region: 0x9a}, + 17: {lang: 0x529, region: 0x12f}, + 18: {lang: 0x3b1, region: 0x9a}, + 19: {lang: 0x15e, region: 0x79}, + 20: {lang: 0xc2, region: 0x96}, + 21: {lang: 0x9d, region: 0xe8}, + 22: {lang: 0xdb, region: 0x35}, + 23: {lang: 0xf3, region: 0x49}, + 24: {lang: 0x4f0, region: 0x12c}, + 25: {lang: 0xe7, region: 0x13f}, + 26: {lang: 0xe5, region: 0x136}, + 29: {lang: 0xf1, region: 0x6c}, + 31: {lang: 0x1a0, region: 0x5e}, + 32: {lang: 0x3e2, region: 0x107}, + 34: {lang: 0x1be, region: 0x9a}, + 38: {lang: 0x15e, region: 0x79}, + 41: {lang: 0x133, region: 0x6c}, + 42: {lang: 0x431, region: 0x27}, + 44: {lang: 0x27, region: 0x70}, + 46: {lang: 0x210, region: 0x7e}, + 47: {lang: 0xfe, region: 0x38}, + 49: {lang: 0x19b, region: 0x9a}, + 50: {lang: 0x19e, region: 0x131}, + 51: {lang: 0x3e9, region: 0x9a}, + 52: {lang: 0x136, region: 0x88}, + 53: {lang: 0x1a4, region: 0x9a}, + 54: {lang: 0x39d, region: 0x9a}, + 55: {lang: 0x529, region: 0x12f}, + 56: {lang: 0x254, region: 0xac}, + 57: {lang: 0x529, region: 0x53}, + 58: {lang: 0x1cb, region: 0xe8}, + 59: {lang: 0x529, region: 0x53}, + 60: {lang: 0x529, region: 0x12f}, + 61: {lang: 0x2fd, region: 0x9c}, + 62: {lang: 0x1bc, region: 0x98}, + 63: {lang: 0x200, region: 0xa3}, + 64: {lang: 0x1c5, region: 0x12c}, + 65: {lang: 0x1ca, region: 0xb0}, + 68: {lang: 0x1d5, region: 0x93}, + 70: {lang: 0x142, region: 0x9f}, + 71: {lang: 0x254, region: 0xac}, + 72: {lang: 0x20e, region: 0x96}, + 73: {lang: 0x200, region: 0xa3}, + 75: {lang: 0x135, region: 0xc5}, + 76: {lang: 0x200, region: 0xa3}, + 78: {lang: 0x3bb, region: 0xe9}, + 79: {lang: 0x24a, region: 0xa7}, + 80: {lang: 0x3fa, region: 0x9a}, + 83: {lang: 0x251, region: 0x9a}, + 84: {lang: 0x254, region: 0xac}, + 86: {lang: 0x88, region: 0x9a}, + 87: {lang: 0x370, region: 0x124}, + 88: {lang: 0x2b8, region: 0xb0}, + 93: {lang: 0x29f, region: 0x9a}, + 94: {lang: 0x2a8, region: 0x9a}, + 95: {lang: 0x28f, region: 0x88}, + 96: {lang: 0x1a0, region: 0x88}, + 97: {lang: 0x2ac, region: 0x53}, + 99: {lang: 0x4f4, region: 0x12c}, + 100: {lang: 0x4f5, region: 0x12c}, + 101: {lang: 0x1be, region: 0x9a}, + 103: {lang: 0x337, region: 0x9d}, + 104: {lang: 0x4f7, region: 0x53}, + 105: {lang: 0xa9, region: 0x53}, + 108: {lang: 0x2e8, region: 0x113}, + 109: {lang: 0x4f8, region: 0x10c}, + 110: {lang: 0x4f8, region: 0x10c}, + 111: {lang: 0x304, region: 0x9a}, + 112: {lang: 0x31b, region: 0x9a}, + 113: {lang: 0x30b, region: 0x53}, + 115: {lang: 0x31e, region: 0x35}, + 116: {lang: 0x30e, region: 0x9a}, + 117: {lang: 0x414, region: 0xe9}, + 118: {lang: 0x331, region: 0xc5}, + 121: {lang: 0x4f9, region: 0x109}, + 122: {lang: 0x3b, region: 0xa2}, + 123: {lang: 0x353, region: 0xdc}, + 126: {lang: 0x2d0, region: 0x85}, + 127: {lang: 0x52a, region: 0x53}, + 128: {lang: 0x403, region: 0x97}, + 129: {lang: 0x3ee, region: 0x9a}, + 130: {lang: 0x39b, region: 0xc6}, + 131: {lang: 0x395, region: 0x9a}, + 132: {lang: 0x399, region: 0x136}, + 133: {lang: 0x429, region: 0x116}, + 135: {lang: 0x3b, region: 0x11d}, + 136: {lang: 0xfd, region: 
0xc5}, + 139: {lang: 0x27d, region: 0x107}, + 140: {lang: 0x2c9, region: 0x53}, + 141: {lang: 0x39f, region: 0x9d}, + 142: {lang: 0x39f, region: 0x53}, + 144: {lang: 0x3ad, region: 0xb1}, + 146: {lang: 0x1c6, region: 0x53}, + 147: {lang: 0x4fd, region: 0x9d}, + 200: {lang: 0x3cb, region: 0x96}, + 203: {lang: 0x372, region: 0x10d}, + 204: {lang: 0x420, region: 0x98}, + 206: {lang: 0x4ff, region: 0x15f}, + 207: {lang: 0x3f0, region: 0x9a}, + 208: {lang: 0x45, region: 0x136}, + 209: {lang: 0x139, region: 0x7c}, + 210: {lang: 0x3e9, region: 0x9a}, + 212: {lang: 0x3e9, region: 0x9a}, + 213: {lang: 0x3fa, region: 0x9a}, + 214: {lang: 0x40c, region: 0xb4}, + 217: {lang: 0x433, region: 0x9a}, + 218: {lang: 0xef, region: 0xc6}, + 219: {lang: 0x43e, region: 0x96}, + 221: {lang: 0x44d, region: 0x35}, + 222: {lang: 0x44e, region: 0x9c}, + 226: {lang: 0x45a, region: 0xe8}, + 227: {lang: 0x11a, region: 0x9a}, + 228: {lang: 0x45e, region: 0x53}, + 229: {lang: 0x232, region: 0x53}, + 230: {lang: 0x450, region: 0x9a}, + 231: {lang: 0x4a5, region: 0x53}, + 232: {lang: 0x9f, region: 0x13f}, + 233: {lang: 0x461, region: 0x9a}, + 235: {lang: 0x528, region: 0xbb}, + 236: {lang: 0x153, region: 0xe8}, + 237: {lang: 0x128, region: 0xce}, + 238: {lang: 0x46b, region: 0x124}, + 239: {lang: 0xa9, region: 0x53}, + 240: {lang: 0x2ce, region: 0x9a}, + 243: {lang: 0x4ad, region: 0x11d}, + 244: {lang: 0x4be, region: 0xb5}, + 247: {lang: 0x1ce, region: 0x9a}, + 250: {lang: 0x3a9, region: 0x9d}, + 251: {lang: 0x22, region: 0x9c}, + 253: {lang: 0x1ea, region: 0x53}, + 254: {lang: 0xef, region: 0xc6}, +} + +type likelyScriptRegion struct { + region uint16 + script uint16 + flags uint8 +} + +// likelyLang is a lookup table, indexed by langID, for the most likely +// scripts and regions given incomplete information. If more entries exist for a +// given language, region and script are the index and size respectively +// of the list in likelyLangList. 
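+//
+// The two cases described above, sketched; the flag bit that marks a
+// multi-entry language (isList here) and likelyLangList's layout are
+// assumptions:
+//
+//	x := likelyLang[langID]
+//	if x.flags&isList == 0 {
+//		// x.region and x.script are the likely pair themselves.
+//	} else {
+//		// x.region is the start index and x.script the length of the
+//		// candidate list in likelyLangList.
+//	}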
+// Size: 7980 bytes, 1330 elements +var likelyLang = [1330]likelyScriptRegion{ + 0: {region: 0x136, script: 0x5b, flags: 0x0}, + 1: {region: 0x70, script: 0x5b, flags: 0x0}, + 2: {region: 0x166, script: 0x5b, flags: 0x0}, + 3: {region: 0x166, script: 0x5b, flags: 0x0}, + 4: {region: 0x166, script: 0x5b, flags: 0x0}, + 5: {region: 0x7e, script: 0x20, flags: 0x0}, + 6: {region: 0x166, script: 0x5b, flags: 0x0}, + 7: {region: 0x166, script: 0x20, flags: 0x0}, + 8: {region: 0x81, script: 0x5b, flags: 0x0}, + 9: {region: 0x166, script: 0x5b, flags: 0x0}, + 10: {region: 0x166, script: 0x5b, flags: 0x0}, + 11: {region: 0x166, script: 0x5b, flags: 0x0}, + 12: {region: 0x96, script: 0x5b, flags: 0x0}, + 13: {region: 0x132, script: 0x5b, flags: 0x0}, + 14: {region: 0x81, script: 0x5b, flags: 0x0}, + 15: {region: 0x166, script: 0x5b, flags: 0x0}, + 16: {region: 0x166, script: 0x5b, flags: 0x0}, + 17: {region: 0x107, script: 0x20, flags: 0x0}, + 18: {region: 0x166, script: 0x5b, flags: 0x0}, + 19: {region: 0x9d, script: 0x9, flags: 0x0}, + 20: {region: 0x129, script: 0x5, flags: 0x0}, + 21: {region: 0x166, script: 0x5b, flags: 0x0}, + 22: {region: 0x162, script: 0x5b, flags: 0x0}, + 23: {region: 0x166, script: 0x5b, flags: 0x0}, + 24: {region: 0x166, script: 0x5b, flags: 0x0}, + 25: {region: 0x166, script: 0x5b, flags: 0x0}, + 26: {region: 0x166, script: 0x5b, flags: 0x0}, + 27: {region: 0x166, script: 0x5b, flags: 0x0}, + 28: {region: 0x52, script: 0x5b, flags: 0x0}, + 29: {region: 0x166, script: 0x5b, flags: 0x0}, + 30: {region: 0x166, script: 0x5b, flags: 0x0}, + 31: {region: 0x9a, script: 0x4, flags: 0x0}, + 32: {region: 0x166, script: 0x5b, flags: 0x0}, + 33: {region: 0x81, script: 0x5b, flags: 0x0}, + 34: {region: 0x9c, script: 0xfb, flags: 0x0}, + 35: {region: 0x166, script: 0x5b, flags: 0x0}, + 36: {region: 0x166, script: 0x5b, flags: 0x0}, + 37: {region: 0x14e, script: 0x5b, flags: 0x0}, + 38: {region: 0x107, script: 0x20, flags: 0x0}, + 39: {region: 0x70, script: 0x2c, flags: 0x0}, + 40: {region: 0x166, script: 0x5b, flags: 0x0}, + 41: {region: 0x166, script: 0x5b, flags: 0x0}, + 42: {region: 0xd7, script: 0x5b, flags: 0x0}, + 43: {region: 0x166, script: 0x5b, flags: 0x0}, + 45: {region: 0x166, script: 0x5b, flags: 0x0}, + 46: {region: 0x166, script: 0x5b, flags: 0x0}, + 47: {region: 0x166, script: 0x5b, flags: 0x0}, + 48: {region: 0x166, script: 0x5b, flags: 0x0}, + 49: {region: 0x166, script: 0x5b, flags: 0x0}, + 50: {region: 0x166, script: 0x5b, flags: 0x0}, + 51: {region: 0x96, script: 0x5b, flags: 0x0}, + 52: {region: 0x166, script: 0x5, flags: 0x0}, + 53: {region: 0x123, script: 0x5, flags: 0x0}, + 54: {region: 0x166, script: 0x5b, flags: 0x0}, + 55: {region: 0x166, script: 0x5b, flags: 0x0}, + 56: {region: 0x166, script: 0x5b, flags: 0x0}, + 57: {region: 0x166, script: 0x5b, flags: 0x0}, + 58: {region: 0x6c, script: 0x5, flags: 0x0}, + 59: {region: 0x0, script: 0x3, flags: 0x1}, + 60: {region: 0x166, script: 0x5b, flags: 0x0}, + 61: {region: 0x51, script: 0x5b, flags: 0x0}, + 62: {region: 0x3f, script: 0x5b, flags: 0x0}, + 63: {region: 0x68, script: 0x5, flags: 0x0}, + 65: {region: 0xbb, script: 0x5, flags: 0x0}, + 66: {region: 0x6c, script: 0x5, flags: 0x0}, + 67: {region: 0x9a, script: 0xe, flags: 0x0}, + 68: {region: 0x130, script: 0x5b, flags: 0x0}, + 69: {region: 0x136, script: 0xd0, flags: 0x0}, + 70: {region: 0x166, script: 0x5b, flags: 0x0}, + 71: {region: 0x166, script: 0x5b, flags: 0x0}, + 72: {region: 0x6f, script: 0x5b, flags: 0x0}, + 73: {region: 0x166, script: 0x5b, 
flags: 0x0}, + 74: {region: 0x166, script: 0x5b, flags: 0x0}, + 75: {region: 0x49, script: 0x5b, flags: 0x0}, + 76: {region: 0x166, script: 0x5b, flags: 0x0}, + 77: {region: 0x107, script: 0x20, flags: 0x0}, + 78: {region: 0x166, script: 0x5, flags: 0x0}, + 79: {region: 0x166, script: 0x5b, flags: 0x0}, + 80: {region: 0x166, script: 0x5b, flags: 0x0}, + 81: {region: 0x166, script: 0x5b, flags: 0x0}, + 82: {region: 0x9a, script: 0x22, flags: 0x0}, + 83: {region: 0x166, script: 0x5b, flags: 0x0}, + 84: {region: 0x166, script: 0x5b, flags: 0x0}, + 85: {region: 0x166, script: 0x5b, flags: 0x0}, + 86: {region: 0x3f, script: 0x5b, flags: 0x0}, + 87: {region: 0x166, script: 0x5b, flags: 0x0}, + 88: {region: 0x3, script: 0x5, flags: 0x1}, + 89: {region: 0x107, script: 0x20, flags: 0x0}, + 90: {region: 0xe9, script: 0x5, flags: 0x0}, + 91: {region: 0x96, script: 0x5b, flags: 0x0}, + 92: {region: 0xdc, script: 0x22, flags: 0x0}, + 93: {region: 0x2e, script: 0x5b, flags: 0x0}, + 94: {region: 0x52, script: 0x5b, flags: 0x0}, + 95: {region: 0x166, script: 0x5b, flags: 0x0}, + 96: {region: 0x52, script: 0xb, flags: 0x0}, + 97: {region: 0x166, script: 0x5b, flags: 0x0}, + 98: {region: 0x166, script: 0x5b, flags: 0x0}, + 99: {region: 0x96, script: 0x5b, flags: 0x0}, + 100: {region: 0x166, script: 0x5b, flags: 0x0}, + 101: {region: 0x52, script: 0x5b, flags: 0x0}, + 102: {region: 0x166, script: 0x5b, flags: 0x0}, + 103: {region: 0x166, script: 0x5b, flags: 0x0}, + 104: {region: 0x166, script: 0x5b, flags: 0x0}, + 105: {region: 0x166, script: 0x5b, flags: 0x0}, + 106: {region: 0x4f, script: 0x5b, flags: 0x0}, + 107: {region: 0x166, script: 0x5b, flags: 0x0}, + 108: {region: 0x166, script: 0x5b, flags: 0x0}, + 109: {region: 0x166, script: 0x5b, flags: 0x0}, + 110: {region: 0x166, script: 0x2c, flags: 0x0}, + 111: {region: 0x166, script: 0x5b, flags: 0x0}, + 112: {region: 0x166, script: 0x5b, flags: 0x0}, + 113: {region: 0x47, script: 0x20, flags: 0x0}, + 114: {region: 0x166, script: 0x5b, flags: 0x0}, + 115: {region: 0x166, script: 0x5b, flags: 0x0}, + 116: {region: 0x10c, script: 0x5, flags: 0x0}, + 117: {region: 0x163, script: 0x5b, flags: 0x0}, + 118: {region: 0x166, script: 0x5b, flags: 0x0}, + 119: {region: 0x96, script: 0x5b, flags: 0x0}, + 120: {region: 0x166, script: 0x5b, flags: 0x0}, + 121: {region: 0x130, script: 0x5b, flags: 0x0}, + 122: {region: 0x52, script: 0x5b, flags: 0x0}, + 123: {region: 0x9a, script: 0xe6, flags: 0x0}, + 124: {region: 0xe9, script: 0x5, flags: 0x0}, + 125: {region: 0x9a, script: 0x22, flags: 0x0}, + 126: {region: 0x38, script: 0x20, flags: 0x0}, + 127: {region: 0x9a, script: 0x22, flags: 0x0}, + 128: {region: 0xe9, script: 0x5, flags: 0x0}, + 129: {region: 0x12c, script: 0x34, flags: 0x0}, + 131: {region: 0x9a, script: 0x22, flags: 0x0}, + 132: {region: 0x166, script: 0x5b, flags: 0x0}, + 133: {region: 0x9a, script: 0x22, flags: 0x0}, + 134: {region: 0xe8, script: 0x5b, flags: 0x0}, + 135: {region: 0x166, script: 0x5b, flags: 0x0}, + 136: {region: 0x9a, script: 0x22, flags: 0x0}, + 137: {region: 0x166, script: 0x5b, flags: 0x0}, + 138: {region: 0x140, script: 0x5b, flags: 0x0}, + 139: {region: 0x166, script: 0x5b, flags: 0x0}, + 140: {region: 0x166, script: 0x5b, flags: 0x0}, + 141: {region: 0xe8, script: 0x5b, flags: 0x0}, + 142: {region: 0x166, script: 0x5b, flags: 0x0}, + 143: {region: 0xd7, script: 0x5b, flags: 0x0}, + 144: {region: 0x166, script: 0x5b, flags: 0x0}, + 145: {region: 0x166, script: 0x5b, flags: 0x0}, + 146: {region: 0x166, script: 0x5b, flags: 0x0}, + 
147: {region: 0x166, script: 0x2c, flags: 0x0}, + 148: {region: 0x9a, script: 0x22, flags: 0x0}, + 149: {region: 0x96, script: 0x5b, flags: 0x0}, + 150: {region: 0x166, script: 0x5b, flags: 0x0}, + 151: {region: 0x166, script: 0x5b, flags: 0x0}, + 152: {region: 0x115, script: 0x5b, flags: 0x0}, + 153: {region: 0x166, script: 0x5b, flags: 0x0}, + 154: {region: 0x166, script: 0x5b, flags: 0x0}, + 155: {region: 0x52, script: 0x5b, flags: 0x0}, + 156: {region: 0x166, script: 0x5b, flags: 0x0}, + 157: {region: 0xe8, script: 0x5b, flags: 0x0}, + 158: {region: 0x166, script: 0x5b, flags: 0x0}, + 159: {region: 0x13f, script: 0xe8, flags: 0x0}, + 160: {region: 0xc4, script: 0x5b, flags: 0x0}, + 161: {region: 0x166, script: 0x5b, flags: 0x0}, + 162: {region: 0x166, script: 0x5b, flags: 0x0}, + 163: {region: 0xc4, script: 0x5b, flags: 0x0}, + 164: {region: 0x166, script: 0x5b, flags: 0x0}, + 165: {region: 0x35, script: 0xe, flags: 0x0}, + 166: {region: 0x166, script: 0x5b, flags: 0x0}, + 167: {region: 0x166, script: 0x5b, flags: 0x0}, + 168: {region: 0x166, script: 0x5b, flags: 0x0}, + 169: {region: 0x53, script: 0xef, flags: 0x0}, + 170: {region: 0x166, script: 0x5b, flags: 0x0}, + 171: {region: 0x166, script: 0x5b, flags: 0x0}, + 172: {region: 0x166, script: 0x5b, flags: 0x0}, + 173: {region: 0x9a, script: 0xe, flags: 0x0}, + 174: {region: 0x166, script: 0x5b, flags: 0x0}, + 175: {region: 0x9d, script: 0x5, flags: 0x0}, + 176: {region: 0x166, script: 0x5b, flags: 0x0}, + 177: {region: 0x4f, script: 0x5b, flags: 0x0}, + 178: {region: 0x79, script: 0x5b, flags: 0x0}, + 179: {region: 0x9a, script: 0x22, flags: 0x0}, + 180: {region: 0xe9, script: 0x5, flags: 0x0}, + 181: {region: 0x9a, script: 0x22, flags: 0x0}, + 182: {region: 0x166, script: 0x5b, flags: 0x0}, + 183: {region: 0x33, script: 0x5b, flags: 0x0}, + 184: {region: 0x166, script: 0x5b, flags: 0x0}, + 185: {region: 0xb5, script: 0xc, flags: 0x0}, + 186: {region: 0x52, script: 0x5b, flags: 0x0}, + 187: {region: 0x166, script: 0x2c, flags: 0x0}, + 188: {region: 0xe8, script: 0x5b, flags: 0x0}, + 189: {region: 0x166, script: 0x5b, flags: 0x0}, + 190: {region: 0xe9, script: 0x22, flags: 0x0}, + 191: {region: 0x107, script: 0x20, flags: 0x0}, + 192: {region: 0x160, script: 0x5b, flags: 0x0}, + 193: {region: 0x166, script: 0x5b, flags: 0x0}, + 194: {region: 0x96, script: 0x5b, flags: 0x0}, + 195: {region: 0x166, script: 0x5b, flags: 0x0}, + 196: {region: 0x52, script: 0x5b, flags: 0x0}, + 197: {region: 0x166, script: 0x5b, flags: 0x0}, + 198: {region: 0x166, script: 0x5b, flags: 0x0}, + 199: {region: 0x166, script: 0x5b, flags: 0x0}, + 200: {region: 0x87, script: 0x5b, flags: 0x0}, + 201: {region: 0x166, script: 0x5b, flags: 0x0}, + 202: {region: 0x166, script: 0x5b, flags: 0x0}, + 203: {region: 0x166, script: 0x5b, flags: 0x0}, + 204: {region: 0x166, script: 0x5b, flags: 0x0}, + 205: {region: 0x6e, script: 0x2c, flags: 0x0}, + 206: {region: 0x166, script: 0x5b, flags: 0x0}, + 207: {region: 0x166, script: 0x5b, flags: 0x0}, + 208: {region: 0x52, script: 0x5b, flags: 0x0}, + 209: {region: 0x166, script: 0x5b, flags: 0x0}, + 210: {region: 0x166, script: 0x5b, flags: 0x0}, + 211: {region: 0xc4, script: 0x5b, flags: 0x0}, + 212: {region: 0x166, script: 0x5b, flags: 0x0}, + 213: {region: 0x166, script: 0x5b, flags: 0x0}, + 214: {region: 0x166, script: 0x5b, flags: 0x0}, + 215: {region: 0x6f, script: 0x5b, flags: 0x0}, + 216: {region: 0x166, script: 0x5b, flags: 0x0}, + 217: {region: 0x166, script: 0x5b, flags: 0x0}, + 218: {region: 0xd7, script: 0x5b, 
flags: 0x0}, + 219: {region: 0x35, script: 0x16, flags: 0x0}, + 220: {region: 0x107, script: 0x20, flags: 0x0}, + 221: {region: 0xe8, script: 0x5b, flags: 0x0}, + 222: {region: 0x166, script: 0x5b, flags: 0x0}, + 223: {region: 0x132, script: 0x5b, flags: 0x0}, + 224: {region: 0x8b, script: 0x5b, flags: 0x0}, + 225: {region: 0x76, script: 0x5b, flags: 0x0}, + 226: {region: 0x107, script: 0x20, flags: 0x0}, + 227: {region: 0x136, script: 0x5b, flags: 0x0}, + 228: {region: 0x49, script: 0x5b, flags: 0x0}, + 229: {region: 0x136, script: 0x1a, flags: 0x0}, + 230: {region: 0xa7, script: 0x5, flags: 0x0}, + 231: {region: 0x13f, script: 0x19, flags: 0x0}, + 232: {region: 0x166, script: 0x5b, flags: 0x0}, + 233: {region: 0x9c, script: 0x5, flags: 0x0}, + 234: {region: 0x166, script: 0x5b, flags: 0x0}, + 235: {region: 0x166, script: 0x5b, flags: 0x0}, + 236: {region: 0x166, script: 0x5b, flags: 0x0}, + 237: {region: 0x166, script: 0x5b, flags: 0x0}, + 238: {region: 0x166, script: 0x5b, flags: 0x0}, + 239: {region: 0xc6, script: 0xda, flags: 0x0}, + 240: {region: 0x79, script: 0x5b, flags: 0x0}, + 241: {region: 0x6c, script: 0x1d, flags: 0x0}, + 242: {region: 0xe8, script: 0x5b, flags: 0x0}, + 243: {region: 0x49, script: 0x17, flags: 0x0}, + 244: {region: 0x131, script: 0x20, flags: 0x0}, + 245: {region: 0x49, script: 0x17, flags: 0x0}, + 246: {region: 0x49, script: 0x17, flags: 0x0}, + 247: {region: 0x49, script: 0x17, flags: 0x0}, + 248: {region: 0x49, script: 0x17, flags: 0x0}, + 249: {region: 0x10b, script: 0x5b, flags: 0x0}, + 250: {region: 0x5f, script: 0x5b, flags: 0x0}, + 251: {region: 0xea, script: 0x5b, flags: 0x0}, + 252: {region: 0x49, script: 0x17, flags: 0x0}, + 253: {region: 0xc5, script: 0x88, flags: 0x0}, + 254: {region: 0x8, script: 0x2, flags: 0x1}, + 255: {region: 0x107, script: 0x20, flags: 0x0}, + 256: {region: 0x7c, script: 0x5b, flags: 0x0}, + 257: {region: 0x64, script: 0x5b, flags: 0x0}, + 258: {region: 0x166, script: 0x5b, flags: 0x0}, + 259: {region: 0x166, script: 0x5b, flags: 0x0}, + 260: {region: 0x166, script: 0x5b, flags: 0x0}, + 261: {region: 0x166, script: 0x5b, flags: 0x0}, + 262: {region: 0x136, script: 0x5b, flags: 0x0}, + 263: {region: 0x107, script: 0x20, flags: 0x0}, + 264: {region: 0xa5, script: 0x5b, flags: 0x0}, + 265: {region: 0x166, script: 0x5b, flags: 0x0}, + 266: {region: 0x166, script: 0x5b, flags: 0x0}, + 267: {region: 0x9a, script: 0x5, flags: 0x0}, + 268: {region: 0x166, script: 0x5b, flags: 0x0}, + 269: {region: 0x61, script: 0x5b, flags: 0x0}, + 270: {region: 0x166, script: 0x5b, flags: 0x0}, + 271: {region: 0x49, script: 0x5b, flags: 0x0}, + 272: {region: 0x166, script: 0x5b, flags: 0x0}, + 273: {region: 0x166, script: 0x5b, flags: 0x0}, + 274: {region: 0x166, script: 0x5b, flags: 0x0}, + 275: {region: 0x166, script: 0x5, flags: 0x0}, + 276: {region: 0x49, script: 0x5b, flags: 0x0}, + 277: {region: 0x166, script: 0x5b, flags: 0x0}, + 278: {region: 0x166, script: 0x5b, flags: 0x0}, + 279: {region: 0xd5, script: 0x5b, flags: 0x0}, + 280: {region: 0x4f, script: 0x5b, flags: 0x0}, + 281: {region: 0x166, script: 0x5b, flags: 0x0}, + 282: {region: 0x9a, script: 0x5, flags: 0x0}, + 283: {region: 0x166, script: 0x5b, flags: 0x0}, + 284: {region: 0x166, script: 0x5b, flags: 0x0}, + 285: {region: 0x166, script: 0x5b, flags: 0x0}, + 286: {region: 0x166, script: 0x2c, flags: 0x0}, + 287: {region: 0x61, script: 0x5b, flags: 0x0}, + 288: {region: 0xc4, script: 0x5b, flags: 0x0}, + 289: {region: 0xd1, script: 0x5b, flags: 0x0}, + 290: {region: 0x166, script: 
0x5b, flags: 0x0}, + 291: {region: 0xdc, script: 0x22, flags: 0x0}, + 292: {region: 0x52, script: 0x5b, flags: 0x0}, + 293: {region: 0x166, script: 0x5b, flags: 0x0}, + 294: {region: 0x166, script: 0x5b, flags: 0x0}, + 295: {region: 0x166, script: 0x5b, flags: 0x0}, + 296: {region: 0xce, script: 0xed, flags: 0x0}, + 297: {region: 0x166, script: 0x5b, flags: 0x0}, + 298: {region: 0x166, script: 0x5b, flags: 0x0}, + 299: {region: 0x115, script: 0x5b, flags: 0x0}, + 300: {region: 0x37, script: 0x5b, flags: 0x0}, + 301: {region: 0x43, script: 0xef, flags: 0x0}, + 302: {region: 0x166, script: 0x5b, flags: 0x0}, + 303: {region: 0xa5, script: 0x5b, flags: 0x0}, + 304: {region: 0x81, script: 0x5b, flags: 0x0}, + 305: {region: 0xd7, script: 0x5b, flags: 0x0}, + 306: {region: 0x9f, script: 0x5b, flags: 0x0}, + 307: {region: 0x6c, script: 0x29, flags: 0x0}, + 308: {region: 0x166, script: 0x5b, flags: 0x0}, + 309: {region: 0xc5, script: 0x4b, flags: 0x0}, + 310: {region: 0x88, script: 0x34, flags: 0x0}, + 311: {region: 0x166, script: 0x5b, flags: 0x0}, + 312: {region: 0x166, script: 0x5b, flags: 0x0}, + 313: {region: 0xa, script: 0x2, flags: 0x1}, + 314: {region: 0x166, script: 0x5b, flags: 0x0}, + 315: {region: 0x166, script: 0x5b, flags: 0x0}, + 316: {region: 0x1, script: 0x5b, flags: 0x0}, + 317: {region: 0x166, script: 0x5b, flags: 0x0}, + 318: {region: 0x6f, script: 0x5b, flags: 0x0}, + 319: {region: 0x136, script: 0x5b, flags: 0x0}, + 320: {region: 0x6b, script: 0x5b, flags: 0x0}, + 321: {region: 0x166, script: 0x5b, flags: 0x0}, + 322: {region: 0x9f, script: 0x46, flags: 0x0}, + 323: {region: 0x166, script: 0x5b, flags: 0x0}, + 324: {region: 0x166, script: 0x5b, flags: 0x0}, + 325: {region: 0x6f, script: 0x5b, flags: 0x0}, + 326: {region: 0x52, script: 0x5b, flags: 0x0}, + 327: {region: 0x6f, script: 0x5b, flags: 0x0}, + 328: {region: 0x9d, script: 0x5, flags: 0x0}, + 329: {region: 0x166, script: 0x5b, flags: 0x0}, + 330: {region: 0x166, script: 0x5b, flags: 0x0}, + 331: {region: 0x166, script: 0x5b, flags: 0x0}, + 332: {region: 0x166, script: 0x5b, flags: 0x0}, + 333: {region: 0x87, script: 0x5b, flags: 0x0}, + 334: {region: 0xc, script: 0x2, flags: 0x1}, + 335: {region: 0x166, script: 0x5b, flags: 0x0}, + 336: {region: 0xc4, script: 0x5b, flags: 0x0}, + 337: {region: 0x73, script: 0x5b, flags: 0x0}, + 338: {region: 0x10c, script: 0x5, flags: 0x0}, + 339: {region: 0xe8, script: 0x5b, flags: 0x0}, + 340: {region: 0x10d, script: 0x5b, flags: 0x0}, + 341: {region: 0x74, script: 0x5b, flags: 0x0}, + 342: {region: 0x166, script: 0x5b, flags: 0x0}, + 343: {region: 0x166, script: 0x5b, flags: 0x0}, + 344: {region: 0x77, script: 0x5b, flags: 0x0}, + 345: {region: 0x166, script: 0x5b, flags: 0x0}, + 346: {region: 0x3b, script: 0x5b, flags: 0x0}, + 347: {region: 0x166, script: 0x5b, flags: 0x0}, + 348: {region: 0x166, script: 0x5b, flags: 0x0}, + 349: {region: 0x166, script: 0x5b, flags: 0x0}, + 350: {region: 0x79, script: 0x5b, flags: 0x0}, + 351: {region: 0x136, script: 0x5b, flags: 0x0}, + 352: {region: 0x79, script: 0x5b, flags: 0x0}, + 353: {region: 0x61, script: 0x5b, flags: 0x0}, + 354: {region: 0x61, script: 0x5b, flags: 0x0}, + 355: {region: 0x52, script: 0x5, flags: 0x0}, + 356: {region: 0x141, script: 0x5b, flags: 0x0}, + 357: {region: 0x166, script: 0x5b, flags: 0x0}, + 358: {region: 0x85, script: 0x5b, flags: 0x0}, + 359: {region: 0x166, script: 0x5b, flags: 0x0}, + 360: {region: 0xd5, script: 0x5b, flags: 0x0}, + 361: {region: 0x9f, script: 0x5b, flags: 0x0}, + 362: {region: 0xd7, script: 
0x5b, flags: 0x0}, + 363: {region: 0x166, script: 0x5b, flags: 0x0}, + 364: {region: 0x10c, script: 0x5b, flags: 0x0}, + 365: {region: 0xda, script: 0x5b, flags: 0x0}, + 366: {region: 0x97, script: 0x5b, flags: 0x0}, + 367: {region: 0x81, script: 0x5b, flags: 0x0}, + 368: {region: 0x166, script: 0x5b, flags: 0x0}, + 369: {region: 0xbd, script: 0x5b, flags: 0x0}, + 370: {region: 0x166, script: 0x5b, flags: 0x0}, + 371: {region: 0x166, script: 0x5b, flags: 0x0}, + 372: {region: 0x166, script: 0x5b, flags: 0x0}, + 373: {region: 0x53, script: 0x3b, flags: 0x0}, + 374: {region: 0x166, script: 0x5b, flags: 0x0}, + 375: {region: 0x96, script: 0x5b, flags: 0x0}, + 376: {region: 0x166, script: 0x5b, flags: 0x0}, + 377: {region: 0x166, script: 0x5b, flags: 0x0}, + 378: {region: 0x9a, script: 0x22, flags: 0x0}, + 379: {region: 0x166, script: 0x5b, flags: 0x0}, + 380: {region: 0x9d, script: 0x5, flags: 0x0}, + 381: {region: 0x7f, script: 0x5b, flags: 0x0}, + 382: {region: 0x7c, script: 0x5b, flags: 0x0}, + 383: {region: 0x166, script: 0x5b, flags: 0x0}, + 384: {region: 0x166, script: 0x5b, flags: 0x0}, + 385: {region: 0x166, script: 0x5b, flags: 0x0}, + 386: {region: 0x166, script: 0x5b, flags: 0x0}, + 387: {region: 0x166, script: 0x5b, flags: 0x0}, + 388: {region: 0x166, script: 0x5b, flags: 0x0}, + 389: {region: 0x70, script: 0x2c, flags: 0x0}, + 390: {region: 0x166, script: 0x5b, flags: 0x0}, + 391: {region: 0xdc, script: 0x22, flags: 0x0}, + 392: {region: 0x166, script: 0x5b, flags: 0x0}, + 393: {region: 0xa8, script: 0x5b, flags: 0x0}, + 394: {region: 0x166, script: 0x5b, flags: 0x0}, + 395: {region: 0xe9, script: 0x5, flags: 0x0}, + 396: {region: 0x166, script: 0x5b, flags: 0x0}, + 397: {region: 0xe9, script: 0x5, flags: 0x0}, + 398: {region: 0x166, script: 0x5b, flags: 0x0}, + 399: {region: 0x166, script: 0x5b, flags: 0x0}, + 400: {region: 0x6f, script: 0x5b, flags: 0x0}, + 401: {region: 0x9d, script: 0x5, flags: 0x0}, + 402: {region: 0x166, script: 0x5b, flags: 0x0}, + 403: {region: 0x166, script: 0x2c, flags: 0x0}, + 404: {region: 0xf2, script: 0x5b, flags: 0x0}, + 405: {region: 0x166, script: 0x5b, flags: 0x0}, + 406: {region: 0x166, script: 0x5b, flags: 0x0}, + 407: {region: 0x166, script: 0x5b, flags: 0x0}, + 408: {region: 0x166, script: 0x2c, flags: 0x0}, + 409: {region: 0x166, script: 0x5b, flags: 0x0}, + 410: {region: 0x9a, script: 0x22, flags: 0x0}, + 411: {region: 0x9a, script: 0xe9, flags: 0x0}, + 412: {region: 0x96, script: 0x5b, flags: 0x0}, + 413: {region: 0xda, script: 0x5b, flags: 0x0}, + 414: {region: 0x131, script: 0x32, flags: 0x0}, + 415: {region: 0x166, script: 0x5b, flags: 0x0}, + 416: {region: 0xe, script: 0x2, flags: 0x1}, + 417: {region: 0x9a, script: 0xe, flags: 0x0}, + 418: {region: 0x166, script: 0x5b, flags: 0x0}, + 419: {region: 0x4e, script: 0x5b, flags: 0x0}, + 420: {region: 0x9a, script: 0x35, flags: 0x0}, + 421: {region: 0x41, script: 0x5b, flags: 0x0}, + 422: {region: 0x54, script: 0x5b, flags: 0x0}, + 423: {region: 0x166, script: 0x5b, flags: 0x0}, + 424: {region: 0x81, script: 0x5b, flags: 0x0}, + 425: {region: 0x166, script: 0x5b, flags: 0x0}, + 426: {region: 0x166, script: 0x5b, flags: 0x0}, + 427: {region: 0xa5, script: 0x5b, flags: 0x0}, + 428: {region: 0x99, script: 0x5b, flags: 0x0}, + 429: {region: 0x166, script: 0x5b, flags: 0x0}, + 430: {region: 0xdc, script: 0x22, flags: 0x0}, + 431: {region: 0x166, script: 0x5b, flags: 0x0}, + 432: {region: 0x166, script: 0x5, flags: 0x0}, + 433: {region: 0x49, script: 0x5b, flags: 0x0}, + 434: {region: 0x166, 
script: 0x5, flags: 0x0}, + 435: {region: 0x166, script: 0x5b, flags: 0x0}, + 436: {region: 0x10, script: 0x3, flags: 0x1}, + 437: {region: 0x166, script: 0x5b, flags: 0x0}, + 438: {region: 0x53, script: 0x3b, flags: 0x0}, + 439: {region: 0x166, script: 0x5b, flags: 0x0}, + 440: {region: 0x136, script: 0x5b, flags: 0x0}, + 441: {region: 0x24, script: 0x5, flags: 0x0}, + 442: {region: 0x166, script: 0x5b, flags: 0x0}, + 443: {region: 0x166, script: 0x2c, flags: 0x0}, + 444: {region: 0x98, script: 0x3e, flags: 0x0}, + 445: {region: 0x166, script: 0x5b, flags: 0x0}, + 446: {region: 0x9a, script: 0x22, flags: 0x0}, + 447: {region: 0x166, script: 0x5b, flags: 0x0}, + 448: {region: 0x74, script: 0x5b, flags: 0x0}, + 449: {region: 0x166, script: 0x5b, flags: 0x0}, + 450: {region: 0x166, script: 0x5b, flags: 0x0}, + 451: {region: 0xe8, script: 0x5b, flags: 0x0}, + 452: {region: 0x166, script: 0x5b, flags: 0x0}, + 453: {region: 0x12c, script: 0x40, flags: 0x0}, + 454: {region: 0x53, script: 0x92, flags: 0x0}, + 455: {region: 0x166, script: 0x5b, flags: 0x0}, + 456: {region: 0xe9, script: 0x5, flags: 0x0}, + 457: {region: 0x9a, script: 0x22, flags: 0x0}, + 458: {region: 0xb0, script: 0x41, flags: 0x0}, + 459: {region: 0xe8, script: 0x5b, flags: 0x0}, + 460: {region: 0xe9, script: 0x5, flags: 0x0}, + 461: {region: 0xe7, script: 0x5b, flags: 0x0}, + 462: {region: 0x9a, script: 0x22, flags: 0x0}, + 463: {region: 0x9a, script: 0x22, flags: 0x0}, + 464: {region: 0x166, script: 0x5b, flags: 0x0}, + 465: {region: 0x91, script: 0x5b, flags: 0x0}, + 466: {region: 0x61, script: 0x5b, flags: 0x0}, + 467: {region: 0x53, script: 0x3b, flags: 0x0}, + 468: {region: 0x92, script: 0x5b, flags: 0x0}, + 469: {region: 0x93, script: 0x5b, flags: 0x0}, + 470: {region: 0x166, script: 0x5b, flags: 0x0}, + 471: {region: 0x28, script: 0x8, flags: 0x0}, + 472: {region: 0xd3, script: 0x5b, flags: 0x0}, + 473: {region: 0x79, script: 0x5b, flags: 0x0}, + 474: {region: 0x166, script: 0x5b, flags: 0x0}, + 475: {region: 0x166, script: 0x5b, flags: 0x0}, + 476: {region: 0xd1, script: 0x5b, flags: 0x0}, + 477: {region: 0xd7, script: 0x5b, flags: 0x0}, + 478: {region: 0x166, script: 0x5b, flags: 0x0}, + 479: {region: 0x166, script: 0x5b, flags: 0x0}, + 480: {region: 0x166, script: 0x5b, flags: 0x0}, + 481: {region: 0x96, script: 0x5b, flags: 0x0}, + 482: {region: 0x166, script: 0x5b, flags: 0x0}, + 483: {region: 0x166, script: 0x5b, flags: 0x0}, + 484: {region: 0x166, script: 0x5b, flags: 0x0}, + 486: {region: 0x123, script: 0x5b, flags: 0x0}, + 487: {region: 0xd7, script: 0x5b, flags: 0x0}, + 488: {region: 0x166, script: 0x5b, flags: 0x0}, + 489: {region: 0x166, script: 0x5b, flags: 0x0}, + 490: {region: 0x53, script: 0xfd, flags: 0x0}, + 491: {region: 0x166, script: 0x5b, flags: 0x0}, + 492: {region: 0x136, script: 0x5b, flags: 0x0}, + 493: {region: 0x166, script: 0x5b, flags: 0x0}, + 494: {region: 0x49, script: 0x5b, flags: 0x0}, + 495: {region: 0x166, script: 0x5b, flags: 0x0}, + 496: {region: 0x166, script: 0x5b, flags: 0x0}, + 497: {region: 0xe8, script: 0x5b, flags: 0x0}, + 498: {region: 0x166, script: 0x5b, flags: 0x0}, + 499: {region: 0x96, script: 0x5b, flags: 0x0}, + 500: {region: 0x107, script: 0x20, flags: 0x0}, + 501: {region: 0x1, script: 0x5b, flags: 0x0}, + 502: {region: 0x166, script: 0x5b, flags: 0x0}, + 503: {region: 0x166, script: 0x5b, flags: 0x0}, + 504: {region: 0x9e, script: 0x5b, flags: 0x0}, + 505: {region: 0x9f, script: 0x5b, flags: 0x0}, + 506: {region: 0x49, script: 0x17, flags: 0x0}, + 507: {region: 
0x98, script: 0x3e, flags: 0x0}, + 508: {region: 0x166, script: 0x5b, flags: 0x0}, + 509: {region: 0x166, script: 0x5b, flags: 0x0}, + 510: {region: 0x107, script: 0x5b, flags: 0x0}, + 511: {region: 0x166, script: 0x5b, flags: 0x0}, + 512: {region: 0xa3, script: 0x49, flags: 0x0}, + 513: {region: 0x166, script: 0x5b, flags: 0x0}, + 514: {region: 0xa1, script: 0x5b, flags: 0x0}, + 515: {region: 0x1, script: 0x5b, flags: 0x0}, + 516: {region: 0x166, script: 0x5b, flags: 0x0}, + 517: {region: 0x166, script: 0x5b, flags: 0x0}, + 518: {region: 0x166, script: 0x5b, flags: 0x0}, + 519: {region: 0x52, script: 0x5b, flags: 0x0}, + 520: {region: 0x131, script: 0x3e, flags: 0x0}, + 521: {region: 0x166, script: 0x5b, flags: 0x0}, + 522: {region: 0x130, script: 0x5b, flags: 0x0}, + 523: {region: 0xdc, script: 0x22, flags: 0x0}, + 524: {region: 0x166, script: 0x5b, flags: 0x0}, + 525: {region: 0x64, script: 0x5b, flags: 0x0}, + 526: {region: 0x96, script: 0x5b, flags: 0x0}, + 527: {region: 0x96, script: 0x5b, flags: 0x0}, + 528: {region: 0x7e, script: 0x2e, flags: 0x0}, + 529: {region: 0x138, script: 0x20, flags: 0x0}, + 530: {region: 0x68, script: 0x5b, flags: 0x0}, + 531: {region: 0xc5, script: 0x5b, flags: 0x0}, + 532: {region: 0x166, script: 0x5b, flags: 0x0}, + 533: {region: 0x166, script: 0x5b, flags: 0x0}, + 534: {region: 0xd7, script: 0x5b, flags: 0x0}, + 535: {region: 0xa5, script: 0x5b, flags: 0x0}, + 536: {region: 0xc4, script: 0x5b, flags: 0x0}, + 537: {region: 0x107, script: 0x20, flags: 0x0}, + 538: {region: 0x166, script: 0x5b, flags: 0x0}, + 539: {region: 0x166, script: 0x5b, flags: 0x0}, + 540: {region: 0x166, script: 0x5b, flags: 0x0}, + 541: {region: 0x166, script: 0x5b, flags: 0x0}, + 542: {region: 0xd5, script: 0x5, flags: 0x0}, + 543: {region: 0xd7, script: 0x5b, flags: 0x0}, + 544: {region: 0x165, script: 0x5b, flags: 0x0}, + 545: {region: 0x166, script: 0x5b, flags: 0x0}, + 546: {region: 0x166, script: 0x5b, flags: 0x0}, + 547: {region: 0x130, script: 0x5b, flags: 0x0}, + 548: {region: 0x123, script: 0x5, flags: 0x0}, + 549: {region: 0x166, script: 0x5b, flags: 0x0}, + 550: {region: 0x124, script: 0xee, flags: 0x0}, + 551: {region: 0x5b, script: 0x5b, flags: 0x0}, + 552: {region: 0x52, script: 0x5b, flags: 0x0}, + 553: {region: 0x166, script: 0x5b, flags: 0x0}, + 554: {region: 0x4f, script: 0x5b, flags: 0x0}, + 555: {region: 0x9a, script: 0x22, flags: 0x0}, + 556: {region: 0x9a, script: 0x22, flags: 0x0}, + 557: {region: 0x4b, script: 0x5b, flags: 0x0}, + 558: {region: 0x96, script: 0x5b, flags: 0x0}, + 559: {region: 0x166, script: 0x5b, flags: 0x0}, + 560: {region: 0x41, script: 0x5b, flags: 0x0}, + 561: {region: 0x9a, script: 0x5b, flags: 0x0}, + 562: {region: 0x53, script: 0xe5, flags: 0x0}, + 563: {region: 0x9a, script: 0x22, flags: 0x0}, + 564: {region: 0xc4, script: 0x5b, flags: 0x0}, + 565: {region: 0x166, script: 0x5b, flags: 0x0}, + 566: {region: 0x9a, script: 0x76, flags: 0x0}, + 567: {region: 0xe9, script: 0x5, flags: 0x0}, + 568: {region: 0x166, script: 0x5b, flags: 0x0}, + 569: {region: 0xa5, script: 0x5b, flags: 0x0}, + 570: {region: 0x166, script: 0x5b, flags: 0x0}, + 571: {region: 0x12c, script: 0x5b, flags: 0x0}, + 572: {region: 0x166, script: 0x5b, flags: 0x0}, + 573: {region: 0xd3, script: 0x5b, flags: 0x0}, + 574: {region: 0x166, script: 0x5b, flags: 0x0}, + 575: {region: 0xb0, script: 0x58, flags: 0x0}, + 576: {region: 0x166, script: 0x5b, flags: 0x0}, + 577: {region: 0x166, script: 0x5b, flags: 0x0}, + 578: {region: 0x13, script: 0x6, flags: 0x1}, + 579: 
{region: 0x166, script: 0x5b, flags: 0x0}, + 580: {region: 0x52, script: 0x5b, flags: 0x0}, + 581: {region: 0x83, script: 0x5b, flags: 0x0}, + 582: {region: 0xa5, script: 0x5b, flags: 0x0}, + 583: {region: 0x166, script: 0x5b, flags: 0x0}, + 584: {region: 0x166, script: 0x5b, flags: 0x0}, + 585: {region: 0x166, script: 0x5b, flags: 0x0}, + 586: {region: 0xa7, script: 0x4f, flags: 0x0}, + 587: {region: 0x2a, script: 0x5b, flags: 0x0}, + 588: {region: 0x166, script: 0x5b, flags: 0x0}, + 589: {region: 0x166, script: 0x5b, flags: 0x0}, + 590: {region: 0x166, script: 0x5b, flags: 0x0}, + 591: {region: 0x166, script: 0x5b, flags: 0x0}, + 592: {region: 0x166, script: 0x5b, flags: 0x0}, + 593: {region: 0x9a, script: 0x53, flags: 0x0}, + 594: {region: 0x8c, script: 0x5b, flags: 0x0}, + 595: {region: 0x166, script: 0x5b, flags: 0x0}, + 596: {region: 0xac, script: 0x54, flags: 0x0}, + 597: {region: 0x107, script: 0x20, flags: 0x0}, + 598: {region: 0x9a, script: 0x22, flags: 0x0}, + 599: {region: 0x166, script: 0x5b, flags: 0x0}, + 600: {region: 0x76, script: 0x5b, flags: 0x0}, + 601: {region: 0x166, script: 0x5b, flags: 0x0}, + 602: {region: 0xb5, script: 0x5b, flags: 0x0}, + 603: {region: 0x166, script: 0x5b, flags: 0x0}, + 604: {region: 0x166, script: 0x5b, flags: 0x0}, + 605: {region: 0x166, script: 0x5b, flags: 0x0}, + 606: {region: 0x166, script: 0x5b, flags: 0x0}, + 607: {region: 0x166, script: 0x5b, flags: 0x0}, + 608: {region: 0x166, script: 0x5b, flags: 0x0}, + 609: {region: 0x166, script: 0x5b, flags: 0x0}, + 610: {region: 0x166, script: 0x2c, flags: 0x0}, + 611: {region: 0x166, script: 0x5b, flags: 0x0}, + 612: {region: 0x107, script: 0x20, flags: 0x0}, + 613: {region: 0x113, script: 0x5b, flags: 0x0}, + 614: {region: 0xe8, script: 0x5b, flags: 0x0}, + 615: {region: 0x107, script: 0x5b, flags: 0x0}, + 616: {region: 0x166, script: 0x5b, flags: 0x0}, + 617: {region: 0x9a, script: 0x22, flags: 0x0}, + 618: {region: 0x9a, script: 0x5, flags: 0x0}, + 619: {region: 0x130, script: 0x5b, flags: 0x0}, + 620: {region: 0x166, script: 0x5b, flags: 0x0}, + 621: {region: 0x52, script: 0x5b, flags: 0x0}, + 622: {region: 0x61, script: 0x5b, flags: 0x0}, + 623: {region: 0x166, script: 0x5b, flags: 0x0}, + 624: {region: 0x166, script: 0x5b, flags: 0x0}, + 625: {region: 0x166, script: 0x2c, flags: 0x0}, + 626: {region: 0x166, script: 0x5b, flags: 0x0}, + 627: {region: 0x166, script: 0x5b, flags: 0x0}, + 628: {region: 0x19, script: 0x3, flags: 0x1}, + 629: {region: 0x166, script: 0x5b, flags: 0x0}, + 630: {region: 0x166, script: 0x5b, flags: 0x0}, + 631: {region: 0x166, script: 0x5b, flags: 0x0}, + 632: {region: 0x166, script: 0x5b, flags: 0x0}, + 633: {region: 0x107, script: 0x20, flags: 0x0}, + 634: {region: 0x166, script: 0x5b, flags: 0x0}, + 635: {region: 0x166, script: 0x5b, flags: 0x0}, + 636: {region: 0x166, script: 0x5b, flags: 0x0}, + 637: {region: 0x107, script: 0x20, flags: 0x0}, + 638: {region: 0x166, script: 0x5b, flags: 0x0}, + 639: {region: 0x96, script: 0x5b, flags: 0x0}, + 640: {region: 0xe9, script: 0x5, flags: 0x0}, + 641: {region: 0x7c, script: 0x5b, flags: 0x0}, + 642: {region: 0x166, script: 0x5b, flags: 0x0}, + 643: {region: 0x166, script: 0x5b, flags: 0x0}, + 644: {region: 0x166, script: 0x5b, flags: 0x0}, + 645: {region: 0x166, script: 0x2c, flags: 0x0}, + 646: {region: 0x124, script: 0xee, flags: 0x0}, + 647: {region: 0xe9, script: 0x5, flags: 0x0}, + 648: {region: 0x166, script: 0x5b, flags: 0x0}, + 649: {region: 0x166, script: 0x5b, flags: 0x0}, + 650: {region: 0x1c, script: 0x5, 
flags: 0x1}, + 651: {region: 0x166, script: 0x5b, flags: 0x0}, + 652: {region: 0x166, script: 0x5b, flags: 0x0}, + 653: {region: 0x166, script: 0x5b, flags: 0x0}, + 654: {region: 0x139, script: 0x5b, flags: 0x0}, + 655: {region: 0x88, script: 0x5f, flags: 0x0}, + 656: {region: 0x98, script: 0x3e, flags: 0x0}, + 657: {region: 0x130, script: 0x5b, flags: 0x0}, + 658: {region: 0xe9, script: 0x5, flags: 0x0}, + 659: {region: 0x132, script: 0x5b, flags: 0x0}, + 660: {region: 0x166, script: 0x5b, flags: 0x0}, + 661: {region: 0xb8, script: 0x5b, flags: 0x0}, + 662: {region: 0x107, script: 0x20, flags: 0x0}, + 663: {region: 0x166, script: 0x5b, flags: 0x0}, + 664: {region: 0x96, script: 0x5b, flags: 0x0}, + 665: {region: 0x166, script: 0x5b, flags: 0x0}, + 666: {region: 0x53, script: 0xee, flags: 0x0}, + 667: {region: 0x166, script: 0x5b, flags: 0x0}, + 668: {region: 0x166, script: 0x5b, flags: 0x0}, + 669: {region: 0x166, script: 0x5b, flags: 0x0}, + 670: {region: 0x166, script: 0x5b, flags: 0x0}, + 671: {region: 0x9a, script: 0x5d, flags: 0x0}, + 672: {region: 0x166, script: 0x5b, flags: 0x0}, + 673: {region: 0x166, script: 0x5b, flags: 0x0}, + 674: {region: 0x107, script: 0x20, flags: 0x0}, + 675: {region: 0x132, script: 0x5b, flags: 0x0}, + 676: {region: 0x166, script: 0x5b, flags: 0x0}, + 677: {region: 0xda, script: 0x5b, flags: 0x0}, + 678: {region: 0x166, script: 0x5b, flags: 0x0}, + 679: {region: 0x166, script: 0x5b, flags: 0x0}, + 680: {region: 0x21, script: 0x2, flags: 0x1}, + 681: {region: 0x166, script: 0x5b, flags: 0x0}, + 682: {region: 0x166, script: 0x5b, flags: 0x0}, + 683: {region: 0x9f, script: 0x5b, flags: 0x0}, + 684: {region: 0x53, script: 0x61, flags: 0x0}, + 685: {region: 0x96, script: 0x5b, flags: 0x0}, + 686: {region: 0x9d, script: 0x5, flags: 0x0}, + 687: {region: 0x136, script: 0x5b, flags: 0x0}, + 688: {region: 0x166, script: 0x5b, flags: 0x0}, + 689: {region: 0x166, script: 0x5b, flags: 0x0}, + 690: {region: 0x9a, script: 0xe9, flags: 0x0}, + 691: {region: 0x9f, script: 0x5b, flags: 0x0}, + 692: {region: 0x166, script: 0x5b, flags: 0x0}, + 693: {region: 0x4b, script: 0x5b, flags: 0x0}, + 694: {region: 0x166, script: 0x5b, flags: 0x0}, + 695: {region: 0x166, script: 0x5b, flags: 0x0}, + 696: {region: 0xb0, script: 0x58, flags: 0x0}, + 697: {region: 0x166, script: 0x5b, flags: 0x0}, + 698: {region: 0x166, script: 0x5b, flags: 0x0}, + 699: {region: 0x4b, script: 0x5b, flags: 0x0}, + 700: {region: 0x166, script: 0x5b, flags: 0x0}, + 701: {region: 0x166, script: 0x5b, flags: 0x0}, + 702: {region: 0x163, script: 0x5b, flags: 0x0}, + 703: {region: 0x9d, script: 0x5, flags: 0x0}, + 704: {region: 0xb7, script: 0x5b, flags: 0x0}, + 705: {region: 0xb9, script: 0x5b, flags: 0x0}, + 706: {region: 0x4b, script: 0x5b, flags: 0x0}, + 707: {region: 0x4b, script: 0x5b, flags: 0x0}, + 708: {region: 0xa5, script: 0x5b, flags: 0x0}, + 709: {region: 0xa5, script: 0x5b, flags: 0x0}, + 710: {region: 0x9d, script: 0x5, flags: 0x0}, + 711: {region: 0xb9, script: 0x5b, flags: 0x0}, + 712: {region: 0x124, script: 0xee, flags: 0x0}, + 713: {region: 0x53, script: 0x3b, flags: 0x0}, + 714: {region: 0x12c, script: 0x5b, flags: 0x0}, + 715: {region: 0x96, script: 0x5b, flags: 0x0}, + 716: {region: 0x52, script: 0x5b, flags: 0x0}, + 717: {region: 0x9a, script: 0x22, flags: 0x0}, + 718: {region: 0x9a, script: 0x22, flags: 0x0}, + 719: {region: 0x96, script: 0x5b, flags: 0x0}, + 720: {region: 0x23, script: 0x3, flags: 0x1}, + 721: {region: 0xa5, script: 0x5b, flags: 0x0}, + 722: {region: 0x166, script: 
0x5b, flags: 0x0}, + 723: {region: 0xd0, script: 0x5b, flags: 0x0}, + 724: {region: 0x166, script: 0x5b, flags: 0x0}, + 725: {region: 0x166, script: 0x5b, flags: 0x0}, + 726: {region: 0x166, script: 0x5b, flags: 0x0}, + 727: {region: 0x166, script: 0x5b, flags: 0x0}, + 728: {region: 0x166, script: 0x5b, flags: 0x0}, + 729: {region: 0x166, script: 0x5b, flags: 0x0}, + 730: {region: 0x166, script: 0x5b, flags: 0x0}, + 731: {region: 0x166, script: 0x5b, flags: 0x0}, + 732: {region: 0x166, script: 0x5b, flags: 0x0}, + 733: {region: 0x166, script: 0x5b, flags: 0x0}, + 734: {region: 0x166, script: 0x5b, flags: 0x0}, + 735: {region: 0x166, script: 0x5, flags: 0x0}, + 736: {region: 0x107, script: 0x20, flags: 0x0}, + 737: {region: 0xe8, script: 0x5b, flags: 0x0}, + 738: {region: 0x166, script: 0x5b, flags: 0x0}, + 739: {region: 0x96, script: 0x5b, flags: 0x0}, + 740: {region: 0x166, script: 0x2c, flags: 0x0}, + 741: {region: 0x166, script: 0x5b, flags: 0x0}, + 742: {region: 0x166, script: 0x5b, flags: 0x0}, + 743: {region: 0x166, script: 0x5b, flags: 0x0}, + 744: {region: 0x113, script: 0x5b, flags: 0x0}, + 745: {region: 0xa5, script: 0x5b, flags: 0x0}, + 746: {region: 0x166, script: 0x5b, flags: 0x0}, + 747: {region: 0x166, script: 0x5b, flags: 0x0}, + 748: {region: 0x124, script: 0x5, flags: 0x0}, + 749: {region: 0xcd, script: 0x5b, flags: 0x0}, + 750: {region: 0x166, script: 0x5b, flags: 0x0}, + 751: {region: 0x166, script: 0x5b, flags: 0x0}, + 752: {region: 0x166, script: 0x5b, flags: 0x0}, + 753: {region: 0xc0, script: 0x5b, flags: 0x0}, + 754: {region: 0xd2, script: 0x5b, flags: 0x0}, + 755: {region: 0x166, script: 0x5b, flags: 0x0}, + 756: {region: 0x52, script: 0x5b, flags: 0x0}, + 757: {region: 0xdc, script: 0x22, flags: 0x0}, + 758: {region: 0x130, script: 0x5b, flags: 0x0}, + 759: {region: 0xc1, script: 0x5b, flags: 0x0}, + 760: {region: 0x166, script: 0x5b, flags: 0x0}, + 761: {region: 0x166, script: 0x5b, flags: 0x0}, + 762: {region: 0xe1, script: 0x5b, flags: 0x0}, + 763: {region: 0x166, script: 0x5b, flags: 0x0}, + 764: {region: 0x96, script: 0x5b, flags: 0x0}, + 765: {region: 0x9c, script: 0x3d, flags: 0x0}, + 766: {region: 0x166, script: 0x5b, flags: 0x0}, + 767: {region: 0xc3, script: 0x20, flags: 0x0}, + 768: {region: 0x166, script: 0x5, flags: 0x0}, + 769: {region: 0x166, script: 0x5b, flags: 0x0}, + 770: {region: 0x166, script: 0x5b, flags: 0x0}, + 771: {region: 0x166, script: 0x5b, flags: 0x0}, + 772: {region: 0x9a, script: 0x6f, flags: 0x0}, + 773: {region: 0x166, script: 0x5b, flags: 0x0}, + 774: {region: 0x166, script: 0x5b, flags: 0x0}, + 775: {region: 0x10c, script: 0x5b, flags: 0x0}, + 776: {region: 0x166, script: 0x5b, flags: 0x0}, + 777: {region: 0x166, script: 0x5b, flags: 0x0}, + 778: {region: 0x166, script: 0x5b, flags: 0x0}, + 779: {region: 0x26, script: 0x3, flags: 0x1}, + 780: {region: 0x166, script: 0x5b, flags: 0x0}, + 781: {region: 0x166, script: 0x5b, flags: 0x0}, + 782: {region: 0x9a, script: 0xe, flags: 0x0}, + 783: {region: 0xc5, script: 0x76, flags: 0x0}, + 785: {region: 0x166, script: 0x5b, flags: 0x0}, + 786: {region: 0x49, script: 0x5b, flags: 0x0}, + 787: {region: 0x49, script: 0x5b, flags: 0x0}, + 788: {region: 0x37, script: 0x5b, flags: 0x0}, + 789: {region: 0x166, script: 0x5b, flags: 0x0}, + 790: {region: 0x166, script: 0x5b, flags: 0x0}, + 791: {region: 0x166, script: 0x5b, flags: 0x0}, + 792: {region: 0x166, script: 0x5b, flags: 0x0}, + 793: {region: 0x166, script: 0x5b, flags: 0x0}, + 794: {region: 0x166, script: 0x5b, flags: 0x0}, + 795: 
{region: 0x9a, script: 0x22, flags: 0x0}, + 796: {region: 0xdc, script: 0x22, flags: 0x0}, + 797: {region: 0x107, script: 0x20, flags: 0x0}, + 798: {region: 0x35, script: 0x73, flags: 0x0}, + 799: {region: 0x29, script: 0x3, flags: 0x1}, + 800: {region: 0xcc, script: 0x5b, flags: 0x0}, + 801: {region: 0x166, script: 0x5b, flags: 0x0}, + 802: {region: 0x166, script: 0x5b, flags: 0x0}, + 803: {region: 0x166, script: 0x5b, flags: 0x0}, + 804: {region: 0x9a, script: 0x22, flags: 0x0}, + 805: {region: 0x52, script: 0x5b, flags: 0x0}, + 807: {region: 0x166, script: 0x5b, flags: 0x0}, + 808: {region: 0x136, script: 0x5b, flags: 0x0}, + 809: {region: 0x166, script: 0x5b, flags: 0x0}, + 810: {region: 0x166, script: 0x5b, flags: 0x0}, + 811: {region: 0xe9, script: 0x5, flags: 0x0}, + 812: {region: 0xc4, script: 0x5b, flags: 0x0}, + 813: {region: 0x9a, script: 0x22, flags: 0x0}, + 814: {region: 0x96, script: 0x5b, flags: 0x0}, + 815: {region: 0x165, script: 0x5b, flags: 0x0}, + 816: {region: 0x166, script: 0x5b, flags: 0x0}, + 817: {region: 0xc5, script: 0x76, flags: 0x0}, + 818: {region: 0x166, script: 0x5b, flags: 0x0}, + 819: {region: 0x166, script: 0x2c, flags: 0x0}, + 820: {region: 0x107, script: 0x20, flags: 0x0}, + 821: {region: 0x166, script: 0x5b, flags: 0x0}, + 822: {region: 0x132, script: 0x5b, flags: 0x0}, + 823: {region: 0x9d, script: 0x67, flags: 0x0}, + 824: {region: 0x166, script: 0x5b, flags: 0x0}, + 825: {region: 0x166, script: 0x5b, flags: 0x0}, + 826: {region: 0x9d, script: 0x5, flags: 0x0}, + 827: {region: 0x166, script: 0x5b, flags: 0x0}, + 828: {region: 0x166, script: 0x5b, flags: 0x0}, + 829: {region: 0x166, script: 0x5b, flags: 0x0}, + 830: {region: 0xde, script: 0x5b, flags: 0x0}, + 831: {region: 0x166, script: 0x5b, flags: 0x0}, + 832: {region: 0x166, script: 0x5b, flags: 0x0}, + 834: {region: 0x166, script: 0x5b, flags: 0x0}, + 835: {region: 0x53, script: 0x3b, flags: 0x0}, + 836: {region: 0x9f, script: 0x5b, flags: 0x0}, + 837: {region: 0xd3, script: 0x5b, flags: 0x0}, + 838: {region: 0x166, script: 0x5b, flags: 0x0}, + 839: {region: 0xdb, script: 0x5b, flags: 0x0}, + 840: {region: 0x166, script: 0x5b, flags: 0x0}, + 841: {region: 0x166, script: 0x5b, flags: 0x0}, + 842: {region: 0x166, script: 0x5b, flags: 0x0}, + 843: {region: 0xd0, script: 0x5b, flags: 0x0}, + 844: {region: 0x166, script: 0x5b, flags: 0x0}, + 845: {region: 0x166, script: 0x5b, flags: 0x0}, + 846: {region: 0x165, script: 0x5b, flags: 0x0}, + 847: {region: 0xd2, script: 0x5b, flags: 0x0}, + 848: {region: 0x61, script: 0x5b, flags: 0x0}, + 849: {region: 0xdc, script: 0x22, flags: 0x0}, + 850: {region: 0x166, script: 0x5b, flags: 0x0}, + 851: {region: 0xdc, script: 0x22, flags: 0x0}, + 852: {region: 0x166, script: 0x5b, flags: 0x0}, + 853: {region: 0x166, script: 0x5b, flags: 0x0}, + 854: {region: 0xd3, script: 0x5b, flags: 0x0}, + 855: {region: 0x166, script: 0x5b, flags: 0x0}, + 856: {region: 0x166, script: 0x5b, flags: 0x0}, + 857: {region: 0xd2, script: 0x5b, flags: 0x0}, + 858: {region: 0x166, script: 0x5b, flags: 0x0}, + 859: {region: 0xd0, script: 0x5b, flags: 0x0}, + 860: {region: 0xd0, script: 0x5b, flags: 0x0}, + 861: {region: 0x166, script: 0x5b, flags: 0x0}, + 862: {region: 0x166, script: 0x5b, flags: 0x0}, + 863: {region: 0x96, script: 0x5b, flags: 0x0}, + 864: {region: 0x166, script: 0x5b, flags: 0x0}, + 865: {region: 0xe0, script: 0x5b, flags: 0x0}, + 866: {region: 0x166, script: 0x5b, flags: 0x0}, + 867: {region: 0x166, script: 0x5b, flags: 0x0}, + 868: {region: 0x9a, script: 0x5b, flags: 
0x0}, + 869: {region: 0x166, script: 0x5b, flags: 0x0}, + 870: {region: 0x166, script: 0x5b, flags: 0x0}, + 871: {region: 0xda, script: 0x5b, flags: 0x0}, + 872: {region: 0x52, script: 0x5b, flags: 0x0}, + 873: {region: 0x166, script: 0x5b, flags: 0x0}, + 874: {region: 0xdb, script: 0x5b, flags: 0x0}, + 875: {region: 0x166, script: 0x5b, flags: 0x0}, + 876: {region: 0x52, script: 0x5b, flags: 0x0}, + 877: {region: 0x166, script: 0x5b, flags: 0x0}, + 878: {region: 0x166, script: 0x5b, flags: 0x0}, + 879: {region: 0xdb, script: 0x5b, flags: 0x0}, + 880: {region: 0x124, script: 0x57, flags: 0x0}, + 881: {region: 0x9a, script: 0x22, flags: 0x0}, + 882: {region: 0x10d, script: 0xcb, flags: 0x0}, + 883: {region: 0x166, script: 0x5b, flags: 0x0}, + 884: {region: 0x166, script: 0x5b, flags: 0x0}, + 885: {region: 0x85, script: 0x7e, flags: 0x0}, + 886: {region: 0x162, script: 0x5b, flags: 0x0}, + 887: {region: 0x166, script: 0x5b, flags: 0x0}, + 888: {region: 0x49, script: 0x17, flags: 0x0}, + 889: {region: 0x166, script: 0x5b, flags: 0x0}, + 890: {region: 0x162, script: 0x5b, flags: 0x0}, + 891: {region: 0x166, script: 0x5b, flags: 0x0}, + 892: {region: 0x166, script: 0x5b, flags: 0x0}, + 893: {region: 0x166, script: 0x5b, flags: 0x0}, + 894: {region: 0x166, script: 0x5b, flags: 0x0}, + 895: {region: 0x166, script: 0x5b, flags: 0x0}, + 896: {region: 0x118, script: 0x5b, flags: 0x0}, + 897: {region: 0x166, script: 0x5b, flags: 0x0}, + 898: {region: 0x166, script: 0x5b, flags: 0x0}, + 899: {region: 0x136, script: 0x5b, flags: 0x0}, + 900: {region: 0x166, script: 0x5b, flags: 0x0}, + 901: {region: 0x53, script: 0x5b, flags: 0x0}, + 902: {region: 0x166, script: 0x5b, flags: 0x0}, + 903: {region: 0xcf, script: 0x5b, flags: 0x0}, + 904: {region: 0x130, script: 0x5b, flags: 0x0}, + 905: {region: 0x132, script: 0x5b, flags: 0x0}, + 906: {region: 0x81, script: 0x5b, flags: 0x0}, + 907: {region: 0x79, script: 0x5b, flags: 0x0}, + 908: {region: 0x166, script: 0x5b, flags: 0x0}, + 910: {region: 0x166, script: 0x5b, flags: 0x0}, + 911: {region: 0x166, script: 0x5b, flags: 0x0}, + 912: {region: 0x70, script: 0x5b, flags: 0x0}, + 913: {region: 0x166, script: 0x5b, flags: 0x0}, + 914: {region: 0x166, script: 0x5b, flags: 0x0}, + 915: {region: 0x166, script: 0x5b, flags: 0x0}, + 916: {region: 0x166, script: 0x5b, flags: 0x0}, + 917: {region: 0x9a, script: 0x83, flags: 0x0}, + 918: {region: 0x166, script: 0x5b, flags: 0x0}, + 919: {region: 0x166, script: 0x5, flags: 0x0}, + 920: {region: 0x7e, script: 0x20, flags: 0x0}, + 921: {region: 0x136, script: 0x84, flags: 0x0}, + 922: {region: 0x166, script: 0x5, flags: 0x0}, + 923: {region: 0xc6, script: 0x82, flags: 0x0}, + 924: {region: 0x166, script: 0x5b, flags: 0x0}, + 925: {region: 0x2c, script: 0x3, flags: 0x1}, + 926: {region: 0xe8, script: 0x5b, flags: 0x0}, + 927: {region: 0x2f, script: 0x2, flags: 0x1}, + 928: {region: 0xe8, script: 0x5b, flags: 0x0}, + 929: {region: 0x30, script: 0x5b, flags: 0x0}, + 930: {region: 0xf1, script: 0x5b, flags: 0x0}, + 931: {region: 0x166, script: 0x5b, flags: 0x0}, + 932: {region: 0x79, script: 0x5b, flags: 0x0}, + 933: {region: 0xd7, script: 0x5b, flags: 0x0}, + 934: {region: 0x136, script: 0x5b, flags: 0x0}, + 935: {region: 0x49, script: 0x5b, flags: 0x0}, + 936: {region: 0x166, script: 0x5b, flags: 0x0}, + 937: {region: 0x9d, script: 0xfa, flags: 0x0}, + 938: {region: 0x166, script: 0x5b, flags: 0x0}, + 939: {region: 0x61, script: 0x5b, flags: 0x0}, + 940: {region: 0x166, script: 0x5, flags: 0x0}, + 941: {region: 0xb1, script: 
0x90, flags: 0x0}, + 943: {region: 0x166, script: 0x5b, flags: 0x0}, + 944: {region: 0x166, script: 0x5b, flags: 0x0}, + 945: {region: 0x9a, script: 0x12, flags: 0x0}, + 946: {region: 0xa5, script: 0x5b, flags: 0x0}, + 947: {region: 0xea, script: 0x5b, flags: 0x0}, + 948: {region: 0x166, script: 0x5b, flags: 0x0}, + 949: {region: 0x9f, script: 0x5b, flags: 0x0}, + 950: {region: 0x166, script: 0x5b, flags: 0x0}, + 951: {region: 0x166, script: 0x5b, flags: 0x0}, + 952: {region: 0x88, script: 0x34, flags: 0x0}, + 953: {region: 0x76, script: 0x5b, flags: 0x0}, + 954: {region: 0x166, script: 0x5b, flags: 0x0}, + 955: {region: 0xe9, script: 0x4e, flags: 0x0}, + 956: {region: 0x9d, script: 0x5, flags: 0x0}, + 957: {region: 0x1, script: 0x5b, flags: 0x0}, + 958: {region: 0x24, script: 0x5, flags: 0x0}, + 959: {region: 0x166, script: 0x5b, flags: 0x0}, + 960: {region: 0x41, script: 0x5b, flags: 0x0}, + 961: {region: 0x166, script: 0x5b, flags: 0x0}, + 962: {region: 0x7b, script: 0x5b, flags: 0x0}, + 963: {region: 0x166, script: 0x5b, flags: 0x0}, + 964: {region: 0xe5, script: 0x5b, flags: 0x0}, + 965: {region: 0x8a, script: 0x5b, flags: 0x0}, + 966: {region: 0x6a, script: 0x5b, flags: 0x0}, + 967: {region: 0x166, script: 0x5b, flags: 0x0}, + 968: {region: 0x9a, script: 0x22, flags: 0x0}, + 969: {region: 0x166, script: 0x5b, flags: 0x0}, + 970: {region: 0x103, script: 0x5b, flags: 0x0}, + 971: {region: 0x96, script: 0x5b, flags: 0x0}, + 972: {region: 0x166, script: 0x5b, flags: 0x0}, + 973: {region: 0x166, script: 0x5b, flags: 0x0}, + 974: {region: 0x9f, script: 0x5b, flags: 0x0}, + 975: {region: 0x166, script: 0x5, flags: 0x0}, + 976: {region: 0x9a, script: 0x5b, flags: 0x0}, + 977: {region: 0x31, script: 0x2, flags: 0x1}, + 978: {region: 0xdc, script: 0x22, flags: 0x0}, + 979: {region: 0x35, script: 0xe, flags: 0x0}, + 980: {region: 0x4e, script: 0x5b, flags: 0x0}, + 981: {region: 0x73, script: 0x5b, flags: 0x0}, + 982: {region: 0x4e, script: 0x5b, flags: 0x0}, + 983: {region: 0x9d, script: 0x5, flags: 0x0}, + 984: {region: 0x10d, script: 0x5b, flags: 0x0}, + 985: {region: 0x3a, script: 0x5b, flags: 0x0}, + 986: {region: 0x166, script: 0x5b, flags: 0x0}, + 987: {region: 0xd2, script: 0x5b, flags: 0x0}, + 988: {region: 0x105, script: 0x5b, flags: 0x0}, + 989: {region: 0x96, script: 0x5b, flags: 0x0}, + 990: {region: 0x130, script: 0x5b, flags: 0x0}, + 991: {region: 0x166, script: 0x5b, flags: 0x0}, + 992: {region: 0x166, script: 0x5b, flags: 0x0}, + 993: {region: 0x74, script: 0x5b, flags: 0x0}, + 994: {region: 0x107, script: 0x20, flags: 0x0}, + 995: {region: 0x131, script: 0x20, flags: 0x0}, + 996: {region: 0x10a, script: 0x5b, flags: 0x0}, + 997: {region: 0x108, script: 0x5b, flags: 0x0}, + 998: {region: 0x130, script: 0x5b, flags: 0x0}, + 999: {region: 0x166, script: 0x5b, flags: 0x0}, + 1000: {region: 0xa3, script: 0x4c, flags: 0x0}, + 1001: {region: 0x9a, script: 0x22, flags: 0x0}, + 1002: {region: 0x81, script: 0x5b, flags: 0x0}, + 1003: {region: 0x107, script: 0x20, flags: 0x0}, + 1004: {region: 0xa5, script: 0x5b, flags: 0x0}, + 1005: {region: 0x96, script: 0x5b, flags: 0x0}, + 1006: {region: 0x9a, script: 0x5b, flags: 0x0}, + 1007: {region: 0x115, script: 0x5b, flags: 0x0}, + 1008: {region: 0x9a, script: 0xcf, flags: 0x0}, + 1009: {region: 0x166, script: 0x5b, flags: 0x0}, + 1010: {region: 0x166, script: 0x5b, flags: 0x0}, + 1011: {region: 0x130, script: 0x5b, flags: 0x0}, + 1012: {region: 0x9f, script: 0x5b, flags: 0x0}, + 1013: {region: 0x9a, script: 0x22, flags: 0x0}, + 1014: {region: 
0x166, script: 0x5, flags: 0x0}, + 1015: {region: 0x9f, script: 0x5b, flags: 0x0}, + 1016: {region: 0x7c, script: 0x5b, flags: 0x0}, + 1017: {region: 0x49, script: 0x5b, flags: 0x0}, + 1018: {region: 0x33, script: 0x4, flags: 0x1}, + 1019: {region: 0x9f, script: 0x5b, flags: 0x0}, + 1020: {region: 0x9d, script: 0x5, flags: 0x0}, + 1021: {region: 0xdb, script: 0x5b, flags: 0x0}, + 1022: {region: 0x4f, script: 0x5b, flags: 0x0}, + 1023: {region: 0xd2, script: 0x5b, flags: 0x0}, + 1024: {region: 0xd0, script: 0x5b, flags: 0x0}, + 1025: {region: 0xc4, script: 0x5b, flags: 0x0}, + 1026: {region: 0x4c, script: 0x5b, flags: 0x0}, + 1027: {region: 0x97, script: 0x80, flags: 0x0}, + 1028: {region: 0xb7, script: 0x5b, flags: 0x0}, + 1029: {region: 0x166, script: 0x2c, flags: 0x0}, + 1030: {region: 0x166, script: 0x5b, flags: 0x0}, + 1032: {region: 0xbb, script: 0xeb, flags: 0x0}, + 1033: {region: 0x166, script: 0x5b, flags: 0x0}, + 1034: {region: 0xc5, script: 0x76, flags: 0x0}, + 1035: {region: 0x166, script: 0x5, flags: 0x0}, + 1036: {region: 0xb4, script: 0xd6, flags: 0x0}, + 1037: {region: 0x70, script: 0x5b, flags: 0x0}, + 1038: {region: 0x166, script: 0x5b, flags: 0x0}, + 1039: {region: 0x166, script: 0x5b, flags: 0x0}, + 1040: {region: 0x166, script: 0x5b, flags: 0x0}, + 1041: {region: 0x166, script: 0x5b, flags: 0x0}, + 1042: {region: 0x112, script: 0x5b, flags: 0x0}, + 1043: {region: 0x166, script: 0x5b, flags: 0x0}, + 1044: {region: 0xe9, script: 0x5, flags: 0x0}, + 1045: {region: 0x166, script: 0x5b, flags: 0x0}, + 1046: {region: 0x110, script: 0x5b, flags: 0x0}, + 1047: {region: 0x166, script: 0x5b, flags: 0x0}, + 1048: {region: 0xea, script: 0x5b, flags: 0x0}, + 1049: {region: 0x166, script: 0x5b, flags: 0x0}, + 1050: {region: 0x96, script: 0x5b, flags: 0x0}, + 1051: {region: 0x143, script: 0x5b, flags: 0x0}, + 1052: {region: 0x10d, script: 0x5b, flags: 0x0}, + 1054: {region: 0x10d, script: 0x5b, flags: 0x0}, + 1055: {region: 0x73, script: 0x5b, flags: 0x0}, + 1056: {region: 0x98, script: 0xcc, flags: 0x0}, + 1057: {region: 0x166, script: 0x5b, flags: 0x0}, + 1058: {region: 0x73, script: 0x5b, flags: 0x0}, + 1059: {region: 0x165, script: 0x5b, flags: 0x0}, + 1060: {region: 0x166, script: 0x5b, flags: 0x0}, + 1061: {region: 0xc4, script: 0x5b, flags: 0x0}, + 1062: {region: 0x166, script: 0x5b, flags: 0x0}, + 1063: {region: 0x166, script: 0x5b, flags: 0x0}, + 1064: {region: 0x166, script: 0x5b, flags: 0x0}, + 1065: {region: 0x116, script: 0x5b, flags: 0x0}, + 1066: {region: 0x166, script: 0x5b, flags: 0x0}, + 1067: {region: 0x166, script: 0x5b, flags: 0x0}, + 1068: {region: 0x124, script: 0xee, flags: 0x0}, + 1069: {region: 0x166, script: 0x5b, flags: 0x0}, + 1070: {region: 0x166, script: 0x5b, flags: 0x0}, + 1071: {region: 0x166, script: 0x5b, flags: 0x0}, + 1072: {region: 0x166, script: 0x5b, flags: 0x0}, + 1073: {region: 0x27, script: 0x5b, flags: 0x0}, + 1074: {region: 0x37, script: 0x5, flags: 0x1}, + 1075: {region: 0x9a, script: 0xd9, flags: 0x0}, + 1076: {region: 0x117, script: 0x5b, flags: 0x0}, + 1077: {region: 0x115, script: 0x5b, flags: 0x0}, + 1078: {region: 0x9a, script: 0x22, flags: 0x0}, + 1079: {region: 0x162, script: 0x5b, flags: 0x0}, + 1080: {region: 0x166, script: 0x5b, flags: 0x0}, + 1081: {region: 0x166, script: 0x5b, flags: 0x0}, + 1082: {region: 0x6e, script: 0x5b, flags: 0x0}, + 1083: {region: 0x162, script: 0x5b, flags: 0x0}, + 1084: {region: 0x166, script: 0x5b, flags: 0x0}, + 1085: {region: 0x61, script: 0x5b, flags: 0x0}, + 1086: {region: 0x96, script: 0x5b, 
flags: 0x0}, + 1087: {region: 0x166, script: 0x5b, flags: 0x0}, + 1088: {region: 0x166, script: 0x5b, flags: 0x0}, + 1089: {region: 0x130, script: 0x5b, flags: 0x0}, + 1090: {region: 0x166, script: 0x5b, flags: 0x0}, + 1091: {region: 0x85, script: 0x5b, flags: 0x0}, + 1092: {region: 0x10d, script: 0x5b, flags: 0x0}, + 1093: {region: 0x130, script: 0x5b, flags: 0x0}, + 1094: {region: 0x160, script: 0x5, flags: 0x0}, + 1095: {region: 0x4b, script: 0x5b, flags: 0x0}, + 1096: {region: 0x61, script: 0x5b, flags: 0x0}, + 1097: {region: 0x166, script: 0x5b, flags: 0x0}, + 1098: {region: 0x9a, script: 0x22, flags: 0x0}, + 1099: {region: 0x96, script: 0x5b, flags: 0x0}, + 1100: {region: 0x166, script: 0x5b, flags: 0x0}, + 1101: {region: 0x35, script: 0xe, flags: 0x0}, + 1102: {region: 0x9c, script: 0xde, flags: 0x0}, + 1103: {region: 0xea, script: 0x5b, flags: 0x0}, + 1104: {region: 0x9a, script: 0xe6, flags: 0x0}, + 1105: {region: 0xdc, script: 0x22, flags: 0x0}, + 1106: {region: 0x166, script: 0x5b, flags: 0x0}, + 1107: {region: 0x166, script: 0x5b, flags: 0x0}, + 1108: {region: 0x166, script: 0x5b, flags: 0x0}, + 1109: {region: 0x166, script: 0x5b, flags: 0x0}, + 1110: {region: 0x166, script: 0x5b, flags: 0x0}, + 1111: {region: 0x166, script: 0x5b, flags: 0x0}, + 1112: {region: 0x166, script: 0x5b, flags: 0x0}, + 1113: {region: 0x166, script: 0x5b, flags: 0x0}, + 1114: {region: 0xe8, script: 0x5b, flags: 0x0}, + 1115: {region: 0x166, script: 0x5b, flags: 0x0}, + 1116: {region: 0x166, script: 0x5b, flags: 0x0}, + 1117: {region: 0x9a, script: 0x53, flags: 0x0}, + 1118: {region: 0x53, script: 0xe4, flags: 0x0}, + 1119: {region: 0xdc, script: 0x22, flags: 0x0}, + 1120: {region: 0xdc, script: 0x22, flags: 0x0}, + 1121: {region: 0x9a, script: 0xe9, flags: 0x0}, + 1122: {region: 0x166, script: 0x5b, flags: 0x0}, + 1123: {region: 0x113, script: 0x5b, flags: 0x0}, + 1124: {region: 0x132, script: 0x5b, flags: 0x0}, + 1125: {region: 0x127, script: 0x5b, flags: 0x0}, + 1126: {region: 0x166, script: 0x5b, flags: 0x0}, + 1127: {region: 0x3c, script: 0x3, flags: 0x1}, + 1128: {region: 0x166, script: 0x5b, flags: 0x0}, + 1129: {region: 0x166, script: 0x5b, flags: 0x0}, + 1130: {region: 0x166, script: 0x5b, flags: 0x0}, + 1131: {region: 0x124, script: 0xee, flags: 0x0}, + 1132: {region: 0xdc, script: 0x22, flags: 0x0}, + 1133: {region: 0xdc, script: 0x22, flags: 0x0}, + 1134: {region: 0xdc, script: 0x22, flags: 0x0}, + 1135: {region: 0x70, script: 0x2c, flags: 0x0}, + 1136: {region: 0x166, script: 0x5b, flags: 0x0}, + 1137: {region: 0x6e, script: 0x2c, flags: 0x0}, + 1138: {region: 0x166, script: 0x5b, flags: 0x0}, + 1139: {region: 0x166, script: 0x5b, flags: 0x0}, + 1140: {region: 0x166, script: 0x5b, flags: 0x0}, + 1141: {region: 0xd7, script: 0x5b, flags: 0x0}, + 1142: {region: 0x128, script: 0x5b, flags: 0x0}, + 1143: {region: 0x126, script: 0x5b, flags: 0x0}, + 1144: {region: 0x32, script: 0x5b, flags: 0x0}, + 1145: {region: 0xdc, script: 0x22, flags: 0x0}, + 1146: {region: 0xe8, script: 0x5b, flags: 0x0}, + 1147: {region: 0x166, script: 0x5b, flags: 0x0}, + 1148: {region: 0x166, script: 0x5b, flags: 0x0}, + 1149: {region: 0x32, script: 0x5b, flags: 0x0}, + 1150: {region: 0xd5, script: 0x5b, flags: 0x0}, + 1151: {region: 0x166, script: 0x5b, flags: 0x0}, + 1152: {region: 0x162, script: 0x5b, flags: 0x0}, + 1153: {region: 0x166, script: 0x5b, flags: 0x0}, + 1154: {region: 0x12a, script: 0x5b, flags: 0x0}, + 1155: {region: 0x166, script: 0x5b, flags: 0x0}, + 1156: {region: 0xcf, script: 0x5b, flags: 0x0}, + 
1157: {region: 0x166, script: 0x5b, flags: 0x0}, + 1158: {region: 0xe7, script: 0x5b, flags: 0x0}, + 1159: {region: 0x166, script: 0x5b, flags: 0x0}, + 1160: {region: 0x166, script: 0x5b, flags: 0x0}, + 1161: {region: 0x166, script: 0x5b, flags: 0x0}, + 1162: {region: 0x12c, script: 0x5b, flags: 0x0}, + 1163: {region: 0x12c, script: 0x5b, flags: 0x0}, + 1164: {region: 0x12f, script: 0x5b, flags: 0x0}, + 1165: {region: 0x166, script: 0x5, flags: 0x0}, + 1166: {region: 0x162, script: 0x5b, flags: 0x0}, + 1167: {region: 0x88, script: 0x34, flags: 0x0}, + 1168: {region: 0xdc, script: 0x22, flags: 0x0}, + 1169: {region: 0xe8, script: 0x5b, flags: 0x0}, + 1170: {region: 0x43, script: 0xef, flags: 0x0}, + 1171: {region: 0x166, script: 0x5b, flags: 0x0}, + 1172: {region: 0x107, script: 0x20, flags: 0x0}, + 1173: {region: 0x166, script: 0x5b, flags: 0x0}, + 1174: {region: 0x166, script: 0x5b, flags: 0x0}, + 1175: {region: 0x132, script: 0x5b, flags: 0x0}, + 1176: {region: 0x166, script: 0x5b, flags: 0x0}, + 1177: {region: 0x124, script: 0xee, flags: 0x0}, + 1178: {region: 0x32, script: 0x5b, flags: 0x0}, + 1179: {region: 0x166, script: 0x5b, flags: 0x0}, + 1180: {region: 0x166, script: 0x5b, flags: 0x0}, + 1181: {region: 0xcf, script: 0x5b, flags: 0x0}, + 1182: {region: 0x166, script: 0x5b, flags: 0x0}, + 1183: {region: 0x166, script: 0x5b, flags: 0x0}, + 1184: {region: 0x12e, script: 0x5b, flags: 0x0}, + 1185: {region: 0x166, script: 0x5b, flags: 0x0}, + 1187: {region: 0x166, script: 0x5b, flags: 0x0}, + 1188: {region: 0xd5, script: 0x5b, flags: 0x0}, + 1189: {region: 0x53, script: 0xe7, flags: 0x0}, + 1190: {region: 0xe6, script: 0x5b, flags: 0x0}, + 1191: {region: 0x166, script: 0x5b, flags: 0x0}, + 1192: {region: 0x107, script: 0x20, flags: 0x0}, + 1193: {region: 0xbb, script: 0x5b, flags: 0x0}, + 1194: {region: 0x166, script: 0x5b, flags: 0x0}, + 1195: {region: 0x107, script: 0x20, flags: 0x0}, + 1196: {region: 0x3f, script: 0x4, flags: 0x1}, + 1197: {region: 0x11d, script: 0xf3, flags: 0x0}, + 1198: {region: 0x131, script: 0x20, flags: 0x0}, + 1199: {region: 0x76, script: 0x5b, flags: 0x0}, + 1200: {region: 0x2a, script: 0x5b, flags: 0x0}, + 1202: {region: 0x43, script: 0x3, flags: 0x1}, + 1203: {region: 0x9a, script: 0xe, flags: 0x0}, + 1204: {region: 0xe9, script: 0x5, flags: 0x0}, + 1205: {region: 0x166, script: 0x5b, flags: 0x0}, + 1206: {region: 0x166, script: 0x5b, flags: 0x0}, + 1207: {region: 0x166, script: 0x5b, flags: 0x0}, + 1208: {region: 0x166, script: 0x5b, flags: 0x0}, + 1209: {region: 0x166, script: 0x5b, flags: 0x0}, + 1210: {region: 0x166, script: 0x5b, flags: 0x0}, + 1211: {region: 0x166, script: 0x5b, flags: 0x0}, + 1212: {region: 0x46, script: 0x4, flags: 0x1}, + 1213: {region: 0x166, script: 0x5b, flags: 0x0}, + 1214: {region: 0xb5, script: 0xf4, flags: 0x0}, + 1215: {region: 0x166, script: 0x5b, flags: 0x0}, + 1216: {region: 0x162, script: 0x5b, flags: 0x0}, + 1217: {region: 0x9f, script: 0x5b, flags: 0x0}, + 1218: {region: 0x107, script: 0x5b, flags: 0x0}, + 1219: {region: 0x13f, script: 0x5b, flags: 0x0}, + 1220: {region: 0x11c, script: 0x5b, flags: 0x0}, + 1221: {region: 0x166, script: 0x5b, flags: 0x0}, + 1222: {region: 0x36, script: 0x5b, flags: 0x0}, + 1223: {region: 0x61, script: 0x5b, flags: 0x0}, + 1224: {region: 0xd2, script: 0x5b, flags: 0x0}, + 1225: {region: 0x1, script: 0x5b, flags: 0x0}, + 1226: {region: 0x107, script: 0x5b, flags: 0x0}, + 1227: {region: 0x6b, script: 0x5b, flags: 0x0}, + 1228: {region: 0x130, script: 0x5b, flags: 0x0}, + 1229: {region: 
0x166, script: 0x5b, flags: 0x0}, + 1230: {region: 0x36, script: 0x5b, flags: 0x0}, + 1231: {region: 0x4e, script: 0x5b, flags: 0x0}, + 1232: {region: 0x166, script: 0x5b, flags: 0x0}, + 1233: {region: 0x70, script: 0x2c, flags: 0x0}, + 1234: {region: 0x166, script: 0x5b, flags: 0x0}, + 1235: {region: 0xe8, script: 0x5b, flags: 0x0}, + 1236: {region: 0x2f, script: 0x5b, flags: 0x0}, + 1237: {region: 0x9a, script: 0xe9, flags: 0x0}, + 1238: {region: 0x9a, script: 0x22, flags: 0x0}, + 1239: {region: 0x166, script: 0x5b, flags: 0x0}, + 1240: {region: 0x166, script: 0x5b, flags: 0x0}, + 1241: {region: 0x166, script: 0x5b, flags: 0x0}, + 1242: {region: 0x166, script: 0x5b, flags: 0x0}, + 1243: {region: 0x166, script: 0x5b, flags: 0x0}, + 1244: {region: 0x166, script: 0x5b, flags: 0x0}, + 1245: {region: 0x166, script: 0x5b, flags: 0x0}, + 1246: {region: 0x166, script: 0x5b, flags: 0x0}, + 1247: {region: 0x166, script: 0x5b, flags: 0x0}, + 1248: {region: 0x141, script: 0x5b, flags: 0x0}, + 1249: {region: 0x166, script: 0x5b, flags: 0x0}, + 1250: {region: 0x166, script: 0x5b, flags: 0x0}, + 1251: {region: 0xa9, script: 0x5, flags: 0x0}, + 1252: {region: 0x166, script: 0x5b, flags: 0x0}, + 1253: {region: 0x115, script: 0x5b, flags: 0x0}, + 1254: {region: 0x166, script: 0x5b, flags: 0x0}, + 1255: {region: 0x166, script: 0x5b, flags: 0x0}, + 1256: {region: 0x166, script: 0x5b, flags: 0x0}, + 1257: {region: 0x166, script: 0x5b, flags: 0x0}, + 1258: {region: 0x9a, script: 0x22, flags: 0x0}, + 1259: {region: 0x53, script: 0x3b, flags: 0x0}, + 1260: {region: 0x166, script: 0x5b, flags: 0x0}, + 1261: {region: 0x166, script: 0x5b, flags: 0x0}, + 1262: {region: 0x41, script: 0x5b, flags: 0x0}, + 1263: {region: 0x166, script: 0x5b, flags: 0x0}, + 1264: {region: 0x12c, script: 0x18, flags: 0x0}, + 1265: {region: 0x166, script: 0x5b, flags: 0x0}, + 1266: {region: 0x162, script: 0x5b, flags: 0x0}, + 1267: {region: 0x166, script: 0x5b, flags: 0x0}, + 1268: {region: 0x12c, script: 0x63, flags: 0x0}, + 1269: {region: 0x12c, script: 0x64, flags: 0x0}, + 1270: {region: 0x7e, script: 0x2e, flags: 0x0}, + 1271: {region: 0x53, script: 0x68, flags: 0x0}, + 1272: {region: 0x10c, script: 0x6d, flags: 0x0}, + 1273: {region: 0x109, script: 0x79, flags: 0x0}, + 1274: {region: 0x9a, script: 0x22, flags: 0x0}, + 1275: {region: 0x132, script: 0x5b, flags: 0x0}, + 1276: {region: 0x166, script: 0x5b, flags: 0x0}, + 1277: {region: 0x9d, script: 0x93, flags: 0x0}, + 1278: {region: 0x166, script: 0x5b, flags: 0x0}, + 1279: {region: 0x15f, script: 0xce, flags: 0x0}, + 1280: {region: 0x166, script: 0x5b, flags: 0x0}, + 1281: {region: 0x166, script: 0x5b, flags: 0x0}, + 1282: {region: 0xdc, script: 0x22, flags: 0x0}, + 1283: {region: 0x166, script: 0x5b, flags: 0x0}, + 1284: {region: 0x166, script: 0x5b, flags: 0x0}, + 1285: {region: 0xd2, script: 0x5b, flags: 0x0}, + 1286: {region: 0x76, script: 0x5b, flags: 0x0}, + 1287: {region: 0x166, script: 0x5b, flags: 0x0}, + 1288: {region: 0x166, script: 0x5b, flags: 0x0}, + 1289: {region: 0x52, script: 0x5b, flags: 0x0}, + 1290: {region: 0x166, script: 0x5b, flags: 0x0}, + 1291: {region: 0x166, script: 0x5b, flags: 0x0}, + 1292: {region: 0x166, script: 0x5b, flags: 0x0}, + 1293: {region: 0x52, script: 0x5b, flags: 0x0}, + 1294: {region: 0x166, script: 0x5b, flags: 0x0}, + 1295: {region: 0x166, script: 0x5b, flags: 0x0}, + 1296: {region: 0x166, script: 0x5b, flags: 0x0}, + 1297: {region: 0x166, script: 0x5b, flags: 0x0}, + 1298: {region: 0x1, script: 0x3e, flags: 0x0}, + 1299: {region: 0x166, 
script: 0x5b, flags: 0x0},
+	1300: {region: 0x166, script: 0x5b, flags: 0x0},
+	1301: {region: 0x166, script: 0x5b, flags: 0x0},
+	1302: {region: 0x166, script: 0x5b, flags: 0x0},
+	1303: {region: 0x166, script: 0x5b, flags: 0x0},
+	1304: {region: 0xd7, script: 0x5b, flags: 0x0},
+	1305: {region: 0x166, script: 0x5b, flags: 0x0},
+	1306: {region: 0x166, script: 0x5b, flags: 0x0},
+	1307: {region: 0x166, script: 0x5b, flags: 0x0},
+	1308: {region: 0x41, script: 0x5b, flags: 0x0},
+	1309: {region: 0x166, script: 0x5b, flags: 0x0},
+	1310: {region: 0xd0, script: 0x5b, flags: 0x0},
+	1311: {region: 0x4a, script: 0x3, flags: 0x1},
+	1312: {region: 0x166, script: 0x5b, flags: 0x0},
+	1313: {region: 0x166, script: 0x5b, flags: 0x0},
+	1314: {region: 0x166, script: 0x5b, flags: 0x0},
+	1315: {region: 0x53, script: 0x5b, flags: 0x0},
+	1316: {region: 0x10c, script: 0x5b, flags: 0x0},
+	1318: {region: 0xa9, script: 0x5, flags: 0x0},
+	1319: {region: 0xda, script: 0x5b, flags: 0x0},
+	1320: {region: 0xbb, script: 0xeb, flags: 0x0},
+	1321: {region: 0x4d, script: 0x14, flags: 0x1},
+	1322: {region: 0x53, script: 0x7f, flags: 0x0},
+	1323: {region: 0x166, script: 0x5b, flags: 0x0},
+	1324: {region: 0x123, script: 0x5b, flags: 0x0},
+	1325: {region: 0xd1, script: 0x5b, flags: 0x0},
+	1326: {region: 0x166, script: 0x5b, flags: 0x0},
+	1327: {region: 0x162, script: 0x5b, flags: 0x0},
+	1329: {region: 0x12c, script: 0x5b, flags: 0x0},
+}
+
+// likelyLangList holds lists info associated with likelyLang.
+// Size: 582 bytes, 97 elements
+var likelyLangList = [97]likelyScriptRegion{
+	0: {region: 0x9d, script: 0x7, flags: 0x0},
+	1: {region: 0xa2, script: 0x7a, flags: 0x2},
+	2: {region: 0x11d, script: 0x87, flags: 0x2},
+	3: {region: 0x32, script: 0x5b, flags: 0x0},
+	4: {region: 0x9c, script: 0x5, flags: 0x4},
+	5: {region: 0x9d, script: 0x5, flags: 0x4},
+	6: {region: 0x107, script: 0x20, flags: 0x4},
+	7: {region: 0x9d, script: 0x5, flags: 0x2},
+	8: {region: 0x107, script: 0x20, flags: 0x0},
+	9: {region: 0x38, script: 0x2f, flags: 0x2},
+	10: {region: 0x136, script: 0x5b, flags: 0x0},
+	11: {region: 0x7c, script: 0xd1, flags: 0x2},
+	12: {region: 0x115, script: 0x5b, flags: 0x0},
+	13: {region: 0x85, script: 0x1, flags: 0x2},
+	14: {region: 0x5e, script: 0x1f, flags: 0x0},
+	15: {region: 0x88, script: 0x60, flags: 0x2},
+	16: {region: 0xd7, script: 0x5b, flags: 0x0},
+	17: {region: 0x52, script: 0x5, flags: 0x4},
+	18: {region: 0x10c, script: 0x5, flags: 0x4},
+	19: {region: 0xaf, script: 0x20, flags: 0x0},
+	20: {region: 0x24, script: 0x5, flags: 0x4},
+	21: {region: 0x53, script: 0x5, flags: 0x4},
+	22: {region: 0x9d, script: 0x5, flags: 0x4},
+	23: {region: 0xc6, script: 0x5, flags: 0x4},
+	24: {region: 0x53, script: 0x5, flags: 0x2},
+	25: {region: 0x12c, script: 0x5b, flags: 0x0},
+	26: {region: 0xb1, script: 0x5, flags: 0x4},
+	27: {region: 0x9c, script: 0x5, flags: 0x2},
+	28: {region: 0xa6, script: 0x20, flags: 0x0},
+	29: {region: 0x53, script: 0x5, flags: 0x4},
+	30: {region: 0x12c, script: 0x5b, flags: 0x4},
+	31: {region: 0x53, script: 0x5, flags: 0x2},
+	32: {region: 0x12c, script: 0x5b, flags: 0x2},
+	33: {region: 0xdc, script: 0x22, flags: 0x0},
+	34: {region: 0x9a, script: 0x5e, flags: 0x2},
+	35: {region: 0x84, script: 0x5b, flags: 0x0},
+	36: {region: 0x85, script: 0x7e, flags: 0x4},
+	37: {region: 0x85, script: 0x7e, flags: 0x2},
+	38: {region: 0xc6, script: 0x20, flags: 0x0},
+	39: {region: 0x53, script: 0x71, flags: 0x4},
+	40: {region: 0x53, script: 0x71, flags: 0x2},
+	41: {region: 0xd1, script: 0x5b, flags: 0x0},
+	42: {region: 0x4a, script: 0x5, flags: 0x4},
+	43: {region: 0x96, script: 0x5, flags: 0x4},
+	44: {region: 0x9a, script: 0x36, flags: 0x0},
+	45: {region: 0xe9, script: 0x5, flags: 0x4},
+	46: {region: 0xe9, script: 0x5, flags: 0x2},
+	47: {region: 0x9d, script: 0x8d, flags: 0x0},
+	48: {region: 0x53, script: 0x8e, flags: 0x2},
+	49: {region: 0xbb, script: 0xeb, flags: 0x0},
+	50: {region: 0xda, script: 0x5b, flags: 0x4},
+	51: {region: 0xe9, script: 0x5, flags: 0x0},
+	52: {region: 0x9a, script: 0x22, flags: 0x2},
+	53: {region: 0x9a, script: 0x50, flags: 0x2},
+	54: {region: 0x9a, script: 0xd5, flags: 0x2},
+	55: {region: 0x106, script: 0x20, flags: 0x0},
+	56: {region: 0xbe, script: 0x5b, flags: 0x4},
+	57: {region: 0x105, script: 0x5b, flags: 0x4},
+	58: {region: 0x107, script: 0x5b, flags: 0x4},
+	59: {region: 0x12c, script: 0x5b, flags: 0x4},
+	60: {region: 0x125, script: 0x20, flags: 0x0},
+	61: {region: 0xe9, script: 0x5, flags: 0x4},
+	62: {region: 0xe9, script: 0x5, flags: 0x2},
+	63: {region: 0x53, script: 0x5, flags: 0x0},
+	64: {region: 0xaf, script: 0x20, flags: 0x4},
+	65: {region: 0xc6, script: 0x20, flags: 0x4},
+	66: {region: 0xaf, script: 0x20, flags: 0x2},
+	67: {region: 0x9a, script: 0xe, flags: 0x0},
+	68: {region: 0xdc, script: 0x22, flags: 0x4},
+	69: {region: 0xdc, script: 0x22, flags: 0x2},
+	70: {region: 0x138, script: 0x5b, flags: 0x0},
+	71: {region: 0x24, script: 0x5, flags: 0x4},
+	72: {region: 0x53, script: 0x20, flags: 0x4},
+	73: {region: 0x24, script: 0x5, flags: 0x2},
+	74: {region: 0x8e, script: 0x3c, flags: 0x0},
+	75: {region: 0x53, script: 0x3b, flags: 0x4},
+	76: {region: 0x53, script: 0x3b, flags: 0x2},
+	77: {region: 0x53, script: 0x3b, flags: 0x0},
+	78: {region: 0x2f, script: 0x3c, flags: 0x4},
+	79: {region: 0x3e, script: 0x3c, flags: 0x4},
+	80: {region: 0x7c, script: 0x3c, flags: 0x4},
+	81: {region: 0x7f, script: 0x3c, flags: 0x4},
+	82: {region: 0x8e, script: 0x3c, flags: 0x4},
+	83: {region: 0x96, script: 0x3c, flags: 0x4},
+	84: {region: 0xc7, script: 0x3c, flags: 0x4},
+	85: {region: 0xd1, script: 0x3c, flags: 0x4},
+	86: {region: 0xe3, script: 0x3c, flags: 0x4},
+	87: {region: 0xe6, script: 0x3c, flags: 0x4},
+	88: {region: 0xe8, script: 0x3c, flags: 0x4},
+	89: {region: 0x117, script: 0x3c, flags: 0x4},
+	90: {region: 0x124, script: 0x3c, flags: 0x4},
+	91: {region: 0x12f, script: 0x3c, flags: 0x4},
+	92: {region: 0x136, script: 0x3c, flags: 0x4},
+	93: {region: 0x13f, script: 0x3c, flags: 0x4},
+	94: {region: 0x12f, script: 0x11, flags: 0x2},
+	95: {region: 0x12f, script: 0x37, flags: 0x2},
+	96: {region: 0x12f, script: 0x3c, flags: 0x2},
+}
+
+type likelyLangScript struct {
+	lang   uint16
+	script uint16
+	flags  uint8
+}
+
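[Editorial aside, not part of the patch: the comment that follows describes a two-level lookup, where a flagged entry's lang/script fields are an (index, size) pair into a companion list rather than direct values. A minimal sketch of how a table of this shape can be consumed, assuming hypothetical names (likelyForRegion, isList) and a self-contained re-declaration of likelyLangScript; the real resolution logic lives in golang.org/x/text/language.]

package main

import "fmt"

type likelyLangScript struct {
	lang   uint16
	script uint16
	flags  uint8
}

// isList is an assumed flag bit marking entries whose lang/script fields
// are an (index, size) pair into a shared list, per the table comment below.
const isList = 0x1

// likelyForRegion resolves a region index to its candidate entries: table
// plays the role of likelyRegion, list the role of likelyRegionList.
func likelyForRegion(table, list []likelyLangScript, region int) []likelyLangScript {
	e := table[region]
	if e.flags&isList != 0 {
		// lang is the start index and script the run length in list.
		return list[e.lang : e.lang+e.script]
	}
	return []likelyLangScript{e} // a single direct lang/script entry
}

func main() {
	list := []likelyLangScript{{lang: 0x13e, script: 0x5b}, {lang: 0x3a, script: 0x5}}
	table := []likelyLangScript{{lang: 0x0, script: 0x2, flags: isList}} // index 0, length 2
	fmt.Println(likelyForRegion(table, list, 0))
}

[Under that reading, entries below such as 36: {lang: 0x0, script: 0x2, flags: 0x1} would denote list entries 0 through 1 rather than a literal language/script pair.]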
+// likelyRegion is a lookup table, indexed by regionID, for the most likely
+// languages and scripts given incomplete information. If more entries exist
+// for a given regionID, lang and script are the index and size respectively
+// of the list in likelyRegionList.
+// TODO: exclude containers and user-definable regions from the list.
+// Size: 2154 bytes, 359 elements
+var likelyRegion = [359]likelyLangScript{
+	34: {lang: 0xd7, script: 0x5b, flags: 0x0},
+	35: {lang: 0x3a, script: 0x5, flags: 0x0},
+	36: {lang: 0x0, script: 0x2, flags: 0x1},
+	39: {lang: 0x2, script: 0x2, flags: 0x1},
+	40: {lang: 0x4, script: 0x2, flags: 0x1},
+	42: {lang: 0x3c0, script: 0x5b, flags: 0x0},
+	43: {lang: 0x0, script: 0x5b, flags: 0x0},
+	44: {lang: 0x13e, script: 0x5b, flags: 0x0},
+	45: {lang: 0x41b, script: 0x5b, flags: 0x0},
+	46: {lang: 0x10d, script: 0x5b, flags: 0x0},
+	48: {lang: 0x367, script: 0x5b, flags: 0x0},
+	49: {lang: 0x444, script: 0x5b, flags: 0x0},
+	50: {lang: 0x58, script: 0x5b, flags: 0x0},
+	51: {lang: 0x6, script: 0x2, flags: 0x1},
+	53: {lang: 0xa5, script: 0xe, flags: 0x0},
+	54: {lang: 0x367, script: 0x5b, flags: 0x0},
+	55: {lang: 0x15e, script: 0x5b, flags: 0x0},
+	56: {lang: 0x7e, script: 0x20, flags: 0x0},
+	57: {lang: 0x3a, script: 0x5, flags: 0x0},
+	58: {lang: 0x3d9, script: 0x5b, flags: 0x0},
+	59: {lang: 0x15e, script: 0x5b, flags: 0x0},
+	60: {lang: 0x15e, script: 0x5b, flags: 0x0},
+	62: {lang: 0x31f, script: 0x5b, flags: 0x0},
+	63: {lang: 0x13e, script: 0x5b, flags: 0x0},
+	64: {lang: 0x3a1, script: 0x5b, flags: 0x0},
+	65: {lang: 0x3c0, script: 0x5b, flags: 0x0},
+	67: {lang: 0x8, script: 0x2, flags: 0x1},
+	69: {lang: 0x0, script: 0x5b, flags: 0x0},
+	71: {lang: 0x71, script: 0x20, flags: 0x0},
+	73: {lang: 0x512, script: 0x3e, flags: 0x2},
+	74: {lang: 0x31f, script: 0x5, flags: 0x2},
+	75: {lang: 0x445, script: 0x5b, flags: 0x0},
+	76: {lang: 0x15e, script: 0x5b, flags: 0x0},
+	77: {lang: 0x15e, script: 0x5b, flags: 0x0},
+	78: {lang: 0x10d, script: 0x5b, flags: 0x0},
+	79: {lang: 0x15e, script: 0x5b, flags: 0x0},
+	81: {lang: 0x13e, script: 0x5b, flags: 0x0},
+	82: {lang: 0x15e, script: 0x5b, flags: 0x0},
+	83: {lang: 0xa, script: 0x4, flags: 0x1},
+	84: {lang: 0x13e, script: 0x5b, flags: 0x0},
+	85: {lang: 0x0, script: 0x5b, flags: 0x0},
+	87: {lang: 0x13e, script: 0x5b, flags: 0x0},
+	90: {lang: 0x13e, script: 0x5b, flags: 0x0},
+	91: {lang: 0x3c0, script: 0x5b, flags: 0x0},
+	92: {lang: 0x3a1, script: 0x5b, flags: 0x0},
+	94: {lang: 0xe, script: 0x2, flags: 0x1},
+	95: {lang: 0xfa, script: 0x5b, flags: 0x0},
+	97: {lang: 0x10d, script: 0x5b, flags: 0x0},
+	99: {lang: 0x1, script: 0x5b, flags: 0x0},
+	100: {lang: 0x101, script: 0x5b, flags: 0x0},
+	102: {lang: 0x13e, script: 0x5b, flags: 0x0},
+	104: {lang: 0x10, script: 0x2, flags: 0x1},
+	105: {lang: 0x13e, script: 0x5b, flags: 0x0},
+	106: {lang: 0x13e, script: 0x5b, flags: 0x0},
+	107: {lang: 0x140, script: 0x5b, flags: 0x0},
+	108: {lang: 0x3a, script: 0x5, flags: 0x0},
+	109: {lang: 0x3a, script: 0x5, flags: 0x0},
+	110: {lang: 0x46f, script: 0x2c, flags: 0x0},
+	111: {lang: 0x13e, script: 0x5b, flags: 0x0},
+	112: {lang: 0x12, script: 0x2, flags: 0x1},
+	114: {lang: 0x10d, script: 0x5b, flags: 0x0},
+	115: {lang: 0x151, script: 0x5b, flags: 0x0},
+	116: {lang: 0x1c0, script: 0x22, flags: 0x2},
+	119: {lang: 0x158, script: 0x5b, flags: 0x0},
+	121: {lang: 0x15e, script: 0x5b, flags: 0x0},
+	123: {lang: 0x15e, script: 0x5b, flags: 0x0},
+	124: {lang: 0x14, script: 0x2, flags: 0x1},
+	126: {lang: 0x16, script: 0x3, flags: 0x1},
+	127: {lang: 0x15e, script: 0x5b, flags: 0x0},
+	129: {lang: 0x21, script: 0x5b, flags: 0x0},
+	131: {lang: 0x245, script: 0x5b, flags: 0x0},
+	133: {lang: 0x15e, script: 0x5b, flags: 0x0},
+	134: {lang: 0x15e, script: 0x5b, flags: 0x0},
+	135: {lang: 0x13e, script: 0x5b, flags: 0x0},
+	136: {lang: 0x19, script: 0x2, flags: 0x1},
+	137: {lang: 0x0, script: 0x5b, flags: 0x0},
+	138: {lang: 0x13e, script: 0x5b, flags: 0x0},
+	140: {lang: 0x3c0, script: 0x5b, flags: 0x0},
+	142: {lang: 0x529, script: 0x3c, flags: 0x0},
+	143: {lang: 0x0, script: 0x5b, flags: 0x0},
+	144: {lang: 0x13e, script: 0x5b, flags: 0x0},
+	145: {lang: 0x1d1, script: 0x5b, flags: 0x0},
+	146: {lang: 0x1d4, script: 0x5b, flags: 0x0},
+	147: {lang: 0x1d5, script: 0x5b, flags: 0x0},
+	149: {lang: 0x13e, script: 0x5b, flags: 0x0},
+	150: {lang: 0x1b, script: 0x2, flags: 0x1},
+	152: {lang: 0x1bc, script: 0x3e, flags: 0x0},
+	154: {lang: 0x1d, script: 0x3, flags: 0x1},
+	156: {lang: 0x3a, script: 0x5, flags: 0x0},
+	157: {lang: 0x20, script: 0x2, flags: 0x1},
+	158: {lang: 0x1f8, script: 0x5b, flags: 0x0},
+	159: {lang: 0x1f9, script: 0x5b, flags: 0x0},
+	162: {lang: 0x3a, script: 0x5, flags: 0x0},
+	163: {lang: 0x200, script: 0x49, flags: 0x0},
+	165: {lang: 0x445, script: 0x5b, flags: 0x0},
+	166: {lang: 0x28a, script: 0x20, flags: 0x0},
+	167: {lang: 0x22, script: 0x3, flags: 0x1},
+	169: {lang: 0x25, script: 0x2, flags: 0x1},
+	171: {lang: 0x254, script: 0x54, flags: 0x0},
+	172: {lang: 0x254, script: 0x54, flags: 0x0},
+	173: {lang: 0x3a, script: 0x5, flags: 0x0},
+	175: {lang: 0x3e2, script: 0x20, flags: 0x0},
+	176: {lang: 0x27, script: 0x2, flags: 0x1},
+	177: {lang: 0x3a, script: 0x5, flags: 0x0},
+	179: {lang: 0x10d, script: 0x5b, flags: 0x0},
+	180: {lang: 0x40c, script: 0xd6, flags: 0x0},
+	182: {lang: 0x43b, script: 0x5b, flags: 0x0},
+	183: {lang: 0x2c0, script: 0x5b, flags: 0x0},
+	184: {lang: 0x15e, script: 0x5b, flags: 0x0},
+	185: {lang: 0x2c7, script: 0x5b, flags: 0x0},
+	186: {lang: 0x3a, script: 0x5, flags: 0x0},
+	187: {lang: 0x29, script: 0x2, flags: 0x1},
+	188: {lang: 0x15e, script: 0x5b, flags: 0x0},
+	189: {lang: 0x2b, script: 0x2, flags: 0x1},
+	190: {lang: 0x432, script: 0x5b, flags: 0x0},
+	191: {lang: 0x15e, script: 0x5b, flags: 0x0},
+	192: {lang: 0x2f1, script: 0x5b, flags: 0x0},
+	195: {lang: 0x2d, script: 0x2, flags: 0x1},
+	196: {lang: 0xa0, script: 0x5b, flags: 0x0},
+	197: {lang: 0x2f, script: 0x2, flags: 0x1},
+	198: {lang: 0x31, script: 0x2, flags: 0x1},
+	199: {lang: 0x33, script: 0x2, flags: 0x1},
+	201: {lang: 0x15e, script: 0x5b, flags: 0x0},
+	202: {lang: 0x35, script: 0x2, flags: 0x1},
+	204: {lang: 0x320, script: 0x5b, flags: 0x0},
+	205: {lang: 0x37, script: 0x3, flags: 0x1},
+	206: {lang: 0x128, script: 0xed, flags: 0x0},
+	208: {lang: 0x13e, script: 0x5b, flags: 0x0},
+	209: {lang: 0x31f, script: 0x5b, flags: 0x0},
+	210: {lang: 0x3c0, script: 0x5b, flags: 0x0},
+	211: {lang: 0x16, script: 0x5b, flags: 0x0},
+	212: {lang: 0x15e, script: 0x5b, flags: 0x0},
+	213: {lang: 0x1b4, script: 0x5b, flags: 0x0},
+	215: {lang: 0x1b4, script: 0x5, flags: 0x2},
+	217: {lang: 0x13e, script: 0x5b, flags: 0x0},
+	218: {lang: 0x367, script: 0x5b, flags: 0x0},
+	219: {lang: 0x347, script: 0x5b, flags: 0x0},
+	220: {lang: 0x351, script: 0x22, flags: 0x0},
+	226: {lang: 0x3a, script: 0x5, flags: 0x0},
+	227: {lang: 0x13e, script: 0x5b, flags: 0x0},
+	229: {lang: 0x13e, script: 0x5b, flags: 0x0},
+	230: {lang: 0x15e, script: 0x5b, flags: 0x0},
+	231: {lang: 0x486, script: 0x5b, flags: 0x0},
+	232: {lang: 0x153, script: 0x5b, flags: 0x0},
+	233: {lang: 0x3a, script: 0x3, flags: 0x1},
+	234: {lang: 0x3b3, script: 0x5b, flags: 0x0},
+	235: {lang: 0x15e, script: 0x5b, flags: 0x0},
+	237: {lang: 0x13e, script: 0x5b, flags: 0x0},
+	238: {lang: 0x3a, script: 0x5, flags: 0x0},
+	239: {lang: 0x3c0, script: 0x5b,
flags: 0x0}, + 241: {lang: 0x3a2, script: 0x5b, flags: 0x0}, + 242: {lang: 0x194, script: 0x5b, flags: 0x0}, + 244: {lang: 0x3a, script: 0x5, flags: 0x0}, + 259: {lang: 0x15e, script: 0x5b, flags: 0x0}, + 261: {lang: 0x3d, script: 0x2, flags: 0x1}, + 262: {lang: 0x432, script: 0x20, flags: 0x0}, + 263: {lang: 0x3f, script: 0x2, flags: 0x1}, + 264: {lang: 0x3e5, script: 0x5b, flags: 0x0}, + 265: {lang: 0x3a, script: 0x5, flags: 0x0}, + 267: {lang: 0x15e, script: 0x5b, flags: 0x0}, + 268: {lang: 0x3a, script: 0x5, flags: 0x0}, + 269: {lang: 0x41, script: 0x2, flags: 0x1}, + 272: {lang: 0x416, script: 0x5b, flags: 0x0}, + 273: {lang: 0x347, script: 0x5b, flags: 0x0}, + 274: {lang: 0x43, script: 0x2, flags: 0x1}, + 276: {lang: 0x1f9, script: 0x5b, flags: 0x0}, + 277: {lang: 0x15e, script: 0x5b, flags: 0x0}, + 278: {lang: 0x429, script: 0x5b, flags: 0x0}, + 279: {lang: 0x367, script: 0x5b, flags: 0x0}, + 281: {lang: 0x3c0, script: 0x5b, flags: 0x0}, + 283: {lang: 0x13e, script: 0x5b, flags: 0x0}, + 285: {lang: 0x45, script: 0x2, flags: 0x1}, + 289: {lang: 0x15e, script: 0x5b, flags: 0x0}, + 290: {lang: 0x15e, script: 0x5b, flags: 0x0}, + 291: {lang: 0x47, script: 0x2, flags: 0x1}, + 292: {lang: 0x49, script: 0x3, flags: 0x1}, + 293: {lang: 0x4c, script: 0x2, flags: 0x1}, + 294: {lang: 0x477, script: 0x5b, flags: 0x0}, + 295: {lang: 0x3c0, script: 0x5b, flags: 0x0}, + 296: {lang: 0x476, script: 0x5b, flags: 0x0}, + 297: {lang: 0x4e, script: 0x2, flags: 0x1}, + 298: {lang: 0x482, script: 0x5b, flags: 0x0}, + 300: {lang: 0x50, script: 0x4, flags: 0x1}, + 302: {lang: 0x4a0, script: 0x5b, flags: 0x0}, + 303: {lang: 0x54, script: 0x2, flags: 0x1}, + 304: {lang: 0x445, script: 0x5b, flags: 0x0}, + 305: {lang: 0x56, script: 0x3, flags: 0x1}, + 306: {lang: 0x445, script: 0x5b, flags: 0x0}, + 310: {lang: 0x512, script: 0x3e, flags: 0x2}, + 311: {lang: 0x13e, script: 0x5b, flags: 0x0}, + 312: {lang: 0x4bc, script: 0x5b, flags: 0x0}, + 313: {lang: 0x1f9, script: 0x5b, flags: 0x0}, + 316: {lang: 0x13e, script: 0x5b, flags: 0x0}, + 319: {lang: 0x4c3, script: 0x5b, flags: 0x0}, + 320: {lang: 0x8a, script: 0x5b, flags: 0x0}, + 321: {lang: 0x15e, script: 0x5b, flags: 0x0}, + 323: {lang: 0x41b, script: 0x5b, flags: 0x0}, + 334: {lang: 0x59, script: 0x2, flags: 0x1}, + 351: {lang: 0x3a, script: 0x5, flags: 0x0}, + 352: {lang: 0x5b, script: 0x2, flags: 0x1}, + 357: {lang: 0x423, script: 0x5b, flags: 0x0}, +} + +// likelyRegionList holds lists info associated with likelyRegion. 
+// Size: 558 bytes, 93 elements +var likelyRegionList = [93]likelyLangScript{ + 0: {lang: 0x148, script: 0x5, flags: 0x0}, + 1: {lang: 0x476, script: 0x5b, flags: 0x0}, + 2: {lang: 0x431, script: 0x5b, flags: 0x0}, + 3: {lang: 0x2ff, script: 0x20, flags: 0x0}, + 4: {lang: 0x1d7, script: 0x8, flags: 0x0}, + 5: {lang: 0x274, script: 0x5b, flags: 0x0}, + 6: {lang: 0xb7, script: 0x5b, flags: 0x0}, + 7: {lang: 0x432, script: 0x20, flags: 0x0}, + 8: {lang: 0x12d, script: 0xef, flags: 0x0}, + 9: {lang: 0x351, script: 0x22, flags: 0x0}, + 10: {lang: 0x529, script: 0x3b, flags: 0x0}, + 11: {lang: 0x4ac, script: 0x5, flags: 0x0}, + 12: {lang: 0x523, script: 0x5b, flags: 0x0}, + 13: {lang: 0x29a, script: 0xee, flags: 0x0}, + 14: {lang: 0x136, script: 0x34, flags: 0x0}, + 15: {lang: 0x48a, script: 0x5b, flags: 0x0}, + 16: {lang: 0x3a, script: 0x5, flags: 0x0}, + 17: {lang: 0x15e, script: 0x5b, flags: 0x0}, + 18: {lang: 0x27, script: 0x2c, flags: 0x0}, + 19: {lang: 0x139, script: 0x5b, flags: 0x0}, + 20: {lang: 0x26a, script: 0x5, flags: 0x2}, + 21: {lang: 0x512, script: 0x3e, flags: 0x2}, + 22: {lang: 0x210, script: 0x2e, flags: 0x0}, + 23: {lang: 0x5, script: 0x20, flags: 0x0}, + 24: {lang: 0x274, script: 0x5b, flags: 0x0}, + 25: {lang: 0x136, script: 0x34, flags: 0x0}, + 26: {lang: 0x2ff, script: 0x20, flags: 0x0}, + 27: {lang: 0x1e1, script: 0x5b, flags: 0x0}, + 28: {lang: 0x31f, script: 0x5, flags: 0x0}, + 29: {lang: 0x1be, script: 0x22, flags: 0x0}, + 30: {lang: 0x4b4, script: 0x5, flags: 0x0}, + 31: {lang: 0x236, script: 0x76, flags: 0x0}, + 32: {lang: 0x148, script: 0x5, flags: 0x0}, + 33: {lang: 0x476, script: 0x5b, flags: 0x0}, + 34: {lang: 0x24a, script: 0x4f, flags: 0x0}, + 35: {lang: 0xe6, script: 0x5, flags: 0x0}, + 36: {lang: 0x226, script: 0xee, flags: 0x0}, + 37: {lang: 0x3a, script: 0x5, flags: 0x0}, + 38: {lang: 0x15e, script: 0x5b, flags: 0x0}, + 39: {lang: 0x2b8, script: 0x58, flags: 0x0}, + 40: {lang: 0x226, script: 0xee, flags: 0x0}, + 41: {lang: 0x3a, script: 0x5, flags: 0x0}, + 42: {lang: 0x15e, script: 0x5b, flags: 0x0}, + 43: {lang: 0x3dc, script: 0x5b, flags: 0x0}, + 44: {lang: 0x4ae, script: 0x20, flags: 0x0}, + 45: {lang: 0x2ff, script: 0x20, flags: 0x0}, + 46: {lang: 0x431, script: 0x5b, flags: 0x0}, + 47: {lang: 0x331, script: 0x76, flags: 0x0}, + 48: {lang: 0x213, script: 0x5b, flags: 0x0}, + 49: {lang: 0x30b, script: 0x20, flags: 0x0}, + 50: {lang: 0x242, script: 0x5, flags: 0x0}, + 51: {lang: 0x529, script: 0x3c, flags: 0x0}, + 52: {lang: 0x3c0, script: 0x5b, flags: 0x0}, + 53: {lang: 0x3a, script: 0x5, flags: 0x0}, + 54: {lang: 0x15e, script: 0x5b, flags: 0x0}, + 55: {lang: 0x2ed, script: 0x5b, flags: 0x0}, + 56: {lang: 0x4b4, script: 0x5, flags: 0x0}, + 57: {lang: 0x88, script: 0x22, flags: 0x0}, + 58: {lang: 0x4b4, script: 0x5, flags: 0x0}, + 59: {lang: 0x4b4, script: 0x5, flags: 0x0}, + 60: {lang: 0xbe, script: 0x22, flags: 0x0}, + 61: {lang: 0x3dc, script: 0x5b, flags: 0x0}, + 62: {lang: 0x7e, script: 0x20, flags: 0x0}, + 63: {lang: 0x3e2, script: 0x20, flags: 0x0}, + 64: {lang: 0x267, script: 0x5b, flags: 0x0}, + 65: {lang: 0x444, script: 0x5b, flags: 0x0}, + 66: {lang: 0x512, script: 0x3e, flags: 0x0}, + 67: {lang: 0x412, script: 0x5b, flags: 0x0}, + 68: {lang: 0x4ae, script: 0x20, flags: 0x0}, + 69: {lang: 0x3a, script: 0x5, flags: 0x0}, + 70: {lang: 0x15e, script: 0x5b, flags: 0x0}, + 71: {lang: 0x15e, script: 0x5b, flags: 0x0}, + 72: {lang: 0x35, script: 0x5, flags: 0x0}, + 73: {lang: 0x46b, script: 0xee, flags: 0x0}, + 74: {lang: 0x2ec, script: 0x5, flags: 
0x0}, + 75: {lang: 0x30f, script: 0x76, flags: 0x0}, + 76: {lang: 0x467, script: 0x20, flags: 0x0}, + 77: {lang: 0x148, script: 0x5, flags: 0x0}, + 78: {lang: 0x3a, script: 0x5, flags: 0x0}, + 79: {lang: 0x15e, script: 0x5b, flags: 0x0}, + 80: {lang: 0x48a, script: 0x5b, flags: 0x0}, + 81: {lang: 0x58, script: 0x5, flags: 0x0}, + 82: {lang: 0x219, script: 0x20, flags: 0x0}, + 83: {lang: 0x81, script: 0x34, flags: 0x0}, + 84: {lang: 0x529, script: 0x3c, flags: 0x0}, + 85: {lang: 0x48c, script: 0x5b, flags: 0x0}, + 86: {lang: 0x4ae, script: 0x20, flags: 0x0}, + 87: {lang: 0x512, script: 0x3e, flags: 0x0}, + 88: {lang: 0x3b3, script: 0x5b, flags: 0x0}, + 89: {lang: 0x431, script: 0x5b, flags: 0x0}, + 90: {lang: 0x432, script: 0x20, flags: 0x0}, + 91: {lang: 0x15e, script: 0x5b, flags: 0x0}, + 92: {lang: 0x446, script: 0x5, flags: 0x0}, +} + +type likelyTag struct { + lang uint16 + region uint16 + script uint16 +} + +// Size: 198 bytes, 33 elements +var likelyRegionGroup = [33]likelyTag{ + 1: {lang: 0x139, region: 0xd7, script: 0x5b}, + 2: {lang: 0x139, region: 0x136, script: 0x5b}, + 3: {lang: 0x3c0, region: 0x41, script: 0x5b}, + 4: {lang: 0x139, region: 0x2f, script: 0x5b}, + 5: {lang: 0x139, region: 0xd7, script: 0x5b}, + 6: {lang: 0x13e, region: 0xd0, script: 0x5b}, + 7: {lang: 0x445, region: 0x130, script: 0x5b}, + 8: {lang: 0x3a, region: 0x6c, script: 0x5}, + 9: {lang: 0x445, region: 0x4b, script: 0x5b}, + 10: {lang: 0x139, region: 0x162, script: 0x5b}, + 11: {lang: 0x139, region: 0x136, script: 0x5b}, + 12: {lang: 0x139, region: 0x136, script: 0x5b}, + 13: {lang: 0x13e, region: 0x5a, script: 0x5b}, + 14: {lang: 0x529, region: 0x53, script: 0x3b}, + 15: {lang: 0x1be, region: 0x9a, script: 0x22}, + 16: {lang: 0x1e1, region: 0x96, script: 0x5b}, + 17: {lang: 0x1f9, region: 0x9f, script: 0x5b}, + 18: {lang: 0x139, region: 0x2f, script: 0x5b}, + 19: {lang: 0x139, region: 0xe7, script: 0x5b}, + 20: {lang: 0x139, region: 0x8b, script: 0x5b}, + 21: {lang: 0x41b, region: 0x143, script: 0x5b}, + 22: {lang: 0x529, region: 0x53, script: 0x3b}, + 23: {lang: 0x4bc, region: 0x138, script: 0x5b}, + 24: {lang: 0x3a, region: 0x109, script: 0x5}, + 25: {lang: 0x3e2, region: 0x107, script: 0x20}, + 26: {lang: 0x3e2, region: 0x107, script: 0x20}, + 27: {lang: 0x139, region: 0x7c, script: 0x5b}, + 28: {lang: 0x10d, region: 0x61, script: 0x5b}, + 29: {lang: 0x139, region: 0xd7, script: 0x5b}, + 30: {lang: 0x13e, region: 0x1f, script: 0x5b}, + 31: {lang: 0x139, region: 0x9b, script: 0x5b}, + 32: {lang: 0x139, region: 0x7c, script: 0x5b}, +} + +// Size: 264 bytes, 33 elements +var regionContainment = [33]uint64{ + // Entry 0 - 1F + 0x00000001ffffffff, 0x00000000200007a2, 0x0000000000003044, 0x0000000000000008, + 0x00000000803c0010, 0x0000000000000020, 0x0000000000000040, 0x0000000000000080, + 0x0000000000000100, 0x0000000000000200, 0x0000000000000400, 0x000000004000384c, + 0x0000000000001000, 0x0000000000002000, 0x0000000000004000, 0x0000000000008000, + 0x0000000000010000, 0x0000000000020000, 0x0000000000040000, 0x0000000000080000, + 0x0000000000100000, 0x0000000000200000, 0x0000000001c1c000, 0x0000000000800000, + 0x0000000001000000, 0x000000001e020000, 0x0000000004000000, 0x0000000008000000, + 0x0000000010000000, 0x00000000200006a0, 0x0000000040002048, 0x0000000080000000, + // Entry 20 - 3F + 0x0000000100000000, +} + +// regionInclusion maps region identifiers to sets of regions in regionInclusionBits, +// where each set holds all groupings that are directly connected in a region +// containment graph. 
+// Size: 359 bytes, 359 elements +var regionInclusion = [359]uint8{ + // Entry 0 - 3F + 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, + 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, + 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, + 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, + 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x26, 0x23, + 0x24, 0x26, 0x27, 0x22, 0x28, 0x29, 0x2a, 0x2b, + 0x26, 0x2c, 0x24, 0x23, 0x26, 0x25, 0x2a, 0x2d, + 0x2e, 0x24, 0x2f, 0x2d, 0x26, 0x30, 0x31, 0x28, + // Entry 40 - 7F + 0x26, 0x28, 0x26, 0x25, 0x31, 0x22, 0x32, 0x33, + 0x34, 0x30, 0x22, 0x27, 0x27, 0x27, 0x35, 0x2d, + 0x29, 0x28, 0x27, 0x36, 0x28, 0x22, 0x21, 0x34, + 0x23, 0x21, 0x26, 0x2d, 0x26, 0x22, 0x37, 0x2e, + 0x35, 0x2a, 0x22, 0x2f, 0x38, 0x26, 0x26, 0x21, + 0x39, 0x39, 0x28, 0x38, 0x39, 0x39, 0x2f, 0x3a, + 0x2f, 0x20, 0x21, 0x38, 0x3b, 0x28, 0x3c, 0x2c, + 0x21, 0x2a, 0x35, 0x27, 0x38, 0x26, 0x24, 0x28, + // Entry 80 - BF + 0x2c, 0x2d, 0x23, 0x30, 0x2d, 0x2d, 0x26, 0x27, + 0x3a, 0x22, 0x34, 0x3c, 0x2d, 0x28, 0x36, 0x22, + 0x34, 0x3a, 0x26, 0x2e, 0x21, 0x39, 0x31, 0x38, + 0x24, 0x2c, 0x25, 0x22, 0x24, 0x25, 0x2c, 0x3a, + 0x2c, 0x26, 0x24, 0x36, 0x21, 0x2f, 0x3d, 0x31, + 0x3c, 0x2f, 0x26, 0x36, 0x36, 0x24, 0x26, 0x3d, + 0x31, 0x24, 0x26, 0x35, 0x25, 0x2d, 0x32, 0x38, + 0x2a, 0x38, 0x39, 0x39, 0x35, 0x33, 0x23, 0x26, + // Entry C0 - FF + 0x2f, 0x3c, 0x21, 0x23, 0x2d, 0x31, 0x36, 0x36, + 0x3c, 0x26, 0x2d, 0x26, 0x3a, 0x2f, 0x25, 0x2f, + 0x34, 0x31, 0x2f, 0x32, 0x3b, 0x2d, 0x2b, 0x2d, + 0x21, 0x34, 0x2a, 0x2c, 0x25, 0x21, 0x3c, 0x24, + 0x29, 0x2b, 0x24, 0x34, 0x21, 0x28, 0x29, 0x3b, + 0x31, 0x25, 0x2e, 0x30, 0x29, 0x26, 0x24, 0x3a, + 0x21, 0x3c, 0x28, 0x21, 0x24, 0x21, 0x21, 0x1f, + 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, + // Entry 100 - 13F + 0x21, 0x21, 0x21, 0x2f, 0x21, 0x2e, 0x23, 0x33, + 0x2f, 0x24, 0x3b, 0x2f, 0x39, 0x38, 0x31, 0x2d, + 0x3a, 0x2c, 0x2e, 0x2d, 0x23, 0x2d, 0x2f, 0x28, + 0x2f, 0x27, 0x33, 0x34, 0x26, 0x24, 0x32, 0x22, + 0x26, 0x27, 0x22, 0x2d, 0x31, 0x3d, 0x29, 0x31, + 0x3d, 0x39, 0x29, 0x31, 0x24, 0x26, 0x29, 0x36, + 0x2f, 0x33, 0x2f, 0x21, 0x22, 0x21, 0x30, 0x28, + 0x3d, 0x23, 0x26, 0x21, 0x28, 0x26, 0x26, 0x31, + // Entry 140 - 17F + 0x3b, 0x29, 0x21, 0x29, 0x21, 0x21, 0x21, 0x21, + 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x23, 0x21, + 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, + 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x24, 0x24, + 0x2f, 0x23, 0x32, 0x2f, 0x27, 0x2f, 0x21, +} + +// regionInclusionBits is an array of bit vectors where every vector represents +// a set of region groupings. These sets are used to compute the distance +// between two regions for the purpose of language matching. 
+// Size: 584 bytes, 73 elements +var regionInclusionBits = [73]uint64{ + // Entry 0 - 1F + 0x0000000102400813, 0x00000000200007a3, 0x0000000000003844, 0x0000000040000808, + 0x00000000803c0011, 0x0000000020000022, 0x0000000040000844, 0x0000000020000082, + 0x0000000000000102, 0x0000000020000202, 0x0000000020000402, 0x000000004000384d, + 0x0000000000001804, 0x0000000040002804, 0x0000000000404000, 0x0000000000408000, + 0x0000000000410000, 0x0000000002020000, 0x0000000000040010, 0x0000000000080010, + 0x0000000000100010, 0x0000000000200010, 0x0000000001c1c001, 0x0000000000c00000, + 0x0000000001400000, 0x000000001e020001, 0x0000000006000000, 0x000000000a000000, + 0x0000000012000000, 0x00000000200006a2, 0x0000000040002848, 0x0000000080000010, + // Entry 20 - 3F + 0x0000000100000001, 0x0000000000000001, 0x0000000080000000, 0x0000000000020000, + 0x0000000001000000, 0x0000000000008000, 0x0000000000002000, 0x0000000000000200, + 0x0000000000000008, 0x0000000000200000, 0x0000000110000000, 0x0000000000040000, + 0x0000000008000000, 0x0000000000000020, 0x0000000104000000, 0x0000000000000080, + 0x0000000000001000, 0x0000000000010000, 0x0000000000000400, 0x0000000004000000, + 0x0000000000000040, 0x0000000010000000, 0x0000000000004000, 0x0000000101000000, + 0x0000000108000000, 0x0000000000000100, 0x0000000100020000, 0x0000000000080000, + 0x0000000000100000, 0x0000000000800000, 0x00000001ffffffff, 0x0000000122400fb3, + // Entry 40 - 5F + 0x00000001827c0813, 0x000000014240385f, 0x0000000103c1c813, 0x000000011e420813, + 0x0000000112000001, 0x0000000106000001, 0x0000000101400001, 0x000000010a000001, + 0x0000000102020001, +} + +// regionInclusionNext marks, for each entry in regionInclusionBits, the set of +// all groups that are reachable from the groups set in the respective entry. 
+// Size: 73 bytes, 73 elements +var regionInclusionNext = [73]uint8{ + // Entry 0 - 3F + 0x3e, 0x3f, 0x0b, 0x0b, 0x40, 0x01, 0x0b, 0x01, + 0x01, 0x01, 0x01, 0x41, 0x0b, 0x0b, 0x16, 0x16, + 0x16, 0x19, 0x04, 0x04, 0x04, 0x04, 0x42, 0x16, + 0x16, 0x43, 0x19, 0x19, 0x19, 0x01, 0x0b, 0x04, + 0x00, 0x00, 0x1f, 0x11, 0x18, 0x0f, 0x0d, 0x09, + 0x03, 0x15, 0x44, 0x12, 0x1b, 0x05, 0x45, 0x07, + 0x0c, 0x10, 0x0a, 0x1a, 0x06, 0x1c, 0x0e, 0x46, + 0x47, 0x08, 0x48, 0x13, 0x14, 0x17, 0x3e, 0x3e, + // Entry 40 - 7F + 0x3e, 0x3e, 0x3e, 0x3e, 0x43, 0x43, 0x42, 0x43, + 0x43, +} + +type parentRel struct { + lang uint16 + script uint16 + maxScript uint16 + toRegion uint16 + fromRegion []uint16 +} + +// Size: 414 bytes, 5 elements +var parents = [5]parentRel{ + 0: {lang: 0x139, script: 0x0, maxScript: 0x5b, toRegion: 0x1, fromRegion: []uint16{0x1a, 0x25, 0x26, 0x2f, 0x34, 0x36, 0x3d, 0x42, 0x46, 0x48, 0x49, 0x4a, 0x50, 0x52, 0x5d, 0x5e, 0x62, 0x65, 0x6e, 0x74, 0x75, 0x76, 0x7c, 0x7d, 0x80, 0x81, 0x82, 0x84, 0x8d, 0x8e, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0xa0, 0xa1, 0xa5, 0xa8, 0xaa, 0xae, 0xb2, 0xb5, 0xb6, 0xc0, 0xc7, 0xcb, 0xcc, 0xcd, 0xcf, 0xd1, 0xd3, 0xd6, 0xd7, 0xde, 0xe0, 0xe1, 0xe7, 0xe8, 0xe9, 0xec, 0xf1, 0x108, 0x10a, 0x10b, 0x10c, 0x10e, 0x10f, 0x113, 0x118, 0x11c, 0x11e, 0x120, 0x126, 0x12a, 0x12d, 0x12e, 0x130, 0x132, 0x13a, 0x13d, 0x140, 0x143, 0x162, 0x163, 0x165}}, + 1: {lang: 0x139, script: 0x0, maxScript: 0x5b, toRegion: 0x1a, fromRegion: []uint16{0x2e, 0x4e, 0x61, 0x64, 0x73, 0xda, 0x10d, 0x110}}, + 2: {lang: 0x13e, script: 0x0, maxScript: 0x5b, toRegion: 0x1f, fromRegion: []uint16{0x2c, 0x3f, 0x41, 0x48, 0x51, 0x54, 0x57, 0x5a, 0x66, 0x6a, 0x8a, 0x90, 0xd0, 0xd9, 0xe3, 0xe5, 0xed, 0xf2, 0x11b, 0x136, 0x137, 0x13c}}, + 3: {lang: 0x3c0, script: 0x0, maxScript: 0x5b, toRegion: 0xef, fromRegion: []uint16{0x2a, 0x4e, 0x5b, 0x87, 0x8c, 0xb8, 0xc7, 0xd2, 0x119, 0x127}}, + 4: {lang: 0x529, script: 0x3c, maxScript: 0x3c, toRegion: 0x8e, fromRegion: []uint16{0xc7}}, +} + +// Total table size 30466 bytes (29KiB); checksum: 7544152B diff --git a/vendor/golang.org/x/text/internal/language/tags.go b/vendor/golang.org/x/text/internal/language/tags.go new file mode 100644 index 00000000000..e7afd3188e6 --- /dev/null +++ b/vendor/golang.org/x/text/internal/language/tags.go @@ -0,0 +1,48 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package language + +// MustParse is like Parse, but panics if the given BCP 47 tag cannot be parsed. +// It simplifies safe initialization of Tag values. +func MustParse(s string) Tag { + t, err := Parse(s) + if err != nil { + panic(err) + } + return t +} + +// MustParseBase is like ParseBase, but panics if the given base cannot be parsed. +// It simplifies safe initialization of Base values. +func MustParseBase(s string) Language { + b, err := ParseBase(s) + if err != nil { + panic(err) + } + return b +} + +// MustParseScript is like ParseScript, but panics if the given script cannot be +// parsed. It simplifies safe initialization of Script values. +func MustParseScript(s string) Script { + scr, err := ParseScript(s) + if err != nil { + panic(err) + } + return scr +} + +// MustParseRegion is like ParseRegion, but panics if the given region cannot be +// parsed. It simplifies safe initialization of Region values. +func MustParseRegion(s string) Region { + r, err := ParseRegion(s) + if err != nil { + panic(err) + } + return r +} + +// Und is the root language. 
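+// As a rough editorial sketch (not part of the upstream file; the tags are +// arbitrary), the Must* helpers above are intended for static initialization, +// and Und, declared below, is simply the zero Tag: +// +// var fallback = MustParse("en-Latn-US") // panics only on malformed input +// var base = MustParseBase("de") +// var script = MustParseScript("Cyrl") +// var region = MustParseRegion("BE") +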
+var Und Tag diff --git a/vendor/golang.org/x/text/internal/tag/tag.go b/vendor/golang.org/x/text/internal/tag/tag.go new file mode 100644 index 00000000000..b5d348891d8 --- /dev/null +++ b/vendor/golang.org/x/text/internal/tag/tag.go @@ -0,0 +1,100 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package tag contains functionality handling tags and related data. +package tag // import "golang.org/x/text/internal/tag" + +import "sort" + +// An Index converts tags to a compact numeric value. +// +// All elements are of size 4. Tags may be up to 4 bytes long. Excess bytes can +// be used to store additional information about the tag. +type Index string + +// Elem returns the element data at the given index. +func (s Index) Elem(x int) string { + return string(s[x*4 : x*4+4]) +} + +// Index reports the index of the given key or -1 if it could not be found. +// Only the first len(key) bytes from the start of the 4-byte entries will be +// considered for the search and the first match in Index will be returned. +func (s Index) Index(key []byte) int { + n := len(key) + // search the index of the first entry with an equal or higher value than + // key in s. + index := sort.Search(len(s)/4, func(i int) bool { + return cmp(s[i*4:i*4+n], key) != -1 + }) + i := index * 4 + if cmp(s[i:i+len(key)], key) != 0 { + return -1 + } + return index +} + +// Next finds the next occurrence of key after index x, which must have been +// obtained from a call to Index using the same key. It returns x+1 or -1. +func (s Index) Next(key []byte, x int) int { + if x++; x*4 < len(s) && cmp(s[x*4:x*4+len(key)], key) == 0 { + return x + } + return -1 +} + +// cmp returns an integer comparing a and b lexicographically. +func cmp(a Index, b []byte) int { + n := len(a) + if len(b) < n { + n = len(b) + } + for i, c := range b[:n] { + switch { + case a[i] > c: + return 1 + case a[i] < c: + return -1 + } + } + switch { + case len(a) < len(b): + return -1 + case len(a) > len(b): + return 1 + } + return 0 +} + +// Compare returns an integer comparing a and b lexicographically. +func Compare(a string, b []byte) int { + return cmp(Index(a), b) +} + +// FixCase reformats b to the same pattern of cases as form. +// It returns false if string b is malformed. +func FixCase(form string, b []byte) bool { + if len(form) != len(b) { + return false + } + for i, c := range b { + if form[i] <= 'Z' { + if c >= 'a' { + c -= 'z' - 'Z' + } + if c < 'A' || 'Z' < c { + return false + } + } else { + if c <= 'Z' { + c += 'z' - 'Z' + } + if c < 'a' || 'z' < c { + return false + } + } + b[i] = c + } + return true +} diff --git a/vendor/golang.org/x/text/language/coverage.go b/vendor/golang.org/x/text/language/coverage.go new file mode 100644 index 00000000000..a24fd1a4d69 --- /dev/null +++ b/vendor/golang.org/x/text/language/coverage.go @@ -0,0 +1,187 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package language + +import ( + "fmt" + "sort" + + "golang.org/x/text/internal/language" +) + +// The Coverage interface is used to define the level of coverage of an +// internationalization service. Note that not all types are supported by all +// services. As lists may be generated on the fly, it is recommended that users +// of a Coverage cache the results.
+type Coverage interface { + // Tags returns the list of supported tags. + Tags() []Tag + + // BaseLanguages returns the list of supported base languages. + BaseLanguages() []Base + + // Scripts returns the list of supported scripts. + Scripts() []Script + + // Regions returns the list of supported regions. + Regions() []Region +} + +var ( + // Supported defines a Coverage that lists all supported subtags. Tags + // always returns nil. + Supported Coverage = allSubtags{} +) + +// TODO: +// - Support Variants, numbering systems. +// - CLDR coverage levels. +// - Set of common tags defined in this package. + +type allSubtags struct{} + +// Regions returns the list of supported regions. As all regions are in a +// consecutive range, it simply returns a slice of numbers in increasing order. +// The "undefined" region is not returned. +func (s allSubtags) Regions() []Region { + reg := make([]Region, language.NumRegions) + for i := range reg { + reg[i] = Region{language.Region(i + 1)} + } + return reg +} + +// Scripts returns the list of supported scripts. As all scripts are in a +// consecutive range, it simply returns a slice of numbers in increasing order. +// The "undefined" script is not returned. +func (s allSubtags) Scripts() []Script { + scr := make([]Script, language.NumScripts) + for i := range scr { + scr[i] = Script{language.Script(i + 1)} + } + return scr +} + +// BaseLanguages returns the list of all supported base languages. It generates +// the list by traversing the internal structures. +func (s allSubtags) BaseLanguages() []Base { + bs := language.BaseLanguages() + base := make([]Base, len(bs)) + for i, b := range bs { + base[i] = Base{b} + } + return base +} + +// Tags always returns nil. +func (s allSubtags) Tags() []Tag { + return nil +} + +// coverage is used by NewCoverage which is used as a convenient way for +// creating Coverage implementations for partially defined data. Very often a +// package will only need to define a subset of slices. coverage provides a +// convenient way to do this. Moreover, packages using NewCoverage, instead of +// their own implementation, will not break if later new slice types are added. +type coverage struct { + tags func() []Tag + bases func() []Base + scripts func() []Script + regions func() []Region +} + +func (s *coverage) Tags() []Tag { + if s.tags == nil { + return nil + } + return s.tags() +} + +// bases implements sort.Interface and is used to sort base languages. +type bases []Base + +func (b bases) Len() int { + return len(b) +} + +func (b bases) Swap(i, j int) { + b[i], b[j] = b[j], b[i] +} + +func (b bases) Less(i, j int) bool { + return b[i].langID < b[j].langID +} + +// BaseLanguages returns the result from calling s.bases if it is specified or +// otherwise derives the set of supported base languages from tags. +func (s *coverage) BaseLanguages() []Base { + if s.bases == nil { + tags := s.Tags() + if len(tags) == 0 { + return nil + } + a := make([]Base, len(tags)) + for i, t := range tags { + a[i] = Base{language.Language(t.lang())} + } + sort.Sort(bases(a)) + k := 0 + for i := 1; i < len(a); i++ { + if a[k] != a[i] { + k++ + a[k] = a[i] + } + } + return a[:k+1] + } + return s.bases() +} + +func (s *coverage) Scripts() []Script { + if s.scripts == nil { + return nil + } + return s.scripts() +} + +func (s *coverage) Regions() []Region { + if s.regions == nil { + return nil + } + return s.regions() +} + +// NewCoverage returns a Coverage for the given lists. 
It is typically used by +// packages providing internationalization services to define their level of +// coverage. A list may be of type []T or func() []T, where T is either Tag, +// Base, Script or Region. The returned Coverage derives the value for Bases +// from Tags if no func or slice for []Base is specified. For other unspecified +// types the returned Coverage will return nil for the respective methods. +func NewCoverage(list ...interface{}) Coverage { + s := &coverage{} + for _, x := range list { + switch v := x.(type) { + case func() []Base: + s.bases = v + case func() []Script: + s.scripts = v + case func() []Region: + s.regions = v + case func() []Tag: + s.tags = v + case []Base: + s.bases = func() []Base { return v } + case []Script: + s.scripts = func() []Script { return v } + case []Region: + s.regions = func() []Region { return v } + case []Tag: + s.tags = func() []Tag { return v } + default: + panic(fmt.Sprintf("language: unsupported set type %T", v)) + } + } + return s +} diff --git a/vendor/golang.org/x/text/language/doc.go b/vendor/golang.org/x/text/language/doc.go new file mode 100644 index 00000000000..212b77c9068 --- /dev/null +++ b/vendor/golang.org/x/text/language/doc.go @@ -0,0 +1,98 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package language implements BCP 47 language tags and related functionality. +// +// The most important function of package language is to match a list of +// user-preferred languages to a list of supported languages. +// It relieves the developer of dealing with the complexity of this process +// and provides the user with the best experience +// (see https://blog.golang.org/matchlang). +// +// # Matching preferred against supported languages +// +// A Matcher for an application that supports English, Australian English, +// Danish, and standard Mandarin can be created as follows: +// +// var matcher = language.NewMatcher([]language.Tag{ +// language.English, // The first language is used as fallback. +// language.MustParse("en-AU"), +// language.Danish, +// language.Chinese, +// }) +// +// This list of supported languages is typically implied by the languages for +// which there exist translations of the user interface. +// +// User-preferred languages usually come as a comma-separated list of BCP 47 +// language tags. +// MatchStrings finds the best matches for such strings: +// +// handler(w http.ResponseWriter, r *http.Request) { +// lang, _ := r.Cookie("lang") +// accept := r.Header.Get("Accept-Language") +// tag, _ := language.MatchStrings(matcher, lang.String(), accept) +// +// // tag should now be used for the initialization of any +// // locale-specific service. +// } +// +// The Matcher's Match method can be used to match Tags directly. +// +// Matchers are aware of the intricacies of equivalence between languages, such +// as deprecated subtags, legacy tags, macro languages, mutual +// intelligibility between scripts and languages, and transparently passing +// BCP 47 user configuration. +// For instance, it will know that a reader of Bokmål Danish can read Norwegian +// and will know that Cantonese ("yue") is a good match for "zh-HK". +// +// # Using match results +// +// To guarantee a consistent user experience it is important to +// use the same language tag for the selection of any locale-specific services.
+// For example, it is utterly confusing to substitute spelled-out numbers +// or dates in one language in text of another language. +// More subtly confusing is using the wrong sorting order or casing +// algorithm for a certain language. +// +// All the packages in x/text that provide locale-specific services +// (e.g. collate, cases) should be initialized with the tag that was +// obtained at the start of an interaction with the user. +// +// Note that the Tag that is returned by Match and MatchStrings may differ from any +// of the supported languages, as it may contain carried over settings from +// the user tags. +// This may be inconvenient when your application has some additional +// locale-specific data for your supported languages. +// Match and MatchStrings both return the index of the matched supported tag +// to simplify associating such data with the matched tag. +// +// # Canonicalization +// +// If one uses the Matcher to compare languages one does not need to +// worry about canonicalization. +// +// The meaning of a Tag varies per application. The language package +// therefore delays canonicalization and preserves information as much +// as possible. The Matcher, however, will always take into account that +// two different tags may represent the same language. +// +// By default, only legacy and deprecated tags are converted into their +// canonical equivalent. All other information is preserved. This approach makes +// the confidence scores more accurate and allows matchers to distinguish +// between variants that are otherwise lost. +// +// As a consequence, two tags that should be treated as identical according to +// BCP 47 or CLDR, like "en-Latn" and "en", will be represented differently. The +// Matcher handles such distinctions, though, and is aware of the +// equivalence relations. The CanonType type can be used to alter the +// canonicalization form. +// +// # References +// +// BCP 47 - Tags for Identifying Languages http://tools.ietf.org/html/bcp47 +package language // import "golang.org/x/text/language" + +// TODO: explanation on how to match languages for your own locale-specific +// service. diff --git a/vendor/golang.org/x/text/language/language.go b/vendor/golang.org/x/text/language/language.go new file mode 100644 index 00000000000..4d9c6612129 --- /dev/null +++ b/vendor/golang.org/x/text/language/language.go @@ -0,0 +1,605 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run gen.go -output tables.go + +package language + +// TODO: Remove above NOTE after: +// - verifying that tables are dropped correctly (most notably matcher tables). + +import ( + "strings" + + "golang.org/x/text/internal/language" + "golang.org/x/text/internal/language/compact" +) + +// Tag represents a BCP 47 language tag. It is used to specify an instance of a +// specific language or locale. All language tag values are guaranteed to be +// well-formed. +type Tag compact.Tag + +func makeTag(t language.Tag) (tag Tag) { + return Tag(compact.Make(t)) +} + +func (t *Tag) tag() language.Tag { + return (*compact.Tag)(t).Tag() +} + +func (t *Tag) isCompact() bool { + return (*compact.Tag)(t).IsCompact() +} + +// TODO: improve performance.
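+ +// A quick editorial illustration of basic Tag use (not part of the upstream +// file; the values follow from the method docs in this file): +// +// t := language.Make("en-US") +// fmt.Println(t) // "en-US" +// b, s, r := t.Raw() +// // b is "en" and r is "US"; s prints as "Zzzz" because no script was +// // specified and Raw does not infer one. +// _, _, _ = b, s, r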
+func (t *Tag) lang() language.Language { return t.tag().LangID } +func (t *Tag) region() language.Region { return t.tag().RegionID } +func (t *Tag) script() language.Script { return t.tag().ScriptID } + +// Make is a convenience wrapper for Parse that omits the error. +// In case of an error, a sensible default is returned. +func Make(s string) Tag { + return Default.Make(s) +} + +// Make is a convenience wrapper for c.Parse that omits the error. +// In case of an error, a sensible default is returned. +func (c CanonType) Make(s string) Tag { + t, _ := c.Parse(s) + return t +} + +// Raw returns the raw base language, script and region, without making an +// attempt to infer their values. +func (t Tag) Raw() (b Base, s Script, r Region) { + tt := t.tag() + return Base{tt.LangID}, Script{tt.ScriptID}, Region{tt.RegionID} +} + +// IsRoot returns true if t is equal to language "und". +func (t Tag) IsRoot() bool { + return compact.Tag(t).IsRoot() +} + +// CanonType can be used to enable or disable various types of canonicalization. +type CanonType int + +const ( + // Replace deprecated base languages with their preferred replacements. + DeprecatedBase CanonType = 1 << iota + // Replace deprecated scripts with their preferred replacements. + DeprecatedScript + // Replace deprecated regions with their preferred replacements. + DeprecatedRegion + // Remove redundant scripts. + SuppressScript + // Normalize legacy encodings. This includes legacy languages defined in + // CLDR as well as bibliographic codes defined in ISO-639. + Legacy + // Map the dominant language of a macro language group to the macro language + // subtag. For example cmn -> zh. + Macro + // The CLDR flag should be used if full compatibility with CLDR is required. + // There are a few cases where language.Tag may differ from CLDR. To follow all + // of CLDR's suggestions, use All|CLDR. + CLDR + + // Raw can be used to Compose or Parse without Canonicalization. + Raw CanonType = 0 + + // Replace all deprecated tags with their preferred replacements. + Deprecated = DeprecatedBase | DeprecatedScript | DeprecatedRegion + + // All canonicalizations recommended by BCP 47. + BCP47 = Deprecated | SuppressScript + + // All canonicalizations. + All = BCP47 | Legacy | Macro + + // Default is the canonicalization used by Parse, Make and Compose. To + // preserve as much information as possible, canonicalizations that remove + // potentially valuable information are not included. The Matcher is + // designed to recognize similar tags that would be the same if + // they were canonicalized using All. + Default = Deprecated | Legacy + + canonLang = DeprecatedBase | Legacy | Macro + + // TODO: LikelyScript, LikelyRegion: suppress similar to ICU. +) + +// canonicalize returns the canonicalized equivalent of the tag and +// whether there was any change. +func canonicalize(c CanonType, t language.Tag) (language.Tag, bool) { + if c == Raw { + return t, false + } + changed := false + if c&SuppressScript != 0 { + if t.LangID.SuppressScript() == t.ScriptID { + t.ScriptID = 0 + changed = true + } + } + if c&canonLang != 0 { + for { + if l, aliasType := t.LangID.Canonicalize(); l != t.LangID { + switch aliasType { + case language.Legacy: + if c&Legacy != 0 { + if t.LangID == _sh && t.ScriptID == 0 { + t.ScriptID = _Latn + } + t.LangID = l + changed = true + } + case language.Macro: + if c&Macro != 0 { + // We deviate here from CLDR. The mapping "nb" -> "no" + // qualifies as a typical Macro language mapping. 
However, + // for legacy reasons, CLDR maps "no", the macro language + // code for Norwegian, to the dominant variant "nb". This + // change is currently under consideration for CLDR as well. + // See https://unicode.org/cldr/trac/ticket/2698 and also + // https://unicode.org/cldr/trac/ticket/1790 for some of the + // practical implications. TODO: this check could be removed + // if CLDR adopts this change. + if c&CLDR == 0 || t.LangID != _nb { + changed = true + t.LangID = l + } + } + case language.Deprecated: + if c&DeprecatedBase != 0 { + if t.LangID == _mo && t.RegionID == 0 { + t.RegionID = _MD + } + t.LangID = l + changed = true + // Other canonicalization types may still apply. + continue + } + } + } else if c&Legacy != 0 && t.LangID == _no && c&CLDR != 0 { + t.LangID = _nb + changed = true + } + break + } + } + if c&DeprecatedScript != 0 { + if t.ScriptID == _Qaai { + changed = true + t.ScriptID = _Zinh + } + } + if c&DeprecatedRegion != 0 { + if r := t.RegionID.Canonicalize(); r != t.RegionID { + changed = true + t.RegionID = r + } + } + return t, changed +} + +// Canonicalize returns the canonicalized equivalent of the tag. +func (c CanonType) Canonicalize(t Tag) (Tag, error) { + // First try fast path. + if t.isCompact() { + if _, changed := canonicalize(c, compact.Tag(t).Tag()); !changed { + return t, nil + } + } + // It is unlikely that one will canonicalize a tag after matching. So do + // a slow but simple approach here. + if tag, changed := canonicalize(c, t.tag()); changed { + tag.RemakeString() + return makeTag(tag), nil + } + return t, nil + +} + +// Confidence indicates the level of certainty for a given return value. +// For example, Serbian may be written in Cyrillic or Latin script. +// The confidence level indicates whether a value was explicitly specified, +// whether it is typically the only possible value, or whether there is +// an ambiguity. +type Confidence int + +const ( + No Confidence = iota // full confidence that there was no match + Low // most likely value picked out of a set of alternatives + High // value is generally assumed to be the correct match + Exact // exact match or explicitly specified value +) + +var confName = []string{"No", "Low", "High", "Exact"} + +func (c Confidence) String() string { + return confName[c] +} + +// String returns the canonical string representation of the language tag. +func (t Tag) String() string { + return t.tag().String() +} + +// MarshalText implements encoding.TextMarshaler. +func (t Tag) MarshalText() (text []byte, err error) { + return t.tag().MarshalText() +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (t *Tag) UnmarshalText(text []byte) error { + var tag language.Tag + err := tag.UnmarshalText(text) + *t = makeTag(tag) + return err +} + +// Base returns the base language of the language tag. If the base language is +// unspecified, an attempt will be made to infer it from the context. +// It uses a variant of CLDR's Add Likely Subtags algorithm. This is subject to change. +func (t Tag) Base() (Base, Confidence) { + if b := t.lang(); b != 0 { + return Base{b}, Exact + } + tt := t.tag() + c := High + if tt.ScriptID == 0 && !tt.RegionID.IsCountry() { + c = Low + } + if tag, err := tt.Maximize(); err == nil && tag.LangID != 0 { + return Base{tag.LangID}, c + } + return Base{0}, No +} + +// Script infers the script for the language tag. If it was not explicitly given, it will infer +// a most likely candidate. 
+// If more than one script is commonly used for a language, the most likely one +// is returned with a low confidence indication. For example, it returns (Cyrl, Low) +// for Serbian. +// If a script cannot be inferred, (Zzzz, No) is returned. We do not use Zyyy (undetermined) +// as one would suspect from the IANA registry for BCP 47. In a Unicode context Zyyy marks +// common characters (like 1, 2, 3, '.', etc.) and is therefore more like multiple scripts. +// See https://www.unicode.org/reports/tr24/#Values for more details. Zzzz is also used for +// the unknown value in CLDR. (Zzzz, Exact) is returned if Zzzz was explicitly specified. +// Note that an inferred script is never guaranteed to be the correct one. Latin is +// almost exclusively used for Afrikaans, but Arabic has been used for some texts +// in the past. Also, the script that is commonly used may change over time. +// It uses a variant of CLDR's Add Likely Subtags algorithm. This is subject to change. +func (t Tag) Script() (Script, Confidence) { + if scr := t.script(); scr != 0 { + return Script{scr}, Exact + } + tt := t.tag() + sc, c := language.Script(_Zzzz), No + if scr := tt.LangID.SuppressScript(); scr != 0 { + // Note: it is not always the case that a language with a suppress + // script value is only written in one script (e.g. kk, ms, pa). + if tt.RegionID == 0 { + return Script{scr}, High + } + sc, c = scr, High + } + if tag, err := tt.Maximize(); err == nil { + if tag.ScriptID != sc { + sc, c = tag.ScriptID, Low + } + } else { + tt, _ = canonicalize(Deprecated|Macro, tt) + if tag, err := tt.Maximize(); err == nil && tag.ScriptID != sc { + sc, c = tag.ScriptID, Low + } + } + return Script{sc}, c +} + +// Region returns the region for the language tag. If it was not explicitly given, it will +// infer a most likely candidate from the context. +// It uses a variant of CLDR's Add Likely Subtags algorithm. This is subject to change. +func (t Tag) Region() (Region, Confidence) { + if r := t.region(); r != 0 { + return Region{r}, Exact + } + tt := t.tag() + if tt, err := tt.Maximize(); err == nil { + return Region{tt.RegionID}, Low // TODO: differentiate between high and low. + } + tt, _ = canonicalize(Deprecated|Macro, tt) + if tag, err := tt.Maximize(); err == nil { + return Region{tag.RegionID}, Low + } + return Region{_ZZ}, No // TODO: return world instead of undetermined? +} + +// Variants returns the variants specified explicitly for this language tag, +// or nil if no variant was specified. +func (t Tag) Variants() []Variant { + if !compact.Tag(t).MayHaveVariants() { + return nil + } + v := []Variant{} + x, str := "", t.tag().Variants() + for str != "" { + x, str = nextToken(str) + v = append(v, Variant{x}) + } + return v +} + +// Parent returns the CLDR parent of t. In CLDR, missing fields in data for a +// specific language are substituted with fields from the parent language. +// The parent for a language may change for newer versions of CLDR. +// +// Parent returns a tag for a less specific language that is mutually +// intelligible or Und if there is no such language. This may not be the same as +// simply stripping the last BCP 47 subtag. For instance, the parent of "zh-TW" +// is "zh-Hant", and the parent of "zh-Hant" is "und". +func (t Tag) Parent() Tag { + return Tag(compact.Tag(t).Parent()) +} + +// nextToken returns token t and the rest of the string.
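+// The input is expected to start at the separator preceding the token, for +// example (an editorial illustration, not part of the upstream file): +// +// nextToken("-hant-tw") // returns "hant", "-tw" +// nextToken("-tw") // returns "tw", ""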
+func nextToken(s string) (t, tail string) { + p := strings.Index(s[1:], "-") + if p == -1 { + return s[1:], "" + } + p++ + return s[1:p], s[p:] +} + +// Extension is a single BCP 47 extension. +type Extension struct { + s string +} + +// String returns the string representation of the extension, including the +// type tag. +func (e Extension) String() string { + return e.s +} + +// ParseExtension parses s as an extension and returns it on success. +func ParseExtension(s string) (e Extension, err error) { + ext, err := language.ParseExtension(s) + return Extension{ext}, err +} + +// Type returns the one-byte extension type of e. It returns 0 for the zero +// extension. +func (e Extension) Type() byte { + if e.s == "" { + return 0 + } + return e.s[0] +} + +// Tokens returns the list of tokens of e. +func (e Extension) Tokens() []string { + return strings.Split(e.s, "-") +} + +// Extension returns the extension of type x for tag t. It will return +// false for ok if t does not have the requested extension. The returned +// extension will be invalid in this case. +func (t Tag) Extension(x byte) (ext Extension, ok bool) { + if !compact.Tag(t).MayHaveExtensions() { + return Extension{}, false + } + e, ok := t.tag().Extension(x) + return Extension{e}, ok +} + +// Extensions returns all extensions of t. +func (t Tag) Extensions() []Extension { + if !compact.Tag(t).MayHaveExtensions() { + return nil + } + e := []Extension{} + for _, ext := range t.tag().Extensions() { + e = append(e, Extension{ext}) + } + return e +} + +// TypeForKey returns the type associated with the given key, where key and type +// are of the allowed values defined for the Unicode locale extension ('u') in +// https://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers. +// TypeForKey will traverse the inheritance chain to get the correct value. +// +// If there are multiple types associated with a key, only the first will be +// returned. If there is no type associated with a key, it returns the empty +// string. +func (t Tag) TypeForKey(key string) string { + if !compact.Tag(t).MayHaveExtensions() { + if key != "rg" && key != "va" { + return "" + } + } + return t.tag().TypeForKey(key) +} + +// SetTypeForKey returns a new Tag with the key set to type, where key and type +// are of the allowed values defined for the Unicode locale extension ('u') in +// https://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers. +// An empty value removes an existing pair with the same key. +func (t Tag) SetTypeForKey(key, value string) (Tag, error) { + tt, err := t.tag().SetTypeForKey(key, value) + return makeTag(tt), err +} + +// NumCompactTags is the number of compact tags. The maximum tag is +// NumCompactTags-1. +const NumCompactTags = compact.NumCompactTags + +// CompactIndex returns an index, where 0 <= index < NumCompactTags, for tags +// for which data exists in the text repository. The index will change over time +// and should not be stored in persistent storage. If t does not match a compact +// index, exact will be false and the compact index will be returned for the +// first match after repeatedly taking the Parent of t. +func CompactIndex(t Tag) (index int, exact bool) { + id, exact := compact.LanguageID(compact.Tag(t)) + return int(id), exact +} + +var root = language.Tag{} + +// Base is an ISO 639 language code, used for encoding the base language +// of a language tag. +type Base struct { + langID language.Language +} + +// ParseBase parses a 2- or 3-letter ISO 639 code.
+// It returns a ValueError if s is a well-formed but unknown language identifier +// or another error if another error occurred. +func ParseBase(s string) (Base, error) { + l, err := language.ParseBase(s) + return Base{l}, err +} + +// String returns the BCP 47 representation of the base language. +func (b Base) String() string { + return b.langID.String() +} + +// ISO3 returns the ISO 639-3 language code. +func (b Base) ISO3() string { + return b.langID.ISO3() +} + +// IsPrivateUse reports whether this language code is reserved for private use. +func (b Base) IsPrivateUse() bool { + return b.langID.IsPrivateUse() +} + +// Script is a 4-letter ISO 15924 code for representing scripts. +// It is idiomatically represented in title case. +type Script struct { + scriptID language.Script +} + +// ParseScript parses a 4-letter ISO 15924 code. +// It returns a ValueError if s is a well-formed but unknown script identifier +// or another error if another error occurred. +func ParseScript(s string) (Script, error) { + sc, err := language.ParseScript(s) + return Script{sc}, err +} + +// String returns the script code in title case. +// It returns "Zzzz" for an unspecified script. +func (s Script) String() string { + return s.scriptID.String() +} + +// IsPrivateUse reports whether this script code is reserved for private use. +func (s Script) IsPrivateUse() bool { + return s.scriptID.IsPrivateUse() +} + +// Region is an ISO 3166-1 or UN M.49 code for representing countries and regions. +type Region struct { + regionID language.Region +} + +// EncodeM49 returns the Region for the given UN M.49 code. +// It returns an error if r is not a valid code. +func EncodeM49(r int) (Region, error) { + rid, err := language.EncodeM49(r) + return Region{rid}, err +} + +// ParseRegion parses a 2- or 3-letter ISO 3166-1 or a UN M.49 code. +// It returns a ValueError if s is a well-formed but unknown region identifier +// or another error if another error occurred. +func ParseRegion(s string) (Region, error) { + r, err := language.ParseRegion(s) + return Region{r}, err +} + +// String returns the BCP 47 representation for the region. +// It returns "ZZ" for an unspecified region. +func (r Region) String() string { + return r.regionID.String() +} + +// ISO3 returns the 3-letter ISO code of r. +// Note that not all regions have a 3-letter ISO code. +// In such cases this method returns "ZZZ". +func (r Region) ISO3() string { + return r.regionID.ISO3() +} + +// M49 returns the UN M.49 encoding of r, or 0 if this encoding +// is not defined for r. +func (r Region) M49() int { + return r.regionID.M49() +} + +// IsPrivateUse reports whether r has the ISO 3166 User-assigned status. This +// may include private-use tags that are assigned by CLDR and used in this +// implementation. So IsPrivateUse and IsCountry can be simultaneously true. +func (r Region) IsPrivateUse() bool { + return r.regionID.IsPrivateUse() +} + +// IsCountry returns whether this region is a country or autonomous area. This +// includes non-standard definitions from CLDR. +func (r Region) IsCountry() bool { + return r.regionID.IsCountry() +} + +// IsGroup returns whether this region defines a collection of regions. This +// includes non-standard definitions from CLDR. +func (r Region) IsGroup() bool { + return r.regionID.IsGroup() +} + +// Contains returns whether Region c is contained by Region r. It returns true +// if c == r. 
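+// For instance (an editorial sketch, not part of the upstream file; 001 is +// the UN M.49 code for the World grouping, which should contain every region): +// +// us, _ := language.ParseRegion("US") +// world, _ := language.EncodeM49(1) +// world.Contains(us) // true +// us.Contains(us) // true: a region contains itself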
+func (r Region) Contains(c Region) bool { + return r.regionID.Contains(c.regionID) +} + +// TLD returns the country code top-level domain (ccTLD). UK is returned for GB. +// In all other cases it returns either the region itself or an error. +// +// This method may return an error for a region for which there exists a +// canonical form with a ccTLD. To get that ccTLD canonicalize r first. The +// region will already be canonicalized if it was obtained from a Tag that was +// obtained using any of the default methods. +func (r Region) TLD() (Region, error) { + tld, err := r.regionID.TLD() + return Region{tld}, err +} + +// Canonicalize returns the region or a possible replacement if the region is +// deprecated. It will not return a replacement for deprecated regions that +// are split into multiple regions. +func (r Region) Canonicalize() Region { + return Region{r.regionID.Canonicalize()} +} + +// Variant represents a registered variant of a language as defined by BCP 47. +type Variant struct { + variant string +} + +// ParseVariant parses and returns a Variant. An error is returned if s is not +// a valid variant. +func ParseVariant(s string) (Variant, error) { + v, err := language.ParseVariant(s) + return Variant{v.String()}, err +} + +// String returns the string representation of the variant. +func (v Variant) String() string { + return v.variant +} diff --git a/vendor/golang.org/x/text/language/match.go b/vendor/golang.org/x/text/language/match.go new file mode 100644 index 00000000000..1153baf291c --- /dev/null +++ b/vendor/golang.org/x/text/language/match.go @@ -0,0 +1,735 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package language + +import ( + "errors" + "strings" + + "golang.org/x/text/internal/language" +) + +// A MatchOption configures a Matcher. +type MatchOption func(*matcher) + +// PreferSameScript will, in the absence of a match, cause the first +// preferred tag with the same script as a supported tag to match this supported +// tag. The default is currently true, but this may change in the future. +func PreferSameScript(preferSame bool) MatchOption { + return func(m *matcher) { m.preferSameScript = preferSame } +} + +// TODO(v1.0.0): consider making Matcher a concrete type, instead of interface. +// There doesn't seem to be too much need for multiple types. +// Making it a concrete type allows MatchStrings to be a method, which will +// improve its discoverability. + +// MatchStrings parses and matches the given strings until one of them matches +// the language in the Matcher. A string may be an Accept-Language header as +// handled by ParseAcceptLanguage. The default language is returned if no +// other language matched. +func MatchStrings(m Matcher, lang ...string) (tag Tag, index int) { + for _, accept := range lang { + desired, _, err := ParseAcceptLanguage(accept) + if err != nil { + continue + } + if tag, index, conf := m.Match(desired...); conf != No { + return tag, index + } + } + tag, index, _ = m.Match() + return +} + +// Matcher is the interface that wraps the Match method. +// +// Match returns the best match for any of the given tags, along with +// a unique index associated with the returned tag and a confidence +// score.
+type Matcher interface { + Match(t ...Tag) (tag Tag, index int, c Confidence) +} + +// Comprehends reports the confidence score for a speaker of a given language +// to be able to comprehend the written form of an alternative language. +func Comprehends(speaker, alternative Tag) Confidence { + _, _, c := NewMatcher([]Tag{alternative}).Match(speaker) + return c +} + +// NewMatcher returns a Matcher that matches an ordered list of preferred tags +// against a list of supported tags based on written intelligibility, closeness +// of dialect, equivalence of subtags and various other rules. It is initialized +// with the list of supported tags. The first element is used as the default +// value in case no match is found. +// +// Its Match method matches the first of the given Tags to reach a certain +// confidence threshold. The tags passed to Match should therefore be specified +// in order of preference. Extensions are ignored for matching. +// +// The index returned by the Match method corresponds to the index of the +// matched tag in t, but is augmented with the Unicode extension ('u') of the +// corresponding preferred tag. This allows user locale options to be passed +// transparently. +func NewMatcher(t []Tag, options ...MatchOption) Matcher { + return newMatcher(t, options) +} + +func (m *matcher) Match(want ...Tag) (t Tag, index int, c Confidence) { + var tt language.Tag + match, w, c := m.getBest(want...) + if match != nil { + tt, index = match.tag, match.index + } else { + // TODO: this should be an option + tt = m.default_.tag + if m.preferSameScript { + outer: + for _, w := range want { + script, _ := w.Script() + if script.scriptID == 0 { + // Don't do anything if there is no script, such as with + // private subtags. + continue + } + for i, h := range m.supported { + if script.scriptID == h.maxScript { + tt, index = h.tag, i + break outer + } + } + } + } + // TODO: select first language tag based on script. + } + if w.RegionID != tt.RegionID && w.RegionID != 0 { + if w.RegionID != 0 && tt.RegionID != 0 && tt.RegionID.Contains(w.RegionID) { + tt.RegionID = w.RegionID + tt.RemakeString() + } else if r := w.RegionID.String(); len(r) == 2 { + // TODO: also filter macro and deprecated. + tt, _ = tt.SetTypeForKey("rg", strings.ToLower(r)+"zzzz") + } + } + // Copy options from the user-provided tag into the result tag. This is hard + // to do after the fact, so we do it here. + // TODO: add in alternative variants to -u-va-. + // TODO: add preferred region to -u-rg-. + if e := w.Extensions(); len(e) > 0 { + b := language.Builder{} + b.SetTag(tt) + for _, e := range e { + b.AddExt(e) + } + tt = b.Make() + } + return makeTag(tt), index, c +} + +// ErrMissingLikelyTagsData indicates no information was available +// to compute likely values of missing tags. +var ErrMissingLikelyTagsData = errors.New("missing likely tags data") + +// func (t *Tag) setTagsFrom(id Tag) { +// t.LangID = id.LangID +// t.ScriptID = id.ScriptID +// t.RegionID = id.RegionID +// } + +// Tag Matching +// CLDR defines an algorithm for finding the best match between two sets of language +// tags. The basic algorithm defines how to score a possible match and then find +// the match with the best score +// (see https://www.unicode.org/reports/tr35/#LanguageMatching). +// Using scoring has several disadvantages. The scoring obfuscates the importance of +// the various factors considered, making the algorithm harder to understand. Using +// scoring also requires the full score to be computed for each pair of tags.
+//
+// We will use a different algorithm which aims to have the following properties:
+// - clarity on the precedence of the various selection factors, and
+// - improved performance by allowing early termination of a comparison.
+//
+// Matching algorithm (overview)
+// Input:
+// - supported: a set of supported tags
+// - default: the default tag to return in case there is no match
+// - desired: list of desired tags, ordered by preference, starting with
+// the most-preferred.
+//
+// Algorithm:
+// 1) Set the best match to the lowest confidence level
+// 2) For each tag in "desired":
+// a) For each tag in "supported":
+// 1) compute the match between the two tags.
+// 2) if the match is better than the previous best match, replace it
+// with the new match. (see next section)
+// b) if the current best match is Exact and pin is true, the result will be
+// frozen to the language found thus far, although better matches may
+// still be found for the same language.
+// 3) If the best match so far is below a certain threshold, return "default".
+//
+// Ranking:
+// We use two phases to determine whether one pair of tags is a better match
+// than another pair of tags. First, we determine a rough confidence level. If the
+// levels are different, the one with the higher confidence wins.
+// Second, if the rough confidence levels are identical, we use a set of tie-breaker
+// rules.
+//
+// The confidence level of matching a pair of tags is determined by finding the
+// lowest confidence level of any matches of the corresponding subtags (the
+// result is deemed as good as its weakest link).
+// We define the following levels:
+// Exact - An exact match of a subtag, before adding likely subtags.
+// MaxExact - An exact match of a subtag, after adding likely subtags.
+// [See Note 2].
+// High - High level of mutual intelligibility between different subtag
+// variants.
+// Low - Low level of mutual intelligibility between different subtag
+// variants.
+// No - No mutual intelligibility.
+//
+// The following levels can occur for each type of subtag:
+// Base: Exact, MaxExact, High, Low, No
+// Script: Exact, MaxExact [see Note 3], Low, No
+// Region: Exact, MaxExact, High
+// Variant: Exact, High
+// Private: Exact, No
+//
+// Any result with a confidence level of Low or higher is deemed a possible match.
+// Once a desired tag matches any of the supported tags with a level of MaxExact
+// or higher, the next desired tag is not considered (see Step 2.b).
+// Note that CLDR provides languageMatching data that defines close equivalence
+// classes for base languages, scripts and regions.
+//
+// Tie-breaking
+// If we get the same confidence level for two matches, we apply a sequence of
+// tie-breaking rules. The first that succeeds defines the result. The rules are
+// applied in the following order.
+// 1) Original language was defined and was identical.
+// 2) Original region was defined and was identical.
+// 3) Distance between two maximized regions was the smallest.
+// 4) Original script was defined and was identical.
+// 5) Distance from want tag to have tag using the parent relation [see Note 5.]
+// If there is still no winner after these rules are applied, the first match
+// found wins.
+//
+// Notes:
+// [2] In practice, as matching of Exact is done in a separate phase from
+// matching the other levels, we reuse the Exact level to mean MaxExact in
+// the second phase. As a consequence, we only need the levels defined by
+// the Confidence type.
The MaxExact confidence level is mapped to High in +// the public API. +// [3] We do not differentiate between maximized script values that were derived +// from suppressScript versus most likely tag data. We determined that in +// ranking the two, one ranks just after the other. Moreover, the two cannot +// occur concurrently. As a consequence, they are identical for practical +// purposes. +// [4] In case of deprecated, macro-equivalents and legacy mappings, we assign +// the MaxExact level to allow iw vs he to still be a closer match than +// en-AU vs en-US, for example. +// [5] In CLDR a locale inherits fields that are unspecified for this locale +// from its parent. Therefore, if a locale is a parent of another locale, +// it is a strong measure for closeness, especially when no other tie +// breaker rule applies. One could also argue it is inconsistent, for +// example, when pt-AO matches pt (which CLDR equates with pt-BR), even +// though its parent is pt-PT according to the inheritance rules. +// +// Implementation Details: +// There are several performance considerations worth pointing out. Most notably, +// we preprocess as much as possible (within reason) at the time of creation of a +// matcher. This includes: +// - creating a per-language map, which includes data for the raw base language +// and its canonicalized variant (if applicable), +// - expanding entries for the equivalence classes defined in CLDR's +// languageMatch data. +// The per-language map ensures that typically only a very small number of tags +// need to be considered. The pre-expansion of canonicalized subtags and +// equivalence classes reduces the amount of map lookups that need to be done at +// runtime. + +// matcher keeps a set of supported language tags, indexed by language. +type matcher struct { + default_ *haveTag + supported []*haveTag + index map[language.Language]*matchHeader + passSettings bool + preferSameScript bool +} + +// matchHeader has the lists of tags for exact matches and matches based on +// maximized and canonicalized tags for a given language. +type matchHeader struct { + haveTags []*haveTag + original bool +} + +// haveTag holds a supported Tag and its maximized script and region. The maximized +// or canonicalized language is not stored as it is not needed during matching. +type haveTag struct { + tag language.Tag + + // index of this tag in the original list of supported tags. + index int + + // conf is the maximum confidence that can result from matching this haveTag. + // When conf < Exact this means it was inserted after applying a CLDR equivalence rule. + conf Confidence + + // Maximized region and script. + maxRegion language.Region + maxScript language.Script + + // altScript may be checked as an alternative match to maxScript. If altScript + // matches, the confidence level for this match is Low. Theoretically there + // could be multiple alternative scripts. This does not occur in practice. + altScript language.Script + + // nextMax is the index of the next haveTag with the same maximized tags. + nextMax uint16 +} + +func makeHaveTag(tag language.Tag, index int) (haveTag, language.Language) { + max := tag + if tag.LangID != 0 || tag.RegionID != 0 || tag.ScriptID != 0 { + max, _ = canonicalize(All, max) + max, _ = max.Maximize() + max.RemakeString() + } + return haveTag{tag, index, Exact, max.RegionID, max.ScriptID, altScript(max.LangID, max.ScriptID), 0}, max.LangID +} + +// altScript returns an alternative script that may match the given script with +// a low confidence. 
At the moment, the langMatch data allows for at most one +// script to map to another and we rely on this to keep the code simple. +func altScript(l language.Language, s language.Script) language.Script { + for _, alt := range matchScript { + // TODO: also match cases where language is not the same. + if (language.Language(alt.wantLang) == l || language.Language(alt.haveLang) == l) && + language.Script(alt.haveScript) == s { + return language.Script(alt.wantScript) + } + } + return 0 +} + +// addIfNew adds a haveTag to the list of tags only if it is a unique tag. +// Tags that have the same maximized values are linked by index. +func (h *matchHeader) addIfNew(n haveTag, exact bool) { + h.original = h.original || exact + // Don't add new exact matches. + for _, v := range h.haveTags { + if equalsRest(v.tag, n.tag) { + return + } + } + // Allow duplicate maximized tags, but create a linked list to allow quickly + // comparing the equivalents and bail out. + for i, v := range h.haveTags { + if v.maxScript == n.maxScript && + v.maxRegion == n.maxRegion && + v.tag.VariantOrPrivateUseTags() == n.tag.VariantOrPrivateUseTags() { + for h.haveTags[i].nextMax != 0 { + i = int(h.haveTags[i].nextMax) + } + h.haveTags[i].nextMax = uint16(len(h.haveTags)) + break + } + } + h.haveTags = append(h.haveTags, &n) +} + +// header returns the matchHeader for the given language. It creates one if +// it doesn't already exist. +func (m *matcher) header(l language.Language) *matchHeader { + if h := m.index[l]; h != nil { + return h + } + h := &matchHeader{} + m.index[l] = h + return h +} + +func toConf(d uint8) Confidence { + if d <= 10 { + return High + } + if d < 30 { + return Low + } + return No +} + +// newMatcher builds an index for the given supported tags and returns it as +// a matcher. It also expands the index by considering various equivalence classes +// for a given tag. +func newMatcher(supported []Tag, options []MatchOption) *matcher { + m := &matcher{ + index: make(map[language.Language]*matchHeader), + preferSameScript: true, + } + for _, o := range options { + o(m) + } + if len(supported) == 0 { + m.default_ = &haveTag{} + return m + } + // Add supported languages to the index. Add exact matches first to give + // them precedence. + for i, tag := range supported { + tt := tag.tag() + pair, _ := makeHaveTag(tt, i) + m.header(tt.LangID).addIfNew(pair, true) + m.supported = append(m.supported, &pair) + } + m.default_ = m.header(supported[0].lang()).haveTags[0] + // Keep these in two different loops to support the case that two equivalent + // languages are distinguished, such as iw and he. + for i, tag := range supported { + tt := tag.tag() + pair, max := makeHaveTag(tt, i) + if max != tt.LangID { + m.header(max).addIfNew(pair, true) + } + } + + // update is used to add indexes in the map for equivalent languages. + // update will only add entries to original indexes, thus not computing any + // transitive relations. + update := func(want, have uint16, conf Confidence) { + if hh := m.index[language.Language(have)]; hh != nil { + if !hh.original { + return + } + hw := m.header(language.Language(want)) + for _, ht := range hh.haveTags { + v := *ht + if conf < v.conf { + v.conf = conf + } + v.nextMax = 0 // this value needs to be recomputed + if v.altScript != 0 { + v.altScript = altScript(language.Language(want), v.maxScript) + } + hw.addIfNew(v, conf == Exact && hh.original) + } + } + } + + // Add entries for languages with mutual intelligibility as defined by CLDR's + // languageMatch data. 
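+	// Editor's illustration (a sketch; the exact confidence depends on the
+	// CLDR data compiled into this package): Danish and Norwegian Bokmål are
+	// listed as mutually intelligible, so a matcher supporting only nb can
+	// still serve a da speaker:
+	//
+	//	m := NewMatcher([]Tag{MustParse("nb")})
+	//	_, _, conf := m.Match(MustParse("da"))
+	//	// conf is typically High.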
+ for _, ml := range matchLang { + update(ml.want, ml.have, toConf(ml.distance)) + if !ml.oneway { + update(ml.have, ml.want, toConf(ml.distance)) + } + } + + // Add entries for possible canonicalizations. This is an optimization to + // ensure that only one map lookup needs to be done at runtime per desired tag. + // First we match deprecated equivalents. If they are perfect equivalents + // (their canonicalization simply substitutes a different language code, but + // nothing else), the match confidence is Exact, otherwise it is High. + for i, lm := range language.AliasMap { + // If deprecated codes match and there is no fiddling with the script + // or region, we consider it an exact match. + conf := Exact + if language.AliasTypes[i] != language.Macro { + if !isExactEquivalent(language.Language(lm.From)) { + conf = High + } + update(lm.To, lm.From, conf) + } + update(lm.From, lm.To, conf) + } + return m +} + +// getBest gets the best matching tag in m for any of the given tags, taking into +// account the order of preference of the given tags. +func (m *matcher) getBest(want ...Tag) (got *haveTag, orig language.Tag, c Confidence) { + best := bestMatch{} + for i, ww := range want { + w := ww.tag() + var max language.Tag + // Check for exact match first. + h := m.index[w.LangID] + if w.LangID != 0 { + if h == nil { + continue + } + // Base language is defined. + max, _ = canonicalize(Legacy|Deprecated|Macro, w) + // A region that is added through canonicalization is stronger than + // a maximized region: set it in the original (e.g. mo -> ro-MD). + if w.RegionID != max.RegionID { + w.RegionID = max.RegionID + } + // TODO: should we do the same for scripts? + // See test case: en, sr, nl ; sh ; sr + max, _ = max.Maximize() + } else { + // Base language is not defined. + if h != nil { + for i := range h.haveTags { + have := h.haveTags[i] + if equalsRest(have.tag, w) { + return have, w, Exact + } + } + } + if w.ScriptID == 0 && w.RegionID == 0 { + // We skip all tags matching und for approximate matching, including + // private tags. + continue + } + max, _ = w.Maximize() + if h = m.index[max.LangID]; h == nil { + continue + } + } + pin := true + for _, t := range want[i+1:] { + if w.LangID == t.lang() { + pin = false + break + } + } + // Check for match based on maximized tag. + for i := range h.haveTags { + have := h.haveTags[i] + best.update(have, w, max.ScriptID, max.RegionID, pin) + if best.conf == Exact { + for have.nextMax != 0 { + have = h.haveTags[have.nextMax] + best.update(have, w, max.ScriptID, max.RegionID, pin) + } + return best.have, best.want, best.conf + } + } + } + if best.conf <= No { + if len(want) != 0 { + return nil, want[0].tag(), No + } + return nil, language.Tag{}, No + } + return best.have, best.want, best.conf +} + +// bestMatch accumulates the best match so far. +type bestMatch struct { + have *haveTag + want language.Tag + conf Confidence + pinnedRegion language.Region + pinLanguage bool + sameRegionGroup bool + // Cached results from applying tie-breaking rules. + origLang bool + origReg bool + paradigmReg bool + regGroupDist uint8 + origScript bool +} + +// update updates the existing best match if the new pair is considered to be a +// better match. To determine if the given pair is a better match, it first +// computes the rough confidence level. If this surpasses the current match, it +// will replace it and update the tie-breaker rule cache. If there is a tie, it +// proceeds with applying a series of tie-breaker rules. 
If there is no +// conclusive winner after applying the tie-breaker rules, it leaves the current +// match as the preferred match. +// +// If pin is true and have and tag are a strong match, it will henceforth only +// consider matches for this language. This corresponds to the idea that most +// users have a strong preference for the first defined language. A user can +// still prefer a second language over a dialect of the preferred language by +// explicitly specifying dialects, e.g. "en, nl, en-GB". In this case pin should +// be false. +func (m *bestMatch) update(have *haveTag, tag language.Tag, maxScript language.Script, maxRegion language.Region, pin bool) { + // Bail if the maximum attainable confidence is below that of the current best match. + c := have.conf + if c < m.conf { + return + } + // Don't change the language once we already have found an exact match. + if m.pinLanguage && tag.LangID != m.want.LangID { + return + } + // Pin the region group if we are comparing tags for the same language. + if tag.LangID == m.want.LangID && m.sameRegionGroup { + _, sameGroup := regionGroupDist(m.pinnedRegion, have.maxRegion, have.maxScript, m.want.LangID) + if !sameGroup { + return + } + } + if c == Exact && have.maxScript == maxScript { + // If there is another language and then another entry of this language, + // don't pin anything, otherwise pin the language. + m.pinLanguage = pin + } + if equalsRest(have.tag, tag) { + } else if have.maxScript != maxScript { + // There is usually very little comprehension between different scripts. + // In a few cases there may still be Low comprehension. This possibility + // is pre-computed and stored in have.altScript. + if Low < m.conf || have.altScript != maxScript { + return + } + c = Low + } else if have.maxRegion != maxRegion { + if High < c { + // There is usually a small difference between languages across regions. + c = High + } + } + + // We store the results of the computations of the tie-breaker rules along + // with the best match. There is no need to do the checks once we determine + // we have a winner, but we do still need to do the tie-breaker computations. + // We use "beaten" to keep track if we still need to do the checks. + beaten := false // true if the new pair defeats the current one. + if c != m.conf { + if c < m.conf { + return + } + beaten = true + } + + // Tie-breaker rules: + // We prefer if the pre-maximized language was specified and identical. + origLang := have.tag.LangID == tag.LangID && tag.LangID != 0 + if !beaten && m.origLang != origLang { + if m.origLang { + return + } + beaten = true + } + + // We prefer if the pre-maximized region was specified and identical. + origReg := have.tag.RegionID == tag.RegionID && tag.RegionID != 0 + if !beaten && m.origReg != origReg { + if m.origReg { + return + } + beaten = true + } + + regGroupDist, sameGroup := regionGroupDist(have.maxRegion, maxRegion, maxScript, tag.LangID) + if !beaten && m.regGroupDist != regGroupDist { + if regGroupDist > m.regGroupDist { + return + } + beaten = true + } + + paradigmReg := isParadigmLocale(tag.LangID, have.maxRegion) + if !beaten && m.paradigmReg != paradigmReg { + if !paradigmReg { + return + } + beaten = true + } + + // Next we prefer if the pre-maximized script was specified and identical. + origScript := have.tag.ScriptID == tag.ScriptID && tag.ScriptID != 0 + if !beaten && m.origScript != origScript { + if m.origScript { + return + } + beaten = true + } + + // Update m to the newly found best match. 
+ if beaten { + m.have = have + m.want = tag + m.conf = c + m.pinnedRegion = maxRegion + m.sameRegionGroup = sameGroup + m.origLang = origLang + m.origReg = origReg + m.paradigmReg = paradigmReg + m.origScript = origScript + m.regGroupDist = regGroupDist + } +} + +func isParadigmLocale(lang language.Language, r language.Region) bool { + for _, e := range paradigmLocales { + if language.Language(e[0]) == lang && (r == language.Region(e[1]) || r == language.Region(e[2])) { + return true + } + } + return false +} + +// regionGroupDist computes the distance between two regions based on their +// CLDR grouping. +func regionGroupDist(a, b language.Region, script language.Script, lang language.Language) (dist uint8, same bool) { + const defaultDistance = 4 + + aGroup := uint(regionToGroups[a]) << 1 + bGroup := uint(regionToGroups[b]) << 1 + for _, ri := range matchRegion { + if language.Language(ri.lang) == lang && (ri.script == 0 || language.Script(ri.script) == script) { + group := uint(1 << (ri.group &^ 0x80)) + if 0x80&ri.group == 0 { + if aGroup&bGroup&group != 0 { // Both regions are in the group. + return ri.distance, ri.distance == defaultDistance + } + } else { + if (aGroup|bGroup)&group == 0 { // Both regions are not in the group. + return ri.distance, ri.distance == defaultDistance + } + } + } + } + return defaultDistance, true +} + +// equalsRest compares everything except the language. +func equalsRest(a, b language.Tag) bool { + // TODO: don't include extensions in this comparison. To do this efficiently, + // though, we should handle private tags separately. + return a.ScriptID == b.ScriptID && a.RegionID == b.RegionID && a.VariantOrPrivateUseTags() == b.VariantOrPrivateUseTags() +} + +// isExactEquivalent returns true if canonicalizing the language will not alter +// the script or region of a tag. +func isExactEquivalent(l language.Language) bool { + for _, o := range notEquivalent { + if o == l { + return false + } + } + return true +} + +var notEquivalent []language.Language + +func init() { + // Create a list of all languages for which canonicalization may alter the + // script or region. + for _, lm := range language.AliasMap { + tag := language.Tag{LangID: language.Language(lm.From)} + if tag, _ = canonicalize(All, tag); tag.ScriptID != 0 || tag.RegionID != 0 { + notEquivalent = append(notEquivalent, language.Language(lm.From)) + } + } + // Maximize undefined regions of paradigm locales. + for i, v := range paradigmLocales { + t := language.Tag{LangID: language.Language(v[0])} + max, _ := t.Maximize() + if v[1] == 0 { + paradigmLocales[i][1] = uint16(max.RegionID) + } + if v[2] == 0 { + paradigmLocales[i][2] = uint16(max.RegionID) + } + } +} diff --git a/vendor/golang.org/x/text/language/parse.go b/vendor/golang.org/x/text/language/parse.go new file mode 100644 index 00000000000..4d57222e770 --- /dev/null +++ b/vendor/golang.org/x/text/language/parse.go @@ -0,0 +1,256 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package language + +import ( + "errors" + "sort" + "strconv" + "strings" + + "golang.org/x/text/internal/language" +) + +// ValueError is returned by any of the parsing functions when the +// input is well-formed but the respective subtag is not recognized +// as a valid value. +type ValueError interface { + error + + // Subtag returns the subtag for which the error occurred. 
+ Subtag() string +} + +// Parse parses the given BCP 47 string and returns a valid Tag. If parsing +// failed it returns an error and any part of the tag that could be parsed. +// If parsing succeeded but an unknown value was found, it returns +// ValueError. The Tag returned in this case is just stripped of the unknown +// value. All other values are preserved. It accepts tags in the BCP 47 format +// and extensions to this standard defined in +// https://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers. +// The resulting tag is canonicalized using the default canonicalization type. +func Parse(s string) (t Tag, err error) { + return Default.Parse(s) +} + +// Parse parses the given BCP 47 string and returns a valid Tag. If parsing +// failed it returns an error and any part of the tag that could be parsed. +// If parsing succeeded but an unknown value was found, it returns +// ValueError. The Tag returned in this case is just stripped of the unknown +// value. All other values are preserved. It accepts tags in the BCP 47 format +// and extensions to this standard defined in +// https://www.unicode.org/reports/tr35/#Unicode_Language_and_Locale_Identifiers. +// The resulting tag is canonicalized using the canonicalization type c. +func (c CanonType) Parse(s string) (t Tag, err error) { + defer func() { + if recover() != nil { + t = Tag{} + err = language.ErrSyntax + } + }() + + tt, err := language.Parse(s) + if err != nil { + return makeTag(tt), err + } + tt, changed := canonicalize(c, tt) + if changed { + tt.RemakeString() + } + return makeTag(tt), err +} + +// Compose creates a Tag from individual parts, which may be of type Tag, Base, +// Script, Region, Variant, []Variant, Extension, []Extension or error. If a +// Base, Script or Region or slice of type Variant or Extension is passed more +// than once, the latter will overwrite the former. Variants and Extensions are +// accumulated, but if two extensions of the same type are passed, the latter +// will replace the former. For -u extensions, though, the key-type pairs are +// added, where later values overwrite older ones. A Tag overwrites all former +// values and typically only makes sense as the first argument. The resulting +// tag is returned after canonicalizing using the Default CanonType. If one or +// more errors are encountered, one of the errors is returned. +func Compose(part ...interface{}) (t Tag, err error) { + return Default.Compose(part...) +} + +// Compose creates a Tag from individual parts, which may be of type Tag, Base, +// Script, Region, Variant, []Variant, Extension, []Extension or error. If a +// Base, Script or Region or slice of type Variant or Extension is passed more +// than once, the latter will overwrite the former. Variants and Extensions are +// accumulated, but if two extensions of the same type are passed, the latter +// will replace the former. For -u extensions, though, the key-type pairs are +// added, where later values overwrite older ones. A Tag overwrites all former +// values and typically only makes sense as the first argument. The resulting +// tag is returned after canonicalizing using CanonType c. If one or more errors +// are encountered, one of the errors is returned. 
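+//
+// Editor's illustration (a hedged sketch, not upstream text): composing a
+// Dutch tag with a Belgian region from parts:
+//
+//	base, _ := ParseBase("nl")
+//	region, _ := ParseRegion("BE")
+//	t, _ := Compose(base, region)
+//	// t is the tag nl-BE; a later Region argument would overwrite BE.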
+func (c CanonType) Compose(part ...interface{}) (t Tag, err error) { + defer func() { + if recover() != nil { + t = Tag{} + err = language.ErrSyntax + } + }() + + var b language.Builder + if err = update(&b, part...); err != nil { + return und, err + } + b.Tag, _ = canonicalize(c, b.Tag) + return makeTag(b.Make()), err +} + +var errInvalidArgument = errors.New("invalid Extension or Variant") + +func update(b *language.Builder, part ...interface{}) (err error) { + for _, x := range part { + switch v := x.(type) { + case Tag: + b.SetTag(v.tag()) + case Base: + b.Tag.LangID = v.langID + case Script: + b.Tag.ScriptID = v.scriptID + case Region: + b.Tag.RegionID = v.regionID + case Variant: + if v.variant == "" { + err = errInvalidArgument + break + } + b.AddVariant(v.variant) + case Extension: + if v.s == "" { + err = errInvalidArgument + break + } + b.SetExt(v.s) + case []Variant: + b.ClearVariants() + for _, v := range v { + b.AddVariant(v.variant) + } + case []Extension: + b.ClearExtensions() + for _, e := range v { + b.SetExt(e.s) + } + // TODO: support parsing of raw strings based on morphology or just extensions? + case error: + if v != nil { + err = v + } + } + } + return +} + +var errInvalidWeight = errors.New("ParseAcceptLanguage: invalid weight") +var errTagListTooLarge = errors.New("tag list exceeds max length") + +// ParseAcceptLanguage parses the contents of an Accept-Language header as +// defined in http://www.ietf.org/rfc/rfc2616.txt and returns a list of Tags and +// a list of corresponding quality weights. It is more permissive than RFC 2616 +// and may return non-nil slices even if the input is not valid. +// The Tags will be sorted by highest weight first and then by first occurrence. +// Tags with a weight of zero will be dropped. An error will be returned if the +// input could not be parsed. +func ParseAcceptLanguage(s string) (tag []Tag, q []float32, err error) { + defer func() { + if recover() != nil { + tag = nil + q = nil + err = language.ErrSyntax + } + }() + + if strings.Count(s, "-") > 1000 { + return nil, nil, errTagListTooLarge + } + + var entry string + for s != "" { + if entry, s = split(s, ','); entry == "" { + continue + } + + entry, weight := split(entry, ';') + + // Scan the language. + t, err := Parse(entry) + if err != nil { + id, ok := acceptFallback[entry] + if !ok { + return nil, nil, err + } + t = makeTag(language.Tag{LangID: id}) + } + + // Scan the optional weight. + w := 1.0 + if weight != "" { + weight = consume(weight, 'q') + weight = consume(weight, '=') + // consume returns the empty string when a token could not be + // consumed, resulting in an error for ParseFloat. + if w, err = strconv.ParseFloat(weight, 32); err != nil { + return nil, nil, errInvalidWeight + } + // Drop tags with a quality weight of 0. + if w <= 0 { + continue + } + } + + tag = append(tag, t) + q = append(q, float32(w)) + } + sort.Stable(&tagSort{tag, q}) + return tag, q, nil +} + +// consume removes a leading token c from s and returns the result or the empty +// string if there is no such token. +func consume(s string, c byte) string { + if s == "" || s[0] != c { + return "" + } + return strings.TrimSpace(s[1:]) +} + +func split(s string, c byte) (head, tail string) { + if i := strings.IndexByte(s, c); i >= 0 { + return strings.TrimSpace(s[:i]), strings.TrimSpace(s[i+1:]) + } + return strings.TrimSpace(s), "" +} + +// Add hack mapping to deal with a small number of cases that occur +// in Accept-Language (with reasonable frequency). 
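+// For example (editor's note), the entry "deutsch" is not a valid BCP 47
+// tag, but via this map ParseAcceptLanguage still yields the German tag:
+//
+//	tags, _, _ := ParseAcceptLanguage("deutsch, english;q=0.5")
+//	// tags is [de en], with weights 1.0 and 0.5.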
+var acceptFallback = map[string]language.Language{ + "english": _en, + "deutsch": _de, + "italian": _it, + "french": _fr, + "*": _mul, // defined in the spec to match all languages. +} + +type tagSort struct { + tag []Tag + q []float32 +} + +func (s *tagSort) Len() int { + return len(s.q) +} + +func (s *tagSort) Less(i, j int) bool { + return s.q[i] > s.q[j] +} + +func (s *tagSort) Swap(i, j int) { + s.tag[i], s.tag[j] = s.tag[j], s.tag[i] + s.q[i], s.q[j] = s.q[j], s.q[i] +} diff --git a/vendor/golang.org/x/text/language/tables.go b/vendor/golang.org/x/text/language/tables.go new file mode 100644 index 00000000000..a6573dcb215 --- /dev/null +++ b/vendor/golang.org/x/text/language/tables.go @@ -0,0 +1,298 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +package language + +// CLDRVersion is the CLDR version from which the tables in this package are derived. +const CLDRVersion = "32" + +const ( + _de = 269 + _en = 313 + _fr = 350 + _it = 505 + _mo = 784 + _no = 879 + _nb = 839 + _pt = 960 + _sh = 1031 + _mul = 806 + _und = 0 +) +const ( + _001 = 1 + _419 = 31 + _BR = 65 + _CA = 73 + _ES = 111 + _GB = 124 + _MD = 189 + _PT = 239 + _UK = 307 + _US = 310 + _ZZ = 358 + _XA = 324 + _XC = 326 + _XK = 334 +) +const ( + _Latn = 91 + _Hani = 57 + _Hans = 59 + _Hant = 60 + _Qaaa = 149 + _Qaai = 157 + _Qabx = 198 + _Zinh = 255 + _Zyyy = 260 + _Zzzz = 261 +) + +var regionToGroups = []uint8{ // 359 elements + // Entry 0 - 3F + 0x00, 0x00, 0x00, 0x04, 0x04, 0x00, 0x00, 0x04, + 0x00, 0x00, 0x00, 0x00, 0x04, 0x04, 0x04, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x04, 0x00, + 0x00, 0x04, 0x00, 0x00, 0x04, 0x01, 0x00, 0x00, + 0x04, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x04, 0x04, 0x00, 0x04, + // Entry 40 - 7F + 0x04, 0x04, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x04, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x04, 0x00, 0x00, 0x04, 0x00, 0x00, 0x04, + 0x00, 0x00, 0x04, 0x00, 0x04, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x04, 0x00, + 0x08, 0x00, 0x04, 0x00, 0x00, 0x08, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x04, + // Entry 80 - BF + 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x04, 0x00, + 0x00, 0x00, 0x04, 0x01, 0x00, 0x04, 0x02, 0x00, + 0x04, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x04, 0x00, + 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x08, 0x08, 0x00, 0x00, 0x00, 0x04, + // Entry C0 - FF + 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + 0x01, 0x04, 0x08, 0x04, 0x00, 0x00, 0x00, 0x00, + 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x04, 0x00, 0x04, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x04, 0x00, 0x05, 0x00, 0x00, + 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 100 - 13F + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + 0x00, 0x00, 0x00, 0x04, 0x04, 0x00, 0x00, 0x00, + 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x08, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x05, 0x04, + 0x00, 0x00, 0x04, 0x00, 0x04, 0x04, 0x05, 0x00, + // 
Entry 140 - 17F + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +} // Size: 383 bytes + +var paradigmLocales = [][3]uint16{ // 3 elements + 0: [3]uint16{0x139, 0x0, 0x7c}, + 1: [3]uint16{0x13e, 0x0, 0x1f}, + 2: [3]uint16{0x3c0, 0x41, 0xef}, +} // Size: 42 bytes + +type mutualIntelligibility struct { + want uint16 + have uint16 + distance uint8 + oneway bool +} +type scriptIntelligibility struct { + wantLang uint16 + haveLang uint16 + wantScript uint8 + haveScript uint8 + distance uint8 +} +type regionIntelligibility struct { + lang uint16 + script uint8 + group uint8 + distance uint8 +} + +// matchLang holds pairs of langIDs of base languages that are typically +// mutually intelligible. Each pair is associated with a confidence and +// whether the intelligibility goes one or both ways. +var matchLang = []mutualIntelligibility{ // 113 elements + 0: {want: 0x1d1, have: 0xb7, distance: 0x4, oneway: false}, + 1: {want: 0x407, have: 0xb7, distance: 0x4, oneway: false}, + 2: {want: 0x407, have: 0x1d1, distance: 0x4, oneway: false}, + 3: {want: 0x407, have: 0x432, distance: 0x4, oneway: false}, + 4: {want: 0x43a, have: 0x1, distance: 0x4, oneway: false}, + 5: {want: 0x1a3, have: 0x10d, distance: 0x4, oneway: true}, + 6: {want: 0x295, have: 0x10d, distance: 0x4, oneway: true}, + 7: {want: 0x101, have: 0x36f, distance: 0x8, oneway: false}, + 8: {want: 0x101, have: 0x347, distance: 0x8, oneway: false}, + 9: {want: 0x5, have: 0x3e2, distance: 0xa, oneway: true}, + 10: {want: 0xd, have: 0x139, distance: 0xa, oneway: true}, + 11: {want: 0x16, have: 0x367, distance: 0xa, oneway: true}, + 12: {want: 0x21, have: 0x139, distance: 0xa, oneway: true}, + 13: {want: 0x56, have: 0x13e, distance: 0xa, oneway: true}, + 14: {want: 0x58, have: 0x3e2, distance: 0xa, oneway: true}, + 15: {want: 0x71, have: 0x3e2, distance: 0xa, oneway: true}, + 16: {want: 0x75, have: 0x139, distance: 0xa, oneway: true}, + 17: {want: 0x82, have: 0x1be, distance: 0xa, oneway: true}, + 18: {want: 0xa5, have: 0x139, distance: 0xa, oneway: true}, + 19: {want: 0xb2, have: 0x15e, distance: 0xa, oneway: true}, + 20: {want: 0xdd, have: 0x153, distance: 0xa, oneway: true}, + 21: {want: 0xe5, have: 0x139, distance: 0xa, oneway: true}, + 22: {want: 0xe9, have: 0x3a, distance: 0xa, oneway: true}, + 23: {want: 0xf0, have: 0x15e, distance: 0xa, oneway: true}, + 24: {want: 0xf9, have: 0x15e, distance: 0xa, oneway: true}, + 25: {want: 0x100, have: 0x139, distance: 0xa, oneway: true}, + 26: {want: 0x130, have: 0x139, distance: 0xa, oneway: true}, + 27: {want: 0x13c, have: 0x139, distance: 0xa, oneway: true}, + 28: {want: 0x140, have: 0x151, distance: 0xa, oneway: true}, + 29: {want: 0x145, have: 0x13e, distance: 0xa, oneway: true}, + 30: {want: 0x158, have: 0x101, distance: 0xa, oneway: true}, + 31: {want: 0x16d, have: 0x367, distance: 0xa, oneway: true}, + 32: {want: 0x16e, have: 0x139, distance: 0xa, oneway: true}, + 33: {want: 0x16f, have: 0x139, distance: 0xa, oneway: true}, + 34: {want: 0x17e, have: 0x139, distance: 0xa, oneway: true}, + 35: {want: 0x190, have: 0x13e, distance: 0xa, oneway: true}, + 36: {want: 0x194, have: 0x13e, distance: 0xa, oneway: true}, + 37: {want: 0x1a4, have: 0x1be, distance: 0xa, oneway: true}, + 38: {want: 0x1b4, have: 0x139, distance: 0xa, oneway: true}, + 39: {want: 0x1b8, have: 0x139, distance: 0xa, oneway: true}, + 40: 
{want: 0x1d4, have: 0x15e, distance: 0xa, oneway: true}, + 41: {want: 0x1d7, have: 0x3e2, distance: 0xa, oneway: true}, + 42: {want: 0x1d9, have: 0x139, distance: 0xa, oneway: true}, + 43: {want: 0x1e7, have: 0x139, distance: 0xa, oneway: true}, + 44: {want: 0x1f8, have: 0x139, distance: 0xa, oneway: true}, + 45: {want: 0x20e, have: 0x1e1, distance: 0xa, oneway: true}, + 46: {want: 0x210, have: 0x139, distance: 0xa, oneway: true}, + 47: {want: 0x22d, have: 0x15e, distance: 0xa, oneway: true}, + 48: {want: 0x242, have: 0x3e2, distance: 0xa, oneway: true}, + 49: {want: 0x24a, have: 0x139, distance: 0xa, oneway: true}, + 50: {want: 0x251, have: 0x139, distance: 0xa, oneway: true}, + 51: {want: 0x265, have: 0x139, distance: 0xa, oneway: true}, + 52: {want: 0x274, have: 0x48a, distance: 0xa, oneway: true}, + 53: {want: 0x28a, have: 0x3e2, distance: 0xa, oneway: true}, + 54: {want: 0x28e, have: 0x1f9, distance: 0xa, oneway: true}, + 55: {want: 0x2a3, have: 0x139, distance: 0xa, oneway: true}, + 56: {want: 0x2b5, have: 0x15e, distance: 0xa, oneway: true}, + 57: {want: 0x2b8, have: 0x139, distance: 0xa, oneway: true}, + 58: {want: 0x2be, have: 0x139, distance: 0xa, oneway: true}, + 59: {want: 0x2c3, have: 0x15e, distance: 0xa, oneway: true}, + 60: {want: 0x2ed, have: 0x139, distance: 0xa, oneway: true}, + 61: {want: 0x2f1, have: 0x15e, distance: 0xa, oneway: true}, + 62: {want: 0x2fa, have: 0x139, distance: 0xa, oneway: true}, + 63: {want: 0x2ff, have: 0x7e, distance: 0xa, oneway: true}, + 64: {want: 0x304, have: 0x139, distance: 0xa, oneway: true}, + 65: {want: 0x30b, have: 0x3e2, distance: 0xa, oneway: true}, + 66: {want: 0x31b, have: 0x1be, distance: 0xa, oneway: true}, + 67: {want: 0x31f, have: 0x1e1, distance: 0xa, oneway: true}, + 68: {want: 0x320, have: 0x139, distance: 0xa, oneway: true}, + 69: {want: 0x331, have: 0x139, distance: 0xa, oneway: true}, + 70: {want: 0x351, have: 0x139, distance: 0xa, oneway: true}, + 71: {want: 0x36a, have: 0x347, distance: 0xa, oneway: false}, + 72: {want: 0x36a, have: 0x36f, distance: 0xa, oneway: true}, + 73: {want: 0x37a, have: 0x139, distance: 0xa, oneway: true}, + 74: {want: 0x387, have: 0x139, distance: 0xa, oneway: true}, + 75: {want: 0x389, have: 0x139, distance: 0xa, oneway: true}, + 76: {want: 0x38b, have: 0x15e, distance: 0xa, oneway: true}, + 77: {want: 0x390, have: 0x139, distance: 0xa, oneway: true}, + 78: {want: 0x395, have: 0x139, distance: 0xa, oneway: true}, + 79: {want: 0x39d, have: 0x139, distance: 0xa, oneway: true}, + 80: {want: 0x3a5, have: 0x139, distance: 0xa, oneway: true}, + 81: {want: 0x3be, have: 0x139, distance: 0xa, oneway: true}, + 82: {want: 0x3c4, have: 0x13e, distance: 0xa, oneway: true}, + 83: {want: 0x3d4, have: 0x10d, distance: 0xa, oneway: true}, + 84: {want: 0x3d9, have: 0x139, distance: 0xa, oneway: true}, + 85: {want: 0x3e5, have: 0x15e, distance: 0xa, oneway: true}, + 86: {want: 0x3e9, have: 0x1be, distance: 0xa, oneway: true}, + 87: {want: 0x3fa, have: 0x139, distance: 0xa, oneway: true}, + 88: {want: 0x40c, have: 0x139, distance: 0xa, oneway: true}, + 89: {want: 0x423, have: 0x139, distance: 0xa, oneway: true}, + 90: {want: 0x429, have: 0x139, distance: 0xa, oneway: true}, + 91: {want: 0x431, have: 0x139, distance: 0xa, oneway: true}, + 92: {want: 0x43b, have: 0x139, distance: 0xa, oneway: true}, + 93: {want: 0x43e, have: 0x1e1, distance: 0xa, oneway: true}, + 94: {want: 0x445, have: 0x139, distance: 0xa, oneway: true}, + 95: {want: 0x450, have: 0x139, distance: 0xa, oneway: true}, + 96: {want: 0x461, have: 0x139, 
distance: 0xa, oneway: true}, + 97: {want: 0x467, have: 0x3e2, distance: 0xa, oneway: true}, + 98: {want: 0x46f, have: 0x139, distance: 0xa, oneway: true}, + 99: {want: 0x476, have: 0x3e2, distance: 0xa, oneway: true}, + 100: {want: 0x3883, have: 0x139, distance: 0xa, oneway: true}, + 101: {want: 0x480, have: 0x139, distance: 0xa, oneway: true}, + 102: {want: 0x482, have: 0x139, distance: 0xa, oneway: true}, + 103: {want: 0x494, have: 0x3e2, distance: 0xa, oneway: true}, + 104: {want: 0x49d, have: 0x139, distance: 0xa, oneway: true}, + 105: {want: 0x4ac, have: 0x529, distance: 0xa, oneway: true}, + 106: {want: 0x4b4, have: 0x139, distance: 0xa, oneway: true}, + 107: {want: 0x4bc, have: 0x3e2, distance: 0xa, oneway: true}, + 108: {want: 0x4e5, have: 0x15e, distance: 0xa, oneway: true}, + 109: {want: 0x4f2, have: 0x139, distance: 0xa, oneway: true}, + 110: {want: 0x512, have: 0x139, distance: 0xa, oneway: true}, + 111: {want: 0x518, have: 0x139, distance: 0xa, oneway: true}, + 112: {want: 0x52f, have: 0x139, distance: 0xa, oneway: true}, +} // Size: 702 bytes + +// matchScript holds pairs of scriptIDs where readers of one script +// can typically also read the other. Each is associated with a confidence. +var matchScript = []scriptIntelligibility{ // 26 elements + 0: {wantLang: 0x432, haveLang: 0x432, wantScript: 0x5b, haveScript: 0x20, distance: 0x5}, + 1: {wantLang: 0x432, haveLang: 0x432, wantScript: 0x20, haveScript: 0x5b, distance: 0x5}, + 2: {wantLang: 0x58, haveLang: 0x3e2, wantScript: 0x5b, haveScript: 0x20, distance: 0xa}, + 3: {wantLang: 0xa5, haveLang: 0x139, wantScript: 0xe, haveScript: 0x5b, distance: 0xa}, + 4: {wantLang: 0x1d7, haveLang: 0x3e2, wantScript: 0x8, haveScript: 0x20, distance: 0xa}, + 5: {wantLang: 0x210, haveLang: 0x139, wantScript: 0x2e, haveScript: 0x5b, distance: 0xa}, + 6: {wantLang: 0x24a, haveLang: 0x139, wantScript: 0x4f, haveScript: 0x5b, distance: 0xa}, + 7: {wantLang: 0x251, haveLang: 0x139, wantScript: 0x53, haveScript: 0x5b, distance: 0xa}, + 8: {wantLang: 0x2b8, haveLang: 0x139, wantScript: 0x58, haveScript: 0x5b, distance: 0xa}, + 9: {wantLang: 0x304, haveLang: 0x139, wantScript: 0x6f, haveScript: 0x5b, distance: 0xa}, + 10: {wantLang: 0x331, haveLang: 0x139, wantScript: 0x76, haveScript: 0x5b, distance: 0xa}, + 11: {wantLang: 0x351, haveLang: 0x139, wantScript: 0x22, haveScript: 0x5b, distance: 0xa}, + 12: {wantLang: 0x395, haveLang: 0x139, wantScript: 0x83, haveScript: 0x5b, distance: 0xa}, + 13: {wantLang: 0x39d, haveLang: 0x139, wantScript: 0x36, haveScript: 0x5b, distance: 0xa}, + 14: {wantLang: 0x3be, haveLang: 0x139, wantScript: 0x5, haveScript: 0x5b, distance: 0xa}, + 15: {wantLang: 0x3fa, haveLang: 0x139, wantScript: 0x5, haveScript: 0x5b, distance: 0xa}, + 16: {wantLang: 0x40c, haveLang: 0x139, wantScript: 0xd6, haveScript: 0x5b, distance: 0xa}, + 17: {wantLang: 0x450, haveLang: 0x139, wantScript: 0xe6, haveScript: 0x5b, distance: 0xa}, + 18: {wantLang: 0x461, haveLang: 0x139, wantScript: 0xe9, haveScript: 0x5b, distance: 0xa}, + 19: {wantLang: 0x46f, haveLang: 0x139, wantScript: 0x2c, haveScript: 0x5b, distance: 0xa}, + 20: {wantLang: 0x476, haveLang: 0x3e2, wantScript: 0x5b, haveScript: 0x20, distance: 0xa}, + 21: {wantLang: 0x4b4, haveLang: 0x139, wantScript: 0x5, haveScript: 0x5b, distance: 0xa}, + 22: {wantLang: 0x4bc, haveLang: 0x3e2, wantScript: 0x5b, haveScript: 0x20, distance: 0xa}, + 23: {wantLang: 0x512, haveLang: 0x139, wantScript: 0x3e, haveScript: 0x5b, distance: 0xa}, + 24: {wantLang: 0x529, haveLang: 0x529, wantScript: 0x3b, 
haveScript: 0x3c, distance: 0xf},
+	25: {wantLang: 0x529, haveLang: 0x529, wantScript: 0x3c, haveScript: 0x3b, distance: 0x13},
+} // Size: 232 bytes
+
+var matchRegion = []regionIntelligibility{ // 15 elements
+	0: {lang: 0x3a, script: 0x0, group: 0x4, distance: 0x4},
+	1: {lang: 0x3a, script: 0x0, group: 0x84, distance: 0x4},
+	2: {lang: 0x139, script: 0x0, group: 0x1, distance: 0x4},
+	3: {lang: 0x139, script: 0x0, group: 0x81, distance: 0x4},
+	4: {lang: 0x13e, script: 0x0, group: 0x3, distance: 0x4},
+	5: {lang: 0x13e, script: 0x0, group: 0x83, distance: 0x4},
+	6: {lang: 0x3c0, script: 0x0, group: 0x3, distance: 0x4},
+	7: {lang: 0x3c0, script: 0x0, group: 0x83, distance: 0x4},
+	8: {lang: 0x529, script: 0x3c, group: 0x2, distance: 0x4},
+	9: {lang: 0x529, script: 0x3c, group: 0x82, distance: 0x4},
+	10: {lang: 0x3a, script: 0x0, group: 0x80, distance: 0x5},
+	11: {lang: 0x139, script: 0x0, group: 0x80, distance: 0x5},
+	12: {lang: 0x13e, script: 0x0, group: 0x80, distance: 0x5},
+	13: {lang: 0x3c0, script: 0x0, group: 0x80, distance: 0x5},
+	14: {lang: 0x529, script: 0x3c, group: 0x80, distance: 0x5},
+} // Size: 114 bytes
+
+// Total table size 1473 bytes (1KiB); checksum: 7BB90B5C
diff --git a/vendor/golang.org/x/text/language/tags.go b/vendor/golang.org/x/text/language/tags.go
new file mode 100644
index 00000000000..42ea7926660
--- /dev/null
+++ b/vendor/golang.org/x/text/language/tags.go
@@ -0,0 +1,145 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package language
+
+import "golang.org/x/text/internal/language/compact"
+
+// TODO: Various sets of commonly used tags and regions.
+
+// MustParse is like Parse, but panics if the given BCP 47 tag cannot be parsed.
+// It simplifies safe initialization of Tag values.
+func MustParse(s string) Tag {
+	t, err := Parse(s)
+	if err != nil {
+		panic(err)
+	}
+	return t
+}
+
+// MustParse is like Parse, but panics if the given BCP 47 tag cannot be parsed.
+// It simplifies safe initialization of Tag values.
+func (c CanonType) MustParse(s string) Tag {
+	t, err := c.Parse(s)
+	if err != nil {
+		panic(err)
+	}
+	return t
+}
+
+// MustParseBase is like ParseBase, but panics if the given base cannot be parsed.
+// It simplifies safe initialization of Base values.
+func MustParseBase(s string) Base {
+	b, err := ParseBase(s)
+	if err != nil {
+		panic(err)
+	}
+	return b
+}
+
+// MustParseScript is like ParseScript, but panics if the given script cannot be
+// parsed. It simplifies safe initialization of Script values.
+func MustParseScript(s string) Script {
+	scr, err := ParseScript(s)
+	if err != nil {
+		panic(err)
+	}
+	return scr
+}
+
+// MustParseRegion is like ParseRegion, but panics if the given region cannot be
+// parsed. It simplifies safe initialization of Region values.
+func MustParseRegion(s string) Region { + r, err := ParseRegion(s) + if err != nil { + panic(err) + } + return r +} + +var ( + und = Tag{} + + Und Tag = Tag{} + + Afrikaans Tag = Tag(compact.Afrikaans) + Amharic Tag = Tag(compact.Amharic) + Arabic Tag = Tag(compact.Arabic) + ModernStandardArabic Tag = Tag(compact.ModernStandardArabic) + Azerbaijani Tag = Tag(compact.Azerbaijani) + Bulgarian Tag = Tag(compact.Bulgarian) + Bengali Tag = Tag(compact.Bengali) + Catalan Tag = Tag(compact.Catalan) + Czech Tag = Tag(compact.Czech) + Danish Tag = Tag(compact.Danish) + German Tag = Tag(compact.German) + Greek Tag = Tag(compact.Greek) + English Tag = Tag(compact.English) + AmericanEnglish Tag = Tag(compact.AmericanEnglish) + BritishEnglish Tag = Tag(compact.BritishEnglish) + Spanish Tag = Tag(compact.Spanish) + EuropeanSpanish Tag = Tag(compact.EuropeanSpanish) + LatinAmericanSpanish Tag = Tag(compact.LatinAmericanSpanish) + Estonian Tag = Tag(compact.Estonian) + Persian Tag = Tag(compact.Persian) + Finnish Tag = Tag(compact.Finnish) + Filipino Tag = Tag(compact.Filipino) + French Tag = Tag(compact.French) + CanadianFrench Tag = Tag(compact.CanadianFrench) + Gujarati Tag = Tag(compact.Gujarati) + Hebrew Tag = Tag(compact.Hebrew) + Hindi Tag = Tag(compact.Hindi) + Croatian Tag = Tag(compact.Croatian) + Hungarian Tag = Tag(compact.Hungarian) + Armenian Tag = Tag(compact.Armenian) + Indonesian Tag = Tag(compact.Indonesian) + Icelandic Tag = Tag(compact.Icelandic) + Italian Tag = Tag(compact.Italian) + Japanese Tag = Tag(compact.Japanese) + Georgian Tag = Tag(compact.Georgian) + Kazakh Tag = Tag(compact.Kazakh) + Khmer Tag = Tag(compact.Khmer) + Kannada Tag = Tag(compact.Kannada) + Korean Tag = Tag(compact.Korean) + Kirghiz Tag = Tag(compact.Kirghiz) + Lao Tag = Tag(compact.Lao) + Lithuanian Tag = Tag(compact.Lithuanian) + Latvian Tag = Tag(compact.Latvian) + Macedonian Tag = Tag(compact.Macedonian) + Malayalam Tag = Tag(compact.Malayalam) + Mongolian Tag = Tag(compact.Mongolian) + Marathi Tag = Tag(compact.Marathi) + Malay Tag = Tag(compact.Malay) + Burmese Tag = Tag(compact.Burmese) + Nepali Tag = Tag(compact.Nepali) + Dutch Tag = Tag(compact.Dutch) + Norwegian Tag = Tag(compact.Norwegian) + Punjabi Tag = Tag(compact.Punjabi) + Polish Tag = Tag(compact.Polish) + Portuguese Tag = Tag(compact.Portuguese) + BrazilianPortuguese Tag = Tag(compact.BrazilianPortuguese) + EuropeanPortuguese Tag = Tag(compact.EuropeanPortuguese) + Romanian Tag = Tag(compact.Romanian) + Russian Tag = Tag(compact.Russian) + Sinhala Tag = Tag(compact.Sinhala) + Slovak Tag = Tag(compact.Slovak) + Slovenian Tag = Tag(compact.Slovenian) + Albanian Tag = Tag(compact.Albanian) + Serbian Tag = Tag(compact.Serbian) + SerbianLatin Tag = Tag(compact.SerbianLatin) + Swedish Tag = Tag(compact.Swedish) + Swahili Tag = Tag(compact.Swahili) + Tamil Tag = Tag(compact.Tamil) + Telugu Tag = Tag(compact.Telugu) + Thai Tag = Tag(compact.Thai) + Turkish Tag = Tag(compact.Turkish) + Ukrainian Tag = Tag(compact.Ukrainian) + Urdu Tag = Tag(compact.Urdu) + Uzbek Tag = Tag(compact.Uzbek) + Vietnamese Tag = Tag(compact.Vietnamese) + Chinese Tag = Tag(compact.Chinese) + SimplifiedChinese Tag = Tag(compact.SimplifiedChinese) + TraditionalChinese Tag = Tag(compact.TraditionalChinese) + Zulu Tag = Tag(compact.Zulu) +) diff --git a/vendor/golang.org/x/tools/LICENSE b/vendor/golang.org/x/tools/LICENSE index 6a66aea5eaf..2a7cf70da6e 100644 --- a/vendor/golang.org/x/tools/LICENSE +++ b/vendor/golang.org/x/tools/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go 
Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go index 137cc8df1d8..f3ab0a2e126 100644 --- a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go +++ b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go @@ -2,22 +2,64 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package gcexportdata provides functions for locating, reading, and -// writing export data files containing type information produced by the -// gc compiler. This package supports go1.7 export data format and all -// later versions. -// -// Although it might seem convenient for this package to live alongside -// go/types in the standard library, this would cause version skew -// problems for developer tools that use it, since they must be able to -// consume the outputs of the gc compiler both before and after a Go -// update such as from Go 1.7 to Go 1.8. Because this package lives in -// golang.org/x/tools, sites can update their version of this repo some -// time before the Go 1.8 release and rebuild and redeploy their -// developer tools, which will then be able to consume both Go 1.7 and -// Go 1.8 export data files, so they will work before and after the -// Go update. (See discussion at https://golang.org/issue/15651.) -package gcexportdata // import "golang.org/x/tools/go/gcexportdata" +// Package gcexportdata provides functions for reading and writing +// export data, which is a serialized description of the API of a Go +// package including the names, kinds, types, and locations of all +// exported declarations. +// +// The standard Go compiler (cmd/compile) writes an export data file +// for each package it compiles, which it later reads when compiling +// packages that import the earlier one. The compiler must thus +// contain logic to both write and read export data. +// (See the "Export" section in the cmd/compile/README file.) +// +// The [Read] function in this package can read files produced by the +// compiler, producing [go/types] data structures. As a matter of +// policy, Read supports export data files produced by only the last +// two Go releases plus tip; see https://go.dev/issue/68898. The +// export data files produced by the compiler contain additional +// details related to generics, inlining, and other optimizations that +// cannot be decoded by the [Read] function. +// +// In files written by the compiler, the export data is not at the +// start of the file. Before calling Read, use [NewReader] to locate +// the desired portion of the file. +// +// The [Write] function in this package encodes the exported API of a +// Go package ([types.Package]) as a file. Such files can be later +// decoded by Read, but cannot be consumed by the compiler. 
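+//
+// Editor's sketch (not upstream text; the archive path is hypothetical and
+// error handling is elided): locating and decoding export data from a
+// compiler-produced file:
+//
+//	f, _ := os.Open("/path/to/pkg.a")
+//	r, _ := gcexportdata.NewReader(f)
+//	fset := token.NewFileSet()
+//	imports := make(map[string]*types.Package)
+//	pkg, _ := gcexportdata.Read(r, fset, imports, "example.com/m/pkg")
+//	_ = pkg // a *types.Package describing the exported API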
+// +// # Future changes +// +// Although Read supports the formats written by both Write and the +// compiler, the two are quite different, and there is an open +// proposal (https://go.dev/issue/69491) to separate these APIs. +// +// Under that proposal, this package would ultimately provide only the +// Read operation for compiler export data, which must be defined in +// this module (golang.org/x/tools), not in the standard library, to +// avoid version skew for developer tools that need to read compiler +// export data both before and after a Go release, such as from Go +// 1.23 to Go 1.24. Because this package lives in the tools module, +// clients can update their version of the module some time before the +// Go 1.24 release and rebuild and redeploy their tools, which will +// then be able to consume both Go 1.23 and Go 1.24 export data files, +// so they will work before and after the Go update. (See discussion +// at https://go.dev/issue/15651.) +// +// The operations to import and export [go/types] data structures +// would be defined in the go/types package as Import and Export. +// [Write] would (eventually) delegate to Export, +// and [Read], when it detects a file produced by Export, +// would delegate to Import. +// +// # Deprecations +// +// The [NewImporter] and [Find] functions are deprecated and should +// not be used in new code. The [WriteBundle] and [ReadBundle] +// functions are experimental, and there is an open proposal to +// deprecate them (https://go.dev/issue/69573). +package gcexportdata import ( "bufio" @@ -100,6 +142,11 @@ func readAll(r io.Reader) ([]byte, error) { // Read reads export data from in, decodes it, and returns type // information for the package. // +// Read is capable of reading export data produced by [Write] at the +// same source code version, or by the last two Go releases (plus tip) +// of the standard Go compiler. Reading files from older compilers may +// produce an error. +// // The package path (effectively its linker symbol prefix) is // specified by path, since unlike the package name, this information // may not be recorded in the export data. @@ -128,14 +175,26 @@ func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package, // (from "version"). Select appropriate importer. if len(data) > 0 { switch data[0] { - case 'v', 'c', 'd': // binary, till go1.10 + case 'v', 'c', 'd': + // binary, produced by cmd/compile till go1.10 return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0]) - case 'i': // indexed, till go1.19 + case 'i': + // indexed, produced by cmd/compile till go1.19, + // and also by [Write]. + // + // If proposal #69491 is accepted, go/types + // serialization will be implemented by + // types.Export, to which Write would eventually + // delegate (explicitly dropping any pretence at + // inter-version Write-Read compatibility). + // This [Read] function would delegate to types.Import + // when it detects that the file was produced by Export. 
_, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path) return pkg, err - case 'u': // unified, from go1.20 + case 'u': + // unified, produced by cmd/compile since go1.20 _, pkg, err := gcimporter.UImportData(fset, imports, data[1:], path) return pkg, err diff --git a/vendor/golang.org/x/tools/go/packages/doc.go b/vendor/golang.org/x/tools/go/packages/doc.go index 3531ac8f5fc..f1931d10eeb 100644 --- a/vendor/golang.org/x/tools/go/packages/doc.go +++ b/vendor/golang.org/x/tools/go/packages/doc.go @@ -64,7 +64,7 @@ graph using the Imports fields. The Load function can be configured by passing a pointer to a Config as the first argument. A nil Config is equivalent to the zero Config, which -causes Load to run in LoadFiles mode, collecting minimal information. +causes Load to run in [LoadFiles] mode, collecting minimal information. See the documentation for type Config for details. As noted earlier, the Config.Mode controls the amount of detail @@ -72,14 +72,14 @@ reported about the loaded packages. See the documentation for type LoadMode for details. Most tools should pass their command-line arguments (after any flags) -uninterpreted to [Load], so that it can interpret them +uninterpreted to Load, so that it can interpret them according to the conventions of the underlying build system. See the Example function for typical usage. # The driver protocol -[Load] may be used to load Go packages even in Go projects that use +Load may be used to load Go packages even in Go projects that use alternative build systems, by installing an appropriate "driver" program for the build system and specifying its location in the GOPACKAGESDRIVER environment variable. @@ -97,6 +97,15 @@ JSON-encoded [DriverRequest] message providing additional information is written to the driver's standard input. The driver must write a JSON-encoded [DriverResponse] message to its standard output. (This message differs from the JSON schema produced by 'go list'.) + +The value of the PWD environment variable seen by the driver process +is the preferred name of its working directory. (The working directory +may have other aliases due to symbolic links; see the comment on the +Dir field of [exec.Cmd] for related information.) +When the driver process emits in its response the name of a file +that is a descendant of this directory, it must use an absolute path +that has the value of PWD as a prefix, to ensure that the returned +filenames satisfy the original query. */ package packages // import "golang.org/x/tools/go/packages" diff --git a/vendor/golang.org/x/tools/go/packages/external.go b/vendor/golang.org/x/tools/go/packages/external.go index c2b4b711b59..96db9daf314 100644 --- a/vendor/golang.org/x/tools/go/packages/external.go +++ b/vendor/golang.org/x/tools/go/packages/external.go @@ -79,10 +79,10 @@ type DriverResponse struct { // driver is the type for functions that query the build system for the // packages named by the patterns. -type driver func(cfg *Config, patterns ...string) (*DriverResponse, error) +type driver func(cfg *Config, patterns []string) (*DriverResponse, error) // findExternalDriver returns the file path of a tool that supplies -// the build system package structure, or "" if not found." +// the build system package structure, or "" if not found. // If GOPACKAGESDRIVER is set in the environment findExternalTool returns its // value, otherwise it searches for a binary named gopackagesdriver on the PATH. 
func findExternalDriver(cfg *Config) driver { @@ -103,7 +103,7 @@ func findExternalDriver(cfg *Config) driver { return nil } } - return func(cfg *Config, words ...string) (*DriverResponse, error) { + return func(cfg *Config, patterns []string) (*DriverResponse, error) { req, err := json.Marshal(DriverRequest{ Mode: cfg.Mode, Env: cfg.Env, @@ -117,7 +117,7 @@ func findExternalDriver(cfg *Config) driver { buf := new(bytes.Buffer) stderr := new(bytes.Buffer) - cmd := exec.CommandContext(cfg.Context, tool, words...) + cmd := exec.CommandContext(cfg.Context, tool, patterns...) cmd.Dir = cfg.Dir // The cwd gets resolved to the real path. On Darwin, where // /tmp is a symlink, this breaks anything that expects the diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go index 1a3a5b44f5c..76f910ecec9 100644 --- a/vendor/golang.org/x/tools/go/packages/golist.go +++ b/vendor/golang.org/x/tools/go/packages/golist.go @@ -80,6 +80,12 @@ type golistState struct { cfg *Config ctx context.Context + runner *gocommand.Runner + + // overlay is the JSON file that encodes the Config.Overlay + // mapping, used by 'go list -overlay=...'. + overlay string + envOnce sync.Once goEnvError error goEnv map[string]string @@ -127,7 +133,10 @@ func (state *golistState) mustGetEnv() map[string]string { // goListDriver uses the go list command to interpret the patterns and produce // the build system package structure. // See driver for more details. -func goListDriver(cfg *Config, patterns ...string) (_ *DriverResponse, err error) { +// +// overlay is the JSON file that encodes the cfg.Overlay +// mapping, used by 'go list -overlay=...' +func goListDriver(cfg *Config, runner *gocommand.Runner, overlay string, patterns []string) (_ *DriverResponse, err error) { // Make sure that any asynchronous go commands are killed when we return. parentCtx := cfg.Context if parentCtx == nil { @@ -142,13 +151,15 @@ func goListDriver(cfg *Config, patterns ...string) (_ *DriverResponse, err error cfg: cfg, ctx: ctx, vendorDirs: map[string]bool{}, + overlay: overlay, + runner: runner, } // Fill in response.Sizes asynchronously if necessary. - if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&NeedTypes != 0 { + if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&(NeedTypes|NeedTypesInfo) != 0 { errCh := make(chan error) go func() { - compiler, arch, err := getSizesForArgs(ctx, state.cfgInvocation(), cfg.gocmdRunner) + compiler, arch, err := getSizesForArgs(ctx, state.cfgInvocation(), runner) response.dr.Compiler = compiler response.dr.Arch = arch errCh <- err @@ -681,7 +692,7 @@ func (state *golistState) shouldAddFilenameFromError(p *jsonPackage) bool { // getGoVersion returns the effective minor version of the go command. 
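As a usage note for the driver lookup above: to my recollection the special value "off" suppresses the external driver entirely, so a caller can force the standard 'go list' behavior for a single Load by overriding the environment. A sketch, assuming that "off" convention:

```go
package main

import (
	"fmt"
	"os"

	"golang.org/x/tools/go/packages"
)

func main() {
	cfg := &packages.Config{
		Mode: packages.NeedName,
		// Assumed convention: GOPACKAGESDRIVER=off disables the
		// external-driver lookup even if one is on the PATH.
		Env: append(os.Environ(), "GOPACKAGESDRIVER=off"),
	}
	pkgs, err := packages.Load(cfg, "fmt")
	if err != nil {
		panic(err)
	}
	fmt.Println(pkgs[0].PkgPath)
}
```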
func (state *golistState) getGoVersion() (int, error) { state.goVersionOnce.Do(func() { - state.goVersion, state.goVersionError = gocommand.GoVersion(state.ctx, state.cfgInvocation(), state.cfg.gocmdRunner) + state.goVersion, state.goVersionError = gocommand.GoVersion(state.ctx, state.cfgInvocation(), state.runner) }) return state.goVersion, state.goVersionError } @@ -751,7 +762,7 @@ func jsonFlag(cfg *Config, goVersion int) string { } } addFields("Name", "ImportPath", "Error") // These fields are always needed - if cfg.Mode&NeedFiles != 0 || cfg.Mode&NeedTypes != 0 { + if cfg.Mode&NeedFiles != 0 || cfg.Mode&(NeedTypes|NeedTypesInfo) != 0 { addFields("Dir", "GoFiles", "IgnoredGoFiles", "IgnoredOtherFiles", "CFiles", "CgoFiles", "CXXFiles", "MFiles", "HFiles", "FFiles", "SFiles", "SwigFiles", "SwigCXXFiles", "SysoFiles") @@ -759,7 +770,7 @@ func jsonFlag(cfg *Config, goVersion int) string { addFields("TestGoFiles", "XTestGoFiles") } } - if cfg.Mode&NeedTypes != 0 { + if cfg.Mode&(NeedTypes|NeedTypesInfo) != 0 { // CompiledGoFiles seems to be required for the test case TestCgoNoSyntax, // even when -compiled isn't passed in. // TODO(#52435): Should we make the test ask for -compiled, or automatically @@ -840,7 +851,7 @@ func (state *golistState) cfgInvocation() gocommand.Invocation { Env: cfg.Env, Logf: cfg.Logf, WorkingDir: cfg.Dir, - Overlay: cfg.goListOverlayFile, + Overlay: state.overlay, } } @@ -851,11 +862,8 @@ func (state *golistState) invokeGo(verb string, args ...string) (*bytes.Buffer, inv := state.cfgInvocation() inv.Verb = verb inv.Args = args - gocmdRunner := cfg.gocmdRunner - if gocmdRunner == nil { - gocmdRunner = &gocommand.Runner{} - } - stdout, stderr, friendlyErr, err := gocmdRunner.RunRaw(cfg.Context, inv) + + stdout, stderr, friendlyErr, err := state.runner.RunRaw(cfg.Context, inv) if err != nil { // Check for 'go' executable not being found. if ee, ok := err.(*exec.Error); ok && ee.Err == exec.ErrNotFound { @@ -879,6 +887,12 @@ func (state *golistState) invokeGo(verb string, args ...string) (*bytes.Buffer, return nil, friendlyErr } + // Return an error if 'go list' failed due to missing tools in + // $GOROOT/pkg/tool/$GOOS_$GOARCH (#69606). + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), `go: no such tool`) { + return nil, friendlyErr + } + // Is there an error running the C compiler in cgo? This will be reported in the "Error" field // and should be suppressed by go list -e. 
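The overlay file threaded through golistState above originates from Config.Overlay. A minimal sketch of how a caller supplies one (the directory and file path are assumptions):

```go
package main

import (
	"fmt"

	"golang.org/x/tools/go/packages"
)

func main() {
	cfg := &packages.Config{
		Mode: packages.NeedName | packages.NeedFiles,
		Dir:  "/home/user/proj", // hypothetical module directory
		Overlay: map[string][]byte{
			// Load sees this content instead of the file on disk.
			"/home/user/proj/main.go": []byte("package main\nfunc main() {}\n"),
		},
	}
	pkgs, err := packages.Load(cfg, ".")
	if err != nil {
		panic(err)
	}
	for _, p := range pkgs {
		fmt.Println(p.Name, p.GoFiles)
	}
}
```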
// diff --git a/vendor/golang.org/x/tools/go/packages/loadmode_string.go b/vendor/golang.org/x/tools/go/packages/loadmode_string.go index 5c080d21b54..5fcad6ea6db 100644 --- a/vendor/golang.org/x/tools/go/packages/loadmode_string.go +++ b/vendor/golang.org/x/tools/go/packages/loadmode_string.go @@ -9,49 +9,46 @@ import ( "strings" ) -var allModes = []LoadMode{ - NeedName, - NeedFiles, - NeedCompiledGoFiles, - NeedImports, - NeedDeps, - NeedExportFile, - NeedTypes, - NeedSyntax, - NeedTypesInfo, - NeedTypesSizes, +var modes = [...]struct { + mode LoadMode + name string +}{ + {NeedName, "NeedName"}, + {NeedFiles, "NeedFiles"}, + {NeedCompiledGoFiles, "NeedCompiledGoFiles"}, + {NeedImports, "NeedImports"}, + {NeedDeps, "NeedDeps"}, + {NeedExportFile, "NeedExportFile"}, + {NeedTypes, "NeedTypes"}, + {NeedSyntax, "NeedSyntax"}, + {NeedTypesInfo, "NeedTypesInfo"}, + {NeedTypesSizes, "NeedTypesSizes"}, + {NeedModule, "NeedModule"}, + {NeedEmbedFiles, "NeedEmbedFiles"}, + {NeedEmbedPatterns, "NeedEmbedPatterns"}, } -var modeStrings = []string{ - "NeedName", - "NeedFiles", - "NeedCompiledGoFiles", - "NeedImports", - "NeedDeps", - "NeedExportFile", - "NeedTypes", - "NeedSyntax", - "NeedTypesInfo", - "NeedTypesSizes", -} - -func (mod LoadMode) String() string { - m := mod - if m == 0 { +func (mode LoadMode) String() string { + if mode == 0 { return "LoadMode(0)" } var out []string - for i, x := range allModes { - if x > m { - break + // named bits + for _, item := range modes { + if (mode & item.mode) != 0 { + mode ^= item.mode + out = append(out, item.name) } - if (m & x) != 0 { - out = append(out, modeStrings[i]) - m = m ^ x + } + // unnamed residue + if mode != 0 { + if out == nil { + return fmt.Sprintf("LoadMode(%#x)", int(mode)) } + out = append(out, fmt.Sprintf("%#x", int(mode))) } - if m != 0 { - out = append(out, "Unknown") + if len(out) == 1 { + return out[0] } - return fmt.Sprintf("LoadMode(%s)", strings.Join(out, "|")) + return "(" + strings.Join(out, "|") + ")" } diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go index 34306ddd390..2ecc64238e8 100644 --- a/vendor/golang.org/x/tools/go/packages/packages.go +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -16,13 +16,13 @@ import ( "go/scanner" "go/token" "go/types" - "io" "log" "os" "path/filepath" "runtime" "strings" "sync" + "sync/atomic" "time" "golang.org/x/sync/errgroup" @@ -31,7 +31,6 @@ import ( "golang.org/x/tools/internal/gocommand" "golang.org/x/tools/internal/packagesinternal" "golang.org/x/tools/internal/typesinternal" - "golang.org/x/tools/internal/versions" ) // A LoadMode controls the amount of detail to return when loading. @@ -46,18 +45,17 @@ import ( // // Unfortunately there are a number of open bugs related to // interactions among the LoadMode bits: -// - https://github.com/golang/go/issues/48226 -// - https://github.com/golang/go/issues/56633 -// - https://github.com/golang/go/issues/56677 -// - https://github.com/golang/go/issues/58726 -// - https://github.com/golang/go/issues/63517 +// - https://github.com/golang/go/issues/56633 +// - https://github.com/golang/go/issues/56677 +// - https://github.com/golang/go/issues/58726 +// - https://github.com/golang/go/issues/63517 type LoadMode int const ( // NeedName adds Name and PkgPath. NeedName LoadMode = 1 << iota - // NeedFiles adds GoFiles and OtherFiles. + // NeedFiles adds GoFiles, OtherFiles, and IgnoredFiles NeedFiles // NeedCompiledGoFiles adds CompiledGoFiles. 
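Given the rewritten String method above, a quick sketch of the strings it now produces (expected outputs follow from the logic shown):

```go
package main

import (
	"fmt"

	"golang.org/x/tools/go/packages"
)

func main() {
	fmt.Println(packages.NeedName)                      // NeedName
	fmt.Println(packages.NeedName | packages.NeedFiles) // (NeedName|NeedFiles)
	fmt.Println(packages.LoadMode(0))                   // LoadMode(0)
	fmt.Println(packages.LoadMode(1 << 30))             // LoadMode(0x40000000), an unnamed bit
}
```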
@@ -76,10 +74,10 @@ const ( // NeedTypes adds Types, Fset, and IllTyped. NeedTypes - // NeedSyntax adds Syntax. + // NeedSyntax adds Syntax and Fset. NeedSyntax - // NeedTypesInfo adds TypesInfo. + // NeedTypesInfo adds TypesInfo and Fset. NeedTypesInfo // NeedTypesSizes adds TypesSizes. @@ -104,25 +102,37 @@ const ( // NeedEmbedPatterns adds EmbedPatterns. NeedEmbedPatterns + + // Be sure to update loadmode_string.go when adding new items! ) const ( + // LoadFiles loads the name and file names for the initial packages. + // // Deprecated: LoadFiles exists for historical compatibility // and should not be used. Please directly specify the needed fields using the Need values. LoadFiles = NeedName | NeedFiles | NeedCompiledGoFiles + // LoadImports loads the name, file names, and import mapping for the initial packages. + // // Deprecated: LoadImports exists for historical compatibility // and should not be used. Please directly specify the needed fields using the Need values. LoadImports = LoadFiles | NeedImports + // LoadTypes loads exported type information for the initial packages. + // // Deprecated: LoadTypes exists for historical compatibility // and should not be used. Please directly specify the needed fields using the Need values. LoadTypes = LoadImports | NeedTypes | NeedTypesSizes + // LoadSyntax loads typed syntax for the initial packages. + // // Deprecated: LoadSyntax exists for historical compatibility // and should not be used. Please directly specify the needed fields using the Need values. LoadSyntax = LoadTypes | NeedSyntax | NeedTypesInfo + // LoadAllSyntax loads typed syntax for the initial packages and all dependencies. + // // Deprecated: LoadAllSyntax exists for historical compatibility // and should not be used. Please directly specify the needed fields using the Need values. LoadAllSyntax = LoadSyntax | NeedDeps @@ -134,13 +144,7 @@ const ( // A Config specifies details about how packages should be loaded. // The zero value is a valid configuration. // -// Calls to Load do not modify this struct. -// -// TODO(adonovan): #67702: this is currently false: in fact, -// calls to [Load] do not modify the public fields of this struct, but -// may modify hidden fields, so concurrent calls to [Load] must not -// use the same Config. But perhaps we should reestablish the -// documented invariant. +// Calls to [Load] do not modify this struct. type Config struct { // Mode controls the level of information returned for each package. Mode LoadMode @@ -171,19 +175,10 @@ type Config struct { // Env []string - // gocmdRunner guards go command calls from concurrency errors. - gocmdRunner *gocommand.Runner - // BuildFlags is a list of command-line flags to be passed through to // the build system's query tool. BuildFlags []string - // modFile will be used for -modfile in go command invocations. - modFile string - - // modFlag will be used for -modfile in go command invocations. - modFlag string - // Fset provides source position information for syntax trees and types. // If Fset is nil, Load will use a new fileset, but preserve Fset's value. Fset *token.FileSet @@ -230,21 +225,24 @@ type Config struct { // drivers may vary in their level of support for overlays. Overlay map[string][]byte - // goListOverlayFile is the JSON file that encodes the Overlay - // mapping, used by 'go list -overlay=...' - goListOverlayFile string + // -- Hidden configuration fields only for use in x/tools -- + + // modFile will be used for -modfile in go command invocations. 
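Per the deprecation notes above, new code should spell out the Need bits rather than use the aliases. A sketch of the explicit equivalent of LoadSyntax, obtained by expanding the alias chain shown above:

```go
package example

import "golang.org/x/tools/go/packages"

// Explicit equivalent of the deprecated LoadSyntax alias
// (LoadSyntax = LoadTypes | NeedSyntax | NeedTypesInfo, expanded).
const loadSyntaxMode = packages.NeedName | packages.NeedFiles | packages.NeedCompiledGoFiles |
	packages.NeedImports |
	packages.NeedTypes | packages.NeedTypesSizes |
	packages.NeedSyntax | packages.NeedTypesInfo
```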
+ modFile string + + // modFlag will be used for -modfile in go command invocations. + modFlag string } // Load loads and returns the Go packages named by the given patterns. // -// Config specifies loading options; -// nil behaves the same as an empty Config. +// The cfg parameter specifies loading options; nil behaves the same as an empty [Config]. // // The [Config.Mode] field is a set of bits that determine what kinds // of information should be computed and returned. Modes that require // more information tend to be slower. See [LoadMode] for details // and important caveats. Its zero value is equivalent to -// NeedName | NeedFiles | NeedCompiledGoFiles. +// [NeedName] | [NeedFiles] | [NeedCompiledGoFiles]. // // Each call to Load returns a new set of [Package] instances. // The Packages and their Imports form a directed acyclic graph. @@ -261,7 +259,7 @@ type Config struct { // Errors associated with a particular package are recorded in the // corresponding Package's Errors list, and do not cause Load to // return an error. Clients may need to handle such errors before -// proceeding with further analysis. The PrintErrors function is +// proceeding with further analysis. The [PrintErrors] function is // provided for convenient display of all errors. func Load(cfg *Config, patterns ...string) ([]*Package, error) { ld := newLoader(cfg) @@ -324,21 +322,24 @@ func defaultDriver(cfg *Config, patterns ...string) (*DriverResponse, bool, erro } else if !response.NotHandled { return response, true, nil } - // (fall through) + // not handled: fall through } // go list fallback - // + // Write overlays once, as there are many calls // to 'go list' (one per chunk plus others too). - overlay, cleanupOverlay, err := gocommand.WriteOverlays(cfg.Overlay) + overlayFile, cleanupOverlay, err := gocommand.WriteOverlays(cfg.Overlay) if err != nil { return nil, false, err } defer cleanupOverlay() - cfg.goListOverlayFile = overlay - response, err := callDriverOnChunks(goListDriver, cfg, chunks) + var runner gocommand.Runner // (shared across many 'go list' calls) + driver := func(cfg *Config, patterns []string) (*DriverResponse, error) { + return goListDriver(cfg, &runner, overlayFile, patterns) + } + response, err := callDriverOnChunks(driver, cfg, chunks) if err != nil { return nil, false, err } @@ -376,16 +377,14 @@ func splitIntoChunks(patterns []string, argMax int) ([][]string, error) { func callDriverOnChunks(driver driver, cfg *Config, chunks [][]string) (*DriverResponse, error) { if len(chunks) == 0 { - return driver(cfg) + return driver(cfg, nil) } responses := make([]*DriverResponse, len(chunks)) errNotHandled := errors.New("driver returned NotHandled") var g errgroup.Group for i, chunk := range chunks { - i := i - chunk := chunk g.Go(func() (err error) { - responses[i], err = driver(cfg, chunk...) 
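A typical call sequence for the Load API documented above, distinguishing the query-level error from per-package errors (a sketch; the patterns are arbitrary):

```go
package main

import (
	"fmt"
	"os"

	"golang.org/x/tools/go/packages"
)

func main() {
	cfg := &packages.Config{
		Mode: packages.NeedName | packages.NeedFiles | packages.NeedCompiledGoFiles,
	}
	pkgs, err := packages.Load(cfg, "fmt", "bytes")
	if err != nil {
		// Query-level failure (e.g. the driver could not be invoked).
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	// Per-package errors do not cause Load to fail; report them here.
	if packages.PrintErrors(pkgs) > 0 {
		os.Exit(1)
	}
	for _, p := range pkgs {
		fmt.Println(p.PkgPath, len(p.CompiledGoFiles))
	}
}
```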
+ responses[i], err = driver(cfg, chunk) if responses[i] != nil && responses[i].NotHandled { err = errNotHandled } @@ -682,18 +681,19 @@ func (p *Package) String() string { return p.ID } // loaderPackage augments Package with state used during the loading phase type loaderPackage struct { *Package - importErrors map[string]error // maps each bad import to its error - loadOnce sync.Once - color uint8 // for cycle detection - needsrc bool // load from source (Mode >= LoadTypes) - needtypes bool // type information is either requested or depended on - initial bool // package was matched by a pattern - goVersion int // minor version number of go command on PATH + importErrors map[string]error // maps each bad import to its error + preds []*loaderPackage // packages that import this one + unfinishedSuccs atomic.Int32 // number of direct imports not yet loaded + color uint8 // for cycle detection + needsrc bool // load from source (Mode >= LoadTypes) + needtypes bool // type information is either requested or depended on + initial bool // package was matched by a pattern + goVersion int // minor version number of go command on PATH } // loader holds the working state of a single call to load. type loader struct { - pkgs map[string]*loaderPackage + pkgs map[string]*loaderPackage // keyed by Package.ID Config sizes types.Sizes // non-nil if needed by mode parseCache map[string]*parseValue @@ -739,9 +739,6 @@ func newLoader(cfg *Config) *loader { if ld.Config.Env == nil { ld.Config.Env = os.Environ() } - if ld.Config.gocmdRunner == nil { - ld.Config.gocmdRunner = &gocommand.Runner{} - } if ld.Context == nil { ld.Context = context.Background() } @@ -755,7 +752,7 @@ func newLoader(cfg *Config) *loader { ld.requestedMode = ld.Mode ld.Mode = impliedLoadMode(ld.Mode) - if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 { + if ld.Mode&(NeedSyntax|NeedTypes|NeedTypesInfo) != 0 { if ld.Fset == nil { ld.Fset = token.NewFileSet() } @@ -764,6 +761,7 @@ func newLoader(cfg *Config) *loader { // because we load source if export data is missing. if ld.ParseFile == nil { ld.ParseFile = func(fset *token.FileSet, filename string, src []byte) (*ast.File, error) { + // We implicitly promise to keep doing ast.Object resolution. :( const mode = parser.AllErrors | parser.ParseComments return parser.ParseFile(fset, filename, src, mode) } @@ -795,7 +793,7 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) { exportDataInvalid := len(ld.Overlay) > 0 || pkg.ExportFile == "" && pkg.PkgPath != "unsafe" // This package needs type information if the caller requested types and the package is // either a root, or it's a non-root and the user requested dependencies ... - needtypes := (ld.Mode&NeedTypes|NeedTypesInfo != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0)) + needtypes := (ld.Mode&(NeedTypes|NeedTypesInfo) != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0)) // This package needs source if the call requested source (or types info, which implies source) // and the package is either a root, or itas a non- root and the user requested dependencies... needsrc := ((ld.Mode&(NeedSyntax|NeedTypesInfo) != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0)) || @@ -820,9 +818,10 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) { } } - if ld.Mode&NeedImports != 0 { - // Materialize the import graph. - + // Materialize the import graph if it is needed (NeedImports), + // or if we'll be using loadPackages (Need{Syntax|Types|TypesInfo}). 
+ var leaves []*loaderPackage // packages with no unfinished successors + if ld.Mode&(NeedImports|NeedSyntax|NeedTypes|NeedTypesInfo) != 0 { const ( white = 0 // new grey = 1 // in progress @@ -841,63 +840,76 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) { // dependency on a package that does. These are the only packages // for which we load source code. var stack []*loaderPackage - var visit func(lpkg *loaderPackage) bool - visit = func(lpkg *loaderPackage) bool { - switch lpkg.color { - case black: - return lpkg.needsrc - case grey: + var visit func(from, lpkg *loaderPackage) bool + visit = func(from, lpkg *loaderPackage) bool { + if lpkg.color == grey { panic("internal error: grey node") } - lpkg.color = grey - stack = append(stack, lpkg) // push - stubs := lpkg.Imports // the structure form has only stubs with the ID in the Imports - lpkg.Imports = make(map[string]*Package, len(stubs)) - for importPath, ipkg := range stubs { - var importErr error - imp := ld.pkgs[ipkg.ID] - if imp == nil { - // (includes package "C" when DisableCgo) - importErr = fmt.Errorf("missing package: %q", ipkg.ID) - } else if imp.color == grey { - importErr = fmt.Errorf("import cycle: %s", stack) + if lpkg.color == white { + lpkg.color = grey + stack = append(stack, lpkg) // push + stubs := lpkg.Imports // the structure form has only stubs with the ID in the Imports + lpkg.Imports = make(map[string]*Package, len(stubs)) + for importPath, ipkg := range stubs { + var importErr error + imp := ld.pkgs[ipkg.ID] + if imp == nil { + // (includes package "C" when DisableCgo) + importErr = fmt.Errorf("missing package: %q", ipkg.ID) + } else if imp.color == grey { + importErr = fmt.Errorf("import cycle: %s", stack) + } + if importErr != nil { + if lpkg.importErrors == nil { + lpkg.importErrors = make(map[string]error) + } + lpkg.importErrors[importPath] = importErr + continue + } + + if visit(lpkg, imp) { + lpkg.needsrc = true + } + lpkg.Imports[importPath] = imp.Package } - if importErr != nil { - if lpkg.importErrors == nil { - lpkg.importErrors = make(map[string]error) + + // -- postorder -- + + // Complete type information is required for the + // immediate dependencies of each source package. + if lpkg.needsrc && ld.Mode&NeedTypes != 0 { + for _, ipkg := range lpkg.Imports { + ld.pkgs[ipkg.ID].needtypes = true } - lpkg.importErrors[importPath] = importErr - continue } - if visit(imp) { - lpkg.needsrc = true + // NeedTypeSizes causes TypeSizes to be set even + // on packages for which types aren't needed. + if ld.Mode&NeedTypesSizes != 0 { + lpkg.TypesSizes = ld.sizes } - lpkg.Imports[importPath] = imp.Package - } - // Complete type information is required for the - // immediate dependencies of each source package. - if lpkg.needsrc && ld.Mode&NeedTypes != 0 { - for _, ipkg := range lpkg.Imports { - ld.pkgs[ipkg.ID].needtypes = true + // Add packages with no imports directly to the queue of leaves. + if len(lpkg.Imports) == 0 { + leaves = append(leaves, lpkg) } + + stack = stack[:len(stack)-1] // pop + lpkg.color = black } - // NeedTypeSizes causes TypeSizes to be set even - // on packages for which types aren't needed. - if ld.Mode&NeedTypesSizes != 0 { - lpkg.TypesSizes = ld.sizes + // Add edge from predecessor. + if from != nil { + from.unfinishedSuccs.Add(+1) // incref + lpkg.preds = append(lpkg.preds, from) } - stack = stack[:len(stack)-1] // pop - lpkg.color = black return lpkg.needsrc } // For each initial package, create its import DAG. 
for _, lpkg := range initial { - visit(lpkg) + visit(nil, lpkg) } } else { @@ -910,16 +922,45 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) { // Load type data and syntax if needed, starting at // the initial packages (roots of the import DAG). - if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 { - var wg sync.WaitGroup - for _, lpkg := range initial { - wg.Add(1) - go func(lpkg *loaderPackage) { - ld.loadRecursive(lpkg) - wg.Done() - }(lpkg) + if ld.Mode&(NeedSyntax|NeedTypes|NeedTypesInfo) != 0 { + + // We avoid using g.SetLimit to limit concurrency as + // it makes g.Go stop accepting work, which prevents + // workers from enqeuing, and thus finishing, and thus + // allowing the group to make progress: deadlock. + // + // Instead we use the ioLimit and cpuLimit semaphores. + g, _ := errgroup.WithContext(ld.Context) + + // enqueues adds a package to the type-checking queue. + // It must have no unfinished successors. + var enqueue func(*loaderPackage) + enqueue = func(lpkg *loaderPackage) { + g.Go(func() error { + // Parse and type-check. + ld.loadPackage(lpkg) + + // Notify each waiting predecessor, + // and enqueue it when it becomes a leaf. + for _, pred := range lpkg.preds { + if pred.unfinishedSuccs.Add(-1) == 0 { // decref + enqueue(pred) + } + } + + return nil + }) + } + + // Load leaves first, adding new packages + // to the queue as they become leaves. + for _, leaf := range leaves { + enqueue(leaf) + } + + if err := g.Wait(); err != nil { + return nil, err // cancelled } - wg.Wait() } // If the context is done, return its error and @@ -961,12 +1002,14 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) { } if ld.requestedMode&NeedTypes == 0 { ld.pkgs[i].Types = nil - ld.pkgs[i].Fset = nil ld.pkgs[i].IllTyped = false } if ld.requestedMode&NeedSyntax == 0 { ld.pkgs[i].Syntax = nil } + if ld.requestedMode&(NeedSyntax|NeedTypes|NeedTypesInfo) == 0 { + ld.pkgs[i].Fset = nil + } if ld.requestedMode&NeedTypesInfo == 0 { ld.pkgs[i].TypesInfo = nil } @@ -981,31 +1024,10 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) { return result, nil } -// loadRecursive loads the specified package and its dependencies, -// recursively, in parallel, in topological order. -// It is atomic and idempotent. -// Precondition: ld.Mode&NeedTypes. -func (ld *loader) loadRecursive(lpkg *loaderPackage) { - lpkg.loadOnce.Do(func() { - // Load the direct dependencies, in parallel. - var wg sync.WaitGroup - for _, ipkg := range lpkg.Imports { - imp := ld.pkgs[ipkg.ID] - wg.Add(1) - go func(imp *loaderPackage) { - ld.loadRecursive(imp) - wg.Done() - }(imp) - } - wg.Wait() - ld.loadPackage(lpkg) - }) -} - -// loadPackage loads the specified package. +// loadPackage loads/parses/typechecks the specified package. // It must be called only once per Package, // after immediate dependencies are loaded. -// Precondition: ld.Mode & NeedTypes. +// Precondition: ld.Mode&(NeedSyntax|NeedTypes|NeedTypesInfo) != 0. func (ld *loader) loadPackage(lpkg *loaderPackage) { if lpkg.PkgPath == "unsafe" { // Fill in the blanks to avoid surprises. @@ -1041,6 +1063,10 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { if !lpkg.needtypes && !lpkg.needsrc { return } + + // TODO(adonovan): this condition looks wrong: + // I think it should be lpkg.needtypes && !lpg.needsrc, + // so that NeedSyntax without NeedTypes can be satisfied by export data. 
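The preds/unfinishedSuccs machinery above implements leaf-first topological scheduling: each node counts its unfinished successors, and when a worker finishes it decrements its predecessors' counts, enqueuing any that reach zero. A stripped-down sketch of the pattern with a toy node type (not the loader's actual types):

```go
package main

import (
	"fmt"
	"sync/atomic"

	"golang.org/x/sync/errgroup"
)

// node is a toy stand-in for loaderPackage.
type node struct {
	name            string
	preds           []*node      // nodes that import this one
	unfinishedSuccs atomic.Int32 // direct imports not yet loaded
}

func main() {
	a := &node{name: "a"} // a imports b
	b := &node{name: "b", preds: []*node{a}}
	a.unfinishedSuccs.Add(1)

	var g errgroup.Group
	var enqueue func(*node)
	enqueue = func(n *node) {
		g.Go(func() error {
			fmt.Println("load", n.name) // stand-in for parse/type-check
			// Notify predecessors; enqueue any that became leaves.
			for _, p := range n.preds {
				if p.unfinishedSuccs.Add(-1) == 0 {
					enqueue(p)
				}
			}
			return nil
		})
	}
	enqueue(b) // b has no imports, so it is the initial leaf
	if err := g.Wait(); err != nil {
		panic(err)
	}
	// Output: "load b", then "load a".
}
```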
if !lpkg.needsrc { if err := ld.loadFromExportData(lpkg); err != nil { lpkg.Errors = append(lpkg.Errors, Error{ @@ -1146,7 +1172,7 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { } lpkg.Syntax = files - if ld.Config.Mode&NeedTypes == 0 { + if ld.Config.Mode&(NeedTypes|NeedTypesInfo) == 0 { return } @@ -1157,16 +1183,20 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { return } - lpkg.TypesInfo = &types.Info{ - Types: make(map[ast.Expr]types.TypeAndValue), - Defs: make(map[*ast.Ident]types.Object), - Uses: make(map[*ast.Ident]types.Object), - Implicits: make(map[ast.Node]types.Object), - Instances: make(map[*ast.Ident]types.Instance), - Scopes: make(map[ast.Node]*types.Scope), - Selections: make(map[*ast.SelectorExpr]*types.Selection), + // Populate TypesInfo only if needed, as it + // causes the type checker to work much harder. + if ld.Config.Mode&NeedTypesInfo != 0 { + lpkg.TypesInfo = &types.Info{ + Types: make(map[ast.Expr]types.TypeAndValue), + Defs: make(map[*ast.Ident]types.Object), + Uses: make(map[*ast.Ident]types.Object), + Implicits: make(map[ast.Node]types.Object), + Instances: make(map[*ast.Ident]types.Instance), + Scopes: make(map[ast.Node]*types.Scope), + Selections: make(map[*ast.SelectorExpr]*types.Selection), + FileVersions: make(map[*ast.File]string), + } } - versions.InitFileVersions(lpkg.TypesInfo) lpkg.TypesSizes = ld.sizes importer := importerFunc(func(path string) (*types.Package, error) { @@ -1219,6 +1249,10 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { } } + // Type-checking is CPU intensive. + cpuLimit <- unit{} // acquire a token + defer func() { <-cpuLimit }() // release a token + typErr := types.NewChecker(tc, ld.Fset, lpkg.Types, lpkg.TypesInfo).Files(lpkg.Syntax) lpkg.importErrors = nil // no longer needed @@ -1283,8 +1317,11 @@ type importerFunc func(path string) (*types.Package, error) func (f importerFunc) Import(path string) (*types.Package, error) { return f(path) } // We use a counting semaphore to limit -// the number of parallel I/O calls per process. -var ioLimit = make(chan bool, 20) +// the number of parallel I/O calls or CPU threads per process. +var ( + ioLimit = make(chan unit, 20) + cpuLimit = make(chan unit, runtime.GOMAXPROCS(0)) +) func (ld *loader) parseFile(filename string) (*ast.File, error) { ld.parseCacheMu.Lock() @@ -1301,20 +1338,28 @@ func (ld *loader) parseFile(filename string) (*ast.File, error) { var src []byte for f, contents := range ld.Config.Overlay { + // TODO(adonovan): Inefficient for large overlays. + // Do an exact name-based map lookup + // (for nonexistent files) followed by a + // FileID-based map lookup (for existing ones). if sameFile(f, filename) { src = contents + break } } var err error if src == nil { - ioLimit <- true // wait + ioLimit <- unit{} // acquire a token src, err = os.ReadFile(filename) - <-ioLimit // signal + <-ioLimit // release a token } if err != nil { v.err = err } else { + // Parsing is CPU intensive. + cpuLimit <- unit{} // acquire a token v.f, v.err = ld.ParseFile(ld.Fset, filename, src) + <-cpuLimit // release a token } close(v.ready) @@ -1329,18 +1374,21 @@ func (ld *loader) parseFile(filename string) (*ast.File, error) { // Because files are scanned in parallel, the token.Pos // positions of the resulting ast.Files are not ordered. 
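The ioLimit/cpuLimit channels above are plain counting semaphores built from buffered channels. A self-contained sketch of the acquire/release idiom:

```go
package main

import (
	"fmt"
	"runtime"
	"sync"
)

type unit struct{}

// cpuLimit allows at most GOMAXPROCS CPU-bound tasks at once.
var cpuLimit = make(chan unit, runtime.GOMAXPROCS(0))

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 20; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			cpuLimit <- unit{}            // acquire a token
			defer func() { <-cpuLimit }() // release a token
			fmt.Println("task", i)        // CPU-bound work goes here
		}(i)
	}
	wg.Wait()
}
```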
func (ld *loader) parseFiles(filenames []string) ([]*ast.File, []error) { - var wg sync.WaitGroup - n := len(filenames) - parsed := make([]*ast.File, n) - errors := make([]error, n) - for i, file := range filenames { - wg.Add(1) - go func(i int, filename string) { + var ( + n = len(filenames) + parsed = make([]*ast.File, n) + errors = make([]error, n) + ) + var g errgroup.Group + for i, filename := range filenames { + // This creates goroutines unnecessarily in the + // cache-hit case, but that case is uncommon. + g.Go(func() error { parsed[i], errors[i] = ld.parseFile(filename) - wg.Done() - }(i, file) + return nil + }) } - wg.Wait() + g.Wait() // Eliminate nils, preserving order. var o int @@ -1499,6 +1547,10 @@ func impliedLoadMode(loadMode LoadMode) LoadMode { // All these things require knowing the import graph. loadMode |= NeedImports } + if loadMode&NeedTypes != 0 { + // Types require the GoVersion from Module. + loadMode |= NeedModule + } return loadMode } @@ -1507,4 +1559,4 @@ func usesExportData(cfg *Config) bool { return cfg.Mode&NeedExportFile != 0 || cfg.Mode&NeedTypes != 0 && cfg.Mode&NeedDeps == 0 } -var _ interface{} = io.Discard // assert build toolchain is go1.16 or later +type unit struct{} diff --git a/vendor/golang.org/x/tools/go/packages/visit.go b/vendor/golang.org/x/tools/go/packages/visit.go index a1dcc40b727..df14ffd94dc 100644 --- a/vendor/golang.org/x/tools/go/packages/visit.go +++ b/vendor/golang.org/x/tools/go/packages/visit.go @@ -49,11 +49,20 @@ func Visit(pkgs []*Package, pre func(*Package) bool, post func(*Package)) { // PrintErrors returns the number of errors printed. func PrintErrors(pkgs []*Package) int { var n int + errModules := make(map[*Module]bool) Visit(pkgs, nil, func(pkg *Package) { for _, err := range pkg.Errors { fmt.Fprintln(os.Stderr, err) n++ } + + // Print pkg.Module.Error once if present. + mod := pkg.Module + if mod != nil && mod.Error != nil && !errModules[mod] { + errModules[mod] = true + fmt.Fprintln(os.Stderr, mod.Error.Err) + n++ + } }) return n } diff --git a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go index d648c3d071b..16ed3c1780b 100644 --- a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go +++ b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go @@ -51,7 +51,7 @@ type Path string // // PO package->object Package.Scope.Lookup // OT object->type Object.Type -// TT type->type Type.{Elem,Key,{,{,Recv}Type}Params,Results,Underlying} [EKPRUTrC] +// TT type->type Type.{Elem,Key,{,{,Recv}Type}Params,Results,Underlying,Rhs} [EKPRUTrCa] // TO type->object Type.{At,Field,Method,Obj} [AFMO] // // All valid paths start with a package and end at an object @@ -63,7 +63,7 @@ type Path string // - The only PO operator is Package.Scope.Lookup, which requires an identifier. // - The only OT operator is Object.Type, // which we encode as '.' because dot cannot appear in an identifier. -// - The TT operators are encoded as [EKPRUTrC]; +// - The TT operators are encoded as [EKPRUTrCa]; // two of these ({,Recv}TypeParams) require an integer operand, // which is encoded as a string of decimal digits. 
// - The TO operators are encoded as [AFMO]; @@ -106,6 +106,7 @@ const ( opTypeParam = 'T' // .TypeParams.At(i) (Named, Signature) opRecvTypeParam = 'r' // .RecvTypeParams.At(i) (Signature) opConstraint = 'C' // .Constraint() (TypeParam) + opRhs = 'a' // .Rhs() (Alias) // type->object operators opAt = 'A' // .At(i) (Tuple) @@ -227,7 +228,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { // Reject obviously non-viable cases. switch obj := obj.(type) { case *types.TypeName: - if _, ok := aliases.Unalias(obj.Type()).(*types.TypeParam); !ok { + if _, ok := types.Unalias(obj.Type()).(*types.TypeParam); !ok { // With the exception of type parameters, only package-level type names // have a path. return "", fmt.Errorf("no path for %v", obj) @@ -279,21 +280,26 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { path = append(path, opType) T := o.Type() - - if tname.IsAlias() { - // type alias - if r := find(obj, T, path, nil); r != nil { + if alias, ok := T.(*types.Alias); ok { + if r := findTypeParam(obj, aliases.TypeParams(alias), path, opTypeParam); r != nil { return Path(r), nil } - } else { - if named, _ := T.(*types.Named); named != nil { - if r := findTypeParam(obj, named.TypeParams(), path, opTypeParam, nil); r != nil { - // generic named type - return Path(r), nil - } + if r := find(obj, aliases.Rhs(alias), append(path, opRhs)); r != nil { + return Path(r), nil + } + + } else if tname.IsAlias() { + // legacy alias + if r := find(obj, T, path); r != nil { + return Path(r), nil } + + } else if named, ok := T.(*types.Named); ok { // defined (named) type - if r := find(obj, T.Underlying(), append(path, opUnderlying), nil); r != nil { + if r := findTypeParam(obj, named.TypeParams(), path, opTypeParam); r != nil { + return Path(r), nil + } + if r := find(obj, named.Underlying(), append(path, opUnderlying)); r != nil { return Path(r), nil } } @@ -306,7 +312,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { if _, ok := o.(*types.TypeName); !ok { if o.Exported() { // exported non-type (const, var, func) - if r := find(obj, o.Type(), append(path, opType), nil); r != nil { + if r := find(obj, o.Type(), append(path, opType)); r != nil { return Path(r), nil } } @@ -314,7 +320,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { } // Inspect declared methods of defined types. - if T, ok := aliases.Unalias(o.Type()).(*types.Named); ok { + if T, ok := types.Unalias(o.Type()).(*types.Named); ok { path = append(path, opType) // The method index here is always with respect // to the underlying go/types data structures, @@ -326,7 +332,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { if m == obj { return Path(path2), nil // found declared method } - if r := find(obj, m.Type(), append(path2, opType), nil); r != nil { + if r := find(obj, m.Type(), append(path2, opType)); r != nil { return Path(r), nil } } @@ -441,46 +447,64 @@ func (enc *Encoder) concreteMethod(meth *types.Func) (Path, bool) { // // The seen map is used to short circuit cycles through type parameters. If // nil, it will be allocated as necessary. -func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName]bool) []byte { +// +// The seenMethods map is used internally to short circuit cycles through +// interface methods, such as occur in the following example: +// +// type I interface { f() interface{I} } +// +// See golang/go#68046 for details. 
+func find(obj types.Object, T types.Type, path []byte) []byte { + return (&finder{obj: obj}).find(T, path) +} + +// finder closes over search state for a call to find. +type finder struct { + obj types.Object // the sought object + seenTParamNames map[*types.TypeName]bool // for cycle breaking through type parameters + seenMethods map[*types.Func]bool // for cycle breaking through recursive interfaces +} + +func (f *finder) find(T types.Type, path []byte) []byte { switch T := T.(type) { - case *aliases.Alias: - return find(obj, aliases.Unalias(T), path, seen) + case *types.Alias: + return f.find(types.Unalias(T), path) case *types.Basic, *types.Named: // Named types belonging to pkg were handled already, // so T must belong to another package. No path. return nil case *types.Pointer: - return find(obj, T.Elem(), append(path, opElem), seen) + return f.find(T.Elem(), append(path, opElem)) case *types.Slice: - return find(obj, T.Elem(), append(path, opElem), seen) + return f.find(T.Elem(), append(path, opElem)) case *types.Array: - return find(obj, T.Elem(), append(path, opElem), seen) + return f.find(T.Elem(), append(path, opElem)) case *types.Chan: - return find(obj, T.Elem(), append(path, opElem), seen) + return f.find(T.Elem(), append(path, opElem)) case *types.Map: - if r := find(obj, T.Key(), append(path, opKey), seen); r != nil { + if r := f.find(T.Key(), append(path, opKey)); r != nil { return r } - return find(obj, T.Elem(), append(path, opElem), seen) + return f.find(T.Elem(), append(path, opElem)) case *types.Signature: - if r := findTypeParam(obj, T.RecvTypeParams(), path, opRecvTypeParam, nil); r != nil { + if r := f.findTypeParam(T.RecvTypeParams(), path, opRecvTypeParam); r != nil { return r } - if r := findTypeParam(obj, T.TypeParams(), path, opTypeParam, seen); r != nil { + if r := f.findTypeParam(T.TypeParams(), path, opTypeParam); r != nil { return r } - if r := find(obj, T.Params(), append(path, opParams), seen); r != nil { + if r := f.find(T.Params(), append(path, opParams)); r != nil { return r } - return find(obj, T.Results(), append(path, opResults), seen) + return f.find(T.Results(), append(path, opResults)) case *types.Struct: for i := 0; i < T.NumFields(); i++ { fld := T.Field(i) path2 := appendOpArg(path, opField, i) - if fld == obj { + if fld == f.obj { return path2 // found field var } - if r := find(obj, fld.Type(), append(path2, opType), seen); r != nil { + if r := f.find(fld.Type(), append(path2, opType)); r != nil { return r } } @@ -489,10 +513,10 @@ func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName] for i := 0; i < T.Len(); i++ { v := T.At(i) path2 := appendOpArg(path, opAt, i) - if v == obj { + if v == f.obj { return path2 // found param/result var } - if r := find(obj, v.Type(), append(path2, opType), seen); r != nil { + if r := f.find(v.Type(), append(path2, opType)); r != nil { return r } } @@ -500,28 +524,35 @@ func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName] case *types.Interface: for i := 0; i < T.NumMethods(); i++ { m := T.Method(i) + if f.seenMethods[m] { + return nil + } path2 := appendOpArg(path, opMethod, i) - if m == obj { + if m == f.obj { return path2 // found interface method } - if r := find(obj, m.Type(), append(path2, opType), seen); r != nil { + if f.seenMethods == nil { + f.seenMethods = make(map[*types.Func]bool) + } + f.seenMethods[m] = true + if r := f.find(m.Type(), append(path2, opType)); r != nil { return r } } return nil case *types.TypeParam: name := T.Obj() - if name 
== obj { - return append(path, opObj) - } - if seen[name] { + if f.seenTParamNames[name] { return nil } - if seen == nil { - seen = make(map[*types.TypeName]bool) + if name == f.obj { + return append(path, opObj) + } + if f.seenTParamNames == nil { + f.seenTParamNames = make(map[*types.TypeName]bool) } - seen[name] = true - if r := find(obj, T.Constraint(), append(path, opConstraint), seen); r != nil { + f.seenTParamNames[name] = true + if r := f.find(T.Constraint(), append(path, opConstraint)); r != nil { return r } return nil @@ -529,11 +560,15 @@ func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName] panic(T) } -func findTypeParam(obj types.Object, list *types.TypeParamList, path []byte, op byte, seen map[*types.TypeName]bool) []byte { +func findTypeParam(obj types.Object, list *types.TypeParamList, path []byte, op byte) []byte { + return (&finder{obj: obj}).findTypeParam(list, path, op) +} + +func (f *finder) findTypeParam(list *types.TypeParamList, path []byte, op byte) []byte { for i := 0; i < list.Len(); i++ { tparam := list.At(i) path2 := appendOpArg(path, op, i) - if r := find(obj, tparam, path2, seen); r != nil { + if r := f.find(tparam, path2); r != nil { return r } } @@ -620,7 +655,7 @@ func Object(pkg *types.Package, p Path) (types.Object, error) { // Inv: t != nil, obj == nil - t = aliases.Unalias(t) + t = types.Unalias(t) switch code { case opElem: hasElem, ok := t.(hasElem) // Pointer, Slice, Array, Chan, Map @@ -657,6 +692,16 @@ func Object(pkg *types.Package, p Path) (types.Object, error) { } t = named.Underlying() + case opRhs: + if alias, ok := t.(*types.Alias); ok { + t = aliases.Rhs(alias) + } else if false && aliases.Enabled() { + // The Enabled check is too expensive, so for now we + // simply assume that aliases are not enabled. + // TODO(adonovan): replace with "if true {" when go1.24 is assured. + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want alias)", code, t, t) + } + case opTypeParam: hasTypeParams, ok := t.(hasTypeParams) // Named, Signature if !ok { diff --git a/vendor/golang.org/x/tools/go/types/typeutil/callee.go b/vendor/golang.org/x/tools/go/types/typeutil/callee.go new file mode 100644 index 00000000000..754380351e8 --- /dev/null +++ b/vendor/golang.org/x/tools/go/types/typeutil/callee.go @@ -0,0 +1,68 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeutil + +import ( + "go/ast" + "go/types" + + "golang.org/x/tools/internal/typeparams" +) + +// Callee returns the named target of a function call, if any: +// a function, method, builtin, or variable. +// +// Functions and methods may potentially have type parameters. +func Callee(info *types.Info, call *ast.CallExpr) types.Object { + fun := ast.Unparen(call.Fun) + + // Look through type instantiation if necessary. + isInstance := false + switch fun.(type) { + case *ast.IndexExpr, *ast.IndexListExpr: + // When extracting the callee from an *IndexExpr, we need to check that + // it is a *types.Func and not a *types.Var. + // Example: Don't match a slice m within the expression `m[0]()`. 
+ isInstance = true + fun, _, _, _ = typeparams.UnpackIndexExpr(fun) + } + + var obj types.Object + switch fun := fun.(type) { + case *ast.Ident: + obj = info.Uses[fun] // type, var, builtin, or declared func + case *ast.SelectorExpr: + if sel, ok := info.Selections[fun]; ok { + obj = sel.Obj() // method or field + } else { + obj = info.Uses[fun.Sel] // qualified identifier? + } + } + if _, ok := obj.(*types.TypeName); ok { + return nil // T(x) is a conversion, not a call + } + // A Func is required to match instantiations. + if _, ok := obj.(*types.Func); isInstance && !ok { + return nil // Was not a Func. + } + return obj +} + +// StaticCallee returns the target (function or method) of a static function +// call, if any. It returns nil for calls to builtins. +// +// Note: for calls of instantiated functions and methods, StaticCallee returns +// the corresponding generic function or method on the generic type. +func StaticCallee(info *types.Info, call *ast.CallExpr) *types.Func { + if f, ok := Callee(info, call).(*types.Func); ok && !interfaceMethod(f) { + return f + } + return nil +} + +func interfaceMethod(f *types.Func) bool { + recv := f.Type().(*types.Signature).Recv() + return recv != nil && types.IsInterface(recv.Type()) +} diff --git a/vendor/golang.org/x/tools/go/types/typeutil/imports.go b/vendor/golang.org/x/tools/go/types/typeutil/imports.go new file mode 100644 index 00000000000..b81ce0c330f --- /dev/null +++ b/vendor/golang.org/x/tools/go/types/typeutil/imports.go @@ -0,0 +1,30 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeutil + +import "go/types" + +// Dependencies returns all dependencies of the specified packages. +// +// Dependent packages appear in topological order: if package P imports +// package Q, Q appears earlier than P in the result. +// The algorithm follows import statements in the order they +// appear in the source code, so the result is a total order. +func Dependencies(pkgs ...*types.Package) []*types.Package { + var result []*types.Package + seen := make(map[*types.Package]bool) + var visit func(pkgs []*types.Package) + visit = func(pkgs []*types.Package) { + for _, p := range pkgs { + if !seen[p] { + seen[p] = true + visit(p.Imports()) + result = append(result, p) + } + } + } + visit(pkgs) + return result +} diff --git a/vendor/golang.org/x/tools/go/types/typeutil/map.go b/vendor/golang.org/x/tools/go/types/typeutil/map.go new file mode 100644 index 00000000000..8d824f7140f --- /dev/null +++ b/vendor/golang.org/x/tools/go/types/typeutil/map.go @@ -0,0 +1,517 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package typeutil defines various utilities for types, such as Map, +// a mapping from types.Type to any values. +package typeutil // import "golang.org/x/tools/go/types/typeutil" + +import ( + "bytes" + "fmt" + "go/types" + "reflect" + + "golang.org/x/tools/internal/typeparams" +) + +// Map is a hash-table-based mapping from types (types.Type) to +// arbitrary any values. The concrete types that implement +// the Type interface are pointers. Since they are not canonicalized, +// == cannot be used to check for equivalence, and thus we cannot +// simply use a Go map. +// +// Just as with map[K]V, a nil *Map is a valid empty map. +// +// Not thread-safe. 
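A usage sketch for the Callee/StaticCallee helpers above: type-check a snippet, then ask for the static target of each call it contains. The source string is an assumption for illustration.

```go
package main

import (
	"fmt"
	"go/ast"
	"go/importer"
	"go/parser"
	"go/token"
	"go/types"

	"golang.org/x/tools/go/types/typeutil"
)

func main() {
	const src = `package p; func f() {}; func g() { f() }`
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	// Callee consults Uses and Selections, so populate both.
	info := &types.Info{
		Uses:       make(map[*ast.Ident]types.Object),
		Selections: make(map[*ast.SelectorExpr]*types.Selection),
	}
	conf := types.Config{Importer: importer.Default()}
	if _, err := conf.Check("p", fset, []*ast.File{file}, info); err != nil {
		panic(err)
	}
	ast.Inspect(file, func(n ast.Node) bool {
		if call, ok := n.(*ast.CallExpr); ok {
			if fn := typeutil.StaticCallee(info, call); fn != nil {
				fmt.Println("static callee:", fn.Name()) // "f"
			}
		}
		return true
	})
}
```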
+type Map struct { + hasher Hasher // shared by many Maps + table map[uint32][]entry // maps hash to bucket; entry.key==nil means unused + length int // number of map entries +} + +// entry is an entry (key/value association) in a hash bucket. +type entry struct { + key types.Type + value any +} + +// SetHasher sets the hasher used by Map. +// +// All Hashers are functionally equivalent but contain internal state +// used to cache the results of hashing previously seen types. +// +// A single Hasher created by MakeHasher() may be shared among many +// Maps. This is recommended if the instances have many keys in +// common, as it will amortize the cost of hash computation. +// +// A Hasher may grow without bound as new types are seen. Even when a +// type is deleted from the map, the Hasher never shrinks, since other +// types in the map may reference the deleted type indirectly. +// +// Hashers are not thread-safe, and read-only operations such as +// Map.Lookup require updates to the hasher, so a full Mutex lock (not a +// read-lock) is require around all Map operations if a shared +// hasher is accessed from multiple threads. +// +// If SetHasher is not called, the Map will create a private hasher at +// the first call to Insert. +func (m *Map) SetHasher(hasher Hasher) { + m.hasher = hasher +} + +// Delete removes the entry with the given key, if any. +// It returns true if the entry was found. +func (m *Map) Delete(key types.Type) bool { + if m != nil && m.table != nil { + hash := m.hasher.Hash(key) + bucket := m.table[hash] + for i, e := range bucket { + if e.key != nil && types.Identical(key, e.key) { + // We can't compact the bucket as it + // would disturb iterators. + bucket[i] = entry{} + m.length-- + return true + } + } + } + return false +} + +// At returns the map entry for the given key. +// The result is nil if the entry is not present. +func (m *Map) At(key types.Type) any { + if m != nil && m.table != nil { + for _, e := range m.table[m.hasher.Hash(key)] { + if e.key != nil && types.Identical(key, e.key) { + return e.value + } + } + } + return nil +} + +// Set sets the map entry for key to val, +// and returns the previous entry, if any. +func (m *Map) Set(key types.Type, value any) (prev any) { + if m.table != nil { + hash := m.hasher.Hash(key) + bucket := m.table[hash] + var hole *entry + for i, e := range bucket { + if e.key == nil { + hole = &bucket[i] + } else if types.Identical(key, e.key) { + prev = e.value + bucket[i].value = value + return + } + } + + if hole != nil { + *hole = entry{key, value} // overwrite deleted entry + } else { + m.table[hash] = append(bucket, entry{key, value}) + } + } else { + if m.hasher.memo == nil { + m.hasher = MakeHasher() + } + hash := m.hasher.Hash(key) + m.table = map[uint32][]entry{hash: {entry{key, value}}} + } + + m.length++ + return +} + +// Len returns the number of map entries. +func (m *Map) Len() int { + if m != nil { + return m.length + } + return 0 +} + +// Iterate calls function f on each entry in the map in unspecified order. +// +// If f should mutate the map, Iterate provides the same guarantees as +// Go maps: if f deletes a map entry that Iterate has not yet reached, +// f will not be invoked for it, but if f inserts a map entry that +// Iterate has not yet reached, whether or not f will be invoked for +// it is unspecified. 
+func (m *Map) Iterate(f func(key types.Type, value any)) { + if m != nil { + for _, bucket := range m.table { + for _, e := range bucket { + if e.key != nil { + f(e.key, e.value) + } + } + } + } +} + +// Keys returns a new slice containing the set of map keys. +// The order is unspecified. +func (m *Map) Keys() []types.Type { + keys := make([]types.Type, 0, m.Len()) + m.Iterate(func(key types.Type, _ any) { + keys = append(keys, key) + }) + return keys +} + +func (m *Map) toString(values bool) string { + if m == nil { + return "{}" + } + var buf bytes.Buffer + fmt.Fprint(&buf, "{") + sep := "" + m.Iterate(func(key types.Type, value any) { + fmt.Fprint(&buf, sep) + sep = ", " + fmt.Fprint(&buf, key) + if values { + fmt.Fprintf(&buf, ": %q", value) + } + }) + fmt.Fprint(&buf, "}") + return buf.String() +} + +// String returns a string representation of the map's entries. +// Values are printed using fmt.Sprintf("%v", v). +// Order is unspecified. +func (m *Map) String() string { + return m.toString(true) +} + +// KeysString returns a string representation of the map's key set. +// Order is unspecified. +func (m *Map) KeysString() string { + return m.toString(false) +} + +//////////////////////////////////////////////////////////////////////// +// Hasher + +// A Hasher maps each type to its hash value. +// For efficiency, a hasher uses memoization; thus its memory +// footprint grows monotonically over time. +// Hashers are not thread-safe. +// Hashers have reference semantics. +// Call MakeHasher to create a Hasher. +type Hasher struct { + memo map[types.Type]uint32 + + // ptrMap records pointer identity. + ptrMap map[any]uint32 + + // sigTParams holds type parameters from the signature being hashed. + // Signatures are considered identical modulo renaming of type parameters, so + // within the scope of a signature type the identity of the signature's type + // parameters is just their index. + // + // Since the language does not currently support referring to uninstantiated + // generic types or functions, and instantiated signatures do not have type + // parameter lists, we should never encounter a second non-empty type + // parameter list when hashing a generic signature. + sigTParams *types.TypeParamList +} + +// MakeHasher returns a new Hasher instance. +func MakeHasher() Hasher { + return Hasher{ + memo: make(map[types.Type]uint32), + ptrMap: make(map[any]uint32), + sigTParams: nil, + } +} + +// Hash computes a hash value for the given type t such that +// Identical(t, t') => Hash(t) == Hash(t'). +func (h Hasher) Hash(t types.Type) uint32 { + hash, ok := h.memo[t] + if !ok { + hash = h.hashFor(t) + h.memo[t] = hash + } + return hash +} + +// hashString computes the Fowler–Noll–Vo hash of s. +func hashString(s string) uint32 { + var h uint32 + for i := 0; i < len(s); i++ { + h ^= uint32(s[i]) + h *= 16777619 + } + return h +} + +// hashFor computes the hash of t. +func (h Hasher) hashFor(t types.Type) uint32 { + // See Identical for rationale. 
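A small sketch of the Map above: two structurally identical types act as one key even though their pointers differ, which is exactly what a plain Go map cannot do for types.Type.

```go
package main

import (
	"fmt"
	"go/types"

	"golang.org/x/tools/go/types/typeutil"
)

func main() {
	var m typeutil.Map // the zero value is a valid empty map

	k1 := types.NewSlice(types.Typ[types.Int])
	k2 := types.NewSlice(types.Typ[types.Int]) // distinct pointer, identical type

	m.Set(k1, "a []int")
	fmt.Println(m.At(k2))              // "a []int": lookup uses type identity, not ==
	fmt.Println(m.Len())               // 1
	fmt.Println(m.Delete(k2), m.Len()) // true 0
}
```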
+ switch t := t.(type) { + case *types.Basic: + return uint32(t.Kind()) + + case *types.Alias: + return h.Hash(types.Unalias(t)) + + case *types.Array: + return 9043 + 2*uint32(t.Len()) + 3*h.Hash(t.Elem()) + + case *types.Slice: + return 9049 + 2*h.Hash(t.Elem()) + + case *types.Struct: + var hash uint32 = 9059 + for i, n := 0, t.NumFields(); i < n; i++ { + f := t.Field(i) + if f.Anonymous() { + hash += 8861 + } + hash += hashString(t.Tag(i)) + hash += hashString(f.Name()) // (ignore f.Pkg) + hash += h.Hash(f.Type()) + } + return hash + + case *types.Pointer: + return 9067 + 2*h.Hash(t.Elem()) + + case *types.Signature: + var hash uint32 = 9091 + if t.Variadic() { + hash *= 8863 + } + + // Use a separate hasher for types inside of the signature, where type + // parameter identity is modified to be (index, constraint). We must use a + // new memo for this hasher as type identity may be affected by this + // masking. For example, in func[T any](*T), the identity of *T depends on + // whether we are mapping the argument in isolation, or recursively as part + // of hashing the signature. + // + // We should never encounter a generic signature while hashing another + // generic signature, but defensively set sigTParams only if h.mask is + // unset. + tparams := t.TypeParams() + if h.sigTParams == nil && tparams.Len() != 0 { + h = Hasher{ + // There may be something more efficient than discarding the existing + // memo, but it would require detecting whether types are 'tainted' by + // references to type parameters. + memo: make(map[types.Type]uint32), + // Re-using ptrMap ensures that pointer identity is preserved in this + // hasher. + ptrMap: h.ptrMap, + sigTParams: tparams, + } + } + + for i := 0; i < tparams.Len(); i++ { + tparam := tparams.At(i) + hash += 7 * h.Hash(tparam.Constraint()) + } + + return hash + 3*h.hashTuple(t.Params()) + 5*h.hashTuple(t.Results()) + + case *types.Union: + return h.hashUnion(t) + + case *types.Interface: + // Interfaces are identical if they have the same set of methods, with + // identical names and types, and they have the same set of type + // restrictions. See go/types.identical for more details. + var hash uint32 = 9103 + + // Hash methods. + for i, n := 0, t.NumMethods(); i < n; i++ { + // Method order is not significant. + // Ignore m.Pkg(). + m := t.Method(i) + // Use shallow hash on method signature to + // avoid anonymous interface cycles. + hash += 3*hashString(m.Name()) + 5*h.shallowHash(m.Type()) + } + + // Hash type restrictions. + terms, err := typeparams.InterfaceTermSet(t) + // if err != nil t has invalid type restrictions. + if err == nil { + hash += h.hashTermSet(terms) + } + + return hash + + case *types.Map: + return 9109 + 2*h.Hash(t.Key()) + 3*h.Hash(t.Elem()) + + case *types.Chan: + return 9127 + 2*uint32(t.Dir()) + 3*h.Hash(t.Elem()) + + case *types.Named: + hash := h.hashPtr(t.Obj()) + targs := t.TypeArgs() + for i := 0; i < targs.Len(); i++ { + targ := targs.At(i) + hash += 2 * h.Hash(targ) + } + return hash + + case *types.TypeParam: + return h.hashTypeParam(t) + + case *types.Tuple: + return h.hashTuple(t) + } + + panic(fmt.Sprintf("%T: %v", t, t)) +} + +func (h Hasher) hashTuple(tuple *types.Tuple) uint32 { + // See go/types.identicalTypes for rationale. + n := tuple.Len() + hash := 9137 + 2*uint32(n) + for i := 0; i < n; i++ { + hash += 3 * h.Hash(tuple.At(i).Type()) + } + return hash +} + +func (h Hasher) hashUnion(t *types.Union) uint32 { + // Hash type restrictions. 
+ terms, err := typeparams.UnionTermSet(t) + // if err != nil t has invalid type restrictions. Fall back on a non-zero + // hash. + if err != nil { + return 9151 + } + return h.hashTermSet(terms) +} + +func (h Hasher) hashTermSet(terms []*types.Term) uint32 { + hash := 9157 + 2*uint32(len(terms)) + for _, term := range terms { + // term order is not significant. + termHash := h.Hash(term.Type()) + if term.Tilde() { + termHash *= 9161 + } + hash += 3 * termHash + } + return hash +} + +// hashTypeParam returns a hash of the type parameter t, with a hash value +// depending on whether t is contained in h.sigTParams. +// +// If h.sigTParams is set and contains t, then we are in the process of hashing +// a signature, and the hash value of t must depend only on t's index and +// constraint: signatures are considered identical modulo type parameter +// renaming. To avoid infinite recursion, we only hash the type parameter +// index, and rely on types.Identical to handle signatures where constraints +// are not identical. +// +// Otherwise the hash of t depends only on t's pointer identity. +func (h Hasher) hashTypeParam(t *types.TypeParam) uint32 { + if h.sigTParams != nil { + i := t.Index() + if i >= 0 && i < h.sigTParams.Len() && t == h.sigTParams.At(i) { + return 9173 + 3*uint32(i) + } + } + return h.hashPtr(t.Obj()) +} + +// hashPtr hashes the pointer identity of ptr. It uses h.ptrMap to ensure that +// pointers values are not dependent on the GC. +func (h Hasher) hashPtr(ptr any) uint32 { + if hash, ok := h.ptrMap[ptr]; ok { + return hash + } + hash := uint32(reflect.ValueOf(ptr).Pointer()) + h.ptrMap[ptr] = hash + return hash +} + +// shallowHash computes a hash of t without looking at any of its +// element Types, to avoid potential anonymous cycles in the types of +// interface methods. +// +// When an unnamed non-empty interface type appears anywhere among the +// arguments or results of an interface method, there is a potential +// for endless recursion. Consider: +// +// type X interface { m() []*interface { X } } +// +// The problem is that the Methods of the interface in m's result type +// include m itself; there is no mention of the named type X that +// might help us break the cycle. +// (See comment in go/types.identical, case *Interface, for more.) +func (h Hasher) shallowHash(t types.Type) uint32 { + // t is the type of an interface method (Signature), + // its params or results (Tuples), or their immediate + // elements (mostly Slice, Pointer, Basic, Named), + // so there's no need to optimize anything else. + switch t := t.(type) { + case *types.Alias: + return h.shallowHash(types.Unalias(t)) + + case *types.Signature: + var hash uint32 = 604171 + if t.Variadic() { + hash *= 971767 + } + // The Signature/Tuple recursion is always finite + // and invariably shallow. 
+ return hash + 1062599*h.shallowHash(t.Params()) + 1282529*h.shallowHash(t.Results()) + + case *types.Tuple: + n := t.Len() + hash := 9137 + 2*uint32(n) + for i := 0; i < n; i++ { + hash += 53471161 * h.shallowHash(t.At(i).Type()) + } + return hash + + case *types.Basic: + return 45212177 * uint32(t.Kind()) + + case *types.Array: + return 1524181 + 2*uint32(t.Len()) + + case *types.Slice: + return 2690201 + + case *types.Struct: + return 3326489 + + case *types.Pointer: + return 4393139 + + case *types.Union: + return 562448657 + + case *types.Interface: + return 2124679 // no recursion here + + case *types.Map: + return 9109 + + case *types.Chan: + return 9127 + + case *types.Named: + return h.hashPtr(t.Obj()) + + case *types.TypeParam: + return h.hashPtr(t.Obj()) + } + panic(fmt.Sprintf("shallowHash: %T: %v", t, t)) +} diff --git a/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go b/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go new file mode 100644 index 00000000000..f7666028fe5 --- /dev/null +++ b/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go @@ -0,0 +1,71 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file implements a cache of method sets. + +package typeutil + +import ( + "go/types" + "sync" +) + +// A MethodSetCache records the method set of each type T for which +// MethodSet(T) is called so that repeat queries are fast. +// The zero value is a ready-to-use cache instance. +type MethodSetCache struct { + mu sync.Mutex + named map[*types.Named]struct{ value, pointer *types.MethodSet } // method sets for named N and *N + others map[types.Type]*types.MethodSet // all other types +} + +// MethodSet returns the method set of type T. It is thread-safe. +// +// If cache is nil, this function is equivalent to types.NewMethodSet(T). +// Utility functions can thus expose an optional *MethodSetCache +// parameter to clients that care about performance. +func (cache *MethodSetCache) MethodSet(T types.Type) *types.MethodSet { + if cache == nil { + return types.NewMethodSet(T) + } + cache.mu.Lock() + defer cache.mu.Unlock() + + switch T := types.Unalias(T).(type) { + case *types.Named: + return cache.lookupNamed(T).value + + case *types.Pointer: + if N, ok := types.Unalias(T.Elem()).(*types.Named); ok { + return cache.lookupNamed(N).pointer + } + } + + // all other types + // (The map uses pointer equivalence, not type identity.) + mset := cache.others[T] + if mset == nil { + mset = types.NewMethodSet(T) + if cache.others == nil { + cache.others = make(map[types.Type]*types.MethodSet) + } + cache.others[T] = mset + } + return mset +} + +func (cache *MethodSetCache) lookupNamed(named *types.Named) struct{ value, pointer *types.MethodSet } { + if cache.named == nil { + cache.named = make(map[*types.Named]struct{ value, pointer *types.MethodSet }) + } + // Avoid recomputing mset(*T) for each distinct Pointer + // instance whose underlying type is a named type. 
+ msets, ok := cache.named[named] + if !ok { + msets.value = types.NewMethodSet(named) + msets.pointer = types.NewMethodSet(types.NewPointer(named)) + cache.named[named] = msets + } + return msets +} diff --git a/vendor/golang.org/x/tools/go/types/typeutil/ui.go b/vendor/golang.org/x/tools/go/types/typeutil/ui.go new file mode 100644 index 00000000000..9dda6a25df7 --- /dev/null +++ b/vendor/golang.org/x/tools/go/types/typeutil/ui.go @@ -0,0 +1,53 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeutil + +// This file defines utilities for user interfaces that display types. + +import ( + "go/types" +) + +// IntuitiveMethodSet returns the intuitive method set of a type T, +// which is the set of methods you can call on an addressable value of +// that type. +// +// The result always contains MethodSet(T), and is exactly MethodSet(T) +// for interface types and for pointer-to-concrete types. +// For all other concrete types T, the result additionally +// contains each method belonging to *T if there is no identically +// named method on T itself. +// +// This corresponds to user intuition about method sets; +// this function is intended only for user interfaces. +// +// The order of the result is as for types.MethodSet(T). +func IntuitiveMethodSet(T types.Type, msets *MethodSetCache) []*types.Selection { + isPointerToConcrete := func(T types.Type) bool { + ptr, ok := types.Unalias(T).(*types.Pointer) + return ok && !types.IsInterface(ptr.Elem()) + } + + var result []*types.Selection + mset := msets.MethodSet(T) + if types.IsInterface(T) || isPointerToConcrete(T) { + for i, n := 0, mset.Len(); i < n; i++ { + result = append(result, mset.At(i)) + } + } else { + // T is some other concrete type. + // Report methods of T and *T, preferring those of T. + pmset := msets.MethodSet(types.NewPointer(T)) + for i, n := 0, pmset.Len(); i < n; i++ { + meth := pmset.At(i) + if m := mset.Lookup(meth.Obj().Pkg(), meth.Obj().Name()); m != nil { + meth = m + } + result = append(result, meth) + } + + } + return result +} diff --git a/vendor/golang.org/x/tools/internal/aliases/aliases.go b/vendor/golang.org/x/tools/internal/aliases/aliases.go index c24c2eee457..b9425f5a209 100644 --- a/vendor/golang.org/x/tools/internal/aliases/aliases.go +++ b/vendor/golang.org/x/tools/internal/aliases/aliases.go @@ -22,11 +22,17 @@ import ( // GODEBUG=gotypesalias=... by invoking the type checker. The Enabled // function is expensive and should be called once per task (e.g. // package import), not once per call to NewAlias. -func NewAlias(enabled bool, pos token.Pos, pkg *types.Package, name string, rhs types.Type) *types.TypeName { +// +// Precondition: enabled || len(tparams)==0. +// If materialized aliases are disabled, there must not be any type parameters. 
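Note: MethodSetCache and IntuitiveMethodSet above are exported API. A short usage sketch; the toy package source is illustrative only:

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"

	"golang.org/x/tools/go/types/typeutil"
)

func main() {
	src := `package p
type T struct{}
func (T) Value()    {}
func (*T) Pointer() {}`

	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	pkg, err := new(types.Config).Check("p", fset, []*ast.File{f}, nil)
	if err != nil {
		panic(err)
	}
	T := pkg.Scope().Lookup("T").Type()

	var cache typeutil.MethodSetCache // zero value is a ready-to-use cache
	fmt.Println(cache.MethodSet(T).Len()) // 1: only Value is in T's method set

	// The intuitive set also reports *T's Pointer method, since both are
	// callable on an addressable T.
	for _, sel := range typeutil.IntuitiveMethodSet(T, &cache) {
		fmt.Println(sel.Obj().Name()) // prints Pointer and Value
	}
}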
+func NewAlias(enabled bool, pos token.Pos, pkg *types.Package, name string, rhs types.Type, tparams []*types.TypeParam) *types.TypeName { if enabled { tname := types.NewTypeName(pos, pkg, name, nil) - newAlias(tname, rhs) + SetTypeParams(types.NewAlias(tname, rhs), tparams) return tname } + if len(tparams) > 0 { + panic("cannot create an alias with type parameters when gotypesalias is not enabled") + } return types.NewTypeName(pos, pkg, name, rhs) } diff --git a/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go b/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go deleted file mode 100644 index c027b9f315f..00000000000 --- a/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.22 -// +build !go1.22 - -package aliases - -import ( - "go/types" -) - -// Alias is a placeholder for a go/types.Alias for <=1.21. -// It will never be created by go/types. -type Alias struct{} - -func (*Alias) String() string { panic("unreachable") } -func (*Alias) Underlying() types.Type { panic("unreachable") } -func (*Alias) Obj() *types.TypeName { panic("unreachable") } -func Rhs(alias *Alias) types.Type { panic("unreachable") } - -// Unalias returns the type t for go <=1.21. -func Unalias(t types.Type) types.Type { return t } - -func newAlias(name *types.TypeName, rhs types.Type) *Alias { panic("unreachable") } - -// Enabled reports whether [NewAlias] should create [types.Alias] types. -// -// Before go1.22, this function always returns false. -func Enabled() bool { return false } diff --git a/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go b/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go index b3299548419..7716a3331db 100644 --- a/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go +++ b/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go @@ -2,9 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build go1.22 -// +build go1.22 - package aliases import ( @@ -14,31 +11,51 @@ import ( "go/types" ) -// Alias is an alias of types.Alias. -type Alias = types.Alias - // Rhs returns the type on the right-hand side of the alias declaration. -func Rhs(alias *Alias) types.Type { +func Rhs(alias *types.Alias) types.Type { if alias, ok := any(alias).(interface{ Rhs() types.Type }); ok { return alias.Rhs() // go1.23+ } // go1.22's Alias didn't have the Rhs method, // so Unalias is the best we can do. - return Unalias(alias) + return types.Unalias(alias) +} + +// TypeParams returns the type parameter list of the alias. +func TypeParams(alias *types.Alias) *types.TypeParamList { + if alias, ok := any(alias).(interface{ TypeParams() *types.TypeParamList }); ok { + return alias.TypeParams() // go1.23+ + } + return nil +} + +// SetTypeParams sets the type parameters of the alias type. +func SetTypeParams(alias *types.Alias, tparams []*types.TypeParam) { + if alias, ok := any(alias).(interface { + SetTypeParams(tparams []*types.TypeParam) + }); ok { + alias.SetTypeParams(tparams) // go1.23+ + } else if len(tparams) > 0 { + panic("cannot set type parameters of an Alias type in go1.22") + } +} + +// TypeArgs returns the type arguments used to instantiate the Alias type. 
+func TypeArgs(alias *types.Alias) *types.TypeList { + if alias, ok := any(alias).(interface{ TypeArgs() *types.TypeList }); ok { + return alias.TypeArgs() // go1.23+ + } + return nil // empty (go1.22) } -// Unalias is a wrapper of types.Unalias. -func Unalias(t types.Type) types.Type { return types.Unalias(t) } - -// newAlias is an internal alias around types.NewAlias. -// Direct usage is discouraged as the moment. -// Try to use NewAlias instead. -func newAlias(tname *types.TypeName, rhs types.Type) *Alias { - a := types.NewAlias(tname, rhs) - // TODO(go.dev/issue/65455): Remove kludgy workaround to set a.actual as a side-effect. - Unalias(a) - return a +// Origin returns the generic Alias type of which alias is an instance. +// If alias is not an instance of a generic alias, Origin returns alias. +func Origin(alias *types.Alias) *types.Alias { + if alias, ok := any(alias).(interface{ Origin() *types.Alias }); ok { + return alias.Origin() // go1.23+ + } + return alias // not an instance of a generic alias (go1.22) } // Enabled reports whether [NewAlias] should create [types.Alias] types. @@ -56,7 +73,7 @@ func Enabled() bool { // many tests. Therefore any attempt to cache the result // is just incorrect. fset := token.NewFileSet() - f, _ := parser.ParseFile(fset, "a.go", "package p; type A = int", 0) + f, _ := parser.ParseFile(fset, "a.go", "package p; type A = int", parser.SkipObjectResolution) pkg, _ := new(types.Config).Check("p", fset, []*ast.File{f}, nil) _, enabled := pkg.Scope().Lookup("A").Type().(*types.Alias) return enabled diff --git a/vendor/golang.org/x/tools/internal/gcimporter/bimport.go b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go index d98b0db2a9a..d79a605ed13 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/bimport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go @@ -87,64 +87,3 @@ func chanDir(d int) types.ChanDir { return 0 } } - -var predeclOnce sync.Once -var predecl []types.Type // initialized lazily - -func predeclared() []types.Type { - predeclOnce.Do(func() { - // initialize lazily to be sure that all - // elements have been initialized before - predecl = []types.Type{ // basic types - types.Typ[types.Bool], - types.Typ[types.Int], - types.Typ[types.Int8], - types.Typ[types.Int16], - types.Typ[types.Int32], - types.Typ[types.Int64], - types.Typ[types.Uint], - types.Typ[types.Uint8], - types.Typ[types.Uint16], - types.Typ[types.Uint32], - types.Typ[types.Uint64], - types.Typ[types.Uintptr], - types.Typ[types.Float32], - types.Typ[types.Float64], - types.Typ[types.Complex64], - types.Typ[types.Complex128], - types.Typ[types.String], - - // basic type aliases - types.Universe.Lookup("byte").Type(), - types.Universe.Lookup("rune").Type(), - - // error - types.Universe.Lookup("error").Type(), - - // untyped types - types.Typ[types.UntypedBool], - types.Typ[types.UntypedInt], - types.Typ[types.UntypedRune], - types.Typ[types.UntypedFloat], - types.Typ[types.UntypedComplex], - types.Typ[types.UntypedString], - types.Typ[types.UntypedNil], - - // package unsafe - types.Typ[types.UnsafePointer], - - // invalid type - types.Typ[types.Invalid], // only appears in packages with errors - - // used internally by gc; never used by this package or in .a files - anyType{}, - } - predecl = append(predecl, additionalPredeclared()...) 
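Note: the Enabled probe above can be reproduced in user code to observe whether the type checker materializes aliases. A minimal sketch; the result depends on the Go release and the GODEBUG=gotypesalias setting:

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
)

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "a.go", "package p; type A = int", parser.SkipObjectResolution)
	if err != nil {
		panic(err)
	}
	pkg, err := new(types.Config).Check("p", fset, []*ast.File{f}, nil)
	if err != nil {
		panic(err)
	}
	_, materialized := pkg.Scope().Lookup("A").Type().(*types.Alias)
	fmt.Println(materialized) // true by default on go1.23+; false under GODEBUG=gotypesalias=0
}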
- }) - return predecl -} - -type anyType struct{} - -func (t anyType) Underlying() types.Type { return t } -func (t anyType) String() string { return "any" } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go index 39df91124a4..e6c5d51f8e5 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go @@ -232,14 +232,19 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func // Select appropriate importer. if len(data) > 0 { switch data[0] { - case 'v', 'c', 'd': // binary, till go1.10 + case 'v', 'c', 'd': + // binary: emitted by cmd/compile till go1.10; obsolete. return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0]) - case 'i': // indexed, till go1.19 + case 'i': + // indexed: emitted by cmd/compile till go1.19; + // now used only for serializing go/types. + // See https://github.com/golang/go/issues/69491. _, pkg, err := IImportData(fset, packages, data[1:], id) return pkg, err - case 'u': // unified, from go1.20 + case 'u': + // unified: emitted by cmd/compile since go1.20. _, pkg, err := UImportData(fset, packages, data[1:size], id) return pkg, err diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go index deeb67f315a..7dfc31a37d7 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go @@ -2,9 +2,227 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Indexed binary package export. -// This file was derived from $GOROOT/src/cmd/compile/internal/gc/iexport.go; -// see that file for specification of the format. +// Indexed package export. +// +// The indexed export data format is an evolution of the previous +// binary export data format. Its chief contribution is introducing an +// index table, which allows efficient random access of individual +// declarations and inline function bodies. In turn, this allows +// avoiding unnecessary work for compilation units that import large +// packages. +// +// +// The top-level data format is structured as: +// +// Header struct { +// Tag byte // 'i' +// Version uvarint +// StringSize uvarint +// DataSize uvarint +// } +// +// Strings [StringSize]byte +// Data [DataSize]byte +// +// MainIndex []struct{ +// PkgPath stringOff +// PkgName stringOff +// PkgHeight uvarint +// +// Decls []struct{ +// Name stringOff +// Offset declOff +// } +// } +// +// Fingerprint [8]byte +// +// uvarint means a uint64 written out using uvarint encoding. +// +// []T means a uvarint followed by that many T objects. In other +// words: +// +// Len uvarint +// Elems [Len]T +// +// stringOff means a uvarint that indicates an offset within the +// Strings section. At that offset is another uvarint, followed by +// that many bytes, which form the string value. +// +// declOff means a uvarint that indicates an offset within the Data +// section where the associated declaration can be found. 
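Note: to make the stringOff convention above concrete, a hypothetical decoder for a string reference might look like this (the stringAt helper is illustrative, not the importer's actual code):

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

// stringAt resolves a stringOff against the Strings section: at the offset
// sits a uvarint length followed by that many bytes of string data.
func stringAt(strings []byte, off uint64) (string, error) {
	r := bytes.NewReader(strings[off:])
	n, err := binary.ReadUvarint(r)
	if err != nil {
		return "", err
	}
	buf := make([]byte, n)
	if _, err := io.ReadFull(r, buf); err != nil {
		return "", err
	}
	return string(buf), nil
}

func main() {
	// A Strings section containing one entry at offset 0: "fmt" (length 3).
	section := []byte{3, 'f', 'm', 't'}
	s, err := stringAt(section, 0)
	fmt.Println(s, err) // fmt <nil>
}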
+// +// +// There are five kinds of declarations, distinguished by their first +// byte: +// +// type Var struct { +// Tag byte // 'V' +// Pos Pos +// Type typeOff +// } +// +// type Func struct { +// Tag byte // 'F' or 'G' +// Pos Pos +// TypeParams []typeOff // only present if Tag == 'G' +// Signature Signature +// } +// +// type Const struct { +// Tag byte // 'C' +// Pos Pos +// Value Value +// } +// +// type Type struct { +// Tag byte // 'T' or 'U' +// Pos Pos +// TypeParams []typeOff // only present if Tag == 'U' +// Underlying typeOff +// +// Methods []struct{ // omitted if Underlying is an interface type +// Pos Pos +// Name stringOff +// Recv Param +// Signature Signature +// } +// } +// +// type Alias struct { +// Tag byte // 'A' or 'B' +// Pos Pos +// TypeParams []typeOff // only present if Tag == 'B' +// Type typeOff +// } +// +// // "Automatic" declaration of each typeparam +// type TypeParam struct { +// Tag byte // 'P' +// Pos Pos +// Implicit bool +// Constraint typeOff +// } +// +// typeOff means a uvarint that either indicates a predeclared type, +// or an offset into the Data section. If the uvarint is less than +// predeclReserved, then it indicates the index into the predeclared +// types list (see predeclared in bexport.go for order). Otherwise, +// subtracting predeclReserved yields the offset of a type descriptor. +// +// Value means a type, kind, and type-specific value. See +// (*exportWriter).value for details. +// +// +// There are twelve kinds of type descriptors, distinguished by an itag: +// +// type DefinedType struct { +// Tag itag // definedType +// Name stringOff +// PkgPath stringOff +// } +// +// type PointerType struct { +// Tag itag // pointerType +// Elem typeOff +// } +// +// type SliceType struct { +// Tag itag // sliceType +// Elem typeOff +// } +// +// type ArrayType struct { +// Tag itag // arrayType +// Len uint64 +// Elem typeOff +// } +// +// type ChanType struct { +// Tag itag // chanType +// Dir uint64 // 1 RecvOnly; 2 SendOnly; 3 SendRecv +// Elem typeOff +// } +// +// type MapType struct { +// Tag itag // mapType +// Key typeOff +// Elem typeOff +// } +// +// type FuncType struct { +// Tag itag // signatureType +// PkgPath stringOff +// Signature Signature +// } +// +// type StructType struct { +// Tag itag // structType +// PkgPath stringOff +// Fields []struct { +// Pos Pos +// Name stringOff +// Type typeOff +// Embedded bool +// Note stringOff +// } +// } +// +// type InterfaceType struct { +// Tag itag // interfaceType +// PkgPath stringOff +// Embeddeds []struct { +// Pos Pos +// Type typeOff +// } +// Methods []struct { +// Pos Pos +// Name stringOff +// Signature Signature +// } +// } +// +// // Reference to a type param declaration +// type TypeParamType struct { +// Tag itag // typeParamType +// Name stringOff +// PkgPath stringOff +// } +// +// // Instantiation of a generic type (like List[T2] or List[int]) +// type InstanceType struct { +// Tag itag // instanceType +// Pos pos +// TypeArgs []typeOff +// BaseType typeOff +// } +// +// type UnionType struct { +// Tag itag // interfaceType +// Terms []struct { +// tilde bool +// Type typeOff +// } +// } +// +// +// +// type Signature struct { +// Params []Param +// Results []Param +// Variadic bool // omitted if Results is empty +// } +// +// type Param struct { +// Pos Pos +// Name stringOff +// Type typOff +// } +// +// +// Pos encodes a file:line:column triple, incorporating a simple delta +// encoding scheme within a data object. See exportWriter.pos for +// details. 
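Note: a sketch of reading just the Header described above; the payload is fabricated for illustration:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	// Fabricate a header: tag 'i', version 2, 5 bytes of strings, 0 bytes of data.
	var buf bytes.Buffer
	buf.WriteByte('i')
	for _, v := range []uint64{2, 5, 0} {
		var tmp [binary.MaxVarintLen64]byte
		buf.Write(tmp[:binary.PutUvarint(tmp[:], v)])
	}

	r := bytes.NewReader(buf.Bytes())
	tag, _ := r.ReadByte()
	version, _ := binary.ReadUvarint(r)
	stringSize, _ := binary.ReadUvarint(r)
	dataSize, _ := binary.ReadUvarint(r)
	fmt.Printf("tag=%c version=%d strings=%d data=%d\n", tag, version, stringSize, dataSize)
}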
package gcimporter @@ -24,11 +242,30 @@ import ( "golang.org/x/tools/go/types/objectpath" "golang.org/x/tools/internal/aliases" - "golang.org/x/tools/internal/tokeninternal" ) // IExportShallow encodes "shallow" export data for the specified package. // +// For types, we use "shallow" export data. Historically, the Go +// compiler always produced a summary of the types for a given package +// that included types from other packages that it indirectly +// referenced: "deep" export data. This had the advantage that the +// compiler (and analogous tools such as gopls) need only load one +// file per direct import. However, it meant that the files tended to +// get larger based on the level of the package in the import +// graph. For example, higher-level packages in the kubernetes module +// have over 1MB of "deep" export data, even when they have almost no +// content of their own, merely because they mention a major type that +// references many others. In pathological cases the export data was +// 300x larger than the source for a package due to this quadratic +// growth. +// +// "Shallow" export data means that the serialized types describe only +// a single package. If those types mention types from other packages, +// the type checker may need to request additional packages beyond +// just the direct imports. Type information for the entire transitive +// closure of imports is provided (lazily) by the DAG. +// // No promises are made about the encoding other than that it can be decoded by // the same version of IIExportShallow. If you plan to save export data in the // file system, be sure to include a cryptographic digest of the executable in @@ -51,8 +288,8 @@ func IExportShallow(fset *token.FileSet, pkg *types.Package, reportf ReportFunc) } // IImportShallow decodes "shallow" types.Package data encoded by -// IExportShallow in the same executable. This function cannot import data from -// cmd/compile or gcexportdata.Write. +// [IExportShallow] in the same executable. This function cannot import data +// from cmd/compile or gcexportdata.Write. // // The importer calls getPackages to obtain package symbols for all // packages mentioned in the export data, including the one being @@ -223,7 +460,7 @@ func (p *iexporter) encodeFile(w *intWriter, file *token.File, needed []uint64) // Sort the set of needed offsets. Duplicates are harmless. 
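Note: IExportShallow itself is internal, but the "deep" export data it is contrasted with is exercised through the public golang.org/x/tools/go/gcexportdata package. A round-trip sketch under that assumption:

package main

import (
	"bytes"
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"

	"golang.org/x/tools/go/gcexportdata"
)

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", "package p; type T struct{ X int }", 0)
	if err != nil {
		panic(err)
	}
	pkg, err := new(types.Config).Check("p", fset, []*ast.File{f}, nil)
	if err != nil {
		panic(err)
	}

	var buf bytes.Buffer
	if err := gcexportdata.Write(&buf, fset, pkg); err != nil { // deep export data
		panic(err)
	}

	imports := make(map[string]*types.Package)
	pkg2, err := gcexportdata.Read(&buf, token.NewFileSet(), imports, "p")
	if err != nil {
		panic(err)
	}
	fmt.Println(pkg2.Scope().Lookup("T").Type()) // p.T
}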
sort.Slice(needed, func(i, j int) bool { return needed[i] < needed[j] }) - lines := tokeninternal.GetLines(file) // byte offset of each line start + lines := file.Lines() // byte offset of each line start w.uint64(uint64(len(lines))) // Rather than record the entire array of line start offsets, @@ -507,13 +744,13 @@ func (p *iexporter) doDecl(obj types.Object) { case *types.TypeName: t := obj.Type() - if tparam, ok := aliases.Unalias(t).(*types.TypeParam); ok { + if tparam, ok := types.Unalias(t).(*types.TypeParam); ok { w.tag(typeParamTag) w.pos(obj.Pos()) constraint := tparam.Constraint() if p.version >= iexportVersionGo1_18 { implicit := false - if iface, _ := aliases.Unalias(constraint).(*types.Interface); iface != nil { + if iface, _ := types.Unalias(constraint).(*types.Interface); iface != nil { implicit = iface.IsImplicit() } w.bool(implicit) @@ -523,9 +760,22 @@ func (p *iexporter) doDecl(obj types.Object) { } if obj.IsAlias() { - w.tag(aliasTag) + alias, materialized := t.(*types.Alias) // may fail when aliases are not enabled + + var tparams *types.TypeParamList + if materialized { + tparams = aliases.TypeParams(alias) + } + if tparams.Len() == 0 { + w.tag(aliasTag) + } else { + w.tag(genericAliasTag) + } w.pos(obj.Pos()) - if alias, ok := t.(*aliases.Alias); ok { + if tparams.Len() > 0 { + w.tparamList(obj.Name(), tparams, obj.Pkg()) + } + if materialized { // Preserve materialized aliases, // even of non-exported types. t = aliases.Rhs(alias) @@ -744,8 +994,14 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { }() } switch t := t.(type) { - case *aliases.Alias: - // TODO(adonovan): support parameterized aliases, following *types.Named. + case *types.Alias: + if targs := aliases.TypeArgs(t); targs.Len() > 0 { + w.startType(instanceType) + w.pos(t.Obj().Pos()) + w.typeList(targs, pkg) + w.typ(aliases.Origin(t), pkg) + return + } w.startType(aliasType) w.qualifiedType(t.Obj()) @@ -854,7 +1110,7 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { for i := 0; i < n; i++ { ft := t.EmbeddedType(i) tPkg := pkg - if named, _ := aliases.Unalias(ft).(*types.Named); named != nil { + if named, _ := types.Unalias(ft).(*types.Named); named != nil { w.pos(named.Obj().Pos()) } else { w.pos(token.NoPos) diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go index 136aa03653c..e260c0e8dbf 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. // Indexed package import. -// See cmd/compile/internal/gc/iexport.go for the export data format. +// See iexport.go for the export data format. // This file is a copy of $GOROOT/src/go/internal/gcimporter/iimport.go. @@ -53,6 +53,7 @@ const ( iexportVersionPosCol = 1 iexportVersionGo1_18 = 2 iexportVersionGenerics = 2 + iexportVersion = iexportVersionGenerics iexportVersionCurrent = 2 ) @@ -540,7 +541,7 @@ func canReuse(def *types.Named, rhs types.Type) bool { if def == nil { return true } - iface, _ := aliases.Unalias(rhs).(*types.Interface) + iface, _ := types.Unalias(rhs).(*types.Interface) if iface == nil { return true } @@ -557,19 +558,28 @@ type importReader struct { prevColumn int64 } +// markBlack is redefined in iimport_go123.go, to work around golang/go#69912. +// +// If TypeNames are not marked black (in the sense of go/types cycle +// detection), they may be mutated when dot-imported. 
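Note: the switch from tokeninternal.GetLines to file.Lines() above relies on the Lines accessor added to go/token in go1.21. For reference:

package main

import (
	"fmt"
	"go/parser"
	"go/token"
)

func main() {
	src := "package p\n\nvar X = 1\n"
	fset := token.NewFileSet()
	if _, err := parser.ParseFile(fset, "p.go", src, 0); err != nil {
		panic(err)
	}
	var file *token.File
	fset.Iterate(func(f *token.File) bool { file = f; return false })
	fmt.Println(file.Lines()) // byte offset of each line start: [0 10 11]
}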
Fix this by punching a +// hole through the type, when compiling with Go 1.23. (The bug has been fixed +// for 1.24, but the fix was not worth back-porting). +var markBlack = func(name *types.TypeName) {} + func (r *importReader) obj(name string) { tag := r.byte() pos := r.pos() switch tag { - case aliasTag: + case aliasTag, genericAliasTag: + var tparams []*types.TypeParam + if tag == genericAliasTag { + tparams = r.tparamList() + } typ := r.typ() - // TODO(adonovan): support generic aliases: - // if tag == genericAliasTag { - // tparams := r.tparamList() - // alias.SetTypeParams(tparams) - // } - r.declare(aliases.NewAlias(r.p.aliases, pos, r.currPkg, name, typ)) + obj := aliases.NewAlias(r.p.aliases, pos, r.currPkg, name, typ, tparams) + markBlack(obj) // workaround for golang/go#69912 + r.declare(obj) case constTag: typ, val := r.value() @@ -589,6 +599,9 @@ func (r *importReader) obj(name string) { // declaration before recursing. obj := types.NewTypeName(pos, r.currPkg, name, nil) named := types.NewNamed(obj, nil, nil) + + markBlack(obj) // workaround for golang/go#69912 + // Declare obj before calling r.tparamList, so the new type name is recognized // if used in the constraint of one of its own typeparams (see #48280). r.declare(obj) @@ -615,7 +628,7 @@ func (r *importReader) obj(name string) { if targs.Len() > 0 { rparams = make([]*types.TypeParam, targs.Len()) for i := range rparams { - rparams[i] = aliases.Unalias(targs.At(i)).(*types.TypeParam) + rparams[i] = types.Unalias(targs.At(i)).(*types.TypeParam) } } msig := r.signature(recv, rparams, nil) @@ -645,7 +658,7 @@ func (r *importReader) obj(name string) { } constraint := r.typ() if implicit { - iface, _ := aliases.Unalias(constraint).(*types.Interface) + iface, _ := types.Unalias(constraint).(*types.Interface) if iface == nil { errorf("non-interface constraint marked implicit") } @@ -852,7 +865,7 @@ func (r *importReader) typ() types.Type { } func isInterface(t types.Type) bool { - _, ok := aliases.Unalias(t).(*types.Interface) + _, ok := types.Unalias(t).(*types.Interface) return ok } @@ -862,7 +875,7 @@ func (r *importReader) string() string { return r.p.stringAt(r.uint64()) } func (r *importReader) doType(base *types.Named) (res types.Type) { k := r.kind() if debug { - r.p.trace("importing type %d (base: %s)", k, base) + r.p.trace("importing type %d (base: %v)", k, base) r.p.indent++ defer func() { r.p.indent-- @@ -959,7 +972,7 @@ func (r *importReader) doType(base *types.Named) (res types.Type) { methods[i] = method } - typ := newInterface(methods, embeddeds) + typ := types.NewInterfaceType(methods, embeddeds) r.p.interfaceList = append(r.p.interfaceList, typ) return typ @@ -1051,7 +1064,7 @@ func (r *importReader) tparamList() []*types.TypeParam { for i := range xs { // Note: the standard library importer is tolerant of nil types here, // though would panic in SetTypeParams. - xs[i] = aliases.Unalias(r.typ()).(*types.TypeParam) + xs[i] = types.Unalias(r.typ()).(*types.TypeParam) } return xs } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport_go122.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport_go122.go new file mode 100644 index 00000000000..7586bfaca60 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport_go122.go @@ -0,0 +1,53 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build go1.22 && !go1.24 + +package gcimporter + +import ( + "go/token" + "go/types" + "unsafe" +) + +// TODO(rfindley): delete this workaround once go1.24 is assured. + +func init() { + // Update markBlack so that it correctly sets the color + // of imported TypeNames. + // + // See the doc comment for markBlack for details. + + type color uint32 + const ( + white color = iota + black + grey + ) + type object struct { + _ *types.Scope + _ token.Pos + _ *types.Package + _ string + _ types.Type + _ uint32 + color_ color + _ token.Pos + } + type typeName struct { + object + } + + // If the size of types.TypeName changes, this will fail to compile. + const delta = int64(unsafe.Sizeof(typeName{})) - int64(unsafe.Sizeof(types.TypeName{})) + var _ [-delta * delta]int + + markBlack = func(obj *types.TypeName) { + type uP = unsafe.Pointer + var ptr *typeName + *(*uP)(uP(&ptr)) = uP(obj) + ptr.color_ = black + } +} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go b/vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go deleted file mode 100644 index 8b163e3d058..00000000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.11 -// +build !go1.11 - -package gcimporter - -import "go/types" - -func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface { - named := make([]*types.Named, len(embeddeds)) - for i, e := range embeddeds { - var ok bool - named[i], ok = e.(*types.Named) - if !ok { - panic("embedding of non-defined interfaces in interfaces is not supported before Go 1.11") - } - } - return types.NewInterface(methods, named) -} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go b/vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go deleted file mode 100644 index 49984f40fd8..00000000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.11 -// +build go1.11 - -package gcimporter - -import "go/types" - -func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface { - return types.NewInterfaceType(methods, embeddeds) -} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/predeclared.go b/vendor/golang.org/x/tools/internal/gcimporter/predeclared.go new file mode 100644 index 00000000000..907c8557a54 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gcimporter/predeclared.go @@ -0,0 +1,91 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gcimporter + +import ( + "go/types" + "sync" +) + +// predecl is a cache for the predeclared types in types.Universe. +// +// Cache a distinct result based on the runtime value of any. +// The pointer value of the any type varies based on GODEBUG settings. 
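Note: the `var _ [-delta * delta]int` line above is a compile-time size assertion: if the mirrored layout ever drifts from types.TypeName, delta becomes nonzero, the array length turns negative, and the build fails. A standalone sketch of the trick, with illustrative types:

package main

import (
	"fmt"
	"unsafe"
)

type real struct {
	a uint64
	b uint32
}

// mirror must have exactly the same size as real.
type mirror struct {
	_ uint64
	_ uint32
}

const delta = int64(unsafe.Sizeof(mirror{})) - int64(unsafe.Sizeof(real{}))

// If delta != 0, -delta*delta is negative and this array type is invalid,
// so the package stops compiling.
var _ [-delta * delta]int

func main() {
	fmt.Println("layouts match:", unsafe.Sizeof(real{}), "bytes")
}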
+var predeclMu sync.Mutex +var predecl map[types.Type][]types.Type + +func predeclared() []types.Type { + anyt := types.Universe.Lookup("any").Type() + + predeclMu.Lock() + defer predeclMu.Unlock() + + if pre, ok := predecl[anyt]; ok { + return pre + } + + if predecl == nil { + predecl = make(map[types.Type][]types.Type) + } + + decls := []types.Type{ // basic types + types.Typ[types.Bool], + types.Typ[types.Int], + types.Typ[types.Int8], + types.Typ[types.Int16], + types.Typ[types.Int32], + types.Typ[types.Int64], + types.Typ[types.Uint], + types.Typ[types.Uint8], + types.Typ[types.Uint16], + types.Typ[types.Uint32], + types.Typ[types.Uint64], + types.Typ[types.Uintptr], + types.Typ[types.Float32], + types.Typ[types.Float64], + types.Typ[types.Complex64], + types.Typ[types.Complex128], + types.Typ[types.String], + + // basic type aliases + types.Universe.Lookup("byte").Type(), + types.Universe.Lookup("rune").Type(), + + // error + types.Universe.Lookup("error").Type(), + + // untyped types + types.Typ[types.UntypedBool], + types.Typ[types.UntypedInt], + types.Typ[types.UntypedRune], + types.Typ[types.UntypedFloat], + types.Typ[types.UntypedComplex], + types.Typ[types.UntypedString], + types.Typ[types.UntypedNil], + + // package unsafe + types.Typ[types.UnsafePointer], + + // invalid type + types.Typ[types.Invalid], // only appears in packages with errors + + // used internally by gc; never used by this package or in .a files + anyType{}, + + // comparable + types.Universe.Lookup("comparable").Type(), + + // any + anyt, + } + + predecl[anyt] = decls + return decls +} + +type anyType struct{} + +func (t anyType) Underlying() types.Type { return t } +func (t anyType) String() string { return "any" } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go b/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go deleted file mode 100644 index 0cd3b91b65a..00000000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gcimporter - -import "go/types" - -const iexportVersion = iexportVersionGenerics - -// additionalPredeclared returns additional predeclared types in go.1.18. -func additionalPredeclared() []types.Type { - return []types.Type{ - // comparable - types.Universe.Lookup("comparable").Type(), - - // any - types.Universe.Lookup("any").Type(), - } -} - -// See cmd/compile/internal/types.SplitVargenSuffix. -func splitVargenSuffix(name string) (base, suffix string) { - i := len(name) - for i > 0 && name[i-1] >= '0' && name[i-1] <= '9' { - i-- - } - const dot = "·" - if i >= len(dot) && name[i-len(dot):i] == dot { - i -= len(dot) - return name[:i], name[i:] - } - return name, "" -} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go b/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go deleted file mode 100644 index 38b624cadab..00000000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build !goexperiment.unified -// +build !goexperiment.unified - -package gcimporter - -const unifiedIR = false diff --git a/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go deleted file mode 100644 index b5118d0b3a5..00000000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build goexperiment.unified -// +build goexperiment.unified - -package gcimporter - -const unifiedIR = true diff --git a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go index 2c077068877..1db408613c9 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go @@ -52,8 +52,7 @@ func (pr *pkgReader) later(fn func()) { // See cmd/compile/internal/noder.derivedInfo. type derivedInfo struct { - idx pkgbits.Index - needed bool + idx pkgbits.Index } // See cmd/compile/internal/noder.typeInfo. @@ -110,13 +109,17 @@ func readUnifiedPackage(fset *token.FileSet, ctxt *types.Context, imports map[st r := pr.newReader(pkgbits.RelocMeta, pkgbits.PublicRootIdx, pkgbits.SyncPublic) pkg := r.pkg() - r.Bool() // has init + if r.Version().Has(pkgbits.HasInit) { + r.Bool() + } for i, n := 0, r.Len(); i < n; i++ { // As if r.obj(), but avoiding the Scope.Lookup call, // to avoid eager loading of imports. r.Sync(pkgbits.SyncObject) - assert(!r.Bool()) + if r.Version().Has(pkgbits.DerivedFuncInstance) { + assert(!r.Bool()) + } r.p.objIdx(r.Reloc(pkgbits.RelocObj)) assert(r.Len() == 0) } @@ -165,7 +168,7 @@ type readerDict struct { // tparams is a slice of the constructed TypeParams for the element. tparams []*types.TypeParam - // devived is a slice of types derived from tparams, which may be + // derived is a slice of types derived from tparams, which may be // instantiated while reading the current element. derived []derivedInfo derivedTypes []types.Type // lazily instantiated from derived @@ -471,7 +474,9 @@ func (r *reader) param() *types.Var { func (r *reader) obj() (types.Object, []types.Type) { r.Sync(pkgbits.SyncObject) - assert(!r.Bool()) + if r.Version().Has(pkgbits.DerivedFuncInstance) { + assert(!r.Bool()) + } pkg, name := r.p.objIdx(r.Reloc(pkgbits.RelocObj)) obj := pkgScope(pkg).Lookup(name) @@ -525,8 +530,12 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) { case pkgbits.ObjAlias: pos := r.pos() + var tparams []*types.TypeParam + if r.Version().Has(pkgbits.AliasTypeParamNames) { + tparams = r.typeParamNames() + } typ := r.typ() - declare(aliases.NewAlias(r.p.aliases, pos, objPkg, objName, typ)) + declare(aliases.NewAlias(r.p.aliases, pos, objPkg, objName, typ, tparams)) case pkgbits.ObjConst: pos := r.pos() @@ -553,7 +562,7 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) { // If the underlying type is an interface, we need to // duplicate its methods so we can replace the receiver // parameter's type (#49906). 
- if iface, ok := aliases.Unalias(underlying).(*types.Interface); ok && iface.NumExplicitMethods() != 0 { + if iface, ok := types.Unalias(underlying).(*types.Interface); ok && iface.NumExplicitMethods() != 0 { methods := make([]*types.Func, iface.NumExplicitMethods()) for i := range methods { fn := iface.ExplicitMethod(i) @@ -632,7 +641,10 @@ func (pr *pkgReader) objDictIdx(idx pkgbits.Index) *readerDict { dict.derived = make([]derivedInfo, r.Len()) dict.derivedTypes = make([]types.Type, len(dict.derived)) for i := range dict.derived { - dict.derived[i] = derivedInfo{r.Reloc(pkgbits.RelocType), r.Bool()} + dict.derived[i] = derivedInfo{idx: r.Reloc(pkgbits.RelocType)} + if r.Version().Has(pkgbits.DerivedInfoNeeded) { + assert(!r.Bool()) + } } pr.retireReader(r) @@ -726,3 +738,17 @@ func pkgScope(pkg *types.Package) *types.Scope { } return types.Universe } + +// See cmd/compile/internal/types.SplitVargenSuffix. +func splitVargenSuffix(name string) (base, suffix string) { + i := len(name) + for i > 0 && name[i-1] >= '0' && name[i-1] <= '9' { + i-- + } + const dot = "·" + if i >= len(dot) && name[i-len(dot):i] == dot { + i -= len(dot) + return name[:i], name[i:] + } + return name, "" +} diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke.go b/vendor/golang.org/x/tools/internal/gocommand/invoke.go index 2e59ff8558c..e333efc87f9 100644 --- a/vendor/golang.org/x/tools/internal/gocommand/invoke.go +++ b/vendor/golang.org/x/tools/internal/gocommand/invoke.go @@ -16,7 +16,6 @@ import ( "os" "os/exec" "path/filepath" - "reflect" "regexp" "runtime" "strconv" @@ -250,16 +249,13 @@ func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error { cmd.Stdout = stdout cmd.Stderr = stderr - // cmd.WaitDelay was added only in go1.20 (see #50436). - if waitDelay := reflect.ValueOf(cmd).Elem().FieldByName("WaitDelay"); waitDelay.IsValid() { - // https://go.dev/issue/59541: don't wait forever copying stderr - // after the command has exited. - // After CL 484741 we copy stdout manually, so we we'll stop reading that as - // soon as ctx is done. However, we also don't want to wait around forever - // for stderr. Give a much-longer-than-reasonable delay and then assume that - // something has wedged in the kernel or runtime. - waitDelay.Set(reflect.ValueOf(30 * time.Second)) - } + // https://go.dev/issue/59541: don't wait forever copying stderr + // after the command has exited. + // After CL 484741 we copy stdout manually, so we we'll stop reading that as + // soon as ctx is done. However, we also don't want to wait around forever + // for stderr. Give a much-longer-than-reasonable delay and then assume that + // something has wedged in the kernel or runtime. + cmd.WaitDelay = 30 * time.Second // The cwd gets resolved to the real path. On Darwin, where // /tmp is a symlink, this breaks anything that expects the diff --git a/vendor/golang.org/x/tools/internal/pkgbits/decoder.go b/vendor/golang.org/x/tools/internal/pkgbits/decoder.go index 2acd85851e3..f6cb37c5c3d 100644 --- a/vendor/golang.org/x/tools/internal/pkgbits/decoder.go +++ b/vendor/golang.org/x/tools/internal/pkgbits/decoder.go @@ -21,10 +21,7 @@ import ( // export data. type PkgDecoder struct { // version is the file format version. - version uint32 - - // aliases determines whether types.Aliases should be created - aliases bool + version Version // sync indicates whether the file uses sync markers. 
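Note: splitVargenSuffix (moved, not changed, in this hunk) strips the numeric "vargen" suffix that the compiler appends after a middle dot. Copied verbatim below with a small driver to show its behaviour:

package main

import "fmt"

// splitVargenSuffix is reproduced from the vendored code above.
func splitVargenSuffix(name string) (base, suffix string) {
	i := len(name)
	for i > 0 && name[i-1] >= '0' && name[i-1] <= '9' {
		i--
	}
	const dot = "·"
	if i >= len(dot) && name[i-len(dot):i] == dot {
		i -= len(dot)
		return name[:i], name[i:]
	}
	return name, ""
}

func main() {
	fmt.Println(splitVargenSuffix("Foo·3")) // Foo ·3
	fmt.Println(splitVargenSuffix("Bar"))   // Bar ""
}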
sync bool @@ -71,12 +68,9 @@ func (pr *PkgDecoder) SyncMarkers() bool { return pr.sync } // NewPkgDecoder returns a PkgDecoder initialized to read the Unified // IR export data from input. pkgPath is the package path for the // compilation unit that produced the export data. -// -// TODO(mdempsky): Remove pkgPath parameter; unneeded since CL 391014. func NewPkgDecoder(pkgPath, input string) PkgDecoder { pr := PkgDecoder{ pkgPath: pkgPath, - //aliases: aliases.Enabled(), } // TODO(mdempsky): Implement direct indexing of input string to @@ -84,14 +78,15 @@ func NewPkgDecoder(pkgPath, input string) PkgDecoder { r := strings.NewReader(input) - assert(binary.Read(r, binary.LittleEndian, &pr.version) == nil) + var ver uint32 + assert(binary.Read(r, binary.LittleEndian, &ver) == nil) + pr.version = Version(ver) - switch pr.version { - default: - panic(fmt.Errorf("unsupported version: %v", pr.version)) - case 0: - // no flags - case 1: + if pr.version >= numVersions { + panic(fmt.Errorf("cannot decode %q, export data version %d is greater than maximum supported version %d", pkgPath, pr.version, numVersions-1)) + } + + if pr.version.Has(Flags) { var flags uint32 assert(binary.Read(r, binary.LittleEndian, &flags) == nil) pr.sync = flags&flagSyncMarkers != 0 @@ -106,7 +101,9 @@ func NewPkgDecoder(pkgPath, input string) PkgDecoder { assert(err == nil) pr.elemData = input[pos:] - assert(len(pr.elemData)-8 == int(pr.elemEnds[len(pr.elemEnds)-1])) + + const fingerprintSize = 8 + assert(len(pr.elemData)-fingerprintSize == int(pr.elemEnds[len(pr.elemEnds)-1])) return pr } @@ -140,7 +137,7 @@ func (pr *PkgDecoder) AbsIdx(k RelocKind, idx Index) int { absIdx += int(pr.elemEndsEnds[k-1]) } if absIdx >= int(pr.elemEndsEnds[k]) { - errorf("%v:%v is out of bounds; %v", k, idx, pr.elemEndsEnds) + panicf("%v:%v is out of bounds; %v", k, idx, pr.elemEndsEnds) } return absIdx } @@ -197,9 +194,7 @@ func (pr *PkgDecoder) NewDecoderRaw(k RelocKind, idx Index) Decoder { Idx: idx, } - // TODO(mdempsky) r.data.Reset(...) after #44505 is resolved. - r.Data = *strings.NewReader(pr.DataIdx(k, idx)) - + r.Data.Reset(pr.DataIdx(k, idx)) r.Sync(SyncRelocs) r.Relocs = make([]RelocEnt, r.Len()) for i := range r.Relocs { @@ -248,7 +243,7 @@ type Decoder struct { func (r *Decoder) checkErr(err error) { if err != nil { - errorf("unexpected decoding error: %w", err) + panicf("unexpected decoding error: %w", err) } } @@ -519,3 +514,6 @@ func (pr *PkgDecoder) PeekObj(idx Index) (string, string, CodeObj) { return path, name, tag } + +// Version reports the version of the bitstream. +func (w *Decoder) Version() Version { return w.common.version } diff --git a/vendor/golang.org/x/tools/internal/pkgbits/encoder.go b/vendor/golang.org/x/tools/internal/pkgbits/encoder.go index 6482617a4fc..c17a12399d0 100644 --- a/vendor/golang.org/x/tools/internal/pkgbits/encoder.go +++ b/vendor/golang.org/x/tools/internal/pkgbits/encoder.go @@ -12,18 +12,15 @@ import ( "io" "math/big" "runtime" + "strings" ) -// currentVersion is the current version number. -// -// - v0: initial prototype -// -// - v1: adds the flags uint32 word -const currentVersion uint32 = 1 - // A PkgEncoder provides methods for encoding a package's Unified IR // export data. type PkgEncoder struct { + // version of the bitstream. + version Version + // elems holds the bitstream for previously encoded elements. 
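Note: NewPkgDecoder's header handling above boils down to reading a little-endian uint32 version word, then reading a flags word only when that version carries one. A self-contained sketch of the framing, with a fabricated payload:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	// Fabricate a V1 header: version word, then a flags word with sync markers on.
	var buf bytes.Buffer
	binary.Write(&buf, binary.LittleEndian, uint32(1)) // version V1
	binary.Write(&buf, binary.LittleEndian, uint32(1)) // flagSyncMarkers

	r := bytes.NewReader(buf.Bytes())
	var version uint32
	binary.Read(r, binary.LittleEndian, &version)

	var flags uint32
	if version >= 1 { // V1 introduced the flags word; cf. pr.version.Has(Flags)
		binary.Read(r, binary.LittleEndian, &flags)
	}
	fmt.Printf("version=%d sync=%v\n", version, flags&1 != 0)
}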
elems [numRelocs][]string @@ -47,8 +44,9 @@ func (pw *PkgEncoder) SyncMarkers() bool { return pw.syncFrames >= 0 } // export data files, but can help diagnosing desync errors in // higher-level Unified IR reader/writer code. If syncFrames is // negative, then sync markers are omitted entirely. -func NewPkgEncoder(syncFrames int) PkgEncoder { +func NewPkgEncoder(version Version, syncFrames int) PkgEncoder { return PkgEncoder{ + version: version, stringsIdx: make(map[string]Index), syncFrames: syncFrames, } @@ -64,13 +62,15 @@ func (pw *PkgEncoder) DumpTo(out0 io.Writer) (fingerprint [8]byte) { assert(binary.Write(out, binary.LittleEndian, x) == nil) } - writeUint32(currentVersion) + writeUint32(uint32(pw.version)) - var flags uint32 - if pw.SyncMarkers() { - flags |= flagSyncMarkers + if pw.version.Has(Flags) { + var flags uint32 + if pw.SyncMarkers() { + flags |= flagSyncMarkers + } + writeUint32(flags) } - writeUint32(flags) // Write elemEndsEnds. var sum uint32 @@ -159,7 +159,7 @@ type Encoder struct { // Flush finalizes the element's bitstream and returns its Index. func (w *Encoder) Flush() Index { - var sb bytes.Buffer // TODO(mdempsky): strings.Builder after #44505 is resolved + var sb strings.Builder // Backup the data so we write the relocations at the front. var tmp bytes.Buffer @@ -189,7 +189,7 @@ func (w *Encoder) Flush() Index { func (w *Encoder) checkErr(err error) { if err != nil { - errorf("unexpected encoding error: %v", err) + panicf("unexpected encoding error: %v", err) } } @@ -320,8 +320,14 @@ func (w *Encoder) Code(c Code) { // section (if not already present), and then writing a relocation // into the element bitstream. func (w *Encoder) String(s string) { + w.StringRef(w.p.StringIdx(s)) +} + +// StringRef writes a reference to the given index, which must be a +// previously encoded string value. +func (w *Encoder) StringRef(idx Index) { w.Sync(SyncString) - w.Reloc(RelocString, w.p.StringIdx(s)) + w.Reloc(RelocString, idx) } // Strings encodes and writes a variable-length slice of strings into @@ -348,7 +354,7 @@ func (w *Encoder) Value(val constant.Value) { func (w *Encoder) scalar(val constant.Value) { switch v := constant.Val(val).(type) { default: - errorf("unhandled %v (%v)", val, val.Kind()) + panicf("unhandled %v (%v)", val, val.Kind()) case bool: w.Code(ValBool) w.Bool(v) @@ -381,3 +387,6 @@ func (w *Encoder) bigFloat(v *big.Float) { b := v.Append(nil, 'p', -1) w.String(string(b)) // TODO: More efficient encoding. } + +// Version reports the version of the bitstream. +func (w *Encoder) Version() Version { return w.p.version } diff --git a/vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go b/vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go deleted file mode 100644 index 5294f6a63ed..00000000000 --- a/vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
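Note: the new StringRef path exposes the interning already performed by StringIdx: each distinct string is written once, and later uses refer to its index. The pattern in miniature (names are illustrative, not the encoder's):

package main

import "fmt"

type interner struct {
	idx   map[string]int
	elems []string
}

func (in *interner) stringIdx(s string) int {
	if i, ok := in.idx[s]; ok {
		return i // already encoded: reuse the index (cf. StringRef)
	}
	i := len(in.elems)
	in.elems = append(in.elems, s)
	in.idx[s] = i
	return i
}

func main() {
	in := &interner{idx: make(map[string]int)}
	a := in.stringIdx("golang.org/x/tools")
	b := in.stringIdx("golang.org/x/tools") // no second copy is stored
	fmt.Println(a == b, len(in.elems))      // true 1
}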
- -//go:build !go1.7 -// +build !go1.7 - -// TODO(mdempsky): Remove after #44505 is resolved - -package pkgbits - -import "runtime" - -func walkFrames(pcs []uintptr, visit frameVisitor) { - for _, pc := range pcs { - fn := runtime.FuncForPC(pc) - file, line := fn.FileLine(pc) - - visit(file, line, fn.Name(), pc-fn.Entry()) - } -} diff --git a/vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go b/vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go deleted file mode 100644 index 2324ae7adfe..00000000000 --- a/vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.7 -// +build go1.7 - -package pkgbits - -import "runtime" - -// walkFrames calls visit for each call frame represented by pcs. -// -// pcs should be a slice of PCs, as returned by runtime.Callers. -func walkFrames(pcs []uintptr, visit frameVisitor) { - if len(pcs) == 0 { - return - } - - frames := runtime.CallersFrames(pcs) - for { - frame, more := frames.Next() - visit(frame.File, frame.Line, frame.Function, frame.PC-frame.Entry) - if !more { - return - } - } -} diff --git a/vendor/golang.org/x/tools/internal/pkgbits/support.go b/vendor/golang.org/x/tools/internal/pkgbits/support.go index ad26d3b28ca..50534a29553 100644 --- a/vendor/golang.org/x/tools/internal/pkgbits/support.go +++ b/vendor/golang.org/x/tools/internal/pkgbits/support.go @@ -12,6 +12,6 @@ func assert(b bool) { } } -func errorf(format string, args ...interface{}) { +func panicf(format string, args ...any) { panic(fmt.Errorf(format, args...)) } diff --git a/vendor/golang.org/x/tools/internal/pkgbits/sync.go b/vendor/golang.org/x/tools/internal/pkgbits/sync.go index 5bd51ef7170..1520b73afb9 100644 --- a/vendor/golang.org/x/tools/internal/pkgbits/sync.go +++ b/vendor/golang.org/x/tools/internal/pkgbits/sync.go @@ -6,6 +6,7 @@ package pkgbits import ( "fmt" + "runtime" "strings" ) @@ -23,6 +24,24 @@ func fmtFrames(pcs ...uintptr) []string { type frameVisitor func(file string, line int, name string, offset uintptr) +// walkFrames calls visit for each call frame represented by pcs. +// +// pcs should be a slice of PCs, as returned by runtime.Callers. +func walkFrames(pcs []uintptr, visit frameVisitor) { + if len(pcs) == 0 { + return + } + + frames := runtime.CallersFrames(pcs) + for { + frame, more := frames.Next() + visit(frame.File, frame.Line, frame.Function, frame.PC-frame.Entry) + if !more { + return + } + } +} + // SyncMarker is an enum type that represents markers that may be // written to export data to ensure the reader and writer stay // synchronized. 
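Note: the syncmarker_string.go update below follows the usual go:generate stringer layout, one concatenated name string plus a table of end offsets. In miniature:

package main

import "fmt"

// One concatenated string plus end offsets, as in _SyncMarker_name/_SyncMarker_index.
const names = "EOFBoolInt64"

var index = [...]uint8{0, 3, 7, 12}

func name(i int) string { return names[index[i]:index[i+1]] }

func main() {
	fmt.Println(name(0), name(1), name(2)) // EOF Bool Int64
}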
@@ -110,4 +129,8 @@ const ( SyncStmtsEnd SyncLabel SyncOptLabel + + SyncMultiExpr + SyncRType + SyncConvRTTI ) diff --git a/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go b/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go index 4a5b0ca5f2f..582ad56d3e0 100644 --- a/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go +++ b/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go @@ -74,11 +74,14 @@ func _() { _ = x[SyncStmtsEnd-64] _ = x[SyncLabel-65] _ = x[SyncOptLabel-66] + _ = x[SyncMultiExpr-67] + _ = x[SyncRType-68] + _ = x[SyncConvRTTI-69] } -const _SyncMarker_name = "EOFBoolInt64Uint64StringValueValRelocsRelocUseRelocPublicPosPosBaseObjectObject1PkgPkgDefMethodTypeTypeIdxTypeParamNamesSignatureParamsParamCodeObjSymLocalIdentSelectorPrivateFuncExtVarExtTypeExtPragmaExprListExprsExprExprTypeAssignOpFuncLitCompLitDeclFuncBodyOpenScopeCloseScopeCloseAnotherScopeDeclNamesDeclNameStmtsBlockStmtIfStmtForStmtSwitchStmtRangeStmtCaseClauseCommClauseSelectStmtDeclsLabeledStmtUseObjLocalAddLocalLinknameStmt1StmtsEndLabelOptLabel" +const _SyncMarker_name = "EOFBoolInt64Uint64StringValueValRelocsRelocUseRelocPublicPosPosBaseObjectObject1PkgPkgDefMethodTypeTypeIdxTypeParamNamesSignatureParamsParamCodeObjSymLocalIdentSelectorPrivateFuncExtVarExtTypeExtPragmaExprListExprsExprExprTypeAssignOpFuncLitCompLitDeclFuncBodyOpenScopeCloseScopeCloseAnotherScopeDeclNamesDeclNameStmtsBlockStmtIfStmtForStmtSwitchStmtRangeStmtCaseClauseCommClauseSelectStmtDeclsLabeledStmtUseObjLocalAddLocalLinknameStmt1StmtsEndLabelOptLabelMultiExprRTypeConvRTTI" -var _SyncMarker_index = [...]uint16{0, 3, 7, 12, 18, 24, 29, 32, 38, 43, 51, 57, 60, 67, 73, 80, 83, 89, 95, 99, 106, 120, 129, 135, 140, 147, 150, 160, 168, 175, 182, 188, 195, 201, 209, 214, 218, 226, 232, 234, 241, 248, 252, 260, 269, 279, 296, 305, 313, 318, 327, 333, 340, 350, 359, 369, 379, 389, 394, 405, 416, 424, 432, 437, 445, 450, 458} +var _SyncMarker_index = [...]uint16{0, 3, 7, 12, 18, 24, 29, 32, 38, 43, 51, 57, 60, 67, 73, 80, 83, 89, 95, 99, 106, 120, 129, 135, 140, 147, 150, 160, 168, 175, 182, 188, 195, 201, 209, 214, 218, 226, 232, 234, 241, 248, 252, 260, 269, 279, 296, 305, 313, 318, 327, 333, 340, 350, 359, 369, 379, 389, 394, 405, 416, 424, 432, 437, 445, 450, 458, 467, 472, 480} func (i SyncMarker) String() string { i -= 1 diff --git a/vendor/golang.org/x/tools/internal/pkgbits/version.go b/vendor/golang.org/x/tools/internal/pkgbits/version.go new file mode 100644 index 00000000000..53af9df22b3 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/pkgbits/version.go @@ -0,0 +1,85 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +// Version indicates a version of a unified IR bitstream. +// Each Version indicates the addition, removal, or change of +// new data in the bitstream. +// +// These are serialized to disk and the interpretation remains fixed. +type Version uint32 + +const ( + // V0: initial prototype. + // + // All data that is not assigned a Field is in version V0 + // and has not been deprecated. + V0 Version = iota + + // V1: adds the Flags uint32 word + V1 + + // V2: removes unused legacy fields and supports type parameters for aliases. 
+ // - remove the legacy "has init" bool from the public root + // - remove obj's "derived func instance" bool + // - add a TypeParamNames field to ObjAlias + // - remove derived info "needed" bool + V2 + + numVersions = iota +) + +// Field denotes a unit of data in the serialized unified IR bitstream. +// It is conceptually a like field in a structure. +// +// We only really need Fields when the data may or may not be present +// in a stream based on the Version of the bitstream. +// +// Unlike much of pkgbits, Fields are not serialized and +// can change values as needed. +type Field int + +const ( + // Flags in a uint32 in the header of a bitstream + // that is used to indicate whether optional features are enabled. + Flags Field = iota + + // Deprecated: HasInit was a bool indicating whether a package + // has any init functions. + HasInit + + // Deprecated: DerivedFuncInstance was a bool indicating + // whether an object was a function instance. + DerivedFuncInstance + + // ObjAlias has a list of TypeParamNames. + AliasTypeParamNames + + // Deprecated: DerivedInfoNeeded was a bool indicating + // whether a type was a derived type. + DerivedInfoNeeded + + numFields = iota +) + +// introduced is the version a field was added. +var introduced = [numFields]Version{ + Flags: V1, + AliasTypeParamNames: V2, +} + +// removed is the version a field was removed in or 0 for fields +// that have not yet been deprecated. +// (So removed[f]-1 is the last version it is included in.) +var removed = [numFields]Version{ + HasInit: V2, + DerivedFuncInstance: V2, + DerivedInfoNeeded: V2, +} + +// Has reports whether field f is present in a bitstream at version v. +func (v Version) Has(f Field) bool { + return introduced[f] <= v && (v < removed[f] || removed[f] == V0) +} diff --git a/vendor/golang.org/x/tools/internal/stdlib/manifest.go b/vendor/golang.org/x/tools/internal/stdlib/manifest.go index a928acf29fa..cdaac9ab34d 100644 --- a/vendor/golang.org/x/tools/internal/stdlib/manifest.go +++ b/vendor/golang.org/x/tools/internal/stdlib/manifest.go @@ -951,7 +951,7 @@ var PackageSymbols = map[string][]Symbol{ {"ParseSessionState", Func, 21}, {"QUICClient", Func, 21}, {"QUICConfig", Type, 21}, - {"QUICConfig.EnableStoreSessionEvent", Field, 23}, + {"QUICConfig.EnableSessionEvents", Field, 23}, {"QUICConfig.TLSConfig", Field, 21}, {"QUICConn", Type, 21}, {"QUICEncryptionLevel", Type, 21}, diff --git a/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go b/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go deleted file mode 100644 index ff9437a36cd..00000000000 --- a/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// package tokeninternal provides access to some internal features of the token -// package. -package tokeninternal - -import ( - "fmt" - "go/token" - "sort" - "sync" - "unsafe" -) - -// GetLines returns the table of line-start offsets from a token.File. -func GetLines(file *token.File) []int { - // token.File has a Lines method on Go 1.21 and later. - if file, ok := (interface{})(file).(interface{ Lines() []int }); ok { - return file.Lines() - } - - // This declaration must match that of token.File. - // This creates a risk of dependency skew. 
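Note: the Version/Field gating above can be exercised directly. Since pkgbits is internal, this sketch re-declares the same tables rather than importing them:

package main

import "fmt"

type Version uint32
type Field int

const (
	V0 Version = iota
	V1
	V2
)

const (
	Flags Field = iota
	HasInit
	DerivedFuncInstance
	AliasTypeParamNames
	DerivedInfoNeeded
	numFields = iota
)

var introduced = [numFields]Version{Flags: V1, AliasTypeParamNames: V2}
var removed = [numFields]Version{HasInit: V2, DerivedFuncInstance: V2, DerivedInfoNeeded: V2}

func (v Version) Has(f Field) bool {
	return introduced[f] <= v && (v < removed[f] || removed[f] == V0)
}

func main() {
	fmt.Println(V1.Has(HasInit))             // true: present until V2 removed it
	fmt.Println(V2.Has(HasInit))             // false
	fmt.Println(V2.Has(AliasTypeParamNames)) // true: added in V2
	fmt.Println(V0.Has(Flags))               // false: the flags word arrived in V1
}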
- // For now we check that the size of the two - // declarations is the same, on the (fragile) assumption - // that future changes would add fields. - type tokenFile119 struct { - _ string - _ int - _ int - mu sync.Mutex // we're not complete monsters - lines []int - _ []struct{} - } - - if unsafe.Sizeof(*file) != unsafe.Sizeof(tokenFile119{}) { - panic("unexpected token.File size") - } - var ptr *tokenFile119 - type uP = unsafe.Pointer - *(*uP)(uP(&ptr)) = uP(file) - ptr.mu.Lock() - defer ptr.mu.Unlock() - return ptr.lines -} - -// AddExistingFiles adds the specified files to the FileSet if they -// are not already present. It panics if any pair of files in the -// resulting FileSet would overlap. -func AddExistingFiles(fset *token.FileSet, files []*token.File) { - // Punch through the FileSet encapsulation. - type tokenFileSet struct { - // This type remained essentially consistent from go1.16 to go1.21. - mutex sync.RWMutex - base int - files []*token.File - _ *token.File // changed to atomic.Pointer[token.File] in go1.19 - } - - // If the size of token.FileSet changes, this will fail to compile. - const delta = int64(unsafe.Sizeof(tokenFileSet{})) - int64(unsafe.Sizeof(token.FileSet{})) - var _ [-delta * delta]int - - type uP = unsafe.Pointer - var ptr *tokenFileSet - *(*uP)(uP(&ptr)) = uP(fset) - ptr.mutex.Lock() - defer ptr.mutex.Unlock() - - // Merge and sort. - newFiles := append(ptr.files, files...) - sort.Slice(newFiles, func(i, j int) bool { - return newFiles[i].Base() < newFiles[j].Base() - }) - - // Reject overlapping files. - // Discard adjacent identical files. - out := newFiles[:0] - for i, file := range newFiles { - if i > 0 { - prev := newFiles[i-1] - if file == prev { - continue - } - if prev.Base()+prev.Size()+1 > file.Base() { - panic(fmt.Sprintf("file %s (%d-%d) overlaps with file %s (%d-%d)", - prev.Name(), prev.Base(), prev.Base()+prev.Size(), - file.Name(), file.Base(), file.Base()+file.Size())) - } - } - out = append(out, file) - } - newFiles = out - - ptr.files = newFiles - - // Advance FileSet.Base(). - if len(newFiles) > 0 { - last := newFiles[len(newFiles)-1] - newBase := last.Base() + last.Size() + 1 - if ptr.base < newBase { - ptr.base = newBase - } - } -} - -// FileSetFor returns a new FileSet containing a sequence of new Files with -// the same base, size, and line as the input files, for use in APIs that -// require a FileSet. -// -// Precondition: the input files must be non-overlapping, and sorted in order -// of their Base. -func FileSetFor(files ...*token.File) *token.FileSet { - fset := token.NewFileSet() - for _, f := range files { - f2 := fset.AddFile(f.Name(), f.Base(), f.Size()) - lines := GetLines(f) - f2.SetLines(lines) - } - return fset -} - -// CloneFileSet creates a new FileSet holding all files in fset. It does not -// create copies of the token.Files in fset: they are added to the resulting -// FileSet unmodified. -func CloneFileSet(fset *token.FileSet) *token.FileSet { - var files []*token.File - fset.Iterate(func(f *token.File) bool { - files = append(files, f) - return true - }) - newFileSet := token.NewFileSet() - AddExistingFiles(newFileSet, files) - return newFileSet -} diff --git a/vendor/golang.org/x/tools/internal/typeparams/common.go b/vendor/golang.org/x/tools/internal/typeparams/common.go new file mode 100644 index 00000000000..0b84acc5c7f --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typeparams/common.go @@ -0,0 +1,140 @@ +// Copyright 2021 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package typeparams contains common utilities for writing tools that +// interact with generic Go code, as introduced with Go 1.18. It +// supplements the standard library APIs. Notably, the StructuralTerms +// API computes a minimal representation of the structural +// restrictions on a type parameter. +// +// An external version of these APIs is available in the +// golang.org/x/exp/typeparams module. +package typeparams + +import ( + "go/ast" + "go/token" + "go/types" +) + +// UnpackIndexExpr extracts data from AST nodes that represent index +// expressions. +// +// For an ast.IndexExpr, the resulting indices slice will contain exactly one +// index expression. For an ast.IndexListExpr (go1.18+), it may have a variable +// number of index expressions. +// +// For nodes that don't represent index expressions, the first return value of +// UnpackIndexExpr will be nil. +func UnpackIndexExpr(n ast.Node) (x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack token.Pos) { + switch e := n.(type) { + case *ast.IndexExpr: + return e.X, e.Lbrack, []ast.Expr{e.Index}, e.Rbrack + case *ast.IndexListExpr: + return e.X, e.Lbrack, e.Indices, e.Rbrack + } + return nil, token.NoPos, nil, token.NoPos +} + +// PackIndexExpr returns an *ast.IndexExpr or *ast.IndexListExpr, depending on +// the cardinality of indices. Calling PackIndexExpr with len(indices) == 0 +// will panic. +func PackIndexExpr(x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack token.Pos) ast.Expr { + switch len(indices) { + case 0: + panic("empty indices") + case 1: + return &ast.IndexExpr{ + X: x, + Lbrack: lbrack, + Index: indices[0], + Rbrack: rbrack, + } + default: + return &ast.IndexListExpr{ + X: x, + Lbrack: lbrack, + Indices: indices, + Rbrack: rbrack, + } + } +} + +// IsTypeParam reports whether t is a type parameter (or an alias of one). +func IsTypeParam(t types.Type) bool { + _, ok := types.Unalias(t).(*types.TypeParam) + return ok +} + +// GenericAssignableTo is a generalization of types.AssignableTo that +// implements the following rule for uninstantiated generic types: +// +// If V and T are generic named types, then V is considered assignable to T if, +// for every possible instantiation of V[A_1, ..., A_N], the instantiation +// T[A_1, ..., A_N] is valid and V[A_1, ..., A_N] implements T[A_1, ..., A_N]. +// +// If T has structural constraints, they must be satisfied by V. +// +// For example, consider the following type declarations: +// +// type Interface[T any] interface { +// Accept(T) +// } +// +// type Container[T any] struct { +// Element T +// } +// +// func (c Container[T]) Accept(t T) { c.Element = t } +// +// In this case, GenericAssignableTo reports that instantiations of Container +// are assignable to the corresponding instantiation of Interface. +func GenericAssignableTo(ctxt *types.Context, V, T types.Type) bool { + V = types.Unalias(V) + T = types.Unalias(T) + + // If V and T are not both named, or do not have matching non-empty type + // parameter lists, fall back on types.AssignableTo. 
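+	// Note that instantiated types such as Container[int] (whose
+	// TypeArgs().Len() != 0) and unnamed types such as []int, which are
+	// never *types.Named, also take the types.AssignableTo path below.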
+ + VN, Vnamed := V.(*types.Named) + TN, Tnamed := T.(*types.Named) + if !Vnamed || !Tnamed { + return types.AssignableTo(V, T) + } + + vtparams := VN.TypeParams() + ttparams := TN.TypeParams() + if vtparams.Len() == 0 || vtparams.Len() != ttparams.Len() || VN.TypeArgs().Len() != 0 || TN.TypeArgs().Len() != 0 { + return types.AssignableTo(V, T) + } + + // V and T have the same (non-zero) number of type params. Instantiate both + // with the type parameters of V. This must always succeed for V, and will + // succeed for T if and only if the type set of each type parameter of V is a + // subset of the type set of the corresponding type parameter of T, meaning + // that every instantiation of V corresponds to a valid instantiation of T. + + // Minor optimization: ensure we share a context across the two + // instantiations below. + if ctxt == nil { + ctxt = types.NewContext() + } + + var targs []types.Type + for i := 0; i < vtparams.Len(); i++ { + targs = append(targs, vtparams.At(i)) + } + + vinst, err := types.Instantiate(ctxt, V, targs, true) + if err != nil { + panic("type parameters should satisfy their own constraints") + } + + tinst, err := types.Instantiate(ctxt, T, targs, true) + if err != nil { + return false + } + + return types.AssignableTo(vinst, tinst) +} diff --git a/vendor/golang.org/x/tools/internal/typeparams/coretype.go b/vendor/golang.org/x/tools/internal/typeparams/coretype.go new file mode 100644 index 00000000000..6e83c6fb1a2 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typeparams/coretype.go @@ -0,0 +1,150 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeparams + +import ( + "fmt" + "go/types" +) + +// CoreType returns the core type of T or nil if T does not have a core type. +// +// See https://go.dev/ref/spec#Core_types for the definition of a core type. +func CoreType(T types.Type) types.Type { + U := T.Underlying() + if _, ok := U.(*types.Interface); !ok { + return U // for non-interface types, + } + + terms, err := NormalTerms(U) + if len(terms) == 0 || err != nil { + // len(terms) -> empty type set of interface. + // err != nil => U is invalid, exceeds complexity bounds, or has an empty type set. + return nil // no core type. + } + + U = terms[0].Type().Underlying() + var identical int // i in [0,identical) => Identical(U, terms[i].Type().Underlying()) + for identical = 1; identical < len(terms); identical++ { + if !types.Identical(U, terms[identical].Type().Underlying()) { + break + } + } + + if identical == len(terms) { + // https://go.dev/ref/spec#Core_types + // "There is a single type U which is the underlying type of all types in the type set of T" + return U + } + ch, ok := U.(*types.Chan) + if !ok { + return nil // no core type as identical < len(terms) and U is not a channel. + } + // https://go.dev/ref/spec#Core_types + // "the type chan E if T contains only bidirectional channels, or the type chan<- E or + // <-chan E depending on the direction of the directional channels present." + for chans := identical; chans < len(terms); chans++ { + curr, ok := terms[chans].Type().Underlying().(*types.Chan) + if !ok { + return nil + } + if !types.Identical(ch.Elem(), curr.Elem()) { + return nil // channel elements are not identical. + } + if ch.Dir() == types.SendRecv { + // ch is bidirectional. We can safely always use curr's direction. 
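+			// (If curr is also bidirectional, ch simply stays
+			// bidirectional; otherwise ch adopts curr's single direction.)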
+ ch = curr + } else if curr.Dir() != types.SendRecv && ch.Dir() != curr.Dir() { + // ch and curr are not bidirectional and not the same direction. + return nil + } + } + return ch +} + +// NormalTerms returns a slice of terms representing the normalized structural +// type restrictions of a type, if any. +// +// For all types other than *types.TypeParam, *types.Interface, and +// *types.Union, this is just a single term with Tilde() == false and +// Type() == typ. For *types.TypeParam, *types.Interface, and *types.Union, see +// below. +// +// Structural type restrictions of a type parameter are created via +// non-interface types embedded in its constraint interface (directly, or via a +// chain of interface embeddings). For example, in the declaration type +// T[P interface{~int; m()}] int the structural restriction of the type +// parameter P is ~int. +// +// With interface embedding and unions, the specification of structural type +// restrictions may be arbitrarily complex. For example, consider the +// following: +// +// type A interface{ ~string|~[]byte } +// +// type B interface{ int|string } +// +// type C interface { ~string|~int } +// +// type T[P interface{ A|B; C }] int +// +// In this example, the structural type restriction of P is ~string|int: A|B +// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int, +// which when intersected with C (~string|~int) yields ~string|int. +// +// NormalTerms computes these expansions and reductions, producing a +// "normalized" form of the embeddings. A structural restriction is normalized +// if it is a single union containing no interface terms, and is minimal in the +// sense that removing any term changes the set of types satisfying the +// constraint. It is left as a proof for the reader that, modulo sorting, there +// is exactly one such normalized form. +// +// Because the minimal representation always takes this form, NormalTerms +// returns a slice of tilde terms corresponding to the terms of the union in +// the normalized structural restriction. An error is returned if the type is +// invalid, exceeds complexity bounds, or has an empty type set. In the latter +// case, NormalTerms returns ErrEmptyTypeSet. +// +// NormalTerms makes no guarantees about the order of terms, except that it +// is deterministic. +func NormalTerms(typ types.Type) ([]*types.Term, error) { + switch typ := typ.Underlying().(type) { + case *types.TypeParam: + return StructuralTerms(typ) + case *types.Union: + return UnionTermSet(typ) + case *types.Interface: + return InterfaceTermSet(typ) + default: + return []*types.Term{types.NewTerm(false, typ)}, nil + } +} + +// Deref returns the type of the variable pointed to by t, +// if t's core type is a pointer; otherwise it returns t. +// +// Do not assume that Deref(T)==T implies T is not a pointer: +// consider "type T *T", for example. +// +// TODO(adonovan): ideally this would live in typesinternal, but that +// creates an import cycle. Move there when we melt this package down. +func Deref(t types.Type) types.Type { + if ptr, ok := CoreType(t).(*types.Pointer); ok { + return ptr.Elem() + } + return t +} + +// MustDeref returns the type of the variable pointed to by t. +// It panics if t's core type is not a pointer. +// +// TODO(adonovan): ideally this would live in typesinternal, but that +// creates an import cycle. Move there when we melt this package down. 
+func MustDeref(t types.Type) types.Type { + if ptr, ok := CoreType(t).(*types.Pointer); ok { + return ptr.Elem() + } + panic(fmt.Sprintf("%v is not a pointer", t)) +} diff --git a/vendor/golang.org/x/tools/internal/typeparams/free.go b/vendor/golang.org/x/tools/internal/typeparams/free.go new file mode 100644 index 00000000000..0ade5c2949e --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typeparams/free.go @@ -0,0 +1,131 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeparams + +import ( + "go/types" + + "golang.org/x/tools/internal/aliases" +) + +// Free is a memoization of the set of free type parameters within a +// type. It makes a sequence of calls to [Free.Has] for overlapping +// types more efficient. The zero value is ready for use. +// +// NOTE: Adapted from go/types/infer.go. If it is later exported, factor. +type Free struct { + seen map[types.Type]bool +} + +// Has reports whether the specified type has a free type parameter. +func (w *Free) Has(typ types.Type) (res bool) { + // detect cycles + if x, ok := w.seen[typ]; ok { + return x + } + if w.seen == nil { + w.seen = make(map[types.Type]bool) + } + w.seen[typ] = false + defer func() { + w.seen[typ] = res + }() + + switch t := typ.(type) { + case nil, *types.Basic: // TODO(gri) should nil be handled here? + break + + case *types.Alias: + if aliases.TypeParams(t).Len() > aliases.TypeArgs(t).Len() { + return true // This is an uninstantiated Alias. + } + // The expansion of an alias can have free type parameters, + // whether or not the alias itself has type parameters: + // + // func _[K comparable]() { + // type Set = map[K]bool // free(Set) = {K} + // type MapTo[V] = map[K]V // free(Map[foo]) = {V} + // } + // + // So, we must Unalias. + return w.Has(types.Unalias(t)) + + case *types.Array: + return w.Has(t.Elem()) + + case *types.Slice: + return w.Has(t.Elem()) + + case *types.Struct: + for i, n := 0, t.NumFields(); i < n; i++ { + if w.Has(t.Field(i).Type()) { + return true + } + } + + case *types.Pointer: + return w.Has(t.Elem()) + + case *types.Tuple: + n := t.Len() + for i := 0; i < n; i++ { + if w.Has(t.At(i).Type()) { + return true + } + } + + case *types.Signature: + // t.tparams may not be nil if we are looking at a signature + // of a generic function type (or an interface method) that is + // part of the type we're testing. We don't care about these type + // parameters. + // Similarly, the receiver of a method may declare (rather than + // use) type parameters, we don't care about those either. + // Thus, we only need to look at the input and result parameters. + return w.Has(t.Params()) || w.Has(t.Results()) + + case *types.Interface: + for i, n := 0, t.NumMethods(); i < n; i++ { + if w.Has(t.Method(i).Type()) { + return true + } + } + terms, err := InterfaceTermSet(t) + if err != nil { + return false // ill typed + } + for _, term := range terms { + if w.Has(term.Type()) { + return true + } + } + + case *types.Map: + return w.Has(t.Key()) || w.Has(t.Elem()) + + case *types.Chan: + return w.Has(t.Elem()) + + case *types.Named: + args := t.TypeArgs() + if params := t.TypeParams(); params.Len() > args.Len() { + return true // this is an uninstantiated named type. 
+ } + for i, n := 0, args.Len(); i < n; i++ { + if w.Has(args.At(i)) { + return true + } + } + return w.Has(t.Underlying()) // recurse for types local to parameterized functions + + case *types.TypeParam: + return true + + default: + panic(t) // unreachable + } + + return false +} diff --git a/vendor/golang.org/x/tools/internal/typeparams/normalize.go b/vendor/golang.org/x/tools/internal/typeparams/normalize.go new file mode 100644 index 00000000000..93c80fdc96c --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typeparams/normalize.go @@ -0,0 +1,218 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeparams + +import ( + "errors" + "fmt" + "go/types" + "os" + "strings" +) + +//go:generate go run copytermlist.go + +const debug = false + +var ErrEmptyTypeSet = errors.New("empty type set") + +// StructuralTerms returns a slice of terms representing the normalized +// structural type restrictions of a type parameter, if any. +// +// Structural type restrictions of a type parameter are created via +// non-interface types embedded in its constraint interface (directly, or via a +// chain of interface embeddings). For example, in the declaration +// +// type T[P interface{~int; m()}] int +// +// the structural restriction of the type parameter P is ~int. +// +// With interface embedding and unions, the specification of structural type +// restrictions may be arbitrarily complex. For example, consider the +// following: +// +// type A interface{ ~string|~[]byte } +// +// type B interface{ int|string } +// +// type C interface { ~string|~int } +// +// type T[P interface{ A|B; C }] int +// +// In this example, the structural type restriction of P is ~string|int: A|B +// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int, +// which when intersected with C (~string|~int) yields ~string|int. +// +// StructuralTerms computes these expansions and reductions, producing a +// "normalized" form of the embeddings. A structural restriction is normalized +// if it is a single union containing no interface terms, and is minimal in the +// sense that removing any term changes the set of types satisfying the +// constraint. It is left as a proof for the reader that, modulo sorting, there +// is exactly one such normalized form. +// +// Because the minimal representation always takes this form, StructuralTerms +// returns a slice of tilde terms corresponding to the terms of the union in +// the normalized structural restriction. An error is returned if the +// constraint interface is invalid, exceeds complexity bounds, or has an empty +// type set. In the latter case, StructuralTerms returns ErrEmptyTypeSet. +// +// StructuralTerms makes no guarantees about the order of terms, except that it +// is deterministic. +func StructuralTerms(tparam *types.TypeParam) ([]*types.Term, error) { + constraint := tparam.Constraint() + if constraint == nil { + return nil, fmt.Errorf("%s has nil constraint", tparam) + } + iface, _ := constraint.Underlying().(*types.Interface) + if iface == nil { + return nil, fmt.Errorf("constraint is %T, not *types.Interface", constraint.Underlying()) + } + return InterfaceTermSet(iface) +} + +// InterfaceTermSet computes the normalized terms for a constraint interface, +// returning an error if the term set cannot be computed or is empty. In the +// latter case, the error will be ErrEmptyTypeSet. 
+// +// See the documentation of StructuralTerms for more information on +// normalization. +func InterfaceTermSet(iface *types.Interface) ([]*types.Term, error) { + return computeTermSet(iface) +} + +// UnionTermSet computes the normalized terms for a union, returning an error +// if the term set cannot be computed or is empty. In the latter case, the +// error will be ErrEmptyTypeSet. +// +// See the documentation of StructuralTerms for more information on +// normalization. +func UnionTermSet(union *types.Union) ([]*types.Term, error) { + return computeTermSet(union) +} + +func computeTermSet(typ types.Type) ([]*types.Term, error) { + tset, err := computeTermSetInternal(typ, make(map[types.Type]*termSet), 0) + if err != nil { + return nil, err + } + if tset.terms.isEmpty() { + return nil, ErrEmptyTypeSet + } + if tset.terms.isAll() { + return nil, nil + } + var terms []*types.Term + for _, term := range tset.terms { + terms = append(terms, types.NewTerm(term.tilde, term.typ)) + } + return terms, nil +} + +// A termSet holds the normalized set of terms for a given type. +// +// The name termSet is intentionally distinct from 'type set': a type set is +// all types that implement a type (and includes method restrictions), whereas +// a term set just represents the structural restrictions on a type. +type termSet struct { + complete bool + terms termlist +} + +func indentf(depth int, format string, args ...interface{}) { + fmt.Fprintf(os.Stderr, strings.Repeat(".", depth)+format+"\n", args...) +} + +func computeTermSetInternal(t types.Type, seen map[types.Type]*termSet, depth int) (res *termSet, err error) { + if t == nil { + panic("nil type") + } + + if debug { + indentf(depth, "%s", t.String()) + defer func() { + if err != nil { + indentf(depth, "=> %s", err) + } else { + indentf(depth, "=> %s", res.terms.String()) + } + }() + } + + const maxTermCount = 100 + if tset, ok := seen[t]; ok { + if !tset.complete { + return nil, fmt.Errorf("cycle detected in the declaration of %s", t) + } + return tset, nil + } + + // Mark the current type as seen to avoid infinite recursion. + tset := new(termSet) + defer func() { + tset.complete = true + }() + seen[t] = tset + + switch u := t.Underlying().(type) { + case *types.Interface: + // The term set of an interface is the intersection of the term sets of its + // embedded types. + tset.terms = allTermlist + for i := 0; i < u.NumEmbeddeds(); i++ { + embedded := u.EmbeddedType(i) + if _, ok := embedded.Underlying().(*types.TypeParam); ok { + return nil, fmt.Errorf("invalid embedded type %T", embedded) + } + tset2, err := computeTermSetInternal(embedded, seen, depth+1) + if err != nil { + return nil, err + } + tset.terms = tset.terms.intersect(tset2.terms) + } + case *types.Union: + // The term set of a union is the union of term sets of its terms. + tset.terms = nil + for i := 0; i < u.Len(); i++ { + t := u.Term(i) + var terms termlist + switch t.Type().Underlying().(type) { + case *types.Interface: + tset2, err := computeTermSetInternal(t.Type(), seen, depth+1) + if err != nil { + return nil, err + } + terms = tset2.terms + case *types.TypeParam, *types.Union: + // A stand-alone type parameter or union is not permitted as union + // term. 
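+				// (The Go spec forbids a type parameter as a union term;
+				// an underlying *types.Union should only arise here from
+				// invalid code.)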
+ return nil, fmt.Errorf("invalid union term %T", t) + default: + if t.Type() == types.Typ[types.Invalid] { + continue + } + terms = termlist{{t.Tilde(), t.Type()}} + } + tset.terms = tset.terms.union(terms) + if len(tset.terms) > maxTermCount { + return nil, fmt.Errorf("exceeded max term count %d", maxTermCount) + } + } + case *types.TypeParam: + panic("unreachable") + default: + // For all other types, the term set is just a single non-tilde term + // holding the type itself. + if u != types.Typ[types.Invalid] { + tset.terms = termlist{{false, t}} + } + } + return tset, nil +} + +// under is a facade for the go/types internal function of the same name. It is +// used by typeterm.go. +func under(t types.Type) types.Type { + return t.Underlying() +} diff --git a/vendor/golang.org/x/tools/internal/typeparams/termlist.go b/vendor/golang.org/x/tools/internal/typeparams/termlist.go new file mode 100644 index 00000000000..cbd12f80131 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typeparams/termlist.go @@ -0,0 +1,163 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by copytermlist.go DO NOT EDIT. + +package typeparams + +import ( + "bytes" + "go/types" +) + +// A termlist represents the type set represented by the union +// t1 ∪ y2 ∪ ... tn of the type sets of the terms t1 to tn. +// A termlist is in normal form if all terms are disjoint. +// termlist operations don't require the operands to be in +// normal form. +type termlist []*term + +// allTermlist represents the set of all types. +// It is in normal form. +var allTermlist = termlist{new(term)} + +// String prints the termlist exactly (without normalization). +func (xl termlist) String() string { + if len(xl) == 0 { + return "∅" + } + var buf bytes.Buffer + for i, x := range xl { + if i > 0 { + buf.WriteString(" | ") + } + buf.WriteString(x.String()) + } + return buf.String() +} + +// isEmpty reports whether the termlist xl represents the empty set of types. +func (xl termlist) isEmpty() bool { + // If there's a non-nil term, the entire list is not empty. + // If the termlist is in normal form, this requires at most + // one iteration. + for _, x := range xl { + if x != nil { + return false + } + } + return true +} + +// isAll reports whether the termlist xl represents the set of all types. +func (xl termlist) isAll() bool { + // If there's a 𝓤 term, the entire list is 𝓤. + // If the termlist is in normal form, this requires at most + // one iteration. + for _, x := range xl { + if x != nil && x.typ == nil { + return true + } + } + return false +} + +// norm returns the normal form of xl. +func (xl termlist) norm() termlist { + // Quadratic algorithm, but good enough for now. + // TODO(gri) fix asymptotic performance + used := make([]bool, len(xl)) + var rl termlist + for i, xi := range xl { + if xi == nil || used[i] { + continue + } + for j := i + 1; j < len(xl); j++ { + xj := xl[j] + if xj == nil || used[j] { + continue + } + if u1, u2 := xi.union(xj); u2 == nil { + // If we encounter a 𝓤 term, the entire list is 𝓤. + // Exit early. + // (Note that this is not just an optimization; + // if we continue, we may end up with a 𝓤 term + // and other terms and the result would not be + // in normal form.) 
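+				// (u2 == nil means xi and xj were not disjoint, so
+				// union collapsed them into the single term u1.)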
+ if u1.typ == nil { + return allTermlist + } + xi = u1 + used[j] = true // xj is now unioned into xi - ignore it in future iterations + } + } + rl = append(rl, xi) + } + return rl +} + +// union returns the union xl ∪ yl. +func (xl termlist) union(yl termlist) termlist { + return append(xl, yl...).norm() +} + +// intersect returns the intersection xl ∩ yl. +func (xl termlist) intersect(yl termlist) termlist { + if xl.isEmpty() || yl.isEmpty() { + return nil + } + + // Quadratic algorithm, but good enough for now. + // TODO(gri) fix asymptotic performance + var rl termlist + for _, x := range xl { + for _, y := range yl { + if r := x.intersect(y); r != nil { + rl = append(rl, r) + } + } + } + return rl.norm() +} + +// equal reports whether xl and yl represent the same type set. +func (xl termlist) equal(yl termlist) bool { + // TODO(gri) this should be more efficient + return xl.subsetOf(yl) && yl.subsetOf(xl) +} + +// includes reports whether t ∈ xl. +func (xl termlist) includes(t types.Type) bool { + for _, x := range xl { + if x.includes(t) { + return true + } + } + return false +} + +// supersetOf reports whether y ⊆ xl. +func (xl termlist) supersetOf(y *term) bool { + for _, x := range xl { + if y.subsetOf(x) { + return true + } + } + return false +} + +// subsetOf reports whether xl ⊆ yl. +func (xl termlist) subsetOf(yl termlist) bool { + if yl.isEmpty() { + return xl.isEmpty() + } + + // each term x of xl must be a subset of yl + for _, x := range xl { + if !yl.supersetOf(x) { + return false // x is not a subset yl + } + } + return true +} diff --git a/vendor/golang.org/x/tools/internal/typeparams/typeterm.go b/vendor/golang.org/x/tools/internal/typeparams/typeterm.go new file mode 100644 index 00000000000..7350bb702a1 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typeparams/typeterm.go @@ -0,0 +1,169 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by copytermlist.go DO NOT EDIT. + +package typeparams + +import "go/types" + +// A term describes elementary type sets: +// +// ∅: (*term)(nil) == ∅ // set of no types (empty set) +// 𝓤: &term{} == 𝓤 // set of all types (𝓤niverse) +// T: &term{false, T} == {T} // set of type T +// ~t: &term{true, t} == {t' | under(t') == t} // set of types with underlying type t +type term struct { + tilde bool // valid if typ != nil + typ types.Type +} + +func (x *term) String() string { + switch { + case x == nil: + return "∅" + case x.typ == nil: + return "𝓤" + case x.tilde: + return "~" + x.typ.String() + default: + return x.typ.String() + } +} + +// equal reports whether x and y represent the same type set. +func (x *term) equal(y *term) bool { + // easy cases + switch { + case x == nil || y == nil: + return x == y + case x.typ == nil || y.typ == nil: + return x.typ == y.typ + } + // ∅ ⊂ x, y ⊂ 𝓤 + + return x.tilde == y.tilde && types.Identical(x.typ, y.typ) +} + +// union returns the union x ∪ y: zero, one, or two non-nil terms. 
+func (x *term) union(y *term) (_, _ *term) { + // easy cases + switch { + case x == nil && y == nil: + return nil, nil // ∅ ∪ ∅ == ∅ + case x == nil: + return y, nil // ∅ ∪ y == y + case y == nil: + return x, nil // x ∪ ∅ == x + case x.typ == nil: + return x, nil // 𝓤 ∪ y == 𝓤 + case y.typ == nil: + return y, nil // x ∪ 𝓤 == 𝓤 + } + // ∅ ⊂ x, y ⊂ 𝓤 + + if x.disjoint(y) { + return x, y // x ∪ y == (x, y) if x ∩ y == ∅ + } + // x.typ == y.typ + + // ~t ∪ ~t == ~t + // ~t ∪ T == ~t + // T ∪ ~t == ~t + // T ∪ T == T + if x.tilde || !y.tilde { + return x, nil + } + return y, nil +} + +// intersect returns the intersection x ∩ y. +func (x *term) intersect(y *term) *term { + // easy cases + switch { + case x == nil || y == nil: + return nil // ∅ ∩ y == ∅ and ∩ ∅ == ∅ + case x.typ == nil: + return y // 𝓤 ∩ y == y + case y.typ == nil: + return x // x ∩ 𝓤 == x + } + // ∅ ⊂ x, y ⊂ 𝓤 + + if x.disjoint(y) { + return nil // x ∩ y == ∅ if x ∩ y == ∅ + } + // x.typ == y.typ + + // ~t ∩ ~t == ~t + // ~t ∩ T == T + // T ∩ ~t == T + // T ∩ T == T + if !x.tilde || y.tilde { + return x + } + return y +} + +// includes reports whether t ∈ x. +func (x *term) includes(t types.Type) bool { + // easy cases + switch { + case x == nil: + return false // t ∈ ∅ == false + case x.typ == nil: + return true // t ∈ 𝓤 == true + } + // ∅ ⊂ x ⊂ 𝓤 + + u := t + if x.tilde { + u = under(u) + } + return types.Identical(x.typ, u) +} + +// subsetOf reports whether x ⊆ y. +func (x *term) subsetOf(y *term) bool { + // easy cases + switch { + case x == nil: + return true // ∅ ⊆ y == true + case y == nil: + return false // x ⊆ ∅ == false since x != ∅ + case y.typ == nil: + return true // x ⊆ 𝓤 == true + case x.typ == nil: + return false // 𝓤 ⊆ y == false since y != 𝓤 + } + // ∅ ⊂ x, y ⊂ 𝓤 + + if x.disjoint(y) { + return false // x ⊆ y == false if x ∩ y == ∅ + } + // x.typ == y.typ + + // ~t ⊆ ~t == true + // ~t ⊆ T == false + // T ⊆ ~t == true + // T ⊆ T == true + return !x.tilde || y.tilde +} + +// disjoint reports whether x ∩ y == ∅. +// x.typ and y.typ must not be nil. +func (x *term) disjoint(y *term) bool { + if debug && (x.typ == nil || y.typ == nil) { + panic("invalid argument(s)") + } + ux := x.typ + if y.tilde { + ux = under(ux) + } + uy := y.typ + if x.tilde { + uy = under(uy) + } + return !types.Identical(ux, uy) +} diff --git a/vendor/golang.org/x/tools/internal/typesinternal/element.go b/vendor/golang.org/x/tools/internal/typesinternal/element.go new file mode 100644 index 00000000000..4957f021641 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typesinternal/element.go @@ -0,0 +1,133 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typesinternal + +import ( + "fmt" + "go/types" + + "golang.org/x/tools/go/types/typeutil" +) + +// ForEachElement calls f for type T and each type reachable from its +// type through reflection. It does this by recursively stripping off +// type constructors; in addition, for each named type N, the type *N +// is added to the result as it may have additional methods. +// +// The caller must provide an initially empty set used to de-duplicate +// identical types, potentially across multiple calls to ForEachElement. +// (Its final value holds all the elements seen, matching the arguments +// passed to f.) +// +// TODO(adonovan): share/harmonize with go/callgraph/rta. 
+func ForEachElement(rtypes *typeutil.Map, msets *typeutil.MethodSetCache, T types.Type, f func(types.Type)) { + var visit func(T types.Type, skip bool) + visit = func(T types.Type, skip bool) { + if !skip { + if seen, _ := rtypes.Set(T, true).(bool); seen { + return // de-dup + } + + f(T) // notify caller of new element type + } + + // Recursion over signatures of each method. + tmset := msets.MethodSet(T) + for i := 0; i < tmset.Len(); i++ { + sig := tmset.At(i).Type().(*types.Signature) + // It is tempting to call visit(sig, false) + // but, as noted in golang.org/cl/65450043, + // the Signature.Recv field is ignored by + // types.Identical and typeutil.Map, which + // is confusing at best. + // + // More importantly, the true signature rtype + // reachable from a method using reflection + // has no receiver but an extra ordinary parameter. + // For the Read method of io.Reader we want: + // func(Reader, []byte) (int, error) + // but here sig is: + // func([]byte) (int, error) + // with .Recv = Reader (though it is hard to + // notice because it doesn't affect Signature.String + // or types.Identical). + // + // TODO(adonovan): construct and visit the correct + // non-method signature with an extra parameter + // (though since unnamed func types have no methods + // there is essentially no actual demand for this). + // + // TODO(adonovan): document whether or not it is + // safe to skip non-exported methods (as RTA does). + visit(sig.Params(), true) // skip the Tuple + visit(sig.Results(), true) // skip the Tuple + } + + switch T := T.(type) { + case *types.Alias: + visit(types.Unalias(T), skip) // emulates the pre-Alias behavior + + case *types.Basic: + // nop + + case *types.Interface: + // nop---handled by recursion over method set. + + case *types.Pointer: + visit(T.Elem(), false) + + case *types.Slice: + visit(T.Elem(), false) + + case *types.Chan: + visit(T.Elem(), false) + + case *types.Map: + visit(T.Key(), false) + visit(T.Elem(), false) + + case *types.Signature: + if T.Recv() != nil { + panic(fmt.Sprintf("Signature %s has Recv %s", T, T.Recv())) + } + visit(T.Params(), true) // skip the Tuple + visit(T.Results(), true) // skip the Tuple + + case *types.Named: + // A pointer-to-named type can be derived from a named + // type via reflection. It may have methods too. + visit(types.NewPointer(T), false) + + // Consider 'type T struct{S}' where S has methods. + // Reflection provides no way to get from T to struct{S}, + // only to S, so the method set of struct{S} is unwanted, + // so set 'skip' flag during recursion. + visit(T.Underlying(), true) // skip the unnamed type + + case *types.Array: + visit(T.Elem(), false) + + case *types.Struct: + for i, n := 0, T.NumFields(); i < n; i++ { + // TODO(adonovan): document whether or not + // it is safe to skip non-exported fields. + visit(T.Field(i).Type(), false) + } + + case *types.Tuple: + for i, n := 0, T.Len(); i < n; i++ { + visit(T.At(i).Type(), false) + } + + case *types.TypeParam, *types.Union: + // forEachReachable must not be called on parameterized types. 
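+			// (Reflection cannot produce an uninstantiated generic type or
+			// a constraint union, so reaching this case is a caller bug.)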
+ panic(T) + + default: + panic(T) + } + } + visit(T, false) +} diff --git a/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go b/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go index 834e05381ce..131caab2847 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go @@ -838,7 +838,7 @@ const ( // InvalidCap occurs when an argument to the cap built-in function is not of // supported type. // - // See https://golang.org/ref/spec#Lengthand_capacity for information on + // See https://golang.org/ref/spec#Length_and_capacity for information on // which underlying types are supported as arguments to cap and len. // // Example: @@ -859,7 +859,7 @@ const ( // InvalidCopy occurs when the arguments are not of slice type or do not // have compatible type. // - // See https://golang.org/ref/spec#Appendingand_copying_slices for more + // See https://golang.org/ref/spec#Appending_and_copying_slices for more // information on the type requirements for the copy built-in. // // Example: @@ -897,7 +897,7 @@ const ( // InvalidLen occurs when an argument to the len built-in function is not of // supported type. // - // See https://golang.org/ref/spec#Lengthand_capacity for information on + // See https://golang.org/ref/spec#Length_and_capacity for information on // which underlying types are supported as arguments to cap and len. // // Example: @@ -914,7 +914,7 @@ const ( // InvalidMake occurs when make is called with an unsupported type argument. // - // See https://golang.org/ref/spec#Makingslices_maps_and_channels for + // See https://golang.org/ref/spec#Making_slices_maps_and_channels for // information on the types that may be created using make. // // Example: diff --git a/vendor/golang.org/x/tools/internal/typesinternal/recv.go b/vendor/golang.org/x/tools/internal/typesinternal/recv.go index fea7c8b75e8..ba6f4f4ebd5 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/recv.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/recv.go @@ -6,8 +6,6 @@ package typesinternal import ( "go/types" - - "golang.org/x/tools/internal/aliases" ) // ReceiverNamed returns the named type (if any) associated with the @@ -15,11 +13,11 @@ import ( // It also reports whether a Pointer was present. func ReceiverNamed(recv *types.Var) (isPtr bool, named *types.Named) { t := recv.Type() - if ptr, ok := aliases.Unalias(t).(*types.Pointer); ok { + if ptr, ok := types.Unalias(t).(*types.Pointer); ok { isPtr = true t = ptr.Elem() } - named, _ = aliases.Unalias(t).(*types.Named) + named, _ = types.Unalias(t).(*types.Named) return } @@ -36,7 +34,7 @@ func ReceiverNamed(recv *types.Var) (isPtr bool, named *types.Named) { // indirection from the type, regardless of named types (analogous to // a LOAD instruction). 
func Unpointer(t types.Type) types.Type { - if ptr, ok := aliases.Unalias(t).(*types.Pointer); ok { + if ptr, ok := types.Unalias(t).(*types.Pointer); ok { return ptr.Elem() } return t diff --git a/vendor/golang.org/x/tools/internal/typesinternal/types.go b/vendor/golang.org/x/tools/internal/typesinternal/types.go index 83923286120..df3ea521254 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/types.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/types.go @@ -11,6 +11,8 @@ import ( "go/types" "reflect" "unsafe" + + "golang.org/x/tools/internal/aliases" ) func SetUsesCgo(conf *types.Config) bool { @@ -63,3 +65,57 @@ func NameRelativeTo(pkg *types.Package) types.Qualifier { return other.Name() } } + +// A NamedOrAlias is a [types.Type] that is named (as +// defined by the spec) and capable of bearing type parameters: it +// abstracts aliases ([types.Alias]) and defined types +// ([types.Named]). +// +// Every type declared by an explicit "type" declaration is a +// NamedOrAlias. (Built-in type symbols may additionally +// have type [types.Basic], which is not a NamedOrAlias, +// though the spec regards them as "named".) +// +// NamedOrAlias cannot expose the Origin method, because +// [types.Alias.Origin] and [types.Named.Origin] have different +// (covariant) result types; use [Origin] instead. +type NamedOrAlias interface { + types.Type + Obj() *types.TypeName +} + +// TypeParams is a light shim around t.TypeParams(). +// (go/types.Alias).TypeParams requires >= 1.23. +func TypeParams(t NamedOrAlias) *types.TypeParamList { + switch t := t.(type) { + case *types.Alias: + return aliases.TypeParams(t) + case *types.Named: + return t.TypeParams() + } + return nil +} + +// TypeArgs is a light shim around t.TypeArgs(). +// (go/types.Alias).TypeArgs requires >= 1.23. +func TypeArgs(t NamedOrAlias) *types.TypeList { + switch t := t.(type) { + case *types.Alias: + return aliases.TypeArgs(t) + case *types.Named: + return t.TypeArgs() + } + return nil +} + +// Origin returns the generic type of the Named or Alias type t if it +// is instantiated, otherwise it returns t. +func Origin(t NamedOrAlias) NamedOrAlias { + switch t := t.(type) { + case *types.Alias: + return aliases.Origin(t) + case *types.Named: + return t.Origin() + } + return t +} diff --git a/vendor/golang.org/x/tools/internal/versions/constraint.go b/vendor/golang.org/x/tools/internal/versions/constraint.go new file mode 100644 index 00000000000..179063d4848 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/versions/constraint.go @@ -0,0 +1,13 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package versions + +import "go/build/constraint" + +// ConstraintGoVersion is constraint.GoVersion (if built with go1.21+). +// Otherwise nil. +// +// Deprecate once x/tools is after go1.21. 
+var ConstraintGoVersion func(x constraint.Expr) string diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain_go121.go b/vendor/golang.org/x/tools/internal/versions/constraint_go121.go similarity index 75% rename from vendor/golang.org/x/tools/internal/versions/toolchain_go121.go rename to vendor/golang.org/x/tools/internal/versions/constraint_go121.go index b7ef216dfec..38011407d5f 100644 --- a/vendor/golang.org/x/tools/internal/versions/toolchain_go121.go +++ b/vendor/golang.org/x/tools/internal/versions/constraint_go121.go @@ -7,8 +7,8 @@ package versions +import "go/build/constraint" + func init() { - if Compare(toolchain, Go1_21) < 0 { - toolchain = Go1_21 - } + ConstraintGoVersion = constraint.GoVersion } diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain.go b/vendor/golang.org/x/tools/internal/versions/toolchain.go deleted file mode 100644 index 377bf7a53b4..00000000000 --- a/vendor/golang.org/x/tools/internal/versions/toolchain.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package versions - -// toolchain is maximum version (<1.22) that the go toolchain used -// to build the current tool is known to support. -// -// When a tool is built with >=1.22, the value of toolchain is unused. -// -// x/tools does not support building with go <1.18. So we take this -// as the minimum possible maximum. -var toolchain string = Go1_18 diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain_go119.go b/vendor/golang.org/x/tools/internal/versions/toolchain_go119.go deleted file mode 100644 index f65beed9d83..00000000000 --- a/vendor/golang.org/x/tools/internal/versions/toolchain_go119.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.19 -// +build go1.19 - -package versions - -func init() { - if Compare(toolchain, Go1_19) < 0 { - toolchain = Go1_19 - } -} diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain_go120.go b/vendor/golang.org/x/tools/internal/versions/toolchain_go120.go deleted file mode 100644 index 1a9efa126cd..00000000000 --- a/vendor/golang.org/x/tools/internal/versions/toolchain_go120.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.20 -// +build go1.20 - -package versions - -func init() { - if Compare(toolchain, Go1_20) < 0 { - toolchain = Go1_20 - } -} diff --git a/vendor/golang.org/x/tools/internal/versions/types.go b/vendor/golang.org/x/tools/internal/versions/types.go index 562eef21fa2..0fc10ce4eb5 100644 --- a/vendor/golang.org/x/tools/internal/versions/types.go +++ b/vendor/golang.org/x/tools/internal/versions/types.go @@ -5,15 +5,29 @@ package versions import ( + "go/ast" "go/types" ) -// GoVersion returns the Go version of the type package. -// It returns zero if no version can be determined. -func GoVersion(pkg *types.Package) string { - // TODO(taking): x/tools can call GoVersion() [from 1.21] after 1.25. - if pkg, ok := any(pkg).(interface{ GoVersion() string }); ok { - return pkg.GoVersion() +// FileVersion returns a file's Go version. +// The reported version is an unknown Future version if a +// version cannot be determined. 
+func FileVersion(info *types.Info, file *ast.File) string {
+	// In tools built with Go >= 1.22, the Go version of a file
+	// follows a cascade of sources:
+	// 1) types.Info.FileVersions, which follows the cascade:
+	//   1.a) file version (ast.File.GoVersion),
+	//   1.b) the package version (types.Config.GoVersion), or
+	// 2) is some unknown Future version.
+	//
+	// File versions require a valid package version to be provided to types
+	// in Config.GoVersion. Config.GoVersion is either from the package's module
+	// or the toolchain (go run). This value should be provided by go/packages
+	// or unitchecker.Config.GoVersion.
+	if v := info.FileVersions[file]; IsValid(v) {
+		return v
 	}
-	return ""
+	// Note: we could instead return runtime.Version() [if valid].
+	// This would act as a max version on what a tool can support.
+	return Future
 }
diff --git a/vendor/golang.org/x/tools/internal/versions/types_go121.go b/vendor/golang.org/x/tools/internal/versions/types_go121.go
deleted file mode 100644
index b4345d3349e..00000000000
--- a/vendor/golang.org/x/tools/internal/versions/types_go121.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2023 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.22
-// +build !go1.22
-
-package versions
-
-import (
-	"go/ast"
-	"go/types"
-)
-
-// FileVersion returns a language version (<=1.21) derived from runtime.Version()
-// or an unknown future version.
-func FileVersion(info *types.Info, file *ast.File) string {
-	// In x/tools built with Go <= 1.21, we do not have Info.FileVersions
-	// available. We use a go version derived from the toolchain used to
-	// compile the tool by default.
-	// This will be <= go1.21. We take this as the maximum version that
-	// this tool can support.
-	//
-	// There are no features currently in x/tools that need to tell fine grained
-	// differences for versions <1.22.
-	return toolchain
-}
-
-// InitFileVersions is a noop when compiled with this Go version.
-func InitFileVersions(*types.Info) {}
diff --git a/vendor/golang.org/x/tools/internal/versions/types_go122.go b/vendor/golang.org/x/tools/internal/versions/types_go122.go
deleted file mode 100644
index aac5db62c98..00000000000
--- a/vendor/golang.org/x/tools/internal/versions/types_go122.go
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2023 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.22
-// +build go1.22
-
-package versions
-
-import (
-	"go/ast"
-	"go/types"
-)
-
-// FileVersion returns a file's Go version.
-// The reported version is an unknown Future version if a
-// version cannot be determined.
-func FileVersion(info *types.Info, file *ast.File) string {
-	// In tools built with Go >= 1.22, the Go version of a file
-	// follow a cascades of sources:
-	// 1) types.Info.FileVersion, which follows the cascade:
-	//   1.a) file version (ast.File.GoVersion),
-	//   1.b) the package version (types.Config.GoVersion), or
-	// 2) is some unknown Future version.
-	//
-	// File versions require a valid package version to be provided to types
-	// in Config.GoVersion. Config.GoVersion is either from the package's module
-	// or the toolchain (go run). This value should be provided by go/packages
-	// or unitchecker.Config.GoVersion.
- if v := info.FileVersions[file]; IsValid(v) { - return v - } - // Note: we could instead return runtime.Version() [if valid]. - // This would act as a max version on what a tool can support. - return Future -} - -// InitFileVersions initializes info to record Go versions for Go files. -func InitFileVersions(info *types.Info) { - info.FileVersions = make(map[*ast.File]string) -} diff --git a/vendor/gonum.org/v1/gonum/AUTHORS b/vendor/gonum.org/v1/gonum/AUTHORS index 932c529804d..8e5896dbf1c 100644 --- a/vendor/gonum.org/v1/gonum/AUTHORS +++ b/vendor/gonum.org/v1/gonum/AUTHORS @@ -34,6 +34,7 @@ Davor Kapsa <davor.kapsa@gmail.com> DeepMind Technologies Delaney Gillilan <delaneygillilan@gmail.com> Dezmond Goff <goff.dezmond@gmail.com> +Dirk Müller <dirk@dmllr.de> Dong-hee Na <donghee.na92@gmail.com> Dustin Spicuzza <dustin@virtualroadside.com> Egon Elbre <egonelbre@gmail.com> @@ -50,6 +51,7 @@ Francesc Campoy <campoy@golang.org> Google Inc Gustaf Johansson <gustaf@pinon.se> Hossein Zolfi <hossein.zolfi@gmail.com> +Huang Peng Fei <huangpengfei@outlook.com> Iakov Davydov <iakov.davydov@unil.ch> Igor Mikushkin <igor.mikushkin@gmail.com> Iskander Sharipov <quasilyte@gmail.com> @@ -73,6 +75,7 @@ Joseph Watson <jtwatson@linux-consulting.us> Josh Wilson <josh.craig.wilson@gmail.com> Julien Roland <juroland@gmail.com> Kai Trukenmüller <ktye78@gmail.com> +Kendall Marcus <knowmost@outlook.com> Kent English <kent.english@gmail.com> Kevin C. Zimmerman <kevinczimmerman@gmail.com> Kirill Motkov <motkov.kirill@gmail.com> @@ -121,6 +124,8 @@ The University of Minnesota The University of Washington Thomas Berg <tomfuture@gmail.com> Tobin Harding <me@tobin.cc> +Tom Payne <twpayne@gmail.com> +Tristan Nicholls <tvk.nicholls@gmail.com> Valentin Deleplace <deleplace2015@gmail.com> Vincent Thiery <vjmthiery@gmail.com> Vladimír Chalupecký <vladimir.chalupecky@gmail.com> diff --git a/vendor/gonum.org/v1/gonum/CONTRIBUTORS b/vendor/gonum.org/v1/gonum/CONTRIBUTORS index 711ad614bd3..e367595b925 100644 --- a/vendor/gonum.org/v1/gonum/CONTRIBUTORS +++ b/vendor/gonum.org/v1/gonum/CONTRIBUTORS @@ -42,6 +42,7 @@ David Samborski <bloggingarrow@gmail.com> Davor Kapsa <davor.kapsa@gmail.com> Delaney Gillilan <delaneygillilan@gmail.com> Dezmond Goff <goff.dezmond@gmail.com> +Dirk Müller <dirk@dmllr.de> Dong-hee Na <donghee.na92@gmail.com> Dustin Spicuzza <dustin@virtualroadside.com> Egon Elbre <egonelbre@gmail.com> @@ -57,6 +58,7 @@ Fazlul Shahriar <fshahriar@gmail.com> Francesc Campoy <campoy@golang.org> Gustaf Johansson <gustaf@pinon.se> Hossein Zolfi <hossein.zolfi@gmail.com> +Huang Peng Fei <huangpengfei@outlook.com> Iakov Davydov <iakov.davydov@unil.ch> Igor Mikushkin <igor.mikushkin@gmail.com> Iskander Sharipov <quasilyte@gmail.com> @@ -81,6 +83,7 @@ Joseph Watson <jtwatson@linux-consulting.us> Josh Wilson <josh.craig.wilson@gmail.com> Julien Roland <juroland@gmail.com> Kai Trukenmüller <ktye78@gmail.com> +Kendall Marcus <knowmost@outlook.com> Kent English <kent.english@gmail.com> Kevin C. 
Zimmerman <kevinczimmerman@gmail.com> Kirill Motkov <motkov.kirill@gmail.com> @@ -124,6 +127,8 @@ Takeshi Yoneda <cz.rk.t0415y.g@gmail.com> Tamir Hyman <hyman.tamir@gmail.com> Thomas Berg <tomfuture@gmail.com> Tobin Harding <me@tobin.cc> +Tom Payne <twpayne@gmail.com> +Tristan Nicholls <tvk.nicholls@gmail.com> Valentin Deleplace <deleplace2015@gmail.com> Vincent Thiery <vjmthiery@gmail.com> Vladimír Chalupecký <vladimir.chalupecky@gmail.com> diff --git a/vendor/gonum.org/v1/gonum/blas/blas64/conv.go b/vendor/gonum.org/v1/gonum/blas/blas64/conv.go index 6cc6517f1b9..695557d13a9 100644 --- a/vendor/gonum.org/v1/gonum/blas/blas64/conv.go +++ b/vendor/gonum.org/v1/gonum/blas/blas64/conv.go @@ -261,17 +261,3 @@ func (t TriangularBand) From(a TriangularBandCols) { } dst.From(src) } - -func min(a, b int) int { - if a < b { - return a - } - return b -} - -func max(a, b int) int { - if a > b { - return a - } - return b -} diff --git a/vendor/gonum.org/v1/gonum/blas/cblas128/conv.go b/vendor/gonum.org/v1/gonum/blas/cblas128/conv.go index c459e1d87e3..bfafb96efcd 100644 --- a/vendor/gonum.org/v1/gonum/blas/cblas128/conv.go +++ b/vendor/gonum.org/v1/gonum/blas/cblas128/conv.go @@ -263,17 +263,3 @@ func (t TriangularBand) From(a TriangularBandCols) { } dst.From(src) } - -func min(a, b int) int { - if a < b { - return a - } - return b -} - -func max(a, b int) int { - if a > b { - return a - } - return b -} diff --git a/vendor/gonum.org/v1/gonum/blas/gonum/gonum.go b/vendor/gonum.org/v1/gonum/blas/gonum/gonum.go index 61a8b8b5d0b..5a5c1110121 100644 --- a/vendor/gonum.org/v1/gonum/blas/gonum/gonum.go +++ b/vendor/gonum.org/v1/gonum/blas/gonum/gonum.go @@ -21,20 +21,6 @@ const ( minParBlock = 4 // minimum number of blocks needed to go parallel ) -func max(a, b int) int { - if a > b { - return a - } - return b -} - -func min(a, b int) int { - if a > b { - return b - } - return a -} - // blocks returns the number of divisions of the dimension length with the given // block size. func blocks(dim, bsize int) int { diff --git a/vendor/gonum.org/v1/gonum/floats/floats.go b/vendor/gonum.org/v1/gonum/floats/floats.go index 5db73a0573a..68c4e65c7e2 100644 --- a/vendor/gonum.org/v1/gonum/floats/floats.go +++ b/vendor/gonum.org/v1/gonum/floats/floats.go @@ -7,6 +7,7 @@ package floats import ( "errors" "math" + "slices" "sort" "gonum.org/v1/gonum/floats/scalar" @@ -633,10 +634,10 @@ func Prod(s []float64) float64 { } // Reverse reverses the order of elements in the slice. +// +// Deprecated: This function simply calls [slices.Reverse]. func Reverse(s []float64) { - for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 { - s[i], s[j] = s[j], s[i] - } + slices.Reverse(s) } // Same returns true when the input slices have the same length and all diff --git a/vendor/gonum.org/v1/gonum/graph/internal/ordered/doc.go b/vendor/gonum.org/v1/gonum/graph/internal/ordered/doc.go deleted file mode 100644 index 563df6f2e65..00000000000 --- a/vendor/gonum.org/v1/gonum/graph/internal/ordered/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// Copyright ©2017 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package ordered provides common sort ordering types. 
-package ordered // import "gonum.org/v1/gonum/graph/internal/ordered" diff --git a/vendor/gonum.org/v1/gonum/graph/internal/ordered/sort.go b/vendor/gonum.org/v1/gonum/graph/internal/ordered/sort.go deleted file mode 100644 index 1661125c2c7..00000000000 --- a/vendor/gonum.org/v1/gonum/graph/internal/ordered/sort.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright ©2015 The Gonum Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ordered - -import ( - "sort" - - "gonum.org/v1/gonum/graph" -) - -// ByID sorts a slice of graph.Node by ID. -func ByID(n []graph.Node) { - sort.Slice(n, func(i, j int) bool { return n[i].ID() < n[j].ID() }) -} - -// BySliceValues sorts a slice of []int64 lexically by the values of the -// []int64. -func BySliceValues(c [][]int64) { - sort.Slice(c, func(i, j int) bool { - a, b := c[i], c[j] - l := len(a) - if len(b) < l { - l = len(b) - } - for k, v := range a[:l] { - if v < b[k] { - return true - } - if v > b[k] { - return false - } - } - return len(a) < len(b) - }) -} - -// BySliceIDs sorts a slice of []graph.Node lexically by the IDs of the -// []graph.Node. -func BySliceIDs(c [][]graph.Node) { - sort.Slice(c, func(i, j int) bool { - a, b := c[i], c[j] - l := len(a) - if len(b) < l { - l = len(b) - } - for k, v := range a[:l] { - if v.ID() < b[k].ID() { - return true - } - if v.ID() > b[k].ID() { - return false - } - } - return len(a) < len(b) - }) -} - -// Int64s sorts a slice of int64. -func Int64s(s []int64) { - sort.Slice(s, func(i, j int) bool { return s[i] < s[j] }) -} - -// LinesByIDs sort a slice of graph.LinesByIDs lexically by the From IDs, -// then by the To IDs, finally by the Line IDs. -func LinesByIDs(n []graph.Line) { - sort.Slice(n, func(i, j int) bool { - a, b := n[i], n[j] - if a.From().ID() != b.From().ID() { - return a.From().ID() < b.From().ID() - } - if a.To().ID() != b.To().ID() { - return a.To().ID() < b.To().ID() - } - return n[i].ID() < n[j].ID() - }) -} - -// Reverse reverses the order of nodes. -func Reverse(nodes []graph.Node) { - for i, j := 0, len(nodes)-1; i < j; i, j = i+1, j-1 { - nodes[i], nodes[j] = nodes[j], nodes[i] - } -} diff --git a/vendor/gonum.org/v1/gonum/graph/internal/set/same.go b/vendor/gonum.org/v1/gonum/graph/internal/set/same.go index a7031e4a1b7..90189f09ea0 100644 --- a/vendor/gonum.org/v1/gonum/graph/internal/set/same.go +++ b/vendor/gonum.org/v1/gonum/graph/internal/set/same.go @@ -23,15 +23,6 @@ func same(a, b Nodes) bool { // hash maps are passed as a pointer to a runtime Hmap struct. A map is // not seen by the runtime as a pointer though, so we use unsafe to get // the maps' pointer values to compare. -func intsSame(a, b Ints) bool { - return *(*uintptr)(unsafe.Pointer(&a)) == *(*uintptr)(unsafe.Pointer(&b)) -} - -// int64sSame determines whether two sets are backed by the same store. In the -// current implementation using hash maps it makes use of the fact that -// hash maps are passed as a pointer to a runtime Hmap struct. A map is -// not seen by the runtime as a pointer though, so we use unsafe to get -// the maps' pointer values to compare. 
-func int64sSame(a, b Int64s) bool { +func intsSame[T Int](a, b Ints[T]) bool { return *(*uintptr)(unsafe.Pointer(&a)) == *(*uintptr)(unsafe.Pointer(&b)) } diff --git a/vendor/gonum.org/v1/gonum/graph/internal/set/same_appengine.go b/vendor/gonum.org/v1/gonum/graph/internal/set/same_appengine.go index 368823d4c83..ae8aa4d71e6 100644 --- a/vendor/gonum.org/v1/gonum/graph/internal/set/same_appengine.go +++ b/vendor/gonum.org/v1/gonum/graph/internal/set/same_appengine.go @@ -23,15 +23,6 @@ func same(a, b Nodes) bool { // hash maps are passed as a pointer to a runtime Hmap struct. A map is // not seen by the runtime as a pointer though, so we use reflect to get // the maps' pointer values to compare. -func intsSame(a, b Ints) bool { - return reflect.ValueOf(a).Pointer() == reflect.ValueOf(b).Pointer() -} - -// int64sSame determines whether two sets are backed by the same store. In the -// current implementation using hash maps it makes use of the fact that -// hash maps are passed as a pointer to a runtime Hmap struct. A map is -// not seen by the runtime as a pointer though, so we use reflect to get -// the maps' pointer values to compare. -func int64sSame(a, b Int64s) bool { +func intsSame[T Int](a, b Ints[T]) bool { return reflect.ValueOf(a).Pointer() == reflect.ValueOf(b).Pointer() } diff --git a/vendor/gonum.org/v1/gonum/graph/internal/set/set.go b/vendor/gonum.org/v1/gonum/graph/internal/set/set.go index 8067e281677..2175e480be3 100644 --- a/vendor/gonum.org/v1/gonum/graph/internal/set/set.go +++ b/vendor/gonum.org/v1/gonum/graph/internal/set/set.go @@ -6,36 +6,34 @@ package set import "gonum.org/v1/gonum/graph" -// Ints is a set of int identifiers. -type Ints map[int]struct{} +type Int interface{ ~int | ~int64 } -// The simple accessor methods for Ints are provided to allow ease of -// implementation change should the need arise. +type Ints[T Int] map[T]struct{} // Add inserts an element into the set. -func (s Ints) Add(e int) { +func (s Ints[T]) Add(e T) { s[e] = struct{}{} } // Has reports the existence of the element in the set. -func (s Ints) Has(e int) bool { +func (s Ints[T]) Has(e T) bool { _, ok := s[e] return ok } // Remove deletes the specified element from the set. -func (s Ints) Remove(e int) { +func (s Ints[T]) Remove(e T) { delete(s, e) } // Count reports the number of elements stored in the set. -func (s Ints) Count() int { +func (s Ints[T]) Count() int { return len(s) } // IntsEqual reports set equality between the parameters. Sets are equal if // and only if they have the same elements. -func IntsEqual(a, b Ints) bool { +func IntsEqual[T Int](a, b Ints[T]) bool { if intsSame(a, b) { return true } @@ -53,53 +51,6 @@ func IntsEqual(a, b Ints) bool { return true } -// Int64s is a set of int64 identifiers. -type Int64s map[int64]struct{} - -// The simple accessor methods for Ints are provided to allow ease of -// implementation change should the need arise. - -// Add inserts an element into the set. -func (s Int64s) Add(e int64) { - s[e] = struct{}{} -} - -// Has reports the existence of the element in the set. -func (s Int64s) Has(e int64) bool { - _, ok := s[e] - return ok -} - -// Remove deletes the specified element from the set. -func (s Int64s) Remove(e int64) { - delete(s, e) -} - -// Count reports the number of elements stored in the set. -func (s Int64s) Count() int { - return len(s) -} - -// Int64sEqual reports set equality between the parameters. Sets are equal if -// and only if they have the same elements. 
-func Int64sEqual(a, b Int64s) bool { - if int64sSame(a, b) { - return true - } - - if len(a) != len(b) { - return false - } - - for e := range a { - if _, ok := b[e]; !ok { - return false - } - } - - return true -} - // Nodes is a set of nodes keyed in their integer identifiers. type Nodes map[int64]graph.Node diff --git a/vendor/gonum.org/v1/gonum/graph/iterator/lines_map_safe.go b/vendor/gonum.org/v1/gonum/graph/iterator/lines_map_safe.go index 0a7414f7d72..3f2d799f973 100644 --- a/vendor/gonum.org/v1/gonum/graph/iterator/lines_map_safe.go +++ b/vendor/gonum.org/v1/gonum/graph/iterator/lines_map_safe.go @@ -16,10 +16,11 @@ import ( // Lines implements the graph.Lines interfaces. // The iteration order of Lines is randomized. type Lines struct { - lines reflect.Value - iter *reflect.MapIter - pos int - curr graph.Line + iter reflect.MapIter + pos, len int + curr graph.Line + value reflect.Value + lines reflect.Value } // NewLines returns a Lines initialized with the provided lines, a @@ -30,23 +31,26 @@ type Lines struct { // the call to NewLines. func NewLines(lines map[int64]graph.Line) *Lines { rv := reflect.ValueOf(lines) - return &Lines{lines: rv, iter: rv.MapRange()} + l := &Lines{lines: rv, len: len(lines)} + l.iter.Reset(rv) + l.value = reflect.ValueOf(&l.curr).Elem() + return l } // Len returns the remaining number of lines to be iterated over. func (l *Lines) Len() int { - return l.lines.Len() - l.pos + return l.len - l.pos } // Next returns whether the next call of Line will return a valid line. func (l *Lines) Next() bool { - if l.pos >= l.lines.Len() { + if l.pos >= l.len { return false } ok := l.iter.Next() if ok { l.pos++ - l.curr = l.iter.Value().Interface().(graph.Line) + l.value.SetIterValue(&l.iter) } return ok } @@ -61,7 +65,7 @@ func (l *Lines) Line() graph.Line { func (l *Lines) Reset() { l.curr = nil l.pos = 0 - l.iter = l.lines.MapRange() + l.iter.Reset(l.lines) } // LineSlice returns all the remaining lines in the iterator and advances @@ -73,19 +77,21 @@ func (l *Lines) LineSlice() []graph.Line { } lines := make([]graph.Line, 0, l.Len()) for l.iter.Next() { - lines = append(lines, l.iter.Value().Interface().(graph.Line)) + l.value.SetIterValue(&l.iter) + lines = append(lines, l.curr) } - l.pos = l.lines.Len() + l.pos = l.len return lines } // WeightedLines implements the graph.WeightedLines interfaces. // The iteration order of WeightedLines is randomized. type WeightedLines struct { - lines reflect.Value - iter *reflect.MapIter - pos int - curr graph.WeightedLine + iter reflect.MapIter + pos, len int + curr graph.WeightedLine + value reflect.Value + lines reflect.Value } // NewWeightedLines returns a WeightedLines initialized with the provided lines, a @@ -96,23 +102,26 @@ type WeightedLines struct { // the call to NewWeightedLines. func NewWeightedLines(lines map[int64]graph.WeightedLine) *WeightedLines { rv := reflect.ValueOf(lines) - return &WeightedLines{lines: rv, iter: rv.MapRange()} + l := &WeightedLines{lines: rv, len: len(lines)} + l.iter.Reset(rv) + l.value = reflect.ValueOf(&l.curr).Elem() + return l } // Len returns the remaining number of lines to be iterated over. func (l *WeightedLines) Len() int { - return l.lines.Len() - l.pos + return l.len - l.pos } // Next returns whether the next call of WeightedLine will return a valid line. 
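// The set.go hunks above collapse the separate Ints and Int64s types into one
// generic Ints[T Int]. A self-contained sketch of the same pattern; the names
// mirror the vendored code but are redeclared here purely for illustration:

package main

import "fmt"

// Int and Ints mirror the generic declarations in the vendored set package.
type Int interface{ ~int | ~int64 }

type Ints[T Int] map[T]struct{}

func (s Ints[T]) Add(e T) { s[e] = struct{}{} }

func (s Ints[T]) Has(e T) bool {
	_, ok := s[e]
	return ok
}

func main() {
	// One generic type now serves both the old Ints (int) and Int64s (int64)
	// roles, so the duplicated Int64s methods and Int64sEqual can be deleted.
	a := make(Ints[int])
	a.Add(1)
	b := make(Ints[int64])
	b.Add(2)
	fmt.Println(a.Has(1), b.Has(1)) // true false
}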
func (l *WeightedLines) Next() bool { - if l.pos >= l.lines.Len() { + if l.pos >= l.len { return false } ok := l.iter.Next() if ok { l.pos++ - l.curr = l.iter.Value().Interface().(graph.WeightedLine) + l.value.SetIterValue(&l.iter) } return ok } @@ -127,7 +136,7 @@ func (l *WeightedLines) WeightedLine() graph.WeightedLine { func (l *WeightedLines) Reset() { l.curr = nil l.pos = 0 - l.iter = l.lines.MapRange() + l.iter.Reset(l.lines) } // WeightedLineSlice returns all the remaining lines in the iterator and advances @@ -139,8 +148,9 @@ func (l *WeightedLines) WeightedLineSlice() []graph.WeightedLine { } lines := make([]graph.WeightedLine, 0, l.Len()) for l.iter.Next() { - lines = append(lines, l.iter.Value().Interface().(graph.WeightedLine)) + l.value.SetIterValue(&l.iter) + lines = append(lines, l.curr) } - l.pos = l.lines.Len() + l.pos = l.len return lines } diff --git a/vendor/gonum.org/v1/gonum/graph/iterator/nodes_map_safe.go b/vendor/gonum.org/v1/gonum/graph/iterator/nodes_map_safe.go index 1181545f56b..e79d8a850d4 100644 --- a/vendor/gonum.org/v1/gonum/graph/iterator/nodes_map_safe.go +++ b/vendor/gonum.org/v1/gonum/graph/iterator/nodes_map_safe.go @@ -16,10 +16,11 @@ import ( // Nodes implements the graph.Nodes interfaces. // The iteration order of Nodes is randomized. type Nodes struct { - nodes reflect.Value - iter *reflect.MapIter - pos int - curr graph.Node + iter reflect.MapIter + pos, len int + curr graph.Node + value reflect.Value + nodes reflect.Value } // NewNodes returns a Nodes initialized with the provided nodes, a @@ -30,23 +31,26 @@ type Nodes struct { // the call to NewNodes. func NewNodes(nodes map[int64]graph.Node) *Nodes { rv := reflect.ValueOf(nodes) - return &Nodes{nodes: rv, iter: rv.MapRange()} + n := &Nodes{nodes: rv, len: len(nodes)} + n.iter.Reset(rv) + n.value = reflect.ValueOf(&n.curr).Elem() + return n } // Len returns the remaining number of nodes to be iterated over. func (n *Nodes) Len() int { - return n.nodes.Len() - n.pos + return n.len - n.pos } // Next returns whether the next call of Node will return a valid node. func (n *Nodes) Next() bool { - if n.pos >= n.nodes.Len() { + if n.pos >= n.len { return false } ok := n.iter.Next() if ok { n.pos++ - n.curr = n.iter.Value().Interface().(graph.Node) + n.value.SetIterValue(&n.iter) } return ok } @@ -73,20 +77,21 @@ func (n *Nodes) NodeSlice() []graph.Node { } nodes := make([]graph.Node, 0, n.Len()) for n.iter.Next() { - nodes = append(nodes, n.iter.Value().Interface().(graph.Node)) + n.value.SetIterValue(&n.iter) + nodes = append(nodes, n.curr) } - n.pos = n.nodes.Len() + n.pos = n.len return nodes } // NodesByEdge implements the graph.Nodes interfaces. // The iteration order of Nodes is randomized. type NodesByEdge struct { - nodes map[int64]graph.Node - edges reflect.Value - iter *reflect.MapIter - pos int - curr graph.Node + iter reflect.MapIter + pos, len int + edges reflect.Value + curr graph.Node + nodes map[int64]graph.Node } // NewNodesByEdge returns a NodesByEdge initialized with the @@ -100,7 +105,9 @@ type NodesByEdge struct { // is mutated after the call to NewNodes. 
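// The iterator rewrite above holds a reflect.MapIter by value, rewinds it with
// Reset, and copies map values with Value.SetIterValue. That avoids the
// *MapIter allocation from MapRange and the per-element interface conversion
// from Value().Interface(). A minimal standalone sketch of the pattern
// (map contents illustrative):

package main

import (
	"fmt"
	"reflect"
)

func main() {
	m := map[int64]string{1: "a", 2: "b"}
	rv := reflect.ValueOf(m)

	var iter reflect.MapIter // held by value; nothing allocated per pass
	iter.Reset(rv)           // point the iterator at the map (Go 1.18+)

	var curr string
	dst := reflect.ValueOf(&curr).Elem() // addressable destination

	for iter.Next() {
		dst.SetIterValue(&iter) // write the map value straight into curr
		fmt.Println(curr)
	}

	iter.Reset(rv) // rewind for another pass, reusing the same iterator
}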
func NewNodesByEdge(nodes map[int64]graph.Node, edges map[int64]graph.Edge) *NodesByEdge { rv := reflect.ValueOf(edges) - return &NodesByEdge{nodes: nodes, edges: rv, iter: rv.MapRange()} + n := &NodesByEdge{nodes: nodes, len: len(edges), edges: rv} + n.iter.Reset(rv) + return n } // NewNodesByWeightedEdge returns a NodesByEdge initialized with the @@ -114,7 +121,9 @@ func NewNodesByEdge(nodes map[int64]graph.Node, edges map[int64]graph.Edge) *Nod // is mutated after the call to NewNodes. func NewNodesByWeightedEdge(nodes map[int64]graph.Node, edges map[int64]graph.WeightedEdge) *NodesByEdge { rv := reflect.ValueOf(edges) - return &NodesByEdge{nodes: nodes, edges: rv, iter: rv.MapRange()} + n := &NodesByEdge{nodes: nodes, len: len(edges), edges: rv} + n.iter.Reset(rv) + return n } // NewNodesByLines returns a NodesByEdge initialized with the @@ -128,7 +137,9 @@ func NewNodesByWeightedEdge(nodes map[int64]graph.Node, edges map[int64]graph.We // is mutated after the call to NewNodes. func NewNodesByLines(nodes map[int64]graph.Node, lines map[int64]map[int64]graph.Line) *NodesByEdge { rv := reflect.ValueOf(lines) - return &NodesByEdge{nodes: nodes, edges: rv, iter: rv.MapRange()} + n := &NodesByEdge{nodes: nodes, len: len(lines), edges: rv} + n.iter.Reset(rv) + return n } // NewNodesByWeightedLines returns a NodesByEdge initialized with the @@ -142,17 +153,19 @@ func NewNodesByLines(nodes map[int64]graph.Node, lines map[int64]map[int64]graph // is mutated after the call to NewNodes. func NewNodesByWeightedLines(nodes map[int64]graph.Node, lines map[int64]map[int64]graph.WeightedLine) *NodesByEdge { rv := reflect.ValueOf(lines) - return &NodesByEdge{nodes: nodes, edges: rv, iter: rv.MapRange()} + n := &NodesByEdge{nodes: nodes, len: len(lines), edges: rv} + n.iter.Reset(rv) + return n } // Len returns the remaining number of nodes to be iterated over. func (n *NodesByEdge) Len() int { - return n.edges.Len() - n.pos + return n.len - n.pos } // Next returns whether the next call of Node will return a valid node. func (n *NodesByEdge) Next() bool { - if n.pos >= n.edges.Len() { + if n.pos >= n.len { return false } ok := n.iter.Next() @@ -173,7 +186,7 @@ func (n *NodesByEdge) Node() graph.Node { func (n *NodesByEdge) Reset() { n.curr = nil n.pos = 0 - n.iter = n.edges.MapRange() + n.iter.Reset(n.edges) } // NodeSlice returns all the remaining nodes in the iterator and advances @@ -185,8 +198,9 @@ func (n *NodesByEdge) NodeSlice() []graph.Node { } nodes := make([]graph.Node, 0, n.Len()) for n.iter.Next() { - nodes = append(nodes, n.nodes[n.iter.Key().Int()]) + n.curr = n.nodes[n.iter.Key().Int()] + nodes = append(nodes, n.curr) } - n.pos = n.edges.Len() + n.pos = n.len return nodes } diff --git a/vendor/gonum.org/v1/gonum/graph/set/uid/uid.go b/vendor/gonum.org/v1/gonum/graph/set/uid/uid.go index ce14c76551f..75074425012 100644 --- a/vendor/gonum.org/v1/gonum/graph/set/uid/uid.go +++ b/vendor/gonum.org/v1/gonum/graph/set/uid/uid.go @@ -17,12 +17,12 @@ const Max = math.MaxInt64 // Set implements available ID storage. type Set struct { maxID int64 - used, free set.Int64s + used, free set.Ints[int64] } // NewSet returns a new Set. func NewSet() *Set { - return &Set{maxID: -1, used: make(set.Int64s), free: make(set.Int64s)} + return &Set{maxID: -1, used: make(set.Ints[int64]), free: make(set.Ints[int64])} } // NewID returns a new unique ID. 
The ID returned is not considered used diff --git a/vendor/gonum.org/v1/gonum/graph/simple/dense_directed_matrix.go b/vendor/gonum.org/v1/gonum/graph/simple/dense_directed_matrix.go index dbe5dc82ad7..34ce7b0bcb4 100644 --- a/vendor/gonum.org/v1/gonum/graph/simple/dense_directed_matrix.go +++ b/vendor/gonum.org/v1/gonum/graph/simple/dense_directed_matrix.go @@ -6,8 +6,8 @@ package simple import ( "gonum.org/v1/gonum/graph" - "gonum.org/v1/gonum/graph/internal/ordered" "gonum.org/v1/gonum/graph/iterator" + "gonum.org/v1/gonum/internal/order" "gonum.org/v1/gonum/mat" ) @@ -60,7 +60,7 @@ func NewDirectedMatrix(n int, init, self, absent float64) *DirectedMatrix { // specifies the cost of self connection, and absent specifies the weight // returned for absent edges. func NewDirectedMatrixFrom(nodes []graph.Node, init, self, absent float64) *DirectedMatrix { - ordered.ByID(nodes) + order.ByID(nodes) for i, n := range nodes { if int64(i) != n.ID() { panic("simple: non-contiguous node IDs") diff --git a/vendor/gonum.org/v1/gonum/graph/simple/dense_undirected_matrix.go b/vendor/gonum.org/v1/gonum/graph/simple/dense_undirected_matrix.go index 27fc5a27a57..e97e6afb484 100644 --- a/vendor/gonum.org/v1/gonum/graph/simple/dense_undirected_matrix.go +++ b/vendor/gonum.org/v1/gonum/graph/simple/dense_undirected_matrix.go @@ -6,8 +6,8 @@ package simple import ( "gonum.org/v1/gonum/graph" - "gonum.org/v1/gonum/graph/internal/ordered" "gonum.org/v1/gonum/graph/iterator" + "gonum.org/v1/gonum/internal/order" "gonum.org/v1/gonum/mat" ) @@ -60,7 +60,7 @@ func NewUndirectedMatrix(n int, init, self, absent float64) *UndirectedMatrix { // specifies the cost of self connection, and absent specifies the weight // returned for absent edges. func NewUndirectedMatrixFrom(nodes []graph.Node, init, self, absent float64) *UndirectedMatrix { - ordered.ByID(nodes) + order.ByID(nodes) for i, n := range nodes { if int64(i) != n.ID() { panic("simple: non-contiguous node IDs") diff --git a/vendor/gonum.org/v1/gonum/graph/topo/bron_kerbosch.go b/vendor/gonum.org/v1/gonum/graph/topo/bron_kerbosch.go index 83fdb5bdf8b..e190c260320 100644 --- a/vendor/gonum.org/v1/gonum/graph/topo/bron_kerbosch.go +++ b/vendor/gonum.org/v1/gonum/graph/topo/bron_kerbosch.go @@ -5,8 +5,9 @@ package topo import ( + "slices" + "gonum.org/v1/gonum/graph" - "gonum.org/v1/gonum/graph/internal/ordered" "gonum.org/v1/gonum/graph/internal/set" ) @@ -15,7 +16,7 @@ import ( func DegeneracyOrdering(g graph.Undirected) (order []graph.Node, cores [][]graph.Node) { order, offsets := degeneracyOrdering(g) - ordered.Reverse(order) + slices.Reverse(order) cores = make([][]graph.Node, len(offsets)) offset := len(order) for i, n := range offsets { @@ -145,7 +146,7 @@ func BronKerbosch(g graph.Undirected) [][]graph.Node { x := set.NewNodes() var bk bronKerbosch order, _ := degeneracyOrdering(g) - ordered.Reverse(order) + slices.Reverse(order) for _, v := range order { neighbours := graph.NodesOf(g.From(v.ID())) nv := set.NewNodesSize(len(neighbours)) diff --git a/vendor/gonum.org/v1/gonum/graph/topo/clique_graph.go b/vendor/gonum.org/v1/gonum/graph/topo/clique_graph.go index 202fa953f94..2223ca3b747 100644 --- a/vendor/gonum.org/v1/gonum/graph/topo/clique_graph.go +++ b/vendor/gonum.org/v1/gonum/graph/topo/clique_graph.go @@ -6,8 +6,8 @@ package topo import ( "gonum.org/v1/gonum/graph" - "gonum.org/v1/gonum/graph/internal/ordered" "gonum.org/v1/gonum/graph/internal/set" + "gonum.org/v1/gonum/internal/order" ) // Builder is a pure topological graph construction type. 
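// The dense matrix constructors above now sort their input through the new
// internal order package instead of graph/internal/ordered; the exported
// behavior is unchanged. A usage sketch (node IDs illustrative): the input may
// arrive unsorted, but the sorted IDs must be contiguous from zero or the
// constructor panics.

package main

import (
	"fmt"
	"math"

	"gonum.org/v1/gonum/graph"
	"gonum.org/v1/gonum/graph/simple"
)

func main() {
	// Unsorted input is fine; NewDirectedMatrixFrom sorts by ID internally.
	nodes := []graph.Node{simple.Node(2), simple.Node(0), simple.Node(1)}
	g := simple.NewDirectedMatrixFrom(nodes, math.Inf(1), 0, math.Inf(1))
	fmt.Println(g.Node(1).ID()) // 1
}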
@@ -26,9 +26,9 @@ func CliqueGraph(dst Builder, g graph.Undirected) { // Construct a consistent view of cliques in g. Sorting costs // us a little, but not as much as the cliques themselves. for _, c := range cliques { - ordered.ByID(c) + order.ByID(c) } - ordered.BySliceIDs(cliques) + order.BySliceIDs(cliques) cliqueNodes := make(cliqueNodeSets, len(cliques)) for id, c := range cliques { @@ -59,7 +59,7 @@ func CliqueGraph(dst Builder, g graph.Undirected) { for _, n := range set.IntersectionOfNodes(uc.nodes, vc.nodes) { edgeNodes = append(edgeNodes, n) } - ordered.ByID(edgeNodes) + order.ByID(edgeNodes) } dst.SetEdge(CliqueGraphEdge{from: uc.Clique, to: vc.Clique, nodes: edgeNodes}) diff --git a/vendor/gonum.org/v1/gonum/graph/topo/johnson_cycles.go b/vendor/gonum.org/v1/gonum/graph/topo/johnson_cycles.go index 6c90f60ab53..0d8407b0411 100644 --- a/vendor/gonum.org/v1/gonum/graph/topo/johnson_cycles.go +++ b/vendor/gonum.org/v1/gonum/graph/topo/johnson_cycles.go @@ -6,9 +6,9 @@ package topo import ( "gonum.org/v1/gonum/graph" - "gonum.org/v1/gonum/graph/internal/ordered" "gonum.org/v1/gonum/graph/internal/set" "gonum.org/v1/gonum/graph/iterator" + "gonum.org/v1/gonum/internal/order" ) // johnson implements Johnson's "Finding all the elementary @@ -17,8 +17,8 @@ import ( // Comments in the johnson methods are kept in sync with the comments // and labels from the paper. type johnson struct { - adjacent johnsonGraph // SCC adjacency list. - b []set.Ints // Johnson's "B-list". + adjacent johnsonGraph // SCC adjacency list. + b []set.Ints[int] // Johnson's "B-list". blocked []bool s int @@ -32,7 +32,7 @@ func DirectedCyclesIn(g graph.Directed) [][]graph.Node { jg := johnsonGraphFrom(g) j := johnson{ adjacent: jg, - b: make([]set.Ints, len(jg.orig)), + b: make([]set.Ints[int], len(jg.orig)), blocked: make([]bool, len(jg.orig)), } @@ -57,7 +57,7 @@ func DirectedCyclesIn(g graph.Directed) [][]graph.Node { } if len(j.adjacent.succ[v.ID()]) > 0 { j.blocked[i] = false - j.b[i] = make(set.Ints) + j.b[i] = make(set.Ints[int]) } } //L3: @@ -125,20 +125,20 @@ type johnsonGraph struct { orig []graph.Node index map[int64]int - nodes set.Int64s - succ map[int64]set.Int64s + nodes set.Ints[int64] + succ map[int64]set.Ints[int64] } // johnsonGraphFrom returns a deep copy of the graph g. 
func johnsonGraphFrom(g graph.Directed) johnsonGraph { nodes := graph.NodesOf(g.Nodes()) - ordered.ByID(nodes) + order.ByID(nodes) c := johnsonGraph{ orig: nodes, index: make(map[int64]int, len(nodes)), - nodes: make(set.Int64s, len(nodes)), - succ: make(map[int64]set.Int64s), + nodes: make(set.Ints[int64], len(nodes)), + succ: make(map[int64]set.Ints[int64]), } for i, u := range nodes { uid := u.ID() @@ -147,7 +147,7 @@ func johnsonGraphFrom(g graph.Directed) johnsonGraph { for to.Next() { v := to.Node() if c.succ[uid] == nil { - c.succ[uid] = make(set.Int64s) + c.succ[uid] = make(set.Ints[int64]) c.nodes.Add(uid) } c.nodes.Add(v.ID()) @@ -207,8 +207,8 @@ func (g johnsonGraph) sccSubGraph(sccs [][]graph.Node, min int) johnsonGraph { sub := johnsonGraph{ orig: g.orig, index: g.index, - nodes: make(set.Int64s), - succ: make(map[int64]set.Int64s), + nodes: make(set.Ints[int64]), + succ: make(map[int64]set.Ints[int64]), } var n int @@ -221,7 +221,7 @@ func (g johnsonGraph) sccSubGraph(sccs [][]graph.Node, min int) johnsonGraph { for _, v := range scc { if _, ok := g.succ[u.ID()][v.ID()]; ok { if sub.succ[u.ID()] == nil { - sub.succ[u.ID()] = make(set.Int64s) + sub.succ[u.ID()] = make(set.Ints[int64]) sub.nodes.Add(u.ID()) } sub.nodes.Add(v.ID()) diff --git a/vendor/gonum.org/v1/gonum/graph/topo/paton_cycles.go b/vendor/gonum.org/v1/gonum/graph/topo/paton_cycles.go index 525f2983f73..2e1ce4cb7ba 100644 --- a/vendor/gonum.org/v1/gonum/graph/topo/paton_cycles.go +++ b/vendor/gonum.org/v1/gonum/graph/topo/paton_cycles.go @@ -17,7 +17,7 @@ func UndirectedCyclesIn(g graph.Undirected) [][]graph.Node { // https://doi.org/10.1145/363219.363232 var cycles [][]graph.Node - done := make(set.Int64s) + done := make(set.Ints[int64]) var tree linear.NodeStack nodes := g.Nodes() for nodes.Next() { @@ -30,7 +30,7 @@ func UndirectedCyclesIn(g graph.Undirected) [][]graph.Node { tree = tree[:0] tree.Push(n) - from := sets{id: set.Int64s{}} + from := sets{id: set.Ints[int64]{}} to := map[int64]graph.Node{id: n} for tree.Len() != 0 { @@ -68,12 +68,12 @@ func UndirectedCyclesIn(g graph.Undirected) [][]graph.Node { return cycles } -type sets map[int64]set.Int64s +type sets map[int64]set.Ints[int64] func (s sets) add(uid, vid int64) { e, ok := s[vid] if !ok { - e = make(set.Int64s) + e = make(set.Ints[int64]) s[vid] = e } e.Add(uid) diff --git a/vendor/gonum.org/v1/gonum/graph/topo/tarjan.go b/vendor/gonum.org/v1/gonum/graph/topo/tarjan.go index ab479a92d7f..31b165199a7 100644 --- a/vendor/gonum.org/v1/gonum/graph/topo/tarjan.go +++ b/vendor/gonum.org/v1/gonum/graph/topo/tarjan.go @@ -6,10 +6,11 @@ package topo import ( "fmt" + "slices" "gonum.org/v1/gonum/graph" - "gonum.org/v1/gonum/graph/internal/ordered" "gonum.org/v1/gonum/graph/internal/set" + "gonum.org/v1/gonum/internal/order" ) // Unorderable is an error containing sets of unorderable graph.Nodes. @@ -29,7 +30,7 @@ func (e Unorderable) Error() string { return fmt.Sprintf("topo: no topological ordering: cyclic components: %v", [][]graph.Node(e)) } -func lexical(nodes []graph.Node) { ordered.ByID(nodes) } +func lexical(nodes []graph.Node) { order.ByID(nodes) } // Sort performs a topological sort of the directed graph g returning the 'from' to 'to' // sort order. 
If a topological ordering is not possible, an Unorderable error is returned @@ -70,12 +71,10 @@ func sortedFrom(sccs [][]graph.Node, order func([]graph.Node)) ([]graph.Node, er } var err error if sc != nil { - for i, j := 0, len(sc)-1; i < j; i, j = i+1, j-1 { - sc[i], sc[j] = sc[j], sc[i] - } + slices.Reverse(sc) err = sc } - ordered.Reverse(sorted) + slices.Reverse(sorted) return sorted, err } @@ -100,12 +99,12 @@ func tarjanSCCstabilized(g graph.Directed, order func([]graph.Node)) [][]graph.N } } else { order(nodes) - ordered.Reverse(nodes) + slices.Reverse(nodes) succ = func(id int64) []graph.Node { to := graph.NodesOf(g.From(id)) order(to) - ordered.Reverse(to) + slices.Reverse(to) return to } } @@ -115,7 +114,7 @@ func tarjanSCCstabilized(g graph.Directed, order func([]graph.Node)) [][]graph.N indexTable: make(map[int64]int, len(nodes)), lowLink: make(map[int64]int, len(nodes)), - onStack: make(set.Int64s), + onStack: make(set.Ints[int64]), } for _, v := range nodes { if t.indexTable[v.ID()] == 0 { @@ -135,7 +134,7 @@ type tarjan struct { index int indexTable map[int64]int lowLink map[int64]int - onStack set.Int64s + onStack set.Ints[int64] stack []graph.Node @@ -187,10 +186,3 @@ func (t *tarjan) strongconnect(v graph.Node) { t.sccs = append(t.sccs, scc) } } - -func min(a, b int) int { - if a < b { - return a - } - return b -} diff --git a/vendor/gonum.org/v1/gonum/graph/topo/topo.go b/vendor/gonum.org/v1/gonum/graph/topo/topo.go index a76accb3186..344972798ca 100644 --- a/vendor/gonum.org/v1/gonum/graph/topo/topo.go +++ b/vendor/gonum.org/v1/gonum/graph/topo/topo.go @@ -6,8 +6,8 @@ package topo import ( "gonum.org/v1/gonum/graph" - "gonum.org/v1/gonum/graph/internal/ordered" "gonum.org/v1/gonum/graph/traverse" + "gonum.org/v1/gonum/internal/order" ) // IsPathIn returns whether path is a path in g. @@ -80,8 +80,8 @@ func Equal(a, b graph.Graph) bool { aNodeSlice := graph.NodesOf(aNodes) bNodeSlice := graph.NodesOf(bNodes) - ordered.ByID(aNodeSlice) - ordered.ByID(bNodeSlice) + order.ByID(aNodeSlice) + order.ByID(bNodeSlice) for i, aU := range aNodeSlice { id := aU.ID() if id != bNodeSlice[i].ID() { @@ -96,8 +96,8 @@ func Equal(a, b graph.Graph) bool { aAdjacent := graph.NodesOf(toA) bAdjacent := graph.NodesOf(toB) - ordered.ByID(aAdjacent) - ordered.ByID(bAdjacent) + order.ByID(aAdjacent) + order.ByID(bAdjacent) for i, aV := range aAdjacent { id := aV.ID() if id != bAdjacent[i].ID() { diff --git a/vendor/gonum.org/v1/gonum/graph/traverse/traverse.go b/vendor/gonum.org/v1/gonum/graph/traverse/traverse.go index 2a4efb66a60..bc4f9596c38 100644 --- a/vendor/gonum.org/v1/gonum/graph/traverse/traverse.go +++ b/vendor/gonum.org/v1/gonum/graph/traverse/traverse.go @@ -39,7 +39,7 @@ type BreadthFirst struct { Traverse func(graph.Edge) bool queue linear.NodeQueue - visited set.Int64s + visited set.Ints[int64] } // Walk performs a breadth-first traversal of the graph g starting from the given node, @@ -49,7 +49,7 @@ type BreadthFirst struct { // non-nil, it is called with each node the first time it is visited. 
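// For the traverse changes above, only the internal visited set moved from
// set.Int64s to the generic set.Ints[int64]; the exported Walk API is
// untouched. A minimal usage sketch on a three-node path graph (the graph and
// the Visit callback are illustrative):

package main

import (
	"fmt"

	"gonum.org/v1/gonum/graph"
	"gonum.org/v1/gonum/graph/simple"
	"gonum.org/v1/gonum/graph/traverse"
)

func main() {
	g := simple.NewUndirectedGraph()
	g.SetEdge(g.NewEdge(simple.Node(0), simple.Node(1)))
	g.SetEdge(g.NewEdge(simple.Node(1), simple.Node(2)))

	bfs := traverse.BreadthFirst{
		Visit: func(n graph.Node) { fmt.Println("visit", n.ID()) },
	}
	bfs.Walk(g, simple.Node(0), nil) // visits 0, 1, 2 in breadth-first order
}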
func (b *BreadthFirst) Walk(g Graph, from graph.Node, until func(n graph.Node, d int) bool) graph.Node { if b.visited == nil { - b.visited = make(set.Int64s) + b.visited = make(set.Ints[int64]) } b.queue.Enqueue(from) if b.Visit != nil && !b.visited.Has(from.ID()) { @@ -147,7 +147,7 @@ type DepthFirst struct { Traverse func(graph.Edge) bool stack linear.NodeStack - visited set.Int64s + visited set.Ints[int64] } // Walk performs a depth-first traversal of the graph g starting from the given node, @@ -157,7 +157,7 @@ type DepthFirst struct { // is called with each node the first time it is visited. func (d *DepthFirst) Walk(g Graph, from graph.Node, until func(graph.Node) bool) graph.Node { if d.visited == nil { - d.visited = make(set.Int64s) + d.visited = make(set.Ints[int64]) } d.stack.Push(from) for d.stack.Len() != 0 { diff --git a/vendor/gonum.org/v1/gonum/internal/order/doc.go b/vendor/gonum.org/v1/gonum/internal/order/doc.go new file mode 100644 index 00000000000..6ed2f38c6b2 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/order/doc.go @@ -0,0 +1,6 @@ +// Copyright ©2024 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package order provides common sorting functions. +package order // import "gonum.org/v1/gonum/internal/order" diff --git a/vendor/gonum.org/v1/gonum/internal/order/order.go b/vendor/gonum.org/v1/gonum/internal/order/order.go new file mode 100644 index 00000000000..7baaec38c78 --- /dev/null +++ b/vendor/gonum.org/v1/gonum/internal/order/order.go @@ -0,0 +1,60 @@ +// Copyright ©2024 The Gonum Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package order + +import ( + "cmp" + "slices" + "sort" + + "gonum.org/v1/gonum/graph" +) + +// ByID sorts a slice of graph.Node by ID. +func ByID[S ~[]E, E graph.Node](n S) { + sort.Slice(n, func(i, j int) bool { return n[i].ID() < n[j].ID() }) +} + +// BySliceValues sorts a slice of []cmp.Ordered lexically by the values of +// the []cmp.Ordered. +func BySliceValues[S ~[]E, E cmp.Ordered](c []S) { + slices.SortFunc(c, func(a, b S) int { + l := min(len(a), len(b)) + for k, v := range a[:l] { + if n := cmp.Compare(v, b[k]); n != 0 { + return n + } + } + return cmp.Compare(len(a), len(b)) + }) +} + +// BySliceIDs sorts a slice of []graph.Node lexically by the IDs of the +// []graph.Node. +func BySliceIDs(c [][]graph.Node) { + slices.SortFunc(c, func(a, b []graph.Node) int { + l := min(len(a), len(b)) + for k, v := range a[:l] { + if n := cmp.Compare(v.ID(), b[k].ID()); n != 0 { + return n + } + } + return cmp.Compare(len(a), len(b)) + }) +} + +// LinesByIDs sorts a slice of graph.Line lexically by the From IDs, +// then by the To IDs, finally by the Line IDs.
+func LinesByIDs(n []graph.Line) { + slices.SortFunc(n, func(a, b graph.Line) int { + if n := cmp.Compare(a.From().ID(), b.From().ID()); n != 0 { + return n + } + if n := cmp.Compare(a.To().ID(), b.To().ID()); n != 0 { + return n + } + return cmp.Compare(a.ID(), b.ID()) + }) +} diff --git a/vendor/gonum.org/v1/gonum/lapack/gonum/lapack.go b/vendor/gonum.org/v1/gonum/lapack/gonum/lapack.go index fef4f5583de..5daefc584dd 100644 --- a/vendor/gonum.org/v1/gonum/lapack/gonum/lapack.go +++ b/vendor/gonum.org/v1/gonum/lapack/gonum/lapack.go @@ -13,20 +13,6 @@ type Implementation struct{} var _ lapack.Float64 = Implementation{} -func min(a, b int) int { - if a < b { - return a - } - return b -} - -func max(a, b int) int { - if a > b { - return a - } - return b -} - func abs(a int) int { if a < 0 { return -a diff --git a/vendor/gonum.org/v1/gonum/lapack/lapack.go b/vendor/gonum.org/v1/gonum/lapack/lapack.go index 60b5d0d363b..60ef1c244a6 100644 --- a/vendor/gonum.org/v1/gonum/lapack/lapack.go +++ b/vendor/gonum.org/v1/gonum/lapack/lapack.go @@ -221,7 +221,7 @@ const ( EVSelected EVHowMany = 'S' // Compute selected right and/or left eigenvectors. ) -// MaximizeNormX specifies the heuristic method for computing a contribution to +// MaximizeNormXJob specifies the heuristic method for computing a contribution to // the reciprocal Dif-estimate in Dlatdf. type MaximizeNormXJob byte diff --git a/vendor/gonum.org/v1/gonum/lapack/lapack64/lapack64.go b/vendor/gonum.org/v1/gonum/lapack/lapack64/lapack64.go index d0afab119cd..1b4c1734a15 100644 --- a/vendor/gonum.org/v1/gonum/lapack/lapack64/lapack64.go +++ b/vendor/gonum.org/v1/gonum/lapack/lapack64/lapack64.go @@ -27,13 +27,6 @@ type Tridiagonal struct { DU []float64 } -func max(a, b int) int { - if a > b { - return a - } - return b -} - // Potrf computes the Cholesky factorization of a. // The factorization has the form // diff --git a/vendor/gonum.org/v1/gonum/mat/lu.go b/vendor/gonum.org/v1/gonum/mat/lu.go index 18ed3dab636..b530ada7e5c 100644 --- a/vendor/gonum.org/v1/gonum/mat/lu.go +++ b/vendor/gonum.org/v1/gonum/mat/lu.go @@ -237,6 +237,8 @@ func (lu *LU) RowPivots(dst []int) []int { return dst } +// Pivot returns the row pivots of the receiver. +// // Deprecated: Use RowPivots instead. func (lu *LU) Pivot(dst []int) []int { return lu.RowPivots(dst) diff --git a/vendor/gonum.org/v1/gonum/mat/matrix.go b/vendor/gonum.org/v1/gonum/mat/matrix.go index 9fc372c71ed..2d67bbe081e 100644 --- a/vendor/gonum.org/v1/gonum/mat/matrix.go +++ b/vendor/gonum.org/v1/gonum/mat/matrix.go @@ -962,20 +962,6 @@ func Trace(a Matrix) float64 { return v } -func min(a, b int) int { - if a < b { - return a - } - return b -} - -func max(a, b int) int { - if a > b { - return a - } - return b -} - // use returns a float64 slice with l elements, using f if it // has the necessary capacity, otherwise creating a new slice. func use(f []float64, l int) []float64 { diff --git a/vendor/gonum.org/v1/gonum/mat/qr.go b/vendor/gonum.org/v1/gonum/mat/qr.go index af99dbcaa15..7f8fec8f6f6 100644 --- a/vendor/gonum.org/v1/gonum/mat/qr.go +++ b/vendor/gonum.org/v1/gonum/mat/qr.go @@ -31,8 +31,13 @@ func (qr *QR) Dims() (r, c int) { return qr.qr.Dims() } -// At returns the element at row i, column j. +// At returns the element at row i, column j. At will panic if the receiver +// does not contain a successful factorization. 
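// The order package introduced above replaces the old sort.Slice boolean
// comparators with three-way slices.SortFunc plus cmp.Compare, and generalizes
// BySliceValues over any cmp.Ordered element. Since the package is internal
// and not importable, here is a standalone sketch of the same lexical
// comparison (data illustrative):

package main

import (
	"cmp"
	"fmt"
	"slices"
)

// bySliceValues mirrors order.BySliceValues: slices compare element by
// element, with the shorter slice winning a tie on the common prefix.
func bySliceValues[S ~[]E, E cmp.Ordered](c []S) {
	slices.SortFunc(c, func(a, b S) int {
		for k, v := range a[:min(len(a), len(b))] {
			if n := cmp.Compare(v, b[k]); n != 0 {
				return n
			}
		}
		return cmp.Compare(len(a), len(b))
	})
}

func main() {
	c := [][]int64{{2, 1}, {1, 9, 9}, {1, 9}}
	bySliceValues(c)
	fmt.Println(c) // [[1 9] [1 9 9] [2 1]]
}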
func (qr *QR) At(i, j int) float64 { + if !qr.isValid() { + panic(badQR) + } + m, n := qr.Dims() if uint(i) >= uint(m) { panic(ErrRowAccess) @@ -41,6 +46,20 @@ func (qr *QR) At(i, j int) float64 { panic(ErrColAccess) } + if qr.q == nil || qr.q.IsEmpty() { + // Calculate Qi, Q i-th row + qi := getFloat64s(m, true) + qr.qRowTo(i, qi) + + // Compute QR(i,j) + var val float64 + for k := 0; k <= j; k++ { + val += qi[k] * qr.qr.at(k, j) + } + putFloat64s(qi) + return val + } + var val float64 for k := 0; k <= j; k++ { val += qr.q.at(i, k) * qr.qr.at(k, j) @@ -48,6 +67,25 @@ func (qr *QR) At(i, j int) float64 { return val } +// qRowTo extracts the i-th row of the orthonormal matrix Q from a QR +// decomposition. +func (qr *QR) qRowTo(i int, dst []float64) { + c := blas64.General{ + Rows: 1, + Cols: len(dst), + Stride: len(dst), + Data: dst, + } + c.Data[i] = 1 // C is the i-th unit vector + + // Construct Qi from the elementary reflectors: Qi = C * (H(1) H(2) ... H(nTau)) + work := []float64{0} + lapack64.Ormqr(blas.Right, blas.NoTrans, qr.qr.mat, qr.tau, c, work, -1) + work = getFloat64s(int(work[0]), false) + lapack64.Ormqr(blas.Right, blas.NoTrans, qr.qr.mat, qr.tau, c, work, len(work)) + putFloat64s(work) +} + // T performs an implicit transpose by returning the receiver inside a // Transpose. func (qr *QR) T() Matrix { @@ -98,7 +136,9 @@ func (qr *QR) factorize(a Matrix, norm lapack.MatrixNorm) { lapack64.Geqrf(qr.qr.mat, qr.tau, work, len(work)) putFloat64s(work) qr.updateCond(norm) - qr.updateQ() + if qr.q != nil { + qr.q.Reset() + } } func (qr *QR) updateQ() { @@ -149,7 +189,7 @@ func (qr *QR) RTo(dst *Dense) { dst.ReuseAs(r, c) } else { r2, c2 := dst.Dims() - if c != r2 || c != c2 { + if r != r2 || c != c2 { panic(ErrShape) } } @@ -192,6 +232,10 @@ func (qr *QR) QTo(dst *Dense) { panic(ErrShape) } } + + if qr.q == nil || qr.q.IsEmpty() { + qr.updateQ() + } dst.Copy(qr.q) } diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go index 636edb460a4..4a9fce53c44 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go @@ -180,6 +180,8 @@ type CommonLanguageSettings struct { ReferenceDocsUri string `protobuf:"bytes,1,opt,name=reference_docs_uri,json=referenceDocsUri,proto3" json:"reference_docs_uri,omitempty"` // The destination where API teams want this client library to be published. Destinations []ClientLibraryDestination `protobuf:"varint,2,rep,packed,name=destinations,proto3,enum=google.api.ClientLibraryDestination" json:"destinations,omitempty"` + // Configuration for which RPCs should be generated in the GAPIC client. + SelectiveGapicGeneration *SelectiveGapicGeneration `protobuf:"bytes,3,opt,name=selective_gapic_generation,json=selectiveGapicGeneration,proto3" json:"selective_gapic_generation,omitempty"` } func (x *CommonLanguageSettings) Reset() { @@ -229,6 +231,13 @@ func (x *CommonLanguageSettings) GetDestinations() []ClientLibraryDestination { return nil } +func (x *CommonLanguageSettings) GetSelectiveGapicGeneration() *SelectiveGapicGeneration { + if x != nil { + return x.SelectiveGapicGeneration + } + return nil +} + // Details about how and where to publish client libraries. type ClientLibrarySettings struct { state protoimpl.MessageState @@ -719,6 +728,8 @@ type PythonSettings struct { // Some settings. 
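// Returning to the mat.QR changes above: factorize no longer forms Q eagerly,
// QTo builds it in full on first request, and At falls back to reconstructing
// a single row of Q through Ormqr when Q has not been materialized. A usage
// sketch (matrix values illustrative):

package main

import (
	"fmt"

	"gonum.org/v1/gonum/mat"
)

func main() {
	a := mat.NewDense(3, 2, []float64{1, 2, 3, 4, 5, 6})

	var qr mat.QR
	qr.Factorize(a) // Q is no longer materialized here

	// Element access stays cheap for tall matrices: only the needed row of Q
	// is reconstructed from the elementary reflectors.
	fmt.Println(qr.At(0, 0))

	// QTo forms the full Q lazily on first request.
	var q mat.Dense
	qr.QTo(&q)
	fmt.Println(mat.Formatted(&q))
}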
Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` + // Experimental features to be included during client library generation. + ExperimentalFeatures *PythonSettings_ExperimentalFeatures `protobuf:"bytes,2,opt,name=experimental_features,json=experimentalFeatures,proto3" json:"experimental_features,omitempty"` } func (x *PythonSettings) Reset() { @@ -760,6 +771,13 @@ func (x *PythonSettings) GetCommon() *CommonLanguageSettings { return nil } +func (x *PythonSettings) GetExperimentalFeatures() *PythonSettings_ExperimentalFeatures { + if x != nil { + return x.ExperimentalFeatures + } + return nil +} + // Settings for Node client libraries. type NodeSettings struct { state protoimpl.MessageState @@ -975,6 +993,16 @@ type GoSettings struct { // Some settings. Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` + // Map of service names to renamed services. Keys are the package relative + // service names and values are the name to be used for the service client + // and call options. + // + // publishing: + // + // go_settings: + // renamed_services: + // Publisher: TopicAdmin + RenamedServices map[string]string `protobuf:"bytes,2,rep,name=renamed_services,json=renamedServices,proto3" json:"renamed_services,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } func (x *GoSettings) Reset() { @@ -1016,6 +1044,13 @@ func (x *GoSettings) GetCommon() *CommonLanguageSettings { return nil } +func (x *GoSettings) GetRenamedServices() map[string]string { + if x != nil { + return x.RenamedServices + } + return nil +} + // Describes the generator configuration for a method. type MethodSettings struct { state protoimpl.MessageState @@ -1024,6 +1059,13 @@ type MethodSettings struct { // The fully qualified name of the method, for which the options below apply. // This is used to find the method to apply the options. + // + // Example: + // + // publishing: + // method_settings: + // - selector: google.storage.control.v2.StorageControl.CreateFolder + // # method settings for CreateFolder... Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"` // Describes settings to use for long-running operations when generating // API methods for RPCs. 
Complements RPCs that use the annotations in @@ -1033,15 +1075,12 @@ type MethodSettings struct { // // publishing: // method_settings: - // - selector: google.cloud.speech.v2.Speech.BatchRecognize - // long_running: - // initial_poll_delay: - // seconds: 60 # 1 minute - // poll_delay_multiplier: 1.5 - // max_poll_delay: - // seconds: 360 # 6 minutes - // total_poll_timeout: - // seconds: 54000 # 90 minutes + // - selector: google.cloud.speech.v2.Speech.BatchRecognize + // long_running: + // initial_poll_delay: 60s # 1 minute + // poll_delay_multiplier: 1.5 + // max_poll_delay: 360s # 6 minutes + // total_poll_timeout: 54000s # 90 minutes LongRunning *MethodSettings_LongRunning `protobuf:"bytes,2,opt,name=long_running,json=longRunning,proto3" json:"long_running,omitempty"` // List of top-level fields of the request message, that should be // automatically populated by the client libraries based on their @@ -1051,9 +1090,9 @@ type MethodSettings struct { // // publishing: // method_settings: - // - selector: google.example.v1.ExampleService.CreateExample - // auto_populated_fields: - // - request_id + // - selector: google.example.v1.ExampleService.CreateExample + // auto_populated_fields: + // - request_id AutoPopulatedFields []string `protobuf:"bytes,3,rep,name=auto_populated_fields,json=autoPopulatedFields,proto3" json:"auto_populated_fields,omitempty"` } @@ -1110,6 +1149,123 @@ func (x *MethodSettings) GetAutoPopulatedFields() []string { return nil } +// This message is used to configure the generation of a subset of the RPCs in +// a service for client libraries. +type SelectiveGapicGeneration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // An allowlist of the fully qualified names of RPCs that should be included + // on public client surfaces. + Methods []string `protobuf:"bytes,1,rep,name=methods,proto3" json:"methods,omitempty"` +} + +func (x *SelectiveGapicGeneration) Reset() { + *x = SelectiveGapicGeneration{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SelectiveGapicGeneration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SelectiveGapicGeneration) ProtoMessage() {} + +func (x *SelectiveGapicGeneration) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SelectiveGapicGeneration.ProtoReflect.Descriptor instead. +func (*SelectiveGapicGeneration) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{12} +} + +func (x *SelectiveGapicGeneration) GetMethods() []string { + if x != nil { + return x.Methods + } + return nil +} + +// Experimental features to be included during client library generation. +// These fields will be deprecated once the feature graduates and is enabled +// by default. +type PythonSettings_ExperimentalFeatures struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Enables generation of asynchronous REST clients if `rest` transport is + // enabled. By default, asynchronous REST clients will not be generated. 
+ // This feature will be enabled by default 1 month after launching the + // feature in preview packages. + RestAsyncIoEnabled bool `protobuf:"varint,1,opt,name=rest_async_io_enabled,json=restAsyncIoEnabled,proto3" json:"rest_async_io_enabled,omitempty"` + // Enables generation of protobuf code using new types that are more + // Pythonic which are included in `protobuf>=5.29.x`. This feature will be + // enabled by default 1 month after launching the feature in preview + // packages. + ProtobufPythonicTypesEnabled bool `protobuf:"varint,2,opt,name=protobuf_pythonic_types_enabled,json=protobufPythonicTypesEnabled,proto3" json:"protobuf_pythonic_types_enabled,omitempty"` +} + +func (x *PythonSettings_ExperimentalFeatures) Reset() { + *x = PythonSettings_ExperimentalFeatures{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PythonSettings_ExperimentalFeatures) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PythonSettings_ExperimentalFeatures) ProtoMessage() {} + +func (x *PythonSettings_ExperimentalFeatures) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PythonSettings_ExperimentalFeatures.ProtoReflect.Descriptor instead. +func (*PythonSettings_ExperimentalFeatures) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{6, 0} +} + +func (x *PythonSettings_ExperimentalFeatures) GetRestAsyncIoEnabled() bool { + if x != nil { + return x.RestAsyncIoEnabled + } + return false +} + +func (x *PythonSettings_ExperimentalFeatures) GetProtobufPythonicTypesEnabled() bool { + if x != nil { + return x.ProtobufPythonicTypesEnabled + } + return false +} + // Describes settings to use when generating API methods that use the // long-running operation pattern. 
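// The genproto additions above introduce SelectiveGapicGeneration,
// PythonSettings.ExperimentalFeatures, and GoSettings.RenamedServices. A
// minimal sketch constructing the new messages; the selector string reuses the
// illustrative example from the comments above, and the field values are
// hypothetical:

package main

import (
	"fmt"

	"google.golang.org/genproto/googleapis/api/annotations"
)

func main() {
	common := &annotations.CommonLanguageSettings{
		// Allowlist of RPCs to expose on generated client surfaces.
		SelectiveGapicGeneration: &annotations.SelectiveGapicGeneration{
			Methods: []string{"google.example.v1.ExampleService.CreateExample"},
		},
	}
	py := &annotations.PythonSettings{
		Common: common,
		// Opt in to the experimental async REST client generation.
		ExperimentalFeatures: &annotations.PythonSettings_ExperimentalFeatures{
			RestAsyncIoEnabled: true,
		},
	}
	fmt.Println(py.GetExperimentalFeatures().GetRestAsyncIoEnabled()) // true
}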
// All default values below are from those used in the client library @@ -1138,7 +1294,7 @@ type MethodSettings_LongRunning struct { func (x *MethodSettings_LongRunning) Reset() { *x = MethodSettings_LongRunning{} if protoimpl.UnsafeEnabled { - mi := &file_google_api_client_proto_msgTypes[15] + mi := &file_google_api_client_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1151,7 +1307,7 @@ func (x *MethodSettings_LongRunning) String() string { func (*MethodSettings_LongRunning) ProtoMessage() {} func (x *MethodSettings_LongRunning) ProtoReflect() protoreflect.Message { - mi := &file_google_api_client_proto_msgTypes[15] + mi := &file_google_api_client_proto_msgTypes[18] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1339,7 +1495,7 @@ var file_google_api_client_proto_rawDesc = []byte{ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x94, 0x01, 0x0a, 0x16, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xf8, 0x01, 0x0a, 0x16, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x30, 0x0a, 0x12, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, @@ -1348,240 +1504,275 @@ var file_google_api_client_proto_rawDesc = []byte{ 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x0c, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x93, 0x05, - 0x0a, 0x15, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, - 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x12, 0x3a, 0x0a, 0x0c, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x67, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, - 0x52, 0x0b, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x12, 0x2c, 0x0a, - 0x12, 0x72, 0x65, 0x73, 0x74, 0x5f, 0x6e, 0x75, 0x6d, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x65, 0x6e, - 0x75, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x72, 0x65, 0x73, 0x74, 0x4e, - 0x75, 0x6d, 0x65, 0x72, 0x69, 0x63, 0x45, 0x6e, 0x75, 0x6d, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x6a, - 0x61, 0x76, 0x61, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x15, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x4a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0c, 0x6a, 0x61, - 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x0c, 0x63, 0x70, - 0x70, 
0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x70, - 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0b, 0x63, 0x70, 0x70, 0x53, 0x65, - 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x0c, 0x70, 0x68, 0x70, 0x5f, 0x73, 0x65, - 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x68, 0x70, 0x53, 0x65, 0x74, - 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0b, 0x70, 0x68, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, - 0x67, 0x73, 0x12, 0x43, 0x0a, 0x0f, 0x70, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x74, - 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, - 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x70, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, - 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x6e, 0x6f, 0x64, 0x65, 0x5f, - 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4e, 0x6f, 0x64, 0x65, - 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0c, 0x6e, 0x6f, 0x64, 0x65, 0x53, 0x65, - 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x43, 0x0a, 0x0f, 0x64, 0x6f, 0x74, 0x6e, 0x65, 0x74, - 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74, - 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x64, 0x6f, 0x74, - 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x72, - 0x75, 0x62, 0x79, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1b, 0x20, 0x01, + 0x0c, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x62, 0x0a, + 0x1a, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x67, 0x61, 0x70, 0x69, 0x63, + 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53, + 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x47, 0x61, 0x70, 0x69, 0x63, 0x47, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x18, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x76, 0x65, 0x47, 0x61, 0x70, 0x69, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x22, 0x93, 0x05, 0x0a, 0x15, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, + 0x61, 0x72, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3a, 0x0a, 0x0c, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, + 0x73, 0x74, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, + 0x74, 0x61, 0x67, 0x65, 0x52, 0x0b, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, + 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x72, 0x65, 0x73, 0x74, 0x5f, 0x6e, 0x75, 0x6d, 0x65, 0x72, 0x69, + 0x63, 0x5f, 0x65, 0x6e, 0x75, 
0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x72, + 0x65, 0x73, 0x74, 0x4e, 0x75, 0x6d, 0x65, 0x72, 0x69, 0x63, 0x45, 0x6e, 0x75, 0x6d, 0x73, 0x12, + 0x3d, 0x0a, 0x0d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, + 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x4a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, + 0x52, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, + 0x0a, 0x0c, 0x63, 0x70, 0x70, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x16, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x43, 0x70, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0b, 0x63, + 0x70, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x0c, 0x70, 0x68, + 0x70, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x68, + 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0b, 0x70, 0x68, 0x70, 0x53, 0x65, + 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x43, 0x0a, 0x0f, 0x70, 0x79, 0x74, 0x68, 0x6f, 0x6e, + 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x79, 0x74, + 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x70, 0x79, 0x74, + 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x6e, + 0x6f, 0x64, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x52, 0x75, 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0c, 0x72, 0x75, - 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x37, 0x0a, 0x0b, 0x67, 0x6f, - 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x47, 0x6f, 0x53, - 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0a, 0x67, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, - 0x6e, 0x67, 0x73, 0x22, 0xf4, 0x04, 0x0a, 0x0a, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x69, - 0x6e, 0x67, 0x12, 0x43, 0x0a, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x73, 0x65, 0x74, - 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, - 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, - 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x22, 0x0a, 0x0d, 0x6e, 0x65, 0x77, 0x5f, 0x69, - 0x73, 0x73, 0x75, 0x65, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x65, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, - 0x6e, 0x65, 0x77, 0x49, 0x73, 0x73, 0x75, 0x65, 0x55, 0x72, 0x69, 0x12, 0x2b, 0x0a, 0x11, 0x64, - 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, 0x72, 0x69, - 0x18, 0x66, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x69, 0x12, 0x24, 0x0a, 0x0e, 0x61, 0x70, 0x69, 0x5f, - 0x73, 0x68, 0x6f, 0x72, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 
0x65, 0x18, 0x67, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0c, 0x61, 0x70, 0x69, 0x53, 0x68, 0x6f, 0x72, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x21, - 0x0a, 0x0c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x68, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x4c, 0x61, 0x62, 0x65, - 0x6c, 0x12, 0x34, 0x0a, 0x16, 0x63, 0x6f, 0x64, 0x65, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x5f, 0x67, - 0x69, 0x74, 0x68, 0x75, 0x62, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x73, 0x18, 0x69, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x14, 0x63, 0x6f, 0x64, 0x65, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x47, 0x69, 0x74, 0x68, - 0x75, 0x62, 0x54, 0x65, 0x61, 0x6d, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x64, 0x6f, 0x63, 0x5f, 0x74, - 0x61, 0x67, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x6a, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0c, 0x64, 0x6f, 0x63, 0x54, 0x61, 0x67, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x49, 0x0a, - 0x0c, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x6b, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, - 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x6f, 0x72, 0x67, 0x61, - 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4c, 0x0a, 0x10, 0x6c, 0x69, 0x62, 0x72, - 0x61, 0x72, 0x79, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x6d, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, 0x65, 0x74, - 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0f, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, 0x65, - 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x49, 0x0a, 0x21, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x5f, - 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, - 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x6e, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x1e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, - 0x65, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, - 0x69, 0x12, 0x47, 0x0a, 0x20, 0x72, 0x65, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, - 0x6e, 0x63, 0x65, 0x5f, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x6f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1d, 0x72, 0x65, 0x73, - 0x74, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, - 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x69, 0x22, 0x9a, 0x02, 0x0a, 0x0c, 0x4a, - 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x6c, - 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x50, 0x61, 0x63, - 0x6b, 0x61, 0x67, 0x65, 0x12, 0x5f, 0x0a, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, - 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4a, - 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 
0x45, 0x6e, 0x74, - 0x72, 0x79, 0x52, 0x11, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, - 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0c, 0x6e, 0x6f, + 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x43, 0x0a, 0x0f, 0x64, 0x6f, + 0x74, 0x6e, 0x65, 0x74, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1a, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, + 0x0e, 0x64, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, + 0x3d, 0x0a, 0x0d, 0x72, 0x75, 0x62, 0x79, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, + 0x18, 0x1b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x52, 0x75, 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, + 0x52, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x37, + 0x0a, 0x0b, 0x67, 0x6f, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1c, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0a, 0x67, 0x6f, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x22, 0xf4, 0x04, 0x0a, 0x0a, 0x50, 0x75, 0x62, 0x6c, + 0x69, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x12, 0x43, 0x0a, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, + 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x6d, 0x65, 0x74, + 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x22, 0x0a, 0x0d, 0x6e, + 0x65, 0x77, 0x5f, 0x69, 0x73, 0x73, 0x75, 0x65, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x65, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x65, 0x77, 0x49, 0x73, 0x73, 0x75, 0x65, 0x55, 0x72, 0x69, 0x12, + 0x2b, 0x0a, 0x11, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x75, 0x72, 0x69, 0x18, 0x66, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x64, 0x6f, 0x63, 0x75, + 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x69, 0x12, 0x24, 0x0a, 0x0e, + 0x61, 0x70, 0x69, 0x5f, 0x73, 0x68, 0x6f, 0x72, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x67, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x70, 0x69, 0x53, 0x68, 0x6f, 0x72, 0x74, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x5f, 0x6c, 0x61, 0x62, + 0x65, 0x6c, 0x18, 0x68, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x34, 0x0a, 0x16, 0x63, 0x6f, 0x64, 0x65, 0x6f, 0x77, 0x6e, + 0x65, 0x72, 0x5f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x73, 0x18, + 0x69, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x63, 0x6f, 0x64, 0x65, 0x6f, 0x77, 0x6e, 0x65, 0x72, + 0x47, 0x69, 0x74, 0x68, 0x75, 0x62, 0x54, 0x65, 0x61, 0x6d, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x64, + 0x6f, 0x63, 0x5f, 0x74, 0x61, 0x67, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x6a, 0x20, + 0x01, 
0x28, 0x09, 0x52, 0x0c, 0x64, 0x6f, 0x63, 0x54, 0x61, 0x67, 0x50, 0x72, 0x65, 0x66, 0x69, + 0x78, 0x12, 0x49, 0x0a, 0x0c, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x6b, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, + 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, + 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4c, 0x0a, 0x10, + 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, + 0x18, 0x6d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, + 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0f, 0x6c, 0x69, 0x62, 0x72, 0x61, + 0x72, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x49, 0x0a, 0x21, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x64, 0x6f, + 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, 0x72, 0x69, 0x18, + 0x6e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x65, 0x66, 0x65, + 0x72, 0x65, 0x6e, 0x63, 0x65, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x55, 0x72, 0x69, 0x12, 0x47, 0x0a, 0x20, 0x72, 0x65, 0x73, 0x74, 0x5f, 0x72, 0x65, + 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x6f, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x1d, 0x72, 0x65, 0x73, 0x74, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x44, 0x6f, + 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x69, 0x22, 0x9a, + 0x02, 0x0a, 0x0c, 0x4a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, + 0x27, 0x0a, 0x0f, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, + 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, + 0x79, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x5f, 0x0a, 0x13, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x4a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, + 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, + 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x1a, 0x44, 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x43, 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x14, 0x0a, 0x05, 
0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x49, 0x0a, 0x0b, 0x43, + 0x70, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, + 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x49, 0x0a, 0x0b, 0x50, 0x68, 0x70, 0x53, 0x65, 0x74, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, - 0x6e, 0x1a, 0x44, 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, - 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, - 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x49, 0x0a, 0x0b, 0x43, 0x70, 0x70, 0x53, 0x65, + 0x6e, 0x22, 0xc5, 0x02, 0x0a, 0x0e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, + 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, + 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x12, 0x64, 0x0a, 0x15, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, + 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x79, 0x74, + 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x45, 0x78, 0x70, 0x65, + 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, + 0x52, 0x14, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x1a, 0x90, 0x01, 0x0a, 0x14, 0x45, 0x78, 0x70, 0x65, 0x72, + 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, + 0x31, 0x0a, 0x15, 0x72, 0x65, 0x73, 0x74, 0x5f, 0x61, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x69, 0x6f, + 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, + 0x72, 0x65, 0x73, 0x74, 0x41, 0x73, 0x79, 0x6e, 0x63, 0x49, 0x6f, 0x45, 0x6e, 0x61, 0x62, 0x6c, + 0x65, 0x64, 0x12, 0x45, 0x0a, 0x1f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x5f, 0x70, + 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x69, 0x63, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x5f, 0x65, 0x6e, + 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1c, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x69, 0x63, 0x54, 0x79, 0x70, + 0x65, 0x73, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 
0x4a, 0x0a, 0x0c, 0x4e, 0x6f, 0x64, + 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, + 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xae, 0x04, 0x0a, 0x0e, 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, + 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, + 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x5a, 0x0a, 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74, 0x6e, + 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, + 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x0f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x12, 0x5d, 0x0a, 0x11, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x10, 0x72, + 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, + 0x2b, 0x0a, 0x11, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x69, 0x67, 0x6e, 0x6f, + 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, + 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x16, + 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x41, + 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x16, 0x68, 0x61, 0x6e, 0x64, 0x77, 0x72, + 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, + 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x15, 0x68, 0x61, 0x6e, 0x64, 0x77, 0x72, 0x69, 0x74, + 0x74, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x1a, 0x42, 0x0a, + 0x14, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x1a, 0x43, 0x0a, 0x15, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 
0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4a, 0x0a, 0x0c, 0x52, 0x75, 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, - 0x6f, 0x6e, 0x22, 0x49, 0x0a, 0x0b, 0x50, 0x68, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x6f, 0x6e, 0x22, 0xe4, 0x01, 0x0a, 0x0a, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, - 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x4c, 0x0a, - 0x0e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, - 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, - 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, - 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x4a, 0x0a, 0x0c, 0x4e, - 0x6f, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, - 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, - 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, - 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xae, 0x04, 0x0a, 0x0e, 0x44, 0x6f, 0x74, 0x6e, - 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, - 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, - 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, - 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x5a, 0x0a, 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, - 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, - 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, - 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x0f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x73, 0x12, 0x5d, 0x0a, 0x11, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, 0x72, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 
0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74, 0x6e, 0x65, - 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, - 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, - 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x69, 0x67, - 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x38, - 0x0a, 0x18, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x16, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x16, 0x68, 0x61, 0x6e, 0x64, - 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x15, 0x68, 0x61, 0x6e, 0x64, 0x77, 0x72, - 0x69, 0x74, 0x74, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x1a, - 0x42, 0x0a, 0x14, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, - 0x02, 0x38, 0x01, 0x1a, 0x43, 0x0a, 0x15, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, - 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4a, 0x0a, 0x0c, 0x52, 0x75, 0x62, 0x79, - 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, - 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, - 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, - 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x48, 0x0a, 0x0a, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, - 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, - 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xc2, - 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, - 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x49, 0x0a, - 0x0c, 0x6c, 0x6f, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, - 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0b, 0x6c, 0x6f, 0x6e, - 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x32, 0x0a, 0x15, 0x61, 0x75, 0x74, 0x6f, - 0x5f, 0x70, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, - 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x61, 0x75, 0x74, 0x6f, 0x50, 0x6f, 0x70, - 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x94, 0x02, 0x0a, - 0x0b, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x47, 0x0a, 0x12, - 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, - 0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, - 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, - 0x6c, 0x61, 0x79, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x70, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x4d, - 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x0e, 0x6d, 0x61, 0x78, - 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x56, 0x0a, + 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, + 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x73, 0x1a, 0x42, 0x0a, 0x14, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xc2, 0x03, 0x0a, 0x0e, 0x4d, 0x65, + 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x1a, 0x0a, 0x08, + 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x49, 0x0a, 0x0c, 0x6c, 0x6f, 0x6e, 0x67, + 0x5f, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x68, + 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x4c, 0x6f, 0x6e, 0x67, 0x52, + 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0b, 0x6c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, + 0x69, 0x6e, 0x67, 0x12, 0x32, 0x0a, 0x15, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x70, 0x6f, 0x70, 0x75, + 0x6c, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 
0x18, 0x03, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x13, 0x61, 0x75, 0x74, 0x6f, 0x50, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, + 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x94, 0x02, 0x0a, 0x0b, 0x4c, 0x6f, 0x6e, 0x67, + 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x47, 0x0a, 0x12, 0x69, 0x6e, 0x69, 0x74, 0x69, + 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, + 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, + 0x12, 0x32, 0x0a, 0x15, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x6d, + 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, + 0x13, 0x70, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, + 0x6c, 0x69, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, + 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x50, 0x6f, 0x6c, 0x6c, + 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x47, 0x0a, 0x12, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, + 0x6f, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x6d, 0x61, - 0x78, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x47, 0x0a, 0x12, 0x74, 0x6f, - 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x54, 0x69, 0x6d, 0x65, - 0x6f, 0x75, 0x74, 0x2a, 0xa3, 0x01, 0x0a, 0x19, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, - 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x2b, 0x0a, 0x27, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, - 0x41, 0x52, 0x59, 0x5f, 0x4f, 0x52, 0x47, 0x41, 0x4e, 0x49, 0x5a, 0x41, 0x54, 0x49, 0x4f, 0x4e, - 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x09, - 0x0a, 0x05, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x44, 0x53, - 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x48, 0x4f, 0x54, 0x4f, 0x53, 0x10, 0x03, 0x12, 0x0f, - 0x0a, 0x0b, 0x53, 0x54, 0x52, 0x45, 0x45, 0x54, 0x5f, 0x56, 0x49, 0x45, 0x57, 0x10, 0x04, 0x12, - 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x4f, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, 0x07, 0x0a, - 0x03, 0x47, 0x45, 0x4f, 0x10, 0x06, 0x12, 0x11, 0x0a, 0x0d, 0x47, 0x45, 0x4e, 0x45, 0x52, 0x41, - 0x54, 0x49, 0x56, 0x45, 0x5f, 0x41, 0x49, 0x10, 0x07, 0x2a, 0x67, 0x0a, 0x18, 0x43, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x26, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, - 
0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x44, 0x45, 0x53, 0x54, 0x49, 0x4e, 0x41, 0x54, - 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, - 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, 0x49, 0x54, 0x48, 0x55, 0x42, 0x10, 0x0a, 0x12, 0x13, 0x0a, - 0x0f, 0x50, 0x41, 0x43, 0x4b, 0x41, 0x47, 0x45, 0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47, 0x45, 0x52, - 0x10, 0x14, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x73, 0x69, 0x67, - 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x6d, - 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x3a, 0x43, - 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x1f, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, - 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x48, - 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x73, 0x63, 0x6f, - 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, 0x61, 0x75, - 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x3a, 0x44, 0x0a, 0x0b, 0x61, 0x70, 0x69, 0x5f, - 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xc1, 0xba, 0xab, 0xfa, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x69, - 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, - 0x42, 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, - 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, - 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, + 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x74, 0x6f, + 0x74, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x22, 0x34, + 0x0a, 0x18, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x47, 0x61, 0x70, 0x69, 0x63, + 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, + 0x74, 0x68, 0x6f, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x74, + 0x68, 0x6f, 0x64, 0x73, 0x2a, 0xa3, 0x01, 0x0a, 0x19, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, + 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 
0x2b, 0x0a, 0x27, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x49, 0x42, + 0x52, 0x41, 0x52, 0x59, 0x5f, 0x4f, 0x52, 0x47, 0x41, 0x4e, 0x49, 0x5a, 0x41, 0x54, 0x49, 0x4f, + 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, + 0x09, 0x0a, 0x05, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x44, + 0x53, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x48, 0x4f, 0x54, 0x4f, 0x53, 0x10, 0x03, 0x12, + 0x0f, 0x0a, 0x0b, 0x53, 0x54, 0x52, 0x45, 0x45, 0x54, 0x5f, 0x56, 0x49, 0x45, 0x57, 0x10, 0x04, + 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x4f, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, 0x07, + 0x0a, 0x03, 0x47, 0x45, 0x4f, 0x10, 0x06, 0x12, 0x11, 0x0a, 0x0d, 0x47, 0x45, 0x4e, 0x45, 0x52, + 0x41, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x41, 0x49, 0x10, 0x07, 0x2a, 0x67, 0x0a, 0x18, 0x43, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x44, 0x65, 0x73, 0x74, 0x69, + 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x26, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, + 0x5f, 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x44, 0x45, 0x53, 0x54, 0x49, 0x4e, 0x41, + 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, + 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, 0x49, 0x54, 0x48, 0x55, 0x42, 0x10, 0x0a, 0x12, 0x13, + 0x0a, 0x0f, 0x50, 0x41, 0x43, 0x4b, 0x41, 0x47, 0x45, 0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47, 0x45, + 0x52, 0x10, 0x14, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x73, 0x69, + 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, + 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x3a, + 0x43, 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x12, + 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x48, 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x73, 0x63, + 0x6f, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, 0x61, + 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x3a, 0x44, 0x0a, 0x0b, 0x61, 0x70, 0x69, + 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xc1, 0xba, 0xab, 0xfa, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, + 0x69, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x42, 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, + 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, } var ( @@ -1597,69 +1788,75 @@ func file_google_api_client_proto_rawDescGZIP() []byte { } var file_google_api_client_proto_enumTypes = make([]protoimpl.EnumInfo, 2) -var file_google_api_client_proto_msgTypes = make([]protoimpl.MessageInfo, 16) +var file_google_api_client_proto_msgTypes = make([]protoimpl.MessageInfo, 19) var file_google_api_client_proto_goTypes = []interface{}{ - (ClientLibraryOrganization)(0), // 0: google.api.ClientLibraryOrganization - (ClientLibraryDestination)(0), // 1: google.api.ClientLibraryDestination - (*CommonLanguageSettings)(nil), // 2: google.api.CommonLanguageSettings - (*ClientLibrarySettings)(nil), // 3: google.api.ClientLibrarySettings - (*Publishing)(nil), // 4: google.api.Publishing - (*JavaSettings)(nil), // 5: google.api.JavaSettings - (*CppSettings)(nil), // 6: google.api.CppSettings - (*PhpSettings)(nil), // 7: google.api.PhpSettings - (*PythonSettings)(nil), // 8: google.api.PythonSettings - (*NodeSettings)(nil), // 9: google.api.NodeSettings - (*DotnetSettings)(nil), // 10: google.api.DotnetSettings - (*RubySettings)(nil), // 11: google.api.RubySettings - (*GoSettings)(nil), // 12: google.api.GoSettings - (*MethodSettings)(nil), // 13: google.api.MethodSettings - nil, // 14: google.api.JavaSettings.ServiceClassNamesEntry - nil, // 15: google.api.DotnetSettings.RenamedServicesEntry - nil, // 16: google.api.DotnetSettings.RenamedResourcesEntry - (*MethodSettings_LongRunning)(nil), // 17: google.api.MethodSettings.LongRunning - (api.LaunchStage)(0), // 18: google.api.LaunchStage - (*durationpb.Duration)(nil), // 19: google.protobuf.Duration - (*descriptorpb.MethodOptions)(nil), // 20: google.protobuf.MethodOptions - (*descriptorpb.ServiceOptions)(nil), // 21: google.protobuf.ServiceOptions + (ClientLibraryOrganization)(0), // 0: google.api.ClientLibraryOrganization + (ClientLibraryDestination)(0), // 1: google.api.ClientLibraryDestination + (*CommonLanguageSettings)(nil), // 2: google.api.CommonLanguageSettings + (*ClientLibrarySettings)(nil), // 3: google.api.ClientLibrarySettings + (*Publishing)(nil), // 4: google.api.Publishing + (*JavaSettings)(nil), // 5: google.api.JavaSettings + (*CppSettings)(nil), // 6: google.api.CppSettings + (*PhpSettings)(nil), // 7: google.api.PhpSettings + (*PythonSettings)(nil), // 8: google.api.PythonSettings + (*NodeSettings)(nil), // 9: google.api.NodeSettings + (*DotnetSettings)(nil), // 10: google.api.DotnetSettings + (*RubySettings)(nil), // 11: google.api.RubySettings + (*GoSettings)(nil), // 12: google.api.GoSettings + (*MethodSettings)(nil), // 13: google.api.MethodSettings + (*SelectiveGapicGeneration)(nil), // 14: google.api.SelectiveGapicGeneration + nil, // 15: google.api.JavaSettings.ServiceClassNamesEntry + (*PythonSettings_ExperimentalFeatures)(nil), // 16: google.api.PythonSettings.ExperimentalFeatures + nil, // 17: google.api.DotnetSettings.RenamedServicesEntry + nil, // 18: google.api.DotnetSettings.RenamedResourcesEntry + nil, // 19: google.api.GoSettings.RenamedServicesEntry + (*MethodSettings_LongRunning)(nil), // 20: google.api.MethodSettings.LongRunning + (api.LaunchStage)(0), // 21: google.api.LaunchStage + (*durationpb.Duration)(nil), 
// 22: google.protobuf.Duration + (*descriptorpb.MethodOptions)(nil), // 23: google.protobuf.MethodOptions + (*descriptorpb.ServiceOptions)(nil), // 24: google.protobuf.ServiceOptions } var file_google_api_client_proto_depIdxs = []int32{ 1, // 0: google.api.CommonLanguageSettings.destinations:type_name -> google.api.ClientLibraryDestination - 18, // 1: google.api.ClientLibrarySettings.launch_stage:type_name -> google.api.LaunchStage - 5, // 2: google.api.ClientLibrarySettings.java_settings:type_name -> google.api.JavaSettings - 6, // 3: google.api.ClientLibrarySettings.cpp_settings:type_name -> google.api.CppSettings - 7, // 4: google.api.ClientLibrarySettings.php_settings:type_name -> google.api.PhpSettings - 8, // 5: google.api.ClientLibrarySettings.python_settings:type_name -> google.api.PythonSettings - 9, // 6: google.api.ClientLibrarySettings.node_settings:type_name -> google.api.NodeSettings - 10, // 7: google.api.ClientLibrarySettings.dotnet_settings:type_name -> google.api.DotnetSettings - 11, // 8: google.api.ClientLibrarySettings.ruby_settings:type_name -> google.api.RubySettings - 12, // 9: google.api.ClientLibrarySettings.go_settings:type_name -> google.api.GoSettings - 13, // 10: google.api.Publishing.method_settings:type_name -> google.api.MethodSettings - 0, // 11: google.api.Publishing.organization:type_name -> google.api.ClientLibraryOrganization - 3, // 12: google.api.Publishing.library_settings:type_name -> google.api.ClientLibrarySettings - 14, // 13: google.api.JavaSettings.service_class_names:type_name -> google.api.JavaSettings.ServiceClassNamesEntry - 2, // 14: google.api.JavaSettings.common:type_name -> google.api.CommonLanguageSettings - 2, // 15: google.api.CppSettings.common:type_name -> google.api.CommonLanguageSettings - 2, // 16: google.api.PhpSettings.common:type_name -> google.api.CommonLanguageSettings - 2, // 17: google.api.PythonSettings.common:type_name -> google.api.CommonLanguageSettings - 2, // 18: google.api.NodeSettings.common:type_name -> google.api.CommonLanguageSettings - 2, // 19: google.api.DotnetSettings.common:type_name -> google.api.CommonLanguageSettings - 15, // 20: google.api.DotnetSettings.renamed_services:type_name -> google.api.DotnetSettings.RenamedServicesEntry - 16, // 21: google.api.DotnetSettings.renamed_resources:type_name -> google.api.DotnetSettings.RenamedResourcesEntry - 2, // 22: google.api.RubySettings.common:type_name -> google.api.CommonLanguageSettings - 2, // 23: google.api.GoSettings.common:type_name -> google.api.CommonLanguageSettings - 17, // 24: google.api.MethodSettings.long_running:type_name -> google.api.MethodSettings.LongRunning - 19, // 25: google.api.MethodSettings.LongRunning.initial_poll_delay:type_name -> google.protobuf.Duration - 19, // 26: google.api.MethodSettings.LongRunning.max_poll_delay:type_name -> google.protobuf.Duration - 19, // 27: google.api.MethodSettings.LongRunning.total_poll_timeout:type_name -> google.protobuf.Duration - 20, // 28: google.api.method_signature:extendee -> google.protobuf.MethodOptions - 21, // 29: google.api.default_host:extendee -> google.protobuf.ServiceOptions - 21, // 30: google.api.oauth_scopes:extendee -> google.protobuf.ServiceOptions - 21, // 31: google.api.api_version:extendee -> google.protobuf.ServiceOptions - 32, // [32:32] is the sub-list for method output_type - 32, // [32:32] is the sub-list for method input_type - 32, // [32:32] is the sub-list for extension type_name - 28, // [28:32] is the sub-list for extension extendee - 0, // [0:28] is the sub-list 
for field type_name + 14, // 1: google.api.CommonLanguageSettings.selective_gapic_generation:type_name -> google.api.SelectiveGapicGeneration + 21, // 2: google.api.ClientLibrarySettings.launch_stage:type_name -> google.api.LaunchStage + 5, // 3: google.api.ClientLibrarySettings.java_settings:type_name -> google.api.JavaSettings + 6, // 4: google.api.ClientLibrarySettings.cpp_settings:type_name -> google.api.CppSettings + 7, // 5: google.api.ClientLibrarySettings.php_settings:type_name -> google.api.PhpSettings + 8, // 6: google.api.ClientLibrarySettings.python_settings:type_name -> google.api.PythonSettings + 9, // 7: google.api.ClientLibrarySettings.node_settings:type_name -> google.api.NodeSettings + 10, // 8: google.api.ClientLibrarySettings.dotnet_settings:type_name -> google.api.DotnetSettings + 11, // 9: google.api.ClientLibrarySettings.ruby_settings:type_name -> google.api.RubySettings + 12, // 10: google.api.ClientLibrarySettings.go_settings:type_name -> google.api.GoSettings + 13, // 11: google.api.Publishing.method_settings:type_name -> google.api.MethodSettings + 0, // 12: google.api.Publishing.organization:type_name -> google.api.ClientLibraryOrganization + 3, // 13: google.api.Publishing.library_settings:type_name -> google.api.ClientLibrarySettings + 15, // 14: google.api.JavaSettings.service_class_names:type_name -> google.api.JavaSettings.ServiceClassNamesEntry + 2, // 15: google.api.JavaSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 16: google.api.CppSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 17: google.api.PhpSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 18: google.api.PythonSettings.common:type_name -> google.api.CommonLanguageSettings + 16, // 19: google.api.PythonSettings.experimental_features:type_name -> google.api.PythonSettings.ExperimentalFeatures + 2, // 20: google.api.NodeSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 21: google.api.DotnetSettings.common:type_name -> google.api.CommonLanguageSettings + 17, // 22: google.api.DotnetSettings.renamed_services:type_name -> google.api.DotnetSettings.RenamedServicesEntry + 18, // 23: google.api.DotnetSettings.renamed_resources:type_name -> google.api.DotnetSettings.RenamedResourcesEntry + 2, // 24: google.api.RubySettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 25: google.api.GoSettings.common:type_name -> google.api.CommonLanguageSettings + 19, // 26: google.api.GoSettings.renamed_services:type_name -> google.api.GoSettings.RenamedServicesEntry + 20, // 27: google.api.MethodSettings.long_running:type_name -> google.api.MethodSettings.LongRunning + 22, // 28: google.api.MethodSettings.LongRunning.initial_poll_delay:type_name -> google.protobuf.Duration + 22, // 29: google.api.MethodSettings.LongRunning.max_poll_delay:type_name -> google.protobuf.Duration + 22, // 30: google.api.MethodSettings.LongRunning.total_poll_timeout:type_name -> google.protobuf.Duration + 23, // 31: google.api.method_signature:extendee -> google.protobuf.MethodOptions + 24, // 32: google.api.default_host:extendee -> google.protobuf.ServiceOptions + 24, // 33: google.api.oauth_scopes:extendee -> google.protobuf.ServiceOptions + 24, // 34: google.api.api_version:extendee -> google.protobuf.ServiceOptions + 35, // [35:35] is the sub-list for method output_type + 35, // [35:35] is the sub-list for method input_type + 35, // [35:35] is the sub-list for extension type_name + 31, // [31:35] is the sub-list for extension extendee + 
0, // [0:31] is the sub-list for field type_name } func init() { file_google_api_client_proto_init() } @@ -1812,7 +2009,31 @@ func file_google_api_client_proto_init() { return nil } } - file_google_api_client_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + file_google_api_client_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SelectiveGapicGeneration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PythonSettings_ExperimentalFeatures); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*MethodSettings_LongRunning); i { case 0: return &v.state @@ -1831,7 +2052,7 @@ func file_google_api_client_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_api_client_proto_rawDesc, NumEnums: 2, - NumMessages: 16, + NumMessages: 19, NumExtensions: 4, NumServices: 0, }, diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go index d339dfb02ac..a462e7d0132 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go @@ -121,6 +121,11 @@ type FieldInfo struct { // any API consumer, just documents the API's format for the field it is // applied to. Format FieldInfo_Format `protobuf:"varint,1,opt,name=format,proto3,enum=google.api.FieldInfo_Format" json:"format,omitempty"` + // The type(s) that the annotated, generic field may represent. + // + // Currently, this must only be used on fields of type `google.protobuf.Any`. + // Supporting other generic types may be considered in the future. + ReferencedTypes []*TypeReference `protobuf:"bytes,2,rep,name=referenced_types,json=referencedTypes,proto3" json:"referenced_types,omitempty"` } func (x *FieldInfo) Reset() { @@ -162,6 +167,70 @@ func (x *FieldInfo) GetFormat() FieldInfo_Format { return FieldInfo_FORMAT_UNSPECIFIED } +func (x *FieldInfo) GetReferencedTypes() []*TypeReference { + if x != nil { + return x.ReferencedTypes + } + return nil +} + +// A reference to a message type, for use in [FieldInfo][google.api.FieldInfo]. +type TypeReference struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the type that the annotated, generic field may represent. + // If the type is in the same protobuf package, the value can be the simple + // message name e.g., `"MyMessage"`. Otherwise, the value must be the + // fully-qualified message name e.g., `"google.library.v1.Book"`. + // + // If the type(s) are unknown to the service (e.g. the field accepts generic + // user input), use the wildcard `"*"` to denote this behavior. + // + // See [AIP-202](https://google.aip.dev/202#type-references) for more details. 
+ TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` +} + +func (x *TypeReference) Reset() { + *x = TypeReference{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_field_info_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TypeReference) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TypeReference) ProtoMessage() {} + +func (x *TypeReference) ProtoReflect() protoreflect.Message { + mi := &file_google_api_field_info_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TypeReference.ProtoReflect.Descriptor instead. +func (*TypeReference) Descriptor() ([]byte, []int) { + return file_google_api_field_info_proto_rawDescGZIP(), []int{1} +} + +func (x *TypeReference) GetTypeName() string { + if x != nil { + return x.TypeName + } + return "" +} + var file_google_api_field_info_proto_extTypes = []protoimpl.ExtensionInfo{ { ExtendedType: (*descriptorpb.FieldOptions)(nil), @@ -185,6 +254,13 @@ var ( // string actual_ip_address = 4 [ // (google.api.field_info).format = IPV4_OR_IPV6 // ]; + // google.protobuf.Any generic_field = 5 [ + // (google.api.field_info).referenced_types = {type_name: "ActualType"}, + // (google.api.field_info).referenced_types = {type_name: "OtherType"}, + // ]; + // google.protobuf.Any generic_user_input = 5 [ + // (google.api.field_info).referenced_types = {type_name: "*"}, + // ]; // // optional google.api.FieldInfo field_info = 291403980; E_FieldInfo = &file_google_api_field_info_proto_extTypes[0] @@ -197,30 +273,37 @@ var file_google_api_field_info_proto_rawDesc = []byte{ 0x6c, 0x64, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x94, 0x01, 0x0a, 0x09, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xda, 0x01, 0x0a, 0x09, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x34, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, - 0x2e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, - 0x51, 0x0a, 0x06, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x16, 0x0a, 0x12, 0x46, 0x4f, 0x52, - 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, - 0x00, 0x12, 0x09, 0x0a, 0x05, 0x55, 0x55, 0x49, 0x44, 0x34, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, - 0x49, 0x50, 0x56, 0x34, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x49, 0x50, 0x56, 0x36, 0x10, 0x03, - 0x12, 0x10, 0x0a, 0x0c, 0x49, 0x50, 0x56, 0x34, 0x5f, 0x4f, 0x52, 0x5f, 0x49, 0x50, 0x56, 0x36, - 0x10, 0x04, 0x3a, 0x57, 0x0a, 0x0a, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x69, 0x6e, 0x66, 0x6f, - 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, - 0xcc, 0xf1, 0xf9, 0x8a, 0x01, 
0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, - 0x52, 0x09, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x42, 0x6c, 0x0a, 0x0e, 0x63, - 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0e, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, - 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, - 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, + 0x2e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, + 0x44, 0x0a, 0x10, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x64, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, 0x66, 0x65, 0x72, + 0x65, 0x6e, 0x63, 0x65, 0x52, 0x0f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x64, + 0x54, 0x79, 0x70, 0x65, 0x73, 0x22, 0x51, 0x0a, 0x06, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, + 0x16, 0x0a, 0x12, 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, + 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x55, 0x55, 0x49, 0x44, 0x34, + 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x49, 0x50, 0x56, 0x34, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, + 0x49, 0x50, 0x56, 0x36, 0x10, 0x03, 0x12, 0x10, 0x0a, 0x0c, 0x49, 0x50, 0x56, 0x34, 0x5f, 0x4f, + 0x52, 0x5f, 0x49, 0x50, 0x56, 0x36, 0x10, 0x04, 0x22, 0x2c, 0x0a, 0x0d, 0x54, 0x79, 0x70, 0x65, + 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, + 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, + 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x3a, 0x57, 0x0a, 0x0a, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, + 0x69, 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0xcc, 0xf1, 0xf9, 0x8a, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x09, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x42, + 0x6c, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x42, 0x0e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, + 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -236,21 
+319,23 @@ func file_google_api_field_info_proto_rawDescGZIP() []byte { } var file_google_api_field_info_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_google_api_field_info_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_api_field_info_proto_msgTypes = make([]protoimpl.MessageInfo, 2) var file_google_api_field_info_proto_goTypes = []interface{}{ (FieldInfo_Format)(0), // 0: google.api.FieldInfo.Format (*FieldInfo)(nil), // 1: google.api.FieldInfo - (*descriptorpb.FieldOptions)(nil), // 2: google.protobuf.FieldOptions + (*TypeReference)(nil), // 2: google.api.TypeReference + (*descriptorpb.FieldOptions)(nil), // 3: google.protobuf.FieldOptions } var file_google_api_field_info_proto_depIdxs = []int32{ 0, // 0: google.api.FieldInfo.format:type_name -> google.api.FieldInfo.Format - 2, // 1: google.api.field_info:extendee -> google.protobuf.FieldOptions - 1, // 2: google.api.field_info:type_name -> google.api.FieldInfo - 3, // [3:3] is the sub-list for method output_type - 3, // [3:3] is the sub-list for method input_type - 2, // [2:3] is the sub-list for extension type_name - 1, // [1:2] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name + 2, // 1: google.api.FieldInfo.referenced_types:type_name -> google.api.TypeReference + 3, // 2: google.api.field_info:extendee -> google.protobuf.FieldOptions + 1, // 3: google.api.field_info:type_name -> google.api.FieldInfo + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 3, // [3:4] is the sub-list for extension type_name + 2, // [2:3] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name } func init() { file_google_api_field_info_proto_init() } @@ -271,6 +356,18 @@ func file_google_api_field_info_proto_init() { return nil } } + file_google_api_field_info_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TypeReference); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } type x struct{} out := protoimpl.TypeBuilder{ @@ -278,7 +375,7 @@ func file_google_api_field_info_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_api_field_info_proto_rawDesc, NumEnums: 1, - NumMessages: 1, + NumMessages: 2, NumExtensions: 1, NumServices: 0, }, diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go index 76ea76df330..ffb5838cb18 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go @@ -102,7 +102,7 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { return false } -// # gRPC Transcoding +// gRPC Transcoding // // gRPC Transcoding is a feature for mapping between a gRPC method and one or // more HTTP REST endpoints. 
It allows developers to build a single API service @@ -143,9 +143,8 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // // This enables an HTTP REST to gRPC mapping as below: // -// HTTP | gRPC -// -----|----- -// `GET /v1/messages/123456` | `GetMessage(name: "messages/123456")` +// - HTTP: `GET /v1/messages/123456` +// - gRPC: `GetMessage(name: "messages/123456")` // // Any fields in the request message which are not bound by the path template // automatically become HTTP query parameters if there is no HTTP request body. @@ -169,11 +168,9 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // // This enables a HTTP JSON to RPC mapping as below: // -// HTTP | gRPC -// -----|----- -// `GET /v1/messages/123456?revision=2&sub.subfield=foo` | -// `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield: -// "foo"))` +// - HTTP: `GET /v1/messages/123456?revision=2&sub.subfield=foo` +// - gRPC: `GetMessage(message_id: "123456" revision: 2 sub: +// SubMessage(subfield: "foo"))` // // Note that fields which are mapped to URL query parameters must have a // primitive type or a repeated primitive type or a non-repeated message type. @@ -203,10 +200,8 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // representation of the JSON in the request body is determined by // protos JSON encoding: // -// HTTP | gRPC -// -----|----- -// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: -// "123456" message { text: "Hi!" })` +// - HTTP: `PATCH /v1/messages/123456 { "text": "Hi!" }` +// - gRPC: `UpdateMessage(message_id: "123456" message { text: "Hi!" })` // // The special name `*` can be used in the body mapping to define that // every field not bound by the path template should be mapped to the @@ -228,10 +223,8 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // // The following HTTP JSON to RPC mapping is enabled: // -// HTTP | gRPC -// -----|----- -// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: -// "123456" text: "Hi!")` +// - HTTP: `PATCH /v1/messages/123456 { "text": "Hi!" }` +// - gRPC: `UpdateMessage(message_id: "123456" text: "Hi!")` // // Note that when using `*` in the body mapping, it is not possible to // have HTTP parameters, as all fields not bound by the path end in @@ -259,13 +252,13 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // // This enables the following two alternative HTTP JSON to RPC mappings: // -// HTTP | gRPC -// -----|----- -// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")` -// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id: -// "123456")` +// - HTTP: `GET /v1/messages/123456` +// - gRPC: `GetMessage(message_id: "123456")` // -// ## Rules for HTTP mapping +// - HTTP: `GET /v1/users/me/messages/123456` +// - gRPC: `GetMessage(user_id: "me" message_id: "123456")` +// +// # Rules for HTTP mapping // // 1. Leaf request fields (recursive expansion nested messages in the request // message) are classified into three categories: @@ -284,7 +277,7 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // request body, all // fields are passed via URL path and URL query parameters. // -// ### Path template syntax +// Path template syntax // // Template = "/" Segments [ Verb ] ; // Segments = Segment { "/" Segment } ; @@ -323,7 +316,7 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // Document](https://developers.google.com/discovery/v1/reference/apis) as // `{+var}`. 
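+//
+// For example, the first mapping above can be expressed directly in Go with
+// this package's generated types (a minimal sketch):
+//
+//	rule := &annotations.HttpRule{
+//		Selector: "example.v1.Messaging.GetMessage",
+//		Pattern: &annotations.HttpRule_Get{
+//			Get: "/v1/{name=messages/*}",
+//		},
+//	}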
// -// ## Using gRPC API Service Configuration +// # Using gRPC API Service Configuration // // gRPC API Service Configuration (service config) is a configuration language // for configuring a gRPC service to become a user-facing product. The @@ -338,15 +331,14 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // specified in the service config will override any matching transcoding // configuration in the proto. // -// Example: +// The following example selects a gRPC method and applies an `HttpRule` to it: // // http: // rules: -// # Selects a gRPC method and applies HttpRule to it. // - selector: example.v1.Messaging.GetMessage // get: /v1/messages/{message_id}/{sub.subfield} // -// ## Special notes +// # Special notes // // When gRPC Transcoding is used to map a gRPC to JSON REST endpoints, the // proto to JSON conversion must follow the [proto3 diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go index 7a3fd93fcd9..b5db279aebf 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go @@ -253,8 +253,13 @@ type ResourceDescriptor struct { History ResourceDescriptor_History `protobuf:"varint,4,opt,name=history,proto3,enum=google.api.ResourceDescriptor_History" json:"history,omitempty"` // The plural name used in the resource name and permission names, such as // 'projects' for the resource name of 'projects/{project}' and the permission - // name of 'cloudresourcemanager.googleapis.com/projects.get'. It is the same - // concept of the `plural` field in k8s CRD spec + // name of 'cloudresourcemanager.googleapis.com/projects.get'. One exception + // to this is for Nested Collections that have stuttering names, as defined + // in [AIP-122](https://google.aip.dev/122#nested-collections), where the + // collection ID in the resource name pattern does not necessarily directly + // match the `plural` value. + // + // It is the same concept of the `plural` field in k8s CRD spec // https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/ // // Note: The plural form is required even for singleton resources. 
See diff --git a/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go index e7d3805e365..f388426b08f 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go @@ -159,14 +159,14 @@ var file_google_api_httpbody_proto_rawDesc = []byte{ 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x34, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, - 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x68, 0x0a, 0x0e, 0x63, + 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x65, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0d, 0x48, 0x74, 0x74, 0x70, 0x42, 0x6f, 0x64, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x62, 0x6f, - 0x64, 0x79, 0x3b, 0x68, 0x74, 0x74, 0x70, 0x62, 0x6f, 0x64, 0x79, 0xf8, 0x01, 0x01, 0xa2, 0x02, - 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x64, 0x79, 0x3b, 0x68, 0x74, 0x74, 0x70, 0x62, 0x6f, 0x64, 0x79, 0xa2, 0x02, 0x04, 0x47, 0x41, + 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md index 0854d298e41..d9bfa6e1e7c 100644 --- a/vendor/google.golang.org/grpc/CONTRIBUTING.md +++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md @@ -4,7 +4,7 @@ We definitely welcome your patches and contributions to gRPC! Please read the gR organization's [governance rules](https://github.com/grpc/grpc-community/blob/master/governance.md) and [contribution guidelines](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md) before proceeding. -If you are new to github, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/) +If you are new to GitHub, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/) ## Legal requirements @@ -25,8 +25,8 @@ How to get your contributions merged smoothly and quickly. is a great place to start. These issues are well-documented and usually can be resolved with a single pull request. -- If you are adding a new file, make sure it has the copyright message template - at the top as a comment. You can copy over the message from an existing file +- If you are adding a new file, make sure it has the copyright message template + at the top as a comment. You can copy over the message from an existing file and update the year. - The grpc package should only depend on standard Go packages and a small number @@ -39,12 +39,12 @@ How to get your contributions merged smoothly and quickly. proposal](https://github.com/grpc/proposal). - Provide a good **PR description** as a record of **what** change is being made - and **why** it was made. Link to a github issue if it exists. + and **why** it was made. Link to a GitHub issue if it exists. 
-- If you want to fix formatting or style, consider whether your changes are an - obvious improvement or might be considered a personal preference. If a style - change is based on preference, it likely will not be accepted. If it corrects - widely agreed-upon anti-patterns, then please do create a PR and explain the +- If you want to fix formatting or style, consider whether your changes are an + obvious improvement or might be considered a personal preference. If a style + change is based on preference, it likely will not be accepted. If it corrects + widely agreed-upon anti-patterns, then please do create a PR and explain the benefits of the change. - Unless your PR is trivial, you should expect there will be reviewer comments diff --git a/vendor/google.golang.org/grpc/MAINTAINERS.md b/vendor/google.golang.org/grpc/MAINTAINERS.md index 6a8a07781ae..5d4096d46a0 100644 --- a/vendor/google.golang.org/grpc/MAINTAINERS.md +++ b/vendor/google.golang.org/grpc/MAINTAINERS.md @@ -9,21 +9,28 @@ for general contribution guidelines. ## Maintainers (in alphabetical order) +- [aranjans](https://github.com/aranjans), Google LLC +- [arjan-bal](https://github.com/arjan-bal), Google LLC +- [arvindbr8](https://github.com/arvindbr8), Google LLC - [atollena](https://github.com/atollena), Datadog, Inc. -- [cesarghali](https://github.com/cesarghali), Google LLC - [dfawley](https://github.com/dfawley), Google LLC - [easwars](https://github.com/easwars), Google LLC -- [menghanl](https://github.com/menghanl), Google LLC -- [srini100](https://github.com/srini100), Google LLC +- [erm-g](https://github.com/erm-g), Google LLC +- [gtcooke94](https://github.com/gtcooke94), Google LLC +- [purnesh42h](https://github.com/purnesh42h), Google LLC +- [zasweq](https://github.com/zasweq), Google LLC ## Emeritus Maintainers (in alphabetical order) -- [adelez](https://github.com/adelez), Google LLC -- [canguler](https://github.com/canguler), Google LLC -- [iamqizhao](https://github.com/iamqizhao), Google LLC -- [jadekler](https://github.com/jadekler), Google LLC -- [jtattermusch](https://github.com/jtattermusch), Google LLC -- [lyuxuan](https://github.com/lyuxuan), Google LLC -- [makmukhi](https://github.com/makmukhi), Google LLC -- [matt-kwong](https://github.com/matt-kwong), Google LLC -- [nicolasnoble](https://github.com/nicolasnoble), Google LLC -- [yongni](https://github.com/yongni), Google LLC +- [adelez](https://github.com/adelez) +- [canguler](https://github.com/canguler) +- [cesarghali](https://github.com/cesarghali) +- [iamqizhao](https://github.com/iamqizhao) +- [jeanbza](https://github.com/jeanbza) +- [jtattermusch](https://github.com/jtattermusch) +- [lyuxuan](https://github.com/lyuxuan) +- [makmukhi](https://github.com/makmukhi) +- [matt-kwong](https://github.com/matt-kwong) +- [menghanl](https://github.com/menghanl) +- [nicolasnoble](https://github.com/nicolasnoble) +- [srini100](https://github.com/srini100) +- [yongni](https://github.com/yongni) diff --git a/vendor/google.golang.org/grpc/SECURITY.md b/vendor/google.golang.org/grpc/SECURITY.md index be6e108705c..abab279379b 100644 --- a/vendor/google.golang.org/grpc/SECURITY.md +++ b/vendor/google.golang.org/grpc/SECURITY.md @@ -1,3 +1,3 @@ # Security Policy -For information on gRPC Security Policy and reporting potentional security issues, please see [gRPC CVE Process](https://github.com/grpc/proposal/blob/master/P4-grpc-cve-process.md). 
+For information on gRPC Security Policy and reporting potential security issues, please see [gRPC CVE Process](https://github.com/grpc/proposal/blob/master/P4-grpc-cve-process.md). diff --git a/vendor/google.golang.org/grpc/backoff/backoff.go b/vendor/google.golang.org/grpc/backoff/backoff.go index 0787d0b50ce..d7b40b7cb66 100644 --- a/vendor/google.golang.org/grpc/backoff/backoff.go +++ b/vendor/google.golang.org/grpc/backoff/backoff.go @@ -39,7 +39,7 @@ type Config struct { MaxDelay time.Duration } -// DefaultConfig is a backoff configuration with the default values specfied +// DefaultConfig is a backoff configuration with the default values specified // at https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. // // This should be useful for callers who want to configure backoff with diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index f391744f729..3a2092f1056 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -30,6 +30,7 @@ import ( "google.golang.org/grpc/channelz" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" + estats "google.golang.org/grpc/experimental/stats" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" "google.golang.org/grpc/metadata" @@ -72,8 +73,21 @@ func unregisterForTesting(name string) { delete(m, name) } +// connectedAddress returns the connected address for a SubConnState. The +// address is only valid if the state is READY. +func connectedAddress(scs SubConnState) resolver.Address { + return scs.connectedAddress +} + +// setConnectedAddress sets the connected address for a SubConnState. +func setConnectedAddress(scs *SubConnState, addr resolver.Address) { + scs.connectedAddress = addr +} + func init() { internal.BalancerUnregister = unregisterForTesting + internal.ConnectedAddress = connectedAddress + internal.SetConnectedAddress = setConnectedAddress } // Get returns the resolver builder registered with the given name. @@ -116,7 +130,7 @@ type SubConn interface { // UpdateAddresses updates the addresses used in this SubConn. // gRPC checks if currently-connected address is still in the new list. // If it's in the list, the connection will be kept. - // If it's not in the list, the connection will gracefully closed, and + // If it's not in the list, the connection will gracefully close, and // a new connection will be created. // // This will trigger a state transition for the SubConn. @@ -128,8 +142,11 @@ type SubConn interface { Connect() // GetOrBuildProducer returns a reference to the existing Producer for this // ProducerBuilder in this SubConn, or, if one does not currently exist, - // creates a new one and returns it. Returns a close function which must - // be called when the Producer is no longer needed. + // creates a new one and returns it. Returns a close function which may be + // called when the Producer is no longer needed. Otherwise the producer + // will automatically be closed upon connection loss or subchannel close. + // Should only be called on a SubConn in state Ready. Otherwise the + // producer will be unable to create streams. GetOrBuildProducer(ProducerBuilder) (p Producer, close func()) // Shutdown shuts down the SubConn gracefully. Any started RPCs will be // allowed to complete. No future calls should be made on the SubConn. @@ -243,6 +260,10 @@ type BuildOptions struct { // same resolver.Target as passed to the resolver. 
See the documentation for // the resolver.Target type for details about what it contains. Target resolver.Target + // MetricsRecorder is the metrics recorder that balancers can use to record + // metrics. Balancer implementations which do not register metrics on + // metrics registry and record on them can ignore this field. + MetricsRecorder estats.MetricsRecorder } // Builder creates a balancer. @@ -410,6 +431,9 @@ type SubConnState struct { // ConnectionError is set if the ConnectivityState is TransientFailure, // describing the reason the SubConn failed. Otherwise, it is nil. ConnectionError error + // connectedAddr contains the connected address when ConnectivityState is + // Ready. Otherwise, it is indeterminate. + connectedAddress resolver.Address } // ClientConnState describes the state of a ClientConn relevant to the @@ -431,8 +455,10 @@ type ProducerBuilder interface { // Build creates a Producer. The first parameter is always a // grpc.ClientConnInterface (a type to allow creating RPCs/streams on the // associated SubConn), but is declared as `any` to avoid a dependency - // cycle. Should also return a close function that will be called when all - // references to the Producer have been given up. + // cycle. Build also returns a close function that will be called when all + // references to the Producer have been given up for a SubConn, or when a + // connectivity state change occurs on the SubConn. The close function + // should always block until all asynchronous cleanup work is completed. Build(grpcClientConnInterface any) (p Producer, close func()) } diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go index a7f1eeec8e6..d5ed172ae69 100644 --- a/vendor/google.golang.org/grpc/balancer/base/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -36,7 +36,7 @@ type baseBuilder struct { config Config } -func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { +func (bb *baseBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer { bal := &baseBalancer{ cc: cc, pickerBuilder: bb.pickerBuilder, @@ -133,7 +133,7 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { } } // If resolver state contains no addresses, return an error so ClientConn - // will trigger re-resolve. Also records this as an resolver error, so when + // will trigger re-resolve. Also records this as a resolver error, so when // the overall state turns transient failure, the error message will have // the zero address information. if len(s.ResolverState.Addresses) == 0 { @@ -259,6 +259,6 @@ type errPicker struct { err error // Pick() always returns this err. } -func (p *errPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { +func (p *errPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { return balancer.PickResult{}, p.err } diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go index 0adc98866c0..52f54e6a016 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go @@ -19,8 +19,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.34.1 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v5.27.1 // source: grpc/lb/v1/load_balancer.proto package grpc_lb_v1 @@ -780,7 +780,7 @@ func file_grpc_lb_v1_load_balancer_proto_rawDescGZIP() []byte { } var file_grpc_lb_v1_load_balancer_proto_msgTypes = make([]protoimpl.MessageInfo, 9) -var file_grpc_lb_v1_load_balancer_proto_goTypes = []interface{}{ +var file_grpc_lb_v1_load_balancer_proto_goTypes = []any{ (*LoadBalanceRequest)(nil), // 0: grpc.lb.v1.LoadBalanceRequest (*InitialLoadBalanceRequest)(nil), // 1: grpc.lb.v1.InitialLoadBalanceRequest (*ClientStatsPerToken)(nil), // 2: grpc.lb.v1.ClientStatsPerToken @@ -818,7 +818,7 @@ func file_grpc_lb_v1_load_balancer_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_grpc_lb_v1_load_balancer_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_grpc_lb_v1_load_balancer_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*LoadBalanceRequest); i { case 0: return &v.state @@ -830,7 +830,7 @@ func file_grpc_lb_v1_load_balancer_proto_init() { return nil } } - file_grpc_lb_v1_load_balancer_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_grpc_lb_v1_load_balancer_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*InitialLoadBalanceRequest); i { case 0: return &v.state @@ -842,7 +842,7 @@ func file_grpc_lb_v1_load_balancer_proto_init() { return nil } } - file_grpc_lb_v1_load_balancer_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_grpc_lb_v1_load_balancer_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*ClientStatsPerToken); i { case 0: return &v.state @@ -854,7 +854,7 @@ func file_grpc_lb_v1_load_balancer_proto_init() { return nil } } - file_grpc_lb_v1_load_balancer_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_grpc_lb_v1_load_balancer_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*ClientStats); i { case 0: return &v.state @@ -866,7 +866,7 @@ func file_grpc_lb_v1_load_balancer_proto_init() { return nil } } - file_grpc_lb_v1_load_balancer_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_grpc_lb_v1_load_balancer_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*LoadBalanceResponse); i { case 0: return &v.state @@ -878,7 +878,7 @@ func file_grpc_lb_v1_load_balancer_proto_init() { return nil } } - file_grpc_lb_v1_load_balancer_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_grpc_lb_v1_load_balancer_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*FallbackResponse); i { case 0: return &v.state @@ -890,7 +890,7 @@ func file_grpc_lb_v1_load_balancer_proto_init() { return nil } } - file_grpc_lb_v1_load_balancer_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_grpc_lb_v1_load_balancer_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*InitialLoadBalanceResponse); i { case 0: return &v.state @@ -902,7 +902,7 @@ func file_grpc_lb_v1_load_balancer_proto_init() { return nil } } - file_grpc_lb_v1_load_balancer_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_grpc_lb_v1_load_balancer_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*ServerList); i { case 0: return &v.state @@ -914,7 +914,7 @@ func file_grpc_lb_v1_load_balancer_proto_init() { return nil } } - file_grpc_lb_v1_load_balancer_proto_msgTypes[8].Exporter = func(v 
interface{}, i int) interface{} { + file_grpc_lb_v1_load_balancer_proto_msgTypes[8].Exporter = func(v any, i int) any { switch v := v.(*Server); i { case 0: return &v.state @@ -927,11 +927,11 @@ func file_grpc_lb_v1_load_balancer_proto_init() { } } } - file_grpc_lb_v1_load_balancer_proto_msgTypes[0].OneofWrappers = []interface{}{ + file_grpc_lb_v1_load_balancer_proto_msgTypes[0].OneofWrappers = []any{ (*LoadBalanceRequest_InitialRequest)(nil), (*LoadBalanceRequest_ClientStats)(nil), } - file_grpc_lb_v1_load_balancer_proto_msgTypes[4].OneofWrappers = []interface{}{ + file_grpc_lb_v1_load_balancer_proto_msgTypes[4].OneofWrappers = []any{ (*LoadBalanceResponse_InitialResponse)(nil), (*LoadBalanceResponse_ServerList)(nil), (*LoadBalanceResponse_FallbackResponse)(nil), diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go index 57a792a7b48..84e6a25056b 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go @@ -19,8 +19,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.4.0 -// - protoc v4.25.2 +// - protoc-gen-go-grpc v1.5.1 +// - protoc v5.27.1 // source: grpc/lb/v1/load_balancer.proto package grpc_lb_v1 @@ -34,8 +34,8 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.62.0 or later. -const _ = grpc.SupportPackageIsVersion8 +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 const ( LoadBalancer_BalanceLoad_FullMethodName = "/grpc.lb.v1.LoadBalancer/BalanceLoad" @@ -46,7 +46,7 @@ const ( // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. type LoadBalancerClient interface { // Bidirectional rpc to get a list of servers. - BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (LoadBalancer_BalanceLoadClient, error) + BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[LoadBalanceRequest, LoadBalanceResponse], error) } type loadBalancerClient struct { @@ -57,53 +57,38 @@ func NewLoadBalancerClient(cc grpc.ClientConnInterface) LoadBalancerClient { return &loadBalancerClient{cc} } -func (c *loadBalancerClient) BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (LoadBalancer_BalanceLoadClient, error) { +func (c *loadBalancerClient) BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[LoadBalanceRequest, LoadBalanceResponse], error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) stream, err := c.cc.NewStream(ctx, &LoadBalancer_ServiceDesc.Streams[0], LoadBalancer_BalanceLoad_FullMethodName, cOpts...) 
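+	// NOTE: the raw stream is wrapped below in
+	// grpc.GenericClientStream[LoadBalanceRequest, LoadBalanceResponse],
+	// whose typed Send and Recv methods replace the hand-written
+	// loadBalancerBalanceLoadClient wrapper that this regeneration removes.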
if err != nil { return nil, err } - x := &loadBalancerBalanceLoadClient{ClientStream: stream} + x := &grpc.GenericClientStream[LoadBalanceRequest, LoadBalanceResponse]{ClientStream: stream} return x, nil } -type LoadBalancer_BalanceLoadClient interface { - Send(*LoadBalanceRequest) error - Recv() (*LoadBalanceResponse, error) - grpc.ClientStream -} - -type loadBalancerBalanceLoadClient struct { - grpc.ClientStream -} - -func (x *loadBalancerBalanceLoadClient) Send(m *LoadBalanceRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *loadBalancerBalanceLoadClient) Recv() (*LoadBalanceResponse, error) { - m := new(LoadBalanceResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type LoadBalancer_BalanceLoadClient = grpc.BidiStreamingClient[LoadBalanceRequest, LoadBalanceResponse] // LoadBalancerServer is the server API for LoadBalancer service. // All implementations should embed UnimplementedLoadBalancerServer -// for forward compatibility +// for forward compatibility. type LoadBalancerServer interface { // Bidirectional rpc to get a list of servers. - BalanceLoad(LoadBalancer_BalanceLoadServer) error + BalanceLoad(grpc.BidiStreamingServer[LoadBalanceRequest, LoadBalanceResponse]) error } -// UnimplementedLoadBalancerServer should be embedded to have forward compatible implementations. -type UnimplementedLoadBalancerServer struct { -} +// UnimplementedLoadBalancerServer should be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedLoadBalancerServer struct{} -func (UnimplementedLoadBalancerServer) BalanceLoad(LoadBalancer_BalanceLoadServer) error { +func (UnimplementedLoadBalancerServer) BalanceLoad(grpc.BidiStreamingServer[LoadBalanceRequest, LoadBalanceResponse]) error { return status.Errorf(codes.Unimplemented, "method BalanceLoad not implemented") } +func (UnimplementedLoadBalancerServer) testEmbeddedByValue() {} // UnsafeLoadBalancerServer may be embedded to opt out of forward compatibility for this service. // Use of this interface is not recommended, as added methods to LoadBalancerServer will @@ -113,34 +98,22 @@ type UnsafeLoadBalancerServer interface { } func RegisterLoadBalancerServer(s grpc.ServiceRegistrar, srv LoadBalancerServer) { + // If the following call panics, it indicates UnimplementedLoadBalancerServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. 
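+	// For example (hypothetical user code), embedding by value is safe:
+	//
+	//	type lbServer struct {
+	//		UnimplementedLoadBalancerServer // embedded by value
+	//	}
+	//
+	// whereas embedding a nil *UnimplementedLoadBalancerServer by pointer
+	// would make the call below panic here, at registration time.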
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } s.RegisterService(&LoadBalancer_ServiceDesc, srv) } func _LoadBalancer_BalanceLoad_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(LoadBalancerServer).BalanceLoad(&loadBalancerBalanceLoadServer{ServerStream: stream}) -} - -type LoadBalancer_BalanceLoadServer interface { - Send(*LoadBalanceResponse) error - Recv() (*LoadBalanceRequest, error) - grpc.ServerStream -} - -type loadBalancerBalanceLoadServer struct { - grpc.ServerStream + return srv.(LoadBalancerServer).BalanceLoad(&grpc.GenericServerStream[LoadBalanceRequest, LoadBalanceResponse]{ServerStream: stream}) } -func (x *loadBalancerBalanceLoadServer) Send(m *LoadBalanceResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *loadBalancerBalanceLoadServer) Recv() (*LoadBalanceRequest, error) { - m := new(LoadBalanceRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type LoadBalancer_BalanceLoadServer = grpc.BidiStreamingServer[LoadBalanceRequest, LoadBalanceResponse] // LoadBalancer_ServiceDesc is the grpc.ServiceDesc for LoadBalancer service. // It's only intended for direct use with grpc.RegisterService, diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go index 47a3e938dcf..0770b88e96d 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go @@ -197,7 +197,7 @@ type lbBalancer struct { // manualResolver is used in the remote LB ClientConn inside grpclb. When // resolved address updates are received by grpclb, filtered updates will be - // send to remote LB ClientConn through this resolver. + // sent to remote LB ClientConn through this resolver. manualResolver *manual.Resolver // The ClientConn to talk to the remote balancer. ccRemoteLB *remoteBalancerCCWrapper @@ -219,7 +219,7 @@ type lbBalancer struct { // All backends addresses, with metadata set to nil. This list contains all // backend addresses in the same order and with the same duplicates as in // serverlist. When generating picker, a SubConn slice with the same order - // but with only READY SCs will be gerenated. + // but with only READY SCs will be generated. backendAddrsWithoutMetadata []resolver.Address // Roundrobin functionalities. state connectivity.State diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go b/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go new file mode 100644 index 00000000000..c5197894584 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go @@ -0,0 +1,24 @@ +/* + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +// Package internal contains code internal to the pickfirst package. +package internal + +import "math/rand" + +// RandShuffle pseudo-randomizes the order of addresses. +var RandShuffle = rand.Shuffle diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go index 07527603f1d..e069346a756 100644 --- a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go +++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go @@ -26,18 +26,23 @@ import ( "math/rand" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/pickfirst/internal" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/envconfig" internalgrpclog "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" + + _ "google.golang.org/grpc/balancer/pickfirst/pickfirstleaf" // For automatically registering the new pickfirst if required. ) func init() { + if envconfig.NewPickFirstEnabled { + return + } balancer.Register(pickfirstBuilder{}) - internal.ShuffleAddressListForTesting = func(n int, swap func(i, j int)) { rand.Shuffle(n, swap) } } var logger = grpclog.Component("pick-first-lb") @@ -50,7 +55,7 @@ const ( type pickfirstBuilder struct{} -func (pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { +func (pickfirstBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer { b := &pickfirstBalancer{cc: cc} b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b)) return b @@ -103,10 +108,13 @@ func (b *pickfirstBalancer) ResolverError(err error) { }) } +// Shuffler is an interface for shuffling an address list. type Shuffler interface { ShuffleAddressListForTesting(n int, swap func(i, j int)) } +// ShuffleAddressListForTesting pseudo-randomizes the order of addresses. n +// is the number of elements. swap swaps the elements with indexes i and j. func ShuffleAddressListForTesting(n int, swap func(i, j int)) { rand.Shuffle(n, swap) } func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { @@ -140,7 +148,7 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState // within each endpoint. - A61 if cfg.ShuffleAddressList { endpoints = append([]resolver.Endpoint{}, endpoints...) - internal.ShuffleAddressListForTesting.(func(int, func(int, int)))(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] }) + internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] }) } // "Flatten the list by concatenating the ordered list of addresses for each @@ -155,7 +163,7 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState // Endpoints not set, process addresses until we migrate resolver // emissions fully to Endpoints. The top channel does wrap emitted // addresses with endpoints, however some balancers such as weighted - // target do not forwarrd the corresponding correct endpoints down/split + // target do not forward the corresponding correct endpoints down/split // endpoints properly. Once all balancers correctly forward endpoints // down, can delete this else conditional. 
addrs = state.ResolverState.Addresses diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go new file mode 100644 index 00000000000..985b6edc7f4 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go @@ -0,0 +1,625 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package pickfirstleaf contains the pick_first load balancing policy which +// will be the universal leaf policy after dualstack changes are implemented. +// +// # Experimental +// +// Notice: This package is EXPERIMENTAL and may be changed or removed in a +// later release. +package pickfirstleaf + +import ( + "encoding/json" + "errors" + "fmt" + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/pickfirst/internal" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/envconfig" + internalgrpclog "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +func init() { + if envconfig.NewPickFirstEnabled { + // Register as the default pick_first balancer. + Name = "pick_first" + } + balancer.Register(pickfirstBuilder{}) +} + +var ( + logger = grpclog.Component("pick-first-leaf-lb") + // Name is the name of the pick_first_leaf balancer. + // It is changed to "pick_first" in init() if this balancer is to be + // registered as the default pickfirst. + Name = "pick_first_leaf" +) + +// TODO: change to pick-first when this becomes the default pick_first policy. +const logPrefix = "[pick-first-leaf-lb %p] " + +type pickfirstBuilder struct{} + +func (pickfirstBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer { + b := &pickfirstBalancer{ + cc: cc, + addressList: addressList{}, + subConns: resolver.NewAddressMap(), + state: connectivity.Connecting, + mu: sync.Mutex{}, + } + b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b)) + return b +} + +func (b pickfirstBuilder) Name() string { + return Name +} + +func (pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + var cfg pfConfig + if err := json.Unmarshal(js, &cfg); err != nil { + return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err) + } + return cfg, nil +} + +type pfConfig struct { + serviceconfig.LoadBalancingConfig `json:"-"` + + // If set to true, instructs the LB policy to shuffle the order of the list + // of endpoints received from the name resolver before attempting to + // connect to them. + ShuffleAddressList bool `json:"shuffleAddressList"` +} + +// scData keeps track of the current state of the subConn. +// It is not safe for concurrent access. 
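+// scData is created exclusively via newSCData below, which also installs the
+// StateListener that funnels all state changes for the SubConn into
+// (*pickfirstBalancer).updateSubConnState.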
+type scData struct {
+	// The following fields are initialized at build time and read-only after
+	// that.
+	subConn balancer.SubConn
+	addr    resolver.Address
+
+	state   connectivity.State
+	lastErr error
+}
+
+func (b *pickfirstBalancer) newSCData(addr resolver.Address) (*scData, error) {
+	sd := &scData{
+		state: connectivity.Idle,
+		addr:  addr,
+	}
+	sc, err := b.cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{
+		StateListener: func(state balancer.SubConnState) {
+			b.updateSubConnState(sd, state)
+		},
+	})
+	if err != nil {
+		return nil, err
+	}
+	sd.subConn = sc
+	return sd, nil
+}
+
+type pickfirstBalancer struct {
+	// The following fields are initialized at build time and read-only after
+	// that and therefore do not need to be guarded by a mutex.
+	logger *internalgrpclog.PrefixLogger
+	cc     balancer.ClientConn
+
+	// The mutex is used to ensure synchronization of updates triggered
+	// from the idle picker and the already serialized resolver,
+	// SubConn state updates.
+	mu    sync.Mutex
+	state connectivity.State
+	// scData for active subconns mapped by address.
+	subConns    *resolver.AddressMap
+	addressList addressList
+	firstPass   bool
+	numTF       int
+}
+
+// ResolverError is called by the ClientConn when the name resolver produces
+// an error or when pickfirst determined the resolver update to be invalid.
+func (b *pickfirstBalancer) ResolverError(err error) {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	b.resolverErrorLocked(err)
+}
+
+func (b *pickfirstBalancer) resolverErrorLocked(err error) {
+	if b.logger.V(2) {
+		b.logger.Infof("Received error from the name resolver: %v", err)
+	}
+
+	// The picker will not change since the balancer does not currently
+	// report an error. If the balancer hasn't received a single good resolver
+	// update yet, transition to TRANSIENT_FAILURE.
+	if b.state != connectivity.TransientFailure && b.addressList.size() > 0 {
+		if b.logger.V(2) {
+			b.logger.Infof("Ignoring resolver error because balancer is using a previous good update.")
+		}
+		return
+	}
+
+	b.cc.UpdateState(balancer.State{
+		ConnectivityState: connectivity.TransientFailure,
+		Picker:            &picker{err: fmt.Errorf("name resolver error: %v", err)},
+	})
+}
+
+func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 {
+		// Cleanup state pertaining to the previous resolver state.
+		// Treat an empty address list like an error by calling b.ResolverError.
+		b.state = connectivity.TransientFailure
+		b.closeSubConnsLocked()
+		b.addressList.updateAddrs(nil)
+		b.resolverErrorLocked(errors.New("produced zero addresses"))
+		return balancer.ErrBadResolverState
+	}
+	cfg, ok := state.BalancerConfig.(pfConfig)
+	if state.BalancerConfig != nil && !ok {
+		return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v: %w", state.BalancerConfig, state.BalancerConfig, balancer.ErrBadResolverState)
+	}
+
+	if b.logger.V(2) {
+		b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState))
+	}
+
+	var newAddrs []resolver.Address
+	if endpoints := state.ResolverState.Endpoints; len(endpoints) != 0 {
+		// Perform the optional shuffling described in gRFC A62. The shuffling
+		// will change the order of endpoints but not touch the order of the
+		// addresses within each endpoint. - A61
+		if cfg.ShuffleAddressList {
+			endpoints = append([]resolver.Endpoint{}, endpoints...)
+			internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] })
+		}
+
+		// "Flatten the list by concatenating the ordered list of addresses for
+		// each of the endpoints, in order." - A61
+		for _, endpoint := range endpoints {
+			// "In the flattened list, interleave addresses from the two address
+			// families, as per RFC-8305 section 4." - A61
+			// TODO: support the above language.
+			newAddrs = append(newAddrs, endpoint.Addresses...)
+		}
+	} else {
+		// Endpoints not set, process addresses until we migrate resolver
+		// emissions fully to Endpoints. The top channel does wrap emitted
+		// addresses with endpoints, however some balancers such as weighted
+		// target do not forward the corresponding correct endpoints down/split
+		// endpoints properly. Once all balancers correctly forward endpoints
+		// down, can delete this else conditional.
+		newAddrs = state.ResolverState.Addresses
+		if cfg.ShuffleAddressList {
+			newAddrs = append([]resolver.Address{}, newAddrs...)
+			internal.RandShuffle(len(newAddrs), func(i, j int) { newAddrs[i], newAddrs[j] = newAddrs[j], newAddrs[i] })
+		}
+	}
+
+	// If an address appears in multiple endpoints or in the same endpoint
+	// multiple times, we keep it only once. We will create only one SubConn
+	// for the address because an AddressMap is used to store SubConns.
+	// Not de-duplicating would result in attempting to connect to the same
+	// SubConn multiple times in the same pass. We don't want this.
+	newAddrs = deDupAddresses(newAddrs)
+
+	// Since we have a new set of addresses, we are again at first pass.
+	b.firstPass = true
+
+	// If the previous ready SubConn exists in the new address list,
+	// keep this connection and don't create new SubConns.
+	prevAddr := b.addressList.currentAddress()
+	prevAddrsCount := b.addressList.size()
+	b.addressList.updateAddrs(newAddrs)
+	if b.state == connectivity.Ready && b.addressList.seekTo(prevAddr) {
+		return nil
+	}
+
+	b.reconcileSubConnsLocked(newAddrs)
+	// If it's the first resolver update or the balancer was already READY
+	// (but the new address list does not contain the ready SubConn) or
+	// CONNECTING, enter CONNECTING.
+	// We may be in TRANSIENT_FAILURE due to a previous empty address list,
+	// we should still enter CONNECTING because the sticky TF behaviour
+	// mentioned in A62 applies only when the TRANSIENT_FAILURE is reported
+	// due to connectivity failures.
+	if b.state == connectivity.Ready || b.state == connectivity.Connecting || prevAddrsCount == 0 {
+		// Start connection attempt at first address.
+		b.state = connectivity.Connecting
+		b.cc.UpdateState(balancer.State{
+			ConnectivityState: connectivity.Connecting,
+			Picker:            &picker{err: balancer.ErrNoSubConnAvailable},
+		})
+		b.requestConnectionLocked()
+	} else if b.state == connectivity.TransientFailure {
+		// If we're in TRANSIENT_FAILURE, we stay in TRANSIENT_FAILURE until
+		// we're READY. See A62.
+		b.requestConnectionLocked()
+	}
+	return nil
+}
+
+// UpdateSubConnState is unused as a StateListener is always registered when
+// creating SubConns.
+func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) {
+	b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", subConn, state)
+}
+
+func (b *pickfirstBalancer) Close() {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	b.closeSubConnsLocked()
+	b.state = connectivity.Shutdown
+}
+
+// ExitIdle moves the balancer out of idle state. It can be called concurrently
+// by the idlePicker and clientConn, so access to variables should be
+// synchronized.
+func (b *pickfirstBalancer) ExitIdle() {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	if b.state == connectivity.Idle && b.addressList.currentAddress() == b.addressList.first() {
+		b.firstPass = true
+		b.requestConnectionLocked()
+	}
+}
+
+func (b *pickfirstBalancer) closeSubConnsLocked() {
+	for _, sd := range b.subConns.Values() {
+		sd.(*scData).subConn.Shutdown()
+	}
+	b.subConns = resolver.NewAddressMap()
+}
+
+// deDupAddresses ensures that each address appears only once in the slice.
+func deDupAddresses(addrs []resolver.Address) []resolver.Address {
+	seenAddrs := resolver.NewAddressMap()
+	retAddrs := []resolver.Address{}
+
+	for _, addr := range addrs {
+		if _, ok := seenAddrs.Get(addr); ok {
+			continue
+		}
+		seenAddrs.Set(addr, true)
+		retAddrs = append(retAddrs, addr)
+	}
+	return retAddrs
+}
+
+// reconcileSubConnsLocked updates the active subchannels based on a new address
+// list from the resolver. It does this by:
+//   - closing subchannels: any existing subchannels associated with addresses
+//     that are no longer in the updated list are shut down.
+//   - removing subchannels: entries for these closed subchannels are removed
+//     from the subchannel map.
+//
+// This ensures that the subchannel map accurately reflects the current set of
+// addresses received from the name resolver.
+func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address) {
+	newAddrsMap := resolver.NewAddressMap()
+	for _, addr := range newAddrs {
+		newAddrsMap.Set(addr, true)
+	}
+
+	for _, oldAddr := range b.subConns.Keys() {
+		if _, ok := newAddrsMap.Get(oldAddr); ok {
+			continue
+		}
+		val, _ := b.subConns.Get(oldAddr)
+		val.(*scData).subConn.Shutdown()
+		b.subConns.Delete(oldAddr)
+	}
+}
+
+// shutdownRemainingLocked shuts down remaining subConns. Called when a subConn
+// becomes ready, which means that all other subConns must be shut down.
+func (b *pickfirstBalancer) shutdownRemainingLocked(selected *scData) {
+	for _, v := range b.subConns.Values() {
+		sd := v.(*scData)
+		if sd.subConn != selected.subConn {
+			sd.subConn.Shutdown()
+		}
+	}
+	b.subConns = resolver.NewAddressMap()
+	b.subConns.Set(selected.addr, selected)
+}
+
+// requestConnectionLocked starts connecting on the subchannel corresponding to
+// the current address. If no subchannel exists, one is created. If the current
+// subchannel is in TransientFailure, a connection to the next address is
+// attempted until a subchannel is found.
+func (b *pickfirstBalancer) requestConnectionLocked() {
+	if !b.addressList.isValid() {
+		return
+	}
+	var lastErr error
+	for valid := true; valid; valid = b.addressList.increment() {
+		curAddr := b.addressList.currentAddress()
+		sd, ok := b.subConns.Get(curAddr)
+		if !ok {
+			var err error
+			// We want to assign the new scData to sd from the outer scope,
+			// hence we can't use := below.
+			sd, err = b.newSCData(curAddr)
+			if err != nil {
+				// This should never happen, unless the clientConn is being shut
+				// down.
+				if b.logger.V(2) {
+					b.logger.Infof("Failed to create a subConn for address %v: %v", curAddr.String(), err)
+				}
+				// Do nothing, the LB policy will be closed soon.
+				return
+			}
+			b.subConns.Set(curAddr, sd)
+		}
+
+		scd := sd.(*scData)
+		switch scd.state {
+		case connectivity.Idle:
+			scd.subConn.Connect()
+		case connectivity.TransientFailure:
+			// Try the next address.
+			lastErr = scd.lastErr
+			continue
+		case connectivity.Ready:
+			// Should never happen.
+			b.logger.Errorf("Requesting a connection even though we have a READY SubConn")
+		case connectivity.Shutdown:
+			// Should never happen.
+			b.logger.Errorf("SubConn with state SHUTDOWN present in SubConns map")
+		case connectivity.Connecting:
+			// Wait for the SubConn to report success or failure.
+		}
+		return
+	}
+	// All the remaining addresses in the list are in TRANSIENT_FAILURE, end the
+	// first pass.
+	b.endFirstPassLocked(lastErr)
+}
+
+func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.SubConnState) {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	oldState := sd.state
+	sd.state = newState.ConnectivityState
+	// Previously relevant SubConns can still call back with state updates.
+	// To prevent pickers from returning these obsolete SubConns, this logic
+	// is included to check if the current list of active SubConns includes this
+	// SubConn.
+	if activeSD, found := b.subConns.Get(sd.addr); !found || activeSD != sd {
+		return
+	}
+	if newState.ConnectivityState == connectivity.Shutdown {
+		return
+	}
+
+	if newState.ConnectivityState == connectivity.Ready {
+		b.shutdownRemainingLocked(sd)
+		if !b.addressList.seekTo(sd.addr) {
+			// This should not fail as we should have only one SubConn after
+			// entering READY. The SubConn should be present in the addressList.
+			b.logger.Errorf("Address %q not found in address list %v", sd.addr, b.addressList.addresses)
+			return
+		}
+		b.state = connectivity.Ready
+		b.cc.UpdateState(balancer.State{
+			ConnectivityState: connectivity.Ready,
+			Picker:            &picker{result: balancer.PickResult{SubConn: sd.subConn}},
+		})
+		return
+	}
+
+	// If the LB policy is READY, and it receives a subchannel state change,
+	// it means that the READY subchannel has failed.
+	// A SubConn can also transition from CONNECTING directly to IDLE when
+	// a transport is successfully created, but the connection fails
+	// before the SubConn can send the notification for READY. We treat
+	// this as a successful connection and transition to IDLE.
+	if (b.state == connectivity.Ready && newState.ConnectivityState != connectivity.Ready) || (oldState == connectivity.Connecting && newState.ConnectivityState == connectivity.Idle) {
+		// Once a transport fails, the balancer enters IDLE and starts from
+		// the first address when the picker is used.
+		b.shutdownRemainingLocked(sd)
+		b.state = connectivity.Idle
+		b.addressList.reset()
+		b.cc.UpdateState(balancer.State{
+			ConnectivityState: connectivity.Idle,
+			Picker:            &idlePicker{exitIdle: sync.OnceFunc(b.ExitIdle)},
+		})
+		return
+	}
+
+	if b.firstPass {
+		switch newState.ConnectivityState {
+		case connectivity.Connecting:
+			// The balancer can be in either IDLE, CONNECTING or
+			// TRANSIENT_FAILURE. If it's in TRANSIENT_FAILURE, stay in
+			// TRANSIENT_FAILURE until it's READY. See A62.
+			// If the balancer is already in CONNECTING, no update is needed.
+			if b.state == connectivity.Idle {
+				b.state = connectivity.Connecting
+				b.cc.UpdateState(balancer.State{
+					ConnectivityState: connectivity.Connecting,
+					Picker:            &picker{err: balancer.ErrNoSubConnAvailable},
+				})
+			}
+		case connectivity.TransientFailure:
+			sd.lastErr = newState.ConnectionError
+			// Since we're re-using common SubConns while handling resolver
+			// updates, we could receive an out of turn TRANSIENT_FAILURE from
+			// a pass over the previous address list. We ignore such updates.
+ + if curAddr := b.addressList.currentAddress(); !equalAddressIgnoringBalAttributes(&curAddr, &sd.addr) { + return + } + if b.addressList.increment() { + b.requestConnectionLocked() + return + } + // End of the first pass. + b.endFirstPassLocked(newState.ConnectionError) + } + return + } + + // We have finished the first pass, keep re-connecting failing SubConns. + switch newState.ConnectivityState { + case connectivity.TransientFailure: + b.numTF = (b.numTF + 1) % b.subConns.Len() + sd.lastErr = newState.ConnectionError + if b.numTF%b.subConns.Len() == 0 { + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: newState.ConnectionError}, + }) + } + // We don't need to request re-resolution since the SubConn already + // does that before reporting TRANSIENT_FAILURE. + // TODO: #7534 - Move re-resolution requests from SubConn into + // pick_first. + case connectivity.Idle: + sd.subConn.Connect() + } +} + +func (b *pickfirstBalancer) endFirstPassLocked(lastErr error) { + b.firstPass = false + b.numTF = 0 + b.state = connectivity.TransientFailure + + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: lastErr}, + }) + // Start re-connecting all the SubConns that are already in IDLE. + for _, v := range b.subConns.Values() { + sd := v.(*scData) + if sd.state == connectivity.Idle { + sd.subConn.Connect() + } + } +} + +type picker struct { + result balancer.PickResult + err error +} + +func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) { + return p.result, p.err +} + +// idlePicker is used when the SubConn is IDLE and kicks the SubConn into +// CONNECTING when Pick is called. +type idlePicker struct { + exitIdle func() +} + +func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { + i.exitIdle() + return balancer.PickResult{}, balancer.ErrNoSubConnAvailable +} + +// addressList manages sequentially iterating over addresses present in a list +// of endpoints. It provides a 1 dimensional view of the addresses present in +// the endpoints. +// This type is not safe for concurrent access. +type addressList struct { + addresses []resolver.Address + idx int +} + +func (al *addressList) isValid() bool { + return al.idx < len(al.addresses) +} + +func (al *addressList) size() int { + return len(al.addresses) +} + +// increment moves to the next index in the address list. +// This method returns false if it went off the list, true otherwise. +func (al *addressList) increment() bool { + if !al.isValid() { + return false + } + al.idx++ + return al.idx < len(al.addresses) +} + +// currentAddress returns the current address pointed to in the addressList. +// If the list is in an invalid state, it returns an empty address instead. +func (al *addressList) currentAddress() resolver.Address { + if !al.isValid() { + return resolver.Address{} + } + return al.addresses[al.idx] +} + +// first returns the first address in the list. If the list is empty, it returns +// an empty address instead. +func (al *addressList) first() resolver.Address { + if len(al.addresses) == 0 { + return resolver.Address{} + } + return al.addresses[0] +} + +func (al *addressList) reset() { + al.idx = 0 +} + +func (al *addressList) updateAddrs(addrs []resolver.Address) { + al.addresses = addrs + al.reset() +} + +// seekTo returns false if the needle was not found and the current index was +// left unchanged. 
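+//
+// A sketch of typical use (the addresses are illustrative):
+//
+//	al := addressList{}
+//	al.updateAddrs([]resolver.Address{{Addr: "10.0.0.1:80"}, {Addr: "10.0.0.2:80"}})
+//	al.seekTo(resolver.Address{Addr: "10.0.0.2:80"}) // returns true; currentAddress is now 10.0.0.2:80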
+func (al *addressList) seekTo(needle resolver.Address) bool {
+	for ai, addr := range al.addresses {
+		if !equalAddressIgnoringBalAttributes(&addr, &needle) {
+			continue
+		}
+		al.idx = ai
+		return true
+	}
+	return false
+}
+
+// equalAddressIgnoringBalAttributes returns true if a and b are considered
+// equal. This is different from the Equal method on the resolver.Address type
+// which considers all fields to determine equality. Here, we only consider
+// fields that are meaningful to the SubConn.
+func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool {
+	return a.Addr == b.Addr && a.ServerName == b.ServerName &&
+		a.Attributes.Equal(b.Attributes) &&
+		a.Metadata == b.Metadata
+}
diff --git a/vendor/google.golang.org/grpc/balancer_wrapper.go b/vendor/google.golang.org/grpc/balancer_wrapper.go
index 4161fdf47a8..2a4f2878aef 100644
--- a/vendor/google.golang.org/grpc/balancer_wrapper.go
+++ b/vendor/google.golang.org/grpc/balancer_wrapper.go
@@ -24,13 +24,18 @@ import (
 	"sync"
 
 	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/connectivity"
+	"google.golang.org/grpc/internal"
 	"google.golang.org/grpc/internal/balancer/gracefulswitch"
 	"google.golang.org/grpc/internal/channelz"
 	"google.golang.org/grpc/internal/grpcsync"
 	"google.golang.org/grpc/resolver"
+	"google.golang.org/grpc/status"
 )
 
+var setConnectedAddress = internal.SetConnectedAddress.(func(*balancer.SubConnState, resolver.Address))
+
 // ccBalancerWrapper sits between the ClientConn and the Balancer.
 //
 // ccBalancerWrapper implements methods corresponding to the ones on the
@@ -79,6 +84,7 @@ func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper {
 			CustomUserAgent: cc.dopts.copts.UserAgent,
 			ChannelzParent:  cc.channelz,
 			Target:          cc.parsedTarget,
+			MetricsRecorder: cc.metricsRecorderList,
 		},
 		serializer:       grpcsync.NewCallbackSerializer(ctx),
 		serializerCancel: cancel,
@@ -92,7 +98,7 @@ func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper {
 // it is safe to call into the balancer here.
 func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error {
 	errCh := make(chan error)
-	ok := ccb.serializer.Schedule(func(ctx context.Context) {
+	uccs := func(ctx context.Context) {
 		defer close(errCh)
 		if ctx.Err() != nil || ccb.balancer == nil {
 			return
@@ -107,17 +113,23 @@ func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnStat
 			logger.Infof("error from balancer.UpdateClientConnState: %v", err)
 		}
 		errCh <- err
-	})
-	if !ok {
-		return nil
 	}
+	onFailure := func() { close(errCh) }
+
+	// UpdateClientConnState can race with Close, and when the latter wins, the
+	// serializer is closed, and the attempt to schedule the callback will fail.
+	// It is acceptable to ignore this failure. But since we want to handle the
+	// state update in a blocking fashion (when we successfully schedule the
+	// callback), we have to use the ScheduleOr method and not the MaybeSchedule
+	// method on the serializer.
+	ccb.serializer.ScheduleOr(uccs, onFailure)
 	return <-errCh
 }
 
 // resolverError is invoked by grpc to push a resolver error to the underlying
 // balancer. The call to the balancer is executed from the serializer.
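
resolverError, whose body follows, is one of several callbacks that switch from Schedule to TrySchedule. The distinction the updateClientConnState comment above draws is between dropping work silently and running a fallback when the serializer has already been closed; a minimal sketch of those assumed semantics (not the real grpcsync.CallbackSerializer):

    package serializersketch

    import "sync"

    // serializer models the two scheduling modes referenced above.
    type serializer struct {
        mu     sync.Mutex
        closed bool
        queue  []func()
    }

    // TrySchedule enqueues f, silently dropping it once the serializer is
    // closed (fine for fire-and-forget updates such as resolver errors).
    func (s *serializer) TrySchedule(f func()) { s.ScheduleOr(f, func() {}) }

    // ScheduleOr enqueues f, or runs onFailure when the serializer is closed,
    // so a caller blocked on a result channel (like errCh above) is released.
    func (s *serializer) ScheduleOr(f, onFailure func()) {
        s.mu.Lock()
        defer s.mu.Unlock()
        if s.closed {
            onFailure()
            return
        }
        s.queue = append(s.queue, f)
    }
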
func (ccb *ccBalancerWrapper) resolverError(err error) { - ccb.serializer.Schedule(func(ctx context.Context) { + ccb.serializer.TrySchedule(func(ctx context.Context) { if ctx.Err() != nil || ccb.balancer == nil { return } @@ -133,7 +145,7 @@ func (ccb *ccBalancerWrapper) close() { ccb.closed = true ccb.mu.Unlock() channelz.Info(logger, ccb.cc.channelz, "ccBalancerWrapper: closing") - ccb.serializer.Schedule(func(context.Context) { + ccb.serializer.TrySchedule(func(context.Context) { if ccb.balancer == nil { return } @@ -145,7 +157,7 @@ func (ccb *ccBalancerWrapper) close() { // exitIdle invokes the balancer's exitIdle method in the serializer. func (ccb *ccBalancerWrapper) exitIdle() { - ccb.serializer.Schedule(func(ctx context.Context) { + ccb.serializer.TrySchedule(func(ctx context.Context) { if ctx.Err() != nil || ccb.balancer == nil { return } @@ -182,7 +194,7 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer return acbw, nil } -func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { +func (ccb *ccBalancerWrapper) RemoveSubConn(balancer.SubConn) { // The graceful switch balancer will never call this. logger.Errorf("ccb RemoveSubConn(%v) called unexpectedly, sc") } @@ -246,21 +258,28 @@ type acBalancerWrapper struct { ccb *ccBalancerWrapper // read-only stateListener func(balancer.SubConnState) - mu sync.Mutex - producers map[balancer.ProducerBuilder]*refCountedProducer + producersMu sync.Mutex + producers map[balancer.ProducerBuilder]*refCountedProducer } // updateState is invoked by grpc to push a subConn state update to the // underlying balancer. -func (acbw *acBalancerWrapper) updateState(s connectivity.State, err error) { - acbw.ccb.serializer.Schedule(func(ctx context.Context) { +func (acbw *acBalancerWrapper) updateState(s connectivity.State, curAddr resolver.Address, err error) { + acbw.ccb.serializer.TrySchedule(func(ctx context.Context) { if ctx.Err() != nil || acbw.ccb.balancer == nil { return } + // Invalidate all producers on any state change. + acbw.closeProducers() + // Even though it is optional for balancers, gracefulswitch ensures // opts.StateListener is set, so this cannot ever be nil. // TODO: delete this comment when UpdateSubConnState is removed. - acbw.stateListener(balancer.SubConnState{ConnectivityState: s, ConnectionError: err}) + scs := balancer.SubConnState{ConnectivityState: s, ConnectionError: err} + if s == connectivity.Ready { + setConnectedAddress(&scs, curAddr) + } + acbw.stateListener(scs) }) } @@ -277,6 +296,7 @@ func (acbw *acBalancerWrapper) Connect() { } func (acbw *acBalancerWrapper) Shutdown() { + acbw.closeProducers() acbw.ccb.cc.removeAddrConn(acbw.ac, errConnDrain) } @@ -284,9 +304,10 @@ func (acbw *acBalancerWrapper) Shutdown() { // ready, blocks until it is or ctx expires. Returns an error when the context // expires or the addrConn is shut down. func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { - transport, err := acbw.ac.getTransport(ctx) - if err != nil { - return nil, err + transport := acbw.ac.getReadyTransport() + if transport == nil { + return nil, status.Errorf(codes.Unavailable, "SubConn state is not Ready") + } return newNonRetryClientStream(ctx, desc, method, transport, acbw.ac, opts...) 
} @@ -311,15 +332,15 @@ type refCountedProducer struct { } func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) (balancer.Producer, func()) { - acbw.mu.Lock() - defer acbw.mu.Unlock() + acbw.producersMu.Lock() + defer acbw.producersMu.Unlock() // Look up existing producer from this builder. pData := acbw.producers[pb] if pData == nil { // Not found; create a new one and add it to the producers map. - p, close := pb.Build(acbw) - pData = &refCountedProducer{producer: p, close: close} + p, closeFn := pb.Build(acbw) + pData = &refCountedProducer{producer: p, close: closeFn} acbw.producers[pb] = pData } // Account for this new reference. @@ -329,13 +350,26 @@ func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) ( // and delete the refCountedProducer from the map if the total reference // count goes to zero. unref := func() { - acbw.mu.Lock() + acbw.producersMu.Lock() + // If closeProducers has already closed this producer instance, refs is + // set to 0, so the check after decrementing will never pass, and the + // producer will not be double-closed. pData.refs-- if pData.refs == 0 { defer pData.close() // Run outside the acbw mutex delete(acbw.producers, pb) } - acbw.mu.Unlock() + acbw.producersMu.Unlock() } return pData.producer, grpcsync.OnceFunc(unref) } + +func (acbw *acBalancerWrapper) closeProducers() { + acbw.producersMu.Lock() + defer acbw.producersMu.Unlock() + for pb, pData := range acbw.producers { + pData.refs = 0 + pData.close() + delete(acbw.producers, pb) + } +} diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index 63c639e4fe9..55bffaa77ef 100644 --- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -18,8 +18,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.34.1 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v5.27.1 // source: grpc/binlog/v1/binarylog.proto package grpc_binarylog_v1 @@ -1015,7 +1015,7 @@ func file_grpc_binlog_v1_binarylog_proto_rawDescGZIP() []byte { var file_grpc_binlog_v1_binarylog_proto_enumTypes = make([]protoimpl.EnumInfo, 3) var file_grpc_binlog_v1_binarylog_proto_msgTypes = make([]protoimpl.MessageInfo, 8) -var file_grpc_binlog_v1_binarylog_proto_goTypes = []interface{}{ +var file_grpc_binlog_v1_binarylog_proto_goTypes = []any{ (GrpcLogEntry_EventType)(0), // 0: grpc.binarylog.v1.GrpcLogEntry.EventType (GrpcLogEntry_Logger)(0), // 1: grpc.binarylog.v1.GrpcLogEntry.Logger (Address_Type)(0), // 2: grpc.binarylog.v1.Address.Type @@ -1058,7 +1058,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_grpc_binlog_v1_binarylog_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*GrpcLogEntry); i { case 0: return &v.state @@ -1070,7 +1070,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return nil } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*ClientHeader); i { case 0: return &v.state @@ -1082,7 +1082,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return nil } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*ServerHeader); i { case 0: return &v.state @@ -1094,7 +1094,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return nil } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*Trailer); i { case 0: return &v.state @@ -1106,7 +1106,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return nil } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*Message); i { case 0: return &v.state @@ -1118,7 +1118,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return nil } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*Metadata); i { case 0: return &v.state @@ -1130,7 +1130,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return nil } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*MetadataEntry); i { case 0: return &v.state @@ -1142,7 +1142,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return nil } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*Address); i { case 0: return &v.state @@ -1155,7 +1155,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { } } } - 
file_grpc_binlog_v1_binarylog_proto_msgTypes[0].OneofWrappers = []interface{}{ + file_grpc_binlog_v1_binarylog_proto_msgTypes[0].OneofWrappers = []any{ (*GrpcLogEntry_ClientHeader)(nil), (*GrpcLogEntry_ServerHeader)(nil), (*GrpcLogEntry_Message)(nil), diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 423be7b43b0..19763f8eddf 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -24,6 +24,7 @@ import ( "fmt" "math" "net/url" + "slices" "strings" "sync" "sync/atomic" @@ -39,6 +40,7 @@ import ( "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/idle" iresolver "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/internal/stats" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/resolver" @@ -194,8 +196,11 @@ func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error) cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelz) cc.pickerWrapper = newPickerWrapper(cc.dopts.copts.StatsHandlers) + cc.metricsRecorderList = stats.NewMetricsRecorderList(cc.dopts.copts.StatsHandlers) + cc.initIdleStateLocked() // Safe to call without the lock, since nothing else has a reference to cc. cc.idlenessMgr = idle.NewManager((*idler)(cc), cc.dopts.idleTimeout) + return cc, nil } @@ -590,13 +595,14 @@ type ClientConn struct { cancel context.CancelFunc // Cancelled on close. // The following are initialized at dial time, and are read-only after that. - target string // User's dial target. - parsedTarget resolver.Target // See initParsedTargetAndResolverBuilder(). - authority string // See initAuthority(). - dopts dialOptions // Default and user specified dial options. - channelz *channelz.Channel // Channelz object. - resolverBuilder resolver.Builder // See initParsedTargetAndResolverBuilder(). - idlenessMgr *idle.Manager + target string // User's dial target. + parsedTarget resolver.Target // See initParsedTargetAndResolverBuilder(). + authority string // See initAuthority(). + dopts dialOptions // Default and user specified dial options. + channelz *channelz.Channel // Channelz object. + resolverBuilder resolver.Builder // See initParsedTargetAndResolverBuilder(). + idlenessMgr *idle.Manager + metricsRecorderList *stats.MetricsRecorderList // The following provide their own synchronization, and therefore don't // require cc.mu to be held to access them. @@ -626,11 +632,6 @@ type ClientConn struct { // WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or // ctx expires. A true value is returned in former case and false in latter. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connectivity.State) bool { ch := cc.csMgr.getNotifyChan() if cc.csMgr.getState() != sourceState { @@ -645,11 +646,6 @@ func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connec } // GetState returns the connectivity.State of ClientConn. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a later -// release. 
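
With the Experimental notices deleted above, WaitForStateChange and GetState (whose body follows) are now stable ClientConn API. A small usage sketch, assuming cc is an already-dialed *grpc.ClientConn:

    package statewatch

    import (
        "context"

        "google.golang.org/grpc"
        "google.golang.org/grpc/connectivity"
    )

    // waitReady blocks until cc reports READY, returning false if ctx
    // expires or is cancelled first.
    func waitReady(ctx context.Context, cc *grpc.ClientConn) bool {
        for {
            s := cc.GetState()
            if s == connectivity.Ready {
                return true
            }
            if !cc.WaitForStateChange(ctx, s) {
                return false
            }
        }
    }
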
 func (cc *ClientConn) GetState() connectivity.State {
 	return cc.csMgr.getState()
 }
 
@@ -812,17 +808,11 @@ func (cc *ClientConn) applyFailingLBLocked(sc *serviceconfig.ParseResult) {
 	cc.csMgr.updateState(connectivity.TransientFailure)
 }
 
-// Makes a copy of the input addresses slice and clears out the balancer
-// attributes field. Addresses are passed during subconn creation and address
-// update operations. In both cases, we will clear the balancer attributes by
-// calling this function, and therefore we will be able to use the Equal method
-// provided by the resolver.Address type for comparison.
-func copyAddressesWithoutBalancerAttributes(in []resolver.Address) []resolver.Address {
+// Makes a copy of the input addresses slice. Addresses are passed during
+// subconn creation and address update operations.
+func copyAddresses(in []resolver.Address) []resolver.Address {
 	out := make([]resolver.Address, len(in))
-	for i := range in {
-		out[i] = in[i]
-		out[i].BalancerAttributes = nil
-	}
+	copy(out, in)
 	return out
 }
 
@@ -837,12 +827,11 @@ func (cc *ClientConn) newAddrConnLocked(addrs []resolver.Address, opts balancer.
 	ac := &addrConn{
 		state:        connectivity.Idle,
 		cc:           cc,
-		addrs:        copyAddressesWithoutBalancerAttributes(addrs),
+		addrs:        copyAddresses(addrs),
 		scopts:       opts,
 		dopts:        cc.dopts,
 		channelz:     channelz.RegisterSubChannel(cc.channelz, ""),
 		resetBackoff: make(chan struct{}),
-		stateChan:    make(chan struct{}),
 	}
 	ac.ctx, ac.cancel = context.WithCancel(cc.ctx)
 	// Start with our address set to the first address; this may be updated if
@@ -918,28 +907,29 @@ func (ac *addrConn) connect() error {
 		ac.mu.Unlock()
 		return nil
 	}
-	ac.mu.Unlock()
 
-	ac.resetTransport()
+	ac.resetTransportAndUnlock()
 	return nil
 }
 
-func equalAddresses(a, b []resolver.Address) bool {
-	if len(a) != len(b) {
-		return false
-	}
-	for i, v := range a {
-		if !v.Equal(b[i]) {
-			return false
-		}
-	}
-	return true
+// equalAddressIgnoringBalAttributes returns true if a and b are considered equal.
+// This is different from the Equal method on the resolver.Address type which
+// considers all fields to determine equality. Here, we only consider fields
+// that are meaningful to the subConn.
+func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool {
+	return a.Addr == b.Addr && a.ServerName == b.ServerName &&
+		a.Attributes.Equal(b.Attributes) &&
+		a.Metadata == b.Metadata
+}
+
+func equalAddressesIgnoringBalAttributes(a, b []resolver.Address) bool {
+	return slices.EqualFunc(a, b, func(a, b resolver.Address) bool { return equalAddressIgnoringBalAttributes(&a, &b) })
 }
 
 // updateAddrs updates ac.addrs with the new addresses list and handles active
 // connections or connection attempts.
 func (ac *addrConn) updateAddrs(addrs []resolver.Address) {
-	addrs = copyAddressesWithoutBalancerAttributes(addrs)
+	addrs = copyAddresses(addrs)
 	limit := len(addrs)
 	if limit > 5 {
 		limit = 5
@@ -947,7 +937,7 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) {
 	channelz.Infof(logger, ac.channelz, "addrConn: updateAddrs addrs (%d of %d): %v", limit, len(addrs), addrs[:limit])
 
 	ac.mu.Lock()
-	if equalAddresses(ac.addrs, addrs) {
+	if equalAddressesIgnoringBalAttributes(ac.addrs, addrs) {
 		ac.mu.Unlock()
 		return
 	}
@@ -966,7 +956,7 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) {
 	// Try to find the connected address.
for _, a := range addrs { a.ServerName = ac.cc.getServerName(a) - if a.Equal(ac.curAddr) { + if equalAddressIgnoringBalAttributes(&a, &ac.curAddr) { // We are connected to a valid address, so do nothing but // update the addresses. ac.mu.Unlock() @@ -992,11 +982,9 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) { ac.updateConnectivityState(connectivity.Idle, nil) } - ac.mu.Unlock() - // Since we were connecting/connected, we should start a new connection // attempt. - go ac.resetTransport() + go ac.resetTransportAndUnlock() } // getServerName determines the serverName to be used in the connection @@ -1152,10 +1140,15 @@ func (cc *ClientConn) Close() error { <-cc.resolverWrapper.serializer.Done() <-cc.balancerWrapper.serializer.Done() - + var wg sync.WaitGroup for ac := range conns { - ac.tearDown(ErrClientConnClosing) + wg.Add(1) + go func(ac *addrConn) { + defer wg.Done() + ac.tearDown(ErrClientConnClosing) + }(ac) } + wg.Wait() cc.addTraceEvent("deleted") // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add // trace reference to the entity being deleted, and thus prevent it from being @@ -1190,8 +1183,7 @@ type addrConn struct { addrs []resolver.Address // All addresses that the resolver resolved to. // Use updateConnectivityState for updating addrConn's connectivity state. - state connectivity.State - stateChan chan struct{} // closed and recreated on every state change. + state connectivity.State backoffIdx int // Needs to be stateful for resetConnectBackoff. resetBackoff chan struct{} @@ -1204,9 +1196,6 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) if ac.state == s { return } - // When changing states, reset the state change channel. - close(ac.stateChan) - ac.stateChan = make(chan struct{}) ac.state = s ac.channelz.ChannelMetrics.State.Store(&s) if lastErr == nil { @@ -1214,7 +1203,7 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) } else { channelz.Infof(logger, ac.channelz, "Subchannel Connectivity change to %v, last error: %s", s, lastErr) } - ac.acbw.updateState(s, lastErr) + ac.acbw.updateState(s, ac.curAddr, lastErr) } // adjustParams updates parameters used to create transports upon @@ -1231,8 +1220,10 @@ func (ac *addrConn) adjustParams(r transport.GoAwayReason) { } } -func (ac *addrConn) resetTransport() { - ac.mu.Lock() +// resetTransportAndUnlock unconditionally connects the addrConn. +// +// ac.mu must be held by the caller, and this function will guarantee it is released. +func (ac *addrConn) resetTransportAndUnlock() { acCtx := ac.ctx if acCtx.Err() != nil { ac.mu.Unlock() @@ -1263,6 +1254,8 @@ func (ac *addrConn) resetTransport() { ac.mu.Unlock() if err := ac.tryAllAddrs(acCtx, addrs, connectDeadline); err != nil { + // TODO: #7534 - Move re-resolution requests into the pick_first LB policy + // to ensure one resolution request per pass instead of per subconn failure. ac.cc.resolveNow(resolver.ResolveNowOptions{}) ac.mu.Lock() if acCtx.Err() != nil { @@ -1304,7 +1297,7 @@ func (ac *addrConn) resetTransport() { ac.mu.Unlock() } -// tryAllAddrs tries to creates a connection to the addresses, and stop when at +// tryAllAddrs tries to create a connection to the addresses, and stop when at // the first successful one. It returns an error if no address was successfully // connected, or updates ac appropriately with the new transport. 
 func (ac *addrConn) tryAllAddrs(ctx context.Context, addrs []resolver.Address, connectDeadline time.Time) error {
@@ -1516,29 +1509,6 @@ func (ac *addrConn) getReadyTransport() transport.ClientTransport {
 	return nil
 }
 
-// getTransport waits until the addrconn is ready and returns the transport.
-// If the context expires first, returns an appropriate status. If the
-// addrConn is stopped first, returns an Unavailable status error.
-func (ac *addrConn) getTransport(ctx context.Context) (transport.ClientTransport, error) {
-	for ctx.Err() == nil {
-		ac.mu.Lock()
-		t, state, sc := ac.transport, ac.state, ac.stateChan
-		ac.mu.Unlock()
-		if state == connectivity.Ready {
-			return t, nil
-		}
-		if state == connectivity.Shutdown {
-			return nil, status.Errorf(codes.Unavailable, "SubConn shutting down")
-		}
-
-		select {
-		case <-ctx.Done():
-		case <-sc:
-		}
-	}
-	return nil, status.FromContextError(ctx.Err()).Err()
-}
-
 // tearDown starts to tear down the addrConn.
 //
 // Note that tearDown doesn't remove ac from ac.cc.conns, so the addrConn struct
@@ -1585,7 +1555,7 @@ func (ac *addrConn) tearDown(err error) {
 	} else {
 		// Hard close the transport when the channel is entering idle or is
 		// being shutdown. In the case where the channel is being shutdown,
-		// closing of transports is also taken care of by cancelation of cc.ctx.
+		// closing of transports is also taken care of by cancellation of cc.ctx.
 		// But in the case where the channel is entering idle, we need to
 		// explicitly close the transports here. Instead of distinguishing
 		// between these two cases, it is simpler to close the transport
diff --git a/vendor/google.golang.org/grpc/codec.go b/vendor/google.golang.org/grpc/codec.go
index 411e3dfd47c..e840858b77b 100644
--- a/vendor/google.golang.org/grpc/codec.go
+++ b/vendor/google.golang.org/grpc/codec.go
@@ -21,18 +21,73 @@ package grpc
 
 import (
 	"google.golang.org/grpc/encoding"
 	_ "google.golang.org/grpc/encoding/proto" // to register the Codec for "proto"
+	"google.golang.org/grpc/mem"
 )
 
-// baseCodec contains the functionality of both Codec and encoding.Codec, but
-// omits the name/string, which vary between the two and are not needed for
-// anything besides the registry in the encoding package.
+// baseCodec captures the new encoding.CodecV2 interface without the Name
+// function, allowing it to be implemented by older Codec and encoding.Codec
+// implementations. The omitted Name function is only needed for the registry in
+// the encoding package and is not part of the core functionality.
 type baseCodec interface {
-	Marshal(v any) ([]byte, error)
-	Unmarshal(data []byte, v any) error
+	Marshal(v any) (mem.BufferSlice, error)
+	Unmarshal(data mem.BufferSlice, v any) error
+}
+
+// getCodec returns an encoding.CodecV2 for the codec of the given name (if
+// registered). It first checks the V1 registry with encoding.GetCodec and, if
+// a codec is registered there, wraps it with newCodecV1Bridge to turn it into
+// an encoding.CodecV2. Otherwise it returns the result of the V2 registry
+// lookup with encoding.GetCodecV2, which is nil if nothing is registered.
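
getCodec itself follows. For callers nothing changes: a codec registered through the old V1 interface keeps working, because the V1 registry is consulted first and the hit is wrapped into a CodecV2. An illustrative V1 codec (the "toy" name and fmt-based encoding are stand-ins, not a real codec):

    package toycodec

    import (
        "fmt"

        "google.golang.org/grpc/encoding"
    )

    // toyCodec implements the V1 encoding.Codec interface; getCodec would
    // return it wrapped in newCodecV1Bridge.
    type toyCodec struct{}

    func (toyCodec) Marshal(v any) ([]byte, error) { return []byte(fmt.Sprint(v)), nil }

    func (toyCodec) Unmarshal(data []byte, v any) error {
        _, err := fmt.Sscan(string(data), v)
        return err
    }

    func (toyCodec) Name() string { return "toy" }

    func init() { encoding.RegisterCodec(toyCodec{}) }
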
+func getCodec(name string) encoding.CodecV2 { + if codecV1 := encoding.GetCodec(name); codecV1 != nil { + return newCodecV1Bridge(codecV1) + } + + return encoding.GetCodecV2(name) +} + +func newCodecV0Bridge(c Codec) baseCodec { + return codecV0Bridge{codec: c} +} + +func newCodecV1Bridge(c encoding.Codec) encoding.CodecV2 { + return codecV1Bridge{ + codecV0Bridge: codecV0Bridge{codec: c}, + name: c.Name(), + } +} + +var _ baseCodec = codecV0Bridge{} + +type codecV0Bridge struct { + codec interface { + Marshal(v any) ([]byte, error) + Unmarshal(data []byte, v any) error + } +} + +func (c codecV0Bridge) Marshal(v any) (mem.BufferSlice, error) { + data, err := c.codec.Marshal(v) + if err != nil { + return nil, err + } + return mem.BufferSlice{mem.NewBuffer(&data, nil)}, nil +} + +func (c codecV0Bridge) Unmarshal(data mem.BufferSlice, v any) (err error) { + return c.codec.Unmarshal(data.Materialize(), v) } -var _ baseCodec = Codec(nil) -var _ baseCodec = encoding.Codec(nil) +var _ encoding.CodecV2 = codecV1Bridge{} + +type codecV1Bridge struct { + codecV0Bridge + name string +} + +func (c codecV1Bridge) Name() string { + return c.name +} // Codec defines the interface gRPC uses to encode and decode messages. // Note that implementations of this interface must be thread safe; diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aeadrekey.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aeadrekey.go index 43726e877b8..7e4bfee8886 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aeadrekey.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aeadrekey.go @@ -49,7 +49,7 @@ func (k KeySizeError) Error() string { // newRekeyAEAD creates a new instance of aes128gcm with rekeying. // The key argument should be 44 bytes, the first 32 bytes are used as a key -// for HKDF-expand and the remainining 12 bytes are used as a random mask for +// for HKDF-expand and the remaining 12 bytes are used as a random mask for // the counter. func newRekeyAEAD(key []byte) (*rekeyAEAD, error) { k := len(key) diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcmrekey.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcmrekey.go index 6a9035ea254..b5bbb5497aa 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcmrekey.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcmrekey.go @@ -51,7 +51,7 @@ type aes128gcmRekey struct { // NewAES128GCMRekey creates an instance that uses aes128gcm with rekeying // for ALTS record. The key argument should be 44 bytes, the first 32 bytes -// are used as a key for HKDF-expand and the remainining 12 bytes are used +// are used as a key for HKDF-expand and the remaining 12 bytes are used // as a random mask for the counter. 
func NewAES128GCMRekey(side core.Side, key []byte) (ALTSRecordCrypto, error) { inCounter := NewInCounter(side, overflowLenAES128GCMRekey) diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/record.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/record.go index 0d64fb37a12..f1ea7bb2081 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/record.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/record.go @@ -266,10 +266,3 @@ func (p *conn) Write(b []byte) (n int, err error) { } return n, nil } - -func min(a, b int) int { - if a < b { - return a - } - return b -} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go index 6c867dd8501..50721f690ac 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go @@ -128,7 +128,7 @@ type altsHandshaker struct { // NewClientHandshaker creates a core.Handshaker that performs a client-side // ALTS handshake by acting as a proxy between the peer and the ALTS handshaker // service in the metadata server. -func NewClientHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, opts *ClientHandshakerOptions) (core.Handshaker, error) { +func NewClientHandshaker(_ context.Context, conn *grpc.ClientConn, c net.Conn, opts *ClientHandshakerOptions) (core.Handshaker, error) { return &altsHandshaker{ stream: nil, conn: c, @@ -141,7 +141,7 @@ func NewClientHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, // NewServerHandshaker creates a core.Handshaker that performs a server-side // ALTS handshake by acting as a proxy between the peer and the ALTS handshaker // service in the metadata server. -func NewServerHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, opts *ServerHandshakerOptions) (core.Handshaker, error) { +func NewServerHandshaker(_ context.Context, conn *grpc.ClientConn, c net.Conn, opts *ServerHandshakerOptions) (core.Handshaker, error) { return &altsHandshaker{ stream: nil, conn: c, diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go index e1cdafb980c..fbfde5d047f 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go @@ -34,8 +34,6 @@ var ( // to a corresponding connection to a hypervisor handshaker service // instance. hsConnMap = make(map[string]*grpc.ClientConn) - // hsDialer will be reassigned in tests. - hsDialer = grpc.Dial ) // Dial dials the handshake service in the hypervisor. If a connection has @@ -49,8 +47,10 @@ func Dial(hsAddress string) (*grpc.ClientConn, error) { if !ok { // Create a new connection to the handshaker service. Note that // this connection stays open until the application is closed. + // Disable the service config to avoid unnecessary TXT record lookups that + // cause timeouts with some versions of systemd-resolved. 
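
The comment above documents why the handshaker connection (whose Dial call continues below) now opts out of service-config resolution: the DNS resolver otherwise issues TXT lookups that can time out under some systemd-resolved versions. The same knob is available to any client; a sketch with a placeholder address:

    package dialsketch

    import (
        "google.golang.org/grpc"
        "google.golang.org/grpc/credentials/insecure"
    )

    // dialWithoutServiceConfig dials addr while skipping the DNS TXT-record
    // service-config lookup entirely.
    func dialWithoutServiceConfig(addr string) (*grpc.ClientConn, error) {
        return grpc.Dial(addr,
            grpc.WithTransportCredentials(insecure.NewCredentials()),
            grpc.WithDisableServiceConfig(),
        )
    }
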
var err error - hsConn, err = hsDialer(hsAddress, grpc.WithTransportCredentials(insecure.NewCredentials())) + hsConn, err = grpc.Dial(hsAddress, grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithDisableServiceConfig()) if err != nil { return nil, err } diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go index 38cb5cf0d74..b7de8f05b76 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.1 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v5.27.1 // source: grpc/gcp/altscontext.proto package grpc_gcp @@ -201,7 +201,7 @@ func file_grpc_gcp_altscontext_proto_rawDescGZIP() []byte { } var file_grpc_gcp_altscontext_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_grpc_gcp_altscontext_proto_goTypes = []interface{}{ +var file_grpc_gcp_altscontext_proto_goTypes = []any{ (*AltsContext)(nil), // 0: grpc.gcp.AltsContext nil, // 1: grpc.gcp.AltsContext.PeerAttributesEntry (SecurityLevel)(0), // 2: grpc.gcp.SecurityLevel @@ -225,7 +225,7 @@ func file_grpc_gcp_altscontext_proto_init() { } file_grpc_gcp_transport_security_common_proto_init() if !protoimpl.UnsafeEnabled { - file_grpc_gcp_altscontext_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_grpc_gcp_altscontext_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*AltsContext); i { case 0: return &v.state diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go index 55fc7f65f10..79b5dad476c 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.1 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v5.27.1 // source: grpc/gcp/handshaker.proto package grpc_gcp @@ -533,7 +533,7 @@ type StartServerHandshakeReq struct { // to handshake_parameters is the integer value of HandshakeProtocol enum. HandshakeParameters map[int32]*ServerHandshakeParameters `protobuf:"bytes,2,rep,name=handshake_parameters,json=handshakeParameters,proto3" json:"handshake_parameters,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // Bytes in out_frames returned from the peer's HandshakerResp. It is possible - // that the peer's out_frames are split into multiple HandshakReq messages. + // that the peer's out_frames are split into multiple HandshakeReq messages. InBytes []byte `protobuf:"bytes,3,opt,name=in_bytes,json=inBytes,proto3" json:"in_bytes,omitempty"` // (Optional) Local endpoint information of the connection to the client, // such as local IP address, port number, and network protocol. 
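
The raw-descriptor hunk that follows is protoc regenerating the serialized FileDescriptorProto. Beyond reflowing, the substantive change is the five extra bytes (0x42, 0x03, 0x80, 0x01, 0x01) attached to the access_token and token fields, which are consistent with those fields gaining the FieldOptions.debug_redact flag (field 16, varint 1) so that logging layers scrub their values. A hedged sketch of checking that flag through protobuf reflection; the File_grpc_gcp_handshaker_proto variable follows protoc-gen-go's naming convention but lives in a grpc-internal package, so this is only callable from inside the grpc module:

    package redactcheck

    import (
        "google.golang.org/protobuf/reflect/protoreflect"
        "google.golang.org/protobuf/types/descriptorpb"
    )

    // redactedFields returns the full names of all top-level message fields
    // in fd that carry the debug_redact option.
    func redactedFields(fd protoreflect.FileDescriptor) []protoreflect.FullName {
        var out []protoreflect.FullName
        msgs := fd.Messages()
        for i := 0; i < msgs.Len(); i++ {
            fields := msgs.Get(i).Fields()
            for j := 0; j < fields.Len(); j++ {
                f := fields.Get(j)
                opts, ok := f.Options().(*descriptorpb.FieldOptions)
                if ok && opts.GetDebugRedact() {
                    out = append(out, f.FullName())
                }
            }
        }
        return out
    }
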
@@ -1071,7 +1071,7 @@ var file_grpc_gcp_handshaker_proto_rawDesc = []byte{ 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x10, 0x0a, 0x0e, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, - 0x22, 0xf6, 0x04, 0x0a, 0x17, 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x22, 0xfb, 0x04, 0x0a, 0x17, 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x12, 0x5b, 0x0a, 0x1b, 0x68, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, @@ -1108,139 +1108,140 @@ var file_grpc_gcp_handshaker_proto_rawDesc = []byte{ 0x6f, 0x6e, 0x73, 0x52, 0x0b, 0x72, 0x70, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x46, 0x72, 0x61, - 0x6d, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, - 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x61, 0x63, - 0x63, 0x65, 0x73, 0x73, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xaa, 0x01, 0x0a, 0x19, 0x53, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, - 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x72, 0x65, 0x63, 0x6f, 0x72, - 0x64, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x0f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, - 0x6c, 0x73, 0x12, 0x3d, 0x0a, 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, - 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, - 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, - 0x52, 0x0f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, - 0x73, 0x12, 0x19, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x48, 0x00, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x88, 0x01, 0x01, 0x42, 0x08, 0x0a, 0x06, - 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xa5, 0x04, 0x0a, 0x17, 0x53, 0x74, 0x61, 0x72, 0x74, + 0x6d, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x26, 0x0a, 0x0c, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0x80, 0x01, + 0x01, 0x52, 0x0b, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xaf, + 0x01, 0x0a, 0x19, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, + 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x29, 0x0a, 0x10, + 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x12, 0x3d, 0x0a, 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c, + 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x49, 
0x64, 0x65, + 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x1e, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0x80, 0x01, 0x01, 0x48, 0x00, 0x52, 0x05, 0x74, 0x6f, + 0x6b, 0x65, 0x6e, 0x88, 0x01, 0x01, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, + 0x22, 0xa5, 0x04, 0x0a, 0x17, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x12, 0x33, 0x0a, 0x15, + 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x61, 0x70, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, + 0x73, 0x12, 0x6d, 0x0a, 0x14, 0x68, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x5f, 0x70, + 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x3a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, - 0x65, 0x71, 0x12, 0x33, 0x0a, 0x15, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x14, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x12, 0x6d, 0x0a, 0x14, 0x68, 0x61, 0x6e, 0x64, 0x73, - 0x68, 0x61, 0x6b, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, - 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, - 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, - 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x13, 0x68, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, - 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, - 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, - 0x73, 0x12, 0x39, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, - 0x69, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, - 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0d, 0x6c, - 0x6f, 0x63, 0x61, 0x6c, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x3b, 0x0a, 0x0f, - 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, - 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0e, 0x72, 0x65, 0x6d, 0x6f, 0x74, - 0x65, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x40, 0x0a, 0x0c, 0x72, 0x70, 0x63, - 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x52, 0x70, 0x63, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0b, - 0x72, 0x70, 
0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x6d, - 0x61, 0x78, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x07, 0x20, + 0x65, 0x71, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, + 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x13, 0x68, 0x61, 0x6e, + 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, + 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x0e, 0x6c, + 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x45, + 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x45, 0x6e, + 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x3b, 0x0a, 0x0f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, + 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, + 0x69, 0x6e, 0x74, 0x52, 0x0e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x45, 0x6e, 0x64, 0x70, 0x6f, + 0x69, 0x6e, 0x74, 0x12, 0x40, 0x0a, 0x0c, 0x72, 0x70, 0x63, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x52, 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0b, 0x72, 0x70, 0x63, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x66, 0x72, 0x61, + 0x6d, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x6d, + 0x61, 0x78, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x1a, 0x6b, 0x0a, 0x18, 0x48, + 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, + 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x39, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, + 0x67, 0x63, 0x70, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, + 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x62, 0x0a, 0x17, 0x4e, 0x65, 0x78, 0x74, + 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x52, 0x65, 0x71, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x2c, + 0x0a, 0x12, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, + 0x79, 0x5f, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x10, 0x6e, 0x65, 0x74, 0x77, + 0x6f, 0x72, 0x6b, 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4d, 0x73, 0x22, 0xe5, 0x01, 0x0a, + 0x0d, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x71, 0x12, 0x46, + 0x0a, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 
0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, + 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x6e, 0x64, 0x73, + 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x46, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, + 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x48, + 0x00, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x37, + 0x0a, 0x04, 0x6e, 0x65, 0x78, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, + 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x4e, 0x65, 0x78, 0x74, 0x48, 0x61, 0x6e, 0x64, + 0x73, 0x68, 0x61, 0x6b, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x48, + 0x00, 0x52, 0x04, 0x6e, 0x65, 0x78, 0x74, 0x42, 0x0b, 0x0a, 0x09, 0x72, 0x65, 0x71, 0x5f, 0x6f, + 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x9a, 0x03, 0x0a, 0x10, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, + 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x31, 0x0a, 0x14, 0x61, 0x70, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x27, 0x0a, 0x0f, + 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x19, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x5f, 0x64, 0x61, 0x74, + 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6b, 0x65, 0x79, 0x44, 0x61, 0x74, 0x61, + 0x12, 0x37, 0x0a, 0x0d, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, + 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, + 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0c, 0x70, 0x65, 0x65, + 0x72, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x39, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, + 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65, + 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x12, 0x2a, 0x0a, 0x11, 0x6b, 0x65, 0x65, 0x70, 0x5f, 0x63, 0x68, 0x61, + 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0f, 0x6b, 0x65, 0x65, 0x70, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x4f, 0x70, 0x65, 0x6e, + 0x12, 0x49, 0x0a, 0x11, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x72, 0x70, 0x63, 0x5f, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x52, 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0f, 0x70, 0x65, 0x65, 0x72, + 0x52, 0x70, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x6d, + 0x61, 0x78, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x73, 
0x69, 0x7a, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x53, 0x69, 0x7a, - 0x65, 0x1a, 0x6b, 0x0a, 0x18, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, - 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, - 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, - 0x39, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, - 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, - 0x65, 0x72, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x62, - 0x0a, 0x17, 0x4e, 0x65, 0x78, 0x74, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x4d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, - 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, - 0x79, 0x74, 0x65, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f, - 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, - 0x52, 0x10, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, - 0x4d, 0x73, 0x22, 0xe5, 0x01, 0x0a, 0x0d, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, - 0x72, 0x52, 0x65, 0x71, 0x12, 0x46, 0x0a, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, - 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, - 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, - 0x74, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, - 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x46, 0x0a, 0x0c, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x53, 0x74, - 0x61, 0x72, 0x74, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, - 0x6b, 0x65, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, - 0x74, 0x61, 0x72, 0x74, 0x12, 0x37, 0x0a, 0x04, 0x6e, 0x65, 0x78, 0x74, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x4e, 0x65, - 0x78, 0x74, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x04, 0x6e, 0x65, 0x78, 0x74, 0x42, 0x0b, 0x0a, - 0x09, 0x72, 0x65, 0x71, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x9a, 0x03, 0x0a, 0x10, 0x48, - 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, - 0x31, 0x0a, 0x14, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x61, - 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, - 0x6f, 0x6c, 0x12, 0x27, 0x0a, 0x0f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, 0x63, - 0x6f, 0x72, 0x64, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x19, 0x0a, 
0x08, 0x6b, - 0x65, 0x79, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6b, - 0x65, 0x79, 0x44, 0x61, 0x74, 0x61, 0x12, 0x37, 0x0a, 0x0d, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, - 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, - 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, - 0x79, 0x52, 0x0c, 0x70, 0x65, 0x65, 0x72, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, - 0x39, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, - 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, - 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, - 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x2a, 0x0a, 0x11, 0x6b, 0x65, - 0x65, 0x70, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x6b, 0x65, 0x65, 0x70, 0x43, 0x68, 0x61, 0x6e, 0x6e, - 0x65, 0x6c, 0x4f, 0x70, 0x65, 0x6e, 0x12, 0x49, 0x0a, 0x11, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x72, - 0x70, 0x63, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x52, 0x70, 0x63, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, - 0x52, 0x0f, 0x70, 0x65, 0x65, 0x72, 0x52, 0x70, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x73, - 0x69, 0x7a, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x46, 0x72, - 0x61, 0x6d, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x22, 0x40, 0x0a, 0x10, 0x48, 0x61, 0x6e, 0x64, 0x73, - 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, - 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, - 0x18, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x22, 0xbe, 0x01, 0x0a, 0x0e, 0x48, 0x61, - 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1d, 0x0a, 0x0a, - 0x6f, 0x75, 0x74, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x09, 0x6f, 0x75, 0x74, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x62, - 0x79, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0d, 0x52, 0x0d, 0x62, 0x79, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, - 0x65, 0x64, 0x12, 0x32, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, - 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, - 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, - 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2a, 0x4a, 0x0a, 0x11, 0x48, 0x61, - 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, - 0x22, 0x0a, 
0x1e, 0x48, 0x41, 0x4e, 0x44, 0x53, 0x48, 0x41, 0x4b, 0x45, 0x5f, 0x50, 0x52, 0x4f, - 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, - 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x54, 0x4c, 0x53, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, - 0x41, 0x4c, 0x54, 0x53, 0x10, 0x02, 0x2a, 0x45, 0x0a, 0x0f, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, - 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x20, 0x0a, 0x1c, 0x4e, 0x45, 0x54, - 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x55, 0x4e, - 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x54, - 0x43, 0x50, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x55, 0x44, 0x50, 0x10, 0x02, 0x32, 0x5b, 0x0a, - 0x11, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x12, 0x46, 0x0a, 0x0b, 0x44, 0x6f, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, - 0x65, 0x12, 0x17, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, - 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x71, 0x1a, 0x18, 0x2e, 0x67, 0x72, 0x70, - 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, - 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x42, 0x6b, 0x0a, 0x15, 0x69, 0x6f, - 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x61, 0x6c, 0x74, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, - 0x6e, 0x61, 0x6c, 0x42, 0x0f, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, - 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x63, - 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x2f, 0x61, 0x6c, 0x74, 0x73, 0x2f, - 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, - 0x72, 0x70, 0x63, 0x5f, 0x67, 0x63, 0x70, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x65, 0x22, 0x40, 0x0a, 0x10, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x74, + 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, + 0x69, 0x6c, 0x73, 0x22, 0xbe, 0x01, 0x0a, 0x0e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, + 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1d, 0x0a, 0x0a, 0x6f, 0x75, 0x74, 0x5f, 0x66, 0x72, + 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x6f, 0x75, 0x74, 0x46, + 0x72, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x63, + 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0d, 0x62, + 0x79, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x12, 0x32, 0x0a, 0x06, + 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, + 0x65, 0x72, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x12, 0x32, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, + 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53, 
0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x2a, 0x4a, 0x0a, 0x11, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, + 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x22, 0x0a, 0x1e, 0x48, 0x41, 0x4e, + 0x44, 0x53, 0x48, 0x41, 0x4b, 0x45, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, + 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, + 0x03, 0x54, 0x4c, 0x53, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x41, 0x4c, 0x54, 0x53, 0x10, 0x02, + 0x2a, 0x45, 0x0a, 0x0f, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x6f, 0x6c, 0x12, 0x20, 0x0a, 0x1c, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x50, + 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, + 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x54, 0x43, 0x50, 0x10, 0x01, 0x12, 0x07, + 0x0a, 0x03, 0x55, 0x44, 0x50, 0x10, 0x02, 0x32, 0x5b, 0x0a, 0x11, 0x48, 0x61, 0x6e, 0x64, 0x73, + 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x46, 0x0a, 0x0b, + 0x44, 0x6f, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x12, 0x17, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, + 0x72, 0x52, 0x65, 0x71, 0x1a, 0x18, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, + 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, + 0x28, 0x01, 0x30, 0x01, 0x42, 0x6b, 0x0a, 0x15, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, + 0x61, 0x6c, 0x74, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x42, 0x0f, 0x48, + 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x3f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, + 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x61, 0x6c, 0x73, 0x2f, 0x61, 0x6c, 0x74, 0x73, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, + 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x67, 0x63, + 0x70, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1257,7 +1258,7 @@ func file_grpc_gcp_handshaker_proto_rawDescGZIP() []byte { var file_grpc_gcp_handshaker_proto_enumTypes = make([]protoimpl.EnumInfo, 2) var file_grpc_gcp_handshaker_proto_msgTypes = make([]protoimpl.MessageInfo, 12) -var file_grpc_gcp_handshaker_proto_goTypes = []interface{}{ +var file_grpc_gcp_handshaker_proto_goTypes = []any{ (HandshakeProtocol)(0), // 0: grpc.gcp.HandshakeProtocol (NetworkProtocol)(0), // 1: grpc.gcp.NetworkProtocol (*Endpoint)(nil), // 2: grpc.gcp.Endpoint @@ -1313,7 +1314,7 @@ func file_grpc_gcp_handshaker_proto_init() { } file_grpc_gcp_transport_security_common_proto_init() if !protoimpl.UnsafeEnabled { - file_grpc_gcp_handshaker_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_grpc_gcp_handshaker_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Endpoint); i { case 0: return &v.state @@ -1325,7 +1326,7 @@ func file_grpc_gcp_handshaker_proto_init() { return nil } } - file_grpc_gcp_handshaker_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_grpc_gcp_handshaker_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*Identity); i { case 0: return &v.state @@ -1337,7 +1338,7 @@ func file_grpc_gcp_handshaker_proto_init() { return 
nil } } - file_grpc_gcp_handshaker_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_grpc_gcp_handshaker_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*StartClientHandshakeReq); i { case 0: return &v.state @@ -1349,7 +1350,7 @@ func file_grpc_gcp_handshaker_proto_init() { return nil } } - file_grpc_gcp_handshaker_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_grpc_gcp_handshaker_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*ServerHandshakeParameters); i { case 0: return &v.state @@ -1361,7 +1362,7 @@ func file_grpc_gcp_handshaker_proto_init() { return nil } } - file_grpc_gcp_handshaker_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_grpc_gcp_handshaker_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*StartServerHandshakeReq); i { case 0: return &v.state @@ -1373,7 +1374,7 @@ func file_grpc_gcp_handshaker_proto_init() { return nil } } - file_grpc_gcp_handshaker_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_grpc_gcp_handshaker_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*NextHandshakeMessageReq); i { case 0: return &v.state @@ -1385,7 +1386,7 @@ func file_grpc_gcp_handshaker_proto_init() { return nil } } - file_grpc_gcp_handshaker_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_grpc_gcp_handshaker_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*HandshakerReq); i { case 0: return &v.state @@ -1397,7 +1398,7 @@ func file_grpc_gcp_handshaker_proto_init() { return nil } } - file_grpc_gcp_handshaker_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_grpc_gcp_handshaker_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*HandshakerResult); i { case 0: return &v.state @@ -1409,7 +1410,7 @@ func file_grpc_gcp_handshaker_proto_init() { return nil } } - file_grpc_gcp_handshaker_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_grpc_gcp_handshaker_proto_msgTypes[8].Exporter = func(v any, i int) any { switch v := v.(*HandshakerStatus); i { case 0: return &v.state @@ -1421,7 +1422,7 @@ func file_grpc_gcp_handshaker_proto_init() { return nil } } - file_grpc_gcp_handshaker_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_grpc_gcp_handshaker_proto_msgTypes[9].Exporter = func(v any, i int) any { switch v := v.(*HandshakerResp); i { case 0: return &v.state @@ -1434,12 +1435,12 @@ func file_grpc_gcp_handshaker_proto_init() { } } } - file_grpc_gcp_handshaker_proto_msgTypes[1].OneofWrappers = []interface{}{ + file_grpc_gcp_handshaker_proto_msgTypes[1].OneofWrappers = []any{ (*Identity_ServiceAccount)(nil), (*Identity_Hostname)(nil), } - file_grpc_gcp_handshaker_proto_msgTypes[3].OneofWrappers = []interface{}{} - file_grpc_gcp_handshaker_proto_msgTypes[6].OneofWrappers = []interface{}{ + file_grpc_gcp_handshaker_proto_msgTypes[3].OneofWrappers = []any{} + file_grpc_gcp_handshaker_proto_msgTypes[6].OneofWrappers = []any{ (*HandshakerReq_ClientStart)(nil), (*HandshakerReq_ServerStart)(nil), (*HandshakerReq_Next)(nil), diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go index 358074b6494..34443b1d2dc 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go +++ 
b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.4.0 -// - protoc v4.25.2 +// - protoc-gen-go-grpc v1.5.1 +// - protoc v5.27.1 // source: grpc/gcp/handshaker.proto package grpc_gcp @@ -75,7 +75,7 @@ type HandshakerService_DoHandshakeClient = grpc.BidiStreamingClient[HandshakerRe // HandshakerServiceServer is the server API for HandshakerService service. // All implementations must embed UnimplementedHandshakerServiceServer -// for forward compatibility +// for forward compatibility. type HandshakerServiceServer interface { // Handshaker service accepts a stream of handshaker request, returning a // stream of handshaker response. Client is expected to send exactly one @@ -87,14 +87,18 @@ type HandshakerServiceServer interface { mustEmbedUnimplementedHandshakerServiceServer() } -// UnimplementedHandshakerServiceServer must be embedded to have forward compatible implementations. -type UnimplementedHandshakerServiceServer struct { -} +// UnimplementedHandshakerServiceServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedHandshakerServiceServer struct{} func (UnimplementedHandshakerServiceServer) DoHandshake(grpc.BidiStreamingServer[HandshakerReq, HandshakerResp]) error { return status.Errorf(codes.Unimplemented, "method DoHandshake not implemented") } func (UnimplementedHandshakerServiceServer) mustEmbedUnimplementedHandshakerServiceServer() {} +func (UnimplementedHandshakerServiceServer) testEmbeddedByValue() {} // UnsafeHandshakerServiceServer may be embedded to opt out of forward compatibility for this service. // Use of this interface is not recommended, as added methods to HandshakerServiceServer will @@ -104,6 +108,13 @@ type UnsafeHandshakerServiceServer interface { } func RegisterHandshakerServiceServer(s grpc.ServiceRegistrar, srv HandshakerServiceServer) { + // If the following call panics, it indicates UnimplementedHandshakerServiceServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. + if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } s.RegisterService(&HandshakerService_ServiceDesc, srv) } diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go index 18cc9cfbd59..6956c14f6a9 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.34.1 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v5.27.1 // source: grpc/gcp/transport_security_common.proto package grpc_gcp @@ -253,7 +253,7 @@ func file_grpc_gcp_transport_security_common_proto_rawDescGZIP() []byte { var file_grpc_gcp_transport_security_common_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_grpc_gcp_transport_security_common_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_grpc_gcp_transport_security_common_proto_goTypes = []interface{}{ +var file_grpc_gcp_transport_security_common_proto_goTypes = []any{ (SecurityLevel)(0), // 0: grpc.gcp.SecurityLevel (*RpcProtocolVersions)(nil), // 1: grpc.gcp.RpcProtocolVersions (*RpcProtocolVersions_Version)(nil), // 2: grpc.gcp.RpcProtocolVersions.Version @@ -274,7 +274,7 @@ func file_grpc_gcp_transport_security_common_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_grpc_gcp_transport_security_common_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_grpc_gcp_transport_security_common_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*RpcProtocolVersions); i { case 0: return &v.state @@ -286,7 +286,7 @@ func file_grpc_gcp_transport_security_common_proto_init() { return nil } } - file_grpc_gcp_transport_security_common_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_grpc_gcp_transport_security_common_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*RpcProtocolVersions_Version); i { case 0: return &v.state diff --git a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go index 82bee1443bf..4c805c64462 100644 --- a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go +++ b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go @@ -40,7 +40,7 @@ func NewCredentials() credentials.TransportCredentials { // NoSecurity. type insecureTC struct{} -func (insecureTC) ClientHandshake(ctx context.Context, _ string, conn net.Conn) (net.Conn, credentials.AuthInfo, error) { +func (insecureTC) ClientHandshake(_ context.Context, _ string, conn net.Conn) (net.Conn, credentials.AuthInfo, error) { return conn, info{credentials.CommonAuthInfo{SecurityLevel: credentials.NoSecurity}}, nil } diff --git a/vendor/google.golang.org/grpc/credentials/oauth/oauth.go b/vendor/google.golang.org/grpc/credentials/oauth/oauth.go index d475cbc0894..328b838ed1f 100644 --- a/vendor/google.golang.org/grpc/credentials/oauth/oauth.go +++ b/vendor/google.golang.org/grpc/credentials/oauth/oauth.go @@ -38,7 +38,7 @@ type TokenSource struct { } // GetRequestMetadata gets the request metadata as a map from a TokenSource. 
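(Editorial aside: the oauth hunks that follow only blank out unused parameter names; the wiring is unchanged. As a reminder of how this type is consumed, a minimal hedged sketch — the helper name is ours, not from this patch:)

package main

import (
	"golang.org/x/oauth2"

	"google.golang.org/grpc"
	grpcoauth "google.golang.org/grpc/credentials/oauth"
)

// withOAuth attaches a token source as per-RPC credentials; each call derives
// its metadata from TokenSource.GetRequestMetadata, whose uri parameter is
// unused (hence the rename to _ in the hunk below).
func withOAuth(ts oauth2.TokenSource) grpc.DialOption {
	return grpc.WithPerRPCCredentials(grpcoauth.TokenSource{TokenSource: ts})
}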
-func (ts TokenSource) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { +func (ts TokenSource) GetRequestMetadata(ctx context.Context, _ ...string) (map[string]string, error) { token, err := ts.Token() if err != nil { return nil, err @@ -127,7 +127,7 @@ func NewOauthAccess(token *oauth2.Token) credentials.PerRPCCredentials { return oauthAccess{token: *token} } -func (oa oauthAccess) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { +func (oa oauthAccess) GetRequestMetadata(ctx context.Context, _ ...string) (map[string]string, error) { ri, _ := credentials.RequestInfoFromContext(ctx) if err := credentials.CheckSecurityLevel(ri.AuthInfo, credentials.PrivacyAndIntegrity); err != nil { return nil, fmt.Errorf("unable to transfer oauthAccess PerRPCCredentials: %v", err) @@ -156,7 +156,7 @@ type serviceAccount struct { t *oauth2.Token } -func (s *serviceAccount) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { +func (s *serviceAccount) GetRequestMetadata(ctx context.Context, _ ...string) (map[string]string, error) { s.mu.Lock() defer s.mu.Unlock() if !s.t.Valid() { diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go index 4114358545e..e163a473df9 100644 --- a/vendor/google.golang.org/grpc/credentials/tls.go +++ b/vendor/google.golang.org/grpc/credentials/tls.go @@ -200,25 +200,40 @@ var tls12ForbiddenCipherSuites = map[uint16]struct{}{ // NewTLS uses c to construct a TransportCredentials based on TLS. func NewTLS(c *tls.Config) TransportCredentials { - tc := &tlsCreds{credinternal.CloneTLSConfig(c)} - tc.config.NextProtos = credinternal.AppendH2ToNextProtos(tc.config.NextProtos) + config := applyDefaults(c) + if config.GetConfigForClient != nil { + oldFn := config.GetConfigForClient + config.GetConfigForClient = func(hello *tls.ClientHelloInfo) (*tls.Config, error) { + cfgForClient, err := oldFn(hello) + if err != nil || cfgForClient == nil { + return cfgForClient, err + } + return applyDefaults(cfgForClient), nil + } + } + return &tlsCreds{config: config} +} + +func applyDefaults(c *tls.Config) *tls.Config { + config := credinternal.CloneTLSConfig(c) + config.NextProtos = credinternal.AppendH2ToNextProtos(config.NextProtos) // If the user did not configure a MinVersion and did not configure a // MaxVersion < 1.2, use MinVersion=1.2, which is required by // https://datatracker.ietf.org/doc/html/rfc7540#section-9.2 - if tc.config.MinVersion == 0 && (tc.config.MaxVersion == 0 || tc.config.MaxVersion >= tls.VersionTLS12) { - tc.config.MinVersion = tls.VersionTLS12 + if config.MinVersion == 0 && (config.MaxVersion == 0 || config.MaxVersion >= tls.VersionTLS12) { + config.MinVersion = tls.VersionTLS12 } // If the user did not configure CipherSuites, use all "secure" cipher // suites reported by the TLS package, but remove some explicitly forbidden // by https://datatracker.ietf.org/doc/html/rfc7540#appendix-A - if tc.config.CipherSuites == nil { + if config.CipherSuites == nil { for _, cs := range tls.CipherSuites() { if _, ok := tls12ForbiddenCipherSuites[cs.ID]; !ok { - tc.config.CipherSuites = append(tc.config.CipherSuites, cs.ID) + config.CipherSuites = append(config.CipherSuites, cs.ID) } } } - return tc + return config } // NewClientTLSFromCert constructs TLS credentials from the provided root diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index f5453d48a53..518692c3afb 100644 
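(Editorial aside: one behavioral consequence of the tls.go hunk above is easy to miss. A server config returned from a user-supplied GetConfigForClient callback used to bypass gRPC's TLS defaults; it is now routed through applyDefaults as well, picking up the h2 ALPN entry and the TLS 1.2 minimum-version floor. A hedged sketch of the affected pattern — helper and variable names are illustrative:)

package main

import (
	"crypto/tls"

	"google.golang.org/grpc/credentials"
)

// buildServerCreds returns transport credentials whose per-client configs,
// after this patch, also receive gRPC's defaults (h2 in NextProtos and
// MinVersion TLS 1.2) instead of being used verbatim.
func buildServerCreds(certs []tls.Certificate) credentials.TransportCredentials {
	return credentials.NewTLS(&tls.Config{
		GetConfigForClient: func(*tls.ClientHelloInfo) (*tls.Config, error) {
			return &tls.Config{Certificates: certs}, nil
		},
	})
}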
--- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -33,6 +33,7 @@ import ( "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/mem" "google.golang.org/grpc/resolver" "google.golang.org/grpc/stats" ) @@ -60,7 +61,7 @@ func init() { internal.WithBinaryLogger = withBinaryLogger internal.JoinDialOptions = newJoinDialOption internal.DisableGlobalDialOptions = newDisableGlobalDialOptions - internal.WithRecvBufferPool = withRecvBufferPool + internal.WithBufferPool = withBufferPool } // dialOptions configure a Dial call. dialOptions are set by the DialOption @@ -92,7 +93,6 @@ type dialOptions struct { defaultServiceConfigRawJSON *string resolvers []resolver.Builder idleTimeout time.Duration - recvBufferPool SharedBufferPool defaultScheme string maxCallAttempts int } @@ -436,7 +436,7 @@ func WithTimeout(d time.Duration) DialOption { // option to true from the Control field. For a concrete example of how to do // this, see internal.NetDialerWithTCPKeepalive(). // -// For more information, please see [issue 23459] in the Go github repo. +// For more information, please see [issue 23459] in the Go GitHub repo. // // [issue 23459]: https://github.com/golang/go/issues/23459 func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption { @@ -518,6 +518,8 @@ func WithUserAgent(s string) DialOption { // WithKeepaliveParams returns a DialOption that specifies keepalive parameters // for the client transport. +// +// Keepalive is disabled by default. func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption { if kp.Time < internal.KeepaliveMinPingTime { logger.Warningf("Adjusting keepalive ping interval to minimum period of %v", internal.KeepaliveMinPingTime) @@ -677,11 +679,11 @@ func defaultDialOptions() dialOptions { WriteBufferSize: defaultWriteBufSize, UseProxy: true, UserAgent: grpcUA, + BufferPool: mem.DefaultBufferPool(), }, bs: internalbackoff.DefaultExponential, healthCheckFunc: internal.HealthCheckFunc, idleTimeout: 30 * time.Minute, - recvBufferPool: nopBufferPool{}, defaultScheme: "dns", maxCallAttempts: defaultMaxCallAttempts, } @@ -758,25 +760,8 @@ func WithMaxCallAttempts(n int) DialOption { }) } -// WithRecvBufferPool returns a DialOption that configures the ClientConn -// to use the provided shared buffer pool for parsing incoming messages. Depending -// on the application's workload, this could result in reduced memory allocation. -// -// If you are unsure about how to implement a memory pool but want to utilize one, -// begin with grpc.NewSharedBufferPool. -// -// Note: The shared buffer pool feature will not be active if any of the following -// options are used: WithStatsHandler, EnableTracing, or binary logging. In such -// cases, the shared buffer pool will be ignored. -// -// Deprecated: use experimental.WithRecvBufferPool instead. Will be deleted in -// v1.60.0 or later. 
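(Editorial aside: the dialoptions.go hunk above also documents that client keepalive is disabled by default. A hedged usage sketch — target handling and durations are illustrative; values below the internal minimum are raised with a warning, as the Warningf call above shows:)

package main

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/keepalive"
)

// dial opts in to client keepalive: ping after 30s of inactivity and drop
// the connection if no acknowledgement arrives within 10s.
func dial(target string) (*grpc.ClientConn, error) {
	return grpc.NewClient(target,
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithKeepaliveParams(keepalive.ClientParameters{
			Time:    30 * time.Second,
			Timeout: 10 * time.Second,
		}),
	)
}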
-func WithRecvBufferPool(bufferPool SharedBufferPool) DialOption { - return withRecvBufferPool(bufferPool) -} - -func withRecvBufferPool(bufferPool SharedBufferPool) DialOption { +func withBufferPool(bufferPool mem.BufferPool) DialOption { return newFuncDialOption(func(o *dialOptions) { - o.recvBufferPool = bufferPool + o.copts.BufferPool = bufferPool }) } diff --git a/vendor/google.golang.org/grpc/doc.go b/vendor/google.golang.org/grpc/doc.go index 0022859ad74..e7b532b6f80 100644 --- a/vendor/google.golang.org/grpc/doc.go +++ b/vendor/google.golang.org/grpc/doc.go @@ -16,7 +16,7 @@ * */ -//go:generate ./regenerate.sh +//go:generate ./scripts/regenerate.sh /* Package grpc implements an RPC system called gRPC. diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go index 5ebf88d7147..11d0ae142c4 100644 --- a/vendor/google.golang.org/grpc/encoding/encoding.go +++ b/vendor/google.golang.org/grpc/encoding/encoding.go @@ -94,7 +94,7 @@ type Codec interface { Name() string } -var registeredCodecs = make(map[string]Codec) +var registeredCodecs = make(map[string]any) // RegisterCodec registers the provided Codec for use with all gRPC clients and // servers. @@ -126,5 +126,6 @@ func RegisterCodec(codec Codec) { // // The content-subtype is expected to be lowercase. func GetCodec(contentSubtype string) Codec { - return registeredCodecs[contentSubtype] + c, _ := registeredCodecs[contentSubtype].(Codec) + return c } diff --git a/vendor/google.golang.org/grpc/encoding/encoding_v2.go b/vendor/google.golang.org/grpc/encoding/encoding_v2.go new file mode 100644 index 00000000000..074c5e234a7 --- /dev/null +++ b/vendor/google.golang.org/grpc/encoding/encoding_v2.go @@ -0,0 +1,81 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package encoding + +import ( + "strings" + + "google.golang.org/grpc/mem" +) + +// CodecV2 defines the interface gRPC uses to encode and decode messages. Note +// that implementations of this interface must be thread safe; a CodecV2's +// methods can be called from concurrent goroutines. +type CodecV2 interface { + // Marshal returns the wire format of v. The buffers in the returned + // [mem.BufferSlice] must have at least one reference each, which will be freed + // by gRPC when they are no longer needed. + Marshal(v any) (out mem.BufferSlice, err error) + // Unmarshal parses the wire format into v. Note that data will be freed as soon + // as this function returns. If the codec wishes to guarantee access to the data + // after this function, it must take its own reference that it frees when it is + // no longer needed. + Unmarshal(data mem.BufferSlice, v any) error + // Name returns the name of the Codec implementation. The returned string + // will be used as part of content type in transmission. The result must be + // static; the result cannot change between calls. 
+ Name() string +} + +// RegisterCodecV2 registers the provided CodecV2 for use with all gRPC clients and +// servers. +// +// The CodecV2 will be stored and looked up by result of its Name() method, which +// should match the content-subtype of the encoding handled by the CodecV2. This +// is case-insensitive, and is stored and looked up as lowercase. If the +// result of calling Name() is an empty string, RegisterCodecV2 will panic. See +// Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +// +// If both a Codec and CodecV2 are registered with the same name, the CodecV2 +// will be used. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple Codecs are +// registered with the same name, the one registered last will take effect. +func RegisterCodecV2(codec CodecV2) { + if codec == nil { + panic("cannot register a nil CodecV2") + } + if codec.Name() == "" { + panic("cannot register CodecV2 with empty string result for Name()") + } + contentSubtype := strings.ToLower(codec.Name()) + registeredCodecs[contentSubtype] = codec +} + +// GetCodecV2 gets a registered CodecV2 by content-subtype, or nil if no CodecV2 is +// registered for the content-subtype. +// +// The content-subtype is expected to be lowercase. +func GetCodecV2(contentSubtype string) CodecV2 { + c, _ := registeredCodecs[contentSubtype].(CodecV2) + return c +} diff --git a/vendor/google.golang.org/grpc/encoding/proto/proto.go b/vendor/google.golang.org/grpc/encoding/proto/proto.go index 66d5cdf03ec..ceec319dd2f 100644 --- a/vendor/google.golang.org/grpc/encoding/proto/proto.go +++ b/vendor/google.golang.org/grpc/encoding/proto/proto.go @@ -1,6 +1,6 @@ /* * - * Copyright 2018 gRPC authors. + * Copyright 2024 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -24,6 +24,7 @@ import ( "fmt" "google.golang.org/grpc/encoding" + "google.golang.org/grpc/mem" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/protoadapt" ) @@ -32,28 +33,51 @@ import ( const Name = "proto" func init() { - encoding.RegisterCodec(codec{}) + encoding.RegisterCodecV2(&codecV2{}) } -// codec is a Codec implementation with protobuf. It is the default codec for gRPC. -type codec struct{} +// codec is a CodecV2 implementation with protobuf. It is the default codec for +// gRPC. 
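(Editorial aside: to make the registration path above concrete before the proto implementation continues, here is a hedged sketch of a third-party CodecV2 — not part of this patch. It uses only helpers that appear in these hunks: RegisterCodecV2, mem.SliceBuffer, MaterializeToBuffer, ReadOnlyData, and Free.)

package jsoncodec

import (
	"encoding/json"

	"google.golang.org/grpc/encoding"
	"google.golang.org/grpc/mem"
)

// codec marshals request/response values as JSON.
type codec struct{}

func (codec) Marshal(v any) (mem.BufferSlice, error) {
	b, err := json.Marshal(v)
	if err != nil {
		return nil, err
	}
	// mem.SliceBuffer wraps a plain []byte, mirroring the small-message
	// path of the proto codec in this patch.
	return mem.BufferSlice{mem.SliceBuffer(b)}, nil
}

func (codec) Unmarshal(data mem.BufferSlice, v any) error {
	// Flatten the (possibly multi-buffer) slice and free it when done, as
	// the proto codec below does; data itself is freed once this returns.
	buf := data.MaterializeToBuffer(mem.DefaultBufferPool())
	defer buf.Free()
	return json.Unmarshal(buf.ReadOnlyData(), v)
}

// Name's result is lowercased and used as the lookup key by GetCodecV2.
func (codec) Name() string { return "json" }

func init() {
	// Registration must happen at init time; RegisterCodecV2 is not
	// thread-safe and panics on a nil codec or an empty name.
	encoding.RegisterCodecV2(codec{})
}

(A client could then opt in per call with grpc.CallContentSubtype("json"), assuming the peer has the same codec registered.)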
+type codecV2 struct{} -func (codec) Marshal(v any) ([]byte, error) { +func (c *codecV2) Marshal(v any) (data mem.BufferSlice, err error) { vv := messageV2Of(v) if vv == nil { - return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v) + return nil, fmt.Errorf("proto: failed to marshal, message is %T, want proto.Message", v) } - return proto.Marshal(vv) + size := proto.Size(vv) + if mem.IsBelowBufferPoolingThreshold(size) { + buf, err := proto.Marshal(vv) + if err != nil { + return nil, err + } + data = append(data, mem.SliceBuffer(buf)) + } else { + pool := mem.DefaultBufferPool() + buf := pool.Get(size) + if _, err := (proto.MarshalOptions{}).MarshalAppend((*buf)[:0], vv); err != nil { + pool.Put(buf) + return nil, err + } + data = append(data, mem.NewBuffer(buf, pool)) + } + + return data, nil } -func (codec) Unmarshal(data []byte, v any) error { +func (c *codecV2) Unmarshal(data mem.BufferSlice, v any) (err error) { vv := messageV2Of(v) if vv == nil { return fmt.Errorf("failed to unmarshal, message is %T, want proto.Message", v) } - return proto.Unmarshal(data, vv) + buf := data.MaterializeToBuffer(mem.DefaultBufferPool()) + defer buf.Free() + // TODO: Upgrade proto.Unmarshal to support mem.BufferSlice. Right now, it's not + // really possible without a major overhaul of the proto package, but the + // vtprotobuf library may be able to support this. + return proto.Unmarshal(buf.ReadOnlyData(), vv) } func messageV2Of(v any) proto.Message { @@ -67,6 +91,6 @@ func messageV2Of(v any) proto.Message { return nil } -func (codec) Name() string { +func (c *codecV2) Name() string { return Name } diff --git a/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go new file mode 100644 index 00000000000..1d827dd5d9d --- /dev/null +++ b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go @@ -0,0 +1,269 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package stats + +import ( + "maps" + + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" +) + +func init() { + internal.SnapshotMetricRegistryForTesting = snapshotMetricsRegistryForTesting +} + +var logger = grpclog.Component("metrics-registry") + +// DefaultMetrics are the default metrics registered through global metrics +// registry. This is written to at initialization time only, and is read only +// after initialization. +var DefaultMetrics = NewMetrics() + +// MetricDescriptor is the data for a registered metric. +type MetricDescriptor struct { + // The name of this metric. This name must be unique across the whole binary + // (including any per call metrics). See + // https://github.com/grpc/proposal/blob/master/A79-non-per-call-metrics-architecture.md#metric-instrument-naming-conventions + // for metric naming conventions. + Name Metric + // The description of this metric. + Description string + // The unit (e.g. entries, seconds) of this metric. 
+ Unit string + // The required label keys for this metric. These are intended to be + // attached to metrics emitted from a stats handler. + Labels []string + // The optional label keys for this metric. These are intended to be attached + // to metrics emitted from a stats handler if configured. + OptionalLabels []string + // Whether this metric is on by default. + Default bool + // The type of metric. This is set by the metric registry, and not intended + // to be set by a component registering a metric. + Type MetricType + // Bounds are the bounds of this metric. This only applies to histogram + // metrics. If unset or set with length 0, stats handlers will fall back to + // default bounds. + Bounds []float64 +} + +// MetricType is the type of metric. +type MetricType int + +// Type of metric supported by this instrument registry. +const ( + MetricTypeIntCount MetricType = iota + MetricTypeFloatCount + MetricTypeIntHisto + MetricTypeFloatHisto + MetricTypeIntGauge +) + +// Int64CountHandle is a typed handle for an int count metric. This handle +// is passed at the recording point in order to know which metric to record +// on. +type Int64CountHandle MetricDescriptor + +// Descriptor returns the int64 count handle typecast to a pointer to a +// MetricDescriptor. +func (h *Int64CountHandle) Descriptor() *MetricDescriptor { + return (*MetricDescriptor)(h) +} + +// Record records the int64 count value on the metrics recorder provided. +func (h *Int64CountHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) { + recorder.RecordInt64Count(h, incr, labels...) +} + +// Float64CountHandle is a typed handle for a float count metric. This handle is +// passed at the recording point in order to know which metric to record on. +type Float64CountHandle MetricDescriptor + +// Descriptor returns the float64 count handle typecast to a pointer to a +// MetricDescriptor. +func (h *Float64CountHandle) Descriptor() *MetricDescriptor { + return (*MetricDescriptor)(h) +} + +// Record records the float64 count value on the metrics recorder provided. +func (h *Float64CountHandle) Record(recorder MetricsRecorder, incr float64, labels ...string) { + recorder.RecordFloat64Count(h, incr, labels...) +} + +// Int64HistoHandle is a typed handle for an int histogram metric. This handle +// is passed at the recording point in order to know which metric to record on. +type Int64HistoHandle MetricDescriptor + +// Descriptor returns the int64 histo handle typecast to a pointer to a +// MetricDescriptor. +func (h *Int64HistoHandle) Descriptor() *MetricDescriptor { + return (*MetricDescriptor)(h) +} + +// Record records the int64 histo value on the metrics recorder provided. +func (h *Int64HistoHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) { + recorder.RecordInt64Histo(h, incr, labels...) +} + +// Float64HistoHandle is a typed handle for a float histogram metric. This +// handle is passed at the recording point in order to know which metric to +// record on. +type Float64HistoHandle MetricDescriptor + +// Descriptor returns the float64 histo handle typecast to a pointer to a +// MetricDescriptor. +func (h *Float64HistoHandle) Descriptor() *MetricDescriptor { + return (*MetricDescriptor)(h) +} + +// Record records the float64 histo value on the metrics recorder provided. +func (h *Float64HistoHandle) Record(recorder MetricsRecorder, incr float64, labels ...string) { + recorder.RecordFloat64Histo(h, incr, labels...) +} + +// Int64GaugeHandle is a typed handle for an int gauge metric.
This handle is +// passed at the recording point in order to know which metric to record on. +type Int64GaugeHandle MetricDescriptor + +// Descriptor returns the int64 gauge handle typecast to a pointer to a +// MetricDescriptor. +func (h *Int64GaugeHandle) Descriptor() *MetricDescriptor { + return (*MetricDescriptor)(h) +} + +// Record records the int64 gauge value on the metrics recorder provided. +func (h *Int64GaugeHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) { + recorder.RecordInt64Gauge(h, incr, labels...) +} + +// registeredMetrics are the registered metric descriptor names. +var registeredMetrics = make(map[Metric]bool) + +// metricsRegistry contains all of the registered metrics. +// +// This is written to only at init time, and read only after that. +var metricsRegistry = make(map[Metric]*MetricDescriptor) + +// DescriptorForMetric returns the MetricDescriptor from the global registry. +// +// Returns nil if the MetricDescriptor is not present. +func DescriptorForMetric(metric Metric) *MetricDescriptor { + return metricsRegistry[metric] +} + +func registerMetric(name Metric, def bool) { + if registeredMetrics[name] { + logger.Fatalf("metric %v already registered", name) + } + registeredMetrics[name] = true + if def { + DefaultMetrics = DefaultMetrics.Add(name) + } +} + +// RegisterInt64Count registers the metric description onto the global registry. +// It returns a typed handle to use to record data. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple metrics are +// registered with the same name, this function will panic. +func RegisterInt64Count(descriptor MetricDescriptor) *Int64CountHandle { + registerMetric(descriptor.Name, descriptor.Default) + descriptor.Type = MetricTypeIntCount + descPtr := &descriptor + metricsRegistry[descriptor.Name] = descPtr + return (*Int64CountHandle)(descPtr) +} + +// RegisterFloat64Count registers the metric description onto the global +// registry. It returns a typed handle to use to record data. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple metrics are +// registered with the same name, this function will panic. +func RegisterFloat64Count(descriptor MetricDescriptor) *Float64CountHandle { + registerMetric(descriptor.Name, descriptor.Default) + descriptor.Type = MetricTypeFloatCount + descPtr := &descriptor + metricsRegistry[descriptor.Name] = descPtr + return (*Float64CountHandle)(descPtr) +} + +// RegisterInt64Histo registers the metric description onto the global registry. +// It returns a typed handle to use to record data. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple metrics are +// registered with the same name, this function will panic. +func RegisterInt64Histo(descriptor MetricDescriptor) *Int64HistoHandle { + registerMetric(descriptor.Name, descriptor.Default) + descriptor.Type = MetricTypeIntHisto + descPtr := &descriptor + metricsRegistry[descriptor.Name] = descPtr + return (*Int64HistoHandle)(descPtr) +} +
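(Editorial aside: the handle-based API above is easiest to see end to end. A hedged sketch — metric name and label are invented for illustration — of a component registering an instrument at init time and recording on it from a stats-handler-supplied recorder:)

package example

import "google.golang.org/grpc/experimental/stats"

// pickCounts is registered at init time, as the NOTEs above require; the
// Register functions are not thread-safe and panic on duplicate names.
var pickCounts = stats.RegisterInt64Count(stats.MetricDescriptor{
	Name:        "grpc.example.picks", // hypothetical, must be unique binary-wide
	Description: "Number of picks performed.",
	Unit:        "pick",
	Labels:      []string{"grpc.target"},
	Default:     false, // opt-in; not added to DefaultMetrics
})

// recordPick records on whatever MetricsRecorder the stats handler supplies;
// label values are positional and must match the descriptor's label keys.
func recordPick(recorder stats.MetricsRecorder, target string) {
	pickCounts.Record(recorder, 1, target)
}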
+// RegisterFloat64Histo registers the metric description onto the global +// registry. It returns a typed handle to use to record data. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple metrics are +// registered with the same name, this function will panic. +func RegisterFloat64Histo(descriptor MetricDescriptor) *Float64HistoHandle { + registerMetric(descriptor.Name, descriptor.Default) + descriptor.Type = MetricTypeFloatHisto + descPtr := &descriptor + metricsRegistry[descriptor.Name] = descPtr + return (*Float64HistoHandle)(descPtr) +} + +// RegisterInt64Gauge registers the metric description onto the global registry. +// It returns a typed handle to use to record data. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple metrics are +// registered with the same name, this function will panic. +func RegisterInt64Gauge(descriptor MetricDescriptor) *Int64GaugeHandle { + registerMetric(descriptor.Name, descriptor.Default) + descriptor.Type = MetricTypeIntGauge + descPtr := &descriptor + metricsRegistry[descriptor.Name] = descPtr + return (*Int64GaugeHandle)(descPtr) +} + +// snapshotMetricsRegistryForTesting snapshots the global data of the metrics +// registry. Returns a cleanup function that sets the metrics registry to its +// original state. +func snapshotMetricsRegistryForTesting() func() { + oldDefaultMetrics := DefaultMetrics + oldRegisteredMetrics := registeredMetrics + oldMetricsRegistry := metricsRegistry + + registeredMetrics = make(map[Metric]bool) + metricsRegistry = make(map[Metric]*MetricDescriptor) + // Seed the fresh maps from the saved ones so the snapshot starts from the + // current registry state (copying a map onto itself is a no-op). + maps.Copy(registeredMetrics, oldRegisteredMetrics) + maps.Copy(metricsRegistry, oldMetricsRegistry) + + return func() { + DefaultMetrics = oldDefaultMetrics + registeredMetrics = oldRegisteredMetrics + metricsRegistry = oldMetricsRegistry + } +} diff --git a/vendor/google.golang.org/grpc/experimental/stats/metrics.go b/vendor/google.golang.org/grpc/experimental/stats/metrics.go new file mode 100644 index 00000000000..3221f7a633a --- /dev/null +++ b/vendor/google.golang.org/grpc/experimental/stats/metrics.go @@ -0,0 +1,114 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package stats contains experimental metrics/stats APIs. +package stats + +import "maps" + +// MetricsRecorder records on metrics derived from the metric registry. +type MetricsRecorder interface { + // RecordInt64Count records the measurement alongside labels on the int + // count associated with the provided handle. + RecordInt64Count(handle *Int64CountHandle, incr int64, labels ...string) + // RecordFloat64Count records the measurement alongside labels on the float + // count associated with the provided handle. + RecordFloat64Count(handle *Float64CountHandle, incr float64, labels ...string) + // RecordInt64Histo records the measurement alongside labels on the int + // histo associated with the provided handle. + RecordInt64Histo(handle *Int64HistoHandle, incr int64, labels ...string) + // RecordFloat64Histo records the measurement alongside labels on the float + // histo associated with the provided handle.
+ RecordFloat64Histo(handle *Float64HistoHandle, incr float64, labels ...string) + // RecordInt64Gauge records the measurement alongside labels on the int + // gauge associated with the provided handle. + RecordInt64Gauge(handle *Int64GaugeHandle, incr int64, labels ...string) +} + +// Metric is an identifier for a metric. +type Metric string + +// Metrics is a set of metrics to record. Once created, Metrics is immutable, +// however Add and Remove can make copies with specific metrics added or +// removed, respectively. +// +// Do not construct directly; use NewMetrics instead. +type Metrics struct { + // metrics are the set of metrics to initialize. + metrics map[Metric]bool +} + +// NewMetrics returns a Metrics containing Metrics. +func NewMetrics(metrics ...Metric) *Metrics { + newMetrics := make(map[Metric]bool) + for _, metric := range metrics { + newMetrics[metric] = true + } + return &Metrics{ + metrics: newMetrics, + } +} + +// Metrics returns the metrics set. The returned map is read-only and must not +// be modified. +func (m *Metrics) Metrics() map[Metric]bool { + return m.metrics +} + +// Add adds the metrics to the metrics set and returns a new copy with the +// additional metrics. +func (m *Metrics) Add(metrics ...Metric) *Metrics { + newMetrics := make(map[Metric]bool) + for metric := range m.metrics { + newMetrics[metric] = true + } + + for _, metric := range metrics { + newMetrics[metric] = true + } + return &Metrics{ + metrics: newMetrics, + } +} + +// Join joins the metrics passed in with the metrics set, and returns a new copy +// with the merged metrics. +func (m *Metrics) Join(metrics *Metrics) *Metrics { + newMetrics := make(map[Metric]bool) + maps.Copy(newMetrics, m.metrics) + maps.Copy(newMetrics, metrics.metrics) + return &Metrics{ + metrics: newMetrics, + } +} + +// Remove removes the metrics from the metrics set and returns a new copy with +// the metrics removed. +func (m *Metrics) Remove(metrics ...Metric) *Metrics { + newMetrics := make(map[Metric]bool) + for metric := range m.metrics { + newMetrics[metric] = true + } + + for _, metric := range metrics { + delete(newMetrics, metric) + } + return &Metrics{ + metrics: newMetrics, + } +} diff --git a/vendor/google.golang.org/grpc/grpclog/component.go b/vendor/google.golang.org/grpc/grpclog/component.go index ac73c9ced25..f1ae080dcb8 100644 --- a/vendor/google.golang.org/grpc/grpclog/component.go +++ b/vendor/google.golang.org/grpc/grpclog/component.go @@ -20,8 +20,6 @@ package grpclog import ( "fmt" - - "google.golang.org/grpc/internal/grpclog" ) // componentData records the settings for a component. @@ -33,22 +31,22 @@ var cache = map[string]*componentData{} func (c *componentData) InfoDepth(depth int, args ...any) { args = append([]any{"[" + string(c.name) + "]"}, args...) - grpclog.InfoDepth(depth+1, args...) + InfoDepth(depth+1, args...) } func (c *componentData) WarningDepth(depth int, args ...any) { args = append([]any{"[" + string(c.name) + "]"}, args...) - grpclog.WarningDepth(depth+1, args...) + WarningDepth(depth+1, args...) } func (c *componentData) ErrorDepth(depth int, args ...any) { args = append([]any{"[" + string(c.name) + "]"}, args...) - grpclog.ErrorDepth(depth+1, args...) + ErrorDepth(depth+1, args...) } func (c *componentData) FatalDepth(depth int, args ...any) { args = append([]any{"[" + string(c.name) + "]"}, args...) - grpclog.FatalDepth(depth+1, args...) + FatalDepth(depth+1, args...) 
} func (c *componentData) Info(args ...any) { diff --git a/vendor/google.golang.org/grpc/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/grpclog.go index 16928c9cb99..db320105e64 100644 --- a/vendor/google.golang.org/grpc/grpclog/grpclog.go +++ b/vendor/google.golang.org/grpc/grpclog/grpclog.go @@ -18,18 +18,15 @@ // Package grpclog defines logging for grpc. // -// All logs in transport and grpclb packages only go to verbose level 2. -// All logs in other packages in grpc are logged in spite of the verbosity level. -// -// In the default logger, -// severity level can be set by environment variable GRPC_GO_LOG_SEVERITY_LEVEL, -// verbosity level can be set by GRPC_GO_LOG_VERBOSITY_LEVEL. -package grpclog // import "google.golang.org/grpc/grpclog" +// In the default logger, severity level can be set by environment variable +// GRPC_GO_LOG_SEVERITY_LEVEL, verbosity level can be set by +// GRPC_GO_LOG_VERBOSITY_LEVEL. +package grpclog import ( "os" - "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/grpclog/internal" ) func init() { @@ -38,58 +35,58 @@ func init() { // V reports whether verbosity level l is at least the requested verbose level. func V(l int) bool { - return grpclog.Logger.V(l) + return internal.LoggerV2Impl.V(l) } // Info logs to the INFO log. func Info(args ...any) { - grpclog.Logger.Info(args...) + internal.LoggerV2Impl.Info(args...) } // Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf. func Infof(format string, args ...any) { - grpclog.Logger.Infof(format, args...) + internal.LoggerV2Impl.Infof(format, args...) } // Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println. func Infoln(args ...any) { - grpclog.Logger.Infoln(args...) + internal.LoggerV2Impl.Infoln(args...) } // Warning logs to the WARNING log. func Warning(args ...any) { - grpclog.Logger.Warning(args...) + internal.LoggerV2Impl.Warning(args...) } // Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf. func Warningf(format string, args ...any) { - grpclog.Logger.Warningf(format, args...) + internal.LoggerV2Impl.Warningf(format, args...) } // Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println. func Warningln(args ...any) { - grpclog.Logger.Warningln(args...) + internal.LoggerV2Impl.Warningln(args...) } // Error logs to the ERROR log. func Error(args ...any) { - grpclog.Logger.Error(args...) + internal.LoggerV2Impl.Error(args...) } // Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf. func Errorf(format string, args ...any) { - grpclog.Logger.Errorf(format, args...) + internal.LoggerV2Impl.Errorf(format, args...) } // Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println. func Errorln(args ...any) { - grpclog.Logger.Errorln(args...) + internal.LoggerV2Impl.Errorln(args...) } // Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print. // It calls os.Exit() with exit code 1. func Fatal(args ...any) { - grpclog.Logger.Fatal(args...) + internal.LoggerV2Impl.Fatal(args...) // Make sure fatal logs will exit. os.Exit(1) } @@ -97,15 +94,15 @@ func Fatal(args ...any) { // Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf. // It calls os.Exit() with exit code 1. func Fatalf(format string, args ...any) { - grpclog.Logger.Fatalf(format, args...) + internal.LoggerV2Impl.Fatalf(format, args...) // Make sure fatal logs will exit. 
os.Exit(1) } // Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println. -// It calle os.Exit()) with exit code 1. +// It calls os.Exit() with exit code 1. func Fatalln(args ...any) { - grpclog.Logger.Fatalln(args...) + internal.LoggerV2Impl.Fatalln(args...) // Make sure fatal logs will exit. os.Exit(1) } @@ -114,19 +111,76 @@ func Fatalln(args ...any) { // // Deprecated: use Info. func Print(args ...any) { - grpclog.Logger.Info(args...) + internal.LoggerV2Impl.Info(args...) } // Printf prints to the logger. Arguments are handled in the manner of fmt.Printf. // // Deprecated: use Infof. func Printf(format string, args ...any) { - grpclog.Logger.Infof(format, args...) + internal.LoggerV2Impl.Infof(format, args...) } // Println prints to the logger. Arguments are handled in the manner of fmt.Println. // // Deprecated: use Infoln. func Println(args ...any) { - grpclog.Logger.Infoln(args...) + internal.LoggerV2Impl.Infoln(args...) +} + +// InfoDepth logs to the INFO log at the specified depth. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func InfoDepth(depth int, args ...any) { + if internal.DepthLoggerV2Impl != nil { + internal.DepthLoggerV2Impl.InfoDepth(depth, args...) + } else { + internal.LoggerV2Impl.Infoln(args...) + } +} + +// WarningDepth logs to the WARNING log at the specified depth. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WarningDepth(depth int, args ...any) { + if internal.DepthLoggerV2Impl != nil { + internal.DepthLoggerV2Impl.WarningDepth(depth, args...) + } else { + internal.LoggerV2Impl.Warningln(args...) + } +} + +// ErrorDepth logs to the ERROR log at the specified depth. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ErrorDepth(depth int, args ...any) { + if internal.DepthLoggerV2Impl != nil { + internal.DepthLoggerV2Impl.ErrorDepth(depth, args...) + } else { + internal.LoggerV2Impl.Errorln(args...) + } +} + +// FatalDepth logs to the FATAL log at the specified depth. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func FatalDepth(depth int, args ...any) { + if internal.DepthLoggerV2Impl != nil { + internal.DepthLoggerV2Impl.FatalDepth(depth, args...) + } else { + internal.LoggerV2Impl.Fatalln(args...) + } + os.Exit(1) } diff --git a/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go b/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go new file mode 100644 index 00000000000..59c03bc14c2 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go @@ -0,0 +1,26 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package internal contains functionality internal to the grpclog package. 
+package internal + +// LoggerV2Impl is the logger used for the non-depth log functions. +var LoggerV2Impl LoggerV2 + +// DepthLoggerV2Impl is the logger used for the depth log functions. +var DepthLoggerV2Impl DepthLoggerV2 diff --git a/vendor/google.golang.org/grpc/grpclog/internal/logger.go b/vendor/google.golang.org/grpc/grpclog/internal/logger.go new file mode 100644 index 00000000000..e524fdd40b2 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/internal/logger.go @@ -0,0 +1,87 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package internal + +// Logger mimics golang's standard Logger as an interface. +// +// Deprecated: use LoggerV2. +type Logger interface { + Fatal(args ...any) + Fatalf(format string, args ...any) + Fatalln(args ...any) + Print(args ...any) + Printf(format string, args ...any) + Println(args ...any) +} + +// LoggerWrapper wraps Logger into a LoggerV2. +type LoggerWrapper struct { + Logger +} + +// Info logs to INFO log. Arguments are handled in the manner of fmt.Print. +func (l *LoggerWrapper) Info(args ...any) { + l.Logger.Print(args...) +} + +// Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. +func (l *LoggerWrapper) Infoln(args ...any) { + l.Logger.Println(args...) +} + +// Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. +func (l *LoggerWrapper) Infof(format string, args ...any) { + l.Logger.Printf(format, args...) +} + +// Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. +func (l *LoggerWrapper) Warning(args ...any) { + l.Logger.Print(args...) +} + +// Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. +func (l *LoggerWrapper) Warningln(args ...any) { + l.Logger.Println(args...) +} + +// Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. +func (l *LoggerWrapper) Warningf(format string, args ...any) { + l.Logger.Printf(format, args...) +} + +// Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. +func (l *LoggerWrapper) Error(args ...any) { + l.Logger.Print(args...) +} + +// Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. +func (l *LoggerWrapper) Errorln(args ...any) { + l.Logger.Println(args...) +} + +// Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. +func (l *LoggerWrapper) Errorf(format string, args ...any) { + l.Logger.Printf(format, args...) +} + +// V reports whether verbosity level l is at least the requested verbose level. +func (*LoggerWrapper) V(int) bool { + // Returns true for all verbose level. 
+ return true +} diff --git a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go similarity index 52% rename from vendor/google.golang.org/grpc/internal/grpclog/grpclog.go rename to vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go index bfc45102ab2..07df71e98a8 100644 --- a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go +++ b/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go @@ -1,6 +1,6 @@ /* * - * Copyright 2020 gRPC authors. + * Copyright 2024 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,59 +16,17 @@ * */ -// Package grpclog (internal) defines depth logging for grpc. -package grpclog +package internal import ( + "encoding/json" + "fmt" + "io" + "log" "os" ) -// Logger is the logger used for the non-depth log functions. -var Logger LoggerV2 - -// DepthLogger is the logger used for the depth log functions. -var DepthLogger DepthLoggerV2 - -// InfoDepth logs to the INFO log at the specified depth. -func InfoDepth(depth int, args ...any) { - if DepthLogger != nil { - DepthLogger.InfoDepth(depth, args...) - } else { - Logger.Infoln(args...) - } -} - -// WarningDepth logs to the WARNING log at the specified depth. -func WarningDepth(depth int, args ...any) { - if DepthLogger != nil { - DepthLogger.WarningDepth(depth, args...) - } else { - Logger.Warningln(args...) - } -} - -// ErrorDepth logs to the ERROR log at the specified depth. -func ErrorDepth(depth int, args ...any) { - if DepthLogger != nil { - DepthLogger.ErrorDepth(depth, args...) - } else { - Logger.Errorln(args...) - } -} - -// FatalDepth logs to the FATAL log at the specified depth. -func FatalDepth(depth int, args ...any) { - if DepthLogger != nil { - DepthLogger.FatalDepth(depth, args...) - } else { - Logger.Fatalln(args...) - } - os.Exit(1) -} - // LoggerV2 does underlying logging work for grpclog. -// This is a copy of the LoggerV2 defined in the external grpclog package. It -// is defined here to avoid a circular dependency. type LoggerV2 interface { // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. Info(args ...any) @@ -107,14 +65,13 @@ type LoggerV2 interface { // DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements // DepthLoggerV2, the below functions will be called with the appropriate stack // depth set for trivial functions the logger may ignore. -// This is a copy of the DepthLoggerV2 defined in the external grpclog package. -// It is defined here to avoid a circular dependency. // // # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. type DepthLoggerV2 interface { + LoggerV2 // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. InfoDepth(depth int, args ...any) // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. @@ -124,3 +81,124 @@ type DepthLoggerV2 interface { // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. FatalDepth(depth int, args ...any) } + +const ( + // infoLog indicates Info severity. + infoLog int = iota + // warningLog indicates Warning severity. + warningLog + // errorLog indicates Error severity. + errorLog + // fatalLog indicates Fatal severity. 
+ fatalLog +) + +// severityName contains the string representation of each severity. +var severityName = []string{ + infoLog: "INFO", + warningLog: "WARNING", + errorLog: "ERROR", + fatalLog: "FATAL", +} + +// loggerT is the default logger used by grpclog. +type loggerT struct { + m []*log.Logger + v int + jsonFormat bool +} + +func (g *loggerT) output(severity int, s string) { + sevStr := severityName[severity] + if !g.jsonFormat { + g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s)) + return + } + // TODO: we can also include the logging component, but that needs more + // (API) changes. + b, _ := json.Marshal(map[string]string{ + "severity": sevStr, + "message": s, + }) + g.m[severity].Output(2, string(b)) +} + +func (g *loggerT) Info(args ...any) { + g.output(infoLog, fmt.Sprint(args...)) +} + +func (g *loggerT) Infoln(args ...any) { + g.output(infoLog, fmt.Sprintln(args...)) +} + +func (g *loggerT) Infof(format string, args ...any) { + g.output(infoLog, fmt.Sprintf(format, args...)) +} + +func (g *loggerT) Warning(args ...any) { + g.output(warningLog, fmt.Sprint(args...)) +} + +func (g *loggerT) Warningln(args ...any) { + g.output(warningLog, fmt.Sprintln(args...)) +} + +func (g *loggerT) Warningf(format string, args ...any) { + g.output(warningLog, fmt.Sprintf(format, args...)) +} + +func (g *loggerT) Error(args ...any) { + g.output(errorLog, fmt.Sprint(args...)) +} + +func (g *loggerT) Errorln(args ...any) { + g.output(errorLog, fmt.Sprintln(args...)) +} + +func (g *loggerT) Errorf(format string, args ...any) { + g.output(errorLog, fmt.Sprintf(format, args...)) +} + +func (g *loggerT) Fatal(args ...any) { + g.output(fatalLog, fmt.Sprint(args...)) + os.Exit(1) +} + +func (g *loggerT) Fatalln(args ...any) { + g.output(fatalLog, fmt.Sprintln(args...)) + os.Exit(1) +} + +func (g *loggerT) Fatalf(format string, args ...any) { + g.output(fatalLog, fmt.Sprintf(format, args...)) + os.Exit(1) +} + +func (g *loggerT) V(l int) bool { + return l <= g.v +} + +// LoggerV2Config configures the LoggerV2 implementation. +type LoggerV2Config struct { + // Verbosity sets the verbosity level of the logger. + Verbosity int + // FormatJSON controls whether the logger should output logs in JSON format. + FormatJSON bool +} + +// NewLoggerV2 creates a new LoggerV2 instance with the provided configuration. +// The infoW, warningW, and errorW writers are used to write log messages of +// different severity levels. +func NewLoggerV2(infoW, warningW, errorW io.Writer, c LoggerV2Config) LoggerV2 { + var m []*log.Logger + flag := log.LstdFlags + if c.FormatJSON { + flag = 0 + } + m = append(m, log.New(infoW, "", flag)) + m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag)) + ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal. + m = append(m, log.New(ew, "", flag)) + m = append(m, log.New(ew, "", flag)) + return &loggerT{m: m, v: c.Verbosity, jsonFormat: c.FormatJSON} +} diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go index b1674d8267c..4b203585707 100644 --- a/vendor/google.golang.org/grpc/grpclog/logger.go +++ b/vendor/google.golang.org/grpc/grpclog/logger.go @@ -18,70 +18,17 @@ package grpclog -import "google.golang.org/grpc/internal/grpclog" +import "google.golang.org/grpc/grpclog/internal" // Logger mimics golang's standard Logger as an interface. // // Deprecated: use LoggerV2. 
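(Editorial aside: with the concrete logger moved into grpclog/internal, the exported surface used below is unchanged. A hedged sketch of the usual wiring, using only functions kept by this patch — SetLoggerV2 and NewLoggerV2WithVerbosity:)

package main

import (
	"io"
	"os"

	"google.golang.org/grpc/grpclog"
)

func init() {
	// SetLoggerV2 is not mutex-protected, so this must run before any gRPC
	// activity: discard INFO and WARNING, send ERROR and FATAL to stderr,
	// at verbosity level 2.
	grpclog.SetLoggerV2(grpclog.NewLoggerV2WithVerbosity(io.Discard, io.Discard, os.Stderr, 2))
}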
-type Logger interface { - Fatal(args ...any) - Fatalf(format string, args ...any) - Fatalln(args ...any) - Print(args ...any) - Printf(format string, args ...any) - Println(args ...any) -} +type Logger internal.Logger // SetLogger sets the logger that is used in grpc. Call only from // init() functions. // // Deprecated: use SetLoggerV2. func SetLogger(l Logger) { - grpclog.Logger = &loggerWrapper{Logger: l} -} - -// loggerWrapper wraps Logger into a LoggerV2. -type loggerWrapper struct { - Logger -} - -func (g *loggerWrapper) Info(args ...any) { - g.Logger.Print(args...) -} - -func (g *loggerWrapper) Infoln(args ...any) { - g.Logger.Println(args...) -} - -func (g *loggerWrapper) Infof(format string, args ...any) { - g.Logger.Printf(format, args...) -} - -func (g *loggerWrapper) Warning(args ...any) { - g.Logger.Print(args...) -} - -func (g *loggerWrapper) Warningln(args ...any) { - g.Logger.Println(args...) -} - -func (g *loggerWrapper) Warningf(format string, args ...any) { - g.Logger.Printf(format, args...) -} - -func (g *loggerWrapper) Error(args ...any) { - g.Logger.Print(args...) -} - -func (g *loggerWrapper) Errorln(args ...any) { - g.Logger.Println(args...) -} - -func (g *loggerWrapper) Errorf(format string, args ...any) { - g.Logger.Printf(format, args...) -} - -func (g *loggerWrapper) V(l int) bool { - // Returns true for all verbose level. - return true + internal.LoggerV2Impl = &internal.LoggerWrapper{Logger: l} } diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go index ecfd36d7130..892dc13d164 100644 --- a/vendor/google.golang.org/grpc/grpclog/loggerv2.go +++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go @@ -19,52 +19,16 @@ package grpclog import ( - "encoding/json" - "fmt" "io" - "log" "os" "strconv" "strings" - "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/grpclog/internal" ) // LoggerV2 does underlying logging work for grpclog. -type LoggerV2 interface { - // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. - Info(args ...any) - // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. - Infoln(args ...any) - // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. - Infof(format string, args ...any) - // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. - Warning(args ...any) - // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. - Warningln(args ...any) - // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. - Warningf(format string, args ...any) - // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. - Error(args ...any) - // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. - Errorln(args ...any) - // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. - Errorf(format string, args ...any) - // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. - // gRPC ensures that all Fatal logs will exit with os.Exit(1). - // Implementations may also call os.Exit() with a non-zero exit code. - Fatal(args ...any) - // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. - // gRPC ensures that all Fatal logs will exit with os.Exit(1). - // Implementations may also call os.Exit() with a non-zero exit code. - Fatalln(args ...any) - // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. 
- // gRPC ensures that all Fatal logs will exit with os.Exit(1). - // Implementations may also call os.Exit() with a non-zero exit code. - Fatalf(format string, args ...any) - // V reports whether verbosity level l is at least the requested verbose level. - V(l int) bool -} +type LoggerV2 internal.LoggerV2 // SetLoggerV2 sets logger that is used in grpc to a V2 logger. // Not mutex-protected, should be called before any gRPC functions. @@ -72,34 +36,8 @@ func SetLoggerV2(l LoggerV2) { if _, ok := l.(*componentData); ok { panic("cannot use component logger as grpclog logger") } - grpclog.Logger = l - grpclog.DepthLogger, _ = l.(grpclog.DepthLoggerV2) -} - -const ( - // infoLog indicates Info severity. - infoLog int = iota - // warningLog indicates Warning severity. - warningLog - // errorLog indicates Error severity. - errorLog - // fatalLog indicates Fatal severity. - fatalLog -) - -// severityName contains the string representation of each severity. -var severityName = []string{ - infoLog: "INFO", - warningLog: "WARNING", - errorLog: "ERROR", - fatalLog: "FATAL", -} - -// loggerT is the default logger used by grpclog. -type loggerT struct { - m []*log.Logger - v int - jsonFormat bool + internal.LoggerV2Impl = l + internal.DepthLoggerV2Impl, _ = l.(internal.DepthLoggerV2) } // NewLoggerV2 creates a loggerV2 with the provided writers. @@ -108,32 +46,13 @@ type loggerT struct { // Warning logs will be written to warningW and infoW. // Info logs will be written to infoW. func NewLoggerV2(infoW, warningW, errorW io.Writer) LoggerV2 { - return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{}) + return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{}) } // NewLoggerV2WithVerbosity creates a loggerV2 with the provided writers and // verbosity level. func NewLoggerV2WithVerbosity(infoW, warningW, errorW io.Writer, v int) LoggerV2 { - return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{verbose: v}) -} - -type loggerV2Config struct { - verbose int - jsonFormat bool -} - -func newLoggerV2WithConfig(infoW, warningW, errorW io.Writer, c loggerV2Config) LoggerV2 { - var m []*log.Logger - flag := log.LstdFlags - if c.jsonFormat { - flag = 0 - } - m = append(m, log.New(infoW, "", flag)) - m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag)) - ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal. - m = append(m, log.New(ew, "", flag)) - m = append(m, log.New(ew, "", flag)) - return &loggerT{m: m, v: c.verbose, jsonFormat: c.jsonFormat} + return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{Verbosity: v}) } // newLoggerV2 creates a loggerV2 to be used as default logger. @@ -161,80 +80,10 @@ func newLoggerV2() LoggerV2 { jsonFormat := strings.EqualFold(os.Getenv("GRPC_GO_LOG_FORMATTER"), "json") - return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{ - verbose: v, - jsonFormat: jsonFormat, - }) -} - -func (g *loggerT) output(severity int, s string) { - sevStr := severityName[severity] - if !g.jsonFormat { - g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s)) - return - } - // TODO: we can also include the logging component, but that needs more - // (API) changes. 
- b, _ := json.Marshal(map[string]string{ - "severity": sevStr, - "message": s, + return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{ + Verbosity: v, + FormatJSON: jsonFormat, }) - g.m[severity].Output(2, string(b)) -} - -func (g *loggerT) Info(args ...any) { - g.output(infoLog, fmt.Sprint(args...)) -} - -func (g *loggerT) Infoln(args ...any) { - g.output(infoLog, fmt.Sprintln(args...)) -} - -func (g *loggerT) Infof(format string, args ...any) { - g.output(infoLog, fmt.Sprintf(format, args...)) -} - -func (g *loggerT) Warning(args ...any) { - g.output(warningLog, fmt.Sprint(args...)) -} - -func (g *loggerT) Warningln(args ...any) { - g.output(warningLog, fmt.Sprintln(args...)) -} - -func (g *loggerT) Warningf(format string, args ...any) { - g.output(warningLog, fmt.Sprintf(format, args...)) -} - -func (g *loggerT) Error(args ...any) { - g.output(errorLog, fmt.Sprint(args...)) -} - -func (g *loggerT) Errorln(args ...any) { - g.output(errorLog, fmt.Sprintln(args...)) -} - -func (g *loggerT) Errorf(format string, args ...any) { - g.output(errorLog, fmt.Sprintf(format, args...)) -} - -func (g *loggerT) Fatal(args ...any) { - g.output(fatalLog, fmt.Sprint(args...)) - os.Exit(1) -} - -func (g *loggerT) Fatalln(args ...any) { - g.output(fatalLog, fmt.Sprintln(args...)) - os.Exit(1) -} - -func (g *loggerT) Fatalf(format string, args ...any) { - g.output(fatalLog, fmt.Sprintf(format, args...)) - os.Exit(1) -} - -func (g *loggerT) V(l int) bool { - return l <= g.v } // DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements @@ -245,14 +94,4 @@ func (g *loggerT) V(l int) bool { // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. -type DepthLoggerV2 interface { - LoggerV2 - // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. - InfoDepth(depth int, args ...any) - // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. - WarningDepth(depth int, args ...any) - // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. - ErrorDepth(depth int, args ...any) - // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. - FatalDepth(depth int, args ...any) -} +type DepthLoggerV2 internal.DepthLoggerV2 diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go index 38b88350735..d92335445f6 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
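// ---------------------------------------------------------------------------
// Illustrative usage sketch (not from the diff): the exported grpclog surface
// is unchanged by the move into grpclog/internal, so applications still
// install a logger the same way as before. The writer and verbosity choices
// below are assumptions for the example.
package main

import (
	"os"

	"google.golang.org/grpc/grpclog"
)

func main() {
	// INFO goes to stdout; warnings and errors go to stderr and, via the
	// io.MultiWriter layering in internal.NewLoggerV2 above, are mirrored to
	// stdout as well. Verbosity 2 makes V(2) checks pass. Install before any
	// other gRPC function is called.
	grpclog.SetLoggerV2(grpclog.NewLoggerV2WithVerbosity(os.Stdout, os.Stderr, os.Stderr, 2))

	// The same knobs are reachable without code through the env vars read in
	// newLoggerV2 above: GRPC_GO_LOG_SEVERITY_LEVEL, GRPC_GO_VERBOSITY_LEVEL
	// and GRPC_GO_LOG_FORMATTER=json.
	grpclog.Info("logger installed")
}
// ---------------------------------------------------------------------------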
// versions: -// protoc-gen-go v1.34.1 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v5.27.1 // source: grpc/health/v1/health.proto package grpc_health_v1 @@ -237,7 +237,7 @@ func file_grpc_health_v1_health_proto_rawDescGZIP() []byte { var file_grpc_health_v1_health_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_grpc_health_v1_health_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_grpc_health_v1_health_proto_goTypes = []interface{}{ +var file_grpc_health_v1_health_proto_goTypes = []any{ (HealthCheckResponse_ServingStatus)(0), // 0: grpc.health.v1.HealthCheckResponse.ServingStatus (*HealthCheckRequest)(nil), // 1: grpc.health.v1.HealthCheckRequest (*HealthCheckResponse)(nil), // 2: grpc.health.v1.HealthCheckResponse @@ -261,7 +261,7 @@ func file_grpc_health_v1_health_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_grpc_health_v1_health_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_grpc_health_v1_health_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*HealthCheckRequest); i { case 0: return &v.state @@ -273,7 +273,7 @@ func file_grpc_health_v1_health_proto_init() { return nil } } - file_grpc_health_v1_health_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_grpc_health_v1_health_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*HealthCheckResponse); i { case 0: return &v.state diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go index 51b736ba06e..f96b8ab4927 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.4.0 -// - protoc v4.25.2 +// - protoc-gen-go-grpc v1.5.1 +// - protoc v5.27.1 // source: grpc/health/v1/health.proto package grpc_health_v1 @@ -32,8 +32,8 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.62.0 or later. -const _ = grpc.SupportPackageIsVersion8 +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 const ( Health_Check_FullMethodName = "/grpc.health.v1.Health/Check" @@ -73,7 +73,7 @@ type HealthClient interface { // should assume this method is not supported and should not retry the // call. If the call terminates with any other status (including OK), // clients should retry the call with appropriate exponential backoff. - Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) + Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[HealthCheckResponse], error) } type healthClient struct { @@ -94,13 +94,13 @@ func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts . return out, nil } -func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) { +func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[HealthCheckResponse], error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) stream, err := c.cc.NewStream(ctx, &Health_ServiceDesc.Streams[0], Health_Watch_FullMethodName, cOpts...) 
if err != nil { return nil, err } - x := &healthWatchClient{ClientStream: stream} + x := &grpc.GenericClientStream[HealthCheckRequest, HealthCheckResponse]{ClientStream: stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } @@ -110,26 +110,12 @@ func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts . return x, nil } -type Health_WatchClient interface { - Recv() (*HealthCheckResponse, error) - grpc.ClientStream -} - -type healthWatchClient struct { - grpc.ClientStream -} - -func (x *healthWatchClient) Recv() (*HealthCheckResponse, error) { - m := new(HealthCheckResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type Health_WatchClient = grpc.ServerStreamingClient[HealthCheckResponse] // HealthServer is the server API for Health service. // All implementations should embed UnimplementedHealthServer -// for forward compatibility +// for forward compatibility. // // Health is gRPC's mechanism for checking whether a server is able to handle // RPCs. Its semantics are documented in @@ -160,19 +146,23 @@ type HealthServer interface { // should assume this method is not supported and should not retry the // call. If the call terminates with any other status (including OK), // clients should retry the call with appropriate exponential backoff. - Watch(*HealthCheckRequest, Health_WatchServer) error + Watch(*HealthCheckRequest, grpc.ServerStreamingServer[HealthCheckResponse]) error } -// UnimplementedHealthServer should be embedded to have forward compatible implementations. -type UnimplementedHealthServer struct { -} +// UnimplementedHealthServer should be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedHealthServer struct{} func (UnimplementedHealthServer) Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method Check not implemented") } -func (UnimplementedHealthServer) Watch(*HealthCheckRequest, Health_WatchServer) error { +func (UnimplementedHealthServer) Watch(*HealthCheckRequest, grpc.ServerStreamingServer[HealthCheckResponse]) error { return status.Errorf(codes.Unimplemented, "method Watch not implemented") } +func (UnimplementedHealthServer) testEmbeddedByValue() {} // UnsafeHealthServer may be embedded to opt out of forward compatibility for this service. // Use of this interface is not recommended, as added methods to HealthServer will @@ -182,6 +172,13 @@ type UnsafeHealthServer interface { } func RegisterHealthServer(s grpc.ServiceRegistrar, srv HealthServer) { + // If the following call panics, it indicates UnimplementedHealthServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. 
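// ---------------------------------------------------------------------------
// Illustrative usage sketch (not from the diff; the target address and the
// lack of TLS are assumptions). Watch now returns the generic
// grpc.ServerStreamingClient[HealthCheckResponse]; the Health_WatchClient
// alias above keeps older call sites compiling unchanged.
package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	conn, err := grpc.NewClient("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// stream is a grpc.ServerStreamingClient[healthpb.HealthCheckResponse],
	// i.e. exactly what healthpb.Health_WatchClient now names.
	stream, err := healthpb.NewHealthClient(conn).Watch(context.Background(),
		&healthpb.HealthCheckRequest{Service: ""})
	if err != nil {
		log.Fatal(err)
	}
	for {
		resp, err := stream.Recv()
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("serving status: %v", resp.GetStatus())
	}
}
// ---------------------------------------------------------------------------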
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } s.RegisterService(&Health_ServiceDesc, srv) } @@ -208,21 +205,11 @@ func _Health_Watch_Handler(srv interface{}, stream grpc.ServerStream) error { if err := stream.RecvMsg(m); err != nil { return err } - return srv.(HealthServer).Watch(m, &healthWatchServer{ServerStream: stream}) -} - -type Health_WatchServer interface { - Send(*HealthCheckResponse) error - grpc.ServerStream + return srv.(HealthServer).Watch(m, &grpc.GenericServerStream[HealthCheckRequest, HealthCheckResponse]{ServerStream: stream}) } -type healthWatchServer struct { - grpc.ServerStream -} - -func (x *healthWatchServer) Send(m *HealthCheckResponse) error { - return x.ServerStream.SendMsg(m) -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type Health_WatchServer = grpc.ServerStreamingServer[HealthCheckResponse] // Health_ServiceDesc is the grpc.ServiceDesc for Health service. // It's only intended for direct use with grpc.RegisterService, diff --git a/vendor/google.golang.org/grpc/health/server.go b/vendor/google.golang.org/grpc/health/server.go index cce6312d77f..d4b4b708159 100644 --- a/vendor/google.golang.org/grpc/health/server.go +++ b/vendor/google.golang.org/grpc/health/server.go @@ -51,7 +51,7 @@ func NewServer() *Server { } // Check implements `service Health`. -func (s *Server) Check(ctx context.Context, in *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) { +func (s *Server) Check(_ context.Context, in *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) { s.mu.RLock() defer s.mu.RUnlock() if servingStatus, ok := s.statusMap[in.Service]; ok { diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go index 13821a92660..85540f86a73 100644 --- a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go +++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go @@ -33,6 +33,8 @@ type lbConfig struct { childConfig serviceconfig.LoadBalancingConfig } +// ChildName returns the name of the child balancer of the gracefulswitch +// Balancer. func ChildName(l serviceconfig.LoadBalancingConfig) string { return l.(*lbConfig).childBuilder.Name() } diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go index aa4505a871d..9669328914a 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go @@ -106,7 +106,7 @@ func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *binlogpb.GrpcLogEntry } // Log creates a proto binary log entry, and logs it to the sink. -func (ml *TruncatingMethodLogger) Log(ctx context.Context, c LogEntryConfig) { +func (ml *TruncatingMethodLogger) Log(_ context.Context, c LogEntryConfig) { ml.sink.Write(ml.Build(c)) } diff --git a/vendor/google.golang.org/grpc/internal/channelz/channel.go b/vendor/google.golang.org/grpc/internal/channelz/channel.go index d7e9e1d54ec..3ec662799a8 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/channel.go +++ b/vendor/google.golang.org/grpc/internal/channelz/channel.go @@ -43,6 +43,8 @@ type Channel struct { // Non-zero traceRefCount means the trace of this channel cannot be deleted. 
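// ---------------------------------------------------------------------------
// Illustrative sketch (not from the diff) of the embed-by-value NOTE above;
// the staticHealth type is hypothetical. Embedding the value, rather than a
// nil *UnimplementedHealthServer, is what lets the testEmbeddedByValue probe
// in RegisterHealthServer succeed at registration time.
package main

import (
	"context"
	"log"
	"net"

	"google.golang.org/grpc"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

type staticHealth struct {
	healthpb.UnimplementedHealthServer // embedded by value, per the NOTE
}

func (staticHealth) Check(context.Context, *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) {
	return &healthpb.HealthCheckResponse{Status: healthpb.HealthCheckResponse_SERVING}, nil
}

func main() {
	lis, err := net.Listen("tcp", ":50051")
	if err != nil {
		log.Fatal(err)
	}
	s := grpc.NewServer()
	// Would panic here, at registration, rather than at the first RPC, had
	// the embedding been a nil pointer.
	healthpb.RegisterHealthServer(s, staticHealth{})
	log.Fatal(s.Serve(lis))
}
// ---------------------------------------------------------------------------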
traceRefCount int32 + // ChannelMetrics holds connectivity state, target and call metrics for the + // channel within channelz. ChannelMetrics ChannelMetrics } @@ -50,6 +52,8 @@ type Channel struct { // nesting. func (c *Channel) channelzIdentifier() {} +// String returns a string representation of the Channel, including its parent +// entity and ID. func (c *Channel) String() string { if c.Parent == nil { return fmt.Sprintf("Channel #%d", c.ID) @@ -61,24 +65,31 @@ func (c *Channel) id() int64 { return c.ID } +// SubChans returns a copy of the map of sub-channels associated with the +// Channel. func (c *Channel) SubChans() map[int64]string { db.mu.RLock() defer db.mu.RUnlock() return copyMap(c.subChans) } +// NestedChans returns a copy of the map of nested channels associated with the +// Channel. func (c *Channel) NestedChans() map[int64]string { db.mu.RLock() defer db.mu.RUnlock() return copyMap(c.nestedChans) } +// Trace returns a copy of the Channel's trace data. func (c *Channel) Trace() *ChannelTrace { db.mu.RLock() defer db.mu.RUnlock() return c.trace.copy() } +// ChannelMetrics holds connectivity state, target and call metrics for the +// channel within channelz. type ChannelMetrics struct { // The current connectivity state of the channel. State atomic.Pointer[connectivity.State] @@ -136,12 +147,16 @@ func strFromPointer(s *string) string { return *s } +// String returns a string representation of the ChannelMetrics, including its +// state, target, and call metrics. func (c *ChannelMetrics) String() string { return fmt.Sprintf("State: %v, Target: %s, CallsStarted: %v, CallsSucceeded: %v, CallsFailed: %v, LastCallStartedTimestamp: %v", c.State.Load(), strFromPointer(c.Target.Load()), c.CallsStarted.Load(), c.CallsSucceeded.Load(), c.CallsFailed.Load(), c.LastCallStartedTimestamp.Load(), ) } +// NewChannelMetricForTesting creates a new instance of ChannelMetrics with +// specified initial values for testing purposes. func NewChannelMetricForTesting(state connectivity.State, target string, started, succeeded, failed, timestamp int64) *ChannelMetrics { c := &ChannelMetrics{} c.State.Store(&state) diff --git a/vendor/google.golang.org/grpc/internal/channelz/channelmap.go b/vendor/google.golang.org/grpc/internal/channelz/channelmap.go index dfe18b08925..64c791953d0 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/channelmap.go +++ b/vendor/google.golang.org/grpc/internal/channelz/channelmap.go @@ -46,7 +46,7 @@ type entry interface { // channelMap is the storage data structure for channelz. // -// Methods of channelMap can be divided in two two categories with respect to +// Methods of channelMap can be divided into two categories with respect to // locking. // // 1. Methods acquire the global lock. @@ -234,13 +234,6 @@ func copyMap(m map[int64]string) map[int64]string { return n } -func min(a, b int) int { - if a < b { - return a - } - return b -} - func (c *channelMap) getTopChannels(id int64, maxResults int) ([]*Channel, bool) { if maxResults <= 0 { maxResults = EntriesPerPage diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go index 03e24e1507a..078bb81238b 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go +++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go @@ -33,7 +33,7 @@ var ( // outside this package except by tests. 
IDGen IDGenerator - db *channelMap = newChannelMap() + db = newChannelMap() // EntriesPerPage defines the number of channelz entries to be shown on a web page. EntriesPerPage = 50 curState int32 diff --git a/vendor/google.golang.org/grpc/internal/channelz/server.go b/vendor/google.golang.org/grpc/internal/channelz/server.go index cdfc49d6eac..b5a82499299 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/server.go +++ b/vendor/google.golang.org/grpc/internal/channelz/server.go @@ -59,6 +59,8 @@ func NewServerMetricsForTesting(started, succeeded, failed, timestamp int64) *Se return sm } +// CopyFrom copies the metrics data from the provided ServerMetrics +// instance into the current instance. func (sm *ServerMetrics) CopyFrom(o *ServerMetrics) { sm.CallsStarted.Store(o.CallsStarted.Load()) sm.CallsSucceeded.Store(o.CallsSucceeded.Load()) diff --git a/vendor/google.golang.org/grpc/internal/channelz/socket.go b/vendor/google.golang.org/grpc/internal/channelz/socket.go index fa64834b25d..90103847c5f 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/socket.go +++ b/vendor/google.golang.org/grpc/internal/channelz/socket.go @@ -70,13 +70,18 @@ type EphemeralSocketMetrics struct { RemoteFlowControlWindow int64 } +// SocketType represents the type of socket. type SocketType string +// SocketType can be one of these. const ( SocketTypeNormal = "NormalSocket" SocketTypeListen = "ListenSocket" ) +// Socket represents a socket within channelz which includes socket +// metrics and data related to socket activity and provides methods +// for managing and interacting with sockets. type Socket struct { Entity SocketType SocketType @@ -100,6 +105,8 @@ type Socket struct { Security credentials.ChannelzSecurityValue } +// String returns a string representation of the Socket, including its parent +// entity, socket type, and ID. func (ls *Socket) String() string { return fmt.Sprintf("%s %s #%d", ls.Parent, ls.SocketType, ls.ID) } diff --git a/vendor/google.golang.org/grpc/internal/channelz/subchannel.go b/vendor/google.golang.org/grpc/internal/channelz/subchannel.go index 3b88e4cba8e..b20802e6e96 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/subchannel.go +++ b/vendor/google.golang.org/grpc/internal/channelz/subchannel.go @@ -47,12 +47,14 @@ func (sc *SubChannel) id() int64 { return sc.ID } +// Sockets returns a copy of the sockets map associated with the SubChannel. func (sc *SubChannel) Sockets() map[int64]string { db.mu.RLock() defer db.mu.RUnlock() return copyMap(sc.sockets) } +// Trace returns a copy of the ChannelTrace associated with the SubChannel. func (sc *SubChannel) Trace() *ChannelTrace { db.mu.RLock() defer db.mu.RUnlock() diff --git a/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go index d1ed8df6a51..0e6e18e185c 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go +++ b/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go @@ -35,13 +35,13 @@ type SocketOptionData struct { // Getsockopt defines the function to get socket options requested by channelz. // It is to be passed to syscall.RawConn.Control(). // Windows OS doesn't support Socket Option -func (s *SocketOptionData) Getsockopt(fd uintptr) { +func (s *SocketOptionData) Getsockopt(uintptr) { once.Do(func() { logger.Warning("Channelz: socket options are not supported on non-linux environments") }) } // GetSocketOption gets the socket option info of the conn. 
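// ---------------------------------------------------------------------------
// Illustrative sketch (not from the diff): the Channel/SubChannel/Socket
// entities documented above are normally consumed through the public
// channelz service rather than these internal accessors. The listen address
// is an assumption.
package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
	channelzservice "google.golang.org/grpc/channelz/service"
)

func main() {
	lis, err := net.Listen("tcp", "localhost:50052")
	if err != nil {
		log.Fatal(err)
	}
	s := grpc.NewServer()
	// Exposes the channelz data (top channels, subchannels, sockets) over the
	// grpc.channelz.v1 API, for tools such as grpcdebug.
	channelzservice.RegisterChannelzServiceToServer(s)
	log.Fatal(s.Serve(lis))
}
// ---------------------------------------------------------------------------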
-func GetSocketOption(c any) *SocketOptionData { +func GetSocketOption(any) *SocketOptionData { return nil } diff --git a/vendor/google.golang.org/grpc/internal/channelz/trace.go b/vendor/google.golang.org/grpc/internal/channelz/trace.go index 36b86740323..2bffe477768 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/trace.go +++ b/vendor/google.golang.org/grpc/internal/channelz/trace.go @@ -79,13 +79,21 @@ type TraceEvent struct { Parent *TraceEvent } +// ChannelTrace provides tracing information for a channel. +// It tracks various events and metadata related to the channel's lifecycle +// and operations. type ChannelTrace struct { - cm *channelMap - clearCalled bool + cm *channelMap + clearCalled bool + // The time when the trace was created. CreationTime time.Time - EventNum int64 - mu sync.Mutex - Events []*traceEvent + // A counter for the number of events recorded in the + // trace. + EventNum int64 + mu sync.Mutex + // A slice of traceEvent pointers representing the events recorded for + // this channel. + Events []*traceEvent } func (c *ChannelTrace) copy() *ChannelTrace { @@ -175,6 +183,7 @@ var refChannelTypeToString = map[RefChannelType]string{ RefNormalSocket: "NormalSocket", } +// String returns a string representation of the RefChannelType func (r RefChannelType) String() string { return refChannelTypeToString[r] } diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index d9064871394..6e7dd6b7727 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -45,7 +45,16 @@ var ( // option is present for backward compatibility. This option may be overridden // by setting the environment variable "GRPC_ENFORCE_ALPN_ENABLED" to "true" // or "false". - EnforceALPNEnabled = boolFromEnv("GRPC_ENFORCE_ALPN_ENABLED", false) + EnforceALPNEnabled = boolFromEnv("GRPC_ENFORCE_ALPN_ENABLED", true) + // XDSFallbackSupport is the env variable that controls whether support for + // xDS fallback is turned on. If this is unset or is false, only the first + // xDS server in the list of server configs will be used. + XDSFallbackSupport = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FALLBACK", false) + // NewPickFirstEnabled is set if the new pickfirst leaf policy is to be used + // instead of the exiting pickfirst implementation. This can be enabled by + // setting the environment variable "GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST" + // to "true". + NewPickFirstEnabled = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST", false) ) func boolFromEnv(envVar string, def bool) bool { diff --git a/vendor/google.golang.org/grpc/internal/experimental.go b/vendor/google.golang.org/grpc/internal/experimental.go index 7f7044e1731..7617be21589 100644 --- a/vendor/google.golang.org/grpc/internal/experimental.go +++ b/vendor/google.golang.org/grpc/internal/experimental.go @@ -18,11 +18,11 @@ package internal var ( - // WithRecvBufferPool is implemented by the grpc package and returns a dial + // WithBufferPool is implemented by the grpc package and returns a dial // option to configure a shared buffer pool for a grpc.ClientConn. 
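// ---------------------------------------------------------------------------
// Minimal sketch (not from the diff) of the env-var semantics described in
// the envconfig comments above. readBool is a hypothetical stand-in for the
// unexported boolFromEnv helper: a true default stays true unless the
// variable is "false", and a false default flips only on "true".
package main

import (
	"fmt"
	"os"
	"strings"
)

func readBool(envVar string, def bool) bool {
	if def {
		return !strings.EqualFold(os.Getenv(envVar), "false")
	}
	return strings.EqualFold(os.Getenv(envVar), "true")
}

func main() {
	// After this patch ALPN enforcement defaults to on, so only an explicit
	// GRPC_ENFORCE_ALPN_ENABLED=false opts out.
	fmt.Println(readBool("GRPC_ENFORCE_ALPN_ENABLED", true))
	// The new pickfirst leaf policy stays off unless explicitly enabled.
	fmt.Println(readBool("GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST", false))
}
// ---------------------------------------------------------------------------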
- WithRecvBufferPool any // func (grpc.SharedBufferPool) grpc.DialOption + WithBufferPool any // func (grpc.SharedBufferPool) grpc.DialOption - // RecvBufferPool is implemented by the grpc package and returns a server + // BufferPool is implemented by the grpc package and returns a server // option to configure a shared buffer pool for a grpc.Server. - RecvBufferPool any // func (grpc.SharedBufferPool) grpc.ServerOption + BufferPool any // func (grpc.SharedBufferPool) grpc.ServerOption ) diff --git a/vendor/google.golang.org/grpc/internal/googlecloud/googlecloud.go b/vendor/google.golang.org/grpc/internal/googlecloud/googlecloud.go index 6717b757f80..43423d8ad9a 100644 --- a/vendor/google.golang.org/grpc/internal/googlecloud/googlecloud.go +++ b/vendor/google.golang.org/grpc/internal/googlecloud/googlecloud.go @@ -62,9 +62,9 @@ func isRunningOnGCE(manufacturer []byte, goos string) bool { name = strings.TrimSpace(name) return name == "Google" || name == "Google Compute Engine" case "windows": - name = strings.Replace(name, " ", "", -1) - name = strings.Replace(name, "\n", "", -1) - name = strings.Replace(name, "\r", "", -1) + name = strings.ReplaceAll(name, " ", "") + name = strings.ReplaceAll(name, "\n", "") + name = strings.ReplaceAll(name, "\r", "") return name == "Google" default: return false diff --git a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go b/vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go similarity index 63% rename from vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go rename to vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go index faa998de763..092ad187a2c 100644 --- a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go +++ b/vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go @@ -16,17 +16,21 @@ * */ +// Package grpclog provides logging functionality for internal gRPC packages, +// outside of the functionality provided by the external `grpclog` package. package grpclog import ( "fmt" + + "google.golang.org/grpc/grpclog" ) // PrefixLogger does logging with a prefix. // // Logging method on a nil logs without any prefix. type PrefixLogger struct { - logger DepthLoggerV2 + logger grpclog.DepthLoggerV2 prefix string } @@ -38,7 +42,7 @@ func (pl *PrefixLogger) Infof(format string, args ...any) { pl.logger.InfoDepth(1, fmt.Sprintf(format, args...)) return } - InfoDepth(1, fmt.Sprintf(format, args...)) + grpclog.InfoDepth(1, fmt.Sprintf(format, args...)) } // Warningf does warning logging. @@ -48,7 +52,7 @@ func (pl *PrefixLogger) Warningf(format string, args ...any) { pl.logger.WarningDepth(1, fmt.Sprintf(format, args...)) return } - WarningDepth(1, fmt.Sprintf(format, args...)) + grpclog.WarningDepth(1, fmt.Sprintf(format, args...)) } // Errorf does error logging. @@ -58,36 +62,18 @@ func (pl *PrefixLogger) Errorf(format string, args ...any) { pl.logger.ErrorDepth(1, fmt.Sprintf(format, args...)) return } - ErrorDepth(1, fmt.Sprintf(format, args...)) -} - -// Debugf does info logging at verbose level 2. -func (pl *PrefixLogger) Debugf(format string, args ...any) { - // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe - // rewrite PrefixLogger a little to ensure that we don't use the global - // `Logger` here, and instead use the `logger` field. - if !Logger.V(2) { - return - } - if pl != nil { - // Handle nil, so the tests can pass in a nil logger. 
- format = pl.prefix + format - pl.logger.InfoDepth(1, fmt.Sprintf(format, args...)) - return - } - InfoDepth(1, fmt.Sprintf(format, args...)) - + grpclog.ErrorDepth(1, fmt.Sprintf(format, args...)) } // V reports whether verbosity level l is at least the requested verbose level. func (pl *PrefixLogger) V(l int) bool { - // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe - // rewrite PrefixLogger a little to ensure that we don't use the global - // `Logger` here, and instead use the `logger` field. - return Logger.V(l) + if pl != nil { + return pl.logger.V(l) + } + return true } // NewPrefixLogger creates a prefix logger with the given prefix. -func NewPrefixLogger(logger DepthLoggerV2, prefix string) *PrefixLogger { +func NewPrefixLogger(logger grpclog.DepthLoggerV2, prefix string) *PrefixLogger { return &PrefixLogger{logger: logger, prefix: prefix} } diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go index f7f40a16ace..8e8e861280a 100644 --- a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go +++ b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go @@ -53,16 +53,28 @@ func NewCallbackSerializer(ctx context.Context) *CallbackSerializer { return cs } -// Schedule adds a callback to be scheduled after existing callbacks are run. +// TrySchedule tries to schedule the provided callback function f to be +// executed in the order it was added. This is a best-effort operation. If the +// context passed to NewCallbackSerializer was canceled before this method is +// called, the callback will not be scheduled. // // Callbacks are expected to honor the context when performing any blocking // operations, and should return early when the context is canceled. +func (cs *CallbackSerializer) TrySchedule(f func(ctx context.Context)) { + cs.callbacks.Put(f) +} + +// ScheduleOr schedules the provided callback function f to be executed in the +// order it was added. If the context passed to NewCallbackSerializer has been +// canceled before this method is called, the onFailure callback will be +// executed inline instead. // -// Return value indicates if the callback was successfully added to the list of -// callbacks to be executed by the serializer. It is not possible to add -// callbacks once the context passed to NewCallbackSerializer is cancelled. -func (cs *CallbackSerializer) Schedule(f func(ctx context.Context)) bool { - return cs.callbacks.Put(f) == nil +// Callbacks are expected to honor the context when performing any blocking +// operations, and should return early when the context is canceled. 
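// ---------------------------------------------------------------------------
// Illustrative sketch (not from the diff), tied to the PrefixLogger changes
// above: internal code now routes prefixed logging through the exported
// grpclog package, and PrefixLogger.V consults its own DepthLoggerV2 rather
// than the old internal global. Application code gets equivalent prefixed,
// verbosity-gated logging via the public component logger; the component
// name is an assumption.
package main

import "google.golang.org/grpc/grpclog"

var logger = grpclog.Component("myapp") // entries come out prefixed with [myapp]

func main() {
	if logger.V(2) { // gated by GRPC_GO_VERBOSITY_LEVEL or SetLoggerV2 config
		logger.Info("verbose detail")
	}
	logger.Warning("always logged at WARNING severity")
}
// ---------------------------------------------------------------------------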
+func (cs *CallbackSerializer) ScheduleOr(f func(ctx context.Context), onFailure func()) { + if cs.callbacks.Put(f) != nil { + onFailure() + } } func (cs *CallbackSerializer) run(ctx context.Context) { diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go index aef8cec1ab0..6d8c2f518df 100644 --- a/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go +++ b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go @@ -77,7 +77,7 @@ func (ps *PubSub) Subscribe(sub Subscriber) (cancel func()) { if ps.msg != nil { msg := ps.msg - ps.cs.Schedule(func(context.Context) { + ps.cs.TrySchedule(func(context.Context) { ps.mu.Lock() defer ps.mu.Unlock() if !ps.subscribers[sub] { @@ -103,7 +103,7 @@ func (ps *PubSub) Publish(msg any) { ps.msg = msg for sub := range ps.subscribers { s := sub - ps.cs.Schedule(func(context.Context) { + ps.cs.TrySchedule(func(context.Context) { ps.mu.Lock() defer ps.mu.Unlock() if !ps.subscribers[s] { diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/method.go b/vendor/google.golang.org/grpc/internal/grpcutil/method.go index ec62b4775e5..683d1955c6a 100644 --- a/vendor/google.golang.org/grpc/internal/grpcutil/method.go +++ b/vendor/google.golang.org/grpc/internal/grpcutil/method.go @@ -39,7 +39,7 @@ func ParseMethod(methodName string) (service, method string, _ error) { } // baseContentType is the base content-type for gRPC. This is a valid -// content-type on it's own, but can also include a content-subtype such as +// content-type on its own, but can also include a content-subtype such as // "proto" as a suffix after "+" or ";". See // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests // for more details. diff --git a/vendor/google.golang.org/grpc/internal/idle/idle.go b/vendor/google.golang.org/grpc/internal/idle/idle.go index fe49cb74c55..2c13ee9dac7 100644 --- a/vendor/google.golang.org/grpc/internal/idle/idle.go +++ b/vendor/google.golang.org/grpc/internal/idle/idle.go @@ -182,6 +182,7 @@ func (m *Manager) tryEnterIdleMode() bool { return true } +// EnterIdleModeForTesting instructs the channel to enter idle mode. func (m *Manager) EnterIdleModeForTesting() { m.tryEnterIdleMode() } @@ -225,7 +226,7 @@ func (m *Manager) ExitIdleMode() error { // came in and OnCallBegin() noticed that the calls count is negative. // - Channel is in idle mode, and multiple new RPCs come in at the same // time, all of them notice a negative calls count in OnCallBegin and get - // here. The first one to get the lock would got the channel to exit idle. + // here. The first one to get the lock would get the channel to exit idle. // - Channel is not in idle mode, and the user calls Connect which calls // m.ExitIdleMode. // @@ -266,6 +267,7 @@ func (m *Manager) isClosed() bool { return atomic.LoadInt32(&m.closed) == 1 } +// Close stops the timer associated with the Manager, if it exists. func (m *Manager) Close() { atomic.StoreInt32(&m.closed, 1) diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 5d665398692..20b4dc3d353 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -183,7 +183,7 @@ var ( // GRPCResolverSchemeExtraMetadata determines when gRPC will add extra // metadata to RPCs. - GRPCResolverSchemeExtraMetadata string = "xds" + GRPCResolverSchemeExtraMetadata = "xds" // EnterIdleModeForTesting gets the ClientConn to enter IDLE mode. 
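// ---------------------------------------------------------------------------
// Sketch (not from the diff) of the split of the old bool-returning Schedule
// into TrySchedule and ScheduleOr. The demo function is hypothetical, and
// this compiles only from within the grpc module, since grpcsync is an
// internal package.
package sketch

import (
	"context"

	"google.golang.org/grpc/internal/grpcsync"
)

func demo() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	cs := grpcsync.NewCallbackSerializer(ctx)

	// Best effort: the callback is silently dropped if ctx was already
	// canceled, mirroring the old Schedule call sites that ignored the bool.
	cs.TrySchedule(func(ctx context.Context) { /* ordered work */ })

	// Explicit failure path: onFailure runs inline when the serializer can
	// no longer accept callbacks.
	cs.ScheduleOr(
		func(ctx context.Context) { /* ordered work */ },
		func() { /* release whatever the callback would have consumed */ },
	)
}
// ---------------------------------------------------------------------------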
EnterIdleModeForTesting any // func(*grpc.ClientConn) @@ -191,6 +191,8 @@ var ( // ExitIdleModeForTesting gets the ClientConn to exit IDLE mode. ExitIdleModeForTesting any // func(*grpc.ClientConn) error + // ChannelzTurnOffForTesting disables the Channelz service for testing + // purposes. ChannelzTurnOffForTesting func() // TriggerXDSResourceNotFoundForTesting causes the provided xDS Client to @@ -203,11 +205,27 @@ var ( // UserSetDefaultScheme is set to true if the user has overridden the // default resolver scheme. - UserSetDefaultScheme bool = false + UserSetDefaultScheme = false - // ShuffleAddressListForTesting pseudo-randomizes the order of addresses. n - // is the number of elements. swap swaps the elements with indexes i and j. - ShuffleAddressListForTesting any // func(n int, swap func(i, j int)) + // ConnectedAddress returns the connected address for a SubConnState. The + // address is only valid if the state is READY. + ConnectedAddress any // func (scs SubConnState) resolver.Address + + // SetConnectedAddress sets the connected address for a SubConnState. + SetConnectedAddress any // func(scs *SubConnState, addr resolver.Address) + + // SnapshotMetricRegistryForTesting snapshots the global data of the metric + // registry. Returns a cleanup function that sets the metric registry to its + // original state. Only called in testing functions. + SnapshotMetricRegistryForTesting func() func() + + // SetDefaultBufferPoolForTesting updates the default buffer pool, for + // testing purposes. + SetDefaultBufferPoolForTesting any // func(mem.BufferPool) + + // SetBufferPoolingThresholdForTesting updates the buffer pooling threshold, for + // testing purposes. + SetBufferPoolingThresholdForTesting any // func(int) ) // HealthChecker defines the signature of the client-side LB channel health @@ -215,7 +233,7 @@ var ( // // The implementation is expected to create a health checking RPC stream by // calling newStream(), watch for the health status of serviceName, and report -// it's health back by calling setConnectivityState(). +// its health back by calling setConnectivityState(). // // The health checking protocol is defined at: // https://github.com/grpc/grpc/blob/master/doc/health-checking.md diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go index 4552db16b02..374c12fb770 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -177,7 +177,7 @@ type dnsResolver struct { // finished. Otherwise, data race will be possible. [Race Example] in // dns_resolver_test we replace the real lookup functions with mocked ones to // facilitate testing. If Close() doesn't wait for watcher() goroutine - // finishes, race detector sometimes will warns lookup (READ the lookup + // finishes, race detector sometimes will warn lookup (READ the lookup // function pointers) inside watcher() goroutine has data race with // replaceNetFunc (WRITE the lookup function pointers). wg sync.WaitGroup @@ -237,7 +237,9 @@ func (d *dnsResolver) watcher() { } func (d *dnsResolver) lookupSRV(ctx context.Context) ([]resolver.Address, error) { - if !EnableSRVLookups { + // Skip this particular host to avoid timeouts with some versions of + // systemd-resolved. + if !EnableSRVLookups || d.host == "metadata.google.internal." 
{ return nil, nil } var newAddrs []resolver.Address diff --git a/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go index afac56572ad..b901c7bace5 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go +++ b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go @@ -55,7 +55,7 @@ func (r *passthroughResolver) start() { r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint()}}}) } -func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOptions) {} +func (*passthroughResolver) ResolveNow(resolver.ResolveNowOptions) {} func (*passthroughResolver) Close() {} diff --git a/vendor/google.golang.org/grpc/internal/stats/labels.go b/vendor/google.golang.org/grpc/internal/stats/labels.go new file mode 100644 index 00000000000..fd33af51ae8 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/stats/labels.go @@ -0,0 +1,42 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package stats provides internal stats related functionality. +package stats + +import "context" + +// Labels are the labels for metrics. +type Labels struct { + // TelemetryLabels are the telemetry labels to record. + TelemetryLabels map[string]string +} + +type labelsKey struct{} + +// GetLabels returns the Labels stored in the context, or nil if there is one. +func GetLabels(ctx context.Context) *Labels { + labels, _ := ctx.Value(labelsKey{}).(*Labels) + return labels +} + +// SetLabels sets the Labels in the context. +func SetLabels(ctx context.Context, labels *Labels) context.Context { + // could also append + return context.WithValue(ctx, labelsKey{}, labels) +} diff --git a/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go b/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go new file mode 100644 index 00000000000..79044657be1 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go @@ -0,0 +1,105 @@ +/* + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package stats + +import ( + "fmt" + + estats "google.golang.org/grpc/experimental/stats" + "google.golang.org/grpc/stats" +) + +// MetricsRecorderList forwards Record calls to all of its metricsRecorders. 
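// ---------------------------------------------------------------------------
// Sketch (not from the diff) of the labels round trip added above; the label
// key and value are assumptions, and this compiles only from within the grpc
// module, since internal/stats is an internal package.
package sketch

import (
	"context"
	"fmt"

	istats "google.golang.org/grpc/internal/stats"
)

func demo() {
	// Attach telemetry labels for a call, then recover them downstream;
	// GetLabels returns nil on a context that never saw SetLabels.
	ctx := istats.SetLabels(context.Background(), &istats.Labels{
		TelemetryLabels: map[string]string{"grpc.lb.locality": "us-east1-b"},
	})
	if l := istats.GetLabels(ctx); l != nil {
		fmt.Println(l.TelemetryLabels["grpc.lb.locality"])
	}
}
// ---------------------------------------------------------------------------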
+// +// It eats any record calls where the label values provided do not match the +// number of label keys. +type MetricsRecorderList struct { + // metricsRecorders are the metrics recorders this list will forward to. + metricsRecorders []estats.MetricsRecorder +} + +// NewMetricsRecorderList creates a new metric recorder list with all the stats +// handlers provided which implement the MetricsRecorder interface. +// If no stats handlers provided implement the MetricsRecorder interface, +// the MetricsRecorder list returned is a no-op. +func NewMetricsRecorderList(shs []stats.Handler) *MetricsRecorderList { + var mrs []estats.MetricsRecorder + for _, sh := range shs { + if mr, ok := sh.(estats.MetricsRecorder); ok { + mrs = append(mrs, mr) + } + } + return &MetricsRecorderList{ + metricsRecorders: mrs, + } +} + +func verifyLabels(desc *estats.MetricDescriptor, labelsRecv ...string) { + if got, want := len(labelsRecv), len(desc.Labels)+len(desc.OptionalLabels); got != want { + panic(fmt.Sprintf("Received %d labels in call to record metric %q, but expected %d.", got, desc.Name, want)) + } +} + +// RecordInt64Count records the measurement alongside labels on the int +// count associated with the provided handle. +func (l *MetricsRecorderList) RecordInt64Count(handle *estats.Int64CountHandle, incr int64, labels ...string) { + verifyLabels(handle.Descriptor(), labels...) + + for _, metricRecorder := range l.metricsRecorders { + metricRecorder.RecordInt64Count(handle, incr, labels...) + } +} + +// RecordFloat64Count records the measurement alongside labels on the float +// count associated with the provided handle. +func (l *MetricsRecorderList) RecordFloat64Count(handle *estats.Float64CountHandle, incr float64, labels ...string) { + verifyLabels(handle.Descriptor(), labels...) + + for _, metricRecorder := range l.metricsRecorders { + metricRecorder.RecordFloat64Count(handle, incr, labels...) + } +} + +// RecordInt64Histo records the measurement alongside labels on the int +// histo associated with the provided handle. +func (l *MetricsRecorderList) RecordInt64Histo(handle *estats.Int64HistoHandle, incr int64, labels ...string) { + verifyLabels(handle.Descriptor(), labels...) + + for _, metricRecorder := range l.metricsRecorders { + metricRecorder.RecordInt64Histo(handle, incr, labels...) + } +} + +// RecordFloat64Histo records the measurement alongside labels on the float +// histo associated with the provided handle. +func (l *MetricsRecorderList) RecordFloat64Histo(handle *estats.Float64HistoHandle, incr float64, labels ...string) { + verifyLabels(handle.Descriptor(), labels...) + + for _, metricRecorder := range l.metricsRecorders { + metricRecorder.RecordFloat64Histo(handle, incr, labels...) + } +} + +// RecordInt64Gauge records the measurement alongside labels on the int +// gauge associated with the provided handle. +func (l *MetricsRecorderList) RecordInt64Gauge(handle *estats.Int64GaugeHandle, incr int64, labels ...string) { + verifyLabels(handle.Descriptor(), labels...) + + for _, metricRecorder := range l.metricsRecorders { + metricRecorder.RecordInt64Gauge(handle, incr, labels...) 
+ } +} diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go index c7dbc820595..1186f1e9a9a 100644 --- a/vendor/google.golang.org/grpc/internal/status/status.go +++ b/vendor/google.golang.org/grpc/internal/status/status.go @@ -138,17 +138,19 @@ func (s *Status) WithDetails(details ...protoadapt.MessageV1) (*Status, error) { // s.Code() != OK implies that s.Proto() != nil. p := s.Proto() for _, detail := range details { - any, err := anypb.New(protoadapt.MessageV2Of(detail)) + m, err := anypb.New(protoadapt.MessageV2Of(detail)) if err != nil { return nil, err } - p.Details = append(p.Details, any) + p.Details = append(p.Details, m) } return &Status{s: p}, nil } // Details returns a slice of details messages attached to the status. // If a detail cannot be decoded, the error is returned in place of the detail. +// If the detail can be decoded, the proto message returned is of the same +// type that was given to WithDetails(). func (s *Status) Details() []any { if s == nil || s.s == nil { return nil @@ -160,7 +162,38 @@ func (s *Status) Details() []any { details = append(details, err) continue } - details = append(details, detail) + // The call to MessageV1Of is required to unwrap the proto message if + // it implemented only the MessageV1 API. The proto message would have + // been wrapped in a V2 wrapper in Status.WithDetails. V2 messages are + // added to a global registry used by any.UnmarshalNew(). + // MessageV1Of has the following behaviour: + // 1. If the given message is a wrapped MessageV1, it returns the + // unwrapped value. + // 2. If the given message already implements MessageV1, it returns it + // as is. + // 3. Else, it wraps the MessageV2 in a MessageV1 wrapper. + // + // Since the Status.WithDetails() API only accepts MessageV1, calling + // MessageV1Of ensures we return the same type that was given to + // WithDetails: + // * If the give type implemented only MessageV1, the unwrapping from + // point 1 above will restore the type. + // * If the given type implemented both MessageV1 and MessageV2, point 2 + // above will ensure no wrapping is performed. + // * If the given type implemented only MessageV2 and was wrapped using + // MessageV1Of before passing to WithDetails(), it would be unwrapped + // in WithDetails by calling MessageV2Of(). Point 3 above will ensure + // that the type is wrapped in a MessageV1 wrapper again before + // returning. Note that protoc-gen-go doesn't generate code which + // implements ONLY MessageV2 at the time of writing. + // + // NOTE: Status details can also be added using the FromProto method. + // This could theoretically allow passing a Detail message that only + // implements the V2 API. In such a case the message will be wrapped in + // a MessageV1 wrapper when fetched using Details(). + // Since protoc-gen-go generates only code that implements both V1 and + // V2 APIs for backward compatibility, this is not a concern. 
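// ---------------------------------------------------------------------------
// Illustrative round trip (not from the diff) through the public status
// package, which wraps the internal Status above. With the MessageV1Of
// change, Details() hands back the same concrete type that was given to
// WithDetails(), so the type assertion below is stable across the V1/V2
// proto APIs.
package main

import (
	"fmt"
	"log"

	"google.golang.org/genproto/googleapis/rpc/errdetails"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	st := status.New(codes.InvalidArgument, "bad request")
	st, err := st.WithDetails(&errdetails.BadRequest{
		FieldViolations: []*errdetails.BadRequest_FieldViolation{
			{Field: "name", Description: "required"},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, d := range st.Details() {
		if br, ok := d.(*errdetails.BadRequest); ok { // same type we stored
			fmt.Println(br.GetFieldViolations()[0].GetField())
		}
	}
}
// ---------------------------------------------------------------------------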
+ details = append(details, protoadapt.MessageV1Of(detail)) } return details } diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go index 999f52cd75b..54c24c2ff38 100644 --- a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go +++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go @@ -58,20 +58,20 @@ func GetRusage() *Rusage { // CPUTimeDiff returns the differences of user CPU time and system CPU time used // between two Rusage structs. It a no-op function for non-linux environments. -func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) { +func CPUTimeDiff(*Rusage, *Rusage) (float64, float64) { log() return 0, 0 } // SetTCPUserTimeout is a no-op function under non-linux environments. -func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error { +func SetTCPUserTimeout(net.Conn, time.Duration) error { log() return nil } // GetTCPUserTimeout is a no-op function under non-linux environments. // A negative return value indicates the operation is not supported -func GetTCPUserTimeout(conn net.Conn) (int, error) { +func GetTCPUserTimeout(net.Conn) (int, error) { log() return -1, nil } diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go index 078137b7fd7..7e7aaa54636 100644 --- a/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go +++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go @@ -44,7 +44,7 @@ func NetDialerWithTCPKeepalive() *net.Dialer { // combination of unconditionally enabling TCP keepalives here, and // disabling the overriding of TCP keepalive parameters by setting the // KeepAlive field to a negative value above, results in OS defaults for - // the TCP keealive interval and time parameters. + // the TCP keepalive interval and time parameters. Control: func(_, _ string, c syscall.RawConn) error { return c.Control(func(fd uintptr) { unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1) diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go index fd7d43a8907..d5c1085eeae 100644 --- a/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go +++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go @@ -44,7 +44,7 @@ func NetDialerWithTCPKeepalive() *net.Dialer { // combination of unconditionally enabling TCP keepalives here, and // disabling the overriding of TCP keepalive parameters by setting the // KeepAlive field to a negative value above, results in OS defaults for - // the TCP keealive interval and time parameters. + // the TCP keepalive interval and time parameters. 
Control: func(_, _ string, c syscall.RawConn) error { return c.Control(func(fd uintptr) { windows.SetsockoptInt(windows.Handle(fd), windows.SOL_SOCKET, windows.SO_KEEPALIVE, 1) diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index 3deadfb4a20..ef72fbb3a01 100644 --- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -32,6 +32,7 @@ import ( "golang.org/x/net/http2/hpack" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/mem" "google.golang.org/grpc/status" ) @@ -148,9 +149,9 @@ type dataFrame struct { streamID uint32 endStream bool h []byte - d []byte + reader mem.Reader // onEachWrite is called every time - // a part of d is written out. + // a part of data is written out. onEachWrite func() } @@ -289,18 +290,22 @@ func (l *outStreamList) dequeue() *outStream { } // controlBuffer is a way to pass information to loopy. -// Information is passed as specific struct types called control frames. -// A control frame not only represents data, messages or headers to be sent out -// but can also be used to instruct loopy to update its internal state. -// It shouldn't be confused with an HTTP2 frame, although some of the control frames -// like dataFrame and headerFrame do go out on wire as HTTP2 frames. +// +// Information is passed as specific struct types called control frames. A +// control frame not only represents data, messages or headers to be sent out +// but can also be used to instruct loopy to update its internal state. It +// shouldn't be confused with an HTTP2 frame, although some of the control +// frames like dataFrame and headerFrame do go out on wire as HTTP2 frames. type controlBuffer struct { - ch chan struct{} - done <-chan struct{} + wakeupCh chan struct{} // Unblocks readers waiting for something to read. + done <-chan struct{} // Closed when the transport is done. + + // Mutex guards all the fields below, except trfChan which can be read + // atomically without holding mu. mu sync.Mutex - consumerWaiting bool - list *itemList - err error + consumerWaiting bool // True when readers are blocked waiting for new data. + closed bool // True when the controlbuf is finished. + list *itemList // List of queued control frames. // transportResponseFrames counts the number of queued items that represent // the response of an action initiated by the peer. trfChan is created @@ -308,47 +313,59 @@ type controlBuffer struct { // closed and nilled when transportResponseFrames drops below the // threshold. Both fields are protected by mu. transportResponseFrames int - trfChan atomic.Value // chan struct{} + trfChan atomic.Pointer[chan struct{}] } func newControlBuffer(done <-chan struct{}) *controlBuffer { return &controlBuffer{ - ch: make(chan struct{}, 1), - list: &itemList{}, - done: done, + wakeupCh: make(chan struct{}, 1), + list: &itemList{}, + done: done, } } -// throttle blocks if there are too many incomingSettings/cleanupStreams in the -// controlbuf. +// throttle blocks if there are too many frames in the control buf that +// represent the response of an action initiated by the peer, like +// incomingSettings cleanupStreams etc. 
func (c *controlBuffer) throttle() { - ch, _ := c.trfChan.Load().(chan struct{}) - if ch != nil { + if ch := c.trfChan.Load(); ch != nil { select { - case <-ch: + case <-(*ch): case <-c.done: } } } +// put adds an item to the controlbuf. func (c *controlBuffer) put(it cbItem) error { _, err := c.executeAndPut(nil, it) return err } +// executeAndPut runs f, and if the return value is true, adds the given item to +// the controlbuf. The item could be nil, in which case, this method simply +// executes f and does not add the item to the controlbuf. +// +// The first return value indicates whether the item was successfully added to +// the control buffer. A non-nil error, specifically ErrConnClosing, is returned +// if the control buffer is already closed. func (c *controlBuffer) executeAndPut(f func() bool, it cbItem) (bool, error) { - var wakeUp bool c.mu.Lock() - if c.err != nil { - c.mu.Unlock() - return false, c.err + defer c.mu.Unlock() + + if c.closed { + return false, ErrConnClosing } if f != nil { if !f() { // f wasn't successful - c.mu.Unlock() return false, nil } } + if it == nil { + return true, nil + } + + var wakeUp bool if c.consumerWaiting { wakeUp = true c.consumerWaiting = false @@ -359,98 +376,102 @@ func (c *controlBuffer) executeAndPut(f func() bool, it cbItem) (bool, error) { if c.transportResponseFrames == maxQueuedTransportResponseFrames { // We are adding the frame that puts us over the threshold; create // a throttling channel. - c.trfChan.Store(make(chan struct{})) + ch := make(chan struct{}) + c.trfChan.Store(&ch) } } - c.mu.Unlock() if wakeUp { select { - case c.ch <- struct{}{}: + case c.wakeupCh <- struct{}{}: default: } } return true, nil } -// Note argument f should never be nil. -func (c *controlBuffer) execute(f func(it any) bool, it any) (bool, error) { - c.mu.Lock() - if c.err != nil { - c.mu.Unlock() - return false, c.err - } - if !f(it) { // f wasn't successful - c.mu.Unlock() - return false, nil - } - c.mu.Unlock() - return true, nil -} - +// get returns the next control frame from the control buffer. If block is true +// **and** there are no control frames in the control buffer, the call blocks +// until one of the conditions is met: there is a frame to return or the +// transport is closed. func (c *controlBuffer) get(block bool) (any, error) { for { c.mu.Lock() - if c.err != nil { + frame, err := c.getOnceLocked() + if frame != nil || err != nil || !block { + // If we read a frame or an error, we can return to the caller. The + // call to getOnceLocked() returns a nil frame and a nil error if + // there is nothing to read, and in that case, if the caller asked + // us not to block, we can return now as well. c.mu.Unlock() - return nil, c.err - } - if !c.list.isEmpty() { - h := c.list.dequeue().(cbItem) - if h.isTransportResponseFrame() { - if c.transportResponseFrames == maxQueuedTransportResponseFrames { - // We are removing the frame that put us over the - // threshold; close and clear the throttling channel. - ch := c.trfChan.Load().(chan struct{}) - close(ch) - c.trfChan.Store((chan struct{})(nil)) - } - c.transportResponseFrames-- - } - c.mu.Unlock() - return h, nil - } - if !block { - c.mu.Unlock() - return nil, nil + return frame, err } c.consumerWaiting = true c.mu.Unlock() + + // Release the lock above and wait to be woken up. select { - case <-c.ch: + case <-c.wakeupCh: case <-c.done: return nil, errors.New("transport closed by client") } } } +// Callers must not use this method, but should instead use get(). +// +// Caller must hold c.mu. 
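// A hedged reduction of the consumerWaiting/wakeupCh handshake used by
// executeAndPut and get above: a mutex-guarded list plus a 1-buffered wakeup
// channel, so producers never block and redundant wakeups coalesce. The names
// (queue, put, get) are illustrative, not gRPC API.
package main

import (
	"fmt"
	"sync"
)

type queue struct {
	mu       sync.Mutex
	items    []int
	waiting  bool
	wakeupCh chan struct{} // capacity 1: extra signals are dropped
}

func (q *queue) put(v int) {
	q.mu.Lock()
	q.items = append(q.items, v)
	wake := q.waiting
	q.waiting = false
	q.mu.Unlock()
	if wake {
		select {
		case q.wakeupCh <- struct{}{}:
		default: // a wakeup is already pending
		}
	}
}

func (q *queue) get() int {
	for {
		q.mu.Lock()
		if len(q.items) > 0 {
			v := q.items[0]
			q.items = q.items[1:]
			q.mu.Unlock()
			return v
		}
		q.waiting = true
		q.mu.Unlock()
		<-q.wakeupCh // sleep until put signals
	}
}

func main() {
	q := &queue{wakeupCh: make(chan struct{}, 1)}
	go q.put(42)
	fmt.Println(q.get())
}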
+func (c *controlBuffer) getOnceLocked() (any, error) { + if c.closed { + return false, ErrConnClosing + } + if c.list.isEmpty() { + return nil, nil + } + h := c.list.dequeue().(cbItem) + if h.isTransportResponseFrame() { + if c.transportResponseFrames == maxQueuedTransportResponseFrames { + // We are removing the frame that put us over the + // threshold; close and clear the throttling channel. + ch := c.trfChan.Swap(nil) + close(*ch) + } + c.transportResponseFrames-- + } + return h, nil +} + +// finish closes the control buffer, cleaning up any streams that have queued +// header frames. Once this method returns, no more frames can be added to the +// control buffer, and attempts to do so will return ErrConnClosing. func (c *controlBuffer) finish() { c.mu.Lock() - if c.err != nil { - c.mu.Unlock() + defer c.mu.Unlock() + + if c.closed { return } - c.err = ErrConnClosing + c.closed = true // There may be headers for streams in the control buffer. // These streams need to be cleaned out since the transport // is still not aware of these yet. for head := c.list.dequeueAll(); head != nil; head = head.next { - hdr, ok := head.it.(*headerFrame) - if !ok { - continue - } - if hdr.onOrphaned != nil { // It will be nil on the server-side. - hdr.onOrphaned(ErrConnClosing) + switch v := head.it.(type) { + case *headerFrame: + if v.onOrphaned != nil { // It will be nil on the server-side. + v.onOrphaned(ErrConnClosing) + } + case *dataFrame: + _ = v.reader.Close() } } + // In case throttle() is currently in flight, it needs to be unblocked. // Otherwise, the transport may not close, since the transport is closed by // the reader encountering the connection error. - ch, _ := c.trfChan.Load().(chan struct{}) + ch := c.trfChan.Swap(nil) if ch != nil { - close(ch) + close(*ch) } - c.trfChan.Store((chan struct{})(nil)) - c.mu.Unlock() } type side int @@ -466,7 +487,7 @@ const ( // stream maintains a queue of data frames; as loopy receives data frames // it gets added to the queue of the relevant stream. // Loopy goes over this list of active streams by processing one node every iteration, -// thereby closely resemebling to a round-robin scheduling over all streams. While +// thereby closely resembling a round-robin scheduling over all streams. While // processing a stream, loopy writes out data bytes from this stream capped by the min // of http2MaxFrameLen, connection-level flow control and stream-level flow control. type loopyWriter struct { @@ -490,12 +511,13 @@ type loopyWriter struct { draining bool conn net.Conn logger *grpclog.PrefixLogger + bufferPool mem.BufferPool // Side-specific handlers ssGoAwayHandler func(*goAway) (bool, error) } -func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger, goAwayHandler func(*goAway) (bool, error)) *loopyWriter { +func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger, goAwayHandler func(*goAway) (bool, error), bufferPool mem.BufferPool) *loopyWriter { var buf bytes.Buffer l := &loopyWriter{ side: s, @@ -511,6 +533,7 @@ func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimato conn: conn, logger: logger, ssGoAwayHandler: goAwayHandler, + bufferPool: bufferPool, } return l } @@ -768,6 +791,11 @@ func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { // not be established yet. 
delete(l.estdStreams, c.streamID) str.deleteSelf() + for head := str.itl.dequeueAll(); head != nil; head = head.next { + if df, ok := head.it.(*dataFrame); ok { + _ = df.reader.Close() + } + } } if c.rst { // If RST_STREAM needs to be sent. if err := l.framer.fr.WriteRSTStream(c.streamID, c.rstCode); err != nil { @@ -903,16 +931,18 @@ func (l *loopyWriter) processData() (bool, error) { dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item this stream. // A data item is represented by a dataFrame, since it later translates into // multiple HTTP2 data frames. - // Every dataFrame has two buffers; h that keeps grpc-message header and d that is actual data. - // As an optimization to keep wire traffic low, data from d is copied to h to make as big as the - // maximum possible HTTP2 frame size. + // Every dataFrame has two buffers; h that keeps grpc-message header and data + // that is the actual message. As an optimization to keep wire traffic low, data + // from data is copied to h to make as big as the maximum possible HTTP2 frame + // size. - if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // Empty data frame + if len(dataItem.h) == 0 && dataItem.reader.Remaining() == 0 { // Empty data frame // Client sends out empty data frame with endStream = true if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil { return false, err } str.itl.dequeue() // remove the empty data item from stream + _ = dataItem.reader.Close() if str.itl.isEmpty() { str.state = empty } else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers. @@ -927,9 +957,7 @@ func (l *loopyWriter) processData() (bool, error) { } return false, nil } - var ( - buf []byte - ) + // Figure out the maximum size we can send maxSize := http2MaxFrameLen if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { // stream-level flow control. @@ -943,43 +971,50 @@ func (l *loopyWriter) processData() (bool, error) { } // Compute how much of the header and data we can send within quota and max frame length hSize := min(maxSize, len(dataItem.h)) - dSize := min(maxSize-hSize, len(dataItem.d)) - if hSize != 0 { - if dSize == 0 { - buf = dataItem.h - } else { - // We can add some data to grpc message header to distribute bytes more equally across frames. - // Copy on the stack to avoid generating garbage - var localBuf [http2MaxFrameLen]byte - copy(localBuf[:hSize], dataItem.h) - copy(localBuf[hSize:], dataItem.d[:dSize]) - buf = localBuf[:hSize+dSize] - } + dSize := min(maxSize-hSize, dataItem.reader.Remaining()) + remainingBytes := len(dataItem.h) + dataItem.reader.Remaining() - hSize - dSize + size := hSize + dSize + + var buf *[]byte + + if hSize != 0 && dSize == 0 { + buf = &dataItem.h } else { - buf = dataItem.d - } + // Note: this is only necessary because the http2.Framer does not support + // partially writing a frame, so the sequence must be materialized into a buffer. + // TODO: Revisit once https://github.com/golang/go/issues/66655 is addressed. + pool := l.bufferPool + if pool == nil { + // Note that this is only supposed to be nil in tests. Otherwise, stream is + // always initialized with a BufferPool. 
+ pool = mem.DefaultBufferPool() + } + buf = pool.Get(size) + defer pool.Put(buf) - size := hSize + dSize + copy((*buf)[:hSize], dataItem.h) + _, _ = dataItem.reader.Read((*buf)[hSize:]) + } // Now that outgoing flow controls are checked we can replenish str's write quota str.wq.replenish(size) var endStream bool // If this is the last data message on this stream and all of it can be written in this iteration. - if dataItem.endStream && len(dataItem.h)+len(dataItem.d) <= size { + if dataItem.endStream && remainingBytes == 0 { endStream = true } if dataItem.onEachWrite != nil { dataItem.onEachWrite() } - if err := l.framer.fr.WriteData(dataItem.streamID, endStream, buf[:size]); err != nil { + if err := l.framer.fr.WriteData(dataItem.streamID, endStream, (*buf)[:size]); err != nil { return false, err } str.bytesOutStanding += size l.sendQuota -= uint32(size) dataItem.h = dataItem.h[hSize:] - dataItem.d = dataItem.d[dSize:] - if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // All the data from that message was written out. + if remainingBytes == 0 { // All the data from that message was written out. + _ = dataItem.reader.Close() str.itl.dequeue() } if str.itl.isEmpty() { @@ -998,10 +1033,3 @@ func (l *loopyWriter) processData() (bool, error) { } return false, nil } - -func min(a, b int) int { - if a < b { - return a - } - return b -} diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index 4a3ddce29a4..ce878693bd7 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -24,7 +24,6 @@ package transport import ( - "bytes" "context" "errors" "fmt" @@ -40,6 +39,7 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" @@ -50,7 +50,7 @@ import ( // NewServerHandlerTransport returns a ServerTransport handling gRPC from // inside an http.Handler, or writes an HTTP error to w and returns an error. // It requires that the http Server supports HTTP/2. 
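// Before the handler_server changes below, a hedged sketch of the borrow,
// copy, return pattern processData uses above, relying only on the exported
// mem API from this patch (DefaultBufferPool, Get, Put). Frame contents are
// illustrative.
package main

import (
	"fmt"

	"google.golang.org/grpc/mem"
)

func writeFrame(header, payload []byte) {
	pool := mem.DefaultBufferPool()
	buf := pool.Get(len(header) + len(payload)) // borrow a right-sized *[]byte
	defer pool.Put(buf)                         // return it once written out
	n := copy(*buf, header)
	copy((*buf)[n:], payload)
	fmt.Printf("would write %d bytes in one frame\n", len(*buf))
}

func main() {
	writeFrame([]byte{0, 0, 0, 0, 3}, []byte("abc"))
}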
-func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler) (ServerTransport, error) { +func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler, bufferPool mem.BufferPool) (ServerTransport, error) { if r.Method != http.MethodPost { w.Header().Set("Allow", http.MethodPost) msg := fmt.Sprintf("invalid gRPC request method %q", r.Method) @@ -98,6 +98,7 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s contentType: contentType, contentSubtype: contentSubtype, stats: stats, + bufferPool: bufferPool, } st.logger = prefixLoggerForServerHandlerTransport(st) @@ -171,6 +172,8 @@ type serverHandlerTransport struct { stats []stats.Handler logger *grpclog.PrefixLogger + + bufferPool mem.BufferPool } func (ht *serverHandlerTransport) Close(err error) { @@ -244,6 +247,7 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro } s.hdrMu.Lock() + defer s.hdrMu.Unlock() if p := st.Proto(); p != nil && len(p.Details) > 0 { delete(s.trailer, grpcStatusDetailsBinHeader) stBytes, err := proto.Marshal(p) @@ -268,7 +272,6 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro } } } - s.hdrMu.Unlock() }) if err == nil { // transport has not been closed @@ -330,16 +333,28 @@ func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) { s.hdrMu.Unlock() } -func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { +func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data mem.BufferSlice, _ *Options) error { + // Always take a reference because otherwise there is no guarantee the data will + // be available after this function returns. This is what callers to Write + // expect. + data.Ref() headersWritten := s.updateHeaderSent() - return ht.do(func() { + err := ht.do(func() { + defer data.Free() if !headersWritten { ht.writePendingHeaders(s) } ht.rw.Write(hdr) - ht.rw.Write(data) + for _, b := range data { + _, _ = ht.rw.Write(b.ReadOnlyData()) + } ht.rw.(http.Flusher).Flush() }) + if err != nil { + data.Free() + return err + } + return nil } func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { @@ -406,7 +421,7 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream headerWireLength: 0, // won't have access to header wire length until golang/go#18997. 
} s.trReader = &transportReader{ - reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}}, + reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf}, windowHandler: func(int) {}, } @@ -415,21 +430,19 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream go func() { defer close(readerDone) - // TODO: minimize garbage, optimize recvBuffer code/ownership - const readSize = 8196 - for buf := make([]byte, readSize); ; { - n, err := req.Body.Read(buf) + for { + buf := ht.bufferPool.Get(http2MaxFrameLen) + n, err := req.Body.Read(*buf) if n > 0 { - s.buf.put(recvMsg{buffer: bytes.NewBuffer(buf[:n:n])}) - buf = buf[n:] + *buf = (*buf)[:n] + s.buf.put(recvMsg{buffer: mem.NewBuffer(buf, ht.bufferPool)}) + } else { + ht.bufferPool.Put(buf) } if err != nil { s.buf.put(recvMsg{err: mapRecvMsgError(err)}) return } - if len(buf) == 0 { - buf = make([]byte, readSize) - } } }() @@ -462,7 +475,7 @@ func (ht *serverHandlerTransport) IncrMsgSent() {} func (ht *serverHandlerTransport) IncrMsgRecv() {} -func (ht *serverHandlerTransport) Drain(debugData string) { +func (ht *serverHandlerTransport) Drain(string) { panic("Drain() is not implemented") } diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index 3c63c706986..62b81885d8e 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -47,6 +47,7 @@ import ( isyscall "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/internal/transport/networktype" "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/resolver" @@ -59,6 +60,8 @@ import ( // atomically. var clientConnectionCounter uint64 +var goAwayLoopyWriterTimeout = 5 * time.Second + var metadataFromOutgoingContextRaw = internal.FromOutgoingContextRaw.(func(context.Context) (metadata.MD, [][]string, bool)) // http2Client implements the ClientTransport interface with HTTP2. @@ -83,9 +86,9 @@ type http2Client struct { writerDone chan struct{} // sync point to enable testing. // goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor) // that the server sent GoAway on this transport. - goAway chan struct{} - - framer *framer + goAway chan struct{} + keepaliveDone chan struct{} // Closed when the keepalive goroutine exits. + framer *framer // controlBuf delivers all the control related tasks (e.g., window // updates, reset streams, and various settings) to the controller. // Do not access controlBuf with mu held. @@ -144,7 +147,7 @@ type http2Client struct { onClose func(GoAwayReason) - bufferPool *bufferPool + bufferPool mem.BufferPool connectionID uint64 logger *grpclog.PrefixLogger @@ -229,7 +232,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts } }(conn) - // The following defer and goroutine monitor the connectCtx for cancelation + // The following defer and goroutine monitor the connectCtx for cancellation // and deadline. On context expiration, the connection is hard closed and // this function will naturally fail as a result. 
Otherwise, the defer // waits for the goroutine to exit to prevent the context from being @@ -332,6 +335,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts readerDone: make(chan struct{}), writerDone: make(chan struct{}), goAway: make(chan struct{}), + keepaliveDone: make(chan struct{}), framer: newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize), fc: &trInFlow{limit: uint32(icwz)}, scheme: scheme, @@ -346,7 +350,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts streamQuota: defaultMaxStreamsClient, streamsQuotaAvailable: make(chan struct{}, 1), keepaliveEnabled: keepaliveEnabled, - bufferPool: newBufferPool(), + bufferPool: opts.BufferPool, onClose: onClose, } var czSecurity credentials.ChannelzSecurityValue @@ -463,7 +467,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts return nil, err } go func() { - t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler) + t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler, t.bufferPool) if err := t.loopy.run(); !isIOError(err) { // Immediately close the connection, as the loopy writer returns // when there are no more active streams and we were draining (the @@ -504,7 +508,6 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { closeStream: func(err error) { t.CloseStream(s, err) }, - freeBuffer: t.bufferPool.put, }, windowHandler: func(n int) { t.updateWindow(s, uint32(n)) @@ -525,8 +528,9 @@ func (t *http2Client) getPeer() *peer.Peer { // to be the last frame loopy writes to the transport. func (t *http2Client) outgoingGoAwayHandler(g *goAway) (bool, error) { t.mu.Lock() - defer t.mu.Unlock() - if err := t.framer.fr.WriteGoAway(t.nextID-2, http2.ErrCodeNo, g.debugData); err != nil { + maxStreamID := t.nextID - 2 + t.mu.Unlock() + if err := t.framer.fr.WriteGoAway(maxStreamID, http2.ErrCodeNo, g.debugData); err != nil { return false, err } return false, g.closeConn @@ -770,7 +774,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, hdr := &headerFrame{ hf: headerFields, endStream: false, - initStream: func(id uint32) error { + initStream: func(uint32) error { t.mu.Lock() // TODO: handle transport closure in loopy instead and remove this // initStream is never called when transport is draining. @@ -983,6 +987,7 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2. // only once on a transport. Once it is called, the transport should not be // accessed anymore. func (t *http2Client) Close(err error) { + t.conn.SetWriteDeadline(time.Now().Add(time.Second * 10)) t.mu.Lock() // Make sure we only close once. if t.state == closing { @@ -1005,18 +1010,33 @@ func (t *http2Client) Close(err error) { // should unblock it so that the goroutine eventually exits. t.kpDormancyCond.Signal() } + // Append info about previous goaways if there were any, since this may be important + // for understanding the root cause for this connection to be closed. + goAwayDebugMessage := t.goAwayDebugMessage t.mu.Unlock() + // Per HTTP/2 spec, a GOAWAY frame must be sent before closing the - // connection. See https://httpwg.org/specs/rfc7540.html#GOAWAY. + // connection. See https://httpwg.org/specs/rfc7540.html#GOAWAY. 
It + // also waits for loopyWriter to be closed with a timer to avoid the + // long blocking in case the connection is blackholed, i.e. TCP is + // just stuck. t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte("client transport shutdown"), closeConn: err}) - <-t.writerDone + timer := time.NewTimer(goAwayLoopyWriterTimeout) + defer timer.Stop() + select { + case <-t.writerDone: // success + case <-timer.C: + t.logger.Infof("Failed to write a GOAWAY frame as part of connection close after %s. Giving up and closing the transport.", goAwayLoopyWriterTimeout) + } t.cancel() t.conn.Close() + // Waits for the reader and keepalive goroutines to exit before returning to + // ensure all resources are cleaned up before Close can return. + <-t.readerDone + if t.keepaliveEnabled { + <-t.keepaliveDone + } channelz.RemoveEntry(t.channelz.ID) - // Append info about previous goaways if there were any, since this may be important - // for understanding the root cause for this connection to be closed. - _, goAwayDebugMessage := t.GetGoAwayReason() - var st *status.Status if len(goAwayDebugMessage) > 0 { st = status.Newf(codes.Unavailable, "closing transport due to: %v, received prior goaway: %v", err, goAwayDebugMessage) @@ -1065,27 +1085,36 @@ func (t *http2Client) GracefulClose() { // Write formats the data into HTTP2 data frame(s) and sends it out. The caller // should proceed only if Write returns nil. -func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { +func (t *http2Client) Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error { + reader := data.Reader() + if opts.Last { // If it's the last message, update stream state. if !s.compareAndSwapState(streamActive, streamWriteDone) { + _ = reader.Close() return errStreamDone } } else if s.getState() != streamActive { + _ = reader.Close() return errStreamDone } df := &dataFrame{ streamID: s.id, endStream: opts.Last, h: hdr, - d: data, + reader: reader, } - if hdr != nil || data != nil { // If it's not an empty data frame, check quota. - if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { + if hdr != nil || df.reader.Remaining() != 0 { // If it's not an empty data frame, check quota. + if err := s.wq.get(int32(len(hdr) + df.reader.Remaining())); err != nil { + _ = reader.Close() return err } } - return t.controlBuf.put(df) + if err := t.controlBuf.put(df); err != nil { + _ = reader.Close() + return err + } + return nil } func (t *http2Client) getStream(f http2.Frame) *Stream { @@ -1190,10 +1219,13 @@ func (t *http2Client) handleData(f *http2.DataFrame) { // guarantee f.Data() is consumed before the arrival of next frame. // Can this copy be eliminated? if len(f.Data()) > 0 { - buffer := t.bufferPool.get() - buffer.Reset() - buffer.Write(f.Data()) - s.write(recvMsg{buffer: buffer}) + pool := t.bufferPool + if pool == nil { + // Note that this is only supposed to be nil in tests. Otherwise, stream is + // always initialized with a BufferPool. + pool = mem.DefaultBufferPool() + } + s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)}) } } // The server has closed the stream without sending trailers. Record that @@ -1222,7 +1254,7 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { if statusCode == codes.Canceled { if d, ok := s.ctx.Deadline(); ok && !d.After(time.Now()) { // Our deadline was already exceeded, and that was likely the cause - // of this cancelation. Alter the status code accordingly. + // of this cancellation. Alter the status code accordingly. 
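// A hedged reduction of the bounded wait Close performs above: block on a
// completion channel, but give up after a timeout so a blackholed TCP
// connection cannot stall shutdown indefinitely. Names are illustrative.
package main

import (
	"fmt"
	"time"
)

func waitWithTimeout(done <-chan struct{}, d time.Duration) bool {
	timer := time.NewTimer(d)
	defer timer.Stop()
	select {
	case <-done:
		return true
	case <-timer.C:
		return false
	}
}

func main() {
	writerDone := make(chan struct{}) // never closed: simulates a stuck writer
	fmt.Println(waitWithTimeout(writerDone, 50*time.Millisecond)) // false
}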
statusCode = codes.DeadlineExceeded } } @@ -1291,11 +1323,11 @@ func (t *http2Client) handlePing(f *http2.PingFrame) { t.controlBuf.put(pingAck) } -func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { +func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) error { t.mu.Lock() if t.state == closing { t.mu.Unlock() - return + return nil } if f.ErrCode == http2.ErrCodeEnhanceYourCalm && string(f.DebugData()) == "too_many_pings" { // When a client receives a GOAWAY with error code ENHANCE_YOUR_CALM and debug @@ -1307,8 +1339,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { id := f.LastStreamID if id > 0 && id%2 == 0 { t.mu.Unlock() - t.Close(connectionErrorf(true, nil, "received goaway with non-zero even-numbered numbered stream id: %v", id)) - return + return connectionErrorf(true, nil, "received goaway with non-zero even-numbered stream id: %v", id) } // A client can receive multiple GoAways from the server (see // https://github.com/grpc/grpc-go/issues/1387). The idea is that the first @@ -1325,8 +1356,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { // If there are multiple GoAways the first one should always have an ID greater than the following ones. if id > t.prevGoAwayID { t.mu.Unlock() - t.Close(connectionErrorf(true, nil, "received goaway with stream id: %v, which exceeds stream id of previous goaway: %v", id, t.prevGoAwayID)) - return + return connectionErrorf(true, nil, "received goaway with stream id: %v, which exceeds stream id of previous goaway: %v", id, t.prevGoAwayID) } default: t.setGoAwayReason(f) @@ -1350,8 +1380,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { t.prevGoAwayID = id if len(t.activeStreams) == 0 { t.mu.Unlock() - t.Close(connectionErrorf(true, nil, "received goaway and there are no active streams")) - return + return connectionErrorf(true, nil, "received goaway and there are no active streams") } streamsToClose := make([]*Stream, 0) @@ -1368,6 +1397,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { for _, stream := range streamsToClose { t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false) } + return nil } // setGoAwayReason sets the value of t.goAwayReason based @@ -1603,7 +1633,13 @@ func (t *http2Client) readServerPreface() error { // network connection. If the server preface is not read successfully, an // error is pushed to errCh; otherwise errCh is closed with no error. func (t *http2Client) reader(errCh chan<- error) { - defer close(t.readerDone) + var errClose error + defer func() { + close(t.readerDone) + if errClose != nil { + t.Close(errClose) + } + }() if err := t.readServerPreface(); err != nil { errCh <- err @@ -1642,11 +1678,10 @@ func (t *http2Client) reader(errCh chan<- error) { t.closeStream(s, status.Error(code, msg), true, http2.ErrCodeProtocol, status.New(code, msg), nil, false) } continue - } else { - // Transport error. - t.Close(connectionErrorf(true, err, "error reading from server: %v", err)) - return } + // Transport error. 
+ errClose = connectionErrorf(true, err, "error reading from server: %v", err) + return } switch frame := frame.(type) { case *http2.MetaHeadersFrame: @@ -1660,7 +1695,7 @@ func (t *http2Client) reader(errCh chan<- error) { case *http2.PingFrame: t.handlePing(frame) case *http2.GoAwayFrame: - t.handleGoAway(frame) + errClose = t.handleGoAway(frame) case *http2.WindowUpdateFrame: t.handleWindowUpdate(frame) default: @@ -1671,15 +1706,15 @@ func (t *http2Client) reader(errCh chan<- error) { } } -func minTime(a, b time.Duration) time.Duration { - if a < b { - return a - } - return b -} - // keepalive running in a separate goroutine makes sure the connection is alive by sending pings. func (t *http2Client) keepalive() { + var err error + defer func() { + close(t.keepaliveDone) + if err != nil { + t.Close(err) + } + }() p := &ping{data: [8]byte{}} // True iff a ping has been sent, and no data has been received since then. outstandingPing := false @@ -1703,7 +1738,7 @@ func (t *http2Client) keepalive() { continue } if outstandingPing && timeoutLeft <= 0 { - t.Close(connectionErrorf(true, nil, "keepalive ping failed to receive ACK within timeout")) + err = connectionErrorf(true, nil, "keepalive ping failed to receive ACK within timeout") return } t.mu.Lock() @@ -1745,7 +1780,7 @@ func (t *http2Client) keepalive() { // timeoutLeft. This will ensure that we wait only for kp.Time // before sending out the next ping (for cases where the ping is // acked). - sleepDuration := minTime(t.kp.Time, timeoutLeft) + sleepDuration := min(t.kp.Time, timeoutLeft) timeoutLeft -= sleepDuration timer.Reset(sleepDuration) case <-t.ctx.Done(): diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index b7091165b50..584b50fe553 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -39,6 +39,7 @@ import ( "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/internal/syscall" + "google.golang.org/grpc/mem" "google.golang.org/protobuf/proto" "google.golang.org/grpc/codes" @@ -119,7 +120,7 @@ type http2Server struct { // Fields below are for channelz metric collection. 
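// Hedged aside: the minTime helper deleted above (like the package-local min
// removed from controlbuf.go earlier) is redundant as of Go 1.21, whose
// built-in generic min and max accept any ordered type, including
// time.Duration, which is what the keepalive loops now rely on.
package main

import (
	"fmt"
	"time"
)

func main() {
	kpTime := 2 * time.Hour
	timeoutLeft := 20 * time.Second
	fmt.Println(min(kpTime, timeoutLeft)) // 20s
	fmt.Println(min(3, 7), max(3, 7))     // 3 7
}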
channelz *channelz.Socket - bufferPool *bufferPool + bufferPool mem.BufferPool connectionID uint64 @@ -261,7 +262,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, idle: time.Now(), kep: kep, initialWindowSize: iwz, - bufferPool: newBufferPool(), + bufferPool: config.BufferPool, } var czSecurity credentials.ChannelzSecurityValue if au, ok := authInfo.(credentials.ChannelzSecurityInfo); ok { @@ -330,7 +331,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, t.handleSettings(sf) go func() { - t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler) + t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler, t.bufferPool) err := t.loopy.run() close(t.loopyWriterDone) if !isIOError(err) { @@ -613,10 +614,9 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone) s.trReader = &transportReader{ reader: &recvBufferReader{ - ctx: s.ctx, - ctxDone: s.ctxDone, - recv: s.buf, - freeBuffer: t.bufferPool.put, + ctx: s.ctx, + ctxDone: s.ctxDone, + recv: s.buf, }, windowHandler: func(n int) { t.updateWindow(s, uint32(n)) @@ -813,10 +813,13 @@ func (t *http2Server) handleData(f *http2.DataFrame) { // guarantee f.Data() is consumed before the arrival of next frame. // Can this copy be eliminated? if len(f.Data()) > 0 { - buffer := t.bufferPool.get() - buffer.Reset() - buffer.Write(f.Data()) - s.write(recvMsg{buffer: buffer}) + pool := t.bufferPool + if pool == nil { + // Note that this is only supposed to be nil in tests. Otherwise, stream is + // always initialized with a BufferPool. + pool = mem.DefaultBufferPool() + } + s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)}) } } if f.StreamEnded() { @@ -1089,7 +1092,9 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { onWrite: t.setResetPingStrikes, } - success, err := t.controlBuf.execute(t.checkForHeaderListSize, trailingHeader) + success, err := t.controlBuf.executeAndPut(func() bool { + return t.checkForHeaderListSize(trailingHeader) + }, nil) if !success { if err != nil { return err @@ -1112,27 +1117,37 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { // Write converts the data into HTTP2 data frame and sends it out. Non-nil error // is returns if it fails (e.g., framing error, transport error). -func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { +func (t *http2Server) Write(s *Stream, hdr []byte, data mem.BufferSlice, _ *Options) error { + reader := data.Reader() + if !s.isHeaderSent() { // Headers haven't been written yet. if err := t.WriteHeader(s, nil); err != nil { + _ = reader.Close() return err } } else { // Writing headers checks for this condition. if s.getState() == streamDone { + _ = reader.Close() return t.streamContextErr(s) } } + df := &dataFrame{ streamID: s.id, h: hdr, - d: data, + reader: reader, onEachWrite: t.setResetPingStrikes, } - if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { + if err := s.wq.get(int32(len(hdr) + df.reader.Remaining())); err != nil { + _ = reader.Close() return t.streamContextErr(s) } - return t.controlBuf.put(df) + if err := t.controlBuf.put(df); err != nil { + _ = reader.Close() + return err + } + return nil } // keepalive running in a separate goroutine does the following: @@ -1223,7 +1238,7 @@ func (t *http2Server) keepalive() { // timeoutLeft. 
This will ensure that we wait only for kp.Time // before sending out the next ping (for cases where the ping is // acked). - sleepDuration := minTime(t.kp.Time, kpTimeoutLeft) + sleepDuration := min(t.kp.Time, kpTimeoutLeft) kpTimeoutLeft -= sleepDuration kpTimer.Reset(sleepDuration) case <-t.done: diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go index 39cef3bd442..3613d7b6481 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -317,28 +317,32 @@ func newBufWriter(conn net.Conn, batchSize int, pool *sync.Pool) *bufWriter { return w } -func (w *bufWriter) Write(b []byte) (n int, err error) { +func (w *bufWriter) Write(b []byte) (int, error) { if w.err != nil { return 0, w.err } if w.batchSize == 0 { // Buffer has been disabled. - n, err = w.conn.Write(b) + n, err := w.conn.Write(b) return n, toIOError(err) } if w.buf == nil { b := w.pool.Get().(*[]byte) w.buf = *b } + written := 0 for len(b) > 0 { - nn := copy(w.buf[w.offset:], b) - b = b[nn:] - w.offset += nn - n += nn - if w.offset >= w.batchSize { - err = w.flushKeepBuffer() + copied := copy(w.buf[w.offset:], b) + b = b[copied:] + written += copied + w.offset += copied + if w.offset < w.batchSize { + continue + } + if err := w.flushKeepBuffer(); err != nil { + return written, err } } - return n, err + return written, nil } func (w *bufWriter) Flush() error { @@ -389,7 +393,7 @@ type framer struct { fr *http2.Framer } -var writeBufferPoolMap map[int]*sync.Pool = make(map[int]*sync.Pool) +var writeBufferPoolMap = make(map[int]*sync.Pool) var writeBufferMutex sync.Mutex func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBuffer bool, maxHeaderListSize uint32) *framer { diff --git a/vendor/google.golang.org/grpc/internal/transport/proxy.go b/vendor/google.golang.org/grpc/internal/transport/proxy.go index 24fa1032574..54b22443654 100644 --- a/vendor/google.golang.org/grpc/internal/transport/proxy.go +++ b/vendor/google.golang.org/grpc/internal/transport/proxy.go @@ -107,8 +107,14 @@ func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr stri } return nil, fmt.Errorf("failed to do connect handshake, response: %q", dump) } - - return &bufConn{Conn: conn, r: r}, nil + // The buffer could contain extra bytes from the target server, so we can't + // discard it. However, in many cases where the server waits for the client + // to send the first message (e.g. when TLS is being used), the buffer will + // be empty, so we can avoid the overhead of reading through this buffer. + if r.Buffered() != 0 { + return &bufConn{Conn: conn, r: r}, nil + } + return conn, nil } // proxyDial dials, connecting to a proxy first if necessary. 
Checks if a proxy diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index 4b39c0ade97..e12cb0bc914 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -22,7 +22,6 @@ package transport import ( - "bytes" "context" "errors" "fmt" @@ -37,6 +36,7 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/resolver" @@ -47,32 +47,10 @@ import ( const logLevel = 2 -type bufferPool struct { - pool sync.Pool -} - -func newBufferPool() *bufferPool { - return &bufferPool{ - pool: sync.Pool{ - New: func() any { - return new(bytes.Buffer) - }, - }, - } -} - -func (p *bufferPool) get() *bytes.Buffer { - return p.pool.Get().(*bytes.Buffer) -} - -func (p *bufferPool) put(b *bytes.Buffer) { - p.pool.Put(b) -} - // recvMsg represents the received msg from the transport. All transport // protocol specific info has been removed. type recvMsg struct { - buffer *bytes.Buffer + buffer mem.Buffer // nil: received some data // io.EOF: stream is completed. data is nil. // other non-nil error: transport failure. data is nil. @@ -102,6 +80,9 @@ func newRecvBuffer() *recvBuffer { func (b *recvBuffer) put(r recvMsg) { b.mu.Lock() if b.err != nil { + // drop the buffer on the floor. Since b.err is not nil, any subsequent reads + // will always return an error, making this buffer inaccessible. + r.buffer.Free() b.mu.Unlock() // An error had occurred earlier, don't accept more // data or errors. @@ -148,45 +129,97 @@ type recvBufferReader struct { ctx context.Context ctxDone <-chan struct{} // cache of ctx.Done() (for performance). recv *recvBuffer - last *bytes.Buffer // Stores the remaining data in the previous calls. + last mem.Buffer // Stores the remaining data in the previous calls. err error - freeBuffer func(*bytes.Buffer) } -// Read reads the next len(p) bytes from last. If last is drained, it tries to -// read additional data from recv. It blocks if there no additional data available -// in recv. If Read returns any non-nil error, it will continue to return that error. -func (r *recvBufferReader) Read(p []byte) (n int, err error) { +func (r *recvBufferReader) ReadHeader(header []byte) (n int, err error) { if r.err != nil { return 0, r.err } if r.last != nil { - // Read remaining data left in last call. - copied, _ := r.last.Read(p) - if r.last.Len() == 0 { - r.freeBuffer(r.last) + n, r.last = mem.ReadUnsafe(header, r.last) + return n, nil + } + if r.closeStream != nil { + n, r.err = r.readHeaderClient(header) + } else { + n, r.err = r.readHeader(header) + } + return n, r.err +} + +// Read reads the next n bytes from last. If last is drained, it tries to read +// additional data from recv. It blocks if there no additional data available in +// recv. If Read returns any non-nil error, it will continue to return that +// error. 
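// Read, defined next, hands data back as mem.Buffers split at the requested
// size. A hedged sketch of those split semantics using only identifiers this
// patch introduces (mem.Copy, mem.SplitUnsafe, mem.NopBufferPool); the
// payload and split point are illustrative.
package main

import (
	"fmt"

	"google.golang.org/grpc/mem"
)

func main() {
	buf := mem.Copy([]byte("headerbody"), mem.NopBufferPool{})
	// SplitUnsafe consumes the reference held in buf and returns two buffers
	// sharing the same backing array, each freed independently.
	head, tail := mem.SplitUnsafe(buf, 6)
	fmt.Printf("%s | %s\n", head.ReadOnlyData(), tail.ReadOnlyData())
	head.Free()
	tail.Free()
}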
+func (r *recvBufferReader) Read(n int) (buf mem.Buffer, err error) { + if r.err != nil { + return nil, r.err + } + if r.last != nil { + buf = r.last + if r.last.Len() > n { + buf, r.last = mem.SplitUnsafe(buf, n) + } else { r.last = nil } - return copied, nil + return buf, nil } if r.closeStream != nil { - n, r.err = r.readClient(p) + buf, r.err = r.readClient(n) } else { - n, r.err = r.read(p) + buf, r.err = r.read(n) } - return n, r.err + return buf, r.err } -func (r *recvBufferReader) read(p []byte) (n int, err error) { +func (r *recvBufferReader) readHeader(header []byte) (n int, err error) { select { case <-r.ctxDone: return 0, ContextErr(r.ctx.Err()) case m := <-r.recv.get(): - return r.readAdditional(m, p) + return r.readHeaderAdditional(m, header) + } +} + +func (r *recvBufferReader) read(n int) (buf mem.Buffer, err error) { + select { + case <-r.ctxDone: + return nil, ContextErr(r.ctx.Err()) + case m := <-r.recv.get(): + return r.readAdditional(m, n) + } +} + +func (r *recvBufferReader) readHeaderClient(header []byte) (n int, err error) { + // If the context is canceled, then closes the stream with nil metadata. + // closeStream writes its error parameter to r.recv as a recvMsg. + // r.readAdditional acts on that message and returns the necessary error. + select { + case <-r.ctxDone: + // Note that this adds the ctx error to the end of recv buffer, and + // reads from the head. This will delay the error until recv buffer is + // empty, thus will delay ctx cancellation in Recv(). + // + // It's done this way to fix a race between ctx cancel and trailer. The + // race was, stream.Recv() may return ctx error if ctxDone wins the + // race, but stream.Trailer() may return a non-nil md because the stream + // was not marked as done when trailer is received. This closeStream + // call will mark stream as done, thus fix the race. + // + // TODO: delaying ctx error seems like a unnecessary side effect. What + // we really want is to mark the stream as done, and return ctx error + // faster. + r.closeStream(ContextErr(r.ctx.Err())) + m := <-r.recv.get() + return r.readHeaderAdditional(m, header) + case m := <-r.recv.get(): + return r.readHeaderAdditional(m, header) } } -func (r *recvBufferReader) readClient(p []byte) (n int, err error) { +func (r *recvBufferReader) readClient(n int) (buf mem.Buffer, err error) { // If the context is canceled, then closes the stream with nil metadata. // closeStream writes its error parameter to r.recv as a recvMsg. // r.readAdditional acts on that message and returns the necessary error. @@ -207,25 +240,40 @@ func (r *recvBufferReader) readClient(p []byte) (n int, err error) { // faster. 
r.closeStream(ContextErr(r.ctx.Err())) m := <-r.recv.get() - return r.readAdditional(m, p) + return r.readAdditional(m, n) case m := <-r.recv.get(): - return r.readAdditional(m, p) + return r.readAdditional(m, n) } } -func (r *recvBufferReader) readAdditional(m recvMsg, p []byte) (n int, err error) { +func (r *recvBufferReader) readHeaderAdditional(m recvMsg, header []byte) (n int, err error) { r.recv.load() if m.err != nil { + if m.buffer != nil { + m.buffer.Free() + } return 0, m.err } - copied, _ := m.buffer.Read(p) - if m.buffer.Len() == 0 { - r.freeBuffer(m.buffer) - r.last = nil - } else { - r.last = m.buffer + + n, r.last = mem.ReadUnsafe(header, m.buffer) + + return n, nil +} + +func (r *recvBufferReader) readAdditional(m recvMsg, n int) (b mem.Buffer, err error) { + r.recv.load() + if m.err != nil { + if m.buffer != nil { + m.buffer.Free() + } + return nil, m.err + } + + if m.buffer.Len() > n { + m.buffer, r.last = mem.SplitUnsafe(m.buffer, n) } - return copied, nil + + return m.buffer, nil } type streamState uint32 @@ -241,7 +289,7 @@ const ( type Stream struct { id uint32 st ServerTransport // nil for client side Stream - ct *http2Client // nil for server side Stream + ct ClientTransport // nil for server side Stream ctx context.Context // the associated context of the stream cancel context.CancelFunc // always nil for client side Stream done chan struct{} // closed at the end of stream to unblock writers. On the client side. @@ -251,7 +299,7 @@ type Stream struct { recvCompress string sendCompress string buf *recvBuffer - trReader io.Reader + trReader *transportReader fc *inFlow wq *writeQuota @@ -408,7 +456,7 @@ func (s *Stream) TrailersOnly() bool { return s.noHeaders } -// Trailer returns the cached trailer metedata. Note that if it is not called +// Trailer returns the cached trailer metadata. Note that if it is not called // after the entire stream is done, it could return an empty MD. Client // side only. // It can be safely read only after stream has ended that is either read @@ -499,36 +547,96 @@ func (s *Stream) write(m recvMsg) { s.buf.put(m) } -// Read reads all p bytes from the wire for this stream. -func (s *Stream) Read(p []byte) (n int, err error) { +// ReadHeader reads data into the provided header slice from the stream. It +// first checks if there was an error during a previous read operation and +// returns it if present. It then requests a read operation for the length of +// the header. It continues to read from the stream until the entire header +// slice is filled or an error occurs. If an `io.EOF` error is encountered +// with partially read data, it is converted to `io.ErrUnexpectedEOF` to +// indicate an unexpected end of the stream. The method returns any error +// encountered during the read process or nil if the header was successfully +// read. +func (s *Stream) ReadHeader(header []byte) (err error) { + // Don't request a read if there was an error earlier + if er := s.trReader.er; er != nil { + return er + } + s.requestRead(len(header)) + for len(header) != 0 { + n, err := s.trReader.ReadHeader(header) + header = header[n:] + if len(header) == 0 { + err = nil + } + if err != nil { + if n > 0 && err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + } + return nil +} + +// Read reads n bytes from the wire for this stream. 
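// ReadHeader above follows the io.ReadFull contract: EOF that interrupts a
// partially filled buffer surfaces as io.ErrUnexpectedEOF, while a clean end
// of stream before any byte stays io.EOF. A self-contained illustration:
package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	header := make([]byte, 5)
	_, err := io.ReadFull(strings.NewReader("abc"), header)
	fmt.Println(err) // unexpected EOF: only 3 of 5 bytes arrived
	_, err = io.ReadFull(strings.NewReader(""), header)
	fmt.Println(err) // EOF: the stream ended before any data
}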
+func (s *Stream) Read(n int) (data mem.BufferSlice, err error) { // Don't request a read if there was an error earlier - if er := s.trReader.(*transportReader).er; er != nil { - return 0, er + if er := s.trReader.er; er != nil { + return nil, er } - s.requestRead(len(p)) - return io.ReadFull(s.trReader, p) + s.requestRead(n) + for n != 0 { + buf, err := s.trReader.Read(n) + var bufLen int + if buf != nil { + bufLen = buf.Len() + } + n -= bufLen + if n == 0 { + err = nil + } + if err != nil { + if bufLen > 0 && err == io.EOF { + err = io.ErrUnexpectedEOF + } + data.Free() + return nil, err + } + data = append(data, buf) + } + return data, nil } -// tranportReader reads all the data available for this Stream from the transport and +// transportReader reads all the data available for this Stream from the transport and // passes them into the decoder, which converts them into a gRPC message stream. // The error is io.EOF when the stream is done or another non-nil error if // the stream broke. type transportReader struct { - reader io.Reader + reader *recvBufferReader // The handler to control the window update procedure for both this // particular stream and the associated transport. windowHandler func(int) er error } -func (t *transportReader) Read(p []byte) (n int, err error) { - n, err = t.reader.Read(p) +func (t *transportReader) ReadHeader(header []byte) (int, error) { + n, err := t.reader.ReadHeader(header) if err != nil { t.er = err - return + return 0, err } t.windowHandler(n) - return + return n, nil +} + +func (t *transportReader) Read(n int) (mem.Buffer, error) { + buf, err := t.reader.Read(n) + if err != nil { + t.er = err + return buf, err + } + t.windowHandler(buf.Len()) + return buf, nil } // BytesReceived indicates whether any bytes have been received on this stream. @@ -574,6 +682,7 @@ type ServerConfig struct { ChannelzParent *channelz.Server MaxHeaderListSize *uint32 HeaderTableSize *uint32 + BufferPool mem.BufferPool } // ConnectOptions covers all relevant options for communicating with the server. @@ -612,6 +721,8 @@ type ConnectOptions struct { MaxHeaderListSize *uint32 // UseProxy specifies if a proxy should be used. UseProxy bool + // The mem.BufferPool to use when reading/writing to the wire. + BufferPool mem.BufferPool } // NewClientTransport establishes the transport with the required ConnectOptions @@ -673,7 +784,7 @@ type ClientTransport interface { // Write sends the data for the given stream. A nil stream indicates // the write is to be performed on the transport as a whole. - Write(s *Stream, hdr []byte, data []byte, opts *Options) error + Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error // NewStream creates a Stream for an RPC. NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) @@ -725,7 +836,7 @@ type ServerTransport interface { // Write sends the data for the given stream. // Write may not be called on all streams. - Write(s *Stream, hdr []byte, data []byte, opts *Options) error + Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error // WriteStatus sends the status of a stream to the client. WriteStatus is // the final call made on a stream and always occurs. @@ -798,7 +909,7 @@ var ( // connection is draining. This could be caused by goaway or balancer // removing the address. 
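// With Write now taking a mem.BufferSlice, callers assemble messages through
// the mem package rather than raw []byte. A hedged sketch using only APIs
// added in this patch (mem.NewWriter, mem.DefaultBufferPool); the payload is
// illustrative.
package main

import (
	"fmt"

	"google.golang.org/grpc/mem"
)

func main() {
	var data mem.BufferSlice
	w := mem.NewWriter(&data, mem.DefaultBufferPool())
	fmt.Fprint(w, "hello ")
	fmt.Fprint(w, "world")
	fmt.Println(len(data), data.Len()) // 2 buffers holding 11 bytes total
	fmt.Printf("%s\n", data.Materialize())
	data.Free()
}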
errStreamDrain = status.Error(codes.Unavailable, "the connection is draining") - // errStreamDone is returned from write at the client side to indiacte application + // errStreamDone is returned from write at the client side to indicate application // layer of an error. errStreamDone = errors.New("the stream is done") // StatusGoAway indicates that the server sent a GOAWAY that included this diff --git a/vendor/google.golang.org/grpc/keepalive/keepalive.go b/vendor/google.golang.org/grpc/keepalive/keepalive.go index 34d31b5e7d3..eb42b19fb99 100644 --- a/vendor/google.golang.org/grpc/keepalive/keepalive.go +++ b/vendor/google.golang.org/grpc/keepalive/keepalive.go @@ -34,15 +34,29 @@ type ClientParameters struct { // After a duration of this time if the client doesn't see any activity it // pings the server to see if the transport is still alive. // If set below 10s, a minimum value of 10s will be used instead. - Time time.Duration // The current default value is infinity. + // + // Note that gRPC servers have a default EnforcementPolicy.MinTime of 5 + // minutes (which means the client shouldn't ping more frequently than every + // 5 minutes). + // + // Though not ideal, it's not a strong requirement for Time to be less than + // EnforcementPolicy.MinTime. Time will automatically double if the server + // disconnects due to its enforcement policy. + // + // For more details, see + // https://github.com/grpc/proposal/blob/master/A8-client-side-keepalive.md + Time time.Duration // After having pinged for keepalive check, the client waits for a duration // of Timeout and if no activity is seen even after that the connection is // closed. - Timeout time.Duration // The current default value is 20 seconds. + // + // If keepalive is enabled, and this value is not explicitly set, the default + // is 20 seconds. + Timeout time.Duration // If true, client sends keepalive pings even with no active RPCs. If false, // when there are no active RPCs, Time and Timeout will be ignored and no // keepalive pings will be sent. - PermitWithoutStream bool // false by default. + PermitWithoutStream bool } // ServerParameters is used to set keepalive and max-age parameters on the diff --git a/vendor/google.golang.org/grpc/mem/buffer_pool.go b/vendor/google.golang.org/grpc/mem/buffer_pool.go new file mode 100644 index 00000000000..c37c58c0233 --- /dev/null +++ b/vendor/google.golang.org/grpc/mem/buffer_pool.go @@ -0,0 +1,194 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package mem + +import ( + "sort" + "sync" + + "google.golang.org/grpc/internal" +) + +// BufferPool is a pool of buffers that can be shared and reused, resulting in +// decreased memory allocation. +type BufferPool interface { + // Get returns a buffer with specified length from the pool. + Get(length int) *[]byte + + // Put returns a buffer to the pool. 
+ Put(*[]byte) +} + +var defaultBufferPoolSizes = []int{ + 256, + 4 << 10, // 4KB (go page size) + 16 << 10, // 16KB (max HTTP/2 frame size used by gRPC) + 32 << 10, // 32KB (default buffer size for io.Copy) + 1 << 20, // 1MB +} + +var defaultBufferPool BufferPool + +func init() { + defaultBufferPool = NewTieredBufferPool(defaultBufferPoolSizes...) + + internal.SetDefaultBufferPoolForTesting = func(pool BufferPool) { + defaultBufferPool = pool + } + + internal.SetBufferPoolingThresholdForTesting = func(threshold int) { + bufferPoolingThreshold = threshold + } +} + +// DefaultBufferPool returns the current default buffer pool. It is a BufferPool +// created with NewBufferPool that uses a set of default sizes optimized for +// expected workflows. +func DefaultBufferPool() BufferPool { + return defaultBufferPool +} + +// NewTieredBufferPool returns a BufferPool implementation that uses multiple +// underlying pools of the given pool sizes. +func NewTieredBufferPool(poolSizes ...int) BufferPool { + sort.Ints(poolSizes) + pools := make([]*sizedBufferPool, len(poolSizes)) + for i, s := range poolSizes { + pools[i] = newSizedBufferPool(s) + } + return &tieredBufferPool{ + sizedPools: pools, + } +} + +// tieredBufferPool implements the BufferPool interface with multiple tiers of +// buffer pools for different sizes of buffers. +type tieredBufferPool struct { + sizedPools []*sizedBufferPool + fallbackPool simpleBufferPool +} + +func (p *tieredBufferPool) Get(size int) *[]byte { + return p.getPool(size).Get(size) +} + +func (p *tieredBufferPool) Put(buf *[]byte) { + p.getPool(cap(*buf)).Put(buf) +} + +func (p *tieredBufferPool) getPool(size int) BufferPool { + poolIdx := sort.Search(len(p.sizedPools), func(i int) bool { + return p.sizedPools[i].defaultSize >= size + }) + + if poolIdx == len(p.sizedPools) { + return &p.fallbackPool + } + + return p.sizedPools[poolIdx] +} + +// sizedBufferPool is a BufferPool implementation that is optimized for specific +// buffer sizes. For example, HTTP/2 frames within gRPC have a default max size +// of 16kb and a sizedBufferPool can be configured to only return buffers with a +// capacity of 16kb. Note that however it does not support returning larger +// buffers and in fact panics if such a buffer is requested. Because of this, +// this BufferPool implementation is not meant to be used on its own and rather +// is intended to be embedded in a tieredBufferPool such that Get is only +// invoked when the required size is smaller than or equal to defaultSize. +type sizedBufferPool struct { + pool sync.Pool + defaultSize int +} + +func (p *sizedBufferPool) Get(size int) *[]byte { + buf := p.pool.Get().(*[]byte) + b := *buf + clear(b[:cap(b)]) + *buf = b[:size] + return buf +} + +func (p *sizedBufferPool) Put(buf *[]byte) { + if cap(*buf) < p.defaultSize { + // Ignore buffers that are too small to fit in the pool. Otherwise, when + // Get is called it will panic as it tries to index outside the bounds + // of the buffer. + return + } + p.pool.Put(buf) +} + +func newSizedBufferPool(size int) *sizedBufferPool { + return &sizedBufferPool{ + pool: sync.Pool{ + New: func() any { + buf := make([]byte, size) + return &buf + }, + }, + defaultSize: size, + } +} + +var _ BufferPool = (*simpleBufferPool)(nil) + +// simpleBufferPool is an implementation of the BufferPool interface that +// attempts to pool buffers with a sync.Pool. 
When Get is invoked, it tries to +// acquire a buffer from the pool but if that buffer is too small, it returns it +// to the pool and creates a new one. +type simpleBufferPool struct { + pool sync.Pool +} + +func (p *simpleBufferPool) Get(size int) *[]byte { + bs, ok := p.pool.Get().(*[]byte) + if ok && cap(*bs) >= size { + *bs = (*bs)[:size] + return bs + } + + // A buffer was pulled from the pool, but it is too small. Put it back in + // the pool and create one large enough. + if ok { + p.pool.Put(bs) + } + + b := make([]byte, size) + return &b +} + +func (p *simpleBufferPool) Put(buf *[]byte) { + p.pool.Put(buf) +} + +var _ BufferPool = NopBufferPool{} + +// NopBufferPool is a buffer pool that returns new buffers without pooling. +type NopBufferPool struct{} + +// Get returns a buffer with specified length from the pool. +func (NopBufferPool) Get(length int) *[]byte { + b := make([]byte, length) + return &b +} + +// Put returns a buffer to the pool. +func (NopBufferPool) Put(*[]byte) { +} diff --git a/vendor/google.golang.org/grpc/mem/buffer_slice.go b/vendor/google.golang.org/grpc/mem/buffer_slice.go new file mode 100644 index 00000000000..228e9c2f20f --- /dev/null +++ b/vendor/google.golang.org/grpc/mem/buffer_slice.go @@ -0,0 +1,226 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package mem + +import ( + "io" +) + +// BufferSlice offers a means to represent data that spans one or more Buffer +// instances. A BufferSlice is meant to be immutable after creation, and methods +// like Ref create and return copies of the slice. This is why all methods have +// value receivers rather than pointer receivers. +// +// Note that any of the methods that read the underlying buffers such as Ref, +// Len or CopyTo etc., will panic if any underlying buffers have already been +// freed. It is recommended to not directly interact with any of the underlying +// buffers directly, rather such interactions should be mediated through the +// various methods on this type. +// +// By convention, any APIs that return (mem.BufferSlice, error) should reduce +// the burden on the caller by never returning a mem.BufferSlice that needs to +// be freed if the error is non-nil, unless explicitly stated. +type BufferSlice []Buffer + +// Len returns the sum of the length of all the Buffers in this slice. +// +// # Warning +// +// Invoking the built-in len on a BufferSlice will return the number of buffers +// in the slice, and *not* the value returned by this function. +func (s BufferSlice) Len() int { + var length int + for _, b := range s { + length += b.Len() + } + return length +} + +// Ref invokes Ref on each buffer in the slice. +func (s BufferSlice) Ref() { + for _, b := range s { + b.Ref() + } +} + +// Free invokes Buffer.Free() on each Buffer in the slice. 
diff --git a/vendor/google.golang.org/grpc/mem/buffer_slice.go b/vendor/google.golang.org/grpc/mem/buffer_slice.go
new file mode 100644
index 00000000000..228e9c2f20f
--- /dev/null
+++ b/vendor/google.golang.org/grpc/mem/buffer_slice.go
@@ -0,0 +1,226 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package mem
+
+import (
+	"io"
+)
+
+// BufferSlice offers a means to represent data that spans one or more Buffer
+// instances. A BufferSlice is meant to be immutable after creation, and methods
+// like Ref create and return copies of the slice. This is why all methods have
+// value receivers rather than pointer receivers.
+//
+// Note that any of the methods that read the underlying buffers, such as Ref,
+// Len, or CopyTo, will panic if any underlying buffers have already been freed.
+// It is recommended not to interact with any of the underlying buffers
+// directly; rather, such interactions should be mediated through the various
+// methods on this type.
+//
+// By convention, any APIs that return (mem.BufferSlice, error) should reduce
+// the burden on the caller by never returning a mem.BufferSlice that needs to
+// be freed if the error is non-nil, unless explicitly stated.
+type BufferSlice []Buffer
+
+// Len returns the sum of the length of all the Buffers in this slice.
+//
+// # Warning
+//
+// Invoking the built-in len on a BufferSlice will return the number of buffers
+// in the slice, and *not* the value returned by this function.
+func (s BufferSlice) Len() int {
+	var length int
+	for _, b := range s {
+		length += b.Len()
+	}
+	return length
+}
+
+// Ref invokes Ref on each buffer in the slice.
+func (s BufferSlice) Ref() {
+	for _, b := range s {
+		b.Ref()
+	}
+}
+
+// Free invokes Buffer.Free() on each Buffer in the slice.
+func (s BufferSlice) Free() {
+	for _, b := range s {
+		b.Free()
+	}
+}
+
+// CopyTo copies each of the underlying Buffer's data into the given buffer,
+// returning the number of bytes copied. Has the same semantics as the copy
+// builtin in that it will copy as many bytes as it can, stopping when either dst
+// is full or s runs out of data, returning the minimum of s.Len() and len(dst).
+func (s BufferSlice) CopyTo(dst []byte) int {
+	off := 0
+	for _, b := range s {
+		off += copy(dst[off:], b.ReadOnlyData())
+	}
+	return off
+}
+
+// Materialize concatenates all the underlying Buffer's data into a single
+// contiguous buffer using CopyTo.
+func (s BufferSlice) Materialize() []byte {
+	l := s.Len()
+	if l == 0 {
+		return nil
+	}
+	out := make([]byte, l)
+	s.CopyTo(out)
+	return out
+}
+
+// MaterializeToBuffer functions like Materialize except that it writes the data
+// to a single Buffer pulled from the given BufferPool.
+//
+// As a special case, if the input BufferSlice only actually has one Buffer, this
+// function simply increases the refcount before returning said Buffer. Freeing this
+// buffer won't release it until the BufferSlice is itself released.
+func (s BufferSlice) MaterializeToBuffer(pool BufferPool) Buffer {
+	if len(s) == 1 {
+		s[0].Ref()
+		return s[0]
+	}
+	sLen := s.Len()
+	if sLen == 0 {
+		return emptyBuffer{}
+	}
+	buf := pool.Get(sLen)
+	s.CopyTo(*buf)
+	return NewBuffer(buf, pool)
+}
+
+// Reader returns a new Reader for the input slice after taking references to
+// each underlying buffer.
+func (s BufferSlice) Reader() Reader {
+	s.Ref()
+	return &sliceReader{
+		data: s,
+		len:  s.Len(),
+	}
+}
+
+// Reader exposes a BufferSlice's data as an io.Reader, allowing it to interface
+// with other parts of the system. It also provides an additional convenience
+// method Remaining(), which returns the number of unread bytes remaining in the
+// slice. Buffers will be freed as they are read.
+type Reader interface {
+	io.Reader
+	io.ByteReader
+	// Close frees the underlying BufferSlice and never returns an error. Subsequent
+	// calls to Read will return (0, io.EOF).
+	Close() error
+	// Remaining returns the number of unread bytes remaining in the slice.
+	Remaining() int
+}
+
+type sliceReader struct {
+	data BufferSlice
+	len  int
+	// The index into data[0].ReadOnlyData().
+	bufferIdx int
+}
+
+func (r *sliceReader) Remaining() int {
+	return r.len
+}
+
+func (r *sliceReader) Close() error {
+	r.data.Free()
+	r.data = nil
+	r.len = 0
+	return nil
+}
+
+func (r *sliceReader) freeFirstBufferIfEmpty() bool {
+	if len(r.data) == 0 || r.bufferIdx != len(r.data[0].ReadOnlyData()) {
+		return false
+	}
+
+	r.data[0].Free()
+	r.data = r.data[1:]
+	r.bufferIdx = 0
+	return true
+}
+
+func (r *sliceReader) Read(buf []byte) (n int, _ error) {
+	if r.len == 0 {
+		return 0, io.EOF
+	}
+
+	for len(buf) != 0 && r.len != 0 {
+		// Copy as much as possible from the first Buffer in the slice into the
+		// given byte slice.
+		data := r.data[0].ReadOnlyData()
+		copied := copy(buf, data[r.bufferIdx:])
+		r.len -= copied       // Reduce len by the number of bytes copied.
+		r.bufferIdx += copied // Increment the buffer index.
+		n += copied           // Increment the total number of bytes read.
+		buf = buf[copied:]    // Shrink the given byte slice.
+
+		// If we have copied all the data from the first Buffer, free it and advance to
+		// the next in the slice.
+		r.freeFirstBufferIfEmpty()
+	}
+
+	return n, nil
+}
+
+func (r *sliceReader) ReadByte() (byte, error) {
+	if r.len == 0 {
+		return 0, io.EOF
+	}
+
+	// There may be any number of empty buffers in the slice; clear them all
+	// until a non-empty buffer is reached. This is guaranteed to exit since
+	// r.len is not 0.
+	for r.freeFirstBufferIfEmpty() {
+	}
+
+	b := r.data[0].ReadOnlyData()[r.bufferIdx]
+	r.len--
+	r.bufferIdx++
+	// Free the first buffer in the slice if the last byte was read.
+	r.freeFirstBufferIfEmpty()
+	return b, nil
+}
+
+var _ io.Writer = (*writer)(nil)
+
+type writer struct {
+	buffers *BufferSlice
+	pool    BufferPool
+}
+
+func (w *writer) Write(p []byte) (n int, err error) {
+	b := Copy(p, w.pool)
+	*w.buffers = append(*w.buffers, b)
+	return b.Len(), nil
+}
+
+// NewWriter wraps the given BufferSlice and BufferPool to implement the
+// io.Writer interface. Every call to Write copies the contents of the given
+// buffer into a new Buffer pulled from the given pool and the Buffer is added to
+// the given BufferSlice.
+func NewWriter(buffers *BufferSlice, pool BufferPool) io.Writer {
+	return &writer{buffers: buffers, pool: pool}
+}
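Taken together, NewWriter, Reader, and the per-buffer refcounting form a small zero-copy pipeline. A minimal sketch of round-tripping data through a BufferSlice (illustrative only; these small writes stay as unpooled SliceBuffers because they fall below the pooling threshold defined in buffers.go, added next):

```go
package main

import (
	"fmt"
	"io"

	"google.golang.org/grpc/mem"
)

func main() {
	pool := mem.DefaultBufferPool()

	// Each Write copies its input into a Buffer appended to the slice.
	var slice mem.BufferSlice
	w := mem.NewWriter(&slice, pool)
	io.WriteString(w, "hello, ")
	io.WriteString(w, "world")

	// len(slice) counts buffers; slice.Len() counts bytes.
	fmt.Println(len(slice), slice.Len()) // 2 12

	// Reader takes its own references and frees buffers as they are consumed.
	r := slice.Reader()
	data, _ := io.ReadAll(r)
	fmt.Println(string(data), r.Remaining()) // hello, world 0

	_ = r.Close() // releases the reader's references
	slice.Free()  // releases the writer's references
}
```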
diff --git a/vendor/google.golang.org/grpc/mem/buffers.go b/vendor/google.golang.org/grpc/mem/buffers.go
new file mode 100644
index 00000000000..ecbf0b9a73e
--- /dev/null
+++ b/vendor/google.golang.org/grpc/mem/buffers.go
@@ -0,0 +1,268 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package mem provides utilities that facilitate memory reuse in byte slices
+// that are used as buffers.
+//
+// # Experimental
+//
+// Notice: All APIs in this package are EXPERIMENTAL and may be changed or
+// removed in a later release.
+package mem
+
+import (
+	"fmt"
+	"sync"
+	"sync/atomic"
+)
+
+// A Buffer represents a reference counted piece of data (in bytes) that can be
+// acquired by a call to NewBuffer() or Copy(). A reference to a Buffer may be
+// released by calling Free(), which invokes the free function given at creation
+// only after all references are released.
+//
+// Note that a Buffer is not safe for concurrent access and instead each
+// goroutine should use its own reference to the data, which can be acquired via
+// a call to Ref().
+//
+// Attempts to access the underlying data after releasing the reference to the
+// Buffer will panic.
+type Buffer interface {
+	// ReadOnlyData returns the underlying byte slice. Note that it is undefined
+	// behavior to modify the contents of this slice in any way.
+	ReadOnlyData() []byte
+	// Ref increases the reference counter for this Buffer.
+	Ref()
+	// Free decrements this Buffer's reference counter and frees the underlying
+	// byte slice if the counter reaches 0 as a result of this call.
+	Free()
+	// Len returns the Buffer's size.
+	Len() int
+
+	split(n int) (left, right Buffer)
+	read(buf []byte) (int, Buffer)
+}
+
+var (
+	bufferPoolingThreshold = 1 << 10
+
+	bufferObjectPool = sync.Pool{New: func() any { return new(buffer) }}
+	refObjectPool    = sync.Pool{New: func() any { return new(atomic.Int32) }}
+)
+
+// IsBelowBufferPoolingThreshold returns true if the given size is less than or
+// equal to the threshold for buffer pooling. This is used to determine whether
+// to pool buffers or allocate them directly.
+func IsBelowBufferPoolingThreshold(size int) bool {
+	return size <= bufferPoolingThreshold
+}
+
+type buffer struct {
+	origData *[]byte
+	data     []byte
+	refs     *atomic.Int32
+	pool     BufferPool
+}
+
+func newBuffer() *buffer {
+	return bufferObjectPool.Get().(*buffer)
+}
+
+// NewBuffer creates a new Buffer from the given data, initializing the reference
+// counter to 1. The data will then be returned to the given pool when all
+// references to the returned Buffer are released. As a special case to avoid
+// additional allocations, if the given buffer pool is nil, the returned buffer
+// will be a "no-op" Buffer where invoking Buffer.Free() does nothing and the
+// underlying data is never freed.
+//
+// Note that the backing array of the given data is not copied.
+func NewBuffer(data *[]byte, pool BufferPool) Buffer {
+	// Use the buffer's capacity instead of the length, otherwise buffers may
+	// not be reused under certain conditions. For example, if a large buffer
+	// is acquired from the pool, but fewer bytes than the buffering threshold
+	// are written to it, the buffer will not be returned to the pool.
+	if pool == nil || IsBelowBufferPoolingThreshold(cap(*data)) {
+		return (SliceBuffer)(*data)
+	}
+	b := newBuffer()
+	b.origData = data
+	b.data = *data
+	b.pool = pool
+	b.refs = refObjectPool.Get().(*atomic.Int32)
+	b.refs.Add(1)
+	return b
+}
+
+// Copy creates a new Buffer from the given data, initializing the reference
+// counter to 1.
+//
+// It acquires a []byte from the given pool and copies over the backing array
+// of the given data. The []byte acquired from the pool is returned to the
+// pool when all references to the returned Buffer are released.
+func Copy(data []byte, pool BufferPool) Buffer {
+	if IsBelowBufferPoolingThreshold(len(data)) {
+		buf := make(SliceBuffer, len(data))
+		copy(buf, data)
+		return buf
+	}
+
+	buf := pool.Get(len(data))
+	copy(*buf, data)
+	return NewBuffer(buf, pool)
+}
+
+func (b *buffer) ReadOnlyData() []byte {
+	if b.refs == nil {
+		panic("Cannot read freed buffer")
+	}
+	return b.data
+}
+
+func (b *buffer) Ref() {
+	if b.refs == nil {
+		panic("Cannot ref freed buffer")
+	}
+	b.refs.Add(1)
+}
+
+func (b *buffer) Free() {
+	if b.refs == nil {
+		panic("Cannot free freed buffer")
+	}
+
+	refs := b.refs.Add(-1)
+	switch {
+	case refs > 0:
+		return
+	case refs == 0:
+		if b.pool != nil {
+			b.pool.Put(b.origData)
+		}
+
+		refObjectPool.Put(b.refs)
+		b.origData = nil
+		b.data = nil
+		b.refs = nil
+		b.pool = nil
+		bufferObjectPool.Put(b)
+	default:
+		panic("Cannot free freed buffer")
+	}
+}
+
+func (b *buffer) Len() int {
+	return len(b.ReadOnlyData())
+}
+
+func (b *buffer) split(n int) (Buffer, Buffer) {
+	if b.refs == nil {
+		panic("Cannot split freed buffer")
+	}
+
+	b.refs.Add(1)
+	split := newBuffer()
+	split.origData = b.origData
+	split.data = b.data[n:]
+	split.refs = b.refs
+	split.pool = b.pool
+
+	b.data = b.data[:n]
+
+	return b, split
+}
+
+func (b *buffer) read(buf []byte) (int, Buffer) {
+	if b.refs == nil {
+		panic("Cannot read freed buffer")
+	}
+
+	n := copy(buf, b.data)
+	if n == len(b.data) {
+		b.Free()
+		return n, nil
+	}
+
+	b.data = b.data[n:]
+	return n, b
+}
+
+func (b *buffer) String() string {
+	return fmt.Sprintf("mem.Buffer(%p, data: %p, length: %d)", b, b.ReadOnlyData(), len(b.ReadOnlyData()))
+}
+
+// ReadUnsafe reads bytes from the given Buffer into the provided slice.
+// It does not perform safety checks.
+func ReadUnsafe(dst []byte, buf Buffer) (int, Buffer) {
+	return buf.read(dst)
+}
+
+// SplitUnsafe modifies the given buffer to point to the first n bytes while it
+// returns a new reference to the remaining bytes. The returned Buffer
+// functions just like a normal reference acquired using Ref().
+func SplitUnsafe(buf Buffer, n int) (left, right Buffer) {
+	return buf.split(n)
+}
+
+type emptyBuffer struct{}
+
+func (e emptyBuffer) ReadOnlyData() []byte {
+	return nil
+}
+
+func (e emptyBuffer) Ref()  {}
+func (e emptyBuffer) Free() {}
+
+func (e emptyBuffer) Len() int {
+	return 0
+}
+
+func (e emptyBuffer) split(int) (left, right Buffer) {
+	return e, e
+}
+
+func (e emptyBuffer) read([]byte) (int, Buffer) {
+	return 0, e
+}
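Before the SliceBuffer fast path below, a small sketch of the reference-counting contract (not part of the patch; the 4KB payload is chosen to sit above the 1KB pooling threshold so that a pooled, counted buffer is actually exercised rather than a SliceBuffer):

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/mem"
)

func main() {
	pool := mem.DefaultBufferPool()

	// Copy pulls a []byte from the pool and copies the payload into it; the
	// returned Buffer starts with a reference count of 1.
	payload := make([]byte, 4096)
	buf := mem.Copy(payload, pool)

	buf.Ref() // count 2: e.g. handing the data to another goroutine

	buf.Free()             // count 1: data is still readable
	fmt.Println(buf.Len()) // 4096

	buf.Free() // count 0: the backing []byte returns to the pool

	// Any further access would panic with "Cannot read freed buffer".
}
```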
+// SliceBuffer is a Buffer implementation that wraps a byte slice. It provides
+// methods for reading, splitting, and managing the byte slice.
+type SliceBuffer []byte
+
+// ReadOnlyData returns the byte slice.
+func (s SliceBuffer) ReadOnlyData() []byte { return s }
+
+// Ref is a noop implementation of Ref.
+func (s SliceBuffer) Ref() {}
+
+// Free is a noop implementation of Free.
+func (s SliceBuffer) Free() {}
+
+// Len returns the length of the underlying byte slice.
+func (s SliceBuffer) Len() int { return len(s) }
+
+func (s SliceBuffer) split(n int) (left, right Buffer) {
+	return s[:n], s[n:]
+}
+
+func (s SliceBuffer) read(buf []byte) (int, Buffer) {
+	n := copy(buf, s)
+	if n == len(s) {
+		return n, nil
+	}
+	return n, s[n:]
+}
diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go
index 1e9485fd6e2..d2e15253bbf 100644
--- a/vendor/google.golang.org/grpc/metadata/metadata.go
+++ b/vendor/google.golang.org/grpc/metadata/metadata.go
@@ -213,11 +213,6 @@ func FromIncomingContext(ctx context.Context) (MD, bool) {
 // ValueFromIncomingContext returns the metadata value corresponding to the metadata
 // key from the incoming metadata if it exists. Keys are matched in a case insensitive
 // manner.
-//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a
-// later release.
 func ValueFromIncomingContext(ctx context.Context, key string) []string {
 	md, ok := ctx.Value(mdIncomingKey{}).(MD)
 	if !ok {
@@ -228,7 +223,7 @@ func ValueFromIncomingContext(ctx context.Context, key string) []string {
 		return copyOf(v)
 	}
 	for k, v := range md {
-		// Case insenitive comparison: MD is a map, and there's no guarantee
+		// Case insensitive comparison: MD is a map, and there's no guarantee
 		// that the MD attached to the context is created using our helper
 		// functions.
 		if strings.EqualFold(k, key) {
diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go
index 73bd6336433..e87a17f36a5 100644
--- a/vendor/google.golang.org/grpc/preloader.go
+++ b/vendor/google.golang.org/grpc/preloader.go
@@ -20,6 +20,7 @@ package grpc
 
 import (
 	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/mem"
 	"google.golang.org/grpc/status"
 )
 
@@ -31,9 +32,10 @@ import (
 // later release.
 type PreparedMsg struct {
 	// Struct for preparing msg before sending them
-	encodedData []byte
+	encodedData mem.BufferSlice
 	hdr         []byte
-	payload     []byte
+	payload     mem.BufferSlice
+	pf          payloadFormat
 }
 
 // Encode marshals and compresses the message using the codec and compressor for the stream.
@@ -57,11 +59,27 @@ func (p *PreparedMsg) Encode(s Stream, msg any) error {
 	if err != nil {
 		return err
 	}
-	p.encodedData = data
-	compData, err := compress(data, rpcInfo.preloaderInfo.cp, rpcInfo.preloaderInfo.comp)
+
+	materializedData := data.Materialize()
+	data.Free()
+	p.encodedData = mem.BufferSlice{mem.NewBuffer(&materializedData, nil)}
+
+	// TODO: it should be possible to grab the bufferPool from the underlying
+	// stream implementation with a type cast to its actual type (such as
+	// addrConnStream) and access the buffer pool directly.
+	var compData mem.BufferSlice
+	compData, p.pf, err = compress(p.encodedData, rpcInfo.preloaderInfo.cp, rpcInfo.preloaderInfo.comp, mem.DefaultBufferPool())
 	if err != nil {
 		return err
 	}
-	p.hdr, p.payload = msgHeader(data, compData)
+
+	if p.pf.isCompressed() {
+		materializedCompData := compData.Materialize()
+		compData.Free()
+		compData = mem.BufferSlice{mem.NewBuffer(&materializedCompData, nil)}
+	}
+
+	p.hdr, p.payload = msgHeader(p.encodedData, compData, p.pf)
+
 	return nil
 }
diff --git a/vendor/google.golang.org/grpc/reflection/README.md b/vendor/google.golang.org/grpc/reflection/README.md
deleted file mode 100644
index 9ace83ccb67..00000000000
--- a/vendor/google.golang.org/grpc/reflection/README.md
+++ /dev/null
@@ -1,18 +0,0 @@
-# Reflection
-
-Package reflection implements server reflection service.
- -The service implemented is defined in: https://github.com/grpc/grpc/blob/master/src/proto/grpc/reflection/v1/reflection.proto. - -To register server reflection on a gRPC server: -```go -import "google.golang.org/grpc/reflection" - -s := grpc.NewServer() -pb.RegisterYourOwnServer(s, &server{}) - -// Register reflection service on gRPC server. -reflection.Register(s) - -s.Serve(lis) -``` diff --git a/vendor/google.golang.org/grpc/reflection/adapt.go b/vendor/google.golang.org/grpc/reflection/adapt.go deleted file mode 100644 index 6997e474031..00000000000 --- a/vendor/google.golang.org/grpc/reflection/adapt.go +++ /dev/null @@ -1,57 +0,0 @@ -/* - * - * Copyright 2023 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package reflection - -import ( - "google.golang.org/grpc/reflection/internal" - - v1reflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1" - v1reflectionpb "google.golang.org/grpc/reflection/grpc_reflection_v1" - v1alphareflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" -) - -// asV1Alpha returns an implementation of the v1alpha version of the reflection -// interface that delegates all calls to the given v1 version. -func asV1Alpha(svr v1reflectiongrpc.ServerReflectionServer) v1alphareflectiongrpc.ServerReflectionServer { - return v1AlphaServerImpl{svr: svr} -} - -type v1AlphaServerImpl struct { - svr v1reflectiongrpc.ServerReflectionServer -} - -func (s v1AlphaServerImpl) ServerReflectionInfo(stream v1alphareflectiongrpc.ServerReflection_ServerReflectionInfoServer) error { - return s.svr.ServerReflectionInfo(v1AlphaServerStreamAdapter{stream}) -} - -type v1AlphaServerStreamAdapter struct { - v1alphareflectiongrpc.ServerReflection_ServerReflectionInfoServer -} - -func (s v1AlphaServerStreamAdapter) Send(response *v1reflectionpb.ServerReflectionResponse) error { - return s.ServerReflection_ServerReflectionInfoServer.Send(internal.V1ToV1AlphaResponse(response)) -} - -func (s v1AlphaServerStreamAdapter) Recv() (*v1reflectionpb.ServerReflectionRequest, error) { - resp, err := s.ServerReflection_ServerReflectionInfoServer.Recv() - if err != nil { - return nil, err - } - return internal.V1AlphaToV1Request(resp), nil -} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go deleted file mode 100644 index 666eda8e5f3..00000000000 --- a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go +++ /dev/null @@ -1,953 +0,0 @@ -// Copyright 2016 The gRPC Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Service exported by server reflection. A more complete description of how -// server reflection works can be found at -// https://github.com/grpc/grpc/blob/master/doc/server-reflection.md -// -// The canonical version of this proto can be found at -// https://github.com/grpc/grpc-proto/blob/master/grpc/reflection/v1/reflection.proto - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.34.1 -// protoc v4.25.2 -// source: grpc/reflection/v1/reflection.proto - -package grpc_reflection_v1 - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// The message sent by the client when calling ServerReflectionInfo method. -type ServerReflectionRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"` - // To use reflection service, the client should set one of the following - // fields in message_request. The server distinguishes requests by their - // defined field and then handles them using corresponding methods. - // - // Types that are assignable to MessageRequest: - // - // *ServerReflectionRequest_FileByFilename - // *ServerReflectionRequest_FileContainingSymbol - // *ServerReflectionRequest_FileContainingExtension - // *ServerReflectionRequest_AllExtensionNumbersOfType - // *ServerReflectionRequest_ListServices - MessageRequest isServerReflectionRequest_MessageRequest `protobuf_oneof:"message_request"` -} - -func (x *ServerReflectionRequest) Reset() { - *x = ServerReflectionRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ServerReflectionRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ServerReflectionRequest) ProtoMessage() {} - -func (x *ServerReflectionRequest) ProtoReflect() protoreflect.Message { - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ServerReflectionRequest.ProtoReflect.Descriptor instead. 
-func (*ServerReflectionRequest) Descriptor() ([]byte, []int) { - return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{0} -} - -func (x *ServerReflectionRequest) GetHost() string { - if x != nil { - return x.Host - } - return "" -} - -func (m *ServerReflectionRequest) GetMessageRequest() isServerReflectionRequest_MessageRequest { - if m != nil { - return m.MessageRequest - } - return nil -} - -func (x *ServerReflectionRequest) GetFileByFilename() string { - if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileByFilename); ok { - return x.FileByFilename - } - return "" -} - -func (x *ServerReflectionRequest) GetFileContainingSymbol() string { - if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileContainingSymbol); ok { - return x.FileContainingSymbol - } - return "" -} - -func (x *ServerReflectionRequest) GetFileContainingExtension() *ExtensionRequest { - if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileContainingExtension); ok { - return x.FileContainingExtension - } - return nil -} - -func (x *ServerReflectionRequest) GetAllExtensionNumbersOfType() string { - if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_AllExtensionNumbersOfType); ok { - return x.AllExtensionNumbersOfType - } - return "" -} - -func (x *ServerReflectionRequest) GetListServices() string { - if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_ListServices); ok { - return x.ListServices - } - return "" -} - -type isServerReflectionRequest_MessageRequest interface { - isServerReflectionRequest_MessageRequest() -} - -type ServerReflectionRequest_FileByFilename struct { - // Find a proto file by the file name. - FileByFilename string `protobuf:"bytes,3,opt,name=file_by_filename,json=fileByFilename,proto3,oneof"` -} - -type ServerReflectionRequest_FileContainingSymbol struct { - // Find the proto file that declares the given fully-qualified symbol name. - // This field should be a fully-qualified symbol name - // (e.g. <package>.<service>[.<method>] or <package>.<type>). - FileContainingSymbol string `protobuf:"bytes,4,opt,name=file_containing_symbol,json=fileContainingSymbol,proto3,oneof"` -} - -type ServerReflectionRequest_FileContainingExtension struct { - // Find the proto file which defines an extension extending the given - // message type with the given field number. - FileContainingExtension *ExtensionRequest `protobuf:"bytes,5,opt,name=file_containing_extension,json=fileContainingExtension,proto3,oneof"` -} - -type ServerReflectionRequest_AllExtensionNumbersOfType struct { - // Finds the tag numbers used by all known extensions of the given message - // type, and appends them to ExtensionNumberResponse in an undefined order. - // Its corresponding method is best-effort: it's not guaranteed that the - // reflection service will implement this method, and it's not guaranteed - // that this method will provide all extensions. Returns - // StatusCode::UNIMPLEMENTED if it's not implemented. - // This field should be a fully-qualified type name. The format is - // <package>.<type> - AllExtensionNumbersOfType string `protobuf:"bytes,6,opt,name=all_extension_numbers_of_type,json=allExtensionNumbersOfType,proto3,oneof"` -} - -type ServerReflectionRequest_ListServices struct { - // List the full names of registered services. The content will not be - // checked. 
- ListServices string `protobuf:"bytes,7,opt,name=list_services,json=listServices,proto3,oneof"` -} - -func (*ServerReflectionRequest_FileByFilename) isServerReflectionRequest_MessageRequest() {} - -func (*ServerReflectionRequest_FileContainingSymbol) isServerReflectionRequest_MessageRequest() {} - -func (*ServerReflectionRequest_FileContainingExtension) isServerReflectionRequest_MessageRequest() {} - -func (*ServerReflectionRequest_AllExtensionNumbersOfType) isServerReflectionRequest_MessageRequest() { -} - -func (*ServerReflectionRequest_ListServices) isServerReflectionRequest_MessageRequest() {} - -// The type name and extension number sent by the client when requesting -// file_containing_extension. -type ExtensionRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Fully-qualified type name. The format should be <package>.<type> - ContainingType string `protobuf:"bytes,1,opt,name=containing_type,json=containingType,proto3" json:"containing_type,omitempty"` - ExtensionNumber int32 `protobuf:"varint,2,opt,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"` -} - -func (x *ExtensionRequest) Reset() { - *x = ExtensionRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ExtensionRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ExtensionRequest) ProtoMessage() {} - -func (x *ExtensionRequest) ProtoReflect() protoreflect.Message { - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ExtensionRequest.ProtoReflect.Descriptor instead. -func (*ExtensionRequest) Descriptor() ([]byte, []int) { - return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{1} -} - -func (x *ExtensionRequest) GetContainingType() string { - if x != nil { - return x.ContainingType - } - return "" -} - -func (x *ExtensionRequest) GetExtensionNumber() int32 { - if x != nil { - return x.ExtensionNumber - } - return 0 -} - -// The message sent by the server to answer ServerReflectionInfo method. -type ServerReflectionResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ValidHost string `protobuf:"bytes,1,opt,name=valid_host,json=validHost,proto3" json:"valid_host,omitempty"` - OriginalRequest *ServerReflectionRequest `protobuf:"bytes,2,opt,name=original_request,json=originalRequest,proto3" json:"original_request,omitempty"` - // The server sets one of the following fields according to the message_request - // in the request. 
- // - // Types that are assignable to MessageResponse: - // - // *ServerReflectionResponse_FileDescriptorResponse - // *ServerReflectionResponse_AllExtensionNumbersResponse - // *ServerReflectionResponse_ListServicesResponse - // *ServerReflectionResponse_ErrorResponse - MessageResponse isServerReflectionResponse_MessageResponse `protobuf_oneof:"message_response"` -} - -func (x *ServerReflectionResponse) Reset() { - *x = ServerReflectionResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ServerReflectionResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ServerReflectionResponse) ProtoMessage() {} - -func (x *ServerReflectionResponse) ProtoReflect() protoreflect.Message { - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ServerReflectionResponse.ProtoReflect.Descriptor instead. -func (*ServerReflectionResponse) Descriptor() ([]byte, []int) { - return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{2} -} - -func (x *ServerReflectionResponse) GetValidHost() string { - if x != nil { - return x.ValidHost - } - return "" -} - -func (x *ServerReflectionResponse) GetOriginalRequest() *ServerReflectionRequest { - if x != nil { - return x.OriginalRequest - } - return nil -} - -func (m *ServerReflectionResponse) GetMessageResponse() isServerReflectionResponse_MessageResponse { - if m != nil { - return m.MessageResponse - } - return nil -} - -func (x *ServerReflectionResponse) GetFileDescriptorResponse() *FileDescriptorResponse { - if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_FileDescriptorResponse); ok { - return x.FileDescriptorResponse - } - return nil -} - -func (x *ServerReflectionResponse) GetAllExtensionNumbersResponse() *ExtensionNumberResponse { - if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_AllExtensionNumbersResponse); ok { - return x.AllExtensionNumbersResponse - } - return nil -} - -func (x *ServerReflectionResponse) GetListServicesResponse() *ListServiceResponse { - if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_ListServicesResponse); ok { - return x.ListServicesResponse - } - return nil -} - -func (x *ServerReflectionResponse) GetErrorResponse() *ErrorResponse { - if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_ErrorResponse); ok { - return x.ErrorResponse - } - return nil -} - -type isServerReflectionResponse_MessageResponse interface { - isServerReflectionResponse_MessageResponse() -} - -type ServerReflectionResponse_FileDescriptorResponse struct { - // This message is used to answer file_by_filename, file_containing_symbol, - // file_containing_extension requests with transitive dependencies. - // As the repeated label is not allowed in oneof fields, we use a - // FileDescriptorResponse message to encapsulate the repeated fields. - // The reflection service is allowed to avoid sending FileDescriptorProtos - // that were previously sent in response to earlier requests in the stream. 
- FileDescriptorResponse *FileDescriptorResponse `protobuf:"bytes,4,opt,name=file_descriptor_response,json=fileDescriptorResponse,proto3,oneof"` -} - -type ServerReflectionResponse_AllExtensionNumbersResponse struct { - // This message is used to answer all_extension_numbers_of_type requests. - AllExtensionNumbersResponse *ExtensionNumberResponse `protobuf:"bytes,5,opt,name=all_extension_numbers_response,json=allExtensionNumbersResponse,proto3,oneof"` -} - -type ServerReflectionResponse_ListServicesResponse struct { - // This message is used to answer list_services requests. - ListServicesResponse *ListServiceResponse `protobuf:"bytes,6,opt,name=list_services_response,json=listServicesResponse,proto3,oneof"` -} - -type ServerReflectionResponse_ErrorResponse struct { - // This message is used when an error occurs. - ErrorResponse *ErrorResponse `protobuf:"bytes,7,opt,name=error_response,json=errorResponse,proto3,oneof"` -} - -func (*ServerReflectionResponse_FileDescriptorResponse) isServerReflectionResponse_MessageResponse() { -} - -func (*ServerReflectionResponse_AllExtensionNumbersResponse) isServerReflectionResponse_MessageResponse() { -} - -func (*ServerReflectionResponse_ListServicesResponse) isServerReflectionResponse_MessageResponse() {} - -func (*ServerReflectionResponse_ErrorResponse) isServerReflectionResponse_MessageResponse() {} - -// Serialized FileDescriptorProto messages sent by the server answering -// a file_by_filename, file_containing_symbol, or file_containing_extension -// request. -type FileDescriptorResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Serialized FileDescriptorProto messages. We avoid taking a dependency on - // descriptor.proto, which uses proto2 only features, by making them opaque - // bytes instead. - FileDescriptorProto [][]byte `protobuf:"bytes,1,rep,name=file_descriptor_proto,json=fileDescriptorProto,proto3" json:"file_descriptor_proto,omitempty"` -} - -func (x *FileDescriptorResponse) Reset() { - *x = FileDescriptorResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *FileDescriptorResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*FileDescriptorResponse) ProtoMessage() {} - -func (x *FileDescriptorResponse) ProtoReflect() protoreflect.Message { - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use FileDescriptorResponse.ProtoReflect.Descriptor instead. -func (*FileDescriptorResponse) Descriptor() ([]byte, []int) { - return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{3} -} - -func (x *FileDescriptorResponse) GetFileDescriptorProto() [][]byte { - if x != nil { - return x.FileDescriptorProto - } - return nil -} - -// A list of extension numbers sent by the server answering -// all_extension_numbers_of_type request. -type ExtensionNumberResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Full name of the base type, including the package name. 
The format - // is <package>.<type> - BaseTypeName string `protobuf:"bytes,1,opt,name=base_type_name,json=baseTypeName,proto3" json:"base_type_name,omitempty"` - ExtensionNumber []int32 `protobuf:"varint,2,rep,packed,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"` -} - -func (x *ExtensionNumberResponse) Reset() { - *x = ExtensionNumberResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ExtensionNumberResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ExtensionNumberResponse) ProtoMessage() {} - -func (x *ExtensionNumberResponse) ProtoReflect() protoreflect.Message { - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ExtensionNumberResponse.ProtoReflect.Descriptor instead. -func (*ExtensionNumberResponse) Descriptor() ([]byte, []int) { - return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{4} -} - -func (x *ExtensionNumberResponse) GetBaseTypeName() string { - if x != nil { - return x.BaseTypeName - } - return "" -} - -func (x *ExtensionNumberResponse) GetExtensionNumber() []int32 { - if x != nil { - return x.ExtensionNumber - } - return nil -} - -// A list of ServiceResponse sent by the server answering list_services request. -type ListServiceResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The information of each service may be expanded in the future, so we use - // ServiceResponse message to encapsulate it. - Service []*ServiceResponse `protobuf:"bytes,1,rep,name=service,proto3" json:"service,omitempty"` -} - -func (x *ListServiceResponse) Reset() { - *x = ListServiceResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListServiceResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListServiceResponse) ProtoMessage() {} - -func (x *ListServiceResponse) ProtoReflect() protoreflect.Message { - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListServiceResponse.ProtoReflect.Descriptor instead. -func (*ListServiceResponse) Descriptor() ([]byte, []int) { - return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{5} -} - -func (x *ListServiceResponse) GetService() []*ServiceResponse { - if x != nil { - return x.Service - } - return nil -} - -// The information of a single service used by ListServiceResponse to answer -// list_services request. -type ServiceResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Full name of a registered service, including its package name. 
The format - // is <package>.<service> - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` -} - -func (x *ServiceResponse) Reset() { - *x = ServiceResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ServiceResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ServiceResponse) ProtoMessage() {} - -func (x *ServiceResponse) ProtoReflect() protoreflect.Message { - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ServiceResponse.ProtoReflect.Descriptor instead. -func (*ServiceResponse) Descriptor() ([]byte, []int) { - return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{6} -} - -func (x *ServiceResponse) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -// The error code and error message sent by the server when an error occurs. -type ErrorResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // This field uses the error codes defined in grpc::StatusCode. - ErrorCode int32 `protobuf:"varint,1,opt,name=error_code,json=errorCode,proto3" json:"error_code,omitempty"` - ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` -} - -func (x *ErrorResponse) Reset() { - *x = ErrorResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ErrorResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ErrorResponse) ProtoMessage() {} - -func (x *ErrorResponse) ProtoReflect() protoreflect.Message { - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ErrorResponse.ProtoReflect.Descriptor instead. 
-func (*ErrorResponse) Descriptor() ([]byte, []int) { - return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{7} -} - -func (x *ErrorResponse) GetErrorCode() int32 { - if x != nil { - return x.ErrorCode - } - return 0 -} - -func (x *ErrorResponse) GetErrorMessage() string { - if x != nil { - return x.ErrorMessage - } - return "" -} - -var File_grpc_reflection_v1_reflection_proto protoreflect.FileDescriptor - -var file_grpc_reflection_v1_reflection_proto_rawDesc = []byte{ - 0x0a, 0x23, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x22, 0xf3, 0x02, 0x0a, 0x17, 0x53, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x2a, 0x0a, 0x10, 0x66, 0x69, 0x6c, - 0x65, 0x5f, 0x62, 0x79, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0e, 0x66, 0x69, 0x6c, 0x65, 0x42, 0x79, 0x46, 0x69, 0x6c, - 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x36, 0x0a, 0x16, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, - 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x14, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, - 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x53, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x12, 0x62, 0x0a, - 0x19, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, - 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x24, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x17, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, - 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x12, 0x42, 0x0a, 0x1d, 0x61, 0x6c, 0x6c, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, - 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x5f, 0x6f, 0x66, 0x5f, 0x74, 0x79, - 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x19, 0x61, 0x6c, 0x6c, 0x45, - 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x4f, - 0x66, 0x54, 0x79, 0x70, 0x65, 0x12, 0x25, 0x0a, 0x0d, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, - 0x6c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x42, 0x11, 0x0a, 0x0f, - 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, - 0x66, 0x0a, 0x10, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, - 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x63, 0x6f, - 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x10, - 0x65, 0x78, 0x74, 0x65, 
0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0xae, 0x04, 0x0a, 0x18, 0x53, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x68, 0x6f, - 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x48, - 0x6f, 0x73, 0x74, 0x12, 0x56, 0x0a, 0x10, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x5f, - 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, - 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, - 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0f, 0x6f, 0x72, 0x69, 0x67, - 0x69, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x66, 0x0a, 0x18, 0x66, - 0x69, 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x72, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, - 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, - 0x76, 0x31, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, - 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x16, 0x66, 0x69, 0x6c, - 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x72, 0x0a, 0x1e, 0x61, 0x6c, 0x6c, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x5f, 0x72, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x72, - 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, - 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x1b, 0x61, 0x6c, 0x6c, 0x45, - 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5f, 0x0a, 0x16, 0x6c, 0x69, 0x73, 0x74, 0x5f, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, - 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, - 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x48, 0x00, 0x52, 0x14, 0x6c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x0e, 0x65, 0x72, 0x72, 0x6f, - 0x72, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x12, 0x0a, 0x10, 
0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, - 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x4c, 0x0a, 0x16, 0x46, 0x69, 0x6c, 0x65, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x32, 0x0a, 0x15, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x0c, 0x52, 0x13, 0x66, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, - 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x6a, 0x0a, 0x17, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, - 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x62, 0x61, 0x73, 0x65, 0x54, - 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x65, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x05, 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, - 0x65, 0x72, 0x22, 0x54, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3d, 0x0a, 0x07, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x72, 0x70, - 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, - 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x22, 0x25, 0x0a, 0x0f, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, - 0x53, 0x0a, 0x0d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x12, - 0x23, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x32, 0x89, 0x01, 0x0a, 0x10, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, - 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x75, 0x0a, 0x14, 0x53, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, - 0x6f, 0x12, 0x2b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, - 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, - 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x28, 0x01, 0x30, 0x01, - 0x42, 0x66, 0x0a, 0x15, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x42, 0x15, 0x53, 
0x65, 0x72, 0x76, 0x65, - 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x50, 0x01, 0x5a, 0x34, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, - 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x72, 0x65, 0x66, 0x6c, 0x65, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_grpc_reflection_v1_reflection_proto_rawDescOnce sync.Once - file_grpc_reflection_v1_reflection_proto_rawDescData = file_grpc_reflection_v1_reflection_proto_rawDesc -) - -func file_grpc_reflection_v1_reflection_proto_rawDescGZIP() []byte { - file_grpc_reflection_v1_reflection_proto_rawDescOnce.Do(func() { - file_grpc_reflection_v1_reflection_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_reflection_v1_reflection_proto_rawDescData) - }) - return file_grpc_reflection_v1_reflection_proto_rawDescData -} - -var file_grpc_reflection_v1_reflection_proto_msgTypes = make([]protoimpl.MessageInfo, 8) -var file_grpc_reflection_v1_reflection_proto_goTypes = []interface{}{ - (*ServerReflectionRequest)(nil), // 0: grpc.reflection.v1.ServerReflectionRequest - (*ExtensionRequest)(nil), // 1: grpc.reflection.v1.ExtensionRequest - (*ServerReflectionResponse)(nil), // 2: grpc.reflection.v1.ServerReflectionResponse - (*FileDescriptorResponse)(nil), // 3: grpc.reflection.v1.FileDescriptorResponse - (*ExtensionNumberResponse)(nil), // 4: grpc.reflection.v1.ExtensionNumberResponse - (*ListServiceResponse)(nil), // 5: grpc.reflection.v1.ListServiceResponse - (*ServiceResponse)(nil), // 6: grpc.reflection.v1.ServiceResponse - (*ErrorResponse)(nil), // 7: grpc.reflection.v1.ErrorResponse -} -var file_grpc_reflection_v1_reflection_proto_depIdxs = []int32{ - 1, // 0: grpc.reflection.v1.ServerReflectionRequest.file_containing_extension:type_name -> grpc.reflection.v1.ExtensionRequest - 0, // 1: grpc.reflection.v1.ServerReflectionResponse.original_request:type_name -> grpc.reflection.v1.ServerReflectionRequest - 3, // 2: grpc.reflection.v1.ServerReflectionResponse.file_descriptor_response:type_name -> grpc.reflection.v1.FileDescriptorResponse - 4, // 3: grpc.reflection.v1.ServerReflectionResponse.all_extension_numbers_response:type_name -> grpc.reflection.v1.ExtensionNumberResponse - 5, // 4: grpc.reflection.v1.ServerReflectionResponse.list_services_response:type_name -> grpc.reflection.v1.ListServiceResponse - 7, // 5: grpc.reflection.v1.ServerReflectionResponse.error_response:type_name -> grpc.reflection.v1.ErrorResponse - 6, // 6: grpc.reflection.v1.ListServiceResponse.service:type_name -> grpc.reflection.v1.ServiceResponse - 0, // 7: grpc.reflection.v1.ServerReflection.ServerReflectionInfo:input_type -> grpc.reflection.v1.ServerReflectionRequest - 2, // 8: grpc.reflection.v1.ServerReflection.ServerReflectionInfo:output_type -> grpc.reflection.v1.ServerReflectionResponse - 8, // [8:9] is the sub-list for method output_type - 7, // [7:8] is the sub-list for method input_type - 7, // [7:7] is the sub-list for extension type_name - 7, // [7:7] is the sub-list for extension extendee - 0, // [0:7] is the sub-list for field type_name -} - -func init() { file_grpc_reflection_v1_reflection_proto_init() } -func file_grpc_reflection_v1_reflection_proto_init() { - if File_grpc_reflection_v1_reflection_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - 
file_grpc_reflection_v1_reflection_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ServerReflectionRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_reflection_v1_reflection_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExtensionRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_reflection_v1_reflection_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ServerReflectionResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_reflection_v1_reflection_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*FileDescriptorResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_reflection_v1_reflection_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExtensionNumberResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_reflection_v1_reflection_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListServiceResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_reflection_v1_reflection_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ServiceResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_reflection_v1_reflection_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ErrorResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_grpc_reflection_v1_reflection_proto_msgTypes[0].OneofWrappers = []interface{}{ - (*ServerReflectionRequest_FileByFilename)(nil), - (*ServerReflectionRequest_FileContainingSymbol)(nil), - (*ServerReflectionRequest_FileContainingExtension)(nil), - (*ServerReflectionRequest_AllExtensionNumbersOfType)(nil), - (*ServerReflectionRequest_ListServices)(nil), - } - file_grpc_reflection_v1_reflection_proto_msgTypes[2].OneofWrappers = []interface{}{ - (*ServerReflectionResponse_FileDescriptorResponse)(nil), - (*ServerReflectionResponse_AllExtensionNumbersResponse)(nil), - (*ServerReflectionResponse_ListServicesResponse)(nil), - (*ServerReflectionResponse_ErrorResponse)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_grpc_reflection_v1_reflection_proto_rawDesc, - NumEnums: 0, - NumMessages: 8, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_grpc_reflection_v1_reflection_proto_goTypes, - DependencyIndexes: file_grpc_reflection_v1_reflection_proto_depIdxs, - MessageInfos: file_grpc_reflection_v1_reflection_proto_msgTypes, - }.Build() - File_grpc_reflection_v1_reflection_proto = out.File - file_grpc_reflection_v1_reflection_proto_rawDesc = nil - 
file_grpc_reflection_v1_reflection_proto_goTypes = nil - file_grpc_reflection_v1_reflection_proto_depIdxs = nil -} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection_grpc.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection_grpc.pb.go deleted file mode 100644 index 17d21fde22a..00000000000 --- a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection_grpc.pb.go +++ /dev/null @@ -1,165 +0,0 @@ -// Copyright 2016 The gRPC Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Service exported by server reflection. A more complete description of how -// server reflection works can be found at -// https://github.com/grpc/grpc/blob/master/doc/server-reflection.md -// -// The canonical version of this proto can be found at -// https://github.com/grpc/grpc-proto/blob/master/grpc/reflection/v1/reflection.proto - -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. -// versions: -// - protoc-gen-go-grpc v1.4.0 -// - protoc v4.25.2 -// source: grpc/reflection/v1/reflection.proto - -package grpc_reflection_v1 - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.62.0 or later. -const _ = grpc.SupportPackageIsVersion8 - -const ( - ServerReflection_ServerReflectionInfo_FullMethodName = "/grpc.reflection.v1.ServerReflection/ServerReflectionInfo" -) - -// ServerReflectionClient is the client API for ServerReflection service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -type ServerReflectionClient interface { - // The reflection service is structured as a bidirectional stream, ensuring - // all related requests go to a single server. - ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) -} - -type serverReflectionClient struct { - cc grpc.ClientConnInterface -} - -func NewServerReflectionClient(cc grpc.ClientConnInterface) ServerReflectionClient { - return &serverReflectionClient{cc} -} - -func (c *serverReflectionClient) ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &ServerReflection_ServiceDesc.Streams[0], ServerReflection_ServerReflectionInfo_FullMethodName, cOpts...) 
- if err != nil { - return nil, err - } - x := &serverReflectionServerReflectionInfoClient{ClientStream: stream} - return x, nil -} - -type ServerReflection_ServerReflectionInfoClient interface { - Send(*ServerReflectionRequest) error - Recv() (*ServerReflectionResponse, error) - grpc.ClientStream -} - -type serverReflectionServerReflectionInfoClient struct { - grpc.ClientStream -} - -func (x *serverReflectionServerReflectionInfoClient) Send(m *ServerReflectionRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *serverReflectionServerReflectionInfoClient) Recv() (*ServerReflectionResponse, error) { - m := new(ServerReflectionResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// ServerReflectionServer is the server API for ServerReflection service. -// All implementations should embed UnimplementedServerReflectionServer -// for forward compatibility -type ServerReflectionServer interface { - // The reflection service is structured as a bidirectional stream, ensuring - // all related requests go to a single server. - ServerReflectionInfo(ServerReflection_ServerReflectionInfoServer) error -} - -// UnimplementedServerReflectionServer should be embedded to have forward compatible implementations. -type UnimplementedServerReflectionServer struct { -} - -func (UnimplementedServerReflectionServer) ServerReflectionInfo(ServerReflection_ServerReflectionInfoServer) error { - return status.Errorf(codes.Unimplemented, "method ServerReflectionInfo not implemented") -} - -// UnsafeServerReflectionServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to ServerReflectionServer will -// result in compilation errors. -type UnsafeServerReflectionServer interface { - mustEmbedUnimplementedServerReflectionServer() -} - -func RegisterServerReflectionServer(s grpc.ServiceRegistrar, srv ServerReflectionServer) { - s.RegisterService(&ServerReflection_ServiceDesc, srv) -} - -func _ServerReflection_ServerReflectionInfo_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(ServerReflectionServer).ServerReflectionInfo(&serverReflectionServerReflectionInfoServer{ServerStream: stream}) -} - -type ServerReflection_ServerReflectionInfoServer interface { - Send(*ServerReflectionResponse) error - Recv() (*ServerReflectionRequest, error) - grpc.ServerStream -} - -type serverReflectionServerReflectionInfoServer struct { - grpc.ServerStream -} - -func (x *serverReflectionServerReflectionInfoServer) Send(m *ServerReflectionResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *serverReflectionServerReflectionInfoServer) Recv() (*ServerReflectionRequest, error) { - m := new(ServerReflectionRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// ServerReflection_ServiceDesc is the grpc.ServiceDesc for ServerReflection service. 
-// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var ServerReflection_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "grpc.reflection.v1.ServerReflection", - HandlerType: (*ServerReflectionServer)(nil), - Methods: []grpc.MethodDesc{}, - Streams: []grpc.StreamDesc{ - { - StreamName: "ServerReflectionInfo", - Handler: _ServerReflection_ServerReflectionInfo_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "grpc/reflection/v1/reflection.proto", -} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go deleted file mode 100644 index cd032acefca..00000000000 --- a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go +++ /dev/null @@ -1,1028 +0,0 @@ -// Copyright 2016 The gRPC Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// Service exported by server reflection - -// Warning: this entire file is deprecated. Use this instead: -// https://github.com/grpc/grpc-proto/blob/master/grpc/reflection/v1/reflection.proto - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.34.1 -// protoc v4.25.2 -// grpc/reflection/v1alpha/reflection.proto is a deprecated file. - -package grpc_reflection_v1alpha - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// The message sent by the client when calling ServerReflectionInfo method. -// -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. -type ServerReflectionRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. - Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"` - // To use reflection service, the client should set one of the following - // fields in message_request. The server distinguishes requests by their - // defined field and then handles them using corresponding methods. 
- // - // Types that are assignable to MessageRequest: - // - // *ServerReflectionRequest_FileByFilename - // *ServerReflectionRequest_FileContainingSymbol - // *ServerReflectionRequest_FileContainingExtension - // *ServerReflectionRequest_AllExtensionNumbersOfType - // *ServerReflectionRequest_ListServices - MessageRequest isServerReflectionRequest_MessageRequest `protobuf_oneof:"message_request"` -} - -func (x *ServerReflectionRequest) Reset() { - *x = ServerReflectionRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ServerReflectionRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ServerReflectionRequest) ProtoMessage() {} - -func (x *ServerReflectionRequest) ProtoReflect() protoreflect.Message { - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ServerReflectionRequest.ProtoReflect.Descriptor instead. -func (*ServerReflectionRequest) Descriptor() ([]byte, []int) { - return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{0} -} - -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. -func (x *ServerReflectionRequest) GetHost() string { - if x != nil { - return x.Host - } - return "" -} - -func (m *ServerReflectionRequest) GetMessageRequest() isServerReflectionRequest_MessageRequest { - if m != nil { - return m.MessageRequest - } - return nil -} - -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. -func (x *ServerReflectionRequest) GetFileByFilename() string { - if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileByFilename); ok { - return x.FileByFilename - } - return "" -} - -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. -func (x *ServerReflectionRequest) GetFileContainingSymbol() string { - if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileContainingSymbol); ok { - return x.FileContainingSymbol - } - return "" -} - -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. -func (x *ServerReflectionRequest) GetFileContainingExtension() *ExtensionRequest { - if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileContainingExtension); ok { - return x.FileContainingExtension - } - return nil -} - -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. -func (x *ServerReflectionRequest) GetAllExtensionNumbersOfType() string { - if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_AllExtensionNumbersOfType); ok { - return x.AllExtensionNumbersOfType - } - return "" -} - -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
-func (x *ServerReflectionRequest) GetListServices() string { - if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_ListServices); ok { - return x.ListServices - } - return "" -} - -type isServerReflectionRequest_MessageRequest interface { - isServerReflectionRequest_MessageRequest() -} - -type ServerReflectionRequest_FileByFilename struct { - // Find a proto file by the file name. - // - // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. - FileByFilename string `protobuf:"bytes,3,opt,name=file_by_filename,json=fileByFilename,proto3,oneof"` -} - -type ServerReflectionRequest_FileContainingSymbol struct { - // Find the proto file that declares the given fully-qualified symbol name. - // This field should be a fully-qualified symbol name - // (e.g. <package>.<service>[.<method>] or <package>.<type>). - // - // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. - FileContainingSymbol string `protobuf:"bytes,4,opt,name=file_containing_symbol,json=fileContainingSymbol,proto3,oneof"` -} - -type ServerReflectionRequest_FileContainingExtension struct { - // Find the proto file which defines an extension extending the given - // message type with the given field number. - // - // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. - FileContainingExtension *ExtensionRequest `protobuf:"bytes,5,opt,name=file_containing_extension,json=fileContainingExtension,proto3,oneof"` -} - -type ServerReflectionRequest_AllExtensionNumbersOfType struct { - // Finds the tag numbers used by all known extensions of extendee_type, and - // appends them to ExtensionNumberResponse in an undefined order. - // Its corresponding method is best-effort: it's not guaranteed that the - // reflection service will implement this method, and it's not guaranteed - // that this method will provide all extensions. Returns - // StatusCode::UNIMPLEMENTED if it's not implemented. - // This field should be a fully-qualified type name. The format is - // <package>.<type> - // - // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. - AllExtensionNumbersOfType string `protobuf:"bytes,6,opt,name=all_extension_numbers_of_type,json=allExtensionNumbersOfType,proto3,oneof"` -} - -type ServerReflectionRequest_ListServices struct { - // List the full names of registered services. The content will not be - // checked. - // - // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. - ListServices string `protobuf:"bytes,7,opt,name=list_services,json=listServices,proto3,oneof"` -} - -func (*ServerReflectionRequest_FileByFilename) isServerReflectionRequest_MessageRequest() {} - -func (*ServerReflectionRequest_FileContainingSymbol) isServerReflectionRequest_MessageRequest() {} - -func (*ServerReflectionRequest_FileContainingExtension) isServerReflectionRequest_MessageRequest() {} - -func (*ServerReflectionRequest_AllExtensionNumbersOfType) isServerReflectionRequest_MessageRequest() { -} - -func (*ServerReflectionRequest_ListServices) isServerReflectionRequest_MessageRequest() {} - -// The type name and extension number sent by the client when requesting -// file_containing_extension. -// -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
-type ExtensionRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Fully-qualified type name. The format should be <package>.<type> - // - // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. - ContainingType string `protobuf:"bytes,1,opt,name=containing_type,json=containingType,proto3" json:"containing_type,omitempty"` - // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. - ExtensionNumber int32 `protobuf:"varint,2,opt,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"` -} - -func (x *ExtensionRequest) Reset() { - *x = ExtensionRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ExtensionRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ExtensionRequest) ProtoMessage() {} - -func (x *ExtensionRequest) ProtoReflect() protoreflect.Message { - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ExtensionRequest.ProtoReflect.Descriptor instead. -func (*ExtensionRequest) Descriptor() ([]byte, []int) { - return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{1} -} - -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. -func (x *ExtensionRequest) GetContainingType() string { - if x != nil { - return x.ContainingType - } - return "" -} - -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. -func (x *ExtensionRequest) GetExtensionNumber() int32 { - if x != nil { - return x.ExtensionNumber - } - return 0 -} - -// The message sent by the server to answer ServerReflectionInfo method. -// -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. -type ServerReflectionResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. - ValidHost string `protobuf:"bytes,1,opt,name=valid_host,json=validHost,proto3" json:"valid_host,omitempty"` - // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. - OriginalRequest *ServerReflectionRequest `protobuf:"bytes,2,opt,name=original_request,json=originalRequest,proto3" json:"original_request,omitempty"` - // The server set one of the following fields according to the message_request - // in the request. 
- // - // Types that are assignable to MessageResponse: - // - // *ServerReflectionResponse_FileDescriptorResponse - // *ServerReflectionResponse_AllExtensionNumbersResponse - // *ServerReflectionResponse_ListServicesResponse - // *ServerReflectionResponse_ErrorResponse - MessageResponse isServerReflectionResponse_MessageResponse `protobuf_oneof:"message_response"` -} - -func (x *ServerReflectionResponse) Reset() { - *x = ServerReflectionResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ServerReflectionResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ServerReflectionResponse) ProtoMessage() {} - -func (x *ServerReflectionResponse) ProtoReflect() protoreflect.Message { - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ServerReflectionResponse.ProtoReflect.Descriptor instead. -func (*ServerReflectionResponse) Descriptor() ([]byte, []int) { - return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{2} -} - -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. -func (x *ServerReflectionResponse) GetValidHost() string { - if x != nil { - return x.ValidHost - } - return "" -} - -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. -func (x *ServerReflectionResponse) GetOriginalRequest() *ServerReflectionRequest { - if x != nil { - return x.OriginalRequest - } - return nil -} - -func (m *ServerReflectionResponse) GetMessageResponse() isServerReflectionResponse_MessageResponse { - if m != nil { - return m.MessageResponse - } - return nil -} - -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. -func (x *ServerReflectionResponse) GetFileDescriptorResponse() *FileDescriptorResponse { - if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_FileDescriptorResponse); ok { - return x.FileDescriptorResponse - } - return nil -} - -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. -func (x *ServerReflectionResponse) GetAllExtensionNumbersResponse() *ExtensionNumberResponse { - if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_AllExtensionNumbersResponse); ok { - return x.AllExtensionNumbersResponse - } - return nil -} - -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. -func (x *ServerReflectionResponse) GetListServicesResponse() *ListServiceResponse { - if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_ListServicesResponse); ok { - return x.ListServicesResponse - } - return nil -} - -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
-func (x *ServerReflectionResponse) GetErrorResponse() *ErrorResponse { - if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_ErrorResponse); ok { - return x.ErrorResponse - } - return nil -} - -type isServerReflectionResponse_MessageResponse interface { - isServerReflectionResponse_MessageResponse() -} - -type ServerReflectionResponse_FileDescriptorResponse struct { - // This message is used to answer file_by_filename, file_containing_symbol, - // file_containing_extension requests with transitive dependencies. As - // the repeated label is not allowed in oneof fields, we use a - // FileDescriptorResponse message to encapsulate the repeated fields. - // The reflection service is allowed to avoid sending FileDescriptorProtos - // that were previously sent in response to earlier requests in the stream. - // - // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. - FileDescriptorResponse *FileDescriptorResponse `protobuf:"bytes,4,opt,name=file_descriptor_response,json=fileDescriptorResponse,proto3,oneof"` -} - -type ServerReflectionResponse_AllExtensionNumbersResponse struct { - // This message is used to answer all_extension_numbers_of_type request. - // - // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. - AllExtensionNumbersResponse *ExtensionNumberResponse `protobuf:"bytes,5,opt,name=all_extension_numbers_response,json=allExtensionNumbersResponse,proto3,oneof"` -} - -type ServerReflectionResponse_ListServicesResponse struct { - // This message is used to answer list_services request. - // - // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. - ListServicesResponse *ListServiceResponse `protobuf:"bytes,6,opt,name=list_services_response,json=listServicesResponse,proto3,oneof"` -} - -type ServerReflectionResponse_ErrorResponse struct { - // This message is used when an error occurs. - // - // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. - ErrorResponse *ErrorResponse `protobuf:"bytes,7,opt,name=error_response,json=errorResponse,proto3,oneof"` -} - -func (*ServerReflectionResponse_FileDescriptorResponse) isServerReflectionResponse_MessageResponse() { -} - -func (*ServerReflectionResponse_AllExtensionNumbersResponse) isServerReflectionResponse_MessageResponse() { -} - -func (*ServerReflectionResponse_ListServicesResponse) isServerReflectionResponse_MessageResponse() {} - -func (*ServerReflectionResponse_ErrorResponse) isServerReflectionResponse_MessageResponse() {} - -// Serialized FileDescriptorProto messages sent by the server answering -// a file_by_filename, file_containing_symbol, or file_containing_extension -// request. -// -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. -type FileDescriptorResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Serialized FileDescriptorProto messages. We avoid taking a dependency on - // descriptor.proto, which uses proto2 only features, by making them opaque - // bytes instead. - // - // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
- FileDescriptorProto [][]byte `protobuf:"bytes,1,rep,name=file_descriptor_proto,json=fileDescriptorProto,proto3" json:"file_descriptor_proto,omitempty"` -} - -func (x *FileDescriptorResponse) Reset() { - *x = FileDescriptorResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *FileDescriptorResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*FileDescriptorResponse) ProtoMessage() {} - -func (x *FileDescriptorResponse) ProtoReflect() protoreflect.Message { - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use FileDescriptorResponse.ProtoReflect.Descriptor instead. -func (*FileDescriptorResponse) Descriptor() ([]byte, []int) { - return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{3} -} - -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. -func (x *FileDescriptorResponse) GetFileDescriptorProto() [][]byte { - if x != nil { - return x.FileDescriptorProto - } - return nil -} - -// A list of extension numbers sent by the server answering -// all_extension_numbers_of_type request. -// -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. -type ExtensionNumberResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Full name of the base type, including the package name. The format - // is <package>.<type> - // - // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. - BaseTypeName string `protobuf:"bytes,1,opt,name=base_type_name,json=baseTypeName,proto3" json:"base_type_name,omitempty"` - // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. - ExtensionNumber []int32 `protobuf:"varint,2,rep,packed,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"` -} - -func (x *ExtensionNumberResponse) Reset() { - *x = ExtensionNumberResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ExtensionNumberResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ExtensionNumberResponse) ProtoMessage() {} - -func (x *ExtensionNumberResponse) ProtoReflect() protoreflect.Message { - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ExtensionNumberResponse.ProtoReflect.Descriptor instead. -func (*ExtensionNumberResponse) Descriptor() ([]byte, []int) { - return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{4} -} - -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
-func (x *ExtensionNumberResponse) GetBaseTypeName() string { - if x != nil { - return x.BaseTypeName - } - return "" -} - -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. -func (x *ExtensionNumberResponse) GetExtensionNumber() []int32 { - if x != nil { - return x.ExtensionNumber - } - return nil -} - -// A list of ServiceResponse sent by the server answering list_services request. -// -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. -type ListServiceResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The information of each service may be expanded in the future, so we use - // ServiceResponse message to encapsulate it. - // - // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. - Service []*ServiceResponse `protobuf:"bytes,1,rep,name=service,proto3" json:"service,omitempty"` -} - -func (x *ListServiceResponse) Reset() { - *x = ListServiceResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListServiceResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListServiceResponse) ProtoMessage() {} - -func (x *ListServiceResponse) ProtoReflect() protoreflect.Message { - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListServiceResponse.ProtoReflect.Descriptor instead. -func (*ListServiceResponse) Descriptor() ([]byte, []int) { - return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{5} -} - -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. -func (x *ListServiceResponse) GetService() []*ServiceResponse { - if x != nil { - return x.Service - } - return nil -} - -// The information of a single service used by ListServiceResponse to answer -// list_services request. -// -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. -type ServiceResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Full name of a registered service, including its package name. The format - // is <package>.<service> - // - // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` -} - -func (x *ServiceResponse) Reset() { - *x = ServiceResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ServiceResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ServiceResponse) ProtoMessage() {} - -func (x *ServiceResponse) ProtoReflect() protoreflect.Message { - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ServiceResponse.ProtoReflect.Descriptor instead. -func (*ServiceResponse) Descriptor() ([]byte, []int) { - return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{6} -} - -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. -func (x *ServiceResponse) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -// The error code and error message sent by the server when an error occurs. -// -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. -type ErrorResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // This field uses the error codes defined in grpc::StatusCode. - // - // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. - ErrorCode int32 `protobuf:"varint,1,opt,name=error_code,json=errorCode,proto3" json:"error_code,omitempty"` - // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. - ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` -} - -func (x *ErrorResponse) Reset() { - *x = ErrorResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ErrorResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ErrorResponse) ProtoMessage() {} - -func (x *ErrorResponse) ProtoReflect() protoreflect.Message { - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ErrorResponse.ProtoReflect.Descriptor instead. -func (*ErrorResponse) Descriptor() ([]byte, []int) { - return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{7} -} - -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. -func (x *ErrorResponse) GetErrorCode() int32 { - if x != nil { - return x.ErrorCode - } - return 0 -} - -// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
-func (x *ErrorResponse) GetErrorMessage() string { - if x != nil { - return x.ErrorMessage - } - return "" -} - -var File_grpc_reflection_v1alpha_reflection_proto protoreflect.FileDescriptor - -var file_grpc_reflection_v1alpha_reflection_proto_rawDesc = []byte{ - 0x0a, 0x28, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x17, 0x67, 0x72, 0x70, 0x63, - 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, - 0x70, 0x68, 0x61, 0x22, 0xf8, 0x02, 0x0a, 0x17, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, - 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, - 0x6f, 0x73, 0x74, 0x12, 0x2a, 0x0a, 0x10, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x62, 0x79, 0x5f, 0x66, - 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, - 0x0e, 0x66, 0x69, 0x6c, 0x65, 0x42, 0x79, 0x46, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x36, 0x0a, 0x16, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, - 0x6e, 0x67, 0x5f, 0x73, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, - 0x00, 0x52, 0x14, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, - 0x67, 0x53, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x12, 0x67, 0x0a, 0x19, 0x66, 0x69, 0x6c, 0x65, 0x5f, - 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x72, 0x70, - 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, - 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x17, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, - 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x12, 0x42, 0x0a, 0x1d, 0x61, 0x6c, 0x6c, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x5f, 0x6f, 0x66, 0x5f, 0x74, 0x79, 0x70, - 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x19, 0x61, 0x6c, 0x6c, 0x45, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x4f, 0x66, - 0x54, 0x79, 0x70, 0x65, 0x12, 0x25, 0x0a, 0x0d, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, 0x6c, - 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x42, 0x11, 0x0a, 0x0f, 0x6d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x66, - 0x0a, 0x10, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, - 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x63, 0x6f, 0x6e, - 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x65, - 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0f, 0x65, 0x78, 
0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0xc7, 0x04, 0x0a, 0x18, 0x53, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x68, 0x6f, 0x73, - 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x48, 0x6f, - 0x73, 0x74, 0x12, 0x5b, 0x0a, 0x10, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x72, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, - 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, - 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, - 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0f, - 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x6b, 0x0a, 0x18, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x6f, 0x72, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x46, 0x69, 0x6c, 0x65, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x48, 0x00, 0x52, 0x16, 0x66, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x77, 0x0a, 0x1e, - 0x61, 0x6c, 0x6c, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, - 0x6d, 0x62, 0x65, 0x72, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x45, - 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x1b, 0x61, 0x6c, 0x6c, 0x45, 0x78, 0x74, - 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x64, 0x0a, 0x16, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, - 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, - 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x14, 0x6c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4f, 0x0a, 0x0e, 0x65, - 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x07, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x45, 0x72, - 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x0d, 0x65, - 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 
0x12, 0x0a, 0x10, - 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x4c, 0x0a, 0x16, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x15, 0x66, 0x69, - 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x13, 0x66, 0x69, 0x6c, 0x65, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x6a, - 0x0a, 0x17, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, - 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x62, 0x61, 0x73, - 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0c, 0x62, 0x61, 0x73, 0x65, 0x54, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, - 0x29, 0x0a, 0x10, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, - 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0x59, 0x0a, 0x13, 0x4c, 0x69, - 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x42, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x07, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x22, 0x25, 0x0a, 0x0f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x53, 0x0a, 0x0d, - 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, - 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x05, 0x52, 0x09, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x23, 0x0a, 0x0d, - 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x32, 0x93, 0x01, 0x0a, 0x10, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, 0x6c, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7f, 0x0a, 0x14, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x30, - 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, - 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x28, 0x01, 0x30, 0x01, 0x42, 0x73, 0x0a, 0x1a, 0x69, 0x6f, 0x2e, 0x67, 0x72, - 0x70, 
0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, - 0x61, 0x6c, 0x70, 0x68, 0x61, 0x42, 0x15, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x66, - 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x39, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, - 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0xb8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_grpc_reflection_v1alpha_reflection_proto_rawDescOnce sync.Once - file_grpc_reflection_v1alpha_reflection_proto_rawDescData = file_grpc_reflection_v1alpha_reflection_proto_rawDesc -) - -func file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP() []byte { - file_grpc_reflection_v1alpha_reflection_proto_rawDescOnce.Do(func() { - file_grpc_reflection_v1alpha_reflection_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_reflection_v1alpha_reflection_proto_rawDescData) - }) - return file_grpc_reflection_v1alpha_reflection_proto_rawDescData -} - -var file_grpc_reflection_v1alpha_reflection_proto_msgTypes = make([]protoimpl.MessageInfo, 8) -var file_grpc_reflection_v1alpha_reflection_proto_goTypes = []interface{}{ - (*ServerReflectionRequest)(nil), // 0: grpc.reflection.v1alpha.ServerReflectionRequest - (*ExtensionRequest)(nil), // 1: grpc.reflection.v1alpha.ExtensionRequest - (*ServerReflectionResponse)(nil), // 2: grpc.reflection.v1alpha.ServerReflectionResponse - (*FileDescriptorResponse)(nil), // 3: grpc.reflection.v1alpha.FileDescriptorResponse - (*ExtensionNumberResponse)(nil), // 4: grpc.reflection.v1alpha.ExtensionNumberResponse - (*ListServiceResponse)(nil), // 5: grpc.reflection.v1alpha.ListServiceResponse - (*ServiceResponse)(nil), // 6: grpc.reflection.v1alpha.ServiceResponse - (*ErrorResponse)(nil), // 7: grpc.reflection.v1alpha.ErrorResponse -} -var file_grpc_reflection_v1alpha_reflection_proto_depIdxs = []int32{ - 1, // 0: grpc.reflection.v1alpha.ServerReflectionRequest.file_containing_extension:type_name -> grpc.reflection.v1alpha.ExtensionRequest - 0, // 1: grpc.reflection.v1alpha.ServerReflectionResponse.original_request:type_name -> grpc.reflection.v1alpha.ServerReflectionRequest - 3, // 2: grpc.reflection.v1alpha.ServerReflectionResponse.file_descriptor_response:type_name -> grpc.reflection.v1alpha.FileDescriptorResponse - 4, // 3: grpc.reflection.v1alpha.ServerReflectionResponse.all_extension_numbers_response:type_name -> grpc.reflection.v1alpha.ExtensionNumberResponse - 5, // 4: grpc.reflection.v1alpha.ServerReflectionResponse.list_services_response:type_name -> grpc.reflection.v1alpha.ListServiceResponse - 7, // 5: grpc.reflection.v1alpha.ServerReflectionResponse.error_response:type_name -> grpc.reflection.v1alpha.ErrorResponse - 6, // 6: grpc.reflection.v1alpha.ListServiceResponse.service:type_name -> grpc.reflection.v1alpha.ServiceResponse - 0, // 7: grpc.reflection.v1alpha.ServerReflection.ServerReflectionInfo:input_type -> grpc.reflection.v1alpha.ServerReflectionRequest - 2, // 8: grpc.reflection.v1alpha.ServerReflection.ServerReflectionInfo:output_type -> grpc.reflection.v1alpha.ServerReflectionResponse - 8, // [8:9] is the sub-list for method output_type - 7, // [7:8] is the sub-list for method input_type - 7, // [7:7] is the sub-list for extension 
type_name - 7, // [7:7] is the sub-list for extension extendee - 0, // [0:7] is the sub-list for field type_name -} - -func init() { file_grpc_reflection_v1alpha_reflection_proto_init() } -func file_grpc_reflection_v1alpha_reflection_proto_init() { - if File_grpc_reflection_v1alpha_reflection_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ServerReflectionRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExtensionRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ServerReflectionResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*FileDescriptorResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExtensionNumberResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListServiceResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ServiceResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ErrorResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0].OneofWrappers = []interface{}{ - (*ServerReflectionRequest_FileByFilename)(nil), - (*ServerReflectionRequest_FileContainingSymbol)(nil), - (*ServerReflectionRequest_FileContainingExtension)(nil), - (*ServerReflectionRequest_AllExtensionNumbersOfType)(nil), - (*ServerReflectionRequest_ListServices)(nil), - } - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2].OneofWrappers = []interface{}{ - (*ServerReflectionResponse_FileDescriptorResponse)(nil), - (*ServerReflectionResponse_AllExtensionNumbersResponse)(nil), - (*ServerReflectionResponse_ListServicesResponse)(nil), - (*ServerReflectionResponse_ErrorResponse)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_grpc_reflection_v1alpha_reflection_proto_rawDesc, - NumEnums: 0, - 
NumMessages: 8, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_grpc_reflection_v1alpha_reflection_proto_goTypes, - DependencyIndexes: file_grpc_reflection_v1alpha_reflection_proto_depIdxs, - MessageInfos: file_grpc_reflection_v1alpha_reflection_proto_msgTypes, - }.Build() - File_grpc_reflection_v1alpha_reflection_proto = out.File - file_grpc_reflection_v1alpha_reflection_proto_rawDesc = nil - file_grpc_reflection_v1alpha_reflection_proto_goTypes = nil - file_grpc_reflection_v1alpha_reflection_proto_depIdxs = nil -} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go deleted file mode 100644 index 93886e38216..00000000000 --- a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright 2016 The gRPC Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// Service exported by server reflection - -// Warning: this entire file is deprecated. Use this instead: -// https://github.com/grpc/grpc-proto/blob/master/grpc/reflection/v1/reflection.proto - -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. -// versions: -// - protoc-gen-go-grpc v1.4.0 -// - protoc v4.25.2 -// grpc/reflection/v1alpha/reflection.proto is a deprecated file. - -package grpc_reflection_v1alpha - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.62.0 or later. -const _ = grpc.SupportPackageIsVersion8 - -const ( - ServerReflection_ServerReflectionInfo_FullMethodName = "/grpc.reflection.v1alpha.ServerReflection/ServerReflectionInfo" -) - -// ServerReflectionClient is the client API for ServerReflection service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -type ServerReflectionClient interface { - // The reflection service is structured as a bidirectional stream, ensuring - // all related requests go to a single server. - ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) -} - -type serverReflectionClient struct { - cc grpc.ClientConnInterface -} - -func NewServerReflectionClient(cc grpc.ClientConnInterface) ServerReflectionClient { - return &serverReflectionClient{cc} -} - -func (c *serverReflectionClient) ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - stream, err := c.cc.NewStream(ctx, &ServerReflection_ServiceDesc.Streams[0], ServerReflection_ServerReflectionInfo_FullMethodName, cOpts...) 
- if err != nil { - return nil, err - } - x := &serverReflectionServerReflectionInfoClient{ClientStream: stream} - return x, nil -} - -type ServerReflection_ServerReflectionInfoClient interface { - Send(*ServerReflectionRequest) error - Recv() (*ServerReflectionResponse, error) - grpc.ClientStream -} - -type serverReflectionServerReflectionInfoClient struct { - grpc.ClientStream -} - -func (x *serverReflectionServerReflectionInfoClient) Send(m *ServerReflectionRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *serverReflectionServerReflectionInfoClient) Recv() (*ServerReflectionResponse, error) { - m := new(ServerReflectionResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// ServerReflectionServer is the server API for ServerReflection service. -// All implementations should embed UnimplementedServerReflectionServer -// for forward compatibility -type ServerReflectionServer interface { - // The reflection service is structured as a bidirectional stream, ensuring - // all related requests go to a single server. - ServerReflectionInfo(ServerReflection_ServerReflectionInfoServer) error -} - -// UnimplementedServerReflectionServer should be embedded to have forward compatible implementations. -type UnimplementedServerReflectionServer struct { -} - -func (UnimplementedServerReflectionServer) ServerReflectionInfo(ServerReflection_ServerReflectionInfoServer) error { - return status.Errorf(codes.Unimplemented, "method ServerReflectionInfo not implemented") -} - -// UnsafeServerReflectionServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to ServerReflectionServer will -// result in compilation errors. -type UnsafeServerReflectionServer interface { - mustEmbedUnimplementedServerReflectionServer() -} - -func RegisterServerReflectionServer(s grpc.ServiceRegistrar, srv ServerReflectionServer) { - s.RegisterService(&ServerReflection_ServiceDesc, srv) -} - -func _ServerReflection_ServerReflectionInfo_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(ServerReflectionServer).ServerReflectionInfo(&serverReflectionServerReflectionInfoServer{ServerStream: stream}) -} - -type ServerReflection_ServerReflectionInfoServer interface { - Send(*ServerReflectionResponse) error - Recv() (*ServerReflectionRequest, error) - grpc.ServerStream -} - -type serverReflectionServerReflectionInfoServer struct { - grpc.ServerStream -} - -func (x *serverReflectionServerReflectionInfoServer) Send(m *ServerReflectionResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *serverReflectionServerReflectionInfoServer) Recv() (*ServerReflectionRequest, error) { - m := new(ServerReflectionRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// ServerReflection_ServiceDesc is the grpc.ServiceDesc for ServerReflection service. 
-// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var ServerReflection_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "grpc.reflection.v1alpha.ServerReflection", - HandlerType: (*ServerReflectionServer)(nil), - Methods: []grpc.MethodDesc{}, - Streams: []grpc.StreamDesc{ - { - StreamName: "ServerReflectionInfo", - Handler: _ServerReflection_ServerReflectionInfo_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "grpc/reflection/v1alpha/reflection.proto", -} diff --git a/vendor/google.golang.org/grpc/reflection/internal/internal.go b/vendor/google.golang.org/grpc/reflection/internal/internal.go deleted file mode 100644 index 36ee6507507..00000000000 --- a/vendor/google.golang.org/grpc/reflection/internal/internal.go +++ /dev/null @@ -1,436 +0,0 @@ -/* - * - * Copyright 2024 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package internal contains code that is shared by both the reflection package -// and the test package. The packages are split in this way in order to avoid -// a dependency on the deprecated package github.com/golang/protobuf. -package internal - -import ( - "io" - "sort" - - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/reflect/protodesc" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" - - v1reflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1" - v1reflectionpb "google.golang.org/grpc/reflection/grpc_reflection_v1" - v1alphareflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" - v1alphareflectionpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" -) - -// ServiceInfoProvider is an interface used to retrieve metadata about the -// services to expose. -type ServiceInfoProvider interface { - GetServiceInfo() map[string]grpc.ServiceInfo -} - -// ExtensionResolver is the interface used to query details about extensions. -// This interface is satisfied by protoregistry.GlobalTypes. -type ExtensionResolver interface { - protoregistry.ExtensionTypeResolver - RangeExtensionsByMessage(message protoreflect.FullName, f func(protoreflect.ExtensionType) bool) -} - -// ServerReflectionServer is the server API for ServerReflection service. -type ServerReflectionServer struct { - v1alphareflectiongrpc.UnimplementedServerReflectionServer - S ServiceInfoProvider - DescResolver protodesc.Resolver - ExtResolver ExtensionResolver -} - -// FileDescWithDependencies returns a slice of serialized fileDescriptors in -// wire format ([]byte). The fileDescriptors will include fd and all the -// transitive dependencies of fd with names not in sentFileDescriptors.
-func (s *ServerReflectionServer) FileDescWithDependencies(fd protoreflect.FileDescriptor, sentFileDescriptors map[string]bool) ([][]byte, error) { - if fd.IsPlaceholder() { - // If the given root file is a placeholder, treat it - // as missing instead of serializing it. - return nil, protoregistry.NotFound - } - var r [][]byte - queue := []protoreflect.FileDescriptor{fd} - for len(queue) > 0 { - currentfd := queue[0] - queue = queue[1:] - if currentfd.IsPlaceholder() { - // Skip any missing files in the dependency graph. - continue - } - if sent := sentFileDescriptors[currentfd.Path()]; len(r) == 0 || !sent { - sentFileDescriptors[currentfd.Path()] = true - fdProto := protodesc.ToFileDescriptorProto(currentfd) - currentfdEncoded, err := proto.Marshal(fdProto) - if err != nil { - return nil, err - } - r = append(r, currentfdEncoded) - } - for i := 0; i < currentfd.Imports().Len(); i++ { - queue = append(queue, currentfd.Imports().Get(i)) - } - } - return r, nil -} - -// FileDescEncodingContainingSymbol finds the file descriptor containing the -// given symbol, finds all of its previously unsent transitive dependencies, -// does marshalling on them, and returns the marshalled result. The given symbol -// can be a type, a service or a method. -func (s *ServerReflectionServer) FileDescEncodingContainingSymbol(name string, sentFileDescriptors map[string]bool) ([][]byte, error) { - d, err := s.DescResolver.FindDescriptorByName(protoreflect.FullName(name)) - if err != nil { - return nil, err - } - return s.FileDescWithDependencies(d.ParentFile(), sentFileDescriptors) -} - -// FileDescEncodingContainingExtension finds the file descriptor containing -// given extension, finds all of its previously unsent transitive dependencies, -// does marshalling on them, and returns the marshalled result. -func (s *ServerReflectionServer) FileDescEncodingContainingExtension(typeName string, extNum int32, sentFileDescriptors map[string]bool) ([][]byte, error) { - xt, err := s.ExtResolver.FindExtensionByNumber(protoreflect.FullName(typeName), protoreflect.FieldNumber(extNum)) - if err != nil { - return nil, err - } - return s.FileDescWithDependencies(xt.TypeDescriptor().ParentFile(), sentFileDescriptors) -} - -// AllExtensionNumbersForTypeName returns all extension numbers for the given type. -func (s *ServerReflectionServer) AllExtensionNumbersForTypeName(name string) ([]int32, error) { - var numbers []int32 - s.ExtResolver.RangeExtensionsByMessage(protoreflect.FullName(name), func(xt protoreflect.ExtensionType) bool { - numbers = append(numbers, int32(xt.TypeDescriptor().Number())) - return true - }) - sort.Slice(numbers, func(i, j int) bool { - return numbers[i] < numbers[j] - }) - if len(numbers) == 0 { - // maybe return an error if given type name is not known - if _, err := s.DescResolver.FindDescriptorByName(protoreflect.FullName(name)); err != nil { - return nil, err - } - } - return numbers, nil -} - -// ListServices returns the names of services this server exposes. -func (s *ServerReflectionServer) ListServices() []*v1reflectionpb.ServiceResponse { - serviceInfo := s.S.GetServiceInfo() - resp := make([]*v1reflectionpb.ServiceResponse, 0, len(serviceInfo)) - for svc := range serviceInfo { - resp = append(resp, &v1reflectionpb.ServiceResponse{Name: svc}) - } - sort.Slice(resp, func(i, j int) bool { - return resp[i].Name < resp[j].Name - }) - return resp -} - -// ServerReflectionInfo is the reflection service handler. 
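Note: the helpers above all reduce to the same building blocks — resolve a descriptor, walk up to its parent file, convert it to a FileDescriptorProto, and marshal it. A sketch of that path under the assumption that the symbol's package is linked into the binary (imports: log, fmt, protoregistry, protodesc, proto; "grpc.health.v1.Health" is only an example symbol):

	d, err := protoregistry.GlobalFiles.FindDescriptorByName("grpc.health.v1.Health")
	if err != nil {
		log.Fatal(err) // NotFound if the symbol is not registered in this binary
	}
	fdProto := protodesc.ToFileDescriptorProto(d.ParentFile())
	wire, err := proto.Marshal(fdProto)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s: %d descriptor bytes\n", fdProto.GetName(), len(wire))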
-func (s *ServerReflectionServer) ServerReflectionInfo(stream v1reflectiongrpc.ServerReflection_ServerReflectionInfoServer) error { - sentFileDescriptors := make(map[string]bool) - for { - in, err := stream.Recv() - if err == io.EOF { - return nil - } - if err != nil { - return err - } - - out := &v1reflectionpb.ServerReflectionResponse{ - ValidHost: in.Host, - OriginalRequest: in, - } - switch req := in.MessageRequest.(type) { - case *v1reflectionpb.ServerReflectionRequest_FileByFilename: - var b [][]byte - fd, err := s.DescResolver.FindFileByPath(req.FileByFilename) - if err == nil { - b, err = s.FileDescWithDependencies(fd, sentFileDescriptors) - } - if err != nil { - out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{ - ErrorResponse: &v1reflectionpb.ErrorResponse{ - ErrorCode: int32(codes.NotFound), - ErrorMessage: err.Error(), - }, - } - } else { - out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse{ - FileDescriptorResponse: &v1reflectionpb.FileDescriptorResponse{FileDescriptorProto: b}, - } - } - case *v1reflectionpb.ServerReflectionRequest_FileContainingSymbol: - b, err := s.FileDescEncodingContainingSymbol(req.FileContainingSymbol, sentFileDescriptors) - if err != nil { - out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{ - ErrorResponse: &v1reflectionpb.ErrorResponse{ - ErrorCode: int32(codes.NotFound), - ErrorMessage: err.Error(), - }, - } - } else { - out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse{ - FileDescriptorResponse: &v1reflectionpb.FileDescriptorResponse{FileDescriptorProto: b}, - } - } - case *v1reflectionpb.ServerReflectionRequest_FileContainingExtension: - typeName := req.FileContainingExtension.ContainingType - extNum := req.FileContainingExtension.ExtensionNumber - b, err := s.FileDescEncodingContainingExtension(typeName, extNum, sentFileDescriptors) - if err != nil { - out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{ - ErrorResponse: &v1reflectionpb.ErrorResponse{ - ErrorCode: int32(codes.NotFound), - ErrorMessage: err.Error(), - }, - } - } else { - out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse{ - FileDescriptorResponse: &v1reflectionpb.FileDescriptorResponse{FileDescriptorProto: b}, - } - } - case *v1reflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType: - extNums, err := s.AllExtensionNumbersForTypeName(req.AllExtensionNumbersOfType) - if err != nil { - out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{ - ErrorResponse: &v1reflectionpb.ErrorResponse{ - ErrorCode: int32(codes.NotFound), - ErrorMessage: err.Error(), - }, - } - } else { - out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_AllExtensionNumbersResponse{ - AllExtensionNumbersResponse: &v1reflectionpb.ExtensionNumberResponse{ - BaseTypeName: req.AllExtensionNumbersOfType, - ExtensionNumber: extNums, - }, - } - } - case *v1reflectionpb.ServerReflectionRequest_ListServices: - out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ListServicesResponse{ - ListServicesResponse: &v1reflectionpb.ListServiceResponse{ - Service: s.ListServices(), - }, - } - default: - return status.Errorf(codes.InvalidArgument, "invalid MessageRequest: %v", in.MessageRequest) - } - - if err := stream.Send(out); err != nil { - return err - } - } -} - -// V1ToV1AlphaResponse converts a v1 ServerReflectionResponse to a v1alpha. 
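Note: on the wire, each FileDescriptorResponse produced by the handler above carries serialized FileDescriptorProto messages from FileDescWithDependencies, which a client decodes one by one. Continuing the earlier client sketch (stream as above; imports proto and google.golang.org/protobuf/types/descriptorpb assumed):

	if err := stream.Send(&v1.ServerReflectionRequest{
		MessageRequest: &v1.ServerReflectionRequest_FileContainingSymbol{
			FileContainingSymbol: "grpc.health.v1.Health", // example symbol
		},
	}); err != nil {
		log.Fatal(err)
	}
	resp, err := stream.Recv()
	if err != nil {
		log.Fatal(err)
	}
	for _, raw := range resp.GetFileDescriptorResponse().GetFileDescriptorProto() {
		fd := &descriptorpb.FileDescriptorProto{}
		if err := proto.Unmarshal(raw, fd); err != nil {
			log.Fatal(err)
		}
		// The root file plus any not-yet-sent transitive dependencies.
		fmt.Println(fd.GetName())
	}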
-func V1ToV1AlphaResponse(v1 *v1reflectionpb.ServerReflectionResponse) *v1alphareflectionpb.ServerReflectionResponse { - var v1alpha v1alphareflectionpb.ServerReflectionResponse - v1alpha.ValidHost = v1.ValidHost - if v1.OriginalRequest != nil { - v1alpha.OriginalRequest = V1ToV1AlphaRequest(v1.OriginalRequest) - } - switch mr := v1.MessageResponse.(type) { - case *v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse: - if mr != nil { - v1alpha.MessageResponse = &v1alphareflectionpb.ServerReflectionResponse_FileDescriptorResponse{ - FileDescriptorResponse: &v1alphareflectionpb.FileDescriptorResponse{ - FileDescriptorProto: mr.FileDescriptorResponse.GetFileDescriptorProto(), - }, - } - } - case *v1reflectionpb.ServerReflectionResponse_AllExtensionNumbersResponse: - if mr != nil { - v1alpha.MessageResponse = &v1alphareflectionpb.ServerReflectionResponse_AllExtensionNumbersResponse{ - AllExtensionNumbersResponse: &v1alphareflectionpb.ExtensionNumberResponse{ - BaseTypeName: mr.AllExtensionNumbersResponse.GetBaseTypeName(), - ExtensionNumber: mr.AllExtensionNumbersResponse.GetExtensionNumber(), - }, - } - } - case *v1reflectionpb.ServerReflectionResponse_ListServicesResponse: - if mr != nil { - svcs := make([]*v1alphareflectionpb.ServiceResponse, len(mr.ListServicesResponse.GetService())) - for i, svc := range mr.ListServicesResponse.GetService() { - svcs[i] = &v1alphareflectionpb.ServiceResponse{ - Name: svc.GetName(), - } - } - v1alpha.MessageResponse = &v1alphareflectionpb.ServerReflectionResponse_ListServicesResponse{ - ListServicesResponse: &v1alphareflectionpb.ListServiceResponse{ - Service: svcs, - }, - } - } - case *v1reflectionpb.ServerReflectionResponse_ErrorResponse: - if mr != nil { - v1alpha.MessageResponse = &v1alphareflectionpb.ServerReflectionResponse_ErrorResponse{ - ErrorResponse: &v1alphareflectionpb.ErrorResponse{ - ErrorCode: mr.ErrorResponse.GetErrorCode(), - ErrorMessage: mr.ErrorResponse.GetErrorMessage(), - }, - } - } - default: - // no value set - } - return &v1alpha -} - -// V1AlphaToV1Request converts a v1alpha ServerReflectionRequest to a v1. 
-func V1AlphaToV1Request(v1alpha *v1alphareflectionpb.ServerReflectionRequest) *v1reflectionpb.ServerReflectionRequest { - var v1 v1reflectionpb.ServerReflectionRequest - v1.Host = v1alpha.Host - switch mr := v1alpha.MessageRequest.(type) { - case *v1alphareflectionpb.ServerReflectionRequest_FileByFilename: - v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_FileByFilename{ - FileByFilename: mr.FileByFilename, - } - case *v1alphareflectionpb.ServerReflectionRequest_FileContainingSymbol: - v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_FileContainingSymbol{ - FileContainingSymbol: mr.FileContainingSymbol, - } - case *v1alphareflectionpb.ServerReflectionRequest_FileContainingExtension: - if mr.FileContainingExtension != nil { - v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_FileContainingExtension{ - FileContainingExtension: &v1reflectionpb.ExtensionRequest{ - ContainingType: mr.FileContainingExtension.GetContainingType(), - ExtensionNumber: mr.FileContainingExtension.GetExtensionNumber(), - }, - } - } - case *v1alphareflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType: - v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType{ - AllExtensionNumbersOfType: mr.AllExtensionNumbersOfType, - } - case *v1alphareflectionpb.ServerReflectionRequest_ListServices: - v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_ListServices{ - ListServices: mr.ListServices, - } - default: - // no value set - } - return &v1 -} - -// V1ToV1AlphaRequest converts a v1 ServerReflectionRequest to a v1alpha. -func V1ToV1AlphaRequest(v1 *v1reflectionpb.ServerReflectionRequest) *v1alphareflectionpb.ServerReflectionRequest { - var v1alpha v1alphareflectionpb.ServerReflectionRequest - v1alpha.Host = v1.Host - switch mr := v1.MessageRequest.(type) { - case *v1reflectionpb.ServerReflectionRequest_FileByFilename: - if mr != nil { - v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_FileByFilename{ - FileByFilename: mr.FileByFilename, - } - } - case *v1reflectionpb.ServerReflectionRequest_FileContainingSymbol: - if mr != nil { - v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_FileContainingSymbol{ - FileContainingSymbol: mr.FileContainingSymbol, - } - } - case *v1reflectionpb.ServerReflectionRequest_FileContainingExtension: - if mr != nil { - v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_FileContainingExtension{ - FileContainingExtension: &v1alphareflectionpb.ExtensionRequest{ - ContainingType: mr.FileContainingExtension.GetContainingType(), - ExtensionNumber: mr.FileContainingExtension.GetExtensionNumber(), - }, - } - } - case *v1reflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType: - if mr != nil { - v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType{ - AllExtensionNumbersOfType: mr.AllExtensionNumbersOfType, - } - } - case *v1reflectionpb.ServerReflectionRequest_ListServices: - if mr != nil { - v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_ListServices{ - ListServices: mr.ListServices, - } - } - default: - // no value set - } - return &v1alpha -} - -// V1AlphaToV1Response converts a v1alpha ServerReflectionResponse to a v1. 
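Note: the v1 and v1alpha reflection messages are field-for-field identical — only the proto package name differs — which is why these converters are plain copies. Because the field numbers match, a wire round-trip would also work; a sketch of that alternative (not what the package does, and it pays for an extra marshal):

	func v1alphaToV1ViaWire(in *v1alphareflectionpb.ServerReflectionRequest) (*v1reflectionpb.ServerReflectionRequest, error) {
		b, err := proto.Marshal(in)
		if err != nil {
			return nil, err
		}
		// Matching field numbers let the v1 type decode v1alpha bytes.
		out := &v1reflectionpb.ServerReflectionRequest{}
		if err := proto.Unmarshal(b, out); err != nil {
			return nil, err
		}
		return out, nil
	}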
-func V1AlphaToV1Response(v1alpha *v1alphareflectionpb.ServerReflectionResponse) *v1reflectionpb.ServerReflectionResponse { - var v1 v1reflectionpb.ServerReflectionResponse - v1.ValidHost = v1alpha.ValidHost - if v1alpha.OriginalRequest != nil { - v1.OriginalRequest = V1AlphaToV1Request(v1alpha.OriginalRequest) - } - switch mr := v1alpha.MessageResponse.(type) { - case *v1alphareflectionpb.ServerReflectionResponse_FileDescriptorResponse: - if mr != nil { - v1.MessageResponse = &v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse{ - FileDescriptorResponse: &v1reflectionpb.FileDescriptorResponse{ - FileDescriptorProto: mr.FileDescriptorResponse.GetFileDescriptorProto(), - }, - } - } - case *v1alphareflectionpb.ServerReflectionResponse_AllExtensionNumbersResponse: - if mr != nil { - v1.MessageResponse = &v1reflectionpb.ServerReflectionResponse_AllExtensionNumbersResponse{ - AllExtensionNumbersResponse: &v1reflectionpb.ExtensionNumberResponse{ - BaseTypeName: mr.AllExtensionNumbersResponse.GetBaseTypeName(), - ExtensionNumber: mr.AllExtensionNumbersResponse.GetExtensionNumber(), - }, - } - } - case *v1alphareflectionpb.ServerReflectionResponse_ListServicesResponse: - if mr != nil { - svcs := make([]*v1reflectionpb.ServiceResponse, len(mr.ListServicesResponse.GetService())) - for i, svc := range mr.ListServicesResponse.GetService() { - svcs[i] = &v1reflectionpb.ServiceResponse{ - Name: svc.GetName(), - } - } - v1.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ListServicesResponse{ - ListServicesResponse: &v1reflectionpb.ListServiceResponse{ - Service: svcs, - }, - } - } - case *v1alphareflectionpb.ServerReflectionResponse_ErrorResponse: - if mr != nil { - v1.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{ - ErrorResponse: &v1reflectionpb.ErrorResponse{ - ErrorCode: mr.ErrorResponse.GetErrorCode(), - ErrorMessage: mr.ErrorResponse.GetErrorMessage(), - }, - } - } - default: - // no value set - } - return &v1 -} diff --git a/vendor/google.golang.org/grpc/reflection/serverreflection.go b/vendor/google.golang.org/grpc/reflection/serverreflection.go deleted file mode 100644 index 13a94e2dd2e..00000000000 --- a/vendor/google.golang.org/grpc/reflection/serverreflection.go +++ /dev/null @@ -1,160 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -/* -Package reflection implements server reflection service. - -The service implemented is defined in: -https://github.com/grpc/grpc/blob/master/src/proto/grpc/reflection/v1alpha/reflection.proto. - -To register server reflection on a gRPC server: - - import "google.golang.org/grpc/reflection" - - s := grpc.NewServer() - pb.RegisterYourOwnServer(s, &server{}) - - // Register reflection service on gRPC server. 
- reflection.Register(s) - - s.Serve(lis) -*/ -package reflection // import "google.golang.org/grpc/reflection" - -import ( - "google.golang.org/grpc" - "google.golang.org/grpc/reflection/internal" - "google.golang.org/protobuf/reflect/protodesc" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" - - v1reflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1" - v1alphareflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" -) - -// GRPCServer is the interface provided by a gRPC server. It is implemented by -// *grpc.Server, but could also be implemented by other concrete types. It acts -// as a registry, for accumulating the services exposed by the server. -type GRPCServer interface { - grpc.ServiceRegistrar - ServiceInfoProvider -} - -var _ GRPCServer = (*grpc.Server)(nil) - -// Register registers the server reflection service on the given gRPC server. -// Both the v1 and v1alpha versions are registered. -func Register(s GRPCServer) { - svr := NewServerV1(ServerOptions{Services: s}) - v1alphareflectiongrpc.RegisterServerReflectionServer(s, asV1Alpha(svr)) - v1reflectiongrpc.RegisterServerReflectionServer(s, svr) -} - -// RegisterV1 registers only the v1 version of the server reflection service -// on the given gRPC server. Many clients may only support v1alpha so most -// users should use Register instead, at least until clients have upgraded. -func RegisterV1(s GRPCServer) { - svr := NewServerV1(ServerOptions{Services: s}) - v1reflectiongrpc.RegisterServerReflectionServer(s, svr) -} - -// ServiceInfoProvider is an interface used to retrieve metadata about the -// services to expose. -// -// The reflection service is only interested in the service names, but the -// signature is this way so that *grpc.Server implements it. So it is okay -// for a custom implementation to return zero values for the -// grpc.ServiceInfo values in the map. -// -// # Experimental -// -// Notice: This type is EXPERIMENTAL and may be changed or removed in a -// later release. -type ServiceInfoProvider interface { - GetServiceInfo() map[string]grpc.ServiceInfo -} - -// ExtensionResolver is the interface used to query details about extensions. -// This interface is satisfied by protoregistry.GlobalTypes. -// -// # Experimental -// -// Notice: This type is EXPERIMENTAL and may be changed or removed in a -// later release. -type ExtensionResolver interface { - protoregistry.ExtensionTypeResolver - RangeExtensionsByMessage(message protoreflect.FullName, f func(protoreflect.ExtensionType) bool) -} - -// ServerOptions represents the options used to construct a reflection server. -// -// # Experimental -// -// Notice: This type is EXPERIMENTAL and may be changed or removed in a -// later release. -type ServerOptions struct { - // The source of advertised RPC services. If not specified, the reflection - // server will report an empty list when asked to list services. - // - // This value will typically be a *grpc.Server. But the set of advertised - // services can be customized by wrapping a *grpc.Server or using an - // alternate implementation that returns a custom set of service names. - Services ServiceInfoProvider - // Optional resolver used to load descriptors. If not specified, - // protoregistry.GlobalFiles will be used. - DescriptorResolver protodesc.Resolver - // Optional resolver used to query for known extensions. If not specified, - // protoregistry.GlobalTypes will be used. 
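Note: Register is the common path; ServerOptions exists so callers can substitute resolvers. A sketch that builds a v1-only reflection server with the defaults spelled out explicitly (equivalent to RegisterV1):

	s := grpc.NewServer()
	svr := reflection.NewServerV1(reflection.ServerOptions{
		Services:           s,
		DescriptorResolver: protoregistry.GlobalFiles, // the default
		ExtensionResolver:  protoregistry.GlobalTypes, // the default
	})
	v1reflectiongrpc.RegisterServerReflectionServer(s, svr)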
- ExtensionResolver ExtensionResolver -} - -// NewServer returns a reflection server implementation using the given options. -// This can be used to customize behavior of the reflection service. Most usages -// should prefer to use Register instead. For backwards compatibility reasons, -// this returns the v1alpha version of the reflection server. For a v1 version -// of the reflection server, see NewServerV1. -// -// # Experimental -// -// Notice: This function is EXPERIMENTAL and may be changed or removed in a -// later release. -func NewServer(opts ServerOptions) v1alphareflectiongrpc.ServerReflectionServer { - return asV1Alpha(NewServerV1(opts)) -} - -// NewServerV1 returns a reflection server implementation using the given options. -// This can be used to customize behavior of the reflection service. Most usages -// should prefer to use Register instead. -// -// # Experimental -// -// Notice: This function is EXPERIMENTAL and may be changed or removed in a -// later release. -func NewServerV1(opts ServerOptions) v1reflectiongrpc.ServerReflectionServer { - if opts.DescriptorResolver == nil { - opts.DescriptorResolver = protoregistry.GlobalFiles - } - if opts.ExtensionResolver == nil { - opts.ExtensionResolver = protoregistry.GlobalTypes - } - return &internal.ServerReflectionServer{ - S: opts.Services, - DescResolver: opts.DescriptorResolver, - ExtResolver: opts.ExtensionResolver, - } -} diff --git a/vendor/google.golang.org/grpc/regenerate.sh b/vendor/google.golang.org/grpc/regenerate.sh deleted file mode 100644 index 3edca296c22..00000000000 --- a/vendor/google.golang.org/grpc/regenerate.sh +++ /dev/null @@ -1,123 +0,0 @@ -#!/bin/bash -# Copyright 2020 gRPC authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -eu -o pipefail - -WORKDIR=$(mktemp -d) - -function finish { - rm -rf "$WORKDIR" -} -trap finish EXIT - -export GOBIN=${WORKDIR}/bin -export PATH=${GOBIN}:${PATH} -mkdir -p ${GOBIN} - -echo "remove existing generated files" -# grpc_testing_not_regenerate/*.pb.go is not re-generated, -# see grpc_testing_not_regenerate/README.md for details. -rm -f $(find . -name '*.pb.go' | grep -v 'grpc_testing_not_regenerate') - -echo "go install google.golang.org/protobuf/cmd/protoc-gen-go" -(cd test/tools && go install google.golang.org/protobuf/cmd/protoc-gen-go) - -echo "go install cmd/protoc-gen-go-grpc" -(cd cmd/protoc-gen-go-grpc && go install .) 
- -echo "git clone https://github.com/grpc/grpc-proto" -git clone --quiet https://github.com/grpc/grpc-proto ${WORKDIR}/grpc-proto - -echo "git clone https://github.com/protocolbuffers/protobuf" -git clone --quiet https://github.com/protocolbuffers/protobuf ${WORKDIR}/protobuf - -# Pull in code.proto as a proto dependency -mkdir -p ${WORKDIR}/googleapis/google/rpc -echo "curl https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto" -curl --silent https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto > ${WORKDIR}/googleapis/google/rpc/code.proto - -mkdir -p ${WORKDIR}/out - -# Generates sources without the embed requirement -LEGACY_SOURCES=( - ${WORKDIR}/grpc-proto/grpc/binlog/v1/binarylog.proto - ${WORKDIR}/grpc-proto/grpc/channelz/v1/channelz.proto - ${WORKDIR}/grpc-proto/grpc/health/v1/health.proto - ${WORKDIR}/grpc-proto/grpc/lb/v1/load_balancer.proto - profiling/proto/service.proto - ${WORKDIR}/grpc-proto/grpc/reflection/v1alpha/reflection.proto - ${WORKDIR}/grpc-proto/grpc/reflection/v1/reflection.proto -) - -# Generates only the new gRPC Service symbols -SOURCES=( - $(git ls-files --exclude-standard --cached --others "*.proto" | grep -v '^profiling/proto/service.proto$') - ${WORKDIR}/grpc-proto/grpc/gcp/altscontext.proto - ${WORKDIR}/grpc-proto/grpc/gcp/handshaker.proto - ${WORKDIR}/grpc-proto/grpc/gcp/transport_security_common.proto - ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls.proto - ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls_config.proto - ${WORKDIR}/grpc-proto/grpc/testing/*.proto - ${WORKDIR}/grpc-proto/grpc/core/*.proto -) - -# These options of the form 'Mfoo.proto=bar' instruct the codegen to use an -# import path of 'bar' in the generated code when 'foo.proto' is imported in -# one of the sources. -# -# Note that the protos listed here are all for testing purposes. All protos to -# be used externally should have a go_package option (and they don't need to be -# listed here). -OPTS=Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core,\ -Mgrpc/testing/benchmark_service.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/stats.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/report_qps_scenario_service.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/messages.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/worker_service.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/control.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/test.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/payloads.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/empty.proto=google.golang.org/grpc/interop/grpc_testing - -for src in ${SOURCES[@]}; do - echo "protoc ${src}" - protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS},use_generic_streams_experimental=true:${WORKDIR}/out \ - -I"." \ - -I${WORKDIR}/grpc-proto \ - -I${WORKDIR}/googleapis \ - -I${WORKDIR}/protobuf/src \ - ${src} -done - -for src in ${LEGACY_SOURCES[@]}; do - echo "protoc ${src}" - protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS},require_unimplemented_servers=false:${WORKDIR}/out \ - -I"." \ - -I${WORKDIR}/grpc-proto \ - -I${WORKDIR}/googleapis \ - -I${WORKDIR}/protobuf/src \ - ${src} -done - -# The go_package option in grpc/lookup/v1/rls.proto doesn't match the -# current location. Move it into the right place. 
-mkdir -p ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1 -mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1 - -# grpc_testing_not_regenerate/*.pb.go are not re-generated, -# see grpc_testing_not_regenerate/README.md for details. -rm ${WORKDIR}/out/google.golang.org/grpc/reflection/test/grpc_testing_not_regenerate/*.pb.go - -cp -R ${WORKDIR}/out/google.golang.org/grpc/* . diff --git a/vendor/google.golang.org/grpc/resolver/manual/manual.go b/vendor/google.golang.org/grpc/resolver/manual/manual.go index f2efa2a2cb5..09e864a89d3 100644 --- a/vendor/google.golang.org/grpc/resolver/manual/manual.go +++ b/vendor/google.golang.org/grpc/resolver/manual/manual.go @@ -76,9 +76,11 @@ func (r *Resolver) InitialState(s resolver.State) { // Build returns itself for Resolver, because it's both a builder and a resolver. func (r *Resolver) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { - r.BuildCallback(target, cc, opts) r.mu.Lock() defer r.mu.Unlock() + // Call BuildCallback after locking to avoid a race when UpdateState + // or ReportError is called before Build returns. + r.BuildCallback(target, cc, opts) r.CC = cc if r.lastSeenState != nil { err := r.CC.UpdateState(*r.lastSeenState) diff --git a/vendor/google.golang.org/grpc/resolver_wrapper.go b/vendor/google.golang.org/grpc/resolver_wrapper.go index c5fb45236fa..23bb3fb2582 100644 --- a/vendor/google.golang.org/grpc/resolver_wrapper.go +++ b/vendor/google.golang.org/grpc/resolver_wrapper.go @@ -66,7 +66,7 @@ func newCCResolverWrapper(cc *ClientConn) *ccResolverWrapper { // any newly created ccResolverWrapper, except that close may be called instead. func (ccr *ccResolverWrapper) start() error { errCh := make(chan error) - ccr.serializer.Schedule(func(ctx context.Context) { + ccr.serializer.TrySchedule(func(ctx context.Context) { if ctx.Err() != nil { return } @@ -85,7 +85,7 @@ func (ccr *ccResolverWrapper) start() error { } func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) { - ccr.serializer.Schedule(func(ctx context.Context) { + ccr.serializer.TrySchedule(func(ctx context.Context) { if ctx.Err() != nil || ccr.resolver == nil { return } @@ -102,7 +102,7 @@ func (ccr *ccResolverWrapper) close() { ccr.closed = true ccr.mu.Unlock() - ccr.serializer.Schedule(func(context.Context) { + ccr.serializer.TrySchedule(func(context.Context) { if ccr.resolver == nil { return } @@ -177,6 +177,9 @@ func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.P // addChannelzTraceEvent adds a channelz trace event containing the new // state received from resolver implementations. 
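Note: the manual-resolver reordering above matters because callers routinely push state from another goroutine while the channel is still building the resolver. Typical usage, with addresses as illustrative assumptions (imports: log, grpc, credentials/insecure, resolver, resolver/manual):

	r := manual.NewBuilderWithScheme("example")
	r.InitialState(resolver.State{
		Addresses: []resolver.Address{{Addr: "localhost:50051"}},
	})
	conn, err := grpc.NewClient("example:///ignored",
		grpc.WithResolvers(r),
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	// Pushing new state may race with Build; the locking change above
	// makes this safe even before the channel finishes building.
	r.UpdateState(resolver.State{
		Addresses: []resolver.Address{{Addr: "localhost:50052"}},
	})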
func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { + if !logger.V(0) && !channelz.IsOn() { + return + } var updates []string var oldSC, newSC *ServiceConfig var oldOK, newOK bool diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index fdd49e6e915..aba1ae3e678 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -19,7 +19,6 @@ package grpc import ( - "bytes" "compress/gzip" "context" "encoding/binary" @@ -35,6 +34,7 @@ import ( "google.golang.org/grpc/encoding" "google.golang.org/grpc/encoding/proto" "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" @@ -220,8 +220,8 @@ type HeaderCallOption struct { HeaderAddr *metadata.MD } -func (o HeaderCallOption) before(c *callInfo) error { return nil } -func (o HeaderCallOption) after(c *callInfo, attempt *csAttempt) { +func (o HeaderCallOption) before(*callInfo) error { return nil } +func (o HeaderCallOption) after(_ *callInfo, attempt *csAttempt) { *o.HeaderAddr, _ = attempt.s.Header() } @@ -242,8 +242,8 @@ type TrailerCallOption struct { TrailerAddr *metadata.MD } -func (o TrailerCallOption) before(c *callInfo) error { return nil } -func (o TrailerCallOption) after(c *callInfo, attempt *csAttempt) { +func (o TrailerCallOption) before(*callInfo) error { return nil } +func (o TrailerCallOption) after(_ *callInfo, attempt *csAttempt) { *o.TrailerAddr = attempt.s.Trailer() } @@ -264,24 +264,20 @@ type PeerCallOption struct { PeerAddr *peer.Peer } -func (o PeerCallOption) before(c *callInfo) error { return nil } -func (o PeerCallOption) after(c *callInfo, attempt *csAttempt) { +func (o PeerCallOption) before(*callInfo) error { return nil } +func (o PeerCallOption) after(_ *callInfo, attempt *csAttempt) { if x, ok := peer.FromContext(attempt.s.Context()); ok { *o.PeerAddr = *x } } -// WaitForReady configures the action to take when an RPC is attempted on broken -// connections or unreachable servers. If waitForReady is false and the -// connection is in the TRANSIENT_FAILURE state, the RPC will fail -// immediately. Otherwise, the RPC client will block the call until a -// connection is available (or the call is canceled or times out) and will -// retry the call if it fails due to a transient error. gRPC will not retry if -// data was written to the wire unless the server indicates it did not process -// the data. Please refer to -// https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md. +// WaitForReady configures the RPC's behavior when the client is in +// TRANSIENT_FAILURE, which occurs when all addresses fail to connect. If +// waitForReady is false, the RPC will fail immediately. Otherwise, the client +// will wait until a connection becomes available or the RPC's deadline is +// reached. // -// By default, RPCs don't "wait for ready". +// By default, RPCs do not "wait for ready". func WaitForReady(waitForReady bool) CallOption { return FailFastCallOption{FailFast: !waitForReady} } @@ -308,7 +304,7 @@ func (o FailFastCallOption) before(c *callInfo) error { c.failFast = o.FailFast return nil } -func (o FailFastCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o FailFastCallOption) after(*callInfo, *csAttempt) {} // OnFinish returns a CallOption that configures a callback to be called when // the call completes. 
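Note: per the rewritten WaitForReady doc comment, the option only changes behavior while the channel is in TRANSIENT_FAILURE. A hypothetical call site — client, req, and Check stand in for any generated stub and method:

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	// Blocks through TRANSIENT_FAILURE until a connection is ready or the
	// deadline expires, instead of failing immediately.
	resp, err := client.Check(ctx, req, grpc.WaitForReady(true))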
The error passed to the callback is the status of the @@ -343,7 +339,7 @@ func (o OnFinishCallOption) before(c *callInfo) error { return nil } -func (o OnFinishCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o OnFinishCallOption) after(*callInfo, *csAttempt) {} // MaxCallRecvMsgSize returns a CallOption which sets the maximum message size // in bytes the client can receive. If this is not set, gRPC uses the default @@ -367,7 +363,7 @@ func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error { c.maxReceiveMessageSize = &o.MaxRecvMsgSize return nil } -func (o MaxRecvMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o MaxRecvMsgSizeCallOption) after(*callInfo, *csAttempt) {} // MaxCallSendMsgSize returns a CallOption which sets the maximum message size // in bytes the client can send. If this is not set, gRPC uses the default @@ -391,7 +387,7 @@ func (o MaxSendMsgSizeCallOption) before(c *callInfo) error { c.maxSendMessageSize = &o.MaxSendMsgSize return nil } -func (o MaxSendMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o MaxSendMsgSizeCallOption) after(*callInfo, *csAttempt) {} // PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials // for a call. @@ -414,7 +410,7 @@ func (o PerRPCCredsCallOption) before(c *callInfo) error { c.creds = o.Creds return nil } -func (o PerRPCCredsCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o PerRPCCredsCallOption) after(*callInfo, *csAttempt) {} // UseCompressor returns a CallOption which sets the compressor used when // sending the request. If WithCompressor is also set, UseCompressor has @@ -442,7 +438,7 @@ func (o CompressorCallOption) before(c *callInfo) error { c.compressorType = o.CompressorType return nil } -func (o CompressorCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o CompressorCallOption) after(*callInfo, *csAttempt) {} // CallContentSubtype returns a CallOption that will set the content-subtype // for a call. For example, if content-subtype is "json", the Content-Type over @@ -479,7 +475,7 @@ func (o ContentSubtypeCallOption) before(c *callInfo) error { c.contentSubtype = o.ContentSubtype return nil } -func (o ContentSubtypeCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o ContentSubtypeCallOption) after(*callInfo, *csAttempt) {} // ForceCodec returns a CallOption that will set codec to be used for all // request and response messages for a call. The result of calling Name() will @@ -515,10 +511,50 @@ type ForceCodecCallOption struct { } func (o ForceCodecCallOption) before(c *callInfo) error { - c.codec = o.Codec + c.codec = newCodecV1Bridge(o.Codec) return nil } -func (o ForceCodecCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o ForceCodecCallOption) after(*callInfo, *csAttempt) {} + +// ForceCodecV2 returns a CallOption that will set codec to be used for all +// request and response messages for a call. The result of calling Name() will +// be used as the content-subtype after converting to lowercase, unless +// CallContentSubtype is also used. +// +// See Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. Also see the documentation on RegisterCodec and +// CallContentSubtype for more details on the interaction between Codec and +// content-subtype. +// +// This function is provided for advanced users; prefer to use only +// CallContentSubtype to select a registered codec instead. 
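Note: ForceCodecV2 takes an encoding.CodecV2, which marshals into mem.BufferSlice rather than []byte. A minimal JSON codec sketch — the type and name are illustrative, and the whole CodecV2 surface is marked experimental (imports: encoding/json, google.golang.org/grpc/mem):

	type jsonCodecV2 struct{}

	func (jsonCodecV2) Marshal(v any) (mem.BufferSlice, error) {
		b, err := json.Marshal(v)
		if err != nil {
			return nil, err
		}
		// SliceBuffer wraps an ordinary []byte without pooling.
		return mem.BufferSlice{mem.SliceBuffer(b)}, nil
	}

	func (jsonCodecV2) Unmarshal(data mem.BufferSlice, v any) error {
		// Materialize flattens the slice into one contiguous []byte.
		return json.Unmarshal(data.Materialize(), v)
	}

	func (jsonCodecV2) Name() string { return "json" }

	// Per-call usage:
	//   resp, err := client.Method(ctx, req, grpc.ForceCodecV2(jsonCodecV2{}))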
+// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ForceCodecV2(codec encoding.CodecV2) CallOption { + return ForceCodecV2CallOption{CodecV2: codec} +} + +// ForceCodecV2CallOption is a CallOption that indicates the codec used for +// marshaling messages. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ForceCodecV2CallOption struct { + CodecV2 encoding.CodecV2 +} + +func (o ForceCodecV2CallOption) before(c *callInfo) error { + c.codec = o.CodecV2 + return nil +} + +func (o ForceCodecV2CallOption) after(*callInfo, *csAttempt) {} // CallCustomCodec behaves like ForceCodec, but accepts a grpc.Codec instead of // an encoding.Codec. @@ -540,10 +576,10 @@ type CustomCodecCallOption struct { } func (o CustomCodecCallOption) before(c *callInfo) error { - c.codec = o.Codec + c.codec = newCodecV0Bridge(o.Codec) return nil } -func (o CustomCodecCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o CustomCodecCallOption) after(*callInfo, *csAttempt) {} // MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory // used for buffering this RPC's requests for retry purposes. @@ -571,7 +607,7 @@ func (o MaxRetryRPCBufferSizeCallOption) before(c *callInfo) error { c.maxRetryRPCBufferSize = o.MaxRetryRPCBufferSize return nil } -func (o MaxRetryRPCBufferSizeCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o MaxRetryRPCBufferSizeCallOption) after(*callInfo, *csAttempt) {} // The format of the payload: compressed or not? type payloadFormat uint8 @@ -581,19 +617,28 @@ const ( compressionMade payloadFormat = 1 // compressed ) +func (pf payloadFormat) isCompressed() bool { + return pf == compressionMade +} + +type streamReader interface { + ReadHeader(header []byte) error + Read(n int) (mem.BufferSlice, error) +} + // parser reads complete gRPC messages from the underlying reader. type parser struct { // r is the underlying reader. // See the comment on recvMsg for the permissible // error types. - r io.Reader + r streamReader // The header of a gRPC message. Find more detail at // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md header [5]byte - // recvBufferPool is the pool of shared receive buffers. - recvBufferPool SharedBufferPool + // bufferPool is the pool of shared receive buffers. + bufferPool mem.BufferPool } // recvMsg reads a complete gRPC message from the stream. @@ -608,14 +653,15 @@ type parser struct { // - an error from the status package // // No other error values or types must be returned, which also means -// that the underlying io.Reader must not return an incompatible +// that the underlying streamReader must not return an incompatible // error. -func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byte, err error) { - if _, err := p.r.Read(p.header[:]); err != nil { +func (p *parser) recvMsg(maxReceiveMessageSize int) (payloadFormat, mem.BufferSlice, error) { + err := p.r.ReadHeader(p.header[:]) + if err != nil { return 0, nil, err } - pf = payloadFormat(p.header[0]) + pf := payloadFormat(p.header[0]) length := binary.BigEndian.Uint32(p.header[1:]) if length == 0 { @@ -627,20 +673,21 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byt if int(length) > maxReceiveMessageSize { return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. 
%d)", length, maxReceiveMessageSize) } - msg = p.recvBufferPool.Get(int(length)) - if _, err := p.r.Read(msg); err != nil { + + data, err := p.r.Read(int(length)) + if err != nil { if err == io.EOF { err = io.ErrUnexpectedEOF } return 0, nil, err } - return pf, msg, nil + return pf, data, nil } // encode serializes msg and returns a buffer containing the message, or an // error if it is too large to be transmitted by grpc. If msg is nil, it // generates an empty message. -func encode(c baseCodec, msg any) ([]byte, error) { +func encode(c baseCodec, msg any) (mem.BufferSlice, error) { if msg == nil { // NOTE: typed nils will not be caught by this check return nil, nil } @@ -648,7 +695,8 @@ func encode(c baseCodec, msg any) ([]byte, error) { if err != nil { return nil, status.Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error()) } - if uint(len(b)) > math.MaxUint32 { + if uint(b.Len()) > math.MaxUint32 { + b.Free() return nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b)) } return b, nil @@ -659,34 +707,41 @@ func encode(c baseCodec, msg any) ([]byte, error) { // indicating no compression was done. // // TODO(dfawley): eliminate cp parameter by wrapping Compressor in an encoding.Compressor. -func compress(in []byte, cp Compressor, compressor encoding.Compressor) ([]byte, error) { - if compressor == nil && cp == nil { - return nil, nil - } - if len(in) == 0 { - return nil, nil +func compress(in mem.BufferSlice, cp Compressor, compressor encoding.Compressor, pool mem.BufferPool) (mem.BufferSlice, payloadFormat, error) { + if (compressor == nil && cp == nil) || in.Len() == 0 { + return nil, compressionNone, nil } + var out mem.BufferSlice + w := mem.NewWriter(&out, pool) wrapErr := func(err error) error { + out.Free() return status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error()) } - cbuf := &bytes.Buffer{} if compressor != nil { - z, err := compressor.Compress(cbuf) + z, err := compressor.Compress(w) if err != nil { - return nil, wrapErr(err) + return nil, 0, wrapErr(err) } - if _, err := z.Write(in); err != nil { - return nil, wrapErr(err) + for _, b := range in { + if _, err := z.Write(b.ReadOnlyData()); err != nil { + return nil, 0, wrapErr(err) + } } if err := z.Close(); err != nil { - return nil, wrapErr(err) + return nil, 0, wrapErr(err) } } else { - if err := cp.Do(cbuf, in); err != nil { - return nil, wrapErr(err) + // This is obviously really inefficient since it fully materializes the data, but + // there is no way around this with the old Compressor API. At least it attempts + // to return the buffer to the provider, in the hopes it can be reused (maybe + // even by a subsequent call to this very function). + buf := in.MaterializeToBuffer(pool) + defer buf.Free() + if err := cp.Do(w, buf.ReadOnlyData()); err != nil { + return nil, 0, wrapErr(err) } } - return cbuf.Bytes(), nil + return out, compressionMade, nil } const ( @@ -697,33 +752,36 @@ const ( // msgHeader returns a 5-byte header for the message being transmitted and the // payload, which is compData if non-nil or data otherwise. 
-func msgHeader(data, compData []byte) (hdr []byte, payload []byte) { +func msgHeader(data, compData mem.BufferSlice, pf payloadFormat) (hdr []byte, payload mem.BufferSlice) { hdr = make([]byte, headerLen) - if compData != nil { - hdr[0] = byte(compressionMade) - data = compData + hdr[0] = byte(pf) + + var length uint32 + if pf.isCompressed() { + length = uint32(compData.Len()) + payload = compData } else { - hdr[0] = byte(compressionNone) + length = uint32(data.Len()) + payload = data } // Write length of payload into buf - binary.BigEndian.PutUint32(hdr[payloadLen:], uint32(len(data))) - return hdr, data + binary.BigEndian.PutUint32(hdr[payloadLen:], length) + return hdr, payload } -func outPayload(client bool, msg any, data, payload []byte, t time.Time) *stats.OutPayload { +func outPayload(client bool, msg any, dataLength, payloadLength int, t time.Time) *stats.OutPayload { return &stats.OutPayload{ Client: client, Payload: msg, - Data: data, - Length: len(data), - WireLength: len(payload) + headerLen, - CompressedLength: len(payload), + Length: dataLength, + WireLength: payloadLength + headerLen, + CompressedLength: payloadLength, SentTime: t, } } -func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool) *status.Status { +func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool, isServer bool) *status.Status { switch pf { case compressionNone: case compressionMade: @@ -731,7 +789,10 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool return status.New(codes.Internal, "grpc: compressed flag set with identity or empty encoding") } if !haveCompressor { - return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) + if isServer { + return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) + } + return status.Newf(codes.Internal, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) } default: return status.Newf(codes.Internal, "grpc: received unexpected payload format %d", pf) @@ -741,104 +802,129 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool type payloadInfo struct { compressedLength int // The compressed length got from wire. - uncompressedBytes []byte + uncompressedBytes mem.BufferSlice +} + +func (p *payloadInfo) free() { + if p != nil && p.uncompressedBytes != nil { + p.uncompressedBytes.Free() + } } // recvAndDecompress reads a message from the stream, decompressing it if necessary. // // Cancelling the returned cancel function releases the buffer back to the pool. So the caller should cancel as soon as // the buffer is no longer needed. -func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, -) (uncompressedBuf []byte, cancel func(), err error) { - pf, compressedBuf, err := p.recvMsg(maxReceiveMessageSize) +// TODO: Refactor this function to reduce the number of arguments. 
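Note: payloadInfo now owns a mem.BufferSlice rather than a plain []byte, so every holder must balance its own Ref/Free. A sketch of that contract, using mem.NewBuffer with a nil pool as recvAndDecompress does:

	b := []byte("payload")
	buf := mem.NewBuffer(&b, nil) // one reference; nil pool just drops the slice on free
	bs := mem.BufferSlice{buf}
	bs.Ref()  // a second holder (e.g. payInfo.uncompressedBytes) takes its own reference
	bs.Free() // first holder done; the data stays alive
	bs.Free() // last reference released; the buffer is freed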
+// See: https://google.github.io/styleguide/go/best-practices.html#function-argument-lists +func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool, +) (out mem.BufferSlice, err error) { + pf, compressed, err := p.recvMsg(maxReceiveMessageSize) if err != nil { - return nil, nil, err + return nil, err } - if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil { - return nil, nil, st.Err() + compressedLength := compressed.Len() + + if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil, isServer); st != nil { + compressed.Free() + return nil, st.Err() } var size int - if pf == compressionMade { + if pf.isCompressed() { + defer compressed.Free() + // To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor, // use this decompressor as the default. if dc != nil { - uncompressedBuf, err = dc.Do(bytes.NewReader(compressedBuf)) + var uncompressedBuf []byte + uncompressedBuf, err = dc.Do(compressed.Reader()) + if err == nil { + out = mem.BufferSlice{mem.NewBuffer(&uncompressedBuf, nil)} + } size = len(uncompressedBuf) } else { - uncompressedBuf, size, err = decompress(compressor, compressedBuf, maxReceiveMessageSize) + out, size, err = decompress(compressor, compressed, maxReceiveMessageSize, p.bufferPool) } if err != nil { - return nil, nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err) + return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err) } if size > maxReceiveMessageSize { + out.Free() // TODO: Revisit the error code. Currently keep it consistent with java // implementation. - return nil, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize) + return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize) } } else { - uncompressedBuf = compressedBuf + out = compressed } if payInfo != nil { - payInfo.compressedLength = len(compressedBuf) - payInfo.uncompressedBytes = uncompressedBuf - - cancel = func() {} - } else { - cancel = func() { - p.recvBufferPool.Put(&compressedBuf) - } + payInfo.compressedLength = compressedLength + out.Ref() + payInfo.uncompressedBytes = out } - return uncompressedBuf, cancel, nil + return out, nil } // Using compressor, decompress d, returning data and size. // Optionally, if data will be over maxReceiveMessageSize, just return the size. -func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize int) ([]byte, int, error) { - dcReader, err := compressor.Decompress(bytes.NewReader(d)) +func decompress(compressor encoding.Compressor, d mem.BufferSlice, maxReceiveMessageSize int, pool mem.BufferPool) (mem.BufferSlice, int, error) { + dcReader, err := compressor.Decompress(d.Reader()) if err != nil { return nil, 0, err } - if sizer, ok := compressor.(interface { - DecompressedSize(compressedBytes []byte) int - }); ok { - if size := sizer.DecompressedSize(d); size >= 0 { - if size > maxReceiveMessageSize { - return nil, size, nil - } - // size is used as an estimate to size the buffer, but we - // will read more data if available. - // +MinRead so ReadFrom will not reallocate if size is correct. 
- // - // TODO: If we ensure that the buffer size is the same as the DecompressedSize, - // we can also utilize the recv buffer pool here. - buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead)) - bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) - return buf.Bytes(), int(bytesRead), err - } + + // TODO: Can/should this still be preserved with the new BufferSlice API? Are + // there any actual benefits to allocating a single large buffer instead of + // multiple smaller ones? + //if sizer, ok := compressor.(interface { + // DecompressedSize(compressedBytes []byte) int + //}); ok { + // if size := sizer.DecompressedSize(d); size >= 0 { + // if size > maxReceiveMessageSize { + // return nil, size, nil + // } + // // size is used as an estimate to size the buffer, but we + // // will read more data if available. + // // +MinRead so ReadFrom will not reallocate if size is correct. + // // + // // TODO: If we ensure that the buffer size is the same as the DecompressedSize, + // // we can also utilize the recv buffer pool here. + // buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead)) + // bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) + // return buf.Bytes(), int(bytesRead), err + // } + //} + + var out mem.BufferSlice + _, err = io.Copy(mem.NewWriter(&out, pool), io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) + if err != nil { + out.Free() + return nil, 0, err } - // Read from LimitReader with limit max+1. So if the underlying - // reader is over limit, the result will be bigger than max. - d, err = io.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) - return d, len(d), err + return out, out.Len(), nil } // For the two compressor parameters, both should not be set, but if they are, // dc takes precedence over compressor. // TODO(dfawley): wrap the old compressor/decompressor using the new API? -func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error { - buf, cancel, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor) +func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool) error { + data, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor, isServer) if err != nil { return err } - defer cancel() - if err := c.Unmarshal(buf, m); err != nil { + // If the codec wants its own reference to the data, it can get it. Otherwise, always + // free the buffers. + defer data.Free() + + if err := c.Unmarshal(data, m); err != nil { return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message: %v", err) } + return nil } @@ -941,7 +1027,7 @@ func setCallInfoCodec(c *callInfo) error { // encoding.Codec (Name vs. String method name). We only support // setting content subtype from encoding.Codec to avoid a behavior // change with the deprecated version. - if ec, ok := c.codec.(encoding.Codec); ok { + if ec, ok := c.codec.(encoding.CodecV2); ok { c.contentSubtype = strings.ToLower(ec.Name()) } } @@ -950,12 +1036,12 @@ func setCallInfoCodec(c *callInfo) error { if c.contentSubtype == "" { // No codec specified in CallOptions; use proto by default. 
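Note: the rewritten decompress streams through mem.NewWriter into pooled buffers, and the io.LimitReader bound of max+1 is what lets the caller prove a message exceeds the limit without materializing all of it. The shape of that logic as a standalone sketch (imports: io, google.golang.org/grpc/mem):

	func boundedDecompress(r io.Reader, max int, pool mem.BufferPool) (mem.BufferSlice, int, error) {
		var out mem.BufferSlice
		// Read at most max+1 bytes: a count above max proves the
		// decompressed message is over the limit.
		n, err := io.Copy(mem.NewWriter(&out, pool), io.LimitReader(r, int64(max)+1))
		if err != nil {
			out.Free()
			return nil, 0, err
		}
		return out, int(n), nil
	}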
- c.codec = encoding.GetCodec(proto.Name) + c.codec = getCodec(proto.Name) return nil } // c.contentSubtype is already lowercased in CallContentSubtype - c.codec = encoding.GetCodec(c.contentSubtype) + c.codec = getCodec(c.contentSubtype) if c.codec == nil { return status.Errorf(codes.Internal, "no codec registered for content-subtype %s", c.contentSubtype) } diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index 89f8e4792bf..d1e1415a40f 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -45,6 +45,7 @@ import ( "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" @@ -80,7 +81,7 @@ func init() { } internal.BinaryLogger = binaryLogger internal.JoinServerOptions = newJoinServerOption - internal.RecvBufferPool = recvBufferPool + internal.BufferPool = bufferPool } var statusOK = status.New(codes.OK, "") @@ -170,7 +171,7 @@ type serverOptions struct { maxHeaderListSize *uint32 headerTableSize *uint32 numServerWorkers uint32 - recvBufferPool SharedBufferPool + bufferPool mem.BufferPool waitForHandlers bool } @@ -181,7 +182,7 @@ var defaultServerOptions = serverOptions{ connectionTimeout: 120 * time.Second, writeBufferSize: defaultWriteBufSize, readBufferSize: defaultReadBufSize, - recvBufferPool: nopBufferPool{}, + bufferPool: mem.DefaultBufferPool(), } var globalServerOptions []ServerOption @@ -313,7 +314,7 @@ func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption { // Will be supported throughout 1.x. func CustomCodec(codec Codec) ServerOption { return newFuncServerOption(func(o *serverOptions) { - o.codec = codec + o.codec = newCodecV0Bridge(codec) }) } @@ -342,7 +343,22 @@ func CustomCodec(codec Codec) ServerOption { // later release. func ForceServerCodec(codec encoding.Codec) ServerOption { return newFuncServerOption(func(o *serverOptions) { - o.codec = codec + o.codec = newCodecV1Bridge(codec) + }) +} + +// ForceServerCodecV2 is the equivalent of ForceServerCodec, but for the new +// CodecV2 interface. +// +// Will be supported throughout 1.x. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ForceServerCodecV2(codecV2 encoding.CodecV2) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.codec = codecV2 }) } @@ -592,26 +608,9 @@ func WaitForHandlers(w bool) ServerOption { }) } -// RecvBufferPool returns a ServerOption that configures the server -// to use the provided shared buffer pool for parsing incoming messages. Depending -// on the application's workload, this could result in reduced memory allocation. -// -// If you are unsure about how to implement a memory pool but want to utilize one, -// begin with grpc.NewSharedBufferPool. -// -// Note: The shared buffer pool feature will not be active if any of the following -// options are used: StatsHandler, EnableTracing, or binary logging. In such -// cases, the shared buffer pool will be ignored. -// -// Deprecated: use experimental.WithRecvBufferPool instead. Will be deleted in -// v1.60.0 or later. 
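Note: ForceServerCodecV2 is the server-side twin of the ForceCodecV2 call option, and the v0/v1 bridges above mean CustomCodec and ForceServerCodec now funnel into the same CodecV2 path internally. Usage with the hypothetical jsonCodecV2 sketched earlier:

	s := grpc.NewServer(
		grpc.ForceServerCodecV2(jsonCodecV2{}), // experimental, per the doc comment above
	)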
@@ -592,26 +608,9 @@ func WaitForHandlers(w bool) ServerOption {
 	})
 }
 
-// RecvBufferPool returns a ServerOption that configures the server
-// to use the provided shared buffer pool for parsing incoming messages. Depending
-// on the application's workload, this could result in reduced memory allocation.
-//
-// If you are unsure about how to implement a memory pool but want to utilize one,
-// begin with grpc.NewSharedBufferPool.
-//
-// Note: The shared buffer pool feature will not be active if any of the following
-// options are used: StatsHandler, EnableTracing, or binary logging. In such
-// cases, the shared buffer pool will be ignored.
-//
-// Deprecated: use experimental.WithRecvBufferPool instead. Will be deleted in
-// v1.60.0 or later.
-func RecvBufferPool(bufferPool SharedBufferPool) ServerOption {
-	return recvBufferPool(bufferPool)
-}
-
-func recvBufferPool(bufferPool SharedBufferPool) ServerOption {
+func bufferPool(bufferPool mem.BufferPool) ServerOption {
 	return newFuncServerOption(func(o *serverOptions) {
-		o.recvBufferPool = bufferPool
+		o.bufferPool = bufferPool
 	})
 }
 
@@ -622,7 +621,7 @@ func recvBufferPool(bufferPool SharedBufferPool) ServerOption {
 // workload (assuming a QPS of a few thousand requests/sec).
 const serverWorkerResetThreshold = 1 << 16
 
-// serverWorkers blocks on a *transport.Stream channel forever and waits for
+// serverWorker blocks on a *transport.Stream channel forever and waits for
 // data to be fed by serveStreams. This allows multiple requests to be
 // processed by the same goroutine, removing the need for expensive stack
 // re-allocations (see the runtime.morestack problem [1]).
@@ -980,6 +979,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport {
 		ChannelzParent:    s.channelz,
 		MaxHeaderListSize: s.opts.maxHeaderListSize,
 		HeaderTableSize:   s.opts.headerTableSize,
+		BufferPool:        s.opts.bufferPool,
 	}
 	st, err := transport.NewServerTransport(c, config)
 	if err != nil {
@@ -1072,7 +1072,7 @@ var _ http.Handler = (*Server)(nil)
 // Notice: This API is EXPERIMENTAL and may be changed or removed in a
 // later release.
 func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
-	st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers)
+	st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers, s.opts.bufferPool)
 	if err != nil {
 		// Errors returned from transport.NewServerHandlerTransport have
 		// already been written to w.
@@ -1142,20 +1142,35 @@ func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport,
 		channelz.Error(logger, s.channelz, "grpc: server failed to encode response: ", err)
 		return err
 	}
-	compData, err := compress(data, cp, comp)
+
+	compData, pf, err := compress(data, cp, comp, s.opts.bufferPool)
 	if err != nil {
+		data.Free()
 		channelz.Error(logger, s.channelz, "grpc: server failed to compress response: ", err)
 		return err
 	}
-	hdr, payload := msgHeader(data, compData)
+
+	hdr, payload := msgHeader(data, compData, pf)
+
+	defer func() {
+		compData.Free()
+		data.Free()
+		// payload does not need to be freed here, it is either data or compData, both of
+		// which are already freed.
+	}()
+
+	dataLen := data.Len()
+	payloadLen := payload.Len()
 	// TODO(dfawley): should we be checking len(data) instead?
-	if len(payload) > s.opts.maxSendMessageSize {
-		return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(payload), s.opts.maxSendMessageSize)
+	if payloadLen > s.opts.maxSendMessageSize {
+		return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", payloadLen, s.opts.maxSendMessageSize)
 	}
 	err = t.Write(stream, hdr, payload, opts)
 	if err == nil {
-		for _, sh := range s.opts.statsHandlers {
-			sh.HandleRPC(ctx, outPayload(false, msg, data, payload, time.Now()))
+		if len(s.opts.statsHandlers) != 0 {
+			for _, sh := range s.opts.statsHandlers {
+				sh.HandleRPC(ctx, outPayload(false, msg, dataLen, payloadLen, time.Now()))
+			}
 		}
 	}
 	return err
@@ -1334,37 +1349,37 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor
 	var payInfo *payloadInfo
 	if len(shs) != 0 || len(binlogs) != 0 {
 		payInfo = &payloadInfo{}
+		defer payInfo.free()
 	}
-	d, cancel, err := recvAndDecompress(&parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp)
+
+	d, err := recvAndDecompress(&parser{r: stream, bufferPool: s.opts.bufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp, true)
 	if err != nil {
 		if e := t.WriteStatus(stream, status.Convert(err)); e != nil {
 			channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e)
 		}
 		return err
 	}
+	defer d.Free()
 	if channelz.IsOn() {
 		t.IncrMsgRecv()
 	}
 	df := func(v any) error {
-		defer cancel()
-
 		if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil {
 			return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err)
 		}
+
 		for _, sh := range shs {
 			sh.HandleRPC(ctx, &stats.InPayload{
 				RecvTime:         time.Now(),
 				Payload:          v,
-				Length:           len(d),
+				Length:           d.Len(),
 				WireLength:       payInfo.compressedLength + headerLen,
 				CompressedLength: payInfo.compressedLength,
-				Data:             d,
 			})
 		}
 		if len(binlogs) != 0 {
 			cm := &binarylog.ClientMessage{
-				Message: d,
+				Message: d.Materialize(),
 			}
 			for _, binlog := range binlogs {
 				binlog.Log(ctx, cm)
@@ -1548,7 +1563,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran
 		ctx:                   ctx,
 		t:                     t,
 		s:                     stream,
-		p:                     &parser{r: stream, recvBufferPool: s.opts.recvBufferPool},
+		p:                     &parser{r: stream, bufferPool: s.opts.bufferPool},
 		codec:                 s.getCodec(stream.ContentSubtype()),
 		maxReceiveMessageSize: s.opts.maxReceiveMessageSize,
 		maxSendMessageSize:    s.opts.maxSendMessageSize,
@@ -1963,12 +1978,12 @@ func (s *Server) getCodec(contentSubtype string) baseCodec {
 		return s.opts.codec
 	}
 	if contentSubtype == "" {
-		return encoding.GetCodec(proto.Name)
+		return getCodec(proto.Name)
 	}
-	codec := encoding.GetCodec(contentSubtype)
+	codec := getCodec(contentSubtype)
 	if codec == nil {
 		logger.Warningf("Unsupported codec %q. Defaulting to %q for now. This will start to fail in future releases.", contentSubtype, proto.Name)
-		return encoding.GetCodec(proto.Name)
+		return getCodec(proto.Name)
 	}
 	return codec
 }
diff --git a/vendor/google.golang.org/grpc/shared_buffer_pool.go b/vendor/google.golang.org/grpc/shared_buffer_pool.go
deleted file mode 100644
index 48a64cfe8e2..00000000000
--- a/vendor/google.golang.org/grpc/shared_buffer_pool.go
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- *
- * Copyright 2023 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import "sync"
-
-// SharedBufferPool is a pool of buffers that can be shared, resulting in
-// decreased memory allocation. Currently, in gRPC-go, it is only utilized
-// for parsing incoming messages.
-//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a
-// later release.
-type SharedBufferPool interface {
-	// Get returns a buffer with specified length from the pool.
-	//
-	// The returned byte slice may be not zero initialized.
-	Get(length int) []byte
-
-	// Put returns a buffer to the pool.
-	Put(*[]byte)
-}
-
-// NewSharedBufferPool creates a simple SharedBufferPool with buckets
-// of different sizes to optimize memory usage. This prevents the pool from
-// wasting large amounts of memory, even when handling messages of varying sizes.
-//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a
-// later release.
-func NewSharedBufferPool() SharedBufferPool {
-	return &simpleSharedBufferPool{
-		pools: [poolArraySize]simpleSharedBufferChildPool{
-			newBytesPool(level0PoolMaxSize),
-			newBytesPool(level1PoolMaxSize),
-			newBytesPool(level2PoolMaxSize),
-			newBytesPool(level3PoolMaxSize),
-			newBytesPool(level4PoolMaxSize),
-			newBytesPool(0),
-		},
-	}
-}
-
-// simpleSharedBufferPool is a simple implementation of SharedBufferPool.
-type simpleSharedBufferPool struct {
-	pools [poolArraySize]simpleSharedBufferChildPool
-}
-
-func (p *simpleSharedBufferPool) Get(size int) []byte {
-	return p.pools[p.poolIdx(size)].Get(size)
-}
-
-func (p *simpleSharedBufferPool) Put(bs *[]byte) {
-	p.pools[p.poolIdx(cap(*bs))].Put(bs)
-}
-
-func (p *simpleSharedBufferPool) poolIdx(size int) int {
-	switch {
-	case size <= level0PoolMaxSize:
-		return level0PoolIdx
-	case size <= level1PoolMaxSize:
-		return level1PoolIdx
-	case size <= level2PoolMaxSize:
-		return level2PoolIdx
-	case size <= level3PoolMaxSize:
-		return level3PoolIdx
-	case size <= level4PoolMaxSize:
-		return level4PoolIdx
-	default:
-		return levelMaxPoolIdx
-	}
-}
-
-const (
-	level0PoolMaxSize = 16                     // 16 B
-	level1PoolMaxSize = level0PoolMaxSize * 16 // 256 B
-	level2PoolMaxSize = level1PoolMaxSize * 16 // 4 KB
-	level3PoolMaxSize = level2PoolMaxSize * 16 // 64 KB
-	level4PoolMaxSize = level3PoolMaxSize * 16 // 1 MB
-)
-
-const (
-	level0PoolIdx = iota
-	level1PoolIdx
-	level2PoolIdx
-	level3PoolIdx
-	level4PoolIdx
-	levelMaxPoolIdx
-	poolArraySize
-)
-
-type simpleSharedBufferChildPool interface {
-	Get(size int) []byte
-	Put(any)
-}
-
-type bufferPool struct {
-	sync.Pool
-
-	defaultSize int
-}
-
-func (p *bufferPool) Get(size int) []byte {
-	bs := p.Pool.Get().(*[]byte)
-
-	if cap(*bs) < size {
-		p.Pool.Put(bs)
-
-		return make([]byte, size)
-	}
-
-	return (*bs)[:size]
-}
-
-func newBytesPool(size int) simpleSharedBufferChildPool {
-	return &bufferPool{
-		Pool: sync.Pool{
-			New: func() any {
-				bs := make([]byte, size)
-				return &bs
-			},
-		},
-		defaultSize: size,
-	}
-}
-
-// nopBufferPool is a buffer pool just makes new buffer without pooling.
-type nopBufferPool struct {
-}
-
-func (nopBufferPool) Get(length int) []byte {
-	return make([]byte, length)
-}
-
-func (nopBufferPool) Put(*[]byte) {
-}
diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go
index fdb0bd65182..71195c4943d 100644
--- a/vendor/google.golang.org/grpc/stats/stats.go
+++ b/vendor/google.golang.org/grpc/stats/stats.go
@@ -77,9 +77,6 @@ type InPayload struct {
 	// the call to HandleRPC which provides the InPayload returns and must be
 	// copied if needed later.
 	Payload any
-	// Data is the serialized message payload.
-	// Deprecated: Data will be removed in the next release.
-	Data []byte
 
 	// Length is the size of the uncompressed payload data. Does not include any
 	// framing (gRPC or HTTP/2).
@@ -150,9 +147,6 @@ type OutPayload struct {
 	// the call to HandleRPC which provides the OutPayload returns and must be
 	// copied if needed later.
 	Payload any
-	// Data is the serialized message payload.
-	// Deprecated: Data will be removed in the next release.
-	Data []byte
 	// Length is the size of the uncompressed payload data. Does not include any
 	// framing (gRPC or HTTP/2).
 	Length int
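The deleted SharedBufferPool above bucketed buffers into six size classes (16 B up to 1 MB, plus a catch-all) so a returned 1 MB buffer never sits in the bucket that serves 16 B requests. A condensed sketch of that tiering idea over sync.Pool (tieredPool and its five classes are illustrative; the removed code additionally kept a per-bucket default size and a zero-sized fallback bucket):

package bufferpool

import "sync"

// Size classes matching the constants in the deleted file: 16 B, 256 B,
// 4 KB, 64 KB, 1 MB. Anything larger bypasses pooling entirely.
var classes = [5]int{16, 256, 4 << 10, 64 << 10, 1 << 20}

// tieredPool keeps one sync.Pool per size class so that, e.g., a returned
// 1 MB buffer is never handed out to a caller who asked for 16 bytes.
type tieredPool struct {
	pools [5]sync.Pool
}

func (p *tieredPool) class(n int) int {
	for i, max := range classes {
		if n <= max {
			return i
		}
	}
	return -1 // over the largest class
}

func (p *tieredPool) Get(n int) []byte {
	i := p.class(n)
	if i < 0 {
		return make([]byte, n) // too big to pool
	}
	if bs, ok := p.pools[i].Get().(*[]byte); ok && cap(*bs) >= n {
		return (*bs)[:n]
	}
	// Allocate with the class capacity so the buffer is reusable for any
	// request that maps to this bucket.
	return make([]byte, n, classes[i])
}

func (p *tieredPool) Put(bs *[]byte) {
	if i := p.class(cap(*bs)); i >= 0 {
		p.pools[i].Put(bs)
	}
}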
diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go
index 8051ef5b514..bb2b2a216ce 100644
--- a/vendor/google.golang.org/grpc/stream.go
+++ b/vendor/google.golang.org/grpc/stream.go
@@ -41,6 +41,7 @@ import (
 	"google.golang.org/grpc/internal/serviceconfig"
 	istatus "google.golang.org/grpc/internal/status"
 	"google.golang.org/grpc/internal/transport"
+	"google.golang.org/grpc/mem"
 	"google.golang.org/grpc/metadata"
 	"google.golang.org/grpc/peer"
 	"google.golang.org/grpc/stats"
@@ -359,7 +360,7 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client
 		cs.attempt = a
 		return nil
 	}
-	if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }); err != nil {
+	if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op, nil) }); err != nil {
 		return nil, err
 	}
 
@@ -517,7 +518,7 @@ func (a *csAttempt) newStream() error {
 	}
 	a.s = s
 	a.ctx = s.Context()
-	a.p = &parser{r: s, recvBufferPool: a.cs.cc.dopts.recvBufferPool}
+	a.p = &parser{r: s, bufferPool: a.cs.cc.dopts.copts.BufferPool}
 	return nil
 }
 
@@ -566,10 +567,15 @@ type clientStream struct {
 	// place where we need to check if the attempt is nil.
 	attempt *csAttempt
 	// TODO(hedging): hedging will have multiple attempts simultaneously.
-	committed  bool // active attempt committed for retry?
-	onCommit   func()
-	buffer     []func(a *csAttempt) error // operations to replay on retry
-	bufferSize int                        // current size of buffer
+	committed        bool // active attempt committed for retry?
+	onCommit         func()
+	replayBuffer     []replayOp // operations to replay on retry
+	replayBufferSize int        // current size of replayBuffer
+}
+
+type replayOp struct {
+	op      func(a *csAttempt) error
+	cleanup func()
 }
 
 // csAttempt implements a single transport stream attempt within a
@@ -607,7 +613,12 @@ func (cs *clientStream) commitAttemptLocked() {
 		cs.onCommit()
 	}
 	cs.committed = true
-	cs.buffer = nil
+	for _, op := range cs.replayBuffer {
+		if op.cleanup != nil {
+			op.cleanup()
+		}
+	}
+	cs.replayBuffer = nil
 }
 
 func (cs *clientStream) commitAttempt() {
@@ -732,7 +743,7 @@ func (cs *clientStream) retryLocked(attempt *csAttempt, lastErr error) error {
 		// the stream is canceled.
 		return err
 	}
-	// Note that the first op in the replay buffer always sets cs.attempt
+	// Note that the first op in replayBuffer always sets cs.attempt
 	// if it is able to pick a transport and create a stream.
 	if lastErr = cs.replayBufferLocked(attempt); lastErr == nil {
 		return nil
@@ -761,7 +772,7 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func())
 			// already be status errors.
 			return toRPCErr(op(cs.attempt))
 		}
-		if len(cs.buffer) == 0 {
+		if len(cs.replayBuffer) == 0 {
 			// For the first op, which controls creation of the stream and
 			// assigns cs.attempt, we need to create a new attempt inline
 			// before executing the first op.  On subsequent ops, the attempt
@@ -851,25 +862,26 @@ func (cs *clientStream) Trailer() metadata.MD {
 }
 
 func (cs *clientStream) replayBufferLocked(attempt *csAttempt) error {
-	for _, f := range cs.buffer {
-		if err := f(attempt); err != nil {
+	for _, f := range cs.replayBuffer {
+		if err := f.op(attempt); err != nil {
 			return err
 		}
 	}
 	return nil
 }
 
-func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error) {
+func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error, cleanup func()) {
 	// Note: we still will buffer if retry is disabled (for transparent retries).
 	if cs.committed {
 		return
 	}
-	cs.bufferSize += sz
-	if cs.bufferSize > cs.callInfo.maxRetryRPCBufferSize {
+	cs.replayBufferSize += sz
+	if cs.replayBufferSize > cs.callInfo.maxRetryRPCBufferSize {
 		cs.commitAttemptLocked()
+		cleanup()
 		return
 	}
-	cs.buffer = append(cs.buffer, op)
+	cs.replayBuffer = append(cs.replayBuffer, replayOp{op: op, cleanup: cleanup})
 }
 
 func (cs *clientStream) SendMsg(m any) (err error) {
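bufferForRetryLocked now records a cleanup alongside each replayable op, so buffers referenced by a buffered SendMsg can be released the moment the attempt is committed and replay becomes impossible. A stripped-down model of that bookkeeping, independent of the gRPC types (replayer and op are illustrative names):

package replay

// op pairs a replayable action with the cleanup that releases whatever
// buffers the action captured, mirroring the replayOp introduced above.
type op struct {
	run     func() error
	cleanup func()
}

// replayer models clientStream's bookkeeping: ops accumulate until the
// attempt is committed, at which point nothing can be replayed and every
// retained cleanup runs.
type replayer struct {
	committed bool
	size, max int
	buf       []op
}

func (r *replayer) record(o op, sz int) {
	if r.committed {
		return // nothing is retained once committed
	}
	r.size += sz
	if r.size > r.max {
		// Over budget: give up on retries and release this op's buffers
		// immediately, as bufferForRetryLocked does above.
		r.commit()
		if o.cleanup != nil {
			o.cleanup()
		}
		return
	}
	r.buf = append(r.buf, o)
}

func (r *replayer) commit() {
	r.committed = true
	for _, o := range r.buf {
		if o.cleanup != nil {
			o.cleanup()
		}
	}
	r.buf = nil
}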
@@ -891,23 +903,50 @@ func (cs *clientStream) SendMsg(m any) (err error) {
 	}
 
 	// load hdr, payload, data
-	hdr, payload, data, err := prepareMsg(m, cs.codec, cs.cp, cs.comp)
+	hdr, data, payload, pf, err := prepareMsg(m, cs.codec, cs.cp, cs.comp, cs.cc.dopts.copts.BufferPool)
 	if err != nil {
 		return err
 	}
 
+	defer func() {
+		data.Free()
+		// only free payload if compression was made, and therefore it is a different set
+		// of buffers from data.
+		if pf.isCompressed() {
+			payload.Free()
+		}
+	}()
+
+	dataLen := data.Len()
+	payloadLen := payload.Len()
 	// TODO(dfawley): should we be checking len(data) instead?
-	if len(payload) > *cs.callInfo.maxSendMessageSize {
-		return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.callInfo.maxSendMessageSize)
+	if payloadLen > *cs.callInfo.maxSendMessageSize {
+		return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payloadLen, *cs.callInfo.maxSendMessageSize)
 	}
+
+	// always take an extra ref in case data == payload (i.e. when the data isn't
+	// compressed). The original ref will always be freed by the deferred free above.
+	payload.Ref()
 	op := func(a *csAttempt) error {
-		return a.sendMsg(m, hdr, payload, data)
+		return a.sendMsg(m, hdr, payload, dataLen, payloadLen)
+	}
+
+	// onSuccess is invoked when the op is captured for a subsequent retry. If the
+	// stream was established by a previous message and therefore retries are
+	// disabled, onSuccess will not be invoked, and payloadRef can be freed
+	// immediately.
+	onSuccessCalled := false
+	err = cs.withRetry(op, func() {
+		cs.bufferForRetryLocked(len(hdr)+payloadLen, op, payload.Free)
+		onSuccessCalled = true
+	})
+	if !onSuccessCalled {
+		payload.Free()
 	}
-	err = cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) })
 	if len(cs.binlogs) != 0 && err == nil {
 		cm := &binarylog.ClientMessage{
 			OnClientSide: true,
-			Message:      data,
+			Message:      data.Materialize(),
 		}
 		for _, binlog := range cs.binlogs {
 			binlog.Log(cs.ctx, cm)
@@ -924,6 +963,7 @@ func (cs *clientStream) RecvMsg(m any) error {
 	var recvInfo *payloadInfo
 	if len(cs.binlogs) != 0 {
 		recvInfo = &payloadInfo{}
+		defer recvInfo.free()
 	}
 	err := cs.withRetry(func(a *csAttempt) error {
 		return a.recvMsg(m, recvInfo)
@@ -931,7 +971,7 @@ func (cs *clientStream) RecvMsg(m any) error {
 	if len(cs.binlogs) != 0 && err == nil {
 		sm := &binarylog.ServerMessage{
 			OnClientSide: true,
-			Message:      recvInfo.uncompressedBytes,
+			Message:      recvInfo.uncompressedBytes.Materialize(),
 		}
 		for _, binlog := range cs.binlogs {
 			binlog.Log(cs.ctx, sm)
@@ -958,7 +998,7 @@ func (cs *clientStream) CloseSend() error {
 		// RecvMsg.  This also matches historical behavior.
 		return nil
 	}
-	cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) })
+	cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op, nil) })
 	if len(cs.binlogs) != 0 {
 		chc := &binarylog.ClientHalfClose{
 			OnClientSide: true,
@@ -1034,7 +1074,7 @@ func (cs *clientStream) finish(err error) {
 	cs.cancel()
 }
 
-func (a *csAttempt) sendMsg(m any, hdr, payld, data []byte) error {
+func (a *csAttempt) sendMsg(m any, hdr []byte, payld mem.BufferSlice, dataLength, payloadLength int) error {
 	cs := a.cs
 	if a.trInfo != nil {
 		a.mu.Lock()
@@ -1052,8 +1092,10 @@ func (a *csAttempt) sendMsg(m any, hdr, payld, data []byte) error {
 		}
 		return io.EOF
 	}
-	for _, sh := range a.statsHandlers {
-		sh.HandleRPC(a.ctx, outPayload(true, m, data, payld, time.Now()))
+	if len(a.statsHandlers) != 0 {
+		for _, sh := range a.statsHandlers {
+			sh.HandleRPC(a.ctx, outPayload(true, m, dataLength, payloadLength, time.Now()))
+		}
 	}
 	if channelz.IsOn() {
 		a.t.IncrMsgSent()
@@ -1065,6 +1107,7 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
 	cs := a.cs
 	if len(a.statsHandlers) != 0 && payInfo == nil {
 		payInfo = &payloadInfo{}
+		defer payInfo.free()
 	}
 
 	if !a.decompSet {
@@ -1083,8 +1126,7 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
 		// Only initialize this state once per stream.
 		a.decompSet = true
 	}
-	err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp)
-	if err != nil {
+	if err := recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp, false); err != nil {
 		if err == io.EOF {
 			if statusErr := a.s.Status().Err(); statusErr != nil {
 				return statusErr
@@ -1103,14 +1145,12 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
 	}
 	for _, sh := range a.statsHandlers {
 		sh.HandleRPC(a.ctx, &stats.InPayload{
-			Client:   true,
-			RecvTime: time.Now(),
-			Payload:  m,
-			// TODO truncate large payload.
-			Data:             payInfo.uncompressedBytes,
+			Client:           true,
+			RecvTime:         time.Now(),
+			Payload:          m,
 			WireLength:       payInfo.compressedLength + headerLen,
 			CompressedLength: payInfo.compressedLength,
-			Length:           len(payInfo.uncompressedBytes),
+			Length:           payInfo.uncompressedBytes.Len(),
 		})
 	}
 	if channelz.IsOn() {
@@ -1122,14 +1162,12 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
 	}
 	// Special handling for non-server-stream rpcs.
 	// This recv expects EOF or errors, so we don't collect inPayload.
-	err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp)
-	if err == nil {
-		return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
-	}
-	if err == io.EOF {
+	if err := recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp, false); err == io.EOF {
 		return a.s.Status().Err() // non-server streaming Recv returns nil on success
+	} else if err != nil {
+		return toRPCErr(err)
 	}
-	return toRPCErr(err)
+	return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
 }
 
 func (a *csAttempt) finish(err error) {
@@ -1185,12 +1223,12 @@ func (a *csAttempt) finish(err error) {
 	a.mu.Unlock()
 }
 
-// newClientStream creates a ClientStream with the specified transport, on the
+// newNonRetryClientStream creates a ClientStream with the specified transport, on the
 // given addrConn.
 //
 // It's expected that the given transport is either the same one in addrConn, or
 // is already closed. To avoid race, transport is specified separately, instead
-// of using ac.transpot.
+// of using ac.transport.
 //
 // Main difference between this and ClientConn.NewStream:
 // - no retry
@@ -1276,7 +1314,7 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin
 		return nil, err
 	}
 	as.s = s
-	as.p = &parser{r: s, recvBufferPool: ac.dopts.recvBufferPool}
+	as.p = &parser{r: s, bufferPool: ac.dopts.copts.BufferPool}
 	ac.incrCallsStarted()
 	if desc != unaryStreamDesc {
 		// Listen on stream context to cleanup when the stream context is
@@ -1373,17 +1411,26 @@ func (as *addrConnStream) SendMsg(m any) (err error) {
 	}
 
 	// load hdr, payload, data
-	hdr, payld, _, err := prepareMsg(m, as.codec, as.cp, as.comp)
+	hdr, data, payload, pf, err := prepareMsg(m, as.codec, as.cp, as.comp, as.ac.dopts.copts.BufferPool)
 	if err != nil {
 		return err
 	}
 
+	defer func() {
+		data.Free()
+		// only free payload if compression was made, and therefore it is a different set
+		// of buffers from data.
+		if pf.isCompressed() {
+			payload.Free()
+		}
+	}()
+
 	// TODO(dfawley): should we be checking len(data) instead?
-	if len(payld) > *as.callInfo.maxSendMessageSize {
-		return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payld), *as.callInfo.maxSendMessageSize)
+	if payload.Len() > *as.callInfo.maxSendMessageSize {
+		return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payload.Len(), *as.callInfo.maxSendMessageSize)
 	}
 
-	if err := as.t.Write(as.s, hdr, payld, &transport.Options{Last: !as.desc.ClientStreams}); err != nil {
+	if err := as.t.Write(as.s, hdr, payload, &transport.Options{Last: !as.desc.ClientStreams}); err != nil {
 		if !as.desc.ClientStreams {
 			// For non-client-streaming RPCs, we return nil instead of EOF on error
 			// because the generated code requires it.  finish is not called; RecvMsg()
@@ -1423,8 +1470,7 @@ func (as *addrConnStream) RecvMsg(m any) (err error) {
 		// Only initialize this state once per stream.
 		as.decompSet = true
 	}
-	err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp)
-	if err != nil {
+	if err := recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp, false); err != nil {
 		if err == io.EOF {
 			if statusErr := as.s.Status().Err(); statusErr != nil {
 				return statusErr
@@ -1444,14 +1490,12 @@ func (as *addrConnStream) RecvMsg(m any) (err error) {
 
 	// Special handling for non-server-stream rpcs.
 	// This recv expects EOF or errors, so we don't collect inPayload.
-	err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp)
-	if err == nil {
-		return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
-	}
-	if err == io.EOF {
+	if err := recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp, false); err == io.EOF {
 		return as.s.Status().Err() // non-server streaming Recv returns nil on success
+	} else if err != nil {
+		return toRPCErr(err)
 	}
-	return toRPCErr(err)
+	return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
 }
 
 func (as *addrConnStream) finish(err error) {
@@ -1645,18 +1689,31 @@ func (ss *serverStream) SendMsg(m any) (err error) {
 	}
 
 	// load hdr, payload, data
-	hdr, payload, data, err := prepareMsg(m, ss.codec, ss.cp, ss.comp)
+	hdr, data, payload, pf, err := prepareMsg(m, ss.codec, ss.cp, ss.comp, ss.p.bufferPool)
 	if err != nil {
 		return err
 	}
 
+	defer func() {
+		data.Free()
+		// only free payload if compression was made, and therefore it is a different set
+		// of buffers from data.
+		if pf.isCompressed() {
+			payload.Free()
+		}
+	}()
+
+	dataLen := data.Len()
+	payloadLen := payload.Len()
+
 	// TODO(dfawley): should we be checking len(data) instead?
-	if len(payload) > ss.maxSendMessageSize {
-		return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), ss.maxSendMessageSize)
+	if payloadLen > ss.maxSendMessageSize {
+		return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payloadLen, ss.maxSendMessageSize)
 	}
 	if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil {
 		return toRPCErr(err)
 	}
+
 	if len(ss.binlogs) != 0 {
 		if !ss.serverHeaderBinlogged {
 			h, _ := ss.s.Header()
@@ -1669,7 +1726,7 @@ func (ss *serverStream) SendMsg(m any) (err error) {
 			}
 		}
 		sm := &binarylog.ServerMessage{
-			Message: data,
+			Message: data.Materialize(),
 		}
 		for _, binlog := range ss.binlogs {
 			binlog.Log(ss.ctx, sm)
@@ -1677,7 +1734,7 @@ func (ss *serverStream) SendMsg(m any) (err error) {
 	}
 	if len(ss.statsHandler) != 0 {
 		for _, sh := range ss.statsHandler {
-			sh.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now()))
+			sh.HandleRPC(ss.s.Context(), outPayload(false, m, dataLen, payloadLen, time.Now()))
 		}
 	}
 	return nil
@@ -1714,8 +1771,9 @@ func (ss *serverStream) RecvMsg(m any) (err error) {
 	var payInfo *payloadInfo
 	if len(ss.statsHandler) != 0 || len(ss.binlogs) != 0 {
 		payInfo = &payloadInfo{}
+		defer payInfo.free()
 	}
-	if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp); err != nil {
+	if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp, true); err != nil {
 		if err == io.EOF {
 			if len(ss.binlogs) != 0 {
 				chc := &binarylog.ClientHalfClose{}
@@ -1733,11 +1791,9 @@ func (ss *serverStream) RecvMsg(m any) (err error) {
 	if len(ss.statsHandler) != 0 {
 		for _, sh := range ss.statsHandler {
 			sh.HandleRPC(ss.s.Context(), &stats.InPayload{
-				RecvTime: time.Now(),
-				Payload:  m,
-				// TODO truncate large payload.
-				Data:             payInfo.uncompressedBytes,
-				Length:           len(payInfo.uncompressedBytes),
+				RecvTime:         time.Now(),
+				Payload:          m,
+				Length:           payInfo.uncompressedBytes.Len(),
 				WireLength:       payInfo.compressedLength + headerLen,
 				CompressedLength: payInfo.compressedLength,
 			})
@@ -1745,7 +1801,7 @@ func (ss *serverStream) RecvMsg(m any) (err error) {
 	}
 	if len(ss.binlogs) != 0 {
 		cm := &binarylog.ClientMessage{
-			Message: payInfo.uncompressedBytes,
+			Message: payInfo.uncompressedBytes.Materialize(),
 		}
 		for _, binlog := range ss.binlogs {
 			binlog.Log(ss.ctx, cm)
@@ -1760,23 +1816,26 @@ func MethodFromServerStream(stream ServerStream) (string, bool) {
 	return Method(stream.Context())
 }
 
-// prepareMsg returns the hdr, payload and data
-// using the compressors passed or using the
-// passed preparedmsg
-func prepareMsg(m any, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) {
+// prepareMsg returns the hdr, payload and data using the compressors passed or
+// using the passed preparedmsg. The returned boolean indicates whether
+// compression was made and therefore whether the payload needs to be freed in
+// addition to the returned data. Freeing the payload if the returned boolean is
+// false can lead to undefined behavior.
+func prepareMsg(m any, codec baseCodec, cp Compressor, comp encoding.Compressor, pool mem.BufferPool) (hdr []byte, data, payload mem.BufferSlice, pf payloadFormat, err error) {
 	if preparedMsg, ok := m.(*PreparedMsg); ok {
-		return preparedMsg.hdr, preparedMsg.payload, preparedMsg.encodedData, nil
+		return preparedMsg.hdr, preparedMsg.encodedData, preparedMsg.payload, preparedMsg.pf, nil
 	}
 	// The input interface is not a prepared msg.
 	// Marshal and Compress the data at this point
 	data, err = encode(codec, m)
 	if err != nil {
-		return nil, nil, nil, err
+		return nil, nil, nil, 0, err
 	}
-	compData, err := compress(data, cp, comp)
+	compData, pf, err := compress(data, cp, comp, pool)
 	if err != nil {
-		return nil, nil, nil, err
+		data.Free()
+		return nil, nil, nil, 0, err
 	}
-	hdr, payload = msgHeader(data, compData)
-	return hdr, payload, data, nil
+	hdr, payload = msgHeader(data, compData, pf)
+	return hdr, data, payload, pf, nil
 }
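prepareMsg now returns mem.BufferSlice values, and call sites that need a contiguous []byte, such as the binary-logging hunks above, call Materialize() to flatten the chained buffers into a single copy. A tiny runnable illustration (the two-buffer split here is arbitrary; mem.SliceBuffer is assumed from the grpc-go 1.68 mem package):

package main

import (
	"fmt"

	"google.golang.org/grpc/mem"
)

func main() {
	// A BufferSlice is a list of refcounted buffers rather than one flat
	// []byte; Materialize copies the pieces into a single contiguous slice,
	// which is what the binary logger needs for its Message field.
	bs := mem.BufferSlice{mem.SliceBuffer("hello, "), mem.SliceBuffer("world")}
	defer bs.Free()
	fmt.Printf("%s (%d bytes)\n", bs.Materialize(), bs.Len()) // hello, world (12 bytes)
}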
diff --git a/vendor/google.golang.org/grpc/stream_interfaces.go b/vendor/google.golang.org/grpc/stream_interfaces.go
index 8b813529c0c..0037fee0bd7 100644
--- a/vendor/google.golang.org/grpc/stream_interfaces.go
+++ b/vendor/google.golang.org/grpc/stream_interfaces.go
@@ -22,15 +22,35 @@ package grpc
 // request, many responses) RPC. It is generic over the type of the response
 // message. It is used in generated code.
 type ServerStreamingClient[Res any] interface {
+	// Recv receives the next response message from the server. The client may
+	// repeatedly call Recv to read messages from the response stream. If
+	// io.EOF is returned, the stream has terminated with an OK status. Any
+	// other error is compatible with the status package and indicates the
+	// RPC's status code and message.
 	Recv() (*Res, error)
+
+	// ClientStream is embedded to provide Context, Header, and Trailer
+	// functionality. No other methods in the ClientStream should be called
+	// directly.
 	ClientStream
 }
 
 // ServerStreamingServer represents the server side of a server-streaming (one
 // request, many responses) RPC. It is generic over the type of the response
 // message. It is used in generated code.
+//
+// To terminate the response stream, return from the handler method and return
+// an error from the status package, or use nil to indicate an OK status code.
 type ServerStreamingServer[Res any] interface {
+	// Send sends a response message to the client. The server handler may
+	// call Send multiple times to send multiple messages to the client. An
+	// error is returned if the stream was terminated unexpectedly, and the
+	// handler method should return, as the stream is no longer usable.
 	Send(*Res) error
+
+	// ServerStream is embedded to provide Context, SetHeader, SendHeader, and
+	// SetTrailer functionality. No other methods in the ServerStream should
+	// be called directly.
 	ServerStream
 }
 
@@ -39,8 +59,22 @@ type ServerStreamingServer[Res any] interface {
 // message stream and the type of the unary response message. It is used in
 // generated code.
 type ClientStreamingClient[Req any, Res any] interface {
+	// Send sends a request message to the server. The client may call Send
+	// multiple times to send multiple messages to the server. On error, Send
+	// aborts the stream. If the error was generated by the client, the status
+	// is returned directly. Otherwise, io.EOF is returned, and the status of
+	// the stream may be discovered using CloseAndRecv().
 	Send(*Req) error
+
+	// CloseAndRecv closes the request stream and waits for the server's
+	// response. This method must be called once and only once after sending
+	// all request messages. Any error returned is implemented by the status
+	// package.
 	CloseAndRecv() (*Res, error)
+
+	// ClientStream is embedded to provide Context, Header, and Trailer
+	// functionality. No other methods in the ClientStream should be called
+	// directly.
 	ClientStream
 }
 
@@ -48,9 +82,28 @@ type ClientStreamingClient[Req any, Res any] interface {
 // requests, one response) RPC. It is generic over both the type of the request
 // message stream and the type of the unary response message. It is used in
 // generated code.
+//
+// To terminate the RPC, call SendAndClose and return nil from the method
+// handler or do not call SendAndClose and return an error from the status
+// package.
 type ClientStreamingServer[Req any, Res any] interface {
+	// Recv receives the next request message from the client. The server may
+	// repeatedly call Recv to read messages from the request stream. If
+	// io.EOF is returned, it indicates the client called CloseAndRecv on its
+	// ClientStreamingClient. Any other error indicates the stream was
+	// terminated unexpectedly, and the handler method should return, as the
+	// stream is no longer usable.
 	Recv() (*Req, error)
+
+	// SendAndClose sends a single response message to the client and closes
+	// the stream. This method must be called once and only once after all
+	// request messages have been processed. Recv should not be called after
+	// calling SendAndClose.
 	SendAndClose(*Res) error
+
+	// ServerStream is embedded to provide Context, SetHeader, SendHeader, and
+	// SetTrailer functionality. No other methods in the ServerStream should
+	// be called directly.
 	ServerStream
 }
 
@@ -59,8 +112,23 @@ type ClientStreamingServer[Req any, Res any] interface {
 // request message stream and the type of the response message stream. It is
 // used in generated code.
 type BidiStreamingClient[Req any, Res any] interface {
+	// Send sends a request message to the server. The client may call Send
+	// multiple times to send multiple messages to the server. On error, Send
+	// aborts the stream. If the error was generated by the client, the status
+	// is returned directly. Otherwise, io.EOF is returned, and the status of
+	// the stream may be discovered using Recv().
 	Send(*Req) error
+
+	// Recv receives the next response message from the server. The client may
+	// repeatedly call Recv to read messages from the response stream. If
+	// io.EOF is returned, the stream has terminated with an OK status. Any
+	// other error is compatible with the status package and indicates the
+	// RPC's status code and message.
 	Recv() (*Res, error)
+
+	// ClientStream is embedded to provide Context, Header, Trailer, and
+	// CloseSend functionality. No other methods in the ClientStream should be
+	// called directly.
 	ClientStream
 }
 
@@ -68,9 +136,27 @@ type BidiStreamingClient[Req any, Res any] interface {
 // (many requests, many responses) RPC. It is generic over both the type of the
 // request message stream and the type of the response message stream. It is
 // used in generated code.
+//
+// To terminate the stream, return from the handler method and return
+// an error from the status package, or use nil to indicate an OK status code.
 type BidiStreamingServer[Req any, Res any] interface {
+	// Recv receives the next request message from the client. The server may
+	// repeatedly call Recv to read messages from the request stream. If
+	// io.EOF is returned, it indicates the client called CloseSend on its
+	// BidiStreamingClient. Any other error indicates the stream was
+	// terminated unexpectedly, and the handler method should return, as the
+	// stream is no longer usable.
 	Recv() (*Req, error)
+
+	// Send sends a response message to the client. The server handler may
+	// call Send multiple times to send multiple messages to the client. An
+	// error is returned if the stream was terminated unexpectedly, and the
+	// handler method should return, as the stream is no longer usable.
 	Send(*Res) error
+
+	// ServerStream is embedded to provide Context, SetHeader, SendHeader, and
+	// SetTrailer functionality. No other methods in the ServerStream should
+	// be called directly.
 	ServerStream
 }
diff --git a/vendor/google.golang.org/grpc/test/bufconn/bufconn.go b/vendor/google.golang.org/grpc/test/bufconn/bufconn.go
index 3f77f4876eb..e6eb4feebb9 100644
--- a/vendor/google.golang.org/grpc/test/bufconn/bufconn.go
+++ b/vendor/google.golang.org/grpc/test/bufconn/bufconn.go
@@ -109,7 +109,7 @@ type pipe struct {
 	mu sync.Mutex
 
 	// buf contains the data in the pipe.  It is a ring buffer of fixed capacity,
-	// with r and w pointing to the offset to read and write, respsectively.
+	// with r and w pointing to the offset to read and write, respectively.
 	//
 	// Data is read between [r, w) and written to [w, r), wrapping around the end
 	// of the slice if necessary.
diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go
index bafaef99be9..5a47094ae87 100644
--- a/vendor/google.golang.org/grpc/version.go
+++ b/vendor/google.golang.org/grpc/version.go
@@ -19,4 +19,4 @@ package grpc
 
 // Version is the current grpc version.
-const Version = "1.65.0"
+const Version = "1.68.1"
diff --git a/vendor/modules.txt b/vendor/modules.txt
index bfb0ae7df5b..217e7200ab0 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -4,8 +4,8 @@ cloud.google.com/go/internal
 cloud.google.com/go/internal/optional
 cloud.google.com/go/internal/trace
 cloud.google.com/go/internal/version
-# cloud.google.com/go/auth v0.7.0
-## explicit; go 1.20
+# cloud.google.com/go/auth v0.9.9
+## explicit; go 1.21
 cloud.google.com/go/auth
 cloud.google.com/go/auth/credentials
 cloud.google.com/go/auth/credentials/idtoken
@@ -18,17 +18,18 @@ cloud.google.com/go/auth/credentials/internal/stsexchange
 cloud.google.com/go/auth/grpctransport
 cloud.google.com/go/auth/httptransport
 cloud.google.com/go/auth/internal
+cloud.google.com/go/auth/internal/compute
 cloud.google.com/go/auth/internal/credsfile
 cloud.google.com/go/auth/internal/jwt
 cloud.google.com/go/auth/internal/transport
 cloud.google.com/go/auth/internal/transport/cert
-# cloud.google.com/go/auth/oauth2adapt v0.2.2
-## explicit; go 1.19
+# cloud.google.com/go/auth/oauth2adapt v0.2.6
+## explicit; go 1.21
 cloud.google.com/go/auth/oauth2adapt
-# cloud.google.com/go/compute/metadata v0.5.0
-## explicit; go 1.20
+# cloud.google.com/go/compute/metadata v0.5.2
+## explicit; go 1.21
 cloud.google.com/go/compute/metadata
-# cloud.google.com/go/iam v1.1.10
+# cloud.google.com/go/iam v1.1.11
 ## explicit; go 1.20
 cloud.google.com/go/iam
 cloud.google.com/go/iam/apiv1/iampb
@@ -73,7 +74,7 @@ github.com/Azure/azure-sdk-for-go/sdk/internal/log
 github.com/Azure/azure-sdk-for-go/sdk/internal/poller
 github.com/Azure/azure-sdk-for-go/sdk/internal/temporal
 github.com/Azure/azure-sdk-for-go/sdk/internal/uuid
-# github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0
+# github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.1
 ## explicit; go 1.18
 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob
 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob
@@ -88,7 +89,7 @@ github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared
 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob
 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas
 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service
-# github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2
+# github.com/AzureAD/microsoft-authentication-library-for-go v1.2.3
 ## explicit; go 1.18
 github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache
 github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential
@@ -111,12 +112,9 @@ github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/options
 github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared
 github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version
 github.com/AzureAD/microsoft-authentication-library-for-go/apps/public
-# github.com/IBM/sarama v1.43.2
+# github.com/IBM/sarama v1.43.3
 ## explicit; go 1.19
 github.com/IBM/sarama
-# github.com/VividCortex/gohistogram v1.0.0
-## explicit
-github.com/VividCortex/gohistogram
 # github.com/alecthomas/kong v0.8.0
 ## explicit; go 1.18
 github.com/alecthomas/kong
@@ -124,10 +122,10 @@ github.com/alecthomas/kong
 ## explicit; go 1.18
 github.com/alecthomas/participle/v2
 github.com/alecthomas/participle/v2/lexer
-# github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30
+# github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b
 ## explicit; go 1.15
 github.com/alecthomas/units
-# github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a
+# github.com/alicebob/gopher-json v0.0.0-20230218143504-906a9b012302
 ## explicit
 github.com/alicebob/gopher-json
 # github.com/alicebob/miniredis/v2 v2.21.0
@@ -141,8 +139,14 @@ github.com/alicebob/miniredis/v2/server
 ## explicit; go 1.13
 github.com/andybalholm/brotli
 github.com/andybalholm/brotli/matchfinder
-# github.com/apache/thrift v0.20.0
-## explicit; go 1.21
+# github.com/antchfx/xmlquery v1.4.2
+## explicit; go 1.14
+github.com/antchfx/xmlquery
+# github.com/antchfx/xpath v1.3.2
+## explicit; go 1.14
+github.com/antchfx/xpath
+# github.com/apache/thrift v0.21.0
+## explicit; go 1.22.0
 github.com/apache/thrift/lib/go/thrift
 # github.com/armon/go-metrics v0.4.1
 ## explicit; go 1.12
@@ -151,6 +155,9 @@ github.com/armon/go-metrics/prometheus
 # github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2
 ## explicit; go 1.13
 github.com/asaskevich/govalidator
+# github.com/aws/aws-msk-iam-sasl-signer-go v1.0.0
+## explicit; go 1.17
+github.com/aws/aws-msk-iam-sasl-signer-go/signer
 # github.com/aws/aws-sdk-go v1.55.5
 ## explicit; go 1.19
 github.com/aws/aws-sdk-go/aws
@@ -206,8 +213,89 @@ github.com/aws/aws-sdk-go/service/sts
 github.com/aws/aws-sdk-go/service/sts/stsiface
 # github.com/aws/aws-sdk-go-v2 v1.22.2
 ## explicit; go 1.19
+github.com/aws/aws-sdk-go-v2/aws
+github.com/aws/aws-sdk-go-v2/aws/defaults
+github.com/aws/aws-sdk-go-v2/aws/middleware
+github.com/aws/aws-sdk-go-v2/aws/protocol/query
+github.com/aws/aws-sdk-go-v2/aws/protocol/restjson
+github.com/aws/aws-sdk-go-v2/aws/protocol/xml
+github.com/aws/aws-sdk-go-v2/aws/ratelimit
+github.com/aws/aws-sdk-go-v2/aws/retry
+github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4
+github.com/aws/aws-sdk-go-v2/aws/signer/v4
+github.com/aws/aws-sdk-go-v2/aws/transport/http
+github.com/aws/aws-sdk-go-v2/internal/auth
+github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn
+github.com/aws/aws-sdk-go-v2/internal/rand
+github.com/aws/aws-sdk-go-v2/internal/sdk
+github.com/aws/aws-sdk-go-v2/internal/sdkio
+github.com/aws/aws-sdk-go-v2/internal/shareddefaults
+github.com/aws/aws-sdk-go-v2/internal/strings
+github.com/aws/aws-sdk-go-v2/internal/sync/singleflight
+github.com/aws/aws-sdk-go-v2/internal/timeconv
 # github.com/aws/aws-sdk-go-v2/config v1.24.0
 ## explicit; go 1.19
+github.com/aws/aws-sdk-go-v2/config
+# github.com/aws/aws-sdk-go-v2/credentials v1.15.2
+## explicit; go 1.19
+github.com/aws/aws-sdk-go-v2/credentials
+github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds
+github.com/aws/aws-sdk-go-v2/credentials/endpointcreds
+github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client
+github.com/aws/aws-sdk-go-v2/credentials/processcreds
+github.com/aws/aws-sdk-go-v2/credentials/ssocreds
+github.com/aws/aws-sdk-go-v2/credentials/stscreds
+# github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.3
+## explicit; go 1.19
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config
+# github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.2
+## explicit; go 1.19
+github.com/aws/aws-sdk-go-v2/internal/configsources
+# github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.2
+## explicit; go 1.19
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2
+# github.com/aws/aws-sdk-go-v2/internal/ini v1.7.0
+## explicit; go 1.19
+github.com/aws/aws-sdk-go-v2/internal/ini
+# github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.2
+## explicit; go 1.19
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url
+# github.com/aws/aws-sdk-go-v2/service/sso v1.17.1
+## explicit; go 1.19
+github.com/aws/aws-sdk-go-v2/service/sso
+github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints
+github.com/aws/aws-sdk-go-v2/service/sso/types
+# github.com/aws/aws-sdk-go-v2/service/ssooidc v1.19.1
+## explicit; go 1.19
+github.com/aws/aws-sdk-go-v2/service/ssooidc
+github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints
+github.com/aws/aws-sdk-go-v2/service/ssooidc/types
+# github.com/aws/aws-sdk-go-v2/service/sts v1.25.1
+## explicit; go 1.19
+github.com/aws/aws-sdk-go-v2/service/sts
+github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints
+github.com/aws/aws-sdk-go-v2/service/sts/types
+# github.com/aws/smithy-go v1.16.0
+## explicit; go 1.19
+github.com/aws/smithy-go
+github.com/aws/smithy-go/auth/bearer
+github.com/aws/smithy-go/context
+github.com/aws/smithy-go/document
+github.com/aws/smithy-go/encoding
+github.com/aws/smithy-go/encoding/httpbinding
+github.com/aws/smithy-go/encoding/json
+github.com/aws/smithy-go/encoding/xml
+github.com/aws/smithy-go/endpoints
+github.com/aws/smithy-go/internal/sync/singleflight
+github.com/aws/smithy-go/io
+github.com/aws/smithy-go/logging
+github.com/aws/smithy-go/middleware
+github.com/aws/smithy-go/ptr
+github.com/aws/smithy-go/rand
+github.com/aws/smithy-go/time
+github.com/aws/smithy-go/transport/http
+github.com/aws/smithy-go/transport/http/internal/io
 # github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3
 ## explicit; go 1.20
 github.com/bboreham/go-loser
@@ -258,7 +346,7 @@ github.com/drone/envsubst/path
 # github.com/dustin/go-humanize v1.0.1
 ## explicit; go 1.16
 github.com/dustin/go-humanize
-# github.com/eapache/go-resiliency v1.6.0
+# github.com/eapache/go-resiliency v1.7.0
 ## explicit; go 1.13
 github.com/eapache/go-resiliency/breaker
 # github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3
@@ -267,13 +355,30 @@ github.com/eapache/go-xerial-snappy
 # github.com/eapache/queue v1.1.0
 ## explicit
 github.com/eapache/queue
+# github.com/ebitengine/purego v0.8.1
+## explicit; go 1.18
+github.com/ebitengine/purego
+github.com/ebitengine/purego/internal/cgo
+github.com/ebitengine/purego/internal/fakecgo
+github.com/ebitengine/purego/internal/strings
 # github.com/edsrzf/mmap-go v1.1.0
 ## explicit; go 1.17
 github.com/edsrzf/mmap-go
+# github.com/elastic/go-grok v0.3.1
+## explicit; go 1.21.0
+github.com/elastic/go-grok
+github.com/elastic/go-grok/dev-tools/mage
+github.com/elastic/go-grok/dev-tools/mage/gotool
+github.com/elastic/go-grok/patterns
+# github.com/elastic/lunes v0.1.0
+## explicit; go 1.21.0
+github.com/elastic/lunes
+github.com/elastic/lunes/dev-tools/mage
+github.com/elastic/lunes/dev-tools/mage/gotool
 # github.com/evanphx/json-patch v5.9.0+incompatible
 ## explicit
 github.com/evanphx/json-patch
-# github.com/expr-lang/expr v1.16.2
+# github.com/expr-lang/expr v1.16.9
 ## explicit; go 1.18
 github.com/expr-lang/expr
 github.com/expr-lang/expr/ast
@@ -300,18 +405,13 @@ github.com/fatih/color
 # github.com/felixge/httpsnoop v1.0.4
 ## explicit; go 1.13
 github.com/felixge/httpsnoop
-# github.com/fsnotify/fsnotify v1.7.0
+# github.com/fsnotify/fsnotify v1.8.0
 ## explicit; go 1.17
 github.com/fsnotify/fsnotify
+github.com/fsnotify/fsnotify/internal
 # github.com/go-ini/ini v1.67.0
 ## explicit
 github.com/go-ini/ini
-# github.com/go-kit/kit v0.13.0
-## explicit; go 1.17
-github.com/go-kit/kit/metrics
-github.com/go-kit/kit/metrics/expvar
-github.com/go-kit/kit/metrics/generic
-github.com/go-kit/kit/metrics/internal/lv
 # github.com/go-kit/log v0.2.1
 ## explicit; go 1.17
 github.com/go-kit/log
@@ -377,9 +477,10 @@ github.com/go-redis/redis/v8/internal/util
 # github.com/go-test/deep v1.1.1
 ## explicit; go 1.16
 github.com/go-test/deep
-# github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1
+# github.com/go-viper/mapstructure/v2 v2.2.1
 ## explicit; go 1.18
 github.com/go-viper/mapstructure/v2
+github.com/go-viper/mapstructure/v2/internal/errors
 # github.com/gobwas/glob v0.2.3
 ## explicit
 github.com/gobwas/glob
@@ -390,7 +491,7 @@ github.com/gobwas/glob/syntax/ast
 github.com/gobwas/glob/syntax/lexer
 github.com/gobwas/glob/util/runes
 github.com/gobwas/glob/util/strings
-# github.com/goccy/go-json v0.10.3
+# github.com/goccy/go-json v0.10.4
 ## explicit; go 1.19
 github.com/goccy/go-json
 github.com/goccy/go-json/internal/decoder
@@ -419,8 +520,8 @@ github.com/gogo/status
 # github.com/golang-jwt/jwt/v5 v5.2.1
 ## explicit; go 1.18
 github.com/golang-jwt/jwt/v5
-# github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da
-## explicit
+# github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8
+## explicit; go 1.20
 github.com/golang/groupcache/lru
 # github.com/golang/protobuf v1.5.4
 ## explicit; go 1.17
@@ -443,8 +544,8 @@ github.com/google/go-cmp/cmp/internal/diff
 github.com/google/go-cmp/cmp/internal/flags
 github.com/google/go-cmp/cmp/internal/function
 github.com/google/go-cmp/cmp/internal/value
-# github.com/google/s2a-go v0.1.7
-## explicit; go 1.19
+# github.com/google/s2a-go v0.1.8
+## explicit; go 1.20
 github.com/google/s2a-go
 github.com/google/s2a-go/fallback
 github.com/google/s2a-go/internal/authinfo
@@ -469,7 +570,7 @@ github.com/google/s2a-go/stream
 # github.com/google/uuid v1.6.0
 ## explicit
 github.com/google/uuid
-# github.com/googleapis/enterprise-certificate-proxy v0.3.2
+# github.com/googleapis/enterprise-certificate-proxy v0.3.4
 ## explicit; go 1.19
 github.com/googleapis/enterprise-certificate-proxy/client
 github.com/googleapis/enterprise-certificate-proxy/client/util
@@ -480,8 +581,8 @@ github.com/googleapis/gax-go/v2/apierror
 github.com/googleapis/gax-go/v2/apierror/internal/proto
 github.com/googleapis/gax-go/v2/callctx
 github.com/googleapis/gax-go/v2/internal
-# github.com/gorilla/handlers v1.5.1
-## explicit; go 1.14
+# github.com/gorilla/handlers v1.5.2
+## explicit; go 1.20
 github.com/gorilla/handlers
 # github.com/gorilla/mux v1.8.1
 ## explicit; go 1.20
@@ -526,6 +627,7 @@ github.com/grafana/dskit/signals
 github.com/grafana/dskit/spanlogger
 github.com/grafana/dskit/spanprofiler
 github.com/grafana/dskit/tenant
+github.com/grafana/dskit/test
 github.com/grafana/dskit/tracing
 github.com/grafana/dskit/user
 # github.com/grafana/e2e v0.1.1
@@ -533,8 +635,8 @@ github.com/grafana/dskit/user
 github.com/grafana/e2e
 github.com/grafana/e2e/db
 github.com/grafana/e2e/images
-# github.com/grafana/gomemcache v0.0.0-20240229205252-cd6a66d6fb56
-## explicit; go 1.18
+# github.com/grafana/gomemcache v0.0.0-20241016125027-0a5bcc5aef40
+## explicit; go 1.22
 github.com/grafana/gomemcache/memcache
 # github.com/grafana/pyroscope-go/godeltaprof v0.1.8
 ## explicit; go 1.18
@@ -548,16 +650,12 @@ github.com/grafana/regexp/syntax
 # github.com/grpc-ecosystem/go-grpc-middleware v1.4.0
 ## explicit; go 1.14
 github.com/grpc-ecosystem/go-grpc-middleware
-github.com/grpc-ecosystem/go-grpc-middleware/logging
-github.com/grpc-ecosystem/go-grpc-middleware/logging/settable
-github.com/grpc-ecosystem/go-grpc-middleware/logging/zap
-github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap
-github.com/grpc-ecosystem/go-grpc-middleware/retry
-github.com/grpc-ecosystem/go-grpc-middleware/tags
-github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils
-github.com/grpc-ecosystem/go-grpc-middleware/util/metautils
-# github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0
-## explicit; go 1.20
+# github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0
+## explicit; go 1.19
+github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/retry
+github.com/grpc-ecosystem/go-grpc-middleware/v2/metadata
+# github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0
+## explicit; go 1.22.7
 github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule
 github.com/grpc-ecosystem/grpc-gateway/v2/runtime
 github.com/grpc-ecosystem/grpc-gateway/v2/utilities
@@ -582,13 +680,6 @@ github.com/hashicorp/go-msgpack/codec
 # github.com/hashicorp/go-multierror v1.1.1
 ## explicit; go 1.13
 github.com/hashicorp/go-multierror
-# github.com/hashicorp/go-plugin v1.6.0
-## explicit; go 1.17
-github.com/hashicorp/go-plugin
-github.com/hashicorp/go-plugin/internal/cmdrunner
-github.com/hashicorp/go-plugin/internal/grpcmux
-github.com/hashicorp/go-plugin/internal/plugin
-github.com/hashicorp/go-plugin/runner
 # github.com/hashicorp/go-rootcerts v1.0.2
 ## explicit; go 1.12
 github.com/hashicorp/go-rootcerts
@@ -603,6 +694,7 @@ github.com/hashicorp/go-uuid
 github.com/hashicorp/go-version
 # github.com/hashicorp/golang-lru v1.0.2
 ## explicit; go 1.12
+github.com/hashicorp/golang-lru
 github.com/hashicorp/golang-lru/simplelru
 # github.com/hashicorp/golang-lru/v2 v2.0.7
 ## explicit; go 1.18
@@ -627,17 +719,14 @@ github.com/hashicorp/memberlist
 # github.com/hashicorp/serf v0.10.1
 ## explicit; go 1.12
 github.com/hashicorp/serf/coordinate
-# github.com/hashicorp/yamux v0.1.1
-## explicit; go 1.15
-github.com/hashicorp/yamux
 # github.com/iancoleman/strcase v0.3.0
 ## explicit; go 1.16
 github.com/iancoleman/strcase
 # github.com/inconshreveable/mousetrap v1.1.0
 ## explicit; go 1.18
 github.com/inconshreveable/mousetrap
-# github.com/jaegertracing/jaeger v1.57.0
-## explicit; go 1.21
+# github.com/jaegertracing/jaeger v1.63.0
+## explicit; go 1.22.7
github.com/jaegertracing/jaeger/cmd/agent/app/configmanager github.com/jaegertracing/jaeger/cmd/agent/app/configmanager/grpc github.com/jaegertracing/jaeger/cmd/agent/app/customtransport @@ -649,10 +738,9 @@ github.com/jaegertracing/jaeger/cmd/agent/app/servers github.com/jaegertracing/jaeger/cmd/agent/app/servers/thriftudp github.com/jaegertracing/jaeger/cmd/all-in-one/setupcontext github.com/jaegertracing/jaeger/cmd/collector/app/sampling/model -github.com/jaegertracing/jaeger/cmd/collector/app/sampling/strategystore +github.com/jaegertracing/jaeger/cmd/collector/app/sampling/samplingstrategy github.com/jaegertracing/jaeger/cmd/collector/app/sanitizer/zipkin github.com/jaegertracing/jaeger/cmd/internal/flags -github.com/jaegertracing/jaeger/internal/metrics/expvar github.com/jaegertracing/jaeger/internal/metrics/metricsbuilder github.com/jaegertracing/jaeger/internal/metrics/prometheus github.com/jaegertracing/jaeger/model @@ -660,7 +748,6 @@ github.com/jaegertracing/jaeger/model/converter/json github.com/jaegertracing/jaeger/model/converter/thrift/jaeger github.com/jaegertracing/jaeger/model/converter/thrift/zipkin github.com/jaegertracing/jaeger/model/json -github.com/jaegertracing/jaeger/pkg/bearertoken github.com/jaegertracing/jaeger/pkg/clientcfg/clientcfghttp github.com/jaegertracing/jaeger/pkg/config/tlscfg github.com/jaegertracing/jaeger/pkg/discovery @@ -774,23 +861,27 @@ github.com/klauspost/compress/snappy github.com/klauspost/compress/zlib github.com/klauspost/compress/zstd github.com/klauspost/compress/zstd/internal/xxhash -# github.com/klauspost/cpuid/v2 v2.2.8 -## explicit; go 1.15 +# github.com/klauspost/cpuid/v2 v2.2.9 +## explicit; go 1.20 github.com/klauspost/cpuid/v2 # github.com/knadh/koanf v1.5.0 ## explicit; go 1.12 github.com/knadh/koanf/maps github.com/knadh/koanf/providers/confmap -# github.com/knadh/koanf/v2 v2.1.1 +# github.com/knadh/koanf/v2 v2.1.2 ## explicit; go 1.18 github.com/knadh/koanf/v2 # github.com/kylelemons/godebug v1.1.0 ## explicit; go 1.11 github.com/kylelemons/godebug/diff github.com/kylelemons/godebug/pretty -# github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c +# github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683 ## explicit; go 1.16 github.com/lufia/plan9stats +# github.com/magefile/mage v1.15.0 +## explicit; go 1.12 +github.com/magefile/mage/mg +github.com/magefile/mage/sh # github.com/magiconair/properties v1.8.7 ## explicit; go 1.19 github.com/magiconair/properties @@ -814,7 +905,7 @@ github.com/miekg/dns # github.com/minio/md5-simd v1.1.2 ## explicit; go 1.14 github.com/minio/md5-simd -# github.com/minio/minio-go/v7 v7.0.81 +# github.com/minio/minio-go/v7 v7.0.80 ## explicit; go 1.22 github.com/minio/minio-go/v7 github.com/minio/minio-go/v7/pkg/cors @@ -834,9 +925,6 @@ github.com/mitchellh/copystructure # github.com/mitchellh/go-homedir v1.1.0 ## explicit github.com/mitchellh/go-homedir -# github.com/mitchellh/go-testing-interface v1.0.0 -## explicit -github.com/mitchellh/go-testing-interface # github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c ## explicit; go 1.14 github.com/mitchellh/mapstructure @@ -852,35 +940,34 @@ github.com/modern-go/reflect2 # github.com/mostynb/go-grpc-compression v1.2.3 ## explicit; go 1.17 github.com/mostynb/go-grpc-compression/internal/snappy +github.com/mostynb/go-grpc-compression/internal/zstd github.com/mostynb/go-grpc-compression/nonclobbering/snappy +github.com/mostynb/go-grpc-compression/nonclobbering/zstd # github.com/munnerz/goautoneg 
v0.0.0-20191010083416-a7dc8b61c822 ## explicit github.com/munnerz/goautoneg # github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f ## explicit github.com/mwitkow/go-conntrack -# github.com/oklog/run v1.1.0 -## explicit; go 1.13 -github.com/oklog/run # github.com/oklog/ulid v1.3.1 ## explicit github.com/oklog/ulid # github.com/olekukonko/tablewriter v0.0.5 ## explicit; go 1.12 github.com/olekukonko/tablewriter -# github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.102.0 -## explicit; go 1.21.0 +# github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.116.0 +## explicit; go 1.22.0 github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter/internal/metadata -# github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter v0.102.0 -## explicit; go 1.21.0 +# github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter v0.116.0 +## explicit; go 1.22.0 github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporter/internal/metadata -# github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.102.0 -## explicit; go 1.21.0 -github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/localhostgate -# github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.102.0 -## explicit; go 1.21.0 +# github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.116.0 +## explicit; go 1.22.0 +github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/testutil +# github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.116.0 +## explicit; go 1.22.0 github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/idutils github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/occonventions github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/parseutils @@ -889,8 +976,8 @@ github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/ github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/timeutils/internal/ctimefmt github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/tracetranslator github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/traceutil -# github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.97.0 -## explicit; go 1.21 +# github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.116.0 +## explicit; go 1.22.0 github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/expr github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterconfig github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterexpr @@ -902,65 +989,70 @@ github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filter github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset/regexp github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset/strict github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterspan -# github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka v0.102.0 -## explicit; go 1.21.0 +# 
+## explicit; go 1.22.0
 github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka
 github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka/awsmsk
-# github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.102.0
-## explicit; go 1.21.0
+# github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.116.0
+## explicit; go 1.22.0
 github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent
-# github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal v0.102.0
-## explicit; go 1.21.0
+# github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal v0.116.0
+## explicit; go 1.22.0
 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal
-# github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.97.0
-## explicit; go 1.21
+# github.com/open-telemetry/opentelemetry-collector-contrib/pkg/kafka/topic v0.116.0
+## explicit; go 1.22.0
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/kafka/topic
+# github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.116.0
+## explicit; go 1.22.0
 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl
 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/logging
 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottldatapoint
 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottllog
 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlmetric
 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlresource
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlscope
 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspan
 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspanevent
 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/internal/ottlcommon
 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs
-# github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.102.0
-## explicit; go 1.21.0
+# github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.116.0
+## explicit; go 1.22.0
 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil
-# github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/azure v0.102.0
-## explicit; go 1.21.0
+# github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/azure v0.116.0
+## explicit; go 1.22.0
 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/azure
-# github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.102.0
-## explicit; go 1.21.0
+# github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.116.0
+## explicit; go 1.22.0
 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger
-# github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus v0.102.0
-## explicit; go 1.21.0
+# github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus v0.116.0
+## explicit; go 1.22.0
 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus
-# github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.102.0
-## explicit; go 1.21.0
+# github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.116.0
+## explicit; go 1.22.0
 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin/internal/zipkin
 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin/zipkinv1
 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin/zipkinv2
-# github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.97.0
-## explicit; go 1.21
+# github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.116.0
+## explicit; go 1.22.0
 github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor
 github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/internal/metadata
-# github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.102.0
-## explicit; go 1.21.0
+# github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.116.0
+## explicit; go 1.22.0
 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver
 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver/internal/metadata
-# github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver v0.102.0
-## explicit; go 1.21.0
+# github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver v0.116.0
+## explicit; go 1.22.0
 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver
 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver/internal/metadata
-# github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver v0.102.0
-## explicit; go 1.21.0
+# github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver v0.116.0
+## explicit; go 1.22.0
 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver
 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/internal/metadata
 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/internal/ocmetrics
 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver/internal/octrace
-# github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.102.0
-## explicit; go 1.21.0
+# github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.116.0
+## explicit; go 1.22.0
 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver
 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver/internal/metadata
 # github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e
@@ -1008,8 +1100,8 @@ github.com/parquet-go/parquet-go/internal/bytealg
 github.com/parquet-go/parquet-go/internal/debug
 github.com/parquet-go/parquet-go/internal/unsafecast
 github.com/parquet-go/parquet-go/sparse
-# github.com/pelletier/go-toml/v2 v2.2.2
-## explicit; go 1.16
+# github.com/pelletier/go-toml/v2 v2.2.3
+## explicit; go 1.21.0
 github.com/pelletier/go-toml/v2
 github.com/pelletier/go-toml/v2/internal/characters
 github.com/pelletier/go-toml/v2/internal/danger
@@ -1034,14 +1126,16 @@ github.com/pkg/errors
 # github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2
 ## explicit
 github.com/pmezard/go-difflib/difflib
-# github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c
+# github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55
 ## explicit; go 1.14
 github.com/power-devops/perfstat
 # github.com/prometheus/alertmanager v0.27.0
 ## explicit; go 1.21
 github.com/prometheus/alertmanager/api/v2/models
-# github.com/prometheus/client_golang v1.19.1
+# github.com/prometheus/client_golang v1.20.5
 ## explicit; go 1.20
+github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil
+github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header
 github.com/prometheus/client_golang/prometheus
 github.com/prometheus/client_golang/prometheus/collectors
 github.com/prometheus/client_golang/prometheus/collectors/version
@@ -1055,8 +1149,8 @@ github.com/prometheus/client_golang/prometheus/testutil/promlint/validations
 # github.com/prometheus/client_model v0.6.1
 ## explicit; go 1.19
 github.com/prometheus/client_model/go
-# github.com/prometheus/common v0.55.0
-## explicit; go 1.20
+# github.com/prometheus/common v0.61.0
+## explicit; go 1.21
 github.com/prometheus/common/config
 github.com/prometheus/common/expfmt
 github.com/prometheus/common/helpers/templates
@@ -1074,7 +1168,7 @@ github.com/prometheus/exporter-toolkit/web
 github.com/prometheus/procfs
 github.com/prometheus/procfs/internal/fs
 github.com/prometheus/procfs/internal/util
-# github.com/prometheus/prometheus v0.54.0
+# github.com/prometheus/prometheus v0.54.1
 ## explicit; go 1.21.0
 github.com/prometheus/prometheus/config
 github.com/prometheus/prometheus/discovery
@@ -1129,19 +1223,19 @@ github.com/prometheus/prometheus/util/strutil
 github.com/prometheus/prometheus/util/testutil
 github.com/prometheus/prometheus/util/zeropool
 github.com/prometheus/prometheus/web/api/v1
-# github.com/prometheus/statsd_exporter v0.26.0
-## explicit; go 1.18
+# github.com/prometheus/statsd_exporter v0.26.1
+## explicit; go 1.20
 github.com/prometheus/statsd_exporter/pkg/level
 # github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475
 ## explicit
 github.com/rcrowley/go-metrics
-# github.com/relvacode/iso8601 v1.4.0
+# github.com/relvacode/iso8601 v1.6.0
 ## explicit; go 1.13
 github.com/relvacode/iso8601
 # github.com/rivo/uniseg v0.4.7
 ## explicit; go 1.18
 github.com/rivo/uniseg
-# github.com/rs/cors v1.11.0
+# github.com/rs/cors v1.11.1
 ## explicit; go 1.13
 github.com/rs/cors
 github.com/rs/cors/internal
@@ -1163,17 +1257,14 @@ github.com/segmentio/fasthash/fnv1a
 # github.com/sercand/kuberesolver/v5 v5.1.1
 ## explicit; go 1.18
 github.com/sercand/kuberesolver/v5
-# github.com/shirou/gopsutil/v3 v3.24.4
-## explicit; go 1.15
-github.com/shirou/gopsutil/v3/common
-github.com/shirou/gopsutil/v3/cpu
-github.com/shirou/gopsutil/v3/internal/common
-github.com/shirou/gopsutil/v3/mem
-github.com/shirou/gopsutil/v3/net
-github.com/shirou/gopsutil/v3/process
-# github.com/shoenig/go-m1cpu v0.1.6
-## explicit; go 1.20
-github.com/shoenig/go-m1cpu
+# github.com/shirou/gopsutil/v4 v4.24.11
+## explicit; go 1.18
+github.com/shirou/gopsutil/v4/common
+github.com/shirou/gopsutil/v4/cpu
+github.com/shirou/gopsutil/v4/internal/common
+github.com/shirou/gopsutil/v4/mem
+github.com/shirou/gopsutil/v4/net
+github.com/shirou/gopsutil/v4/process
 # github.com/soheilhy/cmux v0.1.5
 ## explicit; go 1.11
 github.com/soheilhy/cmux
@@ -1197,7 +1288,7 @@ github.com/spf13/afero/mem
 # github.com/spf13/cast v1.6.0
 ## explicit; go 1.19
 github.com/spf13/cast
-# github.com/spf13/cobra v1.8.0
+# github.com/spf13/cobra v1.8.1
 ## explicit; go 1.15
 github.com/spf13/cobra
 # github.com/spf13/pflag v1.0.5
@@ -1222,20 +1313,24 @@ github.com/stoewer/parquet-cli/pkg/output
 # github.com/stretchr/objx v0.5.2
 ## explicit; go 1.20
 github.com/stretchr/objx
-# github.com/stretchr/testify v1.9.0
+# github.com/stretchr/testify v1.10.0
 ## explicit; go 1.17
 github.com/stretchr/testify/assert
+github.com/stretchr/testify/assert/yaml
 github.com/stretchr/testify/mock
 github.com/stretchr/testify/require
 # github.com/subosito/gotenv v1.6.0
 ## explicit; go 1.18
 github.com/subosito/gotenv
-# github.com/tklauser/go-sysconf v0.3.12
-## explicit; go 1.13
+# github.com/tklauser/go-sysconf v0.3.14
+## explicit; go 1.18
 github.com/tklauser/go-sysconf
-# github.com/tklauser/numcpus v0.6.1
-## explicit; go 1.13
+# github.com/tklauser/numcpus v0.8.0
+## explicit; go 1.18
 github.com/tklauser/numcpus
+# github.com/ua-parser/uap-go v0.0.0-20241012191800-bbb40edc15aa
+## explicit
+github.com/ua-parser/uap-go/uaparser
 # github.com/uber-go/atomic v1.4.0
 ## explicit
 github.com/uber-go/atomic
@@ -1338,128 +1433,156 @@ go.opencensus.io/trace
 go.opencensus.io/trace/internal
 go.opencensus.io/trace/propagation
 go.opencensus.io/trace/tracestate
-# go.opentelemetry.io/collector v0.102.1
-## explicit; go 1.21.0
-go.opentelemetry.io/collector/client
-go.opentelemetry.io/collector/internal/fanoutconsumer
+# go.opentelemetry.io/auto/sdk v1.1.0
+## explicit; go 1.22.0
+go.opentelemetry.io/auto/sdk
+go.opentelemetry.io/auto/sdk/internal/telemetry
+# go.opentelemetry.io/collector v0.116.0
+## explicit; go 1.22.0
 go.opentelemetry.io/collector/internal/httphelper
-go.opentelemetry.io/collector/internal/localhostgate
-go.opentelemetry.io/collector/internal/obsreportconfig
-go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics
-go.opentelemetry.io/collector/internal/sharedcomponent
-# go.opentelemetry.io/collector/component v0.102.1
-## explicit; go 1.21.0
+# go.opentelemetry.io/collector/client v1.22.0
+## explicit; go 1.22.0
+go.opentelemetry.io/collector/client
+# go.opentelemetry.io/collector/component v0.116.0
+## explicit; go 1.22.0
 go.opentelemetry.io/collector/component
+# go.opentelemetry.io/collector/component/componentstatus v0.116.0
+## explicit; go 1.22.0
+go.opentelemetry.io/collector/component/componentstatus
+# go.opentelemetry.io/collector/component/componenttest v0.116.0
+## explicit; go 1.22.0
 go.opentelemetry.io/collector/component/componenttest
-# go.opentelemetry.io/collector/config/configauth v0.102.1
-## explicit; go 1.21.0
+# go.opentelemetry.io/collector/config/configauth v0.116.0
+## explicit; go 1.22.0
 go.opentelemetry.io/collector/config/configauth
-# go.opentelemetry.io/collector/config/configcompression v1.9.0
-## explicit; go 1.21.0
+# go.opentelemetry.io/collector/config/configcompression v1.22.0
+## explicit; go 1.22.0
 go.opentelemetry.io/collector/config/configcompression
-# go.opentelemetry.io/collector/config/configgrpc v0.102.1
-## explicit; go 1.21.0
+# go.opentelemetry.io/collector/config/configgrpc v0.116.0
+## explicit; go 1.22.0
 go.opentelemetry.io/collector/config/configgrpc
-go.opentelemetry.io/collector/config/configgrpc/internal
-# go.opentelemetry.io/collector/config/confighttp v0.102.1
-## explicit; go 1.21.0
+# go.opentelemetry.io/collector/config/confighttp v0.116.0
+## explicit; go 1.22.0
 go.opentelemetry.io/collector/config/confighttp
-# go.opentelemetry.io/collector/config/confignet v0.102.1
-## explicit; go 1.21.0
+go.opentelemetry.io/collector/config/confighttp/internal
+# go.opentelemetry.io/collector/config/confignet v1.22.0
+## explicit; go 1.22.0
 go.opentelemetry.io/collector/config/confignet
-# go.opentelemetry.io/collector/config/configopaque v1.18.0
+# go.opentelemetry.io/collector/config/configopaque v1.22.0
 ## explicit; go 1.22.0
 go.opentelemetry.io/collector/config/configopaque
-# go.opentelemetry.io/collector/config/configretry v0.102.1
-## explicit; go 1.21.0
+# go.opentelemetry.io/collector/config/configretry v1.22.0
+## explicit; go 1.22.0
 go.opentelemetry.io/collector/config/configretry
-# go.opentelemetry.io/collector/config/configtelemetry v0.102.1
-## explicit; go 1.21.0
+# go.opentelemetry.io/collector/config/configtelemetry v0.116.0
+## explicit; go 1.22.0
 go.opentelemetry.io/collector/config/configtelemetry
-# go.opentelemetry.io/collector/config/configtls v1.18.0
+# go.opentelemetry.io/collector/config/configtls v1.22.0
 ## explicit; go 1.22.0
 go.opentelemetry.io/collector/config/configtls
-# go.opentelemetry.io/collector/config/internal v0.102.1
-## explicit; go 1.21.0
+# go.opentelemetry.io/collector/config/internal v0.116.0
+## explicit; go 1.22.0
 go.opentelemetry.io/collector/config/internal
-# go.opentelemetry.io/collector/confmap v0.102.1
-## explicit; go 1.21.0
+# go.opentelemetry.io/collector/confmap v1.22.0
+## explicit; go 1.22.0
 go.opentelemetry.io/collector/confmap
-go.opentelemetry.io/collector/confmap/internal/envvar
 go.opentelemetry.io/collector/confmap/internal/mapstructure
-go.opentelemetry.io/collector/confmap/provider/internal
-go.opentelemetry.io/collector/confmap/provider/internal/configurablehttpprovider
-# go.opentelemetry.io/collector/confmap/converter/expandconverter v0.102.1
-## explicit; go 1.21.0
-go.opentelemetry.io/collector/confmap/converter/expandconverter
-# go.opentelemetry.io/collector/confmap/provider/envprovider v0.102.1
-## explicit; go 1.21.0
-go.opentelemetry.io/collector/confmap/provider/envprovider
-# go.opentelemetry.io/collector/confmap/provider/fileprovider v0.102.1
-## explicit; go 1.21.0
-go.opentelemetry.io/collector/confmap/provider/fileprovider
-# go.opentelemetry.io/collector/confmap/provider/httpprovider v0.102.1
-## explicit; go 1.21.0
-go.opentelemetry.io/collector/confmap/provider/httpprovider
-# go.opentelemetry.io/collector/confmap/provider/httpsprovider v0.102.1
-## explicit; go 1.21.0
-go.opentelemetry.io/collector/confmap/provider/httpsprovider
-# go.opentelemetry.io/collector/confmap/provider/yamlprovider v0.102.1
-## explicit; go 1.21.0
-go.opentelemetry.io/collector/confmap/provider/yamlprovider
-# go.opentelemetry.io/collector/connector v0.102.1
-## explicit; go 1.21.0
+# go.opentelemetry.io/collector/connector v0.116.0
+## explicit; go 1.22.0
 go.opentelemetry.io/collector/connector
-# go.opentelemetry.io/collector/consumer v0.102.1
-## explicit; go 1.21.0
+go.opentelemetry.io/collector/connector/internal
+# go.opentelemetry.io/collector/connector/connectortest v0.116.0
+## explicit; go 1.22.0
+go.opentelemetry.io/collector/connector/connectortest
+# go.opentelemetry.io/collector/connector/xconnector v0.116.0
+## explicit; go 1.22.0
+go.opentelemetry.io/collector/connector/xconnector
+# go.opentelemetry.io/collector/consumer v1.22.0
+## explicit; go 1.22.0
 go.opentelemetry.io/collector/consumer
+go.opentelemetry.io/collector/consumer/internal
+# go.opentelemetry.io/collector/consumer/consumererror v0.116.0
+## explicit; go 1.22.0
 go.opentelemetry.io/collector/consumer/consumererror
+go.opentelemetry.io/collector/consumer/consumererror/internal
+# go.opentelemetry.io/collector/consumer/consumererror/xconsumererror v0.116.0
+## explicit; go 1.22.0
+go.opentelemetry.io/collector/consumer/consumererror/xconsumererror
+# go.opentelemetry.io/collector/consumer/consumertest v0.116.0
+## explicit; go 1.22.0
 go.opentelemetry.io/collector/consumer/consumertest
-# go.opentelemetry.io/collector/exporter v0.102.1
-## explicit; go 1.21.0
+# go.opentelemetry.io/collector/consumer/xconsumer v0.116.0
+## explicit; go 1.22.0
+go.opentelemetry.io/collector/consumer/xconsumer
+# go.opentelemetry.io/collector/exporter v0.116.0
+## explicit; go 1.22.0
 go.opentelemetry.io/collector/exporter
 go.opentelemetry.io/collector/exporter/exporterbatcher
 go.opentelemetry.io/collector/exporter/exporterhelper
+go.opentelemetry.io/collector/exporter/exporterhelper/internal
 go.opentelemetry.io/collector/exporter/exporterhelper/internal/metadata
 go.opentelemetry.io/collector/exporter/exporterqueue
-go.opentelemetry.io/collector/exporter/exportertest
+go.opentelemetry.io/collector/exporter/internal
 go.opentelemetry.io/collector/exporter/internal/experr
 go.opentelemetry.io/collector/exporter/internal/queue
-# go.opentelemetry.io/collector/exporter/otlpexporter v0.102.1
-## explicit; go 1.21.0
+# go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper v0.116.0
+## explicit; go 1.22.0
+go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper
+# go.opentelemetry.io/collector/exporter/exportertest v0.116.0
+## explicit; go 1.22.0
+go.opentelemetry.io/collector/exporter/exportertest
+# go.opentelemetry.io/collector/exporter/otlpexporter v0.116.0
+## explicit; go 1.22.0
 go.opentelemetry.io/collector/exporter/otlpexporter
 go.opentelemetry.io/collector/exporter/otlpexporter/internal/metadata
-# go.opentelemetry.io/collector/exporter/otlphttpexporter v0.102.1
-## explicit; go 1.21.0
+# go.opentelemetry.io/collector/exporter/otlphttpexporter v0.116.0
+## explicit; go 1.22.0
 go.opentelemetry.io/collector/exporter/otlphttpexporter
 go.opentelemetry.io/collector/exporter/otlphttpexporter/internal/metadata
-# go.opentelemetry.io/collector/extension v0.102.1
-## explicit; go 1.21.0
+# go.opentelemetry.io/collector/exporter/xexporter v0.116.0
+## explicit; go 1.22.0
+go.opentelemetry.io/collector/exporter/xexporter
+# go.opentelemetry.io/collector/extension v0.116.0
+## explicit; go 1.22.0
 go.opentelemetry.io/collector/extension
-go.opentelemetry.io/collector/extension/experimental/storage
-# go.opentelemetry.io/collector/extension/auth v0.102.1
-## explicit; go 1.21.0
+# go.opentelemetry.io/collector/extension/auth v0.116.0
+## explicit; go 1.22.0
 go.opentelemetry.io/collector/extension/auth
-# go.opentelemetry.io/collector/featuregate v1.9.0
-## explicit; go 1.21.0
+# go.opentelemetry.io/collector/extension/experimental/storage v0.116.0
+## explicit; go 1.22.0
+go.opentelemetry.io/collector/extension/experimental/storage
+# go.opentelemetry.io/collector/extension/extensioncapabilities v0.116.0
+## explicit; go 1.22.0
+go.opentelemetry.io/collector/extension/extensioncapabilities
+# go.opentelemetry.io/collector/extension/extensiontest v0.116.0
+## explicit; go 1.22.0
+go.opentelemetry.io/collector/extension/extensiontest
+# go.opentelemetry.io/collector/featuregate v1.22.0
+## explicit; go 1.22.0
 go.opentelemetry.io/collector/featuregate
-# go.opentelemetry.io/collector/otelcol v0.102.1
-## explicit; go 1.21.0
+# go.opentelemetry.io/collector/internal/fanoutconsumer v0.116.0
+## explicit; go 1.22.0
+go.opentelemetry.io/collector/internal/fanoutconsumer
+# go.opentelemetry.io/collector/internal/sharedcomponent v0.116.0
+## explicit; go 1.22.0
+go.opentelemetry.io/collector/internal/sharedcomponent
+# go.opentelemetry.io/collector/otelcol v0.116.0
+## explicit; go 1.22.0
 go.opentelemetry.io/collector/otelcol
 go.opentelemetry.io/collector/otelcol/internal/configunmarshaler
 go.opentelemetry.io/collector/otelcol/internal/grpclog
-# go.opentelemetry.io/collector/pdata v1.12.0
-## explicit; go 1.21.0
+# go.opentelemetry.io/collector/pdata v1.22.0
+## explicit; go 1.22.0
 go.opentelemetry.io/collector/pdata/internal
 go.opentelemetry.io/collector/pdata/internal/data
 go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/logs/v1
 go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/metrics/v1
-go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1experimental
+go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/profiles/v1development
 go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/trace/v1
 go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1
 go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1
 go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1
-go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1experimental
+go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1development
 go.opentelemetry.io/collector/pdata/internal/data/protogen/resource/v1
 go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1
 go.opentelemetry.io/collector/pdata/internal/json
@@ -1471,73 +1594,107 @@ go.opentelemetry.io/collector/pdata/pmetric
 go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp
 go.opentelemetry.io/collector/pdata/ptrace
 go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp
-# go.opentelemetry.io/collector/pdata/testdata v0.102.1
-## explicit; go 1.21.0
+# go.opentelemetry.io/collector/pdata/pprofile v0.116.0
+## explicit; go 1.22.0
+go.opentelemetry.io/collector/pdata/pprofile
+go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp
+# go.opentelemetry.io/collector/pdata/testdata v0.116.0
+## explicit; go 1.22.0
 go.opentelemetry.io/collector/pdata/testdata
-# go.opentelemetry.io/collector/processor v0.102.1
-## explicit; go 1.21.0
+# go.opentelemetry.io/collector/pipeline v0.116.0
+## explicit; go 1.22.0
+go.opentelemetry.io/collector/pipeline
+go.opentelemetry.io/collector/pipeline/internal/globalsignal
+# go.opentelemetry.io/collector/pipeline/xpipeline v0.116.0
+## explicit; go 1.22.0
+go.opentelemetry.io/collector/pipeline/xpipeline
+# go.opentelemetry.io/collector/processor v0.116.0
+## explicit; go 1.22.0
 go.opentelemetry.io/collector/processor
+go.opentelemetry.io/collector/processor/internal
 go.opentelemetry.io/collector/processor/processorhelper
 go.opentelemetry.io/collector/processor/processorhelper/internal/metadata
-# go.opentelemetry.io/collector/receiver v0.102.1
-## explicit; go 1.21.0
+# go.opentelemetry.io/collector/processor/processortest v0.116.0
+## explicit; go 1.22.0
+go.opentelemetry.io/collector/processor/processortest
+# go.opentelemetry.io/collector/processor/xprocessor v0.116.0
+## explicit; go 1.22.0
+go.opentelemetry.io/collector/processor/xprocessor
+# go.opentelemetry.io/collector/receiver v0.116.0
+## explicit; go 1.22.0
 go.opentelemetry.io/collector/receiver
+go.opentelemetry.io/collector/receiver/internal
 go.opentelemetry.io/collector/receiver/receiverhelper
 go.opentelemetry.io/collector/receiver/receiverhelper/internal/metadata
-go.opentelemetry.io/collector/receiver/receivertest
-# go.opentelemetry.io/collector/receiver/otlpreceiver v0.102.1
-## explicit; go 1.21.0
+# go.opentelemetry.io/collector/receiver/otlpreceiver v0.116.0
+## explicit; go 1.22.0
 go.opentelemetry.io/collector/receiver/otlpreceiver
 go.opentelemetry.io/collector/receiver/otlpreceiver/internal/errors
 go.opentelemetry.io/collector/receiver/otlpreceiver/internal/logs
 go.opentelemetry.io/collector/receiver/otlpreceiver/internal/metadata
 go.opentelemetry.io/collector/receiver/otlpreceiver/internal/metrics
+go.opentelemetry.io/collector/receiver/otlpreceiver/internal/profiles
 go.opentelemetry.io/collector/receiver/otlpreceiver/internal/trace
-# go.opentelemetry.io/collector/semconv v0.105.0
-## explicit; go 1.21.0
+# go.opentelemetry.io/collector/receiver/receivertest v0.116.0
+## explicit; go 1.22.0
+go.opentelemetry.io/collector/receiver/receivertest
+# go.opentelemetry.io/collector/receiver/xreceiver v0.116.0
+## explicit; go 1.22.0
+go.opentelemetry.io/collector/receiver/xreceiver
+# go.opentelemetry.io/collector/semconv v0.116.0
+## explicit; go 1.22.0
+go.opentelemetry.io/collector/semconv/v1.12.0
 go.opentelemetry.io/collector/semconv/v1.13.0
+go.opentelemetry.io/collector/semconv/v1.16.0
 go.opentelemetry.io/collector/semconv/v1.18.0
+go.opentelemetry.io/collector/semconv/v1.25.0
+go.opentelemetry.io/collector/semconv/v1.26.0
+go.opentelemetry.io/collector/semconv/v1.27.0
 go.opentelemetry.io/collector/semconv/v1.6.1
 go.opentelemetry.io/collector/semconv/v1.9.0
-# go.opentelemetry.io/collector/service v0.102.1
-## explicit; go 1.21.0
+# go.opentelemetry.io/collector/service v0.116.0
+## explicit; go 1.22.0
 go.opentelemetry.io/collector/service
 go.opentelemetry.io/collector/service/extensions
+go.opentelemetry.io/collector/service/internal/builders
 go.opentelemetry.io/collector/service/internal/capabilityconsumer
 go.opentelemetry.io/collector/service/internal/components
 go.opentelemetry.io/collector/service/internal/graph
+go.opentelemetry.io/collector/service/internal/metadata
 go.opentelemetry.io/collector/service/internal/proctelemetry
 go.opentelemetry.io/collector/service/internal/resource
-go.opentelemetry.io/collector/service/internal/servicetelemetry
 go.opentelemetry.io/collector/service/internal/status
 go.opentelemetry.io/collector/service/internal/zpages
 go.opentelemetry.io/collector/service/pipelines
 go.opentelemetry.io/collector/service/telemetry
-go.opentelemetry.io/collector/service/telemetry/internal
+go.opentelemetry.io/collector/service/telemetry/internal/otelinit
-# go.opentelemetry.io/contrib/bridges/prometheus v0.53.0
-## explicit; go 1.21
+# go.opentelemetry.io/contrib/bridges/otelzap v0.8.0
+## explicit; go 1.22.0
+go.opentelemetry.io/contrib/bridges/otelzap
+# go.opentelemetry.io/contrib/bridges/prometheus v0.58.0
+## explicit; go 1.22.0
 go.opentelemetry.io/contrib/bridges/prometheus
-# go.opentelemetry.io/contrib/config v0.7.0
-## explicit; go 1.21
+# go.opentelemetry.io/contrib/config v0.10.0
+## explicit; go 1.22
 go.opentelemetry.io/contrib/config
-# go.opentelemetry.io/contrib/exporters/autoexport v0.53.0
-## explicit; go 1.21
+# go.opentelemetry.io/contrib/exporters/autoexport v0.58.0
+## explicit; go 1.22.7
 go.opentelemetry.io/contrib/exporters/autoexport
-# go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0
-## explicit; go 1.21
+# go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0
+## explicit; go 1.22.7
 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc
 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal
-# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0
-## explicit; go 1.22
+# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0
+## explicit; go 1.22.0
 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp
 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request
 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv
 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil
-# go.opentelemetry.io/contrib/propagators/b3 v1.27.0
-## explicit; go 1.21
+# go.opentelemetry.io/contrib/propagators/b3 v1.33.0
+## explicit; go 1.22.0
 go.opentelemetry.io/contrib/propagators/b3
-# go.opentelemetry.io/otel v1.31.0
-## explicit; go 1.22
+# go.opentelemetry.io/otel v1.33.0
+## explicit; go 1.22.0
 go.opentelemetry.io/otel
 go.opentelemetry.io/otel/attribute
 go.opentelemetry.io/otel/baggage
@@ -1552,15 +1709,15 @@ go.opentelemetry.io/otel/semconv/v1.20.0
 go.opentelemetry.io/otel/semconv/v1.21.0
 go.opentelemetry.io/otel/semconv/v1.25.0
 go.opentelemetry.io/otel/semconv/v1.26.0
-# go.opentelemetry.io/otel/bridge/opencensus v1.27.0
-## explicit; go 1.21
+# go.opentelemetry.io/otel/bridge/opencensus v1.33.0
+## explicit; go 1.22.0
 go.opentelemetry.io/otel/bridge/opencensus
 go.opentelemetry.io/otel/bridge/opencensus/internal
 go.opentelemetry.io/otel/bridge/opencensus/internal/oc2otel
 go.opentelemetry.io/otel/bridge/opencensus/internal/ocmetric
 go.opentelemetry.io/otel/bridge/opencensus/internal/otel2oc
-# go.opentelemetry.io/otel/bridge/opentracing v1.26.0
-## explicit; go 1.21
+# go.opentelemetry.io/otel/bridge/opentracing v1.33.0
+## explicit; go 1.22.0
 go.opentelemetry.io/otel/bridge/opentracing
 go.opentelemetry.io/otel/bridge/opentracing/migration
 # go.opentelemetry.io/otel/exporters/jaeger v1.17.0
@@ -1570,69 +1727,76 @@ go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent
 go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger
 go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore
 go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift
-# go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.4.0
-## explicit; go 1.21
+# go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.9.0
+## explicit; go 1.22.7
+go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc
+go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/retry
+go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/transform
+# go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.9.0
+## explicit; go 1.22.7
 go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp
 go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/retry
 go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp/internal/transform
-# go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.28.0
-## explicit; go 1.21
+# go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.33.0
+## explicit; go 1.22.7
 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc
 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal
 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig
 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf
 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry
 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform
-# go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.28.0
-## explicit; go 1.21
+# go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.33.0
+## explicit; go 1.22.7
 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp
 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal
 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig
 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf
 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry
 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform
-# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0
-## explicit; go 1.21
+# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0
+## explicit; go 1.22.7
 go.opentelemetry.io/otel/exporters/otlp/otlptrace
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform
-# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0
-## explicit; go 1.21
+# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0
+## explicit; go 1.22.7
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry
-# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0
-## explicit; go 1.21
+# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0
+## explicit; go 1.22.7
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry
-# go.opentelemetry.io/otel/exporters/prometheus v0.50.0
-## explicit; go 1.21
+# go.opentelemetry.io/otel/exporters/prometheus v0.55.0
+## explicit; go 1.22.0
 go.opentelemetry.io/otel/exporters/prometheus
-# go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.4.0
-## explicit; go 1.21
+# go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.9.0
+## explicit; go 1.22.0
 go.opentelemetry.io/otel/exporters/stdout/stdoutlog
-# go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.28.0
-## explicit; go 1.21
+# go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.33.0
+## explicit; go 1.22.0
 go.opentelemetry.io/otel/exporters/stdout/stdoutmetric
-# go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0
-## explicit; go 1.21
+# go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.33.0
+## explicit; go 1.22.0
 go.opentelemetry.io/otel/exporters/stdout/stdouttrace
-# go.opentelemetry.io/otel/log v0.4.0
-## explicit; go 1.21
+# go.opentelemetry.io/otel/log v0.9.0
+## explicit; go 1.22.0
 go.opentelemetry.io/otel/log
 go.opentelemetry.io/otel/log/embedded
+go.opentelemetry.io/otel/log/global
+go.opentelemetry.io/otel/log/internal/global
 go.opentelemetry.io/otel/log/noop
-# go.opentelemetry.io/otel/metric v1.31.0
-## explicit; go 1.22
+# go.opentelemetry.io/otel/metric v1.33.0
+## explicit; go 1.22.0
 go.opentelemetry.io/otel/metric
 go.opentelemetry.io/otel/metric/embedded
 go.opentelemetry.io/otel/metric/noop
-# go.opentelemetry.io/otel/sdk v1.31.0
-## explicit; go 1.22
+# go.opentelemetry.io/otel/sdk v1.33.0
+## explicit; go 1.22.0
 go.opentelemetry.io/otel/sdk
 go.opentelemetry.io/otel/sdk/instrumentation
 go.opentelemetry.io/otel/sdk/internal/env
@@ -1640,24 +1804,25 @@ go.opentelemetry.io/otel/sdk/internal/x
 go.opentelemetry.io/otel/sdk/resource
 go.opentelemetry.io/otel/sdk/trace
 go.opentelemetry.io/otel/sdk/trace/tracetest
-# go.opentelemetry.io/otel/sdk/log v0.4.0
-## explicit; go 1.21
+# go.opentelemetry.io/otel/sdk/log v0.9.0
+## explicit; go 1.22.0
 go.opentelemetry.io/otel/sdk/log
-# go.opentelemetry.io/otel/sdk/metric v1.28.0
-## explicit; go 1.21
+go.opentelemetry.io/otel/sdk/log/internal/x
+# go.opentelemetry.io/otel/sdk/metric v1.33.0
+## explicit; go 1.22.0
 go.opentelemetry.io/otel/sdk/metric
+go.opentelemetry.io/otel/sdk/metric/exemplar
 go.opentelemetry.io/otel/sdk/metric/internal
 go.opentelemetry.io/otel/sdk/metric/internal/aggregate
-go.opentelemetry.io/otel/sdk/metric/internal/exemplar
 go.opentelemetry.io/otel/sdk/metric/internal/x
 go.opentelemetry.io/otel/sdk/metric/metricdata
-# go.opentelemetry.io/otel/trace v1.31.0
-## explicit; go 1.22
+# go.opentelemetry.io/otel/trace v1.33.0
+## explicit; go 1.22.0
 go.opentelemetry.io/otel/trace
 go.opentelemetry.io/otel/trace/embedded
 go.opentelemetry.io/otel/trace/noop
-# go.opentelemetry.io/proto/otlp v1.3.1
-## explicit; go 1.17
+# go.opentelemetry.io/proto/otlp v1.4.0
+## explicit; go 1.22.7
 go.opentelemetry.io/proto/otlp/collector/logs/v1
 go.opentelemetry.io/proto/otlp/collector/metrics/v1
 go.opentelemetry.io/proto/otlp/collector/trace/v1
@@ -1705,20 +1870,24 @@ golang.org/x/crypto/md4
 golang.org/x/crypto/pbkdf2
 golang.org/x/crypto/pkcs12
 golang.org/x/crypto/pkcs12/internal/rc2
-# golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842
-## explicit; go 1.20
+# golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f
+## explicit; go 1.22.0
 golang.org/x/exp/constraints
 golang.org/x/exp/maps
 golang.org/x/exp/slices
 golang.org/x/exp/slog
 golang.org/x/exp/slog/internal
 golang.org/x/exp/slog/internal/buffer
-# golang.org/x/mod v0.19.0
-## explicit; go 1.18
+# golang.org/x/mod v0.22.0
+## explicit; go 1.22.0
 golang.org/x/mod/semver
-# golang.org/x/net v0.31.0
+# golang.org/x/net v0.32.0
 ## explicit; go 1.18
 golang.org/x/net/bpf
+golang.org/x/net/context
+golang.org/x/net/html
+golang.org/x/net/html/atom
+golang.org/x/net/html/charset
 golang.org/x/net/http/httpguts
 golang.org/x/net/http/httpproxy
 golang.org/x/net/http2
@@ -1735,7 +1904,7 @@ golang.org/x/net/netutil
 golang.org/x/net/proxy
 golang.org/x/net/publicsuffix
 golang.org/x/net/trace
-# golang.org/x/oauth2 v0.21.0
+# golang.org/x/oauth2 v0.24.0
 ## explicit; go 1.18
 golang.org/x/oauth2
 golang.org/x/oauth2/authhandler
@@ -1764,6 +1933,7 @@ golang.org/x/sys/windows/svc/eventlog
 ## explicit; go 1.18
 golang.org/x/text/encoding
 golang.org/x/text/encoding/charmap
+golang.org/x/text/encoding/htmlindex
 golang.org/x/text/encoding/ianaindex
 golang.org/x/text/encoding/internal
 golang.org/x/text/encoding/internal/identifier
@@ -1772,7 +1942,11 @@ golang.org/x/text/encoding/korean
 golang.org/x/text/encoding/simplifiedchinese
 golang.org/x/text/encoding/traditionalchinese
 golang.org/x/text/encoding/unicode
+golang.org/x/text/internal/language
+golang.org/x/text/internal/language/compact
+golang.org/x/text/internal/tag
 golang.org/x/text/internal/utf8internal
+golang.org/x/text/language
 golang.org/x/text/runes
 golang.org/x/text/secure/bidirule
 golang.org/x/text/transform
@@ -1781,11 +1955,12 @@ golang.org/x/text/unicode/norm
 # golang.org/x/time v0.5.0
 ## explicit; go 1.18
 golang.org/x/time/rate
-# golang.org/x/tools v0.23.0
-## explicit; go 1.19
+# golang.org/x/tools v0.27.0
+## explicit; go 1.22.0
 golang.org/x/tools/go/gcexportdata
 golang.org/x/tools/go/packages
 golang.org/x/tools/go/types/objectpath
+golang.org/x/tools/go/types/typeutil
 golang.org/x/tools/internal/aliases
 golang.org/x/tools/internal/event
 golang.org/x/tools/internal/event/core
@@ -1796,11 +1971,11 @@ golang.org/x/tools/internal/gocommand
 golang.org/x/tools/internal/packagesinternal
 golang.org/x/tools/internal/pkgbits
 golang.org/x/tools/internal/stdlib
-golang.org/x/tools/internal/tokeninternal
+golang.org/x/tools/internal/typeparams
 golang.org/x/tools/internal/typesinternal
 golang.org/x/tools/internal/versions
-# gonum.org/v1/gonum v0.15.0
-## explicit; go 1.21
+# gonum.org/v1/gonum v0.15.1
+## explicit; go 1.22
 gonum.org/v1/gonum/blas
 gonum.org/v1/gonum/blas/blas64
 gonum.org/v1/gonum/blas/cblas128
@@ -1809,7 +1984,6 @@ gonum.org/v1/gonum/floats
 gonum.org/v1/gonum/floats/scalar
 gonum.org/v1/gonum/graph
 gonum.org/v1/gonum/graph/internal/linear
-gonum.org/v1/gonum/graph/internal/ordered
 gonum.org/v1/gonum/graph/internal/set
 gonum.org/v1/gonum/graph/iterator
 gonum.org/v1/gonum/graph/set/uid
@@ -1822,6 +1996,7 @@ gonum.org/v1/gonum/internal/asm/f32
 gonum.org/v1/gonum/internal/asm/f64
 gonum.org/v1/gonum/internal/cmplx64
 gonum.org/v1/gonum/internal/math32
+gonum.org/v1/gonum/internal/order
 gonum.org/v1/gonum/lapack
 gonum.org/v1/gonum/lapack/gonum
 gonum.org/v1/gonum/lapack/lapack64
@@ -1850,18 +2025,18 @@ google.golang.org/api/transport/http/internal/propagation
 ## explicit; go 1.20
 google.golang.org/genproto/googleapis/type/date
 google.golang.org/genproto/googleapis/type/expr
-# google.golang.org/genproto/googleapis/api v0.0.0-20240711142825-46eb208f015d
-## explicit; go 1.20
+# google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576
+## explicit; go 1.21
 google.golang.org/genproto/googleapis/api
 google.golang.org/genproto/googleapis/api/annotations
 google.golang.org/genproto/googleapis/api/httpbody
-# google.golang.org/genproto/googleapis/rpc v0.0.0-20240711142825-46eb208f015d
-## explicit; go 1.20
+# google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576
+## explicit; go 1.21
 google.golang.org/genproto/googleapis/rpc/code
 google.golang.org/genproto/googleapis/rpc/errdetails
 google.golang.org/genproto/googleapis/rpc/status
-# google.golang.org/grpc v1.65.0
-## explicit; go 1.21
+# google.golang.org/grpc v1.68.1
+## explicit; go 1.22
 google.golang.org/grpc
 google.golang.org/grpc/attributes
 google.golang.org/grpc/backoff
@@ -1871,6 +2046,8 @@ google.golang.org/grpc/balancer/grpclb
 google.golang.org/grpc/balancer/grpclb/grpc_lb_v1
 google.golang.org/grpc/balancer/grpclb/state
 google.golang.org/grpc/balancer/pickfirst
+google.golang.org/grpc/balancer/pickfirst/internal
+google.golang.org/grpc/balancer/pickfirst/pickfirstleaf
 google.golang.org/grpc/balancer/roundrobin
 google.golang.org/grpc/binarylog/grpc_binarylog_v1
 google.golang.org/grpc/channelz
@@ -1890,7 +2067,9 @@ google.golang.org/grpc/credentials/oauth
 google.golang.org/grpc/encoding
 google.golang.org/grpc/encoding/gzip
 google.golang.org/grpc/encoding/proto
+google.golang.org/grpc/experimental/stats
 google.golang.org/grpc/grpclog
+google.golang.org/grpc/grpclog/internal
 google.golang.org/grpc/health
 google.golang.org/grpc/health/grpc_health_v1
 google.golang.org/grpc/internal
@@ -1915,18 +2094,16 @@ google.golang.org/grpc/internal/resolver/dns/internal
 google.golang.org/grpc/internal/resolver/passthrough
 google.golang.org/grpc/internal/resolver/unix
 google.golang.org/grpc/internal/serviceconfig
+google.golang.org/grpc/internal/stats
 google.golang.org/grpc/internal/status
 google.golang.org/grpc/internal/syscall
 google.golang.org/grpc/internal/transport
 google.golang.org/grpc/internal/transport/networktype
 google.golang.org/grpc/internal/xds
 google.golang.org/grpc/keepalive
+google.golang.org/grpc/mem
 google.golang.org/grpc/metadata
 google.golang.org/grpc/peer
-google.golang.org/grpc/reflection
-google.golang.org/grpc/reflection/grpc_reflection_v1
-google.golang.org/grpc/reflection/grpc_reflection_v1alpha
-google.golang.org/grpc/reflection/internal
 google.golang.org/grpc/resolver
 google.golang.org/grpc/resolver/dns
 google.golang.org/grpc/resolver/manual